licensePlateDetectorOptimized.py
print("\n\nLOADING PROGRAM\n\n")
import cv2 as cv
print("Loaded CV")
import numpy as np
print("Loaded NP")
import tensorflow as tf
print("Loaded TF")
import imutils
print("Loaded IMUTILS")
import os
print("Loaded OS")
'''
SOME NOTES ABOUT THE PROGRAM:
1) Make sure to change the paths at the top of the file to reflect the correct paths to your files
2) The program is slow right now. I am working on improvements
3) All togglable settings are at the top of the file and in the __init__ / settings_init functions of the FindPlate class
Have fun!! :)
'''
### CHANGE THESE PATHS ###
#the paths below are the paths that work on my machine!!
video_path = "/Users/tristanbrigham/Downloads/BostonVid.mp4"
folder_path = os.getcwd() + "/MrCrutcherProjects/LicensePlateProject/Tristans_Code/"
training_data_path = "/Users/tristanbrigham/GithubProjects/AI_Training_Data/LicensePlateProject/"
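# Optional sanity check (a sketch added here, not part of the original program): warn early if the
# configured paths do not exist on this machine, since every later step depends on them.
for _path in (video_path, folder_path, training_data_path):
    if not os.path.exists(_path):
        print("WARNING: path not found: " + _path)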
### GLOBAL VARIABLES FOR THE PROGRAM ###
show_images_bool = True #if true, shows the images that are being processed
collect_data = False #if true, asks the user for data on what letter is detected. input nothing if image is not a letter or contains more than one letter
get_chars = True #if true, applies the algorithm model to the characters that are detected to get what the plate says
optimize = True #checks to see whether the user only wants the program to analyze the bottom portion of the vid/image
debug = False #if true, shows the gray ROI's and the license plate ROI's
checkIndividual = False #if true, this waits for user input before showing each contour on each image
start_frame_number = 0 #where does the user want the video to start?
frames_skipped = 30 #how many frames pass before the frame is analyzed (for instance, analyze every 20th frame if this value is 20)
letter_dict = {}    #init the letter dictionary to get the letters that are put through the NN
model = tf.keras.models.load_model(folder_path + "kerasModelandData/model.h5")    #getting the model and loading it
########################################################################################
#################################### GENERAL SETUP #####################################
########################################################################################
def skip_forward():    #a function to skip forward in the video
    frame_count = cap.get(cv.CAP_PROP_POS_FRAMES)
    cap.set(cv.CAP_PROP_POS_FRAMES, frame_count + 1000)
def setup_dictionary():    #a function to set up the dictionary mapping values to letters for the NN
    with open(folder_path + "kerasModelandData/alphabet.txt") as alphabet:
        for count, line in enumerate(alphabet.readlines()):
            letter_dict[count] = line[0]
    print(letter_dict)
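# Assumed format of kerasModelandData/alphabet.txt (not verified here): one character per line,
# in the same order as the model's output classes, e.g.
#   A
#   B
#   ...
# which would make letter_dict look like {0: 'A', 1: 'B', ...}.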
### Class for plate detection
########################################################################################
#################################### SETUP PROGRAM! ####################################
########################################################################################
class FindPlate:
    #should maybe make the parameters global variables or controlled by the command line
    #Have to adjust so that the min and max are larger when analyzing the images and smaller when looking at the vids
    def __init__(self, counter, check_wait = False, imgAddress = None, img = None):
        self.check_wait = check_wait    #initializing whether we need to wait between drawing contours for debugging
        if imgAddress is None and img is not None:    #getting the image from the video
            self.img = img
        elif imgAddress is not None and img is None:
            self.img = cv.resize(cv.imread(imgAddress), (860, 480))
        else:
            print("-----------------------ERROR FINDING IMAGE-----------------------")
            exit(0)
        if counter == 0:
            self.setup_exec()    #execute the program every certain amount of frames
        elif show_images_bool:
            self.show_images()    #Show the images if that option is on
            self.check_keys()
    def setup_exec(self):
        self.settings_init()    #initializing all of the settings
        if optimize:    #Currently, optimize makes it so that only the bottom portion of the screen is analyzed
            self.offset = int(self.img.shape[0] * (self.divideArea - 1) / self.divideArea)    #How many pixels in the y direction are not analyzed from the top
            self.top_img = self.img[ : self.offset]    #Taking the top portion of the image and saving it for later
            self.img = self.img[self.offset : ]    #reassigning the image to the portion being analyzed
        self.x = self.img.shape[1]    #getting the width of the image
        self.img_copy = self.img.copy()    #getting copies for analysis/blurring
        self.img_rects = self.img.copy()    #the copy that will be used for bounding rectangles
        self.Canny = None    #Initializing variable to hold Canny image
        self.run()
        if optimize:    #After being run, rejoin the images if optimize is on
            self.img = np.append(self.top_img, self.img, axis=0)
            self.img_rects = np.append(self.top_img, self.img_rects, axis=0)
        if show_images_bool:    #if showing the images is enabled, show them
            self.show_images_exec()
    def settings_init(self):
        self.divideArea = 2.5    #This is the denominator for how much of the screen is analyzed (analyzes the bottom [1/(variable)] portion of the image/vid)
                                 #For example, if the bottom half should be analyzed, put in '2'
        self.amt_digits = 6    #defining the amount of characters and digits that should be found on the license plate
        self.ratio_max = 3    #This is the maximum width to height ratio that a license plate can have in the program (for the US, about 4 is good while for EU plates, about 6-7 is good)
        self.ratio_min = 1.5    #This is the minimum width to height ratio
        self.angle_min = 84    #After the angle of the cv.minAreaRect has a modulo 90 applied to it, the angle either needs to be close to upright (this value or above)
        self.angle_max = 6    #or close to horizontal (this value or below) in degrees
        self.img_size = self.img.shape[0] * self.img.shape[1]    #current size: about 240,000 pixels
        self.area_min = int(self.img_size / 5000)    #minimum area of the accepted bounding boxes -- smaller plates are recognized but their characters cannot be picked out, so there is no use going smaller
        self.area_max = int(self.img_size / 600)    #max area of the accepted bounding boxes
        self.lower_canny = 110    #lower value for canny thresholding
        self.upper_canny = 120    #upper value for canny thresholding
        #ASPECT variables are not used:
        self.aspect_max = 1    #the max amount of area that the license plate can cover within a bounding box to be considered
        self.aspect_min = 0.3    #the minimum amount of area that a license plate can cover within a bounding box to be considered
        self.img_dilate = 40    #specifying the value that the pixels which are being brightened will be increased by
        self.blur = (9, 9)    #initializing the size component of the gaussian blur that is applied to the image
        self.offset = 0    #initializing the variable which keeps track of how far from the top of the image the program begins to analyze
        self.top_img = None    #initializing the variable which may hold the top part of the image for later
        self.element_structure = cv.getStructuringElement(shape=cv.MORPH_RECT, ksize=(5, 5))    #basic elem structure for blurring
        self.letter_contour_min = 2000    #The minimum size a contour has to be for it to be considered for letter analysis
        self.roi_array = []    #array for holding the ROI's
    def run(self):    #master run function for the program
        _ = self.contour_manipulation(self.preprocess_canny_contours())
        license_plate_arr = self.analyze_image()
########################################################################################
#################################### EXECUTE PROGRAM ###################################
########################################################################################
    def contour_manipulation(self, contours):
        global checkIndividual    #checkIndividual is reassigned below, so it must be declared global here
        ret = []    #init the ret array
        for c in contours:
            boolRect = self.check_min_rect(c)    #checking whether the bounding rect encapsulates a valid license plate region
            if boolRect:    #if the region is valid, then append it to the return array
                ret.append(c)
            if checkIndividual:    #if the check individual option is on, then go through the contours one-by-one, write them to the image, and show the image
                checkIndividual = self.check_indiv_contour(c)
        return ret
    def show_images_exec(self, height = 300):    #shows the important images that are being used for execution
        # cv.imshow("Contours", imutils.resize(self.img_copy, height = height))
        cv.imshow("Bounding Rects", self.img_rects)
        cv.imshow("Canny", imutils.resize(self.Canny, height = height))
        self.show_images()
        self.check_keys()
    def check_indiv_contour(self, c):    #show the image and block for a key input to figure out what to do next
        print("\n\nCONTOUR: {}".format(cv.contourArea(c)))
        cv.imshow("Bounding Rects", self.img_rects)
        key = cv.waitKey(0) & 0xFF    #read one key instead of blocking once per branch
        if key == ord('c'):    #cycle through to the next contour
            return True
        elif key == ord('f'):    #draw the rest of the contours without pausing
            return False
        elif key == ord('q'):    #quit the program
            exit()
########################################################################################
############################# ANALYZING/ANNOTATING IMAGES ##############################
########################################################################################
    def preprocess_canny_contours(self):
        gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY)    #get grayscale image
        gray = cv.GaussianBlur(gray, self.blur, 0)    #Apply a blur
        edged = cv.Canny(gray, self.lower_canny, self.upper_canny)    #Getting the canny edges
        self.Canny = edged    #assign the variable
        contours = cv.findContours(edged.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
        #^Get the contours of the Canny image (remember that this will return more contours than we need, because Canny just returns lines)
        contours = imutils.grab_contours(contours)    #Get the contours using imutils
        return contours
    def analyze_image(self):    #getting an array of the potential letters from each license plate ROI
        str_arr = []
        #SHOWING THE ROI's
        for count, (regionOfInterest, x, y, w, h) in enumerate(self.roi_array):    #iterate through the ROI array
            data = self.process_ROI(regionOfInterest, count)    #process the ROI for letter regions
            if data is not None and get_chars:    #check if the data is found and if the get_chars option is on
                str_arr.append(self.show_bounds_and_text(data, x, y, w, h))    #if get_chars, run the plate through the NN for the characters
        return str_arr    #return an array of the strings that were found on the screen
    def show_bounds_and_text(self, data, x, y, w, h):
        cv.rectangle(self.img_rects, (x, y), (x + w, y + h), (0, 255, 0), thickness=4)    #draw a bounding rectangle on the img_rects image
        neuralNet = NeuralNetwork()    #init a NN (neural network)
        ret_str = neuralNet.get_chars(data)    #get the character string from the plate (get_chars is the method the NeuralNetwork class defines)
        cv.putText(self.img_rects, ret_str, (x, y - 5), cv.FONT_HERSHEY_DUPLEX, 1.7, (0, 255, 0), thickness=2)    #write those characters to the image
        return ret_str    #return the string
########################################################################################
################################### CHECKING CONTOURS ##################################
########################################################################################
    def process_ROI(self, roi, counter):    #takes a license plate ROI as an input with a counter (for showing images) and segments the character ROI's
        regionOfInterest = roi[int(roi.shape[0] / 4) : roi.shape[0] - int(roi.shape[0] / 5), int(roi.shape[1] / 18) : roi.shape[1] - int(roi.shape[1] / 18)]
        #^cut down on the region of interest that is passed (values determined through trial and error)
        name = "ROI {}".format(counter)    #format the name
        regionOfInterest = cv.cvtColor(regionOfInterest, cv.COLOR_BGR2GRAY)    #convert the color to grayscale
        regionOfInterest[: int(regionOfInterest.shape[0] / 6), :] += self.img_dilate    #Increasing the brightness of the top of the image (BREAKS WITH HIGH VALUES because of overflow)
        regionOfInterest = imutils.resize(regionOfInterest, height=200, inter=cv.INTER_AREA)    #resize the region of interest bigger
        image = cv.GaussianBlur(regionOfInterest, (0, 0), 3)    #try to sharpen the image using a blur of 0, 0
        image = cv.addWeighted(image, 1.5, regionOfInterest, -0.5, 0)    #continue to try to sharpen the image
        _, thresh = cv.threshold(image, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)    #threshold the image for the characters
        thresh = cv.bitwise_not(thresh)    #invert the image (so that the chars are now the white parts)
        thresh = cv.erode(thresh, (81, 61), iterations = 15)    #erode to try to break the erroneous connections that the computer picks up between the chars
        contours, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)    #find the contours
        contours = self.sort_contours_left(contours)    #sort the contours from left to right
        image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)    #convert the image colors back
        letters = []
        for contour in contours:
            if cv.contourArea(contour) > self.letter_contour_min:    #check if each contour is too small or not (makes sure we are analyzing potential chars)
                x, y, w, h = cv.boundingRect(contour)    #bounding rect around the supposed character
                letterInterest = thresh[0 : y + h, x : x + w]    #making sure that the top of the letter is not cut off when getting the ROI
                cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0))    #bounding box on the passed image
                letterImage = cv.resize(letterInterest, (60, 80))    #resize the char ROI
                letters.append(letterImage)    #append the char ROI
        if debug:    #if debugging, show all relevant images
            cv.imshow("GRAY {}".format(counter), imutils.resize(thresh, height=200))
            cv.imshow(name, image)    #showing and resizing image
            cv.moveWindow(name, 0, 110 * counter - 50)    #Moving the ROI windows into the right spot on the screen
        if len(letters) > 4:    #if more than four letters are detected, then return the array
            if collect_data:
                NeuralNetwork().label_letter(letters)    #if we are collecting data, label the letters (label_letter is an instance method, so instantiate first)
            return letters
        else:
            return None
    def check_min_rect(self, contour):    #function for getting the min-area rectangle and validating whether it is ok
        rect = cv.minAreaRect(contour)    #get the min area rect
        rx, ry, rw, rh = cv.boundingRect(contour)    #get the bounding rectangle coordinates
        if self.validateRatio(rect, rw, rh):    #check the ratios of the ROI
            brect = self.img[ry : ry + rh, rx : rx + rw]    #get the ROI
            self.roi_array.append((brect, rx, ry, rw, rh))    #append this ROI to the ROI array
            return True    #if everything is right, then return true to show that it is valid
        else:
            return False    #else, return false
    def validateRatio(self, rect, rw, rh):    #more checking that the contour could be a license plate
        (x, y), (width, height), angle = rect    #get all of the data about the min-area bounding rectangle
        if width == 0 or height == 0:    #to defend against illegal math operations which panic the program
            return False
        angle = angle % 90    #getting the angle in the most basic form
        area = width * height    #calc the area
        if not ((angle < self.angle_max or angle > self.angle_min) and (area > self.area_min and area < self.area_max)):
            return False    #return false unless the angle is almost 90 or almost 0 and the area is in range
        if rw < rh:    #if the width is less than the height, return false
            return False
        return self.rat_check(width, height)    #check the ratios
    def rat_check(self, width, height):
        ratio = float(width) / float(height)    #check whether the width to height ratio is wrong
        if ratio < 1:
            ratio = 1 / ratio    #making it so that the ratio is always more than 1
        return not (ratio < self.ratio_min or ratio > self.ratio_max)    #return false if the ratio is out of range
########################################################################################
#################################### SHOWING IMAGES ####################################
########################################################################################
    def show_images(self, height = 300):    #showing the image which is necessary every iteration
        cv.imshow("Original", imutils.resize(self.img, height = 200))
    def check_keys(self):
        if self.check_wait:    #if going through the contours, check if q is pressed
            key = cv.waitKey(0) & 0xFF
            print("NEXT IMAGE")
            if key == ord('q'):    #exit button
                exit(0)
        else:
            key = cv.waitKey(1) & 0xFF    #mask once so every comparison below sees the same value
            if key == ord('q'):    #exit button
                exit(0)
            elif key == ord('s'):    #skip forward in the video
                skip_forward()
            elif key == ord('p'):    #this creates a pause button for the video, in essence
                print("VIDEO PAUSED @ FRAME {}".format(cap.get(cv.CAP_PROP_POS_FRAMES)))
                while True:
                    key = cv.waitKey(25) & 0xFF
                    if key == ord('p'):    #unpause
                        break
                    elif key == ord('q'):    #quit the program button
                        exit(0)
                    elif key == ord('s'):    #skip forward
                        skip_forward()
########################################################################################
#################################### CONTOUR SORTING ###################################
########################################################################################
    def sort_contours_left(self, contours):    #This sorts the contours from left to right by the x-position of each bounding box
        retContourMapping = []
        for i, contour in enumerate(contours):    #for every contour, get the x-position of its bounding box
            x, _, _, _ = cv.boundingRect(contour)
            retContourMapping.append((contour, x, i))
        retContourMapping.sort(key=lambda tup: tup[1])    #sort by x-position, left to right
        contours = []
        for contour, _, _ in retContourMapping:
            contours.append(contour)
        return contours
############# NOT BEING USED #############
    def sort_contours_middle(self, contours):    #This sorts the contours based on how far the contours are from the middle of the screen (only looks at the x-pos)
        rects = []
        for c in contours:
            rects.append((cv.boundingRect(c), c))    #Creating a tuple array with bounding rects and the contours
        retContourMapping = []
        for i in range(len(rects)):    #for every contour, first get the middle of the bounding box x-pos wise
            rect, contour = rects[i]    #Then subtract that value from the middle of the screen,
            x, _, w, _ = rect    #take the abs. value of that, and sort those values in increasing fashion
            x = int(self.x / 2) - x    #If necessary, this would allow us to put a cap on processing and only look at contours in the middle of the screen
            x = abs(x + int(w / 2))
            retContourMapping.append((i, x, rects[i], contour))
        retContourMapping.sort(key=lambda tup: tup[1])    #sorts by distance from the vertical centerline
        keys = []
        for index, _, _, _ in retContourMapping:
            keys.append(index)
        return keys
########################################################################################
#################################### NEURAL NETWORK ####################################
########################################################################################
imageNumber = 0
training_file_keys = training_data_path + "training_data.txt"
class NeuralNetwork:
    def __init__(self):
        self.model = model    #initializing all of the pertinent settings
        self.plate_ret = ""
################ TRAINING THE MODEL ################
    def label_letter(self, imagearr):
        global imageNumber    #the image counter and keys file path live at module level
        global training_file_keys
        for image in imagearr:
            print("FRAME COUNT: {}".format(cap.get(cv.CAP_PROP_POS_FRAMES)))    #printing the frame count in case my progress is lost
            cv.imshow("POSSIBLE LETTER", image)    #show the ROI to get the character
            cv.waitKey(1)    #wait for a key for one millisecond
            imageNumber = imageNumber + 1    #increasing the image number
            letter = input("Please input the letter: ").upper()    #turning the input to upper case
            hexval = ":".join("{:02x}".format(ord(c)) for c in letter)    #the hex value for each character that is input
            if len(letter) < 1 or hexval == "0c":    #if no letter is input or a form-feed character is entered, store the letter as underscore (unknown)
                letter = '_'
            else:
                letter = letter[0]    #make the input letter the first character which is input
            with open(training_data_path + str(imageNumber) + ".txt", "w") as file:    #write the image to a txt file
                for row in image:    #save the image to the txt file
                    np.savetxt(file, row)
            print("Letter passed: " + letter)    #print the letter passed and append the passed letter to the keys file (for training)
            with open(training_file_keys, "a") as training_file:
                training_file.write("\n" + str(letter))
################ PREDICTING WITH THE MODEL ################
    def get_chars(self, array):    #getting the character string
        ret = ""
        for image in array:    #for each character...
            ret += self.predict_char(image)    #run it through the NN to get the character
        return ret
    def predict_char(self, image):    #predict the input character using the neural network
        image_formatted = self.setup_array(image)    #setup the array
        pred = self.model.predict(image_formatted)    #predict the character
        return letter_dict[int(np.argmax(pred))]    #return the letter using the dict which was set up earlier
    def setup_array(self, image):    #formatting the input image to make sure it plays nice with the Neural Network
        number_array = np.zeros((1, 80, 60, 1), dtype="float32")
        number_array[0] = image.reshape(80, 60, 1)    #sizing and inputting the array
        return number_array
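    # Note on setup_array above: each char ROI is resized to 60x80 (width x height) in process_ROI,
    # so reshape(80, 60, 1) gives the (rows, cols, channels) layout the model expects.
    # Pixel values are passed through unscaled; if the model was trained on [0, 1] inputs,
    # dividing by 255.0 would be needed here (an assumption -- not verified against the training code).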
################ MODEL FUNCTIONS ################
    def network_summary(self):    #get a summary of the model (not used)
        return self.model.summary()
########################################################################################
############################### VID PROCESSING AND SETUP ###############################
########################################################################################
if __name__ == "__main__":
    setup_dictionary()
    #addresses for testing my code on still images:
    imageAddresses = [
        "kerasModelandData/licensePlate.jpeg",
        "kerasModelandData/licensePlate2.jpeg",
        "kerasModelandData/licensePlate3.jpeg",
        "kerasModelandData/licensePlate4.jpeg",
        "kerasModelandData/licensePlate5.jpeg"
    ]
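    # A sketch of how these still images could be fed through FindPlate before the video starts,
    # as the welcome text below implies. It assumes the jpeg files live under folder_path and is
    # skipped for any file that is missing, so it does not break the original video flow.
    for address in imageAddresses:
        if os.path.exists(folder_path + address):
            FindPlate(counter=0, check_wait=True, imgAddress=folder_path + address)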
print("\n\nWelcome\nPlease press q to quit the program\nPlease press p to pause and unpause during the video\nPlease press s to skip forward within the video\nPlease press anything else to continue through the images when analyzing individual images")
print("\nOnce you have looked at all of the still images, the video will begin\n\n")
print("Green boxes signify possible license plate regions \nwhile red ones show other ROI's which were picked up and discarded")
print("\nUnderscore characters ('_') represent characters which the Neural Network could not decipher with a high confidence")
cap = cv.VideoCapture(video_path)
print("Starting Video @ frame " + str(start_frame_number))
cap.set(cv.CAP_PROP_POS_FRAMES, start_frame_number) #setting the starting frame number to the correct number
if collect_data:
file_keys = open(training_file_keys, "r")
imageNumber = int(file_keys.readline().rstrip())
print("INDEX: " + str(imageNumber))
counter = 0
while(cap.isOpened()): #reading and analyzing the video as it runs
counter = counter + 1
counter = counter % frames_skipped
ret, img = cap.read()
if ret == True:
FindPlate(counter=counter, img = img)
else:
break
cap.release()
cv.destroyAllWindows() | letter_dict = {} #init the letter dictionary to get the letters that are put through the NN | random_line_split |
licensePlateDetectorOptimized.py | print("\n\nLOADING PROGRAM\n\n")
import cv2 as cv
print("Loaded CV")
import numpy as np
print("Loaded NP")
import tensorflow as tf
print("Loaded TF")
import imutils
print("Loaded IMUTILS")
import os
print("Loaded OS")
'''
SOME NOTES ABOUT THE PROGRAM:
1) Make sure to change the paths at the top of the file to reflect the correct paths to your files
2) The program is slow right now. I am working on improvements
3) All togglable settings are at the top of the file and in the __init__ / settings_init functions of the FindPlate class
Have fun!! :)
'''
### CHANGE THESE PATHS ###
#the paths below are the paths that work on my machine!!
video_path = "/Users/tristanbrigham/Downloads/BostonVid.mp4"
folder_path = os.getcwd() + "/MrCrutcherProjects/LicensePlateProject/Tristans_Code/"
training_data_path = "/Users/tristanbrigham/GithubProjects/AI_Training_Data/LicensePlateProject/"
### GLOBAL VARIABLES FOR THE PROGRAM ###
show_images_bool = True #if true, shows the images that are being processed
collect_data = False #if true, asks the user for data on what letter is detected. input nothing if image is not a letter or contains more than one letter
get_chars = True #if true, applies the algorithm model to the characters that are detected to get what the plate says
optimize = True #checks to see whether the user only wants the program to analyze the bottom portion of the vid/image
debug = False #if true, shows the gray ROI's and the license plate ROI's
checkIndividual = False #if true, this waits for user input before showing each contour on each image
start_frame_number = 0 #where does the user want the video to start?
frames_skipped = 30 #how many frames pass before the frame is analyzed (for instance, analyze every 20th frame if this value is 20)
letter_dict = {} #init the letter dictionary to get the letters that are put through the NN
model = tf.keras.models.load_model(folder_path + "kerasModelandData/model.h5") #getting the model and loading it
########################################################################################
#################################### GENERAL SETUP #####################################
########################################################################################
def skip_forward(): #a function to skip forward in the video
frame_count = cap.get(cv.CAP_PROP_POS_FRAMES)
cap.set(cv.CAP_PROP_POS_FRAMES, frame_count + 1000)
def setup_dictionary(): #a function to set up the dictionary mapping values to letters for the NN
alphabet = open(folder_path + "kerasModelandData/alphabet.txt")
for count, line in enumerate(alphabet.readlines()):
letter_dict[count] = line[0]
print(letter_dict)
### Class for plate detection
########################################################################################
#################################### SETUP PROGRAM! ####################################
########################################################################################
class FindPlate:
#should maybe make the parameters global variables or controlled by the command line
# Have to adjust so that the min and max are larger when analyzing the images and smaller when looking at the vids
def __init__(self, counter, check_wait = False, imgAddress = None, img = None):
|
def setup_exec(self):
self.settings_init() #initializing all of the settings
if optimize: #Currently, optimize makes it so that only the bottom portion of the screen is analyzed
self.offset = int(self.img.shape[0] * (self.divideArea - 1) / self.divideArea) #How many pixels in the y direction are not analyzed from the top
self.top_img = self.img[ : self.offset ] #Taking the top potion of the image and saving it for later
self.img = self.img[self.offset : ] #reassigning the image to the portion being analyed
self.x = self.img.shape[1] #getting the width of the image
self.img_copy = self.img.copy() #getting copies for analysis/blurring
self.img_rects = self.img.copy() #the copy that will be used for bounding rectangles
self.Canny = None #Initializing variable to hold Canny image
self.run()
if optimize: #After being run, rejoin the images if optimize is on
self.img = np.append(self.top_img, self.img, axis=0)
self.img_rects = np.append(self.top_img, self.img_rects, axis=0)
if show_images_bool: #if show the images, show the images
self.show_images_exec()
def settings_init(self):
self.divideArea = 2.5 #This is the denominator for how much of the screen is analyzed (analyzes the [1/(variable)] portion of the image/vid)
#For example, if the bottom half should be analyzed, put in '2'
self.amt_digits = 6 #defining the amount of characters and digits that should be found on the license plate
self.ratio_max = 3 #This is the maximum width to height ratio that a license plate can be in the program (for the US, about 4 is good while for EU plates, about 6-7 is good)
self.ratio_min = 1.5 #This is the minimum width to height ratio
self.angle_min = 84 #After the angle of the cv.areaMinRect has a modulo 90 applied to it, the angle either needs to be close to upright (this value or above)
self.angle_max = 6 # or close to horizontal (this value or below) in degrees
self.img_size = self.img.shape[0] * self.img.shape[1]
#current size: about 240,000 pixels
self.area_min = int(self.img_size / 5000) #minimum area of the accepted bounding boxes -- it recognizes plates with smaller values but there is no way that characters can be picked out. No use to have smaller
self.area_max = int(self.img_size / 600) #max area of the accepted bounding boxes
self.lower_canny = 110 #upper value for canny thresholding
self.upper_canny = 120 #Lower value for canny thresholding
#ASPECT variables are not used:
self.aspect_max = 1 #the max amount of area that the license plate can cover within a bounding box to be considered
self.aspect_min = 0.3 #the minimum amount of area that a license plate can cover within a bounding box to be considered
self.img_dilate = 40 #specifying the value that the pixels which are being brightened will be increased by
self.blur = (9, 9) #initializing the size component of the gaussian blur that is applied to the image
self.offset = 0 #initializing the variable which keeps track of how far from the top of the image the program begins to analyze
self.top_img = None #initializing the variable which may hold the top part of the image for later
self.element_structure = cv.getStructuringElement(shape=cv.MORPH_RECT, ksize=(5, 5)) #basic elem structure for blurring
self.letter_contour_min = 2000 #The minimum size a contour has to be for it to be considered for letter analysis
self.roi_array = [] #array for holding the ROI's
def run(self): #master run function for the program
_ = self.contour_manipulation(self.preprocess_canny_contours())
license_plate_arr = self.analyze_image()
########################################################################################
#################################### EXECUTE PROGRAM ###################################
########################################################################################
def contour_manipulation(self, contours):
ret = [] #init the ret array
for c in contours:
boolRect = self.check_min_rect(c) #checking whether the bounding rect encapsulates a valid license plate region
if boolRect: #if the region is valid, then append it to the return array
ret.append(c)
if checkIndividual: #if the check individual option is on, then go through the contours one-by-one, write them to the image, and show the image
checkIndividual = self.check_indiv_contour(c)
return ret
def show_images_exec(self, height = 300): #shows the important images that are being used for execution
# cv.imshow("Contours", imutils.resize(self.img_copy, height = height))
cv.imshow("Bounding Rects", self.img_rects)
cv.imshow("Canny", imutils.resize(self.Canny, height = height))
self.show_images()
self.check_keys()
def check_indiv_contour(self, c): #show the image and block for a key input to figure out what to do next
print("\n\nCONTOUR: {}".format(cv.contourArea(c)))
cv.imshow("Bounding Rects", self.img_rects)
if cv.waitKey(0) & 0xFF == ord('c'): #This cycles through to the next contour
return True
elif cv.waitKey(0) & 0xFF == ord('f'): #This makes it so that the rest of the contours are drawn in an instant
return False
elif cv.waitKey(0) & 0xFF == ord('q'): #quits the program
exit()
########################################################################################
############################# ANALYZING/ANNOTATING IMAGES ##############################
########################################################################################
def preprocess_canny_contours(self):
gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY) #get grayscale image
gray = cv.GaussianBlur(gray, self.blur, 0) #Apply a blur
edged = cv.Canny(gray, self.lower_canny, self.upper_canny) #Getting the canny contours
self.Canny = edged #assign the variable
contours = cv.findContours(edged.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE) #Get the contours of the Canny image [remember that this will return more contours than we need
#Because Canny just returns lines]
contours = imutils.grab_contours(contours) #Get the contours using imutils
return contours
def analyze_image(self): # getting an array of the potential letters from each license plate ROI
str_arr = []
#SHOWING THE ROI's
for count, (regionOfInterest, x, y, w, h) in enumerate(self.roi_array): #iterate through the ROI array
data = self.process_ROI(regionOfInterest, count) #process the ROI for letter regions
if data is not None and get_chars: #check if the data is found and if the get_chars option is on
str_arr.append(self.show_bounds_and_text(data, x, y, w, h)) #if get_chars, run the plate through the NN for the characters
return str_arr #return an array of the strings that were found on the screen
def show_bounds_and_text(self, data, x, y, w, h):
cv.rectangle(self.img_rects, (x, y), (x + w, y + h), (0, 255, 0), thickness=4) #draw a bounding rectangle on the img_rects image
neuralNet = NeuralNetwork() #init a NN (neural network)
ret_str = neuralNet.get_chars_string(data) #get the character string from the plate
cv.putText(self.img_rects, ret_str, (x, y - 5), cv.FONT_HERSHEY_DUPLEX, 1.7, (0, 255, 0), thickness=2) #write those characters to the image
return ret_str #return the string
########################################################################################
################################### CHECKING CONTOURS ##################################
########################################################################################
def process_ROI(self, roi, counter): #takes a license plate ROI as an input with a counter (for showing images) and segments the character ROI's
regionOfInterest = roi[int(roi.shape[0] / 4) : roi.shape[0] - int(roi.shape[0] / 5), int(roi.shape[1] / 18) : roi.shape[1] - int(roi.shape[1] / 18)]
#^cut down on the region of interest that is passed (values determined through trial and error)
name = "ROI {}".format(counter) #format the name
regionOfInterest = cv.cvtColor(regionOfInterest, cv.COLOR_BGR2GRAY) #convert the color to grayscale
regionOfInterest[: int(regionOfInterest.shape[0] / 6), :] += self.img_dilate #Increasing the brightness of the top of the image (BREAKS WITH HIGH VALUES because of overflow)
regionOfInterest = imutils.resize(regionOfInterest, height=200, inter=cv.INTER_AREA) #resize the region of interest bigger
image = cv.GaussianBlur(regionOfInterest, (0, 0), 3) #try to sharpen the image using a blur of 0, 0
image = cv.addWeighted(image, 1.5, regionOfInterest, -0.5, 0) #continue to try to sharpen the image
_, thresh = cv.threshold(image, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU) #threshold the image for the characters
thresh = cv.bitwise_not(thresh) #convert to bitwise not (so that the chars are now the white parts)
thresh = cv.erode(thresh, (81, 61), iterations = 15) #erode to try to break the erroneous connections that the computer picks up between the chars
contours, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) #find the contours
contours = self.sort_contours_left(contours) #sort the contours from left to right
image = cv.cvtColor(image, cv.COLOR_GRAY2BGR) #convert the image colors back
letters = []
for contour in contours:
if cv.contourArea(contour) > self.letter_contour_min: #check if each contour is too small or not (makes sure we are analyzing potential chars)
x, y, w, h = cv.boundingRect(contour) #bouding rect around the supposed character
letterInterest = thresh[0 : y + h, x : x + w] #making sure that the top of the letter is not cut off when getting the ROI
cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0)) #bounding box on the passed image
letterImage = cv.resize(letterInterest, (60, 80)) #resize the char ROI
letters.append(letterImage) #append the char ROI
if debug: #if debugging, show all relevant images
cv.imshow("GRAY {}".format(counter), imutils.resize(thresh, height=200))
cv.imshow(name, image) #showing and resizing image
cv.moveWindow(name, 0, 110 * counter - 50) #Moving the ROI windows into the right spot on the screen
if len(letters) > 4: #if atleast four letters are detected, then return the array
if collect_data:
NeuralNetwork.label_letter(letters) #if we are collecting data, then do what is necessary there to create the label
return letters
else: return None
def check_min_rect(self, contour): #function for getting the min-area rectangle and validating whether it is ok
rect = cv.minAreaRect(contour) #get the min area rect
rx, ry, rw, rh = cv.boundingRect(contour) #get the bounding rectangle coordinates
if self.validateRatio(rect, rw, rh): #check the ratios of the ROI
brect = self.img[ry : ry + rh, rx : rx + rw] #get the ROI
self.roi_array.append((brect, rx, ry, rw, rh)) #append this ROI to the ROI array
return True #if everything is right, then return true to show that it is valid
else:
return False #else, return false
def validateRatio(self, rect, rw, rh): #more checking that the contour could be a license plate
(x, y), (width, height), angle = rect #get all of the data about the minarea bounding rectangle
if width == 0 or height == 0: #to defend against illegal math operations which panic the program
return False
angle = angle % 90 #getting the angle in the most basic form
area = width * height #calc the area
if not ((angle < self.angle_max or angle > self.angle_min) and (area > self.area_min and area < self.area_max)):
return False #if something is off, then return false (check that the angle is almost 90 or almost 0 and that the area is ok)
if rw < rh: #if the width is less than the height, return false
return False
return self.rat_check(width, height) #check the ratios
def rat_check(self, width, height):
ratio = float(width) / float(height) #check whether the width to height ratio is wrong
if ratio < 1:
ratio = 1 / ratio #making it so that the ratio is always more than 1
return not (ratio < self.ratio_min or ratio > self.ratio_max) #if the area is not in range or the ratio is off, return false
########################################################################################
#################################### SHOWING IMAGES ####################################
########################################################################################
def show_images(self, height = 300): #showing the image which is necessary every iteration
cv.imshow("Original", imutils.resize(self.img, height = 200))
def check_keys(self):
if self.check_wait: #if going through the contours, check if q is pressed
key = cv.waitKey(0) & 0xFF
print("NEXT IMAGE")
if key == ord('q'): #exit button
exit(0)
else:
key = cv.waitKey(1)
if key & 0xFF == ord('q'): #exit button
exit(0)
elif key == ord('s'): #skip forward in the video
skip_forward()
elif key & 0xFF == ord('p'): # this creates a pause button for the video, in essence
print("VIDEO PAUSED @ FRAME {}".format(cap.get(cv.CAP_PROP_POS_FRAMES)))
while True:
key = cv.waitKey(25) & 0xFF
if key == ord('p'): #unpause
break
elif key == ord('q'): #quit the program button
exit(0)
elif key == ord('s'): #skip forward
skip_forward()
########################################################################################
#################################### CONTOUR SORTING ###################################
########################################################################################
def sort_contours_left(self, contours): #This sorts the contours based on how far the contours are from the middle of the screen (only looks at the x-pos)
retContourMapping = []
for i, contour in enumerate(contours): #for every contour, first get the middle part of the bouding box x-pos wise
x, _, _, _ = cv.boundingRect(contour) #then we take the abs. value of that and sort those values in increasing fashion
retContourMapping.append((contour, x, i))
retContourMapping.sort(key=lambda tup: tup[1]) # sorts in place by distance from vertical horizontal line
contours = []
for contour, _, _ in retContourMapping:
contours.append(contour)
return contours
############# NOT BEING USED #############
def sort_contours_middle(self, contours): #This sorts the contours based on how far the contours are from the middle of the screen (only looks at the x-pos)
rects = []
for c in contours:
rects.append((cv.boundingRect(c), c)) #Creating a tuple array with bouding rects and the contours
retContourMapping = []
for i in range(len(rects)): #for every contour, first get the middle part of the bouding box x-pos wise
rect, contour = rects[i] #Then we are going to subtract that value from the middle of the screen
x, _, w, _ = rect #then we take the abs. value of that and sort those values in increasing fashion
x = int(self.x / 2) - x #If necessary, this would allow us to put a cap on processing and only look at contours in the middle of the screen
x = abs(x + int(w/2))
retContourMapping.append((i, x, rects[i], contour))
retContourMapping.sort(key=lambda tup: tup[1]) # sorts in place by distance from vertical horizontal line
keys = []
for index, _, _, _ in retContourMapping:
keys.append(index)
return keys
########################################################################################
#################################### NEURAL NETWORK ####################################
########################################################################################
imageNumber = 0
training_file_keys = training_data_path + "training_data.txt"
class NeuralNetwork:
def __init__(self):
self.model = model #initializing all of the pertinent settings
self.plate_ret = ""
################ TRAINING THE MODEL ################
def label_letter(self, imagearr):
for image in imagearr:
print("FRAME COUNT: {}".format(cap.get(cv.CAP_PROP_POS_FRAMES))) #printing the frame count incase my progress is lost
global imageNumber #getting the global variable names
global training_file_keys
cv.imshow("POSSIBLE LETTER", image) #show the ROI to get the character
cv.waitKey(1) #wait for a key for one millis
imageNumber = imageNumber + 1 #increasing the image number
letter = input("Please input the letter: ").upper() #turning the input to Upper case
hexval = ":".join("{:02x}".format(ord(c)) for c in letter) #print the hexval for each character that is input
if len(letter) < 1 or hexval == "0c": #if no letter is input or the value is a space, then input the letter as underscore (unknown)
letter = '_'
else:
letter = letter[0] #make the input letter the first character which is input
file = open(training_data_path + str(imageNumber) + ".txt", "w") #write the image file as a txt file
for row in image: #save the image to the txt file
np.savetxt(file, row)
print("Letter passed: " + letter) #print the letter passed and append the passed letter to the keys file (for training)
training_file = open(training_file_keys, "a")
training_file.write("\n" + str(letter))
################ PREDICTING WITH THE MODEL ################
def get_chars(self, array): #getting the character string
ret = ""
for image in array: #for each character...
ret += self.predict_char(image) #run it through the NN to get the character
return ret
def predict_char(self, image): #predict the input character using the neural network
image_formatted = self.setup_array(image) #setup the array
pred = model.predict(image_formatted) #predict the character
return letter_dict[int(np.argmax(pred))] #return the letter using the dict which was set up earlier
def setup_array(self, image): #formatting the input image to make sure it plays nice with the Neural Network
number_array = np.zeros((1, 80, 60, 1), dtype="float32")
number_array[0] = image.reshape(80, 60, 1) #sizing and inputting the array
return number_array
################ MODEL FUNCTIONS ################
def network_summary(self): #get a summary of the model (not used)
return self.model.summary()
########################################################################################
############################### VID PROCESSING AND SETUP ###############################
########################################################################################
if __name__ == "__main__":
setup_dictionary()
#addresses for testing my code on still images:
imageAddresses = [
"kerasModelandData/licensePlate.jpeg",
"kerasModelandData/licensePlate2.jpeg",
"kerasModelandData/licensePlate3.jpeg",
"kerasModelandData/licensePlate4.jpeg",
"kerasModelandData/licensePlate5.jpeg"
]
print("\n\nWelcome\nPlease press q to quit the program\nPlease press p to pause and unpause during the video\nPlease press s to skip forward within the video\nPlease press anything else to continue through the images when analyzing individual images")
print("\nOnce you have looked at all of the still images, the video will begin\n\n")
print("Green boxes signify possible license plate regions \nwhile red ones show other ROI's which were picked up and discarded")
print("\nUnderscore characters ('_') represent characters which the Neural Network could not decipher with a high confidence")
cap = cv.VideoCapture(video_path)
print("Starting Video @ frame " + str(start_frame_number))
cap.set(cv.CAP_PROP_POS_FRAMES, start_frame_number) #setting the starting frame number to the correct number
if collect_data:
file_keys = open(training_file_keys, "r")
imageNumber = int(file_keys.readline().rstrip())
print("INDEX: " + str(imageNumber))
counter = 0
while(cap.isOpened()): #reading and analyzing the video as it runs
counter = counter + 1
counter = counter % frames_skipped
ret, img = cap.read()
if ret == True:
FindPlate(counter=counter, img = img)
else:
break
cap.release()
cv.destroyAllWindows()
| self.check_wait = check_wait #initializing whether we need to wait between drawing contours for debugging
if imgAddress is None and img is not None: #getting the image from the video
self.img = img
elif imgAddress is not None and img is None:
self.img = cv.resize(cv.imread(imgAddress), (860, 480))
else:
print("-----------------------ERROR FINDING IMAGE-----------------------")
exit(0)
if(counter == 0):
self.setup_exec() #execute the program every certain amount of frames
elif show_images_bool:
self.show_images() #Show the images if that option is on
self.check_keys() | identifier_body |
licensePlateDetectorOptimized.py | print("\n\nLOADING PROGRAM\n\n")
import cv2 as cv
print("Loaded CV")
import numpy as np
print("Loaded NP")
import tensorflow as tf
print("Loaded TF")
import imutils
print("Loaded IMUTILS")
import os
print("Loaded OS")
'''
SOME NOTES ABOUT THE PROGRAM:
1) Make sure to change the paths at the top of the file to reflect the correct paths to your files
2) The program is slow right now. I am working on improvements
3) All togglable settings are at the top of the file and in the __init__ / settings_init functions of the FindPlate class
Have fun!! :)
'''
### CHANGE THESE PATHS ###
#the paths below are the paths that work on my machine!!
video_path = "/Users/tristanbrigham/Downloads/BostonVid.mp4"
folder_path = os.getcwd() + "/MrCrutcherProjects/LicensePlateProject/Tristans_Code/"
training_data_path = "/Users/tristanbrigham/GithubProjects/AI_Training_Data/LicensePlateProject/"
### GLOBAL VARIABLES FOR THE PROGRAM ###
show_images_bool = True #if true, shows the images that are being processed
collect_data = False #if true, asks the user for data on what letter is detected. input nothing if image is not a letter or contains more than one letter
get_chars = True #if true, applies the algorithm model to the characters that are detected to get what the plate says
optimize = True #checks to see whether the user only wants the program to analyze the bottom portion of the vid/image
debug = False #if true, shows the gray ROI's and the license plate ROI's
checkIndividual = False #if true, this waits for user input before showing each contour on each image
start_frame_number = 0 #where does the user want the video to start?
frames_skipped = 30 #how many frames pass before the frame is analyzed (for instance, analyze every 20th frame if this value is 20)
letter_dict = {} #init the letter dictionary to get the letters that are put through the NN
model = tf.keras.models.load_model(folder_path + "kerasModelandData/model.h5") #getting the model and loading it
########################################################################################
#################################### GENERAL SETUP #####################################
########################################################################################
def skip_forward(): #a function to skip forward in the video
frame_count = cap.get(cv.CAP_PROP_POS_FRAMES)
cap.set(cv.CAP_PROP_POS_FRAMES, frame_count + 1000)
def setup_dictionary(): #a function to set up the dictionary mapping values to letters for the NN
alphabet = open(folder_path + "kerasModelandData/alphabet.txt")
for count, line in enumerate(alphabet.readlines()):
letter_dict[count] = line[0]
print(letter_dict)
### Class for plate detection
########################################################################################
#################################### SETUP PROGRAM! ####################################
########################################################################################
class FindPlate:
#should maybe make the parameters global variables or controlled by the command line
# Have to adjust so that the min and max are larger when analyzing the images and smaller when looking at the vids
def __init__(self, counter, check_wait = False, imgAddress = None, img = None):
self.check_wait = check_wait #initializing whether we need to wait between drawing contours for debugging
if imgAddress is None and img is not None: #getting the image from the video
self.img = img
elif imgAddress is not None and img is None:
self.img = cv.resize(cv.imread(imgAddress), (860, 480))
else:
print("-----------------------ERROR FINDING IMAGE-----------------------")
exit(0)
if(counter == 0):
self.setup_exec() #execute the program every certain amount of frames
elif show_images_bool:
self.show_images() #Show the images if that option is on
self.check_keys()
def setup_exec(self):
self.settings_init() #initializing all of the settings
if optimize: #Currently, optimize makes it so that only the bottom portion of the screen is analyzed
self.offset = int(self.img.shape[0] * (self.divideArea - 1) / self.divideArea) #How many pixels in the y direction are not analyzed from the top
self.top_img = self.img[ : self.offset ] #Taking the top potion of the image and saving it for later
self.img = self.img[self.offset : ] #reassigning the image to the portion being analyed
self.x = self.img.shape[1] #getting the width of the image
self.img_copy = self.img.copy() #getting copies for analysis/blurring
self.img_rects = self.img.copy() #the copy that will be used for bounding rectangles
self.Canny = None #Initializing variable to hold Canny image
self.run()
if optimize: #After being run, rejoin the images if optimize is on
self.img = np.append(self.top_img, self.img, axis=0)
self.img_rects = np.append(self.top_img, self.img_rects, axis=0)
if show_images_bool: #if show the images, show the images
self.show_images_exec()
def settings_init(self):
self.divideArea = 2.5 #This is the denominator for how much of the screen is analyzed (analyzes the [1/(variable)] portion of the image/vid)
#For example, if the bottom half should be analyzed, put in '2'
self.amt_digits = 6 #defining the amount of characters and digits that should be found on the license plate
self.ratio_max = 3 #This is the maximum width to height ratio that a license plate can be in the program (for the US, about 4 is good while for EU plates, about 6-7 is good)
self.ratio_min = 1.5 #This is the minimum width to height ratio
self.angle_min = 84 #After the angle of the cv.areaMinRect has a modulo 90 applied to it, the angle either needs to be close to upright (this value or above)
self.angle_max = 6 # or close to horizontal (this value or below) in degrees
self.img_size = self.img.shape[0] * self.img.shape[1]
#current size: about 240,000 pixels
self.area_min = int(self.img_size / 5000) #minimum area of the accepted bounding boxes -- it recognizes plates with smaller values but there is no way that characters can be picked out. No use to have smaller
self.area_max = int(self.img_size / 600) #max area of the accepted bounding boxes
self.lower_canny = 110 #upper value for canny thresholding
self.upper_canny = 120 #Lower value for canny thresholding
#ASPECT variables are not used:
self.aspect_max = 1 #the max amount of area that the license plate can cover within a bounding box to be considered
self.aspect_min = 0.3 #the minimum amount of area that a license plate can cover within a bounding box to be considered
self.img_dilate = 40 #specifying the value that the pixels which are being brightened will be increased by
self.blur = (9, 9) #initializing the size component of the gaussian blur that is applied to the image
self.offset = 0 #initializing the variable which keeps track of how far from the top of the image the program begins to analyze
self.top_img = None #initializing the variable which may hold the top part of the image for later
self.element_structure = cv.getStructuringElement(shape=cv.MORPH_RECT, ksize=(5, 5)) #basic elem structure for blurring
self.letter_contour_min = 2000 #The minimum size a contour has to be for it to be considered for letter analysis
self.roi_array = [] #array for holding the ROI's
def run(self): #master run function for the program
_ = self.contour_manipulation(self.preprocess_canny_contours())
license_plate_arr = self.analyze_image()
########################################################################################
#################################### EXECUTE PROGRAM ###################################
########################################################################################
def contour_manipulation(self, contours):
ret = [] #init the ret array
for c in contours:
boolRect = self.check_min_rect(c) #checking whether the bounding rect encapsulates a valid license plate region
if boolRect: #if the region is valid, then append it to the return array
ret.append(c)
if checkIndividual: #if the check individual option is on, then go through the contours one-by-one, write them to the image, and show the image
checkIndividual = self.check_indiv_contour(c)
return ret
def | (self, height = 300): #shows the important images that are being used for execution
# cv.imshow("Contours", imutils.resize(self.img_copy, height = height))
cv.imshow("Bounding Rects", self.img_rects)
cv.imshow("Canny", imutils.resize(self.Canny, height = height))
self.show_images()
self.check_keys()
def check_indiv_contour(self, c): #show the image and block for a key input to figure out what to do next
print("\n\nCONTOUR: {}".format(cv.contourArea(c)))
cv.imshow("Bounding Rects", self.img_rects)
if cv.waitKey(0) & 0xFF == ord('c'): #This cycles through to the next contour
return True
elif cv.waitKey(0) & 0xFF == ord('f'): #This makes it so that the rest of the contours are drawn in an instant
return False
elif cv.waitKey(0) & 0xFF == ord('q'): #quits the program
exit()
########################################################################################
############################# ANALYZING/ANNOTATING IMAGES ##############################
########################################################################################
def preprocess_canny_contours(self):
gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY) #get grayscale image
gray = cv.GaussianBlur(gray, self.blur, 0) #Apply a blur
edged = cv.Canny(gray, self.lower_canny, self.upper_canny) #Getting the canny contours
self.Canny = edged #assign the variable
contours = cv.findContours(edged.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE) #Get the contours of the Canny image [remember that this will return more contours than we need
#Because Canny just returns lines]
contours = imutils.grab_contours(contours) #Get the contours using imutils
return contours
def analyze_image(self): # getting an array of the potential letters from each license plate ROI
str_arr = []
#SHOWING THE ROI's
for count, (regionOfInterest, x, y, w, h) in enumerate(self.roi_array): #iterate through the ROI array
data = self.process_ROI(regionOfInterest, count) #process the ROI for letter regions
if data is not None and get_chars: #check if the data is found and if the get_chars option is on
str_arr.append(self.show_bounds_and_text(data, x, y, w, h)) #if get_chars, run the plate through the NN for the characters
return str_arr #return an array of the strings that were found on the screen
def show_bounds_and_text(self, data, x, y, w, h):
cv.rectangle(self.img_rects, (x, y), (x + w, y + h), (0, 255, 0), thickness=4) #draw a bounding rectangle on the img_rects image
neuralNet = NeuralNetwork() #init a NN (neural network)
ret_str = neuralNet.get_chars_string(data) #get the character string from the plate
cv.putText(self.img_rects, ret_str, (x, y - 5), cv.FONT_HERSHEY_DUPLEX, 1.7, (0, 255, 0), thickness=2) #write those characters to the image
return ret_str #return the string
########################################################################################
################################### CHECKING CONTOURS ##################################
########################################################################################
def process_ROI(self, roi, counter): #takes a license plate ROI as an input with a counter (for showing images) and segments the character ROI's
regionOfInterest = roi[int(roi.shape[0] / 4) : roi.shape[0] - int(roi.shape[0] / 5), int(roi.shape[1] / 18) : roi.shape[1] - int(roi.shape[1] / 18)]
#^cut down on the region of interest that is passed (values determined through trial and error)
name = "ROI {}".format(counter) #format the name
regionOfInterest = cv.cvtColor(regionOfInterest, cv.COLOR_BGR2GRAY) #convert the color to grayscale
regionOfInterest[: int(regionOfInterest.shape[0] / 6), :] += self.img_dilate #Increasing the brightness of the top of the image (BREAKS WITH HIGH VALUES because of overflow)
regionOfInterest = imutils.resize(regionOfInterest, height=200, inter=cv.INTER_AREA) #resize the region of interest bigger
image = cv.GaussianBlur(regionOfInterest, (0, 0), 3) #try to sharpen the image using a blur of 0, 0
image = cv.addWeighted(image, 1.5, regionOfInterest, -0.5, 0) #continue to try to sharpen the image
_, thresh = cv.threshold(image, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU) #threshold the image for the characters
thresh = cv.bitwise_not(thresh) #convert to bitwise not (so that the chars are now the white parts)
thresh = cv.erode(thresh, (81, 61), iterations = 15) #erode to try to break the erroneous connections that the computer picks up between the chars
contours, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) #find the contours
contours = self.sort_contours_left(contours) #sort the contours from left to right
image = cv.cvtColor(image, cv.COLOR_GRAY2BGR) #convert the image colors back
letters = []
for contour in contours:
if cv.contourArea(contour) > self.letter_contour_min: #check if each contour is too small or not (makes sure we are analyzing potential chars)
                x, y, w, h = cv.boundingRect(contour) #bounding rect around the supposed character
letterInterest = thresh[0 : y + h, x : x + w] #making sure that the top of the letter is not cut off when getting the ROI
cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0)) #bounding box on the passed image
letterImage = cv.resize(letterInterest, (60, 80)) #resize the char ROI
letters.append(letterImage) #append the char ROI
if debug: #if debugging, show all relevant images
cv.imshow("GRAY {}".format(counter), imutils.resize(thresh, height=200))
cv.imshow(name, image) #showing and resizing image
cv.moveWindow(name, 0, 110 * counter - 50) #Moving the ROI windows into the right spot on the screen
        if len(letters) > 4: #if more than four letters are detected (i.e., at least five), then return the array
if collect_data:
                NeuralNetwork().label_letter(letters) #if we are collecting data, label the detected letters (instantiate the class before calling the method)
return letters
else: return None
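    # Hedged illustration of the sharpen + Otsu step used above on a standalone grayscale array
    # (`gray_roi` is hypothetical, not a variable from this class):
    #   blurred = cv.GaussianBlur(gray_roi, (0, 0), 3)
    #   sharpened = cv.addWeighted(blurred, 1.5, gray_roi, -0.5, 0) #same weighting as in process_ROI
    #   _, binary = cv.threshold(sharpened, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    # After cv.bitwise_not, the characters end up white on a black background, which is what the
    # contour extraction above expects.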
def check_min_rect(self, contour): #function for getting the min-area rectangle and validating whether it is ok
rect = cv.minAreaRect(contour) #get the min area rect
rx, ry, rw, rh = cv.boundingRect(contour) #get the bounding rectangle coordinates
if self.validateRatio(rect, rw, rh): #check the ratios of the ROI
brect = self.img[ry : ry + rh, rx : rx + rw] #get the ROI
self.roi_array.append((brect, rx, ry, rw, rh)) #append this ROI to the ROI array
return True #if everything is right, then return true to show that it is valid
else:
return False #else, return false
def validateRatio(self, rect, rw, rh): #more checking that the contour could be a license plate
(x, y), (width, height), angle = rect #get all of the data about the minarea bounding rectangle
if width == 0 or height == 0: #to defend against illegal math operations which panic the program
return False
angle = angle % 90 #getting the angle in the most basic form
area = width * height #calc the area
if not ((angle < self.angle_max or angle > self.angle_min) and (area > self.area_min and area < self.area_max)):
return False #if something is off, then return false (check that the angle is almost 90 or almost 0 and that the area is ok)
if rw < rh: #if the width is less than the height, return false
return False
return self.rat_check(width, height) #check the ratios
def rat_check(self, width, height):
ratio = float(width) / float(height) #check whether the width to height ratio is wrong
if ratio < 1:
ratio = 1 / ratio #making it so that the ratio is always more than 1
return not (ratio < self.ratio_min or ratio > self.ratio_max) #if the area is not in range or the ratio is off, return false
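    # Worked example (hedged): a plate-like region 300 wide and 150 tall gives ratio = 2.0;
    # a 150x300 region is flipped to 2.0 as well, so only the magnitude of the aspect ratio matters.
    # The check passes when ratio_min <= 2.0 <= ratio_max.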
########################################################################################
#################################### SHOWING IMAGES ####################################
########################################################################################
def show_images(self, height = 300): #showing the image which is necessary every iteration
cv.imshow("Original", imutils.resize(self.img, height = 200))
def check_keys(self):
if self.check_wait: #if going through the contours, check if q is pressed
key = cv.waitKey(0) & 0xFF
print("NEXT IMAGE")
if key == ord('q'): #exit button
exit(0)
else:
key = cv.waitKey(1)
if key & 0xFF == ord('q'): #exit button
exit(0)
elif key == ord('s'): #skip forward in the video
skip_forward()
elif key & 0xFF == ord('p'): # this creates a pause button for the video, in essence
print("VIDEO PAUSED @ FRAME {}".format(cap.get(cv.CAP_PROP_POS_FRAMES)))
while True:
key = cv.waitKey(25) & 0xFF
if key == ord('p'): #unpause
break
elif key == ord('q'): #quit the program button
exit(0)
elif key == ord('s'): #skip forward
skip_forward()
########################################################################################
#################################### CONTOUR SORTING ###################################
########################################################################################
    def sort_contours_left(self, contours): #sorts the contours from left to right using the x-position of each bounding box
        retContourMapping = []
        for i, contour in enumerate(contours): #for every contour, record the x-position of its bounding box
            x, _, _, _ = cv.boundingRect(contour)
            retContourMapping.append((contour, x, i))
        retContourMapping.sort(key=lambda tup: tup[1]) # sorts in place by x-position (leftmost first)
contours = []
for contour, _, _ in retContourMapping:
contours.append(contour)
return contours
############# NOT BEING USED #############
def sort_contours_middle(self, contours): #This sorts the contours based on how far the contours are from the middle of the screen (only looks at the x-pos)
rects = []
for c in contours:
            rects.append((cv.boundingRect(c), c)) #creating a tuple array with bounding rects and the contours
retContourMapping = []
        for i in range(len(rects)): #for every contour, first get the middle of the bounding box x-pos wise
            rect, contour = rects[i] #then subtract that value from the middle of the screen
            x, _, w, _ = rect #then take the abs. value of that and sort the values in increasing order
x = int(self.x / 2) - x #If necessary, this would allow us to put a cap on processing and only look at contours in the middle of the screen
x = abs(x + int(w/2))
retContourMapping.append((i, x, rects[i], contour))
retContourMapping.sort(key=lambda tup: tup[1]) # sorts in place by distance from vertical horizontal line
keys = []
for index, _, _, _ in retContourMapping:
keys.append(index)
return keys
########################################################################################
#################################### NEURAL NETWORK ####################################
########################################################################################
imageNumber = 0
training_file_keys = training_data_path + "training_data.txt"
class NeuralNetwork:
def __init__(self):
self.model = model #initializing all of the pertinent settings
self.plate_ret = ""
################ TRAINING THE MODEL ################
def label_letter(self, imagearr):
for image in imagearr:
            print("FRAME COUNT: {}".format(cap.get(cv.CAP_PROP_POS_FRAMES))) #printing the frame count in case my progress is lost
global imageNumber #getting the global variable names
global training_file_keys
cv.imshow("POSSIBLE LETTER", image) #show the ROI to get the character
cv.waitKey(1) #wait for a key for one millis
imageNumber = imageNumber + 1 #increasing the image number
letter = input("Please input the letter: ").upper() #turning the input to Upper case
hexval = ":".join("{:02x}".format(ord(c)) for c in letter) #print the hexval for each character that is input
            if len(letter) < 1 or hexval == "0c": #if no letter is input or the input is a form-feed character (hex 0c), mark the letter as underscore (unknown)
letter = '_'
else:
letter = letter[0] #make the input letter the first character which is input
            file = open(training_data_path + str(imageNumber) + ".txt", "w") #write the image out as a txt file
            for row in image: #save the image to the txt file
                np.savetxt(file, row)
            file.close() #close the file so the image data is flushed to disk
print("Letter passed: " + letter) #print the letter passed and append the passed letter to the keys file (for training)
            training_file = open(training_file_keys, "a")
            training_file.write("\n" + str(letter))
            training_file.close() #close so the label is written out immediately
################ PREDICTING WITH THE MODEL ################
    def get_chars_string(self, array): #getting the character string for a plate (this is the method called from show_bounds_and_text)
ret = ""
for image in array: #for each character...
ret += self.predict_char(image) #run it through the NN to get the character
return ret
def predict_char(self, image): #predict the input character using the neural network
image_formatted = self.setup_array(image) #setup the array
pred = model.predict(image_formatted) #predict the character
return letter_dict[int(np.argmax(pred))] #return the letter using the dict which was set up earlier
def setup_array(self, image): #formatting the input image to make sure it plays nice with the Neural Network
number_array = np.zeros((1, 80, 60, 1), dtype="float32")
number_array[0] = image.reshape(80, 60, 1) #sizing and inputting the array
return number_array
################ MODEL FUNCTIONS ################
def network_summary(self): #get a summary of the model (not used)
return self.model.summary()
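# A minimal, hypothetical usage sketch for the NeuralNetwork class above (it is not called anywhere
# in this file). It assumes the Keras model has been loaded and setup_dictionary() has been run, as
# in the main block below, and only illustrates the expected input: an 80x60 grayscale character ROI
# such as the ones produced by FindPlate.process_ROI.
def _demo_predict_single_char():
    dummy_char = (np.random.rand(80, 60) * 255).astype("float32") #stand-in for a real character ROI
    nn = NeuralNetwork()
    return nn.predict_char(dummy_char) #returns one entry of letter_dict, e.g. 'A' or '_'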
########################################################################################
############################### VID PROCESSING AND SETUP ###############################
########################################################################################
if __name__ == "__main__":
setup_dictionary()
#addresses for testing my code on still images:
imageAddresses = [
"kerasModelandData/licensePlate.jpeg",
"kerasModelandData/licensePlate2.jpeg",
"kerasModelandData/licensePlate3.jpeg",
"kerasModelandData/licensePlate4.jpeg",
"kerasModelandData/licensePlate5.jpeg"
]
print("\n\nWelcome\nPlease press q to quit the program\nPlease press p to pause and unpause during the video\nPlease press s to skip forward within the video\nPlease press anything else to continue through the images when analyzing individual images")
print("\nOnce you have looked at all of the still images, the video will begin\n\n")
print("Green boxes signify possible license plate regions \nwhile red ones show other ROI's which were picked up and discarded")
print("\nUnderscore characters ('_') represent characters which the Neural Network could not decipher with a high confidence")
cap = cv.VideoCapture(video_path)
print("Starting Video @ frame " + str(start_frame_number))
cap.set(cv.CAP_PROP_POS_FRAMES, start_frame_number) #setting the starting frame number to the correct number
if collect_data:
file_keys = open(training_file_keys, "r")
imageNumber = int(file_keys.readline().rstrip())
print("INDEX: " + str(imageNumber))
counter = 0
while(cap.isOpened()): #reading and analyzing the video as it runs
counter = counter + 1
counter = counter % frames_skipped
ret, img = cap.read()
if ret == True:
FindPlate(counter=counter, img = img)
else:
break
cap.release()
cv.destroyAllWindows()
| show_images_exec | identifier_name |
SelfOrganizingMap.py | #python libraries needed in code
import numpy as np
import matplotlib.pyplot as plt
import copy
from collections import defaultdict
class SelfOrganizingMap:
def __init__(self, inputSize, variables, hiddenSize):
##############################
# author: Ali Javed
# October 14 2020
# email: [email protected]
#############################
        # Description: class initializer. Creates an instance of the network, saving the parameters and setting random weights.
        # inputSize: number of input nodes needed, e.g. 5.
        # variables: for multivariate time series data, the number of variables. For a plain list of features, this is 1.
        # hiddenSize: dimensions of the hidden (map) layer; [2,3] creates a 2x3 node grid
########################################
        #set random seed for reproducibility
np.random.seed(0)
        # initialize variables
self.hiddenSize = np.asarray(hiddenSize)
self.inputSize = inputSize
# always start learning rate at 0.9
self.learningRateInitial = 0.9
self.learningRate = 0.9
self.neighborhoodSizeInitial = int(self.hiddenSize[0] / 2)
self.neighborhoodSize = int(self.hiddenSize[0] / 2)
self.Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
self.dim = variables
        # initialize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim, self.inputSize))
# grid layer activations to allow for finding winning node
self.gridActivation = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
        # Kohonen/output layer winning node, initialized to -1 (no node is minus 1, this is just a placeholder)
self.winnerNeuronIndex = -1
# def oned_to_grid_inde(index):
# function returns the index (in int) for each node in grid with the top left node being 0 and the bottom right node being i*j.
# return np.unravel_index(index, self.hiddenSize)
# def grid_to_1d_index(i,j)
# return np.ravel_multi_index((i,j),dims = hiddenSize, mode = 'raise')
def | (self, iteration, epoch):
#############################
# Description: Update the neighborhood size and learning rate
# iteration: number of current iteration
# epoch: total epochs to run the SOM for
########################################
self.neighborhoodSize = self.neighborhoodSizeInitial * (1 - (iteration / epoch))
self.learningRate = self.learningRateInitial * (1 - (iteration / epoch))
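    # Worked example (hedged): with neighborhoodSizeInitial = 5 and learningRateInitial = 0.9,
    # iteration 50 of 100 epochs gives neighborhoodSize = 2.5 and learningRate = 0.45; both decay
    # linearly toward 0 and are clamped to minimum values inside iterate().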
def find_neighbor_indices(self, i, j):
# function finds the neighboring rows and columns to include
# i : i-th index
# j : j-th index
# dist: how big the neighborhood should span
#########################################################
rows = []
columns = []
# python indexing starts with 0 so adjust here
i = i + 1
j = j + 1
if i > self.hiddenSize[0] or i < 1 or j > self.hiddenSize[1] or j < 1:
neighborhood = set()
return neighborhood
rows = np.arange(i - int(self.neighborhoodSize), i + int(self.neighborhoodSize) + 1)
columns = np.arange(j - int(self.neighborhoodSize), j + int(self.neighborhoodSize) + 1)
# get neighbor indexes as a combination of rows and columns
neighborhood = set()
for row in rows:
for column in columns:
#do not do wrap around neighborhood
if row > self.hiddenSize[0] or row < 1 or column > self.hiddenSize[1] or column < 1:
continue
row = row % self.hiddenSize[0]
column = column % self.hiddenSize[1]
if row == 0:
row = self.hiddenSize[0]
if column == 0:
column = self.hiddenSize[1]
# do not update actual row, because it is used in the loop
row_temp = row - 1
column_temp = column - 1
neighborhood.add((row_temp, column_temp))
return neighborhood
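    # Worked example (hedged): on a 10x10 grid with neighborhoodSize = 1, find_neighbor_indices(2, 2)
    # returns the 3x3 block of index pairs
    # {(1,1), (1,2), (1,3), (2,1), (2,2), (2,3), (3,1), (3,2), (3,3)};
    # positions that would fall outside the grid are skipped rather than wrapped.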
def findWinningNode(self, x, windowSize):
        ## function to find the winning node
        # x: input observation, shaped (1, variables, inputSize)
        # windowSize: window parameter passed through to dtw_d
        # (an earlier variant reshaped here: x = np.reshape(x[0], (1, 1, len(x[0]))))
        ####################################
        # calculate the distance from x to every node's weights; the node with the minimum distance wins
distances = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
distances = distances + float('inf')
for i in range(0, self.hiddenSize[0]):
for j in range(0, self.hiddenSize[1]):
# get weights associated to i-th and j-th node
weights = self.weights_Kohonen[i, j, :,:]
# make sure correct shape
weights = np.reshape(weights, (1, np.shape(weights)[0], np.shape(weights)[1]))
# format inputs for dynamic time warping
# use dynamic time warping as distance measure which has a window size parameter
d1 = np.reshape(x,(1,np.shape(x)[1],np.shape(x)[2]))
d2 = np.reshape(weights, (1, np.shape(weights)[1], np.shape(weights)[2]))
distance = self.dtw_d(d1, d2, windowSize)
if distance != distance:
print('DTW error: Nan Value')
distances[i, j] = distance
# minimum value is winner
winnerNeuronIndex = np.argmin(distances)
return winnerNeuronIndex
def propogateForward(self, x, windowSize):
############
# Description: Function forward propogates from input to grid
# x: single input
# windowSize: window size for dynamic time warping
##############################
# input to Kohonen
##############################
# make sure x is in correct shape for matrix multiplication
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
self.winnerNeuronIndex = self.findWinningNode(x, windowSize)
def update_weights_Kohonen(self, x):
############
# Description: Function updates the Kohonen layer (SOM layer) after one forward pass (i.e., forwardPropogate)
# x: single input
# make sure x is in correct shape
##############
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
# convert the single winning index into grid location (x, and y cordinate)
two_dIndex = np.unravel_index(self.winnerNeuronIndex, self.hiddenSize)
neighborhood = self.find_neighbor_indices(two_dIndex[0], two_dIndex[1])
# implement update formula to update all neighborhood
for neighbors in neighborhood:
i = neighbors[0]
j = neighbors[1]
# calculate the update
loss = 0
for m in range(0,np.shape(x)[1]):
update = self.learningRate * (x[0,m,:] - self.weights_Kohonen[i, j, m,:])
# update the weights
self.weights_Kohonen[i, j, m,:] = self.weights_Kohonen[i, j, m,:] + update
loss+= sum(abs(update))
return loss
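    # Worked example (hedged): with learningRate = 0.5, a stored weight of 0.2 and an input value of 1.0,
    # update = 0.5 * (1.0 - 0.2) = 0.4, so the weight moves to 0.6 -- every node in the winner's
    # neighborhood is pulled toward the input by a fraction equal to the learning rate.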
def iterate(self, inputs, epochs, path = '',windowSize = 0, targets = [], labels = [],legend_dict = {}, showPlot = 'False'):
############
# Description: Function iterates to organize the Kohonen layer
# inputs: all inputs
# epochs: epochs to iterate for
# path: Path to save SOM plots
# windowSize: windowSize to be used by DTW (for project), not usefull in assignment and set to 0.
# targets: keys or ids for each observation
# labels: labels, or ground truth.
#returns
#all_data: list of dictionary objects with 4 keys, 'target','x','y', and 'labels'
# x: x coordinate on SOM
# y: y cordinate on SOM
# target: keys or name of observation if provided.
# label: label (ground truth) of observation if provided.
##############
# reinitilize weights based on inputs
# initilize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim,self.inputSize))
# formula for weights initilization
for i in range(0, np.shape(inputs)[1]):
for j in range(0, np.shape(inputs)[2]):
firstPart = (np.mean(np.asarray(inputs)[:,i, j]) + np.random.uniform(-0.1, 0.1))
secondPart = (np.mean(np.asarray(inputs)[:,i, j]) * np.random.uniform(low=0, high=1, size=(self.hiddenSize[0], self.hiddenSize[1])))
weights = firstPart * secondPart
self.weights_Kohonen[:, :, i, j] = weights
#######################
# for epochs
weight_change_magnitude = []
for epoch in range(0, epochs):
# for each input
epoch_loss = 0
for i in range(0, len(inputs)):
# propogate forward
self.propogateForward(inputs[i], windowSize)
# update Kohonen
loss = self.update_weights_Kohonen(inputs[i])
epoch_loss += loss
if epoch % 20 == 0:
print('Epoch : ' + str(epoch) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
if len(path)>0:
self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,legend_dict = legend_dict )
self.update_parameters(epoch, epochs)
if self.neighborhoodSize < 1:
self.neighborhoodSize = 1
if self.learningRate < 0.2:
self.learningRate = 0.2
weight_change_magnitude.append(epoch_loss)
print('Epoch : ' + str(epoch + 1) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
#get Umatrix
self.Umatrix = self.createUmatrix(windowSize)
        all_data = [] #default so the function still returns something sensible when no plot path is given
        if len(path)>0:
            all_data = self.plotMap(inputs, epoch, showPlot, windowSize, path, targets, labels = labels, Umatrix = self.Umatrix, legend_dict = legend_dict, write = False)
            self.plotChange(weight_change_magnitude, showPlot, path)
        return all_data
def createUmatrix(self,windowSize):
#############################
# Description: create the umatrix
        # windowSize: window size for dynamic time warping. The U-matrix computation currently always uses Euclidean distance to avoid theoretical issues.
########################################
#set neighborhood size to 1 and reset it after the function
neighborHood_temp = copy.deepcopy(self.neighborhoodSize)
self.neighborhoodSize = 1
Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
# Perform 2D convolution with input data and kernel to get sum of neighboring nodes
for i in range(0,self.hiddenSize[0]):
for j in range(0,self.hiddenSize[1]):
#find all the neighbors for node at i,j
neighbors = self.find_neighbor_indices(i, j)
#remove self
neighbors.remove((i, j))
#get weights for node at i,j
weights = self.weights_Kohonen[i,j,:]
weights = np.reshape(weights, (1,np.shape(weights)[0],np.shape(weights)[1]))
#for all neighbors
for neighbor in neighbors:
#get x,y (i,j) coordinate for neighbor
x = neighbor[0]
y = neighbor[1]
#get neighbor weights
neighborWeights = self.weights_Kohonen[x,y,:]
neighborWeights = np.reshape(neighborWeights, (1, np.shape(neighborWeights)[0],np.shape(neighborWeights)[1]))
# use dynamic time warping as distance measure which has a window size parameter
#always use Euc distance for Umatrix
distance = self.dtw_d(weights, neighborWeights,0)
Umatrix[i,j] += distance
Umatrix[i,j] = Umatrix[i,j] / len(neighbors)
#reset neighborhoodSize
self.neighborhoodSize = copy.deepcopy(neighborHood_temp)
return Umatrix
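    # Example (hedged): for an interior node with 8 in-grid neighbours, Umatrix[i, j] is the mean of
    # the 8 distances between that node's weights and each neighbour's weights; high values therefore
    # mark boundaries between clusters on the map.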
def plotChange(self, weight_change_magnitude,showPlot ,path):
plt.figure()
plt.plot(np.arange(0,len(weight_change_magnitude)),weight_change_magnitude)
plt.xlabel('Epochs',fontsize = 22)
plt.ylabel('Weight change magnitude',fontsize = 22)
plt.ylim(0,max(weight_change_magnitude))
plt.xlim(0,len(weight_change_magnitude))
plt.savefig(path+'_weight_change.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
def plotMap(self, inputs, epoch, showPlot,windowSize, path = 'plot_epoch', targets = [],Umatrix = [], labels= [], legend_dict = {}, write = False):
if len(legend_dict.keys())==0:
setOfLabels= set(labels)
for l in setOfLabels:
legend_dict[l] = l
#colors to label points
colors = ['#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2','#f768a1','#c7e9c0','#74c476','#238b45','#fec44f']
#colors = ['#d7191c','#abdda4','#2b83ba']
#for legend
colors_used = set()
# plot observations with labels
plt.figure(figsize = (6,6))
#plot Umatrix first so other stuff is over it
if len(Umatrix)> 0:
#normalize U matrix
#ignore the zero padding for minimum
min_v = Umatrix.min()
max_v = Umatrix.max()
Umatrix = (Umatrix - min_v) / (max_v - min_v)
#update values less than 0 to zero
Umatrix[Umatrix<0] = 0
Umatrix = Umatrix * 255
plt.imshow(Umatrix.transpose(), cmap='Greys',alpha=1)
plotted = set()
#write to CSV
all_data = []
node_to_scatterSize = defaultdict(lambda: 30)
for i in range(0, len(inputs)):
input = inputs[i]
input = np.reshape(input, (1, np.shape(input)[0],np.shape(input)[1]))
winnderNode = self.findWinningNode(input, windowSize)
# convert to x - y coordinate
coord = np.unravel_index(winnderNode, self.hiddenSize)
#save to dict for writing to csv
d = {}
d['x'] = coord[0]
d['y'] = coord[1]
if len(targets)> 0:
d['target'] = targets[i]
if len(labels) >0:
d['labels'] = labels[i]
all_data.append(d)
if coord in plotted:
#just add some noise so duck and goose can show seperately. They are exactly the same as per data
#shift = random.uniform(1, 4)
#shift = 0
x = coord[0]
y = coord[1] #+ shift
node_to_scatterSize[winnderNode] += 30
#print('scatter size increased.')
#print(node_to_scatterSize[winnderNode])
else:
plotted.add(coord)
x = coord[0]
y = coord[1]
#scatter plot at same location but annotation differently
#plt.scatter(coord[0], coord[1], s=30, color = '#2ca25f')
if len(labels)>0:
color = colors[labels[i]]
else:
color ='#2ca25f'
if color in colors_used:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
else:
colors_used.add(color)
if len(labels)>0:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color,label = legend_dict[labels[i]])
else:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
#if len(targets)> 0:
# plt.annotate(targets[i], (x, y), fontsize=22)
plt.xlim(0 - 5, self.hiddenSize[0] + 5)
plt.ylim(0 - 5, self.hiddenSize[1] + 5)
plt.xlabel('Nodes', fontsize=22)
plt.ylabel('Nodes', fontsize=22)
plt.xticks([])
plt.yticks([])
plt.title('Kohonen Self-Organizing Map', fontsize=22)
if len(labels)>0:
plt.legend(fontsize = 18,bbox_to_anchor=(1.05, 1), loc='upper left')
plt.savefig(path+'_'+str(epoch)+'.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
return all_data
def getWeights(self):
return self.weights_Kohonen
def getActivations(self):
return self.gridActivation
def getUmatrix(self):
return self.Umatrix
def sq_euc(self,s1, s2):
#author: Ali Javed
#email: [email protected]
#Version history:
        #Version 1: basic implementation of dynamic time warping (dependent).
#Version 2 (7 Nov 2019). changed variable names to be more representative and added comments.
#Inputs
#s1: signal 1, size 1 * m * n. where m is the number of variables, n is the timesteps.
#s2: signal 2, size 1 * m * n. where m is the number of variables, n is the timesteps.
#OUTPUT
#dist: Squared euclidean distance
dist = ((s1 - s2) ** 2)
return dist.flatten().sum()
def dtw_d(self,s1, s2, w):
#author: Ali Javed
#email: [email protected]
        #Version 1: basic implementation of dynamic time warping (dependent).
#Version 2 (7 Nov 2019). changed variable names to be more representative and added comments.
#INPUTS:
#s1: signal 1, size 1 * m * n. where m is the number of variables, n is the timesteps.
#s2: signal 2, size 1 * m * n. where m is the number of variables, n is the timesteps.
        #w: window parameter, given as a fraction of the series length, between 0 and 1. 0 is
        #Euclidean distance while 1 is the maximum window size.
#
#OUTPUTS:
#dist: resulting distance
s1 = np.asarray(s1)
s2 = np.asarray(s2)
s1_shape = np.shape(s1)
s2_shape = np.shape(s2)
if w<0 or w>1:
print("Error: W should be between 0 and 1")
return False
if s1_shape[0] >1 or s2_shape[0] >1:
print("Error: Please check input dimensions")
return False
if s1_shape[1] != s2_shape[1]:
print("Error: Please check input dimensions. Number of variables not consistent.")
return False
if s1_shape[2] != s2_shape[2]:
print("Warning: Length of time series not equal")
#if window size is zero, it is plain euclidean distance
if w ==0:
dist = np.sqrt(self.sq_euc(s1, s2))
return dist
#get absolute window size
w = int(np.ceil(w * s1_shape[2]))
#adapt window size
w=int(max(w, abs(s1_shape[2]- s2_shape[2])));
#initilize
DTW = {}
for i in range(-1, s1_shape[2]):
for j in range(-1, s2_shape[2]):
DTW[(i, j)] = float('inf')
DTW[(-1, -1)] = 0
for i in range(s1_shape[2]):
for j in range(max(0, i - w), min(s2_shape[2], i + w)):
#squared euc distance
dist = self.sq_euc(s1[0,:,i], s2[0,:,j])
#find optimal path
DTW[(i, j)] = dist + min(DTW[(i - 1, j)], DTW[(i, j - 1)], DTW[(i - 1, j - 1)])
dist = np.sqrt(DTW[s1_shape[2] - 1, s2_shape[2] - 1])
return dist
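# A minimal, hypothetical usage sketch for the class above (not part of the original module).
# It assumes plain feature vectors (variables = 1) of length 4 and a 6x6 map, and only illustrates
# the expected input shape (observations, variables, features) and how a winning node is located
# with windowSize = 0 (plain Euclidean distance).
if __name__ == "__main__":
    demo_data = np.random.rand(20, 1, 4) #20 observations, 1 variable, 4 features each
    som = SelfOrganizingMap(inputSize=4, variables=1, hiddenSize=[6, 6])
    winner = som.findWinningNode(np.reshape(demo_data[0], (1, 1, 4)), windowSize=0)
    print('winning node (flattened index):', winner)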
| update_parameters | identifier_name |
SelfOrganizingMap.py | #python libraries needed in code
import numpy as np
import matplotlib.pyplot as plt
import copy
from collections import defaultdict
class SelfOrganizingMap:
def __init__(self, inputSize, variables, hiddenSize):
##############################
# author: Ali Javed
# October 14 2020
# email: [email protected]
#############################
# Description: Class initilizer. This function creates an instance of neural network saving the parameters and setting random weights
# inputsSize: number of input nodes needed i.e. 5.
# variables: In case of multivariate time series data, the number of variables. For a list of featuers, this parameter will be 1.
# hiddenSize: number of hidden layer nodes [2,3] will create a 2x3 node grid
########################################
#set random see for reproducibility
np.random.seed(0)
# initilize variables
self.hiddenSize = np.asarray(hiddenSize)
self.inputSize = inputSize
# always start learning rate at 0.9
self.learningRateInitial = 0.9
self.learningRate = 0.9
self.neighborhoodSizeInitial = int(self.hiddenSize[0] / 2)
self.neighborhoodSize = int(self.hiddenSize[0] / 2)
self.Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
self.dim = variables
# initilize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim, self.inputSize))
# grid layer activations to allow for finding winning node
self.gridActivation = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
# Kohonen/Output Layer winning node initlize to -1 (no node is minus 1, this is just a place holder)
self.winnerNeuronIndex = -1
# def oned_to_grid_inde(index):
# function returns the index (in int) for each node in grid with the top left node being 0 and the bottom right node being i*j.
# return np.unravel_index(index, self.hiddenSize)
# def grid_to_1d_index(i,j)
# return np.ravel_multi_index((i,j),dims = hiddenSize, mode = 'raise')
def update_parameters(self, iteration, epoch):
#############################
# Description: Update the neighborhood size and learning rate
# iteration: number of current iteration
# epoch: total epochs to run the SOM for
########################################
self.neighborhoodSize = self.neighborhoodSizeInitial * (1 - (iteration / epoch))
self.learningRate = self.learningRateInitial * (1 - (iteration / epoch))
def find_neighbor_indices(self, i, j):
# function finds the neighboring rows and columns to include
# i : i-th index
# j : j-th index
# dist: how big the neighborhood should span
#########################################################
rows = []
columns = []
# python indexing starts with 0 so adjust here
i = i + 1
j = j + 1
if i > self.hiddenSize[0] or i < 1 or j > self.hiddenSize[1] or j < 1:
neighborhood = set()
return neighborhood
rows = np.arange(i - int(self.neighborhoodSize), i + int(self.neighborhoodSize) + 1)
columns = np.arange(j - int(self.neighborhoodSize), j + int(self.neighborhoodSize) + 1)
# get neighbor indexes as a combination of rows and columns
neighborhood = set()
for row in rows:
for column in columns:
#do not do wrap around neighborhood
if row > self.hiddenSize[0] or row < 1 or column > self.hiddenSize[1] or column < 1:
continue
row = row % self.hiddenSize[0]
column = column % self.hiddenSize[1]
if row == 0:
row = self.hiddenSize[0]
if column == 0:
column = self.hiddenSize[1]
# do not update actual row, because it is used in the loop
row_temp = row - 1
column_temp = column - 1
neighborhood.add((row_temp, column_temp))
return neighborhood
def findWinningNode(self, x, windowSize):
## function to find winning node
        # x: input observation
# format input for use in this function --- dtw distance
# x = np.reshape(x[0], (1, 1, len(x[0])))
####################################
# calculate distances (in Euclidean and DTW it is the minimum). Iterate over all nodes to find distance
distances = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
distances = distances + float('inf')
for i in range(0, self.hiddenSize[0]):
for j in range(0, self.hiddenSize[1]):
# get weights associated to i-th and j-th node
weights = self.weights_Kohonen[i, j, :,:]
# make sure correct shape
weights = np.reshape(weights, (1, np.shape(weights)[0], np.shape(weights)[1]))
# format inputs for dynamic time warping
# use dynamic time warping as distance measure which has a window size parameter
d1 = np.reshape(x,(1,np.shape(x)[1],np.shape(x)[2]))
d2 = np.reshape(weights, (1, np.shape(weights)[1], np.shape(weights)[2]))
distance = self.dtw_d(d1, d2, windowSize)
if distance != distance:
print('DTW error: Nan Value')
distances[i, j] = distance
# minimum value is winner
winnerNeuronIndex = np.argmin(distances)
return winnerNeuronIndex
def propogateForward(self, x, windowSize):
############
# Description: Function forward propogates from input to grid
# x: single input
# windowSize: window size for dynamic time warping
##############################
# input to Kohonen
##############################
# make sure x is in correct shape for matrix multiplication
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
self.winnerNeuronIndex = self.findWinningNode(x, windowSize)
def update_weights_Kohonen(self, x):
############
# Description: Function updates the Kohonen layer (SOM layer) after one forward pass (i.e., forwardPropogate)
# x: single input
# make sure x is in correct shape
##############
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
# convert the single winning index into grid location (x, and y cordinate)
two_dIndex = np.unravel_index(self.winnerNeuronIndex, self.hiddenSize)
neighborhood = self.find_neighbor_indices(two_dIndex[0], two_dIndex[1])
# implement update formula to update all neighborhood
for neighbors in neighborhood:
i = neighbors[0]
j = neighbors[1]
# calculate the update
loss = 0
for m in range(0,np.shape(x)[1]):
update = self.learningRate * (x[0,m,:] - self.weights_Kohonen[i, j, m,:])
# update the weights
self.weights_Kohonen[i, j, m,:] = self.weights_Kohonen[i, j, m,:] + update
loss+= sum(abs(update))
return loss
def iterate(self, inputs, epochs, path = '',windowSize = 0, targets = [], labels = [],legend_dict = {}, showPlot = 'False'):
############
# Description: Function iterates to organize the Kohonen layer
# inputs: all inputs
# epochs: epochs to iterate for
# path: Path to save SOM plots
# windowSize: windowSize to be used by DTW (for project), not usefull in assignment and set to 0.
# targets: keys or ids for each observation
# labels: labels, or ground truth.
#returns
#all_data: list of dictionary objects with 4 keys, 'target','x','y', and 'labels'
# x: x coordinate on SOM
# y: y cordinate on SOM
# target: keys or name of observation if provided.
# label: label (ground truth) of observation if provided.
##############
# reinitilize weights based on inputs
# initilize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim,self.inputSize))
# formula for weights initilization
for i in range(0, np.shape(inputs)[1]):
for j in range(0, np.shape(inputs)[2]):
firstPart = (np.mean(np.asarray(inputs)[:,i, j]) + np.random.uniform(-0.1, 0.1))
secondPart = (np.mean(np.asarray(inputs)[:,i, j]) * np.random.uniform(low=0, high=1, size=(self.hiddenSize[0], self.hiddenSize[1])))
weights = firstPart * secondPart
self.weights_Kohonen[:, :, i, j] = weights
#######################
# for epochs
weight_change_magnitude = []
for epoch in range(0, epochs):
# for each input
epoch_loss = 0
for i in range(0, len(inputs)):
# propogate forward
self.propogateForward(inputs[i], windowSize)
# update Kohonen
loss = self.update_weights_Kohonen(inputs[i])
epoch_loss += loss
if epoch % 20 == 0:
print('Epoch : ' + str(epoch) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
if len(path)>0:
self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,legend_dict = legend_dict )
self.update_parameters(epoch, epochs)
if self.neighborhoodSize < 1:
self.neighborhoodSize = 1
if self.learningRate < 0.2:
self.learningRate = 0.2
weight_change_magnitude.append(epoch_loss)
print('Epoch : ' + str(epoch + 1) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
#get Umatrix
self.Umatrix = self.createUmatrix(windowSize)
        all_data = [] #default so the function still returns something sensible when no plot path is given
        if len(path)>0:
            all_data = self.plotMap(inputs, epoch, showPlot, windowSize, path, targets, labels = labels, Umatrix = self.Umatrix, legend_dict = legend_dict, write = False)
            self.plotChange(weight_change_magnitude, showPlot, path)
        return all_data
def createUmatrix(self,windowSize):
#############################
# Description: create the umatrix
# windowSize: window size for dynamic time warping. Umatrix function currentl always uses Euclidean distance to avoid theoritical issues.
########################################
#set neighborhood size to 1 and reset it after the function
neighborHood_temp = copy.deepcopy(self.neighborhoodSize)
self.neighborhoodSize = 1
Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
# Perform 2D convolution with input data and kernel to get sum of neighboring nodes
for i in range(0,self.hiddenSize[0]):
for j in range(0,self.hiddenSize[1]):
#find all the neighbors for node at i,j
neighbors = self.find_neighbor_indices(i, j)
#remove self
neighbors.remove((i, j))
#get weights for node at i,j
weights = self.weights_Kohonen[i,j,:]
weights = np.reshape(weights, (1,np.shape(weights)[0],np.shape(weights)[1]))
#for all neighbors
for neighbor in neighbors:
#get x,y (i,j) coordinate for neighbor
x = neighbor[0]
y = neighbor[1]
#get neighbor weights
neighborWeights = self.weights_Kohonen[x,y,:]
neighborWeights = np.reshape(neighborWeights, (1, np.shape(neighborWeights)[0],np.shape(neighborWeights)[1]))
# use dynamic time warping as distance measure which has a window size parameter
#always use Euc distance for Umatrix
distance = self.dtw_d(weights, neighborWeights,0)
Umatrix[i,j] += distance
Umatrix[i,j] = Umatrix[i,j] / len(neighbors)
#reset neighborhoodSize
self.neighborhoodSize = copy.deepcopy(neighborHood_temp)
return Umatrix
def plotChange(self, weight_change_magnitude,showPlot ,path):
plt.figure()
plt.plot(np.arange(0,len(weight_change_magnitude)),weight_change_magnitude)
plt.xlabel('Epochs',fontsize = 22)
plt.ylabel('Weight change magnitude',fontsize = 22)
plt.ylim(0,max(weight_change_magnitude))
plt.xlim(0,len(weight_change_magnitude))
plt.savefig(path+'_weight_change.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
def plotMap(self, inputs, epoch, showPlot,windowSize, path = 'plot_epoch', targets = [],Umatrix = [], labels= [], legend_dict = {}, write = False):
if len(legend_dict.keys())==0:
setOfLabels= set(labels)
for l in setOfLabels:
legend_dict[l] = l
#colors to label points
colors = ['#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2','#f768a1','#c7e9c0','#74c476','#238b45','#fec44f']
#colors = ['#d7191c','#abdda4','#2b83ba']
#for legend
colors_used = set()
# plot observations with labels
plt.figure(figsize = (6,6))
#plot Umatrix first so other stuff is over it
if len(Umatrix)> 0:
#normalize U matrix
#ignore the zero padding for minimum
min_v = Umatrix.min()
max_v = Umatrix.max()
Umatrix = (Umatrix - min_v) / (max_v - min_v)
#update values less than 0 to zero
Umatrix[Umatrix<0] = 0
Umatrix = Umatrix * 255
plt.imshow(Umatrix.transpose(), cmap='Greys',alpha=1)
plotted = set()
#write to CSV
all_data = []
node_to_scatterSize = defaultdict(lambda: 30)
for i in range(0, len(inputs)):
input = inputs[i]
input = np.reshape(input, (1, np.shape(input)[0],np.shape(input)[1]))
winnderNode = self.findWinningNode(input, windowSize)
# convert to x - y coordinate
coord = np.unravel_index(winnderNode, self.hiddenSize)
#save to dict for writing to csv
d = {}
d['x'] = coord[0]
d['y'] = coord[1]
if len(targets)> 0:
d['target'] = targets[i]
if len(labels) >0:
d['labels'] = labels[i]
all_data.append(d)
if coord in plotted:
#just add some noise so duck and goose can show seperately. They are exactly the same as per data
#shift = random.uniform(1, 4)
#shift = 0
x = coord[0]
y = coord[1] #+ shift
node_to_scatterSize[winnderNode] += 30
#print('scatter size increased.')
#print(node_to_scatterSize[winnderNode])
else:
plotted.add(coord)
x = coord[0]
y = coord[1]
#scatter plot at same location but annotation differently
#plt.scatter(coord[0], coord[1], s=30, color = '#2ca25f')
if len(labels)>0:
color = colors[labels[i]]
else:
color ='#2ca25f'
if color in colors_used:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
else:
colors_used.add(color)
if len(labels)>0:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color,label = legend_dict[labels[i]])
else:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
#if len(targets)> 0:
# plt.annotate(targets[i], (x, y), fontsize=22)
plt.xlim(0 - 5, self.hiddenSize[0] + 5)
plt.ylim(0 - 5, self.hiddenSize[1] + 5)
plt.xlabel('Nodes', fontsize=22)
plt.ylabel('Nodes', fontsize=22)
plt.xticks([])
plt.yticks([])
plt.title('Kohonen Self-Organizing Map', fontsize=22)
if len(labels)>0:
plt.legend(fontsize = 18,bbox_to_anchor=(1.05, 1), loc='upper left')
plt.savefig(path+'_'+str(epoch)+'.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
return all_data
def getWeights(self):
return self.weights_Kohonen
def getActivations(self):
return self.gridActivation
def getUmatrix(self):
return self.Umatrix
def sq_euc(self,s1, s2):
#author: Ali Javed
#email: [email protected]
#Version history:
#Version 1 . basis implementation of dynaimc time warping dependant.
#Version 2 (7 Nov 2019). changed variable names to be more representative and added comments.
#Inputs
#s1: signal 1, size 1 * m * n. where m is the number of variables, n is the timesteps.
#s2: signal 2, size 1 * m * n. where m is the number of variables, n is the timesteps.
#OUTPUT
#dist: Squared euclidean distance
|
def dtw_d(self,s1, s2, w):
#author: Ali Javed
#email: [email protected]
#Version 1 . basis implementation of dynaimc time warping dependant.
#Version 2 (7 Nov 2019). changed variable names to be more representative and added comments.
#INPUTS:
#s1: signal 1, size 1 * m * n. where m is the number of variables, n is the timesteps.
#s2: signal 2, size 1 * m * n. where m is the number of variables, n is the timesteps.
        #w: window parameter, given as a fraction of the series length, between 0 and 1. 0 is
        #Euclidean distance while 1 is the maximum window size.
#
#OUTPUTS:
#dist: resulting distance
s1 = np.asarray(s1)
s2 = np.asarray(s2)
s1_shape = np.shape(s1)
s2_shape = np.shape(s2)
if w<0 or w>1:
print("Error: W should be between 0 and 1")
return False
if s1_shape[0] >1 or s2_shape[0] >1:
print("Error: Please check input dimensions")
return False
if s1_shape[1] != s2_shape[1]:
print("Error: Please check input dimensions. Number of variables not consistent.")
return False
if s1_shape[2] != s2_shape[2]:
print("Warning: Length of time series not equal")
#if window size is zero, it is plain euclidean distance
if w ==0:
dist = np.sqrt(self.sq_euc(s1, s2))
return dist
#get absolute window size
w = int(np.ceil(w * s1_shape[2]))
#adapt window size
w=int(max(w, abs(s1_shape[2]- s2_shape[2])));
#initilize
DTW = {}
for i in range(-1, s1_shape[2]):
for j in range(-1, s2_shape[2]):
DTW[(i, j)] = float('inf')
DTW[(-1, -1)] = 0
for i in range(s1_shape[2]):
for j in range(max(0, i - w), min(s2_shape[2], i + w)):
#squared euc distance
dist = self.sq_euc(s1[0,:,i], s2[0,:,j])
#find optimal path
DTW[(i, j)] = dist + min(DTW[(i - 1, j)], DTW[(i, j - 1)], DTW[(i - 1, j - 1)])
dist = np.sqrt(DTW[s1_shape[2] - 1, s2_shape[2] - 1])
return dist
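    # Example (hedged): for two series of length 100, w = 0.1 gives an absolute window of
    # ceil(0.1 * 100) = 10, so cell (i, j) is only filled when |i - j| <= 10 (roughly -- the window is
    # also widened to cover any length difference between the series); w = 0 short-circuits to plain
    # Euclidean distance above.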
| dist = ((s1 - s2) ** 2)
return dist.flatten().sum() | identifier_body |
SelfOrganizingMap.py | #python libraries needed in code
import numpy as np
import matplotlib.pyplot as plt
import copy
from collections import defaultdict
class SelfOrganizingMap:
def __init__(self, inputSize, variables, hiddenSize):
##############################
# author: Ali Javed
# October 14 2020
# email: [email protected]
#############################
# Description: Class initilizer. This function creates an instance of neural network saving the parameters and setting random weights
# inputsSize: number of input nodes needed i.e. 5.
# variables: In case of multivariate time series data, the number of variables. For a list of featuers, this parameter will be 1.
# hiddenSize: number of hidden layer nodes [2,3] will create a 2x3 node grid
########################################
#set random see for reproducibility
np.random.seed(0)
# initilize variables
self.hiddenSize = np.asarray(hiddenSize)
self.inputSize = inputSize
# always start learning rate at 0.9
self.learningRateInitial = 0.9
self.learningRate = 0.9
self.neighborhoodSizeInitial = int(self.hiddenSize[0] / 2)
self.neighborhoodSize = int(self.hiddenSize[0] / 2)
self.Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
self.dim = variables
# initilize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim, self.inputSize))
# grid layer activations to allow for finding winning node
self.gridActivation = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
# Kohonen/Output Layer winning node initlize to -1 (no node is minus 1, this is just a place holder)
self.winnerNeuronIndex = -1
# def oned_to_grid_inde(index):
# function returns the index (in int) for each node in grid with the top left node being 0 and the bottom right node being i*j.
# return np.unravel_index(index, self.hiddenSize)
# def grid_to_1d_index(i,j)
# return np.ravel_multi_index((i,j),dims = hiddenSize, mode = 'raise')
def update_parameters(self, iteration, epoch):
#############################
# Description: Update the neighborhood size and learning rate
# iteration: number of current iteration
# epoch: total epochs to run the SOM for
########################################
self.neighborhoodSize = self.neighborhoodSizeInitial * (1 - (iteration / epoch))
self.learningRate = self.learningRateInitial * (1 - (iteration / epoch))
def find_neighbor_indices(self, i, j):
# function finds the neighboring rows and columns to include
# i : i-th index
# j : j-th index
# dist: how big the neighborhood should span
#########################################################
rows = []
columns = []
# python indexing starts with 0 so adjust here
i = i + 1
j = j + 1
if i > self.hiddenSize[0] or i < 1 or j > self.hiddenSize[1] or j < 1:
neighborhood = set()
return neighborhood
rows = np.arange(i - int(self.neighborhoodSize), i + int(self.neighborhoodSize) + 1)
columns = np.arange(j - int(self.neighborhoodSize), j + int(self.neighborhoodSize) + 1)
# get neighbor indexes as a combination of rows and columns
neighborhood = set()
for row in rows:
for column in columns:
#do not do wrap around neighborhood
if row > self.hiddenSize[0] or row < 1 or column > self.hiddenSize[1] or column < 1:
continue
row = row % self.hiddenSize[0]
column = column % self.hiddenSize[1]
if row == 0:
row = self.hiddenSize[0]
if column == 0:
column = self.hiddenSize[1]
# do not update actual row, because it is used in the loop
row_temp = row - 1
column_temp = column - 1
neighborhood.add((row_temp, column_temp))
return neighborhood
def findWinningNode(self, x, windowSize):
## function to find winning node
        # x: input observation
# format input for use in this function --- dtw distance
# x = np.reshape(x[0], (1, 1, len(x[0])))
####################################
# calculate distances (in Euclidean and DTW it is the minimum). Iterate over all nodes to find distance
distances = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
distances = distances + float('inf')
for i in range(0, self.hiddenSize[0]):
|
# minimum value is winner
winnerNeuronIndex = np.argmin(distances)
return winnerNeuronIndex
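    # Example (hedged): on a 6x6 grid, np.argmin over the 6x6 distance matrix returns a flattened
    # index such as 14; propogateForward/update_weights_Kohonen later map it back to grid coordinates
    # with np.unravel_index(14, (6, 6)) == (2, 2).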
def propogateForward(self, x, windowSize):
############
# Description: Function forward propogates from input to grid
# x: single input
# windowSize: window size for dynamic time warping
##############################
# input to Kohonen
##############################
# make sure x is in correct shape for matrix multiplication
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
self.winnerNeuronIndex = self.findWinningNode(x, windowSize)
def update_weights_Kohonen(self, x):
############
# Description: Function updates the Kohonen layer (SOM layer) after one forward pass (i.e., forwardPropogate)
# x: single input
# make sure x is in correct shape
##############
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
# convert the single winning index into grid location (x, and y cordinate)
two_dIndex = np.unravel_index(self.winnerNeuronIndex, self.hiddenSize)
neighborhood = self.find_neighbor_indices(two_dIndex[0], two_dIndex[1])
# implement update formula to update all neighborhood
for neighbors in neighborhood:
i = neighbors[0]
j = neighbors[1]
# calculate the update
loss = 0
for m in range(0,np.shape(x)[1]):
update = self.learningRate * (x[0,m,:] - self.weights_Kohonen[i, j, m,:])
# update the weights
self.weights_Kohonen[i, j, m,:] = self.weights_Kohonen[i, j, m,:] + update
loss+= sum(abs(update))
return loss
def iterate(self, inputs, epochs, path = '',windowSize = 0, targets = [], labels = [],legend_dict = {}, showPlot = 'False'):
############
# Description: Function iterates to organize the Kohonen layer
# inputs: all inputs
# epochs: epochs to iterate for
# path: Path to save SOM plots
# windowSize: windowSize to be used by DTW (for project), not usefull in assignment and set to 0.
# targets: keys or ids for each observation
# labels: labels, or ground truth.
#returns
#all_data: list of dictionary objects with 4 keys, 'target','x','y', and 'labels'
# x: x coordinate on SOM
# y: y cordinate on SOM
# target: keys or name of observation if provided.
# label: label (ground truth) of observation if provided.
##############
# reinitilize weights based on inputs
# initilize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim,self.inputSize))
# formula for weights initilization
for i in range(0, np.shape(inputs)[1]):
for j in range(0, np.shape(inputs)[2]):
firstPart = (np.mean(np.asarray(inputs)[:,i, j]) + np.random.uniform(-0.1, 0.1))
secondPart = (np.mean(np.asarray(inputs)[:,i, j]) * np.random.uniform(low=0, high=1, size=(self.hiddenSize[0], self.hiddenSize[1])))
weights = firstPart * secondPart
self.weights_Kohonen[:, :, i, j] = weights
#######################
# for epochs
weight_change_magnitude = []
for epoch in range(0, epochs):
# for each input
epoch_loss = 0
for i in range(0, len(inputs)):
# propogate forward
self.propogateForward(inputs[i], windowSize)
# update Kohonen
loss = self.update_weights_Kohonen(inputs[i])
epoch_loss += loss
if epoch % 20 == 0:
print('Epoch : ' + str(epoch) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
if len(path)>0:
self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,legend_dict = legend_dict )
self.update_parameters(epoch, epochs)
if self.neighborhoodSize < 1:
self.neighborhoodSize = 1
if self.learningRate < 0.2:
self.learningRate = 0.2
weight_change_magnitude.append(epoch_loss)
print('Epoch : ' + str(epoch + 1) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
#get Umatrix
self.Umatrix = self.createUmatrix(windowSize)
        all_data = [] #default so the function still returns something sensible when no plot path is given
        if len(path)>0:
            all_data = self.plotMap(inputs, epoch, showPlot, windowSize, path, targets, labels = labels, Umatrix = self.Umatrix, legend_dict = legend_dict, write = False)
            self.plotChange(weight_change_magnitude, showPlot, path)
        return all_data
def createUmatrix(self,windowSize):
#############################
# Description: create the umatrix
# windowSize: window size for dynamic time warping. Umatrix function currentl always uses Euclidean distance to avoid theoritical issues.
########################################
#set neighborhood size to 1 and reset it after the function
neighborHood_temp = copy.deepcopy(self.neighborhoodSize)
self.neighborhoodSize = 1
Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
# Perform 2D convolution with input data and kernel to get sum of neighboring nodes
for i in range(0,self.hiddenSize[0]):
for j in range(0,self.hiddenSize[1]):
#find all the neighbors for node at i,j
neighbors = self.find_neighbor_indices(i, j)
#remove self
neighbors.remove((i, j))
#get weights for node at i,j
weights = self.weights_Kohonen[i,j,:]
weights = np.reshape(weights, (1,np.shape(weights)[0],np.shape(weights)[1]))
#for all neighbors
for neighbor in neighbors:
#get x,y (i,j) coordinate for neighbor
x = neighbor[0]
y = neighbor[1]
#get neighbor weights
neighborWeights = self.weights_Kohonen[x,y,:]
neighborWeights = np.reshape(neighborWeights, (1, np.shape(neighborWeights)[0],np.shape(neighborWeights)[1]))
# use dynamic time warping as distance measure which has a window size parameter
#always use Euc distance for Umatrix
distance = self.dtw_d(weights, neighborWeights,0)
Umatrix[i,j] += distance
Umatrix[i,j] = Umatrix[i,j] / len(neighbors)
#reset neighborhoodSize
self.neighborhoodSize = copy.deepcopy(neighborHood_temp)
return Umatrix
def plotChange(self, weight_change_magnitude,showPlot ,path):
plt.figure()
plt.plot(np.arange(0,len(weight_change_magnitude)),weight_change_magnitude)
plt.xlabel('Epochs',fontsize = 22)
plt.ylabel('Weight change magnitude',fontsize = 22)
plt.ylim(0,max(weight_change_magnitude))
plt.xlim(0,len(weight_change_magnitude))
plt.savefig(path+'_weight_change.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
def plotMap(self, inputs, epoch, showPlot,windowSize, path = 'plot_epoch', targets = [],Umatrix = [], labels= [], legend_dict = {}, write = False):
if len(legend_dict.keys())==0:
setOfLabels= set(labels)
for l in setOfLabels:
legend_dict[l] = l
#colors to label points
colors = ['#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2','#f768a1','#c7e9c0','#74c476','#238b45','#fec44f']
#colors = ['#d7191c','#abdda4','#2b83ba']
#for legend
colors_used = set()
# plot observations with labels
plt.figure(figsize = (6,6))
#plot Umatrix first so other stuff is over it
if len(Umatrix)> 0:
#normalize U matrix
#ignore the zero padding for minimum
min_v = Umatrix.min()
max_v = Umatrix.max()
Umatrix = (Umatrix - min_v) / (max_v - min_v)
#update values less than 0 to zero
Umatrix[Umatrix<0] = 0
Umatrix = Umatrix * 255
plt.imshow(Umatrix.transpose(), cmap='Greys',alpha=1)
plotted = set()
#write to CSV
all_data = []
node_to_scatterSize = defaultdict(lambda: 30)
for i in range(0, len(inputs)):
input = inputs[i]
input = np.reshape(input, (1, np.shape(input)[0],np.shape(input)[1]))
winnderNode = self.findWinningNode(input, windowSize)
# convert to x - y coordinate
coord = np.unravel_index(winnderNode, self.hiddenSize)
#save to dict for writing to csv
d = {}
d['x'] = coord[0]
d['y'] = coord[1]
if len(targets)> 0:
d['target'] = targets[i]
if len(labels) >0:
d['labels'] = labels[i]
all_data.append(d)
if coord in plotted:
#just add some noise so duck and goose can show seperately. They are exactly the same as per data
#shift = random.uniform(1, 4)
#shift = 0
x = coord[0]
y = coord[1] #+ shift
node_to_scatterSize[winnderNode] += 30
#print('scatter size increased.')
#print(node_to_scatterSize[winnderNode])
else:
plotted.add(coord)
x = coord[0]
y = coord[1]
#scatter plot at same location but annotation differently
#plt.scatter(coord[0], coord[1], s=30, color = '#2ca25f')
if len(labels)>0:
color = colors[labels[i]]
else:
color ='#2ca25f'
if color in colors_used:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
else:
colors_used.add(color)
if len(labels)>0:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color,label = legend_dict[labels[i]])
else:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
#if len(targets)> 0:
# plt.annotate(targets[i], (x, y), fontsize=22)
plt.xlim(0 - 5, self.hiddenSize[0] + 5)
plt.ylim(0 - 5, self.hiddenSize[1] + 5)
plt.xlabel('Nodes', fontsize=22)
plt.ylabel('Nodes', fontsize=22)
plt.xticks([])
plt.yticks([])
plt.title('Kohonen Self-Organizing Map', fontsize=22)
if len(labels)>0:
plt.legend(fontsize = 18,bbox_to_anchor=(1.05, 1), loc='upper left')
plt.savefig(path+'_'+str(epoch)+'.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
return all_data
def getWeights(self):
return self.weights_Kohonen
def getActivations(self):
return self.gridActivation
def getUmatrix(self):
return self.Umatrix
def sq_euc(self,s1, s2):
#author: Ali Javed
#email: [email protected]
#Version history:
#Version 1. basic implementation of dynamic time warping dependent.
#Version 2 (7 Nov 2019). changed variable names to be more representative and added comments.
#Inputs
#s1: signal 1, size 1 * m * n. where m is the number of variables, n is the timesteps.
#s2: signal 2, size 1 * m * n. where m is the number of variables, n is the timesteps.
#OUTPUT
#dist: Squared euclidean distance
dist = ((s1 - s2) ** 2)
return dist.flatten().sum()
def dtw_d(self,s1, s2, w):
#author: Ali Javed
#email: [email protected]
#Version 1. basic implementation of dynamic time warping dependent.
#Version 2 (7 Nov 2019). changed variable names to be more representative and added comments.
#INPUTS:
#s1: signal 1, size 1 * m * n. where m is the number of variables, n is the timesteps.
#s2: signal 2, size 1 * m * n. where m is the number of variables, n is the timesteps.
#w: window parameter, percent of size, between 0 and 1. 0 is
#Euclidean distance while 1 is the maximum window size.
#
#OUTPUTS:
#dist: resulting distance
s1 = np.asarray(s1)
s2 = np.asarray(s2)
s1_shape = np.shape(s1)
s2_shape = np.shape(s2)
if w<0 or w>1:
print("Error: W should be between 0 and 1")
return False
if s1_shape[0] >1 or s2_shape[0] >1:
print("Error: Please check input dimensions")
return False
if s1_shape[1] != s2_shape[1]:
print("Error: Please check input dimensions. Number of variables not consistent.")
return False
if s1_shape[2] != s2_shape[2]:
print("Warning: Length of time series not equal")
#if window size is zero, it is plain euclidean distance
if w ==0:
dist = np.sqrt(self.sq_euc(s1, s2))
return dist
#get absolute window size
w = int(np.ceil(w * s1_shape[2]))
#adapt window size
w=int(max(w, abs(s1_shape[2]- s2_shape[2])));
#initialize
DTW = {}
for i in range(-1, s1_shape[2]):
for j in range(-1, s2_shape[2]):
DTW[(i, j)] = float('inf')
DTW[(-1, -1)] = 0
for i in range(s1_shape[2]):
for j in range(max(0, i - w), min(s2_shape[2], i + w)):
#squared euc distance
dist = self.sq_euc(s1[0,:,i], s2[0,:,j])
#find optimal path
DTW[(i, j)] = dist + min(DTW[(i - 1, j)], DTW[(i, j - 1)], DTW[(i - 1, j - 1)])
dist = np.sqrt(DTW[s1_shape[2] - 1, s2_shape[2] - 1])
return dist
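# --- Added note (not in the original source): a minimal sketch of calling dtw_d directly.
# Both signals are assumed to have shape (1, variables, timesteps); w=0 reduces to plain
# Euclidean distance, while w=1 allows the maximum warping window:
# a = np.random.rand(1, 2, 50)
# b = np.random.rand(1, 2, 60)
# d = som.dtw_d(a, b, 0.1)  # `som` is assumed to be an instance of this class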
| for j in range(0, self.hiddenSize[1]):
# get weights associated to i-th and j-th node
weights = self.weights_Kohonen[i, j, :,:]
# make sure correct shape
weights = np.reshape(weights, (1, np.shape(weights)[0], np.shape(weights)[1]))
# format inputs for dynamic time warping
# use dynamic time warping as distance measure which has a window size parameter
d1 = np.reshape(x,(1,np.shape(x)[1],np.shape(x)[2]))
d2 = np.reshape(weights, (1, np.shape(weights)[1], np.shape(weights)[2]))
distance = self.dtw_d(d1, d2, windowSize)
if distance != distance:
print('DTW error: Nan Value')
distances[i, j] = distance | conditional_block |
SelfOrganizingMap.py | #python libraries needed in code
import numpy as np
import matplotlib.pyplot as plt
import copy
from collections import defaultdict
class SelfOrganizingMap:
def __init__(self, inputSize, variables, hiddenSize):
##############################
# author: Ali Javed
# October 14 2020
# email: [email protected]
#############################
# Description: Class initializer. This function creates an instance of the neural network, saving the parameters and setting random weights
# inputSize: number of input nodes needed, e.g. 5.
# variables: In case of multivariate time series data, the number of variables. For a list of features, this parameter will be 1.
# hiddenSize: number of hidden layer nodes [2,3] will create a 2x3 node grid
########################################
#set random seed for reproducibility
np.random.seed(0)
# initialize variables
self.hiddenSize = np.asarray(hiddenSize)
self.inputSize = inputSize
# always start learning rate at 0.9
self.learningRateInitial = 0.9
self.learningRate = 0.9
self.neighborhoodSizeInitial = int(self.hiddenSize[0] / 2)
self.neighborhoodSize = int(self.hiddenSize[0] / 2)
self.Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
self.dim = variables
# initialize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim, self.inputSize))
# grid layer activations to allow for finding winning node
self.gridActivation = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
# Kohonen/Output Layer winning node initialized to -1 (-1 means no winning node yet; this is just a placeholder)
self.winnerNeuronIndex = -1
# def oned_to_grid_inde(index):
# function returns the index (in int) for each node in grid with the top left node being 0 and the bottom right node being i*j.
# return np.unravel_index(index, self.hiddenSize)
# def grid_to_1d_index(i,j)
# return np.ravel_multi_index((i,j),dims = hiddenSize, mode = 'raise')
def update_parameters(self, iteration, epoch):
#############################
# Description: Update the neighborhood size and learning rate
# iteration: number of current iteration
# epoch: total epochs to run the SOM for
########################################
self.neighborhoodSize = self.neighborhoodSizeInitial * (1 - (iteration / epoch))
self.learningRate = self.learningRateInitial * (1 - (iteration / epoch))
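# --- Added note (not in the original source): a worked example of the linear decay above.
# With neighborhoodSizeInitial = 5, learningRateInitial = 0.9 and epoch = 100:
# iteration 0  -> neighborhoodSize = 5.0, learningRate = 0.9
# iteration 50 -> neighborhoodSize = 2.5, learningRate = 0.45
# iteration 90 -> neighborhoodSize = 0.5, learningRate = 0.09
# (iterate() later clamps these to minimums of 1 and 0.2 respectively)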
def find_neighbor_indices(self, i, j):
# function finds the neighboring rows and columns to include
# i : i-th index
# j : j-th index
# the neighborhood span is given by self.neighborhoodSize
#########################################################
rows = []
columns = []
# python indexing starts with 0 so adjust here
i = i + 1
j = j + 1
if i > self.hiddenSize[0] or i < 1 or j > self.hiddenSize[1] or j < 1:
neighborhood = set()
return neighborhood
rows = np.arange(i - int(self.neighborhoodSize), i + int(self.neighborhoodSize) + 1)
columns = np.arange(j - int(self.neighborhoodSize), j + int(self.neighborhoodSize) + 1)
# get neighbor indexes as a combination of rows and columns
neighborhood = set()
for row in rows:
for column in columns:
#do not do wrap around neighborhood
if row > self.hiddenSize[0] or row < 1 or column > self.hiddenSize[1] or column < 1:
continue
row = row % self.hiddenSize[0]
column = column % self.hiddenSize[1]
if row == 0:
row = self.hiddenSize[0]
if column == 0:
column = self.hiddenSize[1]
# do not update actual row, because it is used in the loop
row_temp = row - 1
column_temp = column - 1
neighborhood.add((row_temp, column_temp))
return neighborhood
def findWinningNode(self, x, windowSize):
## function to find winning node
# x: input observation
# format input for use in this function --- dtw distance
# x = np.reshape(x[0], (1, 1, len(x[0])))
####################################
# calculate distances (in Euclidean and DTW it is the minimum). Iterate over all nodes to find distance
distances = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
distances = distances + float('inf')
for i in range(0, self.hiddenSize[0]):
for j in range(0, self.hiddenSize[1]):
# get weights associated to i-th and j-th node
weights = self.weights_Kohonen[i, j, :,:]
# make sure correct shape
weights = np.reshape(weights, (1, np.shape(weights)[0], np.shape(weights)[1]))
# format inputs for dynamic time warping
# use dynamic time warping as distance measure which has a window size parameter
d1 = np.reshape(x,(1,np.shape(x)[1],np.shape(x)[2]))
d2 = np.reshape(weights, (1, np.shape(weights)[1], np.shape(weights)[2]))
distance = self.dtw_d(d1, d2, windowSize)
if distance != distance:
print('DTW error: Nan Value')
distances[i, j] = distance
# minimum value is winner
winnerNeuronIndex = np.argmin(distances)
return winnerNeuronIndex
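# --- Added note (not in the original source): the value returned above is a flat index into
# the hiddenSize grid (np.argmin flattens the distance matrix), so callers convert it back to
# grid coordinates, e.g.:
# flat = som.findWinningNode(x, windowSize=0)
# i, j = np.unravel_index(flat, som.hiddenSize)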
def propogateForward(self, x, windowSize):
############
# Description: Function forward propogates from input to grid
# x: single input
# windowSize: window size for dynamic time warping
##############################
# input to Kohonen
##############################
# make sure x is in correct shape for matrix multiplication
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
self.winnerNeuronIndex = self.findWinningNode(x, windowSize)
def update_weights_Kohonen(self, x):
############
# Description: Function updates the Kohonen layer (SOM layer) after one forward pass (i.e., propogateForward)
# x: single input
# make sure x is in correct shape
##############
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
# convert the single winning index into grid location (x, and y cordinate)
two_dIndex = np.unravel_index(self.winnerNeuronIndex, self.hiddenSize)
neighborhood = self.find_neighbor_indices(two_dIndex[0], two_dIndex[1])
# implement update formula to update all neighborhood
for neighbors in neighborhood:
i = neighbors[0]
j = neighbors[1]
# calculate the update
loss = 0
for m in range(0,np.shape(x)[1]):
update = self.learningRate * (x[0,m,:] - self.weights_Kohonen[i, j, m,:])
# update the weights
self.weights_Kohonen[i, j, m,:] = self.weights_Kohonen[i, j, m,:] + update
loss+= sum(abs(update))
return loss
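# --- Added note (not in the original source): the update above is the standard Kohonen rule
# w_new = w_old + learningRate * (x - w_old), applied per variable to the winner and its
# neighborhood. For example, with learningRate = 0.5, a weight of 0.2 and an input of 1.0,
# the weight moves to 0.2 + 0.5 * (1.0 - 0.2) = 0.6.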
def iterate(self, inputs, epochs, path = '',windowSize = 0, targets = [], labels = [],legend_dict = {}, showPlot = 'False'):
############
# Description: Function iterates to organize the Kohonen layer
# inputs: all inputs
# epochs: epochs to iterate for
# path: Path to save SOM plots
# windowSize: window size to be used by DTW (for the project); not useful in the assignment and set to 0.
# targets: keys or ids for each observation
# labels: labels, or ground truth.
#returns
#all_data: list of dictionary objects with 4 keys, 'target','x','y', and 'labels'
# x: x coordinate on SOM
# y: y coordinate on SOM
# target: keys or name of observation if provided.
# label: label (ground truth) of observation if provided.
##############
# reinitialize weights based on inputs
# initialize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim,self.inputSize))
# formula for weights initialization
for i in range(0, np.shape(inputs)[1]):
for j in range(0, np.shape(inputs)[2]):
firstPart = (np.mean(np.asarray(inputs)[:,i, j]) + np.random.uniform(-0.1, 0.1))
secondPart = (np.mean(np.asarray(inputs)[:,i, j]) * np.random.uniform(low=0, high=1, size=(self.hiddenSize[0], self.hiddenSize[1])))
weights = firstPart * secondPart
self.weights_Kohonen[:, :, i, j] = weights
#######################
# for epochs
weight_change_magnitude = []
for epoch in range(0, epochs):
# for each input
epoch_loss = 0
for i in range(0, len(inputs)):
# propogate forward
self.propogateForward(inputs[i], windowSize)
# update Kohonen
loss = self.update_weights_Kohonen(inputs[i])
epoch_loss += loss
if epoch % 20 == 0:
print('Epoch : ' + str(epoch) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
if len(path)>0:
self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,legend_dict = legend_dict )
self.update_parameters(epoch, epochs)
if self.neighborhoodSize < 1:
self.neighborhoodSize = 1
if self.learningRate < 0.2:
self.learningRate = 0.2
weight_change_magnitude.append(epoch_loss)
print('Epoch : ' + str(epoch + 1) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
#get Umatrix
self.Umatrix = self.createUmatrix(windowSize)
if len(path)>0:
all_data = self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,Umatrix = self.Umatrix,legend_dict = legend_dict ,write = False) #,
self.plotChange(weight_change_magnitude,showPlot, path)
return all_data
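# --- Added note (not in the original source): a minimal end-to-end usage sketch, under the
# assumption that `data` is an array of shape (observations, variables, timesteps):
# data = np.random.rand(20, 1, 30)
# som = SelfOrganizingMap(inputSize=30, variables=1, hiddenSize=[10, 10])
# placements = som.iterate(data, epochs=100, path='som_run', windowSize=0)
# umatrix = som.getUmatrix()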
def createUmatrix(self,windowSize):
#############################
# Description: create the U-matrix
# windowSize: window size for dynamic time warping. The Umatrix function currently always uses Euclidean distance to avoid theoretical issues.
########################################
#set neighborhood size to 1 and reset it after the function
neighborHood_temp = copy.deepcopy(self.neighborhoodSize)
self.neighborhoodSize = 1
| #find all the neighbors for node at i,j
neighbors = self.find_neighbor_indices(i, j)
#remove self
neighbors.remove((i, j))
#get weights for node at i,j
weights = self.weights_Kohonen[i,j,:]
weights = np.reshape(weights, (1,np.shape(weights)[0],np.shape(weights)[1]))
#for all neighbors
for neighbor in neighbors:
#get x,y (i,j) coordinate for neighbor
x = neighbor[0]
y = neighbor[1]
#get neighbor weights
neighborWeights = self.weights_Kohonen[x,y,:]
neighborWeights = np.reshape(neighborWeights, (1, np.shape(neighborWeights)[0],np.shape(neighborWeights)[1]))
# use dynamic time warping as distance measure which has a window size parameter
#always use Euc distance for Umatrix
distance = self.dtw_d(weights, neighborWeights,0)
Umatrix[i,j] += distance
Umatrix[i,j] = Umatrix[i,j] / len(neighbors)
#reset neighborhoodSize
self.neighborhoodSize = copy.deepcopy(neighborHood_temp)
return Umatrix
def plotChange(self, weight_change_magnitude,showPlot ,path):
plt.figure()
plt.plot(np.arange(0,len(weight_change_magnitude)),weight_change_magnitude)
plt.xlabel('Epochs',fontsize = 22)
plt.ylabel('Weight change magnitude',fontsize = 22)
plt.ylim(0,max(weight_change_magnitude))
plt.xlim(0,len(weight_change_magnitude))
plt.savefig(path+'_weight_change.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
def plotMap(self, inputs, epoch, showPlot,windowSize, path = 'plot_epoch', targets = [],Umatrix = [], labels= [], legend_dict = {}, write = False):
if len(legend_dict.keys())==0:
setOfLabels= set(labels)
for l in setOfLabels:
legend_dict[l] = l
#colors to label points
colors = ['#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2','#f768a1','#c7e9c0','#74c476','#238b45','#fec44f']
#colors = ['#d7191c','#abdda4','#2b83ba']
#for legend
colors_used = set()
# plot observations with labels
plt.figure(figsize = (6,6))
#plot Umatrix first so other stuff is over it
if len(Umatrix)> 0:
#normalize U matrix
#ignore the zero padding for minimum
min_v = Umatrix.min()
max_v = Umatrix.max()
Umatrix = (Umatrix - min_v) / (max_v - min_v)
#update values less than 0 to zero
Umatrix[Umatrix<0] = 0
Umatrix = Umatrix * 255
plt.imshow(Umatrix.transpose(), cmap='Greys',alpha=1)
plotted = set()
#write to CSV
all_data = []
node_to_scatterSize = defaultdict(lambda: 30)
for i in range(0, len(inputs)):
input = inputs[i]
input = np.reshape(input, (1, np.shape(input)[0],np.shape(input)[1]))
winnderNode = self.findWinningNode(input, windowSize)
# convert to x - y coordinate
coord = np.unravel_index(winnderNode, self.hiddenSize)
#save to dict for writing to csv
d = {}
d['x'] = coord[0]
d['y'] = coord[1]
if len(targets)> 0:
d['target'] = targets[i]
if len(labels) >0:
d['labels'] = labels[i]
all_data.append(d)
if coord in plotted:
#just add some noise so duck and goose can show separately. They are exactly the same as per the data
#shift = random.uniform(1, 4)
#shift = 0
x = coord[0]
y = coord[1] #+ shift
node_to_scatterSize[winnderNode] += 30
#print('scatter size increased.')
#print(node_to_scatterSize[winnderNode])
else:
plotted.add(coord)
x = coord[0]
y = coord[1]
#scatter plot at same location but annotation differently
#plt.scatter(coord[0], coord[1], s=30, color = '#2ca25f')
if len(labels)>0:
color = colors[labels[i]]
else:
color ='#2ca25f'
if color in colors_used:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
else:
colors_used.add(color)
if len(labels)>0:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color,label = legend_dict[labels[i]])
else:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
#if len(targets)> 0:
# plt.annotate(targets[i], (x, y), fontsize=22)
plt.xlim(0 - 5, self.hiddenSize[0] + 5)
plt.ylim(0 - 5, self.hiddenSize[1] + 5)
plt.xlabel('Nodes', fontsize=22)
plt.ylabel('Nodes', fontsize=22)
plt.xticks([])
plt.yticks([])
plt.title('Kohonen Self-Organizing Map', fontsize=22)
if len(labels)>0:
plt.legend(fontsize = 18,bbox_to_anchor=(1.05, 1), loc='upper left')
plt.savefig(path+'_'+str(epoch)+'.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
return all_data
def getWeights(self):
return self.weights_Kohonen
def getActivations(self):
return self.gridActivation
def getUmatrix(self):
return self.Umatrix
def sq_euc(self,s1, s2):
#author: Ali Javed
#email: [email protected]
#Version history:
#Version 1. basic implementation of dynamic time warping dependent.
#Version 2 (7 Nov 2019). changed variable names to be more representative and added comments.
#Inputs
#s1: signal 1, size 1 * m * n. where m is the number of variables, n is the timesteps.
#s2: signal 2, size 1 * m * n. where m is the number of variables, n is the timesteps.
#OUTPUT
#dist: Squared euclidean distance
dist = ((s1 - s2) ** 2)
return dist.flatten().sum()
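# --- Added note (not in the original source): sq_euc is the squared Euclidean distance summed
# over all entries, e.g. sq_euc(np.array([1, 2]), np.array([3, 4])) = (1-3)**2 + (2-4)**2 = 8.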
def dtw_d(self,s1, s2, w):
#author: Ali Javed
#email: [email protected]
#Version 1. basic implementation of dynamic time warping dependent.
#Version 2 (7 Nov 2019). changed variable names to be more representative and added comments.
#INPUTS:
#s1: signal 1, size 1 * m * n. where m is the number of variables, n is the timesteps.
#s2: signal 2, size 1 * m * n. where m is the number of variables, n is the timesteps.
#w: window parameter, percent of size, between 0 and 1. 0 is
#Euclidean distance while 1 is the maximum window size.
#
#OUTPUTS:
#dist: resulting distance
s1 = np.asarray(s1)
s2 = np.asarray(s2)
s1_shape = np.shape(s1)
s2_shape = np.shape(s2)
if w<0 or w>1:
print("Error: W should be between 0 and 1")
return False
if s1_shape[0] >1 or s2_shape[0] >1:
print("Error: Please check input dimensions")
return False
if s1_shape[1] != s2_shape[1]:
print("Error: Please check input dimensions. Number of variables not consistent.")
return False
if s1_shape[2] != s2_shape[2]:
print("Warning: Length of time series not equal")
#if window size is zero, it is plain euclidean distance
if w ==0:
dist = np.sqrt(self.sq_euc(s1, s2))
return dist
#get absolute window size
w = int(np.ceil(w * s1_shape[2]))
#adapt window size
w=int(max(w, abs(s1_shape[2]- s2_shape[2])));
#initialize
DTW = {}
for i in range(-1, s1_shape[2]):
for j in range(-1, s2_shape[2]):
DTW[(i, j)] = float('inf')
DTW[(-1, -1)] = 0
for i in range(s1_shape[2]):
for j in range(max(0, i - w), min(s2_shape[2], i + w)):
#squared euc distance
dist = self.sq_euc(s1[0,:,i], s2[0,:,j])
#find optimal path
DTW[(i, j)] = dist + min(DTW[(i - 1, j)], DTW[(i, j - 1)], DTW[(i - 1, j - 1)])
dist = np.sqrt(DTW[s1_shape[2] - 1, s2_shape[2] - 1])
return dist | Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
# For each node, average the distance between its weights and the weights of its immediate neighbors
for i in range(0,self.hiddenSize[0]):
for j in range(0,self.hiddenSize[1]): | random_line_split |
train.py | # -*- coding:UTF-8 -*-
# -----------------------------------------------------------
# "BCAN++: Cross-modal Retrieval With Bidirectional Correct Attention Network"
# Yang Liu, Hong Liu, Huaqiu Wang, Fanyang Meng, Mengyuan Liu*
#
# ---------------------------------------------------------------
"""Training script"""
import os
import time
import shutil
import torch
import torch.nn as nn
import numpy
from torch.nn.utils.clip_grad import clip_grad_norm_
import logging
import argparse
import numpy as np
import random
from data import get_loaders
from vocab import deserialize_vocab
from model import SCAN, ContrastiveLoss
from evaluation import AverageMeter, encode_data, LogCollector, i2t, t2i, shard_xattn
def setup_seed(seed):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed) #cpu
torch.cuda.manual_seed_all(seed) # for multi-GPU (parallel) setups
torch.backends.cudnn.deterministic = True # consistent results across CPU/GPU runs
torch.backends.cudnn.benchmark = True # speeds up training when input sizes do not vary much
def logging_func(log_file, message):
with open(log_file,'a') as f:
f.write(message)
|
def main():
setup_seed(1024)
# Hyper Parameters
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='D:/data/',
help='path to datasets')
parser.add_argument('--data_name', default='f30k_precomp',
help='{coco,f30k}_precomp')
parser.add_argument('--vocab_path', default='./vocab/',
help='Path to saved vocabulary json files.')
parser.add_argument('--margin', default=0.2, type=float,
help='Rank loss margin.')
parser.add_argument('--grad_clip', default=2.0, type=float,
help='Gradient clipping threshold.')
parser.add_argument('--num_epochs', default=20, type=int,
help='Number of training epochs.')
parser.add_argument('--batch_size', default=128, type=int,
help='Size of a training mini-batch.')
parser.add_argument('--word_dim', default=300, type=int,
help='Dimensionality of the word embedding.')
parser.add_argument('--embed_size', default=1024, type=int,
help='Dimensionality of the joint embedding.')
parser.add_argument('--num_layers', default=1, type=int,
help='Number of GRU layers.')
parser.add_argument('--learning_rate', default=.0002, type=float,
help='Initial learning rate.')
parser.add_argument('--lr_update', default=15, type=int,
help='Number of epochs to update the learning rate.')
parser.add_argument('--workers', default=0, type=int,
help='Number of data loader workers.')
parser.add_argument('--log_step', default=100, type=int,
help='Number of steps to print and record the log.')
parser.add_argument('--logger_name', default='./runs/test2',
help='Path to save Tensorboard log.')
parser.add_argument('--model_name', default='./runs/test2',
help='Path to save the model.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--img_dim', default=2048, type=int,
help='Dimensionality of the image embedding.')
parser.add_argument('--no_imgnorm', action='store_true',
help='Do not normalize the image embeddings.')
parser.add_argument('--no_txtnorm', action='store_true',
help='Do not normalize the text embeddings.')
parser.add_argument('--correct_type', default="prob",
help='equal|prob')
parser.add_argument('--precomp_enc_type', default="basic",
help='basic|weight_norm')
parser.add_argument('--bi_gru', action='store_true', default=True,
help='Use bidirectional GRU.')
parser.add_argument('--lambda_softmax', default=20., type=float,
help='Attention softmax temperature.')
opt = parser.parse_known_args()[0]
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.info('train')
# Load Vocabulary Wrapper
vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
word2idx = vocab.word2idx
opt.vocab_size = len(vocab)
# Load data loaders
train_loader, val_loader = get_loaders(
opt.data_name, vocab, opt.batch_size, opt.workers, opt)
# Construct the model
model = SCAN(word2idx, opt)
model.cuda()
model = nn.DataParallel(model)
criterion = ContrastiveLoss(margin=opt.margin)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
best_rsum = 0
start_epoch = 0
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
start_epoch = checkpoint['epoch'] + 1
best_rsum = checkpoint['best_rsum']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
# Eiters is used to show logs as the continuation of another
# training
# model.Eiters = checkpoint['Eiters']
print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
.format(opt.resume, start_epoch, best_rsum))
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
# Train the Model
for epoch in range(start_epoch, opt.num_epochs):
print(opt.logger_name)
print(opt.model_name)
if not os.path.exists(opt.model_name):
os.makedirs(opt.model_name)
message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
log_file = os.path.join(opt.logger_name, "performance.log")
logging_func(log_file, message)
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
train(opt, train_loader, model, criterion, optimizer, epoch, val_loader)
# evaluate on validation set
rsum = validate(opt, val_loader, model)
# remember best R@ sum and save checkpoint
is_best = rsum > best_rsum
best_rsum = max(rsum, best_rsum)
if not os.path.exists(opt.model_name):
os.mkdir(opt.model_name)
save_checkpoint({
'epoch': epoch,
'model': model.state_dict(),
'best_rsum': best_rsum,
'opt': opt,
'optimizer': optimizer.state_dict(),
}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=opt.model_name + '/')
class DataPrefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.images, self.captions, self.length, self.index = next(self.loader)
except StopIteration:
self.images, self.captions, self.length, self.index = None, None, None, None
return
with torch.cuda.stream(self.stream):
self.images = self.images.cuda()
self.captions = self.captions.cuda()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
self.preload()
return self.images, self.captions, self.length, self.index
def train(opt, train_loader, model, criterion, optimizer, epoch, val_loader):
# average meters to record the training statistics
batch_time = AverageMeter()
data_time = AverageMeter()
train_logger = LogCollector()
run_time = 0
start_time = time.time()
prefetcher = DataPrefetcher(train_loader)
images, captions, lengths, index = prefetcher.next()
i = 0
while images is not None:
# switch to train mode
model.train()
# measure data loading time
model.logger = train_logger
optimizer.zero_grad()
# Update the model
if torch.cuda.device_count() > 1:
images = images.repeat(torch.cuda.device_count(), 1, 1)
score = model(images, captions, lengths, index)
loss = criterion(score)
loss.backward()
if opt.grad_clip > 0:
clip_grad_norm_(model.parameters(), opt.grad_clip)
optimizer.step()
if (i + 1) % opt.log_step == 0:
run_time += time.time() - start_time
log = "epoch: %d; batch: %d/%d; loss: %.6f; time: %.4f" % (epoch,
i, len(train_loader), loss.data.item(),
run_time)
print(log, flush=True)
start_time = time.time()
run_time = 0
# validate at every val_step
images, captions, lengths, index = prefetcher.next()
i += 1
def validate(opt, val_loader, model):
# compute the encoding for all the validation images and captions
img_embs, img_means, cap_embs, cap_lens, cap_means = encode_data(
model, val_loader, opt.log_step, logging.info)
print(img_embs.shape, cap_embs.shape)
img_embs = numpy.array([img_embs[i] for i in range(0, len(img_embs), 5)])
start = time.time()
sims = shard_xattn(model, img_embs, img_means, cap_embs, cap_lens, cap_means, opt, shard_size=128)
end = time.time()
print("calculate similarity time:", end-start)
# caption retrieval
(r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1, r5, r10, medr, meanr))
# image retrieval
(r1i, r5i, r10i, medri, meanr) = t2i(
img_embs, cap_embs, cap_lens, sims)
print("Text to image: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1i, r5i, r10i, medri, meanr))
# sum of recalls to be used for early stopping
currscore = r1 + r5 + r10 + r1i + r5i + r10i
return currscore
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', prefix=''):
tries = 15
error = None
# deal with unstable I/O. Usually not necessary.
while tries:
try:
torch.save(state, prefix + filename)
if is_best:
message = "--------save best model at epoch %d---------\n" % (state["epoch"] - 1)
print(message, flush=True)
log_file = os.path.join(prefix, "performance.log")
logging_func(log_file, message)
shutil.copyfile(prefix + filename, prefix + 'model_best.pth.tar')
except IOError as e:
error = e
tries -= 1
else:
break
print('model save {} failed, remaining {} trials'.format(filename, tries))
if not tries:
raise error
def adjust_learning_rate(opt, optimizer, epoch):
"""Sets the learning rate to the initial LR
decayed by 10 every 30 epochs"""
lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
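# --- Added note (not in the original source): with the defaults above (learning_rate=2e-4,
# lr_update=15), this gives a step-decay schedule:
# epochs 0-14  -> lr = 2e-4
# epochs 15-29 -> lr = 2e-5
# epochs 30-44 -> lr = 2e-6 (and so on, although num_epochs defaults to 20)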
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main() | f.close()
| random_line_split |
train.py | # -*- coding:UTF-8 -*-
# -----------------------------------------------------------
# "BCAN++: Cross-modal Retrieval With Bidirectional Correct Attention Network"
# Yang Liu, Hong Liu, Huaqiu Wang, Fanyang Meng, Mengyuan Liu*
#
# ---------------------------------------------------------------
"""Training script"""
import os
import time
import shutil
import torch
import torch.nn as nn
import numpy
from torch.nn.utils.clip_grad import clip_grad_norm_
import logging
import argparse
import numpy as np
import random
from data import get_loaders
from vocab import deserialize_vocab
from model import SCAN, ContrastiveLoss
from evaluation import AverageMeter, encode_data, LogCollector, i2t, t2i, shard_xattn
def setup_seed(seed):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed) #cpu
torch.cuda.manual_seed_all(seed) # for multi-GPU (parallel) setups
torch.backends.cudnn.deterministic = True # consistent results across CPU/GPU runs
torch.backends.cudnn.benchmark = True # speeds up training when input sizes do not vary much
def logging_func(log_file, message):
with open(log_file,'a') as f:
f.write(message)
f.close()
def main():
setup_seed(1024)
# Hyper Parameters
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='D:/data/',
help='path to datasets')
parser.add_argument('--data_name', default='f30k_precomp',
help='{coco,f30k}_precomp')
parser.add_argument('--vocab_path', default='./vocab/',
help='Path to saved vocabulary json files.')
parser.add_argument('--margin', default=0.2, type=float,
help='Rank loss margin.')
parser.add_argument('--grad_clip', default=2.0, type=float,
help='Gradient clipping threshold.')
parser.add_argument('--num_epochs', default=20, type=int,
help='Number of training epochs.')
parser.add_argument('--batch_size', default=128, type=int,
help='Size of a training mini-batch.')
parser.add_argument('--word_dim', default=300, type=int,
help='Dimensionality of the word embedding.')
parser.add_argument('--embed_size', default=1024, type=int,
help='Dimensionality of the joint embedding.')
parser.add_argument('--num_layers', default=1, type=int,
help='Number of GRU layers.')
parser.add_argument('--learning_rate', default=.0002, type=float,
help='Initial learning rate.')
parser.add_argument('--lr_update', default=15, type=int,
help='Number of epochs to update the learning rate.')
parser.add_argument('--workers', default=0, type=int,
help='Number of data loader workers.')
parser.add_argument('--log_step', default=100, type=int,
help='Number of steps to print and record the log.')
parser.add_argument('--logger_name', default='./runs/test2',
help='Path to save Tensorboard log.')
parser.add_argument('--model_name', default='./runs/test2',
help='Path to save the model.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--img_dim', default=2048, type=int,
help='Dimensionality of the image embedding.')
parser.add_argument('--no_imgnorm', action='store_true',
help='Do not normalize the image embeddings.')
parser.add_argument('--no_txtnorm', action='store_true',
help='Do not normalize the text embeddings.')
parser.add_argument('--correct_type', default="prob",
help='equal|prob')
parser.add_argument('--precomp_enc_type', default="basic",
help='basic|weight_norm')
parser.add_argument('--bi_gru', action='store_true', default=True,
help='Use bidirectional GRU.')
parser.add_argument('--lambda_softmax', default=20., type=float,
help='Attention softmax temperature.')
opt = parser.parse_known_args()[0]
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.info('train')
# Load Vocabulary Wrapper
vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
word2idx = vocab.word2idx
opt.vocab_size = len(vocab)
# Load data loaders
train_loader, val_loader = get_loaders(
opt.data_name, vocab, opt.batch_size, opt.workers, opt)
# Construct the model
model = SCAN(word2idx, opt)
model.cuda()
model = nn.DataParallel(model)
criterion = ContrastiveLoss(margin=opt.margin)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
best_rsum = 0
start_epoch = 0
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
start_epoch = checkpoint['epoch'] + 1
best_rsum = checkpoint['best_rsum']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
# Eiters is used to show logs as the continuation of another
# training
# model.Eiters = checkpoint['Eiters']
print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
.format(opt.resume, start_epoch, best_rsum))
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
# Train the Model
for epoch in range(start_epoch, opt.num_epochs):
print(opt.logger_name)
print(opt.model_name)
if not os.path.exists(opt.model_name):
os.makedirs(opt.model_name)
message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
log_file = os.path.join(opt.logger_name, "performance.log")
logging_func(log_file, message)
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
train(opt, train_loader, model, criterion, optimizer, epoch, val_loader)
# evaluate on validation set
rsum = validate(opt, val_loader, model)
# remember best R@ sum and save checkpoint
is_best = rsum > best_rsum
best_rsum = max(rsum, best_rsum)
if not os.path.exists(opt.model_name):
os.mkdir(opt.model_name)
save_checkpoint({
'epoch': epoch,
'model': model.state_dict(),
'best_rsum': best_rsum,
'opt': opt,
'optimizer': optimizer.state_dict(),
}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=opt.model_name + '/')
class DataPrefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.images, self.captions, self.length, self.index = next(self.loader)
except StopIteration:
self.images, self.captions, self.length, self.index = None, None, None, None
return
with torch.cuda.stream(self.stream):
self.images = self.images.cuda()
self.captions = self.captions.cuda()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
self.preload()
return self.images, self.captions, self.length, self.index
def train(opt, train_loader, model, criterion, optimizer, epoch, val_loader):
# average meters to record the training statistics
batch_time = AverageMeter()
data_time = AverageMeter()
train_logger = LogCollector()
run_time = 0
start_time = time.time()
prefetcher = DataPrefetcher(train_loader)
images, captions, lengths, index = prefetcher.next()
i = 0
while images is not None:
# switch to train mode
model.train()
# measure data loading time
model.logger = train_logger
optimizer.zero_grad()
# Update the model
if torch.cuda.device_count() > 1:
images = images.repeat(torch.cuda.device_count(), 1, 1)
score = model(images, captions, lengths, index)
loss = criterion(score)
loss.backward()
if opt.grad_clip > 0:
clip_grad_norm_(model.parameters(), opt.grad_clip)
optimizer.step()
if (i + 1) % opt.log_step == 0:
run_time += time.time() - start_time
log = "epoch: %d; batch: %d/%d; loss: %.6f; time: %.4f" % (epoch,
i, len(train_loader), loss.data.item(),
run_time)
print(log, flush=True)
start_time = time.time()
run_time = 0
# validate at every val_step
images, captions, lengths, index = prefetcher.next()
i += 1
def validate(opt, val_loader, model):
| te the encoding for all the validation images and captions
img_embs, img_means, cap_embs, cap_lens, cap_means = encode_data(
model, val_loader, opt.log_step, logging.info)
print(img_embs.shape, cap_embs.shape)
img_embs = numpy.array([img_embs[i] for i in range(0, len(img_embs), 5)])
start = time.time()
sims = shard_xattn(model, img_embs, img_means, cap_embs, cap_lens, cap_means, opt, shard_size=128)
end = time.time()
print("calculate similarity time:", end-start)
# caption retrieval
(r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1, r5, r10, medr, meanr))
# image retrieval
(r1i, r5i, r10i, medri, meanr) = t2i(
img_embs, cap_embs, cap_lens, sims)
print("Text to image: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1i, r5i, r10i, medri, meanr))
# sum of recalls to be used for early stopping
currscore = r1 + r5 + r10 + r1i + r5i + r10i
return currscore
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', prefix=''):
tries = 15
error = None
# deal with unstable I/O. Usually not necessary.
while tries:
try:
torch.save(state, prefix + filename)
if is_best:
message = "--------save best model at epoch %d---------\n" % (state["epoch"] - 1)
print(message, flush=True)
log_file = os.path.join(prefix, "performance.log")
logging_func(log_file, message)
shutil.copyfile(prefix + filename, prefix + 'model_best.pth.tar')
except IOError as e:
error = e
tries -= 1
else:
break
print('model save {} failed, remaining {} trials'.format(filename, tries))
if not tries:
raise error
def adjust_learning_rate(opt, optimizer, epoch):
"""Sets the learning rate to the initial LR
decayed by 10 every 30 epochs"""
lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| # compu | identifier_name |
train.py | # -*- coding:UTF-8 -*-
# -----------------------------------------------------------
# "BCAN++: Cross-modal Retrieval With Bidirectional Correct Attention Network"
# Yang Liu, Hong Liu, Huaqiu Wang, Fanyang Meng, Mengyuan Liu*
#
# ---------------------------------------------------------------
"""Training script"""
import os
import time
import shutil
import torch
import torch.nn as nn
import numpy
from torch.nn.utils.clip_grad import clip_grad_norm_
import logging
import argparse
import numpy as np
import random
from data import get_loaders
from vocab import deserialize_vocab
from model import SCAN, ContrastiveLoss
from evaluation import AverageMeter, encode_data, LogCollector, i2t, t2i, shard_xattn
def setup_seed(seed):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed) #cpu
torch.cuda.manual_seed_all(seed) # for multi-GPU (parallel) setups
torch.backends.cudnn.deterministic = True # consistent results across CPU/GPU runs
torch.backends.cudnn.benchmark = True # speeds up training when input sizes do not vary much
def logging_func(log_file, message):
with open(log_file,'a') as f:
f.write(message)
f.close()
def main():
setup_seed(1024)
# Hyper Parameters
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='D:/data/',
help='path to datasets')
parser.add_argument('--data_name', default='f30k_precomp',
help='{coco,f30k}_precomp')
parser.add_argument('--vocab_path', default='./vocab/',
help='Path to saved vocabulary json files.')
parser.add_argument('--margin', default=0.2, type=float,
help='Rank loss margin.')
parser.add_argument('--grad_clip', default=2.0, type=float,
help='Gradient clipping threshold.')
parser.add_argument('--num_epochs', default=20, type=int,
help='Number of training epochs.')
parser.add_argument('--batch_size', default=128, type=int,
help='Size of a training mini-batch.')
parser.add_argument('--word_dim', default=300, type=int,
help='Dimensionality of the word embedding.')
parser.add_argument('--embed_size', default=1024, type=int,
help='Dimensionality of the joint embedding.')
parser.add_argument('--num_layers', default=1, type=int,
help='Number of GRU layers.')
parser.add_argument('--learning_rate', default=.0002, type=float,
help='Initial learning rate.')
parser.add_argument('--lr_update', default=15, type=int,
help='Number of epochs to update the learning rate.')
parser.add_argument('--workers', default=0, type=int,
help='Number of data loader workers.')
parser.add_argument('--log_step', default=100, type=int,
help='Number of steps to print and record the log.')
parser.add_argument('--logger_name', default='./runs/test2',
help='Path to save Tensorboard log.')
parser.add_argument('--model_name', default='./runs/test2',
help='Path to save the model.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--img_dim', default=2048, type=int,
help='Dimensionality of the image embedding.')
parser.add_argument('--no_imgnorm', action='store_true',
help='Do not normalize the image embeddings.')
parser.add_argument('--no_txtnorm', action='store_true',
help='Do not normalize the text embeddings.')
parser.add_argument('--correct_type', default="prob",
help='equal|prob')
parser.add_argument('--precomp_enc_type', default="basic",
help='basic|weight_norm')
parser.add_argument('--bi_gru', action='store_true', default=True,
help='Use bidirectional GRU.')
parser.add_argument('--lambda_softmax', default=20., type=float,
help='Attention softmax temperature.')
opt = parser.parse_known_args()[0]
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.info('train')
# Load Vocabulary Wrapper
vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
word2idx = vocab.word2idx
opt.vocab_size = len(vocab)
# Load data loaders
train_loader, val_loader = get_loaders(
opt.data_name, vocab, opt.batch_size, opt.workers, opt)
# Construct the model
model = SCAN(word2idx, opt)
model.cuda()
model = nn.DataParallel(model)
criterion = ContrastiveLoss(margin=opt.margin)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
best_rsum = 0
start_epoch = 0
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
start_epoch = checkpoint['epoch'] + 1
best_rsum = checkpoint['best_rsum']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
# Eiters is used to show logs as the continuation of another
# training
# model.Eiters = checkpoint['Eiters']
print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
.format(opt.resume, start_epoch, best_rsum))
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
# Train the Model
for epoch in range(start_epoch, opt.num_epochs):
print(opt.logger_name)
print(opt.model_name)
if not os.path.exists(opt.model_name):
os.makedirs(opt.model_name)
message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
log_file = os.path.join(opt.logger_name, "performance.log")
logging_func(log_file, message)
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
train(opt, train_loader, model, criterion, optimizer, epoch, val_loader)
# evaluate on validation set
rsum = validate(opt, val_loader, model)
# remember best R@ sum and save checkpoint
is_best = rsum > best_rsum
best_rsum = max(rsum, best_rsum)
if not os.path.exists(opt.model_name):
os.mkdir(opt.model_name)
save_checkpoint({
'epoch': epoch,
'model': model.state_dict(),
'best_rsum': best_rsum,
'opt': opt,
'optimizer': optimizer.state_dict(),
}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=opt.model_name + '/')
class DataPrefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.images, self.captions, self.length, self.index = next(self.loader)
except StopIteration:
self.images, self.captions, self.length, self.index = None, None, None, None
return
with torch.cuda.stream(self.stream):
self.images = self.images.cuda()
self.captions = self.captions.cuda()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
self.preload()
return self.images, self.captions, self.length, self.index
def train(opt, train_loader, model, criterion, optimizer, epoch, val_loader):
# average meters to record the training statistics
batch_time = AverageMeter()
data_time = AverageMeter()
train_logger = LogCollector()
run_time = 0
start_time = time.time()
prefetcher = DataPrefetcher(train_loader)
images, captions, lengths, index = prefetcher.next()
i = 0
while images is not None:
# switch to train mode
model.train()
# measure data loading time
model.logger = train_logger
optimizer.zero_grad()
# Update the model
if torch.cuda.device_count() > 1:
images = images.repeat(torch.cuda.device_count(), 1, 1)
score = model(images, captions, lengths, index)
loss = criterion(score)
loss.backward()
if opt.grad_clip > 0:
clip_grad_norm_(model.parameters(), op | if (i + 1) % opt.log_step == 0:
run_time += time.time() - start_time
log = "epoch: %d; batch: %d/%d; loss: %.6f; time: %.4f" % (epoch,
i, len(train_loader), loss.data.item(),
run_time)
print(log, flush=True)
start_time = time.time()
run_time = 0
# validate at every val_step
images, captions, lengths, index = prefetcher.next()
i += 1
def validate(opt, val_loader, model):
# compute the encoding for all the validation images and captions
img_embs, img_means, cap_embs, cap_lens, cap_means = encode_data(
model, val_loader, opt.log_step, logging.info)
print(img_embs.shape, cap_embs.shape)
img_embs = numpy.array([img_embs[i] for i in range(0, len(img_embs), 5)])
start = time.time()
sims = shard_xattn(model, img_embs, img_means, cap_embs, cap_lens, cap_means, opt, shard_size=128)
end = time.time()
print("calculate similarity time:", end-start)
# caption retrieval
(r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1, r5, r10, medr, meanr))
# image retrieval
(r1i, r5i, r10i, medri, meanr) = t2i(
img_embs, cap_embs, cap_lens, sims)
print("Text to image: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1i, r5i, r10i, medri, meanr))
# sum of recalls to be used for early stopping
currscore = r1 + r5 + r10 + r1i + r5i + r10i
return currscore
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', prefix=''):
tries = 15
error = None
# deal with unstable I/O. Usually not necessary.
while tries:
try:
torch.save(state, prefix + filename)
if is_best:
message = "--------save best model at epoch %d---------\n" % (state["epoch"] - 1)
print(message, flush=True)
log_file = os.path.join(prefix, "performance.log")
logging_func(log_file, message)
shutil.copyfile(prefix + filename, prefix + 'model_best.pth.tar')
except IOError as e:
error = e
tries -= 1
else:
break
print('model save {} failed, remaining {} trials'.format(filename, tries))
if not tries:
raise error
def adjust_learning_rate(opt, optimizer, epoch):
"""Sets the learning rate to the initial LR
decayed by 10 every 30 epochs"""
lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| t.grad_clip)
optimizer.step()
| conditional_block |
train.py | # -*- coding:UTF-8 -*-
# -----------------------------------------------------------
# "BCAN++: Cross-modal Retrieval With Bidirectional Correct Attention Network"
# Yang Liu, Hong Liu, Huaqiu Wang, Fanyang Meng, Mengyuan Liu*
#
# ---------------------------------------------------------------
"""Training script"""
import os
import time
import shutil
import torch
import torch.nn as nn
import numpy
from torch.nn.utils.clip_grad import clip_grad_norm_
import logging
import argparse
import numpy as np
import random
from data import get_loaders
from vocab import deserialize_vocab
from model import SCAN, ContrastiveLoss
from evaluation import AverageMeter, encode_data, LogCollector, i2t, t2i, shard_xattn
def setup_seed(seed):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed) #cpu
torch.cuda.manual_seed_all(seed) # for multi-GPU (parallel) setups
torch.backends.cudnn.deterministic = True # consistent results across CPU/GPU runs
torch.backends.cudnn.benchmark = True # speeds up training when input sizes do not vary much
def logging_func(log_file, message):
with open(log_file,'a') as f:
f.write(message)
f.close()
def main():
setup_seed(1024)
# Hyper Parameters
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='D:/data/',
help='path to datasets')
parser.add_argument('--data_name', default='f30k_precomp',
help='{coco,f30k}_precomp')
parser.add_argument('--vocab_path', default='./vocab/',
help='Path to saved vocabulary json files.')
parser.add_argument('--margin', default=0.2, type=float,
help='Rank loss margin.')
parser.add_argument('--grad_clip', default=2.0, type=float,
help='Gradient clipping threshold.')
parser.add_argument('--num_epochs', default=20, type=int,
help='Number of training epochs.')
parser.add_argument('--batch_size', default=128, type=int,
help='Size of a training mini-batch.')
parser.add_argument('--word_dim', default=300, type=int,
help='Dimensionality of the word embedding.')
parser.add_argument('--embed_size', default=1024, type=int,
help='Dimensionality of the joint embedding.')
parser.add_argument('--num_layers', default=1, type=int,
help='Number of GRU layers.')
parser.add_argument('--learning_rate', default=.0002, type=float,
help='Initial learning rate.')
parser.add_argument('--lr_update', default=15, type=int,
help='Number of epochs to update the learning rate.')
parser.add_argument('--workers', default=0, type=int,
help='Number of data loader workers.')
parser.add_argument('--log_step', default=100, type=int,
help='Number of steps to print and record the log.')
parser.add_argument('--logger_name', default='./runs/test2',
help='Path to save Tensorboard log.')
parser.add_argument('--model_name', default='./runs/test2',
help='Path to save the model.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--img_dim', default=2048, type=int,
help='Dimensionality of the image embedding.')
parser.add_argument('--no_imgnorm', action='store_true',
help='Do not normalize the image embeddings.')
parser.add_argument('--no_txtnorm', action='store_true',
help='Do not normalize the text embeddings.')
parser.add_argument('--correct_type', default="prob",
help='equal|prob')
parser.add_argument('--precomp_enc_type', default="basic",
help='basic|weight_norm')
parser.add_argument('--bi_gru', action='store_true', default=True,
help='Use bidirectional GRU.')
parser.add_argument('--lambda_softmax', default=20., type=float,
help='Attention softmax temperature.')
opt = parser.parse_known_args()[0]
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.info('train')
# Load Vocabulary Wrapper
vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
word2idx = vocab.word2idx
opt.vocab_size = len(vocab)
# Load data loaders
train_loader, val_loader = get_loaders(
opt.data_name, vocab, opt.batch_size, opt.workers, opt)
# Construct the model
model = SCAN(word2idx, opt)
model.cuda()
model = nn.DataParallel(model)
criterion = ContrastiveLoss(margin=opt.margin)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
best_rsum = 0
start_epoch = 0
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
start_epoch = checkpoint['epoch'] + 1
best_rsum = checkpoint['best_rsum']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
# Eiters is used to show logs as the continuation of another
# training
# model.Eiters = checkpoint['Eiters']
print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
.format(opt.resume, start_epoch, best_rsum))
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
# Train the Model
for epoch in range(start_epoch, opt.num_epochs):
print(opt.logger_name)
print(opt.model_name)
if not os.path.exists(opt.model_name):
os.makedirs(opt.model_name)
message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
log_file = os.path.join(opt.logger_name, "performance.log")
logging_func(log_file, message)
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
train(opt, train_loader, model, criterion, optimizer, epoch, val_loader)
# evaluate on validation set
rsum = validate(opt, val_loader, model)
# remember best R@ sum and save checkpoint
is_best = rsum > best_rsum
best_rsum = max(rsum, best_rsum)
if not os.path.exists(opt.model_name):
os.mkdir(opt.model_name)
save_checkpoint({
'epoch': epoch,
'model': model.state_dict(),
'best_rsum': best_rsum,
'opt': opt,
'optimizer': optimizer.state_dict(),
}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=opt.model_name + '/')
class DataPrefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.images, self.captions, self.length, self.index = next(self.loader)
except StopIteration:
self.images, self.captions, self.length, self.index = None, None, None, None
return
with torch.cuda.stream(self.stream):
self.images = self.images.cuda()
self.captions = self.captions.cuda()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
self.preload()
return self.images, self.captions, self.length, self.index
def train(opt, train_loader, model, criterion, optimizer, epoch, val_loader):
# average meters to record the training statistics
batch_time = AverageMeter()
data_time = AverageMeter()
train_logger = LogCollector()
run_time = 0
start_time = time.time()
prefetcher = DataPrefetcher(train_loader)
images, captions, lengths, index = prefetcher.next()
i = 0
while images is not None:
# switch to train mode
model.train()
# measure data loading time
model.logger = train_logger
optimizer.zero_grad()
# Update the model
if torch.cuda.device_count() > 1:
images = images.repeat(torch.cuda.device_count(), 1, 1)
score = model(images, captions, lengths, index)
loss = criterion(score)
loss.backward()
if opt.grad_clip > 0:
clip_grad_norm_(model.parameters(), opt.grad_clip)
optimizer.step()
if (i + 1) % opt.log_step == 0:
run_time += time.time() - start_time
log = "epoch: %d; batch: %d/%d; loss: %.6f; time: %.4f" % (epoch,
i, len(train_loader), loss.data.item(),
run_time)
print(log, flush=True)
start_time = time.time()
run_time = 0
# validate at every val_step
images, captions, lengths, index = prefetcher.next()
i += 1
def validate(opt, val_loader, model):
# compute the encoding for all the validation images and captions
img_embs, img_means, cap_embs, cap_lens, cap_means = encode_data(
model, val_loader, opt.log_step, logging.info)
print(img_embs.shape, cap_embs.shape)
img_embs = numpy.array([img_embs[i] for i in range(0, len(img_embs), 5)])
start = time.time()
sims = shard_xattn(model, img_embs, img_means, cap_embs, cap_lens, cap_means, opt, shard_size=128)
end = time.time()
print("calculate similarity time:", end-start)
# caption retrieval
(r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1, r5, r10, medr, meanr))
# image retrieval
(r1i, r5i, r10i, medri, meanr) = t2i(
img_embs, cap_embs, cap_lens, sims)
print("Text to image: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1i, r5i, r10i, medri, meanr))
# sum of recalls to be used for early stopping
currscore = r1 + r5 + r10 + r1i + r5i + r10i
return currscore
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', prefix=''):
tries = 15
error = None
# deal with unstable I/O. Usually not necessary.
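    # retry loop: the `else` clause runs only when torch.save (and, for best models,
    # the copyfile) succeeds and breaks out; on IOError the attempt counter is
    # decremented and the save is retried, and the last error is re-raised once all
    # tries are exhausted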
while tries:
try:
torch.save(state, prefix + filename)
if is_best:
message = "--------save best model at epoch %d---------\n" % (state["epoch"] - 1)
print(message, flush=True)
log_file = os.path.join(prefix, "performance.log")
logging_func(log_file, message)
shutil.copyfile(prefix + filename, prefix + 'model_best.pth.tar')
except IOError as e:
error = e
tries -= 1
else:
break
print('model save {} failed, remaining {} trials'.format(filename, tries))
if not tries:
raise error
def adjust_learning_rate(opt, optimizer, epoch):
"""Sets the learning rate to the initial LR
decayed by 10 every 30 epochs"""
lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
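    # Worked example of the schedule above (values assumed for illustration only):
    # with opt.learning_rate = 2e-4 and opt.lr_update = 15,
    #   epochs  0-14 -> 2e-4 * 0.1**0 = 2e-4
    #   epochs 15-29 -> 2e-4 * 0.1**1 = 2e-5
    #   epochs 30-44 -> 2e-4 * 0.1**2 = 2e-6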
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the sp | main()
| ecified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
| identifier_body |
android_bootldr_qcom.go | // Code generated by kaitai-struct-compiler from a .ksy source file. DO NOT EDIT.
import (
"github.com/kaitai-io/kaitai_struct_go_runtime/kaitai"
"bytes"
"io"
)
/**
* A bootloader for Android used on various devices powered by Qualcomm
* Snapdragon chips:
*
* <https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors>
*
* Although not all of the Snapdragon based Android devices use this particular
* bootloader format, it is known that devices with the following chips have used
* it (example devices are given for each chip):
*
* * APQ8064 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_S4_Pro))
* - Nexus 4 "mako": [sample][sample-mako] ([other samples][others-mako]),
* [releasetools.py](https://android.googlesource.com/device/lge/mako/+/33f0114/releasetools.py#98)
*
* * MSM8974AA ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 5 "hammerhead": [sample][sample-hammerhead] ([other samples][others-hammerhead]),
* [releasetools.py](https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py#116)
*
* * MSM8992 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 5X "bullhead": [sample][sample-bullhead] ([other samples][others-bullhead]),
* [releasetools.py](https://android.googlesource.com/device/lge/bullhead/+/2994b6b/releasetools.py#126)
*
* * APQ8064-1AA ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_600_(2013)))
* - Nexus 7 \[2013] (Mobile) "deb" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-deb] ([other samples][others-deb]),
* [releasetools.py](https://android.googlesource.com/device/asus/deb/+/14c1638/releasetools.py#105)
* - Nexus 7 \[2013] (Wi-Fi) "flo" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-flo] ([other samples][others-flo]),
* [releasetools.py](https://android.googlesource.com/device/asus/flo/+/9d9fee9/releasetools.py#130)
*
* * MSM8996 Pro-AB ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_820_and_821_(2016)))
* - Pixel "sailfish" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-sailfish] ([other samples][others-sailfish])
* - Pixel XL "marlin" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-marlin] ([other samples][others-marlin])
*
* * MSM8998 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_835_(2017)))
* - Pixel 2 "walleye" <a href="#doc-note-bootloader-size">(\*)</a>: [sample][sample-walleye] ([other samples][others-walleye])
* - Pixel 2 XL "taimen": [sample][sample-taimen] ([other samples][others-taimen])
*
* <small id="doc-note-bootloader-size">(\*)
* `bootloader_size` is equal to the size of the whole file (not just `img_bodies` as usual).
* </small>
*
* <small id="doc-note-data-after-img-bodies">(\**)
* There are some data after the end of `img_bodies`.
* </small>
*
* ---
*
* On the other hand, devices with these chips **do not** use this format:
*
* * <del>APQ8084</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 6 "shamu": [sample][foreign-sample-shamu] ([other samples][foreign-others-shamu]),
* [releasetools.py](https://android.googlesource.com/device/moto/shamu/+/df9354d/releasetools.py#12) -
* uses "Motoboot packed image format" instead
*
* * <del>MSM8994</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 6P "angler": [sample][foreign-sample-angler] ([other samples][foreign-others-angler]),
* [releasetools.py](https://android.googlesource.com/device/huawei/angler/+/cf92cd8/releasetools.py#29) -
* uses "Huawei Bootloader packed image format" instead
*
* [sample-mako]: https://androidfilehost.com/?fid=96039337900113996 "bootloader-mako-makoz30f.img"
* [others-mako]: https://androidfilehost.com/?w=search&s=bootloader-mako&type=files
*
* [sample-hammerhead]: https://androidfilehost.com/?fid=385035244224410247 "bootloader-hammerhead-hhz20h.img"
* [others-hammerhead]: https://androidfilehost.com/?w=search&s=bootloader-hammerhead&type=files
*
* [sample-bullhead]: https://androidfilehost.com/?fid=11410963190603870177 "bootloader-bullhead-bhz32c.img"
* [others-bullhead]: https://androidfilehost.com/?w=search&s=bootloader-bullhead&type=files
*
* [sample-deb]: https://androidfilehost.com/?fid=23501681358552487 "bootloader-deb-flo-04.02.img"
* [others-deb]: https://androidfilehost.com/?w=search&s=bootloader-deb-flo&type=files
*
* [sample-flo]: https://androidfilehost.com/?fid=23991606952593542 "bootloader-flo-flo-04.05.img"
* [others-flo]: https://androidfilehost.com/?w=search&s=bootloader-flo-flo&type=files
*
* [sample-sailfish]: https://androidfilehost.com/?fid=6006931924117907154 "bootloader-sailfish-8996-012001-1904111134.img"
* [others-sailfish]: https://androidfilehost.com/?w=search&s=bootloader-sailfish&type=files
*
* [sample-marlin]: https://androidfilehost.com/?fid=6006931924117907131 "bootloader-marlin-8996-012001-1904111134.img"
* [others-marlin]: https://androidfilehost.com/?w=search&s=bootloader-marlin&type=files
*
* [sample-walleye]: https://androidfilehost.com/?fid=14943124697586348540 "bootloader-walleye-mw8998-003.0085.00.img"
* [others-walleye]: https://androidfilehost.com/?w=search&s=bootloader-walleye&type=files
*
* [sample-taimen]: https://androidfilehost.com/?fid=14943124697586348536 "bootloader-taimen-tmz30m.img"
* [others-taimen]: https://androidfilehost.com/?w=search&s=bootloader-taimen&type=files
*
* [foreign-sample-shamu]: https://androidfilehost.com/?fid=745849072291678307 "bootloader-shamu-moto-apq8084-72.04.img"
* [foreign-others-shamu]: https://androidfilehost.com/?w=search&s=bootloader-shamu&type=files
*
* [foreign-sample-angler]: https://androidfilehost.com/?fid=11410963190603870158 "bootloader-angler-angler-03.84.img"
* [foreign-others-angler]: https://androidfilehost.com/?w=search&s=bootloader-angler&type=files
*
* ---
*
* The `bootloader-*.img` samples referenced above originally come from factory
* images packed in ZIP archives that can be found on the page [Factory Images
* for Nexus and Pixel Devices](https://developers.google.com/android/images) on
* the Google Developers site. Note that the codenames on that page may be
* different than the ones that are written in the list above. That's because the
* Google page indicates **ROM codenames** in headings (e.g. "occam" for Nexus 4)
* but the above list uses **model codenames** (e.g. "mako" for Nexus 4) because
* that is how the original `bootloader-*.img` files are identified. For most
* devices, however, these code names are the same.
* @see <a href="https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py">Source</a>
*/
type AndroidBootldrQcom struct {
Magic []byte
NumImages uint32
OfsImgBodies uint32
BootloaderSize uint32
ImgHeaders []*AndroidBootldrQcom_ImgHeader
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent interface{}
_f_imgBodies bool
imgBodies []*AndroidBootldrQcom_ImgBody
}
func NewAndroidBootldrQcom() *AndroidBootldrQcom |
func (this *AndroidBootldrQcom) Read(io *kaitai.Stream, parent interface{}, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp1, err := this._io.ReadBytes(int(8))
if err != nil {
return err
}
tmp1 = tmp1
this.Magic = tmp1
if !(bytes.Equal(this.Magic, []uint8{66, 79, 79, 84, 76, 68, 82, 33})) {
return kaitai.NewValidationNotEqualError([]uint8{66, 79, 79, 84, 76, 68, 82, 33}, this.Magic, this._io, "/seq/0")
}
tmp2, err := this._io.ReadU4le()
if err != nil {
return err
}
this.NumImages = uint32(tmp2)
tmp3, err := this._io.ReadU4le()
if err != nil {
return err
}
this.OfsImgBodies = uint32(tmp3)
tmp4, err := this._io.ReadU4le()
if err != nil {
return err
}
this.BootloaderSize = uint32(tmp4)
for i := 0; i < int(this.NumImages); i++ {
_ = i
tmp5 := NewAndroidBootldrQcom_ImgHeader()
err = tmp5.Read(this._io, this, this._root)
if err != nil {
return err
}
this.ImgHeaders = append(this.ImgHeaders, tmp5)
}
return err
}
func (this *AndroidBootldrQcom) ImgBodies() (v []*AndroidBootldrQcom_ImgBody, err error) {
if (this._f_imgBodies) {
return this.imgBodies, nil
}
_pos, err := this._io.Pos()
if err != nil {
return nil, err
}
_, err = this._io.Seek(int64(this.OfsImgBodies), io.SeekStart)
if err != nil {
return nil, err
}
for i := 0; i < int(this.NumImages); i++ {
_ = i
tmp6 := NewAndroidBootldrQcom_ImgBody(i)
err = tmp6.Read(this._io, this, this._root)
if err != nil {
return nil, err
}
this.imgBodies = append(this.imgBodies, tmp6)
}
_, err = this._io.Seek(_pos, io.SeekStart)
if err != nil {
return nil, err
}
this._f_imgBodies = true
this._f_imgBodies = true
return this.imgBodies, nil
}
/**
* According to all available `releasetools.py` versions from AOSP (links are
* in the top-level `/doc`), this should determine only the size of
* `img_bodies` - there is [an assertion](
* https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py#167)
* for it.
*
* However, files for certain Pixel devices (see `/doc`) apparently declare
* the entire file size here (i.e. including also fields from `magic` to
* `img_headers`). So if you interpreted `bootloader_size` as the size of
* `img_bodies` substream in these files, you would exceed the end of file.
* Although you could check that it fits in the file before attempting to
* create a substream of that size, you wouldn't know if it's meant to
* specify the size of just `img_bodies` or the size of the entire bootloader
* payload (whereas there may be additional data after the end of payload)
* until parsing `img_bodies` (or at least summing sizes from `img_headers`,
* but that's stupid).
*
 * So this field isn't reliable enough to be used as the size of any
 * substream. If you want to check if it has a reasonable value, do so in
 * your application code (a brief usage sketch follows this generated code).
*/
type AndroidBootldrQcom_ImgHeader struct {
Name string
LenBody uint32
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent *AndroidBootldrQcom
}
func NewAndroidBootldrQcom_ImgHeader() *AndroidBootldrQcom_ImgHeader {
return &AndroidBootldrQcom_ImgHeader{
}
}
func (this *AndroidBootldrQcom_ImgHeader) Read(io *kaitai.Stream, parent *AndroidBootldrQcom, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp7, err := this._io.ReadBytes(int(64))
if err != nil {
return err
}
tmp7 = kaitai.BytesTerminate(tmp7, 0, false)
this.Name = string(tmp7)
tmp8, err := this._io.ReadU4le()
if err != nil {
return err
}
this.LenBody = uint32(tmp8)
return err
}
type AndroidBootldrQcom_ImgBody struct {
Body []byte
Idx int32
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent *AndroidBootldrQcom
_f_imgHeader bool
imgHeader *AndroidBootldrQcom_ImgHeader
}
func NewAndroidBootldrQcom_ImgBody(idx int32) *AndroidBootldrQcom_ImgBody {
return &AndroidBootldrQcom_ImgBody{
Idx: idx,
}
}
func (this *AndroidBootldrQcom_ImgBody) Read(io *kaitai.Stream, parent *AndroidBootldrQcom, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp9, err := this.ImgHeader()
if err != nil {
return err
}
tmp10, err := this._io.ReadBytes(int(tmp9.LenBody))
if err != nil {
return err
}
tmp10 = tmp10
this.Body = tmp10
return err
}
func (this *AndroidBootldrQcom_ImgBody) ImgHeader() (v *AndroidBootldrQcom_ImgHeader, err error) {
if (this._f_imgHeader) {
return this.imgHeader, nil
}
this.imgHeader = this._root.ImgHeaders[this.Idx]
this._f_imgHeader = true
return this.imgHeader, nil
}
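// --- Illustrative usage sketch (not part of the generated code above) ---------
// A minimal example of driving the parser and applying the application-level
// sanity check on BootloaderSize that the doc comment recommends. Assumptions:
// the generated package is imported as "bootldr", and the Kaitai Go runtime's
// kaitai.NewStream(io.ReadSeeker) constructor is used; adjust to your project.
//
//	data, err := os.ReadFile("bootloader-mako-makoz30f.img")
//	if err != nil {
//		log.Fatal(err)
//	}
//	img := bootldr.NewAndroidBootldrQcom()
//	if err := img.Read(kaitai.NewStream(bytes.NewReader(data)), nil, img); err != nil {
//		log.Fatal(err)
//	}
//	// bootloader_size may cover either just img_bodies or the whole file,
//	// so only flag it when it exceeds the file itself.
//	if int64(img.BootloaderSize) > int64(len(data)) {
//		log.Printf("suspicious bootloader_size %d > file size %d", img.BootloaderSize, len(data))
//	}
//	bodies, err := img.ImgBodies()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for i, h := range img.ImgHeaders {
//		fmt.Printf("%-16s %d bytes\n", h.Name, len(bodies[i].Body))
//	}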
| {
return &AndroidBootldrQcom{
}
} | identifier_body |
android_bootldr_qcom.go | // Code generated by kaitai-struct-compiler from a .ksy source file. DO NOT EDIT.
import (
"github.com/kaitai-io/kaitai_struct_go_runtime/kaitai"
"bytes"
"io"
)
/**
* A bootloader for Android used on various devices powered by Qualcomm
* Snapdragon chips:
*
* <https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors>
*
* Although not all of the Snapdragon based Android devices use this particular
* bootloader format, it is known that devices with the following chips have used
* it (example devices are given for each chip):
*
* * APQ8064 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_S4_Pro))
* - Nexus 4 "mako": [sample][sample-mako] ([other samples][others-mako]),
* [releasetools.py](https://android.googlesource.com/device/lge/mako/+/33f0114/releasetools.py#98)
*
* * MSM8974AA ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 5 "hammerhead": [sample][sample-hammerhead] ([other samples][others-hammerhead]),
* [releasetools.py](https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py#116)
*
* * MSM8992 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 5X "bullhead": [sample][sample-bullhead] ([other samples][others-bullhead]),
* [releasetools.py](https://android.googlesource.com/device/lge/bullhead/+/2994b6b/releasetools.py#126)
*
* * APQ8064-1AA ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_600_(2013)))
* - Nexus 7 \[2013] (Mobile) "deb" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-deb] ([other samples][others-deb]),
* [releasetools.py](https://android.googlesource.com/device/asus/deb/+/14c1638/releasetools.py#105)
* - Nexus 7 \[2013] (Wi-Fi) "flo" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-flo] ([other samples][others-flo]),
* [releasetools.py](https://android.googlesource.com/device/asus/flo/+/9d9fee9/releasetools.py#130)
*
* * MSM8996 Pro-AB ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_820_and_821_(2016)))
* - Pixel "sailfish" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-sailfish] ([other samples][others-sailfish])
* - Pixel XL "marlin" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-marlin] ([other samples][others-marlin])
*
* * MSM8998 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_835_(2017)))
* - Pixel 2 "walleye" <a href="#doc-note-bootloader-size">(\*)</a>: [sample][sample-walleye] ([other samples][others-walleye])
* - Pixel 2 XL "taimen": [sample][sample-taimen] ([other samples][others-taimen])
*
* <small id="doc-note-bootloader-size">(\*)
* `bootloader_size` is equal to the size of the whole file (not just `img_bodies` as usual).
* </small>
*
* <small id="doc-note-data-after-img-bodies">(\**)
* There are some data after the end of `img_bodies`.
* </small>
*
* ---
*
* On the other hand, devices with these chips **do not** use this format:
*
* * <del>APQ8084</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 6 "shamu": [sample][foreign-sample-shamu] ([other samples][foreign-others-shamu]),
* [releasetools.py](https://android.googlesource.com/device/moto/shamu/+/df9354d/releasetools.py#12) -
* uses "Motoboot packed image format" instead
*
* * <del>MSM8994</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 6P "angler": [sample][foreign-sample-angler] ([other samples][foreign-others-angler]),
* [releasetools.py](https://android.googlesource.com/device/huawei/angler/+/cf92cd8/releasetools.py#29) -
* uses "Huawei Bootloader packed image format" instead
*
* [sample-mako]: https://androidfilehost.com/?fid=96039337900113996 "bootloader-mako-makoz30f.img"
* [others-mako]: https://androidfilehost.com/?w=search&s=bootloader-mako&type=files
*
* [sample-hammerhead]: https://androidfilehost.com/?fid=385035244224410247 "bootloader-hammerhead-hhz20h.img"
* [others-hammerhead]: https://androidfilehost.com/?w=search&s=bootloader-hammerhead&type=files
*
* [sample-bullhead]: https://androidfilehost.com/?fid=11410963190603870177 "bootloader-bullhead-bhz32c.img"
* [others-bullhead]: https://androidfilehost.com/?w=search&s=bootloader-bullhead&type=files
*
* [sample-deb]: https://androidfilehost.com/?fid=23501681358552487 "bootloader-deb-flo-04.02.img"
* [others-deb]: https://androidfilehost.com/?w=search&s=bootloader-deb-flo&type=files
*
* [sample-flo]: https://androidfilehost.com/?fid=23991606952593542 "bootloader-flo-flo-04.05.img"
* [others-flo]: https://androidfilehost.com/?w=search&s=bootloader-flo-flo&type=files
*
* [sample-sailfish]: https://androidfilehost.com/?fid=6006931924117907154 "bootloader-sailfish-8996-012001-1904111134.img"
* [others-sailfish]: https://androidfilehost.com/?w=search&s=bootloader-sailfish&type=files
*
* [sample-marlin]: https://androidfilehost.com/?fid=6006931924117907131 "bootloader-marlin-8996-012001-1904111134.img"
* [others-marlin]: https://androidfilehost.com/?w=search&s=bootloader-marlin&type=files
*
* [sample-walleye]: https://androidfilehost.com/?fid=14943124697586348540 "bootloader-walleye-mw8998-003.0085.00.img"
* [others-walleye]: https://androidfilehost.com/?w=search&s=bootloader-walleye&type=files
*
* [sample-taimen]: https://androidfilehost.com/?fid=14943124697586348536 "bootloader-taimen-tmz30m.img"
* [others-taimen]: https://androidfilehost.com/?w=search&s=bootloader-taimen&type=files
*
* [foreign-sample-shamu]: https://androidfilehost.com/?fid=745849072291678307 "bootloader-shamu-moto-apq8084-72.04.img"
* [foreign-others-shamu]: https://androidfilehost.com/?w=search&s=bootloader-shamu&type=files
*
* [foreign-sample-angler]: https://androidfilehost.com/?fid=11410963190603870158 "bootloader-angler-angler-03.84.img"
* [foreign-others-angler]: https://androidfilehost.com/?w=search&s=bootloader-angler&type=files
*
* ---
* | * The `bootloader-*.img` samples referenced above originally come from factory
* images packed in ZIP archives that can be found on the page [Factory Images
* for Nexus and Pixel Devices](https://developers.google.com/android/images) on
* the Google Developers site. Note that the codenames on that page may be
* different than the ones that are written in the list above. That's because the
* Google page indicates **ROM codenames** in headings (e.g. "occam" for Nexus 4)
* but the above list uses **model codenames** (e.g. "mako" for Nexus 4) because
* that is how the original `bootloader-*.img` files are identified. For most
* devices, however, these code names are the same.
* @see <a href="https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py">Source</a>
*/
type AndroidBootldrQcom struct {
Magic []byte
NumImages uint32
OfsImgBodies uint32
BootloaderSize uint32
ImgHeaders []*AndroidBootldrQcom_ImgHeader
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent interface{}
_f_imgBodies bool
imgBodies []*AndroidBootldrQcom_ImgBody
}
func NewAndroidBootldrQcom() *AndroidBootldrQcom {
return &AndroidBootldrQcom{
}
}
func (this *AndroidBootldrQcom) Read(io *kaitai.Stream, parent interface{}, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp1, err := this._io.ReadBytes(int(8))
if err != nil {
return err
}
tmp1 = tmp1
this.Magic = tmp1
if !(bytes.Equal(this.Magic, []uint8{66, 79, 79, 84, 76, 68, 82, 33})) {
return kaitai.NewValidationNotEqualError([]uint8{66, 79, 79, 84, 76, 68, 82, 33}, this.Magic, this._io, "/seq/0")
}
tmp2, err := this._io.ReadU4le()
if err != nil {
return err
}
this.NumImages = uint32(tmp2)
tmp3, err := this._io.ReadU4le()
if err != nil {
return err
}
this.OfsImgBodies = uint32(tmp3)
tmp4, err := this._io.ReadU4le()
if err != nil {
return err
}
this.BootloaderSize = uint32(tmp4)
for i := 0; i < int(this.NumImages); i++ {
_ = i
tmp5 := NewAndroidBootldrQcom_ImgHeader()
err = tmp5.Read(this._io, this, this._root)
if err != nil {
return err
}
this.ImgHeaders = append(this.ImgHeaders, tmp5)
}
return err
}
func (this *AndroidBootldrQcom) ImgBodies() (v []*AndroidBootldrQcom_ImgBody, err error) {
if (this._f_imgBodies) {
return this.imgBodies, nil
}
_pos, err := this._io.Pos()
if err != nil {
return nil, err
}
_, err = this._io.Seek(int64(this.OfsImgBodies), io.SeekStart)
if err != nil {
return nil, err
}
for i := 0; i < int(this.NumImages); i++ {
_ = i
tmp6 := NewAndroidBootldrQcom_ImgBody(i)
err = tmp6.Read(this._io, this, this._root)
if err != nil {
return nil, err
}
this.imgBodies = append(this.imgBodies, tmp6)
}
_, err = this._io.Seek(_pos, io.SeekStart)
if err != nil {
return nil, err
}
this._f_imgBodies = true
this._f_imgBodies = true
return this.imgBodies, nil
}
/**
* According to all available `releasetools.py` versions from AOSP (links are
* in the top-level `/doc`), this should determine only the size of
* `img_bodies` - there is [an assertion](
* https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py#167)
* for it.
*
* However, files for certain Pixel devices (see `/doc`) apparently declare
* the entire file size here (i.e. including also fields from `magic` to
* `img_headers`). So if you interpreted `bootloader_size` as the size of
* `img_bodies` substream in these files, you would exceed the end of file.
* Although you could check that it fits in the file before attempting to
* create a substream of that size, you wouldn't know if it's meant to
* specify the size of just `img_bodies` or the size of the entire bootloader
* payload (whereas there may be additional data after the end of payload)
* until parsing `img_bodies` (or at least summing sizes from `img_headers`,
* but that's stupid).
*
* So this field isn't reliable enough to be used as the size of any
* substream. If you want to check if it has a reasonable value, do so in
* your application code.
*/
type AndroidBootldrQcom_ImgHeader struct {
Name string
LenBody uint32
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent *AndroidBootldrQcom
}
func NewAndroidBootldrQcom_ImgHeader() *AndroidBootldrQcom_ImgHeader {
return &AndroidBootldrQcom_ImgHeader{
}
}
func (this *AndroidBootldrQcom_ImgHeader) Read(io *kaitai.Stream, parent *AndroidBootldrQcom, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp7, err := this._io.ReadBytes(int(64))
if err != nil {
return err
}
tmp7 = kaitai.BytesTerminate(tmp7, 0, false)
this.Name = string(tmp7)
tmp8, err := this._io.ReadU4le()
if err != nil {
return err
}
this.LenBody = uint32(tmp8)
return err
}
type AndroidBootldrQcom_ImgBody struct {
Body []byte
Idx int32
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent *AndroidBootldrQcom
_f_imgHeader bool
imgHeader *AndroidBootldrQcom_ImgHeader
}
func NewAndroidBootldrQcom_ImgBody(idx int32) *AndroidBootldrQcom_ImgBody {
return &AndroidBootldrQcom_ImgBody{
Idx: idx,
}
}
func (this *AndroidBootldrQcom_ImgBody) Read(io *kaitai.Stream, parent *AndroidBootldrQcom, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp9, err := this.ImgHeader()
if err != nil {
return err
}
tmp10, err := this._io.ReadBytes(int(tmp9.LenBody))
if err != nil {
return err
}
tmp10 = tmp10
this.Body = tmp10
return err
}
func (this *AndroidBootldrQcom_ImgBody) ImgHeader() (v *AndroidBootldrQcom_ImgHeader, err error) {
if (this._f_imgHeader) {
return this.imgHeader, nil
}
this.imgHeader = this._root.ImgHeaders[this.Idx]
this._f_imgHeader = true
return this.imgHeader, nil
} | random_line_split |
|
android_bootldr_qcom.go | // Code generated by kaitai-struct-compiler from a .ksy source file. DO NOT EDIT.
import (
"github.com/kaitai-io/kaitai_struct_go_runtime/kaitai"
"bytes"
"io"
)
/**
* A bootloader for Android used on various devices powered by Qualcomm
* Snapdragon chips:
*
* <https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors>
*
* Although not all of the Snapdragon based Android devices use this particular
* bootloader format, it is known that devices with the following chips have used
* it (example devices are given for each chip):
*
* * APQ8064 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_S4_Pro))
* - Nexus 4 "mako": [sample][sample-mako] ([other samples][others-mako]),
* [releasetools.py](https://android.googlesource.com/device/lge/mako/+/33f0114/releasetools.py#98)
*
* * MSM8974AA ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 5 "hammerhead": [sample][sample-hammerhead] ([other samples][others-hammerhead]),
* [releasetools.py](https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py#116)
*
* * MSM8992 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 5X "bullhead": [sample][sample-bullhead] ([other samples][others-bullhead]),
* [releasetools.py](https://android.googlesource.com/device/lge/bullhead/+/2994b6b/releasetools.py#126)
*
* * APQ8064-1AA ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_600_(2013)))
* - Nexus 7 \[2013] (Mobile) "deb" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-deb] ([other samples][others-deb]),
* [releasetools.py](https://android.googlesource.com/device/asus/deb/+/14c1638/releasetools.py#105)
* - Nexus 7 \[2013] (Wi-Fi) "flo" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-flo] ([other samples][others-flo]),
* [releasetools.py](https://android.googlesource.com/device/asus/flo/+/9d9fee9/releasetools.py#130)
*
* * MSM8996 Pro-AB ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_820_and_821_(2016)))
* - Pixel "sailfish" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-sailfish] ([other samples][others-sailfish])
* - Pixel XL "marlin" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-marlin] ([other samples][others-marlin])
*
* * MSM8998 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_835_(2017)))
* - Pixel 2 "walleye" <a href="#doc-note-bootloader-size">(\*)</a>: [sample][sample-walleye] ([other samples][others-walleye])
* - Pixel 2 XL "taimen": [sample][sample-taimen] ([other samples][others-taimen])
*
* <small id="doc-note-bootloader-size">(\*)
* `bootloader_size` is equal to the size of the whole file (not just `img_bodies` as usual).
* </small>
*
* <small id="doc-note-data-after-img-bodies">(\**)
* There are some data after the end of `img_bodies`.
* </small>
*
* ---
*
* On the other hand, devices with these chips **do not** use this format:
*
* * <del>APQ8084</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 6 "shamu": [sample][foreign-sample-shamu] ([other samples][foreign-others-shamu]),
* [releasetools.py](https://android.googlesource.com/device/moto/shamu/+/df9354d/releasetools.py#12) -
* uses "Motoboot packed image format" instead
*
* * <del>MSM8994</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 6P "angler": [sample][foreign-sample-angler] ([other samples][foreign-others-angler]),
* [releasetools.py](https://android.googlesource.com/device/huawei/angler/+/cf92cd8/releasetools.py#29) -
* uses "Huawei Bootloader packed image format" instead
*
* [sample-mako]: https://androidfilehost.com/?fid=96039337900113996 "bootloader-mako-makoz30f.img"
* [others-mako]: https://androidfilehost.com/?w=search&s=bootloader-mako&type=files
*
* [sample-hammerhead]: https://androidfilehost.com/?fid=385035244224410247 "bootloader-hammerhead-hhz20h.img"
* [others-hammerhead]: https://androidfilehost.com/?w=search&s=bootloader-hammerhead&type=files
*
* [sample-bullhead]: https://androidfilehost.com/?fid=11410963190603870177 "bootloader-bullhead-bhz32c.img"
* [others-bullhead]: https://androidfilehost.com/?w=search&s=bootloader-bullhead&type=files
*
* [sample-deb]: https://androidfilehost.com/?fid=23501681358552487 "bootloader-deb-flo-04.02.img"
* [others-deb]: https://androidfilehost.com/?w=search&s=bootloader-deb-flo&type=files
*
* [sample-flo]: https://androidfilehost.com/?fid=23991606952593542 "bootloader-flo-flo-04.05.img"
* [others-flo]: https://androidfilehost.com/?w=search&s=bootloader-flo-flo&type=files
*
* [sample-sailfish]: https://androidfilehost.com/?fid=6006931924117907154 "bootloader-sailfish-8996-012001-1904111134.img"
* [others-sailfish]: https://androidfilehost.com/?w=search&s=bootloader-sailfish&type=files
*
* [sample-marlin]: https://androidfilehost.com/?fid=6006931924117907131 "bootloader-marlin-8996-012001-1904111134.img"
* [others-marlin]: https://androidfilehost.com/?w=search&s=bootloader-marlin&type=files
*
* [sample-walleye]: https://androidfilehost.com/?fid=14943124697586348540 "bootloader-walleye-mw8998-003.0085.00.img"
* [others-walleye]: https://androidfilehost.com/?w=search&s=bootloader-walleye&type=files
*
* [sample-taimen]: https://androidfilehost.com/?fid=14943124697586348536 "bootloader-taimen-tmz30m.img"
* [others-taimen]: https://androidfilehost.com/?w=search&s=bootloader-taimen&type=files
*
* [foreign-sample-shamu]: https://androidfilehost.com/?fid=745849072291678307 "bootloader-shamu-moto-apq8084-72.04.img"
* [foreign-others-shamu]: https://androidfilehost.com/?w=search&s=bootloader-shamu&type=files
*
* [foreign-sample-angler]: https://androidfilehost.com/?fid=11410963190603870158 "bootloader-angler-angler-03.84.img"
* [foreign-others-angler]: https://androidfilehost.com/?w=search&s=bootloader-angler&type=files
*
* ---
*
* The `bootloader-*.img` samples referenced above originally come from factory
* images packed in ZIP archives that can be found on the page [Factory Images
* for Nexus and Pixel Devices](https://developers.google.com/android/images) on
* the Google Developers site. Note that the codenames on that page may be
* different than the ones that are written in the list above. That's because the
* Google page indicates **ROM codenames** in headings (e.g. "occam" for Nexus 4)
* but the above list uses **model codenames** (e.g. "mako" for Nexus 4) because
* that is how the original `bootloader-*.img` files are identified. For most
* devices, however, these code names are the same.
* @see <a href="https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py">Source</a>
*/
type AndroidBootldrQcom struct {
Magic []byte
NumImages uint32
OfsImgBodies uint32
BootloaderSize uint32
ImgHeaders []*AndroidBootldrQcom_ImgHeader
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent interface{}
_f_imgBodies bool
imgBodies []*AndroidBootldrQcom_ImgBody
}
func NewAndroidBootldrQcom() *AndroidBootldrQcom {
return &AndroidBootldrQcom{
}
}
func (this *AndroidBootldrQcom) | (io *kaitai.Stream, parent interface{}, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp1, err := this._io.ReadBytes(int(8))
if err != nil {
return err
}
tmp1 = tmp1
this.Magic = tmp1
if !(bytes.Equal(this.Magic, []uint8{66, 79, 79, 84, 76, 68, 82, 33})) {
return kaitai.NewValidationNotEqualError([]uint8{66, 79, 79, 84, 76, 68, 82, 33}, this.Magic, this._io, "/seq/0")
}
tmp2, err := this._io.ReadU4le()
if err != nil {
return err
}
this.NumImages = uint32(tmp2)
tmp3, err := this._io.ReadU4le()
if err != nil {
return err
}
this.OfsImgBodies = uint32(tmp3)
tmp4, err := this._io.ReadU4le()
if err != nil {
return err
}
this.BootloaderSize = uint32(tmp4)
for i := 0; i < int(this.NumImages); i++ {
_ = i
tmp5 := NewAndroidBootldrQcom_ImgHeader()
err = tmp5.Read(this._io, this, this._root)
if err != nil {
return err
}
this.ImgHeaders = append(this.ImgHeaders, tmp5)
}
return err
}
func (this *AndroidBootldrQcom) ImgBodies() (v []*AndroidBootldrQcom_ImgBody, err error) {
if (this._f_imgBodies) {
return this.imgBodies, nil
}
_pos, err := this._io.Pos()
if err != nil {
return nil, err
}
_, err = this._io.Seek(int64(this.OfsImgBodies), io.SeekStart)
if err != nil {
return nil, err
}
for i := 0; i < int(this.NumImages); i++ {
_ = i
tmp6 := NewAndroidBootldrQcom_ImgBody(i)
err = tmp6.Read(this._io, this, this._root)
if err != nil {
return nil, err
}
this.imgBodies = append(this.imgBodies, tmp6)
}
_, err = this._io.Seek(_pos, io.SeekStart)
if err != nil {
return nil, err
}
this._f_imgBodies = true
this._f_imgBodies = true
return this.imgBodies, nil
}
/**
* According to all available `releasetools.py` versions from AOSP (links are
* in the top-level `/doc`), this should determine only the size of
* `img_bodies` - there is [an assertion](
* https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py#167)
* for it.
*
* However, files for certain Pixel devices (see `/doc`) apparently declare
* the entire file size here (i.e. including also fields from `magic` to
* `img_headers`). So if you interpreted `bootloader_size` as the size of
* `img_bodies` substream in these files, you would exceed the end of file.
* Although you could check that it fits in the file before attempting to
* create a substream of that size, you wouldn't know if it's meant to
* specify the size of just `img_bodies` or the size of the entire bootloader
* payload (whereas there may be additional data after the end of payload)
* until parsing `img_bodies` (or at least summing sizes from `img_headers`,
* but that's stupid).
*
* So this field isn't reliable enough to be used as the size of any
* substream. If you want to check if it has a reasonable value, do so in
* your application code.
*/
type AndroidBootldrQcom_ImgHeader struct {
Name string
LenBody uint32
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent *AndroidBootldrQcom
}
func NewAndroidBootldrQcom_ImgHeader() *AndroidBootldrQcom_ImgHeader {
return &AndroidBootldrQcom_ImgHeader{
}
}
func (this *AndroidBootldrQcom_ImgHeader) Read(io *kaitai.Stream, parent *AndroidBootldrQcom, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp7, err := this._io.ReadBytes(int(64))
if err != nil {
return err
}
tmp7 = kaitai.BytesTerminate(tmp7, 0, false)
this.Name = string(tmp7)
tmp8, err := this._io.ReadU4le()
if err != nil {
return err
}
this.LenBody = uint32(tmp8)
return err
}
type AndroidBootldrQcom_ImgBody struct {
Body []byte
Idx int32
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent *AndroidBootldrQcom
_f_imgHeader bool
imgHeader *AndroidBootldrQcom_ImgHeader
}
func NewAndroidBootldrQcom_ImgBody(idx int32) *AndroidBootldrQcom_ImgBody {
return &AndroidBootldrQcom_ImgBody{
Idx: idx,
}
}
func (this *AndroidBootldrQcom_ImgBody) Read(io *kaitai.Stream, parent *AndroidBootldrQcom, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp9, err := this.ImgHeader()
if err != nil {
return err
}
tmp10, err := this._io.ReadBytes(int(tmp9.LenBody))
if err != nil {
return err
}
tmp10 = tmp10
this.Body = tmp10
return err
}
func (this *AndroidBootldrQcom_ImgBody) ImgHeader() (v *AndroidBootldrQcom_ImgHeader, err error) {
if (this._f_imgHeader) {
return this.imgHeader, nil
}
this.imgHeader = this._root.ImgHeaders[this.Idx]
this._f_imgHeader = true
return this.imgHeader, nil
}
| Read | identifier_name |
android_bootldr_qcom.go | // Code generated by kaitai-struct-compiler from a .ksy source file. DO NOT EDIT.
import (
"github.com/kaitai-io/kaitai_struct_go_runtime/kaitai"
"bytes"
"io"
)
/**
* A bootloader for Android used on various devices powered by Qualcomm
* Snapdragon chips:
*
* <https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors>
*
* Although not all of the Snapdragon based Android devices use this particular
* bootloader format, it is known that devices with the following chips have used
* it (example devices are given for each chip):
*
* * APQ8064 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_S4_Pro))
* - Nexus 4 "mako": [sample][sample-mako] ([other samples][others-mako]),
* [releasetools.py](https://android.googlesource.com/device/lge/mako/+/33f0114/releasetools.py#98)
*
* * MSM8974AA ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 5 "hammerhead": [sample][sample-hammerhead] ([other samples][others-hammerhead]),
* [releasetools.py](https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py#116)
*
* * MSM8992 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 5X "bullhead": [sample][sample-bullhead] ([other samples][others-bullhead]),
* [releasetools.py](https://android.googlesource.com/device/lge/bullhead/+/2994b6b/releasetools.py#126)
*
* * APQ8064-1AA ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_600_(2013)))
* - Nexus 7 \[2013] (Mobile) "deb" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-deb] ([other samples][others-deb]),
* [releasetools.py](https://android.googlesource.com/device/asus/deb/+/14c1638/releasetools.py#105)
* - Nexus 7 \[2013] (Wi-Fi) "flo" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-flo] ([other samples][others-flo]),
* [releasetools.py](https://android.googlesource.com/device/asus/flo/+/9d9fee9/releasetools.py#130)
*
* * MSM8996 Pro-AB ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_820_and_821_(2016)))
* - Pixel "sailfish" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-sailfish] ([other samples][others-sailfish])
* - Pixel XL "marlin" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-marlin] ([other samples][others-marlin])
*
* * MSM8998 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_835_(2017)))
* - Pixel 2 "walleye" <a href="#doc-note-bootloader-size">(\*)</a>: [sample][sample-walleye] ([other samples][others-walleye])
* - Pixel 2 XL "taimen": [sample][sample-taimen] ([other samples][others-taimen])
*
* <small id="doc-note-bootloader-size">(\*)
* `bootloader_size` is equal to the size of the whole file (not just `img_bodies` as usual).
* </small>
*
* <small id="doc-note-data-after-img-bodies">(\**)
* There are some data after the end of `img_bodies`.
* </small>
*
* ---
*
* On the other hand, devices with these chips **do not** use this format:
*
* * <del>APQ8084</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 6 "shamu": [sample][foreign-sample-shamu] ([other samples][foreign-others-shamu]),
* [releasetools.py](https://android.googlesource.com/device/moto/shamu/+/df9354d/releasetools.py#12) -
* uses "Motoboot packed image format" instead
*
* * <del>MSM8994</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 6P "angler": [sample][foreign-sample-angler] ([other samples][foreign-others-angler]),
* [releasetools.py](https://android.googlesource.com/device/huawei/angler/+/cf92cd8/releasetools.py#29) -
* uses "Huawei Bootloader packed image format" instead
*
* [sample-mako]: https://androidfilehost.com/?fid=96039337900113996 "bootloader-mako-makoz30f.img"
* [others-mako]: https://androidfilehost.com/?w=search&s=bootloader-mako&type=files
*
* [sample-hammerhead]: https://androidfilehost.com/?fid=385035244224410247 "bootloader-hammerhead-hhz20h.img"
* [others-hammerhead]: https://androidfilehost.com/?w=search&s=bootloader-hammerhead&type=files
*
* [sample-bullhead]: https://androidfilehost.com/?fid=11410963190603870177 "bootloader-bullhead-bhz32c.img"
* [others-bullhead]: https://androidfilehost.com/?w=search&s=bootloader-bullhead&type=files
*
* [sample-deb]: https://androidfilehost.com/?fid=23501681358552487 "bootloader-deb-flo-04.02.img"
* [others-deb]: https://androidfilehost.com/?w=search&s=bootloader-deb-flo&type=files
*
* [sample-flo]: https://androidfilehost.com/?fid=23991606952593542 "bootloader-flo-flo-04.05.img"
* [others-flo]: https://androidfilehost.com/?w=search&s=bootloader-flo-flo&type=files
*
* [sample-sailfish]: https://androidfilehost.com/?fid=6006931924117907154 "bootloader-sailfish-8996-012001-1904111134.img"
* [others-sailfish]: https://androidfilehost.com/?w=search&s=bootloader-sailfish&type=files
*
* [sample-marlin]: https://androidfilehost.com/?fid=6006931924117907131 "bootloader-marlin-8996-012001-1904111134.img"
* [others-marlin]: https://androidfilehost.com/?w=search&s=bootloader-marlin&type=files
*
* [sample-walleye]: https://androidfilehost.com/?fid=14943124697586348540 "bootloader-walleye-mw8998-003.0085.00.img"
* [others-walleye]: https://androidfilehost.com/?w=search&s=bootloader-walleye&type=files
*
* [sample-taimen]: https://androidfilehost.com/?fid=14943124697586348536 "bootloader-taimen-tmz30m.img"
* [others-taimen]: https://androidfilehost.com/?w=search&s=bootloader-taimen&type=files
*
* [foreign-sample-shamu]: https://androidfilehost.com/?fid=745849072291678307 "bootloader-shamu-moto-apq8084-72.04.img"
* [foreign-others-shamu]: https://androidfilehost.com/?w=search&s=bootloader-shamu&type=files
*
* [foreign-sample-angler]: https://androidfilehost.com/?fid=11410963190603870158 "bootloader-angler-angler-03.84.img"
* [foreign-others-angler]: https://androidfilehost.com/?w=search&s=bootloader-angler&type=files
*
* ---
*
* The `bootloader-*.img` samples referenced above originally come from factory
* images packed in ZIP archives that can be found on the page [Factory Images
* for Nexus and Pixel Devices](https://developers.google.com/android/images) on
* the Google Developers site. Note that the codenames on that page may be
* different than the ones that are written in the list above. That's because the
* Google page indicates **ROM codenames** in headings (e.g. "occam" for Nexus 4)
* but the above list uses **model codenames** (e.g. "mako" for Nexus 4) because
* that is how the original `bootloader-*.img` files are identified. For most
* devices, however, these code names are the same.
* @see <a href="https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py">Source</a>
*/
type AndroidBootldrQcom struct {
Magic []byte
NumImages uint32
OfsImgBodies uint32
BootloaderSize uint32
ImgHeaders []*AndroidBootldrQcom_ImgHeader
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent interface{}
_f_imgBodies bool
imgBodies []*AndroidBootldrQcom_ImgBody
}
func NewAndroidBootldrQcom() *AndroidBootldrQcom {
return &AndroidBootldrQcom{
}
}
func (this *AndroidBootldrQcom) Read(io *kaitai.Stream, parent interface{}, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp1, err := this._io.ReadBytes(int(8))
if err != nil {
return err
}
tmp1 = tmp1
this.Magic = tmp1
if !(bytes.Equal(this.Magic, []uint8{66, 79, 79, 84, 76, 68, 82, 33})) {
return kaitai.NewValidationNotEqualError([]uint8{66, 79, 79, 84, 76, 68, 82, 33}, this.Magic, this._io, "/seq/0")
}
tmp2, err := this._io.ReadU4le()
if err != nil {
return err
}
this.NumImages = uint32(tmp2)
tmp3, err := this._io.ReadU4le()
if err != nil {
return err
}
this.OfsImgBodies = uint32(tmp3)
tmp4, err := this._io.ReadU4le()
if err != nil |
this.BootloaderSize = uint32(tmp4)
for i := 0; i < int(this.NumImages); i++ {
_ = i
tmp5 := NewAndroidBootldrQcom_ImgHeader()
err = tmp5.Read(this._io, this, this._root)
if err != nil {
return err
}
this.ImgHeaders = append(this.ImgHeaders, tmp5)
}
return err
}
func (this *AndroidBootldrQcom) ImgBodies() (v []*AndroidBootldrQcom_ImgBody, err error) {
if (this._f_imgBodies) {
return this.imgBodies, nil
}
_pos, err := this._io.Pos()
if err != nil {
return nil, err
}
_, err = this._io.Seek(int64(this.OfsImgBodies), io.SeekStart)
if err != nil {
return nil, err
}
for i := 0; i < int(this.NumImages); i++ {
_ = i
tmp6 := NewAndroidBootldrQcom_ImgBody(i)
err = tmp6.Read(this._io, this, this._root)
if err != nil {
return nil, err
}
this.imgBodies = append(this.imgBodies, tmp6)
}
_, err = this._io.Seek(_pos, io.SeekStart)
if err != nil {
return nil, err
}
this._f_imgBodies = true
this._f_imgBodies = true
return this.imgBodies, nil
}
/**
* According to all available `releasetools.py` versions from AOSP (links are
* in the top-level `/doc`), this should determine only the size of
* `img_bodies` - there is [an assertion](
* https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py#167)
* for it.
*
* However, files for certain Pixel devices (see `/doc`) apparently declare
* the entire file size here (i.e. including also fields from `magic` to
* `img_headers`). So if you interpreted `bootloader_size` as the size of
* `img_bodies` substream in these files, you would exceed the end of file.
* Although you could check that it fits in the file before attempting to
* create a substream of that size, you wouldn't know if it's meant to
* specify the size of just `img_bodies` or the size of the entire bootloader
* payload (whereas there may be additional data after the end of payload)
* until parsing `img_bodies` (or at least summing sizes from `img_headers`,
* but that's stupid).
*
* So this field isn't reliable enough to be used as the size of any
* substream. If you want to check if it has a reasonable value, do so in
* your application code.
*/
type AndroidBootldrQcom_ImgHeader struct {
Name string
LenBody uint32
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent *AndroidBootldrQcom
}
func NewAndroidBootldrQcom_ImgHeader() *AndroidBootldrQcom_ImgHeader {
return &AndroidBootldrQcom_ImgHeader{
}
}
func (this *AndroidBootldrQcom_ImgHeader) Read(io *kaitai.Stream, parent *AndroidBootldrQcom, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp7, err := this._io.ReadBytes(int(64))
if err != nil {
return err
}
tmp7 = kaitai.BytesTerminate(tmp7, 0, false)
this.Name = string(tmp7)
tmp8, err := this._io.ReadU4le()
if err != nil {
return err
}
this.LenBody = uint32(tmp8)
return err
}
type AndroidBootldrQcom_ImgBody struct {
Body []byte
Idx int32
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent *AndroidBootldrQcom
_f_imgHeader bool
imgHeader *AndroidBootldrQcom_ImgHeader
}
func NewAndroidBootldrQcom_ImgBody(idx int32) *AndroidBootldrQcom_ImgBody {
return &AndroidBootldrQcom_ImgBody{
Idx: idx,
}
}
func (this *AndroidBootldrQcom_ImgBody) Read(io *kaitai.Stream, parent *AndroidBootldrQcom, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp9, err := this.ImgHeader()
if err != nil {
return err
}
tmp10, err := this._io.ReadBytes(int(tmp9.LenBody))
if err != nil {
return err
}
tmp10 = tmp10
this.Body = tmp10
return err
}
func (this *AndroidBootldrQcom_ImgBody) ImgHeader() (v *AndroidBootldrQcom_ImgHeader, err error) {
if (this._f_imgHeader) {
return this.imgHeader, nil
}
this.imgHeader = this._root.ImgHeaders[this.Idx]
this._f_imgHeader = true
return this.imgHeader, nil
}
| {
return err
} | conditional_block |
username.go | package people
import (
"fmt"
"math/rand"
)
// Select a random word and append some numbers to it to make something username-looking.
func Username() string {
word := Words[rand.Int31n(int32(len(Words)))]
digits := rand.Int31n(1000)
return fmt.Sprintf("%s%d", word, digits)
}
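// Usage sketch (assumptions: this package is imported as "people" and the caller
// seeds math/rand once at startup; without seeding, the default source repeats
// the same sequence on every run):
//
//	rand.Seed(time.Now().UnixNano())
//	fmt.Println(people.Username()) // e.g. "plowshare42"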
var Words = []string{
"acquirable",
"bestsellers",
"farther",
"prizer",
"shasta",
"evaporate",
"auspices",
"garments",
"partnership",
"blocs",
"forestalling",
"razors",
"extensibility",
"unavoidably",
"logician",
"embroidered",
"crippling",
"supranational",
"milton",
"healthily",
"spiraling",
"coolies",
"bartend",
"precondition",
"reflectors",
"judged",
"rinser",
"amplify",
"casseroles",
"physics",
"raider",
"whippet",
"expulsion",
"enzyme",
"prohibit",
"gazers",
"unchangeable",
"matching",
"mouthe",
"millihenry",
"plowshare",
"quicken",
"blackmailing",
"chatham",
"jobbing",
"augustly",
"constitutionality",
"cathodes",
"inspirations",
"seniority",
"staging",
"figuratively",
"beckon",
"rankle",
"buzzwords",
"mccullough",
"justifying",
"antiquities",
"ardency",
"tribunals",
"laughs",
"shakes",
"feedback",
"balustrade",
"mattress",
"seduces",
"attainments",
"counterattack",
"sweeter",
"deforestation",
"digests",
"sacrificed",
"scripts",
"philharmonic",
"legerdemain",
"advancements",
"disburse",
"bottles",
"scatterbrain",
"conceptions",
"planer",
"fishpond",
"tidying",
"illustration",
"dishonoring",
"impostors",
"aspect",
"summations",
"steering",
"cheesy",
"hamlets",
"cryptanalyst",
"ensued",
"upholsterer",
"detaining",
"penned",
"robbers",
"contingency",
"effectively",
"soybean",
"clockings",
"pappas",
"jellies",
"formulae",
"routines",
"savoyard",
"redefining",
"insistently",
"macroscopic",
"taster",
"phosphates",
"midsts",
"invertebrates",
"vices",
"vacancy",
"predominated",
"timeshare",
"convincing",
"paralleling",
"conceived",
"guggenheim",
"paintings",
"dispells",
"incapacitating",
"nostrand",
"pliant",
"sleuth",
"grammar",
"wallows",
"dismisses",
"wilhelm",
"exiling",
"checkers",
"proceedings",
"hoarsely",
"stretches",
"purport",
"limousine",
"inheritresses",
"company",
"thruway",
"hopkinsian",
"downcast",
"dangers",
"anatomically",
"allure",
"stampers",
"executive",
"postmaster",
"depressing",
"dragons",
"countys",
"harriet",
"attire",
"runway",
"bubbled",
"waterman",
"gerhardt",
"honorableness",
"flurry",
"refract",
"bacteria",
"antiques",
"provide",
"mysteriously",
"interrogation",
"discontinuous",
"victrola",
"replications",
"passion",
"thawed",
"alligator",
"documentaries",
"nakedness",
"veining",
"durability",
"corrosion",
"laterally",
"winnipeg",
"federally",
"divest",
"gasped",
"unselfishly",
"disclosing",
"nurturing",
"tramway",
"palmed",
"disruptions",
"footman",
"senators",
"cleave",
"effected",
"ceramic",
"leathery",
"nicely",
"frustrater",
"warning",
"lexicons",
"exactions",
"prover",
"recreates",
"puddling",
"diabolic",
"spatula",
"herons",
"blobs",
"fibrosity",
"cabinetmake",
"phobic",
"jingling",
"double",
"proving",
"taipei",
"skims",
"prophesied",
"hastily",
"parasitics",
"landings",
"taxicabs",
"subway",
"recount",
"noisemake",
"induce",
"mountaineer",
"achieved",
"celebrities",
"fluffy",
"bimini",
"briefcases",
"devote",
"stylishly",
"cleansing",
"disclaimed",
"phonemes",
"impertinent",
"connecting",
"lentil",
"revelations",
"phoned",
"lading",
"lengthens",
"nobles",
"despairing",
"hatchets",
"livably",
"lodger",
"tokens",
"ensurers",
"interconnects",
"passionate",
"peppergrass",
"bookkeep",
"humerus",
"thanklessness",
"shamed",
"choreography",
"swimmers",
"authors",
"football",
"auditions",
"greener",
"deflater",
"tariff",
"banjos",
"packages",
"gambit",
"heated",
"interfere",
"collectors",
"sideboards",
"shoreline",
"rutherford",
"ethnology",
"persecuting",
"operatives",
"demark",
"curtate",
"inheritress",
"economizer",
"pleural",
"broiling",
"minting",
"ricochet",
"lookup",
"biases",
"auctioneers",
"formula",
"morphism",
"outstripped",
"falsifying",
"fealty",
"homesteads",
"dilate",
"councilmen",
"cornea",
"intercept",
"adjoins",
"medals",
"autonomic",
"monologue", | "cruisers",
"psychoanalyst",
"registrations",
"agnostics",
"ambivalently",
"punishable",
"philosophically",
"storages",
"wistful",
"loveland",
"preferential",
"armchairs",
"washington",
"accretions",
"interchangeable",
"ambitions",
"hostesss",
"heading",
"crucifies",
"venturesome",
"mullion",
"fueling",
"bedposts",
"soapstone",
"garland",
"heaved",
"instrumentalists",
"patristic",
"tableau",
"plagiarist",
"disambiguate",
"autopilot",
"anointing",
"retypes",
"pirates",
"obfuscatory",
"octennial",
"indeterminately",
"defended",
"childbirth",
"liberation",
"kilograms",
"elaborates",
"snyaptic",
"granitic",
"carthage",
"deteriorate",
"matilda",
"antislavery",
"batter",
"cringes",
"aerosolize",
"floppily",
"caribbean",
"woodbury",
"wrapper",
"capistrano",
"meats",
"overdrafts",
"gnats",
"sympathetic",
"pritchard",
"subscripted",
"chinquapin",
"skater",
"counterfeiter",
"leathern",
"tabula",
"bowled",
"reagan",
"appropriators",
"curing",
"pacific",
"scandalous",
"anesthetized",
"reinforcements",
"conner",
"complains",
"conjugal",
"enumerator",
"inconclusive",
"pipelines",
"synthesizer",
"intimate",
"saturater",
"splintered",
"taxonomy",
"roaring",
"transduction",
"collegial",
"breakdown",
"adducing",
"debenture",
"jeopardy",
"intoxicant",
"rescue",
"phrased",
"cartwheel",
"remedies",
"penguin",
"shined",
"codification",
"impugn",
"doorbell",
"ludlow",
"visibility",
"agglutinins",
"apposition",
"pathogenic",
"bestial",
"present",
"encyclopedic",
"qualifiers",
"realists",
"baptism",
"plasticity",
"transitioned",
"atalanta",
"crucially",
"trackers",
"identities",
"cursors",
"backspace",
"airships",
"multilevel",
"concretely",
"gazette",
"intelligibility",
"cottager",
"denigrated",
"unimpeded",
"matisse",
"thrashed",
"impious",
"ceaseless",
"callisto",
"lollipop",
"defenestrated",
"reredos",
"chemic",
"foulest",
"solemn",
"staley",
"ballfield",
"alameda",
"panaceas",
"nabisco",
"strainer",
"hackmatack",
"hemispheric",
"cogitated",
"customizing",
"pushbutton",
"dressmaker",
"amending",
"penance",
"seasonal",
"chromium",
"offsaddle",
"atrophy",
"souffle",
"platforms",
"wrangle",
"clearness",
"anecdotes",
"hurting",
"tooled",
"angora",
"narrate",
"statistician",
"philosoph",
"assertions",
"indefinitely",
"parsimonious",
"bribing",
"tolerant",
"lilies",
"sulfate",
"righteously",
"stereotypical",
"degeneracy",
"similarity",
"pastimes",
"informed",
"polypropylene",
"backlog",
"typography",
"survivors",
"reconfiguring",
"gadding",
"caryatid",
"scuttling",
"semaphores",
"debugged",
"pacification",
"carbone",
"firearms",
"neurophysiology",
"blazing",
"ballrooms",
"thunderbolts",
"forefather",
"rachel",
"collision",
"reticulately",
"resignations",
"interactions",
"conspirator",
"basilar",
"climaxes",
"draining",
"cabinets",
"checksumming",
"suicide",
"coffees",
"mescaline",
"tininess",
"tinder",
"binomial",
"berates",
"cashed",
"bellwethers",
"carbonation",
"kalamazoo",
"thyroglobulin",
"kidnappers",
"numbed",
"shiftiness",
"presuming",
"achievements",
"amplifiers",
"lurches",
"cataclysmic",
"subvert",
"paragon",
"hoppers",
"lapels",
"recast",
"pitilessly",
"coffins",
"outstretched",
"perceiving",
"thoughtfully",
"taking",
"stems",
"favors",
"streets",
"quieting",
"monoid",
"delectable",
"encoding",
"jejune",
"sincere",
"goober",
"testes",
"lexicon",
"richter",
"covenants",
"pitiers",
"quintessence",
"yellower",
"equitably",
"dickens",
"contentment",
"bessemer",
"metabole",
"timetables",
"solemnity",
"report",
"indiana",
"fortunate",
"sweepstake",
"lapelled",
"arduousness",
"blunts",
"anorthosite",
"acclimatized",
"potters",
"babysitter",
"graveyard",
"forthcoming",
"glimmer",
"knaves",
"purposed",
"entice",
"amorality",
"poetics",
"frightened",
"dilution",
"erastus",
"anabaptists",
"carport",
"whatre",
"harpsichord",
"marvin",
"triers",
"dumbbells",
"hopefulness",
"sorting",
"continentally",
"asynchronism",
"illustratively",
"afforestation",
"constitutional",
"arcsin",
"darlings",
"removes",
"incompletion",
"bitterroot",
"blissfully",
"splash",
"manfred",
"rashly",
"bustling",
"hathaway",
"lacerating",
"underplayed",
"roundhead",
"purposefully",
"baldly",
"steadily",
"syndromes",
"subversion",
"lunchtime",
"congressman",
"mouses",
"valences",
"perhaps",
"sawmills",
"pinehurst",
"comparison",
"expansive",
"kidnappers",
"occasioned",
"transferable",
"transducer",
"synchrotron",
"rutile",
"effete",
"savaged",
"dearths",
"reading",
"horizons",
"scabbards",
"presences",
"trinity",
"isinglass",
"abusive",
"critics",
"recalculates",
"loitering",
"atrium",
"terrorizing",
"unblocked",
"trickery",
"accomplices",
"bleedings",
"wholesomeness",
"opaquely",
"epitaph",
"escorted",
"automatically",
"interdependence",
"ludicrously",
"necessary",
"chastising",
"beneficences",
"mbabane",
"floury",
"iniquity",
"craftsmen",
"feasted",
"inviting",
"gasoline",
"balsam",
"finality",
"howling",
"largesse",
"docile",
"conveniences",
"sensors",
"dinners",
"alterable",
"overhangs",
"satisfaction",
"semicolons",
"eclipses",
"languish",
"symbol",
"praecox",
"muskox",
"inheritrices",
"invade",
"measurable",
"converses",
"enlivened",
"tantrums",
"stopped",
"wavefront",
"constance",
"humanness",
"postscripts",
"troublesomely",
"unfavorable",
"tarbell",
"antagonistically",
"unsure",
"bridgehead",
"imprints",
"neurons",
"volatilities",
"renowned",
"midwestern",
"mistakable",
"recover",
"stoichiometry",
"hating",
"feuds",
"superscripts",
"irvine",
"vertebrates",
"fifteenth",
"parthenon",
"southbound",
"nineveh",
"twitched",
"flooded",
"sectors",
"tractable",
"subtrees",
"suffering",
"doggedness",
"shoveled",
"poisons",
"cinders",
"payoff",
"enjoyably",
"applicative",
"aspersion",
"analytic",
"tactician",
"forgiveness",
"shibboleth",
"equalizing",
"deluxe",
"frankest",
"epistemology",
"donated",
"armpits",
"corpsmen",
"pyhrric",
"backers",
"clearings",
"patience",
"bijectively",
"screwbean",
"careers",
"delaney",
"applications",
"autocrats",
"homeopath",
"abysmal",
"ribbon",
"bimodal",
"relaxations",
"fascism",
"chatterer",
"inaugurated",
"uncaught",
"hardhat",
"voiceband",
"wetland",
"additions",
"yugoslav",
"atrophying",
"plebeian",
"duplication",
"birthplaces",
"farmers",
"suspends",
"retrograde",
"transgress",
"harassing",
"yonder",
"formulators",
"alhambra",
"attesting",
"fairport",
"osseous",
"oratorical",
"songbook",
"wingspan",
"loggers",
"julies",
"highlighted",
"granter",
"louver",
"seersucker",
"parley",
"beheading",
"soothes",
"crises",
"rounded",
"lesser",
"streamline",
"measure",
"rusticate",
"ordained",
"gabrielle",
"haplessness",
"raccoon",
"explosions",
"freakish",
"soother",
"pravda",
"icebox",
"homing",
"leeway",
"handsomeness",
"commodity",
"liberated",
"playwrights",
"rebroadcast",
"instincts",
"bumblers",
"variously",
"martyrs",
"wednesdays",
"thresholds",
"azimuths",
"aspire",
"touchdown",
"drudge",
"teared",
"exposing",
"downside",
"lofts",
"lebensraum",
"nether",
"nozzle",
"lawsuits",
"gustafson",
"predates",
"meritorious",
"searchings",
"bundling",
"scenarios",
"schmidt",
"fischbein",
"bewitched",
"grassiest",
"disbursed",
"indestructible",
"burning",
"swearer",
"spirits",
"cushman",
"timbre",
"seconding",
"giggle",
"ordered",
"bibliophile",
"affianced",
"camera",
"bernet",
"facilitated",
"highways",
"grandly",
"inkings",
"shelves",
"staggering",
"bloomington",
"magnesite",
"colored",
"sunned",
"audiometer",
"affectation",
"maiden",
"subfields",
"axiomatically",
"respire",
"palmate",
"domino",
"depressed",
"industrials",
"expellable",
"justine",
"nuclei",
"balkanize",
"refreshments",
"santiago",
"organizers",
"prospections",
"antagonizing",
"resort",
"incoherent",
"nonsense",
"highwayman",
"members",
"sightings",
"unique",
"relentlessness",
"chorused",
"recipes",
"strongest",
"airlocks",
"brethren",
"legate",
"capitalist",
"digestive",
"bookcases",
"controller",
"deficit",
"masochism",
"unlocks",
"recreated",
"bulbs",
"avertive",
"penetrable",
"mynheer",
"attracts",
"axiomatizes",
"typical",
"resuming",
"remarkable",
"cookery",
"excused",
"meals",
"bodybuilders",
"canoes",
"harmonize",
"uncertainly",
"blowup",
"former",
"squaring",
"alphabetical",
"disciplines",
"genera",
"melody",
"cossack",
"varyings",
"abridge",
"harvested",
"tenney",
"zodiacal",
"labrador",
"honoraries",
"heuristics",
"billeted",
"outlays",
"scrimmage",
"folksy",
"resented",
"audits",
"pilgrims",
"compromisers",
"niggardly",
"patchwork",
"briefest",
"comatose",
"faceplate",
"variations",
"assertively",
"tutelage",
"precedences",
"bleating",
"suspensor",
"divisors",
"youngest",
"ruthlessly",
"breadth",
"abundance",
"penalized",
"promotes",
"thermometers",
"retired",
"circumlocutions",
"recites",
"nettlesome",
"woodside",
"differentials",
"handicaps",
"lawmen",
"budging",
"balkanized",
"instantiated",
"contradict",
"insinuations",
"assigned",
"budgeter",
"hailing",
"strolls",
"ultrasound",
"liberties",
"pantomime",
"triplex",
"inroad",
"befouling",
"foregoing",
"gullys",
} | random_line_split |
|
username.go | package people
import (
"fmt"
"math/rand"
)
// Select a random word and append some numbers to it to make something username-looking.
func Username() string |
var Words = []string{
"acquirable",
"bestsellers",
"farther",
"prizer",
"shasta",
"evaporate",
"auspices",
"garments",
"partnership",
"blocs",
"forestalling",
"razors",
"extensibility",
"unavoidably",
"logician",
"embroidered",
"crippling",
"supranational",
"milton",
"healthily",
"spiraling",
"coolies",
"bartend",
"precondition",
"reflectors",
"judged",
"rinser",
"amplify",
"casseroles",
"physics",
"raider",
"whippet",
"expulsion",
"enzyme",
"prohibit",
"gazers",
"unchangeable",
"matching",
"mouthe",
"millihenry",
"plowshare",
"quicken",
"blackmailing",
"chatham",
"jobbing",
"augustly",
"constitutionality",
"cathodes",
"inspirations",
"seniority",
"staging",
"figuratively",
"beckon",
"rankle",
"buzzwords",
"mccullough",
"justifying",
"antiquities",
"ardency",
"tribunals",
"laughs",
"shakes",
"feedback",
"balustrade",
"mattress",
"seduces",
"attainments",
"counterattack",
"sweeter",
"deforestation",
"digests",
"sacrificed",
"scripts",
"philharmonic",
"legerdemain",
"advancements",
"disburse",
"bottles",
"scatterbrain",
"conceptions",
"planer",
"fishpond",
"tidying",
"illustration",
"dishonoring",
"impostors",
"aspect",
"summations",
"steering",
"cheesy",
"hamlets",
"cryptanalyst",
"ensued",
"upholsterer",
"detaining",
"penned",
"robbers",
"contingency",
"effectively",
"soybean",
"clockings",
"pappas",
"jellies",
"formulae",
"routines",
"savoyard",
"redefining",
"insistently",
"macroscopic",
"taster",
"phosphates",
"midsts",
"invertebrates",
"vices",
"vacancy",
"predominated",
"timeshare",
"convincing",
"paralleling",
"conceived",
"guggenheim",
"paintings",
"dispells",
"incapacitating",
"nostrand",
"pliant",
"sleuth",
"grammar",
"wallows",
"dismisses",
"wilhelm",
"exiling",
"checkers",
"proceedings",
"hoarsely",
"stretches",
"purport",
"limousine",
"inheritresses",
"company",
"thruway",
"hopkinsian",
"downcast",
"dangers",
"anatomically",
"allure",
"stampers",
"executive",
"postmaster",
"depressing",
"dragons",
"countys",
"harriet",
"attire",
"runway",
"bubbled",
"waterman",
"gerhardt",
"honorableness",
"flurry",
"refract",
"bacteria",
"antiques",
"provide",
"mysteriously",
"interrogation",
"discontinuous",
"victrola",
"replications",
"passion",
"thawed",
"alligator",
"documentaries",
"nakedness",
"veining",
"durability",
"corrosion",
"laterally",
"winnipeg",
"federally",
"divest",
"gasped",
"unselfishly",
"disclosing",
"nurturing",
"tramway",
"palmed",
"disruptions",
"footman",
"senators",
"cleave",
"effected",
"ceramic",
"leathery",
"nicely",
"frustrater",
"warning",
"lexicons",
"exactions",
"prover",
"recreates",
"puddling",
"diabolic",
"spatula",
"herons",
"blobs",
"fibrosity",
"cabinetmake",
"phobic",
"jingling",
"double",
"proving",
"taipei",
"skims",
"prophesied",
"hastily",
"parasitics",
"landings",
"taxicabs",
"subway",
"recount",
"noisemake",
"induce",
"mountaineer",
"achieved",
"celebrities",
"fluffy",
"bimini",
"briefcases",
"devote",
"stylishly",
"cleansing",
"disclaimed",
"phonemes",
"impertinent",
"connecting",
"lentil",
"revelations",
"phoned",
"lading",
"lengthens",
"nobles",
"despairing",
"hatchets",
"livably",
"lodger",
"tokens",
"ensurers",
"interconnects",
"passionate",
"peppergrass",
"bookkeep",
"humerus",
"thanklessness",
"shamed",
"choreography",
"swimmers",
"authors",
"football",
"auditions",
"greener",
"deflater",
"tariff",
"banjos",
"packages",
"gambit",
"heated",
"interfere",
"collectors",
"sideboards",
"shoreline",
"rutherford",
"ethnology",
"persecuting",
"operatives",
"demark",
"curtate",
"inheritress",
"economizer",
"pleural",
"broiling",
"minting",
"ricochet",
"lookup",
"biases",
"auctioneers",
"formula",
"morphism",
"outstripped",
"falsifying",
"fealty",
"homesteads",
"dilate",
"councilmen",
"cornea",
"intercept",
"adjoins",
"medals",
"autonomic",
"monologue",
"cruisers",
"psychoanalyst",
"registrations",
"agnostics",
"ambivalently",
"punishable",
"philosophically",
"storages",
"wistful",
"loveland",
"preferential",
"armchairs",
"washington",
"accretions",
"interchangeable",
"ambitions",
"hostesss",
"heading",
"crucifies",
"venturesome",
"mullion",
"fueling",
"bedposts",
"soapstone",
"garland",
"heaved",
"instrumentalists",
"patristic",
"tableau",
"plagiarist",
"disambiguate",
"autopilot",
"anointing",
"retypes",
"pirates",
"obfuscatory",
"octennial",
"indeterminately",
"defended",
"childbirth",
"liberation",
"kilograms",
"elaborates",
"snyaptic",
"granitic",
"carthage",
"deteriorate",
"matilda",
"antislavery",
"batter",
"cringes",
"aerosolize",
"floppily",
"caribbean",
"woodbury",
"wrapper",
"capistrano",
"meats",
"overdrafts",
"gnats",
"sympathetic",
"pritchard",
"subscripted",
"chinquapin",
"skater",
"counterfeiter",
"leathern",
"tabula",
"bowled",
"reagan",
"appropriators",
"curing",
"pacific",
"scandalous",
"anesthetized",
"reinforcements",
"conner",
"complains",
"conjugal",
"enumerator",
"inconclusive",
"pipelines",
"synthesizer",
"intimate",
"saturater",
"splintered",
"taxonomy",
"roaring",
"transduction",
"collegial",
"breakdown",
"adducing",
"debenture",
"jeopardy",
"intoxicant",
"rescue",
"phrased",
"cartwheel",
"remedies",
"penguin",
"shined",
"codification",
"impugn",
"doorbell",
"ludlow",
"visibility",
"agglutinins",
"apposition",
"pathogenic",
"bestial",
"present",
"encyclopedic",
"qualifiers",
"realists",
"baptism",
"plasticity",
"transitioned",
"atalanta",
"crucially",
"trackers",
"identities",
"cursors",
"backspace",
"airships",
"multilevel",
"concretely",
"gazette",
"intelligibility",
"cottager",
"denigrated",
"unimpeded",
"matisse",
"thrashed",
"impious",
"ceaseless",
"callisto",
"lollipop",
"defenestrated",
"reredos",
"chemic",
"foulest",
"solemn",
"staley",
"ballfield",
"alameda",
"panaceas",
"nabisco",
"strainer",
"hackmatack",
"hemispheric",
"cogitated",
"customizing",
"pushbutton",
"dressmaker",
"amending",
"penance",
"seasonal",
"chromium",
"offsaddle",
"atrophy",
"souffle",
"platforms",
"wrangle",
"clearness",
"anecdotes",
"hurting",
"tooled",
"angora",
"narrate",
"statistician",
"philosoph",
"assertions",
"indefinitely",
"parsimonious",
"bribing",
"tolerant",
"lilies",
"sulfate",
"righteously",
"stereotypical",
"degeneracy",
"similarity",
"pastimes",
"informed",
"polypropylene",
"backlog",
"typography",
"survivors",
"reconfiguring",
"gadding",
"caryatid",
"scuttling",
"semaphores",
"debugged",
"pacification",
"carbone",
"firearms",
"neurophysiology",
"blazing",
"ballrooms",
"thunderbolts",
"forefather",
"rachel",
"collision",
"reticulately",
"resignations",
"interactions",
"conspirator",
"basilar",
"climaxes",
"draining",
"cabinets",
"checksumming",
"suicide",
"coffees",
"mescaline",
"tininess",
"tinder",
"binomial",
"berates",
"cashed",
"bellwethers",
"carbonation",
"kalamazoo",
"thyroglobulin",
"kidnappers",
"numbed",
"shiftiness",
"presuming",
"achievements",
"amplifiers",
"lurches",
"cataclysmic",
"subvert",
"paragon",
"hoppers",
"lapels",
"recast",
"pitilessly",
"coffins",
"outstretched",
"perceiving",
"thoughtfully",
"taking",
"stems",
"favors",
"streets",
"quieting",
"monoid",
"delectable",
"encoding",
"jejune",
"sincere",
"goober",
"testes",
"lexicon",
"richter",
"covenants",
"pitiers",
"quintessence",
"yellower",
"equitably",
"dickens",
"contentment",
"bessemer",
"metabole",
"timetables",
"solemnity",
"report",
"indiana",
"fortunate",
"sweepstake",
"lapelled",
"arduousness",
"blunts",
"anorthosite",
"acclimatized",
"potters",
"babysitter",
"graveyard",
"forthcoming",
"glimmer",
"knaves",
"purposed",
"entice",
"amorality",
"poetics",
"frightened",
"dilution",
"erastus",
"anabaptists",
"carport",
"whatre",
"harpsichord",
"marvin",
"triers",
"dumbbells",
"hopefulness",
"sorting",
"continentally",
"asynchronism",
"illustratively",
"afforestation",
"constitutional",
"arcsin",
"darlings",
"removes",
"incompletion",
"bitterroot",
"blissfully",
"splash",
"manfred",
"rashly",
"bustling",
"hathaway",
"lacerating",
"underplayed",
"roundhead",
"purposefully",
"baldly",
"steadily",
"syndromes",
"subversion",
"lunchtime",
"congressman",
"mouses",
"valences",
"perhaps",
"sawmills",
"pinehurst",
"comparison",
"expansive",
"kidnappers",
"occasioned",
"transferable",
"transducer",
"synchrotron",
"rutile",
"effete",
"savaged",
"dearths",
"reading",
"horizons",
"scabbards",
"presences",
"trinity",
"isinglass",
"abusive",
"critics",
"recalculates",
"loitering",
"atrium",
"terrorizing",
"unblocked",
"trickery",
"accomplices",
"bleedings",
"wholesomeness",
"opaquely",
"epitaph",
"escorted",
"automatically",
"interdependence",
"ludicrously",
"necessary",
"chastising",
"beneficences",
"mbabane",
"floury",
"iniquity",
"craftsmen",
"feasted",
"inviting",
"gasoline",
"balsam",
"finality",
"howling",
"largesse",
"docile",
"conveniences",
"sensors",
"dinners",
"alterable",
"overhangs",
"satisfaction",
"semicolons",
"eclipses",
"languish",
"symbol",
"praecox",
"muskox",
"inheritrices",
"invade",
"measurable",
"converses",
"enlivened",
"tantrums",
"stopped",
"wavefront",
"constance",
"humanness",
"postscripts",
"troublesomely",
"unfavorable",
"tarbell",
"antagonistically",
"unsure",
"bridgehead",
"imprints",
"neurons",
"volatilities",
"renowned",
"midwestern",
"mistakable",
"recover",
"stoichiometry",
"hating",
"feuds",
"superscripts",
"irvine",
"vertebrates",
"fifteenth",
"parthenon",
"southbound",
"nineveh",
"twitched",
"flooded",
"sectors",
"tractable",
"subtrees",
"suffering",
"doggedness",
"shoveled",
"poisons",
"cinders",
"payoff",
"enjoyably",
"applicative",
"aspersion",
"analytic",
"tactician",
"forgiveness",
"shibboleth",
"equalizing",
"deluxe",
"frankest",
"epistemology",
"donated",
"armpits",
"corpsmen",
"pyhrric",
"backers",
"clearings",
"patience",
"bijectively",
"screwbean",
"careers",
"delaney",
"applications",
"autocrats",
"homeopath",
"abysmal",
"ribbon",
"bimodal",
"relaxations",
"fascism",
"chatterer",
"inaugurated",
"uncaught",
"hardhat",
"voiceband",
"wetland",
"additions",
"yugoslav",
"atrophying",
"plebeian",
"duplication",
"birthplaces",
"farmers",
"suspends",
"retrograde",
"transgress",
"harassing",
"yonder",
"formulators",
"alhambra",
"attesting",
"fairport",
"osseous",
"oratorical",
"songbook",
"wingspan",
"loggers",
"julies",
"highlighted",
"granter",
"louver",
"seersucker",
"parley",
"beheading",
"soothes",
"crises",
"rounded",
"lesser",
"streamline",
"measure",
"rusticate",
"ordained",
"gabrielle",
"haplessness",
"raccoon",
"explosions",
"freakish",
"soother",
"pravda",
"icebox",
"homing",
"leeway",
"handsomeness",
"commodity",
"liberated",
"playwrights",
"rebroadcast",
"instincts",
"bumblers",
"variously",
"martyrs",
"wednesdays",
"thresholds",
"azimuths",
"aspire",
"touchdown",
"drudge",
"teared",
"exposing",
"downside",
"lofts",
"lebensraum",
"nether",
"nozzle",
"lawsuits",
"gustafson",
"predates",
"meritorious",
"searchings",
"bundling",
"scenarios",
"schmidt",
"fischbein",
"bewitched",
"grassiest",
"disbursed",
"indestructible",
"burning",
"swearer",
"spirits",
"cushman",
"timbre",
"seconding",
"giggle",
"ordered",
"bibliophile",
"affianced",
"camera",
"bernet",
"facilitated",
"highways",
"grandly",
"inkings",
"shelves",
"staggering",
"bloomington",
"magnesite",
"colored",
"sunned",
"audiometer",
"affectation",
"maiden",
"subfields",
"axiomatically",
"respire",
"palmate",
"domino",
"depressed",
"industrials",
"expellable",
"justine",
"nuclei",
"balkanize",
"refreshments",
"santiago",
"organizers",
"prospections",
"antagonizing",
"resort",
"incoherent",
"nonsense",
"highwayman",
"members",
"sightings",
"unique",
"relentlessness",
"chorused",
"recipes",
"strongest",
"airlocks",
"brethren",
"legate",
"capitalist",
"digestive",
"bookcases",
"controller",
"deficit",
"masochism",
"unlocks",
"recreated",
"bulbs",
"avertive",
"penetrable",
"mynheer",
"attracts",
"axiomatizes",
"typical",
"resuming",
"remarkable",
"cookery",
"excused",
"meals",
"bodybuilders",
"canoes",
"harmonize",
"uncertainly",
"blowup",
"former",
"squaring",
"alphabetical",
"disciplines",
"genera",
"melody",
"cossack",
"varyings",
"abridge",
"harvested",
"tenney",
"zodiacal",
"labrador",
"honoraries",
"heuristics",
"billeted",
"outlays",
"scrimmage",
"folksy",
"resented",
"audits",
"pilgrims",
"compromisers",
"niggardly",
"patchwork",
"briefest",
"comatose",
"faceplate",
"variations",
"assertively",
"tutelage",
"precedences",
"bleating",
"suspensor",
"divisors",
"youngest",
"ruthlessly",
"breadth",
"abundance",
"penalized",
"promotes",
"thermometers",
"retired",
"circumlocutions",
"recites",
"nettlesome",
"woodside",
"differentials",
"handicaps",
"lawmen",
"budging",
"balkanized",
"instantiated",
"contradict",
"insinuations",
"assigned",
"budgeter",
"hailing",
"strolls",
"ultrasound",
"liberties",
"pantomime",
"triplex",
"inroad",
"befouling",
"foregoing",
"gullys",
}
| {
word := Words[rand.Int31n(int32(len(Words)))]
digits := rand.Int31n(1000)
return fmt.Sprintf("%s%d", word, digits)
} | identifier_body |
username.go | package people
import (
"fmt"
"math/rand"
)
// Select a random word and append some numbers to it to make something username-looking.
func | () string {
word := Words[rand.Int31n(int32(len(Words)))]
digits := rand.Int31n(1000)
return fmt.Sprintf("%s%d", word, digits)
}
var Words = []string{
"acquirable",
"bestsellers",
"farther",
"prizer",
"shasta",
"evaporate",
"auspices",
"garments",
"partnership",
"blocs",
"forestalling",
"razors",
"extensibility",
"unavoidably",
"logician",
"embroidered",
"crippling",
"supranational",
"milton",
"healthily",
"spiraling",
"coolies",
"bartend",
"precondition",
"reflectors",
"judged",
"rinser",
"amplify",
"casseroles",
"physics",
"raider",
"whippet",
"expulsion",
"enzyme",
"prohibit",
"gazers",
"unchangeable",
"matching",
"mouthe",
"millihenry",
"plowshare",
"quicken",
"blackmailing",
"chatham",
"jobbing",
"augustly",
"constitutionality",
"cathodes",
"inspirations",
"seniority",
"staging",
"figuratively",
"beckon",
"rankle",
"buzzwords",
"mccullough",
"justifying",
"antiquities",
"ardency",
"tribunals",
"laughs",
"shakes",
"feedback",
"balustrade",
"mattress",
"seduces",
"attainments",
"counterattack",
"sweeter",
"deforestation",
"digests",
"sacrificed",
"scripts",
"philharmonic",
"legerdemain",
"advancements",
"disburse",
"bottles",
"scatterbrain",
"conceptions",
"planer",
"fishpond",
"tidying",
"illustration",
"dishonoring",
"impostors",
"aspect",
"summations",
"steering",
"cheesy",
"hamlets",
"cryptanalyst",
"ensued",
"upholsterer",
"detaining",
"penned",
"robbers",
"contingency",
"effectively",
"soybean",
"clockings",
"pappas",
"jellies",
"formulae",
"routines",
"savoyard",
"redefining",
"insistently",
"macroscopic",
"taster",
"phosphates",
"midsts",
"invertebrates",
"vices",
"vacancy",
"predominated",
"timeshare",
"convincing",
"paralleling",
"conceived",
"guggenheim",
"paintings",
"dispells",
"incapacitating",
"nostrand",
"pliant",
"sleuth",
"grammar",
"wallows",
"dismisses",
"wilhelm",
"exiling",
"checkers",
"proceedings",
"hoarsely",
"stretches",
"purport",
"limousine",
"inheritresses",
"company",
"thruway",
"hopkinsian",
"downcast",
"dangers",
"anatomically",
"allure",
"stampers",
"executive",
"postmaster",
"depressing",
"dragons",
"countys",
"harriet",
"attire",
"runway",
"bubbled",
"waterman",
"gerhardt",
"honorableness",
"flurry",
"refract",
"bacteria",
"antiques",
"provide",
"mysteriously",
"interrogation",
"discontinuous",
"victrola",
"replications",
"passion",
"thawed",
"alligator",
"documentaries",
"nakedness",
"veining",
"durability",
"corrosion",
"laterally",
"winnipeg",
"federally",
"divest",
"gasped",
"unselfishly",
"disclosing",
"nurturing",
"tramway",
"palmed",
"disruptions",
"footman",
"senators",
"cleave",
"effected",
"ceramic",
"leathery",
"nicely",
"frustrater",
"warning",
"lexicons",
"exactions",
"prover",
"recreates",
"puddling",
"diabolic",
"spatula",
"herons",
"blobs",
"fibrosity",
"cabinetmake",
"phobic",
"jingling",
"double",
"proving",
"taipei",
"skims",
"prophesied",
"hastily",
"parasitics",
"landings",
"taxicabs",
"subway",
"recount",
"noisemake",
"induce",
"mountaineer",
"achieved",
"celebrities",
"fluffy",
"bimini",
"briefcases",
"devote",
"stylishly",
"cleansing",
"disclaimed",
"phonemes",
"impertinent",
"connecting",
"lentil",
"revelations",
"phoned",
"lading",
"lengthens",
"nobles",
"despairing",
"hatchets",
"livably",
"lodger",
"tokens",
"ensurers",
"interconnects",
"passionate",
"peppergrass",
"bookkeep",
"humerus",
"thanklessness",
"shamed",
"choreography",
"swimmers",
"authors",
"football",
"auditions",
"greener",
"deflater",
"tariff",
"banjos",
"packages",
"gambit",
"heated",
"interfere",
"collectors",
"sideboards",
"shoreline",
"rutherford",
"ethnology",
"persecuting",
"operatives",
"demark",
"curtate",
"inheritress",
"economizer",
"pleural",
"broiling",
"minting",
"ricochet",
"lookup",
"biases",
"auctioneers",
"formula",
"morphism",
"outstripped",
"falsifying",
"fealty",
"homesteads",
"dilate",
"councilmen",
"cornea",
"intercept",
"adjoins",
"medals",
"autonomic",
"monologue",
"cruisers",
"psychoanalyst",
"registrations",
"agnostics",
"ambivalently",
"punishable",
"philosophically",
"storages",
"wistful",
"loveland",
"preferential",
"armchairs",
"washington",
"accretions",
"interchangeable",
"ambitions",
"hostesss",
"heading",
"crucifies",
"venturesome",
"mullion",
"fueling",
"bedposts",
"soapstone",
"garland",
"heaved",
"instrumentalists",
"patristic",
"tableau",
"plagiarist",
"disambiguate",
"autopilot",
"anointing",
"retypes",
"pirates",
"obfuscatory",
"octennial",
"indeterminately",
"defended",
"childbirth",
"liberation",
"kilograms",
"elaborates",
"snyaptic",
"granitic",
"carthage",
"deteriorate",
"matilda",
"antislavery",
"batter",
"cringes",
"aerosolize",
"floppily",
"caribbean",
"woodbury",
"wrapper",
"capistrano",
"meats",
"overdrafts",
"gnats",
"sympathetic",
"pritchard",
"subscripted",
"chinquapin",
"skater",
"counterfeiter",
"leathern",
"tabula",
"bowled",
"reagan",
"appropriators",
"curing",
"pacific",
"scandalous",
"anesthetized",
"reinforcements",
"conner",
"complains",
"conjugal",
"enumerator",
"inconclusive",
"pipelines",
"synthesizer",
"intimate",
"saturater",
"splintered",
"taxonomy",
"roaring",
"transduction",
"collegial",
"breakdown",
"adducing",
"debenture",
"jeopardy",
"intoxicant",
"rescue",
"phrased",
"cartwheel",
"remedies",
"penguin",
"shined",
"codification",
"impugn",
"doorbell",
"ludlow",
"visibility",
"agglutinins",
"apposition",
"pathogenic",
"bestial",
"present",
"encyclopedic",
"qualifiers",
"realists",
"baptism",
"plasticity",
"transitioned",
"atalanta",
"crucially",
"trackers",
"identities",
"cursors",
"backspace",
"airships",
"multilevel",
"concretely",
"gazette",
"intelligibility",
"cottager",
"denigrated",
"unimpeded",
"matisse",
"thrashed",
"impious",
"ceaseless",
"callisto",
"lollipop",
"defenestrated",
"reredos",
"chemic",
"foulest",
"solemn",
"staley",
"ballfield",
"alameda",
"panaceas",
"nabisco",
"strainer",
"hackmatack",
"hemispheric",
"cogitated",
"customizing",
"pushbutton",
"dressmaker",
"amending",
"penance",
"seasonal",
"chromium",
"offsaddle",
"atrophy",
"souffle",
"platforms",
"wrangle",
"clearness",
"anecdotes",
"hurting",
"tooled",
"angora",
"narrate",
"statistician",
"philosoph",
"assertions",
"indefinitely",
"parsimonious",
"bribing",
"tolerant",
"lilies",
"sulfate",
"righteously",
"stereotypical",
"degeneracy",
"similarity",
"pastimes",
"informed",
"polypropylene",
"backlog",
"typography",
"survivors",
"reconfiguring",
"gadding",
"caryatid",
"scuttling",
"semaphores",
"debugged",
"pacification",
"carbone",
"firearms",
"neurophysiology",
"blazing",
"ballrooms",
"thunderbolts",
"forefather",
"rachel",
"collision",
"reticulately",
"resignations",
"interactions",
"conspirator",
"basilar",
"climaxes",
"draining",
"cabinets",
"checksumming",
"suicide",
"coffees",
"mescaline",
"tininess",
"tinder",
"binomial",
"berates",
"cashed",
"bellwethers",
"carbonation",
"kalamazoo",
"thyroglobulin",
"kidnappers",
"numbed",
"shiftiness",
"presuming",
"achievements",
"amplifiers",
"lurches",
"cataclysmic",
"subvert",
"paragon",
"hoppers",
"lapels",
"recast",
"pitilessly",
"coffins",
"outstretched",
"perceiving",
"thoughtfully",
"taking",
"stems",
"favors",
"streets",
"quieting",
"monoid",
"delectable",
"encoding",
"jejune",
"sincere",
"goober",
"testes",
"lexicon",
"richter",
"covenants",
"pitiers",
"quintessence",
"yellower",
"equitably",
"dickens",
"contentment",
"bessemer",
"metabole",
"timetables",
"solemnity",
"report",
"indiana",
"fortunate",
"sweepstake",
"lapelled",
"arduousness",
"blunts",
"anorthosite",
"acclimatized",
"potters",
"babysitter",
"graveyard",
"forthcoming",
"glimmer",
"knaves",
"purposed",
"entice",
"amorality",
"poetics",
"frightened",
"dilution",
"erastus",
"anabaptists",
"carport",
"whatre",
"harpsichord",
"marvin",
"triers",
"dumbbells",
"hopefulness",
"sorting",
"continentally",
"asynchronism",
"illustratively",
"afforestation",
"constitutional",
"arcsin",
"darlings",
"removes",
"incompletion",
"bitterroot",
"blissfully",
"splash",
"manfred",
"rashly",
"bustling",
"hathaway",
"lacerating",
"underplayed",
"roundhead",
"purposefully",
"baldly",
"steadily",
"syndromes",
"subversion",
"lunchtime",
"congressman",
"mouses",
"valences",
"perhaps",
"sawmills",
"pinehurst",
"comparison",
"expansive",
"kidnappers",
"occasioned",
"transferable",
"transducer",
"synchrotron",
"rutile",
"effete",
"savaged",
"dearths",
"reading",
"horizons",
"scabbards",
"presences",
"trinity",
"isinglass",
"abusive",
"critics",
"recalculates",
"loitering",
"atrium",
"terrorizing",
"unblocked",
"trickery",
"accomplices",
"bleedings",
"wholesomeness",
"opaquely",
"epitaph",
"escorted",
"automatically",
"interdependence",
"ludicrously",
"necessary",
"chastising",
"beneficences",
"mbabane",
"floury",
"iniquity",
"craftsmen",
"feasted",
"inviting",
"gasoline",
"balsam",
"finality",
"howling",
"largesse",
"docile",
"conveniences",
"sensors",
"dinners",
"alterable",
"overhangs",
"satisfaction",
"semicolons",
"eclipses",
"languish",
"symbol",
"praecox",
"muskox",
"inheritrices",
"invade",
"measurable",
"converses",
"enlivened",
"tantrums",
"stopped",
"wavefront",
"constance",
"humanness",
"postscripts",
"troublesomely",
"unfavorable",
"tarbell",
"antagonistically",
"unsure",
"bridgehead",
"imprints",
"neurons",
"volatilities",
"renowned",
"midwestern",
"mistakable",
"recover",
"stoichiometry",
"hating",
"feuds",
"superscripts",
"irvine",
"vertebrates",
"fifteenth",
"parthenon",
"southbound",
"nineveh",
"twitched",
"flooded",
"sectors",
"tractable",
"subtrees",
"suffering",
"doggedness",
"shoveled",
"poisons",
"cinders",
"payoff",
"enjoyably",
"applicative",
"aspersion",
"analytic",
"tactician",
"forgiveness",
"shibboleth",
"equalizing",
"deluxe",
"frankest",
"epistemology",
"donated",
"armpits",
"corpsmen",
"pyhrric",
"backers",
"clearings",
"patience",
"bijectively",
"screwbean",
"careers",
"delaney",
"applications",
"autocrats",
"homeopath",
"abysmal",
"ribbon",
"bimodal",
"relaxations",
"fascism",
"chatterer",
"inaugurated",
"uncaught",
"hardhat",
"voiceband",
"wetland",
"additions",
"yugoslav",
"atrophying",
"plebeian",
"duplication",
"birthplaces",
"farmers",
"suspends",
"retrograde",
"transgress",
"harassing",
"yonder",
"formulators",
"alhambra",
"attesting",
"fairport",
"osseous",
"oratorical",
"songbook",
"wingspan",
"loggers",
"julies",
"highlighted",
"granter",
"louver",
"seersucker",
"parley",
"beheading",
"soothes",
"crises",
"rounded",
"lesser",
"streamline",
"measure",
"rusticate",
"ordained",
"gabrielle",
"haplessness",
"raccoon",
"explosions",
"freakish",
"soother",
"pravda",
"icebox",
"homing",
"leeway",
"handsomeness",
"commodity",
"liberated",
"playwrights",
"rebroadcast",
"instincts",
"bumblers",
"variously",
"martyrs",
"wednesdays",
"thresholds",
"azimuths",
"aspire",
"touchdown",
"drudge",
"teared",
"exposing",
"downside",
"lofts",
"lebensraum",
"nether",
"nozzle",
"lawsuits",
"gustafson",
"predates",
"meritorious",
"searchings",
"bundling",
"scenarios",
"schmidt",
"fischbein",
"bewitched",
"grassiest",
"disbursed",
"indestructible",
"burning",
"swearer",
"spirits",
"cushman",
"timbre",
"seconding",
"giggle",
"ordered",
"bibliophile",
"affianced",
"camera",
"bernet",
"facilitated",
"highways",
"grandly",
"inkings",
"shelves",
"staggering",
"bloomington",
"magnesite",
"colored",
"sunned",
"audiometer",
"affectation",
"maiden",
"subfields",
"axiomatically",
"respire",
"palmate",
"domino",
"depressed",
"industrials",
"expellable",
"justine",
"nuclei",
"balkanize",
"refreshments",
"santiago",
"organizers",
"prospections",
"antagonizing",
"resort",
"incoherent",
"nonsense",
"highwayman",
"members",
"sightings",
"unique",
"relentlessness",
"chorused",
"recipes",
"strongest",
"airlocks",
"brethren",
"legate",
"capitalist",
"digestive",
"bookcases",
"controller",
"deficit",
"masochism",
"unlocks",
"recreated",
"bulbs",
"avertive",
"penetrable",
"mynheer",
"attracts",
"axiomatizes",
"typical",
"resuming",
"remarkable",
"cookery",
"excused",
"meals",
"bodybuilders",
"canoes",
"harmonize",
"uncertainly",
"blowup",
"former",
"squaring",
"alphabetical",
"disciplines",
"genera",
"melody",
"cossack",
"varyings",
"abridge",
"harvested",
"tenney",
"zodiacal",
"labrador",
"honoraries",
"heuristics",
"billeted",
"outlays",
"scrimmage",
"folksy",
"resented",
"audits",
"pilgrims",
"compromisers",
"niggardly",
"patchwork",
"briefest",
"comatose",
"faceplate",
"variations",
"assertively",
"tutelage",
"precedences",
"bleating",
"suspensor",
"divisors",
"youngest",
"ruthlessly",
"breadth",
"abundance",
"penalized",
"promotes",
"thermometers",
"retired",
"circumlocutions",
"recites",
"nettlesome",
"woodside",
"differentials",
"handicaps",
"lawmen",
"budging",
"balkanized",
"instantiated",
"contradict",
"insinuations",
"assigned",
"budgeter",
"hailing",
"strolls",
"ultrasound",
"liberties",
"pantomime",
"triplex",
"inroad",
"befouling",
"foregoing",
"gullys",
}
| Username | identifier_name |
lib.rs | #![deny(missing_docs)]
//! An append-only, on-disk key-value index with lockless reads
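//!
//! A minimal usage sketch (added for illustration only; the crate name
//! `appendix`, the on-disk directory, and the `u64` key/value types are
//! assumptions, and error handling is elided):
//!
//! ```no_run
//! use appendix::Index;
//!
//! // The directory is assumed to already exist; lane files are created inside it.
//! let index: Index<u64, u64> = Index::new(&"/tmp/index-dir").unwrap();
//! index.insert(17, 42).unwrap();
//! assert_eq!(index.get(&17).unwrap(), Some(&42));
//! ```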
use std::cell::UnsafeCell;
use std::fs::OpenOptions;
use std::hash::{Hash, Hasher};
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use memmap::MmapMut;
use parking_lot::{Mutex, MutexGuard};
use seahash::SeaHasher;
const NUM_LANES: usize = 64;
const NUM_SHARDS: usize = 1024;
const PAGE_SIZE: usize = 4096;
const FIRST_LANE_PAGES: usize = 64;
// marker struct for shard-mutexes
struct Shard;
lazy_static! {
static ref SHARDS: ArrayVec<[Mutex<Shard>; NUM_SHARDS]> = {
let mut locks = ArrayVec::new();
for _ in 0..NUM_SHARDS {
locks.push(Mutex::new(Shard))
}
locks
};
}
#[inline(always)]
fn hash_val<T: Hash>(t: &T) -> u64 {
let mut hasher = SeaHasher::new();
t.hash(&mut hasher);
hasher.finish()
}
enum Found<'a, K, V> {
Some(&'a Entry<K, V>),
None(usize, usize, usize),
Invalid(usize, usize, usize),
}
/// Marker type telling you your update was a no-op
pub type AlreadyThere = bool;
/// On-disk index structure mapping keys to values
pub struct Index<K, V> {
lanes: UnsafeCell<ArrayVec<[MmapMut; NUM_LANES]>>,
path: PathBuf,
pages: Mutex<u64>,
_marker: PhantomData<(K, V)>,
}
unsafe impl<K, V> Send for Index<K, V> {}
unsafe impl<K, V> Sync for Index<K, V> {}
#[derive(Debug)]
struct Entry<K, V> {
key: K,
val: V,
next: u64,
kv_checksum: u64,
next_checksum: u64,
}
// Wrapper reference for mutating entries, carrying a mutex guard
struct EntryMut<'a, K, V> {
entry: &'a mut Entry<K, V>,
_lock: MutexGuard<'a, Shard>,
}
impl<'a, K, V> Deref for EntryMut<'a, K, V> {
type Target = Entry<K, V>;
fn deref(&self) -> &Self::Target {
&self.entry
}
}
impl<'a, K, V> DerefMut for EntryMut<'a, K, V> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.entry
}
}
impl<K: Hash, V: Hash> Entry<K, V> {
fn new(key: K, val: V) -> Self {
let kv_checksum = hash_val(&key).wrapping_add(hash_val(&val));
let entry = Entry {
key,
val,
kv_checksum,
next: 0,
next_checksum: 0 + 1,
};
debug_assert!(entry.valid());
entry
}
fn valid(&self) -> bool {
if hash_val(&self.key).wrapping_add(hash_val(&self.val))
== self.kv_checksum
&& self.next + 1 == self.next_checksum
{
true
} else {
false
}
}
fn set_next<I: Into<u64>>(&mut self, next: I) {
let next = next.into();
self.next = next;
self.next_checksum = next + 1;
}
}
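// Illustration (added; not part of the original source): an all-zero slot, as
// found on a freshly grown lane file, never passes `valid()`, because
// `next_checksum` must equal `next + 1`. This is what lets `Index::new` probe
// pages for a valid entry when counting already-occupied pages.
#[cfg(test)]
#[allow(dead_code)]
fn zeroed_entry_is_invalid() {
let zeroed: Entry<u64, u64> = unsafe { mem::zeroed() };
assert!(!zeroed.valid());
}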
impl<K: Hash + Copy + PartialEq, V: Hash + Copy> Index<K, V> {
/// Create or load an index at `path`
pub fn new<P: AsRef<Path>>(path: &P) -> io::Result<Self> {
let mut lanes = ArrayVec::new();
// check for lane files already on disk
for n in 0..NUM_LANES {
let mut pathbuf = PathBuf::from(path.as_ref());
pathbuf.push(&format!("{:02x}", n));
if pathbuf.exists() {
let file =
OpenOptions::new().read(true).write(true).open(&pathbuf)?;
let lane_pages = Self::lane_pages(n);
let file_len = PAGE_SIZE as u64 * lane_pages as u64;
file.set_len(file_len)?;
unsafe { lanes.push(MmapMut::map_mut(&file)?) };
}
}
// find the number of already occupied pages
let mut num_pages = 0;
if let Some(last) = lanes.last() {
// help the type inference along a bit.
let last: &MmapMut = last;
// add up pages of all but the last lane, since they must all be full
let mut full_pages = 0;
for n in 0..lanes.len().saturating_sub(1) {
println!("lane {}, pages {}", n, Self::lane_pages(n));
full_pages += Self::lane_pages(n)
}
// do a binary search to find the last populated page in the last lane
let mut low_bound = 0;
let mut high_bound = Self::lane_pages(lanes.len() - 1) - 1;
while low_bound + 1 != high_bound {
let check = low_bound + (high_bound - low_bound) / 2;
println!(
"low bound: {}, high bound: {}, check {}",
low_bound, high_bound, check,
);
let page_ofs = PAGE_SIZE * check;
// is there a valid entry in this page?
for slot in 0..Self::entries_per_page() {
let slot_ofs =
page_ofs + slot * mem::size_of::<Entry<K, V>>();
let ptr = last.as_ptr();
let entry: &Entry<K, V> = unsafe {
mem::transmute(ptr.offset(slot_ofs as isize))
};
if entry.valid() {
low_bound = check;
break;
}
}
if low_bound != check {
high_bound = check
}
}
num_pages = full_pages + high_bound;
}
// create the index
let index = Index {
lanes: UnsafeCell::new(lanes),
path: PathBuf::from(path.as_ref()),
pages: Mutex::new(num_pages as u64),
_marker: PhantomData,
};
// initialize index with at least one page
if num_pages == 0 {
assert_eq!(index.new_page()?, 0);
}
Ok(index)
}
/// Returns how many pages have been allocated so far
pub fn pages(&self) -> usize {
*self.pages.lock() as usize
}
/// Returns how many pages fit into one lane
#[inline(always)]
fn lane_pages(n: usize) -> usize {
2_usize.pow(n as u32) * FIRST_LANE_PAGES
}
#[inline(always)]
fn entries_per_page() -> usize {
PAGE_SIZE / mem::size_of::<Entry<K, V>>()
}
// calculates the slot in the page this hashed key would
// occupy at a certain depth
#[inline(always)]
fn slot(key_hash: u64, depth: usize) -> usize {
(hash_val(&(key_hash + depth as u64)) % Self::entries_per_page() as u64)
as usize
}
// produces the following output over pages with FIRST_LANE_PAGES = 2
// (0, 0), (0, 1),
// (1, 0), (1, 1), (1, 2), (1, 3),
// (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
// ... and so on and so forth ...
#[inline(always)]
fn lane_page(page: usize) -> (usize, usize) {
let usize_bits = mem::size_of::<usize>() * 8;
let i = page / FIRST_LANE_PAGES + 1;
let lane = usize_bits - i.leading_zeros() as usize - 1;
let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES;
(lane, page)
}
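// Worked example (added for illustration; not part of the original source).
// With the actual FIRST_LANE_PAGES = 64, lane 0 holds pages 0..64, lane 1 the
// next 128 pages, lane 2 the next 256 pages, and so on:
#[cfg(test)]
#[allow(dead_code)]
fn lane_page_worked_example() {
assert_eq!(Self::lane_page(0), (0, 0)); // first page of lane 0
assert_eq!(Self::lane_page(63), (0, 63)); // last page of lane 0
assert_eq!(Self::lane_page(64), (1, 0)); // lane 1 starts here
assert_eq!(Self::lane_page(191), (1, 127)); // last page of lane 1
assert_eq!(Self::lane_page(192), (2, 0)); // lane 2 starts here
}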
fn new_lane(&self) -> io::Result<()> {
let lanes_ptr = self.lanes.get();
let lane_nr = unsafe { (*lanes_ptr).len() };
let num_pages = Self::lane_pages(lane_nr);
let mut path = self.path.clone();
path.push(format!("{:02x}", lane_nr));
let file_len = PAGE_SIZE as u64 * num_pages as u64;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len(file_len)?;
unsafe { (*lanes_ptr).push(MmapMut::map_mut(&file)?) }
Ok(())
}
fn new_page(&self) -> io::Result<u64> {
let mut page_nr = self.pages.lock();
let (_, offset) = Self::lane_page(*page_nr as usize);
if offset == 0 {
// create new lane
self.new_lane()?
}
let new_page_nr = *page_nr;
*page_nr += 1;
Ok(new_page_nr)
}
// Get a shared reference to the `Entry`.
fn entry(&self, lane: usize, page: usize, slot: usize) -> &Entry<K, V> |
// Get a mutable reference to the `Entry`,
// locking the corresponding shard.
fn entry_mut(
&self,
lane: usize,
page: usize,
slot: usize,
) -> EntryMut<K, V> {
let shard = (page ^ slot) % NUM_SHARDS;
// Lock the entry for writing
let lock = SHARDS[shard].lock();
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
EntryMut {
entry: unsafe {
mem::transmute(
(*self.lanes.get())[lane]
.as_ptr()
.offset(slot_ofs as isize),
)
},
_lock: lock,
}
}
// Traverse the tree to find the entry for this key
fn find_key(&self, k: &K) -> io::Result<Found<K, V>> {
let mut depth = 0;
let mut abs_page = 0;
loop {
let hash = hash_val(&k);
let slot = Self::slot(hash, depth);
let (lane, page) = Self::lane_page(abs_page);
let entry = self.entry(lane, page, slot);
if !entry.valid() {
return Ok(Found::Invalid(lane, page, slot));
}
if &entry.key == k {
return Ok(Found::Some(entry));
} else if entry.next == 0 {
return Ok(Found::None(lane, page, slot));
} else {
abs_page = entry.next as usize;
}
depth += 1;
}
}
/// Inserts a key-value pair into the index, if the key is already
/// present, this is a no-op
pub fn insert(&self, key: K, val: V) -> io::Result<AlreadyThere> {
match self.find_key(&key)? {
Found::Some(_) => {
// no-op
Ok(true)
}
Found::Invalid(lane, page, slot) => {
let mut entry = self.entry_mut(lane, page, slot);
if entry.valid() && entry.next != 0 {
// Someone already wrote here, recurse!
// We accept the performance hit of re-traversing
// the whole tree, since this case is uncommon,
// and makes the implementation simpler.
mem::drop(entry);
self.insert(key, val)
} else {
*entry = Entry::new(key, val);
return Ok(false);
}
}
Found::None(lane, page, slot) => {
let mut entry = self.entry_mut(lane, page, slot);
if entry.next != 0 {
// again, another thread was here before us
} else {
entry.set_next(self.new_page()?);
}
// recurse
mem::drop(entry);
self.insert(key, val)
}
}
}
/// Looks up a value with `key` in the index
pub fn get(&self, key: &K) -> io::Result<Option<&V>> {
match self.find_key(key)? {
Found::Some(entry) => Ok(Some(&entry.val)),
_ => Ok(None),
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::thread;
use rand::{seq::SliceRandom, thread_rng};
use tempfile::tempdir;
use super::*;
#[test]
fn simple() {
let dir = tempdir().unwrap();
let index = Index::new(&dir).unwrap();
index.insert(0, 0).unwrap();
assert_eq!(index.get(&0).unwrap(), Some(&0));
}
const N: u64 = 1024 * 256;
#[test]
fn multiple() {
let dir = tempdir().unwrap();
let index = Index::new(&dir).unwrap();
for i in 0..N {
index.insert(i, i).unwrap();
}
for i in 0..N {
assert_eq!(index.get(&i).unwrap(), Some(&i));
}
}
#[test]
fn reload() {
let dir = tempdir().unwrap();
let mut pages;
{
{
let index_a = Index::new(&dir).unwrap();
for i in 0..N {
index_a.insert(i, i).unwrap();
}
pages = index_a.pages();
mem::drop(index_a);
}
let index_b = Index::new(&dir).unwrap();
// make sure the page count matches
assert_eq!(pages, index_b.pages());
for i in 0..N {
assert_eq!(index_b.get(&i).unwrap(), Some(&i));
}
for i in N..N * 2 {
index_b.insert(i, i).unwrap();
}
pages = index_b.pages();
mem::drop(index_b);
}
let index_c = Index::new(&dir).unwrap();
// make sure the page count matches
assert_eq!(pages, index_c.pages());
for i in 0..N * 2 {
assert_eq!(index_c.get(&i).unwrap(), Some(&i));
}
}
const N_THREADS: usize = 8;
// The stress test creates an index, and simultaneously writes
// entries in random order from `N_THREADS` threads,
// while at the same time reading from an equal amount of threads.
//
// When all threads are finished, a final read-through is made to see
// that all key value pairs are present.
#[test]
fn stress() {
let dir = tempdir().unwrap();
let index = Arc::new(Index::new(&dir).unwrap());
let mut all_indicies = vec![];
for i in 0..N {
all_indicies.push(i);
}
let mut rng = thread_rng();
// shuffle the order of the writes
let mut shuffles_write = vec![];
for _ in 0..N_THREADS {
let mut new = all_indicies.clone();
SliceRandom::shuffle(&mut new[..], &mut rng);
shuffles_write.push(new);
}
// shuffle the order of the reads
let mut shuffles_read = vec![];
for _ in 0..N_THREADS {
let mut new = all_indicies.clone();
SliceRandom::shuffle(&mut new[..], &mut rng);
shuffles_read.push(new);
}
let mut threads_running = vec![];
for i in 0..N_THREADS {
// shuffled write
let shuffle_write = mem::replace(&mut shuffles_write[i], vec![]);
let index_write = index.clone();
// write threads
threads_running.push(thread::spawn(move || {
for write in shuffle_write {
index_write.insert(write, write).unwrap();
}
}));
// shuffled reads
let shuffle_read = mem::replace(&mut shuffles_read[i], vec![]);
let index_read = index.clone();
// read threads
threads_running.push(thread::spawn(move || {
for read in shuffle_read {
match index_read.get(&read).unwrap() {
Some(val) => assert_eq!(val, &read),
None => (),
}
}
}));
}
// make sure all threads finish successfully
for thread in threads_running {
thread.join().unwrap()
}
for i in 0..N {
assert_eq!(index.get(&i).unwrap(), Some(&i));
}
}
}
| {
// Get a reference to the `Entry`
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
unsafe {
mem::transmute(
(*self.lanes.get())[lane].as_ptr().offset(slot_ofs as isize),
)
}
} | identifier_body |
lib.rs | #![deny(missing_docs)]
//! An append-only, on-disk key-value index with lockless reads
use std::cell::UnsafeCell;
use std::fs::OpenOptions;
use std::hash::{Hash, Hasher};
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use memmap::MmapMut;
use parking_lot::{Mutex, MutexGuard};
use seahash::SeaHasher;
const NUM_LANES: usize = 64;
const NUM_SHARDS: usize = 1024;
const PAGE_SIZE: usize = 4096;
const FIRST_LANE_PAGES: usize = 64;
// marker struct for shard-mutexes
struct Shard;
lazy_static! {
static ref SHARDS: ArrayVec<[Mutex<Shard>; NUM_SHARDS]> = {
let mut locks = ArrayVec::new();
for _ in 0..NUM_SHARDS {
locks.push(Mutex::new(Shard))
}
locks
};
}
#[inline(always)]
fn hash_val<T: Hash>(t: &T) -> u64 {
let mut hasher = SeaHasher::new();
t.hash(&mut hasher);
hasher.finish()
}
enum Found<'a, K, V> {
Some(&'a Entry<K, V>),
None(usize, usize, usize),
Invalid(usize, usize, usize),
}
/// Marker type telling you your update was a no-op
pub type AlreadyThere = bool;
/// On-disk index structure mapping keys to values
pub struct Index<K, V> {
lanes: UnsafeCell<ArrayVec<[MmapMut; NUM_LANES]>>,
path: PathBuf,
pages: Mutex<u64>,
_marker: PhantomData<(K, V)>,
}
unsafe impl<K, V> Send for Index<K, V> {}
unsafe impl<K, V> Sync for Index<K, V> {}
#[derive(Debug)]
struct Entry<K, V> {
key: K,
val: V,
next: u64,
kv_checksum: u64,
next_checksum: u64,
}
// Wrapper reference for mutating entries, carrying a mutex guard
struct EntryMut<'a, K, V> {
entry: &'a mut Entry<K, V>,
_lock: MutexGuard<'a, Shard>,
}
impl<'a, K, V> Deref for EntryMut<'a, K, V> {
type Target = Entry<K, V>;
fn deref(&self) -> &Self::Target {
&self.entry
}
}
impl<'a, K, V> DerefMut for EntryMut<'a, K, V> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.entry
}
}
impl<K: Hash, V: Hash> Entry<K, V> {
fn new(key: K, val: V) -> Self {
let kv_checksum = hash_val(&key).wrapping_add(hash_val(&val));
let entry = Entry {
key,
val,
kv_checksum,
next: 0,
next_checksum: 0 + 1,
};
debug_assert!(entry.valid());
entry
}
fn valid(&self) -> bool {
if hash_val(&self.key).wrapping_add(hash_val(&self.val))
== self.kv_checksum
&& self.next + 1 == self.next_checksum
{
true
} else {
false
}
}
fn set_next<I: Into<u64>>(&mut self, next: I) {
let next = next.into();
self.next = next;
self.next_checksum = next + 1;
}
}
impl<K: Hash + Copy + PartialEq, V: Hash + Copy> Index<K, V> {
/// Create or load an index at `path`
pub fn new<P: AsRef<Path>>(path: &P) -> io::Result<Self> {
let mut lanes = ArrayVec::new();
// check for lane files already on disk
for n in 0..NUM_LANES {
let mut pathbuf = PathBuf::from(path.as_ref());
pathbuf.push(&format!("{:02x}", n));
if pathbuf.exists() {
let file =
OpenOptions::new().read(true).write(true).open(&pathbuf)?;
let lane_pages = Self::lane_pages(n);
let file_len = PAGE_SIZE as u64 * lane_pages as u64;
file.set_len(file_len)?;
unsafe { lanes.push(MmapMut::map_mut(&file)?) };
}
}
// find the number of already occupied pages
let mut num_pages = 0;
if let Some(last) = lanes.last() {
// help the type inference along a bit.
let last: &MmapMut = last;
// add up pages of all but the last lane, since they must all be full
let mut full_pages = 0;
for n in 0..lanes.len().saturating_sub(1) {
println!("lane {}, pages {}", n, Self::lane_pages(n));
full_pages += Self::lane_pages(n)
}
// do a binary search to find the last populated page in the last lane
let mut low_bound = 0;
let mut high_bound = Self::lane_pages(lanes.len() - 1) - 1;
while low_bound + 1 != high_bound {
let check = low_bound + (high_bound - low_bound) / 2;
println!(
"low bound: {}, high bound: {}, check {}",
low_bound, high_bound, check,
);
let page_ofs = PAGE_SIZE * check;
// is there a valid entry in this page?
for slot in 0..Self::entries_per_page() {
let slot_ofs =
page_ofs + slot * mem::size_of::<Entry<K, V>>();
let ptr = last.as_ptr();
let entry: &Entry<K, V> = unsafe {
mem::transmute(ptr.offset(slot_ofs as isize))
};
if entry.valid() {
low_bound = check;
break;
}
}
if low_bound != check {
high_bound = check
}
}
num_pages = full_pages + high_bound;
}
| pages: Mutex::new(num_pages as u64),
_marker: PhantomData,
};
// initialize index with at least one page
if num_pages == 0 {
assert_eq!(index.new_page()?, 0);
}
Ok(index)
}
/// Returns how many pages have been allocated so far
pub fn pages(&self) -> usize {
*self.pages.lock() as usize
}
/// Returns how many pages fit into one lane
#[inline(always)]
fn lane_pages(n: usize) -> usize {
2_usize.pow(n as u32) * FIRST_LANE_PAGES
}
#[inline(always)]
fn entries_per_page() -> usize {
PAGE_SIZE / mem::size_of::<Entry<K, V>>()
}
// calculates the slot in the page this hashed key would
// occupy at a certain depth
#[inline(always)]
fn slot(key_hash: u64, depth: usize) -> usize {
(hash_val(&(key_hash + depth as u64)) % Self::entries_per_page() as u64)
as usize
}
// produces the following output over pages with FIRST_LANE_PAGES = 2
// (0, 0), (0, 1),
// (1, 0), (1, 1), (1, 2), (1, 3),
// (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
// ... and so on and so forth ...
#[inline(always)]
fn lane_page(page: usize) -> (usize, usize) {
let usize_bits = mem::size_of::<usize>() * 8;
let i = page / FIRST_LANE_PAGES + 1;
let lane = usize_bits - i.leading_zeros() as usize - 1;
let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES;
(lane, page)
}
fn new_lane(&self) -> io::Result<()> {
let lanes_ptr = self.lanes.get();
let lane_nr = unsafe { (*lanes_ptr).len() };
let num_pages = Self::lane_pages(lane_nr);
let mut path = self.path.clone();
path.push(format!("{:02x}", lane_nr));
let file_len = PAGE_SIZE as u64 * num_pages as u64;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len(file_len)?;
unsafe { (*lanes_ptr).push(MmapMut::map_mut(&file)?) }
Ok(())
}
fn new_page(&self) -> io::Result<u64> {
let mut page_nr = self.pages.lock();
let (_, offset) = Self::lane_page(*page_nr as usize);
if offset == 0 {
// create new lane
self.new_lane()?
}
let new_page_nr = *page_nr;
*page_nr += 1;
Ok(new_page_nr)
}
// Get a shared reference to the `Entry`.
fn entry(&self, lane: usize, page: usize, slot: usize) -> &Entry<K, V> {
// Get a reference to the `Entry`
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
unsafe {
mem::transmute(
(*self.lanes.get())[lane].as_ptr().offset(slot_ofs as isize),
)
}
}
// Get a mutable reference to the `Entry`,
// locking the corresponding shard.
fn entry_mut(
&self,
lane: usize,
page: usize,
slot: usize,
) -> EntryMut<K, V> {
let shard = (page ^ slot) % NUM_SHARDS;
// Lock the entry for writing
let lock = SHARDS[shard].lock();
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
EntryMut {
entry: unsafe {
mem::transmute(
(*self.lanes.get())[lane]
.as_ptr()
.offset(slot_ofs as isize),
)
},
_lock: lock,
}
}
// Traverse the tree to find the entry for this key
fn find_key(&self, k: &K) -> io::Result<Found<K, V>> {
let mut depth = 0;
let mut abs_page = 0;
loop {
let hash = hash_val(&k);
let slot = Self::slot(hash, depth);
let (lane, page) = Self::lane_page(abs_page);
let entry = self.entry(lane, page, slot);
if !entry.valid() {
return Ok(Found::Invalid(lane, page, slot));
}
if &entry.key == k {
return Ok(Found::Some(entry));
} else if entry.next == 0 {
return Ok(Found::None(lane, page, slot));
} else {
abs_page = entry.next as usize;
}
depth += 1;
}
}
/// Inserts a key-value pair into the index, if the key is already
/// present, this is a no-op
pub fn insert(&self, key: K, val: V) -> io::Result<AlreadyThere> {
match self.find_key(&key)? {
Found::Some(_) => {
// no-op
Ok(true)
}
Found::Invalid(lane, page, slot) => {
let mut entry = self.entry_mut(lane, page, slot);
if entry.valid() && entry.next != 0 {
// Someone already wrote here, recurse!
// We accept the performance hit of re-traversing
// the whole tree, since this case is uncommon,
// and makes the implementation simpler.
mem::drop(entry);
self.insert(key, val)
} else {
*entry = Entry::new(key, val);
return Ok(false);
}
}
Found::None(lane, page, slot) => {
let mut entry = self.entry_mut(lane, page, slot);
if entry.next != 0 {
// again, another thread was here before us
} else {
entry.set_next(self.new_page()?);
}
// recurse
mem::drop(entry);
self.insert(key, val)
}
}
}
/// Looks up a value with `key` in the index
pub fn get(&self, key: &K) -> io::Result<Option<&V>> {
match self.find_key(key)? {
Found::Some(entry) => Ok(Some(&entry.val)),
_ => Ok(None),
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::thread;
use rand::{seq::SliceRandom, thread_rng};
use tempfile::tempdir;
use super::*;
#[test]
fn simple() {
let dir = tempdir().unwrap();
let index = Index::new(&dir).unwrap();
index.insert(0, 0).unwrap();
assert_eq!(index.get(&0).unwrap(), Some(&0));
}
const N: u64 = 1024 * 256;
#[test]
fn multiple() {
let dir = tempdir().unwrap();
let index = Index::new(&dir).unwrap();
for i in 0..N {
index.insert(i, i).unwrap();
}
for i in 0..N {
assert_eq!(index.get(&i).unwrap(), Some(&i));
}
}
#[test]
fn reload() {
let dir = tempdir().unwrap();
let mut pages;
{
{
let index_a = Index::new(&dir).unwrap();
for i in 0..N {
index_a.insert(i, i).unwrap();
}
pages = index_a.pages();
mem::drop(index_a);
}
let index_b = Index::new(&dir).unwrap();
// make sure the page count matches
assert_eq!(pages, index_b.pages());
for i in 0..N {
assert_eq!(index_b.get(&i).unwrap(), Some(&i));
}
for i in N..N * 2 {
index_b.insert(i, i).unwrap();
}
pages = index_b.pages();
mem::drop(index_b);
}
let index_c = Index::new(&dir).unwrap();
// make sure the page count matches
assert_eq!(pages, index_c.pages());
for i in 0..N * 2 {
assert_eq!(index_c.get(&i).unwrap(), Some(&i));
}
}
const N_THREADS: usize = 8;
// The stress test creates an index, and simultaneously writes
// entries in random order from `N_THREADS` threads,
// while at the same time reading from an equal amount of threads.
//
// When all threads are finished, a final read-through is made to see
// that all key value pairs are present.
#[test]
fn stress() {
let dir = tempdir().unwrap();
let index = Arc::new(Index::new(&dir).unwrap());
let mut all_indicies = vec![];
for i in 0..N {
all_indicies.push(i);
}
let mut rng = thread_rng();
// shuffle the order of the writes
let mut shuffles_write = vec![];
for _ in 0..N_THREADS {
let mut new = all_indicies.clone();
SliceRandom::shuffle(&mut new[..], &mut rng);
shuffles_write.push(new);
}
// shuffle the order of the reads
let mut shuffles_read = vec![];
for _ in 0..N_THREADS {
let mut new = all_indicies.clone();
SliceRandom::shuffle(&mut new[..], &mut rng);
shuffles_read.push(new);
}
let mut threads_running = vec![];
for i in 0..N_THREADS {
// shuffled write
let shuffle_write = mem::replace(&mut shuffles_write[i], vec![]);
let index_write = index.clone();
// write threads
threads_running.push(thread::spawn(move || {
for write in shuffle_write {
index_write.insert(write, write).unwrap();
}
}));
// shuffled reads
let shuffle_read = mem::replace(&mut shuffles_read[i], vec![]);
let index_read = index.clone();
// read threads
threads_running.push(thread::spawn(move || {
for read in shuffle_read {
match index_read.get(&read).unwrap() {
Some(val) => assert_eq!(val, &read),
None => (),
}
}
}));
}
// make sure all threads finish successfully
for thread in threads_running {
thread.join().unwrap()
}
for i in 0..N {
assert_eq!(index.get(&i).unwrap(), Some(&i));
}
}
} | // create the index
let index = Index {
lanes: UnsafeCell::new(lanes),
path: PathBuf::from(path.as_ref()), | random_line_split |
lib.rs | #![deny(missing_docs)]
//! An append-only, on-disk key-value index with lockless reads
use std::cell::UnsafeCell;
use std::fs::OpenOptions;
use std::hash::{Hash, Hasher};
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use memmap::MmapMut;
use parking_lot::{Mutex, MutexGuard};
use seahash::SeaHasher;
const NUM_LANES: usize = 64;
const NUM_SHARDS: usize = 1024;
const PAGE_SIZE: usize = 4096;
const FIRST_LANE_PAGES: usize = 64;
// marker struct for shard-mutexes
struct Shard;
lazy_static! {
static ref SHARDS: ArrayVec<[Mutex<Shard>; NUM_SHARDS]> = {
let mut locks = ArrayVec::new();
for _ in 0..NUM_SHARDS {
locks.push(Mutex::new(Shard))
}
locks
};
}
#[inline(always)]
fn hash_val<T: Hash>(t: &T) -> u64 {
let mut hasher = SeaHasher::new();
t.hash(&mut hasher);
hasher.finish()
}
enum Found<'a, K, V> {
Some(&'a Entry<K, V>),
None(usize, usize, usize),
Invalid(usize, usize, usize),
}
/// Marker type telling you your update was a no-op
pub type AlreadyThere = bool;
/// On-disk index structure mapping keys to values
pub struct Index<K, V> {
lanes: UnsafeCell<ArrayVec<[MmapMut; NUM_LANES]>>,
path: PathBuf,
pages: Mutex<u64>,
_marker: PhantomData<(K, V)>,
}
unsafe impl<K, V> Send for Index<K, V> {}
unsafe impl<K, V> Sync for Index<K, V> {}
#[derive(Debug)]
struct Entry<K, V> {
key: K,
val: V,
next: u64,
kv_checksum: u64,
next_checksum: u64,
}
// Wrapper reference for mutating entries, carrying a mutex guard
struct EntryMut<'a, K, V> {
entry: &'a mut Entry<K, V>,
_lock: MutexGuard<'a, Shard>,
}
impl<'a, K, V> Deref for EntryMut<'a, K, V> {
type Target = Entry<K, V>;
fn | (&self) -> &Self::Target {
&self.entry
}
}
impl<'a, K, V> DerefMut for EntryMut<'a, K, V> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.entry
}
}
impl<K: Hash, V: Hash> Entry<K, V> {
fn new(key: K, val: V) -> Self {
let kv_checksum = hash_val(&key).wrapping_add(hash_val(&val));
let entry = Entry {
key,
val,
kv_checksum,
next: 0,
next_checksum: 0 + 1,
};
debug_assert!(entry.valid());
entry
}
fn valid(&self) -> bool {
if hash_val(&self.key).wrapping_add(hash_val(&self.val))
== self.kv_checksum
&& self.next + 1 == self.next_checksum
{
true
} else {
false
}
}
fn set_next<I: Into<u64>>(&mut self, next: I) {
let next = next.into();
self.next = next;
self.next_checksum = next + 1;
}
}
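// Editor's note (not part of the original source): an Entry only counts as valid
// when kv_checksum == hash(key).wrapping_add(hash(val)) and next_checksum == next + 1,
// so a torn or partially written entry on disk fails valid() and is simply treated
// as an empty slot by the lookup code below.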
impl<K: Hash + Copy + PartialEq, V: Hash + Copy> Index<K, V> {
/// Create or load an index at `path`
pub fn new<P: AsRef<Path>>(path: &P) -> io::Result<Self> {
let mut lanes = ArrayVec::new();
// check for lane files already on disk
for n in 0..NUM_LANES {
let mut pathbuf = PathBuf::from(path.as_ref());
pathbuf.push(&format!("{:02x}", n));
if pathbuf.exists() {
let file =
OpenOptions::new().read(true).write(true).open(&pathbuf)?;
let lane_pages = Self::lane_pages(n);
let file_len = PAGE_SIZE as u64 * lane_pages as u64;
file.set_len(file_len)?;
unsafe { lanes.push(MmapMut::map_mut(&file)?) };
}
}
// find the number of already occupied pages
let mut num_pages = 0;
if let Some(last) = lanes.last() {
// help the type inference along a bit.
let last: &MmapMut = last;
// add up pages of all but the last lane, since they must all be full
let mut full_pages = 0;
for n in 0..lanes.len().saturating_sub(1) {
println!("lane {}, pages {}", n, Self::lane_pages(n));
full_pages += Self::lane_pages(n)
}
// do a binary search to find the last populated page in the last lane
let mut low_bound = 0;
let mut high_bound = Self::lane_pages(lanes.len() - 1) - 1;
while low_bound + 1 != high_bound {
let check = low_bound + (high_bound - low_bound) / 2;
println!(
"low bound: {}, high bound: {}, check {}",
low_bound, high_bound, check,
);
let page_ofs = PAGE_SIZE * check;
// is there a valid entry in this page?
for slot in 0..Self::entries_per_page() {
let slot_ofs =
page_ofs + slot * mem::size_of::<Entry<K, V>>();
let ptr = last.as_ptr();
let entry: &Entry<K, V> = unsafe {
mem::transmute(ptr.offset(slot_ofs as isize))
};
if entry.valid() {
low_bound = check;
break;
}
}
if low_bound != check {
high_bound = check
}
}
num_pages = full_pages + high_bound;
}
// create the index
let index = Index {
lanes: UnsafeCell::new(lanes),
path: PathBuf::from(path.as_ref()),
pages: Mutex::new(num_pages as u64),
_marker: PhantomData,
};
// initialize index with at least one page
if num_pages == 0 {
assert_eq!(index.new_page()?, 0);
}
Ok(index)
}
/// Returns how many pages have been allocated so far
pub fn pages(&self) -> usize {
*self.pages.lock() as usize
}
/// Returns how many pages fit into one lane
#[inline(always)]
fn lane_pages(n: usize) -> usize {
2_usize.pow(n as u32) * FIRST_LANE_PAGES
}
#[inline(always)]
fn entries_per_page() -> usize {
PAGE_SIZE / mem::size_of::<Entry<K, V>>()
}
// calculates the slot in the page this hashed key would
// occupy at a certain depth
#[inline(always)]
fn slot(key_hash: u64, depth: usize) -> usize {
(hash_val(&(key_hash + depth as u64)) % Self::entries_per_page() as u64)
as usize
}
// produces following output over page with FIRST_LANE_PAGES = 2
// (0, 0), (0, 1),
// (1, 0), (1, 1), (1, 2), (1, 3),
// (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
// ... and so on and so forth ...
#[inline(always)]
fn lane_page(page: usize) -> (usize, usize) {
let usize_bits = mem::size_of::<usize>() * 8;
let i = page / FIRST_LANE_PAGES + 1;
let lane = usize_bits - i.leading_zeros() as usize - 1;
let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES;
(lane, page)
}
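// Worked example (editor's sketch, not part of the original source): with
// FIRST_LANE_PAGES = 64, lane_page(0) == (0, 0), lane_page(63) == (0, 63),
// lane_page(64) == (1, 0), lane_page(191) == (1, 127) and lane_page(192) == (2, 0);
// each new lane holds twice as many pages as the previous one, matching the
// capacities returned by lane_pages(n).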
fn new_lane(&self) -> io::Result<()> {
let lanes_ptr = self.lanes.get();
let lane_nr = unsafe { (*lanes_ptr).len() };
let num_pages = Self::lane_pages(lane_nr);
let mut path = self.path.clone();
path.push(format!("{:02x}", lane_nr));
let file_len = PAGE_SIZE as u64 * num_pages as u64;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len(file_len)?;
unsafe { (*lanes_ptr).push(MmapMut::map_mut(&file)?) }
Ok(())
}
fn new_page(&self) -> io::Result<u64> {
let mut page_nr = self.pages.lock();
let (_, offset) = Self::lane_page(*page_nr as usize);
if offset == 0 {
// create new lane
self.new_lane()?
}
let new_page_nr = *page_nr;
*page_nr += 1;
Ok(new_page_nr)
}
// Get a shared reference to the `Entry`.
fn entry(&self, lane: usize, page: usize, slot: usize) -> &Entry<K, V> {
// Get a reference to the `Entry`
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
unsafe {
mem::transmute(
(*self.lanes.get())[lane].as_ptr().offset(slot_ofs as isize),
)
}
}
// Get a mutable reference to the `Entry`,
// locking the corresponding shard.
fn entry_mut(
&self,
lane: usize,
page: usize,
slot: usize,
) -> EntryMut<K, V> {
let shard = (page ^ slot) % NUM_SHARDS;
// Lock the entry for writing
let lock = SHARDS[shard].lock();
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
EntryMut {
entry: unsafe {
mem::transmute(
(*self.lanes.get())[lane]
.as_ptr()
.offset(slot_ofs as isize),
)
},
_lock: lock,
}
}
// Traverse the tree to find the entry for this key
fn find_key(&self, k: &K) -> io::Result<Found<K, V>> {
let mut depth = 0;
let mut abs_page = 0;
loop {
let hash = hash_val(&k);
let slot = Self::slot(hash, depth);
let (lane, page) = Self::lane_page(abs_page);
let entry = self.entry(lane, page, slot);
if !entry.valid() {
return Ok(Found::Invalid(lane, page, slot));
}
if &entry.key == k {
return Ok(Found::Some(entry));
} else if entry.next == 0 {
return Ok(Found::None(lane, page, slot));
} else {
abs_page = entry.next as usize;
}
depth += 1;
}
}
/// Inserts a key-value pair into the index, if the key is already
/// present, this is a no-op
pub fn insert(&self, key: K, val: V) -> io::Result<AlreadyThere> {
match self.find_key(&key)? {
Found::Some(_) => {
// no-op
Ok(true)
}
Found::Invalid(lane, page, slot) => {
let mut entry = self.entry_mut(lane, page, slot);
if entry.valid() && entry.next != 0 {
// Someone already wrote here, recurse!
// We accept the performance hit of re-traversing
// the whole tree, since this case is uncommon,
// and makes the implementation simpler.
mem::drop(entry);
self.insert(key, val)
} else {
*entry = Entry::new(key, val);
return Ok(false);
}
}
Found::None(lane, page, slot) => {
let mut entry = self.entry_mut(lane, page, slot);
if entry.next != 0 {
// again, another thread was here before us
} else {
entry.set_next(self.new_page()?);
}
// recurse
mem::drop(entry);
self.insert(key, val)
}
}
}
/// Looks up a value with `key` in the index
pub fn get(&self, key: &K) -> io::Result<Option<&V>> {
match self.find_key(key)? {
Found::Some(entry) => Ok(Some(&entry.val)),
_ => Ok(None),
}
}
}
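// Usage sketch (editor's addition, not in the original source; the directory path
// is hypothetical and must already exist):
//
// fn demo() -> std::io::Result<()> {
//     let index: Index<u64, u64> = Index::new(&"/tmp/index_dir")?;
//     index.insert(42, 1337)?;
//     assert_eq!(index.get(&42)?, Some(&1337));
//     Ok(())
// }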
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::thread;
use rand::{seq::SliceRandom, thread_rng};
use tempfile::tempdir;
use super::*;
#[test]
fn simple() {
let dir = tempdir().unwrap();
let index = Index::new(&dir).unwrap();
index.insert(0, 0).unwrap();
assert_eq!(index.get(&0).unwrap(), Some(&0));
}
const N: u64 = 1024 * 256;
#[test]
fn multiple() {
let dir = tempdir().unwrap();
let index = Index::new(&dir).unwrap();
for i in 0..N {
index.insert(i, i).unwrap();
}
for i in 0..N {
assert_eq!(index.get(&i).unwrap(), Some(&i));
}
}
#[test]
fn reload() {
let dir = tempdir().unwrap();
let mut pages;
{
{
let index_a = Index::new(&dir).unwrap();
for i in 0..N {
index_a.insert(i, i).unwrap();
}
pages = index_a.pages();
mem::drop(index_a);
}
let index_b = Index::new(&dir).unwrap();
// make sure the page count matches
assert_eq!(pages, index_b.pages());
for i in 0..N {
assert_eq!(index_b.get(&i).unwrap(), Some(&i));
}
for i in N..N * 2 {
index_b.insert(i, i).unwrap();
}
pages = index_b.pages();
mem::drop(index_b);
}
let index_c = Index::new(&dir).unwrap();
// make sure the page count matches
assert_eq!(pages, index_c.pages());
for i in 0..N * 2 {
assert_eq!(index_c.get(&i).unwrap(), Some(&i));
}
}
const N_THREADS: usize = 8;
// The stress test creates an index, and simultaneously writes
// entries in random order from `N_THREADS` threads,
// while at the same time reading from an equal amount of threads.
//
// When all threads are finished, a final read-through is made to see
// that all key value pairs are present.
#[test]
fn stress() {
let dir = tempdir().unwrap();
let index = Arc::new(Index::new(&dir).unwrap());
let mut all_indicies = vec![];
for i in 0..N {
all_indicies.push(i);
}
let mut rng = thread_rng();
// shuffle the order of the writes
let mut shuffles_write = vec![];
for _ in 0..N_THREADS {
let mut new = all_indicies.clone();
SliceRandom::shuffle(&mut new[..], &mut rng);
shuffles_write.push(new);
}
// shuffle the order of the reads
let mut shuffles_read = vec![];
for _ in 0..N_THREADS {
let mut new = all_indicies.clone();
SliceRandom::shuffle(&mut new[..], &mut rng);
shuffles_read.push(new);
}
let mut threads_running = vec![];
for i in 0..N_THREADS {
// shuffled write
let shuffle_write = mem::replace(&mut shuffles_write[i], vec![]);
let index_write = index.clone();
// write threads
threads_running.push(thread::spawn(move || {
for write in shuffle_write {
index_write.insert(write, write).unwrap();
}
}));
// shuffled reads
let shuffle_read = mem::replace(&mut shuffles_read[i], vec![]);
let index_read = index.clone();
// read threads
threads_running.push(thread::spawn(move || {
for read in shuffle_read {
match index_read.get(&read).unwrap() {
Some(val) => assert_eq!(val, &read),
None => (),
}
}
}));
}
// make sure all threads finish successfully
for thread in threads_running {
thread.join().unwrap()
}
for i in 0..N {
assert_eq!(index.get(&i).unwrap(), Some(&i));
}
}
}
| deref | identifier_name |
blockchain.py | from flask import Flask, request, jsonify, render_template, redirect, session, g, url_for
from time import time, ctime
from flask_cors import CORS
from collections import OrderedDict
import binascii
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from uuid import uuid4
import json
import hashlib
import requests
from urllib.parse import urlparse
from bson.objectid import ObjectId
from pymongo import MongoClient
from dotenv import load_dotenv
import os
load_dotenv()
MONGODB_URL = os.getenv('MONGODB_URL')
#qr code library
import qrcode
qr = qrcode.QRCode(version=1,box_size=10,border=5)
#db connection
client = MongoClient(MONGODB_URL)
db = client.get_database('Blockchain')
transaction_blocks = db.chain_db
minerdb = db.miners
MINING_SENDER = "The Blockchain"
MINING_REWARD = 1
MINING_DIFFICULTY = 2
t = time()
class Blockchain:
def __init__(self):
self.transactions = []
self.chain = []
self.nodes = set()
self.node_id = str(uuid4()).replace('-', '')
# Create the genesis block
self.create_block(0, '00')
# data = {'block_number':0, "nonce": 0, "previous_hash": 00, "timestamp": 0, "transactions":[]}
# self.chain.append(data)
def register_node(self, node_url):
parsed_url = urlparse(node_url)
if parsed_url.netloc:
self.nodes.add(parsed_url.netloc)
elif parsed_url.path:
self.nodes.add(parsed_url.path)
else:
raise ValueError('Invalid URL')
def create_block(self, nonce, previous_hash):
"""
Add a block of transactions to the blockchain
"""
block = {'block_number': transaction_blocks.count() + 1,
'timestamp': ctime(time()),
'transactions': self.transactions,
'nonce': nonce,
'previous_hash': previous_hash}
# Reset the current list of transactions
self.transactions = []
self.chain.append(block)
return block
def verify_transaction_signature(self, sender_public_key, signature, transaction):
public_key = RSA.importKey(binascii.unhexlify(sender_public_key))
verifier = PKCS1_v1_5.new(public_key)
h = SHA.new(str(transaction).encode('utf8'))
try:
verifier.verify(h, binascii.unhexlify(signature))
return True
except ValueError:
return False
@staticmethod
def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):
guess = (str(transactions) + str(last_hash) + str(nonce)).encode('utf8')
h = hashlib.new('sha256')
h.update(guess)
guess_hash = h.hexdigest()
return guess_hash[:difficulty] == '0' * difficulty
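# Editor's note (not in the original source): with MINING_DIFFICULTY = 2 a nonce is
# accepted only when the sha256 hex digest of str(transactions) + str(last_hash) + str(nonce)
# starts with '00'; each extra leading zero multiplies the expected search effort by 16.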
def proof_of_work(self):
last_block = self.chain[-1]
last_hash = self.hash(last_block)
nonce = 0
while self.valid_proof(self.transactions, last_hash, nonce) is False:
nonce += 1
return nonce
@staticmethod
def hash(block):
# We must ensure that the dictionary is ordered, otherwise we'll get inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode('utf8')
h = hashlib.new('sha256')
h.update(block_string)
return h.hexdigest()
def resolve_conflicts(self):
neighbours = self.nodes
new_chain = None
max_length = len(self.chain)
for node in neighbours:
response = requests.get('http://' + node + '/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
if new_chain:
self.chain = new_chain
return True
return False
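# Editor's note: this is the usual longest-valid-chain rule - the node adopts a
# neighbour's chain only when it is strictly longer than the local chain and
# passes valid_chain().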
def valid_chain(self, chain):
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
if block['previous_hash'] != self.hash(last_block):
return False
transactions = block['transactions'][:-1]
transaction_elements = ['sender_public_key', 'recipient_public_key', 'signature', 'product_name', 'product_details', 'amount']
transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in
transactions]
if not self.valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):
return False
last_block = block
current_index += 1
return True
def submit_transaction(self, sender_public_key, recipient_public_key, product_name, product_details,signature, amount):
transaction = OrderedDict({
'sender_public_key': sender_public_key,
'recipient_public_key': recipient_public_key,
'signature': signature,
'product_name': product_name,
'product_details': product_details,
'amount': amount
})
# Reward for mining a block
if sender_public_key == MINING_SENDER:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
# Transaction from wallet to another wallet
signature_verification = self.verify_transaction_signature(sender_public_key, signature, transaction)
if signature_verification:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
return False
# Instantiate the Blockchain
blockchain = Blockchain()
# Instantiate the Node
app = Flask(__name__)
app.secret_key = 'secretkey'
CORS(app)
@app.before_request
def before_request():
g.user = None
if 'miner_email' in session:
miner = minerdb.find_one( {"email": session['miner_email'],})
g.user = miner
@app.route('/')
def index():
return render_template('./index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('miner_email',None)
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
user = minerdb.find_one( { "email": email,})
if user and user.get('password') == password:
session['miner_email'] = user.get('email')
return redirect('/profile')
else:
return redirect('/login')
return render_template('login.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
name = request.form.get('minerName')
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
minerdata = {'name':name, "email":email, "password":password}
minerdb.insert_one(minerdata)
return redirect('/login')
return render_template('register.html')
@app.route('/logout')
def logout():
# remove the username from the session if it is there
session.pop('miner_email', None)
return redirect('/login')
@app.route('/profile')
def profile():
if not g.user:
return redirect('/login')
return render_template('profile.html')
# @app.route('/register_credentials', methods=['POST'])
# def minerRegistration():
# name = request.form.get('minerName')
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# minerdata = {'name':name, "email":email, "password":password}
# minerdb.insert_one(minerdata)
# return redirect('/login')
# @app.route('/login_credentials', methods=['POST'])
# def minerLogin():
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# user = minerdb.find_one( { "email": email, "password" : password})
# if user:
# return redirect('/')
# else:
# return redirect('/login')
# return 'Miner email is {} and password is {}'.format(email, password)
@app.route('/configure')
def configure():
return render_template('./configure.html')
@app.route('/transactions/get', methods=['GET'])
def get_transactions():
transactions = blockchain.transactions
response = {'transactions': transactions}
return jsonify(response), 200
@app.route('/chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
# new_chain = {
# 'data': response
# }
#
# transaction_blocks.insert_one(new_chain)
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm
nonce = blockchain.proof_of_work()
blockchain.submit_transaction(sender_public_key=MINING_SENDER,
recipient_public_key=blockchain.node_id,
product_name='',
product_details='',
signature='',
amount=MINING_REWARD)
last_block = blockchain.chain[-1]
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(nonce, previous_hash)
response = {
'message': 'New block created',
'timestamp': block['timestamp'],
'block_number': block['block_number'],
'transactions': block['transactions'],
'nonce': block['nonce'],
'previous_hash': block['previous_hash'],
}
u_id = uuid4()
# add data into mongodb atlas
new_chain = {
'u_id': u_id,
'data': response
}
transaction_blocks.insert_one(new_chain)
id = transaction_blocks.find_one({'u_id': u_id})
print(id)
ob_id = id.get('_id')
# over db lines
# QR Code generation
data = ob_id
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill="black", back_color="white")
img.save('test.png')
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.form
required = ['confirmation_sender_public_key', 'confirmation_recipient_public_key', 'confirmation_product_name', 'confirmation_product_details', 'transaction_signature',
'confirmation_amount']
if not all(k in values for k in required):
return 'Missing values', 400
transaction_results = blockchain.submit_transaction(values['confirmation_sender_public_key'],
values['confirmation_recipient_public_key'],
values['confirmation_product_name'],
values['confirmation_product_details'],
values['transaction_signature'],
values['confirmation_amount'])
if transaction_results == False:
response = {'message': 'Invalid transaction/signature'}
return jsonify(response), 406
else:
response = {'message': 'Transaction will be added to the Block ' + str(transaction_results)}
return jsonify(response), 201
@app.route('/nodes/get', methods=['GET'])
def get_nodes():
nodes = list(blockchain.nodes)
response = {'nodes': nodes}
return jsonify(response), 200
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def | ():
values = request.form
# 127.0.0.1:5002,127.0.0.1:5003, 127.0.0.1:5004
nodes = values.get('nodes')
if nodes is None:
return 'Error: Please supply a valid list of nodes', 400
nodes = nodes.replace(' ', '').split(',')
for node in nodes:
blockchain.register_node(node)
response = {
'message': 'Nodes have been added',
'total_nodes': [node for node in blockchain.nodes]
}
return jsonify(response), 200
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5001, type=int, help="port to listen to")
args = parser.parse_args()
port = args.port
app.run(host='127.0.0.1', port=port, debug=True)
| register_node | identifier_name |
blockchain.py | from flask import Flask, request, jsonify, render_template, redirect, session, g, url_for
from time import time, ctime
from flask_cors import CORS
from collections import OrderedDict
import binascii
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from uuid import uuid4
import json
import hashlib
import requests
from urllib.parse import urlparse
from bson.objectid import ObjectId
from pymongo import MongoClient
from dotenv import load_dotenv
import os
load_dotenv()
MONGODB_URL = os.getenv('MONGODB_URL')
#qr code library
import qrcode
qr = qrcode.QRCode(version=1,box_size=10,border=5)
#db connection
client = MongoClient(MONGODB_URL)
db = client.get_database('Blockchain')
transaction_blocks = db.chain_db
minerdb = db.miners
MINING_SENDER = "The Blockchain"
MINING_REWARD = 1
MINING_DIFFICULTY = 2
t = time()
class Blockchain:
def __init__(self):
self.transactions = []
self.chain = []
self.nodes = set()
self.node_id = str(uuid4()).replace('-', '')
# Create the genesis block
self.create_block(0, '00')
# data = {'block_number':0, "nonce": 0, "previous_hash": 00, "timestamp": 0, "transactions":[]}
# self.chain.append(data)
def register_node(self, node_url):
parsed_url = urlparse(node_url)
if parsed_url.netloc:
self.nodes.add(parsed_url.netloc)
elif parsed_url.path:
self.nodes.add(parsed_url.path)
else:
raise ValueError('Invalid URL')
def create_block(self, nonce, previous_hash):
"""
Add a block of transactions to the blockchain
"""
block = {'block_number': transaction_blocks.count() + 1,
'timestamp': ctime(time()),
'transactions': self.transactions,
'nonce': nonce,
'previous_hash': previous_hash}
# Reset the current list of transactions
self.transactions = []
self.chain.append(block)
return block
def verify_transaction_signature(self, sender_public_key, signature, transaction):
public_key = RSA.importKey(binascii.unhexlify(sender_public_key))
verifier = PKCS1_v1_5.new(public_key)
h = SHA.new(str(transaction).encode('utf8'))
try:
verifier.verify(h, binascii.unhexlify(signature))
return True
except ValueError:
return False
@staticmethod
def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):
guess = (str(transactions) + str(last_hash) + str(nonce)).encode('utf8')
h = hashlib.new('sha256')
h.update(guess)
guess_hash = h.hexdigest()
return guess_hash[:difficulty] == '0' * difficulty
def proof_of_work(self):
last_block = self.chain[-1]
last_hash = self.hash(last_block)
nonce = 0
while self.valid_proof(self.transactions, last_hash, nonce) is False:
nonce += 1
return nonce
@staticmethod
def hash(block):
# We must ensure that the dictionary is ordered, otherwise we'll get inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode('utf8')
h = hashlib.new('sha256')
h.update(block_string)
return h.hexdigest()
def resolve_conflicts(self):
neighbours = self.nodes
new_chain = None
max_length = len(self.chain)
for node in neighbours:
response = requests.get('http://' + node + '/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
if new_chain:
self.chain = new_chain
return True
return False
def valid_chain(self, chain):
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
if block['previous_hash'] != self.hash(last_block):
return False
transactions = block['transactions'][:-1]
transaction_elements = ['sender_public_key', 'recipient_public_key', 'signature', 'product_name', 'product_details', 'amount']
transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in
transactions]
if not self.valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):
return False
last_block = block
current_index += 1
return True
def submit_transaction(self, sender_public_key, recipient_public_key, product_name, product_details,signature, amount):
transaction = OrderedDict({
'sender_public_key': sender_public_key,
'recipient_public_key': recipient_public_key,
'signature': signature,
'product_name': product_name,
'product_details': product_details,
'amount': amount
})
# Reward for mining a block
if sender_public_key == MINING_SENDER:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
# Transaction from wallet to another wallet
signature_verification = self.verify_transaction_signature(sender_public_key, signature, transaction)
if signature_verification:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
return False
# Instantiate the Blockchain
blockchain = Blockchain()
# Instantiate the Node
app = Flask(__name__)
app.secret_key = 'secretkey'
CORS(app)
@app.before_request
def before_request():
g.user = None
if 'miner_email' in session:
miner = minerdb.find_one( {"email": session['miner_email'],})
g.user = miner
@app.route('/')
def index():
return render_template('./index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('miner_email',None)
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
user = minerdb.find_one( { "email": email,})
if user and user.get('password') == password:
session['miner_email'] = user.get('email')
return redirect('/profile')
else:
return redirect('/login')
return render_template('login.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
|
@app.route('/logout')
def logout():
# remove the username from the session if it is there
session.pop('miner_email', None)
return redirect('/login')
@app.route('/profile')
def profile():
if not g.user:
return redirect('/login')
return render_template('profile.html')
# @app.route('/register_credentials', methods=['POST'])
# def minerRegistration():
# name = request.form.get('minerName')
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# minerdata = {'name':name, "email":email, "password":password}
# minerdb.insert_one(minerdata)
# return redirect('/login')
# @app.route('/login_credentials', methods=['POST'])
# def minerLogin():
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# user = minerdb.find_one( { "email": email, "password" : password})
# if user:
# return redirect('/')
# else:
# return redirect('/login')
# return 'Miner email is {} and password is {}'.format(email, password)
@app.route('/configure')
def configure():
return render_template('./configure.html')
@app.route('/transactions/get', methods=['GET'])
def get_transactions():
transactions = blockchain.transactions
response = {'transactions': transactions}
return jsonify(response), 200
@app.route('/chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
# new_chain = {
# 'data': response
# }
#
# transaction_blocks.insert_one(new_chain)
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm
nonce = blockchain.proof_of_work()
blockchain.submit_transaction(sender_public_key=MINING_SENDER,
recipient_public_key=blockchain.node_id,
product_name='',
product_details='',
signature='',
amount=MINING_REWARD)
last_block = blockchain.chain[-1]
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(nonce, previous_hash)
response = {
'message': 'New block created',
'timestamp': block['timestamp'],
'block_number': block['block_number'],
'transactions': block['transactions'],
'nonce': block['nonce'],
'previous_hash': block['previous_hash'],
}
u_id = uuid4()
# add data into mongodb atlas
new_chain = {
'u_id': u_id,
'data': response
}
transaction_blocks.insert_one(new_chain)
id = transaction_blocks.find_one({'u_id': u_id})
print(id)
ob_id = id.get('_id')
# over db lines
# QR Code generation
data = ob_id
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill="black", back_color="white")
img.save('test.png')
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.form
required = ['confirmation_sender_public_key', 'confirmation_recipient_public_key', 'confirmation_product_name', 'confirmation_product_details', 'transaction_signature',
'confirmation_amount']
if not all(k in values for k in required):
return 'Missing values', 400
transaction_results = blockchain.submit_transaction(values['confirmation_sender_public_key'],
values['confirmation_recipient_public_key'],
values['confirmation_product_name'],
values['confirmation_product_details'],
values['transaction_signature'],
values['confirmation_amount'])
if transaction_results == False:
response = {'message': 'Invalid transaction/signature'}
return jsonify(response), 406
else:
response = {'message': 'Transaction will be added to the Block ' + str(transaction_results)}
return jsonify(response), 201
@app.route('/nodes/get', methods=['GET'])
def get_nodes():
nodes = list(blockchain.nodes)
response = {'nodes': nodes}
return jsonify(response), 200
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def register_node():
values = request.form
# 127.0.0.1:5002,127.0.0.1:5003, 127.0.0.1:5004
nodes = values.get('nodes')
if nodes is None:
return 'Error: Please supply a valid list of nodes', 400
nodes = nodes.replace(' ', '').split(',')
for node in nodes:
blockchain.register_node(node)
response = {
'message': 'Nodes have been added',
'total_nodes': [node for node in blockchain.nodes]
}
return jsonify(response), 200
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5001, type=int, help="port to listen to")
args = parser.parse_args()
port = args.port
app.run(host='127.0.0.1', port=port, debug=True)
| if request.method == 'POST':
name = request.form.get('minerName')
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
minerdata = {'name':name, "email":email, "password":password}
minerdb.insert_one(minerdata)
return redirect('/login')
return render_template('register.html') | identifier_body |
blockchain.py | from flask import Flask, request, jsonify, render_template, redirect, session, g, url_for
from time import time, ctime
from flask_cors import CORS
from collections import OrderedDict
import binascii
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from uuid import uuid4
import json
import hashlib
import requests
from urllib.parse import urlparse
from bson.objectid import ObjectId
from pymongo import MongoClient
from dotenv import load_dotenv
import os
load_dotenv()
MONGODB_URL = os.getenv('MONGODB_URL')
#qr code library
import qrcode
qr = qrcode.QRCode(version=1,box_size=10,border=5)
#db connection
client = MongoClient(MONGODB_URL)
db = client.get_database('Blockchain')
transaction_blocks = db.chain_db
minerdb = db.miners
MINING_SENDER = "The Blockchain"
MINING_REWARD = 1
MINING_DIFFICULTY = 2
t = time()
class Blockchain:
def __init__(self):
self.transactions = []
self.chain = []
self.nodes = set()
self.node_id = str(uuid4()).replace('-', '')
# Create the genesis block
self.create_block(0, '00')
# data = {'block_number':0, "nonce": 0, "previous_hash": 00, "timestamp": 0, "transactions":[]}
# self.chain.append(data)
def register_node(self, node_url):
parsed_url = urlparse(node_url)
if parsed_url.netloc:
self.nodes.add(parsed_url.netloc)
elif parsed_url.path:
self.nodes.add(parsed_url.path)
else:
raise ValueError('Invalid URL')
def create_block(self, nonce, previous_hash):
"""
Add a block of transactions to the blockchain
"""
block = {'block_number': transaction_blocks.count() + 1,
'timestamp': ctime(time()),
'transactions': self.transactions,
'nonce': nonce,
'previous_hash': previous_hash}
# Reset the current list of transactions
self.transactions = []
self.chain.append(block)
return block
def verify_transaction_signature(self, sender_public_key, signature, transaction):
public_key = RSA.importKey(binascii.unhexlify(sender_public_key))
verifier = PKCS1_v1_5.new(public_key)
h = SHA.new(str(transaction).encode('utf8'))
try:
verifier.verify(h, binascii.unhexlify(signature))
return True
except ValueError:
return False
@staticmethod
def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):
guess = (str(transactions) + str(last_hash) + str(nonce)).encode('utf8')
h = hashlib.new('sha256')
h.update(guess)
guess_hash = h.hexdigest()
return guess_hash[:difficulty] == '0' * difficulty
def proof_of_work(self):
last_block = self.chain[-1]
last_hash = self.hash(last_block)
nonce = 0
while self.valid_proof(self.transactions, last_hash, nonce) is False:
nonce += 1
return nonce
@staticmethod
def hash(block):
# We must ensure that the dictionary is ordered, otherwise we'll get inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode('utf8')
h = hashlib.new('sha256')
h.update(block_string)
return h.hexdigest()
def resolve_conflicts(self):
neighbours = self.nodes
new_chain = None
max_length = len(self.chain)
for node in neighbours:
response = requests.get('http://' + node + '/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
if new_chain:
self.chain = new_chain
return True
return False
def valid_chain(self, chain):
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
if block['previous_hash'] != self.hash(last_block):
return False
transactions = block['transactions'][:-1]
transaction_elements = ['sender_public_key', 'recipient_public_key', 'signature', 'product_name', 'product_details', 'amount']
transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in
transactions]
if not self.valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):
return False
last_block = block
current_index += 1
return True
def submit_transaction(self, sender_public_key, recipient_public_key, product_name, product_details,signature, amount):
transaction = OrderedDict({
'sender_public_key': sender_public_key,
'recipient_public_key': recipient_public_key,
'signature': signature,
'product_name': product_name,
'product_details': product_details,
'amount': amount
})
# Reward for mining a block
if sender_public_key == MINING_SENDER:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
# Transaction from wallet to another wallet
signature_verification = self.verify_transaction_signature(sender_public_key, signature, transaction)
if signature_verification:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
return False
# Instantiate the Blockchain
blockchain = Blockchain()
# Instantiate the Node
app = Flask(__name__)
app.secret_key = 'secretkey'
CORS(app)
@app.before_request
def before_request():
g.user = None
if 'miner_email' in session:
miner = minerdb.find_one( {"email": session['miner_email'],})
g.user = miner
@app.route('/')
def index():
return render_template('./index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('miner_email',None)
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
user = minerdb.find_one( { "email": email,})
if user and user.get('password') == password:
session['miner_email'] = user.get('email')
return redirect('/profile')
else:
return redirect('/login')
return render_template('login.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
name = request.form.get('minerName')
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
minerdata = {'name':name, "email":email, "password":password}
minerdb.insert_one(minerdata)
return redirect('/login')
return render_template('register.html')
@app.route('/logout')
def logout():
# remove the username from the session if it is there
session.pop('miner_email', None)
return redirect('/login')
@app.route('/profile')
def profile():
if not g.user:
return redirect('/login')
return render_template('profile.html')
# @app.route('/register_credentials', methods=['POST'])
# def minerRegistration():
# name = request.form.get('minerName')
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# minerdata = {'name':name, "email":email, "password":password}
# minerdb.insert_one(minerdata)
# return redirect('/login')
# @app.route('/login_credentials', methods=['POST'])
# def minerLogin():
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass') |
# user = minerdb.find_one( { "email": email, "password" : password})
# if user:
# return redirect('/')
# else:
# return redirect('/login')
# return 'Miner email is {} and password is {}'.format(email, password)
@app.route('/configure')
def configure():
return render_template('./configure.html')
@app.route('/transactions/get', methods=['GET'])
def get_transactions():
transactions = blockchain.transactions
response = {'transactions': transactions}
return jsonify(response), 200
@app.route('/chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
# new_chain = {
# 'data': response
# }
#
# transaction_blocks.insert_one(new_chain)
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm
nonce = blockchain.proof_of_work()
blockchain.submit_transaction(sender_public_key=MINING_SENDER,
recipient_public_key=blockchain.node_id,
product_name='',
product_details='',
signature='',
amount=MINING_REWARD)
last_block = blockchain.chain[-1]
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(nonce, previous_hash)
response = {
'message': 'New block created',
'timestamp': block['timestamp'],
'block_number': block['block_number'],
'transactions': block['transactions'],
'nonce': block['nonce'],
'previous_hash': block['previous_hash'],
}
u_id = uuid4()
# add data into mongodb atlas
new_chain = {
'u_id': u_id,
'data': response
}
transaction_blocks.insert_one(new_chain)
id = transaction_blocks.find_one({'u_id': u_id})
print(id)
ob_id = id.get('_id')
# over db lines
# QR Code generation
data = ob_id
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill="black", back_color="white")
img.save('test.png')
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.form
required = ['confirmation_sender_public_key', 'confirmation_recipient_public_key', 'confirmation_product_name', 'confirmation_product_details', 'transaction_signature',
'confirmation_amount']
if not all(k in values for k in required):
return 'Missing values', 400
transaction_results = blockchain.submit_transaction(values['confirmation_sender_public_key'],
values['confirmation_recipient_public_key'],
values['confirmation_product_name'],
values['confirmation_product_details'],
values['transaction_signature'],
values['confirmation_amount'])
if transaction_results == False:
response = {'message': 'Invalid transaction/signature'}
return jsonify(response), 406
else:
response = {'message': 'Transaction will be added to the Block ' + str(transaction_results)}
return jsonify(response), 201
@app.route('/nodes/get', methods=['GET'])
def get_nodes():
nodes = list(blockchain.nodes)
response = {'nodes': nodes}
return jsonify(response), 200
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def register_node():
values = request.form
# 127.0.0.1:5002,127.0.0.1:5003, 127.0.0.1:5004
nodes = values.get('nodes')
if nodes is None:
return 'Error: Please supply a valid list of nodes', 400
nodes = nodes.replace(' ', '').split(',')
for node in nodes:
blockchain.register_node(node)
response = {
'message': 'Nodes have been added',
'total_nodes': [node for node in blockchain.nodes]
}
return jsonify(response), 200
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5001, type=int, help="port to listen to")
args = parser.parse_args()
port = args.port
app.run(host='127.0.0.1', port=port, debug=True) | random_line_split |
|
blockchain.py | from flask import Flask, request, jsonify, render_template, redirect, session, g, url_for
from time import time, ctime
from flask_cors import CORS
from collections import OrderedDict
import binascii
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from uuid import uuid4
import json
import hashlib
import requests
from urllib.parse import urlparse
from bson.objectid import ObjectId
from pymongo import MongoClient
from dotenv import load_dotenv
import os
load_dotenv()
MONGODB_URL = os.getenv('MONGODB_URL')
#qr code library
import qrcode
qr = qrcode.QRCode(version=1,box_size=10,border=5)
#db connection
client = MongoClient(MONGODB_URL)
db = client.get_database('Blockchain')
transaction_blocks = db.chain_db
minerdb = db.miners
MINING_SENDER = "The Blockchain"
MINING_REWARD = 1
MINING_DIFFICULTY = 2
t = time()
class Blockchain:
def __init__(self):
self.transactions = []
self.chain = []
self.nodes = set()
self.node_id = str(uuid4()).replace('-', '')
# Create the genesis block
self.create_block(0, '00')
# data = {'block_number':0, "nonce": 0, "previous_hash": 00, "timestamp": 0, "transactions":[]}
# self.chain.append(data)
def register_node(self, node_url):
parsed_url = urlparse(node_url)
if parsed_url.netloc:
self.nodes.add(parsed_url.netloc)
elif parsed_url.path:
self.nodes.add(parsed_url.path)
else:
raise ValueError('Invalid URL')
def create_block(self, nonce, previous_hash):
"""
Add a block of transactions to the blockchain
"""
block = {'block_number': transaction_blocks.count() + 1,
'timestamp': ctime(time()),
'transactions': self.transactions,
'nonce': nonce,
'previous_hash': previous_hash}
# Reset the current list of transactions
self.transactions = []
self.chain.append(block)
return block
def verify_transaction_signature(self, sender_public_key, signature, transaction):
public_key = RSA.importKey(binascii.unhexlify(sender_public_key))
verifier = PKCS1_v1_5.new(public_key)
h = SHA.new(str(transaction).encode('utf8'))
try:
verifier.verify(h, binascii.unhexlify(signature))
return True
except ValueError:
return False
@staticmethod
def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):
guess = (str(transactions) + str(last_hash) + str(nonce)).encode('utf8')
h = hashlib.new('sha256')
h.update(guess)
guess_hash = h.hexdigest()
return guess_hash[:difficulty] == '0' * difficulty
def proof_of_work(self):
last_block = self.chain[-1]
last_hash = self.hash(last_block)
nonce = 0
while self.valid_proof(self.transactions, last_hash, nonce) is False:
nonce += 1
return nonce
@staticmethod
def hash(block):
# We must ensure that the dictionary is ordered, otherwise we'll get inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode('utf8')
h = hashlib.new('sha256')
h.update(block_string)
return h.hexdigest()
def resolve_conflicts(self):
neighbours = self.nodes
new_chain = None
max_length = len(self.chain)
for node in neighbours:
response = requests.get('http://' + node + '/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
if new_chain:
self.chain = new_chain
return True
return False
def valid_chain(self, chain):
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
if block['previous_hash'] != self.hash(last_block):
return False
transactions = block['transactions'][:-1]
transaction_elements = ['sender_public_key', 'recipient_public_key', 'signature', 'product_name', 'product_details', 'amount']
transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in
transactions]
if not self.valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):
return False
last_block = block
current_index += 1
return True
def submit_transaction(self, sender_public_key, recipient_public_key, product_name, product_details,signature, amount):
transaction = OrderedDict({
'sender_public_key': sender_public_key,
'recipient_public_key': recipient_public_key,
'signature': signature,
'product_name': product_name,
'product_details': product_details,
'amount': amount
})
# Reward for mining a block
if sender_public_key == MINING_SENDER:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
# Transaction from wallet to another wallet
signature_verification = self.verify_transaction_signature(sender_public_key, signature, transaction)
if signature_verification:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
return False
# Instantiate the Blockchain
blockchain = Blockchain()
# Instantiate the Node
app = Flask(__name__)
app.secret_key = 'secretkey'
CORS(app)
@app.before_request
def before_request():
g.user = None
if 'miner_email' in session:
miner = minerdb.find_one( {"email": session['miner_email'],})
g.user = miner
@app.route('/')
def index():
return render_template('./index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('miner_email',None)
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
user = minerdb.find_one( { "email": email,})
if user and user.get('password') == password:
session['miner_email'] = user.get('email')
return redirect('/profile')
else:
return redirect('/login')
return render_template('login.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
name = request.form.get('minerName')
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
minerdata = {'name':name, "email":email, "password":password}
minerdb.insert_one(minerdata)
return redirect('/login')
return render_template('register.html')
@app.route('/logout')
def logout():
# remove the username from the session if it is there
session.pop('miner_email', None)
return redirect('/login')
@app.route('/profile')
def profile():
if not g.user:
return redirect('/login')
return render_template('profile.html')
# @app.route('/register_credentials', methods=['POST'])
# def minerRegistration():
# name = request.form.get('minerName')
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# minerdata = {'name':name, "email":email, "password":password}
# minerdb.insert_one(minerdata)
# return redirect('/login')
# @app.route('/login_credentials', methods=['POST'])
# def minerLogin():
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# user = minerdb.find_one( { "email": email, "password" : password})
# if user:
# return redirect('/')
# else:
# return redirect('/login')
# return 'Miner email is {} and password is {}'.format(email, password)
@app.route('/configure')
def configure():
return render_template('./configure.html')
@app.route('/transactions/get', methods=['GET'])
def get_transactions():
transactions = blockchain.transactions
response = {'transactions': transactions}
return jsonify(response), 200
@app.route('/chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
# new_chain = {
# 'data': response
# }
#
# transaction_blocks.insert_one(new_chain)
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm
nonce = blockchain.proof_of_work()
blockchain.submit_transaction(sender_public_key=MINING_SENDER,
recipient_public_key=blockchain.node_id,
product_name='',
product_details='',
signature='',
amount=MINING_REWARD)
last_block = blockchain.chain[-1]
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(nonce, previous_hash)
response = {
'message': 'New block created',
'timestamp': block['timestamp'],
'block_number': block['block_number'],
'transactions': block['transactions'],
'nonce': block['nonce'],
'previous_hash': block['previous_hash'],
}
u_id = uuid4()
# add data into mongodb atlas
new_chain = {
'u_id': u_id,
'data': response
}
transaction_blocks.insert_one(new_chain)
id = transaction_blocks.find_one({'u_id': u_id})
print(id)
ob_id = id.get('_id')
# over db lines
# QR Code generation
data = ob_id
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill="black", back_color="white")
img.save('test.png')
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.form
required = ['confirmation_sender_public_key', 'confirmation_recipient_public_key', 'confirmation_product_name', 'confirmation_product_details', 'transaction_signature',
'confirmation_amount']
if not all(k in values for k in required):
return 'Missing values', 400
transaction_results = blockchain.submit_transaction(values['confirmation_sender_public_key'],
values['confirmation_recipient_public_key'],
values['confirmation_product_name'],
values['confirmation_product_details'],
values['transaction_signature'],
values['confirmation_amount'])
if transaction_results == False:
response = {'message': 'Invalid transaction/signature'}
return jsonify(response), 406
else:
response = {'message': 'Transaction will be added to the Block ' + str(transaction_results)}
return jsonify(response), 201
@app.route('/nodes/get', methods=['GET'])
def get_nodes():
nodes = list(blockchain.nodes)
response = {'nodes': nodes}
return jsonify(response), 200
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def register_node():
values = request.form
# 127.0.0.1:5002,127.0.0.1:5003, 127.0.0.1:5004
nodes = values.get('nodes')
if nodes is None:
return 'Error: Please supply a valid list of nodes', 400
nodes = nodes.replace(' ', '').split(',')
for node in nodes:
|
response = {
'message': 'Nodes have been added',
'total_nodes': [node for node in blockchain.nodes]
}
return jsonify(response), 200
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5001, type=int, help="port to listen to")
args = parser.parse_args()
port = args.port
app.run(host='127.0.0.1', port=port, debug=True)
| blockchain.register_node(node) | conditional_block |
HaarROG.py | #==========================================================================
#Already computed metrics for 1000 test images
'''
haarPath = "cascade30h.xml"
recallList = [0.8254189944134078, 0.8225352112676056, 0.8208744710860366, 0.8045325779036827, 0.7897727272727273,
0.7795163584637269, 0.7694483734087695, 0.7460992907801418, 0.7126600284495022, 0.7357954545454546,
0.701280227596017, 0.6529160739687055, 0.6584699453551912, 0.5549374130737135, 0.4767932489451477,
0.569060773480663, 0.4197183098591549, 0.3437057991513437, 0.5805084745762712, 0.49291784702549574,
0.4164305949008499, 0.48933143669985774, 0.40256045519203415, 0.33712660028449504]
precisionList = [0.17620751341681573, 0.23710921640276086, 0.29158316633266534, 0.43193916349809885,
0.5656154628687691, 0.6277205040091638, 0.5732349841938883, 0.6985391766268261, 0.75,
0.7204450625869263, 0.8029315960912052, 0.8375912408759124, 0.6703755215577191,
0.7388888888888889, 0.8033175355450237, 0.6809917355371901, 0.7286063569682152,
0.7546583850931677, 0.6650485436893204, 0.7265135699373695, 0.7405541561712846,
0.6653771760154739, 0.7546666666666667, 0.7719869706840391]
'''
'''
haarPath = "haarcascade_russian_plate_number.xml"
recallList = [0.8129395218002813, 0.8095909732016925, 0.8053596614950634, 0.7926657263751763,
0.7776203966005666, 0.7585227272727273, 0.7577903682719547, 0.7347517730496453,
0.7116477272727273, 0.7315340909090909, 0.6941678520625889, 0.6486486486486487,
0.6387362637362637, 0.5305555555555556, 0.47692307692307695, 0.561624649859944,
0.442090395480226, 0.3356940509915014, 0.5671852899575672, 0.4830028328611898,
0.42329545454545453, 0.5007112375533428, 0.42816500711237554, 0.3655761024182077]
precisionList = [0.15579514824797844, 0.21875, 0.26508820798514393, 0.43769470404984423, 0.5473579262213359,
0.5986547085201793, 0.545361875637105, 0.6641025641025641, 0.7389380530973452, 0.6848404255319149,
0.7845659163987139, 0.8201438848920863, 0.6258411843876177, 0.6970802919708029, 0.754424778761062,
0.6334913112164297, 0.681917211328976, 0.6909620991253644, 0.6140888208269525, 0.6779324055666004,
0.7376237623762376, 0.6530612244897959, 0.7270531400966184, 0.7558823529411764]
'''
#==========================================================================
#command to install opencv
#pip install opencv-python
#==========================================================================
#json is used for formatting the data in this program
'''
"1_11_2014_12_13_38_590.bmp38467": {
"fileref": "",
"size": 38467,
"filename": "1_11_2014_12_13_38_590.bmp",
"base64_img_data": "",
"file_attributes": {},
"regions": {
"0": {
"shape_attributes": {
"name": "rect",
"x": 186,
"y": 203,
"width": 75,
"height": 21
},
"region_attributes": {}
}
}
}
'''
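# Editor's note: in the sample region above x = 186, y = 203, width = 75, height = 21,
# which the code below converts to the corner form (x1, y1, x2, y2) = (186, 203, 261, 224).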
#==========================================================================
import math
import random
import PIL
import cv2
import numpy as np
#we use json to store the data, the most convenient format
import json
#metrics
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import average_precision_score
#Plotting
import matplotlib.pyplot as plt
import sys
#==========================================================================
#helper procedures
def changeOpCharac(detectListParam, markedDataParam, key, rateCh, lowerBorder, topBorder):
#list for the total number of detected number plates
#list for the unique detected number plates
findPlates = []
findUnicPlates = []
#make copies so that we work with local parameters
detectList = detectListParam.copy()
markedData = markedDataParam.copy()
for i in range(len(detectList)):
x1 = 0
x2 = 0
y1 = 0
y2 = 0
mx1 = 0
mx2 = 0
my1 = 0
my2 = 0
#zero-based numbering is essential here
for j in range(len(markedData[key]['regions'])):
#build a list for the annotated data from the json
markedNumPlatesList = [
markedData[key]['regions'][str(j)]['shape_attributes']['x'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'],
markedData[key]['regions'][str(j)]['shape_attributes']['x'] + markedData[key]['regions'][str(j)]['shape_attributes']['width'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'] + markedData[key]['regions'][str(j)]['shape_attributes']['height']
]
#print('LL')
#print(detectList)
#print('MNPL')
#print(markedNumPlatesList)
#x1 < x2
#ordered by x
if detectList[i][0] < detectList[i][2]:
x1 = detectList[i][0]
x2 = detectList[i][2]
else:
x1 = detectList[i][2]
x2 = detectList[i][0]
#ordered by x
if markedNumPlatesList[0] < markedNumPlatesList[2]:
mx1 = markedNumPlatesList[0]
mx2 = markedNumPlatesList[2]
else:
mx1 = markedNumPlatesList[2]
mx2 = markedNumPlatesList[0]
#y1 < y2
#ordered by y
if detectList[i][1] < detectList[i][3]:
y1 = detectList[i][1]
y2 = detectList[i][3]
else:
y1 = detectList[i][3]
y2 = detectList[i][1]
#ordered by y
if markedNumPlatesList[1] < markedNumPlatesList[3]:
my1 = markedNumPlatesList[1]
my2 = markedNumPlatesList[3]
else:
my1 = markedNumPlatesList[3]
my2 = markedNumPlatesList[1]
#print(x1, x2, mx1, mx2, y1, y2, my1, my2)
#find the intersection of the segments
xIntersection = max(0, min(x2, mx2) - max(x1, mx1))
yIntersection = max(0, min(y2, my2) - max(y1, my1))
#print('xIntersection ' + str(xIntersection))
#print('yIntersection ' + str(yIntersection))
#compute the areas
detectNumArea = math.sqrt((x2 - x1)**2) * math.sqrt((y2 - y1)**2)
detectNumAreaInter = xIntersection * yIntersection
numArea = math.sqrt((markedNumPlatesList[0] - markedNumPlatesList[2])**2) * math.sqrt((markedNumPlatesList[1] - markedNumPlatesList[3])**2)
#print('detectNumAreaInter / numArea: ' + str(detectNumAreaInter / numArea))
#print('detectNumArea / numArea: ' + str(detectNumArea / numArea))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder):
findPlates.append(str(j))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder) and (str(j) not in findUnicPlates):
findUnicPlates.append(str(j))
#print(findPlates, ' findPlates')
#print(detectList, ' detectList')
#print(findUnicPlates, ' findUnicPlates')
#print(len(markedData[key]['regions']), ' len(markedData[key][\'regions\'])')
rateCh.tp += len(findPlates)
rateCh.fp += len(detectList) - len(findPlates)
rateCh.fn += len(markedData[key]['regions']) - len(findUnicPlates)
return rateCh
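# Worked example of the matching criterion above (illustrative numbers only):
# detected box (180, 200, 260, 225) against annotated box x=186, y=203, w=75, h=21:
#   xIntersection = min(260, 261) - max(180, 186) = 74
#   yIntersection = min(225, 224) - max(200, 203) = 21
#   detectNumAreaInter = 74 * 21 = 1554, numArea = 75 * 21 = 1575, detectNumArea = 80 * 25 = 2000
#   1554 / 1575 ≈ 0.99 > lowerBorder (0.7) and 2000 / 1575 ≈ 1.27 < topBorder (1.8),
#   so this detection is counted as a true positive.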
def drawMarkAndDetect(detectReg, markedRegions, itemKey, image):
    #make copies of the lists so that we work with local parameters
localMarkedRegions = markedRegions.copy()
localDetectReg = detectReg.copy()
markedNumPlates = []
for i in range(len(localMarkedRegions[itemKey]['regions'])):
batch = []
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['y'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'] + localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['width'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['y'] + localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['height'])
markedNumPlates.append(batch)
'''
for (x, y, x1, y1) in localDetectReg:
cv2.rectangle(image, (x, y), (x1, y1), (random.randint(50, 250), 232, random.randint(50, 250)), -1)
for (x, y, x1, y1) in markedNumPlates:
cv2.rectangle(image, (x, y), (x1, y1), (0, 250, 250), 2)
cv2.imshow("Image", image)
cv2.waitKey(0)
'''
def makeDetectetData(image, numplateCascade, scaleF, minN):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #determine whether a license plate is present
    #these parameters have to be varied when evaluating the cascade
numPlates = numplateCascade.detectMultiScale(
gray,
scaleFactor = scaleF,
minNeighbors = minN
)
localDetectData = []
    #avoid an error when the cascade has found no license plates
    #build a list in a convenient format, since numPlates entries use the (x, y, w, h) layout
    if len(numPlates) == 0:
        localDetectData = []
else:
for i in range(len(numPlates)):
bufData = [numPlates[i][0], numPlates[i][1], numPlates[i][0] + numPlates[i][2], numPlates[i][1] + numPlates[i][3]]
localDetectData.append(bufData)
return localDetectData
#==========================================================================
def mainProcedure(haarPath, dataPath, drivePath):
print('CV2 version: ')
print(cv2.__version__ + '\n')
# -----------------------------------------------------------------------
    # data loading
    # load the cascade
try:
numplateCascade = cv2.CascadeClassifier(haarPath)
except:
print('ERROR: cv2.CascadeClassifier(haarPath) \n')
sys.exit()
try:
        # load the file with the annotated test set
with open(dataPath, "r") as read_file:
testData = json.load(read_file)
        # build the list of keys in the dictionary
keys = list(testData.keys())
except:
print('ERROR: dataPath \n')
sys.exit()
# -----------------------------------------------------------------------
    # testing
class Characteristics:
        # positive counts
tp = 0
tn = 0
        # negative counts
fp = 0
fn = 0
rateCh = Characteristics()
    # thresholds that decide whether a plate was detected correctly
    # applied to the intersection area of the plate bounding boxes
lowerBorder = 0.7
topBorder = 1.8
    # two lists for building the PR curve
precisionList = []
recallList = []
    # points used to build the curve
points = [1.01, 1.05, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6]
numNeigh = [3, 5, 7]
# points = [1.1]
# numNeigh = [3]
for pIter in range(len(points)):
rateCh.tp = 0
rateCh.tn = 0
rateCh.fp = 0
rateCh.fn = 0
        # iterate over all test images
for numIter in range(len(numNeigh)):
for i in range(len(keys) // 10):
                # store the key in a separate variable for convenience
itemKey = keys[i]
print('----------------------------------------------------')
print(str(i) + '. ' + testData[itemKey]['filename'])
                # read the image
image = cv2.imread(drivePath + "\\" + testData[itemKey]['filename'])
if image is None:
continue
detectData = []
detectData = makeDetectetData(image, numplateCascade, points[pIter], numNeigh[numIter])
                # numPlates is a list of lists [[],[]]
                # pass the list of detected plates and the annotated data to the function
rateCh = changeOpCharac(detectData, testData, itemKey, rateCh, lowerBorder, topBorder)
print(' TP: ' + str(rateCh.tp) + ' TN: ' + str(rateCh.tn) + ' FP: ' + str(rateCh.fp) + ' FN: ' + str(
rateCh.fn))
print(' Number of license plates: ', len(testData[itemKey]['regions']))
print(' Found: {0} numplate!'.format(len(detectData)))
print('----------------------------------------------------')
drawMarkAndDetect(detectData, testData, itemKey, image)
        # compute precision and recall (guarding against division by zero)
try:
recallList.append(rateCh.tp / (rateCh.tp + rateCh.fn))
print('recall:' + str(rateCh.tp / (rateCh.tp + rateCh.fn)))
except:
recallList.append(0)
try:
precisionList.append(rateCh.tp / (rateCh.tp + rateCh.fp))
print('precision:' + str(rateCh.tp / (rateCh.tp + rateCh.fp)))
except:
precisionList.append(0)
        # reset the counters
rateCh.tp = 0
rateCh.tn = 0
rateCh.fp = 0
rateCh.fn = 0
    # Next, build the PR curve to evaluate the accuracy of the resulting model:
print(recallList)
print(precisionList)
plt.plot(recallList, precisionList, color='r', label='Log Res')
plt.title('precision-recall curve for Log Res')
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlabel('recall')
plt.ylabel('precision')
plt.grid()
plt.show()
plt.gcf().clear()
#==========================================================================
haarPath = "cascade30h.xml"
dataPath = 'numplates_region_data.json'
drivePath = 'TestData'
mainProcedure(haarPath, dataPath, drivePath)
#==========================================================================
fig = plt.figure()
fig, ax = plt.subplots()
recallList = [0.8254189944134078, 0.8225352112676056, 0.8208744710860366, 0.8045325779036827, 0.7897727272727273,
0.7795163584637269, 0.7694483734087695, 0.7460992907801418, 0.7126600284495022, 0.7357954545454546,
0.701280227596017, 0.6529160739687055, 0.6584699453551912, 0.5549374130737135, 0.4767932489451477,
0.569060773480663, 0.4197183098591549, 0.3437057991513437, 0.5805084745762712, 0.49291784702549574,
0.4164305949008499, 0.48933143669985774, 0.40256045519203415, 0.33712660028449504]
precisionList = [0.17620751341681573, 0.23710921640276086, 0.29158316633266534, 0.43193916349809885,
0.5656154628687691, 0.6277205040091638, 0.5732349841938883, 0.6985391766268261, 0.75,
0.7204450625869263, 0.8029315960912052, 0.8375912408759124, 0.6703755215577191,
0.7388888888888889, 0.8033175355450237, 0.6809917355371901, 0.7286063569682152,
0.7546583850931677, 0.6650485436893204, 0.7265135699373695, 0.7405541561712846,
0.6653771760154739, 0.7546666666666667, 0.7719869706840391]
ax.plot(recallList, precisionList, 's', color='blue')
recallList = [0.8129395218002813, 0.8095909732016925, 0.8053596614950634, 0.7926657263751763,
0.7776203966005666, 0.7585227272727273, 0.7577903682719547, 0.7347517730496453,
0.7116477272727273, 0.7315340909090909, 0.6941678520625889, 0.6486486486486487,
0.6387362637362637, 0.5305555555555556, 0.47692307692307695, 0.561624649859944,
0.442090395480226, 0.3356940509915014, 0.5671852899575672, 0.4830028328611898,
0.42329545454545453, 0.5007112375533428, 0.42816500711237554, 0.3655761024182077]
precisionList = [0.15579514824797844, 0.21875, 0.26508820798514393, 0.43769470404984423, 0.5473579262213359,
0.5986547085201793, 0.545361875637105, 0.6641025641025641, 0.7389380530973452, 0.6848404255319149,
0.7845659163987139, 0.8201438848920863, 0.6258411843876177, 0.6970802919708029, 0.754424778761062,
0.6334913112164297, 0.681917211328976, 0.6909620991253644, 0.6140888208269525, 0.6779324055666004,
0.7376237623762376, 0.6530612244897959, 0.7270531400966184, 0.7558823529411764]
ax.plot(recallList, precisionList, 's', color='red')
plt.title('precision-recall curve for Log Res')
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlabel('recall')
plt.ylabel('precision')
plt.grid()
plt.show()
plt.gcf().clear()
#==========================================================================
transcation.go
package trx
import (
"crypto/ecdsa"
"encoding/json"
"fmt"
"math/big"
"time"
"tron/api"
"tron/common/base58"
"tron/common/hexutil"
"tron/core"
"tron/log"
"tron/service"
wallet "tron/util"
"github.com/ethereum/go-ethereum/common"
"github.com/golang/protobuf/proto"
"github.com/shopspring/decimal"
)
// at most 100 blocks per request
func getBlockWithHeights(start, end int64) error {
if end-start < 1 {
return nil
}
var node *service.GrpcClient
againblock:
if node != nil {
node.Conn.Close()
}
select {
case <-ctx.Done():
return nil
default:
}
node = getRandOneNode()
block, err := node.GetBlockByLimitNext(start, end)
if err != nil {
// rpc error: code = DeadlineExceeded desc = context deadline exceeded will get again
log.Warnf("node get bolck start %d end %d GetBlockByLimitNext err: %v will get again", start, end, err)
time.Sleep(time.Second * 5)
goto againblock
}
log.Infof("node get bolck start %d end %d length %d", start, end, len(block.Block))
if len(block.Block) < 1 {
log.Warnf("get bolck zero lenghth of block start %d end %d, will get again", start, end)
time.Sleep(time.Second * 5)
goto againblock
}
processBlocks(block)
node.Conn.Close()
return nil
}
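// Illustrative sketch only (not part of the original file and not called anywhere):
// since the comment above limits each request to roughly 100 blocks, a larger range can
// be walked in fixed-size chunks; the chunk size and the ignored error are assumptions.
func scanRangeExample(start, end int64) {
    const chunk int64 = 100
    for from := start; from < end; from += chunk {
        to := from + chunk
        if to > end {
            to = end
        }
        _ = getBlockWithHeights(from, to) // errors would need real handling here
    }
}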
func getBlockWithHeight(num int64) error {
node := getRandOneNode()
defer node.Conn.Close()
block, err := node.GetBlockByNum(num)
if err != nil {
return err
}
processBlock(block)
return nil
}
func processBlocks(blocks *api.BlockListExtention) {
for _, v := range blocks.Block {
processBlock(v)
}
}
func processBlock(block *api.BlockExtention) {
height := block.GetBlockHeader().GetRawData().GetNumber()
node := getRandOneNode()
defer node.Conn.Close()
for _, v := range block.Transactions {
// transaction.ret.contractRe
txid := hexutil.Encode(v.Txid)
// https://tronscan.org/#/transaction/fede1aa9e5c5d7bd179fd62e23bdd11e3c1edd0ca51e41070e34a026d6a42569
if v.Result == nil || !v.Result.Result {
continue
}
rets := v.Transaction.Ret
if len(rets) < 1 || rets[0].ContractRet != core.Transaction_Result_SUCCESS {
continue
}
//fmt.Println(txid)
log.Debugf("process block height %d txid %s", height, txid)
for _, v1 := range v.Transaction.RawData.Contract {
            if v1.Type == core.Transaction_Contract_TransferContract { // transfer contract
                // TRX transfer
unObj := &core.TransferContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, Trx, txid, form, to, height, unObj.GetAmount())
            } else if v1.Type == core.Transaction_Contract_TriggerSmartContract { // smart contract call
                // TRC20 transfer
unObj := &core.TriggerSmartContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
// res, _ := json.Marshal(unObj)
// fmt.Println(string(res))
contract := base58.EncodeCheck(unObj.GetContractAddress())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
data := unObj.GetData()
                // unObj.Data: see https://goethereumbook.org/en/transfer-tokens/ for the analogous ETH handling
to, amount, flag := processTransferData(data)
                if flag { // only a transfer(address,uint256) call counts as a transfer
processTransaction(node, contract, txid, form, to, height, amount)
}
            } else if v1.Type == core.Transaction_Contract_TransferAssetContract { // token transfer contract
                // TRC10 transfer
unObj := &core.TransferAssetContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
contract := base58.EncodeCheck(unObj.GetAssetName())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, contract, txid, form, to, height, unObj.GetAmount())
}
}
}
}
// this map is currently unused; it only documents the TRC20 method selectors and their signatures
var mapFunctionTcc20 = map[string]string{
"a9059cbb": "transfer(address,uint256)",
"70a08231": "balanceOf(address)",
}
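// Note: the keys above are standard 4-byte function selectors, i.e. the first four bytes
// of keccak256 of the function signature; "a9059cbb" identifies transfer(address,uint256)
// and "70a08231" identifies balanceOf(address), which is how the calldata below is recognized.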
// a9059cbb 4 8
// 00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98 32 64
// 0000000000000000000000000000000000000000000000000000000000989680 32 64
// parse the contract call data
func processTransferData(trc20 []byte) (to string, amount int64, flag bool) {
if len(trc20) >= 68 {
fmt.Println(hexutil.Encode(trc20))
if hexutil.Encode(trc20[:4]) != "a9059cbb" {
return
}
        // add the leading 0x41 Tron address prefix byte
trc20[15] = 65
to = base58.EncodeCheck(trc20[15:36])
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[36:68])).Int64()
flag = true
}
return
}
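// Illustrative round-trip sketch (not part of the original file and not called anywhere):
// calldata built by processTransferParameter below can be decoded again by
// processTransferData above. The amount is an arbitrary example value and mainAddr is
// reused only to have a valid base58 address on hand.
func exampleTransferCalldataRoundTrip() {
    data := processTransferParameter(mainAddr, 10000000) // selector + padded address + padded amount = 68 bytes
    to, amount, ok := processTransferData(data)
    fmt.Println(to, amount, ok) // expected: mainAddr, 10000000, true
}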
// build the calldata for a contract transfer
func processTransferParameter(to string, amount int64) (data []byte) {
methodID, _ := hexutil.Decode("a9059cbb")
addr, _ := base58.DecodeCheck(to)
paddedAddress := common.LeftPadBytes(addr[1:], 32)
amountBig := new(big.Int).SetInt64(amount)
paddedAmount := common.LeftPadBytes(amountBig.Bytes(), 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
data = append(data, paddedAmount...)
return
}
// decode the balance returned by a contract balanceOf call
func processBalanceOfData(trc20 []byte) (amount int64) {
if len(trc20) >= 32 {
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[0:32])).Int64()
}
return
}
// build the calldata for a contract balanceOf(address) call
func processBalanceOfParameter(addr string) (data []byte) {
    methodID, _ := hexutil.Decode("70a08231")
    add, _ := base58.DecodeCheck(addr)
    paddedAddress := common.LeftPadBytes(add[1:], 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
return
}
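// Illustrative note: processBalanceOfParameter builds the calldata for a constant
// balanceOf(address) call and processBalanceOfData decodes the 32-byte big-endian
// result; the gRPC call that actually submits this calldata is not shown in this file.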
func processTransaction(node *service.GrpcClient, contract, txid, from, to string, blockheight, amount int64) {
    // check whether the contract is known
if !IsContract(contract) {
return
}
// fmt.Printf("contract %s txid %s from %s to %s, blockheight %d amount %d \n",
// contract, txid, from, to, blockheight, amount)
var types string
    if from == mainAddr { // withdrawal or internal forwarding
ac, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if ac != nil {
            types = Collect // fee transfer
} else {
types = Send
}
    } else if to == mainAddr { // collection (sweep) record
ac, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect
} else {
types = ReceiveOther
}
} else {
acf, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
act, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
        if act != nil { // deposit address
if acf != nil {
types = CollectOwn // in-platform transfer; should not happen for now
} else {
types = Receive
go collectall(to) // trigger a collection (sweep) check
}
} else {
if acf != nil {
types = CollectSend // transfer to an external address; unexpected
} else {
return // ignore; neither address belongs to the platform
}
}
}
// fee handling
transinfo, err := node.GetTransactionInfoById(txid)
var fee int64
if err != nil {
log.Error(err)
} else {
fee = transinfo.GetFee()
}
_, decimalnum := chargeContract(contract)
var trans = &Transactions{
TxID: txid,
Contract: contract,
Type: types,
BlockHeight: blockheight,
Amount: decimal.New(amount, -decimalnum).String(),
Fee: decimal.New(fee, -trxdecimal).String(),
Timestamp: time.Now().Unix(),
Address: to,
FromAddress: from,
}
_, err = dbengine.InsertTransactions(trans)
log.Infof("InsertTransactions %v err %v ", trans, err)
}
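// Classification summary for processTransaction (added for clarity; it mirrors
// the branches above and is descriptive only):
//   from == mainAddr, to is a platform account  -> Collect (fee transfer)
//   from == mainAddr, to is external            -> Send (withdrawal)
//   to == mainAddr, from is a platform account  -> Collect (sweep)
//   to == mainAddr, from is external            -> ReceiveOther
//   to is a platform account, from is external  -> Receive (and a sweep check is started)
//   both are platform accounts                  -> CollectOwn
//   from is a platform account, to is external  -> CollectSend (unexpected)
//   neither belongs to the platform             -> ignored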
// TRX burned as the fee limit for contract transfers, in sun (default 5 TRX)
var feelimit int64 = 5000000
// send tokens or TRX
func send(key *ecdsa.PrivateKey, contract, to string, amount decimal.Decimal) (string, error) {
node := getRandOneNode()
defer node.Conn.Close()
typs, decimalnum := chargeContract(contract)
var amountdecimal = decimal.New(1, decimalnum)
amountac, _ := amount.Mul(amountdecimal).Float64()
switch typs {
case Trc10:
return node.TransferAsset(key, contract, to, int64(amountac))
case Trx:
return node.Transfer(key, to, int64(amountac))
case Trc20:
data := processTransferParameter(to, int64(amountac))
return node.TransferContract(key, contract, data, feelimit)
}
return "", fmt.Errorf("the type %s not support now", typs)
}
// outbound transfer (withdrawal)
func sendOut(contract, to string, amount decimal.Decimal) (string, error) {
return send(mainAccout, contract, to, amount)
}
// send fee TRX to an address
func sendFee(to string, amount decimal.Decimal) (string, error) {
return send(mainAccout, Trx, to, amount)
}
// collect (sweep) funds into the main address
func sendIn(contract, from string, amount decimal.Decimal) (string, error) {
var accout *ecdsa.PrivateKey
accout, err := loadAccount(from)
if err != nil {
return "", err
}
return send(accout, contract, mainAddr, amount)
}
// transaction history
func recentTransactions(contract, addr string, count, skip int) ([]wallet.Transactions, error) {
re, err := dbengine.GetTransactions(contract, addr, count, skip)
lens := len(re)
ral := make([]wallet.Transactions, lens)
if err != nil {
return ral, err
}
var account = "go-tron-" + contract + "-walletrpc"
for i := 0; i < lens; i++ {
ral[i].Address = re[i].Address
ral[i].FromAddress = re[i].FromAddress
ral[i].Fee = json.Number(re[i].Fee)
ral[i].Amount = json.Number(re[i].Amount)
ral[i].Category = re[i].Type
ral[i].Confirmations = blockHeightTop - re[i].BlockHeight + 1
ral[i].Time = re[i].Timestamp
ral[i].TimeReceived = re[i].Timestamp
ral[i].TxID = re[i].TxID
ral[i].BlockIndex = re[i].BlockHeight
ral[i].Account = account
}
return ral, nil
}
// collection (sweep) history
func collectTransactions(contract string, sTime, eTime int64) ([]wallet.SummaryData, error) {
re, err := dbengine.GetCollestTransactions(sTime, eTime, contract)
lens := len(re)
ral := make([]wallet.SummaryData, lens)
if err != nil {
return ral, err
}
var account = "go-tron-" + contract + "-walletrpc"
for i := 0; i < lens; i++ {
ral[i].Address = re[i].Address
ral[i].FromAddress = re[i].FromAddress
ral[i].Fee = re[i].Fee
ral[i].Amount = re[i].Amount
ral[i].Category = re[i].Type
ral[i].Time = re[i].Timestamp
ral[i].TimeReceived = re[i].Timestamp
ral[i].Blocktime = re[i].Timestamp
ral[i].TxID = re[i].TxID
ral[i].BlockIndex = re[i].BlockHeight
ral[i].Account = account
}
return ral, nil
}
| ring) (data []byte) {
methodID, _ := hexutil.Decode("70a08231")
add, _ := base58.DecodeCheck(addr)
paddedAddress | identifier_body |
transcation.go | package trx
import (
"crypto/ecdsa"
"encoding/json"
"fmt"
"math/big"
"time"
"tron/api"
"tron/common/base58"
"tron/common/hexutil"
"tron/core"
"tron/log"
"tron/service"
wallet "tron/util"
"github.com/ethereum/go-ethereum/common"
"github.com/golang/protobuf/proto"
"github.com/shopspring/decimal"
)
// at most 100 blocks per request
func getBlockWithHeights(start, end int64) error {
if end-start < 1 {
return nil
}
var node *service.GrpcClient
againblock:
if node != nil {
node.Conn.Close()
}
select {
case <-ctx.Done():
return nil
default:
}
| node = getRandOneNode()
block, err := node.GetBlockByLimitNext(start, end)
if err != nil {
// rpc error: code = DeadlineExceeded desc = context deadline exceeded will get again
log.Warnf("node get bolck start %d end %d GetBlockByLimitNext err: %v will get again", start, end, err)
time.Sleep(time.Second * 5)
goto againblock
}
log.Infof("node get bolck start %d end %d length %d", start, end, len(block.Block))
if len(block.Block) < 1 {
log.Warnf("get bolck zero lenghth of block start %d end %d, will get again", start, end)
time.Sleep(time.Second * 5)
goto againblock
}
processBlocks(block)
node.Conn.Close()
return nil
}
func getBlockWithHeight(num int64) error {
node := getRandOneNode()
defer node.Conn.Close()
block, err := node.GetBlockByNum(num)
if err != nil {
return err
}
processBlock(block)
return nil
}
func processBlocks(blocks *api.BlockListExtention) {
for _, v := range blocks.Block {
processBlock(v)
}
}
func processBlock(block *api.BlockExtention) {
height := block.GetBlockHeader().GetRawData().GetNumber()
node := getRandOneNode()
defer node.Conn.Close()
for _, v := range block.Transactions {
// transaction.ret.contractRe
txid := hexutil.Encode(v.Txid)
// https://tronscan.org/#/transaction/fede1aa9e5c5d7bd179fd62e23bdd11e3c1edd0ca51e41070e34a026d6a42569
if v.Result == nil || !v.Result.Result {
continue
}
rets := v.Transaction.Ret
if len(rets) < 1 || rets[0].ContractRet != core.Transaction_Result_SUCCESS {
continue
}
//fmt.Println(txid)
log.Debugf("process block height %d txid %s", height, txid)
for _, v1 := range v.Transaction.RawData.Contract {
if v1.Type == core.Transaction_Contract_TransferContract { // plain transfer contract
// TRX transfer
unObj := &core.TransferContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, Trx, txid, form, to, height, unObj.GetAmount())
} else if v1.Type == core.Transaction_Contract_TriggerSmartContract { // smart contract call
// TRC-20 transfer
unObj := &core.TriggerSmartContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
// res, _ := json.Marshal(unObj)
// fmt.Println(string(res))
contract := base58.EncodeCheck(unObj.GetContractAddress())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
data := unObj.GetData()
// unObj.Data: see https://goethereumbook.org/en/transfer-tokens/ for the equivalent ETH handling
to, amount, flag := processTransferData(data)
if flag { // only a transfer(address,uint256) call counts as a transfer
processTransaction(node, contract, txid, form, to, height, amount)
}
} else if v1.Type == core.Transaction_Contract_TransferAssetContract { // token transfer contract
// TRC-10 transfer
unObj := &core.TransferAssetContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
contract := base58.EncodeCheck(unObj.GetAssetName())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, contract, txid, form, to, height, unObj.GetAmount())
}
}
}
}
// This map is currently unused; it only documents the TRC-20 method selectors and their signatures.
var mapFunctionTcc20 = map[string]string{
"a9059cbb": "transfer(address,uint256)",
"70a08231": "balanceOf(address)",
}
// transfer(address,uint256) call data layout (bytes / hex chars):
// a9059cbb                                                            4 / 8  method selector
// 00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98   32 / 64 to address (0x41-prefixed, left-padded)
// 0000000000000000000000000000000000000000000000000000000000989680   32 / 64 amount, big-endian
// parse transfer(address,uint256) call data
func processTransferData(trc20 []byte) (to string, amount int64, flag bool) {
if len(trc20) >= 68 {
fmt.Println(hexutil.Encode(trc20))
if hexutil.Encode(trc20[:4]) != "a9059cbb" {
return
}
// prepend the extra 0x41 Tron address prefix byte
trc20[15] = 65
to = base58.EncodeCheck(trc20[15:36])
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[36:68])).Int64()
flag = true
}
return
}
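// Illustrative example (not called anywhere in the package): decoding the sample
// transfer(address,uint256) call data documented above with processTransferData.
// The payload is the hypothetical one from the comment, not data taken from the
// chain, and hexutil.Decode is assumed to accept the same un-prefixed hex form
// used elsewhere in this file.
func exampleProcessTransferData() {
	payload, _ := hexutil.Decode(
		"a9059cbb" +
			"00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98" +
			"0000000000000000000000000000000000000000000000000000000000989680")
	to, amount, ok := processTransferData(payload)
	// amount is 0x989680 == 10000000 (e.g. 10 tokens with 6 decimals), ok is true.
	fmt.Println(to, amount, ok)
}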
// build transfer(address,uint256) call data
func processTransferParameter(to string, amount int64) (data []byte) {
methodID, _ := hexutil.Decode("a9059cbb")
addr, _ := base58.DecodeCheck(to)
paddedAddress := common.LeftPadBytes(addr[1:], 32)
amountBig := new(big.Int).SetInt64(amount)
paddedAmount := common.LeftPadBytes(amountBig.Bytes(), 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
data = append(data, paddedAmount...)
return
}
// parse a balanceOf(address) result
func processBalanceOfData(trc20 []byte) (amount int64) {
if len(trc20) >= 32 {
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[0:32])).Int64()
}
return
}
// build balanceOf(address) call data
func processBalanceOfParameter(addr string) (data []byte) {
methodID, _ := hexutil.Decode("70a08231")
add, _ := base58.DecodeCheck(addr)
paddedAddress := common.LeftPadBytes(add[1:], 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
return
}
func processTransaction(node *service.GrpcClient, contract, txid, from, to string, blockheight, amount int64) {
// is the contract supported?
if !IsContract(contract) {
return
}
// fmt.Printf("contract %s txid %s from %s to %s, blockheight %d amount %d \n",
// contract, txid, from, to, blockheight, amount)
var types string
if from == mainAddr { // withdrawal or internal relay
ac, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect // fee transfer
} else {
types = Send
}
} else if to == mainAddr { // collection (sweep) record
ac, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect
} else {
types = ReceiveOther
}
} else {
acf, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
act, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if act != nil { // deposit address
if acf != nil {
types = CollectOwn // in-platform transfer; should not happen for now
} else {
types = Receive
go collectall(to) // trigger a collection (sweep) check
}
} else {
if acf != nil {
types = CollectSend // transfer to an external address; unexpected
} else {
return // ignore; neither address belongs to the platform
}
}
}
// fee handling
transinfo, err := node.GetTransactionInfoById(txid)
var fee int64
if err != nil {
log.Error(err)
} else {
fee = transinfo.GetFee()
}
_, decimalnum := chargeContract(contract)
var trans = &Transactions{
TxID: txid,
Contract: contract,
Type: types,
BlockHeight: blockheight,
Amount: decimal.New(amount, -decimalnum).String(),
Fee: decimal.New(fee, -trxdecimal).String(),
Timestamp: time.Now().Unix(),
Address: to,
FromAddress: from,
}
_, err = dbengine.InsertTransactions(trans)
log.Infof("InsertTransactions %v err %v ", trans, err)
}
// TRX burned as the fee limit for contract transfers, in sun (default 5 TRX)
var feelimit int64 = 5000000
// send tokens or TRX
func send(key *ecdsa.PrivateKey, contract, to string, amount decimal.Decimal) (string, error) {
node := getRandOneNode()
defer node.Conn.Close()
typs, decimalnum := chargeContract(contract)
var amountdecimal = decimal.New(1, decimalnum)
amountac, _ := amount.Mul(amountdecimal).Float64()
switch typs {
case Trc10:
return node.TransferAsset(key, contract, to, int64(amountac))
case Trx:
return node.Transfer(key, to, int64(amountac))
case Trc20:
data := processTransferParameter(to, int64(amountac))
return node.TransferContract(key, contract, data, feelimit)
}
return "", fmt.Errorf("the type %s not support now", typs)
}
// outbound transfer (withdrawal)
func sendOut(contract, to string, amount decimal.Decimal) (string, error) {
return send(mainAccout, contract, to, amount)
}
// send fee TRX to an address
func sendFee(to string, amount decimal.Decimal) (string, error) {
return send(mainAccout, Trx, to, amount)
}
// collect (sweep) funds into the main address
func sendIn(contract, from string, amount decimal.Decimal) (string, error) {
var accout *ecdsa.PrivateKey
accout, err := loadAccount(from)
if err != nil {
return "", err
}
return send(accout, contract, mainAddr, amount)
}
// transaction history
func recentTransactions(contract, addr string, count, skip int) ([]wallet.Transactions, error) {
re, err := dbengine.GetTransactions(contract, addr, count, skip)
lens := len(re)
ral := make([]wallet.Transactions, lens)
if err != nil {
return ral, err
}
var account = "go-tron-" + contract + "-walletrpc"
for i := 0; i < lens; i++ {
ral[i].Address = re[i].Address
ral[i].FromAddress = re[i].FromAddress
ral[i].Fee = json.Number(re[i].Fee)
ral[i].Amount = json.Number(re[i].Amount)
ral[i].Category = re[i].Type
ral[i].Confirmations = blockHeightTop - re[i].BlockHeight + 1
ral[i].Time = re[i].Timestamp
ral[i].TimeReceived = re[i].Timestamp
ral[i].TxID = re[i].TxID
ral[i].BlockIndex = re[i].BlockHeight
ral[i].Account = account
}
return ral, nil
}
// collection (sweep) history
func collectTransactions(contract string, sTime, eTime int64) ([]wallet.SummaryData, error) {
re, err := dbengine.GetCollestTransactions(sTime, eTime, contract)
lens := len(re)
ral := make([]wallet.SummaryData, lens)
if err != nil {
return ral, err
}
var account = "go-tron-" + contract + "-walletrpc"
for i := 0; i < lens; i++ {
ral[i].Address = re[i].Address
ral[i].FromAddress = re[i].FromAddress
ral[i].Fee = re[i].Fee
ral[i].Amount = re[i].Amount
ral[i].Category = re[i].Type
ral[i].Time = re[i].Timestamp
ral[i].TimeReceived = re[i].Timestamp
ral[i].Blocktime = re[i].Timestamp
ral[i].TxID = re[i].TxID
ral[i].BlockIndex = re[i].BlockHeight
ral[i].Account = account
}
return ral, nil
} | random_line_split |
|
transcation.go | package trx
import (
"crypto/ecdsa"
"encoding/json"
"fmt"
"math/big"
"time"
"tron/api"
"tron/common/base58"
"tron/common/hexutil"
"tron/core"
"tron/log"
"tron/service"
wallet "tron/util"
"github.com/ethereum/go-ethereum/common"
"github.com/golang/protobuf/proto"
"github.com/shopspring/decimal"
)
// at most 100 blocks per request
func getBlockWithHeights(start, end int64) error {
if end-start < 1 {
return nil
}
var node *service.GrpcClient
againblock:
if node != nil {
node.Conn.Close()
}
select {
case <-ctx.Done():
return nil
default:
}
node = getRandOneNode()
block, err := node.GetBlockByLimitNext(start, end)
if err != nil {
// rpc error: code = DeadlineExceeded desc = context deadline exceeded will get again
log.Warnf("node get bolck start %d end %d GetBlockByLimitNext err: %v will get again", start, end, err)
time.Sleep(time.Second * 5)
goto againblock
}
log.Infof("node get bolck start %d end %d length %d", start, end, len(block.Block))
if len(block.Block) < 1 {
log.Warnf("get bolck zero lenghth of block start %d end %d, will get again", start, end)
time.Sleep(time.Second * 5)
goto againblock
}
processBlocks(block)
node.Conn.Close()
return nil
}
func getBlockWithHeight(num int64) error {
node := getRandOneNode()
defer node.Conn.Close()
block, err := node.GetBlockByNum(num)
if err != nil {
return err
}
processBlock(block)
return nil
}
func processBlocks(blocks *api.BlockListExtention) {
for _, v := range blocks.Block {
processBlock(v)
}
}
func processBlock(block *api.BlockExtention) {
height := block.GetBlockHeader().GetRawData().GetNumber()
node := getRandOneNode()
defer node.Conn.Close()
for _, v := range block.Transactions {
// transaction.ret.contractRe
txid := hexutil.Encode(v.Txid)
// https://tronscan.org/#/transaction/fede1aa9e5c5d7bd179fd62e23bdd11e3c1edd0ca51e41070e34a026d6a42569
if v.Result == nil || !v.Result.Result {
continue
}
rets := v.Transaction.Ret
if len(rets) < 1 || rets[0].ContractRet != core.Transaction_Result_SUCCESS {
continue
}
//fmt.Println(txid)
log.Debugf("process block height %d txid %s", height, txid)
for _, v1 := range v.Transaction.RawData.Contract {
if v1.Type == core.Transaction_Contract_TransferContract { // plain transfer contract
// TRX transfer
unObj := &core.TransferContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, Trx, txid, form, to, height, unObj.GetAmount())
} else if v1.Type == core.Transaction_Contract_TriggerSmartContract { // smart contract call
// TRC-20 transfer
unObj := &core.TriggerSmartContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
// res, _ := json.Marshal(unObj)
// fmt.Println(string(res))
contract := base58.EncodeCheck(unObj.GetContractAddress())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
data := unObj.GetData()
// unObj.Data: see https://goethereumbook.org/en/transfer-tokens/ for the equivalent ETH handling
to, amount, flag := processTransferData(data)
if flag { // only a transfer(address,uint256) call counts as a transfer
processTransaction(node, contract, txid, form, to, height, amount)
}
} else if v1.Type == core.Transaction_Contract_TransferAssetContract { // token transfer contract
// TRC-10 transfer
unObj := &core.TransferAssetContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
contract := base58.EncodeCheck(unObj.GetAssetName())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, contract, txid, form, to, height, unObj.GetAmount())
}
}
}
}
// This map is currently unused; it only documents the TRC-20 method selectors and their signatures.
var mapFunctionTcc20 = map[string]string{
"a9059cbb": "transfer(address,uint256)",
"70a08231": "balanceOf(address)",
}
// transfer(address,uint256) call data layout (bytes / hex chars):
// a9059cbb                                                            4 / 8  method selector
// 00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98   32 / 64 to address (0x41-prefixed, left-padded)
// 0000000000000000000000000000000000000000000000000000000000989680   32 / 64 amount, big-endian
// parse transfer(address,uint256) call data
func processTransferData(trc20 []byte) (to string, amount int64, flag bool) {
if len(trc20) >= 68 {
fmt.Println(hexutil.Encode(trc20))
if hexutil.Encode(trc20[:4]) != "a9059cbb" {
return
}
// prepend the extra 0x41 Tron address prefix byte
trc20[15] = 65
to = base58.EncodeCheck(trc20[15:36])
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[36:68])).Int64()
flag = true
}
return
}
// build transfer(address,uint256) call data
func processTransferParameter(to string, amount int64) (data []byte) {
methodID, _ := hexutil.Decode("a9059cbb")
addr, _ := base58.DecodeCheck(to)
paddedAddress := common.LeftPadBytes(addr[1:], 32)
amountBig := new(big.Int).SetInt64(amount)
paddedAmount := common.LeftPadBytes(amountBig.Bytes(), 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
data = append(data, paddedAmount...)
return
}
// parse a balanceOf(address) result
func processBalanceOfData(trc20 []byte) (amount int64) {
if len(trc20) >= 32 {
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[0:32])).Int64()
}
return
}
// build balanceOf(address) call data
func processBalanceOfParameter(addr string) (data []byte) {
methodID, _ := hexutil.Decode("70a08231")
add, _ := base58.DecodeCheck(addr)
paddedAddress := common.LeftPadBytes(add[1:], 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
return
}
func processTransaction(node *service.GrpcClient, contract, txid, from, to string, blockheight, amount int64) {
// is the contract supported?
if !IsContract(contract) {
return
}
// fmt.Printf("contract %s txid %s from %s to %s, blockheight %d amount %d \n",
// contract, txid, from, to, blockheight, amount)
var types string
if from == mainAddr { // withdrawal or internal relay
ac, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect // fee transfer
} else {
types = Send
}
} else if to == mainAddr { // collection (sweep) record
ac, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect
} else {
types = ReceiveOther
}
} else {
acf, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
act, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if act != nil { // deposit address
if acf != nil {
types = CollectOwn // in-platform transfer; should not happen for now
} else {
types = Receive
go collectall(to) // trigger a collection (sweep) check
}
} else {
if acf != nil {
types = CollectSend // transfer to an external address; unexpected
} else {
return // ignore; neither address belongs to the platform
}
}
}
// fee handling
transinfo, err := node.GetTransactionInfoById(txid)
var fee int64
if err != nil {
log.Error(err)
} else {
fee = transinfo.GetFee()
}
_, decimaln | xID: txid,
Contract: contract,
Type: types,
BlockHeight: blockheight,
Amount: decimal.New(amount, -decimalnum).String(),
Fee: decimal.New(fee, -trxdecimal).String(),
Timestamp: time.Now().Unix(),
Address: to,
FromAddress: from,
}
_, err = dbengine.InsertTransactions(trans)
log.Infof("InsertTransactions %v err %v ", trans, err)
}
// TRX burned as the fee limit for contract transfers, in sun (default 5 TRX)
var feelimit int64 = 5000000
// send tokens or TRX
func send(key *ecdsa.PrivateKey, contract, to string, amount decimal.Decimal) (string, error) {
node := getRandOneNode()
defer node.Conn.Close()
typs, decimalnum := chargeContract(contract)
var amountdecimal = decimal.New(1, decimalnum)
amountac, _ := amount.Mul(amountdecimal).Float64()
switch typs {
case Trc10:
return node.TransferAsset(key, contract, to, int64(amountac))
case Trx:
return node.Transfer(key, to, int64(amountac))
case Trc20:
data := processTransferParameter(to, int64(amountac))
return node.TransferContract(key, contract, data, feelimit)
}
return "", fmt.Errorf("the type %s not support now", typs)
}
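// Illustrative only (not called anywhere): the amount scaling used by send above.
// decimal.New(1, 6) assumes a 6-decimal asset such as TRX (sun), which is what
// chargeContract is expected to report for it; the 1.5 value is just an example.
func exampleAmountScaling() {
	amount := decimal.NewFromFloat(1.5) // 1.5 TRX
	scaled, _ := amount.Mul(decimal.New(1, 6)).Float64()
	fmt.Println(int64(scaled)) // 1500000 sun
}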
// outbound transfer (withdrawal)
func sendOut(contract, to string, amount decimal.Decimal) (string, error) {
return send(mainAccout, contract, to, amount)
}
// send fee TRX to an address
func sendFee(to string, amount decimal.Decimal) (string, error) {
return send(mainAccout, Trx, to, amount)
}
// collect (sweep) funds into the main address
func sendIn(contract, from string, amount decimal.Decimal) (string, error) {
var accout *ecdsa.PrivateKey
accout, err := loadAccount(from)
if err != nil {
return "", err
}
return send(accout, contract, mainAddr, amount)
}
// transaction history
func recentTransactions(contract, addr string, count, skip int) ([]wallet.Transactions, error) {
re, err := dbengine.GetTransactions(contract, addr, count, skip)
lens := len(re)
ral := make([]wallet.Transactions, lens)
if err != nil {
return ral, err
}
var account = "go-tron-" + contract + "-walletrpc"
for i := 0; i < lens; i++ {
ral[i].Address = re[i].Address
ral[i].FromAddress = re[i].FromAddress
ral[i].Fee = json.Number(re[i].Fee)
ral[i].Amount = json.Number(re[i].Amount)
ral[i].Category = re[i].Type
ral[i].Confirmations = blockHeightTop - re[i].BlockHeight + 1
ral[i].Time = re[i].Timestamp
ral[i].TimeReceived = re[i].Timestamp
ral[i].TxID = re[i].TxID
ral[i].BlockIndex = re[i].BlockHeight
ral[i].Account = account
}
return ral, nil
}
// collection (sweep) history
func collectTransactions(contract string, sTime, eTime int64) ([]wallet.SummaryData, error) {
re, err := dbengine.GetCollestTransactions(sTime, eTime, contract)
lens := len(re)
ral := make([]wallet.SummaryData, lens)
if err != nil {
return ral, err
}
var account = "go-tron-" + contract + "-walletrpc"
for i := 0; i < lens; i++ {
ral[i].Address = re[i].Address
ral[i].FromAddress = re[i].FromAddress
ral[i].Fee = re[i].Fee
ral[i].Amount = re[i].Amount
ral[i].Category = re[i].Type
ral[i].Time = re[i].Timestamp
ral[i].TimeReceived = re[i].Timestamp
ral[i].Blocktime = re[i].Timestamp
ral[i].TxID = re[i].TxID
ral[i].BlockIndex = re[i].BlockHeight
ral[i].Account = account
}
return ral, nil
}
| um := chargeContract(contract)
var trans = &Transactions{
T | conditional_block |
transcation.go | package trx
import (
"crypto/ecdsa"
"encoding/json"
"fmt"
"math/big"
"time"
"tron/api"
"tron/common/base58"
"tron/common/hexutil"
"tron/core"
"tron/log"
"tron/service"
wallet "tron/util"
"github.com/ethereum/go-ethereum/common"
"github.com/golang/protobuf/proto"
"github.com/shopspring/decimal"
)
// at most 100 blocks per request
func getBlockWithHeights(start, end int64) error {
if end-start < 1 {
return nil
}
var node *service.GrpcClient
againblock:
if node != nil {
node.Conn.Close()
}
select {
case <-ctx.Done():
return nil
default:
}
node = getRandOneNode()
block, err := node.GetBlockByLimitNext(start, end)
if err != nil {
// rpc error: code = DeadlineExceeded desc = context deadline exceeded will get again
log.Warnf("node get bolck start %d end %d GetBlockByLimitNext err: %v will get again", start, end, err)
time.Sleep(time.Second * 5)
goto againblock
}
log.Infof("node get bolck start %d end %d length %d", start, end, len(block.Block))
if len(block.Block) < 1 {
log.Warnf("get bolck zero lenghth of block start %d end %d, will get again", start, end)
time.Sleep(time.Second * 5)
goto againblock
}
processBlocks(block)
node.Conn.Close()
return nil
}
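// Hypothetical usage sketch: getBlockWithHeights is presumably driven by a
// block-scanning loop elsewhere in the package. The loop below is illustrative
// only and is not part of the original file.
func exampleScanRange(from, to int64) {
	const step = 100 // GetBlockByLimitNext returns at most 100 blocks per call
	for start := from; start < to; start += step {
		end := start + step
		if end > to {
			end = to
		}
		_ = getBlockWithHeights(start, end)
	}
}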
func getBlockWithHeight(num int64) error {
node := getRandOneNode()
defer node.Conn.Close()
block, err := node.GetBlockByNum(num)
if err != nil {
return err
}
processBlock(block)
return nil
}
func processBlocks(blocks *api.BlockListExtention) {
for _, v := range blocks.Block {
processBlock(v)
}
}
func processBlo | i.BlockExtention) {
height := block.GetBlockHeader().GetRawData().GetNumber()
node := getRandOneNode()
defer node.Conn.Close()
for _, v := range block.Transactions {
// transaction.ret.contractRe
txid := hexutil.Encode(v.Txid)
// https://tronscan.org/#/transaction/fede1aa9e5c5d7bd179fd62e23bdd11e3c1edd0ca51e41070e34a026d6a42569
if v.Result == nil || !v.Result.Result {
continue
}
rets := v.Transaction.Ret
if len(rets) < 1 || rets[0].ContractRet != core.Transaction_Result_SUCCESS {
continue
}
//fmt.Println(txid)
log.Debugf("process block height %d txid %s", height, txid)
for _, v1 := range v.Transaction.RawData.Contract {
if v1.Type == core.Transaction_Contract_TransferContract { // plain transfer contract
// TRX transfer
unObj := &core.TransferContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, Trx, txid, form, to, height, unObj.GetAmount())
} else if v1.Type == core.Transaction_Contract_TriggerSmartContract { // smart contract call
// TRC-20 transfer
unObj := &core.TriggerSmartContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
// res, _ := json.Marshal(unObj)
// fmt.Println(string(res))
contract := base58.EncodeCheck(unObj.GetContractAddress())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
data := unObj.GetData()
// unObj.Data: see https://goethereumbook.org/en/transfer-tokens/ for the equivalent ETH handling
to, amount, flag := processTransferData(data)
if flag { // only a transfer(address,uint256) call counts as a transfer
processTransaction(node, contract, txid, form, to, height, amount)
}
} else if v1.Type == core.Transaction_Contract_TransferAssetContract { // token transfer contract
// TRC-10 transfer
unObj := &core.TransferAssetContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
contract := base58.EncodeCheck(unObj.GetAssetName())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, contract, txid, form, to, height, unObj.GetAmount())
}
}
}
}
// This map is currently unused; it only documents the TRC-20 method selectors and their signatures.
var mapFunctionTcc20 = map[string]string{
"a9059cbb": "transfer(address,uint256)",
"70a08231": "balanceOf(address)",
}
// transfer(address,uint256) call data layout (bytes / hex chars):
// a9059cbb                                                            4 / 8  method selector
// 00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98   32 / 64 to address (0x41-prefixed, left-padded)
// 0000000000000000000000000000000000000000000000000000000000989680   32 / 64 amount, big-endian
// parse transfer(address,uint256) call data
func processTransferData(trc20 []byte) (to string, amount int64, flag bool) {
if len(trc20) >= 68 {
fmt.Println(hexutil.Encode(trc20))
if hexutil.Encode(trc20[:4]) != "a9059cbb" {
return
}
// prepend the extra 0x41 Tron address prefix byte
trc20[15] = 65
to = base58.EncodeCheck(trc20[15:36])
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[36:68])).Int64()
flag = true
}
return
}
// build transfer(address,uint256) call data
func processTransferParameter(to string, amount int64) (data []byte) {
methodID, _ := hexutil.Decode("a9059cbb")
addr, _ := base58.DecodeCheck(to)
paddedAddress := common.LeftPadBytes(addr[1:], 32)
amountBig := new(big.Int).SetInt64(amount)
paddedAmount := common.LeftPadBytes(amountBig.Bytes(), 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
data = append(data, paddedAmount...)
return
}
// parse a balanceOf(address) result
func processBalanceOfData(trc20 []byte) (amount int64) {
if len(trc20) >= 32 {
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[0:32])).Int64()
}
return
}
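// Illustrative example (not called anywhere): decoding a balanceOf(address)
// return word with processBalanceOfData. The 32-byte word is a made-up value,
// and hexutil.Decode is assumed to accept the un-prefixed hex form used
// elsewhere in this file.
func exampleProcessBalanceOfData() {
	raw, _ := hexutil.Decode(
		"0000000000000000000000000000000000000000000000000000000000989680")
	fmt.Println(processBalanceOfData(raw)) // 10000000, e.g. 10 tokens with 6 decimals
}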
// build balanceOf(address) call data
func processBalanceOfParameter(addr string) (data []byte) {
methodID, _ := hexutil.Decode("70a08231")
add, _ := base58.DecodeCheck(addr)
paddedAddress := common.LeftPadBytes(add[1:], 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
return
}
func processTransaction(node *service.GrpcClient, contract, txid, from, to string, blockheight, amount int64) {
// is the contract supported?
if !IsContract(contract) {
return
}
// fmt.Printf("contract %s txid %s from %s to %s, blockheight %d amount %d \n",
// contract, txid, from, to, blockheight, amount)
var types string
if from == mainAddr { // withdrawal or internal relay
ac, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect // fee transfer
} else {
types = Send
}
} else if to == mainAddr { // collection (sweep) record
ac, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect
} else {
types = ReceiveOther
}
} else {
acf, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
act, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if act != nil { // deposit address
if acf != nil {
types = CollectOwn // in-platform transfer; should not happen for now
} else {
types = Receive
go collectall(to) // trigger a collection (sweep) check
}
} else {
if acf != nil {
types = CollectSend // transfer to an external address; unexpected
} else {
return // ignore; neither address belongs to the platform
}
}
}
// fee handling
transinfo, err := node.GetTransactionInfoById(txid)
var fee int64
if err != nil {
log.Error(err)
} else {
fee = transinfo.GetFee()
}
_, decimalnum := chargeContract(contract)
var trans = &Transactions{
TxID: txid,
Contract: contract,
Type: types,
BlockHeight: blockheight,
Amount: decimal.New(amount, -decimalnum).String(),
Fee: decimal.New(fee, -trxdecimal).String(),
Timestamp: time.Now().Unix(),
Address: to,
FromAddress: from,
}
_, err = dbengine.InsertTransactions(trans)
log.Infof("InsertTransactions %v err %v ", trans, err)
}
// TRX burned as the fee limit for contract transfers, in sun (default 5 TRX)
var feelimit int64 = 5000000
// send tokens or TRX
func send(key *ecdsa.PrivateKey, contract, to string, amount decimal.Decimal) (string, error) {
node := getRandOneNode()
defer node.Conn.Close()
typs, decimalnum := chargeContract(contract)
var amountdecimal = decimal.New(1, decimalnum)
amountac, _ := amount.Mul(amountdecimal).Float64()
switch typs {
case Trc10:
return node.TransferAsset(key, contract, to, int64(amountac))
case Trx:
return node.Transfer(key, to, int64(amountac))
case Trc20:
data := processTransferParameter(to, int64(amountac))
return node.TransferContract(key, contract, data, feelimit)
}
return "", fmt.Errorf("the type %s not support now", typs)
}
// outbound transfer (withdrawal)
func sendOut(contract, to string, amount decimal.Decimal) (string, error) {
return send(mainAccout, contract, to, amount)
}
// send fee TRX to an address
func sendFee(to string, amount decimal.Decimal) (string, error) {
return send(mainAccout, Trx, to, amount)
}
// collect (sweep) funds into the main address
func sendIn(contract, from string, amount decimal.Decimal) (string, error) {
var accout *ecdsa.PrivateKey
accout, err := loadAccount(from)
if err != nil {
return "", err
}
return send(accout, contract, mainAddr, amount)
}
// transaction history
func recentTransactions(contract, addr string, count, skip int) ([]wallet.Transactions, error) {
re, err := dbengine.GetTransactions(contract, addr, count, skip)
lens := len(re)
ral := make([]wallet.Transactions, lens)
if err != nil {
return ral, err
}
var account = "go-tron-" + contract + "-walletrpc"
for i := 0; i < lens; i++ {
ral[i].Address = re[i].Address
ral[i].FromAddress = re[i].FromAddress
ral[i].Fee = json.Number(re[i].Fee)
ral[i].Amount = json.Number(re[i].Amount)
ral[i].Category = re[i].Type
ral[i].Confirmations = blockHeightTop - re[i].BlockHeight + 1
ral[i].Time = re[i].Timestamp
ral[i].TimeReceived = re[i].Timestamp
ral[i].TxID = re[i].TxID
ral[i].BlockIndex = re[i].BlockHeight
ral[i].Account = account
}
return ral, nil
}
// collection (sweep) history
func collectTransactions(contract string, sTime, eTime int64) ([]wallet.SummaryData, error) {
re, err := dbengine.GetCollestTransactions(sTime, eTime, contract)
lens := len(re)
ral := make([]wallet.SummaryData, lens)
if err != nil {
return ral, err
}
var account = "go-tron-" + contract + "-walletrpc"
for i := 0; i < lens; i++ {
ral[i].Address = re[i].Address
ral[i].FromAddress = re[i].FromAddress
ral[i].Fee = re[i].Fee
ral[i].Amount = re[i].Amount
ral[i].Category = re[i].Type
ral[i].Time = re[i].Timestamp
ral[i].TimeReceived = re[i].Timestamp
ral[i].Blocktime = re[i].Timestamp
ral[i].TxID = re[i].TxID
ral[i].BlockIndex = re[i].BlockHeight
ral[i].Account = account
}
return ral, nil
}
| ck(block *ap | identifier_name |
fmt.rs | // Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
//! This module provides file formatting utilities using
//! [`dprint-plugin-typescript`](https://github.com/dprint/dprint-plugin-typescript).
//!
//! At the moment it is only consumed using CLI but in
//! the future it can be easily extended to provide
//! the same functions as ops available in JS runtime.
use crate::colors;
use crate::diff::diff;
use crate::file_watcher;
use crate::fs_util::{collect_files, get_extension, is_supported_ext_fmt};
use crate::text_encoding;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::futures;
use deno_core::futures::FutureExt;
use std::fs;
use std::io::stdin;
use std::io::stdout;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
const BOM_CHAR: char = '\u{FEFF}';
/// Format JavaScript/TypeScript files.
pub async fn format(
args: Vec<PathBuf>,
ignore: Vec<PathBuf>,
check: bool,
watch: bool,
) -> Result<(), AnyError> {
let target_file_resolver = || {
// collect the files that are to be formatted
collect_files(&args, &ignore, is_supported_ext_fmt).and_then(|files| {
if files.is_empty() {
Err(generic_error("No target files found."))
} else {
Ok(files)
}
})
};
let operation = |paths: Vec<PathBuf>| {
let config = get_typescript_config();
async move {
if check {
check_source_files(config, paths).await?;
} else {
format_source_files(config, paths).await?;
}
Ok(())
}
.boxed_local()
};
if watch {
file_watcher::watch_func(target_file_resolver, operation, "Fmt").await?;
} else {
operation(target_file_resolver()?).await?;
}
Ok(())
}
/// Formats markdown (using https://github.com/dprint/dprint-plugin-markdown) and its code blocks
/// (ts/tsx, js/jsx).
fn format_markdown(
file_text: &str,
ts_config: dprint_plugin_typescript::configuration::Configuration,
) -> Result<String, String> {
let md_config = get_markdown_config();
dprint_plugin_markdown::format_text(
&file_text,
&md_config,
Box::new(move |tag, text, line_width| {
let tag = tag.to_lowercase();
if matches!(
tag.as_str(),
"ts"
| "tsx"
| "js"
| "jsx"
| "javascript"
| "typescript"
| "json"
| "jsonc"
) {
// It's important to tell dprint proper file extension, otherwise
// it might parse the file twice.
let extension = match tag.as_str() {
"javascript" => "js",
"typescript" => "ts",
rest => rest,
};
if matches!(extension, "json" | "jsonc") {
let mut json_config = get_json_config();
json_config.line_width = line_width;
dprint_plugin_json::format_text(&text, &json_config)
} else {
let fake_filename =
PathBuf::from(format!("deno_fmt_stdin.{}", extension));
let mut codeblock_config = ts_config.clone();
codeblock_config.line_width = line_width;
dprint_plugin_typescript::format_text(
&fake_filename,
&text,
&codeblock_config,
)
}
} else {
Ok(text.to_string())
}
}),
)
}
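// Illustrative sketch (not part of the original module): how format_markdown is
// expected to treat a fenced ```ts block under the deno() defaults. The exact
// output string is indicative rather than verified against a specific dprint
// version, so the assertion is kept loose.
#[cfg(test)]
mod format_markdown_example {
  use super::*;

  #[test]
  fn formats_ts_fence() {
    let input = "```ts\nconst x = {a:1,b:2};\n```\n";
    let output = format_markdown(input, get_typescript_config()).unwrap();
    assert!(output.contains("{ a: 1, b: 2 }"));
  }
}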
/// Formats JSON and JSONC using the rules provided by .deno()
/// of configuration builder of https://github.com/dprint/dprint-plugin-json.
/// See https://git.io/Jt4ht for configuration.
fn format_json(file_text: &str) -> Result<String, String> {
let json_config = get_json_config();
dprint_plugin_json::format_text(&file_text, &json_config)
}
async fn check_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
// prevent threads outputting at the same time
let output_lock = Arc::new(Mutex::new(0));
run_parallelized(paths, {
let not_formatted_files_count = not_formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_text = read_file_contents(&file_path)?.text;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_text)
} else {
dprint_plugin_typescript::format_text(&file_path, &file_text, &config)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_text {
not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
let diff = diff(&file_text, &formatted_text);
info!("");
info!("{} {}:", colors::bold("from"), file_path.display());
info!("{}", diff);
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error checking: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let not_formatted_files_count =
not_formatted_files_count.load(Ordering::Relaxed);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
let checked_files_str =
format!("{} {}", checked_files_count, files_str(checked_files_count));
if not_formatted_files_count == 0 {
info!("Checked {}", checked_files_str);
Ok(())
} else {
let not_formatted_files_str = files_str(not_formatted_files_count);
Err(generic_error(format!(
"Found {} not formatted {} in {}",
not_formatted_files_count, not_formatted_files_str, checked_files_str,
)))
}
}
async fn format_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
let output_lock = Arc::new(Mutex::new(0)); // prevent threads outputting at the same time
run_parallelized(paths, {
let formatted_files_count = formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_contents = read_file_contents(&file_path)?;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_contents.text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_contents.text)
} else {
dprint_plugin_typescript::format_text(
&file_path,
&file_contents.text,
&config,
)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_contents.text {
write_file_contents(
&file_path,
FileContents {
had_bom: file_contents.had_bom,
text: formatted_text,
},
)?;
formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
info!("{}", file_path.to_string_lossy());
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error formatting: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let formatted_files_count = formatted_files_count.load(Ordering::Relaxed);
debug!(
"Formatted {} {}",
formatted_files_count,
files_str(formatted_files_count),
);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
info!(
"Checked {} {}",
checked_files_count,
files_str(checked_files_count)
);
Ok(())
}
/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
pub fn format_stdin(check: bool, ext: String) -> Result<(), AnyError> {
let mut source = String::new();
if stdin().read_to_string(&mut source).is_err() {
return Err(generic_error("Failed to read from stdin"));
}
let config = get_typescript_config();
let r = if ext.as_str() == "md" {
format_markdown(&source, config)
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&source)
} else {
// dprint will fall back to jsx parsing if parsing this as a .ts file doesn't work
dprint_plugin_typescript::format_text(
&PathBuf::from("_stdin.ts"),
&source,
&config,
)
};
match r {
Ok(formatted_text) => {
if check {
if formatted_text != source {
println!("Not formatted stdin");
}
} else {
stdout().write_all(formatted_text.as_bytes())?;
}
}
Err(e) => {
return Err(generic_error(e));
}
}
Ok(())
}
fn files_str(len: usize) -> &'static str {
if len <= 1 {
"file"
} else {
"files"
}
}
fn get_typescript_config(
) -> dprint_plugin_typescript::configuration::Configuration {
dprint_plugin_typescript::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_markdown_config() -> dprint_plugin_markdown::configuration::Configuration
{
dprint_plugin_markdown::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn | () -> dprint_plugin_json::configuration::Configuration {
dprint_plugin_json::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
struct FileContents {
text: String,
had_bom: bool,
}
fn read_file_contents(file_path: &Path) -> Result<FileContents, AnyError> {
let file_bytes = fs::read(&file_path)?;
let charset = text_encoding::detect_charset(&file_bytes);
let file_text = text_encoding::convert_to_utf8(&file_bytes, charset)?;
let had_bom = file_text.starts_with(BOM_CHAR);
let text = if had_bom {
// remove the BOM
String::from(&file_text[BOM_CHAR.len_utf8()..])
} else {
String::from(file_text)
};
Ok(FileContents { text, had_bom })
}
fn write_file_contents(
file_path: &Path,
file_contents: FileContents,
) -> Result<(), AnyError> {
let file_text = if file_contents.had_bom {
// add back the BOM
format!("{}{}", BOM_CHAR, file_contents.text)
} else {
file_contents.text
};
Ok(fs::write(file_path, file_text)?)
}
pub async fn run_parallelized<F>(
file_paths: Vec<PathBuf>,
f: F,
) -> Result<(), AnyError>
where
F: FnOnce(PathBuf) -> Result<(), AnyError> + Send + 'static + Clone,
{
let handles = file_paths.iter().map(|file_path| {
let f = f.clone();
let file_path = file_path.clone();
tokio::task::spawn_blocking(move || f(file_path))
});
let join_results = futures::future::join_all(handles).await;
// find the tasks that panicked and let the user know which files
let panic_file_paths = join_results
.iter()
.enumerate()
.filter_map(|(i, join_result)| {
join_result
.as_ref()
.err()
.map(|_| file_paths[i].to_string_lossy())
})
.collect::<Vec<_>>();
if !panic_file_paths.is_empty() {
panic!("Panic formatting: {}", panic_file_paths.join(", "))
}
// check for any errors and if so return the first one
let mut errors = join_results.into_iter().filter_map(|join_result| {
join_result
.ok()
.map(|handle_result| handle_result.err())
.flatten()
});
if let Some(e) = errors.next() {
Err(e)
} else {
Ok(())
}
}
| get_json_config | identifier_name |
fmt.rs | // Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
//! This module provides file formatting utilities using
//! [`dprint-plugin-typescript`](https://github.com/dprint/dprint-plugin-typescript).
//!
//! At the moment it is only consumed using CLI but in
//! the future it can be easily extended to provide
//! the same functions as ops available in JS runtime.
use crate::colors;
use crate::diff::diff;
use crate::file_watcher;
use crate::fs_util::{collect_files, get_extension, is_supported_ext_fmt};
use crate::text_encoding;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::futures;
use deno_core::futures::FutureExt;
use std::fs;
use std::io::stdin;
use std::io::stdout;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
const BOM_CHAR: char = '\u{FEFF}';
/// Format JavaScript/TypeScript files.
pub async fn format(
args: Vec<PathBuf>,
ignore: Vec<PathBuf>,
check: bool,
watch: bool,
) -> Result<(), AnyError> {
let target_file_resolver = || {
// collect the files that are to be formatted
collect_files(&args, &ignore, is_supported_ext_fmt).and_then(|files| {
if files.is_empty() {
Err(generic_error("No target files found."))
} else {
Ok(files)
}
})
};
let operation = |paths: Vec<PathBuf>| {
let config = get_typescript_config();
async move {
if check {
check_source_files(config, paths).await?;
} else {
format_source_files(config, paths).await?;
}
Ok(())
}
.boxed_local()
};
if watch {
file_watcher::watch_func(target_file_resolver, operation, "Fmt").await?;
} else {
operation(target_file_resolver()?).await?;
}
Ok(())
}
/// Formats markdown (using https://github.com/dprint/dprint-plugin-markdown) and its code blocks
/// (ts/tsx, js/jsx).
fn format_markdown(
file_text: &str,
ts_config: dprint_plugin_typescript::configuration::Configuration,
) -> Result<String, String> {
let md_config = get_markdown_config();
dprint_plugin_markdown::format_text(
&file_text,
&md_config,
Box::new(move |tag, text, line_width| {
let tag = tag.to_lowercase();
if matches!(
tag.as_str(),
"ts"
| "tsx"
| "js"
| "jsx"
| "javascript"
| "typescript"
| "json"
| "jsonc"
) {
// It's important to tell dprint proper file extension, otherwise
// it might parse the file twice.
let extension = match tag.as_str() {
"javascript" => "js",
"typescript" => "ts",
rest => rest,
};
if matches!(extension, "json" | "jsonc") {
let mut json_config = get_json_config();
json_config.line_width = line_width;
dprint_plugin_json::format_text(&text, &json_config)
} else {
let fake_filename =
PathBuf::from(format!("deno_fmt_stdin.{}", extension));
let mut codeblock_config = ts_config.clone();
codeblock_config.line_width = line_width;
dprint_plugin_typescript::format_text(
&fake_filename,
&text,
&codeblock_config,
)
}
} else {
Ok(text.to_string())
}
}),
)
}
/// Formats JSON and JSONC using the rules provided by .deno()
/// of configuration builder of https://github.com/dprint/dprint-plugin-json.
/// See https://git.io/Jt4ht for configuration.
fn format_json(file_text: &str) -> Result<String, String> {
let json_config = get_json_config();
dprint_plugin_json::format_text(&file_text, &json_config)
}
async fn check_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
// prevent threads outputting at the same time
let output_lock = Arc::new(Mutex::new(0));
run_parallelized(paths, {
let not_formatted_files_count = not_formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_text = read_file_contents(&file_path)?.text;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_text)
} else {
dprint_plugin_typescript::format_text(&file_path, &file_text, &config)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_text {
not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
let diff = diff(&file_text, &formatted_text);
info!("");
info!("{} {}:", colors::bold("from"), file_path.display());
info!("{}", diff);
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error checking: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let not_formatted_files_count =
not_formatted_files_count.load(Ordering::Relaxed);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
let checked_files_str =
format!("{} {}", checked_files_count, files_str(checked_files_count));
if not_formatted_files_count == 0 {
info!("Checked {}", checked_files_str);
Ok(())
} else {
let not_formatted_files_str = files_str(not_formatted_files_count);
Err(generic_error(format!(
"Found {} not formatted {} in {}",
not_formatted_files_count, not_formatted_files_str, checked_files_str,
)))
}
}
async fn format_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
let output_lock = Arc::new(Mutex::new(0)); // prevent threads outputting at the same time
run_parallelized(paths, {
let formatted_files_count = formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_contents = read_file_contents(&file_path)?;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_contents.text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_contents.text)
} else {
dprint_plugin_typescript::format_text(
&file_path,
&file_contents.text,
&config,
)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_contents.text {
write_file_contents(
&file_path,
FileContents {
had_bom: file_contents.had_bom,
text: formatted_text,
},
)?;
formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
info!("{}", file_path.to_string_lossy());
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error formatting: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let formatted_files_count = formatted_files_count.load(Ordering::Relaxed);
debug!(
"Formatted {} {}",
formatted_files_count,
files_str(formatted_files_count),
);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
info!(
"Checked {} {}",
checked_files_count,
files_str(checked_files_count)
);
Ok(())
}
/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
pub fn format_stdin(check: bool, ext: String) -> Result<(), AnyError> {
let mut source = String::new();
if stdin().read_to_string(&mut source).is_err() {
return Err(generic_error("Failed to read from stdin"));
}
let config = get_typescript_config();
let r = if ext.as_str() == "md" {
format_markdown(&source, config)
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&source)
} else {
// dprint will fall back to jsx parsing if parsing this as a .ts file doesn't work
dprint_plugin_typescript::format_text(
&PathBuf::from("_stdin.ts"),
&source,
&config,
)
};
match r {
Ok(formatted_text) => {
if check {
if formatted_text != source {
println!("Not formatted stdin");
}
} else {
stdout().write_all(formatted_text.as_bytes())?;
}
}
Err(e) => |
}
Ok(())
}
fn files_str(len: usize) -> &'static str {
if len <= 1 {
"file"
} else {
"files"
}
}
fn get_typescript_config(
) -> dprint_plugin_typescript::configuration::Configuration {
dprint_plugin_typescript::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_markdown_config() -> dprint_plugin_markdown::configuration::Configuration
{
dprint_plugin_markdown::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_json_config() -> dprint_plugin_json::configuration::Configuration {
dprint_plugin_json::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
struct FileContents {
text: String,
had_bom: bool,
}
fn read_file_contents(file_path: &Path) -> Result<FileContents, AnyError> {
let file_bytes = fs::read(&file_path)?;
let charset = text_encoding::detect_charset(&file_bytes);
let file_text = text_encoding::convert_to_utf8(&file_bytes, charset)?;
let had_bom = file_text.starts_with(BOM_CHAR);
let text = if had_bom {
// remove the BOM
String::from(&file_text[BOM_CHAR.len_utf8()..])
} else {
String::from(file_text)
};
Ok(FileContents { text, had_bom })
}
fn write_file_contents(
file_path: &Path,
file_contents: FileContents,
) -> Result<(), AnyError> {
let file_text = if file_contents.had_bom {
// add back the BOM
format!("{}{}", BOM_CHAR, file_contents.text)
} else {
file_contents.text
};
Ok(fs::write(file_path, file_text)?)
}
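// Illustrative sketch (not in the original file): the BOM handling performed by
// read_file_contents/write_file_contents above, demonstrated at the string level
// without touching the filesystem.
#[cfg(test)]
mod bom_roundtrip_example {
  use super::*;

  #[test]
  fn bom_strip_and_restore() {
    let original = format!("{}const a = 1;\n", BOM_CHAR);
    assert!(original.starts_with(BOM_CHAR));
    // strip the BOM the same way read_file_contents does
    let text = String::from(&original[BOM_CHAR.len_utf8()..]);
    // add it back the same way write_file_contents does
    let restored = format!("{}{}", BOM_CHAR, text);
    assert_eq!(original, restored);
  }
}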
pub async fn run_parallelized<F>(
file_paths: Vec<PathBuf>,
f: F,
) -> Result<(), AnyError>
where
F: FnOnce(PathBuf) -> Result<(), AnyError> + Send + 'static + Clone,
{
let handles = file_paths.iter().map(|file_path| {
let f = f.clone();
let file_path = file_path.clone();
tokio::task::spawn_blocking(move || f(file_path))
});
let join_results = futures::future::join_all(handles).await;
// find the tasks that panicked and let the user know which files
let panic_file_paths = join_results
.iter()
.enumerate()
.filter_map(|(i, join_result)| {
join_result
.as_ref()
.err()
.map(|_| file_paths[i].to_string_lossy())
})
.collect::<Vec<_>>();
if !panic_file_paths.is_empty() {
panic!("Panic formatting: {}", panic_file_paths.join(", "))
}
// check for any errors and if so return the first one
let mut errors = join_results.into_iter().filter_map(|join_result| {
join_result
.ok()
.map(|handle_result| handle_result.err())
.flatten()
});
if let Some(e) = errors.next() {
Err(e)
} else {
Ok(())
}
}
| {
return Err(generic_error(e));
} | conditional_block |
fmt.rs | // Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
//! This module provides file formatting utilities using
//! [`dprint-plugin-typescript`](https://github.com/dprint/dprint-plugin-typescript).
//!
//! At the moment it is only consumed using CLI but in
//! the future it can be easily extended to provide
//! the same functions as ops available in JS runtime.
use crate::colors;
use crate::diff::diff;
use crate::file_watcher;
use crate::fs_util::{collect_files, get_extension, is_supported_ext_fmt};
use crate::text_encoding;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::futures;
use deno_core::futures::FutureExt;
use std::fs;
use std::io::stdin;
use std::io::stdout;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
const BOM_CHAR: char = '\u{FEFF}';
/// Format JavaScript/TypeScript files.
pub async fn format(
args: Vec<PathBuf>,
ignore: Vec<PathBuf>,
check: bool,
watch: bool,
) -> Result<(), AnyError> {
let target_file_resolver = || {
// collect the files that are to be formatted
collect_files(&args, &ignore, is_supported_ext_fmt).and_then(|files| {
if files.is_empty() {
Err(generic_error("No target files found."))
} else {
Ok(files)
}
})
};
let operation = |paths: Vec<PathBuf>| {
let config = get_typescript_config();
async move {
if check {
check_source_files(config, paths).await?;
} else {
format_source_files(config, paths).await?;
}
Ok(())
}
.boxed_local()
};
if watch {
file_watcher::watch_func(target_file_resolver, operation, "Fmt").await?;
} else {
operation(target_file_resolver()?).await?;
} | /// Formats markdown (using https://github.com/dprint/dprint-plugin-markdown) and its code blocks
/// (ts/tsx, js/jsx).
fn format_markdown(
file_text: &str,
ts_config: dprint_plugin_typescript::configuration::Configuration,
) -> Result<String, String> {
let md_config = get_markdown_config();
dprint_plugin_markdown::format_text(
&file_text,
&md_config,
Box::new(move |tag, text, line_width| {
let tag = tag.to_lowercase();
if matches!(
tag.as_str(),
"ts"
| "tsx"
| "js"
| "jsx"
| "javascript"
| "typescript"
| "json"
| "jsonc"
) {
// It's important to tell dprint proper file extension, otherwise
// it might parse the file twice.
let extension = match tag.as_str() {
"javascript" => "js",
"typescript" => "ts",
rest => rest,
};
if matches!(extension, "json" | "jsonc") {
let mut json_config = get_json_config();
json_config.line_width = line_width;
dprint_plugin_json::format_text(&text, &json_config)
} else {
let fake_filename =
PathBuf::from(format!("deno_fmt_stdin.{}", extension));
let mut codeblock_config = ts_config.clone();
codeblock_config.line_width = line_width;
dprint_plugin_typescript::format_text(
&fake_filename,
&text,
&codeblock_config,
)
}
} else {
Ok(text.to_string())
}
}),
)
}
/// Formats JSON and JSONC using the rules provided by .deno()
/// of configuration builder of https://github.com/dprint/dprint-plugin-json.
/// See https://git.io/Jt4ht for configuration.
fn format_json(file_text: &str) -> Result<String, String> {
let json_config = get_json_config();
dprint_plugin_json::format_text(&file_text, &json_config)
}
async fn check_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
// prevent threads outputting at the same time
let output_lock = Arc::new(Mutex::new(0));
run_parallelized(paths, {
let not_formatted_files_count = not_formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_text = read_file_contents(&file_path)?.text;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_text)
} else {
dprint_plugin_typescript::format_text(&file_path, &file_text, &config)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_text {
not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
let diff = diff(&file_text, &formatted_text);
info!("");
info!("{} {}:", colors::bold("from"), file_path.display());
info!("{}", diff);
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error checking: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let not_formatted_files_count =
not_formatted_files_count.load(Ordering::Relaxed);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
let checked_files_str =
format!("{} {}", checked_files_count, files_str(checked_files_count));
if not_formatted_files_count == 0 {
info!("Checked {}", checked_files_str);
Ok(())
} else {
let not_formatted_files_str = files_str(not_formatted_files_count);
Err(generic_error(format!(
"Found {} not formatted {} in {}",
not_formatted_files_count, not_formatted_files_str, checked_files_str,
)))
}
}
async fn format_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
let output_lock = Arc::new(Mutex::new(0)); // prevent threads outputting at the same time
run_parallelized(paths, {
let formatted_files_count = formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_contents = read_file_contents(&file_path)?;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_contents.text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_contents.text)
} else {
dprint_plugin_typescript::format_text(
&file_path,
&file_contents.text,
&config,
)
};
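// Only rewrite files whose formatted output actually differs, preserving any
// BOM that was stripped when the file was read.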
match r {
Ok(formatted_text) => {
if formatted_text != file_contents.text {
write_file_contents(
&file_path,
FileContents {
had_bom: file_contents.had_bom,
text: formatted_text,
},
)?;
formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
info!("{}", file_path.to_string_lossy());
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error formatting: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let formatted_files_count = formatted_files_count.load(Ordering::Relaxed);
debug!(
"Formatted {} {}",
formatted_files_count,
files_str(formatted_files_count),
);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
info!(
"Checked {} {}",
checked_files_count,
files_str(checked_files_count)
);
Ok(())
}
/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
pub fn format_stdin(check: bool, ext: String) -> Result<(), AnyError> {
let mut source = String::new();
if stdin().read_to_string(&mut source).is_err() {
return Err(generic_error("Failed to read from stdin"));
}
let config = get_typescript_config();
let r = if ext.as_str() == "md" {
format_markdown(&source, config)
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&source)
} else {
// dprint will fall back to jsx parsing if parsing this as a .ts file doesn't work
dprint_plugin_typescript::format_text(
&PathBuf::from("_stdin.ts"),
&source,
&config,
)
};
match r {
Ok(formatted_text) => {
if check {
if formatted_text != source {
println!("Not formatted stdin");
}
} else {
stdout().write_all(formatted_text.as_bytes())?;
}
}
Err(e) => {
return Err(generic_error(e));
}
}
Ok(())
}
fn files_str(len: usize) -> &'static str {
if len <= 1 {
"file"
} else {
"files"
}
}
fn get_typescript_config(
) -> dprint_plugin_typescript::configuration::Configuration {
dprint_plugin_typescript::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_markdown_config() -> dprint_plugin_markdown::configuration::Configuration
{
dprint_plugin_markdown::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_json_config() -> dprint_plugin_json::configuration::Configuration {
dprint_plugin_json::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
struct FileContents {
text: String,
had_bom: bool,
}
fn read_file_contents(file_path: &Path) -> Result<FileContents, AnyError> {
let file_bytes = fs::read(&file_path)?;
let charset = text_encoding::detect_charset(&file_bytes);
let file_text = text_encoding::convert_to_utf8(&file_bytes, charset)?;
let had_bom = file_text.starts_with(BOM_CHAR);
let text = if had_bom {
// remove the BOM
String::from(&file_text[BOM_CHAR.len_utf8()..])
} else {
String::from(file_text)
};
Ok(FileContents { text, had_bom })
}
fn write_file_contents(
file_path: &Path,
file_contents: FileContents,
) -> Result<(), AnyError> {
let file_text = if file_contents.had_bom {
// add back the BOM
format!("{}{}", BOM_CHAR, file_contents.text)
} else {
file_contents.text
};
Ok(fs::write(file_path, file_text)?)
}
pub async fn run_parallelized<F>(
file_paths: Vec<PathBuf>,
f: F,
) -> Result<(), AnyError>
where
F: FnOnce(PathBuf) -> Result<(), AnyError> + Send + 'static + Clone,
{
let handles = file_paths.iter().map(|file_path| {
let f = f.clone();
let file_path = file_path.clone();
tokio::task::spawn_blocking(move || f(file_path))
});
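// spawn_blocking moves each formatting job onto tokio's blocking thread pool;
// join_all waits for every file before panics and errors are inspected below.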
let join_results = futures::future::join_all(handles).await;
// find the tasks that panicked and let the user know which files caused them
let panic_file_paths = join_results
.iter()
.enumerate()
.filter_map(|(i, join_result)| {
join_result
.as_ref()
.err()
.map(|_| file_paths[i].to_string_lossy())
})
.collect::<Vec<_>>();
if !panic_file_paths.is_empty() {
panic!("Panic formatting: {}", panic_file_paths.join(", "))
}
// check for any errors and if so return the first one
let mut errors = join_results.into_iter().filter_map(|join_result| {
join_result
.ok()
.map(|handle_result| handle_result.err())
.flatten()
});
if let Some(e) = errors.next() {
Err(e)
} else {
Ok(())
}
} |
Ok(())
}
| random_line_split |
fmt.rs | // Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
//! This module provides file formatting utilities using
//! [`dprint-plugin-typescript`](https://github.com/dprint/dprint-plugin-typescript).
//!
//! At the moment it is only consumed using CLI but in
//! the future it can be easily extended to provide
//! the same functions as ops available in JS runtime.
use crate::colors;
use crate::diff::diff;
use crate::file_watcher;
use crate::fs_util::{collect_files, get_extension, is_supported_ext_fmt};
use crate::text_encoding;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::futures;
use deno_core::futures::FutureExt;
use std::fs;
use std::io::stdin;
use std::io::stdout;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
const BOM_CHAR: char = '\u{FEFF}';
/// Format JavaScript/TypeScript files.
pub async fn format(
args: Vec<PathBuf>,
ignore: Vec<PathBuf>,
check: bool,
watch: bool,
) -> Result<(), AnyError> {
let target_file_resolver = || {
// collect the files that are to be formatted
collect_files(&args, &ignore, is_supported_ext_fmt).and_then(|files| {
if files.is_empty() {
Err(generic_error("No target files found."))
} else {
Ok(files)
}
})
};
let operation = |paths: Vec<PathBuf>| {
let config = get_typescript_config();
async move {
if check {
check_source_files(config, paths).await?;
} else {
format_source_files(config, paths).await?;
}
Ok(())
}
.boxed_local()
};
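// With --watch, the file watcher re-runs the resolver and operation on every
// change; otherwise the operation runs once over the initially resolved files.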
if watch {
file_watcher::watch_func(target_file_resolver, operation, "Fmt").await?;
} else {
operation(target_file_resolver()?).await?;
}
Ok(())
}
/// Formats markdown (using https://github.com/dprint/dprint-plugin-markdown) and its code blocks
/// (ts/tsx, js/jsx).
fn format_markdown(
file_text: &str,
ts_config: dprint_plugin_typescript::configuration::Configuration,
) -> Result<String, String> {
let md_config = get_markdown_config();
dprint_plugin_markdown::format_text(
&file_text,
&md_config,
Box::new(move |tag, text, line_width| {
let tag = tag.to_lowercase();
if matches!(
tag.as_str(),
"ts"
| "tsx"
| "js"
| "jsx"
| "javascript"
| "typescript"
| "json"
| "jsonc"
) {
// It's important to tell dprint the proper file extension; otherwise
// it might parse the file twice.
let extension = match tag.as_str() {
"javascript" => "js",
"typescript" => "ts",
rest => rest,
};
if matches!(extension, "json" | "jsonc") {
let mut json_config = get_json_config();
json_config.line_width = line_width;
dprint_plugin_json::format_text(&text, &json_config)
} else {
let fake_filename =
PathBuf::from(format!("deno_fmt_stdin.{}", extension));
let mut codeblock_config = ts_config.clone();
codeblock_config.line_width = line_width;
dprint_plugin_typescript::format_text(
&fake_filename,
&text,
&codeblock_config,
)
}
} else {
Ok(text.to_string())
}
}),
)
}
/// Formats JSON and JSONC using the rules provided by the `.deno()` method
/// of the configuration builder from https://github.com/dprint/dprint-plugin-json.
/// See https://git.io/Jt4ht for the configuration used.
fn format_json(file_text: &str) -> Result<String, String> {
let json_config = get_json_config();
dprint_plugin_json::format_text(&file_text, &json_config)
}
async fn check_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
// prevent threads outputting at the same time
let output_lock = Arc::new(Mutex::new(0));
run_parallelized(paths, {
let not_formatted_files_count = not_formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_text = read_file_contents(&file_path)?.text;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
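// Markdown and JSON(C) files are routed to their dedicated dprint plugins;
// everything else is handled by the TypeScript/JavaScript plugin.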
let r = if ext == "md" {
format_markdown(&file_text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_text)
} else {
dprint_plugin_typescript::format_text(&file_path, &file_text, &config)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_text {
not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
let diff = diff(&file_text, &formatted_text);
info!("");
info!("{} {}:", colors::bold("from"), file_path.display());
info!("{}", diff);
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error checking: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let not_formatted_files_count =
not_formatted_files_count.load(Ordering::Relaxed);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
let checked_files_str =
format!("{} {}", checked_files_count, files_str(checked_files_count));
if not_formatted_files_count == 0 {
info!("Checked {}", checked_files_str);
Ok(())
} else {
let not_formatted_files_str = files_str(not_formatted_files_count);
Err(generic_error(format!(
"Found {} not formatted {} in {}",
not_formatted_files_count, not_formatted_files_str, checked_files_str,
)))
}
}
async fn format_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
let output_lock = Arc::new(Mutex::new(0)); // prevent threads outputting at the same time
run_parallelized(paths, {
let formatted_files_count = formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_contents = read_file_contents(&file_path)?;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_contents.text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_contents.text)
} else {
dprint_plugin_typescript::format_text(
&file_path,
&file_contents.text,
&config,
)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_contents.text {
write_file_contents(
&file_path,
FileContents {
had_bom: file_contents.had_bom,
text: formatted_text,
},
)?;
formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
info!("{}", file_path.to_string_lossy());
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error formatting: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let formatted_files_count = formatted_files_count.load(Ordering::Relaxed);
debug!(
"Formatted {} {}",
formatted_files_count,
files_str(formatted_files_count),
);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
info!(
"Checked {} {}",
checked_files_count,
files_str(checked_files_count)
);
Ok(())
}
/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
pub fn format_stdin(check: bool, ext: String) -> Result<(), AnyError> {
let mut source = String::new();
if stdin().read_to_string(&mut source).is_err() {
return Err(generic_error("Failed to read from stdin"));
}
let config = get_typescript_config();
let r = if ext.as_str() == "md" {
format_markdown(&source, config)
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&source)
} else {
// dprint will fall back to jsx parsing if parsing this as a .ts file doesn't work
dprint_plugin_typescript::format_text(
&PathBuf::from("_stdin.ts"),
&source,
&config,
)
};
match r {
Ok(formatted_text) => {
if check {
if formatted_text != source {
println!("Not formatted stdin");
}
} else {
stdout().write_all(formatted_text.as_bytes())?;
}
}
Err(e) => {
return Err(generic_error(e));
}
}
Ok(())
}
fn files_str(len: usize) -> &'static str {
if len <= 1 {
"file"
} else {
"files"
}
}
fn get_typescript_config(
) -> dprint_plugin_typescript::configuration::Configuration {
dprint_plugin_typescript::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_markdown_config() -> dprint_plugin_markdown::configuration::Configuration
{
dprint_plugin_markdown::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_json_config() -> dprint_plugin_json::configuration::Configuration {
dprint_plugin_json::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
struct FileContents {
text: String,
had_bom: bool,
}
fn read_file_contents(file_path: &Path) -> Result<FileContents, AnyError> |
fn write_file_contents(
file_path: &Path,
file_contents: FileContents,
) -> Result<(), AnyError> {
let file_text = if file_contents.had_bom {
// add back the BOM
format!("{}{}", BOM_CHAR, file_contents.text)
} else {
file_contents.text
};
Ok(fs::write(file_path, file_text)?)
}
pub async fn run_parallelized<F>(
file_paths: Vec<PathBuf>,
f: F,
) -> Result<(), AnyError>
where
F: FnOnce(PathBuf) -> Result<(), AnyError> + Send + 'static + Clone,
{
let handles = file_paths.iter().map(|file_path| {
let f = f.clone();
let file_path = file_path.clone();
tokio::task::spawn_blocking(move || f(file_path))
});
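// spawn_blocking moves each formatting job onto tokio's blocking thread pool;
// join_all waits for every file before panics and errors are inspected below.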
let join_results = futures::future::join_all(handles).await;
// find the tasks that panicked and let the user know which files caused them
let panic_file_paths = join_results
.iter()
.enumerate()
.filter_map(|(i, join_result)| {
join_result
.as_ref()
.err()
.map(|_| file_paths[i].to_string_lossy())
})
.collect::<Vec<_>>();
if !panic_file_paths.is_empty() {
panic!("Panic formatting: {}", panic_file_paths.join(", "))
}
// check for any errors and if so return the first one
let mut errors = join_results.into_iter().filter_map(|join_result| {
join_result
.ok()
.map(|handle_result| handle_result.err())
.flatten()
});
if let Some(e) = errors.next() {
Err(e)
} else {
Ok(())
}
}
| {
let file_bytes = fs::read(&file_path)?;
let charset = text_encoding::detect_charset(&file_bytes);
let file_text = text_encoding::convert_to_utf8(&file_bytes, charset)?;
let had_bom = file_text.starts_with(BOM_CHAR);
let text = if had_bom {
// remove the BOM
String::from(&file_text[BOM_CHAR.len_utf8()..])
} else {
String::from(file_text)
};
Ok(FileContents { text, had_bom })
} | identifier_body |
publish.go | package publish
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"github.com/google/uuid"
"github.com/mholt/archiver/v3"
"github.com/opencontainers/go-digest"
"github.com/openshift/library-go/pkg/image/reference"
"github.com/openshift/library-go/pkg/image/registryclient"
"github.com/openshift/oc/pkg/cli/admin/catalog"
"github.com/openshift/oc/pkg/cli/admin/release"
"github.com/openshift/oc/pkg/cli/image/imagesource"
imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
imgmirror "github.com/openshift/oc/pkg/cli/image/mirror"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
kcmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/RedHatGov/bundle/pkg/archive"
"github.com/RedHatGov/bundle/pkg/config"
"github.com/RedHatGov/bundle/pkg/config/v1alpha1"
"github.com/RedHatGov/bundle/pkg/image"
"github.com/RedHatGov/bundle/pkg/metadata/storage"
)
type UuidError struct {
InUuid uuid.UUID
CurrUuid uuid.UUID
}
func (u *UuidError) Error() string {
return fmt.Sprintf("Mismatched UUIDs. Want %v, got %v", u.CurrUuid, u.InUuid)
}
type SequenceError struct {
inSeq int
CurrSeq int
}
func (s *SequenceError) Error() string {
return fmt.Sprintf("Bundle Sequence out of order. Current sequence %v, incoming sequence %v", s.CurrSeq, s.inSeq)
}
func (o *Options) | (ctx context.Context, cmd *cobra.Command, f kcmdutil.Factory) error {
logrus.Infof("Publishing image set from archive %q to registry %q", o.ArchivePath, o.ToMirror)
var currentMeta v1alpha1.Metadata
var incomingMeta v1alpha1.Metadata
a := archive.NewArchiver()
// Create workspace
tmpdir, err := ioutil.TempDir(o.Dir, "imageset")
if err != nil {
return err
}
if !o.SkipCleanup {
defer os.RemoveAll(tmpdir)
}
logrus.Debugf("Using temporary directory %s to unarchive metadata", tmpdir)
// Get file information from the source archives
filesInArchive, err := o.readImageSet(a)
if err != nil {
return err
}
// Extract incoming metadata
archive, ok := filesInArchive[config.MetadataFile]
if !ok {
return errors.New("metadata is not in archive")
}
logrus.Debug("Extracting incoming metadta")
if err := a.Extract(archive, config.MetadataBasePath, tmpdir); err != nil {
return err
}
// Create backend for o.Dir
backend, err := storage.NewLocalBackend(o.Dir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Create a local workspace backend
workspace, err := storage.NewLocalBackend(tmpdir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Check for existing metadata. Metadata will be extracted before
// the extraction of the archive so imageset mismatches can
// be handled before the longer unarchiving process
existingMeta := filepath.Join(o.Dir, config.MetadataBasePath)
if _, err := os.Stat(existingMeta); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return err
}
logrus.Infof("No existing metadata found. Setting up new workspace")
// Find first file and load metadata from that
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
} else {
// Compare metadata UID and sequence number
if err := backend.ReadMetadata(ctx, ¤tMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading current metadata: %v", err)
}
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
logrus.Debug("Checking metadata UID")
if incomingMeta.MetadataSpec.Uid != currentMeta.MetadataSpec.Uid {
return &UuidError{currentMeta.MetadataSpec.Uid, incomingMeta.MetadataSpec.Uid}
}
logrus.Debug("Check metadata sequence number")
currRun := currentMeta.PastMirrors[len(currentMeta.PastMirrors)-1]
incomingRun := incomingMeta.PastMirrors[len(incomingMeta.PastMirrors)-1]
if incomingRun.Sequence != (currRun.Sequence + 1) {
return &SequenceError{incomingRun.Sequence, currRun.Sequence}
}
}
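// The sequence check enforces strictly consecutive imagesets: differential
// archives only carry content produced since the previous run, so skipping a
// sequence number could leave gaps in the mirror.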
// Unarchive full imageset after metadata checks
if err := o.unpackImageSet(a, o.Dir); err != nil {
return err
}
// Load image associations to find layers not present locally.
assocPath := filepath.Join(o.Dir, config.AssociationsBasePath)
assocs, err := readAssociations(assocPath)
if err != nil {
return fmt.Errorf("error reading associations from %s: %v", o.Dir, err)
}
toMirrorRef, err := imagesource.ParseReference(o.ToMirror)
if err != nil {
return fmt.Errorf("error parsing mirror registry %q: %v", o.ToMirror, err)
}
logrus.Debugf("mirror reference: %#v", toMirrorRef)
if toMirrorRef.Type != imagesource.DestinationRegistry {
return fmt.Errorf("destination %q must be a registry reference", o.ToMirror)
}
var (
errs []error
// Mappings for mirroring image types.
genericMappings []imgmirror.Mapping
releaseMappings []imgmirror.Mapping
catalogMappings []imgmirror.Mapping
// Map of remote layer digest to the set of paths they should be fetched to.
missingLayers = map[string][]string{}
)
for imageName, assoc := range assocs {
assoc := assoc
logrus.Debugf("reading assoc: %s", assoc.Name)
// All manifest layers will be pulled below if associated,
// so just sanity-check that the layers are referenced in some association.
if len(assoc.ManifestDigests) != 0 {
for _, manifestDigest := range assoc.ManifestDigests {
if _, hasManifest := assocs[manifestDigest]; !hasManifest {
errs = append(errs, fmt.Errorf("image %q: expected associations to have manifest %s but was not found", imageName, manifestDigest))
}
}
}
for _, layerDigest := range assoc.LayerDigests {
logrus.Debugf("Found layer %v for image %s", layerDigest, imageName)
// Construct blob path, which is adjacent to the manifests path.
imageBlobPath := filepath.Join(o.Dir, "v2", assoc.Path, "blobs", layerDigest)
blobPath := filepath.Join(o.Dir, "blobs", layerDigest)
switch _, err := os.Stat(blobPath); {
case err == nil:
// If a layer exists in the archive, simply copy it to the blob path
// adjacent to its parent manifest.
if src, err := os.Open(blobPath); err == nil {
err = copyBlobFile(src, imageBlobPath)
if err := src.Close(); err != nil {
logrus.Error(err)
}
} else {
err = fmt.Errorf("error opening existing blob file: %v", err)
}
case errors.Is(err, os.ErrNotExist):
// Image layer must exist in the mirror registry since it wasn't archived,
// so fetch the layer and place it in the blob dir so it can be mirrored by `oc`.
missingLayers[layerDigest] = append(missingLayers[layerDigest], imageBlobPath)
default:
err = fmt.Errorf("accessing image %q blob %q at %s: %v", imageName, layerDigest, blobPath, err)
}
if err != nil {
errs = append(errs, err)
}
}
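// Build a file:// source mapping for this association so `oc` mirrors it from
// the unpacked archive layout rather than from a remote registry.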
m := imgmirror.Mapping{Name: assoc.Name}
if m.Source, err = imagesource.ParseReference("file://" + assoc.Path); err != nil {
errs = append(errs, fmt.Errorf("error parsing source ref %q: %v", assoc.Path, err))
continue
}
// The mirrorer is not a fan of accepting an image ID when a tag symlink is available
// for some reason.
// TODO(estroz): investigate the cause of this behavior.
if assoc.TagSymlink == "" {
m.Source.Ref.ID = assoc.ID
} else {
m.Source.Ref.Tag = assoc.TagSymlink
}
m.Destination = toMirrorRef
m.Destination.Ref.Namespace = m.Source.Ref.Namespace
m.Destination.Ref.Name = m.Source.Ref.Name
m.Destination.Ref.Tag = m.Source.Ref.Tag
m.Destination.Ref.ID = m.Source.Ref.ID
switch assoc.Type {
case image.TypeGeneric:
genericMappings = append(genericMappings, m)
case image.TypeOCPRelease:
m.Destination.Ref.Tag = ""
m.Destination.Ref.ID = ""
// Only add top level release images to
// release mapping
if strings.Contains(assoc.Name, "ocp-release") {
releaseMappings = append(releaseMappings, m)
}
case image.TypeOperatorCatalog:
catalogMappings = append(catalogMappings, m)
case image.TypeOperatorBundle, image.TypeOperatorRelatedImage:
// Let the `catalog mirror` API call mirror all bundle and related images in the catalog.
// TODO(estroz): this may be incorrect if bundle and related images not in a catalog can be archived,
// ex. as an additional image. Can probably get around this by mirroring
// images of this type not mapped by preceding `catalog mirror` calls.
case image.TypeInvalid:
errs = append(errs, fmt.Errorf("image %q: image type is not set", imageName))
default:
errs = append(errs, fmt.Errorf("image %q: invalid image type %v", imageName, assoc.Type))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
if len(missingLayers) != 0 {
// Fetch all layers and mount them at the specified paths.
if err := o.fetchBlobs(ctx, incomingMeta, catalogMappings, missingLayers); err != nil {
return err
}
}
// Now that all layers have been pulled, symlink all tagged manifests to their digest files.
for _, assoc := range assocs {
if assoc.TagSymlink == "" {
continue
}
manifestsPath := filepath.Join(o.Dir, "v2", assoc.Path, "manifests")
srcPath := filepath.Join(manifestsPath, assoc.ID)
dstPath := filepath.Join(manifestsPath, assoc.TagSymlink)
if _, err := os.Stat(dstPath); err == nil || errors.Is(err, os.ErrExist) {
logrus.Debugf("image %s: tag %s symlink for manifest %s already exists", assoc.Name, assoc.TagSymlink, assoc.ID)
continue
}
if err := os.Symlink(srcPath, dstPath); err != nil {
errs = append(errs, fmt.Errorf("error symlinking manifest digest %q to tag %q: %v", assoc.ID, assoc.TagSymlink, err))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
// import imagecontentsourcepolicy
logrus.Info("ICSP importing not implemented")
// import catalogsource
logrus.Info("CatalogSource importing not implemented")
// Mirror all file sources of each available image type to mirror registry.
if len(genericMappings) != 0 {
if logrus.IsLevelEnabled(logrus.DebugLevel) {
var srcs []string
for _, m := range genericMappings {
srcs = append(srcs, m.Source.String())
}
logrus.Debugf("mirroring generic images: %q", srcs)
}
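// KeepManifestList preserves multi-arch manifest lists, and the ".*" OS filter
// keeps every platform-specific manifest when mirroring from disk.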
genOpts := imgmirror.NewMirrorImageOptions(o.IOStreams)
genOpts.Mappings = genericMappings
genOpts.DryRun = o.DryRun
genOpts.FromFileDir = o.Dir
genOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
genOpts.SkipMultipleScopes = true
genOpts.KeepManifestList = true
genOpts.SecurityOptions.Insecure = o.SkipTLS
if err := genOpts.Validate(); err != nil {
return fmt.Errorf("invalid image mirror options: %v", err)
}
if err := genOpts.Run(); err != nil {
return fmt.Errorf("error running generic image mirror: %v", err)
}
}
for _, m := range releaseMappings {
logrus.Debugf("mirroring release image: %s", m.Source.String())
relOpts := release.NewMirrorOptions(o.IOStreams)
relOpts.From = m.Source.String()
relOpts.FromDir = o.Dir
relOpts.To = m.Destination.String()
relOpts.SecurityOptions.Insecure = o.SkipTLS
relOpts.DryRun = o.DryRun
if err := relOpts.Complete(cmd, f, nil); err != nil {
return fmt.Errorf("error initializing release mirror options: %v", err)
}
if err := relOpts.Validate(); err != nil {
return fmt.Errorf("invalid release mirror options: %v", err)
}
if err := relOpts.Run(); err != nil {
return fmt.Errorf("error running %q release mirror: %v", m, err)
}
}
// Change to the working dir since catalog mirroring does not respect
// FileDir in the "expected" manner (unclear why).
wd, err := os.Getwd()
if err != nil {
return err
}
if err := os.Chdir(o.Dir); err != nil {
return err
}
defer func() {
if err := os.Chdir(wd); err != nil {
logrus.Error(err)
}
}()
for _, m := range catalogMappings {
logrus.Debugf("mirroring catalog image: %s", m.Source)
catOpts := catalog.NewMirrorCatalogOptions(o.IOStreams)
catOpts.DryRun = o.DryRun
catOpts.MaxPathComponents = 2
catOpts.SecurityOptions.Insecure = o.SkipTLS
catOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
args := []string{
m.Source.String(),
o.ToMirror,
}
if err := catOpts.Complete(&cobra.Command{}, args); err != nil {
return fmt.Errorf("error constructing catalog options: %v", err)
}
if err := catOpts.Validate(); err != nil {
return fmt.Errorf("invalid catalog mirror options: %v", err)
}
if err := catOpts.Run(); err != nil {
return fmt.Errorf("error mirroring catalog: %v", err)
}
}
if err := os.Chdir(wd); err != nil {
return err
}
// install imagecontentsourcepolicy
logrus.Info("ICSP creation not implemented")
// install catalogsource
logrus.Info("CatalogSource creation not implemented")
// Replace old metadata with new metadata
if err := backend.WriteMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return err
}
return nil
}
// readAssociations will process and return data from the image associations file
func readAssociations(assocPath string) (assocs image.Associations, err error) {
f, err := os.Open(assocPath)
if err != nil {
return assocs, fmt.Errorf("error opening image associations file: %v", err)
}
defer f.Close()
return assocs, assocs.Decode(f)
}
// unpackImageSet unarchives all provided tar archives
func (o *Options) unpackImageSet(a archive.Archiver, dest string) error {
file, err := os.Stat(o.ArchivePath)
if err != nil {
return err
}
if file.IsDir() {
err = filepath.Walk(o.ArchivePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("traversing %s: %v", path, err)
}
if info == nil {
return fmt.Errorf("no file info")
}
extension := filepath.Ext(path)
extension = strings.TrimPrefix(extension, ".")
if extension == a.String() {
logrus.Debugf("Extracting archive %s", path)
if err := a.Unarchive(path, dest); err != nil {
return err
}
}
return nil
})
} else {
logrus.Infof("Extracting archive %s", o.ArchivePath)
if err := a.Unarchive(o.ArchivePath, dest); err != nil {
return err
}
}
return err
}
// readImageSet creates a map of all the files located in the archives
func (o *Options) readImageSet(a archive.Archiver) (map[string]string, error) {
filesinArchive := make(map[string]string)
file, err := os.Stat(o.ArchivePath)
if err != nil {
return nil, err
}
if file.IsDir() {
// Walk the directory and load the files from the archives
// into the map
logrus.Infoln("Detected multiple archive files")
err = filepath.Walk(o.ArchivePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("traversing %s: %v", path, err)
}
if info == nil {
return fmt.Errorf("no file info")
}
extension := filepath.Ext(path)
extension = strings.TrimPrefix(extension, ".")
if extension == a.String() {
logrus.Debugf("Found archive %s", path)
return a.Walk(path, func(f archiver.File) error {
filesinArchive[f.Name()] = path
return nil
})
}
return nil
})
} else {
// Walk the archive and load the file names into the map
err = a.Walk(o.ArchivePath, func(f archiver.File) error {
filesinArchive[f.Name()] = o.ArchivePath
return nil
})
}
return filesinArchive, err
}
// TODO(estroz): symlink blobs instead of copying them to avoid data duplication.
// `oc` mirror libs should be able to follow these symlinks.
func copyBlobFile(src io.Reader, dstPath string) error {
logrus.Debugf("copying blob to %s", dstPath)
if err := os.MkdirAll(filepath.Dir(dstPath), os.ModePerm); err != nil {
return err
}
dst, err := os.OpenFile(dstPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error creating blob file: %v", err)
}
defer dst.Close()
if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("error copying blob %q: %v", filepath.Base(dstPath), err)
}
return nil
}
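// fetchBlobs resolves which registry repository each missing layer belongs to,
// using the blob metadata recorded at bundle time, and downloads it into every
// blob directory that references it.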
func (o *Options) fetchBlobs(ctx context.Context, meta v1alpha1.Metadata, mappings []imgmirror.Mapping, missingLayers map[string][]string) error {
catalogNamespaceNames := []string{}
for _, m := range mappings {
dstRef := m.Destination.Ref
catalogNamespaceNames = append(catalogNamespaceNames, path.Join(dstRef.Namespace, dstRef.Name))
}
blobResources := map[string]string{}
for _, blob := range meta.PastBlobs {
resource := blob.NamespaceName
for _, nsName := range catalogNamespaceNames {
if nsName == resource {
// Blob is associated with the catalog image itself.
blobResources[blob.ID] = nsName
continue
}
suffix := strings.TrimPrefix(resource, nsName+"/")
if suffix == resource {
// Blob is not a child of the catalog image in nsName.
continue
}
// Blob may belong to multiple images.
if _, seenBlob := blobResources[blob.ID]; !seenBlob {
blobResources[blob.ID] = suffix
continue
}
}
}
restctx, err := config.CreateContext(nil, false, o.SkipTLS)
if err != nil {
return err
}
var errs []error
for layerDigest, dstBlobPaths := range missingLayers {
resource, hasResource := blobResources[layerDigest]
if !hasResource {
errs = append(errs, fmt.Errorf("layer %s: no registry resource path found", layerDigest))
continue
}
if err := o.fetchBlob(ctx, restctx, resource, layerDigest, dstBlobPaths); err != nil {
errs = append(errs, fmt.Errorf("layer %s: %v", layerDigest, err))
continue
}
}
return utilerrors.NewAggregate(errs)
}
// fetchBlob fetches a blob at <o.ToMirror>/<resource>/blobs/<layerDigest>
// then copies it to each path in dstPaths.
func (o *Options) fetchBlob(ctx context.Context, restctx *registryclient.Context, resource, layerDigest string, dstPaths []string) error {
refStr := path.Join(o.ToMirror, resource)
ref, err := reference.Parse(refStr)
if err != nil {
return fmt.Errorf("parse ref %s: %v", refStr, err)
}
logrus.Debugf("copying blob %s from %s", layerDigest, ref.Exact())
repo, err := restctx.RepositoryForRef(ctx, ref, o.SkipTLS)
if err != nil {
return fmt.Errorf("create repo for %s: %v", ref, err)
}
dgst, err := digest.Parse(layerDigest)
if err != nil {
return err
}
rc, err := repo.Blobs(ctx).Open(ctx, dgst)
if err != nil {
return fmt.Errorf("open blob: %v", err)
}
defer rc.Close()
for _, dstPath := range dstPaths {
if err := copyBlobFile(rc, dstPath); err != nil {
return fmt.Errorf("copy blob: %v", err)
}
if _, err := rc.Seek(0, 0); err != nil {
return fmt.Errorf("seek to start of blob: %v", err)
}
}
return nil
}
| Run | identifier_name |
publish.go | package publish
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"github.com/google/uuid"
"github.com/mholt/archiver/v3"
"github.com/opencontainers/go-digest"
"github.com/openshift/library-go/pkg/image/reference"
"github.com/openshift/library-go/pkg/image/registryclient"
"github.com/openshift/oc/pkg/cli/admin/catalog"
"github.com/openshift/oc/pkg/cli/admin/release"
"github.com/openshift/oc/pkg/cli/image/imagesource"
imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
imgmirror "github.com/openshift/oc/pkg/cli/image/mirror"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
kcmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/RedHatGov/bundle/pkg/archive"
"github.com/RedHatGov/bundle/pkg/config"
"github.com/RedHatGov/bundle/pkg/config/v1alpha1"
"github.com/RedHatGov/bundle/pkg/image"
"github.com/RedHatGov/bundle/pkg/metadata/storage"
)
type UuidError struct {
InUuid uuid.UUID
CurrUuid uuid.UUID
}
func (u *UuidError) Error() string {
return fmt.Sprintf("Mismatched UUIDs. Want %v, got %v", u.CurrUuid, u.InUuid)
}
type SequenceError struct {
inSeq int
CurrSeq int
}
func (s *SequenceError) Error() string |
func (o *Options) Run(ctx context.Context, cmd *cobra.Command, f kcmdutil.Factory) error {
logrus.Infof("Publishing image set from archive %q to registry %q", o.ArchivePath, o.ToMirror)
var currentMeta v1alpha1.Metadata
var incomingMeta v1alpha1.Metadata
a := archive.NewArchiver()
// Create workspace
tmpdir, err := ioutil.TempDir(o.Dir, "imageset")
if err != nil {
return err
}
if !o.SkipCleanup {
defer os.RemoveAll(tmpdir)
}
logrus.Debugf("Using temporary directory %s to unarchive metadata", tmpdir)
// Get file information from the source archives
filesInArchive, err := o.readImageSet(a)
if err != nil {
return err
}
// Extract incoming metadata
archive, ok := filesInArchive[config.MetadataFile]
if !ok {
return errors.New("metadata is not in archive")
}
logrus.Debug("Extracting incoming metadta")
if err := a.Extract(archive, config.MetadataBasePath, tmpdir); err != nil {
return err
}
// Create backend for o.Dir
backend, err := storage.NewLocalBackend(o.Dir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Create a local workspace backend
workspace, err := storage.NewLocalBackend(tmpdir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Check for existing metadata. Metadata will be extracted before
// the extraction of the archive so imageset mismatches can
// be handled before the longer unarchiving process
existingMeta := filepath.Join(o.Dir, config.MetadataBasePath)
if _, err := os.Stat(existingMeta); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return err
}
logrus.Infof("No existing metadata found. Setting up new workspace")
// Find first file and load metadata from that
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
} else {
// Compare metadata UID and sequence number
if err := backend.ReadMetadata(ctx, ¤tMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading current metadata: %v", err)
}
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
logrus.Debug("Checking metadata UID")
if incomingMeta.MetadataSpec.Uid != currentMeta.MetadataSpec.Uid {
return &UuidError{currentMeta.MetadataSpec.Uid, incomingMeta.MetadataSpec.Uid}
}
logrus.Debug("Check metadata sequence number")
currRun := currentMeta.PastMirrors[len(currentMeta.PastMirrors)-1]
incomingRun := incomingMeta.PastMirrors[len(incomingMeta.PastMirrors)-1]
if incomingRun.Sequence != (currRun.Sequence + 1) {
return &SequenceError{incomingRun.Sequence, currRun.Sequence}
}
}
// Unarchive full imageset after metadata checks
if err := o.unpackImageSet(a, o.Dir); err != nil {
return err
}
// Load image associations to find layers not present locally.
assocPath := filepath.Join(o.Dir, config.AssociationsBasePath)
assocs, err := readAssociations(assocPath)
if err != nil {
return fmt.Errorf("error reading associations from %s: %v", o.Dir, err)
}
toMirrorRef, err := imagesource.ParseReference(o.ToMirror)
if err != nil {
return fmt.Errorf("error parsing mirror registry %q: %v", o.ToMirror, err)
}
logrus.Debugf("mirror reference: %#v", toMirrorRef)
if toMirrorRef.Type != imagesource.DestinationRegistry {
return fmt.Errorf("destination %q must be a registry reference", o.ToMirror)
}
var (
errs []error
// Mappings for mirroring image types.
genericMappings []imgmirror.Mapping
releaseMappings []imgmirror.Mapping
catalogMappings []imgmirror.Mapping
// Map of remote layer digest to the set of paths they should be fetched to.
missingLayers = map[string][]string{}
)
for imageName, assoc := range assocs {
assoc := assoc
logrus.Debugf("reading assoc: %s", assoc.Name)
// All manifest layers will be pulled below if associated,
// so just sanity-check that the layers are referenced in some association.
if len(assoc.ManifestDigests) != 0 {
for _, manifestDigest := range assoc.ManifestDigests {
if _, hasManifest := assocs[manifestDigest]; !hasManifest {
errs = append(errs, fmt.Errorf("image %q: expected associations to have manifest %s but was not found", imageName, manifestDigest))
}
}
}
for _, layerDigest := range assoc.LayerDigests {
logrus.Debugf("Found layer %v for image %s", layerDigest, imageName)
// Construct blob path, which is adjacent to the manifests path.
imageBlobPath := filepath.Join(o.Dir, "v2", assoc.Path, "blobs", layerDigest)
blobPath := filepath.Join(o.Dir, "blobs", layerDigest)
switch _, err := os.Stat(blobPath); {
case err == nil:
// If a layer exists in the archive, simply copy it to the blob path
// adjacent to its parent manifest.
if src, err := os.Open(blobPath); err == nil {
err = copyBlobFile(src, imageBlobPath)
if err := src.Close(); err != nil {
logrus.Error(err)
}
} else {
err = fmt.Errorf("error opening existing blob file: %v", err)
}
case errors.Is(err, os.ErrNotExist):
// Image layer must exist in the mirror registry since it wasn't archived,
// so fetch the layer and place it in the blob dir so it can be mirrored by `oc`.
missingLayers[layerDigest] = append(missingLayers[layerDigest], imageBlobPath)
default:
err = fmt.Errorf("accessing image %q blob %q at %s: %v", imageName, layerDigest, blobPath, err)
}
if err != nil {
errs = append(errs, err)
}
}
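// Build a file:// source mapping for this association so `oc` mirrors it from
// the unpacked archive layout rather than from a remote registry.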
m := imgmirror.Mapping{Name: assoc.Name}
if m.Source, err = imagesource.ParseReference("file://" + assoc.Path); err != nil {
errs = append(errs, fmt.Errorf("error parsing source ref %q: %v", assoc.Path, err))
continue
}
// The mirrorer is not a fan of accepting an image ID when a tag symlink is available
// for some reason.
// TODO(estroz): investigate the cause of this behavior.
if assoc.TagSymlink == "" {
m.Source.Ref.ID = assoc.ID
} else {
m.Source.Ref.Tag = assoc.TagSymlink
}
m.Destination = toMirrorRef
m.Destination.Ref.Namespace = m.Source.Ref.Namespace
m.Destination.Ref.Name = m.Source.Ref.Name
m.Destination.Ref.Tag = m.Source.Ref.Tag
m.Destination.Ref.ID = m.Source.Ref.ID
switch assoc.Type {
case image.TypeGeneric:
genericMappings = append(genericMappings, m)
case image.TypeOCPRelease:
m.Destination.Ref.Tag = ""
m.Destination.Ref.ID = ""
// Only add top level release images to
// release mapping
if strings.Contains(assoc.Name, "ocp-release") {
releaseMappings = append(releaseMappings, m)
}
case image.TypeOperatorCatalog:
catalogMappings = append(catalogMappings, m)
case image.TypeOperatorBundle, image.TypeOperatorRelatedImage:
// Let the `catalog mirror` API call mirror all bundle and related images in the catalog.
// TODO(estroz): this may be incorrect if bundle and related images not in a catalog can be archived,
// ex. as an additional image. Can probably get around this by mirroring
// images of this type not mapped by preceding `catalog mirror` calls.
case image.TypeInvalid:
errs = append(errs, fmt.Errorf("image %q: image type is not set", imageName))
default:
errs = append(errs, fmt.Errorf("image %q: invalid image type %v", imageName, assoc.Type))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
if len(missingLayers) != 0 {
// Fetch all layers and mount them at the specified paths.
if err := o.fetchBlobs(ctx, incomingMeta, catalogMappings, missingLayers); err != nil {
return err
}
}
// Now that all layers have been pulled, symlink all tagged manifests to their digest files.
for _, assoc := range assocs {
if assoc.TagSymlink == "" {
continue
}
manifestsPath := filepath.Join(o.Dir, "v2", assoc.Path, "manifests")
srcPath := filepath.Join(manifestsPath, assoc.ID)
dstPath := filepath.Join(manifestsPath, assoc.TagSymlink)
if _, err := os.Stat(dstPath); err == nil || errors.Is(err, os.ErrExist) {
logrus.Debugf("image %s: tag %s symlink for manifest %s already exists", assoc.Name, assoc.TagSymlink, assoc.ID)
continue
}
if err := os.Symlink(srcPath, dstPath); err != nil {
errs = append(errs, fmt.Errorf("error symlinking manifest digest %q to tag %q: %v", assoc.ID, assoc.TagSymlink, err))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
// import imagecontentsourcepolicy
logrus.Info("ICSP importing not implemented")
// import catalogsource
logrus.Info("CatalogSource importing not implemented")
// Mirror all file sources of each available image type to mirror registry.
if len(genericMappings) != 0 {
if logrus.IsLevelEnabled(logrus.DebugLevel) {
var srcs []string
for _, m := range genericMappings {
srcs = append(srcs, m.Source.String())
}
logrus.Debugf("mirroring generic images: %q", srcs)
}
genOpts := imgmirror.NewMirrorImageOptions(o.IOStreams)
genOpts.Mappings = genericMappings
genOpts.DryRun = o.DryRun
genOpts.FromFileDir = o.Dir
genOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
genOpts.SkipMultipleScopes = true
genOpts.KeepManifestList = true
genOpts.SecurityOptions.Insecure = o.SkipTLS
if err := genOpts.Validate(); err != nil {
return fmt.Errorf("invalid image mirror options: %v", err)
}
if err := genOpts.Run(); err != nil {
return fmt.Errorf("error running generic image mirror: %v", err)
}
}
for _, m := range releaseMappings {
logrus.Debugf("mirroring release image: %s", m.Source.String())
relOpts := release.NewMirrorOptions(o.IOStreams)
relOpts.From = m.Source.String()
relOpts.FromDir = o.Dir
relOpts.To = m.Destination.String()
relOpts.SecurityOptions.Insecure = o.SkipTLS
relOpts.DryRun = o.DryRun
if err := relOpts.Complete(cmd, f, nil); err != nil {
return fmt.Errorf("error initializing release mirror options: %v", err)
}
if err := relOpts.Validate(); err != nil {
return fmt.Errorf("invalid release mirror options: %v", err)
}
if err := relOpts.Run(); err != nil {
return fmt.Errorf("error running %q release mirror: %v", m, err)
}
}
// Change to the working dir since catalog mirroring does not respect
// FileDir in the "expected" manner (unclear why).
wd, err := os.Getwd()
if err != nil {
return err
}
if err := os.Chdir(o.Dir); err != nil {
return err
}
defer func() {
if err := os.Chdir(wd); err != nil {
logrus.Error(err)
}
}()
for _, m := range catalogMappings {
logrus.Debugf("mirroring catalog image: %s", m.Source)
catOpts := catalog.NewMirrorCatalogOptions(o.IOStreams)
catOpts.DryRun = o.DryRun
catOpts.MaxPathComponents = 2
catOpts.SecurityOptions.Insecure = o.SkipTLS
catOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
args := []string{
m.Source.String(),
o.ToMirror,
}
if err := catOpts.Complete(&cobra.Command{}, args); err != nil {
return fmt.Errorf("error constructing catalog options: %v", err)
}
if err := catOpts.Validate(); err != nil {
return fmt.Errorf("invalid catalog mirror options: %v", err)
}
if err := catOpts.Run(); err != nil {
return fmt.Errorf("error mirroring catalog: %v", err)
}
}
if err := os.Chdir(wd); err != nil {
return err
}
// install imagecontentsourcepolicy
logrus.Info("ICSP creation not implemented")
// install catalogsource
logrus.Info("CatalogSource creation not implemented")
// Replace old metadata with new metadata
if err := backend.WriteMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return err
}
return nil
}
// readAssociations will process and return data from the image associations file
func readAssociations(assocPath string) (assocs image.Associations, err error) {
f, err := os.Open(assocPath)
if err != nil {
return assocs, fmt.Errorf("error opening image associations file: %v", err)
}
defer f.Close()
return assocs, assocs.Decode(f)
}
// unpackImageSet unarchives all provided tar archives
func (o *Options) unpackImageSet(a archive.Archiver, dest string) error {
file, err := os.Stat(o.ArchivePath)
if err != nil {
return err
}
if file.IsDir() {
err = filepath.Walk(o.ArchivePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("traversing %s: %v", path, err)
}
if info == nil {
return fmt.Errorf("no file info")
}
extension := filepath.Ext(path)
extension = strings.TrimPrefix(extension, ".")
if extension == a.String() {
logrus.Debugf("Extracting archive %s", path)
if err := a.Unarchive(path, dest); err != nil {
return err
}
}
return nil
})
} else {
logrus.Infof("Extracting archive %s", o.ArchivePath)
if err := a.Unarchive(o.ArchivePath, dest); err != nil {
return err
}
}
return err
}
// readImageSet creates a map of all the files located in the archives
func (o *Options) readImageSet(a archive.Archiver) (map[string]string, error) {
filesinArchive := make(map[string]string)
file, err := os.Stat(o.ArchivePath)
if err != nil {
return nil, err
}
if file.IsDir() {
// Walk the directory and load the files from the archives
// into the map
logrus.Infoln("Detected multiple archive files")
err = filepath.Walk(o.ArchivePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("traversing %s: %v", path, err)
}
if info == nil {
return fmt.Errorf("no file info")
}
extension := filepath.Ext(path)
extension = strings.TrimPrefix(extension, ".")
if extension == a.String() {
logrus.Debugf("Found archive %s", path)
return a.Walk(path, func(f archiver.File) error {
filesinArchive[f.Name()] = path
return nil
})
}
return nil
})
} else {
// Walk the archive and load the file names into the map
err = a.Walk(o.ArchivePath, func(f archiver.File) error {
filesinArchive[f.Name()] = o.ArchivePath
return nil
})
}
return filesinArchive, err
}
// TODO(estroz): symlink blobs instead of copying them to avoid data duplication.
// `oc` mirror libs should be able to follow these symlinks.
func copyBlobFile(src io.Reader, dstPath string) error {
logrus.Debugf("copying blob to %s", dstPath)
if err := os.MkdirAll(filepath.Dir(dstPath), os.ModePerm); err != nil {
return err
}
dst, err := os.OpenFile(dstPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error creating blob file: %v", err)
}
defer dst.Close()
if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("error copying blob %q: %v", filepath.Base(dstPath), err)
}
return nil
}
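// fetchBlobs resolves which registry repository each missing layer belongs to,
// using the blob metadata recorded at bundle time, and downloads it into every
// blob directory that references it.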
func (o *Options) fetchBlobs(ctx context.Context, meta v1alpha1.Metadata, mappings []imgmirror.Mapping, missingLayers map[string][]string) error {
catalogNamespaceNames := []string{}
for _, m := range mappings {
dstRef := m.Destination.Ref
catalogNamespaceNames = append(catalogNamespaceNames, path.Join(dstRef.Namespace, dstRef.Name))
}
blobResources := map[string]string{}
for _, blob := range meta.PastBlobs {
resource := blob.NamespaceName
for _, nsName := range catalogNamespaceNames {
if nsName == resource {
// Blob is associated with the catalog image itself.
blobResources[blob.ID] = nsName
continue
}
suffix := strings.TrimPrefix(resource, nsName+"/")
if suffix == resource {
// Blob is not a child of the catalog image in nsName.
continue
}
// Blob may belong to multiple images.
if _, seenBlob := blobResources[blob.ID]; !seenBlob {
blobResources[blob.ID] = suffix
continue
}
}
}
restctx, err := config.CreateContext(nil, false, o.SkipTLS)
if err != nil {
return err
}
var errs []error
for layerDigest, dstBlobPaths := range missingLayers {
resource, hasResource := blobResources[layerDigest]
if !hasResource {
errs = append(errs, fmt.Errorf("layer %s: no registry resource path found", layerDigest))
continue
}
if err := o.fetchBlob(ctx, restctx, resource, layerDigest, dstBlobPaths); err != nil {
errs = append(errs, fmt.Errorf("layer %s: %v", layerDigest, err))
continue
}
}
return utilerrors.NewAggregate(errs)
}
// fetchBlob fetches a blob at <o.ToMirror>/<resource>/blobs/<layerDigest>
// then copies it to each path in dstPaths.
func (o *Options) fetchBlob(ctx context.Context, restctx *registryclient.Context, resource, layerDigest string, dstPaths []string) error {
refStr := path.Join(o.ToMirror, resource)
ref, err := reference.Parse(refStr)
if err != nil {
return fmt.Errorf("parse ref %s: %v", refStr, err)
}
logrus.Debugf("copying blob %s from %s", layerDigest, ref.Exact())
repo, err := restctx.RepositoryForRef(ctx, ref, o.SkipTLS)
if err != nil {
return fmt.Errorf("create repo for %s: %v", ref, err)
}
dgst, err := digest.Parse(layerDigest)
if err != nil {
return err
}
rc, err := repo.Blobs(ctx).Open(ctx, dgst)
if err != nil {
return fmt.Errorf("open blob: %v", err)
}
defer rc.Close()
for _, dstPath := range dstPaths {
if err := copyBlobFile(rc, dstPath); err != nil {
return fmt.Errorf("copy blob: %v", err)
}
if _, err := rc.Seek(0, 0); err != nil {
return fmt.Errorf("seek to start of blob: %v", err)
}
}
return nil
}
| {
return fmt.Sprintf("Bundle Sequence out of order. Current sequence %v, incoming sequence %v", s.CurrSeq, s.inSeq)
} | identifier_body |
publish.go | package publish
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"github.com/google/uuid"
"github.com/mholt/archiver/v3"
"github.com/opencontainers/go-digest"
"github.com/openshift/library-go/pkg/image/reference"
"github.com/openshift/library-go/pkg/image/registryclient"
"github.com/openshift/oc/pkg/cli/admin/catalog"
"github.com/openshift/oc/pkg/cli/admin/release"
"github.com/openshift/oc/pkg/cli/image/imagesource"
imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
imgmirror "github.com/openshift/oc/pkg/cli/image/mirror"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
kcmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/RedHatGov/bundle/pkg/archive"
"github.com/RedHatGov/bundle/pkg/config"
"github.com/RedHatGov/bundle/pkg/config/v1alpha1"
"github.com/RedHatGov/bundle/pkg/image"
"github.com/RedHatGov/bundle/pkg/metadata/storage"
)
type UuidError struct {
InUuid uuid.UUID
CurrUuid uuid.UUID
}
func (u *UuidError) Error() string {
return fmt.Sprintf("Mismatched UUIDs. Want %v, got %v", u.CurrUuid, u.InUuid)
}
type SequenceError struct {
inSeq int
CurrSeq int
}
func (s *SequenceError) Error() string {
return fmt.Sprintf("Bundle Sequence out of order. Current sequence %v, incoming sequence %v", s.CurrSeq, s.inSeq)
}
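// Run publishes an image set archive to the target mirror registry: it checks
// the incoming metadata against the existing workspace, unpacks the archive,
// restores blob layouts, and mirrors generic, release, and catalog images with
// the corresponding `oc` tooling.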
func (o *Options) Run(ctx context.Context, cmd *cobra.Command, f kcmdutil.Factory) error {
logrus.Infof("Publishing image set from archive %q to registry %q", o.ArchivePath, o.ToMirror)
var currentMeta v1alpha1.Metadata
var incomingMeta v1alpha1.Metadata
a := archive.NewArchiver()
// Create workspace
tmpdir, err := ioutil.TempDir(o.Dir, "imageset")
if err != nil {
return err
}
if !o.SkipCleanup {
defer os.RemoveAll(tmpdir)
}
logrus.Debugf("Using temporary directory %s to unarchive metadata", tmpdir)
// Get file information from the source archives
filesInArchive, err := o.readImageSet(a)
if err != nil {
return err
}
// Extract incoming metadata
archive, ok := filesInArchive[config.MetadataFile]
if !ok {
return errors.New("metadata is not in archive")
}
logrus.Debug("Extracting incoming metadta")
if err := a.Extract(archive, config.MetadataBasePath, tmpdir); err != nil {
return err
}
// Create backend for o.Dir
backend, err := storage.NewLocalBackend(o.Dir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Create a local workspace backend
workspace, err := storage.NewLocalBackend(tmpdir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Check for existing metadata. Metadata will be extracted before
// the extraction of the archive so imageset mismatches can
// be handled before the longer unarchiving process
existingMeta := filepath.Join(o.Dir, config.MetadataBasePath)
if _, err := os.Stat(existingMeta); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return err
}
logrus.Infof("No existing metadata found. Setting up new workspace")
// Find first file and load metadata from that
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
} else {
// Compare metadata UID and sequence number
if err := backend.ReadMetadata(ctx, ¤tMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading current metadata: %v", err)
}
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
logrus.Debug("Checking metadata UID")
if incomingMeta.MetadataSpec.Uid != currentMeta.MetadataSpec.Uid {
return &UuidError{currentMeta.MetadataSpec.Uid, incomingMeta.MetadataSpec.Uid}
}
logrus.Debug("Check metadata sequence number")
currRun := currentMeta.PastMirrors[len(currentMeta.PastMirrors)-1]
incomingRun := incomingMeta.PastMirrors[len(incomingMeta.PastMirrors)-1]
if incomingRun.Sequence != (currRun.Sequence + 1) {
return &SequenceError{incomingRun.Sequence, currRun.Sequence}
}
}
// Unarchive full imageset after metadata checks
if err := o.unpackImageSet(a, o.Dir); err != nil {
return err
}
// Load image associations to find layers not present locally.
assocPath := filepath.Join(o.Dir, config.AssociationsBasePath)
assocs, err := readAssociations(assocPath)
if err != nil {
return fmt.Errorf("error reading associations from %s: %v", o.Dir, err)
}
toMirrorRef, err := imagesource.ParseReference(o.ToMirror)
if err != nil {
return fmt.Errorf("error parsing mirror registry %q: %v", o.ToMirror, err)
}
logrus.Debugf("mirror reference: %#v", toMirrorRef)
if toMirrorRef.Type != imagesource.DestinationRegistry {
return fmt.Errorf("destination %q must be a registry reference", o.ToMirror)
}
var (
errs []error
// Mappings for mirroring image types.
genericMappings []imgmirror.Mapping
releaseMappings []imgmirror.Mapping
catalogMappings []imgmirror.Mapping
// Map of remote layer digest to the set of paths they should be fetched to.
missingLayers = map[string][]string{}
)
for imageName, assoc := range assocs {
assoc := assoc
logrus.Debugf("reading assoc: %s", assoc.Name)
// All manifest layers will be pulled below if associated,
// so just sanity-check that the layers are referenced in some association.
if len(assoc.ManifestDigests) != 0 {
for _, manifestDigest := range assoc.ManifestDigests {
if _, hasManifest := assocs[manifestDigest]; !hasManifest {
errs = append(errs, fmt.Errorf("image %q: expected associations to have manifest %s but was not found", imageName, manifestDigest))
}
}
}
for _, layerDigest := range assoc.LayerDigests {
logrus.Debugf("Found layer %v for image %s", layerDigest, imageName)
// Construct blob path, which is adjacent to the manifests path.
imageBlobPath := filepath.Join(o.Dir, "v2", assoc.Path, "blobs", layerDigest)
blobPath := filepath.Join(o.Dir, "blobs", layerDigest)
switch _, err := os.Stat(blobPath); {
case err == nil:
// If a layer exists in the archive, simply copy it to the blob path
// adjacent to its parent manifest.
if src, err := os.Open(blobPath); err == nil {
err = copyBlobFile(src, imageBlobPath)
if err := src.Close(); err != nil {
logrus.Error(err)
}
} else {
err = fmt.Errorf("error opening existing blob file: %v", err)
}
case errors.Is(err, os.ErrNotExist):
// Image layer must exist in the mirror registry since it wasn't archived,
// so fetch the layer and place it in the blob dir so it can be mirrored by `oc`.
missingLayers[layerDigest] = append(missingLayers[layerDigest], imageBlobPath)
default:
err = fmt.Errorf("accessing image %q blob %q at %s: %v", imageName, layerDigest, blobPath, err)
}
if err != nil {
errs = append(errs, err)
}
}
m := imgmirror.Mapping{Name: assoc.Name}
if m.Source, err = imagesource.ParseReference("file://" + assoc.Path); err != nil {
errs = append(errs, fmt.Errorf("error parsing source ref %q: %v", assoc.Path, err))
continue
}
// The mirrorer does not accept an image ID when a tag symlink is available,
// for reasons that are not yet understood.
// TODO(estroz): investigate the cause of this behavior.
if assoc.TagSymlink == "" {
m.Source.Ref.ID = assoc.ID
} else {
m.Source.Ref.Tag = assoc.TagSymlink
}
m.Destination = toMirrorRef
m.Destination.Ref.Namespace = m.Source.Ref.Namespace
m.Destination.Ref.Name = m.Source.Ref.Name
m.Destination.Ref.Tag = m.Source.Ref.Tag
m.Destination.Ref.ID = m.Source.Ref.ID
switch assoc.Type {
case image.TypeGeneric:
genericMappings = append(genericMappings, m)
case image.TypeOCPRelease:
m.Destination.Ref.Tag = ""
m.Destination.Ref.ID = ""
// Only add top level release images to
// release mapping
if strings.Contains(assoc.Name, "ocp-release") {
releaseMappings = append(releaseMappings, m)
}
case image.TypeOperatorCatalog:
catalogMappings = append(catalogMappings, m)
case image.TypeOperatorBundle, image.TypeOperatorRelatedImage:
// Let the `catalog mirror` API call mirror all bundle and related images in the catalog.
// TODO(estroz): this may be incorrect if bundle and related images not in a catalog can be archived,
// ex. as an additional image. Can probably get around this by mirroring
// images of this type not mapped by preceding `catalog mirror` calls.
case image.TypeInvalid:
errs = append(errs, fmt.Errorf("image %q: image type is not set", imageName))
default:
errs = append(errs, fmt.Errorf("image %q: invalid image type %v", imageName, assoc.Type))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
if len(missingLayers) != 0 {
// Fetch all layers and mount them at the specified paths.
if err := o.fetchBlobs(ctx, incomingMeta, catalogMappings, missingLayers); err != nil {
return err
}
}
// Now that all layers have been pulled, symlink all tagged manifests to their digest files.
for _, assoc := range assocs {
if assoc.TagSymlink == "" {
continue
}
manifestsPath := filepath.Join(o.Dir, "v2", assoc.Path, "manifests")
srcPath := filepath.Join(manifestsPath, assoc.ID)
dstPath := filepath.Join(manifestsPath, assoc.TagSymlink)
if _, err := os.Stat(dstPath); err == nil || errors.Is(err, os.ErrExist) {
logrus.Debugf("image %s: tag %s symlink for manifest %s already exists", assoc.Name, assoc.TagSymlink, assoc.ID)
continue
}
if err := os.Symlink(srcPath, dstPath); err != nil {
errs = append(errs, fmt.Errorf("error symlinking manifest digest %q to tag %q: %v", assoc.ID, assoc.TagSymlink, err))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
// import imagecontentsourcepolicy
logrus.Info("ICSP importing not implemented")
// import catalogsource
logrus.Info("CatalogSource importing not implemented")
// Mirror all file sources of each available image type to mirror registry.
if len(genericMappings) != 0 {
if logrus.IsLevelEnabled(logrus.DebugLevel) {
var srcs []string
for _, m := range genericMappings {
srcs = append(srcs, m.Source.String())
}
logrus.Debugf("mirroring generic images: %q", srcs)
}
genOpts := imgmirror.NewMirrorImageOptions(o.IOStreams)
genOpts.Mappings = genericMappings
genOpts.DryRun = o.DryRun
genOpts.FromFileDir = o.Dir
genOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
genOpts.SkipMultipleScopes = true
genOpts.KeepManifestList = true
genOpts.SecurityOptions.Insecure = o.SkipTLS
if err := genOpts.Validate(); err != nil |
if err := genOpts.Run(); err != nil {
return fmt.Errorf("error running generic image mirror: %v", err)
}
}
for _, m := range releaseMappings {
logrus.Debugf("mirroring release image: %s", m.Source.String())
relOpts := release.NewMirrorOptions(o.IOStreams)
relOpts.From = m.Source.String()
relOpts.FromDir = o.Dir
relOpts.To = m.Destination.String()
relOpts.SecurityOptions.Insecure = o.SkipTLS
relOpts.DryRun = o.DryRun
if err := relOpts.Complete(cmd, f, nil); err != nil {
return fmt.Errorf("error initializing release mirror options: %v", err)
}
if err := relOpts.Validate(); err != nil {
return fmt.Errorf("invalid release mirror options: %v", err)
}
if err := relOpts.Run(); err != nil {
return fmt.Errorf("error running %q release mirror: %v", m, err)
}
}
// Change to the working dir since catalog mirroring does not respect
// FileDir in the "expected" manner (unclear why).
wd, err := os.Getwd()
if err != nil {
return err
}
if err := os.Chdir(o.Dir); err != nil {
return err
}
defer func() {
if err := os.Chdir(wd); err != nil {
logrus.Error(err)
}
}()
for _, m := range catalogMappings {
logrus.Debugf("mirroring catalog image: %s", m.Source)
catOpts := catalog.NewMirrorCatalogOptions(o.IOStreams)
catOpts.DryRun = o.DryRun
catOpts.MaxPathComponents = 2
catOpts.SecurityOptions.Insecure = o.SkipTLS
catOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
args := []string{
m.Source.String(),
o.ToMirror,
}
if err := catOpts.Complete(&cobra.Command{}, args); err != nil {
return fmt.Errorf("error constructing catalog options: %v", err)
}
if err := catOpts.Validate(); err != nil {
return fmt.Errorf("invalid catalog mirror options: %v", err)
}
if err := catOpts.Run(); err != nil {
return fmt.Errorf("error mirroring catalog: %v", err)
}
}
if err := os.Chdir(wd); err != nil {
return err
}
// install imagecontentsourcepolicy
logrus.Info("ICSP creation not implemented")
// install catalogsource
logrus.Info("CatalogSource creation not implemented")
// Replace old metadata with new metadata
if err := backend.WriteMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return err
}
return nil
}
// readAssociations will process and return data from the image associations file
func readAssociations(assocPath string) (assocs image.Associations, err error) {
f, err := os.Open(assocPath)
if err != nil {
return assocs, fmt.Errorf("error opening image associations file: %v", err)
}
defer f.Close()
return assocs, assocs.Decode(f)
}
// unpackImageSet unarchives all provided tar archives into dest
func (o *Options) unpackImageSet(a archive.Archiver, dest string) error {
file, err := os.Stat(o.ArchivePath)
if err != nil {
return err
}
if file.IsDir() {
err = filepath.Walk(o.ArchivePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("traversing %s: %v", path, err)
}
if info == nil {
return fmt.Errorf("no file info")
}
extension := filepath.Ext(path)
extension = strings.TrimPrefix(extension, ".")
if extension == a.String() {
logrus.Debugf("Extracting archive %s", path)
if err := a.Unarchive(path, dest); err != nil {
return err
}
}
return nil
})
} else {
logrus.Infof("Extracting archive %s", o.ArchivePath)
if err := a.Unarchive(o.ArchivePath, dest); err != nil {
return err
}
}
return err
}
// readImageSet creates a map of all the files located in the archives
func (o *Options) readImageSet(a archive.Archiver) (map[string]string, error) {
filesinArchive := make(map[string]string)
file, err := os.Stat(o.ArchivePath)
if err != nil {
return nil, err
}
if file.IsDir() {
// Walk the directory and load the files from the archives
// into the map
logrus.Infoln("Detected multiple archive files")
err = filepath.Walk(o.ArchivePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("traversing %s: %v", path, err)
}
if info == nil {
return fmt.Errorf("no file info")
}
extension := filepath.Ext(path)
extension = strings.TrimPrefix(extension, ".")
if extension == a.String() {
logrus.Debugf("Found archive %s", path)
return a.Walk(path, func(f archiver.File) error {
filesinArchive[f.Name()] = path
return nil
})
}
return nil
})
} else {
// Walk the archive and load the file names into the map
err = a.Walk(o.ArchivePath, func(f archiver.File) error {
filesinArchive[f.Name()] = o.ArchivePath
return nil
})
}
return filesinArchive, err
}
// TODO(estroz): symlink blobs instead of copying them to avoid data duplication.
// `oc` mirror libs should be able to follow these symlinks.
func copyBlobFile(src io.Reader, dstPath string) error {
logrus.Debugf("copying blob to %s", dstPath)
if err := os.MkdirAll(filepath.Dir(dstPath), os.ModePerm); err != nil {
return err
}
dst, err := os.OpenFile(dstPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error creating blob file: %v", err)
}
defer dst.Close()
if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("error copying blob %q: %v", filepath.Base(dstPath), err)
}
return nil
}
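// fetchBlobs resolves a registry resource path for each missing layer digest using the metadata's past blobs, then fetches each blob from the mirror registry and copies it to its destination paths.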
func (o *Options) fetchBlobs(ctx context.Context, meta v1alpha1.Metadata, mappings []imgmirror.Mapping, missingLayers map[string][]string) error {
catalogNamespaceNames := []string{}
for _, m := range mappings {
dstRef := m.Destination.Ref
catalogNamespaceNames = append(catalogNamespaceNames, path.Join(dstRef.Namespace, dstRef.Name))
}
blobResources := map[string]string{}
for _, blob := range meta.PastBlobs {
resource := blob.NamespaceName
for _, nsName := range catalogNamespaceNames {
if nsName == resource {
// Blob is associated with the catalog image itself.
blobResources[blob.ID] = nsName
continue
}
suffix := strings.TrimPrefix(resource, nsName+"/")
if suffix == resource {
// Blob is not a child of the catalog image in nsName.
continue
}
// Blob may belong to multiple images.
if _, seenBlob := blobResources[blob.ID]; !seenBlob {
blobResources[blob.ID] = suffix
continue
}
}
}
restctx, err := config.CreateContext(nil, false, o.SkipTLS)
if err != nil {
return err
}
var errs []error
for layerDigest, dstBlobPaths := range missingLayers {
resource, hasResource := blobResources[layerDigest]
if !hasResource {
errs = append(errs, fmt.Errorf("layer %s: no registry resource path found", layerDigest))
continue
}
if err := o.fetchBlob(ctx, restctx, resource, layerDigest, dstBlobPaths); err != nil {
errs = append(errs, fmt.Errorf("layer %s: %v", layerDigest, err))
continue
}
}
return utilerrors.NewAggregate(errs)
}
// fetchBlob fetches a blob at <o.ToMirror>/<resource>/blobs/<layerDigest>
// then copies it to each path in dstPaths.
func (o *Options) fetchBlob(ctx context.Context, restctx *registryclient.Context, resource, layerDigest string, dstPaths []string) error {
refStr := path.Join(o.ToMirror, resource)
ref, err := reference.Parse(refStr)
if err != nil {
return fmt.Errorf("parse ref %s: %v", refStr, err)
}
logrus.Debugf("copying blob %s from %s", layerDigest, ref.Exact())
repo, err := restctx.RepositoryForRef(ctx, ref, o.SkipTLS)
if err != nil {
return fmt.Errorf("create repo for %s: %v", ref, err)
}
dgst, err := digest.Parse(layerDigest)
if err != nil {
return err
}
rc, err := repo.Blobs(ctx).Open(ctx, dgst)
if err != nil {
return fmt.Errorf("open blob: %v", err)
}
defer rc.Close()
for _, dstPath := range dstPaths {
if err := copyBlobFile(rc, dstPath); err != nil {
return fmt.Errorf("copy blob: %v", err)
}
if _, err := rc.Seek(0, 0); err != nil {
return fmt.Errorf("seek to start of blob: %v", err)
}
}
return nil
}
| {
return fmt.Errorf("invalid image mirror options: %v", err)
} | conditional_block |
publish.go | package publish
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"github.com/google/uuid"
"github.com/mholt/archiver/v3"
"github.com/opencontainers/go-digest"
"github.com/openshift/library-go/pkg/image/reference"
"github.com/openshift/library-go/pkg/image/registryclient"
"github.com/openshift/oc/pkg/cli/admin/catalog"
"github.com/openshift/oc/pkg/cli/admin/release"
"github.com/openshift/oc/pkg/cli/image/imagesource"
imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
imgmirror "github.com/openshift/oc/pkg/cli/image/mirror"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
kcmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/RedHatGov/bundle/pkg/archive"
"github.com/RedHatGov/bundle/pkg/config"
"github.com/RedHatGov/bundle/pkg/config/v1alpha1"
"github.com/RedHatGov/bundle/pkg/image"
"github.com/RedHatGov/bundle/pkg/metadata/storage"
)
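// UuidError is returned when the UID of the incoming metadata does not match the UID of the current metadata.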
type UuidError struct {
InUuid uuid.UUID
CurrUuid uuid.UUID
}
func (u *UuidError) Error() string {
return fmt.Sprintf("Mismatched UUIDs. Want %v, got %v", u.CurrUuid, u.InUuid)
}
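// SequenceError is returned when the incoming imageset sequence number does not immediately follow the current sequence number.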
type SequenceError struct {
inSeq int
CurrSeq int
}
func (s *SequenceError) Error() string {
return fmt.Sprintf("Bundle Sequence out of order. Current sequence %v, incoming sequence %v", s.CurrSeq, s.inSeq)
}
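// Run publishes an image set archive to the mirror registry: it validates the incoming metadata against any existing metadata, unpacks the archive, mirrors the contained images, and writes the updated metadata.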
func (o *Options) Run(ctx context.Context, cmd *cobra.Command, f kcmdutil.Factory) error {
logrus.Infof("Publishing image set from archive %q to registry %q", o.ArchivePath, o.ToMirror)
var currentMeta v1alpha1.Metadata
var incomingMeta v1alpha1.Metadata
a := archive.NewArchiver()
// Create workspace
tmpdir, err := ioutil.TempDir(o.Dir, "imageset")
if err != nil {
return err
}
if !o.SkipCleanup {
defer os.RemoveAll(tmpdir)
}
logrus.Debugf("Using temporary directory %s to unarchive metadata", tmpdir)
// Get file information from the source archives
filesInArchive, err := o.readImageSet(a)
if err != nil {
return err
}
// Extract incoming metadata
archive, ok := filesInArchive[config.MetadataFile]
if !ok {
return errors.New("metadata is not in archive")
}
logrus.Debug("Extracting incoming metadta")
if err := a.Extract(archive, config.MetadataBasePath, tmpdir); err != nil {
return err
}
// Create backend for o.Dir
backend, err := storage.NewLocalBackend(o.Dir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Create a local workspace backend
workspace, err := storage.NewLocalBackend(tmpdir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Check for existing metadata. Metadata will be extracted before
// the extraction of the archive so imageset mismatches can
// be handled before the longer unarchiving process
existingMeta := filepath.Join(o.Dir, config.MetadataBasePath)
if _, err := os.Stat(existingMeta); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return err
}
logrus.Infof("No existing metadata found. Setting up new workspace")
// Load the incoming metadata from the extracted workspace
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
} else {
// Compare metadata UID and sequence number
if err := backend.ReadMetadata(ctx, ¤tMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading current metadata: %v", err)
}
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
logrus.Debug("Checking metadata UID")
if incomingMeta.MetadataSpec.Uid != currentMeta.MetadataSpec.Uid {
return &UuidError{currentMeta.MetadataSpec.Uid, incomingMeta.MetadataSpec.Uid}
}
logrus.Debug("Check metadata sequence number")
currRun := currentMeta.PastMirrors[len(currentMeta.PastMirrors)-1]
incomingRun := incomingMeta.PastMirrors[len(incomingMeta.PastMirrors)-1]
if incomingRun.Sequence != (currRun.Sequence + 1) {
return &SequenceError{incomingRun.Sequence, currRun.Sequence}
}
}
// Unarchive full imageset after metadata checks
if err := o.unpackImageSet(a, o.Dir); err != nil {
return err
}
// Load image associations to find layers not present locally.
assocPath := filepath.Join(o.Dir, config.AssociationsBasePath)
assocs, err := readAssociations(assocPath)
if err != nil {
return fmt.Errorf("error reading associations from %s: %v", o.Dir, err)
}
toMirrorRef, err := imagesource.ParseReference(o.ToMirror)
if err != nil {
return fmt.Errorf("error parsing mirror registry %q: %v", o.ToMirror, err)
}
logrus.Debugf("mirror reference: %#v", toMirrorRef)
if toMirrorRef.Type != imagesource.DestinationRegistry {
return fmt.Errorf("destination %q must be a registry reference", o.ToMirror)
}
var (
errs []error
// Mappings for mirroring image types.
genericMappings []imgmirror.Mapping
releaseMappings []imgmirror.Mapping
catalogMappings []imgmirror.Mapping
// Map of remote layer digest to the set of paths they should be fetched to.
missingLayers = map[string][]string{}
)
for imageName, assoc := range assocs {
assoc := assoc
logrus.Debugf("reading assoc: %s", assoc.Name)
// All manifest layers will be pulled below if associated,
// so just sanity-check that the layers are referenced in some association.
if len(assoc.ManifestDigests) != 0 {
for _, manifestDigest := range assoc.ManifestDigests {
if _, hasManifest := assocs[manifestDigest]; !hasManifest {
errs = append(errs, fmt.Errorf("image %q: expected associations to have manifest %s but was not found", imageName, manifestDigest))
}
}
}
for _, layerDigest := range assoc.LayerDigests {
logrus.Debugf("Found layer %v for image %s", layerDigest, imageName)
// Construct blob path, which is adjacent to the manifests path.
imageBlobPath := filepath.Join(o.Dir, "v2", assoc.Path, "blobs", layerDigest)
blobPath := filepath.Join(o.Dir, "blobs", layerDigest)
switch _, err := os.Stat(blobPath); {
case err == nil:
// If a layer exists in the archive, simply copy it to the blob path
// adjacent to its parent manifest.
if src, err := os.Open(blobPath); err == nil {
err = copyBlobFile(src, imageBlobPath)
if err := src.Close(); err != nil {
logrus.Error(err)
}
} else {
err = fmt.Errorf("error opening existing blob file: %v", err)
}
case errors.Is(err, os.ErrNotExist):
// Image layer must exist in the mirror registry since it wasn't archived,
// so fetch the layer and place it in the blob dir so it can be mirrored by `oc`.
missingLayers[layerDigest] = append(missingLayers[layerDigest], imageBlobPath)
default:
err = fmt.Errorf("accessing image %q blob %q at %s: %v", imageName, layerDigest, blobPath, err)
}
if err != nil {
errs = append(errs, err)
}
}
m := imgmirror.Mapping{Name: assoc.Name}
if m.Source, err = imagesource.ParseReference("file://" + assoc.Path); err != nil {
errs = append(errs, fmt.Errorf("error parsing source ref %q: %v", assoc.Path, err))
continue
}
// The mirrorer does not accept an image ID when a tag symlink is available,
// for reasons that are not yet understood.
// TODO(estroz): investigate the cause of this behavior.
if assoc.TagSymlink == "" {
m.Source.Ref.ID = assoc.ID
} else {
m.Source.Ref.Tag = assoc.TagSymlink
}
m.Destination = toMirrorRef
m.Destination.Ref.Namespace = m.Source.Ref.Namespace
m.Destination.Ref.Name = m.Source.Ref.Name
m.Destination.Ref.Tag = m.Source.Ref.Tag
m.Destination.Ref.ID = m.Source.Ref.ID
switch assoc.Type {
case image.TypeGeneric:
genericMappings = append(genericMappings, m)
case image.TypeOCPRelease:
m.Destination.Ref.Tag = ""
m.Destination.Ref.ID = ""
// Only add top level release images to
// release mapping
if strings.Contains(assoc.Name, "ocp-release") {
releaseMappings = append(releaseMappings, m)
}
case image.TypeOperatorCatalog:
catalogMappings = append(catalogMappings, m)
case image.TypeOperatorBundle, image.TypeOperatorRelatedImage:
// Let the `catalog mirror` API call mirror all bundle and related images in the catalog.
// TODO(estroz): this may be incorrect if bundle and related images not in a catalog can be archived,
// ex. as an additional image. Can probably get around this by mirroring
// images of this type not mapped by preceding `catalog mirror` calls.
case image.TypeInvalid:
errs = append(errs, fmt.Errorf("image %q: image type is not set", imageName))
default:
errs = append(errs, fmt.Errorf("image %q: invalid image type %v", imageName, assoc.Type))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
if len(missingLayers) != 0 {
// Fetch all layers and mount them at the specified paths.
if err := o.fetchBlobs(ctx, incomingMeta, catalogMappings, missingLayers); err != nil {
return err
}
}
// Now that all layers have been pulled, symlink all tagged manifests to their digest files.
for _, assoc := range assocs {
if assoc.TagSymlink == "" {
continue
}
manifestsPath := filepath.Join(o.Dir, "v2", assoc.Path, "manifests")
srcPath := filepath.Join(manifestsPath, assoc.ID)
dstPath := filepath.Join(manifestsPath, assoc.TagSymlink)
if _, err := os.Stat(dstPath); err == nil || errors.Is(err, os.ErrExist) {
logrus.Debugf("image %s: tag %s symlink for manifest %s already exists", assoc.Name, assoc.TagSymlink, assoc.ID)
continue
}
if err := os.Symlink(srcPath, dstPath); err != nil {
errs = append(errs, fmt.Errorf("error symlinking manifest digest %q to tag %q: %v", assoc.ID, assoc.TagSymlink, err))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
// import imagecontentsourcepolicy
logrus.Info("ICSP importing not implemented")
// import catalogsource
logrus.Info("CatalogSource importing not implemented")
// Mirror all file sources of each available image type to mirror registry.
if len(genericMappings) != 0 {
if logrus.IsLevelEnabled(logrus.DebugLevel) {
var srcs []string
for _, m := range genericMappings {
srcs = append(srcs, m.Source.String())
}
logrus.Debugf("mirroring generic images: %q", srcs)
}
genOpts := imgmirror.NewMirrorImageOptions(o.IOStreams)
genOpts.Mappings = genericMappings
genOpts.DryRun = o.DryRun
genOpts.FromFileDir = o.Dir
genOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
genOpts.SkipMultipleScopes = true
genOpts.KeepManifestList = true
genOpts.SecurityOptions.Insecure = o.SkipTLS
if err := genOpts.Validate(); err != nil {
return fmt.Errorf("invalid image mirror options: %v", err)
}
if err := genOpts.Run(); err != nil {
return fmt.Errorf("error running generic image mirror: %v", err)
}
}
for _, m := range releaseMappings {
logrus.Debugf("mirroring release image: %s", m.Source.String())
relOpts := release.NewMirrorOptions(o.IOStreams)
relOpts.From = m.Source.String()
relOpts.FromDir = o.Dir
relOpts.To = m.Destination.String()
relOpts.SecurityOptions.Insecure = o.SkipTLS
relOpts.DryRun = o.DryRun
if err := relOpts.Complete(cmd, f, nil); err != nil {
return fmt.Errorf("error initializing release mirror options: %v", err)
}
if err := relOpts.Validate(); err != nil {
return fmt.Errorf("invalid release mirror options: %v", err)
}
if err := relOpts.Run(); err != nil {
return fmt.Errorf("error running %q release mirror: %v", m, err)
}
}
// Change to the working dir since catalog mirroring does not respect
// FileDir in the "expected" manner (unclear why).
wd, err := os.Getwd()
if err != nil {
return err
}
if err := os.Chdir(o.Dir); err != nil {
return err
}
defer func() {
if err := os.Chdir(wd); err != nil {
logrus.Error(err)
}
}()
for _, m := range catalogMappings {
logrus.Debugf("mirroring catalog image: %s", m.Source)
catOpts := catalog.NewMirrorCatalogOptions(o.IOStreams)
catOpts.DryRun = o.DryRun
catOpts.MaxPathComponents = 2
catOpts.SecurityOptions.Insecure = o.SkipTLS
catOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
args := []string{
m.Source.String(),
o.ToMirror,
}
if err := catOpts.Complete(&cobra.Command{}, args); err != nil {
return fmt.Errorf("error constructing catalog options: %v", err)
}
if err := catOpts.Validate(); err != nil {
return fmt.Errorf("invalid catalog mirror options: %v", err)
}
if err := catOpts.Run(); err != nil {
return fmt.Errorf("error mirroring catalog: %v", err)
} | // install imagecontentsourcepolicy
logrus.Info("ICSP creation not implemented")
// install catalogsource
logrus.Info("CatalogSource creation not implemented")
// Replace old metadata with new metadata
if err := backend.WriteMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return err
}
return nil
}
// readAssociations will process and return data from the image associations file
func readAssociations(assocPath string) (assocs image.Associations, err error) {
f, err := os.Open(assocPath)
if err != nil {
return assocs, fmt.Errorf("error opening image associations file: %v", err)
}
defer f.Close()
return assocs, assocs.Decode(f)
}
// unpackImageSet unarchives all provided tar archives into dest
func (o *Options) unpackImageSet(a archive.Archiver, dest string) error {
file, err := os.Stat(o.ArchivePath)
if err != nil {
return err
}
if file.IsDir() {
err = filepath.Walk(o.ArchivePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("traversing %s: %v", path, err)
}
if info == nil {
return fmt.Errorf("no file info")
}
extension := filepath.Ext(path)
extension = strings.TrimPrefix(extension, ".")
if extension == a.String() {
logrus.Debugf("Extracting archive %s", path)
if err := a.Unarchive(path, dest); err != nil {
return err
}
}
return nil
})
} else {
logrus.Infof("Extracting archive %s", o.ArchivePath)
if err := a.Unarchive(o.ArchivePath, dest); err != nil {
return err
}
}
return err
}
// readImageSet creates a map of all the files located in the archives
func (o *Options) readImageSet(a archive.Archiver) (map[string]string, error) {
filesinArchive := make(map[string]string)
file, err := os.Stat(o.ArchivePath)
if err != nil {
return nil, err
}
if file.IsDir() {
// Walk the directory and load the files from the archives
// into the map
logrus.Infoln("Detected multiple archive files")
err = filepath.Walk(o.ArchivePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("traversing %s: %v", path, err)
}
if info == nil {
return fmt.Errorf("no file info")
}
extension := filepath.Ext(path)
extension = strings.TrimPrefix(extension, ".")
if extension == a.String() {
logrus.Debugf("Found archive %s", path)
return a.Walk(path, func(f archiver.File) error {
filesinArchive[f.Name()] = path
return nil
})
}
return nil
})
} else {
// Walk the archive and load the file names into the map
err = a.Walk(o.ArchivePath, func(f archiver.File) error {
filesinArchive[f.Name()] = o.ArchivePath
return nil
})
}
return filesinArchive, err
}
// TODO(estroz): symlink blobs instead of copying them to avoid data duplication.
// `oc` mirror libs should be able to follow these symlinks.
func copyBlobFile(src io.Reader, dstPath string) error {
logrus.Debugf("copying blob to %s", dstPath)
if err := os.MkdirAll(filepath.Dir(dstPath), os.ModePerm); err != nil {
return err
}
dst, err := os.OpenFile(dstPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error creating blob file: %v", err)
}
defer dst.Close()
if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("error copying blob %q: %v", filepath.Base(dstPath), err)
}
return nil
}
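// fetchBlobs resolves a registry resource path for each missing layer digest using the metadata's past blobs, then fetches each blob from the mirror registry and copies it to its destination paths.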
func (o *Options) fetchBlobs(ctx context.Context, meta v1alpha1.Metadata, mappings []imgmirror.Mapping, missingLayers map[string][]string) error {
catalogNamespaceNames := []string{}
for _, m := range mappings {
dstRef := m.Destination.Ref
catalogNamespaceNames = append(catalogNamespaceNames, path.Join(dstRef.Namespace, dstRef.Name))
}
blobResources := map[string]string{}
for _, blob := range meta.PastBlobs {
resource := blob.NamespaceName
for _, nsName := range catalogNamespaceNames {
if nsName == resource {
// Blob is associated with the catalog image itself.
blobResources[blob.ID] = nsName
continue
}
suffix := strings.TrimPrefix(resource, nsName+"/")
if suffix == resource {
// Blob is not a child of the catalog image in nsName.
continue
}
// Blob may belong to multiple images.
if _, seenBlob := blobResources[blob.ID]; !seenBlob {
blobResources[blob.ID] = suffix
continue
}
}
}
restctx, err := config.CreateContext(nil, false, o.SkipTLS)
if err != nil {
return err
}
var errs []error
for layerDigest, dstBlobPaths := range missingLayers {
resource, hasResource := blobResources[layerDigest]
if !hasResource {
errs = append(errs, fmt.Errorf("layer %s: no registry resource path found", layerDigest))
continue
}
if err := o.fetchBlob(ctx, restctx, resource, layerDigest, dstBlobPaths); err != nil {
errs = append(errs, fmt.Errorf("layer %s: %v", layerDigest, err))
continue
}
}
return utilerrors.NewAggregate(errs)
}
// fetchBlob fetches a blob at <o.ToMirror>/<resource>/blobs/<layerDigest>
// then copies it to each path in dstPaths.
func (o *Options) fetchBlob(ctx context.Context, restctx *registryclient.Context, resource, layerDigest string, dstPaths []string) error {
refStr := path.Join(o.ToMirror, resource)
ref, err := reference.Parse(refStr)
if err != nil {
return fmt.Errorf("parse ref %s: %v", refStr, err)
}
logrus.Debugf("copying blob %s from %s", layerDigest, ref.Exact())
repo, err := restctx.RepositoryForRef(ctx, ref, o.SkipTLS)
if err != nil {
return fmt.Errorf("create repo for %s: %v", ref, err)
}
dgst, err := digest.Parse(layerDigest)
if err != nil {
return err
}
rc, err := repo.Blobs(ctx).Open(ctx, dgst)
if err != nil {
return fmt.Errorf("open blob: %v", err)
}
defer rc.Close()
for _, dstPath := range dstPaths {
if err := copyBlobFile(rc, dstPath); err != nil {
return fmt.Errorf("copy blob: %v", err)
}
if _, err := rc.Seek(0, 0); err != nil {
return fmt.Errorf("seek to start of blob: %v", err)
}
}
return nil
} | }
if err := os.Chdir(wd); err != nil {
return err
}
| random_line_split |
tree_dir.py | #!/usr/bin/env python3
'''
@File : tree_dir.py
@Time : 2018/12/26 13:39:13
@Author : yangshifu
@Version : 1.0
@Contact : [email protected]
@Desc : None
'''
import os
import math
import warnings
import numpy as np
import tkinter as tk
from tkinter import *
from tkinter import ttk
from idlelib import tree
from idlelib.config import idleConf
from PIL import Image, ImageTk
from photos import Photos
class SubFileTreeItem(tree.FileTreeItem):
def GetSubList(self):
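"""Return the child tree items of this directory, skipping hidden files."""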
try:
names = os.listdir(self.path)
# Filter out hidden files
names = list(filter(lambda x: False if x.startswith('.') else True, names))
except OSError:
return []
names.sort(key = os.path.normcase)
sublist = []
for name in names:
item = tree.FileTreeItem(os.path.join(self.path, name))
sublist.append(item)
return sublist
class SubTreeNode(tree.TreeNode):
"""
Subclass of tree.TreeNode that adds Ctrl+left-click and right-click handling: multiple items can be selected, and right-click opens the selected jpg files.
"""
path_list = []
img_type = 'jpg'
photos = Photos()
def __init__(self, dir_canvas, parent, item):
tree.TreeNode.__init__(self, dir_canvas, parent, item)
def get_path_list(self, suffix=img_type):
""" get img_type file list such as get jpg files"""
img_list = list(filter(lambda x: x.endswith(suffix), self.path_list))
return img_list
def select_or_edit(self, event=None):
self.path_list.clear()
if self.selected and self.item.IsEditable():
self.edit(event)
else:
self.path_list.append(self.item.path)
self.select(event)
def select_more(self, event=None):
"""Control + 左键 触发选择多个文件或目录"""
self.path_list.append(self.item.path)
# if self.selected:
# return
# self.deselectall()
self.selected = True
# self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
def execute_file(self, event=None):
""" open jpg file or merge several jpg file then open it"""
file_list = self.get_path_list()
print(file_list)
if not file_list:
return
# merge image
# Fix a memory-leak bug: the previously opened image was not released, so the second open still showed the old image
try:
self.photos.destroy()
except:
pass
self.photos.imgs = file_list
merged_photo = self.photos.merge_photos()
# show image
try:
window.destroy()
except:
import traceback
traceback.print_exc()
window.build_img_canvas()
window.show_img_in_canvas(merged_photo)
def drawtext(self):
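"""Draw the node's label text on the canvas and bind mouse events for selecting, expanding, multi-selecting, and opening files."""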
textx = self.x+20-1
texty = self.y-4
labeltext = self.item.GetLabelText()
if labeltext:
id = self.canvas.create_text(textx, texty, anchor="nw",
text=labeltext)
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
theme = idleConf.CurrentTheme()
if self.selected:
self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
else:
self.label.configure(idleConf.GetHighlight(theme, 'normal'))
id = self.canvas.create_window(textx, texty,
anchor="nw", window=self.label)
self.label.bind("<1>", self.select_or_edit)
self.label.bind("<Double-1>", self.flip)
self.label.bind("<Control-1>", self.select_more)
self.label.bind("<3>", self.execute_file)
self.text_id = id
class AutoScrollbar(ttk.Scrollbar):
""" A scrollbar that hides itself if it's not needed. Works only for grid geometry manager """
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
self.grid_remove()
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi)
def pack(self, **kw):
raise tk.TclError('Cannot use pack with the widget ' + self.__class__.__name__)
def place(self, **kw):
raise tk.TclError('Cannot use place with the widget ' + self.__class__.__name__)
class WholeWindow():
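"""Main application window: a directory tree canvas on the left and a scrollable, zoomable image canvas on the right."""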
move_gap = [0, 0]
def __init__(self, master):
self.master = master # parent (root) window
self.screen_width, self.screen_height = self.get_screen_size(self.master)
self.center_window(self.screen_width-50, self.screen_height-50)
self.master.resizable(width=False, height=False)
self.build_tree_canvas()
self.build_tree()
self.build_img_canvas()
def build_tree_canvas(self):
# create frame
self.tree_width = self.screen_width // 7
self.tree_height = self.screen_height
frame = tk.Frame(self.master, width=self.tree_width, height=self.tree_height)
frame.grid(row=0, column=0)
# canvas
self.tree_canvas=tk.Canvas(frame, bg='#FFFFFF', scrollregion=(0,0,500,500))
# vbar & hbar
hbar=tk.Scrollbar(frame,orient=tk.HORIZONTAL)
hbar.pack(side=tk.BOTTOM,fill=tk.X)
hbar.config(command=self.tree_canvas.xview)
vbar=tk.Scrollbar(frame,orient=tk.VERTICAL)
vbar.pack(side=tk.RIGHT,fill=tk.Y)
vbar.config(command=self.tree_canvas.yview)
self.tree_canvas.config(width=self.tree_width,height=self.tree_height)
self.tree_canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
self.tree_canvas.pack(side=tk.LEFT,expand=True,fill=tk.BOTH)
def build_tree(self):
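"""Populate the tree canvas with a file tree rooted at the user's home directory."""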
home = os.environ.get('HOME')
item = SubFileTreeItem(home)
node = SubTreeNode(self.tree_canvas, None, item)
node.update()
def build_img_canvas(self):
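"""Create the image canvas with auto-hiding scrollbars and bind pan, zoom, and resize events."""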
self.box_width = self.screen_width - self.tree_width
self.img_frame = tk.Frame(self.master, width=self.box_width, height=self.screen_height, background='red')
self.img_frame.grid(row=0, column=1)
hbar = AutoScrollbar(self.img_frame, orient=tk.HORIZONTAL)
vbar = AutoScrollbar(self.img_frame, orient=tk.VERTICAL)
hbar.grid(row=1, column=1, sticky='we')
vbar.grid(row=0, column=2, sticky='ns')
# Create canvas and bind it with scrollbars
self.canvas_image = tk.Canvas(self.img_frame, highlightthickness=0, width=self.box_width, height=self.screen_height,
xscrollcommand=hbar.set, yscrollcommand=vbar.set, background='blue')
self.canvas_image.grid(row=0, column=1, sticky='nswe')
self.canvas_image.update() # wait till canvas is created
hbar.configure(command=self.__scroll_x) # bind scrollbars to the canvas
vbar.configure(command=self.__scroll_y)
# Bind events to the Canvas
self.canvas_image.bind('<Configure>', lambda event: self.__show_image()) # canvas is resized
self.canvas_image.bind('<Control-ButtonPress-1>', self.__move_from) # remember canvas position
self.canvas_image.bind('<Control-B1-Motion>', self.__move_to) # move canvas to the new position
self.canvas_image.bind('<Control-B1-ButtonRelease>', self.get_move_gap)
self.canvas_image.bind('<MouseWheel>', self.__wheel) # zoom for Windows and MacOS, but not Linux
self.canvas_image.bind('<Button-5>', self.__wheel) # zoom for Linux, wheel scroll down
self.canvas_image.bind('<Button-4>', self.__wheel) # zoom for Linux, wheel scroll up
# Handle keystrokes in idle mode, because program slows down on a weak computers,
# when too many key stroke events in the same time
# self.canvas_image.bind('<Key>', lambda event: self.canvas_image.after_idle(self.__keystroke, event))
def get_screen_size(self, window):
return window.winfo_screenwidth(),window.winfo_screenheight()
def get_window_size(self, window):
return window.winfo_reqwidth(),window.winfo_reqheight()
def center_window(self, width, height):
screenwidth = self.master.winfo_screenwidth()
screenheight = self.master.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
self.master.geometry(size)
def show_img_in_canvas(self, pil_image):
self.imscale = 1.0 # scale for the canvas image zoom, public for outer classes
self.__delta = 1.3 # zoom magnitude
self.__filter = Image.ANTIALIAS # could be: NEAREST, BILINEAR, BICUBIC and ANTIALIAS
self.__previous_state = 0 # previous state of the keyboard
self.pil_image = pil_image
with warnings.catch_warnings(): # suppress DecompressionBombWarning
warnings.simplefilter('ignore')
self.__image = self.pil_image # open the image, but don't load it yet
self.imwidth, self.imheight = self.__image.size # public for outer classes
self.__min_side = min(self.imwidth, self.imheight) # get the smaller image side
# Create image pyramid
self.__pyramid = [self.pil_image]
# Set ratio coefficient for image pyramid
self.__ratio = 1.0
self.__curr_img = 0 # current image from the pyramid
self.__scale = self.imscale * self.__ratio # image pyramid scale
self.__reduction = 2 # reduction degree of image pyramid
w, h = self.__pyramid[-1].size
while w > 512 and h > 512: # top pyramid image is around 512 pixels in size
w /= self.__reduction # divide on reduction degree
h /= self.__reduction # divide on reduction degree
self.__pyramid.append(self.__pyramid[-1].resize((int(w), int(h)), self.__filter))
# Put image into container rectangle and use it to set proper coordinates to the image
self.container = self.canvas_image.create_rectangle((0, 0, self.imwidth, self.imheight), width=0)
self.__show_image() # show image on the canvas
self.canvas_image.focus_set() # set focus on the canvas
def __scroll_x(self, *args, **kwargs):
""" Scroll canvas horizontally and redraw the image """
self.canvas_image.xview(*args) # scroll horizontally
self.__show_image() # redraw the image
def __scroll_y(self, *args, **kwargs):
""" Scroll canvas vertically and redraw the image """
self.canvas_image.yview(*args) # scroll vertically
self.__show_image() # redraw the image
def __move_from(self, event):
""" Remember previous coordinates for scrolling with the mouse """
self.canvas_image.scan_mark(event.x, event.y)
self.from_coord = (event.x, event.y)
def __move_to(self, event):
""" Drag (move) canvas to the new position """
self.canvas_image.scan_dragto(event.x, event.y, gain=1)
self.to_coord = (event.x, event.y)
self.__show_image() # zoom tile and show it on the canvas
def get_move_gap(self, event):
""" B1 release时获取移动的距离 """
try:
self.move_gap = list(np.array(self.to_coord) - np.array(self.from_coord) + np.array(self.move_gap))
except:
self.move_gap = [0, 0]
def outside(self, x, y):
""" Checks if the point (x,y) is outside the image area """
bbox = self.canvas_image.coords(self.container) # get image area
if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
return False # point (x,y) is inside the image area
else:
return True # point (x,y) is outside the image area
def __wheel(self, event):
""" Zoom with mouse wheel """
x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas
y = self.canvas_image.canvasy(event.y)
if self.outside(x, y): return # zoom only inside image area
scale = 1.0
# Respond to Linux (event.num) or Windows (event.delta) wheel event
if event.num == 5 or event.delta == -120: # scroll down, smaller
if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels
self.imscale /= self.__delta
scale /= self.__delta
if event.num == 4 or event.delta == 120: # scroll up, bigger
i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1
if i < self.imscale: return # 1 pixel is bigger than the visible area
self.imscale *= self.__delta
scale *= self.__delta
# Take appropriate image from the pyramid
k = self.imscale * self.__ratio # temporary coefficient
self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)
self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))
#
self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects
# Redraw some figures before showing image on the screen
self.redraw_figures() # method for child classes
self.__show_image()
def __keystroke(self, event):
""" Scrolling with the keyboard.
Independent from the language of the keyboard, CapsLock, <Ctrl>+<key>, etc. """
if event.state - self.__previous_state == 4: # means that the Control key is pressed
pass # do nothing if Control key is pressed
else:
self.__previous_state = event.state # remember the last keystroke state
# Up, Down, Left, Right keystrokes
if event.keycode in [68, 39, 102]: # scroll right, keys 'd' or 'Right'
self.__scroll_x('scroll', 1, 'unit', event=event)
elif event.keycode in [65, 37, 100]: # scroll left, keys 'a' or 'Left'
self.__scroll_x('scroll', -1, 'unit', event=event)
elif event.keycode in [87, 38, 104]: # scroll up, keys 'w' or 'Up'
self.__scroll_y('scroll', -1, 'unit', event=event)
elif event.keycode in [83, 40, 98]: # scroll down, keys 's' or 'Down'
self.__scroll_y('scroll', 1, 'unit', event=event)
def redraw_figures(self):
""" Dummy function to redraw figures in the children classes """
pass
def crop(self, bbox):
""" Crop rectangle from the image and return it """
return self.__pyramid[0].crop(bbox)
def destroy(self):
""" ImageFrame destructor """
del self.move_gap
del self.canvas_image.imagetk
# # print(self.imageid)
self.pil_image.close()
del self.pil_image
self.canvas_image.delete(self.imageid) # remove the image from the canvas
map(lambda i: i.close, self.__pyramid) # close all pyramid images
del self.__pyramid[:] # delete pyramid list
del self.__pyramid # delete pyramid variable
self.canvas_image.delete(tk.ALL)
self.canvas_image.destroy()
# self.img_frame.destroy()
def __show_image(self):
""" Show image on the Canvas. Implements correct image zoom almost like in Google Maps """
box_image = self.canvas_image.coords(self.container) # get image area
box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas
self.canvas_image.canvasy(0),
self.canvas_image.canvasx(self.canvas_image.winfo_width()),
self.canvas_image.canvasy(self.canvas_image.winfo_height()))
self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly
# Get scroll region box
box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),
max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]
# Horizontal part of the image is in the visible area
if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:
box_scroll[0] = self.box_img_int[0]
box_scroll[2] = self.box_img_int[2] | box_scroll[3] = self.box_img_int[3]
# Convert scroll region to tuple and to integer
self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region
x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile
y1 = max(box_canvas[1] - box_image[1], 0)
x2 = min(box_canvas[2], box_image[2]) - box_image[0]
y2 = min(box_canvas[3], box_image[3]) - box_image[1]
if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area
image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid
(int(x1 / self.__scale), int(y1 / self.__scale),
int(x2 / self.__scale), int(y2 / self.__scale)))
#
imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))
self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),
max(box_canvas[1], self.box_img_int[1]),
anchor='nw', image=imagetk)
self.canvas_image.lower(self.imageid) # set image into background
self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection
if __name__ == "__main__":
from function import Function
root = tk.Tk()
window = WholeWindow(root)
func = Function(root, obj_window=window)
# window.build_tree()
# photo = Photos("sample/0001.jpg")
# window.show_img_in_canvas(photo.pil_image)
root.mainloop() | # Vertical part of the image is in the visible area
if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:
box_scroll[1] = self.box_img_int[1] | random_line_split |
tree_dir.py | #!/usr/bin/env python3
'''
@File : tree_dir.py
@Time : 2018/12/26 13:39:13
@Author : yangshifu
@Version : 1.0
@Contact : [email protected]
@Desc : None
'''
import os
import math
import warnings
import numpy as np
import tkinter as tk
from tkinter import *
from tkinter import ttk
from idlelib import tree
from idlelib.config import idleConf
from PIL import Image, ImageTk
from photos import Photos
class SubFileTreeItem(tree.FileTreeItem):
def GetSubList(self):
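"""Return the child tree items of this directory, skipping hidden files."""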
try:
names = os.listdir(self.path)
# Filter out hidden files
names = list(filter(lambda x: False if x.startswith('.') else True, names))
except OSError:
return []
names.sort(key = os.path.normcase)
sublist = []
for name in names:
item = tree.FileTreeItem(os.path.join(self.path, name))
sublist.append(item)
return sublist
class SubTreeNode(tree.TreeNode):
"""
Subclass of tree.TreeNode that adds Ctrl+left-click and right-click handling: multiple items can be selected, and right-click opens the selected jpg files.
"""
path_list = []
img_type = 'jpg'
photos = Photos()
def __init__(self, dir_canvas, parent, item):
tree.TreeNode.__init__(self, dir_canvas, parent, item)
def get_path_list(self, suffix=img_type):
""" get img_type file list such as get jpg files"""
img_list = list(filter(lambda x: x.endswith(suffix), self.path_list))
return img_list
def select_or_edit(self, event=None):
self.path_list.clear()
if self.selected and self.item.IsEditable():
self.edit(event)
else:
self.path_list.append(self.item.path)
self.select(event)
def select_more(self, event=None):
"""Control + 左键 触发选择多个文件或目录"""
self.path_list.append(self.item.path)
# if self.selected:
# return
# self.deselectall()
self.selected = True
# self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
def execute_file(self, event=None):
""" open jpg file or merge several jpg file then open it | file_list = self.get_path_list()
print(file_list)
if not file_list:
return
# merge image
# 修复内存泄露的bug,由于没有清除之前打开的图片,第二次打开的图片仍然为之前的图片
try:
self.photos.destroy()
except:
pass
self.photos.imgs = file_list
merged_photo = self.photos.merge_photos()
# show image
try:
window.destroy()
except:
import traceback
traceback.print_exc()
window.build_img_canvas()
window.show_img_in_canvas(merged_photo)
def drawtext(self):
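"""Draw the node's label text on the canvas and bind mouse events for selecting, expanding, multi-selecting, and opening files."""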
textx = self.x+20-1
texty = self.y-4
labeltext = self.item.GetLabelText()
if labeltext:
id = self.canvas.create_text(textx, texty, anchor="nw",
text=labeltext)
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
theme = idleConf.CurrentTheme()
if self.selected:
self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
else:
self.label.configure(idleConf.GetHighlight(theme, 'normal'))
id = self.canvas.create_window(textx, texty,
anchor="nw", window=self.label)
self.label.bind("<1>", self.select_or_edit)
self.label.bind("<Double-1>", self.flip)
self.label.bind("<Control-1>", self.select_more)
self.label.bind("<3>", self.execute_file)
self.text_id = id
class AutoScrollbar(ttk.Scrollbar):
""" A scrollbar that hides itself if it's not needed. Works only for grid geometry manager """
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
self.grid_remove()
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi)
def pack(self, **kw):
raise tk.TclError('Cannot use pack with the widget ' + self.__class__.__name__)
def place(self, **kw):
raise tk.TclError('Cannot use place with the widget ' + self.__class__.__name__)
class WholeWindow():
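"""Main application window: a directory tree canvas on the left and a scrollable, zoomable image canvas on the right."""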
move_gap = [0, 0]
def __init__(self, master):
self.master = master # parent (root) window
self.screen_width, self.screen_height = self.get_screen_size(self.master)
self.center_window(self.screen_width-50, self.screen_height-50)
self.master.resizable(width=False, height=False)
self.build_tree_canvas()
self.build_tree()
self.build_img_canvas()
def build_tree_canvas(self):
# create frame
self.tree_width = self.screen_width // 7
self.tree_height = self.screen_height
frame = tk.Frame(self.master, width=self.tree_width, height=self.tree_height)
frame.grid(row=0, column=0)
# canvas
self.tree_canvas=tk.Canvas(frame, bg='#FFFFFF', scrollregion=(0,0,500,500))
# vbar & hbar
hbar=tk.Scrollbar(frame,orient=tk.HORIZONTAL)
hbar.pack(side=tk.BOTTOM,fill=tk.X)
hbar.config(command=self.tree_canvas.xview)
vbar=tk.Scrollbar(frame,orient=tk.VERTICAL)
vbar.pack(side=tk.RIGHT,fill=tk.Y)
vbar.config(command=self.tree_canvas.yview)
self.tree_canvas.config(width=self.tree_width,height=self.tree_height)
self.tree_canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
self.tree_canvas.pack(side=tk.LEFT,expand=True,fill=tk.BOTH)
def build_tree(self):
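"""Populate the tree canvas with a file tree rooted at the user's home directory."""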
home = os.environ.get('HOME')
item = SubFileTreeItem(home)
node = SubTreeNode(self.tree_canvas, None, item)
node.update()
def build_img_canvas(self):
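"""Create the image canvas with auto-hiding scrollbars and bind pan, zoom, and resize events."""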
self.box_width = self.screen_width - self.tree_width
self.img_frame = tk.Frame(self.master, width=self.box_width, height=self.screen_height, background='red')
self.img_frame.grid(row=0, column=1)
hbar = AutoScrollbar(self.img_frame, orient=tk.HORIZONTAL)
vbar = AutoScrollbar(self.img_frame, orient=tk.VERTICAL)
hbar.grid(row=1, column=1, sticky='we')
vbar.grid(row=0, column=2, sticky='ns')
# Create canvas and bind it with scrollbars
self.canvas_image = tk.Canvas(self.img_frame, highlightthickness=0, width=self.box_width, height=self.screen_height,
xscrollcommand=hbar.set, yscrollcommand=vbar.set, background='blue')
self.canvas_image.grid(row=0, column=1, sticky='nswe')
self.canvas_image.update() # wait till canvas is created
hbar.configure(command=self.__scroll_x) # bind scrollbars to the canvas
vbar.configure(command=self.__scroll_y)
# Bind events to the Canvas
self.canvas_image.bind('<Configure>', lambda event: self.__show_image()) # canvas is resized
self.canvas_image.bind('<Control-ButtonPress-1>', self.__move_from) # remember canvas position
self.canvas_image.bind('<Control-B1-Motion>', self.__move_to) # move canvas to the new position
self.canvas_image.bind('<Control-B1-ButtonRelease>', self.get_move_gap)
self.canvas_image.bind('<MouseWheel>', self.__wheel) # zoom for Windows and MacOS, but not Linux
self.canvas_image.bind('<Button-5>', self.__wheel) # zoom for Linux, wheel scroll down
self.canvas_image.bind('<Button-4>', self.__wheel) # zoom for Linux, wheel scroll up
# Handle keystrokes in idle mode, because program slows down on a weak computers,
# when too many key stroke events in the same time
# self.canvas_image.bind('<Key>', lambda event: self.canvas_image.after_idle(self.__keystroke, event))
def get_screen_size(self, window):
return window.winfo_screenwidth(),window.winfo_screenheight()
def get_window_size(self, window):
return window.winfo_reqwidth(),window.winfo_reqheight()
def center_window(self, width, height):
screenwidth = self.master.winfo_screenwidth()
screenheight = self.master.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
self.master.geometry(size)
def show_img_in_canvas(self, pil_image):
self.imscale = 1.0 # scale for the canvas image zoom, public for outer classes
self.__delta = 1.3 # zoom magnitude
self.__filter = Image.ANTIALIAS # could be: NEAREST, BILINEAR, BICUBIC and ANTIALIAS
self.__previous_state = 0 # previous state of the keyboard
self.pil_image = pil_image
with warnings.catch_warnings(): # suppress DecompressionBombWarning
warnings.simplefilter('ignore')
self.__image = self.pil_image # open the image, but don't load it yet
self.imwidth, self.imheight = self.__image.size # public for outer classes
self.__min_side = min(self.imwidth, self.imheight) # get the smaller image side
# Create image pyramid
self.__pyramid = [self.pil_image]
# Set ratio coefficient for image pyramid
self.__ratio = 1.0
self.__curr_img = 0 # current image from the pyramid
self.__scale = self.imscale * self.__ratio # image pyramid scale
self.__reduction = 2 # reduction degree of image pyramid
w, h = self.__pyramid[-1].size
while w > 512 and h > 512: # top pyramid image is around 512 pixels in size
w /= self.__reduction # divide on reduction degree
h /= self.__reduction # divide on reduction degree
self.__pyramid.append(self.__pyramid[-1].resize((int(w), int(h)), self.__filter))
# Put image into container rectangle and use it to set proper coordinates to the image
self.container = self.canvas_image.create_rectangle((0, 0, self.imwidth, self.imheight), width=0)
self.__show_image() # show image on the canvas
self.canvas_image.focus_set() # set focus on the canvas
def __scroll_x(self, *args, **kwargs):
""" Scroll canvas horizontally and redraw the image """
self.canvas_image.xview(*args) # scroll horizontally
self.__show_image() # redraw the image
def __scroll_y(self, *args, **kwargs):
""" Scroll canvas vertically and redraw the image """
self.canvas_image.yview(*args) # scroll vertically
self.__show_image() # redraw the image
def __move_from(self, event):
""" Remember previous coordinates for scrolling with the mouse """
self.canvas_image.scan_mark(event.x, event.y)
self.from_coord = (event.x, event.y)
def __move_to(self, event):
""" Drag (move) canvas to the new position """
self.canvas_image.scan_dragto(event.x, event.y, gain=1)
self.to_coord = (event.x, event.y)
self.__show_image() # zoom tile and show it on the canvas
def get_move_gap(self, event):
""" B1 release时获取移动的距离 """
try:
self.move_gap = list(np.array(self.to_coord) - np.array(self.from_coord) + np.array(self.move_gap))
except:
self.move_gap = [0, 0]
def outside(self, x, y):
""" Checks if the point (x,y) is outside the image area """
bbox = self.canvas_image.coords(self.container) # get image area
if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
return False # point (x,y) is inside the image area
else:
return True # point (x,y) is outside the image area
def __wheel(self, event):
""" Zoom with mouse wheel """
x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas
y = self.canvas_image.canvasy(event.y)
if self.outside(x, y): return # zoom only inside image area
scale = 1.0
# Respond to Linux (event.num) or Windows (event.delta) wheel event
if event.num == 5 or event.delta == -120: # scroll down, smaller
if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels
self.imscale /= self.__delta
scale /= self.__delta
if event.num == 4 or event.delta == 120: # scroll up, bigger
i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1
if i < self.imscale: return # 1 pixel is bigger than the visible area
self.imscale *= self.__delta
scale *= self.__delta
# Take appropriate image from the pyramid
k = self.imscale * self.__ratio # temporary coefficient
self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)
self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))
#
self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects
# Redraw some figures before showing image on the screen
self.redraw_figures() # method for child classes
self.__show_image()
def __keystroke(self, event):
""" Scrolling with the keyboard.
Independent from the language of the keyboard, CapsLock, <Ctrl>+<key>, etc. """
if event.state - self.__previous_state == 4: # means that the Control key is pressed
pass # do nothing if Control key is pressed
else:
self.__previous_state = event.state # remember the last keystroke state
# Up, Down, Left, Right keystrokes
if event.keycode in [68, 39, 102]: # scroll right, keys 'd' or 'Right'
self.__scroll_x('scroll', 1, 'unit', event=event)
elif event.keycode in [65, 37, 100]: # scroll left, keys 'a' or 'Left'
self.__scroll_x('scroll', -1, 'unit', event=event)
elif event.keycode in [87, 38, 104]: # scroll up, keys 'w' or 'Up'
self.__scroll_y('scroll', -1, 'unit', event=event)
elif event.keycode in [83, 40, 98]: # scroll down, keys 's' or 'Down'
self.__scroll_y('scroll', 1, 'unit', event=event)
def redraw_figures(self):
""" Dummy function to redraw figures in the children classes """
pass
def crop(self, bbox):
""" Crop rectangle from the image and return it """
return self.__pyramid[0].crop(bbox)
def destroy(self):
""" ImageFrame destructor """
del self.move_gap
del self.canvas_image.imagetk
# # print(self.imageid)
self.pil_image.close()
del self.pil_image
self.canvas_image.delete(self.imageid) # remove the image from the canvas
for img in self.__pyramid: img.close()  # close all pyramid images
del self.__pyramid[:] # delete pyramid list
del self.__pyramid # delete pyramid variable
self.canvas_image.delete(tk.ALL)
self.canvas_image.destroy()
# self.img_frame.destroy()
def __show_image(self):
""" Show image on the Canvas. Implements correct image zoom almost like in Google Maps """
box_image = self.canvas_image.coords(self.container) # get image area
box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas
self.canvas_image.canvasy(0),
self.canvas_image.canvasx(self.canvas_image.winfo_width()),
self.canvas_image.canvasy(self.canvas_image.winfo_height()))
self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly
# Get scroll region box
box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),
max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]
# Horizontal part of the image is in the visible area
if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:
box_scroll[0] = self.box_img_int[0]
box_scroll[2] = self.box_img_int[2]
# Vertical part of the image is in the visible area
if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:
box_scroll[1] = self.box_img_int[1]
box_scroll[3] = self.box_img_int[3]
# Convert scroll region to tuple and to integer
self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region
x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile
y1 = max(box_canvas[1] - box_image[1], 0)
x2 = min(box_canvas[2], box_image[2]) - box_image[0]
y2 = min(box_canvas[3], box_image[3]) - box_image[1]
if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it is in the visible area
image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid
(int(x1 / self.__scale), int(y1 / self.__scale),
int(x2 / self.__scale), int(y2 / self.__scale)))
#
imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))
self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),
max(box_canvas[1], self.box_img_int[1]),
anchor='nw', image=imagetk)
self.canvas_image.lower(self.imageid) # set image into background
self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection
if __name__ == "__main__":
from function import Function
root = tk.Tk()
window = WholeWindow(root)
func = Function(root, obj_window=window)
# window.build_tree()
# photo = Photos("sample/0001.jpg")
# window.show_img_in_canvas(photo.pil_image)
root.mainloop()
| """
| identifier_name |
tree_dir.py | #!/usr/bin/env python3
'''
@File : tree_dir.py
@Time : 2018/12/26 13:39:13
@Author : yangshifu
@Version : 1.0
@Contact : [email protected]
@Desc : None
'''
import os
import math
import warnings
import numpy as np
import tkinter as tk
from tkinter import *
from tkinter import ttk
from idlelib import tree
from idlelib.config import idleConf
from PIL import Image, ImageTk
from photos import Photos
class SubFileTreeItem(tree.FileTreeItem):
def GetSubList(self):
try:
names = os.listdir(self.path)
# filter out hidden files
names = list(filter(lambda x: False if x.startswith('.') else True, names))
except OSError:
return []
names.sort(key = os.path.normcase)
sublist = []
for name in names:
item = tree.FileTreeItem(os.path.join(self.path, name))
sublist.append(item)
return sublist
class SubTreeNode(tree.TreeNode):
"""
Subclass of tree.TreeNode that adds Ctrl+left-click and right-click handling: multiple items can be selected, and a right click opens the selected jpg file(s).
"""
path_list = []
img_type = 'jpg'
photos = Photos()
def __init__(self, dir_canvas, parent, item):
tree.TreeNode.__init__(self, dir_canvas, parent, item)
def get_path_list(self, suffix=img_type):
""" get img_type file list such as get jpg files"""
img_list = list(filter(lambda x: x.endswith(suffix), self.path_list))
return img_list
def select_or_edit(self, event=None):
self.path_list.clear()
if self.selected and self.item.IsEditable():
self.edit(event)
else:
self.path_list.append(self.item.path)
self.select(event)
def select_more(self, event=None):
"""Control + 左键 触发选择多个文件或目录"""
self.path_list.append(self.item.path)
# if self.selected:
# return
# self.deselectall()
self.selected = True
# self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
def execute_file(self, event=None):
""" open jpg file or merge several jpg file then open it"""
file_list = self.get_path_list()
print(file_list)
if not file_list:
return
# merge image
# Fix a memory-leak bug: the previously opened image was never released, so the second open still showed the old image
try:
self.photos.destroy()
except:
pass
self.photos.imgs = file_list
merged_photo = self.photos.merge_photos()
# show image
try:
window.destroy()
except:
import traceback
traceback.print_exc()
window.build_img_canvas()
window.show_img_in_canvas(merged_photo)
def drawtext(self):
textx = self.x+20-1
texty = self.y-4
labeltext = self.item.GetLabelText()
if labeltext:
id = self.canvas.create_text(textx, texty, anchor="nw",
text=labeltext)
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
theme = idleConf.CurrentTheme()
if self.selected:
self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
else:
self.label.configure(idleConf.GetHighlight(theme, 'normal'))
id = self.canvas.create_window(textx, texty,
anchor="nw", window | dit)
self.label.bind("<Double-1>", self.flip)
self.label.bind("<Control-1>", self.select_more)
self.label.bind("<3>", self.execute_file)
self.text_id = id
class AutoScrollbar(ttk.Scrollbar):
""" A scrollbar that hides itself if it's not needed. Works only for grid geometry manager """
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
self.grid_remove()
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi)
def pack(self, **kw):
raise tk.TclError('Cannot use pack with the widget ' + self.__class__.__name__)
def place(self, **kw):
raise tk.TclError('Cannot use place with the widget ' + self.__class__.__name__)
class WholeWindow():
move_gap = [0, 0]
def __init__(self, master):
self.master = master # parent (root) window
self.screen_width, self.screen_height = self.get_screen_size(self.master)
self.center_window(self.screen_width-50, self.screen_height-50)
self.master.resizable(width=False, height=False)
self.build_tree_canvas()
self.build_tree()
self.build_img_canvas()
def build_tree_canvas(self):
# create frame
self.tree_width = self.screen_width // 7
self.tree_height = self.screen_height
frame = tk.Frame(self.master, width=self.tree_width, height=self.tree_height)
frame.grid(row=0, column=0)
# canvas
self.tree_canvas=tk.Canvas(frame, bg='#FFFFFF', scrollregion=(0,0,500,500))
# vbar & hbar
hbar=tk.Scrollbar(frame,orient=tk.HORIZONTAL)
hbar.pack(side=tk.BOTTOM,fill=tk.X)
hbar.config(command=self.tree_canvas.xview)
vbar=tk.Scrollbar(frame,orient=tk.VERTICAL)
vbar.pack(side=tk.RIGHT,fill=tk.Y)
vbar.config(command=self.tree_canvas.yview)
self.tree_canvas.config(width=self.tree_width,height=self.tree_height)
self.tree_canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
self.tree_canvas.pack(side=tk.LEFT,expand=True,fill=tk.BOTH)
def build_tree(self):
home = os.environ.get('HOME')
item = SubFileTreeItem(home)
node = SubTreeNode(self.tree_canvas, None, item)
node.update()
def build_img_canvas(self):
self.box_width = self.screen_width - self.tree_width
self.img_frame = tk.Frame(self.master, width=self.box_width, height=self.screen_height, background='red')
self.img_frame.grid(row=0, column=1)
hbar = AutoScrollbar(self.img_frame, orient=tk.HORIZONTAL)
vbar = AutoScrollbar(self.img_frame, orient=tk.VERTICAL)
hbar.grid(row=1, column=1, sticky='we')
vbar.grid(row=0, column=2, sticky='ns')
# Create canvas and bind it with scrollbars
self.canvas_image = tk.Canvas(self.img_frame, highlightthickness=0, width=self.box_width, height=self.screen_height,
xscrollcommand=hbar.set, yscrollcommand=vbar.set, background='blue')
self.canvas_image.grid(row=0, column=1, sticky='nswe')
self.canvas_image.update() # wait till canvas is created
hbar.configure(command=self.__scroll_x) # bind scrollbars to the canvas
vbar.configure(command=self.__scroll_y)
# Bind events to the Canvas
self.canvas_image.bind('<Configure>', lambda event: self.__show_image()) # canvas is resized
self.canvas_image.bind('<Control-ButtonPress-1>', self.__move_from) # remember canvas position
self.canvas_image.bind('<Control-B1-Motion>', self.__move_to) # move canvas to the new position
self.canvas_image.bind('<Control-B1-ButtonRelease>', self.get_move_gap)
self.canvas_image.bind('<MouseWheel>', self.__wheel) # zoom for Windows and MacOS, but not Linux
self.canvas_image.bind('<Button-5>', self.__wheel) # zoom for Linux, wheel scroll down
self.canvas_image.bind('<Button-4>', self.__wheel) # zoom for Linux, wheel scroll up
# Handle keystrokes in idle mode, because the program slows down on weak computers
# when too many keystroke events arrive at the same time
# self.canvas_image.bind('<Key>', lambda event: self.canvas_image.after_idle(self.__keystroke, event))
def get_screen_size(self, window):
return window.winfo_screenwidth(),window.winfo_screenheight()
def get_window_size(self, window):
return window.winfo_reqwidth(),window.winfo_reqheight()
def center_window(self, width, height):
screenwidth = self.master.winfo_screenwidth()
screenheight = self.master.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
self.master.geometry(size)
def show_img_in_canvas(self, pil_image):
self.imscale = 1.0 # scale for the canvas image zoom, public for outer classes
self.__delta = 1.3 # zoom magnitude
self.__filter = Image.ANTIALIAS # could be: NEAREST, BILINEAR, BICUBIC and ANTIALIAS
self.__previous_state = 0 # previous state of the keyboard
self.pil_image = pil_image
with warnings.catch_warnings(): # suppress DecompressionBombWarning
warnings.simplefilter('ignore')
self.__image = self.pil_image # open image, but don't load it
self.imwidth, self.imheight = self.__image.size # public for outer classes
self.__min_side = min(self.imwidth, self.imheight) # get the smaller image side
# Create image pyramid
self.__pyramid = [self.pil_image]
# Set ratio coefficient for image pyramid
self.__ratio = 1.0
self.__curr_img = 0 # current image from the pyramid
self.__scale = self.imscale * self.__ratio # image pyramid scale
self.__reduction = 2 # reduction degree of image pyramid
w, h = self.__pyramid[-1].size
while w > 512 and h > 512: # top pyramid image is around 512 pixels in size
w /= self.__reduction # divide on reduction degree
h /= self.__reduction # divide on reduction degree
self.__pyramid.append(self.__pyramid[-1].resize((int(w), int(h)), self.__filter))
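# Illustrative note (assumed sizes, not in the original source): a 4096x3072 photo
# yields pyramid levels 4096x3072, 2048x1536, 1024x768 and 512x384; the loop stops
# as soon as either side is no longer greater than 512 pixels.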
# Put image into container rectangle and use it to set proper coordinates to the image
self.container = self.canvas_image.create_rectangle((0, 0, self.imwidth, self.imheight), width=0)
self.__show_image() # show image on the canvas
self.canvas_image.focus_set() # set focus on the canvas
def __scroll_x(self, *args, **kwargs):
""" Scroll canvas horizontally and redraw the image """
self.canvas_image.xview(*args) # scroll horizontally
self.__show_image() # redraw the image
def __scroll_y(self, *args, **kwargs):
""" Scroll canvas vertically and redraw the image """
self.canvas_image.yview(*args) # scroll vertically
self.__show_image() # redraw the image
def __move_from(self, event):
""" Remember previous coordinates for scrolling with the mouse """
self.canvas_image.scan_mark(event.x, event.y)
self.from_coord = (event.x, event.y)
def __move_to(self, event):
""" Drag (move) canvas to the new position """
self.canvas_image.scan_dragto(event.x, event.y, gain=1)
self.to_coord = (event.x, event.y)
self.__show_image() # zoom tile and show it on the canvas
def get_move_gap(self, event):
""" B1 release时获取移动的距离 """
try:
self.move_gap = list(np.array(self.to_coord) - np.array(self.from_coord) + np.array(self.move_gap))
except:
self.move_gap = [0, 0]
def outside(self, x, y):
""" Checks if the point (x,y) is outside the image area """
bbox = self.canvas_image.coords(self.container) # get image area
if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
return False # point (x,y) is inside the image area
else:
return True # point (x,y) is outside the image area
def __wheel(self, event):
""" Zoom with mouse wheel """
x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas
y = self.canvas_image.canvasy(event.y)
if self.outside(x, y): return # zoom only inside image area
scale = 1.0
# Respond to Linux (event.num) or Windows (event.delta) wheel event
if event.num == 5 or event.delta == -120: # scroll down, smaller
if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels
self.imscale /= self.__delta
scale /= self.__delta
if event.num == 4 or event.delta == 120: # scroll up, bigger
i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1
if i < self.imscale: return # 1 pixel is bigger than the visible area
self.imscale *= self.__delta
scale *= self.__delta
# Take appropriate image from the pyramid
k = self.imscale * self.__ratio # temporary coefficient
self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)
self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))
#
self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects
# Redraw some figures before showing image on the screen
self.redraw_figures() # method for child classes
self.__show_image()
def __keystroke(self, event):
""" Scrolling with the keyboard.
Independent from the language of the keyboard, CapsLock, <Ctrl>+<key>, etc. """
if event.state - self.__previous_state == 4: # means that the Control key is pressed
pass # do nothing if Control key is pressed
else:
self.__previous_state = event.state # remember the last keystroke state
# Up, Down, Left, Right keystrokes
if event.keycode in [68, 39, 102]: # scroll right, keys 'd' or 'Right'
self.__scroll_x('scroll', 1, 'unit', event=event)
elif event.keycode in [65, 37, 100]: # scroll left, keys 'a' or 'Left'
self.__scroll_x('scroll', -1, 'unit', event=event)
elif event.keycode in [87, 38, 104]: # scroll up, keys 'w' or 'Up'
self.__scroll_y('scroll', -1, 'unit', event=event)
elif event.keycode in [83, 40, 98]: # scroll down, keys 's' or 'Down'
self.__scroll_y('scroll', 1, 'unit', event=event)
def redraw_figures(self):
""" Dummy function to redraw figures in the children classes """
pass
def crop(self, bbox):
""" Crop rectangle from the image and return it """
return self.__pyramid[0].crop(bbox)
def destroy(self):
""" ImageFrame destructor """
del self.move_gap
del self.canvas_image.imagetk
# # print(self.imageid)
self.pil_image.close()
del self.pil_image
self.canvas_image.delete(self.imageid) # remove the image from the canvas
for img in self.__pyramid: img.close()  # close all pyramid images
del self.__pyramid[:] # delete pyramid list
del self.__pyramid # delete pyramid variable
self.canvas_image.delete(tk.ALL)
self.canvas_image.destroy()
# self.img_frame.destroy()
def __show_image(self):
""" Show image on the Canvas. Implements correct image zoom almost like in Google Maps """
box_image = self.canvas_image.coords(self.container) # get image area
box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas
self.canvas_image.canvasy(0),
self.canvas_image.canvasx(self.canvas_image.winfo_width()),
self.canvas_image.canvasy(self.canvas_image.winfo_height()))
self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly
# Get scroll region box
box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),
max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]
# Horizontal part of the image is in the visible area
if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:
box_scroll[0] = self.box_img_int[0]
box_scroll[2] = self.box_img_int[2]
# Vertical part of the image is in the visible area
if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:
box_scroll[1] = self.box_img_int[1]
box_scroll[3] = self.box_img_int[3]
# Convert scroll region to tuple and to integer
self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region
x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile
y1 = max(box_canvas[1] - box_image[1], 0)
x2 = min(box_canvas[2], box_image[2]) - box_image[0]
y2 = min(box_canvas[3], box_image[3]) - box_image[1]
if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it is in the visible area
image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid
(int(x1 / self.__scale), int(y1 / self.__scale),
int(x2 / self.__scale), int(y2 / self.__scale)))
#
imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))
self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),
max(box_canvas[1], self.box_img_int[1]),
anchor='nw', image=imagetk)
self.canvas_image.lower(self.imageid) # set image into background
self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection
if __name__ == "__main__":
from function import Function
root = tk.Tk()
window = WholeWindow(root)
func = Function(root, obj_window=window)
# window.build_tree()
# photo = Photos("sample/0001.jpg")
# window.show_img_in_canvas(photo.pil_image)
root.mainloop()
| =self.label)
self.label.bind("<1>", self.select_or_e | conditional_block |
tree_dir.py | #!/usr/bin/env python3
'''
@File : tree_dir.py
@Time : 2018/12/26 13:39:13
@Author : yangshifu
@Version : 1.0
@Contact : [email protected]
@Desc : None
'''
import os
import math
import warnings
import numpy as np
import tkinter as tk
from tkinter import *
from tkinter import ttk
from idlelib import tree
from idlelib.config import idleConf
from PIL import Image, ImageTk
from photos import Photos
class SubFileTreeItem(tree.FileTreeItem):
def GetSubList(self):
try:
names = os.listdir(self.path)
# filter out hidden files
names = list(filter(lambda x: False if x.startswith('.') else True, names))
except OSError:
return []
names.sort(key = os.path.normcase)
sublist = []
for name in names:
item = tree.FileTreeItem(os.path.join(self.path, name))
sublist.append(item)
return sublist
class SubTreeNode(tree.TreeNode):
"""
Subclass of tree.TreeNode that adds Ctrl+left-click and right-click handling: multiple items can be selected, and a right click opens the selected jpg file(s).
"""
path_list = []
img_type = 'jpg'
photos = Photos()
def __init__(self, dir_canvas, parent, item):
tree.TreeNode.__init__(self, dir_canvas, parent, item)
def get_path_list(self, suffix=img_type):
""" get img_type file list such as get jpg files"""
img_list = list(filter(lambda x: x.endswith(suffix), self.path_list))
return img_list
def select_or_edit(self, event=None):
self.path_list.clear()
if self.selected and self.item.IsEditable():
self.edit(event)
else:
self.path_list.append(self.item.path)
self.select(event)
def select_more(self, event=None):
"""Control + 左键 触发选择多个文件或目录"""
self.path_list.append(self.item.path)
# if self.selected:
# return
# self.deselectall()
self.selected = True
# self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
def execute_file(self, event=None):
""" open jpg file or merge several jpg file then open it"""
file_list = self.get_path_list()
print(file_list)
if not file_list:
return
# merge image
# Fix a memory-leak bug: the previously opened image was never released, so the second open still showed the old image
try:
self.photos.destroy()
except:
pass
self.photos.imgs = file_list
merged_photo = self.photos.merge_photos()
# show image
try:
window.destroy()
except:
import traceback
traceback.print_exc()
window.build_img_canvas()
window.show_img_in_canvas(merged_photo)
def drawtext(self):
textx = self.x+20-1
texty = self.y-4
labeltext = self.item.GetLabelText()
if labeltext:
id = self.canvas.create_text(textx, texty, anchor="nw",
text=labeltext)
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
theme = idleConf.CurrentTheme()
if self.selected:
self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
else:
self.label.configure(idleConf.GetHighlight(theme, 'normal'))
id = self.canvas.create_window(textx, texty,
anchor="nw", window=self.label)
self.label.bind("<1>", self.select_or_edit)
self.label.bind("<Double-1>", self.flip)
self.label.bind("<Control-1>", self.select_more)
self.label.bind("<3>", self.execute_file)
self.text_id = id
class AutoScrollbar(ttk.Scrollbar):
""" A scrollbar that hides itself if it's not needed. Works only for grid geometry manager """
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
| f.get_screen_size(self.master)
self.center_window(self.screen_width-50, self.screen_height-50)
self.master.resizable(width=False, height=False)
self.build_tree_canvas()
self.build_tree()
self.build_img_canvas()
def build_tree_canvas(self):
# create frame
self.tree_width = self.screen_width // 7
self.tree_height = self.screen_height
frame = tk.Frame(self.master, width=self.tree_width, height=self.tree_height)
frame.grid(row=0, column=0)
# canvas
self.tree_canvas=tk.Canvas(frame, bg='#FFFFFF', scrollregion=(0,0,500,500))
# vbar & hbar
hbar=tk.Scrollbar(frame,orient=tk.HORIZONTAL)
hbar.pack(side=tk.BOTTOM,fill=tk.X)
hbar.config(command=self.tree_canvas.xview)
vbar=tk.Scrollbar(frame,orient=tk.VERTICAL)
vbar.pack(side=tk.RIGHT,fill=tk.Y)
vbar.config(command=self.tree_canvas.yview)
self.tree_canvas.config(width=self.tree_width,height=self.tree_height)
self.tree_canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
self.tree_canvas.pack(side=tk.LEFT,expand=True,fill=tk.BOTH)
def build_tree(self):
home = os.environ.get('HOME')
item = SubFileTreeItem(home)
node = SubTreeNode(self.tree_canvas, None, item)
node.update()
def build_img_canvas(self):
self.box_width = self.screen_width - self.tree_width
self.img_frame = tk.Frame(self.master, width=self.box_width, height=self.screen_height, background='red')
self.img_frame.grid(row=0, column=1)
hbar = AutoScrollbar(self.img_frame, orient=tk.HORIZONTAL)
vbar = AutoScrollbar(self.img_frame, orient=tk.VERTICAL)
hbar.grid(row=1, column=1, sticky='we')
vbar.grid(row=0, column=2, sticky='ns')
# Create canvas and bind it with scrollbars
self.canvas_image = tk.Canvas(self.img_frame, highlightthickness=0, width=self.box_width, height=self.screen_height,
xscrollcommand=hbar.set, yscrollcommand=vbar.set, background='blue')
self.canvas_image.grid(row=0, column=1, sticky='nswe')
self.canvas_image.update() # wait till canvas is created
hbar.configure(command=self.__scroll_x) # bind scrollbars to the canvas
vbar.configure(command=self.__scroll_y)
# Bind events to the Canvas
self.canvas_image.bind('<Configure>', lambda event: self.__show_image()) # canvas is resized
self.canvas_image.bind('<Control-ButtonPress-1>', self.__move_from) # remember canvas position
self.canvas_image.bind('<Control-B1-Motion>', self.__move_to) # move canvas to the new position
self.canvas_image.bind('<Control-B1-ButtonRelease>', self.get_move_gap)
self.canvas_image.bind('<MouseWheel>', self.__wheel) # zoom for Windows and MacOS, but not Linux
self.canvas_image.bind('<Button-5>', self.__wheel) # zoom for Linux, wheel scroll down
self.canvas_image.bind('<Button-4>', self.__wheel) # zoom for Linux, wheel scroll up
# Handle keystrokes in idle mode, because the program slows down on weak computers
# when too many keystroke events arrive at the same time
# self.canvas_image.bind('<Key>', lambda event: self.canvas_image.after_idle(self.__keystroke, event))
def get_screen_size(self, window):
return window.winfo_screenwidth(),window.winfo_screenheight()
def get_window_size(self, window):
return window.winfo_reqwidth(),window.winfo_reqheight()
def center_window(self, width, height):
screenwidth = self.master.winfo_screenwidth()
screenheight = self.master.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
self.master.geometry(size)
def show_img_in_canvas(self, pil_image):
self.imscale = 1.0 # scale for the canvas image zoom, public for outer classes
self.__delta = 1.3 # zoom magnitude
self.__filter = Image.ANTIALIAS # could be: NEAREST, BILINEAR, BICUBIC and ANTIALIAS
self.__previous_state = 0 # previous state of the keyboard
self.pil_image = pil_image
with warnings.catch_warnings(): # suppress DecompressionBombWarning
warnings.simplefilter('ignore')
self.__image = self.pil_image # open image, but don't load it
self.imwidth, self.imheight = self.__image.size # public for outer classes
self.__min_side = min(self.imwidth, self.imheight) # get the smaller image side
# Create image pyramid
self.__pyramid = [self.pil_image]
# Set ratio coefficient for image pyramid
self.__ratio = 1.0
self.__curr_img = 0 # current image from the pyramid
self.__scale = self.imscale * self.__ratio # image pyramid scale
self.__reduction = 2 # reduction degree of image pyramid
w, h = self.__pyramid[-1].size
while w > 512 and h > 512: # top pyramid image is around 512 pixels in size
w /= self.__reduction # divide on reduction degree
h /= self.__reduction # divide on reduction degree
self.__pyramid.append(self.__pyramid[-1].resize((int(w), int(h)), self.__filter))
# Put image into container rectangle and use it to set proper coordinates to the image
self.container = self.canvas_image.create_rectangle((0, 0, self.imwidth, self.imheight), width=0)
self.__show_image() # show image on the canvas
self.canvas_image.focus_set() # set focus on the canvas
def __scroll_x(self, *args, **kwargs):
""" Scroll canvas horizontally and redraw the image """
self.canvas_image.xview(*args) # scroll horizontally
self.__show_image() # redraw the image
def __scroll_y(self, *args, **kwargs):
""" Scroll canvas vertically and redraw the image """
self.canvas_image.yview(*args) # scroll vertically
self.__show_image() # redraw the image
def __move_from(self, event):
""" Remember previous coordinates for scrolling with the mouse """
self.canvas_image.scan_mark(event.x, event.y)
self.from_coord = (event.x, event.y)
def __move_to(self, event):
""" Drag (move) canvas to the new position """
self.canvas_image.scan_dragto(event.x, event.y, gain=1)
self.to_coord = (event.x, event.y)
self.__show_image() # zoom tile and show it on the canvas
def get_move_gap(self, event):
""" B1 release时获取移动的距离 """
try:
self.move_gap = list(np.array(self.to_coord) - np.array(self.from_coord) + np.array(self.move_gap))
except:
self.move_gap = [0, 0]
def outside(self, x, y):
""" Checks if the point (x,y) is outside the image area """
bbox = self.canvas_image.coords(self.container) # get image area
if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
return False # point (x,y) is inside the image area
else:
return True # point (x,y) is outside the image area
def __wheel(self, event):
""" Zoom with mouse wheel """
x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas
y = self.canvas_image.canvasy(event.y)
if self.outside(x, y): return # zoom only inside image area
scale = 1.0
# Respond to Linux (event.num) or Windows (event.delta) wheel event
if event.num == 5 or event.delta == -120: # scroll down, smaller
if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels
self.imscale /= self.__delta
scale /= self.__delta
if event.num == 4 or event.delta == 120: # scroll up, bigger
i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1
if i < self.imscale: return # 1 pixel is bigger than the visible area
self.imscale *= self.__delta
scale *= self.__delta
# Take appropriate image from the pyramid
k = self.imscale * self.__ratio # temporary coefficient
self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)
self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))
#
self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects
# Redraw some figures before showing image on the screen
self.redraw_figures() # method for child classes
self.__show_image()
def __keystroke(self, event):
""" Scrolling with the keyboard.
Independent from the language of the keyboard, CapsLock, <Ctrl>+<key>, etc. """
if event.state - self.__previous_state == 4: # means that the Control key is pressed
pass # do nothing if Control key is pressed
else:
self.__previous_state = event.state # remember the last keystroke state
# Up, Down, Left, Right keystrokes
if event.keycode in [68, 39, 102]: # scroll right, keys 'd' or 'Right'
self.__scroll_x('scroll', 1, 'unit', event=event)
elif event.keycode in [65, 37, 100]: # scroll left, keys 'a' or 'Left'
self.__scroll_x('scroll', -1, 'unit', event=event)
elif event.keycode in [87, 38, 104]: # scroll up, keys 'w' or 'Up'
self.__scroll_y('scroll', -1, 'unit', event=event)
elif event.keycode in [83, 40, 98]: # scroll down, keys 's' or 'Down'
self.__scroll_y('scroll', 1, 'unit', event=event)
def redraw_figures(self):
""" Dummy function to redraw figures in the children classes """
pass
def crop(self, bbox):
""" Crop rectangle from the image and return it """
return self.__pyramid[0].crop(bbox)
def destroy(self):
""" ImageFrame destructor """
del self.move_gap
del self.canvas_image.imagetk
# # print(self.imageid)
self.pil_image.close()
del self.pil_image
self.canvas_image.delete(self.imageid) # remove the image from the canvas
for img in self.__pyramid: img.close()  # close all pyramid images
del self.__pyramid[:] # delete pyramid list
del self.__pyramid # delete pyramid variable
self.canvas_image.delete(tk.ALL)
self.canvas_image.destroy()
# self.img_frame.destroy()
def __show_image(self):
""" Show image on the Canvas. Implements correct image zoom almost like in Google Maps """
box_image = self.canvas_image.coords(self.container) # get image area
box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas
self.canvas_image.canvasy(0),
self.canvas_image.canvasx(self.canvas_image.winfo_width()),
self.canvas_image.canvasy(self.canvas_image.winfo_height()))
self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly
# Get scroll region box
box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),
max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]
# Horizontal part of the image is in the visible area
if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:
box_scroll[0] = self.box_img_int[0]
box_scroll[2] = self.box_img_int[2]
# Vertical part of the image is in the visible area
if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:
box_scroll[1] = self.box_img_int[1]
box_scroll[3] = self.box_img_int[3]
# Convert scroll region to tuple and to integer
self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region
x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile
y1 = max(box_canvas[1] - box_image[1], 0)
x2 = min(box_canvas[2], box_image[2]) - box_image[0]
y2 = min(box_canvas[3], box_image[3]) - box_image[1]
if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it is in the visible area
image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid
(int(x1 / self.__scale), int(y1 / self.__scale),
int(x2 / self.__scale), int(y2 / self.__scale)))
#
imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))
self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),
max(box_canvas[1], self.box_img_int[1]),
anchor='nw', image=imagetk)
self.canvas_image.lower(self.imageid) # set image into background
self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection
if __name__ == "__main__":
from function import Function
root = tk.Tk()
window = WholeWindow(root)
func = Function(root, obj_window=window)
# window.build_tree()
# photo = Photos("sample/0001.jpg")
# window.show_img_in_canvas(photo.pil_image)
root.mainloop()
| self.grid_remove()
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi)
def pack(self, **kw):
raise tk.TclError('Cannot use pack with the widget ' + self.__class__.__name__)
def place(self, **kw):
raise tk.TclError('Cannot use place with the widget ' + self.__class__.__name__)
class WholeWindow():
move_gap = [0, 0]
def __init__(self, master):
self.master = master # parent (root) window
self.screen_width, self.screen_height = sel | identifier_body |
global_stats.rs | use ahash::HashMapExt;
use categories::CATEGORIES;
use categories::Category;
use categories::CategoryMap;
use chrono::prelude::*;
use futures::future::try_join;
use crate::Page;
use crate::templates;
use crate::Urler;
use kitchen_sink::CompatByCrateVersion;
use kitchen_sink::KitchenSink;
use kitchen_sink::Origin;
use locale::Numeric;
use peeking_take_while::PeekableExt;
use rand::seq::SliceRandom;
use render_readme::Renderer;
use ahash::HashMap;
use std::io::Write;
#[derive(Debug)]
pub struct GlobalStats {
pub(crate) total_crate_num: u32,
pub(crate) total_owners_at_month: Vec<u32>,
pub(crate) max_total_owners: u32,
pub(crate) max_daily_downloads_rate: u32,
pub(crate) max_downloads_per_week: u64,
pub(crate) start_week_offset: u32,
pub(crate) dl_grid_line_every: u64,
pub(crate) weeks_to_reach_max_downloads: u32,
pub(crate) dl_per_day_this_year: (u64, u64),
pub(crate) dl_per_day_last_year: (u64, u64),
pub(crate) hs_releases: Histogram,
pub(crate) hs_sizes: Histogram,
pub(crate) hs_deps1: Histogram,
pub(crate) hs_deps2: Histogram,
pub(crate) hs_rev_deps: Histogram,
pub(crate) hs_maintenance: Histogram,
pub(crate) hs_age: Histogram,
pub(crate) hs_languish: Histogram,
pub(crate) hs_owner_crates: Histogram,
pub(crate) categories: Vec<TreeBox>,
pub(crate) rustc_stats_all: Vec<Compat>,
pub(crate) rustc_stats_recent: Vec<Compat>,
pub(crate) rustc_stats_recent_num: usize,
}
pub type CallbackFn = fn(&Urler, &str) -> String;
impl GlobalStats {
pub fn relative_increase(val: (u64, u64)) -> String {
format!("{:.1}×", val.0 as f64 / val.1 as f64)
}
pub fn dl_ratio_up(&self) -> bool {
let r1 = self.dl_per_day_this_year.0 as f64 / self.dl_per_day_this_year.1 as f64;
let r2 = self.dl_per_day_last_year.0 as f64 / self.dl_per_day_last_year.1 as f64;
r1 > r2
}
}
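// Illustrative sketch (added, not part of the original file): how relative_increase
// formats a year-over-year pair. The test module name is arbitrary.
#[cfg(test)]
mod global_stats_examples {
    use super::*;

    #[test]
    fn relative_increase_formats_one_decimal() {
        // 300k average weekday downloads now vs 200k a year ago -> "1.5×"
        assert_eq!(GlobalStats::relative_increase((300, 200)), "1.5×");
    }
}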
fn downloads_over_time(start: Date<Utc>, mut day: Date<Utc>, kitchen_sink: &KitchenSink) -> Result<Vec<(u64, u64)>, anyhow::Error> {
let mut current_year = 0;
let mut current = [0; 366];
let mut dl = Vec::new();
while day > start {
let year = day.year() as u16;
if year != current_year {
current_year = year;
current = kitchen_sink.total_year_downloads(current_year)?;
}
let n = current[day.ordinal0() as usize];
if n > 0 {
break;
}
day -= chrono::Duration::days(1);
}
while day > start {
let mut weekday_sum = 0;
let mut weekend_sum = 0;
for _ in 0..7 {
let year = day.year() as u16;
if year != current_year {
current_year = year;
current = kitchen_sink.total_year_downloads(current_year)?;
}
let n = current[day.ordinal0() as usize];
match day.weekday() {
// this sucks a bit due to mon/fri being UTC, and overlapping with the weekend
// in the rest of the world.
Weekday::Sat | Weekday::Sun => weekend_sum += n,
_ => weekday_sum += n,
};
day -= chrono::Duration::days(1);
}
dl.push((weekday_sum, weekend_sum));
}
dl.reverse();
Ok(dl)
}
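// Illustrative note (added, not part of the original file): the first loop above walks
// backwards from `day`, skipping trailing days with no download data yet; the second
// folds each 7-day chunk into one (weekday_sum, weekend_sum) pair, and the final
// reverse() restores chronological order. A week with five 10k weekdays and two 4k
// weekend days would contribute (50_000, 8_000).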
pub async fn render_global_stats(out: &mut impl Write, kitchen_sink: &KitchenSink, _renderer: &Renderer) -> Result<(), anyhow::Error> {
let (categories, recent_crates) = try_join(
category_stats(kitchen_sink),
kitchen_sink.notable_recently_updated_crates(4100)).await?;
let urler = Urler::new(None);
let start = Utc.ymd(2015, 5, 15); // Rust 1.0
let start_week_offset = start.ordinal0()/7;
let end = Utc::today() - chrono::Duration::days(2);
let latest_rustc_version = end.signed_duration_since(start).num_weeks()/6;
let mut compat_data = tokio::task::block_in_place(|| kitchen_sink.all_crate_compat())?;
let rustc_stats_all = rustc_stats(&compat_data, latest_rustc_version as u16)?;
let mut recent_compat = HashMap::with_capacity(recent_crates.len());
let mut rustc_stats_recent_num = 0;
for (o, _) in recent_crates {
if let Some(v) = compat_data.remove(&o) {
recent_compat.insert(o, v);
rustc_stats_recent_num += 1;
if rustc_stats_recent_num >= 4000 {
break;
}
}
}
let rustc_stats_recent = rustc_stats(&recent_compat, latest_rustc_version as u16)?;
let dl = downloads_over_time(start, end, kitchen_sink)?;
let (total_owners_at_month, mut hs_owner_crates) = owner_stats(kitchen_sink, start).await?;
hs_owner_crates.buckets.iter_mut().take(4).for_each(|c| c.examples.truncate(6)); // normal amount of crates is boring
assert!(dl.len() >= 52*2);
let this_year = &dl[dl.len()-52..];
let last_year = &dl[dl.len()-52*2..dl.len()-52];
fn sum2(s: &[(u64, u64)]) -> (u64, u64) {
let mut a_sum = 0;
let mut b_sum = 0;
s.iter().for_each(|&(a, b)| { a_sum += a; b_sum += b; });
(a_sum, b_sum)
}
let max_daily_downloads_rate = this_year.iter().map(move |(d, e)| (d/5).max(e/2)).max().unwrap_or(0) as u32;
let mut tmp_sum = 0;
let downloads_this_year = sum2(this_year);
let downloads_last_year = sum2(last_year);
let max_downloads_per_week = dl.iter().map(|(a, b)| a + b).max().unwrap_or(0);
let max_total_owners = total_owners_at_month.iter().copied().max().unwrap_or(0);
let dl_grid_line_every = (max_downloads_per_week / 6_000_000) * 1_000_000;
let mut hs_deps1 = Histogram::new(kitchen_sink.get_stats_histogram("deps")?.expect("hs_deps"), true,
&[0,1,2,3,4,5,6,7,8,9,10,11,12,14,16,18,20,25,30,40,60,80,100,120,150],
|n| if n > 11 {format!("≥{n}")} else {n.to_string()});
let hs_deps2 = Histogram {
max: hs_deps1.max,
buckets: hs_deps1.buckets.split_off(10),
bucket_labels: hs_deps1.bucket_labels.split_off(10),
};
let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?;
let mut hs_rev_deps = Histogram::new(rev_deps, true,
&[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000],
|n| if n > 2 {format!("≥{n}")} else {n.to_string()});
hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5));
let age_label = |n| match n {
0..=1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
};
let total_crate_num = kitchen_sink.all_crates().count() as u32;
let stats = GlobalStats {
total_crate_num,
total_owners_at_month,
max_total_owners,
max_daily_downloads_rate,
start_week_offset,
weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32,
dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2),
dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2),
max_downloads_per_week,
dl_grid_line_every,
hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}),
hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| {
let mut t = format_bytes(n*1024);
t.insert(0, '≤'); t
}),
hs_deps1, hs_deps2,
hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n {
0 => "one-off".to_string(),
1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
}),
hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_owner_crates,
categories,
rustc_stats_all,
rustc_stats_recent,
rustc_stats_recent_num,
hs_rev_deps,
};
templates::global_stats(out, &Page {
title: "State of the Rust/Cargo crates ecosystem".to_owned(),
description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? Is Rust usage growing?".to_owned()),
noindex: false,
search_meta: true,
critical_css_data: Some(include_str!("../../style/public/home.css")),
critical_css_dev_url: Some("/home.css"),
..Default::default()
}, &dl, &stats, &urler)?;
Ok(())
}
#[derive(Default, Copy, Clone, Debug)]
pub struct Compat {
pub(crate) bad: u32,
pub(crate) maybe_bad: u32,
pub(crate) unknown: u32,
pub(crate) maybe_ok: u32,
pub(crate) ok: u32,
}
impl Compat {
pub fn sum(&self) -> u32 {
self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok
}
}
fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> {
// (ok, maybe, not), [0] is unused
let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize];
for c in compat.values() {
// can't compile at all
if !c.iter().any(|(_, c)| c.has_ever_built()) {
continue;
}
// stats for latest crate version only
let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) {
Some((_, c)) => c,
None => continue,
};
let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) {
Some((_, c)) => c,
None => latest_ver,
};
let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0);
let newest_bad = latest_ver.newest_bad().unwrap_or(0);
let oldest_ok = latest_ver.oldest_ok().unwrap_or(999);
let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999);
for (ver, c) in rustc_versions.iter_mut().enumerate() {
let ver = ver as u16;
if ver >= oldest_ok {
if ver >= oldest_ok_raw {
c.ok += 1;
} else {
c.maybe_ok += 1;
}
} else if ver <= newest_bad {
if ver <= newest_bad_raw {
c.bad += 1;
} else {
c.maybe_bad += 1;
}
} else {
c.unknown += 1;
}
}
}
// resize to width
let width = 330;
for c in &mut rustc_versions {
let sum = c.sum();
c.bad = (c.bad * width + width / 2) / sum;
c.ok = (c.ok * width + width / 2) / sum;
c.maybe_bad = (c.maybe_bad * width + width / 2) / sum;
c.maybe_ok = (c.maybe_ok * width + width / 2) / sum;
c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok;
}
Ok(rustc_versions)
}
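// Illustrative note (added, not part of the original file): the "resize to width" loop
// rescales every Compat row so its five counts sum to the 330px bar used by the
// template. With 1000 crates of which 100 are known-bad, `bad` becomes
// (100 * 330 + 165) / 1000 = 33 bar units, and `unknown` absorbs the rounding slack.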
fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> {
let mut out = Vec::with_capacity(sub.len());
for c in sub.values() {
if c.slug == "uncategorized" {
continue;
}
out.push(TreeBox {
cat: c,
label: c.name.clone(),
title: c.name.clone(),
count: 0,
weight: 0.,
bounds: treemap::Rect::new(),
color: String::new(),
font_size: 12.,
sub: cat_slugs(&c.sub),
});
}
out
}
#[derive(Debug, Clone)]
pub struct TreeBox {
pub cat: &'static C | ,
pub title: String,
pub label: String,
pub font_size: f64,
/// SVG fill
pub color: String,
pub count: u32,
pub weight: f64,
pub bounds: treemap::Rect,
pub sub: Vec<TreeBox>,
}
impl TreeBox {
pub fn line_y(&self, nth: usize) -> f64 {
self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64
}
pub fn can_fit_count(&self) -> bool {
self.line_y(self.label.lines().count()) + 1. - self.bounds.y < self.bounds.h
}
}
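// Illustrative note (added, not part of the original file): with font_size = 12 and a
// box starting at y = 100, line_y(0) = 100 + 1 + 12 * 1.1 = 114.2 is the baseline of
// the first label line; can_fit_count() then checks whether one more line (for the
// crate count) still fits within bounds.h.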
impl treemap::Mappable for TreeBox {
fn size(&self) -> f64 { self.weight }
fn bounds(&self) -> &treemap::Rect { &self.bounds }
fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; }
}
async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> {
use treemap::*;
let mut roots = cat_slugs(&CATEGORIES.root);
#[track_caller]
fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
items.swap_remove(pos)
}
#[track_caller]
fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
&mut items[pos]
}
fn new_cat(sub: Vec<TreeBox>) -> TreeBox {
TreeBox {
cat: CATEGORIES.root.values().next().unwrap(),
title: String::new(),
label: String::new(),
font_size: 0.,
color: String::new(),
count: 0,
weight: 0.,
bounds: Rect::new(),
sub,
}
}
// names don't fit
get_cat("database-implementations", &mut roots).label = "Database".into();
get_cat("simulation", &mut roots).label = "Sim".into();
get_cat("caching", &mut roots).label = "Cache".into();
get_cat("config", &mut roots).label = "Config".into();
get_cat("os", &mut roots).label = "OS".into();
get_cat("internationalization", &mut roots).label = "i18n".into();
get_cat("authentication", &mut roots).label = "Auth".into();
get_cat("visualization", &mut roots).label = "Visualize".into();
get_cat("accessibility", &mut roots).label = "a11y".into();
get_cat("compilers", &mut roots).label = "Lang".into();
get_cat("os::macos-apis", &mut get_cat("os", &mut roots).sub).label = "Apple".into();
get_cat("rendering::engine", &mut get_cat("rendering", &mut roots).sub).label = "Engine".into();
get_cat("rendering::data-formats", &mut get_cat("rendering", &mut roots).sub).label = "Formats".into();
// group them in a more sensible way
let parsers = vec![take_cat("parsing", &mut roots), take_cat("parser-implementations", &mut roots)];
roots.push(new_cat(parsers));
let hw = vec![take_cat("embedded", &mut roots), take_cat("hardware-support", &mut roots), take_cat("no-std", &mut roots)];
roots.push(new_cat(hw));
let db = vec![take_cat("database", &mut roots), take_cat("database-implementations", &mut roots)];
roots.push(new_cat(db));
let gg = vec![take_cat("game-development", &mut roots), take_cat("games", &mut roots)];
roots.push(new_cat(gg));
let int = take_cat("command-line-interface", &mut roots);
let cli = vec![int, take_cat("command-line-utilities", &mut roots)];
roots.push(new_cat(cli));
let mut editors = take_cat("text-editors", &mut roots);
editors.label = "Editors".into();
let txt = vec![
take_cat("text-processing", &mut roots),
editors,
take_cat("template-engine", &mut roots),
take_cat("value-formatting", &mut roots),
];
roots.push(new_cat(txt));
let wasm = take_cat("wasm", &mut roots);
get_cat("web-programming", &mut roots).sub.push(wasm);
let mut asyn = take_cat("asynchronous", &mut roots);
asyn.label = "Async".into();
get_cat("network-programming", &mut roots).sub.push(asyn);
let mut proc = take_cat("development-tools::procedural-macro-helpers", &mut get_cat("development-tools", &mut roots).sub);
proc.label = "Proc macros".into();
get_cat("rust-patterns", &mut roots).sub.push(proc);
let concurrency = take_cat("concurrency", &mut roots);
get_cat("rust-patterns", &mut roots).sub.push(concurrency);
let mut cr = get_cat("cryptography", &mut roots).sub.remove(0);
cr.label = "Crypto Magic Beans".into();
roots.push(cr);
// first layout of top-level boxes (won't be used for anything other than second layout)
for top in roots.iter_mut() {
let (count, weight) = if top.label.is_empty() { (0, 0.) } else { kitchen_sink.category_crate_count(&top.cat.slug).await? };
top.count = count;
top.weight = weight;
let mut top_copy = top.clone();
top_copy.sub = Vec::new();
for i in top.sub.iter_mut() {
let (count, weight) = kitchen_sink.category_crate_count(&i.cat.slug).await?;
i.count = count;
i.weight = weight;
top.count += i.count;
top.weight += i.weight;
assert!(i.sub.is_empty());
}
if top_copy.count > 0 {
top.sub.insert(0, top_copy);
}
}
let mut items_flattened = Vec::new();
let layout = TreemapLayout::new();
layout.layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.));
for parent in roots.iter_mut() {
let layout = TreemapLayout::new();
layout.layout_items(&mut parent.sub, parent.bounds);
items_flattened.append(&mut parent.sub);
}
postprocess_treebox_items(&mut items_flattened);
Ok(items_flattened)
}
fn postprocess_treebox_items(items: &mut Vec<TreeBox>) {
let colors = [
[0xff, 0xf1, 0xe6],
[0xe2, 0xec, 0xe9],
[0xDC, 0xED, 0xC1],
[0xcd, 0xda, 0xfd],
[0xbe, 0xe1, 0xe6],
[0xfd, 0xe2, 0xe4],
[0xdf, 0xe7, 0xfd],
[0xFF, 0xD3, 0xB6],
[0xea, 0xe4, 0xe9],
[0xd0, 0xd1, 0xff],
[0xf4, 0xda, 0xe2],
[0xde, 0xc3, 0xe1],
[0xd4, 0xe0, 0xf9],
[0xFF, 0xD3, 0xB6],
[0xDF, 0xCB, 0xD2],
];
let len = items.len() as f32;
for (i, item) in &mut items.iter_mut().enumerate() {
let x = 0.8 + (i as f32 / len) * 0.2;
let c = colors[i % colors.len()];
let c = [
(c[0] as f32 * x + (1. - x) * 200.) as u8,
(c[1] as f32 * x + (1. - x) * 100.) as u8,
(c[2] as f32 * x + (1. - x) * 200.) as u8
];
let mut l = lab::Lab::from_rgb(&c);
l.l = (l.l + 90.) * 0.5; // fix my bad palette
let c = l.to_rgb();
item.color = format!("#{:02x}{:02x}{:02x}", c[0], c[1], c[2]);
let ideal_max_width = (item.bounds.w * 1.2 / (item.font_size / 1.7)) as usize;
let maybe_label = textwrap::wrap(&item.label, textwrap::Options::new(ideal_max_width).break_words(false));
let chars = maybe_label.iter().map(|w| w.len()).max().unwrap_or(1);
let lines = maybe_label.len();
let try_font_size = item.font_size
.min(item.bounds.h / (lines as f64 * 1.05) - 4.)
.min(item.bounds.w * 1.6 / chars as f64)
.max(4.);
let max_width = (item.bounds.w / (try_font_size / 1.7)) as usize;
let must_break = ideal_max_width < chars * 2 / 3 && item.bounds.h > item.font_size * 2.;
let label = textwrap::wrap(&item.label, textwrap::Options::new(max_width).break_words(must_break));
let chars = label.iter().map(|w| w.len()).max().unwrap_or(1);
let lines = label.len();
item.label = label.join("\n");
item.font_size = item.font_size
.min(item.bounds.h / (lines as f64 * 1.05) - 4.)
.min(item.bounds.w * 1.6 / chars as f64)
.max(4.);
}
}
async fn owner_stats(kitchen_sink: &KitchenSink, start: Date<Utc>) -> Result<(Vec<u32>, Histogram), anyhow::Error> {
let all_owners = kitchen_sink.crate_all_owners().await?;
eprintln!("got {} owners", all_owners.len());
assert!(all_owners.len() > 1000);
let mut owner_crates_with_ids = HashMap::new();
let mut total_owners_at_month = vec![0u32; (Utc::today().signed_duration_since(start).num_days() as usize + 29) / 30];
let mut sum = 0;
for o in &all_owners {
// account creation history
let (y,m,_d) = o.created_at;
if y < 2015 || (y == 2015 && m < 5) {
sum += 1;
continue;
}
let mon_num = (y as usize - 2015) * 12 + m as usize - 5;
if mon_num < total_owners_at_month.len() {
total_owners_at_month[mon_num] += 1;
}
// update histogram
let t = owner_crates_with_ids.entry(o.num_crates).or_insert((0, Vec::<u64>::new()));
t.0 += 1;
if t.1.len() < 1000 {
t.1.push(o.github_id);
}
}
// convert IDs to logins
let owner_crates = owner_crates_with_ids.into_iter().map(|(k, (pop, mut id_examples))| {
let mut examples = Vec::with_capacity(id_examples.len().min(10));
if k <= 50 {
id_examples.sort_unstable(); // promote low-id users for normal amount of crates
} else {
id_examples.sort_unstable_by_key(|v| !v); // show newest users for potentially-spammy crate sets
}
// but include one counter-example just to make things more interesting
if let Some(tmp) = id_examples.pop() {
id_examples.insert(0, tmp);
}
for id in id_examples {
if let Ok(login) = kitchen_sink.login_by_github_id(id) {
if kitchen_sink.crates_io_login_on_blocklist(&login).is_none() { // github logins currently equal crates_io_logins
examples.push(login);
if examples.len() >= 10 {
break;
}
}
}
}
(k, (pop, examples))
}).collect();
// trim empty end
while total_owners_at_month.last().map_or(false, |&l| l == 0) {
total_owners_at_month.pop();
}
total_owners_at_month.iter_mut().for_each(|n| {
sum += *n;
*n = sum;
});
let hs_owner_crates = Histogram::new(owner_crates, true, &[1,2,3,6,25,50,75,100,150,200,500,750,2000], |n| if n > 3 {format!("≥{n}")} else {n.to_string()});
Ok((total_owners_at_month, hs_owner_crates))
}
#[derive(Debug)]
pub struct Histogram {
pub max: u32,
pub buckets: Vec<Bucket>,
pub bucket_labels: Vec<String>,
}
#[derive(Debug)]
pub struct Bucket {
/// population
pub count: u32,
pub threshold: u32,
pub examples: Vec<String>,
}
impl Bucket {
pub fn new(threshold: u32) -> Self {
Self { threshold, count: 0, examples: Vec::with_capacity(BUCKET_MAX_EXAMPLES) }
}
}
const BUCKET_MAX_EXAMPLES: usize = 25;
impl Histogram {
pub fn perc(&self, val: u32) -> f32 {
val as f32 / (self.max as f32 / 100.)
}
/// greater_mode: when true, a bucket counts values equal to its threshold or more; otherwise it counts values up to (and including) the threshold
///
pub fn new(data: kitchen_sink::StatsHistogram, greater_mode: bool, bucket_thresholds: &[u32], label: fn(u32) -> String) -> Self {
let mut data: Vec<_> = data.into_iter().collect();
data.sort_unstable_by_key(|d| d.0);
let mut data = data.drain(..).fuse().peekable();
fn make_bucket(mut b: Bucket, (key, (size, mut val)): (u32, (u32, Vec<String>))) -> Bucket {
debug_assert!(size as usize >= val.len());
b.count += size;
if b.examples.len() < BUCKET_MAX_EXAMPLES {
b.examples.append(&mut val);
}
if key > b.threshold {
b.threshold = key;
}
b
}
let mut buckets: Vec<_> = bucket_thresholds.windows(2)
.map(|thr_pair| (thr_pair[0], thr_pair[1]))
.chain(std::iter::once((bucket_thresholds.last().copied().unwrap(), !0)))
.map(|(threshold, next_thr)| {
let mut b = data.by_ref()
.peeking_take_while(|d| if greater_mode {
d.0 < next_thr
} else {
d.0 <= threshold
})
.fold(Bucket::new(0), make_bucket);
if greater_mode {
b.threshold = threshold;
} else {
// round threshold to max if close, otherwise show actual
if b.threshold / 9 > threshold / 10 {
b.threshold = threshold;
}
}
b.examples.shuffle(&mut rand::thread_rng());
b
})
.filter(|bucket| bucket.count > 0)
.collect();
let other = data.fold(Bucket::new(0), make_bucket);
if other.count > 0 {
buckets.push(other);
}
Self {
max: buckets.iter().map(|b| b.count).max().unwrap_or(0),
bucket_labels: buckets.iter().map(|b| label(b.threshold)).collect(),
buckets,
}
}
}
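// Illustrative note (added, not part of the original file): with thresholds
// [1, 2, 4, 8] and greater_mode = false the buckets read ≤1, ≤2, ≤4 and ≤8, plus a
// trailing bucket for whatever data remains; with greater_mode = true the same
// thresholds read ≥1, ≥2, ≥4 and ≥8. Empty buckets are dropped and `max` keeps the
// largest population for scaling the chart.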
pub fn url_for_crate_name(url: &Urler, name: &str) -> String {
url.crate_by_origin(&Origin::from_crates_io_name(name))
}
pub fn url_for_rev_deps(url: &Urler, name: &str) -> String {
url.reverse_deps(&Origin::from_crates_io_name(name)).unwrap()
}
pub fn versions_for_crate_name(url: &Urler, name: &str) -> String {
url.all_versions(&Origin::from_crates_io_name(name)).unwrap()
}
pub fn format_number(num: u32) -> String {
Numeric::english().format_int(num)
}
pub fn format_bytes(bytes: u32) -> String {
let (num, unit) = match bytes {
0..=1_000_000 => ((bytes + 999) / 1024, "KB"),
0..=9_999_999 => return format!("{}MB", ((bytes + 250_000) / 500_000) as f64 * 0.5),
_ => ((bytes + 500_000) / 1_000_000, "MB"),
};
format!("{}{unit}", Numeric::english().format_int(num))
}
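// Illustrative note (added, not part of the original file): format_bytes(1_500) gives
// "2KB" ((1_500 + 999) / 1024 = 2), format_bytes(5_000_000) gives "5MB" via the
// half-megabyte arm, and format_bytes(12_345_678) gives "12MB" after rounding to the
// nearest megabyte.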
| ategory | identifier_name |
global_stats.rs | use ahash::HashMapExt;
use categories::CATEGORIES;
use categories::Category;
use categories::CategoryMap;
use chrono::prelude::*;
use futures::future::try_join;
use crate::Page;
use crate::templates;
use crate::Urler;
use kitchen_sink::CompatByCrateVersion;
use kitchen_sink::KitchenSink;
use kitchen_sink::Origin;
use locale::Numeric;
use peeking_take_while::PeekableExt;
use rand::seq::SliceRandom;
use render_readme::Renderer;
use ahash::HashMap;
use std::io::Write;
#[derive(Debug)]
pub struct GlobalStats {
pub(crate) total_crate_num: u32,
pub(crate) total_owners_at_month: Vec<u32>,
pub(crate) max_total_owners: u32,
pub(crate) max_daily_downloads_rate: u32,
pub(crate) max_downloads_per_week: u64,
pub(crate) start_week_offset: u32,
pub(crate) dl_grid_line_every: u64,
pub(crate) weeks_to_reach_max_downloads: u32,
pub(crate) dl_per_day_this_year: (u64, u64),
pub(crate) dl_per_day_last_year: (u64, u64),
pub(crate) hs_releases: Histogram,
pub(crate) hs_sizes: Histogram,
pub(crate) hs_deps1: Histogram,
pub(crate) hs_deps2: Histogram,
pub(crate) hs_rev_deps: Histogram,
pub(crate) hs_maintenance: Histogram,
pub(crate) hs_age: Histogram,
pub(crate) hs_languish: Histogram,
pub(crate) hs_owner_crates: Histogram,
pub(crate) categories: Vec<TreeBox>,
pub(crate) rustc_stats_all: Vec<Compat>,
pub(crate) rustc_stats_recent: Vec<Compat>,
pub(crate) rustc_stats_recent_num: usize,
}
pub type CallbackFn = fn(&Urler, &str) -> String;
impl GlobalStats {
pub fn relative_increase(val: (u64, u64)) -> String {
format!("{:.1}×", val.0 as f64 / val.1 as f64)
}
pub fn dl_ratio_up(&self) -> bool {
let r1 = self.dl_per_day_this_year.0 as f64 / self.dl_per_day_this_year.1 as f64;
let r2 = self.dl_per_day_last_year.0 as f64 / self.dl_per_day_last_year.1 as f64;
r1 > r2
}
}
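/// Weekly download totals from `start` up to `day`: one (weekday_sum, weekend_sum) pair per week, oldest week first.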
fn downloads_over_time(start: Date<Utc>, mut day: Date<Utc>, kitchen_sink: &KitchenSink) -> Result<Vec<(u64, u64)>, anyhow::Error> {
let mut current_year = 0;
let mut current = [0; 366];
let mut dl = Vec::new();
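// skip the most recent days that have no recorded downloads yet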
while day > start {
let year = day.year() as u16;
if year != current_year {
current_year = year;
current = kitchen_sink.total_year_downloads(current_year)?;
}
let n = current[day.ordinal0() as usize];
if n > 0 {
break;
}
day -= chrono::Duration::days(1);
}
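// walk backwards one week at a time, splitting each week into weekday and weekend totals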
while day > start {
let mut weekday_sum = 0;
let mut weekend_sum = 0;
for _ in 0..7 {
let year = day.year() as u16;
if year != current_year {
current_year = year;
current = kitchen_sink.total_year_downloads(current_year)?;
}
let n = current[day.ordinal0() as usize];
match day.weekday() {
// this sucks a bit due to mon/fri being UTC, and overlapping with the weekend
// in the rest of the world.
Weekday::Sat | Weekday::Sun => weekend_sum += n,
_ => weekday_sum += n,
};
day -= chrono::Duration::days(1);
}
dl.push((weekday_sum, weekend_sum));
}
dl.reverse();
Ok(dl)
}
pub async fn render_global_stats(out: &mut impl Write, kitchen_sink: &KitchenSink, _renderer: &Renderer) -> Result<(), anyhow::Error> {
let (categories, recent_crates) = try_join(
category_stats(kitchen_sink),
kitchen_sink.notable_recently_updated_crates(4100)).await?;
let urler = Urler::new(None);
let start = Utc.ymd(2015, 5, 15); // Rust 1.0
let start_week_offset = start.ordinal0()/7;
let end = Utc::today() - chrono::Duration::days(2);
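// a new rustc ships roughly every 6 weeks, so weeks-since-1.0 divided by 6 approximates the latest minor version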
let latest_rustc_version = end.signed_duration_since(start).num_weeks()/6;
let mut compat_data = tokio::task::block_in_place(|| kitchen_sink.all_crate_compat())?;
let rustc_stats_all = rustc_stats(&compat_data, latest_rustc_version as u16)?;
let mut recent_compat = HashMap::with_capacity(recent_crates.len());
let mut rustc_stats_recent_num = 0;
for (o, _) in recent_crates {
if let Some(v) = compat_data.remove(&o) {
recent_compat.insert(o, v);
rustc_stats_recent_num += 1;
if rustc_stats_recent_num >= 4000 {
break;
}
}
}
let rustc_stats_recent = rustc_stats(&recent_compat, latest_rustc_version as u16)?;
let dl = downloads_over_time(start, end, kitchen_sink)?;
let (total_owners_at_month, mut hs_owner_crates) = owner_stats(kitchen_sink, start).await?;
hs_owner_crates.buckets.iter_mut().take(4).for_each(|c| c.examples.truncate(6)); // normal amount of crates is boring
assert!(dl.len() >= 52*2);
let this_year = &dl[dl.len()-52..];
let last_year = &dl[dl.len()-52*2..dl.len()-52];
fn sum2(s: &[(u64, u64)]) -> (u64, u64) {
let mut a_sum = 0;
let mut b_sum = 0;
s.iter().for_each(|&(a, b)| { a_sum += a; b_sum += b; });
(a_sum, b_sum)
}
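// weekday sums cover 5 days and weekend sums 2, so /5 and /2 give per-day figures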
let max_daily_downloads_rate = this_year.iter().map(move |(d, e)| (d/5).max(e/2)).max().unwrap_or(0) as u32;
let mut tmp_sum = 0;
let downloads_this_year = sum2(this_year);
let downloads_last_year = sum2(last_year);
let max_downloads_per_week = dl.iter().map(|(a, b)| a + b).max().unwrap_or(0);
let max_total_owners = total_owners_at_month.iter().copied().max().unwrap_or(0);
let dl_grid_line_every = (max_downloads_per_week / 6_000_000) * 1_000_000;
let mut hs_deps1 = Histogram::new(kitchen_sink.get_stats_histogram("deps")?.expect("hs_deps"), true,
&[0,1,2,3,4,5,6,7,8,9,10,11,12,14,16,18,20,25,30,40,60,80,100,120,150],
|n| if n > 11 {format!("≥{n}")} else {n.to_string()});
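// split the dependency histogram into two charts: the first 10 buckets stay in hs_deps1, the rest move to hs_deps2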
let hs_deps2 = Histogram {
max: hs_deps1.max,
buckets: hs_deps1.buckets.split_off(10),
bucket_labels: hs_deps1.bucket_labels.split_off(10),
};
let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?;
let mut hs_rev_deps = Histogram::new(rev_deps, true,
&[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000],
|n| if n > 2 {format!("≥{n}")} else {n.to_string()});
hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5));
let age_label = |n| match n {
0..=1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
};
let total_crate_num = kitchen_sink.all_crates().count() as u32;
let stats = GlobalStats {
total_crate_num,
total_owners_at_month,
max_total_owners,
max_daily_downloads_rate,
start_week_offset,
weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32,
dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2),
dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2),
max_downloads_per_week,
dl_grid_line_every,
hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}),
hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| {
let mut t = format_bytes(n*1024);
t.insert(0, '≤'); t
}),
hs_deps1, hs_deps2,
hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n {
0 => "one-off".to_string(),
1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
}),
hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_owner_crates,
categories,
rustc_stats_all,
rustc_stats_recent,
rustc_stats_recent_num,
hs_rev_deps,
};
templates::global_stats(out, &Page {
title: "State of the Rust/Cargo crates ecosystem".to_owned(),
description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? Is Rust usage growing?".to_owned()),
noindex: false,
search_meta: true,
critical_css_data: Some(include_str!("../../style/public/home.css")),
critical_css_dev_url: Some("/home.css"),
..Default::default()
}, &dl, &stats, &urler)?;
Ok(())
}
#[derive(Default, Copy, Clone, Debug)]
pub struct Compat {
pub(crate) bad: u32,
pub(crate) maybe_bad: u32,
pub(crate) unknown: u32,
pub(crate) maybe_ok: u32,
pub(crate) ok: u32,
}
impl Compat {
pub fn sum(&self) -> u32 {
self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok
}
}
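/// For the latest version of each crate, count per rustc minor version whether it is known to build, known to fail, or unknown.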
fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> {
// (ok, maybe, not), [0] is unused
let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize];
for c in compat.values() {
// can't compile at all
if !c.iter().any(|(_, c)| c.has_ever_built()) {
continue;
}
// stats for latest crate version only
let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) {
Some((_, c)) => c,
None => continue,
};
let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) {
Some((_, c)) => c,
None => latest_ver,
};
let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0);
let newest_bad = latest_ver.newest_bad().unwrap_or(0);
let oldest_ok = latest_ver.oldest_ok().unwrap_or(999);
let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999);
for (ver, c) in rustc_versions.iter_mut().enumerate() {
let ver = ver as u16;
if ver >= oldest_ok {
if ver >= oldest_ok_raw {
c.ok += 1;
} else {
c.maybe_ok += 1;
}
} else if ver <= newest_bad {
if ver <= newest_bad_raw {
c.bad += 1;
} else {
c.maybe_bad += 1;
}
} else {
c.unknown += 1;
}
}
}
// resize to width
let width = 330;
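// scale each version's counts so the bar segments add up to `width` (presumably pixels in the chart template)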
for c in &mut rustc_versions {
let sum = c.sum();
c.bad = (c.bad * width + width / 2) / sum;
c.ok = (c.ok * width + width / 2) / sum;
c.maybe_bad = (c.maybe_bad * width + width / 2) / sum;
c.maybe_ok = (c.maybe_ok * width + width / 2) / sum;
c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok;
}
Ok(rustc_versions)
}
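/// Recursively build a TreeBox for every category, skipping "uncategorized".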
fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> {
let mut out = Vec::with_capacity(sub.len());
for c in sub.values() {
if c.slug == "uncategorized" {
continue;
}
out.push(TreeBox {
cat: c,
label: c.name.clone(),
title: c.name.clone(),
count: 0,
weight: 0.,
bounds: treemap::Rect::new(),
color: String::new(),
font_size: 12.,
sub: cat_slugs(&c.sub),
});
}
out
}
#[derive(Debug, Clone)]
pub struct TreeBox {
pub cat: &'static Category,
pub title: String,
pub label: String,
pub font_size: f64,
/// SVG fill
pub color: String,
pub count: u32,
pub weight: f64,
pub bounds: treemap::Rect,
pub sub: Vec<TreeBox>,
}
impl TreeBox {
pub fn line_y(&self, nth: usize) -> f64 {
self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64
}
pub fn can_fit_count(&self) -> bool {
self.line_y(self.label.lines().count()) + 1. - self.bounds.y < self.bounds.h
}
}
impl treemap::Mappable for TreeBox {
fn size(&self) -> f64 { self.weight }
fn bounds(&self) -> &treemap::Rect { &self.bounds }
fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; }
}
async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> {
use treemap::*;
let mut roots = cat_slugs(&CATEGORIES.root);
#[track_caller]
fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
items.swap_remove(pos)
}
#[track_caller]
fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
&mut items[pos]
}
fn new_cat(sub: Vec<TreeBox>) -> TreeBox {
TreeBox {
c | cat("database-implementations", &mut roots).label = "Database".into();
get_cat("simulation", &mut roots).label = "Sim".into();
get_cat("caching", &mut roots).label = "Cache".into();
get_cat("config", &mut roots).label = "Config".into();
get_cat("os", &mut roots).label = "OS".into();
get_cat("internationalization", &mut roots).label = "i18n".into();
get_cat("authentication", &mut roots).label = "Auth".into();
get_cat("visualization", &mut roots).label = "Visualize".into();
get_cat("accessibility", &mut roots).label = "a11y".into();
get_cat("compilers", &mut roots).label = "Lang".into();
get_cat("os::macos-apis", &mut get_cat("os", &mut roots).sub).label = "Apple".into();
get_cat("rendering::engine", &mut get_cat("rendering", &mut roots).sub).label = "Engine".into();
get_cat("rendering::data-formats", &mut get_cat("rendering", &mut roots).sub).label = "Formats".into();
// group them in a more sensible way
let parsers = vec![take_cat("parsing", &mut roots), take_cat("parser-implementations", &mut roots)];
roots.push(new_cat(parsers));
let hw = vec![take_cat("embedded", &mut roots), take_cat("hardware-support", &mut roots), take_cat("no-std", &mut roots)];
roots.push(new_cat(hw));
let db = vec![take_cat("database", &mut roots), take_cat("database-implementations", &mut roots)];
roots.push(new_cat(db));
let gg = vec![take_cat("game-development", &mut roots), take_cat("games", &mut roots)];
roots.push(new_cat(gg));
let int = take_cat("command-line-interface", &mut roots);
let cli = vec![int, take_cat("command-line-utilities", &mut roots)];
roots.push(new_cat(cli));
let mut editors = take_cat("text-editors", &mut roots);
editors.label = "Editors".into();
let txt = vec![
take_cat("text-processing", &mut roots),
editors,
take_cat("template-engine", &mut roots),
take_cat("value-formatting", &mut roots),
];
roots.push(new_cat(txt));
let wasm = take_cat("wasm", &mut roots);
get_cat("web-programming", &mut roots).sub.push(wasm);
let mut asyn = take_cat("asynchronous", &mut roots);
asyn.label = "Async".into();
get_cat("network-programming", &mut roots).sub.push(asyn);
let mut proc = take_cat("development-tools::procedural-macro-helpers", &mut get_cat("development-tools", &mut roots).sub);
proc.label = "Proc macros".into();
get_cat("rust-patterns", &mut roots).sub.push(proc);
let concurrency = take_cat("concurrency", &mut roots);
get_cat("rust-patterns", &mut roots).sub.push(concurrency);
let mut cr = get_cat("cryptography", &mut roots).sub.remove(0);
cr.label = "Crypto Magic Beans".into();
roots.push(cr);
// first layout of top-level boxes (won't be used for anything other than second layout)
for top in roots.iter_mut() {
let (count, weight) = if top.label.is_empty() { (0, 0.) } else { kitchen_sink.category_crate_count(&top.cat.slug).await? };
top.count = count;
top.weight = weight;
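// keep a copy of the parent as a pseudo-child, so crates filed directly under the parent category get their own tile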
let mut top_copy = top.clone();
top_copy.sub = Vec::new();
for i in top.sub.iter_mut() {
let (count, weight) = kitchen_sink.category_crate_count(&i.cat.slug).await?;
i.count = count;
i.weight = weight;
top.count += i.count;
top.weight += i.weight;
assert!(i.sub.is_empty());
}
if top_copy.count > 0 {
top.sub.insert(0, top_copy);
}
}
let mut items_flattened = Vec::new();
let layout = TreemapLayout::new();
layout.layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.));
for parent in roots.iter_mut() {
let layout = TreemapLayout::new();
layout.layout_items(&mut parent.sub, parent.bounds);
items_flattened.append(&mut parent.sub);
}
postprocess_treebox_items(&mut items_flattened);
Ok(items_flattened)
}
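/// Assign each box a fill color and wrap/shrink its label until it fits within its bounds.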
fn postprocess_treebox_items(items: &mut Vec<TreeBox>) {
let colors = [
[0xff, 0xf1, 0xe6],
[0xe2, 0xec, 0xe9],
[0xDC, 0xED, 0xC1],
[0xcd, 0xda, 0xfd],
[0xbe, 0xe1, 0xe6],
[0xfd, 0xe2, 0xe4],
[0xdf, 0xe7, 0xfd],
[0xFF, 0xD3, 0xB6],
[0xea, 0xe4, 0xe9],
[0xd0, 0xd1, 0xff],
[0xf4, 0xda, 0xe2],
[0xde, 0xc3, 0xe1],
[0xd4, 0xe0, 0xf9],
[0xFF, 0xD3, 0xB6],
[0xDF, 0xCB, 0xD2],
];
let len = items.len() as f32;
for (i, item) in &mut items.iter_mut().enumerate() {
let x = 0.8 + (i as f32 / len) * 0.2;
let c = colors[i % colors.len()];
let c = [
(c[0] as f32 * x + (1. - x) * 200.) as u8,
(c[1] as f32 * x + (1. - x) * 100.) as u8,
(c[2] as f32 * x + (1. - x) * 200.) as u8
];
let mut l = lab::Lab::from_rgb(&c);
l.l = (l.l + 90.) * 0.5; // fix my bad palette
let c = l.to_rgb();
item.color = format!("#{:02x}{:02x}{:02x}", c[0], c[1], c[2]);
let ideal_max_width = (item.bounds.w * 1.2 / (item.font_size / 1.7)) as usize;
let maybe_label = textwrap::wrap(&item.label, textwrap::Options::new(ideal_max_width).break_words(false));
let chars = maybe_label.iter().map(|w| w.len()).max().unwrap_or(1);
let lines = maybe_label.len();
let try_font_size = item.font_size
.min(item.bounds.h / (lines as f64 * 1.05) - 4.)
.min(item.bounds.w * 1.6 / chars as f64)
.max(4.);
let max_width = (item.bounds.w / (try_font_size / 1.7)) as usize;
let must_break = ideal_max_width < chars * 2 / 3 && item.bounds.h > item.font_size * 2.;
let label = textwrap::wrap(&item.label, textwrap::Options::new(max_width).break_words(must_break));
let chars = label.iter().map(|w| w.len()).max().unwrap_or(1);
let lines = label.len();
item.label = label.join("\n");
item.font_size = item.font_size
.min(item.bounds.h / (lines as f64 * 1.05) - 4.)
.min(item.bounds.w * 1.6 / chars as f64)
.max(4.);
}
}
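/// Returns cumulative owner counts per month since `start` (Rust 1.0), plus a crates-per-owner histogram with example logins.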
async fn owner_stats(kitchen_sink: &KitchenSink, start: Date<Utc>) -> Result<(Vec<u32>, Histogram), anyhow::Error> {
let all_owners = kitchen_sink.crate_all_owners().await?;
eprintln!("got {} owners", all_owners.len());
assert!(all_owners.len() > 1000);
let mut owner_crates_with_ids = HashMap::new();
let mut total_owners_at_month = vec![0u32; (Utc::today().signed_duration_since(start).num_days() as usize + 29) / 30];
let mut sum = 0;
for o in &all_owners {
// account creation history
let (y,m,_d) = o.created_at;
if y < 2015 || (y == 2015 && m < 5) {
sum += 1;
continue;
}
let mon_num = (y as usize - 2015) * 12 + m as usize - 5;
if mon_num < total_owners_at_month.len() {
total_owners_at_month[mon_num] += 1;
}
// update histogram
let t = owner_crates_with_ids.entry(o.num_crates).or_insert((0, Vec::<u64>::new()));
t.0 += 1;
if t.1.len() < 1000 {
t.1.push(o.github_id);
}
}
// convert IDs to logins
let owner_crates = owner_crates_with_ids.into_iter().map(|(k, (pop, mut id_examples))| {
let mut examples = Vec::with_capacity(id_examples.len().min(10));
if k <= 50 {
id_examples.sort_unstable(); // promote low-id (older) users as examples for a normal amount of crates
} else {
id_examples.sort_unstable_by_key(|v| !v); // show newest users for potentially-spammy crate sets
}
// but include one counter-example just to make things more interesting
if let Some(tmp) = id_examples.pop() {
id_examples.insert(0, tmp);
}
for id in id_examples {
if let Ok(login) = kitchen_sink.login_by_github_id(id) {
if kitchen_sink.crates_io_login_on_blocklist(&login).is_none() { // github logins currently equal crates_io_logins
examples.push(login);
if examples.len() >= 10 {
break;
}
}
}
}
(k, (pop, examples))
}).collect();
// trim empty end
while total_owners_at_month.last().map_or(false, |&l| l == 0) {
total_owners_at_month.pop();
}
total_owners_at_month.iter_mut().for_each(|n| {
sum += *n;
*n = sum;
});
let hs_owner_crates = Histogram::new(owner_crates, true, &[1,2,3,6,25,50,75,100,150,200,500,750,2000], |n| if n > 3 {format!("≥{n}")} else {n.to_string()});
Ok((total_owners_at_month, hs_owner_crates))
}
#[derive(Debug)]
pub struct Histogram {
pub max: u32,
pub buckets: Vec<Bucket>,
pub bucket_labels: Vec<String>,
}
#[derive(Debug)]
pub struct Bucket {
/// population
pub count: u32,
pub threshold: u32,
pub examples: Vec<String>,
}
impl Bucket {
pub fn new(threshold: u32) -> Self {
Self { threshold, count: 0, examples: Vec::with_capacity(BUCKET_MAX_EXAMPLES) }
}
}
const BUCKET_MAX_EXAMPLES: usize = 25;
impl Histogram {
pub fn perc(&self, val: u32) -> f32 {
val as f32 / (self.max as f32 / 100.)
}
/// `greater_mode`: when true, a bucket means "this many or more";
/// otherwise a bucket covers values less than or equal to its threshold.
pub fn new(data: kitchen_sink::StatsHistogram, greater_mode: bool, bucket_thresholds: &[u32], label: fn(u32) -> String) -> Self {
let mut data: Vec<_> = data.into_iter().collect();
data.sort_unstable_by_key(|d| d.0);
let mut data = data.drain(..).fuse().peekable();
fn make_bucket(mut b: Bucket, (key, (size, mut val)): (u32, (u32, Vec<String>))) -> Bucket {
debug_assert!(size as usize >= val.len());
b.count += size;
if b.examples.len() < BUCKET_MAX_EXAMPLES {
b.examples.append(&mut val);
}
if key > b.threshold {
b.threshold = key;
}
b
}
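// pair each threshold with the next one so a bucket knows where the following bucket begins; the last bucket is open-ended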
let mut buckets: Vec<_> = bucket_thresholds.windows(2)
.map(|thr_pair| (thr_pair[0], thr_pair[1]))
.chain(std::iter::once((bucket_thresholds.last().copied().unwrap(), !0)))
.map(|(threshold, next_thr)| {
let mut b = data.by_ref()
.peeking_take_while(|d| if greater_mode {
d.0 < next_thr
} else {
d.0 <= threshold
})
.fold(Bucket::new(0), make_bucket);
if greater_mode {
b.threshold = threshold;
} else {
// round threshold to max if close, otherwise show actual
if b.threshold / 9 > threshold / 10 {
b.threshold = threshold;
}
}
b.examples.shuffle(&mut rand::thread_rng());
b
})
.filter(|bucket| bucket.count > 0)
.collect();
let other = data.fold(Bucket::new(0), make_bucket);
if other.count > 0 {
buckets.push(other);
}
Self {
max: buckets.iter().map(|b| b.count).max().unwrap_or(0),
bucket_labels: buckets.iter().map(|b| label(b.threshold)).collect(),
buckets,
}
}
}
pub fn url_for_crate_name(url: &Urler, name: &str) -> String {
url.crate_by_origin(&Origin::from_crates_io_name(name))
}
pub fn url_for_rev_deps(url: &Urler, name: &str) -> String {
url.reverse_deps(&Origin::from_crates_io_name(name)).unwrap()
}
pub fn versions_for_crate_name(url: &Urler, name: &str) -> String {
url.all_versions(&Origin::from_crates_io_name(name)).unwrap()
}
pub fn format_number(num: u32) -> String {
Numeric::english().format_int(num)
}
pub fn format_bytes(bytes: u32) -> String {
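// the first two arms overlap; the first match wins, so the middle arm effectively covers ~1 MB to 10 MB and rounds to 0.5 MB steps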
let (num, unit) = match bytes {
0..=1_000_000 => ((bytes + 999) / 1024, "KB"),
0..=9_999_999 => return format!("{}MB", ((bytes + 250_000) / 500_000) as f64 * 0.5),
_ => ((bytes + 500_000) / 1_000_000, "MB"),
};
format!("{}{unit}", Numeric::english().format_int(num))
}
| at: CATEGORIES.root.values().next().unwrap(),
title: String::new(),
label: String::new(),
font_size: 0.,
color: String::new(),
count: 0,
weight: 0.,
bounds: Rect::new(),
sub,
}
}
// names don't fit
get_ | identifier_body |
global_stats.rs | use ahash::HashMapExt;
use categories::CATEGORIES;
use categories::Category;
use categories::CategoryMap;
use chrono::prelude::*;
use futures::future::try_join;
use crate::Page;
use crate::templates;
use crate::Urler;
use kitchen_sink::CompatByCrateVersion;
use kitchen_sink::KitchenSink;
use kitchen_sink::Origin;
use locale::Numeric;
use peeking_take_while::PeekableExt;
use rand::seq::SliceRandom;
use render_readme::Renderer;
use ahash::HashMap;
use std::io::Write;
#[derive(Debug)]
pub struct GlobalStats {
pub(crate) total_crate_num: u32,
pub(crate) total_owners_at_month: Vec<u32>,
pub(crate) max_total_owners: u32,
pub(crate) max_daily_downloads_rate: u32,
pub(crate) max_downloads_per_week: u64,
pub(crate) start_week_offset: u32,
pub(crate) dl_grid_line_every: u64,
pub(crate) weeks_to_reach_max_downloads: u32,
pub(crate) dl_per_day_this_year: (u64, u64),
pub(crate) dl_per_day_last_year: (u64, u64),
pub(crate) hs_releases: Histogram,
pub(crate) hs_sizes: Histogram,
pub(crate) hs_deps1: Histogram,
pub(crate) hs_deps2: Histogram,
pub(crate) hs_rev_deps: Histogram,
pub(crate) hs_maintenance: Histogram,
pub(crate) hs_age: Histogram,
pub(crate) hs_languish: Histogram,
pub(crate) hs_owner_crates: Histogram,
pub(crate) categories: Vec<TreeBox>,
pub(crate) rustc_stats_all: Vec<Compat>,
pub(crate) rustc_stats_recent: Vec<Compat>,
pub(crate) rustc_stats_recent_num: usize,
}
pub type CallbackFn = fn(&Urler, &str) -> String;
impl GlobalStats {
pub fn relative_increase(val: (u64, u64)) -> String {
format!("{:.1}×", val.0 as f64 / val.1 as f64)
}
pub fn dl_ratio_up(&self) -> bool {
let r1 = self.dl_per_day_this_year.0 as f64 / self.dl_per_day_this_year.1 as f64;
let r2 = self.dl_per_day_last_year.0 as f64 / self.dl_per_day_last_year.1 as f64;
r1 > r2
}
}
fn downloads_over_time(start: Date<Utc>, mut day: Date<Utc>, kitchen_sink: &KitchenSink) -> Result<Vec<(u64, u64)>, anyhow::Error> {
let mut current_year = 0;
let mut current = [0; 366];
let mut dl = Vec::new();
while day > start {
let year = day.year() as u16;
if year != current_year {
current_year = year;
current = kitchen_sink.total_year_downloads(current_year)?;
}
let n = current[day.ordinal0() as usize];
if n > 0 {
break;
}
day -= chrono::Duration::days(1);
}
while day > start {
let mut weekday_sum = 0;
let mut weekend_sum = 0;
for _ in 0..7 {
let year = day.year() as u16;
if year != current_year {
current_year = year;
current = kitchen_sink.total_year_downloads(current_year)?;
}
let n = current[day.ordinal0() as usize];
match day.weekday() {
// this sucks a bit due to mon/fri being UTC, and overlapping with the weekend
// in the rest of the world.
Weekday::Sat | Weekday::Sun => weekend_sum += n,
_ => weekday_sum += n,
};
day -= chrono::Duration::days(1);
}
dl.push((weekday_sum, weekend_sum));
}
dl.reverse();
Ok(dl)
}
pub async fn render_global_stats(out: &mut impl Write, kitchen_sink: &KitchenSink, _renderer: &Renderer) -> Result<(), anyhow::Error> {
let (categories, recent_crates) = try_join(
category_stats(kitchen_sink),
kitchen_sink.notable_recently_updated_crates(4100)).await?;
let urler = Urler::new(None);
let start = Utc.ymd(2015, 5, 15); // Rust 1.0
let start_week_offset = start.ordinal0()/7;
let end = Utc::today() - chrono::Duration::days(2);
let latest_rustc_version = end.signed_duration_since(start).num_weeks()/6;
let mut compat_data = tokio::task::block_in_place(|| kitchen_sink.all_crate_compat())?;
let rustc_stats_all = rustc_stats(&compat_data, latest_rustc_version as u16)?;
let mut recent_compat = HashMap::with_capacity(recent_crates.len());
let mut rustc_stats_recent_num = 0;
for (o, _) in recent_crates {
if let Some(v) = compat_data.remove(&o) {
recent_compat.insert(o, v);
rustc_stats_recent_num += 1;
if rustc_stats_recent_num >= 4000 {
break;
}
}
}
let rustc_stats_recent = rustc_stats(&recent_compat, latest_rustc_version as u16)?;
let dl = downloads_over_time(start, end, kitchen_sink)?;
let (total_owners_at_month, mut hs_owner_crates) = owner_stats(kitchen_sink, start).await?;
hs_owner_crates.buckets.iter_mut().take(4).for_each(|c| c.examples.truncate(6)); // normal amount of crates is boring
assert!(dl.len() >= 52*2);
let this_year = &dl[dl.len()-52..];
let last_year = &dl[dl.len()-52*2..dl.len()-52];
fn sum2(s: &[(u64, u64)]) -> (u64, u64) {
let mut a_sum = 0;
let mut b_sum = 0;
s.iter().for_each(|&(a, b)| { a_sum += a; b_sum += b; });
(a_sum, b_sum)
}
let max_daily_downloads_rate = this_year.iter().map(move |(d, e)| (d/5).max(e/2)).max().unwrap_or(0) as u32;
let mut tmp_sum = 0;
let downloads_this_year = sum2(this_year);
let downloads_last_year = sum2(last_year);
let max_downloads_per_week = dl.iter().map(|(a, b)| a + b).max().unwrap_or(0);
let max_total_owners = total_owners_at_month.iter().copied().max().unwrap_or(0);
let dl_grid_line_every = (max_downloads_per_week / 6_000_000) * 1_000_000;
let mut hs_deps1 = Histogram::new(kitchen_sink.get_stats_histogram("deps")?.expect("hs_deps"), true,
&[0,1,2,3,4,5,6,7,8,9,10,11,12,14,16,18,20,25,30,40,60,80,100,120,150],
|n| if n > 11 {format!("≥{n}")} else {n.to_string()});
let hs_deps2 = Histogram {
max: hs_deps1.max,
buckets: hs_deps1.buckets.split_off(10),
bucket_labels: hs_deps1.bucket_labels.split_off(10),
};
let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?;
let mut hs_rev_deps = Histogram::new(rev_deps, true,
&[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000],
|n| if n > 2 {format!("≥{n}")} else {n.to_string()});
hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5));
let age_label = |n| match n {
0..=1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
};
let total_crate_num = kitchen_sink.all_crates().count() as u32;
let stats = GlobalStats {
total_crate_num,
total_owners_at_month,
max_total_owners,
max_daily_downloads_rate,
start_week_offset,
weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32,
dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2),
dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2),
max_downloads_per_week,
dl_grid_line_every,
hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}),
hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| {
let mut t = format_bytes(n*1024);
t.insert(0, '≤'); t
}),
hs_deps1, hs_deps2,
hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n {
0 => "one-off".to_string(),
1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
}),
hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_owner_crates,
categories,
rustc_stats_all,
rustc_stats_recent,
rustc_stats_recent_num,
hs_rev_deps,
};
templates::global_stats(out, &Page {
title: "State of the Rust/Cargo crates ecosystem".to_owned(),
description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? Is Rust usage growing?".to_owned()),
noindex: false,
search_meta: true,
critical_css_data: Some(include_str!("../../style/public/home.css")),
critical_css_dev_url: Some("/home.css"),
..Default::default()
}, &dl, &stats, &urler)?;
Ok(())
}
#[derive(Default, Copy, Clone, Debug)]
pub struct Compat {
pub(crate) bad: u32,
pub(crate) maybe_bad: u32,
pub(crate) unknown: u32,
pub(crate) maybe_ok: u32,
pub(crate) ok: u32,
}
impl Compat {
pub fn sum(&self) -> u32 {
self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok
}
}
fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> {
// (ok, maybe, not), [0] is unused
let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize];
for c in compat.values() {
// can't compile at all
if !c.iter().any(|(_, c)| c.has_ever_built()) {
continue;
}
// stats for latest crate version only
let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) {
Some((_, c)) => c,
None => continue,
};
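// newest non-prerelease version with a likely build failure; its newest_bad_likely() becomes the cutoff for counting a rustc as definitely bad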
let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) {
Some((_, c)) => c,
None => latest_ver,
};
let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0);
let newest_bad = latest_ver.newest_bad().unwrap_or(0);
let oldest_ok = latest_ver.oldest_ok().unwrap_or(999);
let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999);
for (ver, c) in rustc_versions.iter_mut().enumerate() {
let ver = ver as u16;
if ver >= oldest_ok {
if ver >= oldest_ok_raw {
c.ok += 1;
} else {
c.maybe_ok += 1;
}
} else if ver <= newest_bad {
if ver <= newest_bad_raw {
c.bad += 1;
} else {
c.maybe_bad += 1;
}
} else {
c.unknown += 1;
}
}
}
// resize to width
let width = 330;
for c in &mut rustc_versions {
let sum = c.sum();
c.bad = (c.bad * width + width / 2) / sum;
c.ok = (c.ok * width + width / 2) / sum;
c.maybe_bad = (c.maybe_bad * width + width / 2) / sum;
c.maybe_ok = (c.maybe_ok * width + width / 2) / sum;
c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok;
}
Ok(rustc_versions)
}
fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> {
let mut out = Vec::with_capacity(sub.len());
for c in sub.values() {
if c.slug == "uncategorized" {
continue;
}
out.push(TreeBox {
cat: c,
label: c.name.clone(),
title: c.name.clone(),
count: 0,
weight: 0.,
bounds: treemap::Rect::new(),
color: String::new(),
font_size: 12.,
sub: cat_slugs(&c.sub),
});
}
out
}
#[derive(Debug, Clone)]
pub struct TreeBox {
pub cat: &'static Category,
pub title: String,
pub label: String,
pub font_size: f64,
/// SVG fill
pub color: String,
pub count: u32,
pub weight: f64,
pub bounds: treemap::Rect,
pub sub: Vec<TreeBox>,
}
impl TreeBox {
pub fn line_y(&self, nth: usize) -> f64 {
self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64
}
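/// True if a count line still fits below the wrapped label inside this box.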
pub fn can_fit_count(&self) -> bool {
self.line_y(self.label.lines().count()) + 1. - self.bounds.y < self.bounds.h
}
}
impl treemap::Mappable for TreeBox {
fn size(&self) -> f64 { self.weight }
fn bounds(&self) -> &treemap::Rect { &self.bounds }
fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; }
}
async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> {
use treemap::*;
let mut roots = cat_slugs(&CATEGORIES.root);
#[track_caller]
fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
items.swap_remove(pos)
}
#[track_caller]
fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
&mut items[pos]
}
fn new_cat(sub: Vec<TreeBox>) -> TreeBox {
TreeBox {
cat: CATEGORIES.root.values().next().unwrap(),
title: String::new(),
label: String::new(),
font_size: 0.,
color: String::new(),
count: 0,
weight: 0.,
bounds: Rect::new(),
sub,
}
}
// names don't fit
get_cat("database-implementations", &mut roots).label = "Database".into();
get_cat("simulation", &mut roots).label = "Sim".into();
get_cat("caching", &mut roots).label = "Cache".into();
get_cat("config", &mut roots).label = "Config".into();
get_cat("os", &mut roots).label = "OS".into();
get_cat("internationalization", &mut roots).label = "i18n".into();
get_cat("authentication", &mut roots).label = "Auth".into();
get_cat("visualization", &mut roots).label = "Visualize".into();
get_cat("accessibility", &mut roots).label = "a11y".into();
get_cat("compilers", &mut roots).label = "Lang".into();
get_cat("os::macos-apis", &mut get_cat("os", &mut roots).sub).label = "Apple".into();
get_cat("rendering::engine", &mut get_cat("rendering", &mut roots).sub).label = "Engine".into();
get_cat("rendering::data-formats", &mut get_cat("rendering", &mut roots).sub).label = "Formats".into();
// group them in a more sensible way
let parsers = vec![take_cat("parsing", &mut roots), take_cat("parser-implementations", &mut roots)];
roots.push(new_cat(parsers));
let hw = vec![take_cat("embedded", &mut roots), take_cat("hardware-support", &mut roots), take_cat("no-std", &mut roots)];
roots.push(new_cat(hw));
let db = vec![take_cat("database", &mut roots), take_cat("database-implementations", &mut roots)];
roots.push(new_cat(db));
let gg = vec![take_cat("game-development", &mut roots), take_cat("games", &mut roots)];
roots.push(new_cat(gg));
let int = take_cat("command-line-interface", &mut roots);
let cli = vec![int, take_cat("command-line-utilities", &mut roots)];
roots.push(new_cat(cli));
let mut editors = take_cat("text-editors", &mut roots);
editors.label = "Editors".into();
let txt = vec![
take_cat("text-processing", &mut roots),
editors,
take_cat("template-engine", &mut roots),
take_cat("value-formatting", &mut roots),
];
roots.push(new_cat(txt));
let wasm = take_cat("wasm", &mut roots);
get_cat("web-programming", &mut roots).sub.push(wasm);
let mut asyn = take_cat("asynchronous", &mut roots);
asyn.label = "Async".into();
get_cat("network-programming", &mut roots).sub.push(asyn);
let mut proc = take_cat("development-tools::procedural-macro-helpers", &mut get_cat("development-tools", &mut roots).sub);
proc.label = "Proc macros".into();
get_cat("rust-patterns", &mut roots).sub.push(proc);
let concurrency = take_cat("concurrency", &mut roots);
get_cat("rust-patterns", &mut roots).sub.push(concurrency);
let mut cr = get_cat("cryptography", &mut roots).sub.remove(0);
cr.label = "Crypto Magic Beans".into();
roots.push(cr);
// first layout of top-level boxes (won't be used for anything other than second layout)
for top in roots.iter_mut() {
let (count, weight) = if top.label.is_empty() { (0, 0.) } else { kitchen_sink.category_crate_count(&top.cat.slug).await? };
top.count = count;
top.weight = weight;
let mut top_copy = top.clone();
top_copy.sub = Vec::new();
for i in top.sub.iter_mut() {
let (count, weight) = kitchen_sink.category_crate_count(&i.cat.slug).await?;
i.count = count;
i.weight = weight;
top.count += i.count;
top.weight += i.weight;
assert!(i.sub.is_empty());
}
if top_copy.count > 0 {
top.sub.insert(0, top_copy);
}
}
let mut items_flattened = Vec::new();
let layout = TreemapLayout::new();
layout.layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.));
for parent in roots.iter_mut() {
let layout = TreemapLayout::new();
layout.layout_items(&mut parent.sub, parent.bounds); | }
postprocess_treebox_items(&mut items_flattened);
Ok(items_flattened)
}
fn postprocess_treebox_items(items: &mut Vec<TreeBox>) {
let colors = [
[0xff, 0xf1, 0xe6],
[0xe2, 0xec, 0xe9],
[0xDC, 0xED, 0xC1],
[0xcd, 0xda, 0xfd],
[0xbe, 0xe1, 0xe6],
[0xfd, 0xe2, 0xe4],
[0xdf, 0xe7, 0xfd],
[0xFF, 0xD3, 0xB6],
[0xea, 0xe4, 0xe9],
[0xd0, 0xd1, 0xff],
[0xf4, 0xda, 0xe2],
[0xde, 0xc3, 0xe1],
[0xd4, 0xe0, 0xf9],
[0xFF, 0xD3, 0xB6],
[0xDF, 0xCB, 0xD2],
];
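// the blend factor x varies with the item index, so boxes that reuse a palette color still get a slightly different tint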
let len = items.len() as f32;
for (i, item) in &mut items.iter_mut().enumerate() {
let x = 0.8 + (i as f32 / len) * 0.2;
let c = colors[i % colors.len()];
let c = [
(c[0] as f32 * x + (1. - x) * 200.) as u8,
(c[1] as f32 * x + (1. - x) * 100.) as u8,
(c[2] as f32 * x + (1. - x) * 200.) as u8
];
let mut l = lab::Lab::from_rgb(&c);
l.l = (l.l + 90.) * 0.5; // fix my bad palette
let c = l.to_rgb();
item.color = format!("#{:02x}{:02x}{:02x}", c[0], c[1], c[2]);
let ideal_max_width = (item.bounds.w * 1.2 / (item.font_size / 1.7)) as usize;
let maybe_label = textwrap::wrap(&item.label, textwrap::Options::new(ideal_max_width).break_words(false));
let chars = maybe_label.iter().map(|w| w.len()).max().unwrap_or(1);
let lines = maybe_label.len();
let try_font_size = item.font_size
.min(item.bounds.h / (lines as f64 * 1.05) - 4.)
.min(item.bounds.w * 1.6 / chars as f64)
.max(4.);
let max_width = (item.bounds.w / (try_font_size / 1.7)) as usize;
let must_break = ideal_max_width < chars * 2 / 3 && item.bounds.h > item.font_size * 2.;
let label = textwrap::wrap(&item.label, textwrap::Options::new(max_width).break_words(must_break));
let chars = label.iter().map(|w| w.len()).max().unwrap_or(1);
let lines = label.len();
item.label = label.join("\n");
item.font_size = item.font_size
.min(item.bounds.h / (lines as f64 * 1.05) - 4.)
.min(item.bounds.w * 1.6 / chars as f64)
.max(4.);
}
}
async fn owner_stats(kitchen_sink: &KitchenSink, start: Date<Utc>) -> Result<(Vec<u32>, Histogram), anyhow::Error> {
let all_owners = kitchen_sink.crate_all_owners().await?;
eprintln!("got {} owners", all_owners.len());
assert!(all_owners.len() > 1000);
let mut owner_crates_with_ids = HashMap::new();
let mut total_owners_at_month = vec![0u32; (Utc::today().signed_duration_since(start).num_days() as usize + 29) / 30];
let mut sum = 0;
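// owners who joined before Rust 1.0 (May 2015) seed the running baseline instead of a monthly bucket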
for o in &all_owners {
// account creation history
let (y,m,_d) = o.created_at;
if y < 2015 || (y == 2015 && m < 5) {
sum += 1;
continue;
}
let mon_num = (y as usize - 2015) * 12 + m as usize - 5;
if mon_num < total_owners_at_month.len() {
total_owners_at_month[mon_num] += 1;
}
// update histogram
let t = owner_crates_with_ids.entry(o.num_crates).or_insert((0, Vec::<u64>::new()));
t.0 += 1;
if t.1.len() < 1000 {
t.1.push(o.github_id);
}
}
// convert IDs to logins
let owner_crates = owner_crates_with_ids.into_iter().map(|(k, (pop, mut id_examples))| {
let mut examples = Vec::with_capacity(id_examples.len().min(10));
if k <= 50 {
id_examples.sort_unstable(); // promote low-id (older) users as examples for a normal amount of crates
} else {
id_examples.sort_unstable_by_key(|v| !v); // show newest users for potentially-spammy crate sets
}
// but include one counter-example just to make things more interesting
if let Some(tmp) = id_examples.pop() {
id_examples.insert(0, tmp);
}
for id in id_examples {
if let Ok(login) = kitchen_sink.login_by_github_id(id) {
if kitchen_sink.crates_io_login_on_blocklist(&login).is_none() { // github logins currently equal crates_io_logins
examples.push(login);
if examples.len() >= 10 {
break;
}
}
}
}
(k, (pop, examples))
}).collect();
// trim empty end
while total_owners_at_month.last().map_or(false, |&l| l == 0) {
total_owners_at_month.pop();
}
total_owners_at_month.iter_mut().for_each(|n| {
sum += *n;
*n = sum;
});
let hs_owner_crates = Histogram::new(owner_crates, true, &[1,2,3,6,25,50,75,100,150,200,500,750,2000], |n| if n > 3 {format!("≥{n}")} else {n.to_string()});
Ok((total_owners_at_month, hs_owner_crates))
}
#[derive(Debug)]
pub struct Histogram {
pub max: u32,
pub buckets: Vec<Bucket>,
pub bucket_labels: Vec<String>,
}
#[derive(Debug)]
pub struct Bucket {
/// population
pub count: u32,
pub threshold: u32,
pub examples: Vec<String>,
}
impl Bucket {
pub fn new(threshold: u32) -> Self {
Self { threshold, count: 0, examples: Vec::with_capacity(BUCKET_MAX_EXAMPLES) }
}
}
const BUCKET_MAX_EXAMPLES: usize = 25;
impl Histogram {
pub fn perc(&self, val: u32) -> f32 {
val as f32 / (self.max as f32 / 100.)
}
/// `greater_mode`: when true, a bucket means "this many or more";
/// otherwise a bucket covers values less than or equal to its threshold.
pub fn new(data: kitchen_sink::StatsHistogram, greater_mode: bool, bucket_thresholds: &[u32], label: fn(u32) -> String) -> Self {
let mut data: Vec<_> = data.into_iter().collect();
data.sort_unstable_by_key(|d| d.0);
let mut data = data.drain(..).fuse().peekable();
fn make_bucket(mut b: Bucket, (key, (size, mut val)): (u32, (u32, Vec<String>))) -> Bucket {
debug_assert!(size as usize >= val.len());
b.count += size;
if b.examples.len() < BUCKET_MAX_EXAMPLES {
b.examples.append(&mut val);
}
if key > b.threshold {
b.threshold = key;
}
b
}
let mut buckets: Vec<_> = bucket_thresholds.windows(2)
.map(|thr_pair| (thr_pair[0], thr_pair[1]))
.chain(std::iter::once((bucket_thresholds.last().copied().unwrap(), !0)))
.map(|(threshold, next_thr)| {
let mut b = data.by_ref()
.peeking_take_while(|d| if greater_mode {
d.0 < next_thr
} else {
d.0 <= threshold
})
.fold(Bucket::new(0), make_bucket);
if greater_mode {
b.threshold = threshold;
} else {
// round threshold to max if close, otherwise show actual
if b.threshold / 9 > threshold / 10 {
b.threshold = threshold;
}
}
b.examples.shuffle(&mut rand::thread_rng());
b
})
.filter(|bucket| bucket.count > 0)
.collect();
let other = data.fold(Bucket::new(0), make_bucket);
if other.count > 0 {
buckets.push(other);
}
Self {
max: buckets.iter().map(|b| b.count).max().unwrap_or(0),
bucket_labels: buckets.iter().map(|b| label(b.threshold)).collect(),
buckets,
}
}
}
pub fn url_for_crate_name(url: &Urler, name: &str) -> String {
url.crate_by_origin(&Origin::from_crates_io_name(name))
}
pub fn url_for_rev_deps(url: &Urler, name: &str) -> String {
url.reverse_deps(&Origin::from_crates_io_name(name)).unwrap()
}
pub fn versions_for_crate_name(url: &Urler, name: &str) -> String {
url.all_versions(&Origin::from_crates_io_name(name)).unwrap()
}
pub fn format_number(num: u32) -> String {
Numeric::english().format_int(num)
}
pub fn format_bytes(bytes: u32) -> String {
let (num, unit) = match bytes {
0..=1_000_000 => ((bytes + 999) / 1024, "KB"),
0..=9_999_999 => return format!("{}MB", ((bytes + 250_000) / 500_000) as f64 * 0.5),
_ => ((bytes + 500_000) / 1_000_000, "MB"),
};
format!("{}{unit}", Numeric::english().format_int(num))
} | items_flattened.append(&mut parent.sub); | random_line_split |
global_stats.rs | use ahash::HashMapExt;
use categories::CATEGORIES;
use categories::Category;
use categories::CategoryMap;
use chrono::prelude::*;
use futures::future::try_join;
use crate::Page;
use crate::templates;
use crate::Urler;
use kitchen_sink::CompatByCrateVersion;
use kitchen_sink::KitchenSink;
use kitchen_sink::Origin;
use locale::Numeric;
use peeking_take_while::PeekableExt;
use rand::seq::SliceRandom;
use render_readme::Renderer;
use ahash::HashMap;
use std::io::Write;
#[derive(Debug)]
pub struct GlobalStats {
pub(crate) total_crate_num: u32,
pub(crate) total_owners_at_month: Vec<u32>,
pub(crate) max_total_owners: u32,
pub(crate) max_daily_downloads_rate: u32,
pub(crate) max_downloads_per_week: u64,
pub(crate) start_week_offset: u32,
pub(crate) dl_grid_line_every: u64,
pub(crate) weeks_to_reach_max_downloads: u32,
pub(crate) dl_per_day_this_year: (u64, u64),
pub(crate) dl_per_day_last_year: (u64, u64),
pub(crate) hs_releases: Histogram,
pub(crate) hs_sizes: Histogram,
pub(crate) hs_deps1: Histogram,
pub(crate) hs_deps2: Histogram,
pub(crate) hs_rev_deps: Histogram,
pub(crate) hs_maintenance: Histogram,
pub(crate) hs_age: Histogram,
pub(crate) hs_languish: Histogram,
pub(crate) hs_owner_crates: Histogram,
pub(crate) categories: Vec<TreeBox>,
pub(crate) rustc_stats_all: Vec<Compat>,
pub(crate) rustc_stats_recent: Vec<Compat>,
pub(crate) rustc_stats_recent_num: usize,
}
pub type CallbackFn = fn(&Urler, &str) -> String;
impl GlobalStats {
pub fn relative_increase(val: (u64, u64)) -> String {
format!("{:.1}×", val.0 as f64 / val.1 as f64)
}
pub fn dl_ratio_up(&self) -> bool {
let r1 = self.dl_per_day_this_year.0 as f64 / self.dl_per_day_this_year.1 as f64;
let r2 = self.dl_per_day_last_year.0 as f64 / self.dl_per_day_last_year.1 as f64;
r1 > r2
}
}
fn downloads_over_time(start: Date<Utc>, mut day: Date<Utc>, kitchen_sink: &KitchenSink) -> Result<Vec<(u64, u64)>, anyhow::Error> {
let mut current_year = 0;
let mut current = [0; 366];
let mut dl = Vec::new();
while day > start {
let year = day.year() as u16;
if year != current_year {
current_year = year;
current = kitchen_sink.total_year_downloads(current_year)?;
}
let n = current[day.ordinal0() as usize];
if n > 0 {
break;
}
day -= chrono::Duration::days(1);
}
while day > start {
let mut weekday_sum = 0;
let mut weekend_sum = 0;
for _ in 0..7 {
let year = day.year() as u16;
if year != current_year {
current_year = year;
current = kitchen_sink.total_year_downloads(current_year)?;
}
let n = current[day.ordinal0() as usize];
match day.weekday() {
// this sucks a bit due to mon/fri being UTC, and overlapping with the weekend
// in the rest of the world.
Weekday::Sat | Weekday::Sun => weekend_sum += n,
_ => weekday_sum += n,
};
day -= chrono::Duration::days(1);
}
dl.push((weekday_sum, weekend_sum));
}
dl.reverse();
Ok(dl)
}
pub async fn render_global_stats(out: &mut impl Write, kitchen_sink: &KitchenSink, _renderer: &Renderer) -> Result<(), anyhow::Error> {
let (categories, recent_crates) = try_join(
category_stats(kitchen_sink),
kitchen_sink.notable_recently_updated_crates(4100)).await?;
let urler = Urler::new(None);
let start = Utc.ymd(2015, 5, 15); // Rust 1.0
let start_week_offset = start.ordinal0()/7;
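// week-of-year of the 1.0 release; presumably used by the template to align the weekly chart columns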
let end = Utc::today() - chrono::Duration::days(2);
let latest_rustc_version = end.signed_duration_since(start).num_weeks()/6;
let mut compat_data = tokio::task::block_in_place(|| kitchen_sink.all_crate_compat())?;
let rustc_stats_all = rustc_stats(&compat_data, latest_rustc_version as u16)?;
let mut recent_compat = HashMap::with_capacity(recent_crates.len());
let mut rustc_stats_recent_num = 0;
for (o, _) in recent_crates {
if let Some(v) = compat_data.remove(&o) {
recent_compat.insert(o, v);
rustc_stats_recent_num += 1;
if rustc_stats_recent_num >= 4000 {
break;
}
}
}
let rustc_stats_recent = rustc_stats(&recent_compat, latest_rustc_version as u16)?;
let dl = downloads_over_time(start, end, kitchen_sink)?;
let (total_owners_at_month, mut hs_owner_crates) = owner_stats(kitchen_sink, start).await?;
hs_owner_crates.buckets.iter_mut().take(4).for_each(|c| c.examples.truncate(6)); // normal amount of crates is boring
assert!(dl.len() >= 52*2);
let this_year = &dl[dl.len()-52..];
let last_year = &dl[dl.len()-52*2..dl.len()-52];
fn sum2(s: &[(u64, u64)]) -> (u64, u64) {
let mut a_sum = 0;
let mut b_sum = 0;
s.iter().for_each(|&(a, b)| { a_sum += a; b_sum += b; });
(a_sum, b_sum)
}
let max_daily_downloads_rate = this_year.iter().map(move |(d, e)| (d/5).max(e/2)).max().unwrap_or(0) as u32;
let mut tmp_sum = 0;
let downloads_this_year = sum2(this_year);
let downloads_last_year = sum2(last_year);
let max_downloads_per_week = dl.iter().map(|(a, b)| a + b).max().unwrap_or(0);
let max_total_owners = total_owners_at_month.iter().copied().max().unwrap_or(0);
let dl_grid_line_every = (max_downloads_per_week / 6_000_000) * 1_000_000;
let mut hs_deps1 = Histogram::new(kitchen_sink.get_stats_histogram("deps")?.expect("hs_deps"), true,
&[0,1,2,3,4,5,6,7,8,9,10,11,12,14,16,18,20,25,30,40,60,80,100,120,150],
|n| if n > 11 {format!("≥{n}")} else {n. | let hs_deps2 = Histogram {
max: hs_deps1.max,
buckets: hs_deps1.buckets.split_off(10),
bucket_labels: hs_deps1.bucket_labels.split_off(10),
};
let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?;
let mut hs_rev_deps = Histogram::new(rev_deps, true,
&[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000],
|n| if n > 2 {format!("≥{n}")} else {n.to_string()});
hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5));
let age_label = |n| match n {
0..=1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
};
let total_crate_num = kitchen_sink.all_crates().count() as u32;
let stats = GlobalStats {
total_crate_num,
total_owners_at_month,
max_total_owners,
max_daily_downloads_rate,
start_week_offset,
weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32,
dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2),
dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2),
max_downloads_per_week,
dl_grid_line_every,
hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}),
hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| {
let mut t = format_bytes(n*1024);
t.insert(0, '≤'); t
}),
hs_deps1, hs_deps2,
hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n {
0 => "one-off".to_string(),
1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
}),
hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_owner_crates,
categories,
rustc_stats_all,
rustc_stats_recent,
rustc_stats_recent_num,
hs_rev_deps,
};
templates::global_stats(out, &Page {
title: "State of the Rust/Cargo crates ecosystem".to_owned(),
description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? Is Rust usage growing?".to_owned()),
noindex: false,
search_meta: true,
critical_css_data: Some(include_str!("../../style/public/home.css")),
critical_css_dev_url: Some("/home.css"),
..Default::default()
}, &dl, &stats, &urler)?;
Ok(())
}
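/// One bar of the rustc-compatibility chart: crate counts per outcome, later scaled to segment widths by `rustc_stats`.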
#[derive(Default, Copy, Clone, Debug)]
pub struct Compat {
pub(crate) bad: u32,
pub(crate) maybe_bad: u32,
pub(crate) unknown: u32,
pub(crate) maybe_ok: u32,
pub(crate) ok: u32,
}
impl Compat {
pub fn sum(&self) -> u32 {
self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok
}
}
fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> {
// (ok, maybe, not), [0] is unused
let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize];
for c in compat.values() {
// can't compile at all
if !c.iter().any(|(_, c)| c.has_ever_built()) {
continue;
}
// stats for latest crate version only
let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) {
Some((_, c)) => c,
None => continue,
};
let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) {
Some((_, c)) => c,
None => latest_ver,
};
let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0);
let newest_bad = latest_ver.newest_bad().unwrap_or(0);
let oldest_ok = latest_ver.oldest_ok().unwrap_or(999);
let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999);
for (ver, c) in rustc_versions.iter_mut().enumerate() {
let ver = ver as u16;
if ver >= oldest_ok {
if ver >= oldest_ok_raw {
c.ok += 1;
} else {
c.maybe_ok += 1;
}
} else if ver <= newest_bad {
if ver <= newest_bad_raw {
c.bad += 1;
} else {
c.maybe_bad += 1;
}
} else {
c.unknown += 1;
}
}
}
// resize to width
let width = 330;
for c in &mut rustc_versions {
let sum = c.sum();
c.bad = (c.bad * width + width / 2) / sum;
c.ok = (c.ok * width + width / 2) / sum;
c.maybe_bad = (c.maybe_bad * width + width / 2) / sum;
c.maybe_ok = (c.maybe_ok * width + width / 2) / sum;
c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok;
}
Ok(rustc_versions)
}
fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> {
let mut out = Vec::with_capacity(sub.len());
for c in sub.values() {
if c.slug == "uncategorized" {
continue;
}
out.push(TreeBox {
cat: c,
label: c.name.clone(),
title: c.name.clone(),
count: 0,
weight: 0.,
bounds: treemap::Rect::new(),
color: String::new(),
font_size: 12.,
sub: cat_slugs(&c.sub),
});
}
out
}
#[derive(Debug, Clone)]
pub struct TreeBox {
pub cat: &'static Category,
pub title: String,
pub label: String,
pub font_size: f64,
/// SVG fill
pub color: String,
pub count: u32,
pub weight: f64,
pub bounds: treemap::Rect,
pub sub: Vec<TreeBox>,
}
impl TreeBox {
pub fn line_y(&self, nth: usize) -> f64 {
self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64
}
pub fn can_fit_count(&self) -> bool {
self.line_y(self.label.lines().count()) + 1. - self.bounds.y < self.bounds.h
}
}
impl treemap::Mappable for TreeBox {
fn size(&self) -> f64 { self.weight }
fn bounds(&self) -> &treemap::Rect { &self.bounds }
fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; }
}
async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> {
use treemap::*;
let mut roots = cat_slugs(&CATEGORIES.root);
#[track_caller]
fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
items.swap_remove(pos)
}
#[track_caller]
fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
&mut items[pos]
}
fn new_cat(sub: Vec<TreeBox>) -> TreeBox {
TreeBox {
cat: CATEGORIES.root.values().next().unwrap(),
title: String::new(),
label: String::new(),
font_size: 0.,
color: String::new(),
count: 0,
weight: 0.,
bounds: Rect::new(),
sub,
}
}
// names don't fit
get_cat("database-implementations", &mut roots).label = "Database".into();
get_cat("simulation", &mut roots).label = "Sim".into();
get_cat("caching", &mut roots).label = "Cache".into();
get_cat("config", &mut roots).label = "Config".into();
get_cat("os", &mut roots).label = "OS".into();
get_cat("internationalization", &mut roots).label = "i18n".into();
get_cat("authentication", &mut roots).label = "Auth".into();
get_cat("visualization", &mut roots).label = "Visualize".into();
get_cat("accessibility", &mut roots).label = "a11y".into();
get_cat("compilers", &mut roots).label = "Lang".into();
get_cat("os::macos-apis", &mut get_cat("os", &mut roots).sub).label = "Apple".into();
get_cat("rendering::engine", &mut get_cat("rendering", &mut roots).sub).label = "Engine".into();
get_cat("rendering::data-formats", &mut get_cat("rendering", &mut roots).sub).label = "Formats".into();
// group them in a more sensible way
let parsers = vec![take_cat("parsing", &mut roots), take_cat("parser-implementations", &mut roots)];
roots.push(new_cat(parsers));
let hw = vec![take_cat("embedded", &mut roots), take_cat("hardware-support", &mut roots), take_cat("no-std", &mut roots)];
roots.push(new_cat(hw));
let db = vec![take_cat("database", &mut roots), take_cat("database-implementations", &mut roots)];
roots.push(new_cat(db));
let gg = vec![take_cat("game-development", &mut roots), take_cat("games", &mut roots)];
roots.push(new_cat(gg));
let int = take_cat("command-line-interface", &mut roots);
let cli = vec![int, take_cat("command-line-utilities", &mut roots)];
roots.push(new_cat(cli));
let mut editors = take_cat("text-editors", &mut roots);
editors.label = "Editors".into();
let txt = vec![
take_cat("text-processing", &mut roots),
editors,
take_cat("template-engine", &mut roots),
take_cat("value-formatting", &mut roots),
];
roots.push(new_cat(txt));
let wasm = take_cat("wasm", &mut roots);
get_cat("web-programming", &mut roots).sub.push(wasm);
let mut asyn = take_cat("asynchronous", &mut roots);
asyn.label = "Async".into();
get_cat("network-programming", &mut roots).sub.push(asyn);
let mut proc = take_cat("development-tools::procedural-macro-helpers", &mut get_cat("development-tools", &mut roots).sub);
proc.label = "Proc macros".into();
get_cat("rust-patterns", &mut roots).sub.push(proc);
let concurrency = take_cat("concurrency", &mut roots);
get_cat("rust-patterns", &mut roots).sub.push(concurrency);
let mut cr = get_cat("cryptography", &mut roots).sub.remove(0);
cr.label = "Crypto Magic Beans".into();
roots.push(cr);
// first layout of top-level boxes (won't be used for anything other than second layout)
for top in roots.iter_mut() {
let (count, weight) = if top.label.is_empty() { (0, 0.) } else { kitchen_sink.category_crate_count(&top.cat.slug).await? };
top.count = count;
top.weight = weight;
let mut top_copy = top.clone();
top_copy.sub = Vec::new();
for i in top.sub.iter_mut() {
let (count, weight) = kitchen_sink.category_crate_count(&i.cat.slug).await?;
i.count = count;
i.weight = weight;
top.count += i.count;
top.weight += i.weight;
assert!(i.sub.is_empty());
}
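// Re-insert a copy of the parent (with its own count/weight but no children) as the
// first sub-box, so crates filed directly under the top-level category still get their
// own area in the second layout pass below.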
if top_copy.count > 0 {
top.sub.insert(0, top_copy);
}
}
let mut items_flattened = Vec::new();
let layout = TreemapLayout::new();
layout.layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.));
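// Second pass: lay out each category's sub-boxes inside the rectangle assigned to the
// parent above, then flatten everything into a single list for rendering.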
for parent in roots.iter_mut() {
let layout = TreemapLayout::new();
layout.layout_items(&mut parent.sub, parent.bounds);
items_flattened.append(&mut parent.sub);
}
postprocess_treebox_items(&mut items_flattened);
Ok(items_flattened)
}
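// Illustrative sketch (not part of the original module): the same two-pass treemap
// layout in isolation. `Node`, its weights and the viewport numbers are invented for
// the example; `TreemapLayout`, `Rect` and `Mappable` are the same treemap-crate items
// already used by `category_stats` above.
#[cfg(test)]
mod two_pass_layout_sketch {
    use treemap::{Mappable, Rect, TreemapLayout};

    struct Node {
        weight: f64,
        bounds: Rect,
        children: Vec<Node>,
    }

    impl Mappable for Node {
        fn size(&self) -> f64 { self.weight }
        fn bounds(&self) -> &Rect { &self.bounds }
        fn set_bounds(&mut self, b: Rect) { self.bounds = b; }
    }

    #[test]
    fn children_are_laid_out_inside_their_parent() {
        let leaf = |weight| Node { weight, bounds: Rect::new(), children: Vec::new() };
        let mut roots = vec![
            Node { weight: 3., bounds: Rect::new(), children: vec![leaf(2.), leaf(1.)] },
            leaf(1.),
        ];
        // First pass: position the top-level boxes in the viewport.
        TreemapLayout::new().layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.));
        // Second pass: position each parent's children inside the parent's box.
        for parent in roots.iter_mut() {
            TreemapLayout::new().layout_items(&mut parent.children, parent.bounds);
        }
        for child in &roots[0].children {
            assert!(child.bounds.w <= roots[0].bounds.w + 1e-9);
            assert!(child.bounds.h <= roots[0].bounds.h + 1e-9);
        }
    }
}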
fn postprocess_treebox_items(items: &mut Vec<TreeBox>) {
let colors = [
[0xff, 0xf1, 0xe6],
[0xe2, 0xec, 0xe9],
[0xDC, 0xED, 0xC1],
[0xcd, 0xda, 0xfd],
[0xbe, 0xe1, 0xe6],
[0xfd, 0xe2, 0xe4],
[0xdf, 0xe7, 0xfd],
[0xFF, 0xD3, 0xB6],
[0xea, 0xe4, 0xe9],
[0xd0, 0xd1, 0xff],
[0xf4, 0xda, 0xe2],
[0xde, 0xc3, 0xe1],
[0xd4, 0xe0, 0xf9],
[0xFF, 0xD3, 0xB6],
[0xDF, 0xCB, 0xD2],
];
let len = items.len() as f32;
for (i, item) in items.iter_mut().enumerate() {
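// Blend each palette entry toward a fixed tint, weighted by the item's position in the
// list, then even out its lightness in Lab space (the "fix my bad palette" step below).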
let x = 0.8 + (i as f32 / len) * 0.2;
let c = colors[i % colors.len()];
let c = [
(c[0] as f32 * x + (1. - x) * 200.) as u8,
(c[1] as f32 * x + (1. - x) * 100.) as u8,
(c[2] as f32 * x + (1. - x) * 200.) as u8
];
let mut l = lab::Lab::from_rgb(&c);
l.l = (l.l + 90.) * 0.5; // fix my bad palette
let c = l.to_rgb();
item.color = format!("#{:02x}{:02x}{:02x}", c[0], c[1], c[2]);
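// Fit the label in two passes: wrap at an optimistic width to estimate the line count,
// derive a trial font size from that, re-wrap at the width the trial size allows, and
// only then settle on the final font size.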
let ideal_max_width = (item.bounds.w * 1.2 / (item.font_size / 1.7)) as usize;
let maybe_label = textwrap::wrap(&item.label, textwrap::Options::new(ideal_max_width).break_words(false));
let chars = maybe_label.iter().map(|w| w.len()).max().unwrap_or(1);
let lines = maybe_label.len();
let try_font_size = item.font_size
.min(item.bounds.h / (lines as f64 * 1.05) - 4.)
.min(item.bounds.w * 1.6 / chars as f64)
.max(4.);
let max_width = (item.bounds.w / (try_font_size / 1.7)) as usize;
let must_break = ideal_max_width < chars * 2 / 3 && item.bounds.h > item.font_size * 2.;
let label = textwrap::wrap(&item.label, textwrap::Options::new(max_width).break_words(must_break));
let chars = label.iter().map(|w| w.len()).max().unwrap_or(1);
let lines = label.len();
item.label = label.join("\n");
item.font_size = item.font_size
.min(item.bounds.h / (lines as f64 * 1.05) - 4.)
.min(item.bounds.w * 1.6 / chars as f64)
.max(4.);
}
}
async fn owner_stats(kitchen_sink: &KitchenSink, start: Date<Utc>) -> Result<(Vec<u32>, Histogram), anyhow::Error> {
let all_owners = kitchen_sink.crate_all_owners().await?;
eprintln!("got {} owners", all_owners.len());
assert!(all_owners.len() > 1000);
let mut owner_crates_with_ids = HashMap::new();
let mut total_owners_at_month = vec![0u32; (Utc::today().signed_duration_since(start).num_days() as usize + 29) / 30];
let mut sum = 0;
for o in &all_owners {
// account creation history
let (y,m,_d) = o.created_at;
if y < 2015 || (y == 2015 && m < 5) {
sum += 1;
continue;
}
let mon_num = (y as usize - 2015) * 12 + m as usize - 5;
if mon_num < total_owners_at_month.len() {
total_owners_at_month[mon_num] += 1;
}
// update histogram
let t = owner_crates_with_ids.entry(o.num_crates).or_insert((0, Vec::<u64>::new()));
t.0 += 1;
if t.1.len() < 1000 {
t.1.push(o.github_id);
}
}
// convert IDs to logins
let owner_crates = owner_crates_with_ids.into_iter().map(|(k, (pop, mut id_examples))| {
let mut examples = Vec::with_capacity(id_examples.len().min(10));
if k <= 50 {
id_examples.sort_unstable(); // promote low-id users for normal amount of crates
} else {
id_examples.sort_unstable_by_key(|v| !v); // show newest users for potentially-spammy crate sets
}
// but include one counter-example just to make things more interesting
if let Some(tmp) = id_examples.pop() {
id_examples.insert(0, tmp);
}
for id in id_examples {
if let Ok(login) = kitchen_sink.login_by_github_id(id) {
if kitchen_sink.crates_io_login_on_blocklist(&login).is_none() { // github logins currently equal crates_io_logins
examples.push(login);
if examples.len() >= 10 {
break;
}
}
}
}
(k, (pop, examples))
}).collect();
// trim empty end
while total_owners_at_month.last().map_or(false, |&l| l == 0) {
total_owners_at_month.pop();
}
total_owners_at_month.iter_mut().for_each(|n| {
sum += *n;
*n = sum;
});
let hs_owner_crates = Histogram::new(owner_crates, true, &[1,2,3,6,25,50,75,100,150,200,500,750,2000], |n| if n > 3 {format!("≥{n}")} else {n.to_string()});
Ok((total_owners_at_month, hs_owner_crates))
}
#[derive(Debug)]
pub struct Histogram {
pub max: u32,
pub buckets: Vec<Bucket>,
pub bucket_labels: Vec<String>,
}
#[derive(Debug)]
pub struct Bucket {
/// population
pub count: u32,
pub threshold: u32,
pub examples: Vec<String>,
}
impl Bucket {
pub fn new(threshold: u32) -> Self {
Self { threshold, count: 0, examples: Vec::with_capacity(BUCKET_MAX_EXAMPLES) }
}
}
const BUCKET_MAX_EXAMPLES: usize = 25;
impl Histogram {
pub fn perc(&self, val: u32) -> f32 {
val as f32 / (self.max as f32 / 100.)
}
/// `greater_mode`: a bucket counts values of "this many or more"; otherwise a bucket
/// counts values up to and including its threshold.
pub fn new(data: kitchen_sink::StatsHistogram, greater_mode: bool, bucket_thresholds: &[u32], label: fn(u32) -> String) -> Self {
let mut data: Vec<_> = data.into_iter().collect();
data.sort_unstable_by_key(|d| d.0);
let mut data = data.drain(..).fuse().peekable();
fn make_bucket(mut b: Bucket, (key, (size, mut val)): (u32, (u32, Vec<String>))) -> Bucket {
debug_assert!(size as usize >= val.len());
b.count += size;
if b.examples.len() < BUCKET_MAX_EXAMPLES {
b.examples.append(&mut val);
}
if key > b.threshold {
b.threshold = key;
}
b
}
let mut buckets: Vec<_> = bucket_thresholds.windows(2)
.map(|thr_pair| (thr_pair[0], thr_pair[1]))
.chain(std::iter::once((bucket_thresholds.last().copied().unwrap(), !0)))
.map(|(threshold, next_thr)| {
let mut b = data.by_ref()
.peeking_take_while(|d| if greater_mode {
d.0 < next_thr
} else {
d.0 <= threshold
})
.fold(Bucket::new(0), make_bucket);
if greater_mode {
b.threshold = threshold;
} else {
// round threshold to max if close, otherwise show actual
if b.threshold / 9 > threshold / 10 {
b.threshold = threshold;
}
}
b.examples.shuffle(&mut rand::thread_rng());
b
})
.filter(|bucket| bucket.count > 0)
.collect();
let other = data.fold(Bucket::new(0), make_bucket);
if other.count > 0 {
buckets.push(other);
}
Self {
max: buckets.iter().map(|b| b.count).max().unwrap_or(0),
bucket_labels: buckets.iter().map(|b| label(b.threshold)).collect(),
buckets,
}
}
}
pub fn url_for_crate_name(url: &Urler, name: &str) -> String {
url.crate_by_origin(&Origin::from_crates_io_name(name))
}
pub fn url_for_rev_deps(url: &Urler, name: &str) -> String {
url.reverse_deps(&Origin::from_crates_io_name(name)).unwrap()
}
pub fn versions_for_crate_name(url: &Urler, name: &str) -> String {
url.all_versions(&Origin::from_crates_io_name(name)).unwrap()
}
pub fn format_number(num: u32) -> String {
Numeric::english().format_int(num)
}
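// Rounds to whole kilobytes below 1 MB, to halves of a megabyte up to ~10 MB, and to
// whole megabytes above that. Illustrative values: format_bytes(10_240) == "10KB",
// format_bytes(2_500_000) == "2.5MB", format_bytes(123_000_000) == "123MB".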
pub fn format_bytes(bytes: u32) -> String {
let (num, unit) = match bytes {
0..=1_000_000 => ((bytes + 999) / 1024, "KB"),
0..=9_999_999 => return format!("{}MB", ((bytes + 250_000) / 500_000) as f64 * 0.5),
_ => ((bytes + 500_000) / 1_000_000, "MB"),
};
format!("{}{unit}", Numeric::english().format_int(num))
}
| to_string()});
| conditional_block |
worker.go | /*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package worker
import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path"
"sync"
"time"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap/core/serror"
"github.com/intelsdi-x/snap/mgmt/rest/client"
"github.com/intelsdi-x/snap/pkg/schedule"
"github.com/intelsdi-x/snap/scheduler"
"github.com/intelsdi-x/snap/scheduler/wmap"
log "github.com/sirupsen/logrus"
)
const (
retryDelay = 500 * time.Millisecond
retryLimit = 20
)
const (
PluginLoadedType = iota
PluginUnloadedType
)
const (
TaskCreatedType = iota
TaskStoppedType
TaskStartedType
TaskRemovedType
)
var (
PluginRequestTypeLookup = map[PluginRequestType]string{
PluginLoadedType: "Loaded",
PluginUnloadedType: "Unloaded",
}
TaskRequestTypeLookup = map[TaskRequestType]string{
TaskCreatedType: "Created",
TaskStoppedType: "Stopped",
TaskStartedType: "Started",
TaskRemovedType: "Removed",
}
)
var TempPath = os.TempDir()
type PluginRequestType int
func (p PluginRequestType) String() string {
return PluginRequestTypeLookup[p]
}
type TaskRequestType int
func (t TaskRequestType) String() string {
return TaskRequestTypeLookup[t]
}
type PluginRequest struct {
Plugin core.Plugin
RequestType PluginRequestType
retryCount int
}
type TaskRequest struct {
Task Task
RequestType TaskRequestType
retryCount int
}
type Task struct {
ID string
StartOnCreate bool
}
type ManagesPlugins interface {
Load(*core.RequestedPlugin) (core.CatalogedPlugin, serror.SnapError)
Unload(plugin core.Plugin) (core.CatalogedPlugin, serror.SnapError)
PluginCatalog() core.PluginCatalog
}
type ManagesTasks interface {
GetTask(id string) (core.Task, error)
CreateTaskTribe(sch schedule.Schedule, wfMap *wmap.WorkflowMap, startOnCreate bool, opts ...core.TaskOption) (core.Task, core.TaskErrors)
StopTaskTribe(id string) []serror.SnapError
StartTaskTribe(id string) []serror.SnapError
RemoveTaskTribe(id string) error
}
type getsMembers interface {
GetPluginAgreementMembers() ([]Member, error)
GetTaskAgreementMembers() ([]Member, error)
GetRequestPassword() string
}
type Member interface {
GetAddr() net.IP
GetRestPort() string
GetRestProto() string
GetRestInsecureSkipVerify() bool
GetName() string
}
// newWorker builds a tribe worker wired to the shared plugin and task queues.
func newWorker(id int,
pluginQueue chan PluginRequest,
taskQueue chan TaskRequest,
quitChan chan struct{},
wg *sync.WaitGroup,
pm ManagesPlugins,
tm ManagesTasks,
mm getsMembers) worker |
type worker struct {
pluginManager ManagesPlugins
memberManager getsMembers
taskManager ManagesTasks
id int
pluginWork chan PluginRequest
taskWork chan TaskRequest
quitChan chan struct{}
waitGroup *sync.WaitGroup
logger *log.Entry
}
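// DispatchWorkers is typically wired up roughly like this (illustrative sketch, not from
// the original file; pluginMgr, taskMgr and memberMgr are assumed implementations of the
// three manager interfaces):
//
//	pluginQueue := make(chan PluginRequest)
//	taskQueue := make(chan TaskRequest)
//	quit := make(chan struct{})
//	var wg sync.WaitGroup
//	DispatchWorkers(4, pluginQueue, taskQueue, quit, &wg, pluginMgr, taskMgr, memberMgr)
//	// ... enqueue PluginRequest / TaskRequest values ...
//	close(quit) // every worker goroutine selects on the quit channel and exits
//	wg.Wait()   // each worker adds two goroutines to the wait group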
func DispatchWorkers(nworkers int, pluginQueue chan PluginRequest, taskQueue chan TaskRequest, quitChan chan struct{}, workerWaitGroup *sync.WaitGroup, cp ManagesPlugins, tm ManagesTasks, mm getsMembers) {
for i := 0; i < nworkers; i++ {
log.WithFields(log.Fields{
"_module": "worker",
"_block": "dispatch-workers",
}).Infof("dispatching tribe worker-%d", i+1)
worker := newWorker(i+1, pluginQueue, taskQueue, quitChan, workerWaitGroup, cp, tm, mm)
worker.start()
}
}
// start launches the task and plugin worker goroutines.
func (w worker) start() {
logger := w.logger.WithFields(log.Fields{"_block": "start"})
// task worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting task worker")
for {
select {
case work := <-w.taskWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"task": work.Task.ID,
"request-type": work.RequestType.String(),
"retries": work.retryCount,
})
logger.Debug("received task work")
if work.RequestType == TaskStartedType {
if err := w.startTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task start request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskStoppedType {
if err := w.stopTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task stop request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskCreatedType {
w.createTask(work.Task.ID, work.Task.StartOnCreate)
}
if work.RequestType == TaskRemovedType {
if err := w.removeTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
case <-w.quitChan:
logger.Infof("stopping tribe worker")
return
}
}
}()
// plugin worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting plugin worker")
for {
select {
case work := <-w.pluginWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"plugin-name": work.Plugin.Name(),
"plugin-version": work.Plugin.Version(),
"plugin-type": work.Plugin.TypeName(),
"request-type": work.RequestType.String(),
})
logger.Debug("received plugin work")
if work.RequestType == PluginLoadedType {
if err := w.loadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
if work.RequestType == PluginUnloadedType {
if err := w.unloadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
case <-w.quitChan:
w.logger.Debug("stop tribe plugin worker")
return
}
}
}()
}
func (w worker) unloadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "unload-plugin",
})
if !w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
if _, err := w.pluginManager.Unload(plugin); err != nil {
logger.WithField("err", err).Info("failed to unload plugin")
return err
}
return nil
}
func (w worker) loadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "load-plugin",
})
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
members, err := w.memberManager.GetPluginAgreementMembers()
if err != nil {
logger.Error(err)
return err
}
for _, member := range shuffle(members) {
url := fmt.Sprintf("%s://%s:%s/v1/plugins/%s/%s/%d?download=true", member.GetRestProto(), member.GetAddr(), member.GetRestPort(), plugin.TypeName(), plugin.Name(), plugin.Version())
c, err := client.New(url, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.WithFields(log.Fields{
"err": err,
"url": url,
}).Info("unable to create client")
continue
}
f, err := w.downloadPlugin(c, plugin)
// If we can't download from this member, try the next
if err != nil {
logger.Error(err)
continue
}
rp, err := core.NewRequestedPlugin(f.Name(), TempPath, nil)
if err != nil {
logger.Error(err)
return err
}
_, err = w.pluginManager.Load(rp)
if err != nil {
logger.Error(err)
return err
}
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
return errors.New("failed to load plugin")
}
return errors.New("failed to find a member with the plugin")
}
func (w worker) downloadPlugin(c *client.Client, plugin core.Plugin) (*os.File, error) {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"url": c.URL,
"_block": "download-plugin",
})
resp, err := c.TribeRequest()
if err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Info("plugin not found")
return nil, fmt.Errorf("Plugin not found at %s: %s", c.URL, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if resp.Header.Get("Content-Type") != "application/x-gzip" {
logger.WithField("content-type", resp.Header.Get("Content-Type")).Error("Expected application/x-gzip")
}
dir, err := ioutil.TempDir("", "")
if err != nil {
logger.Error(err)
return nil, err
}
fpath := path.Join(dir, fmt.Sprintf("%s-%s-%d", plugin.TypeName(), plugin.Name(), plugin.Version()))
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700)
if err != nil {
logger.Error(err)
return nil, err
}
_, err = io.Copy(f, resp.Body)
f.Close()
if err != nil {
logger.Error(err)
return nil, err
}
return f, nil
}
return nil, fmt.Errorf("Status code not 200 was %v: %s", resp.StatusCode, c.URL)
}
func (w worker) createTask(taskID string, startOnCreate bool) {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "create-task",
})
done := false
_, err := w.taskManager.GetTask(taskID)
if err == nil {
return
}
for {
members, err := w.memberManager.GetTaskAgreementMembers()
if err != nil {
logger.Error(err)
continue
}
for _, member := range shuffle(members) {
uri := fmt.Sprintf("%s://%s:%s", member.GetRestProto(), member.GetAddr(), member.GetRestPort())
logger.Debugf("getting task %v from %v", taskID, uri)
c, err := client.New(uri, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.Error(err)
continue
}
taskResult := c.GetTask(taskID)
if taskResult.Err != nil {
logger.WithField("err", taskResult.Err.Error()).Debug("error getting task")
continue
}
// this block addresses the condition when we are creating and starting
// a task and the task is created but fails to start (deps were not yet met)
if startOnCreate {
if _, err := w.taskManager.GetTask(taskID); err == nil {
logger.Debug("starting task")
if errs := w.taskManager.StartTaskTribe(taskID); errs != nil {
fields := log.Fields{}
for idx, e := range errs {
fields[fmt.Sprintf("err-%d", idx)] = e.Error()
}
logger.WithFields(fields).Error("error starting task")
continue
}
done = true
break
}
}
logger.Debug("creating task")
opt := core.SetTaskID(taskID)
_, errs := w.taskManager.CreateTaskTribe(
getSchedule(taskResult.ScheduledTaskReturned.Schedule),
taskResult.Workflow,
startOnCreate,
opt)
if errs != nil && len(errs.Errors()) > 0 {
fields := log.Fields{}
for idx, e := range errs.Errors() {
fields[fmt.Sprintf("err-%d", idx)] = e
}
logger.WithFields(fields).Debug("error creating task")
continue
}
logger.Debugf("task created")
done = true
break
}
if done {
break
}
time.Sleep(500 * time.Millisecond)
}
}
func (w worker) startTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "start-task",
})
logger.Debug("starting task")
errs := w.taskManager.StartTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
if errs != nil {
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyRunning.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
} else {
logger.WithFields(err.Fields()).Info(err)
}
}
}
return errors.New("error starting task")
}
func (w worker) stopTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "stop-task",
})
errs := w.taskManager.StopTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyStopped.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
} else {
logger.WithFields(err.Fields()).Info(err)
}
}
return errors.New("error stopping task")
}
func (w worker) removeTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "remove-task",
})
err := w.taskManager.RemoveTaskTribe(taskID)
if err == nil {
return nil
}
logger.Info(err)
return err
}
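// shuffle returns the members in a random order so that plugin downloads and task
// lookups are spread across tribe members instead of always hitting the same one.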
func shuffle(m []Member) []Member {
result := make([]Member, len(m))
perm := rand.Perm(len(m))
for i, v := range perm {
result[v] = m[i]
}
return result
}
func (w worker) isPluginLoaded(n, t string, v int) bool {
catalog := w.pluginManager.PluginCatalog()
for _, item := range catalog {
if item.TypeName() == t &&
item.Name() == n &&
item.Version() == v {
w.logger.WithFields(log.Fields{
"name": n,
"version": v,
"type": t,
"_block": "is-plugin-loaded",
}).Debugf("plugin already loaded")
return true
}
}
return false
}
func getSchedule(s *core.Schedule) schedule.Schedule {
logger := log.WithFields(log.Fields{
"_block": "get-schedule",
"schedule-type": s.Type,
})
switch s.Type {
case "simple", "windowed":
if s.Interval == "" {
logger.Error(core.ErrMissingScheduleInterval)
return nil
}
d, err := time.ParseDuration(s.Interval)
if err != nil {
logger.Error(err)
return nil
}
sch := schedule.NewWindowedSchedule(
d,
s.StartTimestamp,
s.StopTimestamp,
s.Count,
)
if err = sch.Validate(); err != nil {
logger.Error(err)
return nil
}
return sch
case "cron":
if s.Interval == "" {
logger.Error(core.ErrMissingScheduleInterval)
return nil
}
sch := schedule.NewCronSchedule(s.Interval)
if err := sch.Validate(); err != nil {
logger.Error(err)
return nil
}
return sch
case "streaming":
logger.Error("streaming is not yet available for tribe")
//todo
//return schedule.NewStreamingSchedule()
default:
logger.Error("unknown schedule type")
}
return nil
}
| {
logger := log.WithFields(log.Fields{
"_module": "worker",
"worker-id": id,
})
worker := worker{
pluginManager: pm,
taskManager: tm,
memberManager: mm,
id: id,
pluginWork: pluginQueue,
taskWork: taskQueue,
waitGroup: wg,
quitChan: quitChan,
logger: logger,
}
return worker
} | identifier_body |
worker.go | /*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package worker
import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path"
"sync"
"time"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap/core/serror"
"github.com/intelsdi-x/snap/mgmt/rest/client"
"github.com/intelsdi-x/snap/pkg/schedule"
"github.com/intelsdi-x/snap/scheduler"
"github.com/intelsdi-x/snap/scheduler/wmap"
log "github.com/sirupsen/logrus"
)
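// retryDelay and retryLimit bound the re-queue behaviour in start(): a failed plugin or
// task request is put back on its queue after retryDelay, at most retryLimit times.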
const (
retryDelay = 500 * time.Millisecond
retryLimit = 20
)
const (
PluginLoadedType = iota
PluginUnloadedType
)
const (
TaskCreatedType = iota
TaskStoppedType
TaskStartedType
TaskRemovedType
)
var (
PluginRequestTypeLookup = map[PluginRequestType]string{
PluginLoadedType: "Loaded",
PluginUnloadedType: "Unloaded",
}
TaskRequestTypeLookup = map[TaskRequestType]string{
TaskCreatedType: "Created",
TaskStoppedType: "Stopped",
TaskStartedType: "Started",
TaskRemovedType: "Removed",
}
)
var TempPath = os.TempDir()
type PluginRequestType int
func (p PluginRequestType) String() string {
return PluginRequestTypeLookup[p]
}
type TaskRequestType int
func (t TaskRequestType) String() string {
return TaskRequestTypeLookup[t]
}
type PluginRequest struct {
Plugin core.Plugin
RequestType PluginRequestType
retryCount int
}
type TaskRequest struct {
Task Task
RequestType TaskRequestType
retryCount int
}
type Task struct {
ID string
StartOnCreate bool
}
type ManagesPlugins interface {
Load(*core.RequestedPlugin) (core.CatalogedPlugin, serror.SnapError)
Unload(plugin core.Plugin) (core.CatalogedPlugin, serror.SnapError)
PluginCatalog() core.PluginCatalog
}
type ManagesTasks interface {
GetTask(id string) (core.Task, error)
CreateTaskTribe(sch schedule.Schedule, wfMap *wmap.WorkflowMap, startOnCreate bool, opts ...core.TaskOption) (core.Task, core.TaskErrors)
StopTaskTribe(id string) []serror.SnapError
StartTaskTribe(id string) []serror.SnapError
RemoveTaskTribe(id string) error
}
type getsMembers interface {
GetPluginAgreementMembers() ([]Member, error)
GetTaskAgreementMembers() ([]Member, error)
GetRequestPassword() string
}
type Member interface {
GetAddr() net.IP
GetRestPort() string
GetRestProto() string
GetRestInsecureSkipVerify() bool
GetName() string
}
// newWorker builds a tribe worker wired to the shared plugin and task queues.
func newWorker(id int,
pluginQueue chan PluginRequest,
taskQueue chan TaskRequest,
quitChan chan struct{},
wg *sync.WaitGroup,
pm ManagesPlugins,
tm ManagesTasks,
mm getsMembers) worker {
logger := log.WithFields(log.Fields{
"_module": "worker",
"worker-id": id,
})
worker := worker{
pluginManager: pm,
taskManager: tm,
memberManager: mm,
id: id,
pluginWork: pluginQueue,
taskWork: taskQueue,
waitGroup: wg,
quitChan: quitChan,
logger: logger,
}
return worker
}
type worker struct {
pluginManager ManagesPlugins
memberManager getsMembers
taskManager ManagesTasks
id int
pluginWork chan PluginRequest
taskWork chan TaskRequest
quitChan chan struct{}
waitGroup *sync.WaitGroup
logger *log.Entry
}
func DispatchWorkers(nworkers int, pluginQueue chan PluginRequest, taskQueue chan TaskRequest, quitChan chan struct{}, workerWaitGroup *sync.WaitGroup, cp ManagesPlugins, tm ManagesTasks, mm getsMembers) {
for i := 0; i < nworkers; i++ {
log.WithFields(log.Fields{
"_module": "worker",
"_block": "dispatch-workers",
}).Infof("dispatching tribe worker-%d", i+1)
worker := newWorker(i+1, pluginQueue, taskQueue, quitChan, workerWaitGroup, cp, tm, mm)
worker.start()
}
}
// start launches the task and plugin worker goroutines.
func (w worker) start() {
logger := w.logger.WithFields(log.Fields{"_block": "start"})
// task worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting task worker")
for {
select {
case work := <-w.taskWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"task": work.Task.ID,
"request-type": work.RequestType.String(),
"retries": work.retryCount,
})
logger.Debug("received task work")
if work.RequestType == TaskStartedType {
if err := w.startTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task start request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskStoppedType {
if err := w.stopTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task stop request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskCreatedType {
w.createTask(work.Task.ID, work.Task.StartOnCreate)
}
if work.RequestType == TaskRemovedType {
if err := w.removeTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
case <-w.quitChan:
logger.Infof("stopping tribe worker")
return
}
}
}()
// plugin worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting plugin worker")
for {
select {
case work := <-w.pluginWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"plugin-name": work.Plugin.Name(),
"plugin-version": work.Plugin.Version(),
"plugin-type": work.Plugin.TypeName(),
"request-type": work.RequestType.String(),
})
logger.Debug("received plugin work")
if work.RequestType == PluginLoadedType {
if err := w.loadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
if work.RequestType == PluginUnloadedType {
if err := w.unloadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
case <-w.quitChan:
w.logger.Debug("stop tribe plugin worker")
return
}
}
}()
}
func (w worker) unloadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "unload-plugin",
})
if !w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
if _, err := w.pluginManager.Unload(plugin); err != nil {
logger.WithField("err", err).Info("failed to unload plugin")
return err
}
return nil
}
func (w worker) loadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "load-plugin",
})
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
members, err := w.memberManager.GetPluginAgreementMembers()
if err != nil {
logger.Error(err)
return err
}
for _, member := range shuffle(members) {
url := fmt.Sprintf("%s://%s:%s/v1/plugins/%s/%s/%d?download=true", member.GetRestProto(), member.GetAddr(), member.GetRestPort(), plugin.TypeName(), plugin.Name(), plugin.Version())
c, err := client.New(url, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.WithFields(log.Fields{
"err": err,
"url": url,
}).Info("unable to create client")
continue
}
f, err := w.downloadPlugin(c, plugin)
// If we can't download from this member, try the next
if err != nil {
logger.Error(err)
continue
}
rp, err := core.NewRequestedPlugin(f.Name(), TempPath, nil)
if err != nil |
_, err = w.pluginManager.Load(rp)
if err != nil {
logger.Error(err)
return err
}
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
return errors.New("failed to load plugin")
}
return errors.New("failed to find a member with the plugin")
}
func (w worker) downloadPlugin(c *client.Client, plugin core.Plugin) (*os.File, error) {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"url": c.URL,
"_block": "download-plugin",
})
resp, err := c.TribeRequest()
if err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Info("plugin not found")
return nil, fmt.Errorf("Plugin not found at %s: %s", c.URL, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if resp.Header.Get("Content-Type") != "application/x-gzip" {
logger.WithField("content-type", resp.Header.Get("Content-Type")).Error("Expected application/x-gzip")
}
dir, err := ioutil.TempDir("", "")
if err != nil {
logger.Error(err)
return nil, err
}
fpath := path.Join(dir, fmt.Sprintf("%s-%s-%d", plugin.TypeName(), plugin.Name(), plugin.Version()))
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700)
if err != nil {
logger.Error(err)
return nil, err
}
_, err = io.Copy(f, resp.Body)
f.Close()
if err != nil {
logger.Error(err)
return nil, err
}
return f, nil
}
return nil, fmt.Errorf("Status code not 200 was %v: %s", resp.StatusCode, c.URL)
}
func (w worker) createTask(taskID string, startOnCreate bool) {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "create-task",
})
done := false
_, err := w.taskManager.GetTask(taskID)
if err == nil {
return
}
for {
members, err := w.memberManager.GetTaskAgreementMembers()
if err != nil {
logger.Error(err)
continue
}
for _, member := range shuffle(members) {
uri := fmt.Sprintf("%s://%s:%s", member.GetRestProto(), member.GetAddr(), member.GetRestPort())
logger.Debugf("getting task %v from %v", taskID, uri)
c, err := client.New(uri, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.Error(err)
continue
}
taskResult := c.GetTask(taskID)
if taskResult.Err != nil {
logger.WithField("err", taskResult.Err.Error()).Debug("error getting task")
continue
}
// this block addresses the condition when we are creating and starting
// a task and the task is created but fails to start (deps were not yet met)
if startOnCreate {
if _, err := w.taskManager.GetTask(taskID); err == nil {
logger.Debug("starting task")
if errs := w.taskManager.StartTaskTribe(taskID); errs != nil {
fields := log.Fields{}
for idx, e := range errs {
fields[fmt.Sprintf("err-%d", idx)] = e.Error()
}
logger.WithFields(fields).Error("error starting task")
continue
}
done = true
break
}
}
logger.Debug("creating task")
opt := core.SetTaskID(taskID)
_, errs := w.taskManager.CreateTaskTribe(
getSchedule(taskResult.ScheduledTaskReturned.Schedule),
taskResult.Workflow,
startOnCreate,
opt)
if errs != nil && len(errs.Errors()) > 0 {
fields := log.Fields{}
for idx, e := range errs.Errors() {
fields[fmt.Sprintf("err-%d", idx)] = e
}
logger.WithFields(fields).Debug("error creating task")
continue
}
logger.Debugf("task created")
done = true
break
}
if done {
break
}
time.Sleep(500 * time.Millisecond)
}
}
func (w worker) startTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "start-task",
})
logger.Debug("starting task")
errs := w.taskManager.StartTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
if errs != nil {
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyRunning.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
} else {
logger.WithFields(err.Fields()).Info(err)
}
}
}
return errors.New("error starting task")
}
func (w worker) stopTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "stop-task",
})
errs := w.taskManager.StopTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyStopped.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
} else {
logger.WithFields(err.Fields()).Info(err)
}
}
return errors.New("error stopping task")
}
func (w worker) removeTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "remove-task",
})
err := w.taskManager.RemoveTaskTribe(taskID)
if err == nil {
return nil
}
logger.Info(err)
return err
}
func shuffle(m []Member) []Member {
result := make([]Member, len(m))
perm := rand.Perm(len(m))
for i, v := range perm {
result[v] = m[i]
}
return result
}
func (w worker) isPluginLoaded(n, t string, v int) bool {
catalog := w.pluginManager.PluginCatalog()
for _, item := range catalog {
if item.TypeName() == t &&
item.Name() == n &&
item.Version() == v {
w.logger.WithFields(log.Fields{
"name": n,
"version": v,
"type": t,
"_block": "is-plugin-loaded",
}).Debugf("plugin already loaded")
return true
}
}
return false
}
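// getSchedule rebuilds a scheduler schedule from the representation returned by another
// tribe member's REST API. Illustrative call (the literal below is an assumption, not
// taken from this file):
//
//	s := &core.Schedule{Type: "simple", Interval: "1s"}
//	sch := getSchedule(s) // a windowed schedule with a 1s interval
//
// Unknown or "streaming" schedule types, and invalid intervals, yield nil.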
func getSchedule(s *core.Schedule) schedule.Schedule {
logger := log.WithFields(log.Fields{
"_block": "get-schedule",
"schedule-type": s.Type,
})
switch s.Type {
case "simple", "windowed":
if s.Interval == "" {
logger.Error(core.ErrMissingScheduleInterval)
return nil
}
d, err := time.ParseDuration(s.Interval)
if err != nil {
logger.Error(err)
return nil
}
sch := schedule.NewWindowedSchedule(
d,
s.StartTimestamp,
s.StopTimestamp,
s.Count,
)
if err = sch.Validate(); err != nil {
logger.Error(err)
return nil
}
return sch
case "cron":
if s.Interval == "" {
logger.Error(core.ErrMissingScheduleInterval)
return nil
}
sch := schedule.NewCronSchedule(s.Interval)
if err := sch.Validate(); err != nil {
logger.Error(err)
return nil
}
return sch
case "streaming":
logger.Error("streaming is not yet available for tribe")
//todo
//return schedule.NewStreamingSchedule()
default:
logger.Error("unknown schedule type")
}
return nil
}
| {
logger.Error(err)
return err
} | conditional_block |
worker.go | /*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package worker
import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path"
"sync"
"time"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap/core/serror"
"github.com/intelsdi-x/snap/mgmt/rest/client"
"github.com/intelsdi-x/snap/pkg/schedule"
"github.com/intelsdi-x/snap/scheduler"
"github.com/intelsdi-x/snap/scheduler/wmap"
log "github.com/sirupsen/logrus"
)
const (
retryDelay = 500 * time.Millisecond
retryLimit = 20
)
const (
PluginLoadedType = iota
PluginUnloadedType
)
const (
TaskCreatedType = iota
TaskStoppedType
TaskStartedType
TaskRemovedType
)
var (
PluginRequestTypeLookup = map[PluginRequestType]string{
PluginLoadedType: "Loaded",
PluginUnloadedType: "Unloaded",
}
TaskRequestTypeLookup = map[TaskRequestType]string{
TaskCreatedType: "Created",
TaskStoppedType: "Stopped",
TaskStartedType: "Started",
TaskRemovedType: "Removed",
}
)
var TempPath = os.TempDir()
type PluginRequestType int
func (p PluginRequestType) String() string {
return PluginRequestTypeLookup[p]
}
type TaskRequestType int
func (t TaskRequestType) String() string {
return TaskRequestTypeLookup[t]
}
type PluginRequest struct {
Plugin core.Plugin
RequestType PluginRequestType
retryCount int
}
type TaskRequest struct {
Task Task
RequestType TaskRequestType
retryCount int
}
type Task struct {
ID string
StartOnCreate bool
}
type ManagesPlugins interface {
Load(*core.RequestedPlugin) (core.CatalogedPlugin, serror.SnapError)
Unload(plugin core.Plugin) (core.CatalogedPlugin, serror.SnapError)
PluginCatalog() core.PluginCatalog
}
type ManagesTasks interface {
GetTask(id string) (core.Task, error)
CreateTaskTribe(sch schedule.Schedule, wfMap *wmap.WorkflowMap, startOnCreate bool, opts ...core.TaskOption) (core.Task, core.TaskErrors)
StopTaskTribe(id string) []serror.SnapError
StartTaskTribe(id string) []serror.SnapError
RemoveTaskTribe(id string) error
}
type getsMembers interface {
GetPluginAgreementMembers() ([]Member, error)
GetTaskAgreementMembers() ([]Member, error)
GetRequestPassword() string
}
type Member interface {
GetAddr() net.IP
GetRestPort() string
GetRestProto() string
GetRestInsecureSkipVerify() bool
GetName() string
}
// newWorker builds a tribe worker wired to the shared plugin and task queues.
func newWorker(id int,
pluginQueue chan PluginRequest,
taskQueue chan TaskRequest,
quitChan chan struct{},
wg *sync.WaitGroup,
pm ManagesPlugins,
tm ManagesTasks,
mm getsMembers) worker {
logger := log.WithFields(log.Fields{
"_module": "worker",
"worker-id": id,
})
worker := worker{
pluginManager: pm,
taskManager: tm,
memberManager: mm,
id: id,
pluginWork: pluginQueue,
taskWork: taskQueue,
waitGroup: wg,
quitChan: quitChan,
logger: logger,
}
return worker
}
type worker struct {
pluginManager ManagesPlugins
memberManager getsMembers
taskManager ManagesTasks
id int
pluginWork chan PluginRequest
taskWork chan TaskRequest
quitChan chan struct{}
waitGroup *sync.WaitGroup
logger *log.Entry
}
func DispatchWorkers(nworkers int, pluginQueue chan PluginRequest, taskQueue chan TaskRequest, quitChan chan struct{}, workerWaitGroup *sync.WaitGroup, cp ManagesPlugins, tm ManagesTasks, mm getsMembers) {
for i := 0; i < nworkers; i++ {
log.WithFields(log.Fields{
"_module": "worker",
"_block": "dispatch-workers",
}).Infof("dispatching tribe worker-%d", i+1)
worker := newWorker(i+1, pluginQueue, taskQueue, quitChan, workerWaitGroup, cp, tm, mm)
worker.start()
}
}
// start launches the task and plugin worker goroutines.
func (w worker) start() {
logger := w.logger.WithFields(log.Fields{"_block": "start"})
// task worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting task worker")
for {
select {
case work := <-w.taskWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"task": work.Task.ID,
"request-type": work.RequestType.String(),
"retries": work.retryCount,
})
logger.Debug("received task work")
if work.RequestType == TaskStartedType {
if err := w.startTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task start request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskStoppedType {
if err := w.stopTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task stop request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskCreatedType {
w.createTask(work.Task.ID, work.Task.StartOnCreate)
}
if work.RequestType == TaskRemovedType {
if err := w.removeTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
case <-w.quitChan:
logger.Infof("stopping tribe worker")
return
}
}
}()
// plugin worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting plugin worker")
for {
select {
case work := <-w.pluginWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"plugin-name": work.Plugin.Name(),
"plugin-version": work.Plugin.Version(),
"plugin-type": work.Plugin.TypeName(),
"request-type": work.RequestType.String(),
})
logger.Debug("received plugin work")
if work.RequestType == PluginLoadedType {
if err := w.loadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
if work.RequestType == PluginUnloadedType {
if err := w.unloadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
case <-w.quitChan:
w.logger.Debug("stop tribe plugin worker")
return
}
}
}()
}
func (w worker) unloadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "unload-plugin",
})
if !w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
if _, err := w.pluginManager.Unload(plugin); err != nil {
logger.WithField("err", err).Info("failed to unload plugin")
return err
}
return nil
}
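// loadPlugin downloads the plugin binary from a member of the plugin agreement (members
// are tried in random order) and loads it through the local plugin manager. It is a
// no-op when the plugin is already in the catalog.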
func (w worker) loadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "load-plugin",
})
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
members, err := w.memberManager.GetPluginAgreementMembers()
if err != nil {
logger.Error(err)
return err
}
for _, member := range shuffle(members) {
url := fmt.Sprintf("%s://%s:%s/v1/plugins/%s/%s/%d?download=true", member.GetRestProto(), member.GetAddr(), member.GetRestPort(), plugin.TypeName(), plugin.Name(), plugin.Version())
c, err := client.New(url, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.WithFields(log.Fields{
"err": err,
"url": url,
}).Info("unable to create client")
continue
}
f, err := w.downloadPlugin(c, plugin)
// If we can't download from this member, try the next
if err != nil {
logger.Error(err)
continue
}
rp, err := core.NewRequestedPlugin(f.Name(), TempPath, nil)
if err != nil {
logger.Error(err)
return err
}
_, err = w.pluginManager.Load(rp)
if err != nil {
logger.Error(err)
return err
}
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
return errors.New("failed to load plugin")
}
return errors.New("failed to find a member with the plugin")
}
func (w worker) downloadPlugin(c *client.Client, plugin core.Plugin) (*os.File, error) {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"url": c.URL,
"_block": "download-plugin",
})
resp, err := c.TribeRequest()
if err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Info("plugin not found")
return nil, fmt.Errorf("Plugin not found at %s: %s", c.URL, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if resp.Header.Get("Content-Type") != "application/x-gzip" {
logger.WithField("content-type", resp.Header.Get("Content-Type")).Error("Expected application/x-gzip")
}
dir, err := ioutil.TempDir("", "")
if err != nil {
logger.Error(err)
return nil, err
}
fpath := path.Join(dir, fmt.Sprintf("%s-%s-%d", plugin.TypeName(), plugin.Name(), plugin.Version()))
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700)
if err != nil {
logger.Error(err)
return nil, err
}
_, err = io.Copy(f, resp.Body)
f.Close()
if err != nil {
logger.Error(err)
return nil, err
}
return f, nil
}
return nil, fmt.Errorf("Status code not 200 was %v: %s", resp.StatusCode, c.URL)
}
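// createTask polls the task-agreement members until the task definition can be fetched
// and created locally. With startOnCreate set it also covers the case where the task
// already exists locally but could not be started earlier because its dependencies were
// not yet met.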
func (w worker) createTask(taskID string, startOnCreate bool) {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "create-task",
})
done := false
_, err := w.taskManager.GetTask(taskID)
if err == nil {
return
}
for {
members, err := w.memberManager.GetTaskAgreementMembers()
if err != nil {
logger.Error(err)
continue
}
for _, member := range shuffle(members) {
uri := fmt.Sprintf("%s://%s:%s", member.GetRestProto(), member.GetAddr(), member.GetRestPort())
logger.Debugf("getting task %v from %v", taskID, uri)
c, err := client.New(uri, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.Error(err)
continue
}
taskResult := c.GetTask(taskID)
if taskResult.Err != nil {
logger.WithField("err", taskResult.Err.Error()).Debug("error getting task")
continue
}
// this block addresses the condition when we are creating and starting
// a task and the task is created but fails to start (deps were not yet met)
if startOnCreate {
if _, err := w.taskManager.GetTask(taskID); err == nil {
logger.Debug("starting task")
if errs := w.taskManager.StartTaskTribe(taskID); errs != nil {
fields := log.Fields{}
for idx, e := range errs {
fields[fmt.Sprintf("err-%d", idx)] = e.Error()
}
logger.WithFields(fields).Error("error starting task")
continue
}
done = true
break
}
}
logger.Debug("creating task")
opt := core.SetTaskID(taskID)
_, errs := w.taskManager.CreateTaskTribe(
getSchedule(taskResult.ScheduledTaskReturned.Schedule),
taskResult.Workflow,
startOnCreate,
opt)
if errs != nil && len(errs.Errors()) > 0 {
fields := log.Fields{}
for idx, e := range errs.Errors() {
fields[fmt.Sprintf("err-%d", idx)] = e
}
logger.WithFields(fields).Debug("error creating task")
continue
}
logger.Debugf("task created")
done = true
break
}
if done {
break
}
time.Sleep(500 * time.Millisecond)
}
}
func (w worker) startTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "start-task",
})
logger.Debug("starting task")
errs := w.taskManager.StartTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
if errs != nil {
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyRunning.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
} else {
logger.WithFields(err.Fields()).Info(err)
}
}
}
return errors.New("error starting task")
}
func (w worker) stopTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "stop-task",
})
errs := w.taskManager.StopTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyStopped.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
} else {
logger.WithFields(err.Fields()).Info(err)
}
}
return errors.New("error stopping task")
}
func (w worker) removeTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "remove-task",
})
err := w.taskManager.RemoveTaskTribe(taskID)
if err == nil {
return nil
}
logger.Info(err)
return err
}
func shuffle(m []Member) []Member {
result := make([]Member, len(m))
perm := rand.Perm(len(m))
for i, v := range perm {
result[v] = m[i]
}
return result
}
func (w worker) isPluginLoaded(n, t string, v int) bool {
catalog := w.pluginManager.PluginCatalog()
for _, item := range catalog {
if item.TypeName() == t &&
item.Name() == n &&
item.Version() == v {
w.logger.WithFields(log.Fields{
"name": n,
"version": v,
"type": t,
"_block": "is-plugin-loaded",
}).Debugf("plugin already loaded")
return true
}
}
return false
}
func | (s *core.Schedule) schedule.Schedule {
logger := log.WithFields(log.Fields{
"_block": "get-schedule",
"schedule-type": s.Type,
})
switch s.Type {
case "simple", "windowed":
if s.Interval == "" {
logger.Error(core.ErrMissingScheduleInterval)
return nil
}
d, err := time.ParseDuration(s.Interval)
if err != nil {
logger.Error(err)
return nil
}
sch := schedule.NewWindowedSchedule(
d,
s.StartTimestamp,
s.StopTimestamp,
s.Count,
)
if err = sch.Validate(); err != nil {
logger.Error(err)
return nil
}
return sch
case "cron":
if s.Interval == "" {
logger.Error(core.ErrMissingScheduleInterval)
return nil
}
sch := schedule.NewCronSchedule(s.Interval)
if err := sch.Validate(); err != nil {
logger.Error(err)
return nil
}
return sch
case "streaming":
logger.Error("streaming is not yet available for tribe")
//todo
//return schedule.NewStreamingSchedule()
default:
logger.Error("unknown schedule type")
}
return nil
}
| getSchedule | identifier_name |
worker.go | /*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package worker
import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path"
"sync"
"time"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap/core/serror"
"github.com/intelsdi-x/snap/mgmt/rest/client"
"github.com/intelsdi-x/snap/pkg/schedule"
"github.com/intelsdi-x/snap/scheduler"
"github.com/intelsdi-x/snap/scheduler/wmap"
log "github.com/sirupsen/logrus"
)
const (
retryDelay = 500 * time.Millisecond
retryLimit = 20
)
const (
PluginLoadedType = iota
PluginUnloadedType
)
const (
TaskCreatedType = iota
TaskStoppedType
TaskStartedType
TaskRemovedType
)
var (
PluginRequestTypeLookup = map[PluginRequestType]string{
PluginLoadedType: "Loaded",
PluginUnloadedType: "Unloaded",
}
TaskRequestTypeLookup = map[TaskRequestType]string{
TaskCreatedType: "Created",
TaskStoppedType: "Stopped",
TaskStartedType: "Started",
TaskRemovedType: "Removed",
}
)
var TempPath = os.TempDir()
type PluginRequestType int
func (p PluginRequestType) String() string {
return PluginRequestTypeLookup[p]
}
type TaskRequestType int
func (t TaskRequestType) String() string {
return TaskRequestTypeLookup[t]
}
type PluginRequest struct {
Plugin core.Plugin
RequestType PluginRequestType
retryCount int
}
type TaskRequest struct {
Task Task
RequestType TaskRequestType
retryCount int
}
type Task struct {
ID string
StartOnCreate bool
}
type ManagesPlugins interface {
Load(*core.RequestedPlugin) (core.CatalogedPlugin, serror.SnapError)
Unload(plugin core.Plugin) (core.CatalogedPlugin, serror.SnapError)
PluginCatalog() core.PluginCatalog
}
type ManagesTasks interface {
GetTask(id string) (core.Task, error)
CreateTaskTribe(sch schedule.Schedule, wfMap *wmap.WorkflowMap, startOnCreate bool, opts ...core.TaskOption) (core.Task, core.TaskErrors)
StopTaskTribe(id string) []serror.SnapError
StartTaskTribe(id string) []serror.SnapError
RemoveTaskTribe(id string) error
}
type getsMembers interface {
GetPluginAgreementMembers() ([]Member, error)
GetTaskAgreementMembers() ([]Member, error)
GetRequestPassword() string
}
type Member interface { | GetAddr() net.IP
GetRestPort() string
GetRestProto() string
GetRestInsecureSkipVerify() bool
GetName() string
}
// newWorker builds a tribe worker wired to the shared plugin and task queues.
func newWorker(id int,
pluginQueue chan PluginRequest,
taskQueue chan TaskRequest,
quitChan chan struct{},
wg *sync.WaitGroup,
pm ManagesPlugins,
tm ManagesTasks,
mm getsMembers) worker {
logger := log.WithFields(log.Fields{
"_module": "worker",
"worker-id": id,
})
worker := worker{
pluginManager: pm,
taskManager: tm,
memberManager: mm,
id: id,
pluginWork: pluginQueue,
taskWork: taskQueue,
waitGroup: wg,
quitChan: quitChan,
logger: logger,
}
return worker
}
type worker struct {
pluginManager ManagesPlugins
memberManager getsMembers
taskManager ManagesTasks
id int
pluginWork chan PluginRequest
taskWork chan TaskRequest
quitChan chan struct{}
waitGroup *sync.WaitGroup
logger *log.Entry
}
func DispatchWorkers(nworkers int, pluginQueue chan PluginRequest, taskQueue chan TaskRequest, quitChan chan struct{}, workerWaitGroup *sync.WaitGroup, cp ManagesPlugins, tm ManagesTasks, mm getsMembers) {
for i := 0; i < nworkers; i++ {
log.WithFields(log.Fields{
"_module": "worker",
"_block": "dispatch-workers",
}).Infof("dispatching tribe worker-%d", i+1)
worker := newWorker(i+1, pluginQueue, taskQueue, quitChan, workerWaitGroup, cp, tm, mm)
worker.start()
}
}
// start launches the task and plugin worker goroutines.
func (w worker) start() {
logger := w.logger.WithFields(log.Fields{"_block": "start"})
// task worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting task worker")
for {
select {
case work := <-w.taskWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"task": work.Task.ID,
"request-type": work.RequestType.String(),
"retries": work.retryCount,
})
logger.Debug("received task work")
if work.RequestType == TaskStartedType {
if err := w.startTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task start request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskStoppedType {
if err := w.stopTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task stop request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskCreatedType {
w.createTask(work.Task.ID, work.Task.StartOnCreate)
}
if work.RequestType == TaskRemovedType {
if err := w.removeTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
case <-w.quitChan:
logger.Infof("stopping tribe worker")
return
}
}
}()
// plugin worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting plugin worker")
for {
select {
case work := <-w.pluginWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"plugin-name": work.Plugin.Name(),
"plugin-version": work.Plugin.Version(),
"plugin-type": work.Plugin.TypeName(),
"request-type": work.RequestType.String(),
})
logger.Debug("received plugin work")
if work.RequestType == PluginLoadedType {
if err := w.loadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
if work.RequestType == PluginUnloadedType {
if err := w.unloadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
case <-w.quitChan:
w.logger.Debug("stop tribe plugin worker")
return
}
}
}()
}
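// unloadPlugin removes the plugin via the local plugin manager; it is a no-op when the
// plugin is not in the catalog.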
func (w worker) unloadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "unload-plugin",
})
if !w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
if _, err := w.pluginManager.Unload(plugin); err != nil {
logger.WithField("err", err).Info("failed to unload plugin")
return err
}
return nil
}
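// loadPlugin ensures the given plugin is loaded locally: if it is not already in the catalog, it tries each plugin-agreement member in random order, downloads the plugin and loads it through the plugin manager.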
func (w worker) loadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "load-plugin",
})
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
members, err := w.memberManager.GetPluginAgreementMembers()
if err != nil {
logger.Error(err)
return err
}
for _, member := range shuffle(members) {
url := fmt.Sprintf("%s://%s:%s/v1/plugins/%s/%s/%d?download=true", member.GetRestProto(), member.GetAddr(), member.GetRestPort(), plugin.TypeName(), plugin.Name(), plugin.Version())
c, err := client.New(url, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.WithFields(log.Fields{
"err": err,
"url": url,
}).Info("unable to create client")
continue
}
f, err := w.downloadPlugin(c, plugin)
// If we can't download from this member, try the next
if err != nil {
logger.Error(err)
continue
}
rp, err := core.NewRequestedPlugin(f.Name(), TempPath, nil)
if err != nil {
logger.Error(err)
return err
}
_, err = w.pluginManager.Load(rp)
if err != nil {
logger.Error(err)
return err
}
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
return errors.New("failed to load plugin")
}
return errors.New("failed to find a member with the plugin")
}
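// downloadPlugin fetches the plugin binary from the given member over the tribe REST API and writes it to a temporary file, returning the file handle (the caller uses its path).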
func (w worker) downloadPlugin(c *client.Client, plugin core.Plugin) (*os.File, error) {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"url": c.URL,
"_block": "download-plugin",
})
resp, err := c.TribeRequest()
if err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Info("plugin not found")
return nil, fmt.Errorf("Plugin not found at %s: %s", c.URL, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if resp.Header.Get("Content-Type") != "application/x-gzip" {
logger.WithField("content-type", resp.Header.Get("Content-Type")).Error("Expected application/x-gzip")
}
dir, err := ioutil.TempDir("", "")
if err != nil {
logger.Error(err)
return nil, err
}
fpath := path.Join(dir, fmt.Sprintf("%s-%s-%d", plugin.TypeName(), plugin.Name(), plugin.Version()))
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700)
if err != nil {
logger.Error(err)
return nil, err
}
		if _, err := io.Copy(f, resp.Body); err != nil {
			logger.Error(err)
			return nil, err
		}
		f.Close()
return f, nil
}
return nil, fmt.Errorf("Status code not 200 was %v: %s", resp.StatusCode, c.URL)
}
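// createTask fetches the task definition from a task-agreement member and creates it locally (optionally starting it), retrying every 500ms until it succeeds.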
func (w worker) createTask(taskID string, startOnCreate bool) {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "create-task",
})
done := false
_, err := w.taskManager.GetTask(taskID)
if err == nil {
return
}
for {
members, err := w.memberManager.GetTaskAgreementMembers()
if err != nil {
logger.Error(err)
continue
}
for _, member := range shuffle(members) {
uri := fmt.Sprintf("%s://%s:%s", member.GetRestProto(), member.GetAddr(), member.GetRestPort())
logger.Debugf("getting task %v from %v", taskID, uri)
c, err := client.New(uri, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.Error(err)
continue
}
taskResult := c.GetTask(taskID)
if taskResult.Err != nil {
logger.WithField("err", taskResult.Err.Error()).Debug("error getting task")
continue
}
// This block handles the create-and-start case: the task may already have been
// created locally but failed to start (e.g. its dependencies were not yet met),
// so try to start it before attempting to create it again.
if startOnCreate {
if _, err := w.taskManager.GetTask(taskID); err == nil {
logger.Debug("starting task")
if errs := w.taskManager.StartTaskTribe(taskID); errs != nil {
fields := log.Fields{}
for idx, e := range errs {
fields[fmt.Sprintf("err-%d", idx)] = e.Error()
}
logger.WithFields(fields).Error("error starting task")
continue
}
done = true
break
}
}
logger.Debug("creating task")
opt := core.SetTaskID(taskID)
_, errs := w.taskManager.CreateTaskTribe(
getSchedule(taskResult.ScheduledTaskReturned.Schedule),
taskResult.Workflow,
startOnCreate,
opt)
if errs != nil && len(errs.Errors()) > 0 {
fields := log.Fields{}
for idx, e := range errs.Errors() {
fields[fmt.Sprintf("err-%d", idx)] = e
}
logger.WithFields(fields).Debug("error creating task")
continue
}
logger.Debugf("task created")
done = true
break
}
if done {
break
}
time.Sleep(500 * time.Millisecond)
}
}
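// startTask starts the task with the given ID, treating "task already running" as success.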
func (w worker) startTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "start-task",
})
logger.Debug("starting task")
errs := w.taskManager.StartTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
if errs != nil {
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyRunning.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
} else {
logger.WithFields(err.Fields()).Info(err)
}
}
}
return errors.New("error starting task")
}
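// stopTask stops the task with the given ID, treating "task already stopped" as success.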
func (w worker) stopTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "stop-task",
})
errs := w.taskManager.StopTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyStopped.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
} else {
logger.WithFields(err.Fields()).Info(err)
}
}
return errors.New("error stopping task")
}
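// removeTask removes the task with the given ID from the local task manager.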
func (w worker) removeTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "remove-task",
})
err := w.taskManager.RemoveTaskTribe(taskID)
if err == nil {
return nil
}
logger.Info(err)
return err
}
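// shuffle returns the members rearranged according to a uniformly random permutation (rand.Perm).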
func shuffle(m []Member) []Member {
result := make([]Member, len(m))
perm := rand.Perm(len(m))
for i, v := range perm {
result[v] = m[i]
}
return result
}
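// isPluginLoaded reports whether a plugin with the given name, type and version is present in the plugin catalog.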
func (w worker) isPluginLoaded(n, t string, v int) bool {
catalog := w.pluginManager.PluginCatalog()
for _, item := range catalog {
if item.TypeName() == t &&
item.Name() == n &&
item.Version() == v {
w.logger.WithFields(log.Fields{
"name": n,
"version": v,
"type": t,
"_block": "is-plugin-loaded",
}).Debugf("plugin already loaded")
return true
}
}
return false
}
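// getSchedule converts a schedule received from a tribe member into a scheduler schedule, returning nil (after logging) for missing intervals, validation failures or unsupported types.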
func getSchedule(s *core.Schedule) schedule.Schedule {
logger := log.WithFields(log.Fields{
"_block": "get-schedule",
"schedule-type": s.Type,
})
switch s.Type {
case "simple", "windowed":
if s.Interval == "" {
logger.Error(core.ErrMissingScheduleInterval)
return nil
}
d, err := time.ParseDuration(s.Interval)
if err != nil {
logger.Error(err)
return nil
}
sch := schedule.NewWindowedSchedule(
d,
s.StartTimestamp,
s.StopTimestamp,
s.Count,
)
if err = sch.Validate(); err != nil {
logger.Error(err)
return nil
}
return sch
case "cron":
if s.Interval == "" {
logger.Error(core.ErrMissingScheduleInterval)
return nil
}
sch := schedule.NewCronSchedule(s.Interval)
if err := sch.Validate(); err != nil {
logger.Error(err)
return nil
}
return sch
case "streaming":
logger.Error("streaming is not yet available for tribe")
//todo
//return schedule.NewStreamingSchedule()
default:
logger.Error("unknown schedule type")
}
return nil
} | random_line_split |
|
selection_test.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package client
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-sdk-go/api/apifabclient"
"github.com/hyperledger/fabric-sdk-go/api/apilogging"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/mocks"
sdkpeer "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/peer"
"github.com/hyperledger/fabric-sdk-go/pkg/logging"
bccspFactory "github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/core/common/ccprovider"
"github.com/hyperledger/fabric/protos/common"
configmanagerApi "github.com/securekey/fabric-snaps/configmanager/api"
"github.com/securekey/fabric-snaps/configmanager/pkg/mgmt"
configmgmtService "github.com/securekey/fabric-snaps/configmanager/pkg/service"
mockstub "github.com/securekey/fabric-snaps/mocks/mockstub"
"github.com/securekey/fabric-snaps/transactionsnap/api"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/channelpeer"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/pgresolver"
config "github.com/securekey/fabric-snaps/transactionsnap/cmd/config"
)
var configImp = mocks.NewMockConfig()
var channelID = "testChannel"
var mspID = "Org1MSP"
const (
org1 = "Org1MSP"
org2 = "Org2MSP"
org3 = "Org3MSP"
org4 = "Org4MSP"
org5 = "Org5MSP"
org6 = "Org6MSP"
org7 = "Org7MSP"
org8 = "Org8MSP"
org9 = "Org9MSP"
org10 = "Org10MSP"
)
const (
channel1 = "channel1"
channel2 = "channel2"
)
const (
cc1 = "cc1"
cc2 = "cc2"
cc3 = "cc3"
)
const (
o1 = iota
o2
o3
o4
o5
)
var p1 = peer("peer1", org1)
var p2 = peer("peer2", org1)
var p3 = peer("peer3", org2)
var p4 = peer("peer4", org2)
var p5 = peer("peer5", org3)
var p6 = peer("peer6", org3)
var p7 = peer("peer7", org3)
var p8 = peer("peer8", org4)
var p9 = peer("peer9", org4)
var p10 = peer("peer10", org4)
var p11 = peer("peer11", org5)
var p12 = peer("peer12", org5)
type sampleConfig struct {
api.Config
}
// GetMspConfigPath is overridden to return a relative path so that a separate core.yaml is not needed just for this purpose
func (c *sampleConfig) GetMspConfigPath() string {
return "../sampleconfig/msp"
}
func TestMain(m *testing.M) {
opts := &bccspFactory.FactoryOpts{
ProviderName: "SW",
SwOpts: &bccspFactory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: false,
FileKeystore: &bccspFactory.FileKeystoreOpts{KeyStorePath: "../sampleconfig/msp/keystore/"},
},
}
bccspFactory.InitFactories(opts)
//
configData, err := ioutil.ReadFile("../sampleconfig/config.yaml")
if err != nil {
panic(fmt.Sprintf("File error: %v\n", err))
}
configMsg := &configmanagerApi.ConfigMessage{MspID: mspID,
Peers: []configmanagerApi.PeerConfig{configmanagerApi.PeerConfig{
PeerID: "jdoe", App: []configmanagerApi.AppConfig{
configmanagerApi.AppConfig{AppName: "txnsnap", Config: string(configData)}}}}}
stub := getMockStub()
configBytes, err := json.Marshal(configMsg)
if err != nil {
panic(fmt.Sprintf("Cannot Marshal %s\n", err))
}
//upload valid message to HL
err = uplaodConfigToHL(stub, configBytes)
if err != nil {
panic(fmt.Sprintf("Cannot upload %s\n", err))
}
configmgmtService.Initialize(stub, mspID)
config, err := config.NewConfig("../sampleconfig", channelID)
if err != nil {
panic(fmt.Sprintf("Error initializing config: %s", err))
}
_, err = GetInstance("testChannel", &sampleConfig{config})
if err != nil {
panic(fmt.Sprintf("Client GetInstance return error %v", err))
}
os.Exit(m.Run())
}
func getMockStub() *mockstub.MockStub {
stub := mockstub.NewMockStub("testConfigState", nil)
stub.SetMspID("Org1MSP")
stub.MockTransactionStart("startTxn")
stub.ChannelID = channelID
return stub
}
// uplaodConfigToHL uploads the key & config to the repository
func uplaodConfigToHL(stub *mockstub.MockStub, config []byte) error {
configManager := mgmt.NewConfigManager(stub)
if configManager == nil {
return fmt.Errorf("Cannot instantiate config manager")
}
err := configManager.Save(config)
return err
}
func TestGetEndorsersForChaincodeOneCC(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1)) = Org1
expected := []api.PeerGroup{
// Org1
pg(p1), pg(p2),
}
verify(t, service, expected, channel1, cc1)
}
func TestGetEndorsersForChaincodeTwoCCs(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
}
func TestGetEndorsersForChaincodeTwoCCsTwoChannels(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8).
add(channel2, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()).
add(channel2, cc1, getPolicy3()).
add(channel2, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP(),
)
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
// Channel2(Policy(cc1) and Policy(cc2)) = Org5 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected = []api.PeerGroup{
// Org5 and Org2
pg(p11, p1, p3), pg(p11, p1, p4), pg(p11, p2, p3), pg(p11, p2, p4),
pg(p12, p1, p3), pg(p12, p1, p4), pg(p12, p2, p3), pg(p12, p2, p4),
// Org5 and Org3
pg(p11, p1, p5), pg(p11, p1, p6), pg(p11, p1, p7), pg(p11, p2, p5), pg(p11, p2, p6), pg(p11, p2, p7),
pg(p12, p1, p5), pg(p12, p1, p6), pg(p12, p1, p7), pg(p12, p2, p5), pg(p12, p2, p6), pg(p12, p2, p7),
// Org5 and Org4
pg(p11, p1, p8), pg(p11, p1, p9), pg(p11, p1, p10), pg(p11, p2, p8), pg(p11, p2, p9), pg(p11, p2, p10),
pg(p12, p1, p8), pg(p12, p1, p9), pg(p12, p1, p10), pg(p12, p2, p8), pg(p12, p2, p9), pg(p12, p2, p10),
// Org5 and Org3 and Org4
pg(p11, p5, p8), pg(p11, p5, p9), pg(p11, p5, p10), pg(p11, p6, p8), pg(p11, p6, p9), pg(p11, p6, p10), pg(p11, p7, p8), pg(p11, p7, p9), pg(p11, p7, p10),
pg(p12, p5, p8), pg(p12, p5, p9), pg(p12, p5, p10), pg(p12, p6, p8), pg(p12, p6, p9), pg(p12, p6, p10), pg(p12, p7, p8), pg(p12, p7, p9), pg(p12, p7, p10),
}
verify(t, service, expected, channel2, cc1, cc2)
}
func verify(t *testing.T, service api.SelectionService, expectedPeerGroups []api.PeerGroup, channelID string, chaincodeIDs ...string) {
// Set the log level to WARNING since the following spits out too much info in DEBUG
module := "pg-resolver"
level := logging.GetLevel(module)
logging.SetLevel(module, apilogging.WARNING)
defer logging.SetLevel(module, level)
for i := 0; i < len(expectedPeerGroups); i++ {
peers, err := service.GetEndorsersForChaincode(channelID, nil, chaincodeIDs...)
if err != nil {
t.Fatalf("error getting endorsers: %s", err)
}
if !containsPeerGroup(expectedPeerGroups, peers) {
t.Fatalf("peer group %s is not one of the expected peer groups: %v", toString(peers), expectedPeerGroups)
}
}
}
func containsPeerGroup(groups []api.PeerGroup, peers []apifabclient.Peer) bool {
for _, g := range groups {
if containsAllPeers(peers, g) {
return true
}
}
return false
}
func containsAllPeers(peers []apifabclient.Peer, pg api.PeerGroup) bool {
if len(peers) != len(pg.Peers()) {
return false
}
for _, peer := range peers {
if !containsPeer(pg.Peers(), peer) {
return false
}
}
return true
}
func containsPeer(peers []apifabclient.Peer, peer apifabclient.Peer) bool {
for _, p := range peers {
if p.URL() == peer.URL() {
return true
}
}
return false
}
func pg(peers ...api.ChannelPeer) api.PeerGroup {
return pgresolver.NewPeerGroup(peers...)
}
func peer(name string, mspID string) api.ChannelPeer {
peer, err := sdkpeer.New(configImp, sdkpeer.WithURL(name+":7051"))
if err != nil {
panic(fmt.Sprintf("Failed to create peer: %v)", err))
}
peer.SetName(name)
peer.SetMSPID(mspID)
return channelpeer.New(peer, "", 0, nil)
}
func newMockSelectionService(membershipManager api.MembershipManager, ccDataProvider api.CCDataProvider, lbp api.LoadBalancePolicy) api.SelectionService {
return &selectionServiceImpl{
membershipManager: membershipManager,
ccDataProvider: ccDataProvider,
pgLBP: lbp,
pgResolvers: make(map[string]api.PeerGroupResolver),
}
}
type mockMembershipManager struct {
peerConfigs map[string][]api.ChannelPeer
}
func (m *mockMembershipManager) GetPeersOfChannel(channelID string) api.ChannelMembership {
return api.ChannelMembership{Peers: m.peerConfigs[channelID]}
}
func newMockMembershipManager() *mockMembershipManager {
return &mockMembershipManager{peerConfigs: make(map[string][]api.ChannelPeer)}
}
func (m *mockMembershipManager) add(channelID string, peers ...api.ChannelPeer) *mockMembershipManager {
m.peerConfigs[channelID] = []api.ChannelPeer(peers)
return m
}
type mockCCDataProvider struct {
ccData map[string]*ccprovider.ChaincodeData
}
func newMockCCDataProvider() *mockCCDataProvider {
return &mockCCDataProvider{ccData: make(map[string]*ccprovider.ChaincodeData)}
}
func (p *mockCCDataProvider) QueryChaincodeData(channelID string, chaincodeID string) (*ccprovider.ChaincodeData, error) {
return p.ccData[newResolverKey(channelID, chaincodeID).String()], nil
}
func (p *mockCCDataProvider) add(channelID string, chaincodeID string, policy *ccprovider.ChaincodeData) *mockCCDataProvider {
p.ccData[newResolverKey(channelID, chaincodeID).String()] = policy
return p
}
// Policy: Org1
func getPolicy1() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: signedBy[o1],
Identities: identities,
})
}
// Policy: 1 of [(2 of [Org1, Org2]),(2 of [Org1, Org3, Org4])]
func getPolicy2() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1, org2, org3, org4)
if err != nil |
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: pgresolver.NewNOutOfPolicy(1,
pgresolver.NewNOutOfPolicy(2,
signedBy[o1],
signedBy[o2],
),
pgresolver.NewNOutOfPolicy(2,
signedBy[o1],
signedBy[o3],
signedBy[o4],
),
),
Identities: identities,
})
}
// Policy: Org5
func getPolicy3() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1, org2, org3, org4, org5)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: signedBy[o5],
Identities: identities,
})
}
func newCCData(sigPolicyEnv *common.SignaturePolicyEnvelope) *ccprovider.ChaincodeData {
policyBytes, err := proto.Marshal(sigPolicyEnv)
if err != nil {
panic(err)
}
return &ccprovider.ChaincodeData{Policy: policyBytes}
}
func toString(peers []apifabclient.Peer) string {
str := "["
for i, p := range peers {
str += p.URL()
if i+1 < len(peers) {
str += ","
}
}
str += "]"
return str
}
| {
panic(err)
} | conditional_block |
selection_test.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package client
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-sdk-go/api/apifabclient"
"github.com/hyperledger/fabric-sdk-go/api/apilogging"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/mocks"
sdkpeer "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/peer"
"github.com/hyperledger/fabric-sdk-go/pkg/logging"
bccspFactory "github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/core/common/ccprovider"
"github.com/hyperledger/fabric/protos/common"
configmanagerApi "github.com/securekey/fabric-snaps/configmanager/api"
"github.com/securekey/fabric-snaps/configmanager/pkg/mgmt"
configmgmtService "github.com/securekey/fabric-snaps/configmanager/pkg/service"
mockstub "github.com/securekey/fabric-snaps/mocks/mockstub"
"github.com/securekey/fabric-snaps/transactionsnap/api"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/channelpeer"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/pgresolver"
config "github.com/securekey/fabric-snaps/transactionsnap/cmd/config"
)
var configImp = mocks.NewMockConfig()
var channelID = "testChannel"
var mspID = "Org1MSP"
const (
org1 = "Org1MSP"
org2 = "Org2MSP"
org3 = "Org3MSP"
org4 = "Org4MSP"
org5 = "Org5MSP"
org6 = "Org6MSP"
org7 = "Org7MSP"
org8 = "Org8MSP"
org9 = "Org9MSP"
org10 = "Org10MSP"
)
const (
channel1 = "channel1"
channel2 = "channel2"
)
const (
cc1 = "cc1"
cc2 = "cc2"
cc3 = "cc3"
)
const (
o1 = iota
o2
o3
o4
o5
)
var p1 = peer("peer1", org1)
var p2 = peer("peer2", org1)
var p3 = peer("peer3", org2)
var p4 = peer("peer4", org2)
var p5 = peer("peer5", org3)
var p6 = peer("peer6", org3)
var p7 = peer("peer7", org3)
var p8 = peer("peer8", org4)
var p9 = peer("peer9", org4)
var p10 = peer("peer10", org4)
var p11 = peer("peer11", org5)
var p12 = peer("peer12", org5)
type sampleConfig struct {
api.Config
}
// GetMspConfigPath is overridden to return a relative path so that a separate core.yaml is not needed just for this purpose
func (c *sampleConfig) GetMspConfigPath() string {
return "../sampleconfig/msp"
}
func TestMain(m *testing.M) {
opts := &bccspFactory.FactoryOpts{
ProviderName: "SW",
SwOpts: &bccspFactory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: false,
FileKeystore: &bccspFactory.FileKeystoreOpts{KeyStorePath: "../sampleconfig/msp/keystore/"},
},
}
bccspFactory.InitFactories(opts)
//
configData, err := ioutil.ReadFile("../sampleconfig/config.yaml")
if err != nil {
panic(fmt.Sprintf("File error: %v\n", err))
}
configMsg := &configmanagerApi.ConfigMessage{MspID: mspID,
Peers: []configmanagerApi.PeerConfig{configmanagerApi.PeerConfig{
PeerID: "jdoe", App: []configmanagerApi.AppConfig{
configmanagerApi.AppConfig{AppName: "txnsnap", Config: string(configData)}}}}}
stub := getMockStub()
configBytes, err := json.Marshal(configMsg)
if err != nil {
panic(fmt.Sprintf("Cannot Marshal %s\n", err))
}
//upload valid message to HL
err = uplaodConfigToHL(stub, configBytes)
if err != nil {
panic(fmt.Sprintf("Cannot upload %s\n", err))
}
configmgmtService.Initialize(stub, mspID)
config, err := config.NewConfig("../sampleconfig", channelID)
if err != nil {
panic(fmt.Sprintf("Error initializing config: %s", err))
}
_, err = GetInstance("testChannel", &sampleConfig{config})
if err != nil {
panic(fmt.Sprintf("Client GetInstance return error %v", err))
}
os.Exit(m.Run())
}
func getMockStub() *mockstub.MockStub {
stub := mockstub.NewMockStub("testConfigState", nil)
stub.SetMspID("Org1MSP")
stub.MockTransactionStart("startTxn")
stub.ChannelID = channelID
return stub
}
// uplaodConfigToHL uploads the key & config to the repository
func uplaodConfigToHL(stub *mockstub.MockStub, config []byte) error {
configManager := mgmt.NewConfigManager(stub)
if configManager == nil {
return fmt.Errorf("Cannot instantiate config manager")
}
err := configManager.Save(config)
return err
}
func TestGetEndorsersForChaincodeOneCC(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1)) = Org1
expected := []api.PeerGroup{
// Org1
pg(p1), pg(p2),
}
verify(t, service, expected, channel1, cc1)
}
func TestGetEndorsersForChaincodeTwoCCs(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
}
func TestGetEndorsersForChaincodeTwoCCsTwoChannels(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8).
add(channel2, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()).
add(channel2, cc1, getPolicy3()).
add(channel2, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP(),
)
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
// Channel2(Policy(cc1) and Policy(cc2)) = Org5 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected = []api.PeerGroup{
// Org5 and Org2
pg(p11, p1, p3), pg(p11, p1, p4), pg(p11, p2, p3), pg(p11, p2, p4),
pg(p12, p1, p3), pg(p12, p1, p4), pg(p12, p2, p3), pg(p12, p2, p4),
// Org5 and Org3
pg(p11, p1, p5), pg(p11, p1, p6), pg(p11, p1, p7), pg(p11, p2, p5), pg(p11, p2, p6), pg(p11, p2, p7),
pg(p12, p1, p5), pg(p12, p1, p6), pg(p12, p1, p7), pg(p12, p2, p5), pg(p12, p2, p6), pg(p12, p2, p7),
// Org5 and Org4
pg(p11, p1, p8), pg(p11, p1, p9), pg(p11, p1, p10), pg(p11, p2, p8), pg(p11, p2, p9), pg(p11, p2, p10),
pg(p12, p1, p8), pg(p12, p1, p9), pg(p12, p1, p10), pg(p12, p2, p8), pg(p12, p2, p9), pg(p12, p2, p10),
// Org5 and Org3 and Org4
pg(p11, p5, p8), pg(p11, p5, p9), pg(p11, p5, p10), pg(p11, p6, p8), pg(p11, p6, p9), pg(p11, p6, p10), pg(p11, p7, p8), pg(p11, p7, p9), pg(p11, p7, p10),
pg(p12, p5, p8), pg(p12, p5, p9), pg(p12, p5, p10), pg(p12, p6, p8), pg(p12, p6, p9), pg(p12, p6, p10), pg(p12, p7, p8), pg(p12, p7, p9), pg(p12, p7, p10),
}
verify(t, service, expected, channel2, cc1, cc2)
}
func verify(t *testing.T, service api.SelectionService, expectedPeerGroups []api.PeerGroup, channelID string, chaincodeIDs ...string) {
// Set the log level to WARNING since the following spits out too much info in DEBUG
module := "pg-resolver"
level := logging.GetLevel(module)
logging.SetLevel(module, apilogging.WARNING)
defer logging.SetLevel(module, level)
for i := 0; i < len(expectedPeerGroups); i++ {
peers, err := service.GetEndorsersForChaincode(channelID, nil, chaincodeIDs...)
if err != nil {
t.Fatalf("error getting endorsers: %s", err)
}
if !containsPeerGroup(expectedPeerGroups, peers) {
t.Fatalf("peer group %s is not one of the expected peer groups: %v", toString(peers), expectedPeerGroups)
}
}
}
func containsPeerGroup(groups []api.PeerGroup, peers []apifabclient.Peer) bool {
for _, g := range groups {
if containsAllPeers(peers, g) {
return true
}
}
return false
}
func containsAllPeers(peers []apifabclient.Peer, pg api.PeerGroup) bool {
if len(peers) != len(pg.Peers()) {
return false
}
for _, peer := range peers {
if !containsPeer(pg.Peers(), peer) {
return false
}
}
return true
}
func containsPeer(peers []apifabclient.Peer, peer apifabclient.Peer) bool {
for _, p := range peers {
if p.URL() == peer.URL() {
return true
}
}
return false
}
func pg(peers ...api.ChannelPeer) api.PeerGroup {
return pgresolver.NewPeerGroup(peers...)
}
func peer(name string, mspID string) api.ChannelPeer {
peer, err := sdkpeer.New(configImp, sdkpeer.WithURL(name+":7051"))
if err != nil {
panic(fmt.Sprintf("Failed to create peer: %v)", err))
}
peer.SetName(name)
peer.SetMSPID(mspID)
return channelpeer.New(peer, "", 0, nil)
}
func newMockSelectionService(membershipManager api.MembershipManager, ccDataProvider api.CCDataProvider, lbp api.LoadBalancePolicy) api.SelectionService {
return &selectionServiceImpl{
membershipManager: membershipManager,
ccDataProvider: ccDataProvider,
pgLBP: lbp,
pgResolvers: make(map[string]api.PeerGroupResolver),
}
}
type mockMembershipManager struct {
peerConfigs map[string][]api.ChannelPeer
}
func (m *mockMembershipManager) GetPeersOfChannel(channelID string) api.ChannelMembership {
return api.ChannelMembership{Peers: m.peerConfigs[channelID]}
}
func newMockMembershipManager() *mockMembershipManager |
func (m *mockMembershipManager) add(channelID string, peers ...api.ChannelPeer) *mockMembershipManager {
m.peerConfigs[channelID] = []api.ChannelPeer(peers)
return m
}
type mockCCDataProvider struct {
ccData map[string]*ccprovider.ChaincodeData
}
func newMockCCDataProvider() *mockCCDataProvider {
return &mockCCDataProvider{ccData: make(map[string]*ccprovider.ChaincodeData)}
}
func (p *mockCCDataProvider) QueryChaincodeData(channelID string, chaincodeID string) (*ccprovider.ChaincodeData, error) {
return p.ccData[newResolverKey(channelID, chaincodeID).String()], nil
}
func (p *mockCCDataProvider) add(channelID string, chaincodeID string, policy *ccprovider.ChaincodeData) *mockCCDataProvider {
p.ccData[newResolverKey(channelID, chaincodeID).String()] = policy
return p
}
// Policy: Org1
func getPolicy1() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: signedBy[o1],
Identities: identities,
})
}
// Policy: 1 of [(2 of [Org1, Org2]),(2 of [Org1, Org3, Org4])]
func getPolicy2() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1, org2, org3, org4)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: pgresolver.NewNOutOfPolicy(1,
pgresolver.NewNOutOfPolicy(2,
signedBy[o1],
signedBy[o2],
),
pgresolver.NewNOutOfPolicy(2,
signedBy[o1],
signedBy[o3],
signedBy[o4],
),
),
Identities: identities,
})
}
// Policy: Org5
func getPolicy3() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1, org2, org3, org4, org5)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: signedBy[o5],
Identities: identities,
})
}
func newCCData(sigPolicyEnv *common.SignaturePolicyEnvelope) *ccprovider.ChaincodeData {
policyBytes, err := proto.Marshal(sigPolicyEnv)
if err != nil {
panic(err)
}
return &ccprovider.ChaincodeData{Policy: policyBytes}
}
func toString(peers []apifabclient.Peer) string {
str := "["
for i, p := range peers {
str += p.URL()
if i+1 < len(peers) {
str += ","
}
}
str += "]"
return str
}
| {
return &mockMembershipManager{peerConfigs: make(map[string][]api.ChannelPeer)}
} | identifier_body |
selection_test.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package client
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-sdk-go/api/apifabclient"
"github.com/hyperledger/fabric-sdk-go/api/apilogging"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/mocks"
sdkpeer "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/peer"
"github.com/hyperledger/fabric-sdk-go/pkg/logging"
bccspFactory "github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/core/common/ccprovider"
"github.com/hyperledger/fabric/protos/common"
configmanagerApi "github.com/securekey/fabric-snaps/configmanager/api"
"github.com/securekey/fabric-snaps/configmanager/pkg/mgmt"
configmgmtService "github.com/securekey/fabric-snaps/configmanager/pkg/service"
mockstub "github.com/securekey/fabric-snaps/mocks/mockstub"
"github.com/securekey/fabric-snaps/transactionsnap/api"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/channelpeer"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/pgresolver"
config "github.com/securekey/fabric-snaps/transactionsnap/cmd/config"
)
var configImp = mocks.NewMockConfig()
var channelID = "testChannel"
var mspID = "Org1MSP"
const (
org1 = "Org1MSP"
org2 = "Org2MSP"
org3 = "Org3MSP"
org4 = "Org4MSP"
org5 = "Org5MSP"
org6 = "Org6MSP"
org7 = "Org7MSP"
org8 = "Org8MSP"
org9 = "Org9MSP"
org10 = "Org10MSP"
)
const (
channel1 = "channel1"
channel2 = "channel2"
)
const (
cc1 = "cc1"
cc2 = "cc2"
cc3 = "cc3"
)
const (
o1 = iota
o2
o3
o4
o5
)
var p1 = peer("peer1", org1)
var p2 = peer("peer2", org1)
var p3 = peer("peer3", org2)
var p4 = peer("peer4", org2)
var p5 = peer("peer5", org3)
var p6 = peer("peer6", org3)
var p7 = peer("peer7", org3)
var p8 = peer("peer8", org4)
var p9 = peer("peer9", org4)
var p10 = peer("peer10", org4)
var p11 = peer("peer11", org5)
var p12 = peer("peer12", org5)
type sampleConfig struct {
api.Config
}
// GetMspConfigPath is overridden to return a relative path so that a separate core.yaml is not needed just for this purpose
func (c *sampleConfig) GetMspConfigPath() string {
return "../sampleconfig/msp"
}
func TestMain(m *testing.M) {
opts := &bccspFactory.FactoryOpts{
ProviderName: "SW",
SwOpts: &bccspFactory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: false,
FileKeystore: &bccspFactory.FileKeystoreOpts{KeyStorePath: "../sampleconfig/msp/keystore/"},
},
}
bccspFactory.InitFactories(opts)
//
configData, err := ioutil.ReadFile("../sampleconfig/config.yaml")
if err != nil {
panic(fmt.Sprintf("File error: %v\n", err))
}
configMsg := &configmanagerApi.ConfigMessage{MspID: mspID,
Peers: []configmanagerApi.PeerConfig{configmanagerApi.PeerConfig{
PeerID: "jdoe", App: []configmanagerApi.AppConfig{
configmanagerApi.AppConfig{AppName: "txnsnap", Config: string(configData)}}}}}
stub := getMockStub()
configBytes, err := json.Marshal(configMsg)
if err != nil {
panic(fmt.Sprintf("Cannot Marshal %s\n", err))
}
//upload valid message to HL
err = uplaodConfigToHL(stub, configBytes)
if err != nil {
panic(fmt.Sprintf("Cannot upload %s\n", err))
}
configmgmtService.Initialize(stub, mspID)
config, err := config.NewConfig("../sampleconfig", channelID)
if err != nil {
panic(fmt.Sprintf("Error initializing config: %s", err))
}
_, err = GetInstance("testChannel", &sampleConfig{config})
if err != nil {
panic(fmt.Sprintf("Client GetInstance return error %v", err))
}
os.Exit(m.Run())
}
func getMockStub() *mockstub.MockStub {
stub := mockstub.NewMockStub("testConfigState", nil)
stub.SetMspID("Org1MSP")
stub.MockTransactionStart("startTxn")
stub.ChannelID = channelID
return stub
}
// uplaodConfigToHL uploads the key & config to the repository
func uplaodConfigToHL(stub *mockstub.MockStub, config []byte) error {
configManager := mgmt.NewConfigManager(stub)
if configManager == nil {
return fmt.Errorf("Cannot instantiate config manager")
}
err := configManager.Save(config)
return err
}
func TestGetEndorsersForChaincodeOneCC(t *testing.T) {
service := newMockSelectionService( | add(channel1, cc1, getPolicy1()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1)) = Org1
expected := []api.PeerGroup{
// Org1
pg(p1), pg(p2),
}
verify(t, service, expected, channel1, cc1)
}
func TestGetEndorsersForChaincodeTwoCCs(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
}
func TestGetEndorsersForChaincodeTwoCCsTwoChannels(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8).
add(channel2, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()).
add(channel2, cc1, getPolicy3()).
add(channel2, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP(),
)
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
// Channel2(Policy(cc1) and Policy(cc2)) = Org5 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected = []api.PeerGroup{
// Org5 and Org2
pg(p11, p1, p3), pg(p11, p1, p4), pg(p11, p2, p3), pg(p11, p2, p4),
pg(p12, p1, p3), pg(p12, p1, p4), pg(p12, p2, p3), pg(p12, p2, p4),
// Org5 and Org3
pg(p11, p1, p5), pg(p11, p1, p6), pg(p11, p1, p7), pg(p11, p2, p5), pg(p11, p2, p6), pg(p11, p2, p7),
pg(p12, p1, p5), pg(p12, p1, p6), pg(p12, p1, p7), pg(p12, p2, p5), pg(p12, p2, p6), pg(p12, p2, p7),
// Org5 and Org4
pg(p11, p1, p8), pg(p11, p1, p9), pg(p11, p1, p10), pg(p11, p2, p8), pg(p11, p2, p9), pg(p11, p2, p10),
pg(p12, p1, p8), pg(p12, p1, p9), pg(p12, p1, p10), pg(p12, p2, p8), pg(p12, p2, p9), pg(p12, p2, p10),
// Org5 and Org3 and Org4
pg(p11, p5, p8), pg(p11, p5, p9), pg(p11, p5, p10), pg(p11, p6, p8), pg(p11, p6, p9), pg(p11, p6, p10), pg(p11, p7, p8), pg(p11, p7, p9), pg(p11, p7, p10),
pg(p12, p5, p8), pg(p12, p5, p9), pg(p12, p5, p10), pg(p12, p6, p8), pg(p12, p6, p9), pg(p12, p6, p10), pg(p12, p7, p8), pg(p12, p7, p9), pg(p12, p7, p10),
}
verify(t, service, expected, channel2, cc1, cc2)
}
func verify(t *testing.T, service api.SelectionService, expectedPeerGroups []api.PeerGroup, channelID string, chaincodeIDs ...string) {
// Set the log level to WARNING since the following spits out too much info in DEBUG
module := "pg-resolver"
level := logging.GetLevel(module)
logging.SetLevel(module, apilogging.WARNING)
defer logging.SetLevel(module, level)
for i := 0; i < len(expectedPeerGroups); i++ {
peers, err := service.GetEndorsersForChaincode(channelID, nil, chaincodeIDs...)
if err != nil {
t.Fatalf("error getting endorsers: %s", err)
}
if !containsPeerGroup(expectedPeerGroups, peers) {
t.Fatalf("peer group %s is not one of the expected peer groups: %v", toString(peers), expectedPeerGroups)
}
}
}
func containsPeerGroup(groups []api.PeerGroup, peers []apifabclient.Peer) bool {
for _, g := range groups {
if containsAllPeers(peers, g) {
return true
}
}
return false
}
func containsAllPeers(peers []apifabclient.Peer, pg api.PeerGroup) bool {
if len(peers) != len(pg.Peers()) {
return false
}
for _, peer := range peers {
if !containsPeer(pg.Peers(), peer) {
return false
}
}
return true
}
func containsPeer(peers []apifabclient.Peer, peer apifabclient.Peer) bool {
for _, p := range peers {
if p.URL() == peer.URL() {
return true
}
}
return false
}
func pg(peers ...api.ChannelPeer) api.PeerGroup {
return pgresolver.NewPeerGroup(peers...)
}
func peer(name string, mspID string) api.ChannelPeer {
peer, err := sdkpeer.New(configImp, sdkpeer.WithURL(name+":7051"))
if err != nil {
panic(fmt.Sprintf("Failed to create peer: %v)", err))
}
peer.SetName(name)
peer.SetMSPID(mspID)
return channelpeer.New(peer, "", 0, nil)
}
func newMockSelectionService(membershipManager api.MembershipManager, ccDataProvider api.CCDataProvider, lbp api.LoadBalancePolicy) api.SelectionService {
return &selectionServiceImpl{
membershipManager: membershipManager,
ccDataProvider: ccDataProvider,
pgLBP: lbp,
pgResolvers: make(map[string]api.PeerGroupResolver),
}
}
type mockMembershipManager struct {
peerConfigs map[string][]api.ChannelPeer
}
func (m *mockMembershipManager) GetPeersOfChannel(channelID string) api.ChannelMembership {
return api.ChannelMembership{Peers: m.peerConfigs[channelID]}
}
func newMockMembershipManager() *mockMembershipManager {
return &mockMembershipManager{peerConfigs: make(map[string][]api.ChannelPeer)}
}
func (m *mockMembershipManager) add(channelID string, peers ...api.ChannelPeer) *mockMembershipManager {
m.peerConfigs[channelID] = []api.ChannelPeer(peers)
return m
}
type mockCCDataProvider struct {
ccData map[string]*ccprovider.ChaincodeData
}
func newMockCCDataProvider() *mockCCDataProvider {
return &mockCCDataProvider{ccData: make(map[string]*ccprovider.ChaincodeData)}
}
func (p *mockCCDataProvider) QueryChaincodeData(channelID string, chaincodeID string) (*ccprovider.ChaincodeData, error) {
return p.ccData[newResolverKey(channelID, chaincodeID).String()], nil
}
func (p *mockCCDataProvider) add(channelID string, chaincodeID string, policy *ccprovider.ChaincodeData) *mockCCDataProvider {
p.ccData[newResolverKey(channelID, chaincodeID).String()] = policy
return p
}
// Policy: Org1
func getPolicy1() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: signedBy[o1],
Identities: identities,
})
}
// Policy: 1 of [(2 of [Org1, Org2]),(2 of [Org1, Org3, Org4])]
func getPolicy2() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1, org2, org3, org4)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: pgresolver.NewNOutOfPolicy(1,
pgresolver.NewNOutOfPolicy(2,
signedBy[o1],
signedBy[o2],
),
pgresolver.NewNOutOfPolicy(2,
signedBy[o1],
signedBy[o3],
signedBy[o4],
),
),
Identities: identities,
})
}
// Policy: Org5
func getPolicy3() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1, org2, org3, org4, org5)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: signedBy[o5],
Identities: identities,
})
}
func newCCData(sigPolicyEnv *common.SignaturePolicyEnvelope) *ccprovider.ChaincodeData {
policyBytes, err := proto.Marshal(sigPolicyEnv)
if err != nil {
panic(err)
}
return &ccprovider.ChaincodeData{Policy: policyBytes}
}
func toString(peers []apifabclient.Peer) string {
str := "["
for i, p := range peers {
str += p.URL()
if i+1 < len(peers) {
str += ","
}
}
str += "]"
return str
} | newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider(). | random_line_split |
selection_test.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package client
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-sdk-go/api/apifabclient"
"github.com/hyperledger/fabric-sdk-go/api/apilogging"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/mocks"
sdkpeer "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/peer"
"github.com/hyperledger/fabric-sdk-go/pkg/logging"
bccspFactory "github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/core/common/ccprovider"
"github.com/hyperledger/fabric/protos/common"
configmanagerApi "github.com/securekey/fabric-snaps/configmanager/api"
"github.com/securekey/fabric-snaps/configmanager/pkg/mgmt"
configmgmtService "github.com/securekey/fabric-snaps/configmanager/pkg/service"
mockstub "github.com/securekey/fabric-snaps/mocks/mockstub"
"github.com/securekey/fabric-snaps/transactionsnap/api"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/channelpeer"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/pgresolver"
config "github.com/securekey/fabric-snaps/transactionsnap/cmd/config"
)
var configImp = mocks.NewMockConfig()
var channelID = "testChannel"
var mspID = "Org1MSP"
const (
org1 = "Org1MSP"
org2 = "Org2MSP"
org3 = "Org3MSP"
org4 = "Org4MSP"
org5 = "Org5MSP"
org6 = "Org6MSP"
org7 = "Org7MSP"
org8 = "Org8MSP"
org9 = "Org9MSP"
org10 = "Org10MSP"
)
const (
channel1 = "channel1"
channel2 = "channel2"
)
const (
cc1 = "cc1"
cc2 = "cc2"
cc3 = "cc3"
)
const (
o1 = iota
o2
o3
o4
o5
)
var p1 = peer("peer1", org1)
var p2 = peer("peer2", org1)
var p3 = peer("peer3", org2)
var p4 = peer("peer4", org2)
var p5 = peer("peer5", org3)
var p6 = peer("peer6", org3)
var p7 = peer("peer7", org3)
var p8 = peer("peer8", org4)
var p9 = peer("peer9", org4)
var p10 = peer("peer10", org4)
var p11 = peer("peer11", org5)
var p12 = peer("peer12", org5)
type sampleConfig struct {
api.Config
}
// GetMspConfigPath is overridden to return a relative path so that a separate core.yaml is not needed just for this purpose
func (c *sampleConfig) GetMspConfigPath() string {
return "../sampleconfig/msp"
}
func TestMain(m *testing.M) {
opts := &bccspFactory.FactoryOpts{
ProviderName: "SW",
SwOpts: &bccspFactory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: false,
FileKeystore: &bccspFactory.FileKeystoreOpts{KeyStorePath: "../sampleconfig/msp/keystore/"},
},
}
bccspFactory.InitFactories(opts)
//
configData, err := ioutil.ReadFile("../sampleconfig/config.yaml")
if err != nil {
panic(fmt.Sprintf("File error: %v\n", err))
}
configMsg := &configmanagerApi.ConfigMessage{MspID: mspID,
Peers: []configmanagerApi.PeerConfig{configmanagerApi.PeerConfig{
PeerID: "jdoe", App: []configmanagerApi.AppConfig{
configmanagerApi.AppConfig{AppName: "txnsnap", Config: string(configData)}}}}}
stub := getMockStub()
configBytes, err := json.Marshal(configMsg)
if err != nil {
panic(fmt.Sprintf("Cannot Marshal %s\n", err))
}
//upload valid message to HL
err = uplaodConfigToHL(stub, configBytes)
if err != nil {
panic(fmt.Sprintf("Cannot upload %s\n", err))
}
configmgmtService.Initialize(stub, mspID)
config, err := config.NewConfig("../sampleconfig", channelID)
if err != nil {
panic(fmt.Sprintf("Error initializing config: %s", err))
}
_, err = GetInstance("testChannel", &sampleConfig{config})
if err != nil {
panic(fmt.Sprintf("Client GetInstance return error %v", err))
}
os.Exit(m.Run())
}
func getMockStub() *mockstub.MockStub {
stub := mockstub.NewMockStub("testConfigState", nil)
stub.SetMspID("Org1MSP")
stub.MockTransactionStart("startTxn")
stub.ChannelID = channelID
return stub
}
// uplaodConfigToHL uploads the key & config to the repository
func uplaodConfigToHL(stub *mockstub.MockStub, config []byte) error {
configManager := mgmt.NewConfigManager(stub)
if configManager == nil {
return fmt.Errorf("Cannot instantiate config manager")
}
err := configManager.Save(config)
return err
}
func TestGetEndorsersForChaincodeOneCC(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1)) = Org1
expected := []api.PeerGroup{
// Org1
pg(p1), pg(p2),
}
verify(t, service, expected, channel1, cc1)
}
func TestGetEndorsersForChaincodeTwoCCs(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
}
func TestGetEndorsersForChaincodeTwoCCsTwoChannels(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8).
add(channel2, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()).
add(channel2, cc1, getPolicy3()).
add(channel2, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP(),
)
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
// Channel2(Policy(cc1) and Policy(cc2)) = Org5 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected = []api.PeerGroup{
// Org5 and Org2
pg(p11, p1, p3), pg(p11, p1, p4), pg(p11, p2, p3), pg(p11, p2, p4),
pg(p12, p1, p3), pg(p12, p1, p4), pg(p12, p2, p3), pg(p12, p2, p4),
// Org5 and Org3
pg(p11, p1, p5), pg(p11, p1, p6), pg(p11, p1, p7), pg(p11, p2, p5), pg(p11, p2, p6), pg(p11, p2, p7),
pg(p12, p1, p5), pg(p12, p1, p6), pg(p12, p1, p7), pg(p12, p2, p5), pg(p12, p2, p6), pg(p12, p2, p7),
// Org5 and Org4
pg(p11, p1, p8), pg(p11, p1, p9), pg(p11, p1, p10), pg(p11, p2, p8), pg(p11, p2, p9), pg(p11, p2, p10),
pg(p12, p1, p8), pg(p12, p1, p9), pg(p12, p1, p10), pg(p12, p2, p8), pg(p12, p2, p9), pg(p12, p2, p10),
// Org5 and Org3 and Org4
pg(p11, p5, p8), pg(p11, p5, p9), pg(p11, p5, p10), pg(p11, p6, p8), pg(p11, p6, p9), pg(p11, p6, p10), pg(p11, p7, p8), pg(p11, p7, p9), pg(p11, p7, p10),
pg(p12, p5, p8), pg(p12, p5, p9), pg(p12, p5, p10), pg(p12, p6, p8), pg(p12, p6, p9), pg(p12, p6, p10), pg(p12, p7, p8), pg(p12, p7, p9), pg(p12, p7, p10),
}
verify(t, service, expected, channel2, cc1, cc2)
}
func | (t *testing.T, service api.SelectionService, expectedPeerGroups []api.PeerGroup, channelID string, chaincodeIDs ...string) {
// Set the log level to WARNING since the following spits out too much info in DEBUG
module := "pg-resolver"
level := logging.GetLevel(module)
logging.SetLevel(module, apilogging.WARNING)
defer logging.SetLevel(module, level)
for i := 0; i < len(expectedPeerGroups); i++ {
peers, err := service.GetEndorsersForChaincode(channelID, nil, chaincodeIDs...)
if err != nil {
t.Fatalf("error getting endorsers: %s", err)
}
if !containsPeerGroup(expectedPeerGroups, peers) {
t.Fatalf("peer group %s is not one of the expected peer groups: %v", toString(peers), expectedPeerGroups)
}
}
}
func containsPeerGroup(groups []api.PeerGroup, peers []apifabclient.Peer) bool {
for _, g := range groups {
if containsAllPeers(peers, g) {
return true
}
}
return false
}
func containsAllPeers(peers []apifabclient.Peer, pg api.PeerGroup) bool {
if len(peers) != len(pg.Peers()) {
return false
}
for _, peer := range peers {
if !containsPeer(pg.Peers(), peer) {
return false
}
}
return true
}
func containsPeer(peers []apifabclient.Peer, peer apifabclient.Peer) bool {
for _, p := range peers {
if p.URL() == peer.URL() {
return true
}
}
return false
}
func pg(peers ...api.ChannelPeer) api.PeerGroup {
return pgresolver.NewPeerGroup(peers...)
}
func peer(name string, mspID string) api.ChannelPeer {
peer, err := sdkpeer.New(configImp, sdkpeer.WithURL(name+":7051"))
if err != nil {
panic(fmt.Sprintf("Failed to create peer: %v)", err))
}
peer.SetName(name)
peer.SetMSPID(mspID)
return channelpeer.New(peer, "", 0, nil)
}
func newMockSelectionService(membershipManager api.MembershipManager, ccDataProvider api.CCDataProvider, lbp api.LoadBalancePolicy) api.SelectionService {
return &selectionServiceImpl{
membershipManager: membershipManager,
ccDataProvider: ccDataProvider,
pgLBP: lbp,
pgResolvers: make(map[string]api.PeerGroupResolver),
}
}
type mockMembershipManager struct {
peerConfigs map[string][]api.ChannelPeer
}
func (m *mockMembershipManager) GetPeersOfChannel(channelID string) api.ChannelMembership {
return api.ChannelMembership{Peers: m.peerConfigs[channelID]}
}
func newMockMembershipManager() *mockMembershipManager {
return &mockMembershipManager{peerConfigs: make(map[string][]api.ChannelPeer)}
}
func (m *mockMembershipManager) add(channelID string, peers ...api.ChannelPeer) *mockMembershipManager {
m.peerConfigs[channelID] = []api.ChannelPeer(peers)
return m
}
type mockCCDataProvider struct {
ccData map[string]*ccprovider.ChaincodeData
}
func newMockCCDataProvider() *mockCCDataProvider {
return &mockCCDataProvider{ccData: make(map[string]*ccprovider.ChaincodeData)}
}
func (p *mockCCDataProvider) QueryChaincodeData(channelID string, chaincodeID string) (*ccprovider.ChaincodeData, error) {
return p.ccData[newResolverKey(channelID, chaincodeID).String()], nil
}
func (p *mockCCDataProvider) add(channelID string, chaincodeID string, policy *ccprovider.ChaincodeData) *mockCCDataProvider {
p.ccData[newResolverKey(channelID, chaincodeID).String()] = policy
return p
}
// Policy: Org1
func getPolicy1() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: signedBy[o1],
Identities: identities,
})
}
// Policy: 1 of [(2 of [Org1, Org2]),(2 of [Org1, Org3, Org4])]
func getPolicy2() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1, org2, org3, org4)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: pgresolver.NewNOutOfPolicy(1,
pgresolver.NewNOutOfPolicy(2,
signedBy[o1],
signedBy[o2],
),
pgresolver.NewNOutOfPolicy(2,
signedBy[o1],
signedBy[o3],
signedBy[o4],
),
),
Identities: identities,
})
}
// Policy: Org5
func getPolicy3() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1, org2, org3, org4, org5)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: signedBy[o5],
Identities: identities,
})
}
func newCCData(sigPolicyEnv *common.SignaturePolicyEnvelope) *ccprovider.ChaincodeData {
policyBytes, err := proto.Marshal(sigPolicyEnv)
if err != nil {
panic(err)
}
return &ccprovider.ChaincodeData{Policy: policyBytes}
}
func toString(peers []apifabclient.Peer) string {
str := "["
for i, p := range peers {
str += p.URL()
if i+1 < len(peers) {
str += ","
}
}
str += "]"
return str
}
| verify | identifier_name |
mod.rs | mod field_names_encoder;
use self::field_names_encoder::FieldNamesEncoder;
use csv::{self, Result};
use rustc_serialize::Encodable;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::marker::PhantomData;
use std::path::Path;
/// A CSV writer that automatically writes the headers.
///
/// This writer provides a convenient interface for encoding CSV data. While
/// creating CSV data is much easier than parsing it, having a writer can be
/// convenient because it can handle quoting for you automatically. Moreover,
/// this particular writer supports [`rustc_serialize::Encodable`][Encodable]
/// types, which makes it easy to write your custom types as CSV records and
/// automatically generate headers.
///
/// All CSV data produced by this writer, with default options, conforms with
/// [RFC 4180](http://tools.ietf.org/html/rfc4180).
///
/// One slight deviation is that records with a single empty field are always
/// encoded as `""`. This ensures that the record is not skipped since some
/// CSV parsers will ignore consecutive record terminators (like the one in
/// this crate).
///
/// If you don't want the writer to automatically write the header row,
/// use the [`csv`](https://github.com/BurntSushi/rust-csv) crate instead.
///
/// # Example
///
/// Here's an example that encodes a zoo of animals:
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Record {
/// count: usize,
/// animal: &'static str,
/// description: &'static str,
/// }
///
/// let records = vec![
/// Record { count: 7, animal: "penguin", description: "happy" },
/// Record { count: 10, animal: "cheetah", description: "fast" },
/// Record { count: 4, animal: "armadillo", description: "armored" },
/// Record { count: 9, animal: "platypus", description: "unique" },
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,description
/// 7,penguin,happy
/// 10,cheetah,fast
/// 4,armadillo,armored
/// 9,platypus,unique
/// ");
/// # }
/// ```
///
/// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html
pub struct Writer<W: Write, E: Encodable> {
csv: csv::Writer<W>,
first_row: bool,
record_type: PhantomData<E>,
}
impl<E: Encodable> Writer<File, E> {
/// Creates a new typed CSV writer that writes to the file path given.
///
/// The file is created if it does not already exist and is truncated
/// otherwise.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> {
Ok(Self::from_csv_writer(csv::Writer::from_file(path)?))
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Creates a new typed CSV writer that writes to the `io::Write` given.
///
/// Note that the writer is buffered for you automatically.
pub fn from_writer(w: W) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_writer(w))
}
/// Creates a new typed CSV writer that writes to the CSV writer given.
///
/// This lets you specify options to the underlying CSV writer (e.g. to use
/// a different delimiter).
pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> {
Writer {
csv: w,
first_row: true,
record_type: PhantomData,
}
}
/// Creates a new typed CSV writer that writes to the buffer given.
///
/// This lets you specify your own buffered writer (e.g., use a different
/// capacity). All other constructors wrap the writer given in a buffer
/// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_buffer(buf))
}
}
impl<E: Encodable> Writer<Vec<u8>, E> {
/// Creates a new CSV writer that writes to an in memory buffer. At any
/// time, `as_string` or `as_bytes` can be called to retrieve the
/// cumulative CSV data.
pub fn from_memory() -> Writer<Vec<u8>, E> {
Self::from_csv_writer(csv::Writer::from_memory())
}
/// Returns the written CSV data as a string.
pub fn as_string(&mut self) -> &str {
self.csv.as_string()
}
/// Returns the encoded CSV data as raw bytes.
pub fn as_bytes(&mut self) -> &[u8] {
self.csv.as_bytes()
}
/// Convert the Writer into a string of written CSV data
pub fn into_string(self) -> String {
self.csv.into_string()
}
/// Convert the Writer into a vector of encoded CSV bytes.
pub fn into_bytes(self) -> Vec<u8> {
self.csv.into_bytes()
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Writes a record by encoding any `Encodable` value.
///
/// When the first record is encoded, the headers (the field names in the
/// struct) are written prior to encoding the record.
///
/// The type that is being encoded into should correspond to *one full CSV
/// record*. This can be a single struct, or arbitrarily nested tuples,
/// arrays, Vecs, and structs, as long as all scalar types (integers,
/// floats, characters, strings, collections containing one scalar, and
/// enums with 0 or 1 scalar arguments) are fields in structs. Enums with
/// zero arguments encode to their name, while enums of one argument encode
/// to their constituent value. `Option` types are also supported. (`None`
/// encodes to an empty field.)
///
/// Note that single-element tuple structs (the newtype pattern) are
/// supported. Unfortunately, to provide this functionality, a heuristic is
/// necessary to differentiate field names in normal structs from those in
/// tuple structs. As a result, field names in normal structs should not be
/// of the form `_field{}` where `{}` is its position in the struct.
///
/// # Example
///
/// This example encodes zoo animals that may not have a description.
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Count(usize);
///
/// #[derive(RustcEncodable)]
/// enum Group {
/// Bird,
/// Mammal,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part1 {
/// count: Count,
/// animal: &'static str,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part2 {
/// group: Group,
/// description: Option<&'static str>,
/// }
///
/// let records = vec![
/// (
/// Part1 { count: Count(7), animal: "penguin" },
/// Part2 { group: Group::Bird, description: Some("happy") },
/// ),
/// (
/// Part1 { count: Count(10), animal: "cheetah" },
/// Part2 { group: Group::Mammal, description: Some("fast") },
/// ),
/// (
/// Part1 { count: Count(4), animal: "armadillo" },
/// Part2 { group: Group::Mammal, description: Some("armored") },
/// ),
/// (
/// Part1 { count: Count(9), animal: "platypus" },
/// Part2 { group: Group::Mammal, description: None },
/// ),
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,group,description
/// 7,penguin,Bird,happy
/// 10,cheetah,Mammal,fast
/// 4,armadillo,Mammal,armored
/// 9,platypus,Mammal,
/// ");
/// # }
/// ```
pub fn encode(&mut self, row: E) -> csv::Result<()> {
// Write headers if this is the first row.
if self.first_row {
let mut field_names_encoder = FieldNamesEncoder::new();
row.encode(&mut field_names_encoder)?;
self.csv.write(field_names_encoder.into_field_names().into_iter())?;
self.first_row = false;
}
// Write row.
let mut erecord = csv::Encoded::new();
row.encode(&mut erecord)?;
self.csv.write(erecord.unwrap().into_iter())
}
/// Flushes the underlying buffer.
pub fn flush(&mut self) -> Result<()> {
self.csv.flush()
}
}
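// Illustrative sketch, not part of the original module: pairing this typed
// writer with a customised underlying `csv::Writer`, here using a semicolon
// delimiter. The `delimiter` builder method is assumed to exist on the `csv`
// crate version in use; adjust if the actual API differs.
#[cfg(test)]
mod delimiter_example {
    use super::Writer;
    use csv;

    #[derive(RustcEncodable)]
    struct Pair {
        x: usize,
        y: usize,
    }

    #[test]
    fn semicolon_delimited() {
        let inner = csv::Writer::from_memory().delimiter(b';');
        let mut w = Writer::from_csv_writer(inner);
        w.encode(Pair { x: 1, y: 2 }).unwrap();
        assert_eq!(w.as_string(), "x;y\n1;2\n");
    }
}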
#[cfg(test)]
mod tests {
use super::Writer;
#[derive(RustcEncodable)]
struct SimpleStruct {
a: usize,
b: usize,
}
#[test]
fn test_struct() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
w.encode(s1).unwrap();
let s2 = SimpleStruct { a: 3, b: 4 };
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n");
}
#[test]
fn test_tuple_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode((s1, s2)).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode((s3, s4)).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_array_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode([s1, s2]).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode([s3, s4]).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_vec_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode(vec![s1, s2]).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode(vec![s3, s4]).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_nested_tuples_of_structs() {
let mut w = Writer::from_memory();
w.encode((SimpleStruct { a: 0, b: 1 },
(SimpleStruct { a: 2, b: 3 }),
(SimpleStruct { a: 4, b: 5 }, (SimpleStruct { a: 6, b: 7 }))))
.unwrap();
w.encode((SimpleStruct { a: 8, b: 9 },
(SimpleStruct { a: 10, b: 11 }),
(SimpleStruct { a: 12, b: 13 }, (SimpleStruct { a: 14, b: 15 }))))
.unwrap();
assert_eq!(w.as_string(),
"a,b,a,b,a,b,a,b\n0,1,2,3,4,5,6,7\n8,9,10,11,12,13,14,15\n");
}
#[derive(RustcEncodable)]
struct StructWithLengthOneSeqs {
a: [usize; 1],
b: Vec<usize>,
c: (usize),
}
#[test]
fn test_struct_with_len_one_seqs() {
let mut w = Writer::from_memory();
let s1 = StructWithLengthOneSeqs {
a: [0],
b: vec![1],
c: (2),
};
w.encode(s1).unwrap();
let s2 = StructWithLengthOneSeqs {
a: [3],
b: vec![4],
c: (5),
};
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b,c\n0,1,2\n3,4,5\n");
}
#[derive(RustcEncodable)]
struct StructOfStruct {
p: SimpleStruct,
q: (usize, usize),
}
#[should_panic]
#[test]
fn test_struct_of_struct() {
let mut w = Writer::from_memory();
let s = StructOfStruct {
p: SimpleStruct { a: 0, b: 1 },
q: (2, 3),
};
w.encode(s).unwrap();
}
#[derive(RustcEncodable)]
struct StructWithLongerSeq {
a: [usize; 2],
}
#[should_panic]
#[test]
fn test_struct_with_longer_seq() {
let mut w = Writer::from_memory();
let s = StructWithLongerSeq { a: [0, 1] };
w.encode(s).unwrap();
}
#[should_panic]
#[test]
fn test_vec() {
let mut w = Writer::from_memory();
let array = vec![0, 1];
w.encode(array).unwrap();
}
}

mod.rs

mod field_names_encoder;
use self::field_names_encoder::FieldNamesEncoder;
use csv::{self, Result};
use rustc_serialize::Encodable;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::marker::PhantomData;
use std::path::Path;
/// A CSV writer that automatically writes the headers.
///
/// This writer provides a convenient interface for encoding CSV data. While
/// creating CSV data is much easier than parsing it, having a writer can be
/// convenient because it can handle quoting for you automatically. Moreover,
/// this particular writer supports [`rustc_serialize::Encodable`][Encodable]
/// types, which makes it easy to write your custom types as CSV records and
/// automatically generate headers.
///
/// All CSV data produced by this writer, with default options, conforms with
/// [RFC 4180](http://tools.ietf.org/html/rfc4180).
///
/// One slight deviation is that records with a single empty field are always
/// encoded as `""`. This ensures that the record is not skipped since some
/// CSV parsers will ignore consecutive record terminators (like the one in
/// this crate).
///
/// If you don't want the writer to automatically write the header row,
/// use the [`csv`](https://github.com/BurntSushi/rust-csv) crate instead.
///
/// # Example
///
/// Here's an example that encodes a zoo of animals:
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Record {
/// count: usize,
/// animal: &'static str,
/// description: &'static str,
/// }
///
/// let records = vec![
/// Record { count: 7, animal: "penguin", description: "happy" },
/// Record { count: 10, animal: "cheetah", description: "fast" },
/// Record { count: 4, animal: "armadillo", description: "armored" },
/// Record { count: 9, animal: "platypus", description: "unique" },
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,description
/// 7,penguin,happy
/// 10,cheetah,fast
/// 4,armadillo,armored
/// 9,platypus,unique
/// ");
/// # }
/// ```
///
/// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html
pub struct Writer<W: Write, E: Encodable> {
csv: csv::Writer<W>,
first_row: bool,
record_type: PhantomData<E>,
}
impl<E: Encodable> Writer<File, E> {
/// Creates a new typed CSV writer that writes to the file path given.
///
/// The file is created if it does not already exist and is truncated
/// otherwise.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> {
Ok(Self::from_csv_writer(csv::Writer::from_file(path)?))
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Creates a new typed CSV writer that writes to the `io::Write` given.
///
/// Note that the writer is buffered for you automatically.
pub fn from_writer(w: W) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_writer(w))
}
/// Creates a new typed CSV writer that writes to the CSV writer given.
///
/// This lets you specify options to the underlying CSV writer (e.g. to use
/// a different delimiter).
pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> {
Writer {
csv: w,
first_row: true,
record_type: PhantomData,
}
}
/// Creates a new typed CSV writer that writes to the buffer given.
///
/// This lets you specify your own buffered writer (e.g., use a different
/// capacity). All other constructors wrap the writer given in a buffer
/// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_buffer(buf))
}
}
impl<E: Encodable> Writer<Vec<u8>, E> {
/// Creates a new CSV writer that writes to an in memory buffer. At any
/// time, `as_string` or `as_bytes` can be called to retrieve the
/// cumulative CSV data.
pub fn from_memory() -> Writer<Vec<u8>, E> {
Self::from_csv_writer(csv::Writer::from_memory())
}
/// Returns the written CSV data as a string.
pub fn as_string(&mut self) -> &str {
self.csv.as_string()
}
/// Returns the encoded CSV data as raw bytes.
pub fn as_bytes(&mut self) -> &[u8] {
self.csv.as_bytes()
}
/// Convert the Writer into a string of written CSV data
pub fn into_string(self) -> String {
self.csv.into_string()
}
/// Convert the Writer into a vector of encoded CSV bytes.
pub fn into_bytes(self) -> Vec<u8> {
self.csv.into_bytes()
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Writes a record by encoding any `Encodable` value.
///
/// When the first record is encoded, the headers (the field names in the
/// struct) are written prior to encoding the record.
///
/// The type that is being encoded into should correspond to *one full CSV
/// record*. This can be a single struct, or arbitrarily nested tuples,
/// arrays, Vecs, and structs, as long as all scalar types (integers,
/// floats, characters, strings, collections containing one scalar, and
/// enums with 0 or 1 scalar arguments) are fields in structs. Enums with
/// zero arguments encode to their name, while enums of one argument encode
/// to their constituent value. `Option` types are also supported. (`None`
/// encodes to an empty field.)
///
/// Note that single-element tuple structs (the newtype pattern) are
/// supported. Unfortunately, to provide this functionality, a heuristic is
/// necessary to differentiate field names in normal structs from those in
/// tuple structs. As a result, field names in normal structs should not be
/// of the form `_field{}` where `{}` is its position in the struct.
///
/// # Example
///
/// This example encodes zoo animals that may not have a description.
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Count(usize);
///
/// #[derive(RustcEncodable)]
/// enum Group {
/// Bird,
/// Mammal,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part1 {
/// count: Count,
/// animal: &'static str,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part2 {
/// group: Group,
/// description: Option<&'static str>,
/// }
///
/// let records = vec![
/// (
/// Part1 { count: Count(7), animal: "penguin" },
/// Part2 { group: Group::Bird, description: Some("happy") },
/// ),
/// (
/// Part1 { count: Count(10), animal: "cheetah" },
/// Part2 { group: Group::Mammal, description: Some("fast") },
/// ),
/// (
/// Part1 { count: Count(4), animal: "armadillo" },
/// Part2 { group: Group::Mammal, description: Some("armored") },
/// ),
/// (
/// Part1 { count: Count(9), animal: "platypus" },
/// Part2 { group: Group::Mammal, description: None },
/// ),
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,group,description
/// 7,penguin,Bird,happy
/// 10,cheetah,Mammal,fast
/// 4,armadillo,Mammal,armored
/// 9,platypus,Mammal,
/// ");
/// # }
/// ```
pub fn encode(&mut self, row: E) -> csv::Result<()> {
// Write headers if this is the first row.
if self.first_row {
let mut field_names_encoder = FieldNamesEncoder::new();
row.encode(&mut field_names_encoder)?;
self.csv.write(field_names_encoder.into_field_names().into_iter())?;
self.first_row = false;
}
// Write row.
let mut erecord = csv::Encoded::new();
row.encode(&mut erecord)?;
self.csv.write(erecord.unwrap().into_iter())
}
/// Flushes the underlying buffer.
pub fn flush(&mut self) -> Result<()> {
self.csv.flush()
}
}
#[cfg(test)]
mod tests {
use super::Writer;
#[derive(RustcEncodable)]
struct SimpleStruct {
a: usize,
b: usize,
}
#[test]
fn test_struct() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
w.encode(s1).unwrap();
let s2 = SimpleStruct { a: 3, b: 4 };
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n");
}
#[test]
fn test_tuple_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode((s1, s2)).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode((s3, s4)).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_array_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode([s1, s2]).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode([s3, s4]).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_vec_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode(vec![s1, s2]).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode(vec![s3, s4]).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_nested_tuples_of_structs() {
let mut w = Writer::from_memory();
w.encode((SimpleStruct { a: 0, b: 1 },
(SimpleStruct { a: 2, b: 3 }),
(SimpleStruct { a: 4, b: 5 }, (SimpleStruct { a: 6, b: 7 }))))
.unwrap();
w.encode((SimpleStruct { a: 8, b: 9 },
(SimpleStruct { a: 10, b: 11 }),
(SimpleStruct { a: 12, b: 13 }, (SimpleStruct { a: 14, b: 15 }))))
.unwrap();
assert_eq!(w.as_string(),
"a,b,a,b,a,b,a,b\n0,1,2,3,4,5,6,7\n8,9,10,11,12,13,14,15\n");
}
#[derive(RustcEncodable)]
struct StructWithLengthOneSeqs {
a: [usize; 1],
b: Vec<usize>,
c: (usize),
}
#[test]
fn test_struct_with_len_one_seqs() {
let mut w = Writer::from_memory();
let s1 = StructWithLengthOneSeqs {
a: [0],
b: vec![1],
c: (2),
};
w.encode(s1).unwrap();
let s2 = StructWithLengthOneSeqs {
a: [3],
b: vec![4],
c: (5),
};
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b,c\n0,1,2\n3,4,5\n");
}
#[derive(RustcEncodable)]
struct StructOfStruct {
p: SimpleStruct,
q: (usize, usize),
}
#[should_panic]
#[test]
fn test_struct_of_struct() {
let mut w = Writer::from_memory();
let s = StructOfStruct {
p: SimpleStruct { a: 0, b: 1 },
q: (2, 3),
};
w.encode(s).unwrap();
}
#[derive(RustcEncodable)]
struct StructWithLongerSeq {
a: [usize; 2],
}
#[should_panic]
#[test]
fn test_struct_with_longer_seq() {
let mut w = Writer::from_memory();
let s = StructWithLongerSeq { a: [0, 1] };
w.encode(s).unwrap();
}
#[should_panic]
#[test]
fn test_vec() {
let mut w = Writer::from_memory();
let array = vec![0, 1];
w.encode(array).unwrap();
}
}

mod.rs

mod field_names_encoder;
use self::field_names_encoder::FieldNamesEncoder;
use csv::{self, Result};
use rustc_serialize::Encodable;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::marker::PhantomData;
use std::path::Path;
/// A CSV writer that automatically writes the headers.
///
/// This writer provides a convenient interface for encoding CSV data. While
/// creating CSV data is much easier than parsing it, having a writer can be
/// convenient because it can handle quoting for you automatically. Moreover,
/// this particular writer supports [`rustc_serialize::Encodable`][Encodable]
/// types, which makes it easy to write your custom types as CSV records and
/// automatically generate headers.
///
/// All CSV data produced by this writer, with default options, conforms with
/// [RFC 4180](http://tools.ietf.org/html/rfc4180).
///
/// One slight deviation is that records with a single empty field are always
/// encoded as `""`. This ensures that the record is not skipped since some
/// CSV parsers will ignore consecutive record terminators (like the one in
/// this crate).
///
/// If you don't want the writer to automatically write the header row,
/// use the [`csv`](https://github.com/BurntSushi/rust-csv) crate instead.
///
/// # Example
///
/// Here's an example that encodes a zoo of animals:
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Record {
/// count: usize,
/// animal: &'static str,
/// description: &'static str,
/// }
///
/// let records = vec![
/// Record { count: 7, animal: "penguin", description: "happy" },
/// Record { count: 10, animal: "cheetah", description: "fast" },
/// Record { count: 4, animal: "armadillo", description: "armored" },
/// Record { count: 9, animal: "platypus", description: "unique" },
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,description
/// 7,penguin,happy
/// 10,cheetah,fast
/// 4,armadillo,armored
/// 9,platypus,unique
/// ");
/// # }
/// ```
///
/// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html
pub struct Writer<W: Write, E: Encodable> {
csv: csv::Writer<W>,
first_row: bool,
record_type: PhantomData<E>,
}
impl<E: Encodable> Writer<File, E> {
/// Creates a new typed CSV writer that writes to the file path given.
///
/// The file is created if it does not already exist and is truncated
/// otherwise.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> {
Ok(Self::from_csv_writer(csv::Writer::from_file(path)?))
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Creates a new typed CSV writer that writes to the `io::Write` given.
///
/// Note that the writer is buffered for you automatically.
pub fn from_writer(w: W) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_writer(w))
}
/// Creates a new typed CSV writer that writes to the CSV writer given.
///
/// This lets you specify options to the underlying CSV writer (e.g. to use
/// a different delimiter).
pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> {
Writer {
csv: w,
first_row: true,
record_type: PhantomData,
}
}
/// Creates a new typed CSV writer that writes to the buffer given.
///
/// This lets you specify your own buffered writer (e.g., use a different
/// capacity). All other constructors wrap the writer given in a buffer
/// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_buffer(buf))
}
}
impl<E: Encodable> Writer<Vec<u8>, E> {
/// Creates a new CSV writer that writes to an in memory buffer. At any
/// time, `as_string` or `as_bytes` can be called to retrieve the
/// cumulative CSV data.
pub fn from_memory() -> Writer<Vec<u8>, E> {
Self::from_csv_writer(csv::Writer::from_memory())
}
/// Returns the written CSV data as a string.
pub fn as_string(&mut self) -> &str {
self.csv.as_string()
}
/// Returns the encoded CSV data as raw bytes.
pub fn as_bytes(&mut self) -> &[u8] {
self.csv.as_bytes()
}
/// Convert the Writer into a string of written CSV data
pub fn into_string(self) -> String {
self.csv.into_string()
}
/// Convert the Writer into a vector of encoded CSV bytes.
pub fn into_bytes(self) -> Vec<u8> {
self.csv.into_bytes()
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Writes a record by encoding any `Encodable` value.
///
/// When the first record is encoded, the headers (the field names in the
/// struct) are written prior to encoding the record.
///
/// The type that is being encoded into should correspond to *one full CSV
/// record*. This can be a single struct, or arbitrarily nested tuples,
/// arrays, Vecs, and structs, as long as all scalar types (integers,
/// floats, characters, strings, collections containing one scalar, and
/// enums with 0 or 1 scalar arguments) are fields in structs. Enums with
/// zero arguments encode to their name, while enums of one argument encode
/// to their constituent value. `Option` types are also supported. (`None`
/// encodes to an empty field.)
///
/// Note that single-element tuple structs (the newtype pattern) are
/// supported. Unfortunately, to provide this functionality, a heuristic is
/// necessary to differentiate field names in normal structs from those in
/// tuple structs. As a result, field names in normal structs should not be
/// of the form `_field{}` where `{}` is its position in the struct.
///
/// # Example
///
/// This example encodes zoo animals that may not have a description.
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Count(usize);
///
/// #[derive(RustcEncodable)]
/// enum Group {
/// Bird,
/// Mammal,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part1 {
/// count: Count,
/// animal: &'static str,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part2 {
/// group: Group,
/// description: Option<&'static str>,
/// }
///
/// let records = vec![
/// (
/// Part1 { count: Count(7), animal: "penguin" },
/// Part2 { group: Group::Bird, description: Some("happy") },
/// ),
/// (
/// Part1 { count: Count(10), animal: "cheetah" },
/// Part2 { group: Group::Mammal, description: Some("fast") },
/// ),
/// (
/// Part1 { count: Count(4), animal: "armadillo" },
/// Part2 { group: Group::Mammal, description: Some("armored") },
/// (
/// Part1 { count: Count(9), animal: "platypus" },
/// Part2 { group: Group::Mammal, description: None },
/// ),
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,group,description
/// 7,penguin,Bird,happy
/// 10,cheetah,Mammal,fast
/// 4,armadillo,Mammal,armored
/// 9,platypus,Mammal,
/// ");
/// # }
/// ```
pub fn encode(&mut self, row: E) -> csv::Result<()> {
// Write headers if this is the first row.
if self.first_row {
let mut field_names_encoder = FieldNamesEncoder::new();
row.encode(&mut field_names_encoder)?;
self.csv.write(field_names_encoder.into_field_names().into_iter())?;
self.first_row = false;
}
// Write row.
let mut erecord = csv::Encoded::new();
row.encode(&mut erecord)?;
self.csv.write(erecord.unwrap().into_iter())
}
/// Flushes the underlying buffer.
pub fn flush(&mut self) -> Result<()> {
self.csv.flush()
}
}
#[cfg(test)]
mod tests {
use super::Writer;
#[derive(RustcEncodable)]
struct SimpleStruct {
a: usize,
b: usize,
}
#[test]
fn test_struct() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
w.encode(s1).unwrap();
let s2 = SimpleStruct { a: 3, b: 4 };
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n");
}
#[test]
fn test_tuple_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode((s1, s2)).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode((s3, s4)).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_array_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode([s1, s2]).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode([s3, s4]).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_vec_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode(vec![s1, s2]).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode(vec![s3, s4]).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_nested_tuples_of_structs() {
let mut w = Writer::from_memory();
w.encode((SimpleStruct { a: 0, b: 1 },
(SimpleStruct { a: 2, b: 3 }),
(SimpleStruct { a: 4, b: 5 }, (SimpleStruct { a: 6, b: 7 }))))
.unwrap();
w.encode((SimpleStruct { a: 8, b: 9 },
(SimpleStruct { a: 10, b: 11 }),
(SimpleStruct { a: 12, b: 13 }, (SimpleStruct { a: 14, b: 15 }))))
.unwrap();
assert_eq!(w.as_string(),
"a,b,a,b,a,b,a,b\n0,1,2,3,4,5,6,7\n8,9,10,11,12,13,14,15\n");
}
#[derive(RustcEncodable)]
struct StructWithLengthOneSeqs {
a: [usize; 1],
b: Vec<usize>,
c: (usize),
}
#[test]
fn test_struct_with_len_one_seqs() {
let mut w = Writer::from_memory();
let s1 = StructWithLengthOneSeqs {
a: [0],
b: vec![1],
c: (2),
};
w.encode(s1).unwrap();
let s2 = StructWithLengthOneSeqs {
a: [3],
b: vec![4],
c: (5),
};
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b,c\n0,1,2\n3,4,5\n");
}
#[derive(RustcEncodable)]
struct StructOfStruct {
p: SimpleStruct,
q: (usize, usize),
}
#[should_panic]
#[test]
fn test_struct_of_struct() {
let mut w = Writer::from_memory();
let s = StructOfStruct {
p: SimpleStruct { a: 0, b: 1 },
q: (2, 3),
};
w.encode(s).unwrap();
}
#[derive(RustcEncodable)]
struct StructWithLongerSeq {
a: [usize; 2],
}
#[should_panic]
#[test]
fn test_struct_with_longer_seq() {
let mut w = Writer::from_memory();
let s = StructWithLongerSeq { a: [0, 1] };
w.encode(s).unwrap();
}
#[should_panic]
#[test]
fn test_vec() {
let mut w = Writer::from_memory();
let array = vec![0, 1];
w.encode(array).unwrap();
}
} | random_line_split |
|

mod.rs

mod field_names_encoder;
use self::field_names_encoder::FieldNamesEncoder;
use csv::{self, Result};
use rustc_serialize::Encodable;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::marker::PhantomData;
use std::path::Path;
/// A CSV writer that automatically writes the headers.
///
/// This writer provides a convenient interface for encoding CSV data. While
/// creating CSV data is much easier than parsing it, having a writer can be
/// convenient because it can handle quoting for you automatically. Moreover,
/// this particular writer supports [`rustc_serialize::Encodable`][Encodable]
/// types, which makes it easy to write your custom types as CSV records and
/// automatically generate headers.
///
/// All CSV data produced by this writer, with default options, conforms with
/// [RFC 4180](http://tools.ietf.org/html/rfc4180).
///
/// One slight deviation is that records with a single empty field are always
/// encoded as `""`. This ensures that the record is not skipped since some
/// CSV parsers will ignore consecutive record terminators (like the one in
/// this crate).
///
/// If you don't want the writer to automatically write the header row,
/// use the [`csv`](https://github.com/BurntSushi/rust-csv) crate instead.
///
/// # Example
///
/// Here's an example that encodes a zoo of animals:
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Record {
/// count: usize,
/// animal: &'static str,
/// description: &'static str,
/// }
///
/// let records = vec![
/// Record { count: 7, animal: "penguin", description: "happy" },
/// Record { count: 10, animal: "cheetah", description: "fast" },
/// Record { count: 4, animal: "armadillo", description: "armored" },
/// Record { count: 9, animal: "platypus", description: "unique" },
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,description
/// 7,penguin,happy
/// 10,cheetah,fast
/// 4,armadillo,armored
/// 9,platypus,unique
/// ");
/// # }
/// ```
///
/// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html
pub struct Writer<W: Write, E: Encodable> {
csv: csv::Writer<W>,
first_row: bool,
record_type: PhantomData<E>,
}
impl<E: Encodable> Writer<File, E> {
/// Creates a new typed CSV writer that writes to the file path given.
///
/// The file is created if it does not already exist and is truncated
/// otherwise.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> {
Ok(Self::from_csv_writer(csv::Writer::from_file(path)?))
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Creates a new typed CSV writer that writes to the `io::Write` given.
///
/// Note that the writer is buffered for you automatically.
pub fn from_writer(w: W) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_writer(w))
}
/// Creates a new typed CSV writer that writes to the CSV writer given.
///
/// This lets you specify options to the underlying CSV writer (e.g. to use
/// a different delimiter).
pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> {
Writer {
csv: w,
first_row: true,
record_type: PhantomData,
}
}
/// Creates a new typed CSV writer that writes to the buffer given.
///
/// This lets you specify your own buffered writer (e.g., use a different
/// capacity). All other constructors wrap the writer given in a buffer
/// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_buffer(buf))
}
}
impl<E: Encodable> Writer<Vec<u8>, E> {
/// Creates a new CSV writer that writes to an in memory buffer. At any
/// time, `as_string` or `as_bytes` can be called to retrieve the
/// cumulative CSV data.
pub fn from_memory() -> Writer<Vec<u8>, E> {
Self::from_csv_writer(csv::Writer::from_memory())
}
/// Returns the written CSV data as a string.
pub fn as_string(&mut self) -> &str {
self.csv.as_string()
}
/// Returns the encoded CSV data as raw bytes.
pub fn as_bytes(&mut self) -> &[u8] {
self.csv.as_bytes()
}
/// Convert the Writer into a string of written CSV data
pub fn into_string(self) -> String {
self.csv.into_string()
}
/// Convert the Writer into a vector of encoded CSV bytes.
pub fn into_bytes(self) -> Vec<u8> {
self.csv.into_bytes()
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Writes a record by encoding any `Encodable` value.
///
/// When the first record is encoded, the headers (the field names in the
/// struct) are written prior to encoding the record.
///
/// The type that is being encoded into should correspond to *one full CSV
/// record*. This can be a single struct, or arbitrarily nested tuples,
/// arrays, Vecs, and structs, as long as all scalar types (integers,
/// floats, characters, strings, collections containing one scalar, and
/// enums with 0 or 1 scalar arguments) are fields in structs. Enums with
/// zero arguments encode to their name, while enums of one argument encode
/// to their constituent value. `Option` types are also supported. (`None`
/// encodes to an empty field.)
///
/// Note that single-element tuple structs (the newtype pattern) are
/// supported. Unfortunately, to provide this functionality, a heuristic is
/// necessary to differentiate field names in normal structs from those in
/// tuple structs. As a result, field names in normal structs should not be
/// of the form `_field{}` where `{}` is its position in the struct.
///
/// # Example
///
/// This example encodes zoo animals that may not have a description.
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Count(usize);
///
/// #[derive(RustcEncodable)]
/// enum Group {
/// Bird,
/// Mammal,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part1 {
/// count: Count,
/// animal: &'static str,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part2 {
/// group: Group,
/// description: Option<&'static str>,
/// }
///
/// let records = vec![
/// (
/// Part1 { count: Count(7), animal: "penguin" },
/// Part2 { group: Group::Bird, description: Some("happy") },
/// ),
/// (
/// Part1 { count: Count(10), animal: "cheetah" },
/// Part2 { group: Group::Mammal, description: Some("fast") },
/// ),
/// (
/// Part1 { count: Count(4), animal: "armadillo" },
/// Part2 { group: Group::Mammal, description: Some("armored") },
/// ),
/// (
/// Part1 { count: Count(9), animal: "platypus" },
/// Part2 { group: Group::Mammal, description: None },
/// ),
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,group,description
/// 7,penguin,Bird,happy
/// 10,cheetah,Mammal,fast
/// 4,armadillo,Mammal,armored
/// 9,platypus,Mammal,
/// ");
/// # }
/// ```
pub fn encode(&mut self, row: E) -> csv::Result<()> {
// Write headers if this is the first row.
if self.first_row {
let mut field_names_encoder = FieldNamesEncoder::new();
row.encode(&mut field_names_encoder)?;
self.csv.write(field_names_encoder.into_field_names().into_iter())?;
self.first_row = false;
}
// Write row.
let mut erecord = csv::Encoded::new();
row.encode(&mut erecord)?;
self.csv.write(erecord.unwrap().into_iter())
}
/// Flushes the underlying buffer.
pub fn flush(&mut self) -> Result<()> {
self.csv.flush()
}
}
#[cfg(test)]
mod tests {
use super::Writer;
#[derive(RustcEncodable)]
struct SimpleStruct {
a: usize,
b: usize,
}
#[test]
fn test_struct() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
w.encode(s1).unwrap();
let s2 = SimpleStruct { a: 3, b: 4 };
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n");
}
#[test]
fn test_tuple_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode((s1, s2)).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode((s3, s4)).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_array_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode([s1, s2]).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode([s3, s4]).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_vec_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode(vec![s1, s2]).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode(vec![s3, s4]).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_nested_tuples_of_structs() {
let mut w = Writer::from_memory();
w.encode((SimpleStruct { a: 0, b: 1 },
(SimpleStruct { a: 2, b: 3 }),
(SimpleStruct { a: 4, b: 5 }, (SimpleStruct { a: 6, b: 7 }))))
.unwrap();
w.encode((SimpleStruct { a: 8, b: 9 },
(SimpleStruct { a: 10, b: 11 }),
(SimpleStruct { a: 12, b: 13 }, (SimpleStruct { a: 14, b: 15 }))))
.unwrap();
assert_eq!(w.as_string(),
"a,b,a,b,a,b,a,b\n0,1,2,3,4,5,6,7\n8,9,10,11,12,13,14,15\n");
}
#[derive(RustcEncodable)]
struct StructWithLengthOneSeqs {
a: [usize; 1],
b: Vec<usize>,
c: (usize),
}
#[test]
fn test_struct_with_len_one_seqs() {
let mut w = Writer::from_memory();
let s1 = StructWithLengthOneSeqs {
a: [0],
b: vec![1],
c: (2),
};
w.encode(s1).unwrap();
let s2 = StructWithLengthOneSeqs {
a: [3],
b: vec![4],
c: (5),
};
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b,c\n0,1,2\n3,4,5\n");
}
#[derive(RustcEncodable)]
struct StructOfStruct {
p: SimpleStruct,
q: (usize, usize),
}
#[should_panic]
#[test]
fn test_struct_of_struct() {
let mut w = Writer::from_memory();
let s = StructOfStruct {
p: SimpleStruct { a: 0, b: 1 },
q: (2, 3),
};
w.encode(s).unwrap();
}
#[derive(RustcEncodable)]
struct StructWithLongerSeq {
a: [usize; 2],
}
#[should_panic]
#[test]
fn test_struct_with_longer_seq() {
let mut w = Writer::from_memory();
let s = StructWithLongerSeq { a: [0, 1] };
w.encode(s).unwrap();
}
#[should_panic]
#[test]
fn test_vec() {
let mut w = Writer::from_memory();
let array = vec![0, 1];
w.encode(array).unwrap();
}
}

interlock_handler.go

// MilevaDB Copyright (c) 2022 MilevaDB Authors: Karl Whitford, Spencer Fogelman, Josh Leder
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package INTERLOCKhandler
import (
"bytes"
"fmt"
"time"
"github.com/golang/protobuf/proto"
"github.com/ngaut/entangledstore/einsteindb/dbreader"
"github.com/ngaut/entangledstore/lockstore"
"github.com/whtcorpsinc/MilevaDB-Prod/blockcodec"
"github.com/whtcorpsinc/MilevaDB-Prod/solomonkey"
"github.com/whtcorpsinc/MilevaDB-Prod/expression"
"github.com/whtcorpsinc/MilevaDB-Prod/expression/aggregation"
"github.com/whtcorpsinc/MilevaDB-Prod/soliton/chunk"
"github.com/whtcorpsinc/MilevaDB-Prod/soliton/codec"
"github.com/whtcorpsinc/MilevaDB-Prod/soliton/collate"
"github.com/whtcorpsinc/MilevaDB-Prod/soliton/rowcodec"
"github.com/whtcorpsinc/MilevaDB-Prod/stochastikctx/stmtctx"
"github.com/whtcorpsinc/MilevaDB-Prod/types"
"github.com/whtcorpsinc/berolinaAllegroSQL/allegrosql"
"github.com/whtcorpsinc/berolinaAllegroSQL/perceptron"
"github.com/whtcorpsinc/berolinaAllegroSQL/terror"
"github.com/whtcorpsinc/solomonkeyproto/pkg/interlock"
"github.com/whtcorpsinc/solomonkeyproto/pkg/kvrpcpb"
"github.com/whtcorpsinc/errors"
"github.com/whtcorpsinc/fidelpb/go-fidelpb"
)
// HandleINTERLOCKRequest handles interlock request.
func HandleINTERLOCKRequest(dbReader *dbreader.DBReader, lockStore *lockstore.MemStore, req *interlock.Request) *interlock.Response {
switch req.Tp {
case solomonkey.ReqTypePosetDag:
return handleCoFIDelAGRequest(dbReader, lockStore, req)
case solomonkey.ReqTypeAnalyze:
return handleINTERLOCKAnalyzeRequest(dbReader, req)
case solomonkey.ReqTypeChecksum:
return handleINTERLOCKChecksumRequest(dbReader, req)
}
return &interlock.Response{OtherError: fmt.Sprintf("unsupported request type %d", req.GetTp())}
}
type posetPosetDagContext struct {
*evalContext
dbReader *dbreader.DBReader
lockStore *lockstore.MemStore
resolvedLocks []uint64
posetPosetDagReq *fidelpb.PosetDagRequest
keyRanges []*interlock.KeyRange
startTS uint64
}
// handleCoFIDelAGRequest handles interlock PosetDag request.
func handleCoFIDelAGRequest(dbReader *dbreader.DBReader, lockStore *lockstore.MemStore, req *interlock.Request) *interlock.Response {
startTime := time.Now()
resp := &interlock.Response{}
posetPosetDagCtx, posetPosetDagReq, err := buildPosetDag(dbReader, lockStore, req)
if err != nil {
resp.OtherError = err.Error()
return resp
}
closureExec, err := buildClosureExecutor(posetPosetDagCtx, posetPosetDagReq)
if err != nil {
return buildResp(nil, nil, posetPosetDagReq, err, posetPosetDagCtx.sc.GetWarnings(), time.Since(startTime))
}
chunks, err := closureExec.execute()
return buildResp(chunks, closureExec.counts, posetPosetDagReq, err, posetPosetDagCtx.sc.GetWarnings(), time.Since(startTime))
}
func buildPosetDag(reader *dbreader.DBReader, lockStore *lockstore.MemStore, req *interlock.Request) (*posetPosetDagContext, *fidelpb.PosetDagRequest, error) {
if len(req.Ranges) == 0 {
return nil, nil, errors.New("request range is null")
}
if req.GetTp() != solomonkey.ReqTypePosetDag {
return nil, nil, errors.Errorf("unsupported request type %d", req.GetTp())
}
posetPosetDagReq := new(fidelpb.PosetDagRequest)
err := proto.Unmarshal(req.Data, posetPosetDagReq)
if err != nil {
return nil, nil, errors.Trace(err)
}
sc := flagsToStatementContext(posetPosetDagReq.Flags)
sc.TimeZone = time.FixedZone("UTC", int(posetPosetDagReq.TimeZoneOffset))
ctx := &posetPosetDagContext{
evalContext: &evalContext{sc: sc},
dbReader: reader,
lockStore: lockStore,
posetPosetDagReq: posetPosetDagReq,
keyRanges: req.Ranges,
startTS: req.StartTs,
resolvedLocks: req.Context.ResolvedLocks,
}
scanExec := posetPosetDagReq.Executors[0]
if scanExec.Tp == fidelpb.ExecType_TypeTableScan {
ctx.setDeferredCausetInfo(scanExec.TblScan.DeferredCausets)
ctx.primaryDefCauss = scanExec.TblScan.PrimaryDeferredCausetIds
} else {
ctx.setDeferredCausetInfo(scanExec.IdxScan.DeferredCausets)
}
return ctx, posetPosetDagReq, err
}
func getAggInfo(ctx *posetPosetDagContext, pbAgg *fidelpb.Aggregation) ([]aggregation.Aggregation, []expression.Expression, error) {
length := len(pbAgg.AggFunc)
aggs := make([]aggregation.Aggregation, 0, length)
var err error
for _, expr := range pbAgg.AggFunc {
var aggExpr aggregation.Aggregation
aggExpr, err = aggregation.NewDistAggFunc(expr, ctx.fieldTps, ctx.sc)
if err != nil {
return nil, nil, errors.Trace(err)
}
aggs = append(aggs, aggExpr)
}
groupBys, err := convertToExprs(ctx.sc, ctx.fieldTps, pbAgg.GetGroupBy())
if err != nil {
return nil, nil, errors.Trace(err)
}
return aggs, groupBys, nil
}
func getTopNInfo(ctx *evalContext, topN *fidelpb.TopN) (heap *topNHeap, conds []expression.Expression, err error) {
pbConds := make([]*fidelpb.Expr, len(topN.OrderBy))
for i, item := range topN.OrderBy {
pbConds[i] = item.Expr
}
heap = &topNHeap{
totalCount: int(topN.Limit),
topNSorter: topNSorter{
orderByItems: topN.OrderBy,
sc: ctx.sc,
},
}
if conds, err = convertToExprs(ctx.sc, ctx.fieldTps, pbConds); err != nil {
return nil, nil, errors.Trace(err)
}
return heap, conds, nil
}
type evalContext struct {
colIDs map[int64]int
columnInfos []*fidelpb.DeferredCausetInfo
fieldTps []*types.FieldType
primaryDefCauss []int64
sc *stmtctx.StatementContext
}
func (e *evalContext) setDeferredCausetInfo(defcaus []*fidelpb.DeferredCausetInfo) {
e.columnInfos = make([]*fidelpb.DeferredCausetInfo, len(defcaus))
copy(e.columnInfos, defcaus)
e.colIDs = make(map[int64]int, len(e.columnInfos))
e.fieldTps = make([]*types.FieldType, 0, len(e.columnInfos))
for i, col := range e.columnInfos {
ft := fieldTypeFromPBDeferredCauset(col)
e.fieldTps = append(e.fieldTps, ft)
e.colIDs[col.GetDeferredCausetId()] = i
}
}
func (e *evalContext) newRowDecoder() (*rowcodec.ChunkDecoder, error) {
var (
pkDefCauss []int64
defcaus = make([]rowcodec.DefCausInfo, 0, len(e.columnInfos))
)
for i := range e.columnInfos {
info := e.columnInfos[i]
ft := e.fieldTps[i]
col := rowcodec.DefCausInfo{
ID: info.DeferredCausetId,
Ft: ft,
IsPKHandle: info.PkHandle,
}
defcaus = append(defcaus, col)
if info.PkHandle {
pkDefCauss = append(pkDefCauss, info.DeferredCausetId)
}
}
if len(pkDefCauss) == 0 {
if e.primaryDefCauss != nil {
pkDefCauss = e.primaryDefCauss
} else {
pkDefCauss = []int64{0}
}
}
def := func(i int, chk *chunk.Chunk) error {
info := e.columnInfos[i]
if info.PkHandle || len(info.DefaultVal) == 0 {
chk.AppendNull(i)
return nil
}
decoder := codec.NewDecoder(chk, e.sc.TimeZone)
_, err := decoder.DecodeOne(info.DefaultVal, i, e.fieldTps[i])
if err != nil {
return err
}
return nil
}
return rowcodec.NewChunkDecoder(defcaus, pkDefCauss, def, e.sc.TimeZone), nil
}
// decodeRelatedDeferredCausetVals decodes data to Causet slice according to the event information.
func (e *evalContext) decodeRelatedDeferredCausetVals(relatedDefCausOffsets []int, value [][]byte, event []types.Causet) error {
var err error
for _, offset := range relatedDefCausOffsets {
event[offset], err = blockcodec.DecodeDeferredCausetValue(value[offset], e.fieldTps[offset], e.sc.TimeZone)
if err != nil {
return errors.Trace(err)
}
}
return nil
}
// flagsToStatementContext creates a StatementContext from a `fidelpb.SelectRequest.Flags`.
func flagsToStatementContext(flags uint64) *stmtctx.StatementContext {
sc := new(stmtctx.StatementContext)
sc.IgnoreTruncate = (flags & perceptron.FlagIgnoreTruncate) > 0
sc.TruncateAsWarning = (flags & perceptron.FlagTruncateAsWarning) > 0
sc.InInsertStmt = (flags & perceptron.FlagInInsertStmt) > 0
sc.InSelectStmt = (flags & perceptron.FlagInSelectStmt) > 0
sc.InDeleteStmt = (flags & perceptron.FlagInUFIDelateOrDeleteStmt) > 0
sc.OverflowAsWarning = (flags & perceptron.FlagOverflowAsWarning) > 0
sc.IgnoreZeroInDate = (flags & perceptron.FlagIgnoreZeroInDate) > 0
sc.DividedByZeroAsWarning = (flags & perceptron.FlagDividedByZeroAsWarning) > 0
return sc
}
// ErrLocked is returned when trying to Read/Write on a locked key. Client should
// backoff or cleanup the dagger then retry.
type ErrLocked struct {
Key []byte
Primary []byte
StartTS uint64
TTL uint64
LockType uint8
}
// BuildLockErr generates ErrKeyLocked objects
func BuildLockErr(key []byte, primaryKey []byte, startTS uint64, TTL uint64, lockType uint8) *ErrLocked {
errLocked := &ErrLocked{
Key: key,
Primary: primaryKey,
StartTS: startTS,
TTL: TTL,
LockType: lockType,
}
return errLocked
}
// Error formats the dagger to a string.
func (e *ErrLocked) Error() string {
return fmt.Sprintf("key is locked, key: %q, Type: %v, primary: %q, startTS: %v", e.Key, e.LockType, e.Primary, e.StartTS)
}
func buildResp(chunks []fidelpb.Chunk, counts []int64, posetPosetDagReq *fidelpb.PosetDagRequest, err error, warnings []stmtctx.ALLEGROSQLWarn, dur time.Duration) *interlock.Response {
resp := &interlock.Response{}
selResp := &fidelpb.SelectResponse{
Error: toPBError(err),
Chunks: chunks,
OutputCounts: counts,
}
if posetPosetDagReq.DefCauslectExecutionSummaries != nil && *posetPosetDagReq.DefCauslectExecutionSummaries {
execSummary := make([]*fidelpb.ExecutorExecutionSummary, len(posetPosetDagReq.Executors))
for i := range execSummary {
// TODO: Add real executor execution summary information.
execSummary[i] = &fidelpb.ExecutorExecutionSummary{}
}
selResp.ExecutionSummaries = execSummary
}
if len(warnings) > 0 {
selResp.Warnings = make([]*fidelpb.Error, 0, len(warnings))
for i := range warnings {
selResp.Warnings = append(selResp.Warnings, toPBError(warnings[i].Err))
}
}
if locked, ok := errors.Cause(err).(*ErrLocked); ok {
resp.Locked = &kvrpcpb.LockInfo{
Key: locked.Key,
PrimaryLock: locked.Primary,
LockVersion: locked.StartTS,
LockTtl: locked.TTL,
}
}
resp.ExecDetails = &kvrpcpb.ExecDetails{
HandleTime: &kvrpcpb.HandleTime{ProcessMs: int64(dur / time.Millisecond)},
}
data, err := proto.Marshal(selResp)
if err != nil {
resp.OtherError = err.Error()
return resp
}
resp.Data = data
return resp
}
func toPBError(err error) *fidelpb.Error {
if err == nil {
return nil
}
perr := new(fidelpb.Error)
e := errors.Cause(err)
switch y := e.(type) {
case *terror.Error:
tmp := terror.ToALLEGROSQLError(y)
perr.Code = int32(tmp.Code)
perr.Msg = tmp.Message
case *allegrosql.ALLEGROSQLError:
perr.Code = int32(y.Code)
perr.Msg = y.Message
default:
perr.Code = int32(1)
perr.Msg = err.Error()
}
return perr
}
// extractKVRanges extracts solomonkey.KeyRanges slice from a SelectRequest.
func extractKVRanges(startKey, endKey []byte, keyRanges []*interlock.KeyRange, descScan bool) (kvRanges []solomonkey.KeyRange, err error) {
kvRanges = make([]solomonkey.KeyRange, 0, len(keyRanges))
for _, kran := range keyRanges {
if bytes.Compare(kran.GetStart(), kran.GetEnd()) >= 0 {
err = errors.Errorf("invalid range, start should be smaller than end: %v %v", kran.GetStart(), kran.GetEnd())
return
}
upperKey := kran.GetEnd()
if bytes.Compare(upperKey, startKey) <= 0 {
continue
}
lowerKey := kran.GetStart()
if len(endKey) != 0 && bytes.Compare(lowerKey, endKey) >= 0 {
break
}
r := solomonkey.KeyRange{
StartKey: solomonkey.Key(maxStartKey(lowerKey, startKey)),
EndKey: solomonkey.Key(minEndKey(upperKey, endKey)),
}
kvRanges = append(kvRanges, r)
}
if descScan {
reverseKVRanges(kvRanges)
}
return
}
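// Illustrative sketch, not part of the original file: clipping request ranges
// to a region's [startKey, endKey) bounds with extractKVRanges. The Start/End
// field names on interlock.KeyRange are assumed from the GetStart/GetEnd
// accessors used above.
func exampleExtractKVRanges() {
	ranges := []*interlock.KeyRange{
		{Start: []byte("a"), End: []byte("m")},
		{Start: []byte("m"), End: []byte("z")},
	}
	// Only the overlap with ["c", "p") survives; descScan reverses the result.
	kvRanges, err := extractKVRanges([]byte("c"), []byte("p"), ranges, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(kvRanges)
}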
func reverseKVRanges(kvRanges []solomonkey.KeyRange) {
for i := 0; i < len(kvRanges)/2; i++ {
j := len(kvRanges) - i - 1
kvRanges[i], kvRanges[j] = kvRanges[j], kvRanges[i]
}
}
func maxStartKey(rangeStartKey solomonkey.Key, regionStartKey []byte) []byte {
if bytes.Compare([]byte(rangeStartKey), regionStartKey) > 0 {
return []byte(rangeStartKey)
}
return regionStartKey
}
func minEndKey(rangeEndKey solomonkey.Key, regionEndKey []byte) []byte {
if len(regionEndKey) == 0 || bytes.Compare([]byte(rangeEndKey), regionEndKey) < 0 {
return []byte(rangeEndKey)
}
return regionEndKey
}
const rowsPerChunk = 64
func appendRow(chunks []fidelpb.Chunk, data []byte, rowCnt int) []fidelpb.Chunk {
if rowCnt%rowsPerChunk == 0 {
chunks = append(chunks, fidelpb.Chunk{})
}
cur := &chunks[len(chunks)-1]
cur.RowsData = append(cur.RowsData, data...)
return chunks
}
// fieldTypeFromPBDeferredCauset creates a types.FieldType from fidelpb.DeferredCausetInfo.
func fieldTypeFromPBDeferredCauset(col *fidelpb.DeferredCausetInfo) *types.FieldType {
return &types.FieldType{
Tp: byte(col.GetTp()),
Flag: uint(col.Flag),
Flen: int(col.GetDeferredCausetLen()),
Decimal: int(col.GetDecimal()),
Elems: col.Elems,
DefCauslate: allegrosql.DefCauslations[uint8(collate.RestoreDefCauslationIDIfNeeded(col.GetDefCauslation()))],
}
}
// handleINTERLOCKChecksumRequest handles interlock check sum request.
func handleINTERLOCKChecksumRequest(dbReader *dbreader.DBReader, req *interlock.Request) *interlock.Response {
resp := &fidelpb.ChecksumResponse{
Checksum: 1,
TotalKvs: 1,
TotalBytes: 1,
}
data, err := resp.Marshal()
if err != nil {
return &interlock.Response{OtherError: fmt.Sprintf("marshal checksum response error: %v", err)}
}
return &interlock.Response{Data: data}
} | }

widget.js

/**
* Created by jakubniezgoda on 15/03/2017.
*/
Stage.defineWidget({
id: 'graph',
name: 'Deployment metric graph',
description: 'Display graph with deployment metric data',
initialWidth: 6,
initialHeight: 20,
showHeader: true,
showBorder: true,
isReact: true,
permission: Stage.GenericConfig.WIDGET_PERMISSION('graph'),
color: 'blue',
categories: [Stage.GenericConfig.CATEGORY.DEPLOYMENTS, Stage.GenericConfig.CATEGORY.CHARTS_AND_STATISTICS],
initialConfiguration: [
Stage.GenericConfig.POLLING_TIME_CONFIG(5),
{id: 'nodeFilter', name: 'Node filter', description: 'Node filter to limit number of available metrics', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.NodeFilter, default: Stage.Basic.NodeFilter.EMPTY_VALUE, storeValueInContext: true},
{id: 'charts', name: 'Charts table', description: 'Charts configuration table', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 5, columns: [
{name: 'metric', label: 'Metric', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.MetricFilter, description: 'Metric to be presented on the chart', filterContextName: 'nodeFilter'},
{name: 'label', label: 'Label', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'Chart label'}
]},
{id: 'query', name: 'Custom Influx Query', description: 'Please note that below query builder overrides the series defined in \'Charts table\'', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 1, columns: [
{name: 'qSelect', label: 'SELECT', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: ''},
{name: 'qFrom', label: 'FROM', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${deploymentId} token to inject dynamic deployment ID. Example: \'/${deploymentId}\..*\.((memory_MemFree))$/\''},
{name: 'qWhere', label: 'WHERE', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${timeFilter} token to inject dynamic data/time ranges.'}
]},
{id: 'type', name: 'Charts type', items: [
{name:'Line chart', value:Stage.Basic.Graphs.Graph.LINE_CHART_TYPE},
{name:'Bar chart', value:Stage.Basic.Graphs.Graph.BAR_CHART_TYPE},
{name:'Area chart', value:Stage.Basic.Graphs.Graph.AREA_CHART_TYPE}],
default: Stage.Basic.Graphs.Graph.LINE_CHART_TYPE, type: Stage.Basic.GenericField.LIST_TYPE},
{id: 'timeFilter', name: 'Time range and resolution', description: 'Time range and time resolution for all defined charts',
type: Stage.Basic.GenericField.CUSTOM_TYPE, component: Stage.Basic.TimeFilter,
default: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE, defaultValue: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE}
],
UNCONFIGURED_STATE: 'unconfigured',
EMPTY_RESPONSE_STATE: 'emptyResponse',
_prepareData: function(data, xDataKey) {
const TIME_FORMAT = 'YYYY-MM-DD HH:mm:ss';
const MAX_NUMBER_OF_POINTS = 200;
const TIME_INDEX = 0;
const VALUE_INDEX = 1;
const REFERENCE_METRIC_INDEX = 0;
const NUMBER_OF_METRICS = data.length;
const NUMBER_OF_POINTS = data[REFERENCE_METRIC_INDEX].points.length;
let points = [];
// Data conversion to recharts format
// The time points of metric no. REFERENCE_METRIC_INDEX are used as the reference time axis
for (let i = 0; i < NUMBER_OF_POINTS; i++) {
let point = { [xDataKey]: Stage.Utils.formatTimestamp(data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX], TIME_FORMAT, null) };
for (let j = 0; j < NUMBER_OF_METRICS; j++) {
if (data[j].points[i] &&
data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX] === data[j].points[i][TIME_INDEX])
{
let metricName = data[j].name;
let pointValue = data[j].points[i][VALUE_INDEX];
point[metricName] = pointValue;
}
}
points.push(point);
}
// Data optimization (show no more than MAX_NUMBER_OF_POINTS points on the graph)
if (points.length > MAX_NUMBER_OF_POINTS) {
let optimizedPoints = [];
let delta = parseFloat(points.length / MAX_NUMBER_OF_POINTS);
for (let i = 0; i < points.length; i = i + delta) {
optimizedPoints.push(points[Math.floor(i)]);
}
points = optimizedPoints;
}
return points;
},
_getChartsMetricsList: function(charts) {
return _.chain(charts)
.filter((graph) => !_.isEmpty(graph.metric))
.map((graph) => graph.metric)
.uniq()
.value();
},
_getChartsConfiguration: function(charts, query, data) {
let chartsConfig = [];
if (!_.isEmpty(query)) {
_.forEach(data, (chart) => {
chartsConfig.push({
name: chart.name,
label: chart.name,
axisLabel: ''
});
})
} else {
_.forEach(charts, (chart) => {
let chartName = chart.metric;
if (!_.isEmpty(chartName)) {
chartsConfig.push({
name: chartName,
label: chart.label ? chart.label : chartName,
axisLabel: ''
});
}
})
chartsConfig = _.uniqBy(chartsConfig, 'name');
}
return chartsConfig;
},
| (string){
return string.replace(/;/g, '');
},
_prepareInfluxQuery: function(queries, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup) {
return _.map(queries, (queryParams) => {
let selectWhat = this._sanitizeQuery(queryParams.qSelect);
let selectFrom = this._sanitizeQuery(queryParams.qFrom);
let selectWhere = this._sanitizeQuery(queryParams.qWhere);
if (!_.isEmpty(selectWhat) && !_.isEmpty(selectFrom)) {
if ((_.includes(selectFrom, '${deploymentId}') && _.isEmpty(deploymentId)) ||
(_.includes(selectFrom, '${nodeId}') && _.isEmpty(nodeId)) ||
(_.includes(selectFrom, '${nodeInstanceId}') && _.isEmpty(nodeInstanceId)))
return {};
selectFrom = _.replace(selectFrom, '${deploymentId}', deploymentId);
selectFrom = _.replace(selectFrom, '${nodeId}', nodeId);
selectFrom = _.replace(selectFrom, '${nodeInstanceId}', nodeInstanceId);
selectWhere = _.replace(selectWhere, '${timeFilter}', `time > ${from} and time < ${to} group by time(${timeGroup})`);
if (_.isEmpty(selectWhere))
return {qSelect: selectWhat, qFrom: selectFrom};
else
return {qSelect: selectWhat, qFrom: selectFrom, qWhere: selectWhere};
} else
return {};
});
},
_isEmptyResponse: function(widget, data) {
return data.state === widget.definition.EMPTY_RESPONSE_STATE;
},
_isWidgetNotConfigured: function(widget, data) {
return data.state === widget.definition.UNCONFIGURED_STATE;
},
fetchParams: function(widget, toolbox) {
let deploymentId = toolbox.getContext().getValue('deploymentId');
let nodeId = toolbox.getContext().getValue('nodeId');
let nodeInstanceId = toolbox.getContext().getValue('nodeInstanceId');
let nodeFilterFromWidget = widget.configuration.nodeFilter;
if (nodeFilterFromWidget.deploymentId || nodeFilterFromWidget.nodeId || nodeFilterFromWidget.nodeInstanceId) {
deploymentId = nodeFilterFromWidget.deploymentId;
nodeId = nodeFilterFromWidget.nodeId;
nodeInstanceId = nodeFilterFromWidget.nodeInstanceId;
}
let timeFilterFromWidget = widget.configuration.timeFilter;
let timeFilterFromContext = toolbox.getContext().getValue('timeFilter');
let timeStart = _.get(timeFilterFromContext, 'start', timeFilterFromWidget.start);
timeStart = moment(timeStart).isValid() ? `${moment(timeStart).unix()}s` : timeStart;
let timeEnd = _.get(timeFilterFromContext, 'end', timeFilterFromWidget.end);
timeEnd = moment(timeEnd).isValid() ? `${moment(timeEnd).unix()}s` : timeEnd;
let timeResolution = _.get(timeFilterFromContext, 'resolution', timeFilterFromWidget.resolution);
let timeUnit = _.get(timeFilterFromContext, 'unit', timeFilterFromWidget.unit);
let timeGroup = `${timeResolution}${timeUnit}`;
return { deploymentId, nodeId, nodeInstanceId, timeStart, timeEnd, timeGroup };
},
fetchData: function(widget, toolbox, params) {
const actions = new Stage.Common.InfluxActions(toolbox);
const deploymentId = params.deploymentId;
const nodeId = params.nodeId;
const nodeInstanceId = params.nodeInstanceId;
const metrics = this._getChartsMetricsList(widget.configuration.charts);
const from = params.timeStart;
const to = params.timeEnd;
const timeGroup = params.timeGroup;
const preparedQuery = _.head(this._prepareInfluxQuery(widget.configuration.query, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup));
if (!_.isEmpty(preparedQuery)) {
toolbox.loading(true);
return actions.doRunQuery(preparedQuery.qSelect, preparedQuery.qFrom, preparedQuery.qWhere).then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
}).catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check your Influx query syntax and try again. Error: ' +
(error.message || error));
});
} else if (!_.isEmpty(deploymentId) && !_.isEmpty(nodeInstanceId) && !_.isEmpty(metrics)) {
toolbox.loading(true);
return actions.doGetMetric(deploymentId, nodeId, nodeInstanceId, metrics, from, to, timeGroup)
.then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
})
.catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check Deployment ID, Node ID, Node Instance ID, Metric and time range. Error: ' +
(error.message || error));
});
} else {
toolbox.loading(false);
return Promise.resolve({state: widget.definition.UNCONFIGURED_STATE});
}
},
render: function(widget,data,error,toolbox) {
let {charts, query, type} = widget.configuration;
let {Message, Icon} = Stage.Basic;
if (_.isEmpty(data)) {
return (
<Stage.Basic.Loading/>
);
} else if (this._isWidgetNotConfigured(widget, data)) {
return (
<Message info icon>
<Icon name='info' />
Please select deployment, node instance and metric in widget's configuration to present the data graph.
</Message>
);
} else if (this._isEmptyResponse(widget, data)) {
return (
<Message info icon>
<Icon name='ban' />
No data fetched for specified chart(s) configuration.
</Message>
);
}
let {Graph} = Stage.Basic.Graphs;
return (
<Graph type={type}
data={this._prepareData(data, Graph.DEFAULT_X_DATA_KEY)}
charts={this._getChartsConfiguration(charts, query, data)} />
);
}
}); | _sanitizeQuery | identifier_name |
widget.js | /**
* Created by jakubniezgoda on 15/03/2017.
*/
Stage.defineWidget({
id: 'graph',
name: 'Deployment metric graph',
description: 'Display graph with deployment metric data',
initialWidth: 6,
initialHeight: 20,
showHeader: true,
showBorder: true,
isReact: true,
permission: Stage.GenericConfig.WIDGET_PERMISSION('graph'),
color: 'blue',
categories: [Stage.GenericConfig.CATEGORY.DEPLOYMENTS, Stage.GenericConfig.CATEGORY.CHARTS_AND_STATISTICS],
initialConfiguration: [
Stage.GenericConfig.POLLING_TIME_CONFIG(5),
{id: 'nodeFilter', name: 'Node filter', description: 'Node filter to limit number of available metrics', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.NodeFilter, default: Stage.Basic.NodeFilter.EMPTY_VALUE, storeValueInContext: true},
{id: 'charts', name: 'Charts table', description: 'Charts configuration table', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 5, columns: [
{name: 'metric', label: 'Metric', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.MetricFilter, description: 'Metric to be presented on the chart', filterContextName: 'nodeFilter'},
{name: 'label', label: 'Label', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'Chart label'}
]},
{id: 'query', name: 'Custom Influx Query', description: 'Please note that below query builder overrides the series defined in \'Charts table\'', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 1, columns: [
{name: 'qSelect', label: 'SELECT', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: ''},
{name: 'qFrom', label: 'FROM', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${deploymentId} token to inject dynamic deployment ID. Example: \'/${deploymentId}\..*\.((memory_MemFree))$/\''},
{name: 'qWhere', label: 'WHERE', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${timeFilter} token to inject dynamic data/time ranges.'}
]},
{id: 'type', name: 'Charts type', items: [
{name:'Line chart', value:Stage.Basic.Graphs.Graph.LINE_CHART_TYPE},
{name:'Bar chart', value:Stage.Basic.Graphs.Graph.BAR_CHART_TYPE},
{name:'Area chart', value:Stage.Basic.Graphs.Graph.AREA_CHART_TYPE}],
default: Stage.Basic.Graphs.Graph.LINE_CHART_TYPE, type: Stage.Basic.GenericField.LIST_TYPE},
{id: 'timeFilter', name: 'Time range and resolution', description: 'Time range and time resolution for all defined charts',
type: Stage.Basic.GenericField.CUSTOM_TYPE, component: Stage.Basic.TimeFilter,
default: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE, defaultValue: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE}
],
UNCONFIGURED_STATE: 'unconfigured',
EMPTY_RESPONSE_STATE: 'emptyResponse',
_prepareData: function(data, xDataKey) {
const TIME_FORMAT = 'YYYY-MM-DD HH:mm:ss';
const MAX_NUMBER_OF_POINTS = 200;
const TIME_INDEX = 0;
const VALUE_INDEX = 1;
const REFERENCE_METRIC_INDEX = 0;
const NUMBER_OF_METRICS = data.length;
const NUMBER_OF_POINTS = data[REFERENCE_METRIC_INDEX].points.length;
let points = [];
// Data conversion to recharts format
// The time points of metric no. REFERENCE_METRIC_INDEX are used as the reference time axis
for (let i = 0; i < NUMBER_OF_POINTS; i++) {
let point = { [xDataKey]: Stage.Utils.formatTimestamp(data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX], TIME_FORMAT, null) };
for (let j = 0; j < NUMBER_OF_METRICS; j++) {
if (data[j].points[i] &&
data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX] === data[j].points[i][TIME_INDEX])
{
let metricName = data[j].name;
let pointValue = data[j].points[i][VALUE_INDEX];
point[metricName] = pointValue;
}
}
points.push(point);
}
// Data optimization (show no more than MAX_NUMBER_OF_POINTS points on the graph)
if (points.length > MAX_NUMBER_OF_POINTS) {
let optimizedPoints = [];
let delta = parseFloat(points.length / MAX_NUMBER_OF_POINTS);
for (let i = 0; i < points.length; i = i + delta) {
optimizedPoints.push(points[Math.floor(i)]);
}
points = optimizedPoints;
}
return points;
},
_getChartsMetricsList: function(charts) {
return _.chain(charts)
.filter((graph) => !_.isEmpty(graph.metric))
.map((graph) => graph.metric)
.uniq()
.value();
},
_getChartsConfiguration: function(charts, query, data) {
let chartsConfig = [];
if (!_.isEmpty(query)) {
_.forEach(data, (chart) => {
chartsConfig.push({
name: chart.name,
label: chart.name,
axisLabel: ''
});
})
} else {
_.forEach(charts, (chart) => {
let chartName = chart.metric;
if (!_.isEmpty(chartName)) {
chartsConfig.push({
name: chartName,
label: chart.label ? chart.label : chartName,
axisLabel: ''
});
}
})
chartsConfig = _.uniqBy(chartsConfig, 'name');
}
return chartsConfig;
},
_sanitizeQuery(string){
return string.replace(/;/g, '');
},
_prepareInfluxQuery: function(queries, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup) {
return _.map(queries, (queryParams) => {
let selectWhat = this._sanitizeQuery(queryParams.qSelect);
let selectFrom = this._sanitizeQuery(queryParams.qFrom);
let selectWhere = this._sanitizeQuery(queryParams.qWhere);
if (!_.isEmpty(selectWhat) && !_.isEmpty(selectFrom)) {
if ((_.includes(selectFrom, '${deploymentId}') && _.isEmpty(deploymentId)) ||
(_.includes(selectFrom, '${nodeId}') && _.isEmpty(nodeId)) ||
(_.includes(selectFrom, '${nodeInstanceId}') && _.isEmpty(nodeInstanceId)))
return {};
selectFrom = _.replace(selectFrom, '${deploymentId}', deploymentId);
selectFrom = _.replace(selectFrom, '${nodeId}', nodeId);
selectFrom = _.replace(selectFrom, '${nodeInstanceId}', nodeInstanceId);
selectWhere = _.replace(selectWhere, '${timeFilter}', `time > ${from} and time < ${to} group by time(${timeGroup})`);
if (_.isEmpty(selectWhere))
return {qSelect: selectWhat, qFrom: selectFrom};
else
return {qSelect: selectWhat, qFrom: selectFrom, qWhere: selectWhere};
} else
return {};
});
},
_isEmptyResponse: function(widget, data) {
return data.state === widget.definition.EMPTY_RESPONSE_STATE;
},
_isWidgetNotConfigured: function(widget, data) {
return data.state === widget.definition.UNCONFIGURED_STATE;
},
fetchParams: function(widget, toolbox) {
let deploymentId = toolbox.getContext().getValue('deploymentId'); | deploymentId = nodeFilterFromWidget.deploymentId;
nodeId = nodeFilterFromWidget.nodeId;
nodeInstanceId = nodeFilterFromWidget.nodeInstanceId;
}
let timeFilterFromWidget = widget.configuration.timeFilter;
let timeFilterFromContext = toolbox.getContext().getValue('timeFilter');
let timeStart = _.get(timeFilterFromContext, 'start', timeFilterFromWidget.start);
timeStart = moment(timeStart).isValid() ? `${moment(timeStart).unix()}s` : timeStart;
let timeEnd = _.get(timeFilterFromContext, 'end', timeFilterFromWidget.end);
timeEnd = moment(timeEnd).isValid() ? `${moment(timeEnd).unix()}s` : timeEnd;
let timeResolution = _.get(timeFilterFromContext, 'resolution', timeFilterFromWidget.resolution);
let timeUnit = _.get(timeFilterFromContext, 'unit', timeFilterFromWidget.unit);
let timeGroup = `${timeResolution}${timeUnit}`;
return { deploymentId, nodeId, nodeInstanceId, timeStart, timeEnd, timeGroup };
},
fetchData: function(widget, toolbox, params) {
const actions = new Stage.Common.InfluxActions(toolbox);
const deploymentId = params.deploymentId;
const nodeId = params.nodeId;
const nodeInstanceId = params.nodeInstanceId;
const metrics = this._getChartsMetricsList(widget.configuration.charts);
const from = params.timeStart;
const to = params.timeEnd;
const timeGroup = params.timeGroup;
const preparedQuery = _.head(this._prepareInfluxQuery(widget.configuration.query, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup));
if (!_.isEmpty(preparedQuery)) {
toolbox.loading(true);
return actions.doRunQuery(preparedQuery.qSelect, preparedQuery.qFrom, preparedQuery.qWhere).then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
}).catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check your Influx query syntax and try again. Error: ' +
(error.message || error));
});
} else if (!_.isEmpty(deploymentId) && !_.isEmpty(nodeInstanceId) && !_.isEmpty(metrics)) {
toolbox.loading(true);
return actions.doGetMetric(deploymentId, nodeId, nodeInstanceId, metrics, from, to, timeGroup)
.then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
})
.catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check Deployment ID, Node ID, Node Instance ID, Metric and time range. Error: ' +
(error.message || error));
});
} else {
toolbox.loading(false);
return Promise.resolve({state: widget.definition.UNCONFIGURED_STATE});
}
},
render: function(widget,data,error,toolbox) {
let {charts, query, type} = widget.configuration;
let {Message, Icon} = Stage.Basic;
if (_.isEmpty(data)) {
return (
<Stage.Basic.Loading/>
);
} else if (this._isWidgetNotConfigured(widget, data)) {
return (
<Message info icon>
<Icon name='info' />
Please select deployment, node instance and metric in widget's configuration to present the data graph.
</Message>
);
} else if (this._isEmptyResponse(widget, data)) {
return (
<Message info icon>
<Icon name='ban' />
No data fetched for specified chart(s) configuration.
</Message>
);
}
let {Graph} = Stage.Basic.Graphs;
return (
<Graph type={type}
data={this._prepareData(data, Graph.DEFAULT_X_DATA_KEY)}
charts={this._getChartsConfiguration(charts, query, data)} />
);
}
}); | let nodeId = toolbox.getContext().getValue('nodeId');
let nodeInstanceId = toolbox.getContext().getValue('nodeInstanceId');
let nodeFilterFromWidget = widget.configuration.nodeFilter;
if (nodeFilterFromWidget.deploymentId || nodeFilterFromWidget.nodeId || nodeFilterFromWidget.nodeInstanceId) { | random_line_split |
widget.js | /**
* Created by jakubniezgoda on 15/03/2017.
*/
Stage.defineWidget({
id: 'graph',
name: 'Deployment metric graph',
description: 'Display graph with deployment metric data',
initialWidth: 6,
initialHeight: 20,
showHeader: true,
showBorder: true,
isReact: true,
permission: Stage.GenericConfig.WIDGET_PERMISSION('graph'),
color: 'blue',
categories: [Stage.GenericConfig.CATEGORY.DEPLOYMENTS, Stage.GenericConfig.CATEGORY.CHARTS_AND_STATISTICS],
initialConfiguration: [
Stage.GenericConfig.POLLING_TIME_CONFIG(5),
{id: 'nodeFilter', name: 'Node filter', description: 'Node filter to limit number of available metrics', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.NodeFilter, default: Stage.Basic.NodeFilter.EMPTY_VALUE, storeValueInContext: true},
{id: 'charts', name: 'Charts table', description: 'Charts configuration table', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 5, columns: [
{name: 'metric', label: 'Metric', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.MetricFilter, description: 'Metric to be presented on the chart', filterContextName: 'nodeFilter'},
{name: 'label', label: 'Label', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'Chart label'}
]},
{id: 'query', name: 'Custom Influx Query', description: 'Please note that below query builder overrides the series defined in \'Charts table\'', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 1, columns: [
{name: 'qSelect', label: 'SELECT', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: ''},
{name: 'qFrom', label: 'FROM', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${deploymentId} token to inject dynamic deployment ID. Example: \'/${deploymentId}\..*\.((memory_MemFree))$/\''},
{name: 'qWhere', label: 'WHERE', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${timeFilter} token to inject dynamic data/time ranges.'}
]},
{id: 'type', name: 'Charts type', items: [
{name:'Line chart', value:Stage.Basic.Graphs.Graph.LINE_CHART_TYPE},
{name:'Bar chart', value:Stage.Basic.Graphs.Graph.BAR_CHART_TYPE},
{name:'Area chart', value:Stage.Basic.Graphs.Graph.AREA_CHART_TYPE}],
default: Stage.Basic.Graphs.Graph.LINE_CHART_TYPE, type: Stage.Basic.GenericField.LIST_TYPE},
{id: 'timeFilter', name: 'Time range and resolution', description: 'Time range and time resolution for all defined charts',
type: Stage.Basic.GenericField.CUSTOM_TYPE, component: Stage.Basic.TimeFilter,
default: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE, defaultValue: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE}
],
UNCONFIGURED_STATE: 'unconfigured',
EMPTY_RESPONSE_STATE: 'emptyResponse',
_prepareData: function(data, xDataKey) {
const TIME_FORMAT = 'YYYY-MM-DD HH:mm:ss';
const MAX_NUMBER_OF_POINTS = 200;
const TIME_INDEX = 0;
const VALUE_INDEX = 1;
const REFERENCE_METRIC_INDEX = 0;
const NUMBER_OF_METRICS = data.length;
const NUMBER_OF_POINTS = data[REFERENCE_METRIC_INDEX].points.length;
let points = [];
// Data conversion to recharts format
// The time points of metric no. REFERENCE_METRIC_INDEX are used as the reference time axis
for (let i = 0; i < NUMBER_OF_POINTS; i++) {
let point = { [xDataKey]: Stage.Utils.formatTimestamp(data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX], TIME_FORMAT, null) };
for (let j = 0; j < NUMBER_OF_METRICS; j++) {
if (data[j].points[i] &&
data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX] === data[j].points[i][TIME_INDEX])
{
let metricName = data[j].name;
let pointValue = data[j].points[i][VALUE_INDEX];
point[metricName] = pointValue;
}
}
points.push(point);
}
// Data optimization (show no more than MAX_NUMBER_OF_POINTS points on the graph)
if (points.length > MAX_NUMBER_OF_POINTS) {
let optimizedPoints = [];
let delta = parseFloat(points.length / MAX_NUMBER_OF_POINTS);
for (let i = 0; i < points.length; i = i + delta) {
optimizedPoints.push(points[Math.floor(i)]);
}
points = optimizedPoints;
}
return points;
},
_getChartsMetricsList: function(charts) {
return _.chain(charts)
.filter((graph) => !_.isEmpty(graph.metric))
.map((graph) => graph.metric)
.uniq()
.value();
},
_getChartsConfiguration: function(charts, query, data) {
let chartsConfig = [];
if (!_.isEmpty(query)) {
_.forEach(data, (chart) => {
chartsConfig.push({
name: chart.name,
label: chart.name,
axisLabel: ''
});
})
} else {
_.forEach(charts, (chart) => {
let chartName = chart.metric;
if (!_.isEmpty(chartName)) {
chartsConfig.push({
name: chartName,
label: chart.label ? chart.label : chartName,
axisLabel: ''
});
}
})
chartsConfig = _.uniqBy(chartsConfig, 'name');
}
return chartsConfig;
},
_sanitizeQuery(string) | ,
_prepareInfluxQuery: function(queries, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup) {
return _.map(queries, (queryParams) => {
let selectWhat = this._sanitizeQuery(queryParams.qSelect);
let selectFrom = this._sanitizeQuery(queryParams.qFrom);
let selectWhere = this._sanitizeQuery(queryParams.qWhere);
if (!_.isEmpty(selectWhat) && !_.isEmpty(selectFrom)) {
if ((_.includes(selectFrom, '${deploymentId}') && _.isEmpty(deploymentId)) ||
(_.includes(selectFrom, '${nodeId}') && _.isEmpty(nodeId)) ||
(_.includes(selectFrom, '${nodeInstanceId}') && _.isEmpty(nodeInstanceId)))
return {};
selectFrom = _.replace(selectFrom, '${deploymentId}', deploymentId);
selectFrom = _.replace(selectFrom, '${nodeId}', nodeId);
selectFrom = _.replace(selectFrom, '${nodeInstanceId}', nodeInstanceId);
selectWhere = _.replace(selectWhere, '${timeFilter}', `time > ${from} and time < ${to} group by time(${timeGroup})`);
if (_.isEmpty(selectWhere))
return {qSelect: selectWhat, qFrom: selectFrom};
else
return {qSelect: selectWhat, qFrom: selectFrom, qWhere: selectWhere};
} else
return {};
});
},
_isEmptyResponse: function(widget, data) {
return data.state === widget.definition.EMPTY_RESPONSE_STATE;
},
_isWidgetNotConfigured: function(widget, data) {
return data.state === widget.definition.UNCONFIGURED_STATE;
},
fetchParams: function(widget, toolbox) {
let deploymentId = toolbox.getContext().getValue('deploymentId');
let nodeId = toolbox.getContext().getValue('nodeId');
let nodeInstanceId = toolbox.getContext().getValue('nodeInstanceId');
let nodeFilterFromWidget = widget.configuration.nodeFilter;
if (nodeFilterFromWidget.deploymentId || nodeFilterFromWidget.nodeId || nodeFilterFromWidget.nodeInstanceId) {
deploymentId = nodeFilterFromWidget.deploymentId;
nodeId = nodeFilterFromWidget.nodeId;
nodeInstanceId = nodeFilterFromWidget.nodeInstanceId;
}
let timeFilterFromWidget = widget.configuration.timeFilter;
let timeFilterFromContext = toolbox.getContext().getValue('timeFilter');
let timeStart = _.get(timeFilterFromContext, 'start', timeFilterFromWidget.start);
timeStart = moment(timeStart).isValid() ? `${moment(timeStart).unix()}s` : timeStart;
let timeEnd = _.get(timeFilterFromContext, 'end', timeFilterFromWidget.end);
timeEnd = moment(timeEnd).isValid() ? `${moment(timeEnd).unix()}s` : timeEnd;
let timeResolution = _.get(timeFilterFromContext, 'resolution', timeFilterFromWidget.resolution);
let timeUnit = _.get(timeFilterFromContext, 'unit', timeFilterFromWidget.unit);
let timeGroup = `${timeResolution}${timeUnit}`;
return { deploymentId, nodeId, nodeInstanceId, timeStart, timeEnd, timeGroup };
},
fetchData: function(widget, toolbox, params) {
const actions = new Stage.Common.InfluxActions(toolbox);
const deploymentId = params.deploymentId;
const nodeId = params.nodeId;
const nodeInstanceId = params.nodeInstanceId;
const metrics = this._getChartsMetricsList(widget.configuration.charts);
const from = params.timeStart;
const to = params.timeEnd;
const timeGroup = params.timeGroup;
const preparedQuery = _.head(this._prepareInfluxQuery(widget.configuration.query, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup));
if (!_.isEmpty(preparedQuery)) {
toolbox.loading(true);
return actions.doRunQuery(preparedQuery.qSelect, preparedQuery.qFrom, preparedQuery.qWhere).then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
}).catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check your Influx query syntax and try again. Error: ' +
(error.message || error));
});
} else if (!_.isEmpty(deploymentId) && !_.isEmpty(nodeInstanceId) && !_.isEmpty(metrics)) {
toolbox.loading(true);
return actions.doGetMetric(deploymentId, nodeId, nodeInstanceId, metrics, from, to, timeGroup)
.then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
})
.catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check Deployment ID, Node ID, Node Instance ID, Metric and time range. Error: ' +
(error.message || error));
});
} else {
toolbox.loading(false);
return Promise.resolve({state: widget.definition.UNCONFIGURED_STATE});
}
},
render: function(widget,data,error,toolbox) {
let {charts, query, type} = widget.configuration;
let {Message, Icon} = Stage.Basic;
if (_.isEmpty(data)) {
return (
<Stage.Basic.Loading/>
);
} else if (this._isWidgetNotConfigured(widget, data)) {
return (
<Message info icon>
<Icon name='info' />
Please select deployment, node instance and metric in widget's configuration to present the data graph.
</Message>
);
} else if (this._isEmptyResponse(widget, data)) {
return (
<Message info icon>
<Icon name='ban' />
No data fetched for specified chart(s) configuration.
</Message>
);
}
let {Graph} = Stage.Basic.Graphs;
return (
<Graph type={type}
data={this._prepareData(data, Graph.DEFAULT_X_DATA_KEY)}
charts={this._getChartsConfiguration(charts, query, data)} />
);
}
}); | {
return string.replace(/;/g, '');
} | identifier_body |
widget.js | /**
* Created by jakubniezgoda on 15/03/2017.
*/
Stage.defineWidget({
id: 'graph',
name: 'Deployment metric graph',
description: 'Display graph with deployment metric data',
initialWidth: 6,
initialHeight: 20,
showHeader: true,
showBorder: true,
isReact: true,
permission: Stage.GenericConfig.WIDGET_PERMISSION('graph'),
color: 'blue',
categories: [Stage.GenericConfig.CATEGORY.DEPLOYMENTS, Stage.GenericConfig.CATEGORY.CHARTS_AND_STATISTICS],
initialConfiguration: [
Stage.GenericConfig.POLLING_TIME_CONFIG(5),
{id: 'nodeFilter', name: 'Node filter', description: 'Node filter to limit number of available metrics', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.NodeFilter, default: Stage.Basic.NodeFilter.EMPTY_VALUE, storeValueInContext: true},
{id: 'charts', name: 'Charts table', description: 'Charts configuration table', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 5, columns: [
{name: 'metric', label: 'Metric', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.MetricFilter, description: 'Metric to be presented on the chart', filterContextName: 'nodeFilter'},
{name: 'label', label: 'Label', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'Chart label'}
]},
{id: 'query', name: 'Custom Influx Query', description: 'Please note that below query builder overrides the series defined in \'Charts table\'', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 1, columns: [
{name: 'qSelect', label: 'SELECT', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: ''},
{name: 'qFrom', label: 'FROM', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${deploymentId} token to inject dynamic deployment ID. Example: \'/${deploymentId}\..*\.((memory_MemFree))$/\''},
{name: 'qWhere', label: 'WHERE', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${timeFilter} token to inject dynamic data/time ranges.'}
]},
{id: 'type', name: 'Charts type', items: [
{name:'Line chart', value:Stage.Basic.Graphs.Graph.LINE_CHART_TYPE},
{name:'Bar chart', value:Stage.Basic.Graphs.Graph.BAR_CHART_TYPE},
{name:'Area chart', value:Stage.Basic.Graphs.Graph.AREA_CHART_TYPE}],
default: Stage.Basic.Graphs.Graph.LINE_CHART_TYPE, type: Stage.Basic.GenericField.LIST_TYPE},
{id: 'timeFilter', name: 'Time range and resolution', description: 'Time range and time resolution for all defined charts',
type: Stage.Basic.GenericField.CUSTOM_TYPE, component: Stage.Basic.TimeFilter,
default: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE, defaultValue: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE}
],
UNCONFIGURED_STATE: 'unconfigured',
EMPTY_RESPONSE_STATE: 'emptyResponse',
_prepareData: function(data, xDataKey) {
const TIME_FORMAT = 'YYYY-MM-DD HH:mm:ss';
const MAX_NUMBER_OF_POINTS = 200;
const TIME_INDEX = 0;
const VALUE_INDEX = 1;
const REFERENCE_METRIC_INDEX = 0;
const NUMBER_OF_METRICS = data.length;
const NUMBER_OF_POINTS = data[REFERENCE_METRIC_INDEX].points.length;
let points = [];
// Data conversion to recharts format
// The time points of metric no. REFERENCE_METRIC_INDEX are used as the reference time axis
for (let i = 0; i < NUMBER_OF_POINTS; i++) {
let point = { [xDataKey]: Stage.Utils.formatTimestamp(data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX], TIME_FORMAT, null) };
for (let j = 0; j < NUMBER_OF_METRICS; j++) {
if (data[j].points[i] &&
data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX] === data[j].points[i][TIME_INDEX])
{
let metricName = data[j].name;
let pointValue = data[j].points[i][VALUE_INDEX];
point[metricName] = pointValue;
}
}
points.push(point);
}
// Data optimization (show no more than MAX_NUMBER_OF_POINTS points on the graph)
if (points.length > MAX_NUMBER_OF_POINTS) {
let optimizedPoints = [];
let delta = parseFloat(points.length / MAX_NUMBER_OF_POINTS);
for (let i = 0; i < points.length; i = i + delta) {
optimizedPoints.push(points[Math.floor(i)]);
}
points = optimizedPoints;
}
return points;
},
_getChartsMetricsList: function(charts) {
return _.chain(charts)
.filter((graph) => !_.isEmpty(graph.metric))
.map((graph) => graph.metric)
.uniq()
.value();
},
_getChartsConfiguration: function(charts, query, data) {
let chartsConfig = [];
if (!_.isEmpty(query)) {
_.forEach(data, (chart) => {
chartsConfig.push({
name: chart.name,
label: chart.name,
axisLabel: ''
});
})
} else {
_.forEach(charts, (chart) => {
let chartName = chart.metric;
if (!_.isEmpty(chartName)) {
chartsConfig.push({
name: chartName,
label: chart.label ? chart.label : chartName,
axisLabel: ''
});
}
})
chartsConfig = _.uniqBy(chartsConfig, 'name');
}
return chartsConfig;
},
_sanitizeQuery(string){
return string.replace(/;/g, '');
},
_prepareInfluxQuery: function(queries, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup) {
return _.map(queries, (queryParams) => {
let selectWhat = this._sanitizeQuery(queryParams.qSelect);
let selectFrom = this._sanitizeQuery(queryParams.qFrom);
let selectWhere = this._sanitizeQuery(queryParams.qWhere);
if (!_.isEmpty(selectWhat) && !_.isEmpty(selectFrom)) {
if ((_.includes(selectFrom, '${deploymentId}') && _.isEmpty(deploymentId)) ||
(_.includes(selectFrom, '${nodeId}') && _.isEmpty(nodeId)) ||
(_.includes(selectFrom, '${nodeInstanceId}') && _.isEmpty(nodeInstanceId)))
return {};
selectFrom = _.replace(selectFrom, '${deploymentId}', deploymentId);
selectFrom = _.replace(selectFrom, '${nodeId}', nodeId);
selectFrom = _.replace(selectFrom, '${nodeInstanceId}', nodeInstanceId);
selectWhere = _.replace(selectWhere, '${timeFilter}', `time > ${from} and time < ${to} group by time(${timeGroup})`);
if (_.isEmpty(selectWhere))
return {qSelect: selectWhat, qFrom: selectFrom};
else
return {qSelect: selectWhat, qFrom: selectFrom, qWhere: selectWhere};
} else
return {};
});
},
_isEmptyResponse: function(widget, data) {
return data.state === widget.definition.EMPTY_RESPONSE_STATE;
},
_isWidgetNotConfigured: function(widget, data) {
return data.state === widget.definition.UNCONFIGURED_STATE;
},
fetchParams: function(widget, toolbox) {
let deploymentId = toolbox.getContext().getValue('deploymentId');
let nodeId = toolbox.getContext().getValue('nodeId');
let nodeInstanceId = toolbox.getContext().getValue('nodeInstanceId');
let nodeFilterFromWidget = widget.configuration.nodeFilter;
if (nodeFilterFromWidget.deploymentId || nodeFilterFromWidget.nodeId || nodeFilterFromWidget.nodeInstanceId) {
deploymentId = nodeFilterFromWidget.deploymentId;
nodeId = nodeFilterFromWidget.nodeId;
nodeInstanceId = nodeFilterFromWidget.nodeInstanceId;
}
let timeFilterFromWidget = widget.configuration.timeFilter;
let timeFilterFromContext = toolbox.getContext().getValue('timeFilter');
let timeStart = _.get(timeFilterFromContext, 'start', timeFilterFromWidget.start);
timeStart = moment(timeStart).isValid() ? `${moment(timeStart).unix()}s` : timeStart;
let timeEnd = _.get(timeFilterFromContext, 'end', timeFilterFromWidget.end);
timeEnd = moment(timeEnd).isValid() ? `${moment(timeEnd).unix()}s` : timeEnd;
let timeResolution = _.get(timeFilterFromContext, 'resolution', timeFilterFromWidget.resolution);
let timeUnit = _.get(timeFilterFromContext, 'unit', timeFilterFromWidget.unit);
let timeGroup = `${timeResolution}${timeUnit}`;
return { deploymentId, nodeId, nodeInstanceId, timeStart, timeEnd, timeGroup };
},
fetchData: function(widget, toolbox, params) {
const actions = new Stage.Common.InfluxActions(toolbox);
const deploymentId = params.deploymentId;
const nodeId = params.nodeId;
const nodeInstanceId = params.nodeInstanceId;
const metrics = this._getChartsMetricsList(widget.configuration.charts);
const from = params.timeStart;
const to = params.timeEnd;
const timeGroup = params.timeGroup;
const preparedQuery = _.head(this._prepareInfluxQuery(widget.configuration.query, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup));
if (!_.isEmpty(preparedQuery)) {
toolbox.loading(true);
return actions.doRunQuery(preparedQuery.qSelect, preparedQuery.qFrom, preparedQuery.qWhere).then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
}).catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check your Influx query syntax and try again. Error: ' +
(error.message || error));
});
} else if (!_.isEmpty(deploymentId) && !_.isEmpty(nodeInstanceId) && !_.isEmpty(metrics)) {
toolbox.loading(true);
return actions.doGetMetric(deploymentId, nodeId, nodeInstanceId, metrics, from, to, timeGroup)
.then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
})
.catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check Deployment ID, Node ID, Node Instance ID, Metric and time range. Error: ' +
(error.message || error));
});
} else {
toolbox.loading(false);
return Promise.resolve({state: widget.definition.UNCONFIGURED_STATE});
}
},
render: function(widget,data,error,toolbox) {
let {charts, query, type} = widget.configuration;
let {Message, Icon} = Stage.Basic;
if (_.isEmpty(data)) {
return (
<Stage.Basic.Loading/>
);
} else if (this._isWidgetNotConfigured(widget, data)) | else if (this._isEmptyResponse(widget, data)) {
return (
<Message info icon>
<Icon name='ban' />
No data fetched for specified chart(s) configuration.
</Message>
);
}
let {Graph} = Stage.Basic.Graphs;
return (
<Graph type={type}
data={this._prepareData(data, Graph.DEFAULT_X_DATA_KEY)}
charts={this._getChartsConfiguration(charts, query, data)} />
);
}
}); | {
return (
<Message info icon>
<Icon name='info' />
Please select deployment, node instance and metric in widget's configuration to present the data graph.
</Message>
);
} | conditional_block |
parse.go | package janet
import (
"strconv"
"unicode/utf8"
)
type JanetParserStatus int
const (
JANET_PARSE_ROOT = iota
JANET_PARSE_ERROR
JANET_PARSE_PENDING
JANET_PARSE_DEAD
)
const (
PFLAG_CONTAINER = 0x100
PFLAG_BUFFER = 0x200
PFLAG_PARENS = 0x400
PFLAG_SQRBRACKETS = 0x800
PFLAG_CURLYBRACKETS = 0x1000
PFLAG_STRING = 0x2000
PFLAG_LONGSTRING = 0x4000
PFLAG_READERMAC = 0x8000
PFLAG_ATSYM = 0x10000
PFLAG_COMMENT = 0x20000
PFLAG_TOKEN = 0x40000
)
type Parser struct {
args []Value
states []ParseState
buf []byte
line int
column int
pending int
lookback byte
flag int
err string
}
type ParseState struct {
counter int
argn int
flags int
line int
column int
consumer Consumer
}
type Consumer func(parser *Parser, state *ParseState, c byte) int
func isWhitespace(c byte) bool {
return c == ' ' ||
c == '\t' ||
c == '\n' ||
c == '\r' ||
c == '\v' ||
c == '\f' ||
c == 0
}
/* Code generated by cjanet/tools/symcharsgen.c.
* The table contains 256 bits, where each bit is 1
* if the corresponding ascii code is a symbol char, and 0
* if not. The upper characters are also considered symbol
* chars and are then checked for utf-8 compliance. */
var symchars = [8]uint32{
0x00000000, 0xf7ffec72, 0xc7ffffff, 0x07fffffe,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
}
/* Check if a character is a valid symbol character
* symbol chars are A-Z, a-z, 0-9, or one of !$&*+-./:<=>@\^_~| */
func isSymbolChar(c byte) bool {
return (symchars[c>>5] & (uint32(1) << (c & 0x1F))) != 0
}
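/* Illustrative sketch, not part of the original source: how the symchars
 * bitmap lookup decomposes. The top three bits of the byte (c>>5) select one
 * of the eight uint32 words and the low five bits (c&0x1F) select a bit in it. */
func exampleSymbolCharLookup() {
_ = isSymbolChar('a') // 'a' = 0x61: word 3, bit 1 of 0x07fffffe is set -> symbol char
_ = isSymbolChar('(') // '(' = 0x28: word 1, bit 8 of 0xf7ffec72 is clear -> delimiter, not a symbol char
_ = isSymbolChar('-') // '-' = 0x2D: word 1, bit 13 is set -> symbols may contain '-'
}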
func checkEscape(c byte) int {
switch c {
default:
return -1
case 'x':
return 1
case 'n':
return '\n'
case 't':
return '\t'
case 'r':
return '\r'
case '0':
return 0
case 'z':
return 0
case 'f':
return '\f'
case 'v':
return '\v'
case 'e':
return 27
case '"':
return '"'
case '\\':
return '\\'
}
}
/* Get hex digit from a letter */
func toHex(c byte) int {
if c >= '0' && c <= '9' {
return int(c) - '0'
} else if c >= 'A' && c <= 'F' {
return 10 + int(c) - 'A'
} else if c >= 'a' && c <= 'f' {
return 10 + int(c) - 'a'
} else {
return -1
}
}
func (parser *Parser) checkDead() {
if parser.flag != 0 {
JanetPanic("parser is dead, cannot consume")
}
if parser.err != "" {
JanetPanic("parser has unchecked error, cannot consume")
}
}
func (parser *Parser) pushState(consumer Consumer, flags int) {
state := ParseState{
counter: 0,
argn: 0,
flags: flags,
consumer: consumer,
line: parser.line,
column: parser.column,
}
parser.states = append(parser.states, state)
}
func (p *Parser) popState(val Value) {
for {
top := p.states[len(p.states)-1]
p.states = p.states[:len(p.states)-1]
newtop := &p.states[len(p.states)-1]
if (newtop.flags & PFLAG_CONTAINER) != 0 {
switch val := val.(type) {
case *Tuple:
val.Line = top.line
val.Column = top.column
default:
}
newtop.argn += 1
/* Keep track of number of values in the root state */
if len(p.states) == 1 {
p.pending += 1
}
p.args = append(p.args, val)
return
} else if (newtop.flags & PFLAG_READERMAC) != 0 {
which := "<unknown>"
t := NewTuple(2, 2)
c := newtop.flags & 0xFF
switch c {
case '\'':
which = "quote"
case ',':
which = "unquote"
case ';':
which = "splice"
case '|':
which = "short-fn"
case '~':
which = "quasiquote"
default:
}
t.Vals[0] = Symbol(which)
t.Vals[1] = val
/* Quote source mapping info */
t.Line = newtop.line
t.Column = newtop.column
val = t
} else {
return
}
}
}
func (p *Parser) closeArray(state *ParseState) *Array {
array := NewArray(state.argn, state.argn)
for i := state.argn - 1; i >= 0; i = i - 1 {
array.Data[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return array
}
func (p *Parser) closeTuple(state *ParseState, flags int) *Tuple {
tup := NewTuple(state.argn, state.argn)
tup.Flags = flags
for i := state.argn - 1; i >= 0; i = i - 1 {
tup.Vals[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return tup
}
func (p *Parser) closeStruct(state *ParseState) *Struct {
/*
JanetKV *st = janet_struct_begin(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_struct_put(st, key, value);
}
return janet_wrap_struct(janet_struct_end(st));
*/
panic("XXX")
}
func (p *Parser) closeTable(state *ParseState) *Table {
/*
JanetTable *table = janet_table(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_table_put(table, key, value);
}
return janet_wrap_table(table);
*/
panic("XXX")
}
func root(p *Parser, state *ParseState, c byte) int {
switch c {
default:
if isWhitespace(c) {
return 1
}
if !isSymbolChar(c) {
p.err = "unexpected character"
return 1
}
p.pushState(tokenchar, PFLAG_TOKEN)
return 0
case '\'', ',', ';', '~', '|':
p.pushState(root, PFLAG_READERMAC|int(c))
return 1
case '"':
p.pushState(stringchar, PFLAG_STRING)
return 1
case '#':
p.pushState(comment, PFLAG_COMMENT)
return 1
case '@':
p.pushState(atsign, PFLAG_ATSYM)
return 1
case '`':
p.pushState(longstring, PFLAG_LONGSTRING)
return 1
case ')', ']', '}':
{
var ds Value
if len(p.states) == 1 {
p.err = "unexpected delimiter"
return 1
}
if (c == ')' && (state.flags&PFLAG_PARENS) != 0) ||
(c == ']' && (state.flags&PFLAG_SQRBRACKETS) != 0) {
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeArray(state)
} else {
tupFlags := 0
if c == ']' {
tupFlags = JANET_TUPLE_FLAG_BRACKETCTOR
}
ds = p.closeTuple(state, tupFlags)
}
} else if c == '}' && ((state.flags & PFLAG_CURLYBRACKETS) != 0) {
if (state.argn % 2) == 1 {
p.err = "struct and table literals expect even number of arguments"
return 1
}
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeTable(state)
} else {
ds = p.closeStruct(state)
}
} else {
p.err = "mismatched delimiter"
return 1
}
p.popState(ds)
}
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS)
return 1
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS)
return 1
}
}
func strEqBuf(str string, buf []byte) bool {
if len(str) != len(buf) {
return false
}
for i, b := range buf {
if str[i] != b {
return false
}
}
return true
}
// XXX This will need to be ported from the original janet.
// XXX avoiding casting to string will be more efficient.
func scanNumber(s string, out *float64) bool {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return false
}
*out = v
return true
}
func tokenchar(p *Parser, state *ParseState, c byte) int {
var ret Value
var numval float64
if isSymbolChar(c) {
p.buf = append(p.buf, c)
if c > 127 {
state.argn = 1 /* Use to indicate non ascii */
}
return 1
}
/* Token finished */
startDig := p.buf[0] >= '0' && p.buf[0] <= '9'
startNum := startDig || p.buf[0] == '-' || p.buf[0] == '+' || p.buf[0] == '.'
if p.buf[0] == ':' {
kwStr := p.buf[1:]
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.Valid(kwStr)
if !valid {
p.err = "invalid utf-8 in keyword"
return 0
}
ret = Keyword(kwStr)
} else if startNum && scanNumber(string(p.buf), &numval) {
ret = Number(numval)
} else if strEqBuf("nil", p.buf) {
ret = nil
} else if strEqBuf("false", p.buf) {
ret = Bool(false)
} else if strEqBuf("true", p.buf) {
ret = Bool(true)
} else {
if startDig {
p.err = "symbol literal cannot start with a digit"
return 0
} else {
symStr := string(p.buf)
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.ValidString(symStr)
if !valid {
p.err = "invalid utf-8 in symbol"
return 0
}
ret = Symbol(symStr)
}
}
p.buf = p.buf[:0]
p.popState(ret)
return 0
}
func escapeh(p *Parser, state *ParseState, c byte) int {
digit := toHex(c)
if digit < 0 {
p.err = "invalid hex digit in hex escape"
return 1
}
state.argn = (state.argn << 4) + digit
state.counter--
if state.counter == 0 {
p.buf = append(p.buf, byte(state.argn&0xFF))
state.argn = 0
state.consumer = stringchar
}
return 1
}
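/* Illustrative sketch, not part of the original source: escapeh folds the two
 * digits of a hex escape into a single byte. For "\x41" it sees '4' then '1',
 * accumulating (0<<4)+4 = 4 and then (4<<4)+1 = 0x41, which is appended as 'A'. */
func exampleHexEscapeFold() byte {
argn := 0
argn = (argn << 4) + toHex('4')
argn = (argn << 4) + toHex('1')
return byte(argn & 0xFF) // 0x41 == 'A'
}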
func escape1(p *Parser, state *ParseState, c byte) int {
e := checkEscape(c)
if e < 0 {
p.err = "invalid string escape sequence"
return 1
}
if c == 'x' {
state.counter = 2
state.argn = 0
state.consumer = escapeh
} else {
p.buf = append(p.buf, byte(e)) /* append the decoded escape value, not the raw character */
state.consumer = stringchar
}
return 1
}
func stringend(p *Parser, state *ParseState) int {
var ret Value
buf := p.buf
if (state.flags & PFLAG_LONGSTRING) != 0 {
/* Check for leading newline character so we can remove it */
if buf[0] == '\n' {
buf = buf[1:]
}
if len(buf) > 0 && buf[len(buf)-1] == '\n' {
buf = buf[:len(buf)-1]
}
}
if (state.flags & PFLAG_BUFFER) != 0 {
b := NewBuffer(len(buf))
_, _ = b.Buf.Write(buf)
ret = b
} else {
ret = String(buf)
}
p.buf = []byte{}
p.popState(ret)
return 1
}
func stringchar(p *Parser, state *ParseState, c byte) int {
/* Enter escape */
if c == '\\' {
state.consumer = escape1
return 1
}
/* String end */
if c == '"' {
return stringend(p, state)
}
/* normal char */
if c != '\n' && c != '\r' {
p.buf = append(p.buf, c)
}
return 1
}
const PFLAG_INSTRING = 0x100000
const PFLAG_END_CANDIDATE = 0x200000
func longstring(p *Parser, state *ParseState, c byte) int {
if (state.flags & PFLAG_INSTRING) != 0 {
/* We are inside the long string */
if c == '`' {
state.flags |= PFLAG_END_CANDIDATE
state.flags &= ^PFLAG_INSTRING
state.counter = 1 /* Use counter to keep track of the number of '`' seen */
return 1
}
p.buf = append(p.buf, c)
return 1
} else if (state.flags & PFLAG_END_CANDIDATE) != 0 {
/* We are checking a potential end of the string */
if state.counter == state.argn {
stringend(p, state)
return 0
}
if c == '`' && state.counter < state.argn {
state.counter += 1
return 1
}
/* Failed end candidate */
for i := 0; i < state.counter; i++ {
p.buf = append(p.buf, '`')
}
p.buf = append(p.buf, c)
state.counter = 0
state.flags &= ^PFLAG_END_CANDIDATE
state.flags |= PFLAG_INSTRING
return 1
} else {
/* We are at beginning of string */
state.argn += 1
if c != '`' {
state.flags |= PFLAG_INSTRING
p.buf = append(p.buf, c)
}
return 1
}
}
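/* Illustrative sketch, not part of the original source: longstring records the
 * number of opening backticks in state.argn, and only a run of the same length
 * ends the string, so a lone '`' can appear verbatim inside a double-backtick string. */
func exampleLongStringDelimiters() {
var p Parser
p.Init()
for _, c := range []byte("``contains a ` verbatim``\n") {
p.Consume(c)
}
}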
func comment(p *Parser, state *ParseState, c byte) int {
if c == '\n' {
p.states = p.states[:len(p.states)-1]
p.buf = p.buf[:0]
} else {
p.buf = append(p.buf, c)
}
return 1
}
func atsign(p *Parser, state *ParseState, c byte) int {
p.states = p.states[:len(p.states)-1]
switch c {
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS|PFLAG_ATSYM)
return 1
case '"':
p.pushState(stringchar, PFLAG_BUFFER|PFLAG_STRING)
return 1
case '`':
p.pushState(longstring, PFLAG_BUFFER|PFLAG_LONGSTRING)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS|PFLAG_ATSYM)
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS|PFLAG_ATSYM)
return 1
default:
break
}
p.pushState(tokenchar, PFLAG_TOKEN)
p.buf = append(p.buf, '@')
return 0
}
// Public api
func (parser *Parser) Init() {
parser.args = []Value{}
parser.states = []ParseState{}
parser.buf = []byte{}
parser.err = ""
parser.lookback = 0
parser.line = 1
parser.column = 0
parser.pending = 0
parser.flag = 0
parser.pushState(root, PFLAG_CONTAINER)
}
func (parser *Parser) Consume(c byte) {
consumed := 0
parser.checkDead()
if c == '\r' {
parser.line += 1
parser.column = 0
} else if c == '\n' {
parser.column = 0
if parser.lookback != '\r' {
parser.line += 1
}
} else {
parser.column += 1
}
for consumed == 0 && parser.err == "" {
state := &parser.states[len(parser.states)-1]
consumed = state.consumer(parser, state, c)
}
parser.lookback = c
}
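/* Illustrative sketch, not part of the original source: a typical driving loop
 * for the public API: initialize once, feed source bytes one at a time, and each
 * completed top-level form becomes one pending value in the root state. */
func exampleConsumeLoop() {
var p Parser
p.Init()
for _, c := range []byte("(+ 1 2) [3 4]\n") {
p.Consume(c)
}
}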
func (parser *Parser) | () Value {
var ret Value
if parser.pending == 0 {
return nil
}
ret = parser.args[0]
for i := 1; i < len(parser.args); i += 1 {
parser.args[i-1] = parser.args[i]
}
parser.args = parser.args[:len(parser.args)-1]
parser.pending -= 1
return ret
}
func (parser *Parser) Clone() *Parser {
dest := &Parser{}
/* Misc fields */
dest.flag = parser.flag
dest.pending = parser.pending
dest.lookback = parser.lookback
dest.line = parser.line
dest.column = parser.column
dest.err = parser.err
/* Deep cloned fields */
dest.args = make([]Value, 0, len(parser.args))
dest.states = make([]ParseState, 0, len(parser.states))
dest.buf = make([]byte, 0, len(parser.buf))
dest.args = append(dest.args, parser.args...)
dest.states = append(dest.states, parser.states...)
dest.buf = append(dest.buf, parser.buf...)
return dest
}
| Produce | identifier_name |
parse.go | package janet
import (
"strconv"
"unicode/utf8"
)
type JanetParserStatus int
const (
JANET_PARSE_ROOT = iota
JANET_PARSE_ERROR
JANET_PARSE_PENDING
JANET_PARSE_DEAD
)
const (
PFLAG_CONTAINER = 0x100
PFLAG_BUFFER = 0x200
PFLAG_PARENS = 0x400
PFLAG_SQRBRACKETS = 0x800
PFLAG_CURLYBRACKETS = 0x1000
PFLAG_STRING = 0x2000
PFLAG_LONGSTRING = 0x4000
PFLAG_READERMAC = 0x8000
PFLAG_ATSYM = 0x10000
PFLAG_COMMENT = 0x20000
PFLAG_TOKEN = 0x40000
)
type Parser struct {
args []Value
states []ParseState
buf []byte
line int
column int
pending int
lookback byte
flag int
err string
}
type ParseState struct {
counter int
argn int
flags int
line int
column int
consumer Consumer
}
type Consumer func(parser *Parser, state *ParseState, c byte) int
func isWhitespace(c byte) bool {
return c == ' ' ||
c == '\t' ||
c == '\n' ||
c == '\r' ||
c == '\v' ||
c == '\f' ||
c == 0
}
/* Code generated by cjanet/tools/symcharsgen.c.
* The table contains 256 bits, where each bit is 1
* if the corresponding ascii code is a symbol char, and 0
* if not. The upper characters are also considered symbol
* chars and are then checked for utf-8 compliance. */
var symchars = [8]uint32{
0x00000000, 0xf7ffec72, 0xc7ffffff, 0x07fffffe,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
}
/* Check if a character is a valid symbol character
* symbol chars are A-Z, a-z, 0-9, or one of !$&*+-./:<=>@\^_~| */
func isSymbolChar(c byte) bool {
return (symchars[c>>5] & (uint32(1) << (c & 0x1F))) != 0
}
func checkEscape(c byte) int {
switch c {
default:
return -1
case 'x':
return 1
case 'n':
return '\n'
case 't':
return '\t'
case 'r':
return '\r'
case '0':
return 0
case 'z':
return 0
case 'f':
return '\f'
case 'v':
return '\v'
case 'e':
return 27
case '"':
return '"'
case '\\':
return '\\'
}
}
/* Get hex digit from a letter */
func toHex(c byte) int {
if c >= '0' && c <= '9' {
return int(c) - '0'
} else if c >= 'A' && c <= 'F' {
return 10 + int(c) - 'A'
} else if c >= 'a' && c <= 'f' {
return 10 + int(c) - 'a'
} else {
return -1
}
}
func (parser *Parser) checkDead() {
if parser.flag != 0 {
JanetPanic("parser is dead, cannot consume")
}
if parser.err != "" {
JanetPanic("parser has unchecked error, cannot consume")
}
}
func (parser *Parser) pushState(consumer Consumer, flags int) {
state := ParseState{
counter: 0,
argn: 0,
flags: flags,
consumer: consumer,
line: parser.line,
column: parser.column,
}
parser.states = append(parser.states, state)
}
func (p *Parser) popState(val Value) {
for {
top := p.states[len(p.states)-1]
p.states = p.states[:len(p.states)-1]
newtop := &p.states[len(p.states)-1]
if (newtop.flags & PFLAG_CONTAINER) != 0 {
switch val := val.(type) {
case *Tuple:
val.Line = top.line
val.Column = top.column
default:
}
newtop.argn += 1
/* Keep track of number of values in the root state */
if len(p.states) == 1 {
p.pending += 1
}
p.args = append(p.args, val)
return
} else if (newtop.flags & PFLAG_READERMAC) != 0 {
which := "<unknown>"
t := NewTuple(2, 2)
c := newtop.flags & 0xFF
switch c {
case '\'':
which = "quote"
case ',':
which = "unquote"
case ';':
which = "splice"
case '|':
which = "short-fn"
case '~':
which = "quasiquote"
default:
}
t.Vals[0] = Symbol(which)
t.Vals[1] = val
/* Quote source mapping info */
t.Line = newtop.line
t.Column = newtop.column
val = t
} else {
return
}
} | for i := state.argn - 1; i >= 0; i = i - 1 {
array.Data[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return array
}
func (p *Parser) closeTuple(state *ParseState, flags int) *Tuple {
tup := NewTuple(state.argn, state.argn)
tup.Flags = flags
for i := state.argn - 1; i >= 0; i = i - 1 {
tup.Vals[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return tup
}
func (p *Parser) closeStruct(state *ParseState) *Struct {
/*
JanetKV *st = janet_struct_begin(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_struct_put(st, key, value);
}
return janet_wrap_struct(janet_struct_end(st));
*/
panic("XXX")
}
func (p *Parser) closeTable(state *ParseState) *Table {
/*
JanetTable *table = janet_table(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_table_put(table, key, value);
}
return janet_wrap_table(table);
*/
panic("XXX")
}
func root(p *Parser, state *ParseState, c byte) int {
switch c {
default:
if isWhitespace(c) {
return 1
}
if !isSymbolChar(c) {
p.err = "unexpected character"
return 1
}
p.pushState(tokenchar, PFLAG_TOKEN)
return 0
case '\'', ',', ';', '~', '|':
p.pushState(root, PFLAG_READERMAC|int(c))
return 1
case '"':
p.pushState(stringchar, PFLAG_STRING)
return 1
case '#':
p.pushState(comment, PFLAG_COMMENT)
return 1
case '@':
p.pushState(atsign, PFLAG_ATSYM)
return 1
case '`':
p.pushState(longstring, PFLAG_LONGSTRING)
return 1
case ')', ']', '}':
{
var ds Value
if len(p.states) == 1 {
p.err = "unexpected delimiter"
return 1
}
if (c == ')' && (state.flags&PFLAG_PARENS) != 0) ||
(c == ']' && (state.flags&PFLAG_SQRBRACKETS) != 0) {
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeArray(state)
} else {
tupFlags := 0
if c == ']' {
tupFlags = JANET_TUPLE_FLAG_BRACKETCTOR
}
ds = p.closeTuple(state, tupFlags)
}
} else if c == '}' && ((state.flags & PFLAG_CURLYBRACKETS) != 0) {
if (state.argn % 2) == 1 {
p.err = "struct and table literals expect even number of arguments"
return 1
}
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeTable(state)
} else {
ds = p.closeStruct(state)
}
} else {
p.err = "mismatched delimiter"
return 1
}
p.popState(ds)
}
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS)
return 1
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS)
return 1
}
}
func strEqBuf(str string, buf []byte) bool {
if len(str) != len(buf) {
return false
}
for i, b := range buf {
if str[i] != b {
return false
}
}
return true
}
// XXX This will need to be ported from the original janet.
// XXX avoiding casting to string will be more efficient.
func scanNumber(s string, out *float64) bool {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return false
}
*out = v
return true
}
func tokenchar(p *Parser, state *ParseState, c byte) int {
var ret Value
var numval float64
if isSymbolChar(c) {
p.buf = append(p.buf, c)
if c > 127 {
state.argn = 1 /* Use to indicate non ascii */
}
return 1
}
/* Token finished */
startDig := p.buf[0] >= '0' && p.buf[0] <= '9'
startNum := startDig || p.buf[0] == '-' || p.buf[0] == '+' || p.buf[0] == '.'
if p.buf[0] == ':' {
kwStr := p.buf[1:]
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.Valid(kwStr)
if !valid {
p.err = "invalid utf-8 in keyword"
return 0
}
ret = Keyword(kwStr)
} else if startNum && scanNumber(string(p.buf), &numval) {
ret = Number(numval)
} else if strEqBuf("nil", p.buf) {
ret = nil
} else if strEqBuf("false", p.buf) {
ret = Bool(false)
} else if strEqBuf("true", p.buf) {
ret = Bool(true)
} else {
if startDig {
p.err = "symbol literal cannot start with a digit"
return 0
} else {
symStr := string(p.buf)
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.ValidString(symStr)
if !valid {
p.err = "invalid utf-8 in symbol"
return 0
}
ret = Symbol(symStr)
}
}
p.buf = p.buf[:0]
p.popState(ret)
return 0
}
func escapeh(p *Parser, state *ParseState, c byte) int {
digit := toHex(c)
if digit < 0 {
p.err = "invalid hex digit in hex escape"
return 1
}
state.argn = (state.argn << 4) + digit
state.counter--
if state.counter == 0 {
p.buf = append(p.buf, byte(state.argn&0xFF))
state.argn = 0
state.consumer = stringchar
}
return 1
}
func escape1(p *Parser, state *ParseState, c byte) int {
e := checkEscape(c)
if e < 0 {
p.err = "invalid string escape sequence"
return 1
}
if c == 'x' {
state.counter = 2
state.argn = 0
state.consumer = escapeh
} else {
p.buf = append(p.buf, c)
state.consumer = stringchar
}
return 1
}
func stringend(p *Parser, state *ParseState) int {
var ret Value
buf := p.buf
if (state.flags & PFLAG_LONGSTRING) != 0 {
/* Check for leading newline character so we can remove it */
if buf[0] == '\n' {
buf = buf[1:]
}
if len(buf) > 0 && buf[len(buf)-1] == '\n' {
buf = buf[:len(buf)-1]
}
}
if (state.flags & PFLAG_BUFFER) != 0 {
b := NewBuffer(len(buf))
_, _ = b.Buf.Write(buf)
ret = b
} else {
ret = String(buf)
}
p.buf = []byte{}
p.popState(ret)
return 1
}
func stringchar(p *Parser, state *ParseState, c byte) int {
/* Enter escape */
if c == '\\' {
state.consumer = escape1
return 1
}
/* String end */
if c == '"' {
return stringend(p, state)
}
/* normal char */
if c != '\n' && c != '\r' {
p.buf = append(p.buf, c)
}
return 1
}
const PFLAG_INSTRING = 0x100000
const PFLAG_END_CANDIDATE = 0x200000
func longstring(p *Parser, state *ParseState, c byte) int {
if (state.flags & PFLAG_INSTRING) != 0 {
/* We are inside the long string */
if c == '`' {
state.flags |= PFLAG_END_CANDIDATE
state.flags &= ^PFLAG_INSTRING
state.counter = 1 /* Use counter to keep track of number of '=' seen */
return 1
}
p.buf = append(p.buf, c)
return 1
} else if (state.flags & PFLAG_END_CANDIDATE) != 0 {
/* We are checking a potential end of the string */
if state.counter == state.argn {
stringend(p, state)
return 0
}
if c == '`' && state.counter < state.argn {
state.counter += 1
return 1
}
/* Failed end candidate */
for i := 0; i < state.counter; i++ {
p.buf = append(p.buf, '`')
}
p.buf = append(p.buf, c)
state.counter = 0
state.flags &= ^PFLAG_END_CANDIDATE
state.flags |= PFLAG_INSTRING
return 1
} else {
/* We are at beginning of string */
state.argn += 1
if c != '`' {
state.flags |= PFLAG_INSTRING
p.buf = append(p.buf, c)
}
return 1
}
}
func comment(p *Parser, state *ParseState, c byte) int {
if c == '\n' {
p.states = p.states[:len(p.states)-1]
p.buf = p.buf[:0]
} else {
p.buf = append(p.buf, c)
}
return 1
}
func atsign(p *Parser, state *ParseState, c byte) int {
p.states = p.states[:len(p.states)-1]
switch c {
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS|PFLAG_ATSYM)
return 1
case '"':
p.pushState(stringchar, PFLAG_BUFFER|PFLAG_STRING)
return 1
case '`':
p.pushState(longstring, PFLAG_BUFFER|PFLAG_LONGSTRING)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS|PFLAG_ATSYM)
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS|PFLAG_ATSYM)
return 1
default:
break
}
p.pushState(tokenchar, PFLAG_TOKEN)
p.buf = append(p.buf, '@')
return 0
}
// Public api
func (parser *Parser) Init() {
parser.args = []Value{}
parser.states = []ParseState{}
parser.buf = []byte{}
parser.err = ""
parser.lookback = 0
parser.line = 1
parser.column = 0
parser.pending = 0
parser.flag = 0
parser.pushState(root, PFLAG_CONTAINER)
}
func (parser *Parser) Consume(c byte) {
consumed := 0
parser.checkDead()
if c == '\r' {
parser.line += 1
parser.column = 0
} else if c == '\n' {
parser.column = 0
if parser.lookback != '\r' {
parser.line += 1
}
} else {
parser.column += 1
}
for consumed == 0 && parser.err == "" {
state := &parser.states[len(parser.states)-1]
consumed = state.consumer(parser, state, c)
}
parser.lookback = c
}
func (parser *Parser) Produce() Value {
var ret Value
if parser.pending == 0 {
return nil
}
ret = parser.args[0]
for i := 1; i < len(parser.args); i += 1 {
parser.args[i-1] = parser.args[i]
}
parser.args = parser.args[:len(parser.args)-1]
parser.pending -= 1
return ret
}
func (parser *Parser) Clone() *Parser {
dest := &Parser{}
/* Misc fields */
dest.flag = parser.flag
dest.pending = parser.pending
dest.lookback = parser.lookback
dest.line = parser.line
dest.column = parser.column
dest.err = parser.err
/* Deep cloned fields */
dest.args = make([]Value, 0, len(parser.args))
dest.states = make([]ParseState, 0, len(parser.states))
dest.buf = make([]byte, 0, len(parser.buf))
dest.args = append(dest.args, parser.args...)
dest.states = append(dest.states, parser.states...)
dest.buf = append(dest.buf, parser.buf...)
return dest
} | }
func (p *Parser) closeArray(state *ParseState) *Array {
array := NewArray(state.argn, state.argn) | random_line_split |
parse.go | package janet
import (
"strconv"
"unicode/utf8"
)
type JanetParserStatus int
const (
JANET_PARSE_ROOT = iota
JANET_PARSE_ERROR
JANET_PARSE_PENDING
JANET_PARSE_DEAD
)
const (
PFLAG_CONTAINER = 0x100
PFLAG_BUFFER = 0x200
PFLAG_PARENS = 0x400
PFLAG_SQRBRACKETS = 0x800
PFLAG_CURLYBRACKETS = 0x1000
PFLAG_STRING = 0x2000
PFLAG_LONGSTRING = 0x4000
PFLAG_READERMAC = 0x8000
PFLAG_ATSYM = 0x10000
PFLAG_COMMENT = 0x20000
PFLAG_TOKEN = 0x40000
)
type Parser struct {
args []Value
states []ParseState
buf []byte
line int
column int
pending int
lookback byte
flag int
err string
}
type ParseState struct {
counter int
argn int
flags int
line int
column int
consumer Consumer
}
type Consumer func(parser *Parser, state *ParseState, c byte) int
func isWhitespace(c byte) bool {
return c == ' ' ||
c == '\t' ||
c == '\n' ||
c == '\r' ||
c == '\v' ||
c == '\f' ||
c == 0
}
/* Code generated by cjanet/tools/symcharsgen.c.
* The table contains 256 bits, where each bit is 1
* if the corresponding ascii code is a symbol char, and 0
* if not. The upper characters are also considered symbol
* chars and are then checked for utf-8 compliance. */
var symchars = [8]uint32{
0x00000000, 0xf7ffec72, 0xc7ffffff, 0x07fffffe,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
}
/* Check if a character is a valid symbol character
* symbol chars are A-Z, a-z, 0-9, or one of !$&*+-./:<=>@\^_~| */
func isSymbolChar(c byte) bool {
return (symchars[c>>5] & (uint32(1) << (c & 0x1F))) != 0
}
func checkEscape(c byte) int {
switch c {
default:
return -1
case 'x':
return 1
case 'n':
return '\n'
case 't':
return '\t'
case 'r':
return '\r'
case '0':
return 0
case 'z':
return 0
case 'f':
return '\f'
case 'v':
return '\v'
case 'e':
return 27
case '"':
return '"'
case '\\':
return '\\'
}
}
/* Get hex digit from a letter */
func toHex(c byte) int {
if c >= '0' && c <= '9' {
return int(c) - '0'
} else if c >= 'A' && c <= 'F' {
return 10 + int(c) - 'A'
} else if c >= 'a' && c <= 'f' {
return 10 + int(c) - 'a'
} else {
return -1
}
}
func (parser *Parser) checkDead() {
if parser.flag != 0 {
JanetPanic("parser is dead, cannot consume")
}
if parser.err != "" {
JanetPanic("parser has unchecked error, cannot consume")
}
}
func (parser *Parser) pushState(consumer Consumer, flags int) {
state := ParseState{
counter: 0,
argn: 0,
flags: flags,
consumer: consumer,
line: parser.line,
column: parser.column,
}
parser.states = append(parser.states, state)
}
func (p *Parser) popState(val Value) {
for |
}
func (p *Parser) closeArray(state *ParseState) *Array {
array := NewArray(state.argn, state.argn)
for i := state.argn - 1; i >= 0; i = i - 1 {
array.Data[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return array
}
func (p *Parser) closeTuple(state *ParseState, flags int) *Tuple {
tup := NewTuple(state.argn, state.argn)
tup.Flags = flags
for i := state.argn - 1; i >= 0; i = i - 1 {
tup.Vals[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return tup
}
func (p *Parser) closeStruct(state *ParseState) *Struct {
/*
JanetKV *st = janet_struct_begin(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_struct_put(st, key, value);
}
return janet_wrap_struct(janet_struct_end(st));
*/
panic("XXX")
}
func (p *Parser) closeTable(state *ParseState) *Table {
/*
JanetTable *table = janet_table(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_table_put(table, key, value);
}
return janet_wrap_table(table);
*/
panic("XXX")
}
func root(p *Parser, state *ParseState, c byte) int {
switch c {
default:
if isWhitespace(c) {
return 1
}
if !isSymbolChar(c) {
p.err = "unexpected character"
return 1
}
p.pushState(tokenchar, PFLAG_TOKEN)
return 0
case '\'', ',', ';', '~', '|':
p.pushState(root, PFLAG_READERMAC|int(c))
return 1
case '"':
p.pushState(stringchar, PFLAG_STRING)
return 1
case '#':
p.pushState(comment, PFLAG_COMMENT)
return 1
case '@':
p.pushState(atsign, PFLAG_ATSYM)
return 1
case '`':
p.pushState(longstring, PFLAG_LONGSTRING)
return 1
case ')', ']', '}':
{
var ds Value
if len(p.states) == 1 {
p.err = "unexpected delimiter"
return 1
}
if (c == ')' && (state.flags&PFLAG_PARENS) != 0) ||
(c == ']' && (state.flags&PFLAG_SQRBRACKETS) != 0) {
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeArray(state)
} else {
tupFlags := 0
if c == ']' {
tupFlags = JANET_TUPLE_FLAG_BRACKETCTOR
}
ds = p.closeTuple(state, tupFlags)
}
} else if c == '}' && ((state.flags & PFLAG_CURLYBRACKETS) != 0) {
if (state.argn % 2) == 1 {
p.err = "struct and table literals expect even number of arguments"
return 1
}
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeTable(state)
} else {
ds = p.closeStruct(state)
}
} else {
p.err = "mismatched delimiter"
return 1
}
p.popState(ds)
}
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS)
return 1
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS)
return 1
}
}
func strEqBuf(str string, buf []byte) bool {
if len(str) != len(buf) {
return false
}
for i, b := range buf {
if str[i] != b {
return false
}
}
return true
}
// XXX This will need to be ported from the original janet.
// XXX avoiding casting to string will be more efficient.
func scanNumber(s string, out *float64) bool {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return false
}
*out = v
return true
}
func tokenchar(p *Parser, state *ParseState, c byte) int {
var ret Value
var numval float64
if isSymbolChar(c) {
p.buf = append(p.buf, c)
if c > 127 {
state.argn = 1 /* Use to indicate non ascii */
}
return 1
}
/* Token finished */
startDig := p.buf[0] >= '0' && p.buf[0] <= '9'
startNum := startDig || p.buf[0] == '-' || p.buf[0] == '+' || p.buf[0] == '.'
if p.buf[0] == ':' {
kwStr := p.buf[1:]
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.Valid(kwStr)
if !valid {
p.err = "invalid utf-8 in keyword"
return 0
}
ret = Keyword(kwStr)
} else if startNum && scanNumber(string(p.buf), &numval) {
ret = Number(numval)
} else if strEqBuf("nil", p.buf) {
ret = nil
} else if strEqBuf("false", p.buf) {
ret = Bool(false)
} else if strEqBuf("true", p.buf) {
ret = Bool(true)
} else {
if startDig {
p.err = "symbol literal cannot start with a digit"
return 0
} else {
symStr := string(p.buf)
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.ValidString(symStr)
if !valid {
p.err = "invalid utf-8 in symbol"
return 0
}
ret = Symbol(symStr)
}
}
p.buf = p.buf[:0]
p.popState(ret)
return 0
}
func escapeh(p *Parser, state *ParseState, c byte) int {
digit := toHex(c)
if digit < 0 {
p.err = "invalid hex digit in hex escape"
return 1
}
state.argn = (state.argn << 4) + digit
state.counter--
if state.counter == 0 {
p.buf = append(p.buf, byte(state.argn&0xFF))
state.argn = 0
state.consumer = stringchar
}
return 1
}
func escape1(p *Parser, state *ParseState, c byte) int {
e := checkEscape(c)
if e < 0 {
p.err = "invalid string escape sequence"
return 1
}
if c == 'x' {
state.counter = 2
state.argn = 0
state.consumer = escapeh
} else {
p.buf = append(p.buf, c)
state.consumer = stringchar
}
return 1
}
func stringend(p *Parser, state *ParseState) int {
var ret Value
buf := p.buf
if (state.flags & PFLAG_LONGSTRING) != 0 {
/* Check for leading newline character so we can remove it */
if buf[0] == '\n' {
buf = buf[1:]
}
if len(buf) > 0 && buf[len(buf)-1] == '\n' {
buf = buf[:len(buf)-1]
}
}
if (state.flags & PFLAG_BUFFER) != 0 {
b := NewBuffer(len(buf))
_, _ = b.Buf.Write(buf)
ret = b
} else {
ret = String(buf)
}
p.buf = []byte{}
p.popState(ret)
return 1
}
func stringchar(p *Parser, state *ParseState, c byte) int {
/* Enter escape */
if c == '\\' {
state.consumer = escape1
return 1
}
/* String end */
if c == '"' {
return stringend(p, state)
}
/* normal char */
if c != '\n' && c != '\r' {
p.buf = append(p.buf, c)
}
return 1
}
const PFLAG_INSTRING = 0x100000
const PFLAG_END_CANDIDATE = 0x200000
func longstring(p *Parser, state *ParseState, c byte) int {
if (state.flags & PFLAG_INSTRING) != 0 {
/* We are inside the long string */
if c == '`' {
state.flags |= PFLAG_END_CANDIDATE
state.flags &= ^PFLAG_INSTRING
state.counter = 1 /* Use counter to keep track of number of '=' seen */
return 1
}
p.buf = append(p.buf, c)
return 1
} else if (state.flags & PFLAG_END_CANDIDATE) != 0 {
/* We are checking a potential end of the string */
if state.counter == state.argn {
stringend(p, state)
return 0
}
if c == '`' && state.counter < state.argn {
state.counter += 1
return 1
}
/* Failed end candidate */
for i := 0; i < state.counter; i++ {
p.buf = append(p.buf, '`')
}
p.buf = append(p.buf, c)
state.counter = 0
state.flags &= ^PFLAG_END_CANDIDATE
state.flags |= PFLAG_INSTRING
return 1
} else {
/* We are at beginning of string */
state.argn += 1
if c != '`' {
state.flags |= PFLAG_INSTRING
p.buf = append(p.buf, c)
}
return 1
}
}
func comment(p *Parser, state *ParseState, c byte) int {
if c == '\n' {
p.states = p.states[:len(p.states)-1]
p.buf = p.buf[:0]
} else {
p.buf = append(p.buf, c)
}
return 1
}
func atsign(p *Parser, state *ParseState, c byte) int {
p.states = p.states[:len(p.states)-1]
switch c {
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS|PFLAG_ATSYM)
return 1
case '"':
p.pushState(stringchar, PFLAG_BUFFER|PFLAG_STRING)
return 1
case '`':
p.pushState(longstring, PFLAG_BUFFER|PFLAG_LONGSTRING)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS|PFLAG_ATSYM)
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS|PFLAG_ATSYM)
return 1
default:
break
}
p.pushState(tokenchar, PFLAG_TOKEN)
p.buf = append(p.buf, '@')
return 0
}
// Public api
func (parser *Parser) Init() {
parser.args = []Value{}
parser.states = []ParseState{}
parser.buf = []byte{}
parser.err = ""
parser.lookback = 0
parser.line = 1
parser.column = 0
parser.pending = 0
parser.flag = 0
parser.pushState(root, PFLAG_CONTAINER)
}
func (parser *Parser) Consume(c byte) {
consumed := 0
parser.checkDead()
if c == '\r' {
parser.line += 1
parser.column = 0
} else if c == '\n' {
parser.column = 0
if parser.lookback != '\r' {
parser.line += 1
}
} else {
parser.column += 1
}
for consumed == 0 && parser.err == "" {
state := &parser.states[len(parser.states)-1]
consumed = state.consumer(parser, state, c)
}
parser.lookback = c
}
func (parser *Parser) Produce() Value {
var ret Value
if parser.pending == 0 {
return nil
}
ret = parser.args[0]
for i := 1; i < len(parser.args); i += 1 {
parser.args[i-1] = parser.args[i]
}
parser.args = parser.args[:len(parser.args)-1]
parser.pending -= 1
return ret
}
func (parser *Parser) Clone() *Parser {
dest := &Parser{}
/* Misc fields */
dest.flag = parser.flag
dest.pending = parser.pending
dest.lookback = parser.lookback
dest.line = parser.line
dest.column = parser.column
dest.err = parser.err
/* Deep cloned fields */
dest.args = make([]Value, 0, len(parser.args))
dest.states = make([]ParseState, 0, len(parser.states))
dest.buf = make([]byte, 0, len(parser.buf))
dest.args = append(dest.args, parser.args...)
dest.states = append(dest.states, parser.states...)
dest.buf = append(dest.buf, parser.buf...)
return dest
}
| {
top := p.states[len(p.states)-1]
p.states = p.states[:len(p.states)-1]
newtop := &p.states[len(p.states)-1]
if (newtop.flags & PFLAG_CONTAINER) != 0 {
switch val := val.(type) {
case *Tuple:
val.Line = top.line
val.Column = top.column
default:
}
newtop.argn += 1
/* Keep track of number of values in the root state */
if len(p.states) == 1 {
p.pending += 1
}
p.args = append(p.args, val)
return
} else if (newtop.flags & PFLAG_READERMAC) != 0 {
which := "<unknown>"
t := NewTuple(2, 2)
c := newtop.flags & 0xFF
switch c {
case '\'':
which = "quote"
case ',':
which = "unquote"
case ';':
which = "splice"
case '|':
which = "short-fn"
case '~':
which = "quasiquote"
default:
}
t.Vals[0] = Symbol(which)
t.Vals[1] = val
/* Quote source mapping info */
t.Line = newtop.line
t.Column = newtop.column
val = t
} else {
return
}
} | conditional_block |
parse.go | package janet
import (
"strconv"
"unicode/utf8"
)
type JanetParserStatus int
const (
JANET_PARSE_ROOT = iota
JANET_PARSE_ERROR
JANET_PARSE_PENDING
JANET_PARSE_DEAD
)
const (
PFLAG_CONTAINER = 0x100
PFLAG_BUFFER = 0x200
PFLAG_PARENS = 0x400
PFLAG_SQRBRACKETS = 0x800
PFLAG_CURLYBRACKETS = 0x1000
PFLAG_STRING = 0x2000
PFLAG_LONGSTRING = 0x4000
PFLAG_READERMAC = 0x8000
PFLAG_ATSYM = 0x10000
PFLAG_COMMENT = 0x20000
PFLAG_TOKEN = 0x40000
)
type Parser struct {
args []Value
states []ParseState
buf []byte
line int
column int
pending int
lookback byte
flag int
err string
}
type ParseState struct {
counter int
argn int
flags int
line int
column int
consumer Consumer
}
type Consumer func(parser *Parser, state *ParseState, c byte) int
func isWhitespace(c byte) bool {
return c == ' ' ||
c == '\t' ||
c == '\n' ||
c == '\r' ||
c == '\v' ||
c == '\f' ||
c == 0
}
/* Code generated by cjanet/tools/symcharsgen.c.
* The table contains 256 bits, where each bit is 1
* if the corresponding ascii code is a symbol char, and 0
* if not. The upper characters are also considered symbol
* chars and are then checked for utf-8 compliance. */
var symchars = [8]uint32{
0x00000000, 0xf7ffec72, 0xc7ffffff, 0x07fffffe,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
}
/* Check if a character is a valid symbol character
* symbol chars are A-Z, a-z, 0-9, or one of !$&*+-./:<=>@\^_~| */
func isSymbolChar(c byte) bool |
func checkEscape(c byte) int {
switch c {
default:
return -1
case 'x':
return 1
case 'n':
return '\n'
case 't':
return '\t'
case 'r':
return '\r'
case '0':
return 0
case 'z':
return 0
case 'f':
return '\f'
case 'v':
return '\v'
case 'e':
return 27
case '"':
return '"'
case '\\':
return '\\'
}
}
/* Get hex digit from a letter */
func toHex(c byte) int {
if c >= '0' && c <= '9' {
return int(c) - '0'
} else if c >= 'A' && c <= 'F' {
return 10 + int(c) - 'A'
} else if c >= 'a' && c <= 'f' {
return 10 + int(c) - 'a'
} else {
return -1
}
}
func (parser *Parser) checkDead() {
if parser.flag != 0 {
JanetPanic("parser is dead, cannot consume")
}
if parser.err != "" {
JanetPanic("parser has unchecked error, cannot consume")
}
}
func (parser *Parser) pushState(consumer Consumer, flags int) {
state := ParseState{
counter: 0,
argn: 0,
flags: flags,
consumer: consumer,
line: parser.line,
column: parser.column,
}
parser.states = append(parser.states, state)
}
func (p *Parser) popState(val Value) {
for {
top := p.states[len(p.states)-1]
p.states = p.states[:len(p.states)-1]
newtop := &p.states[len(p.states)-1]
if (newtop.flags & PFLAG_CONTAINER) != 0 {
switch val := val.(type) {
case *Tuple:
val.Line = top.line
val.Column = top.column
default:
}
newtop.argn += 1
/* Keep track of number of values in the root state */
if len(p.states) == 1 {
p.pending += 1
}
p.args = append(p.args, val)
return
} else if (newtop.flags & PFLAG_READERMAC) != 0 {
which := "<unknown>"
t := NewTuple(2, 2)
c := newtop.flags & 0xFF
switch c {
case '\'':
which = "quote"
case ',':
which = "unquote"
case ';':
which = "splice"
case '|':
which = "short-fn"
case '~':
which = "quasiquote"
default:
}
t.Vals[0] = Symbol(which)
t.Vals[1] = val
/* Quote source mapping info */
t.Line = newtop.line
t.Column = newtop.column
val = t
} else {
return
}
}
}
func (p *Parser) closeArray(state *ParseState) *Array {
array := NewArray(state.argn, state.argn)
for i := state.argn - 1; i >= 0; i = i - 1 {
array.Data[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return array
}
func (p *Parser) closeTuple(state *ParseState, flags int) *Tuple {
tup := NewTuple(state.argn, state.argn)
tup.Flags = flags
for i := state.argn - 1; i >= 0; i = i - 1 {
tup.Vals[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return tup
}
func (p *Parser) closeStruct(state *ParseState) *Struct {
/*
JanetKV *st = janet_struct_begin(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_struct_put(st, key, value);
}
return janet_wrap_struct(janet_struct_end(st));
*/
panic("XXX")
}
func (p *Parser) closeTable(state *ParseState) *Table {
/*
JanetTable *table = janet_table(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_table_put(table, key, value);
}
return janet_wrap_table(table);
*/
panic("XXX")
}
func root(p *Parser, state *ParseState, c byte) int {
switch c {
default:
if isWhitespace(c) {
return 1
}
if !isSymbolChar(c) {
p.err = "unexpected character"
return 1
}
p.pushState(tokenchar, PFLAG_TOKEN)
return 0
case '\'', ',', ';', '~', '|':
p.pushState(root, PFLAG_READERMAC|int(c))
return 1
case '"':
p.pushState(stringchar, PFLAG_STRING)
return 1
case '#':
p.pushState(comment, PFLAG_COMMENT)
return 1
case '@':
p.pushState(atsign, PFLAG_ATSYM)
return 1
case '`':
p.pushState(longstring, PFLAG_LONGSTRING)
return 1
case ')', ']', '}':
{
var ds Value
if len(p.states) == 1 {
p.err = "unexpected delimiter"
return 1
}
if (c == ')' && (state.flags&PFLAG_PARENS) != 0) ||
(c == ']' && (state.flags&PFLAG_SQRBRACKETS) != 0) {
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeArray(state)
} else {
tupFlags := 0
if c == ']' {
tupFlags = JANET_TUPLE_FLAG_BRACKETCTOR
}
ds = p.closeTuple(state, tupFlags)
}
} else if c == '}' && ((state.flags & PFLAG_CURLYBRACKETS) != 0) {
if (state.argn % 2) == 1 {
p.err = "struct and table literals expect even number of arguments"
return 1
}
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeTable(state)
} else {
ds = p.closeStruct(state)
}
} else {
p.err = "mismatched delimiter"
return 1
}
p.popState(ds)
}
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS)
return 1
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS)
return 1
}
}
func strEqBuf(str string, buf []byte) bool {
if len(str) != len(buf) {
return false
}
for i, b := range buf {
if str[i] != b {
return false
}
}
return true
}
// XXX This will need to be ported from the original janet.
// XXX avoiding casting to string will be more efficient.
func scanNumber(s string, out *float64) bool {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return false
}
*out = v
return true
}
func tokenchar(p *Parser, state *ParseState, c byte) int {
var ret Value
var numval float64
if isSymbolChar(c) {
p.buf = append(p.buf, c)
if c > 127 {
state.argn = 1 /* Use to indicate non ascii */
}
return 1
}
/* Token finished */
startDig := p.buf[0] >= '0' && p.buf[0] <= '9'
startNum := startDig || p.buf[0] == '-' || p.buf[0] == '+' || p.buf[0] == '.'
if p.buf[0] == ':' {
kwStr := p.buf[1:]
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.Valid(kwStr)
if !valid {
p.err = "invalid utf-8 in keyword"
return 0
}
ret = Keyword(kwStr)
} else if startNum && scanNumber(string(p.buf), &numval) {
ret = Number(numval)
} else if strEqBuf("nil", p.buf) {
ret = nil
} else if strEqBuf("false", p.buf) {
ret = Bool(false)
} else if strEqBuf("true", p.buf) {
ret = Bool(true)
} else {
if startDig {
p.err = "symbol literal cannot start with a digit"
return 0
} else {
symStr := string(p.buf)
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.ValidString(symStr)
if !valid {
p.err = "invalid utf-8 in symbol"
return 0
}
ret = Symbol(symStr)
}
}
p.buf = p.buf[:0]
p.popState(ret)
return 0
}
func escapeh(p *Parser, state *ParseState, c byte) int {
digit := toHex(c)
if digit < 0 {
p.err = "invalid hex digit in hex escape"
return 1
}
state.argn = (state.argn << 4) + digit
state.counter--
if state.counter == 0 {
p.buf = append(p.buf, byte(state.argn&0xFF))
state.argn = 0
state.consumer = stringchar
}
return 1
}
func escape1(p *Parser, state *ParseState, c byte) int {
e := checkEscape(c)
if e < 0 {
p.err = "invalid string escape sequence"
return 1
}
if c == 'x' {
state.counter = 2
state.argn = 0
state.consumer = escapeh
} else {
p.buf = append(p.buf, c)
state.consumer = stringchar
}
return 1
}
func stringend(p *Parser, state *ParseState) int {
var ret Value
buf := p.buf
if (state.flags & PFLAG_LONGSTRING) != 0 {
/* Check for leading newline character so we can remove it */
if buf[0] == '\n' {
buf = buf[1:]
}
if len(buf) > 0 && buf[len(buf)-1] == '\n' {
buf = buf[:len(buf)-1]
}
}
if (state.flags & PFLAG_BUFFER) != 0 {
b := NewBuffer(len(buf))
_, _ = b.Buf.Write(buf)
ret = b
} else {
ret = String(buf)
}
p.buf = []byte{}
p.popState(ret)
return 1
}
func stringchar(p *Parser, state *ParseState, c byte) int {
/* Enter escape */
if c == '\\' {
state.consumer = escape1
return 1
}
/* String end */
if c == '"' {
return stringend(p, state)
}
/* normal char */
if c != '\n' && c != '\r' {
p.buf = append(p.buf, c)
}
return 1
}
const PFLAG_INSTRING = 0x100000
const PFLAG_END_CANDIDATE = 0x200000
func longstring(p *Parser, state *ParseState, c byte) int {
if (state.flags & PFLAG_INSTRING) != 0 {
/* We are inside the long string */
if c == '`' {
state.flags |= PFLAG_END_CANDIDATE
state.flags &= ^PFLAG_INSTRING
state.counter = 1 /* Use counter to keep track of number of '=' seen */
return 1
}
p.buf = append(p.buf, c)
return 1
} else if (state.flags & PFLAG_END_CANDIDATE) != 0 {
/* We are checking a potential end of the string */
if state.counter == state.argn {
stringend(p, state)
return 0
}
if c == '`' && state.counter < state.argn {
state.counter += 1
return 1
}
/* Failed end candidate */
for i := 0; i < state.counter; i++ {
p.buf = append(p.buf, '`')
}
p.buf = append(p.buf, c)
state.counter = 0
state.flags &= ^PFLAG_END_CANDIDATE
state.flags |= PFLAG_INSTRING
return 1
} else {
/* We are at beginning of string */
state.argn += 1
if c != '`' {
state.flags |= PFLAG_INSTRING
p.buf = append(p.buf, c)
}
return 1
}
}
func comment(p *Parser, state *ParseState, c byte) int {
if c == '\n' {
p.states = p.states[:len(p.states)-1]
p.buf = p.buf[:0]
} else {
p.buf = append(p.buf, c)
}
return 1
}
func atsign(p *Parser, state *ParseState, c byte) int {
p.states = p.states[:len(p.states)-1]
switch c {
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS|PFLAG_ATSYM)
return 1
case '"':
p.pushState(stringchar, PFLAG_BUFFER|PFLAG_STRING)
return 1
case '`':
p.pushState(longstring, PFLAG_BUFFER|PFLAG_LONGSTRING)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS|PFLAG_ATSYM)
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS|PFLAG_ATSYM)
return 1
default:
break
}
p.pushState(tokenchar, PFLAG_TOKEN)
p.buf = append(p.buf, '@')
return 0
}
// Public api
func (parser *Parser) Init() {
parser.args = []Value{}
parser.states = []ParseState{}
parser.buf = []byte{}
parser.err = ""
parser.lookback = 0
parser.line = 1
parser.column = 0
parser.pending = 0
parser.flag = 0
parser.pushState(root, PFLAG_CONTAINER)
}
func (parser *Parser) Consume(c byte) {
consumed := 0
parser.checkDead()
if c == '\r' {
parser.line += 1
parser.column = 0
} else if c == '\n' {
parser.column = 0
if parser.lookback != '\r' {
parser.line += 1
}
} else {
parser.column += 1
}
for consumed == 0 && parser.err == "" {
state := &parser.states[len(parser.states)-1]
consumed = state.consumer(parser, state, c)
}
parser.lookback = c
}
func (parser *Parser) Produce() Value {
var ret Value
if parser.pending == 0 {
return nil
}
ret = parser.args[0]
for i := 1; i < len(parser.args); i += 1 {
parser.args[i-1] = parser.args[i]
}
parser.args = parser.args[:len(parser.args)-1]
parser.pending -= 1
return ret
}
func (parser *Parser) Clone() *Parser {
dest := &Parser{}
/* Misc fields */
dest.flag = parser.flag
dest.pending = parser.pending
dest.lookback = parser.lookback
dest.line = parser.line
dest.column = parser.column
dest.err = parser.err
/* Deep cloned fields */
dest.args = make([]Value, 0, len(parser.args))
dest.states = make([]ParseState, 0, len(parser.states))
dest.buf = make([]byte, 0, len(parser.buf))
dest.args = append(dest.args, parser.args...)
dest.states = append(dest.states, parser.states...)
dest.buf = append(dest.buf, parser.buf...)
return dest
}
| {
return (symchars[c>>5] & (uint32(1) << (c & 0x1F))) != 0
} | identifier_body |
flux_calc.py | """
Code to calculate mean fluxes of properties through polygon
faces, in depth layers.
"""
#%% Imports
import os
import sys
pth = os.path.abspath('../../LiveOcean/alpha')
if pth not in sys.path:
sys.path.append(pth)
import Lfun
Ldir = Lfun.Lstart()
import zrfun
import numpy as np
from datetime import datetime, timedelta
import pickle
import netCDF4 as nc
import time
import matplotlib.pyplot as plt
testing = False
if testing == True:
plt.close('all')
#%% setup input locations
whichyear = 2005
if Ldir['lo_env'] == 'pm_mac': # mac version
if whichyear == 2006:
R_in_dir0 = Ldir['parent'] + 'roms/output/salish_2006_4_lp/'
out_dir0 = Ldir['parent'] + 'ptools_output/atlantis_mac_2006/'
elif whichyear == 2005:
R_in_dir0 = Ldir['parent'] + 'roms/output/salish_2005_1_lp/'
out_dir0 = Ldir['parent'] + 'ptools_output/atlantis_mac_2005/'
elif Ldir['lo_env'] == 'pm_fjord': # fjord version
if whichyear == 2006:
R_in_dir0 = '/boildat1/parker/roms/output/salish_2006_4_lp/'
out_dir0 = Ldir['parent'] + 'ptools_output/atlantis_fjord_2006/'
elif whichyear == 2005:
R_in_dir0 = '/boildat1/parker/roms/output/salish_2005_1_lp/' |
in_dir = out_dir0 + 'gridded_polygons/'
out_dir = out_dir0 + 'fluxes/'
Lfun.make_dir(out_dir, clean=True)
# load polygon results
gpoly_dict = pickle.load(open(in_dir + 'gpoly_dict.p', 'rb'))
shared_faces_dict = pickle.load(open(in_dir + 'shared_faces.p', 'rb'))
# and specify z levels
z_dict = {0:5, 1:-5, 2:-25, 3:-50, 4:-100, 5:-150, 6:-350}
NLAY = len(z_dict) - 1
#%% find fluxes
dt0 = datetime(whichyear,1,1)
if Ldir['lo_env'] == 'pm_mac':
# have 2006.07.01-31 = days 181 to 211
# big convergence errors for 7/29, 7/30 = 209, 210
day_list = [208, 209] #range(181,211+1)
elif Ldir['lo_env'] == 'pm_fjord':
# in /data1/parker/roms/output/salish_2006_4_lp
# we have f2006.01.04 through 2016.12.29
# = days 3 to 362
day_list = range(3, 363)
counter = 0
for nday in day_list:
tt0 = time.time()
# specify ROMS file to work on
dt0 = datetime(whichyear,1,1)
dt = dt0 + timedelta(days=nday)
f_string = 'f' + dt.strftime('%Y.%m.%d')
print('\nWorking on day %s (nday = %3d)' % (f_string, nday))
R_in_dir = R_in_dir0 + f_string + '/'
R_fn = R_in_dir + 'low_passed.nc'
ds = nc.Dataset(R_fn)
u = ds['u'][:].squeeze()
v = ds['v'][:].squeeze()
w0 = ds['w'][0,-1,:,:].squeeze()
G = zrfun.get_basic_info(R_fn, only_G=True)
S = zrfun.get_basic_info(R_fn, only_S=True)
zeta = ds['zeta'][0,:,:]
z_rho, z_w = zrfun.get_z(G['h'], zeta, S)
DA = G['DX'] * G['DY']
DAm = np.ma.masked_where(zeta.mask, DA)
DZ = np.diff(z_w, axis=0)
# make versions of DA masked for the z layers
zDAm = DAm * np.ones((NLAY, 1, 1))
for nlay in range(NLAY):
z_lower = z_dict[nlay + 1] # lower z
zmask = (-G['h'] >= z_lower)
draft_DA = DA.copy()
draft_DA[zmask] = 0.
this_DAm = np.ma.masked_where(zeta.mask, draft_DA)
zDAm[nlay,:,:] = this_DAm
# Z on u and v grids
Zu = z_rho[:, :, :-1] + np.diff(z_rho, axis=2)/2
Zv = z_rho[:, :-1, :] + np.diff(z_rho, axis=1)/2
zmu_dict = dict()
zmv_dict = dict()
for nlay in range(NLAY):
z_lower = z_dict[nlay + 1] # lower z
z_upper = z_dict[nlay] # upper z
zmu_dict[nlay] = (Zu > z_lower) & (Zu <= z_upper)
zmv_dict[nlay] = (Zv > z_lower) & (Zv <= z_upper)
layu_dict = dict()
layv_dict = dict()
# DZ on u and v grids
DZu = DZ[:, :, :-1] + np.diff(DZ, axis=2)/2
DZv = DZ[:, :-1, :] + np.diff(DZ, axis=1)/2
# DX and DY on u and v grids
DYu = G['DY'][:, :-1]
DXv = G['DX'][:-1, :] + np.diff(G['DX'], axis=0)/2
# cell areas for u and v grid box faces
DAHu = DZu * DYu
DAHv = DZv * DXv
# Initialize arrays to store transport data.
face_trans_dict = dict()
poly_conv_dict = dict()
face_ztrans_dict = dict()
poly_zconv_dict = dict()
# calculate convergence in each polygon
counter = 0
NPOLY = len(gpoly_dict)
for npoly in range(NPOLY):
#print(' npoly = ' + str(npoly))
# we have two objects associated with a given polygon,
# * per_dict[iseg] has arrays of boundary information, and
# * ji_rho_in is an array of indices of interior points
per_dict = gpoly_dict[npoly]['per_dict']
ji_rho_in = gpoly_dict[npoly]['ji_rho_in']
j_in = ji_rho_in[:,0]
i_in = ji_rho_in[:,1]
# find fluxes through the faces
NFACE = len(per_dict)
face_trans_arr = np.zeros(NFACE)
face_area_arr = np.zeros(NFACE)
face_ztrans_arr = np.zeros((NFACE, NLAY))
face_zarea_arr = np.zeros((NFACE, NLAY))
for nface in range(NFACE):
per = per_dict[nface]
JJ = per[:,0]
II = per[:,1]
UV = per[:,2]
PM = per[:,3]
# vectors of integers
JJu = JJ[UV==0]
IIu = II[UV==0]
PMu = PM[UV==0]
JJv = JJ[UV==1]
IIv = II[UV==1]
PMv = PM[UV==1]
# shorter vectors of integers, specific to the u- and v-grids
PMu = PMu.reshape((1, len(PMu)))
PMv = PMv.reshape((1, len(PMv)))
this_u = u[:, JJu, IIu]
this_v = v[:, JJv, IIv]
draft_DAHu = DAHu[:, JJu, IIu]
draft_DAHv = DAHv[:, JJv, IIv]
this_DAHu = np.ma.masked_where(this_u.mask, draft_DAHu)
this_DAHv = np.ma.masked_where(this_v.mask, draft_DAHv)
# check lengths
l1u = this_u.compressed().size
l2u = this_DAHu.compressed().size
if l1u != l2u:
print('Warning: U Result vectors are different lengths')
l1v = this_v.compressed().size
l2v = this_DAHv.compressed().size
if l1v != l2v:
print('Warning: V Result vectors are different lengths')
# do the integrals
if l1u>0:
area_u = this_DAHu.sum()
trans_u = (this_u * this_DAHu * PMu).sum()
else:
area_u = 0.
trans_u = 0.
if l1v>0:
area_v = this_DAHv.sum()
trans_v = (this_v * this_DAHv * PMv).sum()
else:
area_v = 0.
trans_v = 0.
face_trans = trans_u + trans_v
face_trans_arr[nface] = face_trans
face_area = area_u + area_v
face_area_arr[nface] = face_area
# store results for later
face_trans_dict[(npoly, nface)] = (face_trans, face_area)
# START z level code #############################################
# now do the same thing but divvying up into z levels
for nlay in range(NLAY):
this_zmu = zmu_dict[nlay][:, JJu, IIu]
this_zmv = zmv_dict[nlay][:, JJv, IIv]
this_zu = this_u[this_zmu]
this_zv = this_v[this_zmv]
draft_zDAHu = this_DAHu[this_zmu]
draft_zDAHv = this_DAHv[this_zmv]
this_zDAHu = np.ma.masked_where(this_zu.mask, draft_zDAHu)
this_zDAHv = np.ma.masked_where(this_zv.mask, draft_zDAHv)
# check lengths
l1zu = this_zu.compressed().size
l2zu = this_zDAHu.compressed().size
if l1zu != l2zu:
print('Warning: ZU Result vectors are different lengths')
l1zv = this_zv.compressed().size
l2zv = this_zDAHv.compressed().size
if l1zv != l2zv:
print('Warning: ZV Result vectors are different lengths')
# do the integrals
if l1zu>0:
area_zu = this_zDAHu.sum()
trans_zu = (this_zu * this_zDAHu * PMu[0,0]).sum()
else:
area_zu = 0.
trans_zu = 0.
if l1zv>0:
area_zv = this_zDAHv.sum()
trans_zv = (this_zv * this_zDAHv * PMv[0,0]).sum()
else:
area_zv = 0.
trans_zv = 0.
face_ztrans = trans_zu + trans_zv
face_ztrans_arr[nface, nlay] = face_ztrans
face_zarea = area_zu + area_zv
face_zarea_arr[nface, nlay] = face_zarea
# Do we use the _arr fields? I think not, but it doesn't matter.
# store results for later
face_ztrans_dict[(npoly, nface, nlay)] = (face_ztrans, face_zarea)
# END z level code ###############################################
# check results of z level code
# RESULT it works perfectly (to within roundoff error I think)
fzt = 0.
fza = 0.
for nlay in range(NLAY):
fzt += face_ztrans_dict[(npoly, nface, nlay)][0]
fza += face_ztrans_dict[(npoly, nface, nlay)][1]
if np.abs(fzt - face_trans_dict[(npoly, nface)][0]) > .001:
print('npoly=%d nface=%d transport error' % (npoly, nface))
print('fzt=%0.5f ft=%0.5f' % (fzt, face_trans_dict[(npoly, nface)][0]))
if np.abs(fza - face_trans_dict[(npoly, nface)][1]) > .001:
print('npoly=%d nface=%d area error' % (npoly, nface))
print('fza=%0.5f fa=%0.5f' % (fza, face_trans_dict[(npoly, nface)][1]))
poly_zarea = np.zeros(NLAY)
for nlay in range(NLAY):
this_zarea = zDAm[nlay,j_in,i_in].sum()
try:
if this_zarea.mask == True:
this_zarea = 0.
except AttributeError:
pass
poly_zarea[nlay] = this_zarea
poly_area = DAm[j_in,i_in].sum()
try:
if poly_area.mask == True:
poly_area = 0.
except AttributeError:
pass
net_conv = face_trans_arr.sum()
if (poly_area > 0):
poly_mean_w = net_conv/poly_area
else:
poly_mean_w = 0.0
# store results for later
net_face_area = face_area_arr.sum()
poly_conv_dict[npoly] = (net_conv, poly_area, poly_zarea, net_face_area, NFACE)
counter += 1
ds.close()
# save originals
orig_poly_conv_dict = poly_conv_dict.copy()
orig_face_trans_dict = face_trans_dict.copy()
orig_face_ztrans_dict = face_ztrans_dict.copy()
# Next try to adjust all polygons to have conv = 0
NITER = 400
for iii in range(NITER):
new_poly_conv_dict = poly_conv_dict.copy()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = new_poly_conv_dict[npoly]
new_poly_conv_dict[npoly] = (0.0, poly_area, poly_zarea, net_face_area, NFACE)
new_face_trans_dict = face_trans_dict.copy()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
new_net_conv, poly_area, poly_zarea, net_face_area, NFACE = new_poly_conv_dict[npoly]
dconv = new_net_conv - net_conv
if net_face_area != 0.0:
dconv_a = dconv / net_face_area
else:
dconv_a = 0.0
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
if face_trans != 0.0 and face_area != 0.0:
new_face_trans_dict[(npoly, nface)] = (face_trans + dconv_a*face_area, face_area)
else:
pass # keep original values
new_face_trans_dict_copy = new_face_trans_dict.copy()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
try:
new_face_trans, face_area = new_face_trans_dict[(npoly, nface)]
ipoly, iface = shared_faces_dict[(npoly, nface)]
new_facing_trans, facing_area = new_face_trans_dict_copy[(ipoly, iface)]
fact = (new_face_trans + new_facing_trans)/2
new_face_trans_dict[(npoly, nface)] = (new_face_trans - fact, face_area)
except KeyError:
# presumably this face does not have a match on another polygon
pass
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
shelf = []
for nface in range(NFACE):
new_face_trans, face_area = new_face_trans_dict[(npoly, nface)]
shelf.append(new_face_trans)
new_conv = np.array(shelf).sum()
new_poly_conv_dict[npoly] = (new_conv, poly_area, poly_zarea, net_face_area, NFACE)
face_trans_dict = new_face_trans_dict.copy()
poly_conv_dict = new_poly_conv_dict.copy()
# finally add the adjustments to the transports in z levels:
face_ztrans_dict = dict()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
orig_face_trans, face_area = orig_face_trans_dict[(npoly, nface)]
DQ = face_trans - orig_face_trans
for nlay in range(NLAY):
(face_ztrans, face_zarea) = orig_face_ztrans_dict[(npoly, nface, nlay)]
if face_area > 0:
adj = DQ * face_zarea / face_area
else:
adj = 0.
face_ztrans_dict[(npoly, nface, nlay)] = (face_ztrans + adj, face_zarea)
# calculate w at interfaces and check that they are reasonable
# RESULT: they seem good
wz_dict = dict()
# each entry in wz_dict is a tuple of (w, area) at the TOP of a layer
print_info = False
for npoly in range(NPOLY):
if print_info:
print('\nnpoly = %d' % (npoly))
cz = np.zeros(NLAY)
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
for nlay in range(NLAY):
(face_ztrans, face_zarea) = face_ztrans_dict[(npoly, nface, nlay)]
cz[nlay] += face_ztrans
czr = cz[::-1] # packed bottom to top
cczr = np.cumsum(czr)
ccz = cczr[::-1]
# ccz is the vertical transport through the UPPER boundary of each layer
# packed top to bottom like all other z-layer variables
for nlay in range(NLAY):
if nlay == 0: # get the associated horizontal area
this_zarea = poly_area
else:
this_zarea = poly_zarea[nlay-1]
try: # accout for the few cases where an area might be a masked constant
if this_zarea.mask == True:
this_zarea = 0.
except AttributeError:
pass
# calculate the vertical velocity through the layer
# note that the transport throught the deepest level (-350 m) is ZERO
# and the uppermost w is also ~zero because of our iterative correction
if (this_zarea == 0.):
wz_dict[(npoly, nlay)] = (0., 0.)
else:
wz_dict[(npoly, nlay)] = (ccz[nlay] / this_zarea, this_zarea)
if print_info:
print(' z = %4d w = %10.1f (mm/hour)' %
(z_dict[nlay], 3600*1000*wz_dict[(npoly, nlay)][0]))
# save the results for this day
pickle.dump(face_trans_dict, open(out_dir+f_string+'_face_trans.p', 'wb'))
pickle.dump(face_ztrans_dict, open(out_dir+f_string+'_face_ztrans.p', 'wb'))
pickle.dump(poly_conv_dict, open(out_dir+f_string+'_poly_conv.p', 'wb'))
pickle.dump(wz_dict, open(out_dir+f_string+'_wz.p', 'wb'))
# check that the net convergence is still small
# RESULT: it is very,very small
zconv = []
pconv = []
for npoly in range(NPOLY):
cc = 0.
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
for nlay in range(NLAY):
(face_ztrans, face_zarea) = face_ztrans_dict[(npoly, nface, nlay)]
cc += face_ztrans
zconv.append(cc)
pconv.append(net_conv)
print(' max convergence error = %g (m3/s)' % (np.abs(np.array(pconv) - np.array(zconv)).max()))
# check size of w at the free surface
w_arr = np.zeros(NPOLY)
for k in range(NPOLY):
conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[k]
if poly_area > 0:
w_arr[k] = conv / poly_area
else:
w_arr[k] = 0.
print(' max w = %0.5f (mm/hour)' % (3600 * 1000 * np.abs(w_arr.max())))
# report on calculation time
print(' Took %0.1f seconds' % (time.time() - tt0))
sys.stdout.flush()
# a plot of the net convergence in all polygons, before and after
# the iterative correction
if (testing == True):
new_conv = []
orig_conv = []
ipoly = []
for k in poly_conv_dict.keys():
new_conv.append(poly_conv_dict[k][0])
orig_conv.append(orig_poly_conv_dict[k][0])
ipoly.append(k)
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111)
ax.plot(ipoly, orig_conv, '*r', ipoly, new_conv, 'ob')
ax.set_title(f_string)
ax.set_xlabel('Polygon Number')
ax.set_ylabel('Convergence (m3/s)')
plt.show() | out_dir0 = Ldir['parent'] + 'ptools_output/atlantis_fjord_2005/' | random_line_split |
flux_calc.py | """
Code to calculate mean fluxes of properties through polygon
faces, in depth layers.
"""
#%% Imports
import os
import sys
pth = os.path.abspath('../../LiveOcean/alpha')
if pth not in sys.path:
sys.path.append(pth)
import Lfun
Ldir = Lfun.Lstart()
import zrfun
import numpy as np
from datetime import datetime, timedelta
import pickle
import netCDF4 as nc
import time
import matplotlib.pyplot as plt
testing = False
if testing == True:
plt.close('all')
#%% setup input locations
whichyear = 2005
if Ldir['lo_env'] == 'pm_mac': # mac version
if whichyear == 2006:
R_in_dir0 = Ldir['parent'] + 'roms/output/salish_2006_4_lp/'
out_dir0 = Ldir['parent'] + 'ptools_output/atlantis_mac_2006/'
elif whichyear == 2005:
R_in_dir0 = Ldir['parent'] + 'roms/output/salish_2005_1_lp/'
out_dir0 = Ldir['parent'] + 'ptools_output/atlantis_mac_2005/'
elif Ldir['lo_env'] == 'pm_fjord': # fjord version
if whichyear == 2006:
R_in_dir0 = '/boildat1/parker/roms/output/salish_2006_4_lp/'
out_dir0 = Ldir['parent'] + 'ptools_output/atlantis_fjord_2006/'
elif whichyear == 2005:
R_in_dir0 = '/boildat1/parker/roms/output/salish_2005_1_lp/'
out_dir0 = Ldir['parent'] + 'ptools_output/atlantis_fjord_2005/'
in_dir = out_dir0 + 'gridded_polygons/'
out_dir = out_dir0 + 'fluxes/'
Lfun.make_dir(out_dir, clean=True)
# load polygon results
gpoly_dict = pickle.load(open(in_dir + 'gpoly_dict.p', 'rb'))
shared_faces_dict = pickle.load(open(in_dir + 'shared_faces.p', 'rb'))
# and specify z levels
z_dict = {0:5, 1:-5, 2:-25, 3:-50, 4:-100, 5:-150, 6:-350}
NLAY = len(z_dict) - 1
#%% find fluxes
dt0 = datetime(whichyear,1,1)
if Ldir['lo_env'] == 'pm_mac':
# have 2006.07.01-31 = days 181 to 211
# big convergence errors for 7/29, 7/30 = 209, 210
day_list = [208, 209] #range(181,211+1)
elif Ldir['lo_env'] == 'pm_fjord':
# in /data1/parker/roms/output/salish_2006_4_lp
# we have f2006.01.04 through 2016.12.29
# = days 3 to 362
day_list = range(3, 363)
counter = 0
for nday in day_list:
tt0 = time.time()
# specify ROMS file to work on
dt0 = datetime(whichyear,1,1)
dt = dt0 + timedelta(days=nday)
f_string = 'f' + dt.strftime('%Y.%m.%d')
print('\nWorking on day %s (nday = %3d)' % (f_string, nday))
R_in_dir = R_in_dir0 + f_string + '/'
R_fn = R_in_dir + 'low_passed.nc'
ds = nc.Dataset(R_fn)
u = ds['u'][:].squeeze()
v = ds['v'][:].squeeze()
w0 = ds['w'][0,-1,:,:].squeeze()
G = zrfun.get_basic_info(R_fn, only_G=True)
S = zrfun.get_basic_info(R_fn, only_S=True)
zeta = ds['zeta'][0,:,:]
z_rho, z_w = zrfun.get_z(G['h'], zeta, S)
DA = G['DX'] * G['DY']
DAm = np.ma.masked_where(zeta.mask, DA)
DZ = np.diff(z_w, axis=0)
# make versions of DA masked for the z layers
zDAm = DAm * np.ones((NLAY, 1, 1))
for nlay in range(NLAY):
z_lower = z_dict[nlay + 1] # lower z
zmask = (-G['h'] >= z_lower)
draft_DA = DA.copy()
draft_DA[zmask] = 0.
this_DAm = np.ma.masked_where(zeta.mask, draft_DA)
zDAm[nlay,:,:] = this_DAm
# Z on u and v grids
Zu = z_rho[:, :, :-1] + np.diff(z_rho, axis=2)/2
Zv = z_rho[:, :-1, :] + np.diff(z_rho, axis=1)/2
zmu_dict = dict()
zmv_dict = dict()
for nlay in range(NLAY):
z_lower = z_dict[nlay + 1] # lower z
z_upper = z_dict[nlay] # upper z
zmu_dict[nlay] = (Zu > z_lower) & (Zu <= z_upper)
zmv_dict[nlay] = (Zv > z_lower) & (Zv <= z_upper)
layu_dict = dict()
layv_dict = dict()
# DZ on u and v grids
DZu = DZ[:, :, :-1] + np.diff(DZ, axis=2)/2
DZv = DZ[:, :-1, :] + np.diff(DZ, axis=1)/2
# DX and DY on u and v grids
DYu = G['DY'][:, :-1]
DXv = G['DX'][:-1, :] + np.diff(G['DX'], axis=0)/2
# cell areas for u and v grid box faces
DAHu = DZu * DYu
DAHv = DZv * DXv
# Initialize arrays to store transport data.
face_trans_dict = dict()
poly_conv_dict = dict()
face_ztrans_dict = dict()
poly_zconv_dict = dict()
# calculate convergence in each polygon
counter = 0
NPOLY = len(gpoly_dict)
for npoly in range(NPOLY):
#print(' npoly = ' + str(npoly))
# we have two objects associated with a given polygon,
# * per_dict[iseg] has arrays of boundary information, and
# * ji_rho_in is an array of indices of interior points
per_dict = gpoly_dict[npoly]['per_dict']
ji_rho_in = gpoly_dict[npoly]['ji_rho_in']
j_in = ji_rho_in[:,0]
i_in = ji_rho_in[:,1]
# find fluxes through the faces
NFACE = len(per_dict)
face_trans_arr = np.zeros(NFACE)
face_area_arr = np.zeros(NFACE)
face_ztrans_arr = np.zeros((NFACE, NLAY))
face_zarea_arr = np.zeros((NFACE, NLAY))
for nface in range(NFACE):
per = per_dict[nface]
JJ = per[:,0]
II = per[:,1]
UV = per[:,2]
PM = per[:,3]
# vectors of integers
JJu = JJ[UV==0]
IIu = II[UV==0]
PMu = PM[UV==0]
JJv = JJ[UV==1]
IIv = II[UV==1]
PMv = PM[UV==1]
# shorter vectors of integers, specific to the u- and v-grids
PMu = PMu.reshape((1, len(PMu)))
PMv = PMv.reshape((1, len(PMv)))
this_u = u[:, JJu, IIu]
this_v = v[:, JJv, IIv]
draft_DAHu = DAHu[:, JJu, IIu]
draft_DAHv = DAHv[:, JJv, IIv]
this_DAHu = np.ma.masked_where(this_u.mask, draft_DAHu)
this_DAHv = np.ma.masked_where(this_v.mask, draft_DAHv)
# check lengths
l1u = this_u.compressed().size
l2u = this_DAHu.compressed().size
if l1u != l2u:
print('Warning: U Result vectors are different lengths')
l1v = this_v.compressed().size
l2v = this_DAHv.compressed().size
if l1v != l2v:
print('Warning: V Result vectors are different lengths')
# do the integrals
if l1u>0:
area_u = this_DAHu.sum()
trans_u = (this_u * this_DAHu * PMu).sum()
else:
area_u = 0.
trans_u = 0.
if l1v>0:
area_v = this_DAHv.sum()
trans_v = (this_v * this_DAHv * PMv).sum()
else:
area_v = 0.
trans_v = 0.
face_trans = trans_u + trans_v
face_trans_arr[nface] = face_trans
face_area = area_u + area_v
face_area_arr[nface] = face_area
# store results for later
face_trans_dict[(npoly, nface)] = (face_trans, face_area)
# START z level code #############################################
# now do the same thing but divvying up into z levels
for nlay in range(NLAY):
this_zmu = zmu_dict[nlay][:, JJu, IIu]
this_zmv = zmv_dict[nlay][:, JJv, IIv]
this_zu = this_u[this_zmu]
this_zv = this_v[this_zmv]
draft_zDAHu = this_DAHu[this_zmu]
draft_zDAHv = this_DAHv[this_zmv]
this_zDAHu = np.ma.masked_where(this_zu.mask, draft_zDAHu)
this_zDAHv = np.ma.masked_where(this_zv.mask, draft_zDAHv)
# check lengths
l1zu = this_zu.compressed().size
l2zu = this_zDAHu.compressed().size
if l1zu != l2zu:
print('Warning: ZU Result vectors are different lengths')
l1zv = this_zv.compressed().size
l2zv = this_zDAHv.compressed().size
if l1zv != l2zv:
print('Warning: ZV Result vectors are different lengths')
# do the integrals
if l1zu>0:
area_zu = this_zDAHu.sum()
trans_zu = (this_zu * this_zDAHu * PMu[0,0]).sum()
else:
area_zu = 0.
trans_zu = 0.
if l1zv>0:
area_zv = this_zDAHv.sum()
trans_zv = (this_zv * this_zDAHv * PMv[0,0]).sum()
else:
area_zv = 0.
trans_zv = 0.
face_ztrans = trans_zu + trans_zv
face_ztrans_arr[nface, nlay] = face_ztrans
face_zarea = area_zu + area_zv
face_zarea_arr[nface, nlay] = face_zarea
# Do we use the _arr fields? I think not, but it doesn't matter.
# store results for later
face_ztrans_dict[(npoly, nface, nlay)] = (face_ztrans, face_zarea)
# END z level code ###############################################
# check results of z level code
# RESULT it works perfectly (to within roundoff error I think)
fzt = 0.
fza = 0.
for nlay in range(NLAY):
fzt += face_ztrans_dict[(npoly, nface, nlay)][0]
fza += face_ztrans_dict[(npoly, nface, nlay)][1]
if np.abs(fzt - face_trans_dict[(npoly, nface)][0]) > .001:
print('npoly=%d nface=%d transport error' % (npoly, nface))
print('fzt=%0.5f ft=%0.5f' % (fzt, face_trans_dict[(npoly, nface)][0]))
if np.abs(fza - face_trans_dict[(npoly, nface)][1]) > .001:
print('npoly=%d nface=%d area error' % (npoly, nface))
print('fza=%0.5f fa=%0.5f' % (fza, face_trans_dict[(npoly, nface)][1]))
poly_zarea = np.zeros(NLAY)
for nlay in range(NLAY):
this_zarea = zDAm[nlay,j_in,i_in].sum()
try:
if this_zarea.mask == True:
this_zarea = 0.
except AttributeError:
pass
poly_zarea[nlay] = this_zarea
poly_area = DAm[j_in,i_in].sum()
try:
if poly_area.mask == True:
poly_area = 0.
except AttributeError:
pass
net_conv = face_trans_arr.sum()
if (poly_area > 0):
poly_mean_w = net_conv/poly_area
else:
poly_mean_w = 0.0
# store results for later
net_face_area = face_area_arr.sum()
poly_conv_dict[npoly] = (net_conv, poly_area, poly_zarea, net_face_area, NFACE)
counter += 1
ds.close()
# save originals
orig_poly_conv_dict = poly_conv_dict.copy()
orig_face_trans_dict = face_trans_dict.copy()
orig_face_ztrans_dict = face_ztrans_dict.copy()
# Next try to adjust all polygons to have conv = 0
NITER = 400
for iii in range(NITER):
new_poly_conv_dict = poly_conv_dict.copy()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = new_poly_conv_dict[npoly]
new_poly_conv_dict[npoly] = (0.0, poly_area, poly_zarea, net_face_area, NFACE)
new_face_trans_dict = face_trans_dict.copy()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
new_net_conv, poly_area, poly_zarea, net_face_area, NFACE = new_poly_conv_dict[npoly]
dconv = new_net_conv - net_conv
if net_face_area != 0.0:
dconv_a = dconv / net_face_area
else:
dconv_a = 0.0
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
if face_trans != 0.0 and face_area != 0.0:
new_face_trans_dict[(npoly, nface)] = (face_trans + dconv_a*face_area, face_area)
else:
pass # keep original values
new_face_trans_dict_copy = new_face_trans_dict.copy()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
try:
new_face_trans, face_area = new_face_trans_dict[(npoly, nface)]
ipoly, iface = shared_faces_dict[(npoly, nface)]
new_facing_trans, facing_area = new_face_trans_dict_copy[(ipoly, iface)]
fact = (new_face_trans + new_facing_trans)/2
new_face_trans_dict[(npoly, nface)] = (new_face_trans - fact, face_area)
except KeyError:
# presumably this face does not have a match on another polygon
pass
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
shelf = []
for nface in range(NFACE):
new_face_trans, face_area = new_face_trans_dict[(npoly, nface)]
shelf.append(new_face_trans)
new_conv = np.array(shelf).sum()
new_poly_conv_dict[npoly] = (new_conv, poly_area, poly_zarea, net_face_area, NFACE)
face_trans_dict = new_face_trans_dict.copy()
poly_conv_dict = new_poly_conv_dict.copy()
# finally add the adjustments to the transports in z levels:
face_ztrans_dict = dict()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
orig_face_trans, face_area = orig_face_trans_dict[(npoly, nface)]
DQ = face_trans - orig_face_trans
for nlay in range(NLAY):
(face_ztrans, face_zarea) = orig_face_ztrans_dict[(npoly, nface, nlay)]
if face_area > 0:
adj = DQ * face_zarea / face_area
else:
adj = 0.
face_ztrans_dict[(npoly, nface, nlay)] = (face_ztrans + adj, face_zarea)
# calculate w at interfaces and check that they are reasonable
# RESULT: they seem good
wz_dict = dict()
# each entry in wz_dict is a tuple of (w, area) at the TOP of a layer
print_info = False
for npoly in range(NPOLY):
if print_info:
print('\nnpoly = %d' % (npoly))
cz = np.zeros(NLAY)
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
for nlay in range(NLAY):
(face_ztrans, face_zarea) = face_ztrans_dict[(npoly, nface, nlay)]
cz[nlay] += face_ztrans
czr = cz[::-1] # packed bottom to top
cczr = np.cumsum(czr)
ccz = cczr[::-1]
# ccz is the vertical transport through the UPPER boundary of each layer
# packed top to bottom like all other z-layer variables
for nlay in range(NLAY):
if nlay == 0: # get the associated horizontal area
this_zarea = poly_area
else:
this_zarea = poly_zarea[nlay-1]
try: # accout for the few cases where an area might be a masked constant
if this_zarea.mask == True:
this_zarea = 0.
except AttributeError:
pass
# calculate the vertical velocity through the layer
# note that the transport throught the deepest level (-350 m) is ZERO
# and the uppermost w is also ~zero because of our iterative correction
if (this_zarea == 0.):
wz_dict[(npoly, nlay)] = (0., 0.)
else:
wz_dict[(npoly, nlay)] = (ccz[nlay] / this_zarea, this_zarea)
if print_info:
print(' z = %4d w = %10.1f (mm/hour)' %
(z_dict[nlay], 3600*1000*wz_dict[(npoly, nlay)][0]))
# save the results for this day
pickle.dump(face_trans_dict, open(out_dir+f_string+'_face_trans.p', 'wb'))
pickle.dump(face_ztrans_dict, open(out_dir+f_string+'_face_ztrans.p', 'wb'))
pickle.dump(poly_conv_dict, open(out_dir+f_string+'_poly_conv.p', 'wb'))
pickle.dump(wz_dict, open(out_dir+f_string+'_wz.p', 'wb'))
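# Usage sketch (assumes the same out_dir/f_string naming used above): the
# per-day results can be reloaded later for analysis, e.g.
# wz_dict = pickle.load(open(out_dir + f_string + '_wz.p', 'rb'))
# face_trans_dict = pickle.load(open(out_dir + f_string + '_face_trans.p', 'rb'))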
# check that the net convergence is still small
# RESULT: it is very, very small
zconv = []
pconv = []
for npoly in range(NPOLY):
cc = 0.
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
for nlay in range(NLAY):
|
zconv.append(cc)
pconv.append(net_conv)
print(' max convergence error = %g (m3/s)' % (np.abs(np.array(pconv) - np.array(zconv)).max()))
# check size of w at the free surface
w_arr = np.zeros(NPOLY)
for k in range(NPOLY):
conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[k]
if poly_area > 0:
w_arr[k] = conv / poly_area
else:
w_arr[k] = 0.
print(' max w = %0.5f (mm/hour)' % (3600 * 1000 * np.abs(w_arr).max()))
# report on calculation time
print(' Took %0.1f seconds' % (time.time() - tt0))
sys.stdout.flush()
# a plot of the net convergence in all polygons, before and after
# the iterative correction
if (testing == True):
new_conv = []
orig_conv = []
ipoly = []
for k in poly_conv_dict.keys():
new_conv.append(poly_conv_dict[k][0])
orig_conv.append(orig_poly_conv_dict[k][0])
ipoly.append(k)
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111)
ax.plot(ipoly, orig_conv, '*r', ipoly, new_conv, 'ob')
ax.set_title(f_string)
ax.set_xlabel('Polygon Number')
ax.set_ylabel('Convergence (m3/s)')
plt.show()
| (face_ztrans, face_zarea) = face_ztrans_dict[(npoly, nface, nlay)]
cc += face_ztrans | conditional_block |
mpcs_app.py | # mpcs_app.py
#
# Copyright (C) 2011-2017 Vas Vasiliadis
# University of Chicago
#
# Application logic for the GAS
#
##
__author__ = 'Zhuoyu Zhu <[email protected]>'
import stripe
import base64
import datetime
import hashlib
import hmac
import json
import sha
import string
import time
import urllib
import urlparse
import uuid
import boto3
import subprocess
import botocore
import pytz
import botocore.session
from boto3.dynamodb.conditions import Key
from mpcs_utils import log, auth
from bottle import route, request, response, redirect, template, static_file, run, post, get
from datetime import datetime, timedelta
from boto3 import client
from boto3.session import Session
from boto3.dynamodb.conditions import Key, Attr
# Use the boto session object only to get AWS credentials
session = botocore.session.get_session()
aws_access_key_id = str(session.get_credentials().access_key)
aws_secret_access_key = str(session.get_credentials().secret_key)
aws_session_token = str(session.get_credentials().token)
# Create a reference of dynamoDB
region_name = request.app.config['mpcs.aws.app_region']
dynamodb = boto3.resource('dynamodb', region_name = region_name)
ann_table = dynamodb.Table(request.app.config['mpcs.aws.dynamodb.annotations_table'])
# Define s3 policy property
bucket_name = request.app.config['mpcs.aws.s3.inputs_bucket']
encryption = request.app.config['mpcs.aws.s3.encryption']
acl = request.app.config['mpcs.aws.s3.acl']
result_bucket = request.app.config['mpcs.aws.s3.results_bucket']
# Job Request Topic
job_request_topic = request.app.config['mpcs.aws.sns.job_request_topic']
'''
*******************************************************************************
Set up static resource handler - DO NOT CHANGE THIS METHOD IN ANY WAY
*******************************************************************************
'''
@route('/static/<filename:path>', method='GET', name="static")
def serve_static(filename):
# Tell Bottle where static files should be served from
return static_file(filename, root=request.app.config['mpcs.env.static_root'])
'''
*******************************************************************************
Home page
*******************************************************************************
'''
@route('/', method='GET', name="home")
def home_page():
log.info(request.url)
return template(request.app.config['mpcs.env.templates'] + 'home', auth=auth)
'''
*******************************************************************************
Registration form
*******************************************************************************
'''
@route('/register', method='GET', name="register")
def register():
log.info(request.url)
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, name="", email="", username="",
alert=False, success=True, error_message=None)
@route('/register', method='POST', name="register_submit")
def register_submit():
try:
auth.register(description=request.POST.get('name').strip(),
username=request.POST.get('username').strip(),
password=request.POST.get('password').strip(),
email_addr=request.POST.get('email_address').strip(),
role="free_user")
except Exception, error:
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, alert=True, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, alert=True, success=True, error_message=None)
@route('/register/<reg_code>', method='GET', name="register_confirm")
def register_confirm(reg_code):
log.info(request.url)
try:
auth.validate_registration(reg_code)
except Exception, error:
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=True, error_message=None)
'''
*******************************************************************************
Login, logout, and password reset forms
*******************************************************************************
'''
@route('/login', method='GET', name="login")
def login():
log.info(request.url)
redirect_url = "/"
# If the user is trying to access a protected URL, go there after authenticating
if request.query.redirect_url.strip() != "":
redirect_url = request.query.redirect_url
return template(request.app.config['mpcs.env.templates'] + 'login',
auth=auth, redirect_url=redirect_url, alert=False)
@route('/login', method='POST', name="login_submit")
def login_submit():
auth.login(request.POST.get('username'),
request.POST.get('password'),
success_redirect=request.POST.get('redirect_url'),
fail_redirect='/login')
@route('/logout', method='GET', name="logout")
def logout():
log.info(request.url)
auth.logout(success_redirect='/login')
'''
*******************************************************************************
*
CORE APPLICATION CODE IS BELOW...
*
*******************************************************************************
'''
'''
*******************************************************************************
Subscription management handlers
*******************************************************************************
'''
import stripe
# Display form to get subscriber credit card info
@route('/subscribe', method='GET', name="subscribe")
def subscribe():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
return template(request.app.config['mpcs.env.templates'] + 'subscribe', auth=auth, alert=False)
# Process the subscription request
@route('/subscribe', method='POST', name="subscribe_submit")
def subscribe_submit():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
try:
# Extract the Stripe token from the submitted form -- stripe_token
stripe.api_key = request.app.config['mpcs.stripe.secret_key']
token = request.POST['stripe_token']
# Create a premium customer subscribing to premium plan
print 'Welcome to Stripe'
customer = stripe.Customer.create(description=auth.current_user.username, source=token, email=auth.current_user.email_addr)
stripe.Subscription.create(customer=customer.id, plan="premium_plan",)
# Update the user's profile in our user database
auth.current_user.update(role="premium_user")
except stripe.error.CardError, e:
print 'This credit card has been declined'
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
items = res['Items']
client = boto3.client('glacier', region_name = region_name)
# Check if we have any job within our DynamoDB
if len(items) > 0:
for item in items:
# Update the user role to premium in DynamoDB
updateData = ann_table.update_item(
Key={
'job_id': item['job_id']
},
UpdateExpression="set user_role=:a",
ExpressionAttributeValues={
':a': "premium_user"
},
ReturnValues="UPDATED_NEW"
)
# Check if we should initiate archive request
if item['results_file_archive_id'] != 'Not available':
re = client.initiate_job(vaultName='ucmpcs', jobParameters={"Type": "archive-retrieval", "ArchiveId": item['results_file_archive_id'], "SNSTopic": request.app.config['mpcs.aws.sns.glacier_topic'], "Tier": "Expedited"})
return template(request.app.config['mpcs.env.templates'] + 'subscribe_confirm', auth=auth, stripe_id=customer.id, alert=False)
'''
*******************************************************************************
Display the user's profile with subscription link for Free users
*******************************************************************************
'''
@route('/profile', method='GET', name="profile")
def user_profile():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Upgrade link to become a premium user
temp = str(request.url).split('/profile')
upgrade_link = temp[0] + '/subscribe'
return template(request.app.config['mpcs.env.templates'] + 'profile', auth=auth, upgrade_link=upgrade_link, alert=False)
'''
*******************************************************************************
Creates the necessary AWS S3 policy document and renders a form for
uploading an input file using the policy document
*******************************************************************************
'''
@route('/annotate', method='GET', name="annotate")
def upload_input_file():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Generate unique ID to be used as S3 key (name)
key_name = auth.current_user.username + '/' + str(uuid.uuid4())
# Redirect to a route that will call the annotator
redirect_url = str(request.url) + "/job"
# Get the current time
current = datetime.now(pytz.timezone('US/Central'))
expiration = current + timedelta(hours=24)
expiration = expiration.isoformat()
time = expiration[:23]
time = time + "Z"
# Define the S3 policy doc to allow upload via form POST
policy_document = str({
"expiration": time,
"conditions": [
{"bucket": bucket_name},
["starts-with","$key", request.app.config['mpcs.aws.s3.key_prefix']],
["starts-with", "$success_action_redirect", redirect_url],
{"x-amz-server-side-encryption": encryption},
{"x-amz-security-token": aws_session_token},
{"acl": acl}]})
# Encode the policy document - ensure no whitespace before encoding
policy = base64.b64encode(policy_document.translate(None, string.whitespace))
# Sign the policy document using the AWS secret key
signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, hashlib.sha1).digest())
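# Note: S3 recomputes the HMAC-SHA1 of the base64-encoded policy with the
# same secret key and checks each posted form field against the policy
# conditions above, so the upload form must echo the key prefix, redirect,
# encryption, security-token, and acl values exactly.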
# Render the upload form
return template(request.app.config['mpcs.env.templates'] + 'upload',
auth=auth, bucket_name=bucket_name, s3_key_name=key_name,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token, redirect_url=redirect_url,
encryption=encryption, acl=acl, policy=policy, signature=signature)
'''
*******************************************************************************
Accepts the S3 redirect GET request, parses it to extract
required info, saves a job item to the database, and then
publishes a notification for the annotator service.
*******************************************************************************
'''
@route('/annotate/job', method='GET')
def create_annotation_job_request():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get bucket name, key, and job ID from the S3 redirect URL
bucket_name = request.query['bucket']
s3key = request.query['key']
# Get the file name
filename = s3key.split("~")[1]
index = s3key.split("~")[0].rindex('/')
job_id = s3key.split("~")[0][index + 1:]
first = s3key.find('/')
second = s3key.rindex('/')
# Create a job item and persist it to the annotations database
data = {
"job_id": job_id,
"username": auth.current_user.username,
"input_file_name": filename,
"s3_inputs_bucket": bucket_name,
"s3_key_input_file": s3key,
"submit_time": int(time.time()),
"job_status": "PENDING",
"user_email_addr": auth.current_user.email_addr,
"user_role": auth.current_user.role
}
# Insert the new data into data table
ann_table.put_item(Item=data)
# Publish a notification message to the SNS topic
client = boto3.client('sns', region_name = region_name)
response_notification = client.publish(
TopicArn = job_request_topic,
Message = json.dumps(data)
)
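# Note: subscribers receive this payload wrapped in the standard SNS envelope;
# a queue consumer would typically unwrap it with something like
# json.loads(json.loads(sqs_body)['Message']) (sketch; the exact unwrapping
# depends on the delivery protocol).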
# Render upload_confirm template
return template(request.app.config['mpcs.env.templates'] + 'upload_confirm', auth=auth, job_id=job_id, alert=False)
'''
*******************************************************************************
List all annotations for the user
*******************************************************************************
'''
@route('/annotations', method='GET', name="annotations_list")
def get_annotations_list():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
# Get all the relevant detail about current user
items = res['Items']
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Render myannotations template
return template(request.app.config['mpcs.env.templates'] + 'myannotations', auth=auth, items=result_data, alert=False)
'''
*******************************************************************************
Display details of a specific annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>', method='GET', name="annotation_details")
def get_annotation_details(job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current user name
username = auth.current_user.username
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
download_url = ''
# Construct a signed download URL for user to download result file from s3 bucket
if items[0]['job_status'] != 'RUNNING':
resultfile = items[0]['s3_key_result_file'].split('~')
client = boto3.client('s3')
download_url = client.generate_presigned_url(
ClientMethod='get_object',
Params = {
'Bucket': request.app.config['mpcs.aws.s3.results_bucket'], |
# Display annotation detail for specified job
current_time = 0
# Check if the job is still running
if items[0]['job_status'] == 'RUNNING':
new_link = 2
# Check if the given username matches the username in the database
if username == items[0]['username']:
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Display annotation job detail template
return template(request.app.config['mpcs.env.templates'] + 'annotationdetails', auth=auth, items=result_data, new_link=new_link, alert=False)
else:
# Display the not authorized template if username doesn't match
return template(request.app.config['mpcs.env.templates'] + 'notauthorized', auth=auth, alert=False)
# The specified job has completed
else:
current_time = int(items[0]['complete_time'])
time_pass = int(time.time()) - current_time
new_link = 0
# Check if 30 minutes have passed and the current user has the free_user role
if time_pass > 1800 and auth.current_user.role == 'free_user':
new_link = 1
# redirect url and upgrade url
redirect_url = str(request.url) + "/log"
temp = str(request.url).split('/')
upgrade_url = temp[0] + '/subscribe'
if username == items[0]['username']:
# Modify the date and time format that is rendered into template file
result_data = list()
# Convert the date into standard format
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
item['complete_time'] = datetime.fromtimestamp(int(item['complete_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
return template(request.app.config['mpcs.env.templates'] + 'annotationdetails', auth=auth, items=result_data, download_url=download_url, redirect_url=redirect_url, new_link=new_link, upgrade_url=upgrade_url, alert=False)
else:
return template(request.app.config['mpcs.env.templates'] + 'notauthorized', auth=auth, alert=False)
'''
*******************************************************************************
Display the log file for an annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>/log', method='GET', name="annotation_log")
def view_annotation_log(job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get all the relevant detail about the specified job id
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
# Display the log file in the browser
logfile = items[0]['s3_key_log_file'].split('~')
s3 = boto3.resource('s3')
obj = s3.Object(result_bucket, logfile[0] + '/' + logfile[1])
log_content = obj.get()['Body'].read().decode('utf-8')
# Render the log file content
return template(request.app.config['mpcs.env.templates'] + 'logcontent', auth=auth, log_content=log_content, alert=False)
### EOF | 'Key': resultfile[0] + '/' + resultfile[1]
}
) | random_line_split |
mpcs_app.py | # mpcs_app.py
#
# Copyright (C) 2011-2017 Vas Vasiliadis
# University of Chicago
#
# Application logic for the GAS
#
##
__author__ = 'Zhuoyu Zhu <[email protected]>'
import stripe
import base64
import datetime
import hashlib
import hmac
import json
import sha
import string
import time
import urllib
import urlparse
import uuid
import boto3
import subprocess
import botocore
import pytz
import botocore.session
from boto3.dynamodb.conditions import Key
from mpcs_utils import log, auth
from bottle import route, request, response, redirect, template, static_file, run, post, get
from datetime import datetime, timedelta
from boto3 import client
from boto3.session import Session
from boto3.dynamodb.conditions import Key, Attr
# Use the boto session object only to get AWS credentials
session = botocore.session.get_session()
aws_access_key_id = str(session.get_credentials().access_key)
aws_secret_access_key = str(session.get_credentials().secret_key)
aws_session_token = str(session.get_credentials().token)
# Create a reference of dynamoDB
region_name = request.app.config['mpcs.aws.app_region']
dynamodb = boto3.resource('dynamodb', region_name = region_name)
ann_table = dynamodb.Table(request.app.config['mpcs.aws.dynamodb.annotations_table'])
# Define s3 policy property
bucket_name = request.app.config['mpcs.aws.s3.inputs_bucket']
encryption = request.app.config['mpcs.aws.s3.encryption']
acl = request.app.config['mpcs.aws.s3.acl']
result_bucket = request.app.config['mpcs.aws.s3.results_bucket']
# Job Request Topic
job_request_topic = request.app.config['mpcs.aws.sns.job_request_topic']
'''
*******************************************************************************
Set up static resource handler - DO NOT CHANGE THIS METHOD IN ANY WAY
*******************************************************************************
'''
@route('/static/<filename:path>', method='GET', name="static")
def serve_static(filename):
# Tell Bottle where static files should be served from
return static_file(filename, root=request.app.config['mpcs.env.static_root'])
'''
*******************************************************************************
Home page
*******************************************************************************
'''
@route('/', method='GET', name="home")
def home_page():
log.info(request.url)
return template(request.app.config['mpcs.env.templates'] + 'home', auth=auth)
'''
*******************************************************************************
Registration form
*******************************************************************************
'''
@route('/register', method='GET', name="register")
def register():
log.info(request.url)
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, name="", email="", username="",
alert=False, success=True, error_message=None)
@route('/register', method='POST', name="register_submit")
def register_submit():
try:
auth.register(description=request.POST.get('name').strip(),
username=request.POST.get('username').strip(),
password=request.POST.get('password').strip(),
email_addr=request.POST.get('email_address').strip(),
role="free_user")
except Exception, error:
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, alert=True, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, alert=True, success=True, error_message=None)
@route('/register/<reg_code>', method='GET', name="register_confirm")
def register_confirm(reg_code):
log.info(request.url)
try:
auth.validate_registration(reg_code)
except Exception, error:
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=True, error_message=None)
'''
*******************************************************************************
Login, logout, and password reset forms
*******************************************************************************
'''
@route('/login', method='GET', name="login")
def login():
log.info(request.url)
redirect_url = "/"
# If the user is trying to access a protected URL, go there after authenticating
if request.query.redirect_url.strip() != "":
redirect_url = request.query.redirect_url
return template(request.app.config['mpcs.env.templates'] + 'login',
auth=auth, redirect_url=redirect_url, alert=False)
@route('/login', method='POST', name="login_submit")
def login_submit():
auth.login(request.POST.get('username'),
request.POST.get('password'),
success_redirect=request.POST.get('redirect_url'),
fail_redirect='/login')
@route('/logout', method='GET', name="logout")
def logout():
log.info(request.url)
auth.logout(success_redirect='/login')
'''
*******************************************************************************
*
CORE APPLICATION CODE IS BELOW...
*
*******************************************************************************
'''
'''
*******************************************************************************
Subscription management handlers
*******************************************************************************
'''
import stripe
# Display form to get subscriber credit card info
@route('/subscribe', method='GET', name="subscribe")
def subscribe():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
return template(request.app.config['mpcs.env.templates'] + 'subscribe', auth=auth, alert=False)
# Process the subscription request
@route('/subscribe', method='POST', name="subscribe_submit")
def subscribe_submit():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
try:
# Extract the Stripe token from the submitted form -- stripe_token
stripe.api_key = request.app.config['mpcs.stripe.secret_key']
token = request.POST['stripe_token']
# Create a premium customer subscribing to premium plan
print 'Welcome to Stripe'
customer = stripe.Customer.create(description=auth.current_user.username, source=token, email=auth.current_user.email_addr)
stripe.Subscription.create(customer=customer.id, plan="premium_plan",)
# Update the user's profile in our user database
auth.current_user.update(role="premium_user")
except stripe.error.CardError, e:
print 'This credit card has been declined'
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
items = res['Items']
client = boto3.client('glacier', region_name = region_name)
# Check if we have any job within our DynamoDB
if len(items) > 0:
for item in items:
# Update the user role to premium in DynamoDB
updateData = ann_table.update_item(
Key={
'job_id': item['job_id']
},
UpdateExpression="set user_role=:a",
ExpressionAttributeValues={
':a': "premium_user"
},
ReturnValues="UPDATED_NEW"
)
# Check if we should initiate archive request
if item['results_file_archive_id'] != 'Not available':
re = client.initiate_job(vaultName='ucmpcs', jobParameters={"Type": "archive-retrieval", "ArchiveId": item['results_file_archive_id'], "SNSTopic": request.app.config['mpcs.aws.sns.glacier_topic'], "Tier": "Expedited"})
return template(request.app.config['mpcs.env.templates'] + 'subscribe_confirm', auth=auth, stripe_id=customer.id, alert=False)
'''
*******************************************************************************
Display the user's profile with subscription link for Free users
*******************************************************************************
'''
@route('/profile', method='GET', name="profile")
def user_profile():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Upgrade link to become a premium user
temp = str(request.url).split('/profile')
upgrade_link = temp[0] + '/subscribe'
return template(request.app.config['mpcs.env.templates'] + 'profile', auth=auth, upgrade_link=upgrade_link, alert=False)
'''
*******************************************************************************
Creates the necessary AWS S3 policy document and renders a form for
uploading an input file using the policy document
*******************************************************************************
'''
@route('/annotate', method='GET', name="annotate")
def upload_input_file():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Generate unique ID to be used as S3 key (name)
key_name = auth.current_user.username + '/' + str(uuid.uuid4())
# Redirect to a route that will call the annotator
redirect_url = str(request.url) + "/job"
# Get the current time
current = datetime.now(pytz.timezone('US/Central'))
expiration = current + timedelta(hours=24)
expiration = expiration.isoformat()
time = expiration[:23]
time = time + "Z"
# Define the S3 policy doc to allow upload via form POST
policy_document = str({
"expiration": time,
"conditions": [
{"bucket": bucket_name},
["starts-with","$key", request.app.config['mpcs.aws.s3.key_prefix']],
["starts-with", "$success_action_redirect", redirect_url],
{"x-amz-server-side-encryption": encryption},
{"x-amz-security-token": aws_session_token},
{"acl": acl}]})
# Encode the policy document - ensure no whitespace before encoding
policy = base64.b64encode(policy_document.translate(None, string.whitespace))
# Sign the policy document using the AWS secret key
signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, hashlib.sha1).digest())
# Render the upload form
return template(request.app.config['mpcs.env.templates'] + 'upload',
auth=auth, bucket_name=bucket_name, s3_key_name=key_name,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token, redirect_url=redirect_url,
encryption=encryption, acl=acl, policy=policy, signature=signature)
'''
*******************************************************************************
Accepts the S3 redirect GET request, parses it to extract
required info, saves a job item to the database, and then
publishes a notification for the annotator service.
*******************************************************************************
'''
@route('/annotate/job', method='GET')
def create_annotation_job_request():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get bucket name, key, and job ID from the S3 redirect URL
bucket_name = request.query['bucket']
s3key = request.query['key']
# Get the file name
filename = s3key.split("~")[1]
index = s3key.split("~")[0].rindex('/')
job_id = s3key.split("~")[0][index + 1:]
first = s3key.find('/')
second = s3key.rindex('/')
# Create a job item and persist it to the annotations database
data = {
"job_id": job_id,
"username": auth.current_user.username,
"input_file_name": filename,
"s3_inputs_bucket": bucket_name,
"s3_key_input_file": s3key,
"submit_time": int(time.time()),
"job_status": "PENDING",
"user_email_addr": auth.current_user.email_addr,
"user_role": auth.current_user.role
}
# Insert the new data into data table
ann_table.put_item(Item=data)
# Publish a notification message to the SNS topic
client = boto3.client('sns', region_name = region_name)
response_notification = client.publish(
TopicArn = job_request_topic,
Message = json.dumps(data)
)
# Render upload_confirm template
return template(request.app.config['mpcs.env.templates'] + 'upload_confirm', auth=auth, job_id=job_id, alert=False)
'''
*******************************************************************************
List all annotations for the user
*******************************************************************************
'''
@route('/annotations', method='GET', name="annotations_list")
def get_annotations_list():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
# Get all the relevant detail about current user
items = res['Items']
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Render myannotations template
return template(request.app.config['mpcs.env.templates'] + 'myannotations', auth=auth, items=result_data, alert=False)
'''
*******************************************************************************
Display details of a specific annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>', method='GET', name="annotation_details")
def get_annotation_details(job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current user name
username = auth.current_user.username
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
download_url = ''
# Construct a signed download URL for user to download result file from s3 bucket
if items[0]['job_status'] != 'RUNNING':
resultfile = items[0]['s3_key_result_file'].split('~')
client = boto3.client('s3')
download_url = client.generate_presigned_url(
ClientMethod='get_object',
Params = {
'Bucket': request.app.config['mpcs.aws.s3.results_bucket'],
'Key': resultfile[0] + '/' + resultfile[1]
}
)
# Display annotation detail for specified job
current_time = 0
# Check if the job is still running
if items[0]['job_status'] == 'RUNNING':
new_link = 2
# Check if the given username matches the username in the database
if username == items[0]['username']: |
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Display annotation job detail template
return template(request.app.config['mpcs.env.templates'] + 'annotationdetails', auth=auth, items=result_data, new_link=new_link, alert=False)
else:
# Display the not authorized template if username doesn't match
return template(request.app.config['mpcs.env.templates'] + 'notauthorized', auth=auth, alert=False)
# The specified job has completed
else:
current_time = int(items[0]['complete_time'])
time_pass = int(time.time()) - current_time
new_link = 0
# Check if 30 minutes have passed and the current user has the free_user role
if time_pass > 1800 and auth.current_user.role == 'free_user':
new_link = 1
# redirect url and upgrade url
redirect_url = str(request.url) + "/log"
temp = str(request.url).split('/')
upgrade_url = temp[0] + '/subscribe'
if username == items[0]['username']:
# Modify the date and time format that is rendered into template file
result_data = list()
# Convert the date into standard format
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
item['complete_time'] = datetime.fromtimestamp(int(item['complete_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
return template(request.app.config['mpcs.env.templates'] + 'annotationdetails', auth=auth, items=result_data, download_url=download_url, redirect_url=redirect_url, new_link=new_link, upgrade_url=upgrade_url, alert=False)
else:
return template(request.app.config['mpcs.env.templates'] + 'notauthorized', auth=auth, alert=False)
'''
*******************************************************************************
Display the log file for an annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>/log', method='GET', name="annotation_log")
def view_annotation_log(job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get all the relevant detail about the specified job id
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
# Display the log file in the browser
logfile = items[0]['s3_key_log_file'].split('~')
s3 = boto3.resource('s3')
obj = s3.Object(result_bucket, logfile[0] + '/' + logfile[1])
log_content = obj.get()['Body'].read().decode('utf-8')
# Render the log file content
return template(request.app.config['mpcs.env.templates'] + 'logcontent', auth=auth, log_content=log_content, alert=False)
### EOF
| conditional_block |
|
mpcs_app.py | # mpcs_app.py
#
# Copyright (C) 2011-2017 Vas Vasiliadis
# University of Chicago
#
# Application logic for the GAS
#
##
__author__ = 'Zhuoyu Zhu <[email protected]>'
import stripe
import base64
import datetime
import hashlib
import hmac
import json
import sha
import string
import time
import urllib
import urlparse
import uuid
import boto3
import subprocess
import botocore
import pytz
import botocore.session
from boto3.dynamodb.conditions import Key
from mpcs_utils import log, auth
from bottle import route, request, response, redirect, template, static_file, run, post, get
from datetime import datetime, timedelta
from boto3 import client
from boto3.session import Session
from boto3.dynamodb.conditions import Key, Attr
# Use the boto session object only to get AWS credentials
session = botocore.session.get_session()
aws_access_key_id = str(session.get_credentials().access_key)
aws_secret_access_key = str(session.get_credentials().secret_key)
aws_session_token = str(session.get_credentials().token)
# Create a reference of dynamoDB
region_name = request.app.config['mpcs.aws.app_region']
dynamodb = boto3.resource('dynamodb', region_name = region_name)
ann_table = dynamodb.Table(request.app.config['mpcs.aws.dynamodb.annotations_table'])
# Define s3 policy property
bucket_name = request.app.config['mpcs.aws.s3.inputs_bucket']
encryption = request.app.config['mpcs.aws.s3.encryption']
acl = request.app.config['mpcs.aws.s3.acl']
result_bucket = request.app.config['mpcs.aws.s3.results_bucket']
# Job Request Topic
job_request_topic = request.app.config['mpcs.aws.sns.job_request_topic']
'''
*******************************************************************************
Set up static resource handler - DO NOT CHANGE THIS METHOD IN ANY WAY
*******************************************************************************
'''
@route('/static/<filename:path>', method='GET', name="static")
def serve_static(filename):
# Tell Bottle where static files should be served from
return static_file(filename, root=request.app.config['mpcs.env.static_root'])
'''
*******************************************************************************
Home page
*******************************************************************************
'''
@route('/', method='GET', name="home")
def home_page():
log.info(request.url)
return template(request.app.config['mpcs.env.templates'] + 'home', auth=auth)
'''
*******************************************************************************
Registration form
*******************************************************************************
'''
@route('/register', method='GET', name="register")
def register():
log.info(request.url)
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, name="", email="", username="",
alert=False, success=True, error_message=None)
@route('/register', method='POST', name="register_submit")
def register_submit():
try:
auth.register(description=request.POST.get('name').strip(),
username=request.POST.get('username').strip(),
password=request.POST.get('password').strip(),
email_addr=request.POST.get('email_address').strip(),
role="free_user")
except Exception, error:
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, alert=True, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, alert=True, success=True, error_message=None)
@route('/register/<reg_code>', method='GET', name="register_confirm")
def register_confirm(reg_code):
log.info(request.url)
try:
auth.validate_registration(reg_code)
except Exception, error:
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=True, error_message=None)
'''
*******************************************************************************
Login, logout, and password reset forms
*******************************************************************************
'''
@route('/login', method='GET', name="login")
def login():
log.info(request.url)
redirect_url = "/"
# If the user is trying to access a protected URL, go there after authenticating
if request.query.redirect_url.strip() != "":
redirect_url = request.query.redirect_url
return template(request.app.config['mpcs.env.templates'] + 'login',
auth=auth, redirect_url=redirect_url, alert=False)
@route('/login', method='POST', name="login_submit")
def login_submit():
auth.login(request.POST.get('username'),
request.POST.get('password'),
success_redirect=request.POST.get('redirect_url'),
fail_redirect='/login')
@route('/logout', method='GET', name="logout")
def logout():
log.info(request.url)
auth.logout(success_redirect='/login')
'''
*******************************************************************************
*
CORE APPLICATION CODE IS BELOW...
*
*******************************************************************************
'''
'''
*******************************************************************************
Subscription management handlers
*******************************************************************************
'''
import stripe
# Display form to get subscriber credit card info
@route('/subscribe', method='GET', name="subscribe")
def subscribe():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
return template(request.app.config['mpcs.env.templates'] + 'subscribe', auth=auth, alert=False)
# Process the subscription request
@route('/subscribe', method='POST', name="subscribe_submit")
def subscribe_submit():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
try:
# Extract the Stripe token from the submitted form -- stripe_token
stripe.api_key = request.app.config['mpcs.stripe.secret_key']
token = request.POST['stripe_token']
# Create a premium customer subscribing to premium plan
print 'Welcome to Stripe'
customer = stripe.Customer.create(description=auth.current_user.username, source=token, email=auth.current_user.email_addr)
stripe.Subscription.create(customer=customer.id, plan="premium_plan",)
# Update the user's profile in our user database
auth.current_user.update(role="premium_user")
except stripe.error.CardError, e:
print 'This credit card has been declined'
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
items = res['Items']
client = boto3.client('glacier', region_name = region_name)
# Check if we have any job within our DynamoDB
if len(items) > 0:
for item in items:
# Update the user role to premium in DynamoDB
updateData = ann_table.update_item(
Key={
'job_id': item['job_id']
},
UpdateExpression="set user_role=:a",
ExpressionAttributeValues={
':a': "premium_user"
},
ReturnValues="UPDATED_NEW"
)
# Check if we should initiate archive request
if item['results_file_archive_id'] != 'Not available':
re = client.initiate_job(vaultName='ucmpcs', jobParameters={"Type": "archive-retrieval", "ArchiveId": item['results_file_archive_id'], "SNSTopic": request.app.config['mpcs.aws.sns.glacier_topic'], "Tier": "Expedited"})
return template(request.app.config['mpcs.env.templates'] + 'subscribe_confirm', auth=auth, stripe_id=customer.id, alert=False)
'''
*******************************************************************************
Display the user's profile with subscription link for Free users
*******************************************************************************
'''
@route('/profile', method='GET', name="profile")
def user_profile():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Upgrade link to become a premium user
temp = str(request.url).split('/profile')
upgrade_link = temp[0] + '/subscribe'
return template(request.app.config['mpcs.env.templates'] + 'profile', auth=auth, upgrade_link=upgrade_link, alert=False)
'''
*******************************************************************************
Creates the necessary AWS S3 policy document and renders a form for
uploading an input file using the policy document
*******************************************************************************
'''
@route('/annotate', method='GET', name="annotate")
def upload_input_file():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Generate unique ID to be used as S3 key (name)
key_name = auth.current_user.username + '/' + str(uuid.uuid4())
# Redirect to a route that will call the annotator
redirect_url = str(request.url) + "/job"
# Get the current time
current = datetime.now(pytz.timezone('US/Central'))
expiration = current + timedelta(hours=24)
expiration = expiration.isoformat()
time = expiration[:23]
time = time + "Z"
# Define the S3 policy doc to allow upload via form POST
policy_document = str({
"expiration": time,
"conditions": [
{"bucket": bucket_name},
["starts-with","$key", request.app.config['mpcs.aws.s3.key_prefix']],
["starts-with", "$success_action_redirect", redirect_url],
{"x-amz-server-side-encryption": encryption},
{"x-amz-security-token": aws_session_token},
{"acl": acl}]})
# Encode the policy document - ensure no whitespace before encoding
policy = base64.b64encode(policy_document.translate(None, string.whitespace))
# Sign the policy document using the AWS secret key
signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, hashlib.sha1).digest())
# Render the upload form
return template(request.app.config['mpcs.env.templates'] + 'upload',
auth=auth, bucket_name=bucket_name, s3_key_name=key_name,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token, redirect_url=redirect_url,
encryption=encryption, acl=acl, policy=policy, signature=signature)
'''
*******************************************************************************
Accepts the S3 redirect GET request, parses it to extract
required info, saves a job item to the database, and then
publishes a notification for the annotator service.
*******************************************************************************
'''
@route('/annotate/job', method='GET')
def create_annotation_job_request():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get bucket name, key, and job ID from the S3 redirect URL
bucket_name = request.query['bucket']
s3key = request.query['key']
# Get the file name
filename = s3key.split("~")[1]
index = s3key.split("~")[0].rindex('/')
job_id = s3key.split("~")[0][index + 1:]
first = s3key.find('/')
second = s3key.rindex('/')
# Create a job item and persist it to the annotations database
data = {
"job_id": job_id,
"username": auth.current_user.username,
"input_file_name": filename,
"s3_inputs_bucket": bucket_name,
"s3_key_input_file": s3key,
"submit_time": int(time.time()),
"job_status": "PENDING",
"user_email_addr": auth.current_user.email_addr,
"user_role": auth.current_user.role
}
# Insert the new data into data table
ann_table.put_item(Item=data)
# Publish a notification message to the SNS topic
client = boto3.client('sns', region_name = region_name)
response_notification = client.publish(
TopicArn = job_request_topic,
Message = json.dumps(data)
)
# Render upload_confirm template
return template(request.app.config['mpcs.env.templates'] + 'upload_confirm', auth=auth, job_id=job_id, alert=False)
'''
*******************************************************************************
List all annotations for the user
*******************************************************************************
'''
@route('/annotations', method='GET', name="annotations_list")
def get_annotations_list():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
# Get all the relevant detail about current user
items = res['Items']
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Render myannotations template
return template(request.app.config['mpcs.env.templates'] + 'myannotations', auth=auth, items=result_data, alert=False)
'''
*******************************************************************************
Display details of a specific annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>', method='GET', name="annotation_details")
def | (job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current user name
username = auth.current_user.username
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
download_url = ''
# Construct a signed download URL for user to download result file from s3 bucket
if items[0]['job_status'] != 'RUNNING':
resultfile = items[0]['s3_key_result_file'].split('~')
client = boto3.client('s3')
download_url = client.generate_presigned_url(
ClientMethod='get_object',
Params = {
'Bucket': request.app.config['mpcs.aws.s3.results_bucket'],
'Key': resultfile[0] + '/' + resultfile[1]
}
)
# Display annotation detail for specified job
current_time = 0
# Check if the job is still running
if items[0]['job_status'] == 'RUNNING':
new_link = 2
# Check if the given username matches the username in the database
if username == items[0]['username']:
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Display annotation job detail template
return template(request.app.config['mpcs.env.templates'] + 'annotationdetails', auth=auth, items=result_data, new_link=new_link, alert=False)
else:
# Display the not authorized template if username doesn't match
return template(request.app.config['mpcs.env.templates'] + 'notauthorized', auth=auth, alert=False)
# The specified job has completed
else:
current_time = int(items[0]['complete_time'])
time_pass = int(time.time()) - current_time
new_link = 0
# Check if 30 minutes have passed and the current user has the free_user role
if time_pass > 1800 and auth.current_user.role == 'free_user':
new_link = 1
# redirect url and upgrade url
redirect_url = str(request.url) + "/log"
temp = str(request.url).split('/')
upgrade_url = temp[0] + '/subscribe'
if username == items[0]['username']:
# Modify the date and time format that is rendered into template file
result_data = list()
# Convert the date into standard format
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
item['complete_time'] = datetime.fromtimestamp(int(item['complete_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
return template(request.app.config['mpcs.env.templates'] + 'annotationdetails', auth=auth, items=result_data, download_url=download_url, redirect_url=redirect_url, new_link=new_link, upgrade_url=upgrade_url, alert=False)
else:
return template(request.app.config['mpcs.env.templates'] + 'notauthorized', auth=auth, alert=False)
'''
*******************************************************************************
Display the log file for an annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>/log', method='GET', name="annotation_log")
def view_annotation_log(job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get all the relevant detail about the specified job id
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
# Display the log file in the browser
logfile = items[0]['s3_key_log_file'].split('~')
s3 = boto3.resource('s3')
obj = s3.Object(result_bucket, logfile[0] + '/' + logfile[1])
log_content = obj.get()['Body'].read().decode('utf-8')
# Render the log file content
return template(request.app.config['mpcs.env.templates'] + 'logcontent', auth=auth, log_content=log_content, alert=False)
### EOF
| get_annotation_details | identifier_name |
mpcs_app.py | # mpcs_app.py
#
# Copyright (C) 2011-2017 Vas Vasiliadis
# University of Chicago
#
# Application logic for the GAS
#
##
__author__ = 'Zhuoyu Zhu <[email protected]>'
import stripe
import base64
import datetime
import hashlib
import hmac
import json
import sha
import string
import time
import urllib
import urlparse
import uuid
import boto3
import subprocess
import botocore
import pytz
import botocore.session
from boto3.dynamodb.conditions import Key
from mpcs_utils import log, auth
from bottle import route, request, response, redirect, template, static_file, run, post, get
from datetime import datetime, timedelta
from boto3 import client
from boto3.session import Session
from boto3.dynamodb.conditions import Key, Attr
# Use the boto session object only to get AWS credentials
session = botocore.session.get_session()
aws_access_key_id = str(session.get_credentials().access_key)
aws_secret_access_key = str(session.get_credentials().secret_key)
aws_session_token = str(session.get_credentials().token)
# Create a reference of dynamoDB
region_name = request.app.config['mpcs.aws.app_region']
dynamodb = boto3.resource('dynamodb', region_name = region_name)
ann_table = dynamodb.Table(request.app.config['mpcs.aws.dynamodb.annotations_table'])
# Define s3 policy property
bucket_name = request.app.config['mpcs.aws.s3.inputs_bucket']
encryption = request.app.config['mpcs.aws.s3.encryption']
acl = request.app.config['mpcs.aws.s3.acl']
result_bucket = request.app.config['mpcs.aws.s3.results_bucket']
# Job Request Topic
job_request_topic = request.app.config['mpcs.aws.sns.job_request_topic']
'''
*******************************************************************************
Set up static resource handler - DO NOT CHANGE THIS METHOD IN ANY WAY
*******************************************************************************
'''
@route('/static/<filename:path>', method='GET', name="static")
def serve_static(filename):
# Tell Bottle where static files should be served from
return static_file(filename, root=request.app.config['mpcs.env.static_root'])
'''
*******************************************************************************
Home page
*******************************************************************************
'''
@route('/', method='GET', name="home")
def home_page():
log.info(request.url)
return template(request.app.config['mpcs.env.templates'] + 'home', auth=auth)
'''
*******************************************************************************
Registration form
*******************************************************************************
'''
@route('/register', method='GET', name="register")
def register():
log.info(request.url)
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, name="", email="", username="",
alert=False, success=True, error_message=None)
@route('/register', method='POST', name="register_submit")
def register_submit():
try:
auth.register(description=request.POST.get('name').strip(),
username=request.POST.get('username').strip(),
password=request.POST.get('password').strip(),
email_addr=request.POST.get('email_address').strip(),
role="free_user")
except Exception, error:
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, alert=True, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register',
auth=auth, alert=True, success=True, error_message=None)
@route('/register/<reg_code>', method='GET', name="register_confirm")
def register_confirm(reg_code):
log.info(request.url)
try:
auth.validate_registration(reg_code)
except Exception, error:
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=True, error_message=None)
'''
*******************************************************************************
Login, logout, and password reset forms
*******************************************************************************
'''
@route('/login', method='GET', name="login")
def login():
log.info(request.url)
redirect_url = "/"
# If the user is trying to access a protected URL, go there after authenticating
if request.query.redirect_url.strip() != "":
redirect_url = request.query.redirect_url
return template(request.app.config['mpcs.env.templates'] + 'login',
auth=auth, redirect_url=redirect_url, alert=False)
@route('/login', method='POST', name="login_submit")
def login_submit():
auth.login(request.POST.get('username'),
request.POST.get('password'),
success_redirect=request.POST.get('redirect_url'),
fail_redirect='/login')
@route('/logout', method='GET', name="logout")
def logout():
log.info(request.url)
auth.logout(success_redirect='/login')
'''
*******************************************************************************
*
CORE APPLICATION CODE IS BELOW...
*
*******************************************************************************
'''
'''
*******************************************************************************
Subscription management handlers
*******************************************************************************
'''
import stripe
# Display form to get subscriber credit card info
@route('/subscribe', method='GET', name="subscribe")
def subscribe():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
return template(request.app.config['mpcs.env.templates'] + 'subscribe', auth=auth, alert=False)
# Process the subscription request
@route('/subscribe', method='POST', name="subscribe_submit")
def subscribe_submit():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
try:
# Extract the Stripe token from the submitted form -- stripe_token
stripe.api_key = request.app.config['mpcs.stripe.secret_key']
token = request.POST['stripe_token']
# Create a premium customer subscribing to premium plan
print 'Welcome to Stripe'
customer = stripe.Customer.create(description=auth.current_user.username, source=token, email=auth.current_user.email_addr)
stripe.Subscription.create(customer=customer.id, plan="premium_plan",)
# Update the user's profile in our user database
auth.current_user.update(role="premium_user")
except stripe.error.CardError, e:
print 'This credit card has been declined'
# Bail out here so a declined card does not fall through to the premium upgrade below
return template(request.app.config['mpcs.env.templates'] + 'subscribe', auth=auth, alert=True)
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
items = res['Items']
client = boto3.client('glacier', region_name = region_name)
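# Premium upgrade housekeeping: re-tag all of this user's jobs in DynamoDB and request expedited Glacier retrieval for any results that were already archived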
# Check if we have any job within our DynamoDB
if len(items) > 0:
for item in items:
# Update the user role to premium in DynamoDB
updateData = ann_table.update_item(
Key={
'job_id': item['job_id']
},
UpdateExpression="set user_role=:a",
ExpressionAttributeValues={
':a': "premium_user"
},
ReturnValues="UPDATED_NEW"
)
# Check if we should initiate archive request
if item['results_file_archive_id'] != 'Not available':
re = client.initiate_job(vaultName='ucmpcs', jobParameters={"Type": "archive-retrieval", "ArchiveId": item['results_file_archive_id'], "SNSTopic": request.app.config['mpcs.aws.sns.glacier_topic'], "Tier": "Expedited"})
return template(request.app.config['mpcs.env.templates'] + 'subscribe_confirm', auth=auth, stripe_id=customer.id, alert=False)
'''
*******************************************************************************
Display the user's profile with subscription link for Free users
*******************************************************************************
'''
@route('/profile', method='GET', name="profile")
def user_profile():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Upgrade link to become a premium user
temp = str(request.url).split('/profile')
upgrade_link = temp[0] + '/subscribe'
return template(request.app.config['mpcs.env.templates'] + 'profile', auth=auth, upgrade_link=upgrade_link, alert=False)
'''
*******************************************************************************
Creates the necessary AWS S3 policy document and renders a form for
uploading an input file using the policy document
*******************************************************************************
'''
@route('/annotate', method='GET', name="annotate")
def upload_input_file():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Generate unique ID to be used as S3 key (name)
key_name = auth.current_user.username + '/' + str(uuid.uuid4())
# Redirect to a route that will call the annotator
redirect_url = str(request.url) + "/job"
# Get the current time
current = datetime.now(pytz.timezone('US/Central'))
expiration = current + timedelta(hours=24)
expiration_str = expiration.isoformat()[:23] + "Z"  # truncate to milliseconds and append "Z"; renamed from "time" to avoid shadowing the time module
# Define the S3 policy doc to allow upload via form POST
policy_document = str({
"expiration": time,
"conditions": [
{"bucket": bucket_name},
["starts-with","$key", request.app.config['mpcs.aws.s3.key_prefix']],
["starts-with", "$success_action_redirect", redirect_url],
{"x-amz-server-side-encryption": encryption},
{"x-amz-security-token": aws_session_token},
{"acl": acl}]})
# Encode the policy document - ensure no whitespace before encoding
policy = base64.b64encode(policy_document.translate(None, string.whitespace))
# Sign the policy document using the AWS secret key
signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, hashlib.sha1).digest())
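# Classic (pre-SigV4) browser-based S3 POST upload: the base64-encoded policy and its HMAC-SHA1 signature are embedded in the rendered upload form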
# Render the upload form
return template(request.app.config['mpcs.env.templates'] + 'upload',
auth=auth, bucket_name=bucket_name, s3_key_name=key_name,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token, redirect_url=redirect_url,
encryption=encryption, acl=acl, policy=policy, signature=signature)
'''
*******************************************************************************
Accepts the S3 redirect GET request, parses it to extract
required info, saves a job item to the database, and then
publishes a notification for the annotator service.
*******************************************************************************
'''
@route('/annotate/job', method='GET')
def create_annotation_job_request():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get bucket name, key, and job ID from the S3 redirect URL
bucket_name = request.query['bucket']
s3key = request.query['key']
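# The key is expected to look like <key_prefix><username>/<job_id>~<original_filename>, so the job ID sits between the last '/' and the '~'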
# Get the file name
filename = s3key.split("~")[1]
index = s3key.split("~")[0].rindex('/')
job_id = s3key.split("~")[0][index + 1:]
# Create a job item and persist it to the annotations database
data = {
"job_id": job_id,
"username": auth.current_user.username,
"input_file_name": filename,
"s3_inputs_bucket": bucket_name,
"s3_key_input_file": s3key,
"submit_time": int(time.time()),
"job_status": "PENDING",
"user_email_addr": auth.current_user.email_addr,
"user_role": auth.current_user.role
}
# Insert the new data into data table
ann_table.put_item(Item=data)
# Publish a notification message to the SNS topic
client = boto3.client('sns', region_name = region_name)
response_notification = client.publish(
TopicArn = job_request_topic,
Message = json.dumps(data)
)
# Render upload_confirm template
return template(request.app.config['mpcs.env.templates'] + 'upload_confirm', auth=auth, job_id=job_id, alert=False)
'''
*******************************************************************************
List all annotations for the user
*******************************************************************************
'''
@route('/annotations', method='GET', name="annotations_list")
def get_annotations_list():
# Check that user is authenticated
|
'''
*******************************************************************************
Display details of a specific annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>', method='GET', name="annotation_details")
def get_annotation_details(job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current user name
username = auth.current_user.username
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
download_url = ''
# Construct a signed download URL for user to download result file from s3 bucket
if items[0]['job_status'] != 'RUNNING':
resultfile = items[0]['s3_key_result_file'].split('~')
client = boto3.client('s3')
download_url = client.generate_presigned_url(
ClientMethod='get_object',
Params = {
'Bucket': request.app.config['mpcs.aws.s3.results_bucket'],
'Key': resultfile[0] + '/' + resultfile[1]
}
)
# Display annotation detail for specified job
current_time = 0
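# new_link drives what the template shows: 2 = job still running, 0 = results downloadable, 1 = free-user results older than 30 minutes (upgrade link shown instead); the code below assumes the job is either RUNNING or completed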
# Check if the job is still running
if items[0]['job_status'] == 'RUNNING':
new_link = 2
# Check if the given username match the username within database
if username == items[0]['username']:
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Display annotation job detail template
return template(request.app.config['mpcs.env.templates'] + 'annotationdetails', auth=auth, items=result_data, new_link=new_link, alert=False)
else:
# Display the not authorized template if username doesn't match
return template(request.app.config['mpcs.env.templates'] + 'notauthorized', auth=auth, alert=False)
# The specified job has completed
else:
current_time = int(items[0]['complete_time'])
time_pass = int(time.time()) - current_time
new_link = 0
# Check if 30mins passed and the current user is a free user role
if time_pass > 1800 and auth.current_user.role == 'free_user':
new_link = 1
# redirect url and upgrade url
redirect_url = str(request.url) + "/log"
temp = str(request.url).split('/annotations')  # split on the path so temp[0] is the site root (splitting on '/' alone left only the URL scheme)
upgrade_url = temp[0] + '/subscribe'
if username == items[0]['username']:
# Modify the date and time format that is rendered into template file
result_data = list()
# Convert the date into standard format
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
item['complete_time'] = datetime.fromtimestamp(int(item['complete_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
return template(request.app.config['mpcs.env.templates'] + 'annotationdetails', auth=auth, items=result_data, download_url=download_url, redirect_url=redirect_url, new_link=new_link, upgrade_url=upgrade_url, alert=False)
else:
return template(request.app.config['mpcs.env.templates'] + 'notauthorized', auth=auth, alert=False)
'''
*******************************************************************************
Display the log file for an annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>/log', method='GET', name="annotation_log")
def view_annotation_log(job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get all the relevant detail about the specified job id
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
# Display the log file in the browser
logfile = items[0]['s3_key_log_file'].split('~')
s3 = boto3.resource('s3')
obj = s3.Object(result_bucket, logfile[0] + '/' + logfile[1])
log_content = obj.get()['Body'].read().decode('utf-8')
# Render the log file content
return template(request.app.config['mpcs.env.templates'] + 'logcontent', auth=auth, log_content=log_content, alert=False)
### EOF
| auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
# Get all the relevant detail about current user
items = res['Items']
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Render myannotations template
return template(request.app.config['mpcs.env.templates'] + 'myannotations', auth=auth, items=result_data, alert=False) | identifier_body |
blockchain.go | package main
import (
"bytes"
"crypto/ecdsa"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"time"
)
const BlockChainDB = "blockchain.db"
const BlockBucket = "blockbucket"
// BlockChain 4. Define the blockchain struct
type BlockChain struct {
//blocks [] *Block
db *bolt.DB
//storage the last block's hash
tail []byte
}
// NewBlockChain 5. Define a block chain
func | (address string) *BlockChain {
//return &BlockClain{
// []*Block{genesisBlock},
//}
var lastHash []byte
db, err := bolt.Open(BlockChainDB, 0600, nil)
//defer db.Close()
if err != nil {
log.Fatal("create database failed")
}
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
bucket,err = tx.CreateBucket([]byte(BlockBucket))
if err != nil{
log.Fatal("create bucket failed")
}
//Create genesis block
genesisBlock := GenesisBlock(address)
//Write message into database
bucket.Put(genesisBlock.Hash,genesisBlock.Serialize())
bucket.Put([]byte("LastHashKey"),genesisBlock.Hash)
lastHash = genesisBlock.Hash
}else{
lastHash = bucket.Get([]byte("LastHashKey"))
}
return nil
})
return &BlockChain{db,lastHash}
}
// GenesisBlock creates the genesis block
func GenesisBlock(address string) *Block {
coinBase := NewCoinbaseTX(address, "创世块")
coinBases := []*Transaction{coinBase}
return NewBlock(coinBases, []byte{})
}
// AddBlock 6.add a new block
func (bc *BlockChain)AddBlock(txs []*Transaction) {
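// Refuse to add the block if any transaction fails signature verification.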
for _, tx := range txs{
if !bc.VerifyTransaction(tx) {
fmt.Println("校验交易失败")
return
}
}
//found the last block's hash
lastHash := bc.tail
db := bc.db
//create a new block
//send the new block into the blockchain
db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
log.Fatal("no bucket")
}else{
//Write message into database
block := NewBlock(txs, lastHash)
bucket.Put(block.Hash,block.Serialize())
bucket.Put([]byte("LastHashKey"),block.Hash)
//update the last hash
bc.tail = block.Hash
}
return nil
})
}
//正向打印区块链
func (bc *BlockChain) Printchain() {
bcI := bc.NewIterator()
var blockHeight int
var blocks []*Block
for {
block := bcI.Next()
blocks = append(blocks, block)
if block.PrevHash == nil {
break
}
}
for i := len(blocks) - 1; i > -1; i--{
timeFormat := time.Unix(int64(blocks[i].TimeStamp), 0).Format("2006-01-02 15:04:05")
fmt.Printf("=============== 区块高度: %d ==============\n", blockHeight)
fmt.Printf("版本号: %d\n", blocks[i].Version)
fmt.Printf("前区块哈希值: %x\n", blocks[i].PrevHash)
fmt.Printf("梅克尔根: %x\n", blocks[i].Merkel)
fmt.Printf("时间戳: %s\n", timeFormat)
fmt.Printf("难度值: %d\n", blocks[i].Difficulty)
fmt.Printf("随机数 : %d\n", blocks[i].Nonce)
fmt.Printf("当前区块哈希值: %x\n", blocks[i].Hash)
fmt.Printf("区块数据 :%s\n", blocks[i].Transaction[0].TXInputs[0].PubKey)
blockHeight++
}
}
//找到指定地址所有的UTXO,即未消费的
func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
var UTXO []TXOuput
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
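// The OUTPUT label names the per-output loop so `continue OUTPUT` below can move on to the next output once this one is found in the spent set.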
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
/*
//找错误, continue只能跳出最近的for循环
fmt.Println(j)
fmt.Println(i)
var a bool
a = int64(i) == j
fmt.Println(a)
*/
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(output.PubKeyHash,pubKeyHash){
//fmt.Println(output)
UTXO = append(UTXO, output)
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),pubKeyHash){
//key为签名的那个交易
//indexArray := spentOutput[string(input.TXID)]
// //这个数组为签名的那个交易中 已经消费过的output的index值
//indexArray = append(indexArray, input.Index)
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
//fmt.Println("===========")
//fmt.Printf("%x\n", input.TXID)
//fmt.Println(spentOutput[string(input.TXID)])
//fmt.Println("===========")
}
}
}
}
if len(block.PrevHash) == 0 {
break
}
}
return UTXO
}
//找到指定地址所有的UTXO,即未消费的,优化上面函数
//func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
// var UTXO []TXOuput
// txs := bc.FindUTXOsBased(pubKeyHash)
// for _, tx := range txs{
// for _, output := range tx.TXOuputs{
// if bytes.Equal(pubKeyHash,output.PubKeyHash){
// UTXO = append(UTXO, output)
// }
// }
// }
// return UTXO
//}
//FindNeedUTXOs 根据需求找到合理的utxo集合返回,格式 map[string][]in64 即map[Transaction.TXID] {合适的output的index}
func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
//合理utxo集合
utxos := make(map[string][]int64)
//找到钱的总数
var cacl float64
//=================================
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
//将utxo加进来,统计总额,比较是否是否满足转账需求
// 满足则退出并返回
//fmt.Println(output)
if cacl < amount {
//统计金额
cacl += output.Value
//将对应交易号及output的index添加进map
//array := utxos[string(transaction.TXID)]
//array = append(array, int64(i))
utxos[string(transaction.TXID)] = append(utxos[string(transaction.TXID)], int64(i))
if cacl >= amount{
fmt.Printf("找到满足的金额%f\n", cacl)
return utxos, cacl
}
}
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
/*
//key为签名的那个交易
indexArray := spentOutput[string(input.TXID)]
//这个数组为签名的那个交易中 已经消费过的output的index值
indexArray = append(indexArray, input.Index)
*/
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
}
}
}
}
if len(block.PrevHash) == 0 {
//fmt.Println("遍历结束")
break
}
}
//=================================
return utxos, cacl
}
//func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
// //合理utxo集合
// utxos := make(map[string][]int64)
// //找到钱的总数
// var cacl float64
//
// txs := bc.FindUTXOsBased(senderPubKeyHash)
// for _, tx := range txs{
// for i, output := range tx.TXOuputs{
// //if from == output.PubKeyHash{
// //两个byte数组相比
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //将utxo加进来,统计总额,比较是否是否满足转账需求
// // 满足则退出并返回
// if cacl < amount {
// //统计金额
// cacl += output.Value
// //将对应交易号及output的index添加进map
// //array := utxos[string(transaction.TXID)]
// //array = append(array, int64(i))
// utxos[string(tx.TXID)] = append(utxos[string(tx.TXID)], int64(i))
// if cacl >= amount {
// fmt.Printf("找到满足的金额%f\n", cacl)
// return utxos, cacl
// }
// }
// }
// }
// }
// return utxos, cacl
//}
////提炼公共基础函数
////有问题,因为该链上每个区块只有一个交易,所有才能用,如果一个连上有多个交易则可能出错!
//func (bc *BlockChain)FindUTXOsBased(senderPubKeyHash []byte) []*Transaction {
// //var UTXO []TXOuput
// var txs []*Transaction
// //定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
// spentOutput := make(map[string][]int64)
//
//
// // 遍历input,找到自己花费过的utxo的集合
//
// //创建迭代器
// it := bc.NewIterator()
//
// //遍历区块
// for {
// block := it.Next()
//
// //遍历区块中的每笔交易
// for _, transaction := range block.Transaction{
// //遍历output,添加该地址有关的到返回的utxo中
// //这里的i为outputs的下标
// OUTPUT:
// for i, output := range transaction.TXOuputs{
// //过滤,已经消费过的output不用添加进去
// if spentOutput[string(transaction.TXID)] != nil{
// for _, j := range spentOutput[string(transaction.TXID)]{
// /*
// //找错误, continue只能跳出最近的for循环
// fmt.Println(j)
// fmt.Println(i)
// var a bool
// a = int64(i) == j
// fmt.Println(a)
// */
// //标识过下标和循环中的下标对比, 过滤到已经消费的output
// if int64(i) == j{
// continue OUTPUT
// }
// }
// }
//
// //if output.PuKKeyHash == address{
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //fmt.Println(output)
// txs = append(txs, transaction)
// }
// }
// //挖矿交易没有input
// if !transaction.IsCoinbase(){
// //遍历input,找到花费过的utxo的集合
// for _, input := range transaction.TXInputs{
// //判断前面的output是否被用过
// if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
// //key为签名的那个交易
// //indexArray := spentOutput[string(input.TXID)]
// // //这个数组为签名的那个交易中 已经消费过的output的index值
// //indexArray = append(indexArray, input.Index)
// spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
// //fmt.Println("===========")
// //fmt.Printf("%x\n", input.TXID)
// //fmt.Println(spentOutput[string(input.TXID)])
// //fmt.Println("===========")
// }
// }
// }
// }
//
// if len(block.PrevHash) == 0 {
// //fmt.Println("遍历结束")
// break
// }
// }
//
//
// return txs
//}
//找到对应id的交易
func (bc *BlockChain)FindTranscationByTXid(id []byte) (Transaction,error) {
//1.遍历区块链
//2.遍历交易
//3.比较交易,找到则退出
//4.找不到则返回nil及err
it := bc.NewIterator()
for {
block := it.Next()
for _, tx := range block.Transaction{
if bytes.Equal(tx.TXID, id){
return *tx, nil
}
}
if len(block.PrevHash) == 0{
break
}
}
return Transaction{}, errors.New("无效的交易ID!")
}
//签名
func (bc *BlockChain)SignTranscation(tx *Transaction,private *ecdsa.PrivateKey) {
prevTXs := make(map[string]Transaction)
//找到所有引用的交易
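// Each input spends an output of an earlier transaction; collect those transactions keyed by TXID so Sign can look up the outputs being spent.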
for _, input := range tx.TXInputs{
txs, err := bc.FindTranscationByTXid(input.TXID)
if err != nil {
log.Panic(err)
}
//fmt.Printf("%x\n",txs.TXID)
prevTXs[string(input.TXID)] = txs
}
tx.Sign(private, prevTXs)
}
//校验
func (bc *BlockChain)VerifyTransaction(tx *Transaction)bool{
//fmt.Println(tx.IsCoinbase())
if tx.IsCoinbase(){
return true
}
prevTXs := make(map[string]Transaction)
//找到所有引用的交易
for _, input := range tx.TXInputs{
txs, err := bc.FindTranscationByTXid(input.TXID)
if err != nil {
log.Panic(err)
}
prevTXs[string(input.TXID)] = txs
}
return tx.Verify(prevTXs)
}
| NewBlockChain | identifier_name |
blockchain.go | package main
import (
"bytes"
"crypto/ecdsa"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"time"
)
const BlockChainDB = "blockchain.db"
const BlockBucket = "blockbucket"
// BlockChain 4. Define the blockchain struct
type BlockChain struct {
//blocks [] *Block
db *bolt.DB
//storage the last block's hash
tail []byte
}
// NewBlockChain 5. Define a block chain
func NewBlockChain(address string) *BlockChain {
//return &BlockClain{
// []*Block{genesisBlock},
//}
var lastHash []byte
db, err := bolt.Open(BlockChainDB, 0600, nil)
//defer db.Close()
if err != nil {
log.Fatal("create database failed")
}
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
bucket,err = tx.CreateBucket([]byte(BlockBucket))
if err != nil{
log.Fatal("create bucket failed")
}
//Create genesis block
genesisBlock := GenesisBlock(address)
//Write message into database
bucket.Put(genesisBlock.Hash,genesisBlock.Serialize())
bucket.Put([]byte("LastHashKey"),genesisBlock.Hash)
lastHash = genesisBlock.Hash
}else{
lastHash = bucket.Get([]byte("LastHashKey"))
}
return nil
})
return &BlockChain{db,lastHash}
}
// GenesisBlock creates the genesis block
func GenesisBlock(address string) *Block {
coinBase := NewCoinbaseTX(address, "创世块")
coinBases := []*Transaction{coinBase}
return NewBlock(coinBases, []byte{})
}
// AddBlock 6.add a new block
func (bc *BlockChain)AddBlock(txs []*Transaction) {
for _, tx := range txs{
if !bc.VerifyTransaction(tx) {
fmt.Println("校验交易失败")
return
}
}
//found the last block's hash
lastHash := bc.tail
db := bc.db
//create a new block
//send the new block into the blockchain
db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
log.Fatal("no bucket")
}else{
//Write message into database
block := NewBlock(txs, lastHash)
bucket.Put(block.Hash,block.Serialize())
bucket.Put([]byte("LastHashKey"),block.Hash)
//update the last hash
bc.tail = block.Hash
}
return nil
})
}
//正向打印区块链
func (bc *BlockChain) Printchain() {
bcI := bc.NewIterator()
var blockHeight int
var blocks []*Block
for {
block := bcI.Next()
blocks = append(blocks, block)
if block.PrevHash == nil {
break
}
}
for i := len(blocks) - 1; i > -1; i--{
timeFormat := time.Unix(int64(blocks[i].TimeStamp), 0).Format("2006-01-02 15:04:05")
fmt.Printf("=============== 区块高度: %d ==============\n", blockHeight)
fmt.Printf("版本号: %d\n", blocks[i].Version)
fmt.Printf("前区块哈希值: %x\n", blocks[i].PrevHash)
fmt.Printf("梅克尔根: %x\n", blocks[i].Merkel)
fmt.Printf("时间戳: %s\n", timeFormat)
fmt.Printf("难度值: %d\n", blocks[i].Difficulty)
fmt.Printf("随机数 : %d\n", blocks[i].Nonce)
fmt.Printf("当前区块哈希值: %x\n", blocks[i].Hash)
fmt.Printf("区块数据 :%s\n", blocks[i].Transaction[0].TXInputs[0].PubKey)
blockHeight++
}
}
//找到指定地址所有的UTXO,即未消费的
func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
var UTXO []TXOuput
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
/*
| unc (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
//合理utxo集合
utxos := make(map[string][]int64)
//找到钱的总数
var cacl float64
//=================================
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
//将utxo加进来,统计总额,比较是否是否满足转账需求
// 满足则退出并返回
//fmt.Println(output)
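// Greedily accumulate this sender's unspent outputs until the running total covers the requested amount, then return early.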
if cacl < amount {
//统计金额
cacl += output.Value
//将对应交易号及output的index添加进map
//array := utxos[string(transaction.TXID)]
//array = append(array, int64(i))
utxos[string(transaction.TXID)] = append(utxos[string(transaction.TXID)], int64(i))
if cacl >= amount{
fmt.Printf("找到满足的金额%f\n", cacl)
return utxos, cacl
}
}
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
/*
//key为签名的那个交易
indexArray := spentOutput[string(input.TXID)]
//这个数组为签名的那个交易中 已经消费过的output的index值
indexArray = append(indexArray, input.Index)
*/
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
}
}
}
}
if len(block.PrevHash) == 0 {
//fmt.Println("遍历结束")
break
}
}
//=================================
return utxos, cacl
}
//func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
// //合理utxo集合
// utxos := make(map[string][]int64)
// //找到钱的总数
// var cacl float64
//
// txs := bc.FindUTXOsBased(senderPubKeyHash)
// for _, tx := range txs{
// for i, output := range tx.TXOuputs{
// //if from == output.PubKeyHash{
// //两个byte数组相比
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //将utxo加进来,统计总额,比较是否是否满足转账需求
// // 满足则退出并返回
// if cacl < amount {
// //统计金额
// cacl += output.Value
// //将对应交易号及output的index添加进map
// //array := utxos[string(transaction.TXID)]
// //array = append(array, int64(i))
// utxos[string(tx.TXID)] = append(utxos[string(tx.TXID)], int64(i))
// if cacl >= amount {
// fmt.Printf("找到满足的金额%f\n", cacl)
// return utxos, cacl
// }
// }
// }
// }
// }
// return utxos, cacl
//}
////提炼公共基础函数
////有问题,因为该链上每个区块只有一个交易,所有才能用,如果一个连上有多个交易则可能出错!
//func (bc *BlockChain)FindUTXOsBased(senderPubKeyHash []byte) []*Transaction {
// //var UTXO []TXOuput
// var txs []*Transaction
// //定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
// spentOutput := make(map[string][]int64)
//
//
// // 遍历input,找到自己花费过的utxo的集合
//
// //创建迭代器
// it := bc.NewIterator()
//
// //遍历区块
// for {
// block := it.Next()
//
// //遍历区块中的每笔交易
// for _, transaction := range block.Transaction{
// //遍历output,添加该地址有关的到返回的utxo中
// //这里的i为outputs的下标
// OUTPUT:
// for i, output := range transaction.TXOuputs{
// //过滤,已经消费过的output不用添加进去
// if spentOutput[string(transaction.TXID)] != nil{
// for _, j := range spentOutput[string(transaction.TXID)]{
// /*
// //找错误, continue只能跳出最近的for循环
// fmt.Println(j)
// fmt.Println(i)
// var a bool
// a = int64(i) == j
// fmt.Println(a)
// */
// //标识过下标和循环中的下标对比, 过滤到已经消费的output
// if int64(i) == j{
// continue OUTPUT
// }
// }
// }
//
// //if output.PuKKeyHash == address{
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //fmt.Println(output)
// txs = append(txs, transaction)
// }
// }
// //挖矿交易没有input
// if !transaction.IsCoinbase(){
// //遍历input,找到花费过的utxo的集合
// for _, input := range transaction.TXInputs{
// //判断前面的output是否被用过
// if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
// //key为签名的那个交易
// //indexArray := spentOutput[string(input.TXID)]
// // //这个数组为签名的那个交易中 已经消费过的output的index值
// //indexArray = append(indexArray, input.Index)
// spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
// //fmt.Println("===========")
// //fmt.Printf("%x\n", input.TXID)
// //fmt.Println(spentOutput[string(input.TXID)])
// //fmt.Println("===========")
// }
// }
// }
// }
//
// if len(block.PrevHash) == 0 {
// //fmt.Println("遍历结束")
// break
// }
// }
//
//
// return txs
//}
//找到对应id的交易
func (bc *BlockChain)FindTranscationByTXid(id []byte) (Transaction,error) {
//1.遍历区块链
//2.遍历交易
//3.比较交易,找到则退出
//4.找不到则返回nil及err
it := bc.NewIterator()
for {
block := it.Next()
for _, tx := range block.Transaction{
if bytes.Equal(tx.TXID, id){
return *tx, nil
}
}
if len(block.PrevHash) == 0{
break
}
}
return Transaction{}, errors.New("无效的交易ID!")
}
//签名
func (bc *BlockChain)SignTranscation(tx *Transaction,private *ecdsa.PrivateKey) {
prevTXs := make(map[string]Transaction)
//找到所有引用的交易
for _, input := range tx.TXInputs{
txs, err := bc.FindTranscationByTXid(input.TXID)
if err != nil {
log.Panic(err)
}
//fmt.Printf("%x\n",txs.TXID)
prevTXs[string(input.TXID)] = txs
}
tx.Sign(private, prevTXs)
}
//校验
func (bc *BlockChain)VerifyTransaction(tx *Transaction)bool{
//fmt.Println(tx.IsCoinbase())
if tx.IsCoinbase(){
return true
}
prevTXs := make(map[string]Transaction)
//找到所有引用的交易
for _, input := range tx.TXInputs{
txs, err := bc.FindTranscationByTXid(input.TXID)
if err != nil {
log.Panic(err)
}
prevTXs[string(input.TXID)] = txs
}
return tx.Verify(prevTXs)
}
| //找错误, continue只能跳出最近的for循环
fmt.Println(j)
fmt.Println(i)
var a bool
a = int64(i) == j
fmt.Println(a)
*/
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(output.PubKeyHash,pubKeyHash){
//fmt.Println(output)
UTXO = append(UTXO, output)
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),pubKeyHash){
//key为签名的那个交易
//indexArray := spentOutput[string(input.TXID)]
// //这个数组为签名的那个交易中 已经消费过的output的index值
//indexArray = append(indexArray, input.Index)
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
//fmt.Println("===========")
//fmt.Printf("%x\n", input.TXID)
//fmt.Println(spentOutput[string(input.TXID)])
//fmt.Println("===========")
}
}
}
}
if len(block.PrevHash) == 0 {
break
}
}
return UTXO
}
//找到指定地址所有的UTXO,即未消费的,优化上面函数
//func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
// var UTXO []TXOuput
// txs := bc.FindUTXOsBased(pubKeyHash)
// for _, tx := range txs{
// for _, output := range tx.TXOuputs{
// if bytes.Equal(pubKeyHash,output.PubKeyHash){
// UTXO = append(UTXO, output)
// }
// }
// }
// return UTXO
//}
//FindNeedUTXOs 根据需求找到合理的utxo集合返回,格式 map[string][]in64 即map[Transaction.TXID] {合适的output的index}
f | conditional_block |
blockchain.go | package main
import (
"bytes"
"crypto/ecdsa"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"time"
)
const BlockChainDB = "blockchain.db"
const BlockBucket = "blockbucket"
// BlockChain 4. Define the blockchain struct
type BlockChain struct {
//blocks [] *Block
db *bolt.DB
//storage the last block's hash
tail []byte
}
// NewBlockChain 5. Define a block chain
func NewBlockChain(address string) *BlockChain {
//return &BlockClain{
// []*Block{genesisBlock},
//}
var lastHash []byte
db, err := bolt.Open(BlockChainDB, 0600, nil)
//defer db.Close()
if err != nil {
log.Fatal("create database failed")
}
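// Open (or create) the block bucket inside a read-write transaction; on first run, create the genesis block and store it together with the LastHashKey pointer to the newest block.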
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
bucket,err = tx.CreateBucket([]byte(BlockBucket))
if err != nil{
log.Fatal("create bucket failed")
}
//Create genesis block
genesisBlock := GenesisBlock(address)
//Write message into database
bucket.Put(genesisBlock.Hash,genesisBlock.Serialize())
bucket.Put([]byte("LastHashKey"),genesisBlock.Hash)
lastHash = genesisBlock.Hash
}else{
lastHash = bucket.Get([]byte("LastHashKey"))
}
return nil
})
return &BlockChain{db,lastHash}
}
// GenesisBlock creates the genesis block
func GenesisBlock(address string) *Block | ddBlock 6.add a new block
func (bc *BlockChain)AddBlock(txs []*Transaction) {
for _, tx := range txs{
if !bc.VerifyTransaction(tx) {
fmt.Println("校验交易失败")
return
}
}
//found the last block's hash
lastHash := bc.tail
db := bc.db
//create a new block
//send the new block into the blockchain
db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
log.Fatal("no bucket")
}else{
//Write message into database
block := NewBlock(txs, lastHash)
bucket.Put(block.Hash,block.Serialize())
bucket.Put([]byte("LastHashKey"),block.Hash)
//update the last hash
bc.tail = block.Hash
}
return nil
})
}
//正向打印区块链
func (bc *BlockChain) Printchain() {
bcI := bc.NewIterator()
var blockHeight int
var blocks []*Block
for {
block := bcI.Next()
blocks = append(blocks, block)
if block.PrevHash == nil {
break
}
}
for i := len(blocks) - 1; i > -1; i--{
timeFormat := time.Unix(int64(blocks[i].TimeStamp), 0).Format("2006-01-02 15:04:05")
fmt.Printf("=============== 区块高度: %d ==============\n", blockHeight)
fmt.Printf("版本号: %d\n", blocks[i].Version)
fmt.Printf("前区块哈希值: %x\n", blocks[i].PrevHash)
fmt.Printf("梅克尔根: %x\n", blocks[i].Merkel)
fmt.Printf("时间戳: %s\n", timeFormat)
fmt.Printf("难度值: %d\n", blocks[i].Difficulty)
fmt.Printf("随机数 : %d\n", blocks[i].Nonce)
fmt.Printf("当前区块哈希值: %x\n", blocks[i].Hash)
fmt.Printf("区块数据 :%s\n", blocks[i].Transaction[0].TXInputs[0].PubKey)
blockHeight++
}
}
//找到指定地址所有的UTXO,即未消费的
func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
var UTXO []TXOuput
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
/*
//找错误, continue只能跳出最近的for循环
fmt.Println(j)
fmt.Println(i)
var a bool
a = int64(i) == j
fmt.Println(a)
*/
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(output.PubKeyHash,pubKeyHash){
//fmt.Println(output)
UTXO = append(UTXO, output)
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),pubKeyHash){
//key为签名的那个交易
//indexArray := spentOutput[string(input.TXID)]
// //这个数组为签名的那个交易中 已经消费过的output的index值
//indexArray = append(indexArray, input.Index)
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
//fmt.Println("===========")
//fmt.Printf("%x\n", input.TXID)
//fmt.Println(spentOutput[string(input.TXID)])
//fmt.Println("===========")
}
}
}
}
if len(block.PrevHash) == 0 {
break
}
}
return UTXO
}
//找到指定地址所有的UTXO,即未消费的,优化上面函数
//func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
// var UTXO []TXOuput
// txs := bc.FindUTXOsBased(pubKeyHash)
// for _, tx := range txs{
// for _, output := range tx.TXOuputs{
// if bytes.Equal(pubKeyHash,output.PubKeyHash){
// UTXO = append(UTXO, output)
// }
// }
// }
// return UTXO
//}
//FindNeedUTXOs 根据需求找到合理的utxo集合返回,格式 map[string][]in64 即map[Transaction.TXID] {合适的output的index}
func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
//合理utxo集合
utxos := make(map[string][]int64)
//找到钱的总数
var cacl float64
//=================================
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
//将utxo加进来,统计总额,比较是否是否满足转账需求
// 满足则退出并返回
//fmt.Println(output)
if cacl < amount {
//统计金额
cacl += output.Value
//将对应交易号及output的index添加进map
//array := utxos[string(transaction.TXID)]
//array = append(array, int64(i))
utxos[string(transaction.TXID)] = append(utxos[string(transaction.TXID)], int64(i))
if cacl >= amount{
fmt.Printf("找到满足的金额%f\n", cacl)
return utxos, cacl
}
}
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
/*
//key为签名的那个交易
indexArray := spentOutput[string(input.TXID)]
//这个数组为签名的那个交易中 已经消费过的output的index值
indexArray = append(indexArray, input.Index)
*/
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
}
}
}
}
if len(block.PrevHash) == 0 {
//fmt.Println("遍历结束")
break
}
}
//=================================
return utxos, cacl
}
//func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
// //合理utxo集合
// utxos := make(map[string][]int64)
// //找到钱的总数
// var cacl float64
//
// txs := bc.FindUTXOsBased(senderPubKeyHash)
// for _, tx := range txs{
// for i, output := range tx.TXOuputs{
// //if from == output.PubKeyHash{
// //两个byte数组相比
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //将utxo加进来,统计总额,比较是否是否满足转账需求
// // 满足则退出并返回
// if cacl < amount {
// //统计金额
// cacl += output.Value
// //将对应交易号及output的index添加进map
// //array := utxos[string(transaction.TXID)]
// //array = append(array, int64(i))
// utxos[string(tx.TXID)] = append(utxos[string(tx.TXID)], int64(i))
// if cacl >= amount {
// fmt.Printf("找到满足的金额%f\n", cacl)
// return utxos, cacl
// }
// }
// }
// }
// }
// return utxos, cacl
//}
////提炼公共基础函数
////有问题,因为该链上每个区块只有一个交易,所有才能用,如果一个连上有多个交易则可能出错!
//func (bc *BlockChain)FindUTXOsBased(senderPubKeyHash []byte) []*Transaction {
// //var UTXO []TXOuput
// var txs []*Transaction
// //定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
// spentOutput := make(map[string][]int64)
//
//
// // 遍历input,找到自己花费过的utxo的集合
//
// //创建迭代器
// it := bc.NewIterator()
//
// //遍历区块
// for {
// block := it.Next()
//
// //遍历区块中的每笔交易
// for _, transaction := range block.Transaction{
// //遍历output,添加该地址有关的到返回的utxo中
// //这里的i为outputs的下标
// OUTPUT:
// for i, output := range transaction.TXOuputs{
// //过滤,已经消费过的output不用添加进去
// if spentOutput[string(transaction.TXID)] != nil{
// for _, j := range spentOutput[string(transaction.TXID)]{
// /*
// //找错误, continue只能跳出最近的for循环
// fmt.Println(j)
// fmt.Println(i)
// var a bool
// a = int64(i) == j
// fmt.Println(a)
// */
// //标识过下标和循环中的下标对比, 过滤到已经消费的output
// if int64(i) == j{
// continue OUTPUT
// }
// }
// }
//
// //if output.PuKKeyHash == address{
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //fmt.Println(output)
// txs = append(txs, transaction)
// }
// }
// //挖矿交易没有input
// if !transaction.IsCoinbase(){
// //遍历input,找到花费过的utxo的集合
// for _, input := range transaction.TXInputs{
// //判断前面的output是否被用过
// if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
// //key为签名的那个交易
// //indexArray := spentOutput[string(input.TXID)]
// // //这个数组为签名的那个交易中 已经消费过的output的index值
// //indexArray = append(indexArray, input.Index)
// spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
// //fmt.Println("===========")
// //fmt.Printf("%x\n", input.TXID)
// //fmt.Println(spentOutput[string(input.TXID)])
// //fmt.Println("===========")
// }
// }
// }
// }
//
// if len(block.PrevHash) == 0 {
// //fmt.Println("遍历结束")
// break
// }
// }
//
//
// return txs
//}
//找到对应id的交易
func (bc *BlockChain)FindTranscationByTXid(id []byte) (Transaction,error) {
//1.遍历区块链
//2.遍历交易
//3.比较交易,找到则退出
//4.找不到则返回nil及err
it := bc.NewIterator()
for {
block := it.Next()
for _, tx := range block.Transaction{
if bytes.Equal(tx.TXID, id){
return *tx, nil
}
}
if len(block.PrevHash) == 0{
break
}
}
return Transaction{}, errors.New("无效的交易ID!")
}
//签名
func (bc *BlockChain)SignTranscation(tx *Transaction,private *ecdsa.PrivateKey) {
prevTXs := make(map[string]Transaction)
//找到所有引用的交易
for _, input := range tx.TXInputs{
txs, err := bc.FindTranscationByTXid(input.TXID)
if err != nil {
log.Panic(err)
}
//fmt.Printf("%x\n",txs.TXID)
prevTXs[string(input.TXID)] = txs
}
tx.Sign(private, prevTXs)
}
//校验
func (bc *BlockChain)VerifyTransaction(tx *Transaction)bool{
//fmt.Println(tx.IsCoinbase())
if tx.IsCoinbase(){
return true
}
prevTXs := make(map[string]Transaction)
//找到所有引用的交易
for _, input := range tx.TXInputs{
txs, err := bc.FindTranscationByTXid(input.TXID)
if err != nil {
log.Panic(err)
}
prevTXs[string(input.TXID)] = txs
}
return tx.Verify(prevTXs)
}
| {
coinBase := NewCoinbaseTX(address, "创世块")
coinBases := []*Transaction{coinBase}
return NewBlock(coinBases, []byte{})
}
// A | identifier_body |
blockchain.go | package main
import (
"bytes"
"crypto/ecdsa"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"time"
)
const BlockChainDB = "blockchain.db"
const BlockBucket = "blockbucket"
// BlockChain 4. Define the blockchain struct
type BlockChain struct {
//blocks [] *Block
db *bolt.DB
//storage the last block's hash
tail []byte
}
// NewBlockChain 5. Define a block chain
func NewBlockChain(address string) *BlockChain {
//return &BlockClain{
// []*Block{genesisBlock},
//}
var lastHash []byte
db, err := bolt.Open(BlockChainDB, 0600, nil)
//defer db.Close()
if err != nil {
log.Fatal("create database failed")
}
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
bucket,err = tx.CreateBucket([]byte(BlockBucket))
if err != nil{
log.Fatal("create bucket failed")
}
//Create genesis block
genesisBlock := GenesisBlock(address)
//Write message into database
bucket.Put(genesisBlock.Hash,genesisBlock.Serialize())
bucket.Put([]byte("LastHashKey"),genesisBlock.Hash)
lastHash = genesisBlock.Hash
}else{
lastHash = bucket.Get([]byte("LastHashKey"))
}
return nil
})
return &BlockChain{db,lastHash}
}
// GenesisBlock creates the genesis block
func GenesisBlock(address string) *Block {
coinBase := NewCoinbaseTX(address, "创世块")
coinBases := []*Transaction{coinBase}
return NewBlock(coinBases, []byte{})
}
// AddBlock 6.add a new block
func (bc *BlockChain)AddBlock(txs []*Transaction) {
for _, tx := range txs{
if !bc.VerifyTransaction(tx) {
fmt.Println("校验交易失败")
return
}
}
//found the last block's hash
lastHash := bc.tail
db := bc.db
//create a new block
//send the new block into the blockchain
db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
log.Fatal("no bucket")
}else{
//Write message into database
block := NewBlock(txs, lastHash)
bucket.Put(block.Hash,block.Serialize())
bucket.Put([]byte("LastHashKey"),block.Hash)
//update the last hash
bc.tail = block.Hash
}
return nil
})
}
//正向打印区块链
func (bc *BlockChain) Printchain() {
bcI := bc.NewIterator()
var blockHeight int
var blocks []*Block
for {
block := bcI.Next()
blocks = append(blocks, block)
if block.PrevHash == nil {
break
}
}
for i := len(blocks) - 1; i > -1; i--{
timeFormat := time.Unix(int64(blocks[i].TimeStamp), 0).Format("2006-01-02 15:04:05")
fmt.Printf("=============== 区块高度: %d ==============\n", blockHeight)
fmt.Printf("版本号: %d\n", blocks[i].Version)
fmt.Printf("前区块哈希值: %x\n", blocks[i].PrevHash)
fmt.Printf("梅克尔根: %x\n", blocks[i].Merkel)
fmt.Printf("时间戳: %s\n", timeFormat)
fmt.Printf("难度值: %d\n", blocks[i].Difficulty)
fmt.Printf("随机数 : %d\n", blocks[i].Nonce)
fmt.Printf("当前区块哈希值: %x\n", blocks[i].Hash)
fmt.Printf("区块数据 :%s\n", blocks[i].Transaction[0].TXInputs[0].PubKey)
blockHeight++
}
}
//找到指定地址所有的UTXO,即未消费的
func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
var UTXO []TXOuput
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
/*
//找错误, continue只能跳出最近的for循环
fmt.Println(j)
fmt.Println(i)
var a bool
a = int64(i) == j
fmt.Println(a)
*/
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(output.PubKeyHash,pubKeyHash){
//fmt.Println(output)
UTXO = append(UTXO, output)
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),pubKeyHash){
//key为签名的那个交易
//indexArray := spentOutput[string(input.TXID)]
// //这个数组为签名的那个交易中 已经消费过的output的index值
//indexArray = append(indexArray, input.Index)
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
//fmt.Println("===========")
//fmt.Printf("%x\n", input.TXID)
//fmt.Println(spentOutput[string(input.TXID)])
//fmt.Println("===========")
}
}
}
}
if len(block.PrevHash) == 0 {
break | return UTXO
}
//找到指定地址所有的UTXO,即未消费的,优化上面函数
//func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
// var UTXO []TXOuput
// txs := bc.FindUTXOsBased(pubKeyHash)
// for _, tx := range txs{
// for _, output := range tx.TXOuputs{
// if bytes.Equal(pubKeyHash,output.PubKeyHash){
// UTXO = append(UTXO, output)
// }
// }
// }
// return UTXO
//}
//FindNeedUTXOs 根据需求找到合理的utxo集合返回,格式 map[string][]in64 即map[Transaction.TXID] {合适的output的index}
func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
//合理utxo集合
utxos := make(map[string][]int64)
//找到钱的总数
var cacl float64
//=================================
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
//将utxo加进来,统计总额,比较是否是否满足转账需求
// 满足则退出并返回
//fmt.Println(output)
if cacl < amount {
//统计金额
cacl += output.Value
//将对应交易号及output的index添加进map
//array := utxos[string(transaction.TXID)]
//array = append(array, int64(i))
utxos[string(transaction.TXID)] = append(utxos[string(transaction.TXID)], int64(i))
if cacl >= amount{
fmt.Printf("找到满足的金额%f\n", cacl)
return utxos, cacl
}
}
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
/*
//key为签名的那个交易
indexArray := spentOutput[string(input.TXID)]
//这个数组为签名的那个交易中 已经消费过的output的index值
indexArray = append(indexArray, input.Index)
*/
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
}
}
}
}
if len(block.PrevHash) == 0 {
//fmt.Println("遍历结束")
break
}
}
//=================================
return utxos, cacl
}
//func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
// //合理utxo集合
// utxos := make(map[string][]int64)
// //找到钱的总数
// var cacl float64
//
// txs := bc.FindUTXOsBased(senderPubKeyHash)
// for _, tx := range txs{
// for i, output := range tx.TXOuputs{
// //if from == output.PubKeyHash{
// //两个byte数组相比
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //将utxo加进来,统计总额,比较是否是否满足转账需求
// // 满足则退出并返回
// if cacl < amount {
// //统计金额
// cacl += output.Value
// //将对应交易号及output的index添加进map
// //array := utxos[string(transaction.TXID)]
// //array = append(array, int64(i))
// utxos[string(tx.TXID)] = append(utxos[string(tx.TXID)], int64(i))
// if cacl >= amount {
// fmt.Printf("找到满足的金额%f\n", cacl)
// return utxos, cacl
// }
// }
// }
// }
// }
// return utxos, cacl
//}
////提炼公共基础函数
////有问题,因为该链上每个区块只有一个交易,所有才能用,如果一个连上有多个交易则可能出错!
//func (bc *BlockChain)FindUTXOsBased(senderPubKeyHash []byte) []*Transaction {
// //var UTXO []TXOuput
// var txs []*Transaction
// //定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
// spentOutput := make(map[string][]int64)
//
//
// // 遍历input,找到自己花费过的utxo的集合
//
// //创建迭代器
// it := bc.NewIterator()
//
// //遍历区块
// for {
// block := it.Next()
//
// //遍历区块中的每笔交易
// for _, transaction := range block.Transaction{
// //遍历output,添加该地址有关的到返回的utxo中
// //这里的i为outputs的下标
// OUTPUT:
// for i, output := range transaction.TXOuputs{
// //过滤,已经消费过的output不用添加进去
// if spentOutput[string(transaction.TXID)] != nil{
// for _, j := range spentOutput[string(transaction.TXID)]{
// /*
// //找错误, continue只能跳出最近的for循环
// fmt.Println(j)
// fmt.Println(i)
// var a bool
// a = int64(i) == j
// fmt.Println(a)
// */
// //标识过下标和循环中的下标对比, 过滤到已经消费的output
// if int64(i) == j{
// continue OUTPUT
// }
// }
// }
//
// //if output.PuKKeyHash == address{
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //fmt.Println(output)
// txs = append(txs, transaction)
// }
// }
// //挖矿交易没有input
// if !transaction.IsCoinbase(){
// //遍历input,找到花费过的utxo的集合
// for _, input := range transaction.TXInputs{
// //判断前面的output是否被用过
// if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
// //key为签名的那个交易
// //indexArray := spentOutput[string(input.TXID)]
// // //这个数组为签名的那个交易中 已经消费过的output的index值
// //indexArray = append(indexArray, input.Index)
// spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
// //fmt.Println("===========")
// //fmt.Printf("%x\n", input.TXID)
// //fmt.Println(spentOutput[string(input.TXID)])
// //fmt.Println("===========")
// }
// }
// }
// }
//
// if len(block.PrevHash) == 0 {
// //fmt.Println("遍历结束")
// break
// }
// }
//
//
// return txs
//}
//找到对应id的交易
func (bc *BlockChain)FindTranscationByTXid(id []byte) (Transaction,error) {
//1.遍历区块链
//2.遍历交易
//3.比较交易,找到则退出
//4.找不到则返回nil及err
it := bc.NewIterator()
for {
block := it.Next()
for _, tx := range block.Transaction{
if bytes.Equal(tx.TXID, id){
return *tx, nil
}
}
if len(block.PrevHash) == 0{
break
}
}
return Transaction{}, errors.New("无效的交易ID!")
}
//签名
func (bc *BlockChain)SignTranscation(tx *Transaction,private *ecdsa.PrivateKey) {
prevTXs := make(map[string]Transaction)
//找到所有引用的交易
for _, input := range tx.TXInputs{
txs, err := bc.FindTranscationByTXid(input.TXID)
if err != nil {
log.Panic(err)
}
//fmt.Printf("%x\n",txs.TXID)
prevTXs[string(input.TXID)] = txs
}
tx.Sign(private, prevTXs)
}
//校验
func (bc *BlockChain)VerifyTransaction(tx *Transaction)bool{
//fmt.Println(tx.IsCoinbase())
if tx.IsCoinbase(){
return true
}
prevTXs := make(map[string]Transaction)
//找到所有引用的交易
for _, input := range tx.TXInputs{
txs, err := bc.FindTranscationByTXid(input.TXID)
if err != nil {
log.Panic(err)
}
prevTXs[string(input.TXID)] = txs
}
return tx.Verify(prevTXs)
} | }
} | random_line_split |
app.py | import os
import re
import datetime
from werkzeug.utils import secure_filename
from flask import Flask, jsonify, request, abort, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_mysqldb import MySQL
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_jwt_extended import (
JWTManager, jwt_required, create_access_token,
jwt_refresh_token_required, create_refresh_token,
get_jwt_identity, set_access_cookies,
set_refresh_cookies, unset_jwt_cookies
)
from flask_wtf.csrf import CSRFProtect
from model import user_model
from model import student_model
from model import lecturer_model
from model import mentor_model
from controller import user_controller, news_controller, blogger_controller
import json
from model import base
from functools import wraps
from flask import g, request, redirect, url_for
import MySQLdb
from inittables import InitTables
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import sessionmaker
from model.school_model import School, Faculty, Department, Program, Level
from platform_model import class_mem, class_message, class_model, course_model, course_mem, course_lecturer, course_message
from message_model import group_conv_init, group_conv_mem, personal_conv_init, personal_conv_mem, personal_message_model, group_message_model
from entertainment_model import news_model, blogger_model, featured_image_model, like, comment, news_category
from personal_model import course_reg, calender
from model.user_type import UserType
# Retrieves database configuration from environment variables
mysql_host = os.environ.get('MYSQL_HOST')
mysql_user = os.environ.get('MYSQL_USER')
mysql_password = os.environ.get('MYSQL_PASSWORD')
db_name = 'testing' # os.environ.get('DB_NAME')
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'mp3', 'mp4', 'pdf', 'doc', 'docx'} # Feel free to add more file types to be accepted
MAX_CONTENT_LENGTH = 1024*1024*10 # Maximum file upload size is 10 MB
# Flask app configuration
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://' + mysql_user + ':' + mysql_password + '@' + mysql_host + '/' + db_name
app.config['JWT_TOKEN_LOCATION'] = ['cookies']
app.config['JWT_COOKIE_SECURE'] = False
app.config['JWT_ACCESS_COOKIE_PATH'] = '/api/'
app.config['JWT_REFRESH_COOKIE_PATH'] = '/token/refresh'
app.config['JWT_COOKIE_CSRF_PROTECT'] = True
app.config['JWT_SECRET_KEY'] = os.environ.get('JWT_SECRET_KEY') #The Jwt_secret_key is obtained from environment variables
app.config['MAX_CONTENT_LENGTH'] = MAX_CONTENT_LENGTH
app.config['UPLOAD_EXTENSIONS'] = ALLOWED_EXTENSIONS
app.config['UPLOAD_PATH'] = UPLOAD_FOLDER
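# JWT note: tokens travel in cookies (not marked Secure here, so this is development-style config); the access cookie is only sent to /api/ paths, the refresh cookie only to /token/refresh, and double-submit CSRF protection is enabled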
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
jwt = JWTManager(app)
base.Base.metadata.create_all(db.engine, checkfirst=True)
# This part of the code is to check if the important tables (School, Faculty, Department, Program, Level) have been initialized
Session = sessionmaker(db.engine)
Session.configure()
session = Session()
if session.query(School).count() == 0 or session.query(Faculty).count() == 0 or session.query(Department).count() == 0 or session.query(Program).count() == 0 or session.query(Level).count() == 0 :
InitTables(session)
controller = user_controller.UserController(db) #Create instance of controller to perform some operations
n_controller = news_controller.NewsController(db) # Create instance of news controller to perform some operations
b_controller = blogger_controller.BloggerController(db) #Create instance of blogger controller to perform some operations
CORS(app)
# Sets up the funtion for checking if a user is logged in
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'csrf_access_token' not in request.cookies:
return {'Error': 'You have to login first'}
return f(*args, **kwargs)
return decorated_function
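# Note: this decorator only checks that the CSRF access cookie exists; it does not validate the JWT itself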
# Register a user - either a student, lecturer or mentor
@app.route('/users/register', methods = ['POST'])
def register ():
try:
# Gets all input data from the user
user_name = request.form.get('user_name')
surname = request.form.get('surname')
first_name = request.form.get('first_name')
email = request.form.get('email')
password = bcrypt.generate_password_hash(request.form.get('password')).decode('utf-8')
user_type = request.form.get('user_type')
getemail = controller.get_email(email) # Checks whether the entered email is already in the database; returns None if it is not
except:
return {'Error': 'Unable to retrieve user details'}
if str(user_type) == 'Student':
try:
# Gets additional input for a student
program_id = request.form.get('program_id')
matric_no = request.form.get('matric_no')
level_id = request.form.get('level_id')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(student_model.Student(
user_name, surname, first_name, email, password, user_type, program_id, matric_no, level_id))
class_id = controller.get_class_id(program_id, level_id)
user_id = controller.get_user_id(email)
controller.add_data(class_mem.ClassMem(class_id, user_id))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
|
except: # To notify if a student hasn't conformed to an acceptable input format.
return {'Error': 'Unable to retrieve student details. Ensure the inputs are valid'}
elif str(user_type) == 'Lecturer':
try:
# Gets additional inputs for lecturers
department_id = request.form.get('department_id')
title = request.form.get('title')
position = request.form.get('position')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(lecturer_model.Lecturer(
user_name, surname, first_name, email, password, user_type, department_id, title, position))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error':'This email has already been used to register'}
except: # To notify if a lecturer hasn't conformed to an acceptable input format.
return {'Error': "Unable to save lecturer details. Ensure that the inputs are correct"}
elif str(user_type) == 'Mentor':
try:
# Gets additional input data for a mentor
profession = request.form.get('profession')
company = request.form.get('company')
title = request.form.get('title')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(mentor_model.Mentor(
user_name, surname, first_name, email, password, user_type, profession, company, title))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a mentor hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get mentor details. Ensure that the inputs are correct'}
else: # To notify if a user hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get user details. Ensure that the inputs are correct'}
# Function to retrieve a user details based on id
@app.route('/users/<id>', methods=['GET'])
@login_required
def get_user(id):
try:
resp = controller.get_user(id) #Gets the details of a user given the user id.
return resp.to_dict()
except:
return {'Error': 'User not found'}
# Function to login
@app.route('/token/auth', methods=['POST'])
def login():
# Gets email and password inputed by the user
email = request.form.get('email')
pass_word = request.form.get('password')
try:
password = controller.get_password(email) # Checks if email has been registered. If this line fails, it runs the except block
if bcrypt.check_password_hash(password[0], pass_word): # Checks if password is correct
user_name = controller.get_user_name(email)
user_id = controller.get_user_id(email)
print(user_name)
access_token = create_access_token(identity={'User name': user_name[0],'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
refresh_token = create_refresh_token(identity = {'User name': user_name[0], 'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
resp = jsonify({'login': True, 'user name': user_name[0]})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
return resp, 200
else:
return jsonify({'login': False}), 401
except:
return jsonify({'login': False}), 401
@app.route('/token/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
# Create the new access token
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
# Set the access JWT and CSRF double submit protection cookies
# in this response
resp = jsonify({'refresh': True})
set_access_cookies(resp, access_token)
return resp, 200
@app.route('/token/remove', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
#This route is to return the image from the local storage.
@app.route('/uploads/<fc>/<yr>/<mn>/<dy>/<filename>', methods=['GET'])
def get_file(fc, yr, mn, dy, filename):
return send_from_directory((os.path.join(app.config['UPLOAD_PATH'], fc, str(yr), str(mn), str(dy))), filename)
#checks file category
def get_file_category(uploaded_file):
file_mime = uploaded_file.content_type
# extracts the file format e.g application, media, etc
file_category = re.findall('(.*)\/', file_mime)[0]
return file_category
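# Illustrative example (assumption, not original code): a browser PNG upload usually arrives with
# content_type 'image/png', so re.findall('(.*)\/', 'image/png')[0] yields 'image' and the file is
# stored under uploads/image/<year>/<month>/<day>/.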
#This route is to upload a file
@app.route('/api/file', methods=['POST'])
@jwt_required
def uploadfeaturedimage_file():
uploaded_file = request.files['file']
ts = int(datetime.datetime.now().timestamp())
date = datetime.datetime.fromtimestamp(ts)
yr = date.year
mn = date.month
dy = date.day
filename = secure_filename(uploaded_file.filename)
if filename != '':
name = filename.rsplit('.', 1)[0]
file_ext = filename.rsplit('.', 1)[-1].lower() # split on the last dot so names like 'my.notes.pdf' keep the right extension
file_category = get_file_category(uploaded_file)
if file_ext not in app.config['UPLOAD_EXTENSIONS']:
abort(400, description="File format not supported")
filename = name + str(ts) + '.' + file_ext
try:
if os.path.isdir('./uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)) is True:
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
else:
directory = './uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)
print(directory)
os.makedirs(directory)
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
stat = 'upload successful' # Default status if file upload is successful
link = 'http://127.0.0.1:5000/uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy) + '/' + str(filename)
except:
stat = 'upload not successful'
link = 'no link returned because upload was unsuccessful'
return {'status': stat, 'link': link}
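# Illustrative example (assumption, not original code): uploading 'notes.pdf' (content type
# 'application/pdf') on 2021-07-15 would be saved as uploads/application/2021/7/15/notes<timestamp>.pdf
# and the route would return a link such as
# http://127.0.0.1:5000/uploads/application/2021/7/15/notes<timestamp>.pdf, which get_file() serves back.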
# Create a Course by a lecturer
@app.route('/course/create', methods=['POST'])
@login_required
def course_create():
try:
# Gets all input data from the user
code = request.form.get('code')
title = request.form.get('title')
unit = request.form.get('unit')
except:
return {'Error': 'Unable to retrieve course details'}
try:
code = code.replace(" ", "")
code = code.upper()
# Normalize the course code and add the course to the database
controller.add_data(course_model.Course(code, title, unit))
return {'Status': 'Course registered successfully'}
except: # Exception as e:
# raise
return {'Status': 'registration not successful'}
# Join a course created
@app.route ('/course/join', methods=['POST'])
@login_required
def course_join():
try:
# Get all inputs from the user
course_id = request.form.get('course_id')
user_id = request.form.get('user_id')
except:
return {'Error': 'Unable to retrieve details'}
# Add the user to the course member table
try:
controller.add_data(course_mem.CourseMem(course_id, user_id))
if controller.get_user_type(user_id) == UserType.Lecturer:
controller.add_data(course_lecturer.CourseLecturer(course_id, user_id))
return {'Status': 'Successfully joined the course'}
except:
return {'Error': 'Unable to join course'}
#Get list of active courses a student or lecturer is part of.
@app.route('/courses/<id>', methods=['GET'])
@login_required
def courses(id):
try:
courses = controller.get_courses(id)
return courses
except:
return {'Error': 'failed to get courses'}
# Register a Course by a Student
@app.route('/course/register', methods=['POST'])
@login_required
def course_register():
try:
# Gets all input data from the student
student_id = request.form.get('student_id')
course_id = request.form.get('course_id')
grade_id = request.form.get('grade_id')
semester = request.form.get('semester')
session_id = request.form.get('session_id')
except:
return {'Error': 'Unable to retrieve details'}
try:
# add the data to the database
controller.add_data(course_reg.CourseRegistration(student_id, course_id, grade_id, semester, session_id))
return {'Status': 'Successfully registered the course'}
except:
return {'Error': 'Unable to register the course'}
#Get list of courses a student has registered.
@app.route('/courses/registered/<id>', methods=['GET'])
@login_required
def registered_courses(id):
courses = controller.get_registered_courses(id)
return courses
# Route to update the event for a user.
@app.route('/calendar/update', methods=['POST'])
@login_required
def update_calendar(): #where id is the user_id
try:
user_id = request.form.get('user_id')
data = request.form.get('data')
updated_at = request.form.get('updated_at')
except:
return {'Error': 'Unable to retrieve data sent'}
try:
ts = datetime.datetime.now()
controller.update_event(user_id, data, ts)
return{'success': 'event successfully inserted or updated'}
except:
return{'Error': 'error updating event'}
#This route is to like a news
@app.route('/api/news/like', methods=['POST'])
@jwt_required
def like_news():
user = get_jwt_identity()
user_id = int(user['id'])
ts = datetime.datetime.now()
ts = str(ts)
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.like_news(news_controller.Like(user_id, news_id, ts))
except:
return {'Error': 'unable to like news'}
return{'status': 'liked'}
#This route is to unlike a news
@app.route('/api/news/unlike', methods=['POST'])
@jwt_required
def unlike_news():
user = get_jwt_identity()
user_id = int(user['id'])
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.unlike_news(user_id, news_id)
except:
return {'Error': 'unable to unlike news'}
return{'status': 'unliked'}
#This route is to comment on a news
@app.route('/api/news/comment', methods=['POST'])
@jwt_required
def comment_news():
user = get_jwt_identity()
user_id = int(user['id'])
ts = datetime.datetime.now()
ts = str(ts)
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
comment = request.form.get('comment') # This is the user's comment
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.comment_news(
news_controller.Comment(user_id, news_id, comment, ts))
except:
return {'Error': 'unable to comment on news'}
return{'status': 'commented'}
#This route is to retrieve likes
@app.route('/api/likes/<news_id>', methods=['GET'])
@jwt_required
def get_likes(news_id):
likes = n_controller.get_likes(news_id)
return likes
#This route is to retrieve comments
@app.route('/api/comments/<news_id>', methods=['GET'])
@jwt_required
def get_comments(news_id):
comments = n_controller.get_comments(news_id)
return comments
#This route is to get the info in a specific news based on the news_id
@app.route('/api/news/<int:news_id>', methods=['GET'])
@jwt_required
def get_news(news_id):
user = get_jwt_identity()
user_id = int(user['id'])
try:
news_object = n_controller.get_news(news_id)
blogger_id = news_object.blogger_id
blogger_name = b_controller.blogger_name(blogger_id)
content = news_object.content
title = news_object.title
category_id = news_object.category_id
category = n_controller.get_category(category_id)
ts = news_object.timestamp
featured_image_object = n_controller.get_featuredimage(news_id)
featured_image = featured_image_object.image
no_of_likes = n_controller.get_no_likes(news_id)
no_of_comments = n_controller.get_no_comments(news_id)
user_like = n_controller.user_like(user_id, news_id)
status = 'success'
news = {'blogger_name': blogger_name, 'title': title, 'content': content, 'category': category,'featured image': featured_image, 'no of likes':no_of_likes, 'no of comments': no_of_comments, 'user like ?': user_like, 'time': ts}
except:
news = 'Record not found'
status = 'failed'
return {'status': status, str(news_id):news}
#This route is to get news in a news page
# per means the number per page and page_num means the page number
@app.route('/api/newslist/<int:per>/<int:page_num>', methods=['GET'])
@jwt_required
def get_news_list(per, page_num):
if page_num == 0:
page_num = 1
if per == 0:
per = 20
threads = db.session.query(news_model.News).paginate(per_page=per, page=page_num, error_out=False)
no_of_items = len(threads.items)
news = {}
status = 'failed'
if no_of_items > 0:
for a in range(no_of_items):
blogger_id = threads.items[a].blogger_id
blogger_name = b_controller.blogger_name(blogger_id)
news.update({threads.items[a].id: {'news_id': threads.items[a].id,
'blogger_name': blogger_name, 'title': threads.items[a].title}})
status = 'success'
news_list = {'news_list': news, 'status': status}
print("i'm here")
return news_list
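# Illustrative example (assumption, not original code): GET /api/newslist/20/1 returns the first page of
# up to 20 news entries keyed by news id, each with 'news_id', 'blogger_name' and 'title', plus a 'status'
# field; passing 0 for either URL value falls back to page 1 / 20 items per page.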
if __name__ == '__main__':
app.run(debug=True)
| return {'Error': 'This email has already been used to register'} | conditional_block |
app.py | import os
import re
import datetime
from werkzeug.utils import secure_filename
from flask import Flask, jsonify, request, abort, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_mysqldb import MySQL
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_jwt_extended import (
JWTManager, jwt_required, create_access_token,
jwt_refresh_token_required, create_refresh_token,
get_jwt_identity, set_access_cookies,
set_refresh_cookies, unset_jwt_cookies
)
from flask_wtf.csrf import CSRFProtect
from model import user_model
from model import student_model
from model import lecturer_model
from model import mentor_model
from controller import user_controller, news_controller, blogger_controller
import json
from model import base
from functools import wraps
from flask import g, request, redirect, url_for
import MySQLdb
from inittables import InitTables
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import sessionmaker
from model.school_model import School, Faculty, Department, Program, Level
from platform_model import class_mem, class_message, class_model, course_model, course_mem, course_lecturer, course_message
from message_model import group_conv_init, group_conv_mem, personal_conv_init, personal_conv_mem, personal_message_model, group_message_model
from entertainment_model import news_model, blogger_model, featured_image_model, like, comment, news_category
from personal_model import course_reg, calender
from model.user_type import UserType
# Retrieves database configuration from environment variables
mysql_host = os.environ.get('MYSQL_HOST')
mysql_user = os.environ.get('MYSQL_USER')
mysql_password = os.environ.get('MYSQL_PASSWORD')
db_name = 'testing' # os.environ.get('DB_NAME')
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'mp3', 'mp4', 'pdf', 'doc', 'docx'} # Feel free to add more file types to be accepted
MAX_CONTENT_LENGTH = 1024*1024*10 # Maximum file upload size is 10 MB
# App flask Configuration
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://' + mysql_user + ':' + mysql_password + '@' + mysql_host + '/' + db_name
app.config['JWT_TOKEN_LOCATION'] = ['cookies']
app.config['JWT_COOKIE_SECURE'] = False
app.config['JWT_ACCESS_COOKIE_PATH'] = '/api/'
app.config['JWT_REFRESH_COOKIE_PATH'] = '/token/refresh'
app.config['JWT_COOKIE_CSRF_PROTECT'] = True
app.config['JWT_SECRET_KEY'] = os.environ.get('JWT_SECRET_KEY') #The Jwt_secret_key is obtained from environment variables
app.config['MAX_CONTENT_LENGTH'] = MAX_CONTENT_LENGTH
app.config['UPLOAD_EXTENSIONS'] = ALLOWED_EXTENSIONS
app.config['UPLOAD_PATH'] = UPLOAD_FOLDER
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
jwt = JWTManager(app)
base.Base.metadata.create_all(db.engine, checkfirst=True)
# This part of the code is to check if the important tables (School, Faculty, Department, Program, Level) have been initialized
Session = sessionmaker(db.engine)
Session.configure()
session = Session()
if session.query(School).count() == 0 or session.query(Faculty).count() == 0 or session.query(Department).count() == 0 or session.query(Program).count() == 0 or session.query(Level).count() == 0 :
InitTables(session)
controller = user_controller.UserController(db) #Create instance of controller to perform some operations
n_controller = news_controller.NewsController(db) # Create instance of news controller to perform some operations
b_controller = blogger_controller.BloggerController(db) #Create instance of blogger controller to perform some operations
CORS(app)
# Sets up the function for checking if a user is logged in
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'csrf_access_token' not in request.cookies:
return {'Error': 'You have to login first'}
return f(*args, **kwargs)
return decorated_function
# Register a user - either a student, lecturer or mentor
@app.route('/users/register', methods = ['POST'])
def register ():
|
# Function to retrieve a user details based on id
@app.route('/users/<id>', methods=['GET'])
@login_required
def get_user(id):
try:
resp = controller.get_user(id) #Gets the details of a user given the user id.
return resp.to_dict()
except:
return {'Error': 'User not found'}
# Function to login
@app.route('/token/auth', methods=['POST'])
def login():
# Gets email and password inputed by the user
email = request.form.get('email')
pass_word = request.form.get('password')
try:
password = controller.get_password(email) # Checks if email has been registered. If this line fails, it runs the except block
if bcrypt.check_password_hash(password[0], pass_word): # Checks if password is correct
user_name = controller.get_user_name(email)
user_id = controller.get_user_id(email)
print(user_name)
access_token = create_access_token(identity={'User name': user_name[0],'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
refresh_token = create_refresh_token(identity = {'User name': user_name[0], 'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
resp = jsonify({'login': True, 'user name': user_name[0]})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
return resp, 200
else:
return jsonify({'login': False}), 401
except:
return jsonify({'login': False}), 401
@app.route('/token/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
# Create the new access token
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
# Set the access JWT and CSRF double submit protection cookies
# in this response
resp = jsonify({'refresh': True})
set_access_cookies(resp, access_token)
return resp, 200
@app.route('/token/remove', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
#This route is to return the image from the local storage.
@app.route('/uploads/<fc>/<yr>/<mn>/<dy>/<filename>', methods=['GET'])
def get_file(fc, yr, mn, dy, filename):
return send_from_directory((os.path.join(app.config['UPLOAD_PATH'], fc, str(yr), str(mn), str(dy))), filename)
#checks file category
def get_file_category(uploaded_file):
file_mime = uploaded_file.content_type
# extracts the file format e.g application, media, etc
file_category = re.findall('(.*)\/', file_mime)[0]
return file_category
#This route is to upload a file
@app.route('/api/file', methods=['POST'])
@jwt_required
def uploadfeaturedimage_file():
uploaded_file = request.files['file']
ts = int(datetime.datetime.now().timestamp())
date = datetime.datetime.fromtimestamp(ts)
yr = date.year
mn = date.month
dy = date.day
filename = secure_filename(uploaded_file.filename)
if filename != '':
name = filename.rsplit('.', 1)[0]
file_ext = filename.rsplit('.', 1)[-1].lower() # split on the last dot so names like 'my.notes.pdf' keep the right extension
file_category = get_file_category(uploaded_file)
if file_ext not in app.config['UPLOAD_EXTENSIONS']:
abort(400, description="File format not supported")
filename = name + str(ts) + '.' + file_ext
try:
if os.path.isdir('./uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)) is True:
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
else:
directory = './uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)
print(directory)
os.makedirs(directory)
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
stat = 'upload successful' # Default status if file upload is successful
link = 'http://127.0.0.1:5000/uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy) + '/' + str(filename)
except:
stat = 'upload not successful'
link = 'no link returned because upload was unsuccessful'
return {'status': stat, 'link': link}
# Create a Course by a lecturer
@app.route('/course/create', methods=['POST'])
@login_required
def course_create():
try:
# Gets all input data from the user
code = request.form.get('code')
title = request.form.get('title')
unit = request.form.get('unit')
except:
return {'Error': 'Unable to retrieve course details'}
try:
code = code.replace(" ", "")
code = code.upper()
# Normalize the course code and add the course to the database
controller.add_data(course_model.Course(code, title, unit))
return {'Status': 'Course registered successfully'}
except: # Exception as e:
# raise
return {'Status': 'registration not successful'}
# Join a course created
@app.route ('/course/join', methods=['POST'])
@login_required
def course_join():
try:
# Get all inputs from the user
course_id = request.form.get('course_id')
user_id = request.form.get('user_id')
except:
return {'Error': 'Unable to retrieve details'}
# Add the user to the course member table
try:
controller.add_data(course_mem.CourseMem(course_id, user_id))
if controller.get_user_type(user_id) == UserType.Lecturer:
controller.add_data(course_lecturer.CourseLecturer(course_id, user_id))
return {'Status': 'Successfully joined the course'}
except:
return {'Error': 'Unable to join course'}
#Get list of active courses a student or lecturer is part of.
@app.route('/courses/<id>', methods=['GET'])
@login_required
def courses(id):
try:
courses = controller.get_courses(id)
return courses
except:
return {'Error': 'failed to get courses'}
# Register a Course by a Student
@app.route('/course/register', methods=['POST'])
@login_required
def course_register():
try:
# Gets all input data from the student
student_id = request.form.get('student_id')
course_id = request.form.get('course_id')
grade_id = request.form.get('grade_id')
semester = request.form.get('semester')
session_id = request.form.get('session_id')
except:
return {'Error': 'Unable to retrieve details'}
try:
# add the data to the database
controller.add_data(course_reg.CourseRegistration(student_id, course_id, grade_id, semester, session_id))
return {'Status': 'Successfully registered the course'}
except:
return {'Error': 'Unable to register the course'}
#Get list of courses a student has registered.
@app.route('/courses/registered/<id>', methods=['GET'])
@login_required
def registered_courses(id):
courses = controller.get_registered_courses(id)
return courses
# Route to update the event for a user.
@app.route('/calendar/update', methods=['POST'])
@login_required
def update_calendar(): #where id is the user_id
try:
user_id = request.form.get('user_id')
data = request.form.get('data')
updated_at = request.form.get('updated_at')
except:
return {'Error': 'Unable to retrieve data sent'}
try:
ts = datetime.datetime.now()
controller.update_event(user_id, data, ts)
return{'success': 'event successfully inserted or updated'}
except:
return{'Error': 'error updating event'}
#This route is to like a news
@app.route('/api/news/like', methods=['POST'])
@jwt_required
def like_news():
user = get_jwt_identity()
user_id = int(user['id'])
ts = datetime.datetime.now()
ts = str(ts)
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.like_news(news_controller.Like(user_id, news_id, ts))
except:
return {'Error': 'unable to like news'}
return{'status': 'liked'}
#This route is to unlike a news
@app.route('/api/news/unlike', methods=['POST'])
@jwt_required
def unlike_news():
user = get_jwt_identity()
user_id = int(user['id'])
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.unlike_news(user_id, news_id)
except:
return {'Error': 'unable to unlike news'}
return{'status': 'unliked'}
#This route is to comment on a news
@app.route('/api/news/comment', methods=['POST'])
@jwt_required
def comment_news():
user = get_jwt_identity()
user_id = int(user['id'])
ts = datetime.datetime.now()
ts = str(ts)
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
comment = request.form.get('comment') # This is the user's comment
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.comment_news(
news_controller.Comment(user_id, news_id, comment, ts))
except:
return {'Error': 'unable to comment on news'}
return{'status': 'commented'}
#This route is to retrieve likes
@app.route('/api/likes/<news_id>', methods=['GET'])
@jwt_required
def get_likes(news_id):
likes = n_controller.get_likes(news_id)
return likes
#This route is to retrieve comments
@app.route('/api/comments/<news_id>', methods=['GET'])
@jwt_required
def get_comments(news_id):
comments = n_controller.get_comments(news_id)
return comments
#This route is to get the info in a specific news based on the news_id
@app.route('/api/news/<int:news_id>', methods=['GET'])
@jwt_required
def get_news(news_id):
user = get_jwt_identity()
user_id = int(user['id'])
try:
news_object = n_controller.get_news(news_id)
blogger_id = news_object.blogger_id
blogger_name = b_controller.blogger_name(blogger_id)
content = news_object.content
title = news_object.title
category_id = news_object.category_id
category = n_controller.get_category(category_id)
ts = news_object.timestamp
featured_image_object = n_controller.get_featuredimage(news_id)
featured_image = featured_image_object.image
no_of_likes = n_controller.get_no_likes(news_id)
no_of_comments = n_controller.get_no_comments(news_id)
user_like = n_controller.user_like(user_id, news_id)
status = 'success'
news = {'blogger_name': blogger_name, 'title': title, 'content': content, 'category': category,'featured image': featured_image, 'no of likes':no_of_likes, 'no of comments': no_of_comments, 'user like ?': user_like, 'time': ts}
except:
news = 'Record not found'
status = 'failed'
return {'status': status, str(news_id):news}
#This route is to get news in a news page
# per means the number per page and page_num means the page number
@app.route('/api/newslist/<int:per>/<int:page_num>', methods=['GET'])
@jwt_required
def get_news_list(per, page_num):
if page_num == 0:
page_num = 1
if per == 0:
per = 20
threads = db.session.query(news_model.News).paginate(per_page=per, page=page_num, error_out=False)
no_of_items = len(threads.items)
news = {}
status = 'failed'
if no_of_items > 0:
for a in range(no_of_items):
blogger_id = threads.items[a].blogger_id
blogger_name = b_controller.blogger_name(blogger_id)
news.update({threads.items[a].id: {'news_id': threads.items[a].id,
'blogger_name': blogger_name, 'title': threads.items[a].title}})
status = 'success'
news_list = {'news_list': news, 'status': status}
print("i'm here")
return news_list
if __name__ == '__main__':
app.run(debug=True)
| try:
# Gets all input data from the user
user_name = request.form.get('user_name')
surname = request.form.get('surname')
first_name = request.form.get('first_name')
email = request.form.get('email')
password = bcrypt.generate_password_hash(request.form.get('password')).decode('utf-8')
user_type = request.form.get('user_type')
getemail = controller.get_email(email) # Checks to see if the entry email is in the database. If not, it returns None
except:
return {'Error': 'Unable to retrieve user details'}
if str(user_type) == 'Student':
try:
# Gets additional input for a student
program_id = request.form.get('program_id')
matric_no = request.form.get('matric_no')
level_id = request.form.get('level_id')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(student_model.Student(
user_name, surname, first_name, email, password, user_type, program_id, matric_no, level_id))
class_id = controller.get_class_id(program_id, level_id)
user_id = controller.get_user_id(email)
controller.add_data(class_mem.ClassMem(class_id, user_id))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a student hasn't conformed to an acceptable input format.
return {'Error': 'Unable to retrieve student details. Ensure the inputs are valid'}
elif str(user_type) == 'Lecturer':
try:
# Gets additional inputs for lecturers
department_id = request.form.get('department_id')
title = request.form.get('title')
position = request.form.get('position')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(lecturer_model.Lecturer(
user_name, surname, first_name, email, password, user_type, department_id, title, position))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error':'This email has already been used to register'}
except: # To notify if a lecturer hasn't conformed to an acceptable input format.
return {'Error': "Unable to save lecturer details. Ensure that the inputs are correct"}
elif str(user_type) == 'Mentor':
try:
# Gets additional input data for a mentor
profession = request.form.get('profession')
company = request.form.get('company')
title = request.form.get('title')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(mentor_model.Mentor(
user_name, surname, first_name, email, password, user_type, profession, company, title))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a mentor hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get mentor details. Ensure that the inputs are correct'}
else: # To notify if a user hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get user details. Ensure that the inputs are correct'} | identifier_body |
app.py | import os
import re
import datetime
from werkzeug.utils import secure_filename
from flask import Flask, jsonify, request, abort, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_mysqldb import MySQL
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_jwt_extended import (
JWTManager, jwt_required, create_access_token,
jwt_refresh_token_required, create_refresh_token,
get_jwt_identity, set_access_cookies,
set_refresh_cookies, unset_jwt_cookies
)
from flask_wtf.csrf import CSRFProtect
from model import user_model
from model import student_model
from model import lecturer_model
from model import mentor_model
from controller import user_controller, news_controller, blogger_controller
import json
from model import base
from functools import wraps
from flask import g, request, redirect, url_for
import MySQLdb
from inittables import InitTables
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import sessionmaker
from model.school_model import School, Faculty, Department, Program, Level
from platform_model import class_mem, class_message, class_model, course_model, course_mem, course_lecturer, course_message
from message_model import group_conv_init, group_conv_mem, personal_conv_init, personal_conv_mem, personal_message_model, group_message_model
from entertainment_model import news_model, blogger_model, featured_image_model, like, comment, news_category
from personal_model import course_reg, calender
from model.user_type import UserType
# Retrieves database configuration from environment variables
mysql_host = os.environ.get('MYSQL_HOST')
mysql_user = os.environ.get('MYSQL_USER')
mysql_password = os.environ.get('MYSQL_PASSWORD')
db_name = 'testing' # os.environ.get('DB_NAME')
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'mp3', 'mp4', 'pdf', 'doc', 'docx'} # Feel free to add more file types to be accepted
MAX_CONTENT_LENGTH = 1024*1024*10 # Maximum file upload size is 10 MB
# App flask Configuration
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://' + mysql_user + ':' + mysql_password + '@' + mysql_host + '/' + db_name
app.config['JWT_TOKEN_LOCATION'] = ['cookies']
app.config['JWT_COOKIE_SECURE'] = False
app.config['JWT_ACCESS_COOKIE_PATH'] = '/api/'
app.config['JWT_REFRESH_COOKIE_PATH'] = '/token/refresh'
app.config['JWT_COOKIE_CSRF_PROTECT'] = True
app.config['JWT_SECRET_KEY'] = os.environ.get('JWT_SECRET_KEY') #The Jwt_secret_key is obtained from environment variables
app.config['MAX_CONTENT_LENGTH'] = MAX_CONTENT_LENGTH
app.config['UPLOAD_EXTENSIONS'] = ALLOWED_EXTENSIONS
app.config['UPLOAD_PATH'] = UPLOAD_FOLDER
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
jwt = JWTManager(app)
base.Base.metadata.create_all(db.engine, checkfirst=True)
# This part of the code is to check if the important tables (School, Faculty, Department, Program, Level) have been initialized
Session = sessionmaker(db.engine)
Session.configure()
session = Session()
if session.query(School).count() == 0 or session.query(Faculty).count() == 0 or session.query(Department).count() == 0 or session.query(Program).count() == 0 or session.query(Level).count() == 0 :
InitTables(session)
controller = user_controller.UserController(db) #Create instance of controller to perform some operations
n_controller = news_controller.NewsController(db) # Create instance of news controller to perform some operations
b_controller = blogger_controller.BloggerController(db) #Create instance of blogger controller to perform some operations
CORS(app)
# Sets up the function for checking if a user is logged in
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'csrf_access_token' not in request.cookies:
return {'Error': 'You have to login first'}
return f(*args, **kwargs)
return decorated_function
# Register a user - either a student, lecturer or mentor
@app.route('/users/register', methods = ['POST'])
def register ():
try:
# Gets all input data from the user
user_name = request.form.get('user_name')
surname = request.form.get('surname')
first_name = request.form.get('first_name')
email = request.form.get('email')
password = bcrypt.generate_password_hash(request.form.get('password')).decode('utf-8')
user_type = request.form.get('user_type')
getemail = controller.get_email(email) # Checks to see if the entry email is in the database. If not, it returns None
except:
return {'Error': 'Unable to retrieve user details'}
if str(user_type) == 'Student':
try:
# Gets additional input for a student
program_id = request.form.get('program_id')
matric_no = request.form.get('matric_no')
level_id = request.form.get('level_id')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(student_model.Student(
user_name, surname, first_name, email, password, user_type, program_id, matric_no, level_id))
class_id = controller.get_class_id(program_id, level_id)
user_id = controller.get_user_id(email)
controller.add_data(class_mem.ClassMem(class_id, user_id))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a student hasn't conformed to an acceptable input format.
return {'Error': 'Unable to retrieve student details. Ensure the inputs are valid'}
elif str(user_type) == 'Lecturer':
try:
# Gets additional inputs for lecturers
department_id = request.form.get('department_id')
title = request.form.get('title')
position = request.form.get('position')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(lecturer_model.Lecturer(
user_name, surname, first_name, email, password, user_type, department_id, title, position))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error':'This email has already been used to register'}
except: # To notify if a lecturer hasn't conformed to an acceptable input format.
return {'Error': "Unable to save lecturer details. Ensure that the inputs are correct"}
elif str(user_type) == 'Mentor':
try:
# Gets additional input data for a mentor
profession = request.form.get('profession')
company = request.form.get('company')
title = request.form.get('title')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(mentor_model.Mentor(
user_name, surname, first_name, email, password, user_type, profession, company, title))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a mentor hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get mentor details. Ensure that the inputs are correct'}
else: # To notify if a user hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get user details. Ensure that the inputs are correct'}
# Function to retrieve a user details based on id
@app.route('/users/<id>', methods=['GET'])
@login_required
def get_user(id):
try:
resp = controller.get_user(id) #Gets the details of a user given the user id.
return resp.to_dict()
except:
return {'Error': 'User not found'}
# Function to login
@app.route('/token/auth', methods=['POST'])
def login():
# Gets email and password inputed by the user
email = request.form.get('email')
pass_word = request.form.get('password')
try:
password = controller.get_password(email) # Checks if email has been registered. If this line fails, it runs the except block
if bcrypt.check_password_hash(password[0], pass_word): # Checks if password is correct
user_name = controller.get_user_name(email)
user_id = controller.get_user_id(email)
print(user_name)
access_token = create_access_token(identity={'User name': user_name[0],'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
refresh_token = create_refresh_token(identity = {'User name': user_name[0], 'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
resp = jsonify({'login': True, 'user name': user_name[0]})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
return resp, 200
else:
return jsonify({'login': False}), 401
except:
return jsonify({'login': False}), 401
@app.route('/token/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
# Create the new access token
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
# Set the access JWT and CSRF double submit protection cookies
# in this response
resp = jsonify({'refresh': True})
set_access_cookies(resp, access_token)
return resp, 200
@app.route('/token/remove', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
#This route is to return the image from the local storage.
@app.route('/uploads/<fc>/<yr>/<mn>/<dy>/<filename>', methods=['GET'])
def get_file(fc, yr, mn, dy, filename):
return send_from_directory((os.path.join(app.config['UPLOAD_PATH'], fc, str(yr), str(mn), str(dy))), filename)
#checks file category
def get_file_category(uploaded_file):
file_mime = uploaded_file.content_type
# extracts the file format e.g application, media, etc
file_category = re.findall('(.*)\/', file_mime)[0]
return file_category
#This route is to upload a file
@app.route('/api/file', methods=['POST'])
@jwt_required
def uploadfeaturedimage_file():
uploaded_file = request.files['file']
ts = int(datetime.datetime.now().timestamp())
date = datetime.datetime.fromtimestamp(ts)
yr = date.year
mn = date.month
dy = date.day
filename = secure_filename(uploaded_file.filename)
if filename != '':
name = filename.rsplit('.', 1)[0]
file_ext = filename.rsplit('.', 1)[-1].lower() # split on the last dot so names like 'my.notes.pdf' keep the right extension
file_category = get_file_category(uploaded_file)
if file_ext not in app.config['UPLOAD_EXTENSIONS']:
abort(400, description="File format not supported")
filename = name + str(ts) + '.' + file_ext
try:
if os.path.isdir('./uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)) is True:
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
else:
directory = './uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)
print(directory)
os.makedirs(directory)
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
stat = 'upload successful' # Default status if file upload is successful
link = 'http://127.0.0.1:5000/uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy) + '/' + str(filename)
except:
stat = 'upload not successful'
link = 'no link returned because upload was unsuccessful'
return {'status': stat, 'link': link}
# Create a Course by a lecturer
@app.route('/course/create', methods=['POST'])
@login_required
def course_create():
try:
# Gets all input data from the user
code = request.form.get('code')
title = request.form.get('title')
unit = request.form.get('unit')
except:
return {'Error': 'Unable to retrieve course details'}
try:
code = code.replace(" ", "")
code = code.upper()
# Normalize the course code and add the course to the database
controller.add_data(course_model.Course(code, title, unit))
return {'Status': 'Course registered successfully'}
except: # Exception as e:
# raise
return {'Status': 'registration not successful'}
# Join a course created
@app.route ('/course/join', methods=['POST'])
@login_required
def course_join():
try:
# Get all inputs from the user
course_id = request.form.get('course_id')
user_id = request.form.get('user_id')
except:
return {'Error': 'Unable to retrieve details'}
# Add the user to the course member table
try:
controller.add_data(course_mem.CourseMem(course_id, user_id))
if controller.get_user_type(user_id) == UserType.Lecturer:
controller.add_data(course_lecturer.CourseLecturer(course_id, user_id))
return {'Status': 'Successfully joined the course'}
except:
return {'Error': 'Unable to join course'}
#Get list of active courses a student or lecturer is part of.
@app.route('/courses/<id>', methods=['GET'])
@login_required
def courses(id):
try:
courses = controller.get_courses(id)
return courses
except:
return {'Error': 'failed to get courses'}
# Register a Course by a Student
@app.route('/course/register', methods=['POST'])
@login_required
def course_register():
try:
# Gets all input data from the student
student_id = request.form.get('student_id')
course_id = request.form.get('course_id')
grade_id = request.form.get('grade_id')
semester = request.form.get('semester')
session_id = request.form.get('session_id')
except:
return {'Error': 'Unable to retrieve details'}
try:
# add the data to the database
controller.add_data(course_reg.CourseRegistration(student_id, course_id, grade_id, semester, session_id))
return {'Status': 'Successfully registered the course'}
except:
return {'Error': 'Unable to register the course'}
#Get list of courses a student has registered.
@app.route('/courses/registered/<id>', methods=['GET'])
@login_required
def registered_courses(id):
courses = controller.get_registered_courses(id)
return courses
# Route to update the event for a user.
@app.route('/calendar/update', methods=['POST'])
@login_required
def | (): #where id is the user_id
try:
user_id = request.form.get('user_id')
data = request.form.get('data')
updated_at = request.form.get('updated_at')
except:
return {'Error': 'Unable to retrieve data sent'}
try:
ts = datetime.datetime.now()
controller.update_event(user_id, data, ts)
return{'success': 'event successfully inserted or updated'}
except:
return{'Error': 'error updating event'}
#This route is to like a news
@app.route('/api/news/like', methods=['POST'])
@jwt_required
def like_news():
user = get_jwt_identity()
user_id = int(user['id'])
ts = datetime.datetime.now()
ts = str(ts)
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.like_news(news_controller.Like(user_id, news_id, ts))
except:
return {'Error': 'unable to like news'}
return{'status': 'liked'}
#This route is to unlike a news
@app.route('/api/news/unlike', methods=['POST'])
@jwt_required
def unlike_news():
user = get_jwt_identity()
user_id = int(user['id'])
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.unlike_news(user_id, news_id)
except:
return {'Error': 'unable to unlike news'}
return{'status': 'unliked'}
#This route is to comment on a news
@app.route('/api/news/comment', methods=['POST'])
@jwt_required
def comment_news():
user = get_jwt_identity()
user_id = int(user['id'])
ts = datetime.datetime.now()
ts = str(ts)
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
comment = request.form.get('comment') # This is the user's comment
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.comment_news(
news_controller.Comment(user_id, news_id, comment, ts))
except:
return {'Error': 'unable to comment on news'}
return{'status': 'commented'}
#This route is to retrieve likes
@app.route('/api/likes/<news_id>', methods=['GET'])
@jwt_required
def get_likes(news_id):
likes = n_controller.get_likes(news_id)
return likes
#This route if to retrieve comments
@app.route('/api/comments/<news_id>', methods=['GET'])
@jwt_required
def get_comments(news_id):
comments = n_controller.get_comments(news_id)
return comments
#This route is to get the info in a specific news based on the news_id
@app.route('/api/news/<int:news_id>', methods=['GET'])
@jwt_required
def get_news(news_id):
user = get_jwt_identity()
user_id = int(user['id'])
try:
news_object = n_controller.get_news(news_id)
blogger_id = news_object.blogger_id
blogger_name = b_controller.blogger_name(blogger_id)
content = news_object.content
title = news_object.title
category_id = news_object.category_id
category = n_controller.get_category(category_id)
ts = news_object.timestamp
featured_image_object = n_controller.get_featuredimage(news_id)
featured_image = featured_image_object.image
no_of_likes = n_controller.get_no_likes(news_id)
no_of_comments = n_controller.get_no_comments(news_id)
user_like = n_controller.user_like(user_id, news_id)
status = 'success'
news = {'blogger_name': blogger_name, 'title': title, 'content': content, 'category': category,'featured image': featured_image, 'no of likes':no_of_likes, 'no of comments': no_of_comments, 'user like ?': user_like, 'time': ts}
except:
news = 'Record not found'
status = 'failed'
return {'status': status, str(news_id):news}
#This route is to get news in a news page
# per means the number per page and page_num means the page number
@app.route('/api/newslist/<int:per>/<int:page_num>', methods=['GET'])
@jwt_required
def get_news_list(per, page_num):
if page_num == 0:
page_num = 1
if per == 0:
per = 20
threads = db.session.query(news_model.News).paginate(per_page=per, page=page_num, error_out=False)
no_of_items = len(threads.items)
news = {}
status = 'failed'
if no_of_items > 0:
for a in range(no_of_items):
blogger_id = threads.items[a].blogger_id
blogger_name = b_controller.blogger_name(blogger_id)
news.update({threads.items[a].id: {'news_id': threads.items[a].id,
'blogger_name': blogger_name, 'title': threads.items[a].title}})
status = 'success'
news_list = {'news_list': news, 'status': status}
print("i'm here")
return news_list
if __name__ == '__main__':
app.run(debug=True)
| update_calendar | identifier_name |
app.py | import os
import re
import datetime
from werkzeug.utils import secure_filename
from flask import Flask, jsonify, request, abort, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_mysqldb import MySQL
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_jwt_extended import (
JWTManager, jwt_required, create_access_token,
jwt_refresh_token_required, create_refresh_token,
get_jwt_identity, set_access_cookies,
set_refresh_cookies, unset_jwt_cookies
)
from flask_wtf.csrf import CSRFProtect
from model import user_model
from model import student_model
from model import lecturer_model
from model import mentor_model
from controller import user_controller, news_controller, blogger_controller
import json
from model import base
from functools import wraps
from flask import g, request, redirect, url_for
import MySQLdb
from inittables import InitTables
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import sessionmaker
from model.school_model import School, Faculty, Department, Program, Level
from platform_model import class_mem, class_message, class_model, course_model, course_mem, course_lecturer, course_message
from message_model import group_conv_init, group_conv_mem, personal_conv_init, personal_conv_mem, personal_message_model, group_message_model
from entertainment_model import news_model, blogger_model, featured_image_model, like, comment, news_category
from personal_model import course_reg, calender
from model.user_type import UserType
# Retrieves database configuration from environment variables
mysql_host = os.environ.get('MYSQL_HOST')
mysql_user = os.environ.get('MYSQL_USER')
mysql_password = os.environ.get('MYSQL_PASSWORD')
db_name = 'testing' # os.environ.get('DB_NAME')
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'mp3', 'mp4', 'pdf', 'doc', 'docx'} # Feel free to add more file types to be accepted
MAX_CONTENT_LENGTH = 1024*1024*10 # Maximum file upload size is 10 MB
# App flask Configuration
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://' + mysql_user + ':' + mysql_password + '@' + mysql_host + '/' + db_name
app.config['JWT_TOKEN_LOCATION'] = ['cookies']
app.config['JWT_COOKIE_SECURE'] = False
app.config['JWT_ACCESS_COOKIE_PATH'] = '/api/'
app.config['JWT_REFRESH_COOKIE_PATH'] = '/token/refresh'
app.config['JWT_COOKIE_CSRF_PROTECT'] = True
app.config['JWT_SECRET_KEY'] = os.environ.get('JWT_SECRET_KEY') #The Jwt_secret_key is obtained from environment variables
app.config['MAX_CONTENT_LENGTH'] = MAX_CONTENT_LENGTH
app.config['UPLOAD_EXTENSIONS'] = ALLOWED_EXTENSIONS
app.config['UPLOAD_PATH'] = UPLOAD_FOLDER
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
jwt = JWTManager(app)
base.Base.metadata.create_all(db.engine, checkfirst=True)
# This part of the code is to check if the important tables (School, Faculty, Department, Program, Level) have been initialized
Session = sessionmaker(db.engine)
Session.configure()
session = Session()
if session.query(School).count() == 0 or session.query(Faculty).count() == 0 or session.query(Department).count() == 0 or session.query(Program).count() == 0 or session.query(Level).count() == 0 :
InitTables(session)
controller = user_controller.UserController(db) #Create instance of controller to perform some operations
n_controller = news_controller.NewsController(db) # Create instance of news controller to perform some operations
b_controller = blogger_controller.BloggerController(db) #Create instance of blogger controller to perform some operations
CORS(app)
# Sets up the function for checking if a user is logged in
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'csrf_access_token' not in request.cookies:
return {'Error': 'You have to login first'}
return f(*args, **kwargs)
return decorated_function
# Register a user - either a student, lecturer or mentor
@app.route('/users/register', methods = ['POST'])
def register ():
try:
# Gets all input data from the user
user_name = request.form.get('user_name')
surname = request.form.get('surname')
first_name = request.form.get('first_name')
email = request.form.get('email')
password = bcrypt.generate_password_hash(request.form.get('password')).decode('utf-8')
user_type = request.form.get('user_type')
getemail = controller.get_email(email) # Checks to see if the entry email is in the database. If not, it returns None
except:
return {'Error': 'Unable to retrieve user details'}
if str(user_type) == 'Student':
try:
# Gets additional input for a student
program_id = request.form.get('program_id')
matric_no = request.form.get('matric_no')
level_id = request.form.get('level_id')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(student_model.Student(
user_name, surname, first_name, email, password, user_type, program_id, matric_no, level_id))
class_id = controller.get_class_id(program_id, level_id)
user_id = controller.get_user_id(email)
controller.add_data(class_mem.ClassMem(class_id, user_id))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a student hasn't conformed to an acceptable input format.
return {'Error': 'Unable to retrieve student details. Ensure the inputs are valid'}
elif str(user_type) == 'Lecturer':
try:
# Gets additional inputs for lecturers
department_id = request.form.get('department_id')
title = request.form.get('title')
position = request.form.get('position')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(lecturer_model.Lecturer(
user_name, surname, first_name, email, password, user_type, department_id, title, position))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error':'This email has already been used to register'}
except: # To notify if a lecturer hasn't conformed to an acceptable input format.
return {'Error': "Unable to save lecturer details. Ensure that the inputs are correct"}
elif str(user_type) == 'Mentor':
try:
# Gets additional input data for a mentor
profession = request.form.get('profession')
company = request.form.get('company')
title = request.form.get('title')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(mentor_model.Mentor(
user_name, surname, first_name, email, password, user_type, profession, company, title))
return {'success': 'successfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a mentor hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get mentor details. Ensure that the inputs are correct'}
else: # To notify if a user hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get user details. Ensure that the inputs are correct'}
| try:
resp = controller.get_user(id) #Gets the details of a user given the user id.
return resp.to_dict()
except:
return {'Error': 'User not found'}
# Function to login
@app.route('/token/auth', methods=['POST'])
def login():
# Gets email and password inputed by the user
email = request.form.get('email')
pass_word = request.form.get('password')
try:
password = controller.get_password(email) # Checks if email has been registered. If this line fails, it runs the except block
if bcrypt.check_password_hash(password[0], pass_word): # Checks if password is correct
user_name = controller.get_user_name(email)
user_id = controller.get_user_id(email)
print(user_name)
access_token = create_access_token(identity={'User name': user_name[0],'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
refresh_token = create_refresh_token(identity = {'User name': user_name[0], 'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
resp = jsonify({'login': True, 'user name': user_name[0]})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
return resp, 200
else:
return jsonify({'login': False}), 401
except:
return jsonify({'login': False}), 401
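# Example request to the /token/auth endpoint above (hypothetical values, for illustration only):
#   curl -X POST http://127.0.0.1:5000/token/auth -d "email=jane@example.com" -d "password=secret"
# On success the response body is {'login': True, 'user name': ...} and the access/refresh JWTs
# are attached to the response as cookies.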
@app.route('/token/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
# Create the new access token
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
# Set the access JWT and CSRF double submit protection cookies
# in this response
resp = jsonify({'refresh': True})
set_access_cookies(resp, access_token)
return resp, 200
@app.route('/token/remove', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
#This route is to return the image from the local storage.
@app.route('/uploads/<fc>/<yr>/<mn>/<dy>/<filename>', methods=['GET'])
def get_file(fc, yr, mn, dy, filename):
return send_from_directory((os.path.join(app.config['UPLOAD_PATH'], fc, str(yr), str(mn), str(dy))), filename)
#checks file category
def get_file_category(uploaded_file):
file_mime = uploaded_file.content_type
# extracts the MIME top-level type, e.g. 'application', 'image', etc.
file_category = re.findall('(.*)\/', file_mime)[0]
return file_category
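# For example, a PNG upload arrives with content_type 'image/png', so the regex above
# yields the category 'image'; that category becomes part of the storage path below.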
#This route is to upload a file
@app.route('/api/file', methods=['POST'])
@jwt_required
def uploadfeaturedimage_file():
uploaded_file = request.files['file']
ts = int(datetime.datetime.now().timestamp())
date = datetime.datetime.fromtimestamp(ts)
yr = date.year
mn = date.month
dy = date.day
filename = secure_filename(uploaded_file.filename)
if filename != '':
name = filename.split('.')[0]
file_ext = filename.split('.')[1].lower()
file_category = get_file_category(uploaded_file)
if file_ext not in app.config['UPLOAD_EXTENSIONS']:
abort(400, description="File format not supported")
filename = name + str(ts) + '.' + file_ext
try:
if os.path.isdir('./uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)) is True:
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
else:
directory = './uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)
print(directory)
os.makedirs(directory)
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
stat = 'upload successful' # Default status if file upload is successful
link = 'http://127.0.0.1:5000/uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy) + '/' + str(filename)
except:
stat = 'upload not successful'
link = 'no link returned because upload was unsuccessful'
return {'status': stat, 'link': link}
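# Note: uploads are grouped by category and date, e.g. ./uploads/image/2024/5/17/photo1715900000.png
# (assuming UPLOAD_PATH points at ./uploads); the returned link mirrors that folder layout.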
# Create a Course by a lecturer
@app.route('/course/create', methods=['POST'])
@login_required
def course_create():
try:
# Gets all input data from the user
code = request.form.get('code')
title = request.form.get('title')
unit = request.form.get('unit')
except:
return {'Error': 'Unable to retrieve course details'}
try:
code = code.replace(" ", "")
code = code.upper()
# Check to see if course code is already in the DB
controller.add_data(course_model.Course(code, title, unit))
return {'Status': 'Course registered successfully'}
except: # Exception as e:
# raise
return {'Status': 'registration not successful'}
# Join a course created
@app.route ('/course/join', methods=['POST'])
@login_required
def course_join():
try:
# Get all inputs from the user
course_id = request.form.get('course_id')
user_id = request.form.get('user_id')
except:
return {'Error': 'Unable to retrieve details'}
# add data to the course_member table
try:
controller.add_data(course_mem.CourseMem(course_id, user_id))
if controller.get_user_type(user_id) == UserType.Lecturer:
controller.add_data(course_lecturer.CourseLecturer(course_id, user_id))
return {'Status': 'Successfully joined the course'}
except:
return {'Error': 'Unable to join course'}
#Get list of active courses a student or lecturer is part of.
@app.route('/courses/<id>', methods=['GET'])
@login_required
def courses(id):
try:
courses = controller.get_courses(id)
return courses
except:
return {'Error': 'failed to get courses'}
# Register a Course by a Student
@app.route('/course/register', methods=['POST'])
@login_required
def course_register():
try:
# Gets all input data from the student
student_id = request.form.get('student_id')
course_id = request.form.get('course_id')
grade_id = request.form.get('grade_id')
semester = request.form.get('semester')
session_id = request.form.get('session_id')
except:
return {'Error': 'Unable to retrieve details'}
try:
# add the data to the database
controller.add_data(course_reg.CourseRegistration(student_id, course_id, grade_id, semester, session_id))
return {'Status': 'Successfully registered the course'}
except:
return {'Error': 'Unable to register the course'}
#Get list of courses a student has registered.
@app.route('/courses/registered/<id>', methods=['GET'])
@login_required
def registered_courses(id):
courses = controller.get_registered_courses(id)
return courses
# Route to update the event for a user.
@app.route('/calendar/update', methods=['POST'])
@login_required
def update_calendar(): #where id is the user_id
try:
user_id = request.form.get('user_id')
data = request.form.get('data')
updated_at = request.form.get('updated_at')
except:
return {'Error': 'Unable to retrieve data sent'}
try:
ts = datetime.datetime.now()
controller.update_event(user_id, data, ts)
return{'success': 'event successfully inserted or updated'}
except:
return{'Error': 'error updating event'}
# This route is to like a news item
@app.route('/api/news/like', methods=['POST'])
@jwt_required
def like_news():
user = get_jwt_identity()
user_id = int(user['id'])
ts = datetime.datetime.now()
ts = str(ts)
try:
# Gets all input data from the user
# Get the news user wants to like
news_id = request.form.get('news_id')
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.like_news(news_controller.Like(user_id, news_id, ts))
except:
return {'Error': 'unable to like news'}
return{'status': 'liked'}
# This route is to unlike a news item
@app.route('/api/news/unlike', methods=['POST'])
@jwt_required
def unlike_news():
user = get_jwt_identity()
user_id = int(user['id'])
try:
# Gets all input data from the user
# Get the news item the user wants to unlike
news_id = request.form.get('news_id')
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.unlike_news(user_id, news_id)
except:
return {'Error': 'unable to unlike news'}
return{'status': 'unliked'}
# This route is to comment on a news item
@app.route('/api/news/comment', methods=['POST'])
@jwt_required
def comment_news():
user = get_jwt_identity()
user_id = int(user['id'])
ts = datetime.datetime.now()
ts = str(ts)
try:
# Gets all input data from the user
# Get the news item the user wants to comment on
news_id = request.form.get('news_id')
comment = request.form.get('comment') # This is the user's comment
except:
return {'Error': 'Invalid news_id'}
try:
n_controller.comment_news(
news_controller.Comment(user_id, news_id, comment, ts))
except:
return {'Error': 'unable to comment on news'}
return{'status': 'commented'}
# This route is to retrieve likes
@app.route('/api/likes/<news_id>', methods=['GET'])
@jwt_required
def get_likes(news_id):
likes = n_controller.get_likes(news_id)
return likes
# This route is to retrieve comments
@app.route('/api/comments/<news_id>', methods=['GET'])
@jwt_required
def get_comments(news_id):
comments = n_controller.get_comments(news_id)
return comments
# This route is to get the info for a specific news item based on the news_id
@app.route('/api/news/<int:news_id>', methods=['GET'])
@jwt_required
def get_news(news_id):
user = get_jwt_identity()
user_id = int(user['id'])
try:
news_object = n_controller.get_news(news_id)
blogger_id = news_object.blogger_id
blogger_name = b_controller.blogger_name(blogger_id)
content = news_object.content
title = news_object.title
category_id = news_object.category_id
category = n_controller.get_category(category_id)
ts = news_object.timestamp
featured_image_object = n_controller.get_featuredimage(news_id)
featured_image = featured_image_object.image
no_of_likes = n_controller.get_no_likes(news_id)
no_of_comments = n_controller.get_no_comments(news_id)
user_like = n_controller.user_like(user_id, news_id)
status = 'success'
news = {'blogger_name': blogger_name, 'title': title, 'content': content, 'category': category,'featured image': featured_image, 'no of likes':no_of_likes, 'no of comments': no_of_comments, 'user like ?': user_like, 'time': ts}
except:
news = 'Record not found'
status = 'failed'
return {'status': status, str(news_id):news}
# This route is to get the list of news items for a page
# per means the number per page and page_num means the page number
@app.route('/api/newslist/<int:per>/<int:page_num>', methods=['GET'])
@jwt_required
def get_news_list(per, page_num):
if page_num == 0:
page_num = 1
if per == 0:
per = 20
threads = db.session.query(news_model.News).paginate(per_page=per, page=page_num, error_out=False)
no_of_items = len(threads.items)
news = {}
status = 'failed'
if no_of_items > 0:
for a in range(no_of_items):
blogger_id = threads.items[a].blogger_id
blogger_name = b_controller.blogger_name(blogger_id)
news.update({threads.items[a].id: {'news_id': threads.items[a].id,
'blogger_name': blogger_name, 'title': threads.items[a].title}})
status = 'success'
news_list = {'news_list': news, 'status': status}
print("i'm here")
return news_list
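# Example: GET /api/newslist/20/1 returns up to 20 news items for page 1 as
# {'news_list': {<news_id>: {...}}, 'status': 'success'} (or 'failed' when the page is empty).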
if __name__ == '__main__':
app.run(debug=True) | # Function to retrieve a user's details based on their id
@app.route('/users/<id>', methods=['GET'])
@login_required
def get_user(id): | random_line_split |
DQN_Snake.py | # -*- coding: UTF-8 -*-
import cv2
import numpy as np | import matplotlib.pyplot as plt
from BrainDQN_Nature import BrainDQN
##################################################################################################################
##################################################################################################################
import random, pygame
from pygame.locals import *
FPS = 200 # screen refresh rate (here it effectively sets the snake's speed)
WINDOWWIDTH = 300 # window width
WINDOWHEIGHT = 300 # window height
CELLSIZE = 20 # size of each grid cell
ALIVE_REWARD = 0 #-0.05 # reward for staying alive
WIN_REWARD = 1 # reward for eating an apple
LOSE_REWARD = -1 # penalty for dying
# Assert that the window width and height are evenly divisible by the cell size
assert WINDOWWIDTH % CELLSIZE == 0, "Window width must be a multiple of cell size."
assert WINDOWHEIGHT % CELLSIZE == 0, "Window height must be a multiple of cell size."
# Number of cells horizontally and vertically
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)
# Define a few commonly used colors
# R G B
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
DARKGREEN = ( 0, 155, 0)
DARKGRAY = ( 40, 40, 40)
BGCOLOR = BLACK
# Define the snake's movement directions
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
# Neural network outputs (one-hot action encoding)
MOVE_STAY = [1, 0, 0, 0, 0]
MOVE_UP = [0, 1, 0, 0, 0]
MOVE_DOWN = [0, 0, 1, 0, 0]
MOVE_LEFT = [0, 0, 0, 1, 0]
MOVE_RIGHT = [0, 0, 0, 0, 1]
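# Each action above is a one-hot vector over the network's 5 outputs:
# index 0 = keep the current direction, 1 = up, 2 = down, 3 = left, 4 = right (see gen_action below).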
class Game(object):
def __init__(self):
# 定義全域變數
global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init() # 初始化pygame
FPSCLOCK = pygame.time.Clock() # 獲得pygame時鐘
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) # 設置螢幕寬高
BASICFONT = pygame.font.Font('freesansbold.ttf', 18) # BASICFONT
pygame.display.set_caption('Greedy Snake') # 設置視窗的標題
self.HEAD = 0 # syntactic sugar: index of the worm's head # 貪吃蛇的頭()
self.Bodylen=3
#showStartScreen() # 顯示開始畫面
self.runGame()
def getRandomLocation(self): # 隨機生成一個座標位置
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def runGame(self):
# 隨機初始化設置一個點作為貪吃蛇的起點
startx = random.randint(5, CELLWIDTH - 6)
starty = random.randint(5, CELLHEIGHT - 6)
# 以這個點為起點,建立一個長度為3格的貪吃蛇(陣列)
self.wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
self.direction = RIGHT # 初始化一個運動的方向
self.apple = self.getRandomLocation() # 隨機一個apple的位置
# Draw the snake based on the wormCoords list
def drawWorm(self,wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
# Draw the apple at the given coord
def drawApple(self,coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect,8)
# Draw all the grid lines
def drawGrid(self):
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
def gen_action(self,optfromNN):
if optfromNN[0]==1: return MOVE_STAY
elif optfromNN[1]==1: return MOVE_UP
elif optfromNN[2]==1: return MOVE_DOWN
elif optfromNN[3]==1: return MOVE_LEFT
elif optfromNN[4]==1: return MOVE_RIGHT
def step(self, action):
pygame.display.update()
terminal=False
reward=0
if action==MOVE_LEFT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = LEFT
elif action==MOVE_RIGHT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = RIGHT
elif action==MOVE_UP and self.direction!=UP and self.direction!=DOWN: self.direction = UP
elif action==MOVE_DOWN and self.direction!=UP and self.direction!=DOWN: self.direction = DOWN
elif action==MOVE_STAY : pass
# Check whether the snake has hit the boundary
if self.wormCoords[self.HEAD]['x'] == -1 or self.wormCoords[self.HEAD]['x'] == CELLWIDTH or self.wormCoords[self.HEAD]['y'] == -1 or self.wormCoords[self.HEAD]['y'] == CELLHEIGHT:
terminal=True
reward=LOSE_REWARD
print ("撞牆死....")
for wormBody in self.wormCoords[1:]: # 檢查貪吃蛇是否撞到自己
if wormBody['x'] == self.wormCoords[self.HEAD]['x'] and wormBody['y'] == self.wormCoords[self.HEAD]['y']:
terminal=True
reward=LOSE_REWARD
print ("撞自己死....")
break
if terminal==False:
# Check whether the snake has eaten the apple
if self.wormCoords[self.HEAD]['x'] == self.apple['x'] and self.wormCoords[self.HEAD]['y'] == self.apple['y']:
self.apple = self.getRandomLocation() # 重新隨機生成一個apple # 不移除蛇的最後一個尾巴格
reward=WIN_REWARD
self.Bodylen+=1
else: #沒吃到apple也是要給予存活獎勵
reward=ALIVE_REWARD/self.Bodylen
del self.wormCoords[-1] # 移除蛇的最後一個尾巴格
# Move the snake by inserting a new head cell in the current direction
if self.direction == UP:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] - 1}
elif self.direction == DOWN:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] + 1}
elif self.direction == LEFT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] - 1, 'y': self.wormCoords[self.HEAD]['y']}
elif self.direction == RIGHT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] + 1, 'y': self.wormCoords[self.HEAD]['y']}
self.wormCoords.insert(0, newHead) # 插入新的蛇頭在陣列的最前面
#drawScore(len(self.wormCoords) - 3) # 繪製分數(分數為貪吃蛇陣列當前的長度-3)
DISPLAYSURF.fill(BGCOLOR) # 繪製背景
self.drawGrid() # 繪製所有的方格
self.drawWorm(self.wormCoords) # 繪製貪吃蛇
self.drawApple(self.apple) # 繪製apple
pygame.display.update() # 更新螢幕
FPSCLOCK.tick(FPS) # 設置幀率
if terminal==True:
gameOverFont = pygame.font.Font('freesansbold.ttf', 40)
gameOverSurf = gameOverFont.render('Game Over', True, WHITE)
gameOverRect = gameOverSurf.get_rect()
gameOverRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameOverRect.height-10)
DISPLAYSURF.blit(gameOverSurf, gameOverRect)
# 獲得遊戲畫面的影像
screen_image = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
# Return the screen image, the reward, and the terminal flag
return screen_image,reward, terminal
##################################################################################################################
##################################################################################################################
# preprocess raw image to 80*80 gray image
def preprocess(observation):
observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
#plt.imshow(observation, cmap ='gray'); plt.show();
return np.reshape(observation,(80,80,1))
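# The returned observation is an 80x80 single-channel binary image (pixel values 0 or 255),
# which BrainDQN consumes through setInitState/setPerception in playGame below.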
def playGame():
# Step 0: Define report counters
win = 0
lose = 0
points = 0
# Step 1: init BrainDQN
actions = 5
brain = BrainDQN(actions)
# Step 2: init Game
bg = Game()
# Step 3: play game
# Step 3.1: obtain init state
action0 = bg.gen_action([1,0,0,0,0]) # do nothing
observation0, reward0, terminal = bg.step(action0)
observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)
brain.setInitState(observation0)
# Step 3.2: run the game
while True:
pygame.event.get() #讓遊戲畫面能夠更新
action = bg.gen_action(brain.getAction())
Observation,reward,terminal = bg.step(action)
nextObservation = preprocess(Observation)
brain.setPerception(nextObservation,action,reward,terminal)
######################## 統計輸出報表用 ########################
points+=reward
if terminal==True:
win+=points
lose+=1
points=0
bg = Game()
print("Lost cnt:" ,lose," ,win_points:",round(points,2)," ,cnt:",brain.timeStep)
if brain.timeStep % 1000 == 0:
learn_rate.append(lose)
win_cnt.append(win)
plt.plot(learn_rate,"g");plt.plot(win_cnt,"r");plt.show();
lose=0
win=0
######################## 統計輸出報表用 ########################
learn_rate=[]
win_cnt=[]
def main():
playGame()
if __name__ == '__main__':
main()
# # 繪製分數
# def drawScore(self,score):
# scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)
# scoreRect = scoreSurf.get_rect()
# scoreRect.topleft = (WINDOWWIDTH - 120, 10)
# DISPLAYSURF.blit(scoreSurf, scoreRect)
# 顯示遊戲結束畫面
# def showGameOverScreen():
# gameOverFont = pygame.font.Font('freesansbold.ttf', 50)
# gameSurf = gameOverFont.render('Game', True, WHITE)
# overSurf = gameOverFont.render('Over', True, WHITE)
# gameRect = gameSurf.get_rect()
# overRect = overSurf.get_rect()
# gameRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameRect.height-10)
# overRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
#
# DISPLAYSURF.blit(gameSurf, gameRect)
# DISPLAYSURF.blit(overSurf, overRect)
# drawPressKeyMsg()
# pygame.display.update()
# pygame.time.wait(500)
# checkForKeyPress() # clear out any key presses in the event queue
#
# while True:
# if checkForKeyPress():
# pygame.event.get() # clear event queue
# return
# while True:
#
# # 這裡一直迴圈於開始遊戲和顯示遊戲結束畫面之間,
# # 運行遊戲裡有一個迴圈,顯示遊戲結束畫面也有一個迴圈
# # 兩個迴圈都有相應的return,這樣就可以達到切換這兩個模組的效果
#
# runGame() # 運行遊戲
#
# showGameOverScreen() # 顯示遊戲結束畫面
# for event in pygame.event.get(): # 事件處理
# if event.type == QUIT: # 退出事件
# terminate()
# elif event.type == KEYDOWN: # 按鍵事件
# #如果按下的是左鍵或a鍵,且當前的方向不是向右,就改變方向,以此類推
# if (event.key == K_LEFT or event.key == K_a) and direction != RIGHT:
# direction = LEFT
# elif (event.key == K_RIGHT or event.key == K_d) and direction != LEFT:
# direction = RIGHT
# elif (event.key == K_UP or event.key == K_w) and direction != DOWN:
# direction = UP
# elif (event.key == K_DOWN or event.key == K_s) and direction != UP:
# direction = DOWN
# elif event.key == K_ESCAPE:
# terminate()
# # 繪製提示消息
# def drawPressKeyMsg():
# pressKeySurf = BASICFONT.render('Press a key to play.', True, DARKGRAY)
# pressKeyRect = pressKeySurf.get_rect()
# pressKeyRect.topleft = (WINDOWWIDTH - 200, WINDOWHEIGHT - 30)
# DISPLAYSURF.blit(pressKeySurf, pressKeyRect)
#
# # 檢查按鍵是否有按鍵事件
# def checkForKeyPress():
# if len(pygame.event.get(QUIT)) > 0:
# terminate()
#
# keyUpEvents = pygame.event.get(KEYUP)
# if len(keyUpEvents) == 0:
# return None
# if keyUpEvents[0].key == K_ESCAPE:
# terminate()
# return keyUpEvents[0].key
# # 顯示開始畫面
# def showStartScreen():
#
# DISPLAYSURF.fill(BGCOLOR)
# titleFont = pygame.font.Font('freesansbold.ttf', 50)
# titleSurf = titleFont.render('Greedy Snake', True, GREEN)
# titleRect = titleSurf.get_rect()
# titleRect.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
# DISPLAYSURF.blit(titleSurf, titleRect)
# drawPressKeyMsg()
#
# pygame.display.update()
#
# while True:
#
# if checkForKeyPress():
# pygame.event.get() # clear event queue
# return
# # 退出
# def terminate():
# pygame.quit()
# sys.exit() | random_line_split |
|
DQN_Snake.py | # -*- coding: UTF-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
from BrainDQN_Nature import BrainDQN
##################################################################################################################
##################################################################################################################
import random, pygame
from pygame.locals import *
FPS = 200 # screen refresh rate (here it effectively sets the snake's speed)
WINDOWWIDTH = 300 # window width
WINDOWHEIGHT = 300 # window height
CELLSIZE = 20 # size of each grid cell
ALIVE_REWARD = 0 #-0.05 # reward for staying alive
WIN_REWARD = 1 # reward for eating an apple
LOSE_REWARD = -1 # penalty for dying
# Assert that the window width and height are evenly divisible by the cell size
assert WINDOWWIDTH % CELLSIZE == 0, "Window width must be a multiple of cell size."
assert WINDOWHEIGHT % CELLSIZE == 0, "Window height must be a multiple of cell size."
# Number of cells horizontally and vertically
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)
# Define a few commonly used colors
# R G B
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
DARKGREEN = ( 0, 155, 0)
DARKGRAY = ( 40, 40, 40)
BGCOLOR = BLACK
# Define the snake's movement directions
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
# Neural network outputs (one-hot action encoding)
MOVE_STAY = [1, 0, 0, 0, 0]
MOVE_UP = [0, 1, 0, 0, 0]
MOVE_DOWN = [0, 0, 1, 0, 0]
MOVE_LEFT = [0, 0, 0, 1, 0]
MOVE_RIGHT = [0, 0, 0, 0, 1]
class Game(object):
def __init__(self):
# 定義全域變數
global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init() # 初始化pygame
FPSCLOCK = pygame.time.Clock() # 獲得pygame時鐘
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) # 設置螢幕寬高
BASICFONT = pygame.font.Font('freesansbold.ttf', 18) # BASICFONT
pygame.display.set_caption('Greedy Snake') # 設置視窗的標題
self.HEAD = 0 # syntactic sugar: index of the worm's head # 貪吃蛇的頭()
self.Bodylen=3
#showStartScreen() # 顯示開始畫面
self.runGame()
def getRandomLocation(self): # 隨機生成一個座標位置
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def runGame(self):
# 隨機初始化設置一個點作為貪吃蛇的起點
startx = random.randint(5, CELLWIDTH - 6)
starty = random.randint(5, CELLHEIGHT - 6)
# 以這個點為起點,建立一個長度為3格的貪吃蛇(陣列)
self.wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
self.direction = RIGHT # 初始化一個運動的方向
self.apple = self.getRandomLocation() # 隨機一個apple的位置
# 根據 wormCoords 陣列繪製貪吃蛇
def drawWorm(self,wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
# 根據 coord 繪製 apple
def drawApple(self,coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect,8)
# 繪製所有的方格
def drawGrid(self):
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
def gen_action(self,optfromNN):
if optfromNN[0]==1: return MOVE_STAY
elif optfromNN[1]==1: return MOVE_UP
elif optfromNN[2]==1: return MOVE_DOWN
elif optfromNN[3]==1: return MOVE_LEFT
elif optfromNN[4]==1: return MOVE_RIGHT
def step(self, action):
pygame.display.update()
terminal=False
reward=0
if action==MOVE_LEFT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = LEFT
elif action==MOVE_RIGHT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = RIGHT
elif action==MOVE_UP and self.direction!=UP and self.direction!=DOWN: self.direction = UP
elif action==MOVE_DOWN and self.direction!=UP and self.direction!=DOWN: self.direction = DOWN
elif action==MOVE_STAY : pass
# 檢查貪吃蛇是否撞到撞到邊界
if self.wormCoords[self.HEAD]['x'] == -1 or self.wormCoords[self.HEAD]['x'] == CELLWIDTH or self.wormCoords[self.HEAD]['y'] == -1 or self.wormCoords[self.HEAD]['y'] == CELLHEIGHT | reward=LOSE_REWARD
print ("撞牆死....")
for wormBody in self.wormCoords[1:]: # 檢查貪吃蛇是否撞到自己
if wormBody['x'] == self.wormCoords[self.HEAD]['x'] and wormBody['y'] == self.wormCoords[self.HEAD]['y']:
terminal=True
reward=LOSE_REWARD
print ("撞自己死....")
break
if terminal==False:
# 檢查貪吃蛇是否吃到apple
if self.wormCoords[self.HEAD]['x'] == self.apple['x'] and self.wormCoords[self.HEAD]['y'] == self.apple['y']:
self.apple = self.getRandomLocation() # 重新隨機生成一個apple # 不移除蛇的最後一個尾巴格
reward=WIN_REWARD
self.Bodylen+=1
else: #沒吃到apple也是要給予存活獎勵
reward=ALIVE_REWARD/self.Bodylen
del self.wormCoords[-1] # 移除蛇的最後一個尾巴格
# 根據方向,添加一個新的蛇頭,以這種方式來移動貪吃蛇
if self.direction == UP:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] - 1}
elif self.direction == DOWN:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] + 1}
elif self.direction == LEFT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] - 1, 'y': self.wormCoords[self.HEAD]['y']}
elif self.direction == RIGHT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] + 1, 'y': self.wormCoords[self.HEAD]['y']}
self.wormCoords.insert(0, newHead) # 插入新的蛇頭在陣列的最前面
#drawScore(len(self.wormCoords) - 3) # 繪製分數(分數為貪吃蛇陣列當前的長度-3)
DISPLAYSURF.fill(BGCOLOR) # 繪製背景
self.drawGrid() # 繪製所有的方格
self.drawWorm(self.wormCoords) # 繪製貪吃蛇
self.drawApple(self.apple) # 繪製apple
pygame.display.update() # 更新螢幕
FPSCLOCK.tick(FPS) # 設置幀率
if terminal==True:
gameOverFont = pygame.font.Font('freesansbold.ttf', 40)
gameOverSurf = gameOverFont.render('Game Over', True, WHITE)
gameOverRect = gameOverSurf.get_rect()
gameOverRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameOverRect.height-10)
DISPLAYSURF.blit(gameOverSurf, gameOverRect)
# 獲得遊戲畫面的影像
screen_image = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
# 返回遊戲畫面和對應的賞罰
return screen_image,reward, terminal
##################################################################################################################
##################################################################################################################
# preprocess raw image to 80*80 gray image
def preprocess(observation):
observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
#plt.imshow(observation, cmap ='gray'); plt.show();
return np.reshape(observation,(80,80,1))
def playGame():
# Step 0: Define report counters
win = 0
lose = 0
points = 0
# Step 1: init BrainDQN
actions = 5
brain = BrainDQN(actions)
# Step 2: init Game
bg = Game()
# Step 3: play game
# Step 3.1: obtain init state
action0 = bg.gen_action([1,0,0,0,0]) # do nothing
observation0, reward0, terminal = bg.step(action0)
observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)
brain.setInitState(observation0)
# Step 3.2: run the game
while True:
pygame.event.get() #讓遊戲畫面能夠更新
action = bg.gen_action(brain.getAction())
Observation,reward,terminal = bg.step(action)
nextObservation = preprocess(Observation)
brain.setPerception(nextObservation,action,reward,terminal)
######################## 統計輸出報表用 ########################
points+=reward
if terminal==True:
win+=points
lose+=1
points=0
bg = Game()
print("Lost cnt:" ,lose," ,win_points:",round(points,2)," ,cnt:",brain.timeStep)
if brain.timeStep % 1000 == 0:
learn_rate.append(lose)
win_cnt.append(win)
plt.plot(learn_rate,"g");plt.plot(win_cnt,"r");plt.show();
lose=0
win=0
######################## 統計輸出報表用 ########################
learn_rate=[]
win_cnt=[]
def main():
playGame()
if __name__ == '__main__':
main()
# # 繪製分數
# def drawScore(self,score):
# scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)
# scoreRect = scoreSurf.get_rect()
# scoreRect.topleft = (WINDOWWIDTH - 120, 10)
# DISPLAYSURF.blit(scoreSurf, scoreRect)
# 顯示遊戲結束畫面
# def showGameOverScreen():
# gameOverFont = pygame.font.Font('freesansbold.ttf', 50)
# gameSurf = gameOverFont.render('Game', True, WHITE)
# overSurf = gameOverFont.render('Over', True, WHITE)
# gameRect = gameSurf.get_rect()
# overRect = overSurf.get_rect()
# gameRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameRect.height-10)
# overRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
#
# DISPLAYSURF.blit(gameSurf, gameRect)
# DISPLAYSURF.blit(overSurf, overRect)
# drawPressKeyMsg()
# pygame.display.update()
# pygame.time.wait(500)
# checkForKeyPress() # clear out any key presses in the event queue
#
# while True:
# if checkForKeyPress():
# pygame.event.get() # clear event queue
# return
# while True:
#
# # 這裡一直迴圈於開始遊戲和顯示遊戲結束畫面之間,
# # 運行遊戲裡有一個迴圈,顯示遊戲結束畫面也有一個迴圈
# # 兩個迴圈都有相應的return,這樣就可以達到切換這兩個模組的效果
#
# runGame() # 運行遊戲
#
# showGameOverScreen() # 顯示遊戲結束畫面
# for event in pygame.event.get(): # 事件處理
# if event.type == QUIT: # 退出事件
# terminate()
# elif event.type == KEYDOWN: # 按鍵事件
# #如果按下的是左鍵或a鍵,且當前的方向不是向右,就改變方向,以此類推
# if (event.key == K_LEFT or event.key == K_a) and direction != RIGHT:
# direction = LEFT
# elif (event.key == K_RIGHT or event.key == K_d) and direction != LEFT:
# direction = RIGHT
# elif (event.key == K_UP or event.key == K_w) and direction != DOWN:
# direction = UP
# elif (event.key == K_DOWN or event.key == K_s) and direction != UP:
# direction = DOWN
# elif event.key == K_ESCAPE:
# terminate()
# # 繪製提示消息
# def drawPressKeyMsg():
# pressKeySurf = BASICFONT.render('Press a key to play.', True, DARKGRAY)
# pressKeyRect = pressKeySurf.get_rect()
# pressKeyRect.topleft = (WINDOWWIDTH - 200, WINDOWHEIGHT - 30)
# DISPLAYSURF.blit(pressKeySurf, pressKeyRect)
#
# # 檢查按鍵是否有按鍵事件
# def checkForKeyPress():
# if len(pygame.event.get(QUIT)) > 0:
# terminate()
#
# keyUpEvents = pygame.event.get(KEYUP)
# if len(keyUpEvents) == 0:
# return None
# if keyUpEvents[0].key == K_ESCAPE:
# terminate()
# return keyUpEvents[0].key
# # 顯示開始畫面
# def showStartScreen():
#
# DISPLAYSURF.fill(BGCOLOR)
# titleFont = pygame.font.Font('freesansbold.ttf', 50)
# titleSurf = titleFont.render('Greedy Snake', True, GREEN)
# titleRect = titleSurf.get_rect()
# titleRect.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
# DISPLAYSURF.blit(titleSurf, titleRect)
# drawPressKeyMsg()
#
# pygame.display.update()
#
# while True:
#
# if checkForKeyPress():
# pygame.event.get() # clear event queue
# return
# # 退出
# def terminate():
# pygame.quit()
# sys.exit()
| :
terminal=True
| conditional_block |
DQN_Snake.py | # -*- coding: UTF-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
from BrainDQN_Nature import BrainDQN
##################################################################################################################
##################################################################################################################
import random, pygame
from pygame.locals import *
FPS = 200 # screen refresh rate (here it effectively sets the snake's speed)
WINDOWWIDTH = 300 # window width
WINDOWHEIGHT = 300 # window height
CELLSIZE = 20 # size of each grid cell
ALIVE_REWARD = 0 #-0.05 # reward for staying alive
WIN_REWARD = 1 # reward for eating an apple
LOSE_REWARD = -1 # penalty for dying
# Assert that the window width and height are evenly divisible by the cell size
assert WINDOWWIDTH % CELLSIZE == 0, "Window width must be a multiple of cell size."
assert WINDOWHEIGHT % CELLSIZE == 0, "Window height must be a multiple of cell size."
# Number of cells horizontally and vertically
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)
# Define a few commonly used colors
# R G B
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
DARKGREEN = ( 0, 155, 0)
DARKGRAY = ( 40, 40, 40)
BGCOLOR = BLACK
# Define the snake's movement directions
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
# Neural network outputs (one-hot action encoding)
MOVE_STAY = [1, 0, 0, 0, 0]
MOVE_UP = [0, 1, 0, 0, 0]
MOVE_DOWN = [0, 0, 1, 0, 0]
MOVE_LEFT = [0, 0, 0, 1, 0]
MOVE_RIGHT = [0, 0, 0, 0, 1]
class Game(object):
def __init__(self):
# 定義全域變數
global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init() # 初始化pygame
FPSCLOCK = pygame.time.Clock() # 獲得pygame時鐘
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) # 設置螢幕寬高
BAS | andom.randint(5, CELLHEIGHT - 6)
# 以這個點為起點,建立一個長度為3格的貪吃蛇(陣列)
self.wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
self.direction = RIGHT # 初始化一個運動的方向
self.apple = self.getRandomLocation() # 隨機一個apple的位置
# 根據 wormCoords 陣列繪製貪吃蛇
def drawWorm(self,wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
# 根據 coord 繪製 apple
def drawApple(self,coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect,8)
# 繪製所有的方格
def drawGrid(self):
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
def gen_action(self,optfromNN):
if optfromNN[0]==1: return MOVE_STAY
elif optfromNN[1]==1: return MOVE_UP
elif optfromNN[2]==1: return MOVE_DOWN
elif optfromNN[3]==1: return MOVE_LEFT
elif optfromNN[4]==1: return MOVE_RIGHT
def step(self, action):
pygame.display.update()
terminal=False
reward=0
if action==MOVE_LEFT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = LEFT
elif action==MOVE_RIGHT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = RIGHT
elif action==MOVE_UP and self.direction!=UP and self.direction!=DOWN: self.direction = UP
elif action==MOVE_DOWN and self.direction!=UP and self.direction!=DOWN: self.direction = DOWN
elif action==MOVE_STAY : pass
# 檢查貪吃蛇是否撞到撞到邊界
if self.wormCoords[self.HEAD]['x'] == -1 or self.wormCoords[self.HEAD]['x'] == CELLWIDTH or self.wormCoords[self.HEAD]['y'] == -1 or self.wormCoords[self.HEAD]['y'] == CELLHEIGHT:
terminal=True
reward=LOSE_REWARD
print ("撞牆死....")
for wormBody in self.wormCoords[1:]: # 檢查貪吃蛇是否撞到自己
if wormBody['x'] == self.wormCoords[self.HEAD]['x'] and wormBody['y'] == self.wormCoords[self.HEAD]['y']:
terminal=True
reward=LOSE_REWARD
print ("撞自己死....")
break
if terminal==False:
# 檢查貪吃蛇是否吃到apple
if self.wormCoords[self.HEAD]['x'] == self.apple['x'] and self.wormCoords[self.HEAD]['y'] == self.apple['y']:
self.apple = self.getRandomLocation() # 重新隨機生成一個apple # 不移除蛇的最後一個尾巴格
reward=WIN_REWARD
self.Bodylen+=1
else: #沒吃到apple也是要給予存活獎勵
reward=ALIVE_REWARD/self.Bodylen
del self.wormCoords[-1] # 移除蛇的最後一個尾巴格
# 根據方向,添加一個新的蛇頭,以這種方式來移動貪吃蛇
if self.direction == UP:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] - 1}
elif self.direction == DOWN:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] + 1}
elif self.direction == LEFT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] - 1, 'y': self.wormCoords[self.HEAD]['y']}
elif self.direction == RIGHT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] + 1, 'y': self.wormCoords[self.HEAD]['y']}
self.wormCoords.insert(0, newHead) # 插入新的蛇頭在陣列的最前面
#drawScore(len(self.wormCoords) - 3) # 繪製分數(分數為貪吃蛇陣列當前的長度-3)
DISPLAYSURF.fill(BGCOLOR) # 繪製背景
self.drawGrid() # 繪製所有的方格
self.drawWorm(self.wormCoords) # 繪製貪吃蛇
self.drawApple(self.apple) # 繪製apple
pygame.display.update() # 更新螢幕
FPSCLOCK.tick(FPS) # 設置幀率
if terminal==True:
gameOverFont = pygame.font.Font('freesansbold.ttf', 40)
gameOverSurf = gameOverFont.render('Game Over', True, WHITE)
gameOverRect = gameOverSurf.get_rect()
gameOverRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameOverRect.height-10)
DISPLAYSURF.blit(gameOverSurf, gameOverRect)
# 獲得遊戲畫面的影像
screen_image = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
# 返回遊戲畫面和對應的賞罰
return screen_image,reward, terminal
##################################################################################################################
##################################################################################################################
# preprocess raw image to 80*80 gray image
def preprocess(observation):
observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
#plt.imshow(observation, cmap ='gray'); plt.show();
return np.reshape(observation,(80,80,1))
def playGame():
# Step 0: Define report counters
win = 0
lose = 0
points = 0
# Step 1: init BrainDQN
actions = 5
brain = BrainDQN(actions)
# Step 2: init Game
bg = Game()
# Step 3: play game
# Step 3.1: obtain init state
action0 = bg.gen_action([1,0,0,0,0]) # do nothing
observation0, reward0, terminal = bg.step(action0)
observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)
brain.setInitState(observation0)
# Step 3.2: run the game
while True:
pygame.event.get() #讓遊戲畫面能夠更新
action = bg.gen_action(brain.getAction())
Observation,reward,terminal = bg.step(action)
nextObservation = preprocess(Observation)
brain.setPerception(nextObservation,action,reward,terminal)
######################## 統計輸出報表用 ########################
points+=reward
if terminal==True:
win+=points
lose+=1
points=0
bg = Game()
print("Lost cnt:" ,lose," ,win_points:",round(points,2)," ,cnt:",brain.timeStep)
if brain.timeStep % 1000 == 0:
learn_rate.append(lose)
win_cnt.append(win)
plt.plot(learn_rate,"g");plt.plot(win_cnt,"r");plt.show();
lose=0
win=0
######################## 統計輸出報表用 ########################
learn_rate=[]
win_cnt=[]
def main():
playGame()
if __name__ == '__main__':
main()
# # 繪製分數
# def drawScore(self,score):
# scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)
# scoreRect = scoreSurf.get_rect()
# scoreRect.topleft = (WINDOWWIDTH - 120, 10)
# DISPLAYSURF.blit(scoreSurf, scoreRect)
# 顯示遊戲結束畫面
# def showGameOverScreen():
# gameOverFont = pygame.font.Font('freesansbold.ttf', 50)
# gameSurf = gameOverFont.render('Game', True, WHITE)
# overSurf = gameOverFont.render('Over', True, WHITE)
# gameRect = gameSurf.get_rect()
# overRect = overSurf.get_rect()
# gameRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameRect.height-10)
# overRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
#
# DISPLAYSURF.blit(gameSurf, gameRect)
# DISPLAYSURF.blit(overSurf, overRect)
# drawPressKeyMsg()
# pygame.display.update()
# pygame.time.wait(500)
# checkForKeyPress() # clear out any key presses in the event queue
#
# while True:
# if checkForKeyPress():
# pygame.event.get() # clear event queue
# return
# while True:
#
# # 這裡一直迴圈於開始遊戲和顯示遊戲結束畫面之間,
# # 運行遊戲裡有一個迴圈,顯示遊戲結束畫面也有一個迴圈
# # 兩個迴圈都有相應的return,這樣就可以達到切換這兩個模組的效果
#
# runGame() # 運行遊戲
#
# showGameOverScreen() # 顯示遊戲結束畫面
# for event in pygame.event.get(): # 事件處理
# if event.type == QUIT: # 退出事件
# terminate()
# elif event.type == KEYDOWN: # 按鍵事件
# #如果按下的是左鍵或a鍵,且當前的方向不是向右,就改變方向,以此類推
# if (event.key == K_LEFT or event.key == K_a) and direction != RIGHT:
# direction = LEFT
# elif (event.key == K_RIGHT or event.key == K_d) and direction != LEFT:
# direction = RIGHT
# elif (event.key == K_UP or event.key == K_w) and direction != DOWN:
# direction = UP
# elif (event.key == K_DOWN or event.key == K_s) and direction != UP:
# direction = DOWN
# elif event.key == K_ESCAPE:
# terminate()
# # 繪製提示消息
# def drawPressKeyMsg():
# pressKeySurf = BASICFONT.render('Press a key to play.', True, DARKGRAY)
# pressKeyRect = pressKeySurf.get_rect()
# pressKeyRect.topleft = (WINDOWWIDTH - 200, WINDOWHEIGHT - 30)
# DISPLAYSURF.blit(pressKeySurf, pressKeyRect)
#
# # 檢查按鍵是否有按鍵事件
# def checkForKeyPress():
# if len(pygame.event.get(QUIT)) > 0:
# terminate()
#
# keyUpEvents = pygame.event.get(KEYUP)
# if len(keyUpEvents) == 0:
# return None
# if keyUpEvents[0].key == K_ESCAPE:
# terminate()
# return keyUpEvents[0].key
# # 顯示開始畫面
# def showStartScreen():
#
# DISPLAYSURF.fill(BGCOLOR)
# titleFont = pygame.font.Font('freesansbold.ttf', 50)
# titleSurf = titleFont.render('Greedy Snake', True, GREEN)
# titleRect = titleSurf.get_rect()
# titleRect.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
# DISPLAYSURF.blit(titleSurf, titleRect)
# drawPressKeyMsg()
#
# pygame.display.update()
#
# while True:
#
# if checkForKeyPress():
# pygame.event.get() # clear event queue
# return
# # 退出
# def terminate():
# pygame.quit()
# sys.exit()
| ICFONT = pygame.font.Font('freesansbold.ttf', 18) # BASICFONT
pygame.display.set_caption('Greedy Snake') # 設置視窗的標題
self.HEAD = 0 # syntactic sugar: index of the worm's head # 貪吃蛇的頭()
self.Bodylen=3
#showStartScreen() # 顯示開始畫面
self.runGame()
def getRandomLocation(self): # 隨機生成一個座標位置
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def runGame(self):
# 隨機初始化設置一個點作為貪吃蛇的起點
startx = random.randint(5, CELLWIDTH - 6)
starty = r | identifier_body |
DQN_Snake.py | # -*- coding: UTF-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
from BrainDQN_Nature import BrainDQN
##################################################################################################################
##################################################################################################################
import random, pygame
from pygame.locals import *
FPS = 200 # screen refresh rate (here it effectively sets the snake's speed)
WINDOWWIDTH = 300 # window width
WINDOWHEIGHT = 300 # window height
CELLSIZE = 20 # size of each grid cell
ALIVE_REWARD = 0 #-0.05 # reward for staying alive
WIN_REWARD = 1 # reward for eating an apple
LOSE_REWARD = -1 # penalty for dying
# Assert that the window width and height are evenly divisible by the cell size
assert WINDOWWIDTH % CELLSIZE == 0, "Window width must be a multiple of cell size."
assert WINDOWHEIGHT % CELLSIZE == 0, "Window height must be a multiple of cell size."
# Number of cells horizontally and vertically
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)
# Define a few commonly used colors
# R G B
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
DARKGREEN = ( 0, 155, 0)
DARKGRAY = ( 40, 40, 40)
BGCOLOR = BLACK
# Define the snake's movement directions
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
# Neural network outputs (one-hot action encoding)
MOVE_STAY = [1, 0, 0, 0, 0]
MOVE_UP = [0, 1, 0, 0, 0]
MOVE_DOWN = [0, 0, 1, 0, 0]
MOVE_LEFT = [0, 0, 0, 1, 0]
MOVE_RIGHT = [0, 0, 0, 0, 1]
class Game(object):
def __init__(self):
# 定義全域變數
global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init() # 初始化pygame
FPSCLOCK = pygame.time.Clock() # 獲得pygame時鐘
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) # 設置螢幕寬高
BASICFONT = pygame.font.Font('freesansbold.ttf', 18) # BASICFONT
pygame.display.set_caption('Greedy Snake') # 設置視窗的標題
self.HEAD = 0 # syntactic sugar: index of the worm's head # 貪吃蛇的頭()
self.Bodylen=3
#showStartScreen() # 顯示開始畫面
self.runGame()
def getRandomLocation(self): # 隨機生成一個座標位置
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def runGame(self):
# 隨機初始化設置一個點作為貪吃蛇的起點
startx = random.randint(5, CELLWIDTH - 6)
starty = random.randint(5, CELLHEIGHT - 6)
# 以這個點為起點,建立一個長度為3格的貪吃蛇(陣列)
self.wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
self.direction = RIGHT # 初始化一個運動的方向
self.apple = self.getRandomLocation() # 隨機一個apple的位置
# 根據 wormCoords 陣列繪製貪吃蛇
def drawWorm(self,wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
# 根據 coord 繪製 apple
def drawApple(self,coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect,8)
# 繪製所有的方格
def drawGrid(self):
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
def gen_action(self,optfromNN):
if optfromNN[0]==1: return MOVE_STAY
elif optfromNN[1]==1: return MOVE_UP
elif optfromNN[2]==1: return MOVE_DOWN
elif optfromNN[3]==1: return MOVE_LEFT
elif optfromNN[4]==1: return MOVE_RIGHT
def step(self, action):
pygame.display.update()
terminal=False
reward=0
if action==MOVE_LEFT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = LEFT
elif action==MOVE_RI | lf.direction!=LEFT and self.direction!=RIGHT : self.direction = RIGHT
elif action==MOVE_UP and self.direction!=UP and self.direction!=DOWN: self.direction = UP
elif action==MOVE_DOWN and self.direction!=UP and self.direction!=DOWN: self.direction = DOWN
elif action==MOVE_STAY : pass
# 檢查貪吃蛇是否撞到撞到邊界
if self.wormCoords[self.HEAD]['x'] == -1 or self.wormCoords[self.HEAD]['x'] == CELLWIDTH or self.wormCoords[self.HEAD]['y'] == -1 or self.wormCoords[self.HEAD]['y'] == CELLHEIGHT:
terminal=True
reward=LOSE_REWARD
print ("撞牆死....")
for wormBody in self.wormCoords[1:]: # 檢查貪吃蛇是否撞到自己
if wormBody['x'] == self.wormCoords[self.HEAD]['x'] and wormBody['y'] == self.wormCoords[self.HEAD]['y']:
terminal=True
reward=LOSE_REWARD
print ("撞自己死....")
break
if terminal==False:
# 檢查貪吃蛇是否吃到apple
if self.wormCoords[self.HEAD]['x'] == self.apple['x'] and self.wormCoords[self.HEAD]['y'] == self.apple['y']:
self.apple = self.getRandomLocation() # 重新隨機生成一個apple # 不移除蛇的最後一個尾巴格
reward=WIN_REWARD
self.Bodylen+=1
else: #沒吃到apple也是要給予存活獎勵
reward=ALIVE_REWARD/self.Bodylen
del self.wormCoords[-1] # 移除蛇的最後一個尾巴格
# 根據方向,添加一個新的蛇頭,以這種方式來移動貪吃蛇
if self.direction == UP:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] - 1}
elif self.direction == DOWN:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] + 1}
elif self.direction == LEFT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] - 1, 'y': self.wormCoords[self.HEAD]['y']}
elif self.direction == RIGHT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] + 1, 'y': self.wormCoords[self.HEAD]['y']}
self.wormCoords.insert(0, newHead) # 插入新的蛇頭在陣列的最前面
#drawScore(len(self.wormCoords) - 3) # 繪製分數(分數為貪吃蛇陣列當前的長度-3)
DISPLAYSURF.fill(BGCOLOR) # 繪製背景
self.drawGrid() # 繪製所有的方格
self.drawWorm(self.wormCoords) # 繪製貪吃蛇
self.drawApple(self.apple) # 繪製apple
pygame.display.update() # 更新螢幕
FPSCLOCK.tick(FPS) # 設置幀率
if terminal==True:
gameOverFont = pygame.font.Font('freesansbold.ttf', 40)
gameOverSurf = gameOverFont.render('Game Over', True, WHITE)
gameOverRect = gameOverSurf.get_rect()
gameOverRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameOverRect.height-10)
DISPLAYSURF.blit(gameOverSurf, gameOverRect)
# 獲得遊戲畫面的影像
screen_image = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
# 返回遊戲畫面和對應的賞罰
return screen_image,reward, terminal
##################################################################################################################
##################################################################################################################
# preprocess raw image to 80*80 gray image
def preprocess(observation):
observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
#plt.imshow(observation, cmap ='gray'); plt.show();
return np.reshape(observation,(80,80,1))
def playGame():
# Step 0: Define report counters
win = 0
lose = 0
points = 0
# Step 1: init BrainDQN
actions = 5
brain = BrainDQN(actions)
# Step 2: init Game
bg = Game()
# Step 3: play game
# Step 3.1: obtain init state
action0 = bg.gen_action([1,0,0,0,0]) # do nothing
observation0, reward0, terminal = bg.step(action0)
observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)
brain.setInitState(observation0)
# Step 3.2: run the game
while True:
pygame.event.get() #讓遊戲畫面能夠更新
action = bg.gen_action(brain.getAction())
Observation,reward,terminal = bg.step(action)
nextObservation = preprocess(Observation)
brain.setPerception(nextObservation,action,reward,terminal)
######################## 統計輸出報表用 ########################
points+=reward
if terminal==True:
win+=points
lose+=1
points=0
bg = Game()
print("Lost cnt:" ,lose," ,win_points:",round(points,2)," ,cnt:",brain.timeStep)
if brain.timeStep % 1000 == 0:
learn_rate.append(lose)
win_cnt.append(win)
plt.plot(learn_rate,"g");plt.plot(win_cnt,"r");plt.show();
lose=0
win=0
######################## 統計輸出報表用 ########################
learn_rate=[]
win_cnt=[]
def main():
playGame()
if __name__ == '__main__':
main()
# # 繪製分數
# def drawScore(self,score):
# scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)
# scoreRect = scoreSurf.get_rect()
# scoreRect.topleft = (WINDOWWIDTH - 120, 10)
# DISPLAYSURF.blit(scoreSurf, scoreRect)
# 顯示遊戲結束畫面
# def showGameOverScreen():
# gameOverFont = pygame.font.Font('freesansbold.ttf', 50)
# gameSurf = gameOverFont.render('Game', True, WHITE)
# overSurf = gameOverFont.render('Over', True, WHITE)
# gameRect = gameSurf.get_rect()
# overRect = overSurf.get_rect()
# gameRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameRect.height-10)
# overRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
#
# DISPLAYSURF.blit(gameSurf, gameRect)
# DISPLAYSURF.blit(overSurf, overRect)
# drawPressKeyMsg()
# pygame.display.update()
# pygame.time.wait(500)
# checkForKeyPress() # clear out any key presses in the event queue
#
# while True:
# if checkForKeyPress():
# pygame.event.get() # clear event queue
# return
# while True:
#
# # 這裡一直迴圈於開始遊戲和顯示遊戲結束畫面之間,
# # 運行遊戲裡有一個迴圈,顯示遊戲結束畫面也有一個迴圈
# # 兩個迴圈都有相應的return,這樣就可以達到切換這兩個模組的效果
#
# runGame() # 運行遊戲
#
# showGameOverScreen() # 顯示遊戲結束畫面
# for event in pygame.event.get(): # 事件處理
# if event.type == QUIT: # 退出事件
# terminate()
# elif event.type == KEYDOWN: # 按鍵事件
# #如果按下的是左鍵或a鍵,且當前的方向不是向右,就改變方向,以此類推
# if (event.key == K_LEFT or event.key == K_a) and direction != RIGHT:
# direction = LEFT
# elif (event.key == K_RIGHT or event.key == K_d) and direction != LEFT:
# direction = RIGHT
# elif (event.key == K_UP or event.key == K_w) and direction != DOWN:
# direction = UP
# elif (event.key == K_DOWN or event.key == K_s) and direction != UP:
# direction = DOWN
# elif event.key == K_ESCAPE:
# terminate()
# # 繪製提示消息
# def drawPressKeyMsg():
# pressKeySurf = BASICFONT.render('Press a key to play.', True, DARKGRAY)
# pressKeyRect = pressKeySurf.get_rect()
# pressKeyRect.topleft = (WINDOWWIDTH - 200, WINDOWHEIGHT - 30)
# DISPLAYSURF.blit(pressKeySurf, pressKeyRect)
#
# # 檢查按鍵是否有按鍵事件
# def checkForKeyPress():
# if len(pygame.event.get(QUIT)) > 0:
# terminate()
#
# keyUpEvents = pygame.event.get(KEYUP)
# if len(keyUpEvents) == 0:
# return None
# if keyUpEvents[0].key == K_ESCAPE:
# terminate()
# return keyUpEvents[0].key
# # 顯示開始畫面
# def showStartScreen():
#
# DISPLAYSURF.fill(BGCOLOR)
# titleFont = pygame.font.Font('freesansbold.ttf', 50)
# titleSurf = titleFont.render('Greedy Snake', True, GREEN)
# titleRect = titleSurf.get_rect()
# titleRect.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
# DISPLAYSURF.blit(titleSurf, titleRect)
# drawPressKeyMsg()
#
# pygame.display.update()
#
# while True:
#
# if checkForKeyPress():
# pygame.event.get() # clear event queue
# return
# # 退出
# def terminate():
# pygame.quit()
# sys.exit()
| GHT and se | identifier_name |
JS_Exercises.js | // // JS Variables 1-1: Create a variable called carName, assign the value Volvo to it.
// var carName = "Volvo";
// document.write(carName);
// console.log(carName);
// // JS Variables 2-1: Create a variable called x, assign the value 50 to it.
// var x = 50;
// document.write(x);
// console.log(x);
// // JS Variables 3-1: Display the sum of 5 + 10, using two variables: x and y.
// var x = 5;
// var y = 10;
// document.getElementById("demo").innerHTML = x + y;
// var test = document.getElementById("demo").innerHTML;
// var divText = document.getElementById("myDiv").innerHTML;
// console.log(test);
// console.log(divText);
// // JS Variables 4-1: Create a variable called z, assign x + y to it, and display the result in an alert box.
// var x = 5;
// var y = 10;
// var z = x + y;
// alert(z);
// JS Variables 5-1: On one single line, declare three variables with the following names and values:
// firstName = "John"
// lastName = "Doe"
// age = 35
// var firstName = "John", lastName = "Doe", age = 35;
// console.log(firstName, lastName, age);
// JS Operators 1-1: Multiply 10 with 5, and alert the result:
// alert(10*5);
// JS Operators 2-1: Divide 10 by 2, and alert the result:
// alert(10/2);
// JS Operators 3-1: Alert the remainder when 15 is divided by 9.
// alert(15%9);
// JS Operators 4-1: Use the correct assignment operator that will result in x being 15 (same as x = x + y).
// x = 10;
// y = 5;
// x += y;
// console.log(x);
// JS Operators 5-1: Use the correct assignment operator that will result in x being 50 (same as x = x * y).
// x = 10;
// y = 5;
// x *= y;
// console.log(x);
// JS Data Types 1-1: Use comments to describe the correct data type of the following variables:
// let length = 16 // number (int)
// let lastName = "Johnson"; // string
// const x = {
// firstName: "John",
// lastName: "Doe"
// }; // object
// JS Functions 1-1: Execute the function named myFunction.
// function myFunction() {
// alert("Hello World");
// };
// myFunction();
// JS Functions 2-1: Create a function called "myFunction".
// function myFunction() {
// };
// JS Functions 3-1: Make the function return "Hello".
// function myFunction() {
// return "Hello";
// };
// document.getElementById('demo').innerHTML = myFunction();
// JS Functions 4-1: Make the function display "Hello" in the inner HTML of an element with the ID "demo".
// function myFunction() {
// document.getElementById('demo').innerHTML = "Hello";
// };
// myFunction();
// JS Objects 1-1: Alert "John" by extracting information from the person object.
// const person = {
// firstName : "John",
// lastName : "Doe"
// };
// alert(person.firstName);
// JS Objects 2-1: Add the following property and value to the person object: country: Norway.
// const person = {
// firstName: 'John',
// lastName: 'Doe',
// country: 'Norway'
// };
// console.log(person.country);
// JS Objects 3-1: Create an object called person with name = John, age = 50.
// Then, access the object to alert("John is 50").
// const person = {
// name : "John",
// age : 50
// }
// alert(person.name + " is " + person.age);
// JS Events 1-1: The <button> element should do something when someone clicks on it. Try to fix it!
/* <button onclick = "alert('Hello')">Click me.</button> */
// JS Events 2-1: When the button is clicked, the function "myFunction" should be executed.
/* <button onclick = "myFunction()">Click me.</button> */
// JS Events 3-1: The <div> element should turn red when someone moves the mouse over it.
/* <div onmouseover="this.style.backgroundColor='red'">myDIV.</div> */
// JS Strings 1-1: Use the length property to alert the length of txt.
// let txt = "Hello World!";
// let x = txt.length;
// alert(x);
// JS Strings 2-1: Use escape characters to alert We are "Vikings".
// alert("We are \"Vikings\".");
// JS Strings 3-1: Concatenate the two strings to alert "Hello World!".
// let str1 = "Hello ";
// let str2 = "World!";
// alert(str1 + str2);
// JS String Methods 1-1: Find the position of the character h in the string txt.
// let txt = "abcdefghijklm";
// let pos = txt.indexOf("h");
// console.log(pos);
// alert(pos);
// JS String Methods 2-1: Use the slice method to return the word "bananas".
// let txt = "I can eat bananas all day";
// let x = txt.slice(10, 17);
// console.log(x);
// JS String Methods 3-1: Use the correct String method to replace the word "Hello" with the word "Welcome".
// let txt = "Hello World";
// txt = txt.replace("Hello", "Welcome");
// console.log(txt);
// JS String Methods 4-1: Convert the value of txt to upper case.
// let txt = "Hello World";
// txt = txt.toUpperCase();
// console.log(txt);
// JS String Methods 5-1: Convert the value of txt to lower case.
// let txt = "Hello World";
// txt = txt.toLowerCase();
// console.log(txt);
// JS Arrays 1-1: Get the value "Volvo" from the cars array.
// const cars = ["Saab", "Volvo", "BMW"];
// let x = cars[1];
// console.log(x);
// JS Arrays 2-1: Change the first item of cars to "Ford".
// const cars = ["Volvo", "Jeep", "Mercedes"];
// cars[0] = "Ford";
// console.log(cars[0]);
// JS Arrays 3-1: Alert the number of items in an array, using the correct Array property.
// const cars = ["Volvo", "Jeep", "Mercedes"];
// alert(cars.length);
// JS Array Methods 1-1: Use the correct Array method to remove the last item of the fruits array.
// const fruits = ["Banana", "Orange", "Apple"];
// fruits.pop();
// fruits.splice(-1); // alternative to pop(); use one or the other, not both
// console.log(fruits);
// JS Array Methods 2-1: Use the correct Array method to add "Kiwi" to the fruits array.
// const fruits = ["Banana", "Orange", "Apple"];
// fruits.push("Kiwi")
// console.log(fruits);
// JS Array Methods 3-1: Use the splice() method to remove "Orange" and "Apple" from fruits.
// const fruits = ["Banana", "Orange", "Apple", "Kiwi"];
// fruits.splice(1, 2);
// console.log(fruits);
// JS Array Sort 1-1: Use the correct Array method to sort the fruits array alphabetically.
// const fruits = ["Banana", "Orange", "Apple", "Kiwi"];
// fruits.sort();
// console.log(fruits);
// JS Dates 1-1: Create a Date object and alert the current date and time.
// const d = new Date();
// console.log(d);
// alert(d);
// JS Dates 2-1: Use the correct Date method to extract the year (four digits) out of a date object.
// const d = new Date();
// year = d.getFullYear();
// console.log(year);
// JS Dates 3-1: Use the correct Date method to get the month (0-11) out of a date object.
// const d = new Date();
// month = d.getMonth();
// console.log(month);
// JS Dates 4-1: Use the correct Date method to set the year of a date object to 2020.
// const d = new Date();
// d.setFullYear(2020);
// console.log(d);
// JS Math 1-1: Use the correct Math method to create a random number.
// let r = Math.random();
// console.log(r);
// JS Math 2-1: Use the correct Math method to return the largest number of 10 and 20.
// let x = Math.max(10, 20);
// console.log(x);
// JS Math 3-1: Use the correct Math method to round a number to the nearest integer.
// let x = Math.round(5.3);
// console.log(x);
// JS Math 4-1: Use the correct Math method to get the square root of 9.
// let x = Math.sqrt(9);
// console.log(x);
// JS Comparisons 1-1: Choose the correct comparison operator to alert true, when x is greater than y.
// let x = 10;
// let y = 5;
// alert(x > y);
// console.log(x > y);
// JS Comparisons 2-1: Choose the correct comparison operator to alert true, when x is equal to y.
// let x = 10;
// let y = 10;
// console.log(x == y);
// alert(x == y);
// JS Comparisons 3-1: Choose the correct comparison operator to alert true, when x is NOT equal to y.
// let x = 10;
// let y = 5;
// console.log(x != y);
// alert(x != y);
// JS Comparisons 4-1: Choose the correct conditional (ternary) operator to alert "Too young" if age is less than 18, otherwise alert "Old enough".
// var age = 20;
// var voteable = (age < 18) ? "Too young" : "Old enough";
// console.log(voteable);
// JS Conditions 1-1: Fix the if statement to alert "Hello World" if x is greater than y.
// var x = 100;
// var y = 15;
// if (x > y) {
// alert("Hello World");
// };
// JS Conditions 2-1: Fix the if statement to alert "Hello World" if x is greater than y, otherwise alert "Goodbye".
// var x = 8;
// var y = 9;
// if (x > y) {
// alert("Hello World");
// } else {
// alert("Goodbye");
// }
// JS Switch 1-1: Create a switch statement that will alert "Hello" if fruits is "banana", and "Welcome" if fruits is "apple".
// fruits = "Apples";
// if (fruits == "Banana") {
// console.log("Hello1");
// } else if (fruits == "Apple") {
// console.log("Welcome1");
// } else {
// console.log("No Match");
// }
// switch(fruits) {
// case "Banana":
// console.log("Hello")
// break;
// case "Apple":
// console.log("Welcome")
// break;
// }
// JS Switch 2-1: Add a section that will alert("Neither") if fruits is neither "banana" nor "apple".
// fruits = "Orange";
// switch (fruits) {
// case "Banana":
// console.log("Match B")
// break;
// case "Apple":
// console.log("Match A")
// break;
// default:
// console.log("Neither")
// }
// JS For Loops 1-1: Create a loop that runs from 0 to 9.
// let i;
// for (i = 0; i < 10; i++) {
// console.log(i);
// }
// JS For Loops 2-1: Create a loop that runs through each item in the fruits array.
// const fruits = ["Apple", "Banana", "Orange"];
// for (x in fruits) {
// console.log(fruits[x]); // x is the index with for...in, so index into the array to get the item
// }
// JS While Loops 1-1: Create a loop that runs as long as i is less than 10.
// let i = 0;
// while (i < 10) {
// console.log(i);
// i++
// };
// JS While Loops 2-1: Create a loop that runs as long as i is less than 10, but increase i with 2 each time.
// let i = 0;
// while (i < 10) {
// console.log(i);
// i = i + 2;
// };
// JS Break Loops 1-1: Make the loop stop when i is 5.
// for (i = 0; i < 10; i++) {
// console.log(i);
// if (i == 5) {
// break;
// }
// }
// JS Break Loops 2-1: Make the loop jump to the next iteration when i is 5.
// for (i = 0; i < 10; i++) {
// if (i == 5) {
// continue;
// }
// console.log(i);
// }
// JS HTML DOM 1-1: Use the getElementById method to find the <p> element, and change its text to "Hello".
/* <p id="demo">This is the text</p>
<script>document.getElementById("demo").innerHTML = "Hello";</script> */
// JS HTML DOM 2-1: Use the getElementsByTagName method to find the first <p> element, and change its text to "Hello".
/* <p id="demo">This is the text</p>
<script>document.getElementsByTagName("p")[0].innerHTML = "Hello";</script> */
// JS HTML DOM 3-1: Change the text of the first element that has the class name "test".
/* <div class="test">next exercise</div>
<script>document.getElementsByClassName("test")[0].innerHTML = "Hello";</script> */
// JS HTML DOM 4-1: Use HTML DOM to change the value of the image's src attribute.
/* <img id="image" src="smiley.gif">
<script>document.getElementById("image").src = "pic_mountain.jpg";</script> */
// JS HTML DOM 5-1: Use HTML DOM to change the value of the input field.
/* <input type="text" id="myText" value="Hello">
<script>document.getElementById("myText").value = "Have a nice day!";</script> */
// JS HTML DOM 6-1: Change the text color of the <p> element to "red".
/* <p id="demo">this is a paragraph</p>
<script>document.getElementById("demo").style.color = "red";</script> */
// JS HTML DOM 7-1: Change the font size of the p element to 40 pixels.
/* <p id="demo">Howdy World!!</p>
<script>document.getElementById("demo").style.fontSize = "40px";</script> */
// JS HTML DOM 8-1: Use the CSS display property to hide the p element.
/* <p id="demo">Still adding gibberish</p>
<script>document.getElementById("demo").style.display = "none";</script> */ | /* <button id="demo">CLICK ME!!</button>
<script>document.getElementById("demo").addEventListener("click", myFunction);</script> */ | // JS HTML DOM 9-1: Use the eventListener to assign an onclick event to the <button> element. | random_line_split |
http_ece.rs | use base64::{self, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::message::WebPushPayload;
use ring::rand::SecureRandom;
use ring::{aead::{self, BoundKey}, agreement, hkdf, rand};
use crate::vapid::VapidSignature;
pub enum ContentEncoding {
AesGcm,
Aes128Gcm,
}
pub struct HttpEce<'a> {
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
encoding: ContentEncoding,
rng: rand::SystemRandom,
vapid_signature: Option<VapidSignature>,
}
#[derive(Debug, PartialEq)]
struct EceKey<T: core::fmt::Debug + PartialEq>(T);
impl hkdf::KeyType for EceKey<usize> {
fn len(&self) -> usize {
self.0
}
}
impl From<hkdf::Okm<'_, EceKey<usize>>> for EceKey<Vec<u8>> {
fn from(okm: hkdf::Okm<EceKey<usize>>) -> Self {
let mut r = vec![0u8; okm.len().0];
okm.fill(&mut r).unwrap();
EceKey(r)
}
}
#[derive(Debug, PartialEq, Default)]
struct EceNonce {
used: bool,
nonce: Vec<u8>,
}
impl EceNonce {
fn fill(&mut self, nonce: Vec<u8>) {
self.nonce = nonce;
self.used = false;
}
}
impl aead::NonceSequence for EceNonce {
fn advance(&mut self) -> Result<aead::Nonce, ring::error::Unspecified> {
if self.used {
return Err(ring::error::Unspecified);
}
let mut nonce = [0u8; 12];
for (i, n) in self.nonce.iter().enumerate() {
if i >= 12 {
return Err(ring::error::Unspecified);
}
nonce[i] = *n;
}
self.used = true;
Ok(aead::Nonce::assume_unique_for_key(nonce))
}
}
impl<'a> HttpEce<'a> {
/// Create a new encryptor. The content encoding has preliminary support for
/// Aes128Gcm, which is the 8th draft of the Encrypted Content-Encoding, but
/// currently using it will return an error when trying to encrypt. There is
/// no real support yet for the encoding in web browsers.
///
/// `peer_public_key` is the `p256dh` and `peer_secret` the `auth` from
/// browser subscription info.
pub fn new(
encoding: ContentEncoding,
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
vapid_signature: Option<VapidSignature>,
) -> HttpEce<'a> {
HttpEce {
rng: rand::SystemRandom::new(),
peer_public_key: peer_public_key,
peer_secret: peer_secret,
encoding: encoding,
vapid_signature: vapid_signature,
}
}
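    // Usage sketch (the subscription values here are assumed base64url-decoded bytes):
    //   let ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh_bytes, &auth_bytes, None);
    //   let payload = ece.encrypt(b"hello")?; // -> WebPushPayload { content, crypto_headers, .. }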
    /// Encrypts a payload. The payload is limited to 3052 bytes here; it is
    /// front-padded into a fixed-size block before encryption, which keeps the
    /// resulting message within what Google's and Mozilla's push servers accept.
pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> {
if content.len() > 3052 {
return Err(WebPushError::PayloadTooLarge);
}
let private_key =
agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?;
let public_key = private_key.compute_public_key()?;
let mut salt_bytes = [0u8; 16];
self.rng.fill(&mut salt_bytes)?;
let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key);
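        // Perform ECDH against the browser's public key; the shared secret and the
        // fresh 16-byte salt generated above drive the HKDF derivations in aes_gcm.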
agreement::agree_ephemeral(
private_key,
&peer_public_key,
WebPushError::Unspecified,
|shared_secret| match self.encoding {
ContentEncoding::AesGcm => {
let mut payload = vec![0; 3054];
front_pad(content, &mut payload);
self.aes_gcm(
shared_secret,
public_key.as_ref(),
&salt_bytes,
&mut payload,
)?;
Ok(WebPushPayload {
content: payload.to_vec(),
crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes),
content_encoding: "aesgcm",
})
}
ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented),
},
) | }
pub fn generate_headers(
&self,
public_key: &'a [u8],
salt: &'a [u8],
) -> Vec<(&'static str, String)> {
let mut crypto_headers = Vec::new();
let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD));
if let Some(ref signature) = self.vapid_signature {
crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k);
let sig_s: String = signature.into();
crypto_headers.push(("Authorization", sig_s));
};
crypto_headers.push(("Crypto-Key", crypto_key));
crypto_headers.push((
"Encryption",
format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)),
));
crypto_headers
}
/// The aesgcm encrypted content-encoding, draft 3.
pub fn aes_gcm(
&self,
shared_secret: &'a [u8],
as_public_key: &'a [u8],
salt_bytes: &'a [u8],
payload: &'a mut Vec<u8>,
) -> Result<(), WebPushError> {
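        // Key-derivation context from the aesgcm draft: a "P-256" label followed by
        // the length-prefixed client (browser) public key and the length-prefixed
        // ephemeral server public key.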
let mut context = Vec::with_capacity(140);
context.extend_from_slice("P-256\0".as_bytes());
context.push((self.peer_public_key.len() >> 8) as u8);
context.push((self.peer_public_key.len() & 0xff) as u8);
context.extend_from_slice(self.peer_public_key);
context.push((as_public_key.len() >> 8) as u8);
context.push((as_public_key.len() & 0xff) as u8);
context.extend_from_slice(as_public_key);
let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret);
let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes);
let EceKey(prk) = client_auth_secret
.extract(shared_secret)
.expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32))
.unwrap()
.into();
let mut cek_info = Vec::with_capacity(165);
cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes());
cek_info.extend_from_slice(&context);
let EceKey(content_encryption_key) = salt
.extract(&prk)
.expand(&[&cek_info], EceKey(16))
.unwrap()
.into();
let mut nonce_info = Vec::with_capacity(164);
nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes());
nonce_info.extend_from_slice(&context);
let EceKey(nonce_bytes) = salt
.extract(&prk)
.expand(&[&nonce_info], EceKey(12))
.unwrap()
.into();
let mut nonce = EceNonce::default();
nonce.fill(nonce_bytes);
let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?;
let mut sealing_key = aead::SealingKey::new(unbound_key, nonce);
sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?;
Ok(())
}
}
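// Front-pads `payload` into `output`: a 2-byte big-endian padding length, then zero
// padding, then the payload itself, filling `output` exactly.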
fn front_pad(payload: &[u8], output: &mut [u8]) {
let payload_len = payload.len();
let max_payload = output.len() - 2;
let padding_size = max_payload - payload.len();
output[0] = (padding_size >> 8) as u8;
output[1] = (padding_size & 0xff) as u8;
for i in 0..payload_len {
output[padding_size + i + 2] = payload[i];
}
}
#[cfg(test)]
mod tests {
use base64::{self, URL_SAFE, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::http_ece::{front_pad, ContentEncoding, HttpEce};
use crate::vapid::VapidSignature;
#[test]
fn test_payload_too_big() {
let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let content = [0u8; 3801];
assert_eq!(
Err(WebPushError::PayloadTooLarge),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aes128gcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None);
let content = [0u8; 10];
assert_eq!(
Err(WebPushError::NotImplemented),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aesgcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let shared_secret =
base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE)
.unwrap();
let as_pubkey = base64::decode_config("BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let mut payload = "This is test data. XXX".as_bytes().to_vec();
http_ece
.aes_gcm(&shared_secret, &as_pubkey, &salt_bytes, &mut payload)
.unwrap();
assert_eq!(
"tmE7-emq6iasohjXNMue0i0vn5o7EIOyP-bKyDoM1teHLcLtg44",
base64::encode_config(&payload.to_vec(), URL_SAFE_NO_PAD)
);
}
#[test]
fn test_headers_with_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let vapid_signature = VapidSignature {
auth_t: String::from("foo"),
auth_k: String::from("bar"),
};
let http_ece = HttpEce::new(
ContentEncoding::AesGcm,
&p256dh,
&auth,
Some(vapid_signature),
);
assert_eq!(
vec![
("Authorization", "WebPush foo".to_string()),
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs; p256ecdsa=bar".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn test_headers_without_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
assert_eq!(
vec![
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn test_front_pad() {
        // front_pad writes the 2-byte padding length, then the zero padding, then the
        // content; the 16-byte AEAD tag is appended separately during encryption
let content = "naukio";
let mut output = [0u8; 30];
front_pad(content.as_bytes(), &mut output);
assert_eq!(
vec![0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 110, 97, 117, 107, 105, 111],
output
);
}
} | random_line_split |
|
http_ece.rs | use base64::{self, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::message::WebPushPayload;
use ring::rand::SecureRandom;
use ring::{aead::{self, BoundKey}, agreement, hkdf, rand};
use crate::vapid::VapidSignature;
pub enum ContentEncoding {
AesGcm,
Aes128Gcm,
}
pub struct HttpEce<'a> {
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
encoding: ContentEncoding,
rng: rand::SystemRandom,
vapid_signature: Option<VapidSignature>,
}
#[derive(Debug, PartialEq)]
struct EceKey<T: core::fmt::Debug + PartialEq>(T);
impl hkdf::KeyType for EceKey<usize> {
fn len(&self) -> usize {
self.0
}
}
impl From<hkdf::Okm<'_, EceKey<usize>>> for EceKey<Vec<u8>> {
fn from(okm: hkdf::Okm<EceKey<usize>>) -> Self {
let mut r = vec![0u8; okm.len().0];
okm.fill(&mut r).unwrap();
EceKey(r)
}
}
#[derive(Debug, PartialEq, Default)]
struct EceNonce {
used: bool,
nonce: Vec<u8>,
}
impl EceNonce {
fn fill(&mut self, nonce: Vec<u8>) {
self.nonce = nonce;
self.used = false;
}
}
impl aead::NonceSequence for EceNonce {
fn advance(&mut self) -> Result<aead::Nonce, ring::error::Unspecified> {
if self.used {
return Err(ring::error::Unspecified);
}
let mut nonce = [0u8; 12];
for (i, n) in self.nonce.iter().enumerate() {
if i >= 12 {
return Err(ring::error::Unspecified);
}
nonce[i] = *n;
}
self.used = true;
Ok(aead::Nonce::assume_unique_for_key(nonce))
}
}
impl<'a> HttpEce<'a> {
/// Create a new encryptor. The content encoding has preliminary support for
/// Aes128Gcm, which is the 8th draft of the Encrypted Content-Encoding, but
/// currently using it will return an error when trying to encrypt. There is
/// no real support yet for the encoding in web browsers.
///
/// `peer_public_key` is the `p256dh` and `peer_secret` the `auth` from
/// browser subscription info.
pub fn new(
encoding: ContentEncoding,
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
vapid_signature: Option<VapidSignature>,
) -> HttpEce<'a> {
HttpEce {
rng: rand::SystemRandom::new(),
peer_public_key: peer_public_key,
peer_secret: peer_secret,
encoding: encoding,
vapid_signature: vapid_signature,
}
}
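    // Usage sketch (the subscription values here are assumed base64url-decoded bytes):
    //   let ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh_bytes, &auth_bytes, None);
    //   let payload = ece.encrypt(b"hello")?; // -> WebPushPayload { content, crypto_headers, .. }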
    /// Encrypts a payload. The payload is limited to 3052 bytes here; it is
    /// front-padded into a fixed-size block before encryption, which keeps the
    /// resulting message within what Google's and Mozilla's push servers accept.
pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> {
if content.len() > 3052 {
return Err(WebPushError::PayloadTooLarge);
}
let private_key =
agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?;
let public_key = private_key.compute_public_key()?;
let mut salt_bytes = [0u8; 16];
self.rng.fill(&mut salt_bytes)?;
let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key);
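        // Perform ECDH against the browser's public key; the shared secret and the
        // fresh 16-byte salt generated above drive the HKDF derivations in aes_gcm.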
agreement::agree_ephemeral(
private_key,
&peer_public_key,
WebPushError::Unspecified,
|shared_secret| match self.encoding {
ContentEncoding::AesGcm => {
let mut payload = vec![0; 3054];
front_pad(content, &mut payload);
self.aes_gcm(
shared_secret,
public_key.as_ref(),
&salt_bytes,
&mut payload,
)?;
Ok(WebPushPayload {
content: payload.to_vec(),
crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes),
content_encoding: "aesgcm",
})
}
ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented),
},
)
}
pub fn generate_headers(
&self,
public_key: &'a [u8],
salt: &'a [u8],
) -> Vec<(&'static str, String)> {
let mut crypto_headers = Vec::new();
let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD));
if let Some(ref signature) = self.vapid_signature {
crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k);
let sig_s: String = signature.into();
crypto_headers.push(("Authorization", sig_s));
};
crypto_headers.push(("Crypto-Key", crypto_key));
crypto_headers.push((
"Encryption",
format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)),
));
crypto_headers
}
/// The aesgcm encrypted content-encoding, draft 3.
pub fn aes_gcm(
&self,
shared_secret: &'a [u8],
as_public_key: &'a [u8],
salt_bytes: &'a [u8],
payload: &'a mut Vec<u8>,
) -> Result<(), WebPushError> {
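        // Key-derivation context from the aesgcm draft: a "P-256" label followed by
        // the length-prefixed client (browser) public key and the length-prefixed
        // ephemeral server public key.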
let mut context = Vec::with_capacity(140);
context.extend_from_slice("P-256\0".as_bytes());
context.push((self.peer_public_key.len() >> 8) as u8);
context.push((self.peer_public_key.len() & 0xff) as u8);
context.extend_from_slice(self.peer_public_key);
context.push((as_public_key.len() >> 8) as u8);
context.push((as_public_key.len() & 0xff) as u8);
context.extend_from_slice(as_public_key);
let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret);
let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes);
let EceKey(prk) = client_auth_secret
.extract(shared_secret)
.expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32))
.unwrap()
.into();
let mut cek_info = Vec::with_capacity(165);
cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes());
cek_info.extend_from_slice(&context);
let EceKey(content_encryption_key) = salt
.extract(&prk)
.expand(&[&cek_info], EceKey(16))
.unwrap()
.into();
let mut nonce_info = Vec::with_capacity(164);
nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes());
nonce_info.extend_from_slice(&context);
let EceKey(nonce_bytes) = salt
.extract(&prk)
.expand(&[&nonce_info], EceKey(12))
.unwrap()
.into();
let mut nonce = EceNonce::default();
nonce.fill(nonce_bytes);
let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?;
let mut sealing_key = aead::SealingKey::new(unbound_key, nonce);
sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?;
Ok(())
}
}
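// Front-pads `payload` into `output`: a 2-byte big-endian padding length, then zero
// padding, then the payload itself, filling `output` exactly.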
fn front_pad(payload: &[u8], output: &mut [u8]) {
let payload_len = payload.len();
let max_payload = output.len() - 2;
let padding_size = max_payload - payload.len();
output[0] = (padding_size >> 8) as u8;
output[1] = (padding_size & 0xff) as u8;
for i in 0..payload_len {
output[padding_size + i + 2] = payload[i];
}
}
#[cfg(test)]
mod tests {
use base64::{self, URL_SAFE, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::http_ece::{front_pad, ContentEncoding, HttpEce};
use crate::vapid::VapidSignature;
#[test]
fn test_payload_too_big() {
let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let content = [0u8; 3801];
assert_eq!(
Err(WebPushError::PayloadTooLarge),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aes128gcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None);
let content = [0u8; 10];
assert_eq!(
Err(WebPushError::NotImplemented),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aesgcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let shared_secret =
base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE)
.unwrap();
let as_pubkey = base64::decode_config("BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let mut payload = "This is test data. XXX".as_bytes().to_vec();
http_ece
.aes_gcm(&shared_secret, &as_pubkey, &salt_bytes, &mut payload)
.unwrap();
assert_eq!(
"tmE7-emq6iasohjXNMue0i0vn5o7EIOyP-bKyDoM1teHLcLtg44",
base64::encode_config(&payload.to_vec(), URL_SAFE_NO_PAD)
);
}
#[test]
fn test_headers_with_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let vapid_signature = VapidSignature {
auth_t: String::from("foo"),
auth_k: String::from("bar"),
};
let http_ece = HttpEce::new(
ContentEncoding::AesGcm,
&p256dh,
&auth,
Some(vapid_signature),
);
assert_eq!(
vec![
("Authorization", "WebPush foo".to_string()),
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs; p256ecdsa=bar".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn test_headers_without_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
assert_eq!(
vec![
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn | () {
// writes the padding count in the beginning, zeroes, content and again space for the encryption tag
let content = "naukio";
let mut output = [0u8; 30];
front_pad(content.as_bytes(), &mut output);
assert_eq!(
vec![0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 110, 97, 117, 107, 105, 111],
output
);
}
}
| test_front_pad | identifier_name |
http_ece.rs | use base64::{self, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::message::WebPushPayload;
use ring::rand::SecureRandom;
use ring::{aead::{self, BoundKey}, agreement, hkdf, rand};
use crate::vapid::VapidSignature;
pub enum ContentEncoding {
AesGcm,
Aes128Gcm,
}
pub struct HttpEce<'a> {
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
encoding: ContentEncoding,
rng: rand::SystemRandom,
vapid_signature: Option<VapidSignature>,
}
#[derive(Debug, PartialEq)]
struct EceKey<T: core::fmt::Debug + PartialEq>(T);
impl hkdf::KeyType for EceKey<usize> {
fn len(&self) -> usize {
self.0
}
}
impl From<hkdf::Okm<'_, EceKey<usize>>> for EceKey<Vec<u8>> {
fn from(okm: hkdf::Okm<EceKey<usize>>) -> Self {
let mut r = vec![0u8; okm.len().0];
okm.fill(&mut r).unwrap();
EceKey(r)
}
}
#[derive(Debug, PartialEq, Default)]
struct EceNonce {
used: bool,
nonce: Vec<u8>,
}
impl EceNonce {
fn fill(&mut self, nonce: Vec<u8>) {
self.nonce = nonce;
self.used = false;
}
}
impl aead::NonceSequence for EceNonce {
fn advance(&mut self) -> Result<aead::Nonce, ring::error::Unspecified> {
if self.used {
return Err(ring::error::Unspecified);
}
let mut nonce = [0u8; 12];
for (i, n) in self.nonce.iter().enumerate() {
if i >= 12 {
return Err(ring::error::Unspecified);
}
nonce[i] = *n;
}
self.used = true;
Ok(aead::Nonce::assume_unique_for_key(nonce))
}
}
impl<'a> HttpEce<'a> {
/// Create a new encryptor. The content encoding has preliminary support for
/// Aes128Gcm, which is the 8th draft of the Encrypted Content-Encoding, but
/// currently using it will return an error when trying to encrypt. There is
/// no real support yet for the encoding in web browsers.
///
/// `peer_public_key` is the `p256dh` and `peer_secret` the `auth` from
/// browser subscription info.
pub fn new(
encoding: ContentEncoding,
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
vapid_signature: Option<VapidSignature>,
) -> HttpEce<'a> {
HttpEce {
rng: rand::SystemRandom::new(),
peer_public_key: peer_public_key,
peer_secret: peer_secret,
encoding: encoding,
vapid_signature: vapid_signature,
}
}
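    // Usage sketch (the subscription values here are assumed base64url-decoded bytes):
    //   let ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh_bytes, &auth_bytes, None);
    //   let payload = ece.encrypt(b"hello")?; // -> WebPushPayload { content, crypto_headers, .. }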
    /// Encrypts a payload. The payload is limited to 3052 bytes here; it is
    /// front-padded into a fixed-size block before encryption, which keeps the
    /// resulting message within what Google's and Mozilla's push servers accept.
pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> {
if content.len() > 3052 {
return Err(WebPushError::PayloadTooLarge);
}
let private_key =
agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?;
let public_key = private_key.compute_public_key()?;
let mut salt_bytes = [0u8; 16];
self.rng.fill(&mut salt_bytes)?;
let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key);
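        // Perform ECDH against the browser's public key; the shared secret and the
        // fresh 16-byte salt generated above drive the HKDF derivations in aes_gcm.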
agreement::agree_ephemeral(
private_key,
&peer_public_key,
WebPushError::Unspecified,
|shared_secret| match self.encoding {
ContentEncoding::AesGcm => {
let mut payload = vec![0; 3054];
front_pad(content, &mut payload);
self.aes_gcm(
shared_secret,
public_key.as_ref(),
&salt_bytes,
&mut payload,
)?;
Ok(WebPushPayload {
content: payload.to_vec(),
crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes),
content_encoding: "aesgcm",
})
}
ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented),
},
)
}
pub fn generate_headers(
&self,
public_key: &'a [u8],
salt: &'a [u8],
) -> Vec<(&'static str, String)> |
/// The aesgcm encrypted content-encoding, draft 3.
pub fn aes_gcm(
&self,
shared_secret: &'a [u8],
as_public_key: &'a [u8],
salt_bytes: &'a [u8],
payload: &'a mut Vec<u8>,
) -> Result<(), WebPushError> {
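        // Key-derivation context from the aesgcm draft: a "P-256" label followed by
        // the length-prefixed client (browser) public key and the length-prefixed
        // ephemeral server public key.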
let mut context = Vec::with_capacity(140);
context.extend_from_slice("P-256\0".as_bytes());
context.push((self.peer_public_key.len() >> 8) as u8);
context.push((self.peer_public_key.len() & 0xff) as u8);
context.extend_from_slice(self.peer_public_key);
context.push((as_public_key.len() >> 8) as u8);
context.push((as_public_key.len() & 0xff) as u8);
context.extend_from_slice(as_public_key);
let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret);
let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes);
let EceKey(prk) = client_auth_secret
.extract(shared_secret)
.expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32))
.unwrap()
.into();
let mut cek_info = Vec::with_capacity(165);
cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes());
cek_info.extend_from_slice(&context);
let EceKey(content_encryption_key) = salt
.extract(&prk)
.expand(&[&cek_info], EceKey(16))
.unwrap()
.into();
let mut nonce_info = Vec::with_capacity(164);
nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes());
nonce_info.extend_from_slice(&context);
let EceKey(nonce_bytes) = salt
.extract(&prk)
.expand(&[&nonce_info], EceKey(12))
.unwrap()
.into();
let mut nonce = EceNonce::default();
nonce.fill(nonce_bytes);
let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?;
let mut sealing_key = aead::SealingKey::new(unbound_key, nonce);
sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?;
Ok(())
}
}
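// Front-pads `payload` into `output`: a 2-byte big-endian padding length, then zero
// padding, then the payload itself, filling `output` exactly.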
fn front_pad(payload: &[u8], output: &mut [u8]) {
let payload_len = payload.len();
let max_payload = output.len() - 2;
let padding_size = max_payload - payload.len();
output[0] = (padding_size >> 8) as u8;
output[1] = (padding_size & 0xff) as u8;
for i in 0..payload_len {
output[padding_size + i + 2] = payload[i];
}
}
#[cfg(test)]
mod tests {
use base64::{self, URL_SAFE, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::http_ece::{front_pad, ContentEncoding, HttpEce};
use crate::vapid::VapidSignature;
#[test]
fn test_payload_too_big() {
let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let content = [0u8; 3801];
assert_eq!(
Err(WebPushError::PayloadTooLarge),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aes128gcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None);
let content = [0u8; 10];
assert_eq!(
Err(WebPushError::NotImplemented),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aesgcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let shared_secret =
base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE)
.unwrap();
let as_pubkey = base64::decode_config("BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let mut payload = "This is test data. XXX".as_bytes().to_vec();
http_ece
.aes_gcm(&shared_secret, &as_pubkey, &salt_bytes, &mut payload)
.unwrap();
assert_eq!(
"tmE7-emq6iasohjXNMue0i0vn5o7EIOyP-bKyDoM1teHLcLtg44",
base64::encode_config(&payload.to_vec(), URL_SAFE_NO_PAD)
);
}
#[test]
fn test_headers_with_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let vapid_signature = VapidSignature {
auth_t: String::from("foo"),
auth_k: String::from("bar"),
};
let http_ece = HttpEce::new(
ContentEncoding::AesGcm,
&p256dh,
&auth,
Some(vapid_signature),
);
assert_eq!(
vec![
("Authorization", "WebPush foo".to_string()),
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs; p256ecdsa=bar".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn test_headers_without_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
assert_eq!(
vec![
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn test_front_pad() {
        // front_pad writes the 2-byte padding length, then the zero padding, then the
        // content; the 16-byte AEAD tag is appended separately during encryption
let content = "naukio";
let mut output = [0u8; 30];
front_pad(content.as_bytes(), &mut output);
assert_eq!(
vec![0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 110, 97, 117, 107, 105, 111],
output
);
}
}
| {
let mut crypto_headers = Vec::new();
let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD));
if let Some(ref signature) = self.vapid_signature {
crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k);
let sig_s: String = signature.into();
crypto_headers.push(("Authorization", sig_s));
};
crypto_headers.push(("Crypto-Key", crypto_key));
crypto_headers.push((
"Encryption",
format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)),
));
crypto_headers
} | identifier_body |
http_ece.rs | use base64::{self, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::message::WebPushPayload;
use ring::rand::SecureRandom;
use ring::{aead::{self, BoundKey}, agreement, hkdf, rand};
use crate::vapid::VapidSignature;
pub enum ContentEncoding {
AesGcm,
Aes128Gcm,
}
pub struct HttpEce<'a> {
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
encoding: ContentEncoding,
rng: rand::SystemRandom,
vapid_signature: Option<VapidSignature>,
}
#[derive(Debug, PartialEq)]
struct EceKey<T: core::fmt::Debug + PartialEq>(T);
impl hkdf::KeyType for EceKey<usize> {
fn len(&self) -> usize {
self.0
}
}
impl From<hkdf::Okm<'_, EceKey<usize>>> for EceKey<Vec<u8>> {
fn from(okm: hkdf::Okm<EceKey<usize>>) -> Self {
let mut r = vec![0u8; okm.len().0];
okm.fill(&mut r).unwrap();
EceKey(r)
}
}
#[derive(Debug, PartialEq, Default)]
struct EceNonce {
used: bool,
nonce: Vec<u8>,
}
impl EceNonce {
fn fill(&mut self, nonce: Vec<u8>) {
self.nonce = nonce;
self.used = false;
}
}
impl aead::NonceSequence for EceNonce {
fn advance(&mut self) -> Result<aead::Nonce, ring::error::Unspecified> {
if self.used {
return Err(ring::error::Unspecified);
}
let mut nonce = [0u8; 12];
for (i, n) in self.nonce.iter().enumerate() {
if i >= 12 {
return Err(ring::error::Unspecified);
}
nonce[i] = *n;
}
self.used = true;
Ok(aead::Nonce::assume_unique_for_key(nonce))
}
}
impl<'a> HttpEce<'a> {
/// Create a new encryptor. The content encoding has preliminary support for
/// Aes128Gcm, which is the 8th draft of the Encrypted Content-Encoding, but
/// currently using it will return an error when trying to encrypt. There is
/// no real support yet for the encoding in web browsers.
///
/// `peer_public_key` is the `p256dh` and `peer_secret` the `auth` from
/// browser subscription info.
pub fn new(
encoding: ContentEncoding,
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
vapid_signature: Option<VapidSignature>,
) -> HttpEce<'a> {
HttpEce {
rng: rand::SystemRandom::new(),
peer_public_key: peer_public_key,
peer_secret: peer_secret,
encoding: encoding,
vapid_signature: vapid_signature,
}
}
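    // Usage sketch (the subscription values here are assumed base64url-decoded bytes):
    //   let ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh_bytes, &auth_bytes, None);
    //   let payload = ece.encrypt(b"hello")?; // -> WebPushPayload { content, crypto_headers, .. }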
    /// Encrypts a payload. The payload is limited to 3052 bytes here; it is
    /// front-padded into a fixed-size block before encryption, which keeps the
    /// resulting message within what Google's and Mozilla's push servers accept.
pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> {
if content.len() > 3052 |
let private_key =
agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?;
let public_key = private_key.compute_public_key()?;
let mut salt_bytes = [0u8; 16];
self.rng.fill(&mut salt_bytes)?;
let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key);
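        // Perform ECDH against the browser's public key; the shared secret and the
        // fresh 16-byte salt generated above drive the HKDF derivations in aes_gcm.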
agreement::agree_ephemeral(
private_key,
&peer_public_key,
WebPushError::Unspecified,
|shared_secret| match self.encoding {
ContentEncoding::AesGcm => {
let mut payload = vec![0; 3054];
front_pad(content, &mut payload);
self.aes_gcm(
shared_secret,
public_key.as_ref(),
&salt_bytes,
&mut payload,
)?;
Ok(WebPushPayload {
content: payload.to_vec(),
crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes),
content_encoding: "aesgcm",
})
}
ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented),
},
)
}
pub fn generate_headers(
&self,
public_key: &'a [u8],
salt: &'a [u8],
) -> Vec<(&'static str, String)> {
let mut crypto_headers = Vec::new();
let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD));
if let Some(ref signature) = self.vapid_signature {
crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k);
let sig_s: String = signature.into();
crypto_headers.push(("Authorization", sig_s));
};
crypto_headers.push(("Crypto-Key", crypto_key));
crypto_headers.push((
"Encryption",
format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)),
));
crypto_headers
}
/// The aesgcm encrypted content-encoding, draft 3.
pub fn aes_gcm(
&self,
shared_secret: &'a [u8],
as_public_key: &'a [u8],
salt_bytes: &'a [u8],
payload: &'a mut Vec<u8>,
) -> Result<(), WebPushError> {
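        // Key-derivation context from the aesgcm draft: a "P-256" label followed by
        // the length-prefixed client (browser) public key and the length-prefixed
        // ephemeral server public key.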
let mut context = Vec::with_capacity(140);
context.extend_from_slice("P-256\0".as_bytes());
context.push((self.peer_public_key.len() >> 8) as u8);
context.push((self.peer_public_key.len() & 0xff) as u8);
context.extend_from_slice(self.peer_public_key);
context.push((as_public_key.len() >> 8) as u8);
context.push((as_public_key.len() & 0xff) as u8);
context.extend_from_slice(as_public_key);
let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret);
let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes);
let EceKey(prk) = client_auth_secret
.extract(shared_secret)
.expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32))
.unwrap()
.into();
let mut cek_info = Vec::with_capacity(165);
cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes());
cek_info.extend_from_slice(&context);
let EceKey(content_encryption_key) = salt
.extract(&prk)
.expand(&[&cek_info], EceKey(16))
.unwrap()
.into();
let mut nonce_info = Vec::with_capacity(164);
nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes());
nonce_info.extend_from_slice(&context);
let EceKey(nonce_bytes) = salt
.extract(&prk)
.expand(&[&nonce_info], EceKey(12))
.unwrap()
.into();
let mut nonce = EceNonce::default();
nonce.fill(nonce_bytes);
let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?;
let mut sealing_key = aead::SealingKey::new(unbound_key, nonce);
sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?;
Ok(())
}
}
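// Front-pads `payload` into `output`: a 2-byte big-endian padding length, then zero
// padding, then the payload itself, filling `output` exactly.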
fn front_pad(payload: &[u8], output: &mut [u8]) {
let payload_len = payload.len();
let max_payload = output.len() - 2;
let padding_size = max_payload - payload.len();
output[0] = (padding_size >> 8) as u8;
output[1] = (padding_size & 0xff) as u8;
for i in 0..payload_len {
output[padding_size + i + 2] = payload[i];
}
}
#[cfg(test)]
mod tests {
use base64::{self, URL_SAFE, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::http_ece::{front_pad, ContentEncoding, HttpEce};
use crate::vapid::VapidSignature;
#[test]
fn test_payload_too_big() {
let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let content = [0u8; 3801];
assert_eq!(
Err(WebPushError::PayloadTooLarge),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aes128gcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None);
let content = [0u8; 10];
assert_eq!(
Err(WebPushError::NotImplemented),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aesgcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let shared_secret =
base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE)
.unwrap();
let as_pubkey = base64::decode_config("BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let mut payload = "This is test data. XXX".as_bytes().to_vec();
http_ece
.aes_gcm(&shared_secret, &as_pubkey, &salt_bytes, &mut payload)
.unwrap();
assert_eq!(
"tmE7-emq6iasohjXNMue0i0vn5o7EIOyP-bKyDoM1teHLcLtg44",
base64::encode_config(&payload.to_vec(), URL_SAFE_NO_PAD)
);
}
#[test]
fn test_headers_with_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let vapid_signature = VapidSignature {
auth_t: String::from("foo"),
auth_k: String::from("bar"),
};
let http_ece = HttpEce::new(
ContentEncoding::AesGcm,
&p256dh,
&auth,
Some(vapid_signature),
);
assert_eq!(
vec![
("Authorization", "WebPush foo".to_string()),
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs; p256ecdsa=bar".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn test_headers_without_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
assert_eq!(
vec![
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn test_front_pad() {
        // front_pad writes the 2-byte padding length, then the zero padding, then the
        // content; the 16-byte AEAD tag is appended separately during encryption
let content = "naukio";
let mut output = [0u8; 30];
front_pad(content.as_bytes(), &mut output);
assert_eq!(
vec![0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 110, 97, 117, 107, 105, 111],
output
);
}
}
| {
return Err(WebPushError::PayloadTooLarge);
} | conditional_block |
weights_manager.py | import torch
from torch import nn
from aw_nas import ops
from aw_nas.btcs.layer2.search_space import *
from aw_nas.weights_manager.base import BaseWeightsManager, CandidateNet
class Layer2CandidateNet(CandidateNet):
def __init__(self, supernet, rollout, eval_no_grad):
super().__init__(eval_no_grad)
self.supernet = supernet # type: Layer2MacroSupernet
self.rollout = rollout # type: Layer2Rollout
def begin_virtual(self):
raise NotImplementedError()
def forward(self, inputs):
return self.supernet.forward(inputs, self.rollout)
def _forward_with_params(self, *args, **kwargs):
raise NotImplementedError()
def get_device(self):
return self.supernet.device
class Layer2MacroSupernet(BaseWeightsManager, nn.Module):
NAME = "layer2_supernet"
def __init__(
self,
search_space, # type: Layer2SearchSpace
device,
rollout_type="layer2",
init_channels=16,
# classifier
num_classes=10,
dropout_rate=0.0,
max_grad_norm=None,
# stem
use_stem="conv_bn_3x3",
stem_stride=1,
stem_affine=True,
stem_multiplier=1,
# candidate
candidate_eval_no_grad=True,
# schedule
schedule_cfg=None,
):
super().__init__(search_space, device, rollout_type, schedule_cfg)
nn.Module.__init__(self)
self.macro_search_space = (
search_space.macro_search_space
) # type: StagewiseMacroSearchSpace
self.micro_search_space = (
search_space.micro_search_space
) # type: DenseMicroSearchSpace
self.num_cell_groups = self.macro_search_space.num_cell_groups
self.cell_layout = self.macro_search_space.cell_layout
self.reduce_cell_groups = self.macro_search_space.reduce_cell_groups
self.max_grad_norm = max_grad_norm
self.candidate_eval_no_grad = candidate_eval_no_grad
# make stem
self.use_stem = use_stem
if not self.use_stem:
c_stem = 3
elif isinstance(self.use_stem, (list, tuple)):
self.stem = []
c_stem = stem_multiplier * init_channels
for i, stem_type in enumerate(self.use_stem):
c_in = 3 if i == 0 else c_stem
self.stem.append(
ops.get_op(stem_type)(
c_in, c_stem, stride=stem_stride, affine=stem_affine
)
)
self.stem = nn.Sequential(*self.stem)
else:
c_stem = stem_multiplier * init_channels
self.stem = ops.get_op(self.use_stem)(
3, c_stem, stride=stem_stride, affine=stem_affine
)
# make cells
self.cells = nn.ModuleList()
num_channels = init_channels
prev_num_channels = c_stem
for i, cg in enumerate(self.cell_layout):
stride = 2 if cg in self.reduce_cell_groups else 1
num_channels *= stride
self.cells.append(
Layer2MicroCell(
prev_num_channels,
num_channels,
stride,
affine=True,
primitives=self.micro_search_space.primitives,
num_steps=self.micro_search_space.num_steps,
num_init_nodes=self.micro_search_space.num_init_nodes,
output_op=self.micro_search_space.concat_op,
postprocess_op="conv_1x1",
cell_shortcut=True,
cell_shortcut_op="skip_connect",
)
)
prev_num_channels = num_channels
# make pooling and classifier
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(dropout_rate) if dropout_rate else nn.Identity()
self.classifier = nn.Linear(prev_num_channels, num_classes)
self.to(self.device)
def forward(
self,
inputs,
rollout, # type: Layer2Rollout
):
macro_rollout = rollout.macro # type: StagewiseMacroRollout
micro_rollout = rollout.micro # type: DenseMicroRollout
overall_adj = self.macro_search_space.parse_overall_adj(macro_rollout)
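        # overall_adj is an adjacency matrix over [input, cell_0 .. cell_{N-1}, output];
        # row `to` marks which earlier states feed that node, and multiple inputs are summed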
# all cell outputs + input/output states
states = [None] * (len(self.cells) + 2) # type: list[torch.Tensor]
if self.use_stem:
states[0] = self.stem(inputs)
else:
states[0] = inputs
assert len(states) == len(overall_adj)
for to, froms in enumerate(overall_adj):
froms = np.nonzero(froms)[0]
if len(froms) == 0:
continue # no inputs to this cell
if any(states[i] is None for i in froms):
raise RuntimeError(
"Invalid compute graph. Cell output used before computed"
)
# all inputs to a cell are added
cell_idx = to - 1
cell_input = sum(states[i] for i in froms)
if cell_idx < len(self.cells):
cell_arch = micro_rollout.arch[self.cell_layout[cell_idx]]
states[to] = self.cells[cell_idx].forward(cell_input, cell_arch)
else:
states[to] = cell_input # the final output state
assert states[-1] is not None
out = self.pooling(states[-1]).squeeze()
out = self.dropout(out)
out = self.classifier(out)
return out
def assemble_candidate(self, rollout):
return Layer2CandidateNet(self, rollout, self.candidate_eval_no_grad)
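    # Usage sketch (how the rollout is obtained is assumed; any Layer2Rollout works):
    #   rollout = search_space.random_sample()
    #   cand = supernet.assemble_candidate(rollout)
    #   logits = cand.forward(images)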
def set_device(self, device):
self.device = device
self.to(device)
def step(self, gradients, optimizer):
self.zero_grad() # clear all gradients
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
if self.max_grad_norm is not None:
# clip the gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
def save(self, path):
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
def load(self, path):
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
@classmethod
def supported_data_types(cls):
return ["image"]
@classmethod
def supported_rollout_types(cls):
return ["layer2"]
class Layer2MicroCell(nn.Module):
def __init__(
self,
in_channels,
out_channels,
stride,
affine,
primitives,
num_steps,
num_init_nodes,
output_op="concat",
postprocess_op="conv_1x1",
cell_shortcut=False,
cell_shortcut_op="skip_connect",
):
super().__init__()
self.out_channels = out_channels
self.stride = stride
self.primitives = primitives
self.num_init_nodes = num_init_nodes
self.num_nodes = num_steps + num_init_nodes
self.output_op = output_op
        # keep a full num_nodes x num_nodes ModuleList (with None placeholders) so an
        # edge can be looked up directly by to * num_nodes + from
self.edges = nn.ModuleList()
for j in range(self.num_nodes):
for i in range(self.num_nodes):
if j > i:
if i < self.num_init_nodes:
self.edges.append(
Layer2MicroEdge(
primitives, in_channels, out_channels, stride, affine
)
)
else:
self.edges.append(
Layer2MicroEdge(
primitives, out_channels, out_channels, 1, affine
)
)
else:
self.edges.append(None)
if cell_shortcut and cell_shortcut_op != "none":
self.shortcut = ops.get_op(cell_shortcut_op)(
in_channels, out_channels, stride, affine
)
else:
self.shortcut = None
if self.output_op == "concat":
self.postprocess = ops.get_op(postprocess_op)(
out_channels * num_steps, out_channels, stride=1, affine=False
)
def forward(self, inputs, cell_arch):
# cell_arch shape: [#nodes, #nodes, #ops]
n, _, h, w = inputs.shape
node_outputs = [inputs] * self.num_init_nodes
for to in range(self.num_init_nodes, self.num_nodes):
froms = np.nonzero(cell_arch[to].sum(axis=1))[0]
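            # self.edges is a flat list indexed by to * num_nodes + from; entries with
            # from >= to are None placeholders, so only valid (from -> to) edges are used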
edge_indices = froms + (to * self.num_nodes)
if any(self.edges[i] is None for i in edge_indices):
raise RuntimeError(
"Invalid compute graph in cell. Cannot compute an edge where j <= i"
)
# outputs `to` this node `from` all used edges
edge_outputs = [
self.edges[edge_i](node_outputs[from_i], cell_arch[to, from_i])
for edge_i, from_i in zip(edge_indices, froms)
]
if len(edge_outputs) != 0:
node_outputs.append(sum(edge_outputs))
elif self.output_op == "concat":
# append fake outputs if required by concat
node_outputs.append(
torch.zeros(
n,
self.out_channels,
h // self.stride,
w // self.stride,
device=inputs.device,
)
)
node_outputs = node_outputs[self.num_init_nodes :]
if len(node_outputs) == 0:
# no node outputs (including fake outputs) in this cell
out = 0
elif self.output_op == "concat":
out = self.postprocess(torch.cat(node_outputs, dim=1))
elif self.output_op == "add":
out = sum(node_outputs)
else:
raise ValueError("Unknown cell output op `{}`".format(self.output_op))
if self.shortcut is not None:
out += self.shortcut(inputs)
return out
class Layer2MicroEdge(nn.Module):
def __init__(self, primitives, in_channels, out_channels, stride, affine):
super().__init__()
assert "none" not in primitives, "Edge should not have `none` primitive"
self.ops = nn.ModuleList(
ops.get_op(prim)(in_channels, out_channels, stride, affine)
for prim in primitives
)
def forward(self, inputs, edge_arch):
outputs = []
for op, use_op in zip(self.ops, edge_arch):
|
if len(outputs) != 0:
return sum(outputs)
else:
raise RuntimeError(
"Edge module does not handle the case where no op is "
"used. It should be handled in Cell and Edge.forward "
"should not be called"
)
| if use_op != 0:
outputs.append(op(inputs)) | conditional_block |
weights_manager.py | import torch
from torch import nn
from aw_nas import ops
from aw_nas.btcs.layer2.search_space import *
from aw_nas.weights_manager.base import BaseWeightsManager, CandidateNet
class Layer2CandidateNet(CandidateNet):
def __init__(self, supernet, rollout, eval_no_grad):
super().__init__(eval_no_grad)
self.supernet = supernet # type: Layer2MacroSupernet
self.rollout = rollout # type: Layer2Rollout
def begin_virtual(self):
|
def forward(self, inputs):
return self.supernet.forward(inputs, self.rollout)
def _forward_with_params(self, *args, **kwargs):
raise NotImplementedError()
def get_device(self):
return self.supernet.device
class Layer2MacroSupernet(BaseWeightsManager, nn.Module):
NAME = "layer2_supernet"
def __init__(
self,
search_space, # type: Layer2SearchSpace
device,
rollout_type="layer2",
init_channels=16,
# classifier
num_classes=10,
dropout_rate=0.0,
max_grad_norm=None,
# stem
use_stem="conv_bn_3x3",
stem_stride=1,
stem_affine=True,
stem_multiplier=1,
# candidate
candidate_eval_no_grad=True,
# schedule
schedule_cfg=None,
):
super().__init__(search_space, device, rollout_type, schedule_cfg)
nn.Module.__init__(self)
self.macro_search_space = (
search_space.macro_search_space
) # type: StagewiseMacroSearchSpace
self.micro_search_space = (
search_space.micro_search_space
) # type: DenseMicroSearchSpace
self.num_cell_groups = self.macro_search_space.num_cell_groups
self.cell_layout = self.macro_search_space.cell_layout
self.reduce_cell_groups = self.macro_search_space.reduce_cell_groups
self.max_grad_norm = max_grad_norm
self.candidate_eval_no_grad = candidate_eval_no_grad
# make stem
self.use_stem = use_stem
if not self.use_stem:
c_stem = 3
elif isinstance(self.use_stem, (list, tuple)):
self.stem = []
c_stem = stem_multiplier * init_channels
for i, stem_type in enumerate(self.use_stem):
c_in = 3 if i == 0 else c_stem
self.stem.append(
ops.get_op(stem_type)(
c_in, c_stem, stride=stem_stride, affine=stem_affine
)
)
self.stem = nn.Sequential(*self.stem)
else:
c_stem = stem_multiplier * init_channels
self.stem = ops.get_op(self.use_stem)(
3, c_stem, stride=stem_stride, affine=stem_affine
)
# make cells
self.cells = nn.ModuleList()
num_channels = init_channels
prev_num_channels = c_stem
for i, cg in enumerate(self.cell_layout):
stride = 2 if cg in self.reduce_cell_groups else 1
num_channels *= stride
self.cells.append(
Layer2MicroCell(
prev_num_channels,
num_channels,
stride,
affine=True,
primitives=self.micro_search_space.primitives,
num_steps=self.micro_search_space.num_steps,
num_init_nodes=self.micro_search_space.num_init_nodes,
output_op=self.micro_search_space.concat_op,
postprocess_op="conv_1x1",
cell_shortcut=True,
cell_shortcut_op="skip_connect",
)
)
prev_num_channels = num_channels
# make pooling and classifier
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(dropout_rate) if dropout_rate else nn.Identity()
self.classifier = nn.Linear(prev_num_channels, num_classes)
self.to(self.device)
def forward(
self,
inputs,
rollout, # type: Layer2Rollout
):
macro_rollout = rollout.macro # type: StagewiseMacroRollout
micro_rollout = rollout.micro # type: DenseMicroRollout
overall_adj = self.macro_search_space.parse_overall_adj(macro_rollout)
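# each row of overall_adj marks which earlier states feed that state (index 0 is the stem output, the last index is the final output)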
# all cell outputs + input/output states
states = [None] * (len(self.cells) + 2) # type: list[torch.Tensor]
if self.use_stem:
states[0] = self.stem(inputs)
else:
states[0] = inputs
assert len(states) == len(overall_adj)
for to, froms in enumerate(overall_adj):
froms = np.nonzero(froms)[0]
if len(froms) == 0:
continue # no inputs to this cell
if any(states[i] is None for i in froms):
raise RuntimeError(
"Invalid compute graph. Cell output used before computed"
)
# all inputs to a cell are added
cell_idx = to - 1
cell_input = sum(states[i] for i in froms)
if cell_idx < len(self.cells):
cell_arch = micro_rollout.arch[self.cell_layout[cell_idx]]
states[to] = self.cells[cell_idx].forward(cell_input, cell_arch)
else:
states[to] = cell_input # the final output state
assert states[-1] is not None
out = self.pooling(states[-1]).squeeze()
out = self.dropout(out)
out = self.classifier(out)
return out
def assemble_candidate(self, rollout):
return Layer2CandidateNet(self, rollout, self.candidate_eval_no_grad)
def set_device(self, device):
self.device = device
self.to(device)
def step(self, gradients, optimizer):
self.zero_grad() # clear all gradients
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
if self.max_grad_norm is not None:
# clip the gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
def save(self, path):
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
def load(self, path):
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
@classmethod
def supported_data_types(cls):
return ["image"]
@classmethod
def supported_rollout_types(cls):
return ["layer2"]
class Layer2MicroCell(nn.Module):
def __init__(
self,
in_channels,
out_channels,
stride,
affine,
primitives,
num_steps,
num_init_nodes,
output_op="concat",
postprocess_op="conv_1x1",
cell_shortcut=False,
cell_shortcut_op="skip_connect",
):
super().__init__()
self.out_channels = out_channels
self.stride = stride
self.primitives = primitives
self.num_init_nodes = num_init_nodes
self.num_nodes = num_steps + num_init_nodes
self.output_op = output_op
# it's easier to calc edge indices with a longer ModuleList and some None
self.edges = nn.ModuleList()
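# flat edge list indexed as to * num_nodes + from; slots with from >= to stay None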
for j in range(self.num_nodes):
for i in range(self.num_nodes):
if j > i:
if i < self.num_init_nodes:
self.edges.append(
Layer2MicroEdge(
primitives, in_channels, out_channels, stride, affine
)
)
else:
self.edges.append(
Layer2MicroEdge(
primitives, out_channels, out_channels, 1, affine
)
)
else:
self.edges.append(None)
if cell_shortcut and cell_shortcut_op != "none":
self.shortcut = ops.get_op(cell_shortcut_op)(
in_channels, out_channels, stride, affine
)
else:
self.shortcut = None
if self.output_op == "concat":
self.postprocess = ops.get_op(postprocess_op)(
out_channels * num_steps, out_channels, stride=1, affine=False
)
def forward(self, inputs, cell_arch):
# cell_arch shape: [#nodes, #nodes, #ops]
n, _, h, w = inputs.shape
node_outputs = [inputs] * self.num_init_nodes
for to in range(self.num_init_nodes, self.num_nodes):
froms = np.nonzero(cell_arch[to].sum(axis=1))[0]
edge_indices = froms + (to * self.num_nodes)
if any(self.edges[i] is None for i in edge_indices):
raise RuntimeError(
"Invalid compute graph in cell. Cannot compute an edge where j <= i"
)
# outputs `to` this node `from` all used edges
edge_outputs = [
self.edges[edge_i](node_outputs[from_i], cell_arch[to, from_i])
for edge_i, from_i in zip(edge_indices, froms)
]
if len(edge_outputs) != 0:
node_outputs.append(sum(edge_outputs))
elif self.output_op == "concat":
# append fake outputs if required by concat
node_outputs.append(
torch.zeros(
n,
self.out_channels,
h // self.stride,
w // self.stride,
device=inputs.device,
)
)
node_outputs = node_outputs[self.num_init_nodes :]
if len(node_outputs) == 0:
# no node outputs (including fake outputs) in this cell
out = 0
elif self.output_op == "concat":
out = self.postprocess(torch.cat(node_outputs, dim=1))
elif self.output_op == "add":
out = sum(node_outputs)
else:
raise ValueError("Unknown cell output op `{}`".format(self.output_op))
if self.shortcut is not None:
out += self.shortcut(inputs)
return out
class Layer2MicroEdge(nn.Module):
def __init__(self, primitives, in_channels, out_channels, stride, affine):
super().__init__()
assert "none" not in primitives, "Edge should not have `none` primitive"
self.ops = nn.ModuleList(
ops.get_op(prim)(in_channels, out_channels, stride, affine)
for prim in primitives
)
def forward(self, inputs, edge_arch):
outputs = []
for op, use_op in zip(self.ops, edge_arch):
if use_op != 0:
outputs.append(op(inputs))
if len(outputs) != 0:
return sum(outputs)
else:
raise RuntimeError(
"Edge module does not handle the case where no op is "
"used. It should be handled in Cell and Edge.forward "
"should not be called"
)
| raise NotImplementedError() | identifier_body |
weights_manager.py | import torch
from torch import nn
from aw_nas import ops
from aw_nas.btcs.layer2.search_space import *
from aw_nas.weights_manager.base import BaseWeightsManager, CandidateNet
class Layer2CandidateNet(CandidateNet):
def __init__(self, supernet, rollout, eval_no_grad):
super().__init__(eval_no_grad)
self.supernet = supernet # type: Layer2MacroSupernet
self.rollout = rollout # type: Layer2Rollout
def begin_virtual(self):
raise NotImplementedError()
def forward(self, inputs):
return self.supernet.forward(inputs, self.rollout)
def _forward_with_params(self, *args, **kwargs):
raise NotImplementedError()
def get_device(self):
return self.supernet.device
class Layer2MacroSupernet(BaseWeightsManager, nn.Module):
NAME = "layer2_supernet"
def __init__(
self,
search_space, # type: Layer2SearchSpace
device,
rollout_type="layer2",
init_channels=16,
# classifier
num_classes=10,
dropout_rate=0.0,
max_grad_norm=None,
# stem
use_stem="conv_bn_3x3",
stem_stride=1,
stem_affine=True,
stem_multiplier=1,
# candidate
candidate_eval_no_grad=True,
# schedule
schedule_cfg=None,
):
super().__init__(search_space, device, rollout_type, schedule_cfg)
nn.Module.__init__(self)
self.macro_search_space = (
search_space.macro_search_space
) # type: StagewiseMacroSearchSpace
self.micro_search_space = (
search_space.micro_search_space
) # type: DenseMicroSearchSpace
self.num_cell_groups = self.macro_search_space.num_cell_groups
self.cell_layout = self.macro_search_space.cell_layout
self.reduce_cell_groups = self.macro_search_space.reduce_cell_groups
self.max_grad_norm = max_grad_norm
self.candidate_eval_no_grad = candidate_eval_no_grad
# make stem
self.use_stem = use_stem
if not self.use_stem:
c_stem = 3
elif isinstance(self.use_stem, (list, tuple)):
self.stem = []
c_stem = stem_multiplier * init_channels
for i, stem_type in enumerate(self.use_stem):
c_in = 3 if i == 0 else c_stem
self.stem.append(
ops.get_op(stem_type)(
c_in, c_stem, stride=stem_stride, affine=stem_affine
)
)
self.stem = nn.Sequential(*self.stem)
else:
c_stem = stem_multiplier * init_channels
self.stem = ops.get_op(self.use_stem)(
3, c_stem, stride=stem_stride, affine=stem_affine
)
# make cells
self.cells = nn.ModuleList()
num_channels = init_channels
prev_num_channels = c_stem
for i, cg in enumerate(self.cell_layout):
stride = 2 if cg in self.reduce_cell_groups else 1
num_channels *= stride
self.cells.append(
Layer2MicroCell(
prev_num_channels,
num_channels,
stride,
affine=True,
primitives=self.micro_search_space.primitives,
num_steps=self.micro_search_space.num_steps,
num_init_nodes=self.micro_search_space.num_init_nodes,
output_op=self.micro_search_space.concat_op,
postprocess_op="conv_1x1",
cell_shortcut=True,
cell_shortcut_op="skip_connect",
)
)
prev_num_channels = num_channels
# make pooling and classifier
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(dropout_rate) if dropout_rate else nn.Identity()
self.classifier = nn.Linear(prev_num_channels, num_classes)
self.to(self.device)
def forward(
self,
inputs,
rollout, # type: Layer2Rollout
):
macro_rollout = rollout.macro # type: StagewiseMacroRollout
micro_rollout = rollout.micro # type: DenseMicroRollout
overall_adj = self.macro_search_space.parse_overall_adj(macro_rollout)
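# each row of overall_adj marks which earlier states feed that state (index 0 is the stem output, the last index is the final output)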
# all cell outputs + input/output states
states = [None] * (len(self.cells) + 2) # type: list[torch.Tensor]
if self.use_stem:
states[0] = self.stem(inputs)
else:
states[0] = inputs
assert len(states) == len(overall_adj)
for to, froms in enumerate(overall_adj):
froms = np.nonzero(froms)[0]
if len(froms) == 0:
continue # no inputs to this cell
if any(states[i] is None for i in froms):
raise RuntimeError(
"Invalid compute graph. Cell output used before computed"
)
# all inputs to a cell are added
cell_idx = to - 1
cell_input = sum(states[i] for i in froms)
if cell_idx < len(self.cells):
cell_arch = micro_rollout.arch[self.cell_layout[cell_idx]]
states[to] = self.cells[cell_idx].forward(cell_input, cell_arch)
else:
states[to] = cell_input # the final output state
assert states[-1] is not None
out = self.pooling(states[-1]).squeeze()
out = self.dropout(out)
out = self.classifier(out)
return out
def assemble_candidate(self, rollout):
return Layer2CandidateNet(self, rollout, self.candidate_eval_no_grad)
def set_device(self, device):
self.device = device
self.to(device)
def step(self, gradients, optimizer):
self.zero_grad() # clear all gradients
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
if self.max_grad_norm is not None:
# clip the gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
def save(self, path):
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
def load(self, path):
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
@classmethod
def supported_data_types(cls):
return ["image"]
@classmethod
def supported_rollout_types(cls):
return ["layer2"]
class Layer2MicroCell(nn.Module):
def __init__(
self,
in_channels,
out_channels,
stride,
affine,
primitives,
num_steps,
num_init_nodes,
output_op="concat",
postprocess_op="conv_1x1",
cell_shortcut=False,
cell_shortcut_op="skip_connect",
):
super().__init__()
self.out_channels = out_channels
self.stride = stride
self.primitives = primitives
self.num_init_nodes = num_init_nodes
self.num_nodes = num_steps + num_init_nodes
self.output_op = output_op
# it's easier to calc edge indices with a longer ModuleList and some None
self.edges = nn.ModuleList()
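# flat edge list indexed as to * num_nodes + from; slots with from >= to stay None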
for j in range(self.num_nodes):
for i in range(self.num_nodes):
if j > i:
if i < self.num_init_nodes:
self.edges.append(
Layer2MicroEdge(
primitives, in_channels, out_channels, stride, affine
)
)
else:
self.edges.append(
Layer2MicroEdge(
primitives, out_channels, out_channels, 1, affine
)
)
else:
self.edges.append(None)
if cell_shortcut and cell_shortcut_op != "none":
self.shortcut = ops.get_op(cell_shortcut_op)(
in_channels, out_channels, stride, affine
)
else:
self.shortcut = None
if self.output_op == "concat":
self.postprocess = ops.get_op(postprocess_op)(
out_channels * num_steps, out_channels, stride=1, affine=False
)
def forward(self, inputs, cell_arch):
# cell_arch shape: [#nodes, #nodes, #ops]
n, _, h, w = inputs.shape
node_outputs = [inputs] * self.num_init_nodes
for to in range(self.num_init_nodes, self.num_nodes):
froms = np.nonzero(cell_arch[to].sum(axis=1))[0]
edge_indices = froms + (to * self.num_nodes)
if any(self.edges[i] is None for i in edge_indices):
raise RuntimeError(
"Invalid compute graph in cell. Cannot compute an edge where j <= i"
)
# outputs `to` this node `from` all used edges
edge_outputs = [
self.edges[edge_i](node_outputs[from_i], cell_arch[to, from_i])
for edge_i, from_i in zip(edge_indices, froms)
]
if len(edge_outputs) != 0:
node_outputs.append(sum(edge_outputs))
elif self.output_op == "concat":
# append fake outputs if required by concat
node_outputs.append(
torch.zeros(
n,
self.out_channels,
h // self.stride,
w // self.stride,
device=inputs.device,
)
)
node_outputs = node_outputs[self.num_init_nodes :]
if len(node_outputs) == 0:
# no node outputs (including fake outputs) in this cell
out = 0
elif self.output_op == "concat":
out = self.postprocess(torch.cat(node_outputs, dim=1))
elif self.output_op == "add":
out = sum(node_outputs)
else:
raise ValueError("Unknown cell output op `{}`".format(self.output_op))
if self.shortcut is not None:
out += self.shortcut(inputs)
return out
class Layer2MicroEdge(nn.Module):
def __init__(self, primitives, in_channels, out_channels, stride, affine):
super().__init__()
assert "none" not in primitives, "Edge should not have `none` primitive"
self.ops = nn.ModuleList(
ops.get_op(prim)(in_channels, out_channels, stride, affine)
for prim in primitives
)
def | (self, inputs, edge_arch):
outputs = []
for op, use_op in zip(self.ops, edge_arch):
if use_op != 0:
outputs.append(op(inputs))
if len(outputs) != 0:
return sum(outputs)
else:
raise RuntimeError(
"Edge module does not handle the case where no op is "
"used. It should be handled in Cell and Edge.forward "
"should not be called"
)
| forward | identifier_name |
weights_manager.py | import torch
from torch import nn
from aw_nas import ops
from aw_nas.btcs.layer2.search_space import *
from aw_nas.weights_manager.base import BaseWeightsManager, CandidateNet
class Layer2CandidateNet(CandidateNet):
def __init__(self, supernet, rollout, eval_no_grad):
super().__init__(eval_no_grad)
self.supernet = supernet # type: Layer2MacroSupernet
self.rollout = rollout # type: Layer2Rollout
def begin_virtual(self):
raise NotImplementedError()
def forward(self, inputs):
return self.supernet.forward(inputs, self.rollout)
def _forward_with_params(self, *args, **kwargs):
raise NotImplementedError()
def get_device(self):
return self.supernet.device
class Layer2MacroSupernet(BaseWeightsManager, nn.Module):
NAME = "layer2_supernet"
def __init__(
self,
search_space, # type: Layer2SearchSpace
device,
rollout_type="layer2",
init_channels=16,
# classifier
num_classes=10,
dropout_rate=0.0,
max_grad_norm=None,
# stem
use_stem="conv_bn_3x3",
stem_stride=1,
stem_affine=True,
stem_multiplier=1,
# candidate
candidate_eval_no_grad=True,
# schedule
schedule_cfg=None,
):
super().__init__(search_space, device, rollout_type, schedule_cfg)
nn.Module.__init__(self)
self.macro_search_space = (
search_space.macro_search_space
) # type: StagewiseMacroSearchSpace
self.micro_search_space = (
search_space.micro_search_space
) # type: DenseMicroSearchSpace
self.num_cell_groups = self.macro_search_space.num_cell_groups
self.cell_layout = self.macro_search_space.cell_layout
self.reduce_cell_groups = self.macro_search_space.reduce_cell_groups
self.max_grad_norm = max_grad_norm
self.candidate_eval_no_grad = candidate_eval_no_grad
# make stem
self.use_stem = use_stem
if not self.use_stem:
c_stem = 3
elif isinstance(self.use_stem, (list, tuple)):
self.stem = []
c_stem = stem_multiplier * init_channels
for i, stem_type in enumerate(self.use_stem):
c_in = 3 if i == 0 else c_stem
self.stem.append(
ops.get_op(stem_type)(
c_in, c_stem, stride=stem_stride, affine=stem_affine
)
)
self.stem = nn.Sequential(*self.stem)
else:
c_stem = stem_multiplier * init_channels
self.stem = ops.get_op(self.use_stem)(
3, c_stem, stride=stem_stride, affine=stem_affine
)
# make cells
self.cells = nn.ModuleList()
num_channels = init_channels
prev_num_channels = c_stem
for i, cg in enumerate(self.cell_layout):
stride = 2 if cg in self.reduce_cell_groups else 1
num_channels *= stride
self.cells.append(
Layer2MicroCell(
prev_num_channels,
num_channels,
stride,
affine=True,
primitives=self.micro_search_space.primitives,
num_steps=self.micro_search_space.num_steps,
num_init_nodes=self.micro_search_space.num_init_nodes,
output_op=self.micro_search_space.concat_op,
postprocess_op="conv_1x1",
cell_shortcut=True,
cell_shortcut_op="skip_connect",
)
)
| self.classifier = nn.Linear(prev_num_channels, num_classes)
self.to(self.device)
def forward(
self,
inputs,
rollout, # type: Layer2Rollout
):
macro_rollout = rollout.macro # type: StagewiseMacroRollout
micro_rollout = rollout.micro # type: DenseMicroRollout
overall_adj = self.macro_search_space.parse_overall_adj(macro_rollout)
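# each row of overall_adj marks which earlier states feed that state (index 0 is the stem output, the last index is the final output)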
# all cell outputs + input/output states
states = [None] * (len(self.cells) + 2) # type: list[torch.Tensor]
if self.use_stem:
states[0] = self.stem(inputs)
else:
states[0] = inputs
assert len(states) == len(overall_adj)
for to, froms in enumerate(overall_adj):
froms = np.nonzero(froms)[0]
if len(froms) == 0:
continue # no inputs to this cell
if any(states[i] is None for i in froms):
raise RuntimeError(
"Invalid compute graph. Cell output used before computed"
)
# all inputs to a cell are added
cell_idx = to - 1
cell_input = sum(states[i] for i in froms)
if cell_idx < len(self.cells):
cell_arch = micro_rollout.arch[self.cell_layout[cell_idx]]
states[to] = self.cells[cell_idx].forward(cell_input, cell_arch)
else:
states[to] = cell_input # the final output state
assert states[-1] is not None
out = self.pooling(states[-1]).squeeze()
out = self.dropout(out)
out = self.classifier(out)
return out
def assemble_candidate(self, rollout):
return Layer2CandidateNet(self, rollout, self.candidate_eval_no_grad)
def set_device(self, device):
self.device = device
self.to(device)
def step(self, gradients, optimizer):
self.zero_grad() # clear all gradients
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
if self.max_grad_norm is not None:
# clip the gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
def save(self, path):
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
def load(self, path):
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
@classmethod
def supported_data_types(cls):
return ["image"]
@classmethod
def supported_rollout_types(cls):
return ["layer2"]
class Layer2MicroCell(nn.Module):
def __init__(
self,
in_channels,
out_channels,
stride,
affine,
primitives,
num_steps,
num_init_nodes,
output_op="concat",
postprocess_op="conv_1x1",
cell_shortcut=False,
cell_shortcut_op="skip_connect",
):
super().__init__()
self.out_channels = out_channels
self.stride = stride
self.primitives = primitives
self.num_init_nodes = num_init_nodes
self.num_nodes = num_steps + num_init_nodes
self.output_op = output_op
# it's easier to calc edge indices with a longer ModuleList and some None
self.edges = nn.ModuleList()
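# flat edge list indexed as to * num_nodes + from; slots with from >= to stay None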
for j in range(self.num_nodes):
for i in range(self.num_nodes):
if j > i:
if i < self.num_init_nodes:
self.edges.append(
Layer2MicroEdge(
primitives, in_channels, out_channels, stride, affine
)
)
else:
self.edges.append(
Layer2MicroEdge(
primitives, out_channels, out_channels, 1, affine
)
)
else:
self.edges.append(None)
if cell_shortcut and cell_shortcut_op != "none":
self.shortcut = ops.get_op(cell_shortcut_op)(
in_channels, out_channels, stride, affine
)
else:
self.shortcut = None
if self.output_op == "concat":
self.postprocess = ops.get_op(postprocess_op)(
out_channels * num_steps, out_channels, stride=1, affine=False
)
def forward(self, inputs, cell_arch):
# cell_arch shape: [#nodes, #nodes, #ops]
n, _, h, w = inputs.shape
node_outputs = [inputs] * self.num_init_nodes
for to in range(self.num_init_nodes, self.num_nodes):
froms = np.nonzero(cell_arch[to].sum(axis=1))[0]
edge_indices = froms + (to * self.num_nodes)
if any(self.edges[i] is None for i in edge_indices):
raise RuntimeError(
"Invalid compute graph in cell. Cannot compute an edge where j <= i"
)
# outputs `to` this node `from` all used edges
edge_outputs = [
self.edges[edge_i](node_outputs[from_i], cell_arch[to, from_i])
for edge_i, from_i in zip(edge_indices, froms)
]
if len(edge_outputs) != 0:
node_outputs.append(sum(edge_outputs))
elif self.output_op == "concat":
# append fake outputs if required by concat
node_outputs.append(
torch.zeros(
n,
self.out_channels,
h // self.stride,
w // self.stride,
device=inputs.device,
)
)
node_outputs = node_outputs[self.num_init_nodes :]
if len(node_outputs) == 0:
# no node outputs (including fake outputs) in this cell
out = 0
elif self.output_op == "concat":
out = self.postprocess(torch.cat(node_outputs, dim=1))
elif self.output_op == "add":
out = sum(node_outputs)
else:
raise ValueError("Unknown cell output op `{}`".format(self.output_op))
if self.shortcut is not None:
out += self.shortcut(inputs)
return out
class Layer2MicroEdge(nn.Module):
def __init__(self, primitives, in_channels, out_channels, stride, affine):
super().__init__()
assert "none" not in primitives, "Edge should not have `none` primitive"
self.ops = nn.ModuleList(
ops.get_op(prim)(in_channels, out_channels, stride, affine)
for prim in primitives
)
def forward(self, inputs, edge_arch):
outputs = []
for op, use_op in zip(self.ops, edge_arch):
if use_op != 0:
outputs.append(op(inputs))
if len(outputs) != 0:
return sum(outputs)
else:
raise RuntimeError(
"Edge module does not handle the case where no op is "
"used. It should be handled in Cell and Edge.forward "
"should not be called"
) | prev_num_channels = num_channels
# make pooling and classifier
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(dropout_rate) if dropout_rate else nn.Identity() | random_line_split |
downloadtaskmgr.go | package downloadtaskmgr
import (
"errors"
"io"
"math"
"net"
"net/http"
"os"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/daqnext/meson-common/common/logger"
)
type DownloadInfo struct {
TargetUrl string
BindName string
FileName string
Continent string
Country string
Area string
SavePath string
DownloadType string
OriginRegion string
TargetRegion string
}
type TaskStatus string
//const Task_Success TaskStatus = "success"
//const Task_Fail TaskStatus ="fail"
const Task_UnStart TaskStatus = "unstart"
const Task_Break TaskStatus = "break"
const Task_Downloading TaskStatus = "downloading"
type DownloadTask struct {
DownloadInfo
Id uint64
Status TaskStatus
FileSize int64
SpeedKBs float64
DownloadedSize int64
TryTimes int
StartTime int64
ZeroSpeedSec int
DownloadChannel *DownloadChannel
}
type TaskList struct {
TaskInQueue []DownloadTask
}
var currentId uint64
var idLock sync.Mutex
const GlobalDownloadTaskChanSize = 1024 * 10
var globalDownloadTaskChan = make(chan *DownloadTask, GlobalDownloadTaskChanSize)
var onTaskSuccess func(task *DownloadTask)
var onTaskFailed func(task *DownloadTask)
var panicCatcher func()
var onDownloadStart func(task *DownloadTask)
var onDownloading func(task *DownloadTask, usedTimeSec int)
type ExecResult string
const Success ExecResult = "Success"
const Fail ExecResult = "Fail"
const Break ExecResult = "Break"
type DownloadChannel struct {
SpeedLimitKBs int64
CountLimit int
RunningCountControlChan chan bool
IdleChan chan *DownloadTask
}
var DownloadingTaskMap sync.Map
var ChannelRunningSize = []int{10, 6, 3, 3, 2}
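// each channel re-queues tasks that were interrupted while downloading below its speed limit; RunningCountControlChan caps how many of them run at once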
var channelArray = []*DownloadChannel{
{SpeedLimitKBs: 30, CountLimit: ChannelRunningSize[0], RunningCountControlChan: make(chan bool, ChannelRunningSize[0]), IdleChan: make(chan *DownloadTask, 1024*5)}, //30KB/s
{SpeedLimitKBs: 100, CountLimit: ChannelRunningSize[1], RunningCountControlChan: make(chan bool, ChannelRunningSize[1]), IdleChan: make(chan *DownloadTask, 1024*5)}, //100KB/s
{SpeedLimitKBs: 500, CountLimit: ChannelRunningSize[2], RunningCountControlChan: make(chan bool, ChannelRunningSize[2]), IdleChan: make(chan *DownloadTask, 1024*5)}, //500KB/s
{SpeedLimitKBs: 1500, CountLimit: ChannelRunningSize[3], RunningCountControlChan: make(chan bool, ChannelRunningSize[3]), IdleChan: make(chan *DownloadTask, 1024*3)}, //1500KB/s
{SpeedLimitKBs: 2500, CountLimit: ChannelRunningSize[4], RunningCountControlChan: make(chan bool, ChannelRunningSize[4]), IdleChan: make(chan *DownloadTask, 1024*3)}, //2500KB/s
}
const NewRunningTaskCount = 7
var newRunningTaskControlChan = make(chan bool, NewRunningTaskCount)
func AddTaskToDownloadingMap(task *DownloadTask) {
DownloadingTaskMap.Store(task.Id, task)
}
func DeleteDownloadingTask(taskid uint64) {
DownloadingTaskMap.Delete(taskid)
}
type BySpeed []*DownloadTask
func (t BySpeed) Len() int { return len(t) }
func (t BySpeed) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t BySpeed) Less(i, j int) bool { return t[i].SpeedKBs < t[j].SpeedKBs }
func LoopScanRunningTask() {
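// when new tasks are waiting, mark up to 3 of the slowest running tasks (running for at least 5s) as Task_Break and assign each to a speed-matched channel for re-queueing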
newWaitingTaskCount := len(globalDownloadTaskChan)
//logger.Debug("Download waiting len","len",newWaitingTaskCount)
if newWaitingTaskCount <= 0 {
//logger.Debug("have no new task waiting")
return
}
killCount := 3
if newWaitingTaskCount < killCount {
killCount = newWaitingTaskCount
}
nowTime := time.Now().Unix()
taskReadyToKill := []*DownloadTask{}
DownloadingTaskMap.Range(func(key, value interface{}) bool {
task, ok := value.(*DownloadTask)
if ok {
//loop find task to kill
if nowTime-task.StartTime < 5 {
return true
}
for _, v := range channelArray {
if task.SpeedKBs < float64(v.SpeedLimitKBs) {
//if task.FileSize>0 {
// finishPercent:=task.DownloadedSize*100/task.FileSize
// if finishPercent>70 {
// break
// }
//}
task.DownloadChannel = v
taskReadyToKill = append(taskReadyToKill, task)
break
}
}
}
return true
})
if len(taskReadyToKill) == 0 {
return
}
sort.Sort(BySpeed(taskReadyToKill))
count := 0
for _, v := range taskReadyToKill {
v.Status = Task_Break
//logger.Debug("Break Task","id",v.Id)
count++
if count >= killCount {
return
}
}
}
func InitTaskMgr(rootPath string) {
LevelDBInit()
for _, v := range channelArray {
for i := 0; i < v.CountLimit; i++ {
v.RunningCountControlChan <- true
}
}
for i := 0; i < NewRunningTaskCount; i++ {
newRunningTaskControlChan <- true
}
//read unfinished task and restart
unFinishedTask := LoopTasksInLDB()
if unFinishedTask == nil {
return
}
for _, v := range unFinishedTask {
info := &DownloadInfo{}
info.TargetUrl = v.TargetUrl
info.BindName = v.BindName
info.FileName = v.FileName
info.Continent = v.Continent
info.Country = v.Country
info.Area = v.Area
info.SavePath = v.SavePath
err := AddGlobalDownloadTask(info)
if err != nil {
logger.Error("Add AddGlobalDownloadTask error")
}
}
}
func AddGlobalDownloadTask(info *DownloadInfo) error {
idLock.Lock()
if currentId >= math.MaxUint64 {
currentId = 0
}
currentId++
idLock.Unlock()
newTask := &DownloadTask{}
newTask.Id = currentId
newTask.TargetUrl = info.TargetUrl
newTask.BindName = info.BindName
newTask.FileName = info.FileName
newTask.Continent = info.Continent
newTask.Country = info.Country
newTask.Area = info.Area
newTask.DownloadType = info.DownloadType
newTask.OriginRegion = info.OriginRegion
newTask.TargetRegion = info.TargetRegion
newTask.SavePath = info.SavePath
newTask.Status = Task_UnStart
newTask.TryTimes = 0
go func() {
//save to LevelDB
SetTaskToLDB(newTask)
//to task channel
globalDownloadTaskChan <- newTask
}()
return nil
}
func SetPanicCatcher(function func()) {
panicCatcher = function
}
func SetOnTaskSuccess(function func(task *DownloadTask)) {
onTaskSuccess = function
}
func SetOnTaskFailed(function func(task *DownloadTask)) {
onTaskFailed = function
}
func SetOnDownloading(function func(task *DownloadTask, usedTimeSec int)) {
onDownloading = function
}
func SetOnDownloadStart(function func(task *DownloadTask)) {
onDownloadStart = function
}
func | () []*DownloadTask {
taskInLDB := LoopTasksInLDB()
if taskInLDB == nil {
return nil
}
list := []*DownloadTask{}
for _, v := range taskInLDB {
list = append(list, v)
}
return list
}
func TaskSuccess(task *DownloadTask) {
logger.Debug("Task Success", "id", task.Id)
//remove the task from LevelDB and the running map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskSuccess == nil {
logger.Error("not define onTaskSuccess")
return
}
onTaskSuccess(task)
}
func TaskFail(task *DownloadTask) {
logger.Debug("Task Fail", "id", task.Id)
//remove the task from LevelDB and the running map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskFailed == nil {
logger.Error("not define onTaskFailed")
return
}
onTaskFailed(task)
}
func TaskBreak(task *DownloadTask) {
logger.Debug("Task Break", "id", task.Id)
//delete from runningMap
DeleteDownloadingTask(task.Id)
task.Status = Task_UnStart
//add to queue
channel := task.DownloadChannel
if channel == nil {
logger.Error("Break Task not set channel,back to global list", "taskid", task.Id)
globalDownloadTaskChan <- task
return
}
channel.IdleChan <- task
logger.Debug("add break task to idleChan", "speedLimit", channel.SpeedLimitKBs, "chanLen", len(channel.IdleChan), "taskid", task.Id)
}
func TaskRetry(task *DownloadTask) {
logger.Debug("Task Retry", "id", task.Id)
DeleteDownloadingTask(task.Id)
task.TryTimes++
task.Status = Task_UnStart
globalDownloadTaskChan <- task
}
func StartTask(task *DownloadTask) {
if panicCatcher != nil {
defer panicCatcher()
}
result := ExecDownloadTask(task)
switch result {
case Success:
//logger.Debug("download task success", "id", task.Id)
TaskSuccess(task)
case Fail:
//logger.Debug("download task fail", "id", task.Id)
if task.TryTimes >= 2 {
TaskFail(task)
} else {
//put the task back into the queue and retry
TaskRetry(task)
}
case Break:
//logger.Debug("download task idle", "id", task.Id)
TaskBreak(task)
}
}
func (dc *DownloadChannel) ChannelDownload() {
go func() {
for true {
//take a token from this channel's queue
<-dc.RunningCountControlChan
select {
case task := <-dc.IdleChan:
go func() {
defer func() {
dc.RunningCountControlChan <- true
}()
logger.Debug("get a task from idle list", "channel speed", dc.SpeedLimitKBs, "id", task.Id, "chanlen", len(dc.IdleChan))
//execute the task
StartTask(task)
}()
}
}
}()
}
func Run() {
RunNewTask()
RunChannelDownload()
//scanloop
go func() {
if panicCatcher != nil {
defer panicCatcher()
}
for true {
time.Sleep(5 * time.Second)
LoopScanRunningTask()
}
}()
}
func RunChannelDownload() {
for _, v := range channelArray {
v.ChannelDownload()
}
}
func RunNewTask() {
go func() {
for true {
<-newRunningTaskControlChan
select {
case task := <-globalDownloadTaskChan:
//start a new download task
go func() {
//task finished, return the token
defer func() {
newRunningTaskControlChan <- true
}()
//execute the task
//logger.Debug("start a new task", "id", task.Id)
task.Status = Task_Downloading
AddTaskToDownloadingMap(task)
StartTask(task)
}()
}
}
}()
}
func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net.Conn, err error) {
return func(netw, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(netw, addr, cTimeout)
if err != nil {
return nil, err
}
if rwTimeout > 0 {
err := conn.SetDeadline(time.Now().Add(rwTimeout))
if err != nil {
logger.Error("set download process rwTimeout error", "err", err)
return nil, err
}
}
return conn, nil
}
}
func ExecDownloadTask(task *DownloadTask) ExecResult {
connectTimeout := 10 * time.Second
readWriteTimeout := 3600 * 12 * time.Second
//readWriteTimeout := time.Duration(0)
url := task.TargetUrl
distFilePath := task.SavePath
cHead := http.Client{
Transport: &http.Transport{
Dial: TimeoutDialer(connectTimeout, readWriteTimeout),
},
}
//HEAD request to read the content length
reqHead, err := http.NewRequest(http.MethodHead, url, nil)
if err == nil {
responseHead, err := cHead.Do(reqHead)
if err == nil {
if responseHead.StatusCode == 200 && responseHead.ContentLength > 0 {
task.FileSize = responseHead.ContentLength
}
}
}
//http client
c := http.Client{
Transport: &http.Transport{
Dial: TimeoutDialer(connectTimeout, readWriteTimeout),
},
}
//get
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
logger.Error("create request error", "err", err)
return Fail
}
//download
response, err := c.Do(req)
if err != nil {
logger.Error("get file url "+url+" error", "err", err)
return Fail
}
if response.StatusCode != 200 {
logger.Error("get file url "+url+" error", "err", err, "statusCode", response.StatusCode)
return Fail
}
//create folder and file
distDir := path.Dir(distFilePath)
err = os.MkdirAll(distDir, os.ModePerm)
if err != nil {
return Fail
}
file, err := os.Create(distFilePath)
if err != nil {
return Fail
}
defer file.Close()
if response.Body == nil {
logger.Error("Download responseBody is null")
return Fail
}
defer response.Body.Close()
task.StartTime = time.Now().Unix()
if onDownloadStart != nil {
go onDownloadStart(task)
}
_, err = copyBuffer(file, response.Body, nil, task)
if err != nil {
os.Remove(distFilePath)
if err.Error() == string(Break) {
//logger.Debug("task break","id",task.Id)
return Break
}
return Fail
}
fileInfo, err := os.Stat(distFilePath)
if err != nil {
logger.Error("Get file Stat error", "err", err)
os.Remove(distFilePath)
return Fail
}
size := fileInfo.Size()
logger.Debug("donwload file,fileInfo", "size", size)
if size == 0 {
os.Remove(distFilePath)
logger.Error("download file size error")
return Fail
}
return Success
}
func copyBuffer(dst io.Writer, src io.Reader, buf []byte, task *DownloadTask) (written int64, err error) {
// If the reader has a WriteTo method, use it to do the copy.
// Avoids an allocation and a copy.
if wt, ok := src.(io.WriterTo); ok {
return wt.WriteTo(dst)
}
// Similarly, if the writer has a ReadFrom method, use it to do the copy.
if rt, ok := dst.(io.ReaderFrom); ok {
return rt.ReadFrom(src)
}
if buf == nil {
size := 32 * 1024
if l, ok := src.(*io.LimitedReader); ok && int64(size) > l.N {
if l.N < 1 {
size = 1
} else {
size = int(l.N)
}
}
buf = make([]byte, size)
}
stop := false
srcWithCloser, ok := src.(io.ReadCloser)
if ok == false {
err = errors.New("to io.ReadCloser error")
return written, err
}
go func() {
for {
time.Sleep(500 * time.Millisecond) //for test
nr, er := srcWithCloser.Read(buf)
if nr > 0 {
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
//fmt.Println(ew.Error())
if task.Status == Task_Break &&
(strings.Contains(err.Error(), "http: read on closed response body") ||
strings.Contains(err.Error(), "use of closed network connection")) {
err = errors.New(string(Break))
}
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er != nil {
if er != io.EOF {
err = er
//errStr:=err.Error()
//fmt.Println(errStr)
if task.Status == Task_Break &&
(strings.Contains(err.Error(), "http: read on closed response body") ||
strings.Contains(err.Error(), "use of closed network connection")) {
err = errors.New(string(Break))
}
}
break
}
}
stop = true
}()
//monitor download speed
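// the 1s ticker below updates progress/speed and closes the source once the task is marked Task_Break, which makes the reader goroutine above exit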
spaceTime := time.Millisecond * 1000
ticker := time.NewTicker(spaceTime)
//lastWtn := int64(0)
count := 0
for {
count++
if stop {
break
}
select {
case <-ticker.C:
if task.Status == Task_Break {
srcWithCloser.Close()
}
task.DownloadedSize = written
useTime := count * 1000
speed := float64(written) / float64(useTime)
task.SpeedKBs = speed
//reportDownloadState
if onDownloading != nil {
go onDownloading(task, useTime)
}
}
}
return written, err
}
| GetDownloadTaskList | identifier_name |
downloadtaskmgr.go | package downloadtaskmgr
import (
"errors"
"io"
"math"
"net"
"net/http"
"os"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/daqnext/meson-common/common/logger"
)
| Continent string
Country string
Area string
SavePath string
DownloadType string
OriginRegion string
TargetRegion string
}
type TaskStatus string
//const Task_Success TaskStatus = "success"
//const Task_Fail TaskStatus ="fail"
const Task_UnStart TaskStatus = "unstart"
const Task_Break TaskStatus = "break"
const Task_Downloading TaskStatus = "downloading"
type DownloadTask struct {
DownloadInfo
Id uint64
Status TaskStatus
FileSize int64
SpeedKBs float64
DownloadedSize int64
TryTimes int
StartTime int64
ZeroSpeedSec int
DownloadChannel *DownloadChannel
}
type TaskList struct {
TaskInQueue []DownloadTask
}
var currentId uint64
var idLock sync.Mutex
const GlobalDownloadTaskChanSize = 1024 * 10
var globalDownloadTaskChan = make(chan *DownloadTask, GlobalDownloadTaskChanSize)
var onTaskSuccess func(task *DownloadTask)
var onTaskFailed func(task *DownloadTask)
var panicCatcher func()
var onDownloadStart func(task *DownloadTask)
var onDownloading func(task *DownloadTask, usedTimeSec int)
type ExecResult string
const Success ExecResult = "Success"
const Fail ExecResult = "Fail"
const Break ExecResult = "Break"
type DownloadChannel struct {
SpeedLimitKBs int64
CountLimit int
RunningCountControlChan chan bool
IdleChan chan *DownloadTask
}
var DownloadingTaskMap sync.Map
var ChannelRunningSize = []int{10, 6, 3, 3, 2}
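// each channel re-queues tasks that were interrupted while downloading below its speed limit; RunningCountControlChan caps how many of them run at once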
var channelArray = []*DownloadChannel{
{SpeedLimitKBs: 30, CountLimit: ChannelRunningSize[0], RunningCountControlChan: make(chan bool, ChannelRunningSize[0]), IdleChan: make(chan *DownloadTask, 1024*5)}, //30KB/s
{SpeedLimitKBs: 100, CountLimit: ChannelRunningSize[1], RunningCountControlChan: make(chan bool, ChannelRunningSize[1]), IdleChan: make(chan *DownloadTask, 1024*5)}, //100KB/s
{SpeedLimitKBs: 500, CountLimit: ChannelRunningSize[2], RunningCountControlChan: make(chan bool, ChannelRunningSize[2]), IdleChan: make(chan *DownloadTask, 1024*5)}, //500KB/s
{SpeedLimitKBs: 1500, CountLimit: ChannelRunningSize[3], RunningCountControlChan: make(chan bool, ChannelRunningSize[3]), IdleChan: make(chan *DownloadTask, 1024*3)}, //1500KB/s
{SpeedLimitKBs: 2500, CountLimit: ChannelRunningSize[4], RunningCountControlChan: make(chan bool, ChannelRunningSize[4]), IdleChan: make(chan *DownloadTask, 1024*3)}, //2500KB/s
}
const NewRunningTaskCount = 7
var newRunningTaskControlChan = make(chan bool, NewRunningTaskCount)
func AddTaskToDownloadingMap(task *DownloadTask) {
DownloadingTaskMap.Store(task.Id, task)
}
func DeleteDownloadingTask(taskid uint64) {
DownloadingTaskMap.Delete(taskid)
}
type BySpeed []*DownloadTask
func (t BySpeed) Len() int { return len(t) }
func (t BySpeed) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t BySpeed) Less(i, j int) bool { return t[i].SpeedKBs < t[j].SpeedKBs }
func LoopScanRunningTask() {
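// when new tasks are waiting, mark up to 3 of the slowest running tasks (running for at least 5s) as Task_Break and assign each to a speed-matched channel for re-queueing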
newWaitingTaskCount := len(globalDownloadTaskChan)
//logger.Debug("Download waiting len","len",newWaitingTaskCount)
if newWaitingTaskCount <= 0 {
//logger.Debug("have no new task waiting")
return
}
killCount := 3
if newWaitingTaskCount < killCount {
killCount = newWaitingTaskCount
}
nowTime := time.Now().Unix()
taskReadyToKill := []*DownloadTask{}
DownloadingTaskMap.Range(func(key, value interface{}) bool {
task, ok := value.(*DownloadTask)
if ok {
//loop find task to kill
if nowTime-task.StartTime < 5 {
return true
}
for _, v := range channelArray {
if task.SpeedKBs < float64(v.SpeedLimitKBs) {
//if task.FileSize>0 {
// finishPercent:=task.DownloadedSize*100/task.FileSize
// if finishPercent>70 {
// break
// }
//}
task.DownloadChannel = v
taskReadyToKill = append(taskReadyToKill, task)
break
}
}
}
return true
})
if len(taskReadyToKill) == 0 {
return
}
sort.Sort(BySpeed(taskReadyToKill))
count := 0
for _, v := range taskReadyToKill {
v.Status = Task_Break
//logger.Debug("Break Task","id",v.Id)
count++
if count >= killCount {
return
}
}
}
func InitTaskMgr(rootPath string) {
LevelDBInit()
for _, v := range channelArray {
for i := 0; i < v.CountLimit; i++ {
v.RunningCountControlChan <- true
}
}
for i := 0; i < NewRunningTaskCount; i++ {
newRunningTaskControlChan <- true
}
//read unfinished task and restart
unFinishedTask := LoopTasksInLDB()
if unFinishedTask == nil {
return
}
for _, v := range unFinishedTask {
info := &DownloadInfo{}
info.TargetUrl = v.TargetUrl
info.BindName = v.BindName
info.FileName = v.FileName
info.Continent = v.Continent
info.Country = v.Country
info.Area = v.Area
info.SavePath = v.SavePath
err := AddGlobalDownloadTask(info)
if err != nil {
logger.Error("Add AddGlobalDownloadTask error")
}
}
}
func AddGlobalDownloadTask(info *DownloadInfo) error {
idLock.Lock()
if currentId >= math.MaxUint64 {
currentId = 0
}
currentId++
idLock.Unlock()
newTask := &DownloadTask{}
newTask.Id = currentId
newTask.TargetUrl = info.TargetUrl
newTask.BindName = info.BindName
newTask.FileName = info.FileName
newTask.Continent = info.Continent
newTask.Country = info.Country
newTask.Area = info.Area
newTask.DownloadType = info.DownloadType
newTask.OriginRegion = info.OriginRegion
newTask.TargetRegion = info.TargetRegion
newTask.SavePath = info.SavePath
newTask.Status = Task_UnStart
newTask.TryTimes = 0
go func() {
//save to LevelDB
SetTaskToLDB(newTask)
//to task channel
globalDownloadTaskChan <- newTask
}()
return nil
}
func SetPanicCatcher(function func()) {
panicCatcher = function
}
func SetOnTaskSuccess(function func(task *DownloadTask)) {
onTaskSuccess = function
}
func SetOnTaskFailed(function func(task *DownloadTask)) {
onTaskFailed = function
}
func SetOnDownloading(function func(task *DownloadTask, usedTimeSec int)) {
onDownloading = function
}
func SetOnDownloadStart(function func(task *DownloadTask)) {
onDownloadStart = function
}
func GetDownloadTaskList() []*DownloadTask {
taskInLDB := LoopTasksInLDB()
if taskInLDB == nil {
return nil
}
list := []*DownloadTask{}
for _, v := range taskInLDB {
list = append(list, v)
}
return list
}
func TaskSuccess(task *DownloadTask) {
logger.Debug("Task Success", "id", task.Id)
//remove the task from LevelDB and the running map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskSuccess == nil {
logger.Error("not define onTaskSuccess")
return
}
onTaskSuccess(task)
}
func TaskFail(task *DownloadTask) {
logger.Debug("Task Fail", "id", task.Id)
//remove the task from LevelDB and the running map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskFailed == nil {
logger.Error("not define onTaskFailed")
return
}
onTaskFailed(task)
}
func TaskBreak(task *DownloadTask) {
logger.Debug("Task Break", "id", task.Id)
//delete from runningMap
DeleteDownloadingTask(task.Id)
task.Status = Task_UnStart
//add to queue
channel := task.DownloadChannel
if channel == nil {
logger.Error("Break Task not set channel,back to global list", "taskid", task.Id)
globalDownloadTaskChan <- task
return
}
channel.IdleChan <- task
logger.Debug("add break task to idleChan", "speedLimit", channel.SpeedLimitKBs, "chanLen", len(channel.IdleChan), "taskid", task.Id)
}
func TaskRetry(task *DownloadTask) {
logger.Debug("Task Retry", "id", task.Id)
DeleteDownloadingTask(task.Id)
task.TryTimes++
task.Status = Task_UnStart
globalDownloadTaskChan <- task
}
func StartTask(task *DownloadTask) {
if panicCatcher != nil {
defer panicCatcher()
}
result := ExecDownloadTask(task)
switch result {
case Success:
//logger.Debug("download task success", "id", task.Id)
TaskSuccess(task)
case Fail:
//logger.Debug("download task fail", "id", task.Id)
if task.TryTimes >= 2 {
TaskFail(task)
} else {
//put the task back into the queue and retry
TaskRetry(task)
}
case Break:
//logger.Debug("download task idle", "id", task.Id)
TaskBreak(task)
}
}
func (dc *DownloadChannel) ChannelDownload() {
go func() {
for true {
//take a token from this channel's queue
<-dc.RunningCountControlChan
select {
case task := <-dc.IdleChan:
go func() {
defer func() {
dc.RunningCountControlChan <- true
}()
logger.Debug("get a task from idle list", "channel speed", dc.SpeedLimitKBs, "id", task.Id, "chanlen", len(dc.IdleChan))
//execute the task
StartTask(task)
}()
}
}
}()
}
func Run() {
RunNewTask()
RunChannelDownload()
//scanloop
go func() {
if panicCatcher != nil {
defer panicCatcher()
}
for true {
time.Sleep(5 * time.Second)
LoopScanRunningTask()
}
}()
}
func RunChannelDownload() {
for _, v := range channelArray {
v.ChannelDownload()
}
}
func RunNewTask() {
go func() {
for true {
<-newRunningTaskControlChan
select {
case task := <-globalDownloadTaskChan:
//start a new download task
go func() {
//task finished, return the token
defer func() {
newRunningTaskControlChan <- true
}()
//execute the task
//logger.Debug("start a new task", "id", task.Id)
task.Status = Task_Downloading
AddTaskToDownloadingMap(task)
StartTask(task)
}()
}
}
}()
}
func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net.Conn, err error) {
return func(netw, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(netw, addr, cTimeout)
if err != nil {
return nil, err
}
if rwTimeout > 0 {
err := conn.SetDeadline(time.Now().Add(rwTimeout))
if err != nil {
logger.Error("set download process rwTimeout error", "err", err)
return nil, err
}
}
return conn, nil
}
}
func ExecDownloadTask(task *DownloadTask) ExecResult {
connectTimeout := 10 * time.Second
readWriteTimeout := 3600 * 12 * time.Second
//readWriteTimeout := time.Duration(0)
url := task.TargetUrl
distFilePath := task.SavePath
cHead := http.Client{
Transport: &http.Transport{
Dial: TimeoutDialer(connectTimeout, readWriteTimeout),
},
}
//HEAD request to read the content length
reqHead, err := http.NewRequest(http.MethodHead, url, nil)
if err == nil {
responseHead, err := cHead.Do(reqHead)
if err == nil {
if responseHead.StatusCode == 200 && responseHead.ContentLength > 0 {
task.FileSize = responseHead.ContentLength
}
}
}
//http client
c := http.Client{
Transport: &http.Transport{
Dial: TimeoutDialer(connectTimeout, readWriteTimeout),
},
}
//get
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
logger.Error("create request error", "err", err)
return Fail
}
//download
response, err := c.Do(req)
if err != nil {
logger.Error("get file url "+url+" error", "err", err)
return Fail
}
if response.StatusCode != 200 {
logger.Error("get file url "+url+" error", "err", err, "statusCode", response.StatusCode)
return Fail
}
//create folder and file
distDir := path.Dir(distFilePath)
err = os.MkdirAll(distDir, os.ModePerm)
if err != nil {
return Fail
}
file, err := os.Create(distFilePath)
if err != nil {
return Fail
}
defer file.Close()
if response.Body == nil {
logger.Error("Download responseBody is null")
return Fail
}
defer response.Body.Close()
task.StartTime = time.Now().Unix()
if onDownloadStart != nil {
go onDownloadStart(task)
}
_, err = copyBuffer(file, response.Body, nil, task)
if err != nil {
os.Remove(distFilePath)
if err.Error() == string(Break) {
//logger.Debug("task break","id",task.Id)
return Break
}
return Fail
}
fileInfo, err := os.Stat(distFilePath)
if err != nil {
logger.Error("Get file Stat error", "err", err)
os.Remove(distFilePath)
return Fail
}
size := fileInfo.Size()
logger.Debug("donwload file,fileInfo", "size", size)
if size == 0 {
os.Remove(distFilePath)
logger.Error("download file size error")
return Fail
}
return Success
}
func copyBuffer(dst io.Writer, src io.Reader, buf []byte, task *DownloadTask) (written int64, err error) {
// If the reader has a WriteTo method, use it to do the copy.
// Avoids an allocation and a copy.
if wt, ok := src.(io.WriterTo); ok {
return wt.WriteTo(dst)
}
// Similarly, if the writer has a ReadFrom method, use it to do the copy.
if rt, ok := dst.(io.ReaderFrom); ok {
return rt.ReadFrom(src)
}
if buf == nil {
size := 32 * 1024
if l, ok := src.(*io.LimitedReader); ok && int64(size) > l.N {
if l.N < 1 {
size = 1
} else {
size = int(l.N)
}
}
buf = make([]byte, size)
}
stop := false
srcWithCloser, ok := src.(io.ReadCloser)
if ok == false {
err = errors.New("to io.ReadCloser error")
return written, err
}
go func() {
for {
time.Sleep(500 * time.Millisecond) //for test
nr, er := srcWithCloser.Read(buf)
if nr > 0 {
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
//fmt.Println(ew.Error())
if task.Status == Task_Break &&
(strings.Contains(err.Error(), "http: read on closed response body") ||
strings.Contains(err.Error(), "use of closed network connection")) {
err = errors.New(string(Break))
}
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er != nil {
if er != io.EOF {
err = er
//errStr:=err.Error()
//fmt.Println(errStr)
if task.Status == Task_Break &&
(strings.Contains(err.Error(), "http: read on closed response body") ||
strings.Contains(err.Error(), "use of closed network connection")) {
err = errors.New(string(Break))
}
}
break
}
}
stop = true
}()
//monitor download speed
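// the 1s ticker below updates progress/speed and closes the source once the task is marked Task_Break, which makes the reader goroutine above exit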
spaceTime := time.Millisecond * 1000
ticker := time.NewTicker(spaceTime)
//lastWtn := int64(0)
count := 0
for {
count++
if stop {
break
}
select {
case <-ticker.C:
if task.Status == Task_Break {
srcWithCloser.Close()
}
task.DownloadedSize = written
useTime := count * 1000
speed := float64(written) / float64(useTime)
task.SpeedKBs = speed
//reportDownloadState
if onDownloading != nil {
go onDownloading(task, useTime)
}
}
}
return written, err
} | type DownloadInfo struct {
TargetUrl string
BindName string
FileName string | random_line_split |
downloadtaskmgr.go | package downloadtaskmgr
import (
"errors"
"io"
"math"
"net"
"net/http"
"os"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/daqnext/meson-common/common/logger"
)
type DownloadInfo struct {
TargetUrl string
BindName string
FileName string
Continent string
Country string
Area string
SavePath string
DownloadType string
OriginRegion string
TargetRegion string
}
type TaskStatus string
//const Task_Success TaskStatus = "success"
//const Task_Fail TaskStatus ="fail"
const Task_UnStart TaskStatus = "unstart"
const Task_Break TaskStatus = "break"
const Task_Downloading TaskStatus = "downloading"
type DownloadTask struct {
DownloadInfo
Id uint64
Status TaskStatus
FileSize int64
SpeedKBs float64
DownloadedSize int64
TryTimes int
StartTime int64
ZeroSpeedSec int
DownloadChannel *DownloadChannel
}
type TaskList struct {
TaskInQueue []DownloadTask
}
var currentId uint64
var idLock sync.Mutex
const GlobalDownloadTaskChanSize = 1024 * 10
var globalDownloadTaskChan = make(chan *DownloadTask, GlobalDownloadTaskChanSize)
var onTaskSuccess func(task *DownloadTask)
var onTaskFailed func(task *DownloadTask)
var panicCatcher func()
var onDownloadStart func(task *DownloadTask)
var onDownloading func(task *DownloadTask, usedTimeSec int)
type ExecResult string
const Success ExecResult = "Success"
const Fail ExecResult = "Fail"
const Break ExecResult = "Break"
type DownloadChannel struct {
SpeedLimitKBs int64
CountLimit int
RunningCountControlChan chan bool
IdleChan chan *DownloadTask
}
var DownloadingTaskMap sync.Map
var ChannelRunningSize = []int{10, 6, 3, 3, 2}
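// each channel re-queues tasks that were interrupted while downloading below its speed limit; RunningCountControlChan caps how many of them run at once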
var channelArray = []*DownloadChannel{
{SpeedLimitKBs: 30, CountLimit: ChannelRunningSize[0], RunningCountControlChan: make(chan bool, ChannelRunningSize[0]), IdleChan: make(chan *DownloadTask, 1024*5)}, //30KB/s
{SpeedLimitKBs: 100, CountLimit: ChannelRunningSize[1], RunningCountControlChan: make(chan bool, ChannelRunningSize[1]), IdleChan: make(chan *DownloadTask, 1024*5)}, //100KB/s
{SpeedLimitKBs: 500, CountLimit: ChannelRunningSize[2], RunningCountControlChan: make(chan bool, ChannelRunningSize[2]), IdleChan: make(chan *DownloadTask, 1024*5)}, //500KB/s
{SpeedLimitKBs: 1500, CountLimit: ChannelRunningSize[3], RunningCountControlChan: make(chan bool, ChannelRunningSize[3]), IdleChan: make(chan *DownloadTask, 1024*3)}, //1500KB/s
{SpeedLimitKBs: 2500, CountLimit: ChannelRunningSize[4], RunningCountControlChan: make(chan bool, ChannelRunningSize[4]), IdleChan: make(chan *DownloadTask, 1024*3)}, //2500KB/s
}
const NewRunningTaskCount = 7
var newRunningTaskControlChan = make(chan bool, NewRunningTaskCount)
func AddTaskToDownloadingMap(task *DownloadTask) {
DownloadingTaskMap.Store(task.Id, task)
}
func DeleteDownloadingTask(taskid uint64) {
DownloadingTaskMap.Delete(taskid)
}
type BySpeed []*DownloadTask
func (t BySpeed) Len() int { return len(t) }
func (t BySpeed) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t BySpeed) Less(i, j int) bool { return t[i].SpeedKBs < t[j].SpeedKBs }
func LoopScanRunningTask() {
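// when new tasks are waiting, mark up to 3 of the slowest running tasks (running for at least 5s) as Task_Break and assign each to a speed-matched channel for re-queueing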
newWaitingTaskCount := len(globalDownloadTaskChan)
//logger.Debug("Download waiting len","len",newWaitingTaskCount)
if newWaitingTaskCount <= 0 {
//logger.Debug("have no new task waiting")
return
}
killCount := 3
if newWaitingTaskCount < killCount {
killCount = newWaitingTaskCount
}
nowTime := time.Now().Unix()
taskReadyToKill := []*DownloadTask{}
DownloadingTaskMap.Range(func(key, value interface{}) bool {
task, ok := value.(*DownloadTask)
if ok {
//loop find task to kill
if nowTime-task.StartTime < 5 {
return true
}
for _, v := range channelArray {
if task.SpeedKBs < float64(v.SpeedLimitKBs) {
//if task.FileSize>0 {
// finishPercent:=task.DownloadedSize*100/task.FileSize
// if finishPercent>70 {
// break
// }
//}
task.DownloadChannel = v
taskReadyToKill = append(taskReadyToKill, task)
break
}
}
}
return true
})
if len(taskReadyToKill) == 0 {
return
}
sort.Sort(BySpeed(taskReadyToKill))
count := 0
for _, v := range taskReadyToKill {
v.Status = Task_Break
//logger.Debug("Break Task","id",v.Id)
count++
if count >= killCount {
return
}
}
}
func InitTaskMgr(rootPath string) {
LevelDBInit()
for _, v := range channelArray {
for i := 0; i < v.CountLimit; i++ {
v.RunningCountControlChan <- true
}
}
for i := 0; i < NewRunningTaskCount; i++ {
newRunningTaskControlChan <- true
}
//read unfinished task and restart
unFinishedTask := LoopTasksInLDB()
if unFinishedTask == nil {
return
}
for _, v := range unFinishedTask {
info := &DownloadInfo{}
info.TargetUrl = v.TargetUrl
info.BindName = v.BindName
info.FileName = v.FileName
info.Continent = v.Continent
info.Country = v.Country
info.Area = v.Area
info.SavePath = v.SavePath
err := AddGlobalDownloadTask(info)
if err != nil {
logger.Error("Add AddGlobalDownloadTask error")
}
}
}
func AddGlobalDownloadTask(info *DownloadInfo) error {
idLock.Lock()
if currentId >= math.MaxUint64 {
currentId = 0
}
currentId++
idLock.Unlock()
newTask := &DownloadTask{}
newTask.Id = currentId
newTask.TargetUrl = info.TargetUrl
newTask.BindName = info.BindName
newTask.FileName = info.FileName
newTask.Continent = info.Continent
newTask.Country = info.Country
newTask.Area = info.Area
newTask.DownloadType = info.DownloadType
newTask.OriginRegion = info.OriginRegion
newTask.TargetRegion = info.TargetRegion
newTask.SavePath = info.SavePath
newTask.Status = Task_UnStart
newTask.TryTimes = 0
go func() {
//save to LevelDB
SetTaskToLDB(newTask)
//to task channel
globalDownloadTaskChan <- newTask
}()
return nil
}
func SetPanicCatcher(function func()) {
panicCatcher = function
}
func SetOnTaskSuccess(function func(task *DownloadTask)) {
onTaskSuccess = function
}
func SetOnTaskFailed(function func(task *DownloadTask)) {
onTaskFailed = function
}
func SetOnDownloading(function func(task *DownloadTask, usedTimeSec int)) {
onDownloading = function
}
func SetOnDownloadStart(function func(task *DownloadTask)) {
onDownloadStart = function
}
func GetDownloadTaskList() []*DownloadTask {
taskInLDB := LoopTasksInLDB()
if taskInLDB == nil {
return nil
}
list := []*DownloadTask{}
for _, v := range taskInLDB {
list = append(list, v)
}
return list
}
func TaskSuccess(task *DownloadTask) {
logger.Debug("Task Success", "id", task.Id)
//remove the task from LevelDB and the running map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskSuccess == nil {
logger.Error("not define onTaskSuccess")
return
}
onTaskSuccess(task)
}
func TaskFail(task *DownloadTask) {
logger.Debug("Task Fail", "id", task.Id)
//remove the task from LevelDB and the running map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskFailed == nil {
logger.Error("not define onTaskFailed")
return
}
onTaskFailed(task)
}
func TaskBreak(task *DownloadTask) {
logger.Debug("Task Break", "id", task.Id)
//delete from runningMap
DeleteDownloadingTask(task.Id)
task.Status = Task_UnStart
//add to queue
channel := task.DownloadChannel
if channel == nil {
logger.Error("Break Task not set channel,back to global list", "taskid", task.Id)
globalDownloadTaskChan <- task
return
}
channel.IdleChan <- task
logger.Debug("add break task to idleChan", "speedLimit", channel.SpeedLimitKBs, "chanLen", len(channel.IdleChan), "taskid", task.Id)
}
func TaskRetry(task *DownloadTask) {
logger.Debug("Task Retry", "id", task.Id)
DeleteDownloadingTask(task.Id)
task.TryTimes++
task.Status = Task_UnStart
globalDownloadTaskChan <- task
}
func StartTask(task *DownloadTask) {
if panicCatcher != nil {
defer panicCatcher()
}
result := ExecDownloadTask(task)
switch result {
case Success:
//logger.Debug("download task success", "id", task.Id)
TaskSuccess(task)
case Fail:
//logger.Debug("download task fail", "id", task.Id)
if task.TryTimes >= 2 {
TaskFail(task)
} else {
//re-queue the task for another attempt
TaskRetry(task)
}
case Break:
//logger.Debug("download task idle", "id", task.Id)
TaskBreak(task)
}
}
func (dc *DownloadChannel) ChannelDownload() {
go func() {
for {
//acquire a run token for this channel
<-dc.RunningCountControlChan
select {
case task := <-dc.IdleChan:
go func() {
defer func() {
dc.RunningCountControlChan <- true
}()
logger.Debug("get a task from idle list", "channel speed", dc.SpeedLimitKBs, "id", task.Id, "chanlen", len(dc.IdleChan))
//execute the task
StartTask(task)
}()
}
}
}()
}
func Run() {
RunNewTask()
RunChannelDownload()
//scan loop: periodically checks running tasks
go func() {
if panicCatcher != nil {
defer panicCatcher()
}
for {
time.Sleep(5 * time.Second)
LoopScanRunningTask()
}
}()
}
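// The sketch below is not part of the original source: it shows how a caller in the
// same package might wire the callbacks defined above, initialize the manager and
// enqueue a single download. The root path, URL and save path are placeholder values.
func ExampleStartManager() {
	SetOnTaskSuccess(func(task *DownloadTask) {
		logger.Debug("task finished", "id", task.Id, "url", task.TargetUrl)
	})
	SetOnTaskFailed(func(task *DownloadTask) {
		logger.Error("task gave up after retries", "id", task.Id)
	})
	SetOnDownloading(func(task *DownloadTask, usedTimeSec int) {
		logger.Debug("task progress", "id", task.Id, "speedKBs", task.SpeedKBs, "usedTimeSec", usedTimeSec)
	})
	InitTaskMgr("/data/downloads") // restores unfinished tasks from LevelDB
	Run()                          // starts the channel workers, the new-task loop and the scan loop
	err := AddGlobalDownloadTask(&DownloadInfo{
		TargetUrl: "http://example.com/file.bin",
		FileName:  "file.bin",
		SavePath:  "/data/downloads/file.bin",
	})
	if err != nil {
		logger.Error("enqueue failed", "err", err)
	}
}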
func RunChannelDownload() {
for _, v := range channelArray {
v.ChannelDownload()
}
}
func RunNewTask() {
go func() {
for {
<-newRunningTaskControlChan
| Conn, err error) {
return func(netw, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(netw, addr, cTimeout)
if err != nil {
return nil, err
}
if rwTimeout > 0 {
err := conn.SetDeadline(time.Now().Add(rwTimeout))
if err != nil {
logger.Error("set download process rwTimeout error", "err", err)
return nil, err
}
}
return conn, nil
}
}
func ExecDownloadTask(task *DownloadTask) ExecResult {
connectTimeout := 10 * time.Second
readWriteTimeout := 3600 * 12 * time.Second
//readWriteTimeout := time.Duration(0)
url := task.TargetUrl
distFilePath := task.SavePath
cHead := http.Client{
Transport: &http.Transport{
Dial: TimeoutDialer(connectTimeout, readWriteTimeout),
},
}
//HEAD request to learn the content length before downloading
reqHead, err := http.NewRequest(http.MethodHead, url, nil)
if err == nil {
responseHead, err := cHead.Do(reqHead)
if err == nil {
if responseHead.StatusCode == 200 && responseHead.ContentLength > 0 {
task.FileSize = responseHead.ContentLength
}
}
}
//http client
c := http.Client{
Transport: &http.Transport{
Dial: TimeoutDialer(connectTimeout, readWriteTimeout),
},
}
//GET request for the actual file
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
logger.Error("create request error", "err", err)
return Fail
}
//download
response, err := c.Do(req)
if err != nil {
logger.Error("get file url "+url+" error", "err", err)
return Fail
}
if response.StatusCode != 200 {
logger.Error("get file url "+url+" error", "err", err, "statusCode", response.StatusCode)
return Fail
}
//create the destination folder and file
distDir := path.Dir(distFilePath)
err = os.MkdirAll(distDir, os.ModePerm)
if err != nil {
return Fail
}
file, err := os.Create(distFilePath)
if err != nil {
return Fail
}
defer file.Close()
if response.Body == nil {
logger.Error("Download responseBody is null")
return Fail
}
defer response.Body.Close()
task.StartTime = time.Now().Unix()
if onDownloadStart != nil {
go onDownloadStart(task)
}
_, err = copyBuffer(file, response.Body, nil, task)
if err != nil {
os.Remove(distFilePath)
if err.Error() == string(Break) {
//logger.Debug("task break","id",task.Id)
return Break
}
return Fail
}
fileInfo, err := os.Stat(distFilePath)
if err != nil {
logger.Error("Get file Stat error", "err", err)
os.Remove(distFilePath)
return Fail
}
size := fileInfo.Size()
logger.Debug("donwload file,fileInfo", "size", size)
if size == 0 {
os.Remove(distFilePath)
logger.Error("download file size error")
return Fail
}
return Success
}
func copyBuffer(dst io.Writer, src io.Reader, buf []byte, task *DownloadTask) (written int64, err error) {
// Unlike io.copyBuffer, skip the WriterTo/ReadFrom fast paths: they would bypass the
// speed accounting and the Task_Break handling below (*os.File, the usual dst here,
// implements io.ReaderFrom).
if buf == nil {
size := 32 * 1024
if l, ok := src.(*io.LimitedReader); ok && int64(size) > l.N {
if l.N < 1 {
size = 1
} else {
size = int(l.N)
}
}
buf = make([]byte, size)
}
// stop is set by the reader goroutine when the copy loop below finishes; the speed
// monitor loop polls it
stop := false
srcWithCloser, ok := src.(io.ReadCloser)
if !ok {
err = errors.New("src does not implement io.ReadCloser")
return written, err
}
go func() {
for {
time.Sleep(500 * time.Millisecond) //test delay: throttles the read loop to roughly two reads per second
nr, er := srcWithCloser.Read(buf)
if nr > 0 {
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
//fmt.Println(ew.Error())
if task.Status == Task_Break &&
(strings.Contains(err.Error(), "http: read on closed response body") ||
strings.Contains(err.Error(), "use of closed network connection")) {
err = errors.New(string(Break))
}
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er != nil {
if er != io.EOF {
err = er
//errStr:=err.Error()
//fmt.Println(errStr)
if task.Status == Task_Break &&
(strings.Contains(err.Error(), "http: read on closed response body") ||
strings.Contains(err.Error(), "use of closed network connection")) {
err = errors.New(string(Break))
}
}
break
}
}
stop = true
}()
//monitor download speed
spaceTime := time.Millisecond * 1000
ticker := time.NewTicker(spaceTime)
//lastWtn := int64(0)
count := 0
for {
count++
if stop {
break
}
select {
case <-ticker.C:
if task.Status == Task_Break {
srcWithCloser.Close()
}
task.DownloadedSize = written
useTimeSec := count
speed := float64(written) / float64(useTimeSec*1000) // bytes per millisecond ≈ KB/s
task.SpeedKBs = speed
//report download state to the registered callback, in seconds as the callback signature states
if onDownloading != nil {
go onDownloading(task, useTimeSec)
}
}
}
return written, err
}
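// Not part of the original code: a minimal sketch of how a running download is
// interrupted. Whoever holds the live *DownloadTask pointer (as LoopScanRunningTask
// does when it walks the downloading map) flips the status to Task_Break; the ticker
// in copyBuffer then closes the response body, the resulting "closed" read error is
// mapped to Break, and TaskBreak parks the task on its channel's IdleChan until a
// worker picks it up again.
func RequestBreak(task *DownloadTask) {
	task.Status = Task_Break
}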
| select {
case task := <-globalDownloadTaskChan:
//start a new download task
go func() {
//return the token when the task finishes
defer func() {
newRunningTaskControlChan <- true
}()
//execute the task
//logger.Debug("start a new task", "id", task.Id)
task.Status = Task_Downloading
AddTaskToDownloadingMap(task)
StartTask(task)
}()
}
}
}()
}
func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net. | identifier_body |