file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
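Each row is one fill-in-the-middle (FIM) training sample: concatenating prefix + middle + suffix reproduces the original source file, and fim_type records how the split was chosen (the four classes seen below are random_line_split, conditional_block, identifier_name and identifier_body). A minimal reassembly sketch, assuming the dump was exported from a Hugging Face dataset (the dataset path below is a placeholder; any iterable of dicts with these five columns works the same way):

import datasets  # the Hugging Face "datasets" package

rows = datasets.load_dataset("user/fim-samples", split="train")  # placeholder path
sample = rows[0]
source = sample["prefix"] + sample["middle"] + sample["suffix"]  # the full file text
print(sample["file_name"], sample["fim_type"], len(source))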
test2.py | import os, sys
import Tkinter
import tkFileDialog
import PIL
from PIL import ImageTk, Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import cv2
from scipy.cluster.vq import kmeans
from skimage import data, img_as_float
#from skimage.measure import compare_ssim as ssim
from skimage.measure import structural_similarity as ssim

LETTERS = ["a","b","c","d","e","f","g","h","i","j","k","l","m",
           "n","o","p","q","r","s","t","u","v","w","x","y","z"]
class Rectangle:
    def __init__(self, x_param=0, y_param=0, w_param=0, h_param=0):
        self.x = x_param
        self.y = y_param
        self.w = w_param
        self.h = h_param

    def __str__(self):
        return "Width = "+str(self.w)+", Height = "+str(self.h)

class MainWindow:
    def __init__(self, master):
        self.video = None
        self.frame_rate = 0
        self.video_length = 0
        # The scaled image used for display. Needs to persist as an attribute,
        # or Tkinter garbage-collects it and the canvas goes blank.
        self.display_image = None
        self.display_ratio = 0
        self.awaiting_corners = False
        self.corners = []
        # Tkinter related fields
        self.master = master
        self.master.title("Auto Kifu Test2")
        self.window_width = self.master.winfo_screenwidth()
        self.window_height = self.master.winfo_screenheight() - 100
        self.master.geometry("%dx%d+0+0" % (self.window_width, self.window_height))
        self.master.configure(background='grey')
        self.canvas = Tkinter.Canvas(self.master)
        self.canvas.place(x=0,
                          y=0,
                          width=self.window_width,
                          height=self.window_height)
        self.canvas.bind("<Button-1>", self.mouse_clicked)
        self.menubar = Tkinter.Menu(self.master)
        self.master.config(menu=self.menubar)
        self.fileMenu = Tkinter.Menu(self.menubar)
        # Pass the method itself; calling self.load() here would open the file
        # dialog immediately and bind None to the menu entry.
        self.fileMenu.add_command(label="Load Image", command=self.load)
        self.menubar.add_cascade(label="File", menu=self.fileMenu)

    def mouse_clicked(self, event):
        if self.awaiting_corners:
            self.draw_x(event.x, event.y)
            self.corners += [(event.x/self.display_ratio, event.y/self.display_ratio)]
            if len(self.corners) == 4:
                self.awaiting_corners = False
                self.main()
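    # Pipeline: locate the 19x19 grid from the four clicked corners, reduce
    # the video to one frame per settled board position, classify every
    # intersection as black/white/empty, then diff successive boards into an
    # SGF move list.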
    def main(self):
        board_positions, crop_window = self.find_grid(self.corners)
        frames = self.parse_video(crop_window)
        for x in range(len(frames)):
            frames[x] = cv2.cvtColor(frames[x], cv2.COLOR_BGR2GRAY)
            frames[x] = cv2.GaussianBlur(frames[x], (51, 51), 0)
        thresholds = self.determine_thresholds(frames[-1], board_positions)
        for x in range(len(frames)):
            cv2.imwrite('output/2/frames'+str(x)+'.png', frames[x])
        for x in range(len(frames)):
            frames[x] = self.parse_frames(frames[x], board_positions, thresholds)
        for x in range(1, len(frames)):
            print "Board: "+str(x)
            self.print_board(frames[x])
        output = "(;GM[1]FF[4]CA[UTF-8]AP[CGoban:3]ST[2]SZ[19]"
        for i in range(1, len(frames)):
            moves = self.frame_difference(frames[i-1], frames[i])
            for move in moves:
                color = move["color"]
                x = LETTERS[move["position"][0]]
                y = LETTERS[move["position"][1]]
                output += ";"+color+"["+x+y+"]"
        output += ")"
        output_file = open("output.txt", "w")
        output_file.write(output)
        output_file.close()
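    # A 19x19 board has 18 line spacings per side; the crop window pads the
    # clicked corners by one extra spacing all around. Note board_positions
    # stores (row, col) pixel coordinates, i.e. (y, x) order for numpy
    # indexing.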
    def find_grid(self, corners):
        top_left = corners[0]
        bottom_right = corners[2]
        board_width = bottom_right[0] - top_left[0]
        board_height = bottom_right[1] - top_left[1]
        horizontal_spacing = board_width / 18
        vertical_spacing = board_height / 18
        crop_window = Rectangle()
        crop_window.x = int(top_left[0] - horizontal_spacing)
        crop_window.y = int(top_left[1] - vertical_spacing)
        crop_window.w = int(board_width + (2 * horizontal_spacing))
        crop_window.h = int(board_height + (2 * vertical_spacing))
        board_positions = []
        for x in range(0, 19):
            board_positions += [[]]
            for y in range(0, 19):
                x_coord = int(top_left[0] + horizontal_spacing * x)
                y_coord = int(top_left[1] + vertical_spacing * y)
                x_coord -= crop_window.x
                y_coord -= crop_window.y
                board_positions[x] += [(y_coord, x_coord)]
        return board_positions, crop_window
    def print_board(self, frame):
        print "-------------------"
        for y in range(19):
            string = ""
            for x in range(19):
                string += frame[x][y]
            print string
        print "-------------------"
    def parse_video(self, crop_window):
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out1 = cv2.VideoWriter('output.avi', fourcc, 30.0, (crop_window.w, crop_window.h))
        success, current_frame = self.video.read()
        current_frame = current_frame[crop_window.y:crop_window.y + crop_window.h,
                                      crop_window.x:crop_window.x + crop_window.w]
        differences = []
        final_video = [current_frame]
        while self.video.isOpened() and success:
            last_frame = current_frame
            success, current_frame = self.video.read()
            if not success:
                break
            current_frame = current_frame[crop_window.y:crop_window.y+crop_window.h,
                                          crop_window.x:crop_window.x+crop_window.w]
            out1.write(current_frame)
            s = self.mse_total(last_frame, current_frame)
            #s = ssim(last_frame, current_frame) # Doesn't Work
            differences += [s]
            recently_still = True
            still_duration = 15
            for x in range(still_duration):
                # Index from the end of the list; differences[-x] would wrap
                # around to differences[0] when x == 0.
                if x < len(differences) and differences[-1 - x] > 4:
                    recently_still = False
            if recently_still:
                #out1.write(current_frame)
                s = self.mse_total(current_frame, final_video[-1])
                if s > 20:
                    final_video += [current_frame]
        #plt.hist(differences, bins=400)
        plt.title("Frame Difference Histogram")
        plt.xlabel("Difference (mean squared error)")
        plt.ylabel("Number of Frames")
        #plt.show()
        time = np.arange(0, self.video_length/self.frame_rate, 1.0/self.frame_rate)
        time = time[:len(differences)]
        #plt.plot(time, differences)
        plt.xlabel('time (s)')
        plt.ylabel('Difference')
        plt.title('MSE over Time')
        plt.grid(True)
        #plt.show()
        out1.release()
        '''
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out2 = cv2.VideoWriter('output2.avi', fourcc, 30.0,
                               (self.crop_w, self.crop_h))
        for x in final_video:
            for y in range(30):
                out2.write(x)
        out2.release()
        '''
        return final_video
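    # Mean squared error summed over all pixels and channels: 0 for identical
    # frames, growing with change. Cheap, and good enough here to detect
    # "nothing is moving" between consecutive crops.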
    def mse_total(self, imageA, imageB):
        err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
        err /= float(imageA.shape[0] * imageA.shape[1])
        return err

    def mse_image(self, imageA, imageB):
        # Cast before subtracting: uint8 differences would wrap around.
        return (imageA.astype("float") - imageB.astype("float")) ** 2
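    # Sample the grey level at all 361 intersections of the final board and
    # k-means them into 3 clusters: black stones, bare board, white stones.
    # The two classification cutoffs are the midpoints between neighbouring
    # cluster centres.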
    def determine_thresholds(self, image, board_positions):
        samples = []
        for x in range(0, 19):
            for y in range(0, 19):
                position = board_positions[x][y]
                samples += [float(image[position[0]][position[1]])]
        plt.hist(samples, bins=255)
        plt.title("Intersection Intensity Histogram")
        plt.xlabel("Intensity (Greyscale)")
        plt.ylabel("Number of Intersections")
        #plt.show()
        centroids, _ = kmeans(samples, 3)
        plt.axvline(x=centroids[0], color="red")
        plt.axvline(x=centroids[1], color="red")
        plt.axvline(x=centroids[2], color="red")
        plt.show()
        # lo/md/hi: indices, then values, of the sorted cluster centres
        # (named to avoid shadowing the min/max builtins).
        lo = 0
        md = 0
        hi = 0
        for x in range(0, 3):
            if centroids[x] < centroids[lo]:
                lo = x
            if centroids[x] > centroids[hi]:
                hi = x
        for x in range(0, 3):
            if x != lo and x != hi:
                md = x
        lo = centroids[lo]
        md = centroids[md]
        hi = centroids[hi]
        threshold1 = (lo + md) / 2
        threshold2 = (hi + md) / 2
        print "threshold 1 = "+str(threshold1)
        print "threshold 2 = "+str(threshold2)
        # The computed thresholds are currently overridden by hand-tuned
        # constants.
        #return [threshold1, threshold2]
        return [120, 185]
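    # Darker than threshold1 -> black stone, brighter than threshold2 ->
    # white stone, in between -> empty intersection.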
    def parse_frames(self, image, board_positions, thresholds):
        return_array = []
        for x in range(0, 19):
            return_array += [[]]
            for y in range(0, 19):
                position = board_positions[x][y]
                intensity = image[position[0]][position[1]]
                if intensity < thresholds[0]:
                    return_array[x] += ["B"]
                elif intensity > thresholds[1]:
                    return_array[x] += ["W"]
                else:
                    return_array[x] += ["+"]
        return return_array
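    # A "move" is any intersection that was empty in the earlier board and
    # holds a stone in the later one; captures (stones disappearing) are
    # deliberately ignored.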
    def frame_difference(self, former_frame, later_frame):
        moves = []
        for x in range(19):
            for y in range(19):
                if (later_frame[x][y] != former_frame[x][y]
                        and former_frame[x][y] == "+"):
                    moves += [{"color": later_frame[x][y],
                               "position": (x, y)}]
        return moves

    def display_grid(self, board_positions):
        for x in range(0, 19):
            for y in range(0, 19):
                self.draw_x(board_positions[x][y][1],
                            board_positions[x][y][0],
                            transform=self.display_ratio)

    def draw_x(self, x, y, radius=10, width=3, color="red", transform=1):
        self.canvas.create_line((x-radius)*transform,
                                (y-radius)*transform,
                                (x+radius)*transform,
                                (y+radius)*transform,
                                width=width,
                                fill=color)
        self.canvas.create_line((x-radius)*transform,
                                (y+radius)*transform,
                                (x+radius)*transform,
                                (y-radius)*transform,
                                width=width,
                                fill=color)

    def load(self):
        # Load Video
        dir_path = os.path.dirname(os.path.realpath(__file__))
        path = tkFileDialog.askopenfilename(initialdir=dir_path,
                                            title="Select file",
                                            filetypes=(
                                                ("mp4 files", "*.mp4"),
                                                ("jpeg files", "*.jpg"),
                                                ("png files", "*.png")))
        self.video = cv2.VideoCapture(path)
        self.frame_rate = self.video.get(cv2.CAP_PROP_FPS)
        self.video_length = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
        success, first_frame = self.video.read()
        image_height, image_width = first_frame.shape[:2]
        # Display Image
        self.display_ratio = float(self.window_height - 200)/image_height
        resize_dimensions = (int(image_width*self.display_ratio), int(image_height*self.display_ratio))
        resized_image = cv2.resize(first_frame, resize_dimensions, interpolation=cv2.INTER_CUBIC)
        tk_image = self.convert_cv2_to_PIL(resized_image)
        self.display_image = PIL.ImageTk.PhotoImage(tk_image)
        self.canvas.create_image(0, 0, anchor="nw", image=self.display_image)
        # Cue corner collection
        self.awaiting_corners = True

    def convert_cv2_to_PIL(self, cv2image):
        cv2_im = cv2.cvtColor(cv2image, cv2.COLOR_BGR2RGB)
        return PIL.Image.fromarray(cv2_im)

root = Tkinter.Tk()
main_window = MainWindow(root)
root.mainloop()
| centroids, _ = kmeans(samples, 3) | random_line_split
test2.py | (prefix and suffix identical to the copy above; split at the body of "if s > 20:" in parse_video) | final_video += [current_frame] | conditional_block
test2.py | (prefix and suffix identical to the copy above; split at the class name in "class MainWindow:") | MainWindow | identifier_name
test_install.py | #!/usr/bin/env python
"""
Simple script for testing various SfePy functionality, examples not
covered by tests, and running the tests.
The script just runs the commands specified in its main() using the
`subprocess` module, captures the output and compares one or more key
words to the expected ones.
The output of failed commands is saved to 'test_install.log' file.
"""
import time
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import shlex
import subprocess
import logging
import re

DEBUG_FMT = '*' * 55 + '\n%s\n' + '*' * 55

def _get_logger(filename='test_install.log'):
    """
    Convenience function to set-up output and logging.
    """
    logger = logging.getLogger('test_install.py')
    logger.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    file_handler = logging.FileHandler(filename)
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
    return logger

logger = _get_logger()
def check_output(cmd):
    """
    Run the specified command and capture its outputs.

    Returns
    -------
    out : tuple
        The (stdout, stderr) output tuple.
    """
    logger.info(cmd)
    args = shlex.split(cmd)
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = [ii.decode() for ii in p.communicate()]
    return out
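# report() pulls one whitespace-separated token (or, with match_numbers, one
# regex-matched number) from a given line of the command output and compares
# it to the expected value: exactly when eps is None, else within eps.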
def report(out, name, line, item, value, eps=None, return_item=False,
           match_numbers=False):
    """
    Check that `item` at `line` of the output string `out` is equal
    to `value`. If not, print the output.
    """
    status_item = None
    try:
        if match_numbers:
            status = out.split('\n')[line]
        else:
            status = out.split('\n')[line].split()
    except IndexError:
        logger.error(' not enough output from command!')
        ok = False
    else:
        try:
            if match_numbers:
                # Raw string: matches signed integers, floats and exponents,
                # optionally with a complex 'j' suffix.
                pat = r'([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?[jJ]?)'
                matches = re.findall(pat, status)
                status_item = matches[item]
            else:
                status_item = status[item]
            logger.info(' comparing: %s %s', status_item, value)
            if eps is None:
                ok = (status_item == value)
            else:
                try:
                    ok = abs(float(status_item) - float(value)) < eps
                except ValueError:
                    ok = False
        except IndexError:
            ok = False
    logger.info(' %s: %s', name, ok)
    if not ok:
        logger.debug(DEBUG_FMT, out)
    if return_item:
        # status_item (not status[item]) also covers the match_numbers and
        # short-output paths.
        return ok, status_item
    else:
        return ok
def report2(out, name, items, return_item=False):
    """
    Check that `items` are in the output string `out`.
    If not, print the output.
    """
    ok = True
    for s in items:
        logger.info(' checking: %s', s)
        if s not in out:
            ok = False
            break
    logger.info(' %s: %s', name, ok)
    if not ok:
        logger.debug(DEBUG_FMT, out)
    if return_item:
        return ok, s
    else:
        return ok
def report_tests(out, return_item=False):
    """
    Check that all tests in the output string `out` passed.
    If not, print the output.
    """
    from pyparsing import (Word, Combine, Suppress, Optional, OneOrMore,
                           delimitedList, nums, Literal)
    from functools import partial

    integer = Word(nums).setName('integer')
    real = Combine(Word(nums) + '.' + Optional(Word(nums))).setName('real')
    equals = Suppress(OneOrMore('='))

    _stats = {}
    def add_stat(s, loc, toks, key=None):
        if key is None:
            key = toks[1]
        _stats[key] = toks[0]
        return toks

    word = ((integer + 'failed') |
            (integer + 'passed') |
            (integer + 'deselected') |
            (integer + 'warnings')).setParseAction(add_stat)
    line = (equals +
            Optional(delimitedList(word)) +
            'in' +
            (real + (Literal('s') | 'seconds'))
            .setParseAction(partial(add_stat, key='seconds')) +
            equals)
    line.searchString(out)

    keys = ['failed', 'passed', 'deselected', 'warnings', 'seconds']
    stats = {key : _stats.get(key, '0') for key in keys}
    ok = stats['failed'] == '0'
    logger.info(
        (' {failed} failed, {passed} passed, {deselected} deselected,'
         ' {warnings} warnings in {seconds} seconds').format(**stats)
    )
    if not ok:
        logger.debug(DEBUG_FMT, out)
    if return_item:
        return ok, stats['failed']
    else:
        return ok
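# main() runs each example command, grades its output with report()/report2()
# (eok counts passing checks), then runs the test suite via sfepy.test() and
# appends a one-line timing summary to test_install_times.log.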
def main():
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.parse_args()
    fd = open('test_install.log', 'w')
    fd.close()
    eok = 0
    t0 = time.time()
    out, err = check_output('python3 sfepy/scripts/blockgen.py')
    eok += report(out, '...', -2, 1, '...done')
    out, err = check_output('python3 sfepy/scripts/cylindergen.py')
    eok += report(out, '...', -2, 1, '...done')
    out, err = check_output('python3 sfepy/scripts/convert_mesh.py meshes/3d/cylinder.vtk out.mesh')
    eok += report(out, '...', -2, 1, '...done')
    out, err = check_output('python3 sfepy/scripts/convert_mesh.py --tile 2,2 meshes/elements/2_4_2.mesh out-per.mesh')
    eok += report(out, '...', -2, 1, '...done')
    out, err = check_output('python3 sfepy/scripts/convert_mesh.py --extract-surface --print-surface=- meshes/various_formats/octahedron.node surf_octahedron.mesh')
    eok += report(out, '...', -4, 0, '1185')
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson.py')
    eok += report(out, '...', -3, 5, '1.173819e-16', eps=1e-15)
    out, err = check_output("""python3 sfepy/scripts/simple.py -c "ebc_2 : {'name' : 't2', 'region' : 'Gamma_Right', 'dofs' : {'t.0' : -5.0}}" sfepy/examples/diffusion/poisson.py""")
    eok += report(out, '...', -3, 5, '2.308051e-16', eps=1e-15)
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson_iga.py')
    eok += report(out, '...', -3, 5, '3.373487e-15', eps=1e-14)
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/navier_stokes/stokes.py')
    eok += report(out, '...', -3, 5, '1.210678e-13', eps=1e-11)
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson_parametric_study.py')
    eok += report(out, '...', -3, 5, '1.606408e-14', eps=1e-13)
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/linear_elasticity/its2D_3.py')
    eok += report(out, '...', -24, 5, '3.964886e-12', eps=1e-11)
    eok += report(out, '...', -4, 4, '2.58660e+01', eps=1e-5)
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/linear_elasticity/linear_elastic.py --format h5')
    eok += report(out, '...', -3, 5, '4.638192e-18', eps=1e-15)
    out, err = check_output('python3 sfepy/scripts/extractor.py -d cylinder.h5')
    eok += report(out, '...', -2, 1, '...done')
    out, err = check_output('python3 sfepy/scripts/resview.py --off-screen -o cylinder.png cylinder.h5')
    eok += report(out, '...', -2, 1, 'cylinder.png')
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py')
    eok += report(out, '...', -9, 0, '2.08545116e+08', match_numbers=True)
    eok += report(out, '...', -8, 1, '1.16309223e+11', match_numbers=True)
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py --phonon-phase-velocity')
    eok += report(out, '...', -2, 0, '4189.41229592', match_numbers=True)
    eok += report(out, '...', -2, 1, '2620.55608256', match_numbers=True)
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py --phonon-dispersion')
    eok += report(out, '...', -6, 1, '[0,')
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps_rigid.py')
    eok += report(out, '...', -9, 0, '4.58709531e+07', match_numbers=True)
    eok += report(out, '...', -8, 1, '1.13929200e+11', match_numbers=True)
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/quantum/hydrogen.py')
    eok += report(out, '...', -2, -2, '-0.01913506', eps=1e-4)
    out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/homogenization/perfusion_micro.py')
    eok += report2(out, '...', ['computing EpA', 'computing PA_3',
                                'computing GA', 'computing EmA',
                                'computing KA'])
    out, err = check_output('python3 sfepy/examples/homogenization/rs_correctors.py -n')
    eok += report(out, '...', -2, -1, '1.644e-01', match_numbers=True)
    out, err = check_output('python3 sfepy/examples/large_deformation/compare_elastic_materials.py -n')
    eok += report(out, '...', -3, 5, '1.068759e-14', eps=1e-13)
    out, err = check_output('python3 sfepy/examples/linear_elasticity/linear_elastic_interactive.py')
    eok += report(out, '...', -16, 0, '1.62128841139e-14', eps=1e-13)
    out, err = check_output('python3 sfepy/examples/linear_elasticity/modal_analysis.py')
    eok += report(out, '...', -12, 5, '12142.11470773', eps=1e-13)
    out, err = check_output('python3 sfepy/examples/multi_physics/thermal_electric.py')
    eok += report(out, '...', -4, 5, '2.612933e-14', eps=1e-13)
    out, err = check_output('python3 sfepy/examples/diffusion/laplace_refine_interactive.py output')
    eok += report(out, '...', -3, 5, '2.675866e-15', eps=1e-13)
    out, err = check_output('python3 sfepy/examples/diffusion/laplace_iga_interactive.py -o output')
    eok += report(out, '...', -3, 5, '1.028134e-13', eps=1e-12)
    out, err = check_output('python3 sfepy/examples/dg/imperative_burgers_1D.py -o output')
    eok += report(out, '...', -3, 3, 'moment_1D_limiter')
    out, err = check_output('mpiexec -n 2 python3 sfepy/examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --silent -ksp_monitor')
    eok += report(out, '...', -2, 4, '8.021313824020e-07', eps=1e-6)
    out, err = check_output('mpiexec -n 2 python3 sfepy/examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --silent -ksp_monitor')
    eok += report(out, '...', -2, 4, '3.787214380277e-09', eps=1e-7)
    t1 = time.time()
    out, err = check_output("python3 -c \"import sfepy; sfepy.test('-v', '--disable-warnings')\"")
    tok, failed = report_tests(out, return_item=True)
    tok = {True : 'ok', False : 'fail'}[tok]
    t2 = time.time()
    fd = open('test_install_times.log', 'a+')
    fd.write('%s: examples: %.2f [s] (%d), tests: %.2f [s] (%s: %s)\n'
             % (time.ctime(t0), t1 - t0, eok, t2 - t1, tok, failed))
    fd.close()

if __name__ == '__main__':
    main()
| (middle cell garbled in the dump) | random_line_split
test_install.py | #!/usr/bin/env python
"""
Simple script for testing various SfePy functionality, examples not
covered by tests, and running the tests.
The script just runs the commands specified in its main() using the
`subprocess` module, captures the output and compares one or more key
words to the expected ones.
The output of failed commands is saved to 'test_install.log' file.
"""
import time
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import shlex
import subprocess
import logging
import re
DEBUG_FMT = '*' * 55 + '\n%s\n' + '*' * 55
def _get_logger(filename='test_install.log'):
"""
Convenience function to set-up output and logging.
"""
logger = logging.getLogger('test_install.py')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
file_handler = logging.FileHandler(filename)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
logger = _get_logger()
def check_output(cmd):
|
def report(out, name, line, item, value, eps=None, return_item=False,
match_numbers=False):
"""
Check that `item` at `line` of the output string `out` is equal
to `value`. If not, print the output.
"""
try:
if match_numbers:
status = out.split('\n')[line]
else:
status = out.split('\n')[line].split()
except IndexError:
logger.error(' not enough output from command!')
ok = False
else:
try:
if match_numbers:
pat = '([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?[jJ]?)'
matches = re.findall(pat, status)
status_item = matches[item]
else:
status_item = status[item]
logger.info(' comparing: %s %s', status_item, value)
if eps is None:
ok = (status_item == value)
else:
try:
ok = abs(float(status_item) - float(value)) < eps
except:
ok = False
except IndexError:
ok = False
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, status[item]
else:
return ok
def report2(out, name, items, return_item=False):
"""
Check that `items` are in the output string `out`.
If not, print the output.
"""
ok = True
for s in items:
logger.info(' checking: %s', s)
if s not in out:
ok = False
break
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, s
else:
return ok
def report_tests(out, return_item=False):
"""
Check that all tests in the output string `out` passed.
If not, print the output.
"""
from pyparsing import (Word, Combine, Suppress, Optional, OneOrMore,
delimitedList, nums, Literal)
from functools import partial
integer = Word(nums).setName('integer')
real = Combine(Word(nums) + '.' + Optional(Word(nums))).setName('real')
equals = Suppress(OneOrMore('='))
_stats = {}
def add_stat(s, loc, toks, key=None):
if key is None:
key = toks[1]
_stats[key] = toks[0]
return toks
word = ((integer + 'failed') |
(integer + 'passed') |
(integer + 'deselected') |
(integer + 'warnings')).setParseAction(add_stat)
line = (equals +
Optional(delimitedList(word)) +
'in' +
(real + (Literal('s') | 'seconds'))
.setParseAction(partial(add_stat, key='seconds')) +
equals)
line.searchString(out)
keys = ['failed', 'passed', 'deselected', 'warnings', 'seconds']
stats = {key : _stats.get(key, '0') for key in keys}
ok = stats['failed'] == '0'
logger.info(
(' {failed} failed, {passed} passed, {deselected} deselected,'
' {warnings} warnings in {seconds} seconds').format(**stats)
)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, stats['failed']
else:
return ok
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.parse_args()
fd = open('test_install.log', 'w')
fd.close()
eok = 0
t0 = time.time()
out, err = check_output('python3 sfepy/scripts/blockgen.py')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/cylindergen.py')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/convert_mesh.py meshes/3d/cylinder.vtk out.mesh')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/convert_mesh.py --tile 2,2 meshes/elements/2_4_2.mesh out-per.mesh')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/convert_mesh.py --extract-surface --print-surface=- meshes/various_formats/octahedron.node surf_octahedron.mesh')
eok += report(out, '...', -4, 0, '1185')
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson.py')
eok += report(out, '...', -3, 5, '1.173819e-16', eps=1e-15)
out, err = check_output("""python3 sfepy/scripts/simple.py -c "ebc_2 : {'name' : 't2', 'region' : 'Gamma_Right', 'dofs' : {'t.0' : -5.0}}" sfepy/examples/diffusion/poisson.py""")
eok += report(out, '...', -3, 5, '2.308051e-16', eps=1e-15)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson_iga.py')
eok += report(out, '...', -3, 5, '3.373487e-15', eps=1e-14)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/navier_stokes/stokes.py')
eok += report(out, '...', -3, 5, '1.210678e-13', eps=1e-11)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson_parametric_study.py')
eok += report(out, '...', -3, 5, '1.606408e-14', eps=1e-13)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/linear_elasticity/its2D_3.py')
eok += report(out, '...', -24, 5, '3.964886e-12', eps=1e-11)
eok += report(out, '...', -4, 4, '2.58660e+01', eps=1e-5)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/linear_elasticity/linear_elastic.py --format h5')
eok += report(out, '...', -3, 5, '4.638192e-18', eps=1e-15)
out, err = check_output('python3 sfepy/scripts/extractor.py -d cylinder.h5')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/resview.py --off-screen -o cylinder.png cylinder.h5')
eok += report(out, '...', -2, 1, 'cylinder.png')
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py')
eok += report(out, '...', -9, 0, '2.08545116e+08', match_numbers=True)
eok += report(out, '...', -8, 1, '1.16309223e+11', match_numbers=True)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py --phonon-phase-velocity')
eok += report(out, '...', -2, 0, '4189.41229592', match_numbers=True)
eok += report(out, '...', -2, 1, '2620.55608256', match_numbers=True)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py --phonon-dispersion')
eok += report(out, '...', -6, 1, '[0,')
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps_rigid.py')
eok += report(out, '...', -9, 0, '4.58709531e+07', match_numbers=True)
eok += report(out, '...', -8, 1, '1.13929200e+11', match_numbers=True)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/quantum/hydrogen.py')
eok += report(out, '...', -2, -2, '-0.01913506', eps=1e-4)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/homogenization/perfusion_micro.py')
eok += report2(out, '...', ['computing EpA', 'computing PA_3',
'computing GA', 'computing EmA',
'computing KA'])
out, err = check_output('python3 sfepy/examples/homogenization/rs_correctors.py -n')
eok += report(out, '...', -2, -1, '1.644e-01', match_numbers=True)
out, err = check_output('python3 sfepy/examples/large_deformation/compare_elastic_materials.py -n')
eok += report(out, '...', -3, 5, '1.068759e-14', eps=1e-13)
out, err = check_output('python3 sfepy/examples/linear_elasticity/linear_elastic_interactive.py')
eok += report(out, '...', -16, 0, '1.62128841139e-14', eps=1e-13)
out, err = check_output('python3 sfepy/examples/linear_elasticity/modal_analysis.py')
eok += report(out, '...', -12, 5, '12142.11470773', eps=1e-13)
out, err = check_output('python3 sfepy/examples/multi_physics/thermal_electric.py')
eok += report(out, '...', -4, 5, '2.612933e-14', eps=1e-13)
out, err = check_output('python3 sfepy/examples/diffusion/laplace_refine_interactive.py output')
eok += report(out, '...', -3, 5, '2.675866e-15', eps=1e-13)
out, err = check_output('python3 sfepy/examples/diffusion/laplace_iga_interactive.py -o output')
eok += report(out, '...', -3, 5, '1.028134e-13', eps=1e-12)
out, err = check_output('python3 sfepy/examples/dg/imperative_burgers_1D.py -o output')
eok += report(out, '...', -3, 3, 'moment_1D_limiter')
out, err = check_output('mpiexec -n 2 python3 sfepy/examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --silent -ksp_monitor')
eok += report(out, '...', -2, 4, '8.021313824020e-07', eps=1e-6)
out, err = check_output('mpiexec -n 2 python3 sfepy/examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --silent -ksp_monitor')
eok += report(out, '...', -2, 4, '3.787214380277e-09', eps=1e-7)
t1 = time.time()
out, err = check_output("python3 -c \"import sfepy; sfepy.test('-v', '--disable-warnings')\"")
tok, failed = report_tests(out, return_item=True)
tok = {True : 'ok', False : 'fail'}[tok]
t2 = time.time()
fd = open('test_install_times.log', 'a+')
fd.write('%s: examples: %.2f [s] (%d), tests: %.2f [s] (%s: %s)\n'
% (time.ctime(t0), t1 - t0, eok, t2 - t1, tok, failed))
fd.close()
if __name__ == '__main__':
main()
| """
Run the specified command and capture its outputs.
Returns
-------
out : tuple
The (stdout, stderr) output tuple.
"""
logger.info(cmd)
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = [ii.decode() for ii in p.communicate()]
return out | identifier_body |
test_install.py | #!/usr/bin/env python
"""
Simple script for testing various SfePy functionality, examples not
covered by tests, and running the tests.
The script just runs the commands specified in its main() using the
`subprocess` module, captures the output and compares one or more key
words to the expected ones.
The output of failed commands is saved to 'test_install.log' file.
"""
import time
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import shlex
import subprocess
import logging
import re
DEBUG_FMT = '*' * 55 + '\n%s\n' + '*' * 55
def _get_logger(filename='test_install.log'):
"""
Convenience function to set-up output and logging.
"""
logger = logging.getLogger('test_install.py')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
file_handler = logging.FileHandler(filename)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
logger = _get_logger()
def check_output(cmd):
"""
Run the specified command and capture its outputs.
Returns
-------
out : tuple
The (stdout, stderr) output tuple.
"""
logger.info(cmd)
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = [ii.decode() for ii in p.communicate()]
return out
def report(out, name, line, item, value, eps=None, return_item=False,
match_numbers=False):
"""
Check that `item` at `line` of the output string `out` is equal
to `value`. If not, print the output.
"""
try:
if match_numbers:
status = out.split('\n')[line]
else:
status = out.split('\n')[line].split()
except IndexError:
logger.error(' not enough output from command!')
ok = False
else:
try:
if match_numbers:
pat = '([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?[jJ]?)'
matches = re.findall(pat, status)
status_item = matches[item]
else:
status_item = status[item]
logger.info(' comparing: %s %s', status_item, value)
if eps is None:
ok = (status_item == value)
else:
try:
ok = abs(float(status_item) - float(value)) < eps
except:
ok = False
except IndexError:
ok = False
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, status[item]
else:
return ok
def report2(out, name, items, return_item=False):
"""
Check that `items` are in the output string `out`.
If not, print the output.
"""
ok = True
for s in items:
logger.info(' checking: %s', s)
if s not in out:
ok = False
break
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, s
else:
return ok
def report_tests(out, return_item=False):
"""
Check that all tests in the output string `out` passed.
If not, print the output.
"""
from pyparsing import (Word, Combine, Suppress, Optional, OneOrMore,
delimitedList, nums, Literal)
from functools import partial
integer = Word(nums).setName('integer')
real = Combine(Word(nums) + '.' + Optional(Word(nums))).setName('real')
equals = Suppress(OneOrMore('='))
_stats = {}
def add_stat(s, loc, toks, key=None):
if key is None:
key = toks[1]
_stats[key] = toks[0]
return toks
word = ((integer + 'failed') |
(integer + 'passed') |
(integer + 'deselected') |
(integer + 'warnings')).setParseAction(add_stat)
line = (equals +
Optional(delimitedList(word)) +
'in' +
(real + (Literal('s') | 'seconds'))
.setParseAction(partial(add_stat, key='seconds')) +
equals)
line.searchString(out)
keys = ['failed', 'passed', 'deselected', 'warnings', 'seconds']
stats = {key : _stats.get(key, '0') for key in keys}
ok = stats['failed'] == '0'
logger.info(
(' {failed} failed, {passed} passed, {deselected} deselected,'
' {warnings} warnings in {seconds} seconds').format(**stats)
)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, stats['failed']
else:
return ok
def | ():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.parse_args()
fd = open('test_install.log', 'w')
fd.close()
eok = 0
t0 = time.time()
out, err = check_output('python3 sfepy/scripts/blockgen.py')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/cylindergen.py')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/convert_mesh.py meshes/3d/cylinder.vtk out.mesh')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/convert_mesh.py --tile 2,2 meshes/elements/2_4_2.mesh out-per.mesh')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/convert_mesh.py --extract-surface --print-surface=- meshes/various_formats/octahedron.node surf_octahedron.mesh')
eok += report(out, '...', -4, 0, '1185')
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson.py')
eok += report(out, '...', -3, 5, '1.173819e-16', eps=1e-15)
out, err = check_output("""python3 sfepy/scripts/simple.py -c "ebc_2 : {'name' : 't2', 'region' : 'Gamma_Right', 'dofs' : {'t.0' : -5.0}}" sfepy/examples/diffusion/poisson.py""")
eok += report(out, '...', -3, 5, '2.308051e-16', eps=1e-15)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson_iga.py')
eok += report(out, '...', -3, 5, '3.373487e-15', eps=1e-14)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/navier_stokes/stokes.py')
eok += report(out, '...', -3, 5, '1.210678e-13', eps=1e-11)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson_parametric_study.py')
eok += report(out, '...', -3, 5, '1.606408e-14', eps=1e-13)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/linear_elasticity/its2D_3.py')
eok += report(out, '...', -24, 5, '3.964886e-12', eps=1e-11)
eok += report(out, '...', -4, 4, '2.58660e+01', eps=1e-5)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/linear_elasticity/linear_elastic.py --format h5')
eok += report(out, '...', -3, 5, '4.638192e-18', eps=1e-15)
out, err = check_output('python3 sfepy/scripts/extractor.py -d cylinder.h5')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/resview.py --off-screen -o cylinder.png cylinder.h5')
eok += report(out, '...', -2, 1, 'cylinder.png')
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py')
eok += report(out, '...', -9, 0, '2.08545116e+08', match_numbers=True)
eok += report(out, '...', -8, 1, '1.16309223e+11', match_numbers=True)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py --phonon-phase-velocity')
eok += report(out, '...', -2, 0, '4189.41229592', match_numbers=True)
eok += report(out, '...', -2, 1, '2620.55608256', match_numbers=True)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py --phonon-dispersion')
eok += report(out, '...', -6, 1, '[0,')
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps_rigid.py')
eok += report(out, '...', -9, 0, '4.58709531e+07', match_numbers=True)
eok += report(out, '...', -8, 1, '1.13929200e+11', match_numbers=True)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/quantum/hydrogen.py')
eok += report(out, '...', -2, -2, '-0.01913506', eps=1e-4)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/homogenization/perfusion_micro.py')
eok += report2(out, '...', ['computing EpA', 'computing PA_3',
'computing GA', 'computing EmA',
'computing KA'])
out, err = check_output('python3 sfepy/examples/homogenization/rs_correctors.py -n')
eok += report(out, '...', -2, -1, '1.644e-01', match_numbers=True)
out, err = check_output('python3 sfepy/examples/large_deformation/compare_elastic_materials.py -n')
eok += report(out, '...', -3, 5, '1.068759e-14', eps=1e-13)
out, err = check_output('python3 sfepy/examples/linear_elasticity/linear_elastic_interactive.py')
eok += report(out, '...', -16, 0, '1.62128841139e-14', eps=1e-13)
out, err = check_output('python3 sfepy/examples/linear_elasticity/modal_analysis.py')
eok += report(out, '...', -12, 5, '12142.11470773', eps=1e-13)
out, err = check_output('python3 sfepy/examples/multi_physics/thermal_electric.py')
eok += report(out, '...', -4, 5, '2.612933e-14', eps=1e-13)
out, err = check_output('python3 sfepy/examples/diffusion/laplace_refine_interactive.py output')
eok += report(out, '...', -3, 5, '2.675866e-15', eps=1e-13)
out, err = check_output('python3 sfepy/examples/diffusion/laplace_iga_interactive.py -o output')
eok += report(out, '...', -3, 5, '1.028134e-13', eps=1e-12)
out, err = check_output('python3 sfepy/examples/dg/imperative_burgers_1D.py -o output')
eok += report(out, '...', -3, 3, 'moment_1D_limiter')
out, err = check_output('mpiexec -n 2 python3 sfepy/examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --silent -ksp_monitor')
eok += report(out, '...', -2, 4, '8.021313824020e-07', eps=1e-6)
out, err = check_output('mpiexec -n 2 python3 sfepy/examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --silent -ksp_monitor')
eok += report(out, '...', -2, 4, '3.787214380277e-09', eps=1e-7)
t1 = time.time()
out, err = check_output("python3 -c \"import sfepy; sfepy.test('-v', '--disable-warnings')\"")
tok, failed = report_tests(out, return_item=True)
tok = {True : 'ok', False : 'fail'}[tok]
t2 = time.time()
fd = open('test_install_times.log', 'a+')
fd.write('%s: examples: %.2f [s] (%d), tests: %.2f [s] (%s: %s)\n'
% (time.ctime(t0), t1 - t0, eok, t2 - t1, tok, failed))
fd.close()
if __name__ == '__main__':
main()
| main | identifier_name |
test_install.py | #!/usr/bin/env python
"""
Simple script for testing various SfePy functionality, examples not
covered by tests, and running the tests.
The script just runs the commands specified in its main() using the
`subprocess` module, captures the output and compares one or more key
words to the expected ones.
The output of failed commands is saved to 'test_install.log' file.
"""
import time
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import shlex
import subprocess
import logging
import re
DEBUG_FMT = '*' * 55 + '\n%s\n' + '*' * 55
def _get_logger(filename='test_install.log'):
"""
    Convenience function to set up output and logging.
"""
logger = logging.getLogger('test_install.py')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
file_handler = logging.FileHandler(filename)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
logger = _get_logger()
def check_output(cmd):
"""
Run the specified command and capture its outputs.
Returns
-------
out : tuple
The (stdout, stderr) output tuple.
"""
logger.info(cmd)
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = [ii.decode() for ii in p.communicate()]
return out
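# Usage sketch (illustrative; `python3 --version` is just a stand-in command):
#
#   out, err = check_output('python3 --version')
#
# `out` and `err` hold the decoded stdout and stderr of the finished process.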
def report(out, name, line, item, value, eps=None, return_item=False,
match_numbers=False):
"""
Check that `item` at `line` of the output string `out` is equal
to `value`. If not, print the output.
"""
try:
if match_numbers:
status = out.split('\n')[line]
else:
status = out.split('\n')[line].split()
except IndexError:
logger.error(' not enough output from command!')
ok = False
else:
try:
if match_numbers:
                pat = r'([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?[jJ]?)'
matches = re.findall(pat, status)
status_item = matches[item]
else:
status_item = status[item]
logger.info(' comparing: %s %s', status_item, value)
if eps is None:
|
else:
try:
ok = abs(float(status_item) - float(value)) < eps
                except Exception:
ok = False
except IndexError:
ok = False
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, status[item]
else:
return ok
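# Worked example with hypothetical output (not from any real solver run):
#
#   out = 'line one\nresidual: 1.5e-16 ok\nlast line'
#   report(out, 'demo', -2, 1, '1.5e-16')
#
# picks line -2 ('residual: 1.5e-16 ok'), splits it on whitespace and
# compares token 1 ('1.5e-16') with the expected value; passing `eps`
# switches the comparison from string equality to a numerical tolerance.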
def report2(out, name, items, return_item=False):
"""
Check that `items` are in the output string `out`.
If not, print the output.
"""
ok = True
for s in items:
logger.info(' checking: %s', s)
if s not in out:
ok = False
break
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, s
else:
return ok
def report_tests(out, return_item=False):
"""
Check that all tests in the output string `out` passed.
If not, print the output.
"""
from pyparsing import (Word, Combine, Suppress, Optional, OneOrMore,
delimitedList, nums, Literal)
from functools import partial
integer = Word(nums).setName('integer')
real = Combine(Word(nums) + '.' + Optional(Word(nums))).setName('real')
equals = Suppress(OneOrMore('='))
_stats = {}
def add_stat(s, loc, toks, key=None):
if key is None:
key = toks[1]
_stats[key] = toks[0]
return toks
word = ((integer + 'failed') |
(integer + 'passed') |
(integer + 'deselected') |
(integer + 'warnings')).setParseAction(add_stat)
line = (equals +
Optional(delimitedList(word)) +
'in' +
(real + (Literal('s') | 'seconds'))
.setParseAction(partial(add_stat, key='seconds')) +
equals)
line.searchString(out)
keys = ['failed', 'passed', 'deselected', 'warnings', 'seconds']
stats = {key : _stats.get(key, '0') for key in keys}
ok = stats['failed'] == '0'
logger.info(
(' {failed} failed, {passed} passed, {deselected} deselected,'
' {warnings} warnings in {seconds} seconds').format(**stats)
)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, stats['failed']
else:
return ok
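# Worked example with a hypothetical summary line (the shape the grammar
# above accepts):
#
#   ==== 1 failed, 5 passed, 2 warnings in 12.34s ====
#
# yields stats {'failed': '1', 'passed': '5', 'warnings': '2',
# 'seconds': '12.34'} (missing keys default to '0'), so `ok` is False.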
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.parse_args()
fd = open('test_install.log', 'w')
fd.close()
eok = 0
t0 = time.time()
out, err = check_output('python3 sfepy/scripts/blockgen.py')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/cylindergen.py')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/convert_mesh.py meshes/3d/cylinder.vtk out.mesh')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/convert_mesh.py --tile 2,2 meshes/elements/2_4_2.mesh out-per.mesh')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/convert_mesh.py --extract-surface --print-surface=- meshes/various_formats/octahedron.node surf_octahedron.mesh')
eok += report(out, '...', -4, 0, '1185')
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson.py')
eok += report(out, '...', -3, 5, '1.173819e-16', eps=1e-15)
out, err = check_output("""python3 sfepy/scripts/simple.py -c "ebc_2 : {'name' : 't2', 'region' : 'Gamma_Right', 'dofs' : {'t.0' : -5.0}}" sfepy/examples/diffusion/poisson.py""")
eok += report(out, '...', -3, 5, '2.308051e-16', eps=1e-15)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson_iga.py')
eok += report(out, '...', -3, 5, '3.373487e-15', eps=1e-14)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/navier_stokes/stokes.py')
eok += report(out, '...', -3, 5, '1.210678e-13', eps=1e-11)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/diffusion/poisson_parametric_study.py')
eok += report(out, '...', -3, 5, '1.606408e-14', eps=1e-13)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/linear_elasticity/its2D_3.py')
eok += report(out, '...', -24, 5, '3.964886e-12', eps=1e-11)
eok += report(out, '...', -4, 4, '2.58660e+01', eps=1e-5)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/linear_elasticity/linear_elastic.py --format h5')
eok += report(out, '...', -3, 5, '4.638192e-18', eps=1e-15)
out, err = check_output('python3 sfepy/scripts/extractor.py -d cylinder.h5')
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('python3 sfepy/scripts/resview.py --off-screen -o cylinder.png cylinder.h5')
eok += report(out, '...', -2, 1, 'cylinder.png')
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py')
eok += report(out, '...', -9, 0, '2.08545116e+08', match_numbers=True)
eok += report(out, '...', -8, 1, '1.16309223e+11', match_numbers=True)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py --phonon-phase-velocity')
eok += report(out, '...', -2, 0, '4189.41229592', match_numbers=True)
eok += report(out, '...', -2, 1, '2620.55608256', match_numbers=True)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps.py --phonon-dispersion')
eok += report(out, '...', -6, 1, '[0,')
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/phononic/band_gaps_rigid.py')
eok += report(out, '...', -9, 0, '4.58709531e+07', match_numbers=True)
eok += report(out, '...', -8, 1, '1.13929200e+11', match_numbers=True)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/quantum/hydrogen.py')
eok += report(out, '...', -2, -2, '-0.01913506', eps=1e-4)
out, err = check_output('python3 sfepy/scripts/simple.py sfepy/examples/homogenization/perfusion_micro.py')
eok += report2(out, '...', ['computing EpA', 'computing PA_3',
'computing GA', 'computing EmA',
'computing KA'])
out, err = check_output('python3 sfepy/examples/homogenization/rs_correctors.py -n')
eok += report(out, '...', -2, -1, '1.644e-01', match_numbers=True)
out, err = check_output('python3 sfepy/examples/large_deformation/compare_elastic_materials.py -n')
eok += report(out, '...', -3, 5, '1.068759e-14', eps=1e-13)
out, err = check_output('python3 sfepy/examples/linear_elasticity/linear_elastic_interactive.py')
eok += report(out, '...', -16, 0, '1.62128841139e-14', eps=1e-13)
out, err = check_output('python3 sfepy/examples/linear_elasticity/modal_analysis.py')
eok += report(out, '...', -12, 5, '12142.11470773', eps=1e-13)
out, err = check_output('python3 sfepy/examples/multi_physics/thermal_electric.py')
eok += report(out, '...', -4, 5, '2.612933e-14', eps=1e-13)
out, err = check_output('python3 sfepy/examples/diffusion/laplace_refine_interactive.py output')
eok += report(out, '...', -3, 5, '2.675866e-15', eps=1e-13)
out, err = check_output('python3 sfepy/examples/diffusion/laplace_iga_interactive.py -o output')
eok += report(out, '...', -3, 5, '1.028134e-13', eps=1e-12)
out, err = check_output('python3 sfepy/examples/dg/imperative_burgers_1D.py -o output')
eok += report(out, '...', -3, 3, 'moment_1D_limiter')
out, err = check_output('mpiexec -n 2 python3 sfepy/examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --silent -ksp_monitor')
eok += report(out, '...', -2, 4, '8.021313824020e-07', eps=1e-6)
out, err = check_output('mpiexec -n 2 python3 sfepy/examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --silent -ksp_monitor')
eok += report(out, '...', -2, 4, '3.787214380277e-09', eps=1e-7)
t1 = time.time()
out, err = check_output("python3 -c \"import sfepy; sfepy.test('-v', '--disable-warnings')\"")
tok, failed = report_tests(out, return_item=True)
tok = {True : 'ok', False : 'fail'}[tok]
t2 = time.time()
fd = open('test_install_times.log', 'a+')
fd.write('%s: examples: %.2f [s] (%d), tests: %.2f [s] (%s: %s)\n'
% (time.ctime(t0), t1 - t0, eok, t2 - t1, tok, failed))
fd.close()
if __name__ == '__main__':
main()
| ok = (status_item == value) | conditional_block |
Triangle.js | import { Vector3 } from './Vector3.js';
import { Plane } from './Plane.js';
const _v0 = /*@__PURE__*/ new Vector3();
const _v1 = /*@__PURE__*/ new Vector3();
const _v2 = /*@__PURE__*/ new Vector3();
const _v3 = /*@__PURE__*/ new Vector3();
const _vab = /*@__PURE__*/ new Vector3();
const _vac = /*@__PURE__*/ new Vector3();
const _vbc = /*@__PURE__*/ new Vector3();
const _vap = /*@__PURE__*/ new Vector3();
const _vbp = /*@__PURE__*/ new Vector3();
const _vcp = /*@__PURE__*/ new Vector3();
/**
 * Class overview:
 * Triangle describes a flat triangle in 3D space.
 * A triangle is represented by three 3D points.
 * Note: the same three points given in a different order count as a different triangle,
 * and the order in which a, b and c are passed also determines the direction of the face normal.
 */
/** Triangle */
class Triangle {
	/**
	 * Constructor
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 */
constructor( a = new Vector3(), b = new Vector3(), c = new Vector3() ) {
this.a = a;
this.b = b;
this.c = c;
}
	/**
	 * Given three non-collinear points on a face, compute the face normal.
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 * Note: do not pass three collinear points.
	 * Whether the normal comes out positive or negative depends on the order in which a, b and c are passed.
	 */
static getNormal( a, b, c, target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getNormal() target is now required' );
target = new Vector3();
}
		// Use the cross product to compute the normal of this face.
target.subVectors( c, b );
_v0.subVectors( a, b );
target.cross( _v0 );
const targetLengthSq = target.lengthSq();
if ( targetLengthSq > 0 ) {
			// Normalize the result.
return target.multiplyScalar( 1 / Math.sqrt( targetLengthSq ) );
}
return target.set( 0, 0, 0 );
}
	/**
	 * Static/instance method for computing barycentric coordinates.
	 * @param {*} point the point of interest
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 *
	 * Algorithm reference: http://www.blackpawn.com/texts/pointinpoly/default.html
	 * Note that this function does not compute the centroid of a triangle; it uses barycentric coordinates to express the position of an arbitrary point in the plane,
	 * which makes it easy to test whether a point lies inside the triangle.
	 */
static getBarycoord( point, a, b, c, target ) {
const ca = new Vector3();
const ba = new Vector3();
const pa = new Vector3();
ca.subVectors( c, a );
ba.subVectors( b, a );
pa.subVectors( point, a );
const dot00 = ca.dot( ca );
const dot01 = ca.dot( ba );
const dot02 = ca.dot( pa );
const dot11 = ba.dot( ba );
const dot12 = ba.dot( pa );
const denom = ( dot00 * dot11 - dot01 * dot01 );
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getBarycoord() target is now required' );
target = new Vector3();
}
		// The three points are collinear, or the triangle is degenerate.
		// TODO: why does a degenerate triangle give 0 here?
		if ( denom === 0 ) {
			// Treat any point as lying outside the triangle.
			// Not sure if this is the best return value; perhaps undefined should be returned.
return target.set( - 2, - 1, - 1 );
}
const invDenom = 1 / denom;
const u = ( dot11 * dot02 - dot01 * dot12 ) * invDenom;
const v = ( dot00 * dot12 - dot01 * dot02 ) * invDenom;
		// The x, y and z of the barycentric coordinates must sum to 1.
return target.set( 1 - u - v, v, u );
}
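	// Usage sketch (illustrative): for the unit right triangle a=(0,0,0),
	// b=(1,0,0), c=(0,1,0) and the point p=(0.25,0.25,0),
	// Triangle.getBarycoord(p, a, b, c, target) sets target to
	// (0.5, 0.25, 0.25): the weights of a, b and c sum to 1 and are all
	// non-negative, so p lies inside the triangle.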
	/**
	 * Test whether a triangle contains a given point.
	 * @param {*} point the point of interest
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @returns
	 */
static containsPoint( point, a, b, c ) {
this.getBarycoord( point, a, b, c, _v3 );
return ( _v3.x >= 0 ) && ( _v3.y >= 0 ) && ( ( _v3.x + _v3.y ) <= 1 );
}
	/**
	 * UV interpolation: given the UVs of three points, interpolate the UV of an arbitrary point.
	 * @param {*} point position of the point whose UV is interpolated
	 * @param {*} p1 position of vertex 1
	 * @param {*} p2 position of vertex 2
	 * @param {*} p3 position of vertex 3
	 * @param {*} uv1 UV of vertex 1
	 * @param {*} uv2 UV of vertex 2
	 * @param {*} uv3 UV of vertex 3
	 * @param {*} target the result is copied into this Vector2
	 * @returns
	 */
static getUV( point, p1, p2, p3, uv1, uv2, uv3, target ) {
this.getBarycoord( point, p1, p2, p3, _v3 );
target.set( 0, 0 );
target.addScaledVector( uv1, _v3.x );
target.addScaledVector( uv2, _v3.y );
target.addScaledVector( uv3, _v3.z );
return target;
}
	/**
	 * Test whether a given direction faces the normal of the triangle formed by the given three points.
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} direction
	 * @returns
	 */
static isFrontFacing( a, b, c, direction ) {
_v0.subVectors( c, b );
_v1.subVectors( a, b );
// strictly front facing
		return _v0.cross( _v1 ).dot( direction ) < 0;
}
	/**
	 * Set the three vertices of the triangle.
	 * @param {*} a
	 * @param {*} b
	 * @param {*} c
	 * @returns
	 */
set( a, b, c ) {
this.a.copy( a );
this.b.copy( b );
this.c.copy( c );
return this;
}
	/**
	 * Set the triangle's vertices to coordinates from an array.
	 * @param {*} points vertex array
	 * @param {*} i0 index of the first point
	 * @param {*} i1 index of the second point
	 * @param {*} i2 index of the third point
	 * @returns
	 */
setFromPointsAndIndices( points, i0, i1, i2 ) {
this.a.copy( points[ i0 ] );
this.b.copy( points[ i1 ] );
this.c.copy( points[ i2 ] );
return this;
}
	/**
	 * Return a copy of this triangle.
	 * @returns
	 */
clone() {
return new this.constructor().copy( this );
}
	/**
	 * Set this triangle to be a copy of the given triangle.
	 * @param {*} triangle
	 * @returns
	 */
copy( triangle ) {
this.a.copy( triangle.a );
this.b.copy( triangle.b );
this.c.copy( triangle.c );
return this;
}
	/**
	 * Compute the area of the triangle.
	 * @returns
	 */
getArea() {
_v0.subVectors( this.c, this.b );
_v1.subVectors( this.a, this.b );
return _v0.cross( _v1 ).length() * 0.5;
}
	/**
	 * Compute the midpoint (centroid) of the triangle.
	 */
getMidpoint( target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getMidpoint() target is now required' );
target = new Vector3();
}
return target.addVectors( this.a, this.b ).add( this.c ).multiplyScalar( 1 / 3 );
}
	/**
	 * Get the normal of the triangle face.
	 * @param {*} target
	 * @returns
	 */
getNormal( target ) {
return Triangle.getNormal( this.a, this.b, this.c, target );
}
	/**
	 * Get the plane on which this triangle lies.
	 * @param {*} target
	 * @returns
	 */
getPlane( target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getPlane() target is now required' );
target = new Plane();
}
		// Three non-collinear points in space determine a plane.
return target.setFromCoplanarPoints( this.a, this.b, this.c );
}
	/**
	 * Compute the barycentric coordinates of a given point.
	 * @param {*} point the point of interest
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 */
getBarycoord( point, target ) {
return Triangle.getBarycoord( point, this.a, this.b, this.c, target );
}
	/**
	 * UV interpolation: given the UVs of this triangle's three vertices, interpolate the UV of an arbitrary point.
	 * @param {*} point position of the point whose UV is interpolated
	 * @param {*} uv1 UV of vertex 1
	 * @param {*} uv2 UV of vertex 2
	 * @param {*} uv3 UV of vertex 3
	 * @param {*} target the result is copied into this Vector2
	 * @returns
	 */
getUV( point, uv1, uv2, uv3, target ) {
return Triangle.getUV( point, this.a, this.b, this.c, uv1, uv2, uv3, target );
}
	/**
	 * Test whether the given point lies inside this triangle.
	 * @param {*} point
	 * @returns
	 */
containsPoint( point ) {
return Triangle.containsPoint( point, this.a, this.b, this.c );
}
	/**
	 * Test whether a given direction faces this triangle's normal.
	 * @param {*} direction
	 * @returns
	 */
isFrontFacing( direction ) {
return Triangle.isFrontFacing( this.a, this.b, this.c, direction );
}
	/**
	 * Test whether this triangle intersects the given box.
	 * @param {*} box
	 * @returns
	 */
intersectsBox( box ) {
return box.intersectsTriangle( this );
}
	/**
	 * Return the point on the triangle closest to the given point.
	 * @param {*} p the point of interest
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 */
closestPointToPoint( p, target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .closestPointToPoint() target is now required' );
target = new Vector3();
}
const a = this.a, b = this.b, c = this.c;
let v, w;
		// Algorithm from "Real-Time Collision Detection" by Christer Ericson,
		// published by Morgan Kaufmann, (c) 2005 Elsevier Inc.,
		// reproduced under license. See section 5.1.5 for details.
		// Essentially, we want to find which Voronoi region the given point p lies in, with a minimum of redundant computation.
_vab.subVectors( b, a );
_vac.subVectors( c, a );
_vap.subVectors( p, a );
const d1 = _vab.dot( _vap );
const d2 = _vac.dot( _vap );
if ( d1 <= 0 && d2 <= 0 ) {
			// vertex region of A; barycentric coords (1, 0, 0)
return target.copy( a );
}
_vbp.subVectors( p, b );
const d3 = _vab.dot( _vbp );
const d4 = _vac.dot( _vbp );
if ( d3 >= 0 && d4 <= d3 ) {
			// vertex region of B; barycentric coords (0, 1, 0)
return target.copy( b );
}
const vc = d1 * d4 - d3 * d2;
if ( vc <= 0 && d1 >= 0 && d3 <= 0 ) {
v = d1 / ( d1 - d3 );
			// edge region of AB; barycentric coords (1-v, v, 0)
return target.copy( a ).addScaledVector( _vab, v );
}
_vcp.subVectors( p, c );
const d5 = _vab.dot( _vcp );
const d6 = _vac.dot( _vcp );
if ( d6 >= 0 && d5 <= d6 ) {
			// vertex region of C; barycentric coords (0, 0, 1)
return target.copy( c );
}
const vb = d5 * d2 - d1 * d6;
if ( vb <= 0 && d2 >= 0 && d6 <= 0 ) {
w = d2 / ( d2 - d6 );
			// edge region of AC; barycentric coords (1-w, 0, w)
return target.copy( a ).addScaledVector( _vac, w );
}
const va = d3 * d6 - d5 * d4;
if ( va <= 0 && ( d4 - d3 ) >= 0 && ( d5 - d6 ) >= 0 ) {
_vbc.subVectors( c, b );
w = ( d4 - d3 ) / ( ( d4 - d3 ) + ( d5 - d6 ) );
			// edge region of BC; barycentric coords (0, 1-w, w)
return target.copy( b ).addScaledVector( _vbc, w ); // edge region of BC
}
		// face region
const denom = 1 / ( va + vb + vc );
// u = va * denom
v = vb * denom;
w = vc * denom;
return target.copy( a ).addScaledVector( _vab, v ).addScaledVector( _vac, w );
}
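	// Usage sketch (illustrative): for the triangle a=(0,0,0), b=(1,0,0),
	// c=(0,1,0), the query point p=(2,0,0) falls in the Voronoi region of
	// vertex B, so closestPointToPoint(p, target) yields (1,0,0), while
	// p=(0.5,-1,0) projects onto edge AB and yields (0.5,0,0).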
	/**
	 * Test whether this triangle equals the given triangle.
	 * @param {*} triangle
	 * @returns
	 */
equals( triangle ) {
return triangle.a.equals( this.a ) && triangle.b.equals( this.b ) && triangle.c.equals( this.c );
}
}
export { Triangle };
| identifier_body |
||
Triangle.js | import { Vector3 } from './Vector3.js';
import { Plane } from './Plane.js';
const _v0 = /*@__PURE__*/ new Vector3();
const _v1 = /*@__PURE__*/ new Vector3();
const _v2 = /*@__PURE__*/ new Vector3();
const _v3 = /*@__PURE__*/ new Vector3();
const _vab = /*@__PURE__*/ new Vector3();
const _vac = /*@__PURE__*/ new Vector3();
const _vbc = /*@__PURE__*/ new Vector3();
const _vap = /*@__PURE__*/ new Vector3();
const _vbp = /*@__PURE__*/ new Vector3();
const _vcp = /*@__PURE__*/ new Vector3();
/**
 * Class overview:
 * Triangle describes a flat triangle in 3D space.
 * A triangle is represented by three 3D points.
 * Note: the same three points given in a different order count as a different triangle,
 * and the order in which a, b and c are passed also determines the direction of the face normal.
 */
/** Triangle */
class Triangle {
	/**
	 * Constructor
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 */
constructor( a = new Vector3(), b = new Vector3(), c = new Vector3() ) {
this.a = a;
this.b = b;
this.c = c;
}
	/**
	 * Given three non-collinear points on a face, compute the face normal.
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 * Note: do not pass three collinear points.
	 * Whether the normal comes out positive or negative depends on the order in which a, b and c are passed.
	 */
static getNormal( a, b, c, target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getNormal() target is now required' );
target = new Vector3();
}
		// Use the cross product to compute the normal of this face.
target.subVectors( c, b );
_v0.subVectors( a, b );
target.cross( _v0 );
const targetLengthSq = target.lengthSq();
if ( targetLengthSq > 0 ) {
			// Normalize the result.
return target.multiplyScalar( 1 / Math.sqrt( targetLengthSq ) );
}
return target.set( 0, 0, 0 );
}
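	// Usage sketch (illustrative): for a=(0,0,0), b=(1,0,0), c=(0,1,0),
	// Triangle.getNormal(a, b, c, target) computes (c-b) x (a-b) and
	// normalizes it, setting target to the unit normal (0,0,1) for this
	// counter-clockwise winding.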
	/**
	 * Static/instance method for computing barycentric coordinates.
	 * @param {*} point the point of interest
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 *
	 * Algorithm reference: http://www.blackpawn.com/texts/pointinpoly/default.html
	 * Note that this function does not compute the centroid of a triangle; it uses barycentric coordinates to express the position of an arbitrary point in the plane,
	 * which makes it easy to test whether a point lies inside the triangle.
	 */
static getBarycoord( point, a, b, c, target ) {
const ca = new Vector3();
const ba = new Vector3();
const pa = new Vector3();
ca.subVectors( c, a );
ba.subVectors( b, a );
pa.subVectors( point, a );
const dot00 = ca.dot( ca );
const dot01 = ca.dot( ba );
const dot02 = ca.dot( pa );
const dot11 = ba.dot( ba );
const dot12 = ba.dot( pa );
const denom = ( dot00 * dot11 - dot01 * dot01 );
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getBarycoord() target is now required' );
target = new Vector3();
}
		// The three points are collinear, or the triangle is degenerate.
		// TODO: why does a degenerate triangle give 0 here?
		if ( denom === 0 ) {
			// Treat any point as lying outside the triangle.
			// Not sure if this is the best return value; perhaps undefined should be returned.
return target.set( - 2, - 1, - 1 );
}
const invDenom = 1 / denom;
const u = ( dot11 * dot02 - dot01 * dot12 ) * invDenom;
const v = ( dot00 * dot12 - dot01 * dot02 ) * invDenom;
		// The x, y and z of the barycentric coordinates must sum to 1.
return target.set( 1 - u - v, v, u );
}
	/**
	 * Test whether a triangle contains a given point.
	 * @param {*} point the point of interest
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @returns
	 */
static containsPoint( point, a, b, c ) {
this.getBarycoord( point, a, b, c, _v3 );
return ( _v3.x >= 0 ) && ( _v3.y >= 0 ) && ( ( _v3.x + _v3.y ) <= 1 );
}
	/**
	 * UV interpolation: given the UVs of three points, interpolate the UV of an arbitrary point.
	 * @param {*} point position of the point whose UV is interpolated
	 * @param {*} p1 position of vertex 1
	 * @param {*} p2 position of vertex 2
	 * @param {*} p3 position of vertex 3
	 * @param {*} uv1 UV of vertex 1
	 * @param {*} uv2 UV of vertex 2
	 * @param {*} uv3 UV of vertex 3
	 * @param {*} target the result is copied into this Vector2
	 * @returns
	 */
static getUV( point, p1, p2, p3, uv1, uv2, uv3, target ) {
this.getBarycoord( point, p1, p2, p3, _v3 );
target.set( 0, 0 );
target.addScaledVector( uv1, _v3.x );
target.addScaledVector( uv2, _v3.y );
target.addScaledVector( uv3, _v3.z );
return target;
}
	/**
	 * Test whether a given direction faces the normal of the triangle formed by the given three points.
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} direction
	 * @returns
	 */
static isFrontFacing( a, b, c, direction ) {
_v0.subVectors( c, b );
_v1.subVectors( a, b );
// strictly front facing
		return _v0.cross( _v1 ).dot( direction ) < 0;
}
	/**
	 * Set the three vertices of the triangle.
	 * @param {*} a
	 * @param {*} b
	 * @param {*} c
	 * @returns
	 */
set( a, b, c ) {
this.a.copy( a );
this.b.copy( b );
this.c.copy( c );
return this;
}
	/**
	 * Set the triangle's vertices to coordinates from an array.
	 * @param {*} points vertex array
	 * @param {*} i0 index of the first point
	 * @param {*} i1 index of the second point
	 * @param {*} i2 index of the third point
	 * @returns
	 */
setFromPointsAndIndices( points, i0, i1, i2 ) {
this.a.copy( points[ i0 ] );
this.b.copy( points[ i1 ] );
this.c.copy( points[ i2 ] );
return this;
}
	/**
	 * Return a copy of this triangle.
	 * @returns
	 */
clone() {
return new this.constructor().copy( this );
}
	/**
	 * Set this triangle to be a copy of the given triangle.
	 * @param {*} triangle
	 * @returns
	 */
copy( triangle ) {
this.a.copy( triangle.a );
this.b.copy( triangle.b );
this.c.copy( triangle.c );
return this;
}
	/**
	 * Compute the area of the triangle.
	 * @returns
	 */
getArea() {
_v0.subVectors( this.c, this.b );
_v1.subVectors( this.a, this.b );
return _v0.cross( _v1 ).length() * 0.5;
}
	/**
	 * Compute the midpoint (centroid) of the triangle.
	 */
getMidpoint( target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getMidpoint() target is now required' );
target = new Vector3();
}
return target.addVectors( this.a, this.b ).add( this.c ).multiplyScalar( 1 / 3 );
}
	/**
	 * Get the normal of the triangle face.
	 * @param {*} target
	 * @returns
	 */
getNormal( target ) {
return Triangle.getNormal( this.a, this.b, this.c, target );
}
/** | * @returns
*/
getPlane( target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getPlane() target is now required' );
target = new Plane();
}
		// Three non-collinear points in space determine a plane.
return target.setFromCoplanarPoints( this.a, this.b, this.c );
}
	/**
	 * Compute the barycentric coordinates of a given point.
	 * @param {*} point the point of interest
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 */
getBarycoord( point, target ) {
return Triangle.getBarycoord( point, this.a, this.b, this.c, target );
}
	/**
	 * UV interpolation: given the UVs of this triangle's three vertices, interpolate the UV of an arbitrary point.
	 * @param {*} point position of the point whose UV is interpolated
	 * @param {*} uv1 UV of vertex 1
	 * @param {*} uv2 UV of vertex 2
	 * @param {*} uv3 UV of vertex 3
	 * @param {*} target the result is copied into this Vector2
	 * @returns
	 */
getUV( point, uv1, uv2, uv3, target ) {
return Triangle.getUV( point, this.a, this.b, this.c, uv1, uv2, uv3, target );
}
	/**
	 * Test whether the given point lies inside this triangle.
	 * @param {*} point
	 * @returns
	 */
containsPoint( point ) {
return Triangle.containsPoint( point, this.a, this.b, this.c );
}
	/**
	 * Test whether a given direction faces this triangle's normal.
	 * @param {*} direction
	 * @returns
	 */
isFrontFacing( direction ) {
return Triangle.isFrontFacing( this.a, this.b, this.c, direction );
}
	/**
	 * Test whether this triangle intersects the given box.
	 * @param {*} box
	 * @returns
	 */
intersectsBox( box ) {
return box.intersectsTriangle( this );
}
	/**
	 * Return the point on the triangle closest to the given point.
	 * @param {*} p the point of interest
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 */
closestPointToPoint( p, target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .closestPointToPoint() target is now required' );
target = new Vector3();
}
const a = this.a, b = this.b, c = this.c;
let v, w;
		// Algorithm from "Real-Time Collision Detection" by Christer Ericson,
		// published by Morgan Kaufmann, (c) 2005 Elsevier Inc.,
		// reproduced under license. See section 5.1.5 for details.
		// Essentially, we want to find which Voronoi region the given point p lies in, with a minimum of redundant computation.
_vab.subVectors( b, a );
_vac.subVectors( c, a );
_vap.subVectors( p, a );
const d1 = _vab.dot( _vap );
const d2 = _vac.dot( _vap );
if ( d1 <= 0 && d2 <= 0 ) {
			// vertex region of A; barycentric coords (1, 0, 0)
return target.copy( a );
}
_vbp.subVectors( p, b );
const d3 = _vab.dot( _vbp );
const d4 = _vac.dot( _vbp );
if ( d3 >= 0 && d4 <= d3 ) {
			// vertex region of B; barycentric coords (0, 1, 0)
return target.copy( b );
}
const vc = d1 * d4 - d3 * d2;
if ( vc <= 0 && d1 >= 0 && d3 <= 0 ) {
v = d1 / ( d1 - d3 );
			// edge region of AB; barycentric coords (1-v, v, 0)
return target.copy( a ).addScaledVector( _vab, v );
}
_vcp.subVectors( p, c );
const d5 = _vab.dot( _vcp );
const d6 = _vac.dot( _vcp );
if ( d6 >= 0 && d5 <= d6 ) {
			// vertex region of C; barycentric coords (0, 0, 1)
return target.copy( c );
}
const vb = d5 * d2 - d1 * d6;
if ( vb <= 0 && d2 >= 0 && d6 <= 0 ) {
w = d2 / ( d2 - d6 );
			// edge region of AC; barycentric coords (1-w, 0, w)
return target.copy( a ).addScaledVector( _vac, w );
}
const va = d3 * d6 - d5 * d4;
if ( va <= 0 && ( d4 - d3 ) >= 0 && ( d5 - d6 ) >= 0 ) {
_vbc.subVectors( c, b );
w = ( d4 - d3 ) / ( ( d4 - d3 ) + ( d5 - d6 ) );
			// edge region of BC; barycentric coords (0, 1-w, w)
return target.copy( b ).addScaledVector( _vbc, w ); // edge region of BC
}
		// face region
const denom = 1 / ( va + vb + vc );
// u = va * denom
v = vb * denom;
w = vc * denom;
return target.copy( a ).addScaledVector( _vab, v ).addScaledVector( _vac, w );
}
	/**
	 * Test whether this triangle equals the given triangle.
	 * @param {*} triangle
	 * @returns
	 */
equals( triangle ) {
return triangle.a.equals( this.a ) && triangle.b.equals( this.b ) && triangle.c.equals( this.c );
}
}
export { Triangle }; | * Get the plane on which this triangle lies.
* @param {*} target | random_line_split |
Triangle.js | import { Vector3 } from './Vector3.js';
import { Plane } from './Plane.js';
const _v0 = /*@__PURE__*/ new Vector3();
const _v1 = /*@__PURE__*/ new Vector3();
const _v2 = /*@__PURE__*/ new Vector3();
const _v3 = /*@__PURE__*/ new Vector3();
const _vab = /*@__PURE__*/ new Vector3();
const _vac = /*@__PURE__*/ new Vector3();
const _vbc = /*@__PURE__*/ new Vector3();
const _vap = /*@__PURE__*/ new Vector3();
const _vbp = /*@__PURE__*/ new Vector3();
const _vcp = /*@__PURE__*/ new Vector3();
/**
 * Class overview:
 * Triangle describes a flat triangle in 3D space.
 * A triangle is represented by three 3D points.
 * Note: the same three points given in a different order count as a different triangle,
 * and the order in which a, b and c are passed also determines the direction of the face normal.
 */
/** Triangle */
class Triangle {
	/**
	 * Constructor
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 */
constructor( a = new Vector3(), b = new Vector3(), c = new Vector3() ) {
this.a = a;
this.b = b;
this.c = c;
}
	/**
	 * Given three non-collinear points on a face, compute the face normal.
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 * Note: do not pass three collinear points.
	 * Whether the normal comes out positive or negative depends on the order in which a, b and c are passed.
	 */
static getNormal( a, b, c, target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getNormal() target is now required' );
target = new Vector3();
}
		// Use the cross product to compute the normal of this face.
target.subVectors( c, b );
_v0.subVectors( a, b );
target.cross( _v0 );
const targetLengthSq = target.lengthSq();
if ( targetLengthSq > 0 ) {
			// Normalize the result.
return target.multiplyScalar( 1 / Math.sqrt( targetLengthSq ) );
}
return target.set( 0, 0, 0 );
}
	/**
	 * Static/instance method for computing barycentric coordinates.
	 * @param {*} point the point of interest
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 *
	 * Algorithm reference: http://www.blackpawn.com/texts/pointinpoly/default.html
	 * Note that this function does not compute the centroid of a triangle; it uses barycentric coordinates to express the position of an arbitrary point in the plane,
	 * which makes it easy to test whether a point lies inside the triangle.
	 */
static getBarycoord( point, a, b, c, target ) {
const ca = new Vector3();
const ba = new Vector3();
const pa = new Vector3();
ca.subVectors( c, a );
ba.subVectors( b, a );
pa.subVectors( point, a );
const dot00 = ca.dot( ca );
const dot01 = ca.dot( ba );
const dot02 = ca.dot( pa );
const dot11 = ba.dot( ba );
const dot12 = ba.dot( pa );
const denom = ( dot00 * dot11 - dot01 * dot01 );
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getBarycoord() target is now required' );
target = new Vector3();
}
		// The three points are collinear, or the triangle is degenerate.
		// TODO: why does a degenerate triangle give 0 here?
		if ( denom === 0 ) {
			// Treat any point as lying outside the triangle.
			// Not sure if this is the best return value; perhaps undefined should be returned.
return target.set( - 2, - 1, - 1 );
}
const invDenom = 1 / denom;
const u = ( dot11 * dot02 - dot01 * dot12 ) * invDenom;
const v = ( dot00 * dot12 - dot01 * dot02 ) * invDenom;
		// The x, y and z of the barycentric coordinates must sum to 1.
return target.set( 1 - u - v, v, u );
}
	/**
	 * Test whether a triangle contains a given point.
	 * @param {*} point the point of interest
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @returns
	 */
static containsPoint( point, a, b, c ) {
this.getBarycoord( point, a, b, c, _v3 );
return ( _v3.x >= 0 ) && ( _v3.y >= 0 ) && ( ( _v3.x + _v3.y ) <= 1 );
}
	/**
	 * UV interpolation: given the UVs of three points, interpolate the UV of an arbitrary point.
	 * @param {*} point position of the point whose UV is interpolated
	 * @param {*} p1 position of vertex 1
	 * @param {*} p2 position of vertex 2
	 * @param {*} p3 position of vertex 3
	 * @param {*} uv1 UV of vertex 1
	 * @param {*} uv2 UV of vertex 2
	 * @param {*} uv3 UV of vertex 3
	 * @param {*} target the result is copied into this Vector2
	 * @returns
	 */
static getUV( point, p1, p2, p3, uv1, uv2, uv3, target ) {
this.getBarycoord( point, p1, p2, p3, _v3 );
target.set( 0, 0 );
target.addScaledVector( uv1, _v3.x );
target.addScaledVector( uv2, _v3.y );
target.addScaledVector( uv3, _v3.z );
return target;
}
	/**
	 * Test whether a given direction faces the normal of the triangle formed by the given three points.
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} direction
	 * @returns
	 */
static isFrontFacing( a, b, c, direction ) {
_v0.subVectors( c, b );
_v1.subVectors( a, b );
// strictly front facing
		return _v0.cross( _v1 ).dot( direction ) < 0;
}
	/**
	 * Set the three vertices of the triangle.
	 * @param {*} a
	 * @param {*} b
	 * @param {*} c
	 * @returns
	 */
set( a, b, c ) {
this.a.copy( a );
this.b.copy( b );
this.c.copy( c );
return this;
}
	/**
	 * Set the triangle's vertices to coordinates from an array.
	 * @param {*} points vertex array
	 * @param {*} i0 index of the first point
	 * @param {*} i1 index of the second point
	 * @param {*} i2 index of the third point
	 * @returns
	 */
setFromPointsAndIndices( points, i0, i1, i2 ) {
this.a.copy( points[ i0 ] );
this.b.copy( points[ i1 ] );
this.c.copy( points[ i2 ] );
return this;
}
	/**
	 * Return a copy of this triangle.
	 * @returns
	 */
clone() {
return new this.constructor().copy( this );
}
	/**
	 * Set this triangle to be a copy of the given triangle.
	 * @param {*} triangle
	 * @returns
	 */
copy( triangle ) {
this.a.copy( triangle.a );
this.b.copy( triangle.b );
this.c.copy( triangle.c );
return this;
}
	/**
	 * Compute the area of the triangle.
	 * @returns
	 */
getArea() {
_v0.subVectors( this.c, this.b );
_v1.subVectors( this.a, this.b );
return _v0.cross( _v1 ).length() * 0.5;
}
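	// Worked example (illustrative): for a=(0,0,0), b=(1,0,0), c=(0,1,0)
	// the cross product (c-b) x (a-b) is (0,0,1) with length 1, so
	// getArea() returns 0.5, the area of the unit right triangle.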
	/**
	 * Compute the midpoint (centroid) of the triangle.
	 */
getMidpoint( target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getMidpoint() target is now required' );
target = new Vector3();
}
return targ | addVectors( this.a, this.b ).add( this.c ).multiplyScalar( 1 / 3 );
}
	/**
	 * Get the normal of the triangle face.
	 * @param {*} target
	 * @returns
	 */
getNormal( target ) {
return Triangle.getNormal( this.a, this.b, this.c, target );
}
	/**
	 * Get the plane on which this triangle lies.
	 * @param {*} target
	 * @returns
	 */
getPlane( target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getPlane() target is now required' );
target = new Plane();
}
		// Three non-collinear points in space determine a plane.
return target.setFromCoplanarPoints( this.a, this.b, this.c );
}
	/**
	 * Compute the barycentric coordinates of a given point.
	 * @param {*} point the point of interest
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 */
getBarycoord( point, target ) {
return Triangle.getBarycoord( point, this.a, this.b, this.c, target );
}
	/**
	 * UV interpolation: given the UVs of this triangle's three vertices, interpolate the UV of an arbitrary point.
	 * @param {*} point position of the point whose UV is interpolated
	 * @param {*} uv1 UV of vertex 1
	 * @param {*} uv2 UV of vertex 2
	 * @param {*} uv3 UV of vertex 3
	 * @param {*} target the result is copied into this Vector2
	 * @returns
	 */
getUV( point, uv1, uv2, uv3, target ) {
return Triangle.getUV( point, this.a, this.b, this.c, uv1, uv2, uv3, target );
}
	/**
	 * Test whether the given point lies inside this triangle.
	 * @param {*} point
	 * @returns
	 */
containsPoint( point ) {
return Triangle.containsPoint( point, this.a, this.b, this.c );
}
	/**
	 * Test whether a given direction faces this triangle's normal.
	 * @param {*} direction
	 * @returns
	 */
isFrontFacing( direction ) {
return Triangle.isFrontFacing( this.a, this.b, this.c, direction );
}
	/**
	 * Test whether this triangle intersects the given box.
	 * @param {*} box
	 * @returns
	 */
intersectsBox( box ) {
return box.intersectsTriangle( this );
}
	/**
	 * Return the point on the triangle closest to the given point.
	 * @param {*} p the point of interest
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 */
closestPointToPoint( p, target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .closestPointToPoint() target is now required' );
target = new Vector3();
}
const a = this.a, b = this.b, c = this.c;
let v, w;
		// Algorithm from "Real-Time Collision Detection" by Christer Ericson,
		// published by Morgan Kaufmann, (c) 2005 Elsevier Inc.,
		// reproduced under license. See section 5.1.5 for details.
		// Essentially, we want to find which Voronoi region the given point p lies in, with a minimum of redundant computation.
_vab.subVectors( b, a );
_vac.subVectors( c, a );
_vap.subVectors( p, a );
const d1 = _vab.dot( _vap );
const d2 = _vac.dot( _vap );
if ( d1 <= 0 && d2 <= 0 ) {
			// vertex region of A; barycentric coords (1, 0, 0)
return target.copy( a );
}
_vbp.subVectors( p, b );
const d3 = _vab.dot( _vbp );
const d4 = _vac.dot( _vbp );
if ( d3 >= 0 && d4 <= d3 ) {
			// vertex region of B; barycentric coords (0, 1, 0)
return target.copy( b );
}
const vc = d1 * d4 - d3 * d2;
if ( vc <= 0 && d1 >= 0 && d3 <= 0 ) {
v = d1 / ( d1 - d3 );
			// edge region of AB; barycentric coords (1-v, v, 0)
return target.copy( a ).addScaledVector( _vab, v );
}
_vcp.subVectors( p, c );
const d5 = _vab.dot( _vcp );
const d6 = _vac.dot( _vcp );
if ( d6 >= 0 && d5 <= d6 ) {
			// vertex region of C; barycentric coords (0, 0, 1)
return target.copy( c );
}
const vb = d5 * d2 - d1 * d6;
if ( vb <= 0 && d2 >= 0 && d6 <= 0 ) {
w = d2 / ( d2 - d6 );
			// edge region of AC; barycentric coords (1-w, 0, w)
return target.copy( a ).addScaledVector( _vac, w );
}
const va = d3 * d6 - d5 * d4;
if ( va <= 0 && ( d4 - d3 ) >= 0 && ( d5 - d6 ) >= 0 ) {
_vbc.subVectors( c, b );
w = ( d4 - d3 ) / ( ( d4 - d3 ) + ( d5 - d6 ) );
			// edge region of BC; barycentric coords (0, 1-w, w)
return target.copy( b ).addScaledVector( _vbc, w ); // edge region of BC
}
		// face region
const denom = 1 / ( va + vb + vc );
// u = va * denom
v = vb * denom;
w = vc * denom;
return target.copy( a ).addScaledVector( _vab, v ).addScaledVector( _vac, w );
}
	/**
	 * Test whether this triangle equals the given triangle.
	 * @param {*} triangle
	 * @returns
	 */
equals( triangle ) {
return triangle.a.equals( this.a ) && triangle.b.equals( this.b ) && triangle.c.equals( this.c );
}
}
export { Triangle };
| et. | identifier_name |
Triangle.js | import { Vector3 } from './Vector3.js';
import { Plane } from './Plane.js';
const _v0 = /*@__PURE__*/ new Vector3();
const _v1 = /*@__PURE__*/ new Vector3();
const _v2 = /*@__PURE__*/ new Vector3();
const _v3 = /*@__PURE__*/ new Vector3();
const _vab = /*@__PURE__*/ new Vector3();
const _vac = /*@__PURE__*/ new Vector3();
const _vbc = /*@__PURE__*/ new Vector3();
const _vap = /*@__PURE__*/ new Vector3();
const _vbp = /*@__PURE__*/ new Vector3();
const _vcp = /*@__PURE__*/ new Vector3();
/**
 * Class overview:
 * Triangle describes a flat triangle in 3D space.
 * A triangle is represented by three 3D points.
 * Note: the same three points given in a different order count as a different triangle,
 * and the order in which a, b and c are passed also determines the direction of the face normal.
 */
/** Triangle */
class Triangle {
	/**
	 * Constructor
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 */
constructor( a = new Vector3(), b = new Vector3(), c = new Vector3() ) {
this.a = a;
this.b = b;
this.c = c;
}
	/**
	 * Given three non-collinear points on a face, compute the face normal.
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 * Note: do not pass three collinear points.
	 * Whether the normal comes out positive or negative depends on the order in which a, b and c are passed.
	 */
static getNormal( a, b, c, target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getNormal() target is now required' );
target = new Vector3();
}
		// Use the cross product to compute the normal of this face.
target.subVectors( c, b );
_v0.subVectors( a, b );
target.cross( _v0 );
const targetLengthSq = target.lengthSq();
if ( targetLengthSq > 0 ) {
			// Normalize the result.
return target.multiplyScalar( 1 / Math.sqrt( targetLengthSq ) );
}
return target.set( 0, 0, 0 );
}
	/**
	 * Static/instance method for computing barycentric coordinates.
	 * @param {*} point the point of interest
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 *
	 * Algorithm reference: http://www.blackpawn.com/texts/pointinpoly/default.html
	 * Note that this function does not compute the centroid of a triangle; it uses barycentric coordinates to express the position of an arbitrary point in the plane,
	 * which makes it easy to test whether a point lies inside the triangle.
	 */
static getBarycoord( point, a, b, c, target ) {
const ca = new Vector3();
const ba = new Vector3();
const pa = new Vector3();
ca.subVectors( c, a );
ba.subVectors( b, a );
pa.subVectors( point, a );
const dot00 = ca.dot( ca );
const dot01 = ca.dot( ba );
const dot02 = ca.dot( pa );
const dot11 = ba.dot( ba );
const dot12 = ba.dot( pa );
const denom = ( dot00 * dot11 - dot01 * dot01 );
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getBarycoord() target is now required' );
target = new Vector3();
}
		// The three points are collinear, or the triangle is degenerate.
		// TODO: why does a degenerate triangle give 0 here?
		if ( denom === 0 ) {
			// Treat any point as lying outside the triangle.
			// Not sure if this is the best return value; perhaps undefined should be returned.
return target.set( - 2, - 1, - 1 );
}
const invDenom = 1 / denom;
const u = ( dot11 * dot02 - dot01 * dot12 ) * invDenom;
const v = ( dot00 * dot12 - dot01 * dot02 ) * invDenom;
		// The x, y and z of the barycentric coordinates must sum to 1.
return target.set( 1 - u - v, v, u );
}
	/**
	 * Test whether a triangle contains a given point.
	 * @param {*} point the point of interest
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @returns
	 */
static containsPoint( point, a, b, c ) {
this.getBarycoord( point, a, b, c, _v3 );
return ( _v3.x >= 0 ) && ( _v3.y >= 0 ) && ( ( _v3.x + _v3.y ) <= 1 );
}
	/**
	 * UV interpolation: given the UVs of three points, interpolate the UV of an arbitrary point.
	 * @param {*} point position of the point whose UV is interpolated
	 * @param {*} p1 position of vertex 1
	 * @param {*} p2 position of vertex 2
	 * @param {*} p3 position of vertex 3
	 * @param {*} uv1 UV of vertex 1
	 * @param {*} uv2 UV of vertex 2
	 * @param {*} uv3 UV of vertex 3
	 * @param {*} target the result is copied into this Vector2
	 * @returns
	 */
static getUV( point, p1, p2, p3, uv1, uv2, uv3, target ) {
this.getBarycoord( point, p1, p2, p3, _v3 );
target.set( 0, 0 );
target.addScaledVector( uv1, _v3.x );
target.addScaledVector( uv2, _v3.y );
target.addScaledVector( uv3, _v3.z );
return target;
}
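	// Usage sketch (illustrative): if point coincides with p1, its
	// barycentric coordinates are (1,0,0) and getUV() returns a copy of
	// uv1; a point at the centroid gets the average of uv1, uv2 and uv3.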
	/**
	 * Test whether a given direction faces the normal of the triangle formed by the given three points.
	 * @param {*} a vertex 1 of the triangle
	 * @param {*} b vertex 2 of the triangle
	 * @param {*} c vertex 3 of the triangle
	 * @param {*} direction
	 * @returns
	 */
static isFrontFacing( a, b, c, direction ) {
_v0.subVectors( c, b );
_v1.subVectors( a, b );
// strictly front facing
		return _v0.cross( _v1 ).dot( direction ) < 0;
}
	/**
	 * Set the three vertices of the triangle.
	 * @param {*} a
	 * @param {*} b
	 * @param {*} c
	 * @returns
	 */
set( a, b, c ) {
this.a.copy( a );
this.b.copy( b );
this.c.copy( c );
return this;
}
	/**
	 * Set the triangle's vertices to coordinates from an array.
	 * @param {*} points vertex array
	 * @param {*} i0 index of the first point
	 * @param {*} i1 index of the second point
	 * @param {*} i2 index of the third point
	 * @returns
	 */
setFromPointsAndIndices( points, i0, i1, i2 ) {
this.a.copy( points[ i0 ] );
this.b.copy( points[ i1 ] );
this.c.copy( points[ i2 ] );
return this;
}
	/**
	 * Return a copy of this triangle.
	 * @returns
	 */
clone() {
return new this.constructor().copy( this );
}
	/**
	 * Set this triangle to be a copy of the given triangle.
	 * @param {*} triangle
	 * @returns
	 */
copy( triangle ) {
this.a.copy( triangle.a );
this.b.copy( triangle.b );
this.c.copy( triangle.c );
return this;
}
	/**
	 * Compute the area of the triangle.
	 * @returns
	 */
getArea() {
_v0.subVectors( this.c, this.b );
_v1.subVectors( this.a, this.b );
return _v0.cross( _v1 ).length() * 0.5;
}
	/**
	 * Compute the midpoint (centroid) of the triangle.
	 */
getMidpoint( target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getMidpoint() target is now required' );
target = new Vector3();
}
return target.addVectors( this.a, this.b ).add( this.c ).multiplyScalar( 1 / 3 );
}
	/**
	 * Get the normal of the triangle face.
	 * @param {*} target
	 * @returns
	 */
getNormal( target ) {
return Triangle.getNormal( this.a, this.b, this.c, target );
}
	/**
	 * Get the plane on which this triangle lies.
	 * @param {*} target
	 * @returns
	 */
getPlane( target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .getPlane() target is now required' );
target = new Plane();
}
		// Three non-collinear points in space determine a plane.
return target.setFromCoplanarPoints( this.a, this.b, this.c );
}
	/**
	 * Compute the barycentric coordinates of a given point.
	 * @param {*} point the point of interest
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 */
getBarycoord( point, target ) {
return Triangle.getBarycoord( point, this.a, this.b, this.c, target );
}
	/**
	 * UV interpolation: given the UVs of this triangle's three vertices, interpolate the UV of an arbitrary point.
	 * @param {*} point position of the point whose UV is interpolated
	 * @param {*} uv1 UV of vertex 1
	 * @param {*} uv2 UV of vertex 2
	 * @param {*} uv3 UV of vertex 3
	 * @param {*} target the result is copied into this Vector2
	 * @returns
	 */
getUV( point, uv1, uv2, uv3, target ) {
return Triangle.getUV( point, this.a, this.b, this.c, uv1, uv2, uv3, target );
}
	/**
	 * Test whether the given point lies inside this triangle.
	 * @param {*} point
	 * @returns
	 */
containsPoint( point ) {
return Triangle.containsPoint( point, this.a, this.b, this.c );
}
	/**
	 * Test whether a given direction faces this triangle's normal.
	 * @param {*} direction
	 * @returns
	 */
isFrontFacing( direction ) {
return Triangle.isFrontFacing( this.a, this.b, this.c, direction );
}
	/**
	 * Test whether this triangle intersects the given box.
	 * @param {*} box
	 * @returns
	 */
intersectsBox( box ) {
return box.intersectsTriangle( this );
}
	/**
	 * Return the point on the triangle closest to the given point.
	 * @param {*} p the point of interest
	 * @param {*} target the result is copied into this Vector3
	 * @returns
	 */
closestPointToPoint( p, target ) {
if ( target === undefined ) {
console.warn( 'THREE.Triangle: .closestPointToPoint() target is now required' );
target = new Vector3();
}
const a = this.a, b = this.b, c = this.c;
let v, w;
		// Algorithm from "Real-Time Collision Detection" by Christer Ericson,
		// published by Morgan Kaufmann, (c) 2005 Elsevier Inc.,
		// reproduced under license. See section 5.1.5 for details.
		// Essentially, we want to find which Voronoi region the given point p lies in, with a minimum of redundant computation.
_vab.subVectors( b, a );
_vac.subVectors( c, a );
_vap.subVectors( p, a );
const d1 = _vab.dot( _vap );
const d2 = _vac.dot( _vap );
if ( d1 <= 0 && d2 <= 0 ) {
			// vertex region of A; barycentric coords (1, 0, 0)
return target.copy( a );
}
_vbp.subVectors( p, b );
const d3 = _vab.dot( _vbp );
const d4 = _vac.dot( _vbp );
if ( d3 >= 0 && d4 <= d3 ) {
			// vertex region of B; barycentric coords (0, 1, 0)
return target.copy( b );
}
const vc = d1 * d4 - d3 * d2;
if ( vc <= 0 && d1 >= 0 && d3 <= 0 ) {
v = d1 / ( d1 - d3 );
			// edge region of AB; barycentric coords (1-v, v, 0)
return target.copy( a ).addScaledVector( _vab, v );
}
_vcp.subVectors( p, c );
const d5 = _vab.dot( _vcp );
const d6 = _vac.dot( _vcp );
if ( d6 >= 0 && d5 <= d6 ) {
			// vertex region of C; barycentric coords (0, 0, 1)
return target.copy( c );
}
const vb = d5 * d2 - d1 * d6;
if ( vb <= 0 && d2 >= 0 && d6 <= 0 ) {
w = d2 / ( d2 - d6 );
			// edge region of AC; barycentric coords (1-w, 0, w)
return target.copy( a ).addScaledVector( _vac, w );
}
const va = d3 * d6 - d5 * d4;
if ( va <= 0 && ( d4 - d3 ) >= 0 && ( d5 - d6 ) >= 0 ) {
_vbc.subVectors( c, b );
w = ( d4 - d3 ) / ( ( d4 - d3 ) + ( d5 - d6 ) );
			// edge region of BC; barycentric coords (0, 1-w, w)
return target.copy( b ).addScaledVector( _vbc, w ); // edge region of BC
}
		// face region
const denom = 1 / ( va + vb + vc );
// u = va * denom
v = vb * denom;
w = vc * denom;
return target.copy( a ).addScaledVector( _vab, v ).addScaledVector( _vac, w );
}
	/**
	 * Test whether this triangle equals the given triangle.
	 * @param {*} triangle
	 * @returns
	 */
equals( triangle ) {
return triangle.a.equals( this.a ) && triangle.b.equals( this.b ) && triangle.c.equals( this.c );
}
}
export { Triangle };
| conditional_block |
||
daemon.py | #!/usr/bin/env python
import time
import serial
import re
from subprocess import call
import os.path
import paho.mqtt.client as mqtt
import urllib2
# ---------------------------------
# Initialize variables and settings
# ---------------------------------
# First global variables. We might need them early.
wantvalue={}
# Server name of iot server and MQTT server.
# Leave one empty to ignore that protocol.
httpservername="homepi"
mqttservername="homepi"
# MQTT Settings, leave user empty for open MQTT servers.
mqttuser="pi"
mqttpassword="raspberry"
mqtttopic="heatpump"
# -------------------
# Command definitions
# -------------------
# Define command to handle when MQTT subthread connects. Tell us and subscribe to command topic.
def on_connect(client, userdata, rc):
print("Connected MQTT with status " + str(rc))
client.subscribe(mqtttopic + '/command/#')
# Define command to handle callback for when MQTT command "mode" arrives. Tell us and call handle_mode.
def on_message_mode(client, userdata, msg):
print("Received mode ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8")
handle_mode(command)
# Define command to handle callback for when MQTT command "temp" arrives.
# Tell us and confirm on MQTT, then call sendtoheatpump to set the new room value.
def on_message_temp(client, userdata, msg):
print("Received temp ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8").lower()
mqttc.publish(mqtttopic + "/status/temp", int(float(command)))
sendtoheatpump('0203', int(float(command)))
# Define command to handle callback for when MQTT command "curve" arrives.
def on_message_curve(client, userdata, msg):
print("Received curve ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8").lower()
mqttc.publish(mqtttopic + "/status/curve", int(float(command)))
sendtoheatpump('0205', int(float(command)))
# Define command to handle callback for when other commands arrive on MQTT. We ignore these.
def on_message(client, userdata, msg):
print("Received unknown command ")
print(msg.topic + ": " + str(msg.payload))
# Define command for handling heatpump mode commands.
# Respond on MQTT and then call sendtoheatpump.
def handle_mode(command):
if 'Auto' == command:
print("Set mode auto")
mqttc.publish(mqtttopic + "/status/mode", "Auto")
sendtoheatpump('2201', '1')
elif 'Heatpump' == command:
print("Set mode heatpump")
mqttc.publish(mqtttopic + "/status/mode", "Heatpump")
sendtoheatpump('2201', '2')
elif 'Electricity' == command:
print("Set mode electricity")
mqttc.publish(mqtttopic + "/status/mode", "Electricity")
sendtoheatpump('2201', '3')
elif 'Water' == command:
print("Set mode water")
mqttc.publish(mqtttopic + "/status/mode", "Water")
sendtoheatpump('2201', '4')
elif 'Off' == command:
print("Set mode off")
mqttc.publish(mqtttopic + "/status/mode", "Off")
sendtoheatpump('2201', '0')
else:
print("Unknown command!")
# Define command to parse incoming status line (XRxxxxyyyy) from H1, and send using MQTT and/or HTTP
# Example: XR010701B8 107 Heating setpoint (44.0c)
def parseandsend(line):
# We need the global dict to know about recent commands sent to heatpump.
global wantvalue
# Process the XR line to get register, value and a good readable label.
print line
splitline=line.split(' (')
labels=splitline[0].split(' ')
label=labels[4:]
label.insert(0,labels[0][2:6])
label='+'.join(label)
register=labels[0][2:6]
    # Confirm a label was actually parsed. A bit paranoid at this point.
if label:
        # Clean up label and value from unnecessary characters
label=re.sub('/', '+', label)
label=re.sub(',', '', label)
value = re.sub('[hpcd\) %]', '', splitline[1])
# Make sure we actually got a value there
if value:
# Now start sending received values to MQTT and/or HTTP,
            # but don't send anything if we are waiting for a specific
# register to be confirmed (if exists in dict wantvalue)
            if register not in wantvalue:
if mqttservername:
mqttc.publish(mqtttopic + "/" + register, int(float(value)))
# That one there sent the raw register as an MQTT topic.
# Now if the line is temp, curve or mode, send those in a friendlier way.
if labels[0][2:6] == "0203":
mqttc.publish(mqtttopic + "/status/temp", int(float(value)))
if labels[0][2:6] == "0205":
mqttc.publish(mqtttopic + "/status/curve", int(float(value)))
if labels[0][2:6] == "2201":
if int(float(value)) == 0:
mqttc.publish(mqtttopic + "/status/mode", "Off")
if int(float(value)) == 10:
mqttc.publish(mqtttopic + "/status/mode", "Auto")
if int(float(value)) == 20:
mqttc.publish(mqtttopic + "/status/mode", "Heatpump")
if int(float(value)) == 30:
mqttc.publish(mqtttopic + "/status/mode", "Electricity")
if int(float(value)) == 40:
mqttc.publish(mqtttopic + "/status/mode", "Water")
if httpservername:
url="http://" + httpservername + "/iot/iotstore.php?id=HeatPump+" + label + "&set=" + value
urllib2.urlopen(url).read()
# Return a list of the register and data we saw.
return register, int(float(value))
else:
# Corrupted line. No value. Make sure we return something anyway.
return "error", "novalue"
else:
# Corrupted line. No label parsed. Make sure we return something.
return "error", "nolabel"
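# Worked example, tracing the sample line from the comment above: for
# line = 'XR010701B8 107 Heating setpoint (44.0c)' the register is '0107',
# the cleaned value is '44.0', and (with MQTT enabled and no pending write
# to 0107) topic heatpump/0107 receives 44; the function returns
# ('0107', 44).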
# Define command to send XW commands through H1 interface to set heatpump settings
# Uses register (4 char hex string) and data (normally integer as int or string)
def sendtoheatpump(dataregister, svalue):
# We need the global dict to put the newly set registers and values in, for later verification.
global wantvalue
# Convert value string to int, multiply 10 for one decimal and format as 2 byte HEX, then form H1 XW command.
hexvalue=format(int(float(svalue))*10, '04X')
sendcommand="XW" + dataregister + hexvalue + "\r"
# Flush buffers and send XW string.
print "Writing command: " + sendcommand
ser.flushOutput()
ser.flushInput()
ser.write(sendcommand)
# Save register and value in dict wantvalue. This is later compared to incoming values
# to make sure it was received by the heatpump.
if dataregister == "2201" and float(svalue) < 10:
        # For the 2201 register we multiply by 10 so the compare value matches later, if we haven't already multiplied (resending).
# Mode number is reported by H1 as a direct decimal from the hex data (000A = 10).
# All other values are returned with its base at one decimal (000A = 1.0).
wantvalue[dataregister] = int(float(svalue))*10
else:
wantvalue[dataregister] = int(float(svalue))
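# Worked example (illustrative): sendtoheatpump('0203', 21) formats
# 21 * 10 = 210 as hex '00D2', writes 'XW020300D2\r' to the H1 interface
# and records wantvalue['0203'] = 21 so the main loop can verify that the
# heatpump echoes the new setting back.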
# Define command to send 2 byte (XP, XS etc.) settings to H1 interface and verify it was accepted.
# Todo: only allow 2 byte strings.
def | (h1command):
h1response="init"
# Resend command every 0.5s until answer is received
while h1response[:2] != h1command:
print "Writing command: " + h1command
ser.flushOutput()
ser.flushInput()
ser.write(h1command + "\r\n")
h1response=ser.readline()
h1response=re.sub('[\n\r]', '', h1response)
print h1response
time.sleep(0.5)
print "Confirmed " + h1command
# Define command to reset H1 interface. Unused for now, but saved for reference.
def reseth1():
print "Sending reset"
ser.write("!\r\n")
# ------------------
# Start main program
# ------------------
# Set MQTT and launch its subthread, but only if we want MQTT.
if mqttservername:
mqttc = mqtt.Client(mqtttopic)
if mqttuser:
mqttc.username_pw_set(mqttuser, mqttpassword)
mqttc.message_callback_add(mqtttopic + '/command/mode', on_message_mode)
mqttc.message_callback_add(mqtttopic + '/command/temp', on_message_temp)
mqttc.message_callback_add(mqtttopic + '/command/curve', on_message_curve)
mqttc.on_connect = on_connect
mqttc.on_message = on_message
mqttc.connect(mqttservername)
mqttc.loop_start()
# Define H1 interface serial port.
ser = serial.Serial(
port='/dev/serial0',
baudrate = 19200,
timeout=10
)
# Give MQTT time to connect, then clear the serial buffers and start the business.
time.sleep(1)
print "Start collection of data..."
ser.flushOutput()
ser.flushInput()
# The business... (Main loop)
while 1:
# Read line from H1. Strip CR/LF. Times out after 10 seconds (see above)
line=ser.readline()
line=re.sub('[\n\r]', '', line)
if line:
# If we got a line (no timeout) we do checking and parsing of it
if line[:2] == "XR":
# Only care about lines with heatpump data. (XRxxxxyyyy)
if len(line) <= 10:
# If the data line is only 10 characters, we assume the H1 reset and lost its settings.
# Make H1 send readable labels and regular full updates
sendtoh1("XP")
sendtoh1("XM")
else:
# If the data line is full expected length, work with it.
# Call parse and send MQTT/HTTP command. Returns what data was actually used.
parseresult = parseandsend(line)
# Check if the data we found is waiting to be confirmed in "wantvalue" dictionary.
                if parseresult[0] in wantvalue:
if wantvalue[parseresult[0]] == parseresult[1]:
# Data received matches. No more check needed. Delete from dictionary.
# Also run the line again, because parseandsend() skipped last send pending check.
del wantvalue[parseresult[0]]
print "Register " + parseresult[0] + " confirmed!"
parseandsend(line)
else:
# Data received does not match. Resend command to heatpump.
print "Register " + parseresult[0] + " different from requested, resending..."
# Workaround: 2201 is annoying. The compared value is actually 10 times the value we want to resend.
# I should probably rethink the handling of 2201 wantvalue. It's a bit of a mess.
if parseresult[0] == "2201":
sendtoheatpump(parseresult[0], int(float(wantvalue[parseresult[0]])/10))
else:
sendtoheatpump(parseresult[0], wantvalue[parseresult[0]])
else:
# No line was received from H1 interface. Use this time to do other things.
if wantvalue:
# Print out dict of values waiting for confirmation, if any.
print wantvalue
if os.path.exists("/tmp/hpcommand.txt"):
# Send commands to heatpump the alternative (original, non MQTT) way.
# Read from a text file, then delete the file.
file = open("/tmp/hpcommand.txt", "r")
filedata=file.read().split(' ')
sendtoheatpump(filedata[0], filedata[1])
file.close()
call(["rm", "/tmp/hpcommand.txt"])
| sendtoh1 | identifier_name |
daemon.py | #!/usr/bin/env python
import time
import serial
import re
from subprocess import call
import os.path
import paho.mqtt.client as mqtt
import urllib2
# ---------------------------------
# Initialize variables and settings
# ---------------------------------
# First global variables. We might need them early.
wantvalue={}
# Server name of iot server and MQTT server.
# Leave one empty to ignore that protocol.
httpservername="homepi"
mqttservername="homepi"
# MQTT Settings, leave user empty for open MQTT servers.
mqttuser="pi"
mqttpassword="raspberry"
mqtttopic="heatpump"
# -------------------
# Command definitions
# -------------------
# Define command to handle when MQTT subthread connects. Tell us and subscribe to command topic.
def on_connect(client, userdata, rc):
print("Connected MQTT with status " + str(rc))
client.subscribe(mqtttopic + '/command/#')
# Define command to handle callback for when MQTT command "mode" arrives. Tell us and call handle_mode.
def on_message_mode(client, userdata, msg):
print("Received mode ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8")
handle_mode(command)
# Define command to handle callback for when MQTT command "temp" arrives.
# Tell us and confirm on MQTT, then call sendtoheatpump to set the new room value.
def on_message_temp(client, userdata, msg):
print("Received temp ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8").lower()
mqttc.publish(mqtttopic + "/status/temp", int(float(command)))
sendtoheatpump('0203', int(float(command)))
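# Worked example (illustrative): publishing '21.5' to heatpump/command/temp
# confirms 21 on heatpump/status/temp and calls sendtoheatpump('0203', 21),
# i.e. the payload is truncated to a whole degree before being written.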
# Define command to handle callback for when MQTT command "curve" arrives.
def on_message_curve(client, userdata, msg):
print("Received curve ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8").lower()
mqttc.publish(mqtttopic + "/status/curve", int(float(command)))
sendtoheatpump('0205', int(float(command)))
# Define command to handle callback for when other commands arrive on MQTT. We ignore these.
def on_message(client, userdata, msg):
print("Received unknown command ")
print(msg.topic + ": " + str(msg.payload))
# Define command for handling heatpump mode commands.
# Respond on MQTT and then call sendtoheatpump.
def handle_mode(command):
if 'Auto' == command:
|
elif 'Heatpump' == command:
print("Set mode heatpump")
mqttc.publish(mqtttopic + "/status/mode", "Heatpump")
sendtoheatpump('2201', '2')
elif 'Electricity' == command:
print("Set mode electricity")
mqttc.publish(mqtttopic + "/status/mode", "Electricity")
sendtoheatpump('2201', '3')
elif 'Water' == command:
print("Set mode water")
mqttc.publish(mqtttopic + "/status/mode", "Water")
sendtoheatpump('2201', '4')
elif 'Off' == command:
print("Set mode off")
mqttc.publish(mqtttopic + "/status/mode", "Off")
sendtoheatpump('2201', '0')
else:
print("Unknown command!")
# Define command to parse incoming status line (XRxxxxyyyy) from H1, and send using MQTT and/or HTTP
# Example: XR010701B8 107 Heating setpoint (44.0c)
def parseandsend(line):
# We need the global dict to know about recent commands sent to heatpump.
global wantvalue
# Process the XR line to get register, value and a good readable label.
print line
splitline=line.split(' (')
labels=splitline[0].split(' ')
label=labels[4:]
label.insert(0,labels[0][2:6])
label='+'.join(label)
register=labels[0][2:6]
    # Confirm that a label was actually parsed. A bit paranoid at this point.
if label:
        # Clean up label and value from unnecessary characters
label=re.sub('/', '+', label)
label=re.sub(',', '', label)
        # Guard against corrupted lines that have no '(' part at all.
        value = re.sub('[hpcd\) %]', '', splitline[1]) if len(splitline) > 1 else ''
# Make sure we actually got a value there
if value:
# Now start sending received values to MQTT and/or HTTP,
            # but don't send anything if we are waiting for a specific
            # register to be confirmed (i.e. if it exists in dict wantvalue)
if not wantvalue.has_key(register):
if mqttservername:
mqttc.publish(mqtttopic + "/" + register, int(float(value)))
                    # The publish above sent the raw register as an MQTT topic.
# Now if the line is temp, curve or mode, send those in a friendlier way.
if labels[0][2:6] == "0203":
mqttc.publish(mqtttopic + "/status/temp", int(float(value)))
if labels[0][2:6] == "0205":
mqttc.publish(mqtttopic + "/status/curve", int(float(value)))
if labels[0][2:6] == "2201":
if int(float(value)) == 0:
mqttc.publish(mqtttopic + "/status/mode", "Off")
if int(float(value)) == 10:
mqttc.publish(mqtttopic + "/status/mode", "Auto")
if int(float(value)) == 20:
mqttc.publish(mqtttopic + "/status/mode", "Heatpump")
if int(float(value)) == 30:
mqttc.publish(mqtttopic + "/status/mode", "Electricity")
if int(float(value)) == 40:
mqttc.publish(mqtttopic + "/status/mode", "Water")
if httpservername:
url="http://" + httpservername + "/iot/iotstore.php?id=HeatPump+" + label + "&set=" + value
urllib2.urlopen(url).read()
# Return a list of the register and data we saw.
return register, int(float(value))
else:
# Corrupted line. No value. Make sure we return something anyway.
return "error", "novalue"
else:
# Corrupted line. No label parsed. Make sure we return something.
return "error", "nolabel"
# Define command to send XW commands through H1 interface to set heatpump settings
# Uses register (4 char hex string) and data (normally integer as int or string)
def sendtoheatpump(dataregister, svalue):
# We need the global dict to put the newly set registers and values in, for later verification.
global wantvalue
    # Convert value string to int, multiply by 10 for one decimal and format as 2 byte HEX, then form the H1 XW command.
hexvalue=format(int(float(svalue))*10, '04X')
sendcommand="XW" + dataregister + hexvalue + "\r"
# Flush buffers and send XW string.
print "Writing command: " + sendcommand
ser.flushOutput()
ser.flushInput()
ser.write(sendcommand)
# Save register and value in dict wantvalue. This is later compared to incoming values
# to make sure it was received by the heatpump.
if dataregister == "2201" and float(svalue) < 10:
        # For the 2201 register we multiply by 10 so the compare value matches later, if we haven't already multiplied (resending).
# Mode number is reported by H1 as a direct decimal from the hex data (000A = 10).
        # All other values are returned with one decimal place (000A = 1.0).
wantvalue[dataregister] = int(float(svalue))*10
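        # Example (flow assumed from handle_mode): svalue='2' (Heatpump) is
        # sent as payload 0014 hex, and H1 later reports 20, so storing
        # 2*10 = 20 here lets the comparison succeed.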
else:
wantvalue[dataregister] = int(float(svalue))
# Define command to send 2 byte (XP, XS etc.) settings to H1 interface and verify it was accepted.
# Todo: only allow 2 byte strings.
def sendtoh1(h1command):
h1response="init"
# Resend command every 0.5s until answer is received
while h1response[:2] != h1command:
print "Writing command: " + h1command
ser.flushOutput()
ser.flushInput()
ser.write(h1command + "\r\n")
h1response=ser.readline()
h1response=re.sub('[\n\r]', '', h1response)
print h1response
time.sleep(0.5)
print "Confirmed " + h1command
# Define command to reset H1 interface. Unused for now, but saved for reference.
def reseth1():
print "Sending reset"
ser.write("!\r\n")
# ------------------
# Start main program
# ------------------
# Set MQTT and launch its subthread, but only if we want MQTT.
if mqttservername:
mqttc = mqtt.Client(mqtttopic)
if mqttuser:
mqttc.username_pw_set(mqttuser, mqttpassword)
mqttc.message_callback_add(mqtttopic + '/command/mode', on_message_mode)
mqttc.message_callback_add(mqtttopic + '/command/temp', on_message_temp)
mqttc.message_callback_add(mqtttopic + '/command/curve', on_message_curve)
mqttc.on_connect = on_connect
mqttc.on_message = on_message
mqttc.connect(mqttservername)
mqttc.loop_start()
# Define H1 interface serial port.
ser = serial.Serial(
port='/dev/serial0',
baudrate = 19200,
timeout=10
)
# Give MQTT time to connect, then clear the serial buffers and start the business.
time.sleep(1)
print "Start collection of data..."
ser.flushOutput()
ser.flushInput()
# The business... (Main loop)
while 1:
# Read line from H1. Strip CR/LF. Times out after 10 seconds (see above)
line=ser.readline()
line=re.sub('[\n\r]', '', line)
if line:
        # If we got a line (no timeout), check and parse it
if line[:2] == "XR":
# Only care about lines with heatpump data. (XRxxxxyyyy)
if len(line) <= 10:
# If the data line is only 10 characters, we assume the H1 reset and lost its settings.
# Make H1 send readable labels and regular full updates
sendtoh1("XP")
sendtoh1("XM")
else:
# If the data line is full expected length, work with it.
# Call parse and send MQTT/HTTP command. Returns what data was actually used.
parseresult = parseandsend(line)
# Check if the data we found is waiting to be confirmed in "wantvalue" dictionary.
if wantvalue.has_key(parseresult[0]):
if wantvalue[parseresult[0]] == parseresult[1]:
# Data received matches. No more check needed. Delete from dictionary.
# Also run the line again, because parseandsend() skipped last send pending check.
del wantvalue[parseresult[0]]
print "Register " + parseresult[0] + " confirmed!"
parseandsend(line)
else:
# Data received does not match. Resend command to heatpump.
print "Register " + parseresult[0] + " different from requested, resending..."
# Workaround: 2201 is annoying. The compared value is actually 10 times the value we want to resend.
# I should probably rethink the handling of 2201 wantvalue. It's a bit of a mess.
if parseresult[0] == "2201":
sendtoheatpump(parseresult[0], int(float(wantvalue[parseresult[0]])/10))
else:
sendtoheatpump(parseresult[0], wantvalue[parseresult[0]])
else:
# No line was received from H1 interface. Use this time to do other things.
if wantvalue:
# Print out dict of values waiting for confirmation, if any.
print wantvalue
if os.path.exists("/tmp/hpcommand.txt"):
# Send commands to heatpump the alternative (original, non MQTT) way.
# Read from a text file, then delete the file.
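            # Expected file content (assumed format): "<register> <value>",
            # e.g. "0203 21" to set the room temperature setpoint to 21.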
file = open("/tmp/hpcommand.txt", "r")
filedata=file.read().split(' ')
sendtoheatpump(filedata[0], filedata[1])
file.close()
call(["rm", "/tmp/hpcommand.txt"])
| print("Set mode auto")
mqttc.publish(mqtttopic + "/status/mode", "Auto")
sendtoheatpump('2201', '1') | conditional_block |
daemon.py | #!/usr/bin/env python
import time
import serial
import re
from subprocess import call
import os.path
import paho.mqtt.client as mqtt
import urllib2
# ---------------------------------
# Initialize variables and settings
# ---------------------------------
# First global variables. We might need them early.
wantvalue={}
# Server name of iot server and MQTT server.
# Leave one empty to ignore that protocol.
httpservername="homepi"
mqttservername="homepi"
# MQTT Settings, leave user empty for open MQTT servers.
mqttuser="pi"
mqttpassword="raspberry"
mqtttopic="heatpump"
# -------------------
# Command definitions
# -------------------
# Define command to handle when MQTT subthread connects. Tell us and subscribe to command topic.
def on_connect(client, userdata, rc):
print("Connected MQTT with status " + str(rc))
client.subscribe(mqtttopic + '/command/#')
# Define command to handle callback for when MQTT command "mode" arrives. Tell us and call handle_mode.
def on_message_mode(client, userdata, msg):
print("Received mode ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8")
handle_mode(command)
# Define command to handle callback for when MQTT command "temp" arrives.
# Tell us and confirm on MQTT, then call sendtoheatpump to set the new room value.
def on_message_temp(client, userdata, msg):
print("Received temp ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8").lower()
mqttc.publish(mqtttopic + "/status/temp", int(float(command)))
sendtoheatpump('0203', int(float(command)))
# Define command to handle callback for when MQTT command "curve" arrives.
def on_message_curve(client, userdata, msg):
print("Received curve ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8").lower()
mqttc.publish(mqtttopic + "/status/curve", int(float(command)))
sendtoheatpump('0205', int(float(command)))
# Define command to handle callback for when other commands arrive on MQTT. We ignore these.
def on_message(client, userdata, msg):
|
# Define command for handling heatpump mode commands.
# Respond on MQTT and then call sendtoheatpump.
def handle_mode(command):
if 'Auto' == command:
print("Set mode auto")
mqttc.publish(mqtttopic + "/status/mode", "Auto")
sendtoheatpump('2201', '1')
elif 'Heatpump' == command:
print("Set mode heatpump")
mqttc.publish(mqtttopic + "/status/mode", "Heatpump")
sendtoheatpump('2201', '2')
elif 'Electricity' == command:
print("Set mode electricity")
mqttc.publish(mqtttopic + "/status/mode", "Electricity")
sendtoheatpump('2201', '3')
elif 'Water' == command:
print("Set mode water")
mqttc.publish(mqtttopic + "/status/mode", "Water")
sendtoheatpump('2201', '4')
elif 'Off' == command:
print("Set mode off")
mqttc.publish(mqtttopic + "/status/mode", "Off")
sendtoheatpump('2201', '0')
else:
print("Unknown command!")
# Define command to parse incoming status line (XRxxxxyyyy) from H1, and send using MQTT and/or HTTP
# Example: XR010701B8 107 Heating setpoint (44.0c)
def parseandsend(line):
# We need the global dict to know about recent commands sent to heatpump.
global wantvalue
# Process the XR line to get register, value and a good readable label.
print line
splitline=line.split(' (')
labels=splitline[0].split(' ')
label=labels[4:]
label.insert(0,labels[0][2:6])
label='+'.join(label)
register=labels[0][2:6]
    # Confirm that a label was actually parsed. A bit paranoid at this point.
if label:
        # Clean up label and value from unnecessary characters
label=re.sub('/', '+', label)
label=re.sub(',', '', label)
        # Guard against corrupted lines that have no '(' part at all.
        value = re.sub('[hpcd\) %]', '', splitline[1]) if len(splitline) > 1 else ''
# Make sure we actually got a value there
if value:
# Now start sending received values to MQTT and/or HTTP,
            # but don't send anything if we are waiting for a specific
            # register to be confirmed (i.e. if it exists in dict wantvalue)
if not wantvalue.has_key(register):
if mqttservername:
mqttc.publish(mqtttopic + "/" + register, int(float(value)))
                    # The publish above sent the raw register as an MQTT topic.
# Now if the line is temp, curve or mode, send those in a friendlier way.
if labels[0][2:6] == "0203":
mqttc.publish(mqtttopic + "/status/temp", int(float(value)))
if labels[0][2:6] == "0205":
mqttc.publish(mqtttopic + "/status/curve", int(float(value)))
if labels[0][2:6] == "2201":
if int(float(value)) == 0:
mqttc.publish(mqtttopic + "/status/mode", "Off")
if int(float(value)) == 10:
mqttc.publish(mqtttopic + "/status/mode", "Auto")
if int(float(value)) == 20:
mqttc.publish(mqtttopic + "/status/mode", "Heatpump")
if int(float(value)) == 30:
mqttc.publish(mqtttopic + "/status/mode", "Electricity")
if int(float(value)) == 40:
mqttc.publish(mqtttopic + "/status/mode", "Water")
if httpservername:
url="http://" + httpservername + "/iot/iotstore.php?id=HeatPump+" + label + "&set=" + value
urllib2.urlopen(url).read()
# Return a list of the register and data we saw.
return register, int(float(value))
else:
# Corrupted line. No value. Make sure we return something anyway.
return "error", "novalue"
else:
# Corrupted line. No label parsed. Make sure we return something.
return "error", "nolabel"
# Define command to send XW commands through H1 interface to set heatpump settings
# Uses register (4 char hex string) and data (normally integer as int or string)
def sendtoheatpump(dataregister, svalue):
# We need the global dict to put the newly set registers and values in, for later verification.
global wantvalue
    # Convert value string to int, multiply by 10 for one decimal and format as 2 byte HEX, then form the H1 XW command.
hexvalue=format(int(float(svalue))*10, '04X')
sendcommand="XW" + dataregister + hexvalue + "\r"
# Flush buffers and send XW string.
print "Writing command: " + sendcommand
ser.flushOutput()
ser.flushInput()
ser.write(sendcommand)
# Save register and value in dict wantvalue. This is later compared to incoming values
# to make sure it was received by the heatpump.
if dataregister == "2201" and float(svalue) < 10:
        # For the 2201 register we multiply by 10 so the compare value matches later, if we haven't already multiplied (resending).
# Mode number is reported by H1 as a direct decimal from the hex data (000A = 10).
        # All other values are returned with one decimal place (000A = 1.0).
wantvalue[dataregister] = int(float(svalue))*10
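        # Example (flow assumed from handle_mode): svalue='2' (Heatpump) is
        # sent as payload 0014 hex, and H1 later reports 20, so storing
        # 2*10 = 20 here lets the comparison succeed.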
else:
wantvalue[dataregister] = int(float(svalue))
# Define command to send 2 byte (XP, XS etc.) settings to H1 interface and verify it was accepted.
# Todo: only allow 2 byte strings.
def sendtoh1(h1command):
h1response="init"
# Resend command every 0.5s until answer is received
while h1response[:2] != h1command:
print "Writing command: " + h1command
ser.flushOutput()
ser.flushInput()
ser.write(h1command + "\r\n")
h1response=ser.readline()
h1response=re.sub('[\n\r]', '', h1response)
print h1response
time.sleep(0.5)
print "Confirmed " + h1command
# Define command to reset H1 interface. Unused for now, but saved for reference.
def reseth1():
print "Sending reset"
ser.write("!\r\n")
# ------------------
# Start main program
# ------------------
# Set MQTT and launch its subthread, but only if we want MQTT.
if mqttservername:
mqttc = mqtt.Client(mqtttopic)
if mqttuser:
mqttc.username_pw_set(mqttuser, mqttpassword)
mqttc.message_callback_add(mqtttopic + '/command/mode', on_message_mode)
mqttc.message_callback_add(mqtttopic + '/command/temp', on_message_temp)
mqttc.message_callback_add(mqtttopic + '/command/curve', on_message_curve)
mqttc.on_connect = on_connect
mqttc.on_message = on_message
mqttc.connect(mqttservername)
mqttc.loop_start()
# Define H1 interface serial port.
ser = serial.Serial(
port='/dev/serial0',
baudrate = 19200,
timeout=10
)
# Give MQTT time to connect, then clear the serial buffers and start the business.
time.sleep(1)
print "Start collection of data..."
ser.flushOutput()
ser.flushInput()
# The business... (Main loop)
while 1:
# Read line from H1. Strip CR/LF. Times out after 10 seconds (see above)
line=ser.readline()
line=re.sub('[\n\r]', '', line)
if line:
        # If we got a line (no timeout), check and parse it
if line[:2] == "XR":
# Only care about lines with heatpump data. (XRxxxxyyyy)
if len(line) <= 10:
# If the data line is only 10 characters, we assume the H1 reset and lost its settings.
# Make H1 send readable labels and regular full updates
sendtoh1("XP")
sendtoh1("XM")
else:
# If the data line is full expected length, work with it.
# Call parse and send MQTT/HTTP command. Returns what data was actually used.
parseresult = parseandsend(line)
# Check if the data we found is waiting to be confirmed in "wantvalue" dictionary.
if wantvalue.has_key(parseresult[0]):
if wantvalue[parseresult[0]] == parseresult[1]:
# Data received matches. No more check needed. Delete from dictionary.
# Also run the line again, because parseandsend() skipped last send pending check.
del wantvalue[parseresult[0]]
print "Register " + parseresult[0] + " confirmed!"
parseandsend(line)
else:
# Data received does not match. Resend command to heatpump.
print "Register " + parseresult[0] + " different from requested, resending..."
# Workaround: 2201 is annoying. The compared value is actually 10 times the value we want to resend.
# I should probably rethink the handling of 2201 wantvalue. It's a bit of a mess.
if parseresult[0] == "2201":
sendtoheatpump(parseresult[0], int(float(wantvalue[parseresult[0]])/10))
else:
sendtoheatpump(parseresult[0], wantvalue[parseresult[0]])
else:
# No line was received from H1 interface. Use this time to do other things.
if wantvalue:
# Print out dict of values waiting for confirmation, if any.
print wantvalue
if os.path.exists("/tmp/hpcommand.txt"):
# Send commands to heatpump the alternative (original, non MQTT) way.
# Read from a text file, then delete the file.
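            # Expected file content (assumed format): "<register> <value>",
            # e.g. "0203 21" to set the room temperature setpoint to 21.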
file = open("/tmp/hpcommand.txt", "r")
filedata=file.read().split(' ')
sendtoheatpump(filedata[0], filedata[1])
file.close()
call(["rm", "/tmp/hpcommand.txt"])
| print("Received unknown command ")
print(msg.topic + ": " + str(msg.payload)) | identifier_body |
daemon.py | #!/usr/bin/env python
import time
import serial
import re
from subprocess import call
import os.path
import paho.mqtt.client as mqtt
import urllib2
# ---------------------------------
# Initialize variables and settings
# ---------------------------------
# First global variables. We might need them early.
wantvalue={}
# Server name of iot server and MQTT server.
# Leave one empty to ignore that protocol.
httpservername="homepi"
mqttservername="homepi"
# MQTT Settings, leave user empty for open MQTT servers.
mqttuser="pi"
mqttpassword="raspberry"
mqtttopic="heatpump"
# -------------------
# Command definitions
# -------------------
# Define command to handle when MQTT subthread connects. Tell us and subscribe to command topic.
def on_connect(client, userdata, rc):
print("Connected MQTT with status " + str(rc))
client.subscribe(mqtttopic + '/command/#')
# Define command to handle callback for when MQTT command "mode" arrives. Tell us and call handle_mode.
def on_message_mode(client, userdata, msg):
print("Received mode ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8")
handle_mode(command)
# Define command to handle callback for when MQTT command "temp" arrives.
# Tell us and confirm on MQTT, then call sendtoheatpump to set the new room value.
def on_message_temp(client, userdata, msg):
print("Received temp ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8").lower()
mqttc.publish(mqtttopic + "/status/temp", int(float(command)))
sendtoheatpump('0203', int(float(command)))
# Define command to handle callback for when MQTT command "curve" arrives.
def on_message_curve(client, userdata, msg):
print("Received curve ")
print(msg.topic + ": " + str(msg.payload))
command = msg.payload.decode("utf-8").lower()
mqttc.publish(mqtttopic + "/status/curve", int(float(command)))
sendtoheatpump('0205', int(float(command)))
# Define command to handle callback for when other commands arrive on MQTT. We ignore these.
def on_message(client, userdata, msg):
print("Received unknown command ")
print(msg.topic + ": " + str(msg.payload))
# Define command for handling heatpump mode commands.
# Respond on MQTT and then call sendtoheatpump.
def handle_mode(command):
if 'Auto' == command:
print("Set mode auto")
mqttc.publish(mqtttopic + "/status/mode", "Auto")
sendtoheatpump('2201', '1')
elif 'Heatpump' == command:
print("Set mode heatpump")
mqttc.publish(mqtttopic + "/status/mode", "Heatpump")
sendtoheatpump('2201', '2')
elif 'Electricity' == command:
print("Set mode electricity")
mqttc.publish(mqtttopic + "/status/mode", "Electricity")
sendtoheatpump('2201', '3')
elif 'Water' == command:
print("Set mode water")
mqttc.publish(mqtttopic + "/status/mode", "Water")
sendtoheatpump('2201', '4')
elif 'Off' == command:
print("Set mode off")
mqttc.publish(mqtttopic + "/status/mode", "Off")
sendtoheatpump('2201', '0')
else:
print("Unknown command!")
# Define command to parse incoming status line (XRxxxxyyyy) from H1, and send using MQTT and/or HTTP
# Example: XR010701B8 107 Heating setpoint (44.0c)
def parseandsend(line):
# We need the global dict to know about recent commands sent to heatpump.
global wantvalue
# Process the XR line to get register, value and a good readable label.
print line
splitline=line.split(' (')
labels=splitline[0].split(' ')
label=labels[4:]
label.insert(0,labels[0][2:6])
label='+'.join(label)
register=labels[0][2:6]
    # Confirm that a label was actually parsed. A bit paranoid at this point.
if label:
        # Clean up label and value from unnecessary characters
label=re.sub('/', '+', label)
label=re.sub(',', '', label)
        # Guard against corrupted lines that have no '(' part at all.
        value = re.sub('[hpcd\) %]', '', splitline[1]) if len(splitline) > 1 else ''
# Make sure we actually got a value there
if value:
# Now start sending received values to MQTT and/or HTTP,
            # but don't send anything if we are waiting for a specific
            # register to be confirmed (i.e. if it exists in dict wantvalue)
if not wantvalue.has_key(register):
if mqttservername:
mqttc.publish(mqtttopic + "/" + register, int(float(value)))
                    # The publish above sent the raw register as an MQTT topic.
# Now if the line is temp, curve or mode, send those in a friendlier way.
if labels[0][2:6] == "0203":
mqttc.publish(mqtttopic + "/status/temp", int(float(value)))
if labels[0][2:6] == "0205":
mqttc.publish(mqtttopic + "/status/curve", int(float(value)))
if labels[0][2:6] == "2201":
if int(float(value)) == 0:
mqttc.publish(mqtttopic + "/status/mode", "Off")
if int(float(value)) == 10:
mqttc.publish(mqtttopic + "/status/mode", "Auto")
if int(float(value)) == 20:
mqttc.publish(mqtttopic + "/status/mode", "Heatpump")
if int(float(value)) == 30:
mqttc.publish(mqtttopic + "/status/mode", "Electricity")
if int(float(value)) == 40:
mqttc.publish(mqtttopic + "/status/mode", "Water")
if httpservername:
url="http://" + httpservername + "/iot/iotstore.php?id=HeatPump+" + label + "&set=" + value
urllib2.urlopen(url).read()
# Return a list of the register and data we saw.
return register, int(float(value))
else:
# Corrupted line. No value. Make sure we return something anyway.
return "error", "novalue"
else:
# Corrupted line. No label parsed. Make sure we return something.
return "error", "nolabel"
# Define command to send XW commands through H1 interface to set heatpump settings
# Uses register (4 char hex string) and data (normally integer as int or string)
def sendtoheatpump(dataregister, svalue):
# We need the global dict to put the newly set registers and values in, for later verification.
global wantvalue
    # Convert value string to int, multiply by 10 for one decimal and format as 2 byte HEX, then form the H1 XW command.
hexvalue=format(int(float(svalue))*10, '04X')
sendcommand="XW" + dataregister + hexvalue + "\r"
# Flush buffers and send XW string.
print "Writing command: " + sendcommand
ser.flushOutput()
ser.flushInput()
ser.write(sendcommand)
# Save register and value in dict wantvalue. This is later compared to incoming values
# to make sure it was received by the heatpump.
if dataregister == "2201" and float(svalue) < 10:
        # For the 2201 register we multiply by 10 so the compare value matches later, if we haven't already multiplied (resending).
# Mode number is reported by H1 as a direct decimal from the hex data (000A = 10).
        # All other values are returned with one decimal place (000A = 1.0).
wantvalue[dataregister] = int(float(svalue))*10
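        # Example (flow assumed from handle_mode): svalue='2' (Heatpump) is
        # sent as payload 0014 hex, and H1 later reports 20, so storing
        # 2*10 = 20 here lets the comparison succeed.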
else:
wantvalue[dataregister] = int(float(svalue))
# Define command to send 2 byte (XP, XS etc.) settings to H1 interface and verify it was accepted.
# Todo: only allow 2 byte strings.
def sendtoh1(h1command):
h1response="init"
# Resend command every 0.5s until answer is received
while h1response[:2] != h1command:
print "Writing command: " + h1command
ser.flushOutput()
ser.flushInput()
ser.write(h1command + "\r\n")
h1response=ser.readline()
h1response=re.sub('[\n\r]', '', h1response)
print h1response
time.sleep(0.5)
print "Confirmed " + h1command
# Define command to reset H1 interface. Unused for now, but saved for reference.
def reseth1():
print "Sending reset"
ser.write("!\r\n")
# ------------------
# Start main program
# ------------------
# Set MQTT and launch its subthread, but only if we want MQTT.
if mqttservername:
mqttc = mqtt.Client(mqtttopic)
if mqttuser:
mqttc.username_pw_set(mqttuser, mqttpassword)
mqttc.message_callback_add(mqtttopic + '/command/mode', on_message_mode)
mqttc.message_callback_add(mqtttopic + '/command/temp', on_message_temp)
mqttc.message_callback_add(mqtttopic + '/command/curve', on_message_curve)
mqttc.on_connect = on_connect
mqttc.on_message = on_message
mqttc.connect(mqttservername)
mqttc.loop_start()
# Define H1 interface serial port.
ser = serial.Serial(
port='/dev/serial0',
baudrate = 19200,
timeout=10
)
# Give MQTT time to connect, then clear the serial buffers and start the business.
time.sleep(1)
print "Start collection of data..."
ser.flushOutput()
ser.flushInput()
# The business... (Main loop)
while 1:
# Read line from H1. Strip CR/LF. Times out after 10 seconds (see above)
line=ser.readline()
line=re.sub('[\n\r]', '', line)
if line:
        # If we got a line (no timeout), check and parse it
if line[:2] == "XR":
# Only care about lines with heatpump data. (XRxxxxyyyy)
if len(line) <= 10:
# If the data line is only 10 characters, we assume the H1 reset and lost its settings. | # If the data line is full expected length, work with it.
# Call parse and send MQTT/HTTP command. Returns what data was actually used.
parseresult = parseandsend(line)
# Check if the data we found is waiting to be confirmed in "wantvalue" dictionary.
if wantvalue.has_key(parseresult[0]):
if wantvalue[parseresult[0]] == parseresult[1]:
# Data received matches. No more check needed. Delete from dictionary.
# Also run the line again, because parseandsend() skipped last send pending check.
del wantvalue[parseresult[0]]
print "Register " + parseresult[0] + " confirmed!"
parseandsend(line)
else:
# Data received does not match. Resend command to heatpump.
print "Register " + parseresult[0] + " different from requested, resending..."
# Workaround: 2201 is annoying. The compared value is actually 10 times the value we want to resend.
# I should probably rethink the handling of 2201 wantvalue. It's a bit of a mess.
if parseresult[0] == "2201":
sendtoheatpump(parseresult[0], int(float(wantvalue[parseresult[0]])/10))
else:
sendtoheatpump(parseresult[0], wantvalue[parseresult[0]])
else:
# No line was received from H1 interface. Use this time to do other things.
if wantvalue:
# Print out dict of values waiting for confirmation, if any.
print wantvalue
if os.path.exists("/tmp/hpcommand.txt"):
# Send commands to heatpump the alternative (original, non MQTT) way.
# Read from a text file, then delete the file.
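            # Expected file content (assumed format): "<register> <value>",
            # e.g. "0203 21" to set the room temperature setpoint to 21.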
file = open("/tmp/hpcommand.txt", "r")
filedata=file.read().split(' ')
sendtoheatpump(filedata[0], filedata[1])
file.close()
call(["rm", "/tmp/hpcommand.txt"]) | # Make H1 send readable labels and regular full updates
sendtoh1("XP")
sendtoh1("XM")
else: | random_line_split |
worker_actions.rs | //! Tasks and task execution of workers
//!
//! Note: This module and submodules will sooner or later need some refactoring.
//! For now, I still don't really know how I want it to look.
mod worker_abilities;
mod worker_updates;
use crate::db::DB;
use crate::game_master::event::*;
use crate::game_master::town_worker::*;
use crate::town_view::*;
use actix::prelude::*;
use chrono::offset::TimeZone;
use chrono::{DateTime, Duration, NaiveDateTime, Utc};
use paddlers_shared_lib::api::tasks::*;
use paddlers_shared_lib::game_mechanics::worker::*;
use paddlers_shared_lib::prelude::*;
use worker_abilities::*;
use worker_updates::MutWorkerDBEntity;
trait WorkerAction {
fn x(&self) -> i32;
fn y(&self) -> i32;
fn task_type(&self) -> &TaskType;
fn target(&self) -> Option<HoboKey>;
}
pub struct ValidatedTaskList {
pub new_tasks: Vec<NewTask>,
pub update_tasks: Vec<Task>,
pub village_id: VillageKey,
}
pub(crate) fn validate_task_list(
db: &DB,
tl: &TaskList,
) -> Result<ValidatedTaskList, Box<dyn std::error::Error>> {
let worker_id = tl.worker_id;
// Load relevant data into memory
let mut worker = db.worker_priv(worker_id).ok_or("Worker does not exist")?;
let village_id = VillageKey(worker.home);
let mut town = TownView::load_village(db, village_id);
// check timing and effect of current task interruption
let mut current_task = db
.current_task(worker.key())
.expect("Must have a current task");
let mut timestamp =
interrupt_task(&mut current_task, &worker).ok_or("Cannot interrupt current task.")?;
worker.x = current_task.x;
worker.y = current_task.y;
// iterate tasks and match for task types
let mut tasks = vec![];
for task in tl.tasks.iter() {
// Validate target hobo exists if there is one
if let Some(target_id) = task.target {
db.hobo(HoboKey(target_id)).ok_or("No such hobo id")?;
}
validate_ability(db, task.task_type, worker_id, timestamp)?;
let new_task = NewTask {
worker_id: worker_id.num(),
task_type: task.task_type,
x: task.x as i32,
y: task.y as i32,
start_time: Some(timestamp),
target_hobo_id: task.target,
};
simulate_begin_task(&new_task, &mut town, &mut worker)?;
let duration = simulate_finish_task(&new_task, &mut town, &mut worker)?;
tasks.push(new_task);
timestamp += duration;
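        // Note: each task is simulated to completion in memory so the next
        // task's start_time lines up with when the worker actually gets free.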
}
Ok(ValidatedTaskList {
new_tasks: tasks,
update_tasks: vec![current_task],
village_id,
})
}
pub(crate) fn replace_worker_tasks(
db: &DB,
worker: &Addr<TownWorker>,
worker_id: WorkerKey,
tasks: &[NewTask],
village_id: VillageKey,
) {
db.flush_task_queue(worker_id);
let _inserted = db.insert_tasks(tasks);
let current_task =
execute_worker_tasks(db, worker_id, village_id).expect("Worker has no current task");
if let Some(next_task) = db.earliest_future_task(worker_id) {
let event = Event::WorkerTask {
task_id: current_task.key(),
};
worker
.send(TownWorkerEventMsg(
event,
Utc.from_utc_datetime(&next_task.start_time),
))
.wait()
.expect("Send msg to actor");
}
}
fn interrupt_task(current_task: &mut Task, worker: &Worker) -> Option<NaiveDateTime> {
match current_task.task_type {
TaskType::Idle
| TaskType::ChopTree
| TaskType::Defend
| TaskType::GatherSticks
| TaskType::CollectReward => {
let now = chrono::Utc::now().naive_utc();
Some(now)
}
TaskType::Walk => {
let speed = unit_speed_to_worker_tiles_per_second(worker.speed) as f64;
let time_so_far: Duration = Utc::now().naive_utc() - current_task.start_time;
let steps = (speed * time_so_far.num_microseconds().unwrap() as f64 / 1_000_000.0)
.ceil() as i32;
let total_time = steps as f64 / speed;
let moment = current_task.start_time
+ chrono::Duration::microseconds((total_time * 1_000_000.0) as i64);
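            // Worked example (numbers assumed): at speed 0.5 tiles/s with
            // 3.2s elapsed, steps = ceil(1.6) = 2 and total_time = 4.0s, so
            // the walk is cut off exactly when the second tile is reached.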
let dx = current_task.x - worker.x;
let dy = current_task.y - worker.y;
let x = if dx == 0 {
worker.x
} else if dx < 0 {
worker.x - steps
} else {
worker.x + steps
};
let y = if dy == 0 {
worker.y
} else if dy < 0 {
worker.y - steps
} else {
worker.y + steps
};
            // Walking can only be interrupted on a full tile, so the task ends at the next step
current_task.x = x;
current_task.y = y;
Some(moment)
}
TaskType::WelcomeAbility => {
let cast_time = current_task.start_time + AbilityType::Welcome.busy_duration();
Some(cast_time)
}
}
}
/// For the given worker, executes tasks on the DB that are due
fn execute_worker_tasks(db: &DB, worker_id: WorkerKey, village: VillageKey) -> Option<Task> {
let mut tasks = db.past_worker_tasks(worker_id);
let current_task = tasks.pop();
let mut town = TownView::load_village(db, village);
for task in tasks {
if let Err(e) = finish_task(db, task.key(), Some(task), Some(&mut town)) {
println!("Executing task failed: {}", e)
}
}
current_task
}
pub(crate) fn finish_task(
db: &DB,
task_id: TaskKey,
task: Option<Task>,
town: Option<&mut TownView>,
) -> Result<Option<(Event, DateTime<Utc>)>, Box<dyn std::error::Error>> {
let task = task.or_else(|| db.task(task_id));
if let Some(task) = task {
let mut worker = db
.worker_priv(task.worker())
.ok_or("Task references non-existing worker")?;
if let Some(town) = town {
crate::worker_actions::simulate_finish_task(&task, town, &mut worker)?;
apply_task_to_db(db, &task, &mut worker)?;
} else {
let mut town = TownView::load_village(db, VillageKey(worker.home));
crate::worker_actions::simulate_finish_task(&task, &mut town, &mut worker)?;
apply_task_to_db(db, &task, &mut worker)?;
}
db.update_worker(&worker);
db.update_worker_flag_timestamp_now(worker.key(), WorkerFlagType::Work);
db.delete_task(&task);
Ok(Event::load_next_worker_task(db, task.worker()))
} else {
// Already executed.
Ok(None)
}
}
fn apply_task_to_db(db: &DB, task: &Task, worker: &mut Worker) -> Result<(), String> {
match task.task_type {
TaskType::WelcomeAbility => {
let a = AbilityType::Welcome;
let (attribute, strength) = a.apply();
let ne = NewEffect {
hobo_id: task.target().ok_or("Ability must have a target")?.num(),
attribute,
strength: Some(strength),
start_time: None, // default = now
};
db.insert_effect(&ne);
db.update_ability_used_timestamp(WorkerKey(worker.id), a);
*worker.mana.as_mut().unwrap() -= AbilityType::Welcome.mana_cost();
}
TaskType::CollectReward => {
if let Some(building) = db.find_building_by_coordinates(task.x, task.y, worker.home()) {
match building.building_type.reward_exp() {
Some(exp) => {
worker.add_exp(exp);
db.delete_building(&building);
}
None => {
return Err(format!(
"Tried to collect {} as reward",
building.building_type
));
}
}
} else {
return Err(format!("No reward to collect at {},{}", task.x, task.y));
}
}
_ => { /* NOP */ }
}
Ok(())
}
/// (Try to) apply changes to village state that happen when a worker stops doing a given task.
/// E.g. remove unit from building.
/// Returns the time it takes until the task is actually finished.
fn simulate_finish_task<T: WorkerAction>(
task: &T,
town: &mut TownView,
worker: &mut Worker,
) -> Result<Duration, String> {
match task.task_type() {
TaskType::Idle => Ok(Duration::milliseconds(0)),
TaskType::Walk => Ok(worker_walk(
town,
worker,
(task.x() as usize, task.y() as usize),
)?),
TaskType::GatherSticks | TaskType::ChopTree => {
town.state
.register_task_end(*task.task_type())
.map_err(|e| e.to_string())?;
worker_out_of_building(town, worker, (task.x() as usize, task.y() as usize))
}
TaskType::WelcomeAbility => {
let a = AbilityType::Welcome;
let duration = a.busy_duration();
Ok(duration)
}
TaskType::CollectReward => {
// Lookup object to be collected, then delete it in TownView
// Note: DB update is separate
let index = (task.x() as usize, task.y() as usize);
town.state.remove(&index);
Ok(Duration::milliseconds(0))
}
TaskType::Defend => Err("Task not implemented".to_owned()),
}
}
/// (Try to) apply changes to village state that happen when a worker starts a given task.
/// E.g. add unit to a building, or pay required price (only if it is TownView), ...
fn simulate_begin_task<T: WorkerAction>(
task: &T,
town: &mut TownView,
worker: &mut Worker,
) -> Result<(), String> {
match task.task_type() {
TaskType::Idle | TaskType::Walk | TaskType::CollectReward => Ok(()),
TaskType::GatherSticks | TaskType::ChopTree => {
town.state
.register_task_begin(*task.task_type())
.map_err(|e| e.to_string())?;
worker_into_building(town, worker, (task.x() as usize, task.y() as usize))
}
TaskType::WelcomeAbility => {
if let Some(mana) = &mut worker.mana {
let cost = AbilityType::Welcome.mana_cost();
if *mana >= cost {
*mana = *mana - cost;
Ok(())
} else {
Err("Not enough mana".to_owned())
}
} else {
Err("Worker has no mana but tries to use welcome ability".to_owned())
}
}
TaskType::Defend => Err("Task not implemented".to_owned()),
}
}
impl WorkerAction for NewTask {
fn x(&self) -> i32 {
self.x
}
fn | (&self) -> i32 {
self.y
}
fn task_type(&self) -> &TaskType {
&self.task_type
}
fn target(&self) -> Option<HoboKey> {
self.target_hobo_id.map(HoboKey)
}
}
impl WorkerAction for Task {
fn x(&self) -> i32 {
self.x
}
fn y(&self) -> i32 {
self.y
}
fn task_type(&self) -> &TaskType {
&self.task_type
}
fn target(&self) -> Option<HoboKey> {
self.target_hobo_id.map(HoboKey)
}
}
| y | identifier_name |
worker_actions.rs | //! Tasks and task execution of workers
//!
//! Note: This module and submodules will sooner or later need some refactoring.
//! For now, I still don't really know how I want it to look.
mod worker_abilities;
mod worker_updates;
use crate::db::DB;
use crate::game_master::event::*;
use crate::game_master::town_worker::*;
use crate::town_view::*;
use actix::prelude::*;
use chrono::offset::TimeZone;
use chrono::{DateTime, Duration, NaiveDateTime, Utc};
use paddlers_shared_lib::api::tasks::*;
use paddlers_shared_lib::game_mechanics::worker::*;
use paddlers_shared_lib::prelude::*;
use worker_abilities::*;
use worker_updates::MutWorkerDBEntity;
trait WorkerAction {
fn x(&self) -> i32;
fn y(&self) -> i32;
fn task_type(&self) -> &TaskType;
fn target(&self) -> Option<HoboKey>;
}
pub struct ValidatedTaskList {
pub new_tasks: Vec<NewTask>,
pub update_tasks: Vec<Task>,
pub village_id: VillageKey,
}
pub(crate) fn validate_task_list(
db: &DB,
tl: &TaskList,
) -> Result<ValidatedTaskList, Box<dyn std::error::Error>> {
let worker_id = tl.worker_id;
// Load relevant data into memory
let mut worker = db.worker_priv(worker_id).ok_or("Worker does not exist")?;
let village_id = VillageKey(worker.home);
let mut town = TownView::load_village(db, village_id);
// check timing and effect of current task interruption
let mut current_task = db
.current_task(worker.key())
.expect("Must have a current task");
let mut timestamp =
interrupt_task(&mut current_task, &worker).ok_or("Cannot interrupt current task.")?;
worker.x = current_task.x;
worker.y = current_task.y;
// iterate tasks and match for task types
let mut tasks = vec![];
for task in tl.tasks.iter() {
// Validate target hobo exists if there is one
if let Some(target_id) = task.target {
db.hobo(HoboKey(target_id)).ok_or("No such hobo id")?;
}
validate_ability(db, task.task_type, worker_id, timestamp)?;
let new_task = NewTask {
worker_id: worker_id.num(),
task_type: task.task_type,
x: task.x as i32,
y: task.y as i32,
start_time: Some(timestamp),
target_hobo_id: task.target,
};
simulate_begin_task(&new_task, &mut town, &mut worker)?;
let duration = simulate_finish_task(&new_task, &mut town, &mut worker)?;
tasks.push(new_task);
timestamp += duration;
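        // Note: each task is simulated to completion in memory so the next
        // task's start_time lines up with when the worker actually gets free.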
}
Ok(ValidatedTaskList {
new_tasks: tasks,
update_tasks: vec![current_task],
village_id,
})
}
pub(crate) fn replace_worker_tasks(
db: &DB,
worker: &Addr<TownWorker>,
worker_id: WorkerKey,
tasks: &[NewTask],
village_id: VillageKey,
) {
db.flush_task_queue(worker_id);
let _inserted = db.insert_tasks(tasks);
let current_task =
execute_worker_tasks(db, worker_id, village_id).expect("Worker has no current task");
if let Some(next_task) = db.earliest_future_task(worker_id) {
let event = Event::WorkerTask {
task_id: current_task.key(),
};
worker
.send(TownWorkerEventMsg(
event,
Utc.from_utc_datetime(&next_task.start_time),
))
.wait()
.expect("Send msg to actor");
}
}
fn interrupt_task(current_task: &mut Task, worker: &Worker) -> Option<NaiveDateTime> {
match current_task.task_type {
TaskType::Idle
| TaskType::ChopTree
| TaskType::Defend
| TaskType::GatherSticks
| TaskType::CollectReward => {
let now = chrono::Utc::now().naive_utc();
Some(now)
}
TaskType::Walk => {
let speed = unit_speed_to_worker_tiles_per_second(worker.speed) as f64;
let time_so_far: Duration = Utc::now().naive_utc() - current_task.start_time;
let steps = (speed * time_so_far.num_microseconds().unwrap() as f64 / 1_000_000.0)
.ceil() as i32;
let total_time = steps as f64 / speed;
let moment = current_task.start_time
+ chrono::Duration::microseconds((total_time * 1_000_000.0) as i64);
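            // Worked example (numbers assumed): at speed 0.5 tiles/s with
            // 3.2s elapsed, steps = ceil(1.6) = 2 and total_time = 4.0s, so
            // the walk is cut off exactly when the second tile is reached.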
let dx = current_task.x - worker.x;
let dy = current_task.y - worker.y;
let x = if dx == 0 {
worker.x
} else if dx < 0 {
worker.x - steps
} else {
worker.x + steps
};
let y = if dy == 0 {
worker.y
} else if dy < 0 {
worker.y - steps
} else {
worker.y + steps
};
            // Walking can only be interrupted on a full tile, so the task ends at the next step
current_task.x = x;
current_task.y = y;
Some(moment)
}
TaskType::WelcomeAbility => {
let cast_time = current_task.start_time + AbilityType::Welcome.busy_duration();
Some(cast_time)
}
}
}
/// For the given worker, executes tasks on the DB that are due
fn execute_worker_tasks(db: &DB, worker_id: WorkerKey, village: VillageKey) -> Option<Task> {
let mut tasks = db.past_worker_tasks(worker_id);
let current_task = tasks.pop();
let mut town = TownView::load_village(db, village);
for task in tasks {
if let Err(e) = finish_task(db, task.key(), Some(task), Some(&mut town)) {
println!("Executing task failed: {}", e)
}
}
current_task
}
pub(crate) fn finish_task(
db: &DB,
task_id: TaskKey,
task: Option<Task>,
town: Option<&mut TownView>,
) -> Result<Option<(Event, DateTime<Utc>)>, Box<dyn std::error::Error>> {
let task = task.or_else(|| db.task(task_id));
if let Some(task) = task {
let mut worker = db
.worker_priv(task.worker())
.ok_or("Task references non-existing worker")?;
if let Some(town) = town {
crate::worker_actions::simulate_finish_task(&task, town, &mut worker)?;
apply_task_to_db(db, &task, &mut worker)?;
} else {
let mut town = TownView::load_village(db, VillageKey(worker.home));
crate::worker_actions::simulate_finish_task(&task, &mut town, &mut worker)?;
apply_task_to_db(db, &task, &mut worker)?;
}
db.update_worker(&worker);
db.update_worker_flag_timestamp_now(worker.key(), WorkerFlagType::Work);
db.delete_task(&task);
Ok(Event::load_next_worker_task(db, task.worker()))
} else {
// Already executed.
Ok(None)
}
}
fn apply_task_to_db(db: &DB, task: &Task, worker: &mut Worker) -> Result<(), String> {
match task.task_type {
TaskType::WelcomeAbility => {
let a = AbilityType::Welcome;
let (attribute, strength) = a.apply();
let ne = NewEffect {
hobo_id: task.target().ok_or("Ability must have a target")?.num(),
attribute,
strength: Some(strength),
start_time: None, // default = now
};
db.insert_effect(&ne);
db.update_ability_used_timestamp(WorkerKey(worker.id), a);
*worker.mana.as_mut().unwrap() -= AbilityType::Welcome.mana_cost();
}
TaskType::CollectReward => {
if let Some(building) = db.find_building_by_coordinates(task.x, task.y, worker.home()) {
match building.building_type.reward_exp() {
Some(exp) => {
worker.add_exp(exp);
db.delete_building(&building);
}
None => {
return Err(format!(
"Tried to collect {} as reward",
building.building_type
));
}
}
} else {
return Err(format!("No reward to collect at {},{}", task.x, task.y));
}
}
_ => { /* NOP */ }
}
Ok(())
}
/// (Try to) apply changes to village state that happen when a worker stops doing a given task.
/// E.g. remove unit from building.
/// Returns the time it takes until the task is actually finished.
fn simulate_finish_task<T: WorkerAction>(
task: &T,
town: &mut TownView,
worker: &mut Worker,
) -> Result<Duration, String> {
match task.task_type() {
TaskType::Idle => Ok(Duration::milliseconds(0)),
TaskType::Walk => Ok(worker_walk(
town,
worker,
(task.x() as usize, task.y() as usize),
)?),
TaskType::GatherSticks | TaskType::ChopTree => {
town.state
.register_task_end(*task.task_type())
.map_err(|e| e.to_string())?;
worker_out_of_building(town, worker, (task.x() as usize, task.y() as usize))
}
TaskType::WelcomeAbility => {
let a = AbilityType::Welcome;
let duration = a.busy_duration();
Ok(duration)
}
TaskType::CollectReward => {
// Lookup object to be collected, then delete it in TownView
// Note: DB update is separate
let index = (task.x() as usize, task.y() as usize);
town.state.remove(&index);
Ok(Duration::milliseconds(0))
}
TaskType::Defend => Err("Task not implemented".to_owned()),
}
}
/// (Try to) apply changes to village state that happen when a worker starts a given task.
/// E.g. add unit to a building, or pay required price (only if it is TownView), ...
fn simulate_begin_task<T: WorkerAction>(
task: &T,
town: &mut TownView,
worker: &mut Worker,
) -> Result<(), String> {
match task.task_type() {
TaskType::Idle | TaskType::Walk | TaskType::CollectReward => Ok(()),
TaskType::GatherSticks | TaskType::ChopTree => |
TaskType::WelcomeAbility => {
if let Some(mana) = &mut worker.mana {
let cost = AbilityType::Welcome.mana_cost();
if *mana >= cost {
*mana = *mana - cost;
Ok(())
} else {
Err("Not enough mana".to_owned())
}
} else {
Err("Worker has no mana but tries to use welcome ability".to_owned())
}
}
TaskType::Defend => Err("Task not implemented".to_owned()),
}
}
impl WorkerAction for NewTask {
fn x(&self) -> i32 {
self.x
}
fn y(&self) -> i32 {
self.y
}
fn task_type(&self) -> &TaskType {
&self.task_type
}
fn target(&self) -> Option<HoboKey> {
self.target_hobo_id.map(HoboKey)
}
}
impl WorkerAction for Task {
fn x(&self) -> i32 {
self.x
}
fn y(&self) -> i32 {
self.y
}
fn task_type(&self) -> &TaskType {
&self.task_type
}
fn target(&self) -> Option<HoboKey> {
self.target_hobo_id.map(HoboKey)
}
}
| {
town.state
.register_task_begin(*task.task_type())
.map_err(|e| e.to_string())?;
worker_into_building(town, worker, (task.x() as usize, task.y() as usize))
} | conditional_block |
worker_actions.rs | //! Tasks and task execution of workers
//!
//! Note: This module and submodules will sooner or later need some refactoring.
//! For now, I still don't really know how I want it to look.
mod worker_abilities;
mod worker_updates;
use crate::db::DB;
use crate::game_master::event::*;
use crate::game_master::town_worker::*;
use crate::town_view::*;
use actix::prelude::*;
use chrono::offset::TimeZone;
use chrono::{DateTime, Duration, NaiveDateTime, Utc};
use paddlers_shared_lib::api::tasks::*;
use paddlers_shared_lib::game_mechanics::worker::*;
use paddlers_shared_lib::prelude::*;
use worker_abilities::*;
use worker_updates::MutWorkerDBEntity;
trait WorkerAction {
fn x(&self) -> i32;
fn y(&self) -> i32;
fn task_type(&self) -> &TaskType;
fn target(&self) -> Option<HoboKey>;
}
pub struct ValidatedTaskList {
pub new_tasks: Vec<NewTask>,
pub update_tasks: Vec<Task>,
pub village_id: VillageKey,
}
pub(crate) fn validate_task_list(
db: &DB,
tl: &TaskList,
) -> Result<ValidatedTaskList, Box<dyn std::error::Error>> {
let worker_id = tl.worker_id;
// Load relevant data into memory
let mut worker = db.worker_priv(worker_id).ok_or("Worker does not exist")?;
let village_id = VillageKey(worker.home);
let mut town = TownView::load_village(db, village_id);
// check timing and effect of current task interruption
let mut current_task = db
.current_task(worker.key())
.expect("Must have a current task");
let mut timestamp =
interrupt_task(&mut current_task, &worker).ok_or("Cannot interrupt current task.")?;
worker.x = current_task.x;
worker.y = current_task.y;
// iterate tasks and match for task types
let mut tasks = vec![];
for task in tl.tasks.iter() {
// Validate target hobo exists if there is one
if let Some(target_id) = task.target {
db.hobo(HoboKey(target_id)).ok_or("No such hobo id")?;
}
validate_ability(db, task.task_type, worker_id, timestamp)?;
let new_task = NewTask {
worker_id: worker_id.num(),
task_type: task.task_type,
x: task.x as i32,
y: task.y as i32,
start_time: Some(timestamp),
target_hobo_id: task.target,
};
simulate_begin_task(&new_task, &mut town, &mut worker)?;
let duration = simulate_finish_task(&new_task, &mut town, &mut worker)?;
tasks.push(new_task);
timestamp += duration;
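        // Note: each task is simulated to completion in memory so the next
        // task's start_time lines up with when the worker actually gets free.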
}
Ok(ValidatedTaskList {
new_tasks: tasks,
update_tasks: vec![current_task],
village_id,
})
}
pub(crate) fn replace_worker_tasks(
db: &DB,
worker: &Addr<TownWorker>,
worker_id: WorkerKey,
tasks: &[NewTask],
village_id: VillageKey,
) {
db.flush_task_queue(worker_id);
let _inserted = db.insert_tasks(tasks);
let current_task =
execute_worker_tasks(db, worker_id, village_id).expect("Worker has no current task");
if let Some(next_task) = db.earliest_future_task(worker_id) {
let event = Event::WorkerTask {
task_id: current_task.key(),
};
worker
.send(TownWorkerEventMsg(
event,
Utc.from_utc_datetime(&next_task.start_time),
))
.wait()
.expect("Send msg to actor");
}
}
fn interrupt_task(current_task: &mut Task, worker: &Worker) -> Option<NaiveDateTime> {
match current_task.task_type {
TaskType::Idle
| TaskType::ChopTree
| TaskType::Defend
| TaskType::GatherSticks
| TaskType::CollectReward => {
let now = chrono::Utc::now().naive_utc();
Some(now)
}
TaskType::Walk => {
let speed = unit_speed_to_worker_tiles_per_second(worker.speed) as f64;
let time_so_far: Duration = Utc::now().naive_utc() - current_task.start_time;
let steps = (speed * time_so_far.num_microseconds().unwrap() as f64 / 1_000_000.0)
.ceil() as i32;
let total_time = steps as f64 / speed;
let moment = current_task.start_time
+ chrono::Duration::microseconds((total_time * 1_000_000.0) as i64);
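            // Worked example (numbers assumed): at speed 0.5 tiles/s with
            // 3.2s elapsed, steps = ceil(1.6) = 2 and total_time = 4.0s, so
            // the walk is cut off exactly when the second tile is reached.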
let dx = current_task.x - worker.x;
let dy = current_task.y - worker.y;
let x = if dx == 0 {
worker.x
} else if dx < 0 {
worker.x - steps
} else {
worker.x + steps
};
let y = if dy == 0 {
worker.y
} else if dy < 0 {
worker.y - steps
} else {
worker.y + steps
};
            // Walking can only be interrupted on a full tile, so the task ends at the next step
current_task.x = x;
current_task.y = y;
Some(moment)
}
TaskType::WelcomeAbility => {
let cast_time = current_task.start_time + AbilityType::Welcome.busy_duration();
Some(cast_time)
}
}
}
/// For the given worker, executes tasks on the DB that are due
fn execute_worker_tasks(db: &DB, worker_id: WorkerKey, village: VillageKey) -> Option<Task> {
let mut tasks = db.past_worker_tasks(worker_id);
let current_task = tasks.pop();
let mut town = TownView::load_village(db, village);
for task in tasks {
if let Err(e) = finish_task(db, task.key(), Some(task), Some(&mut town)) {
println!("Executing task failed: {}", e)
}
}
current_task
}
pub(crate) fn finish_task(
db: &DB,
task_id: TaskKey,
task: Option<Task>,
town: Option<&mut TownView>,
) -> Result<Option<(Event, DateTime<Utc>)>, Box<dyn std::error::Error>> {
let task = task.or_else(|| db.task(task_id));
if let Some(task) = task {
let mut worker = db
.worker_priv(task.worker())
.ok_or("Task references non-existing worker")?;
if let Some(town) = town {
crate::worker_actions::simulate_finish_task(&task, town, &mut worker)?;
apply_task_to_db(db, &task, &mut worker)?;
} else {
let mut town = TownView::load_village(db, VillageKey(worker.home));
crate::worker_actions::simulate_finish_task(&task, &mut town, &mut worker)?;
apply_task_to_db(db, &task, &mut worker)?;
}
db.update_worker(&worker);
db.update_worker_flag_timestamp_now(worker.key(), WorkerFlagType::Work);
db.delete_task(&task);
Ok(Event::load_next_worker_task(db, task.worker()))
} else {
// Already executed.
Ok(None)
}
}
fn apply_task_to_db(db: &DB, task: &Task, worker: &mut Worker) -> Result<(), String> {
match task.task_type {
TaskType::WelcomeAbility => {
let a = AbilityType::Welcome;
let (attribute, strength) = a.apply();
let ne = NewEffect {
hobo_id: task.target().ok_or("Ability must have a target")?.num(),
attribute,
strength: Some(strength),
start_time: None, // default = now
};
db.insert_effect(&ne);
db.update_ability_used_timestamp(WorkerKey(worker.id), a);
*worker.mana.as_mut().unwrap() -= AbilityType::Welcome.mana_cost();
}
TaskType::CollectReward => {
if let Some(building) = db.find_building_by_coordinates(task.x, task.y, worker.home()) {
match building.building_type.reward_exp() {
Some(exp) => {
worker.add_exp(exp);
db.delete_building(&building);
}
None => {
return Err(format!(
"Tried to collect {} as reward",
building.building_type
));
}
}
} else {
return Err(format!("No reward to collect at {},{}", task.x, task.y));
}
}
_ => { /* NOP */ }
}
Ok(())
}
/// (Try to) apply changes to village state that happen when a worker stops doing a given task.
/// E.g. remove unit from building.
/// Returns the time it takes until the task is actually finished.
fn simulate_finish_task<T: WorkerAction>(
task: &T,
town: &mut TownView,
worker: &mut Worker,
) -> Result<Duration, String> {
match task.task_type() {
TaskType::Idle => Ok(Duration::milliseconds(0)),
TaskType::Walk => Ok(worker_walk(
town,
worker,
(task.x() as usize, task.y() as usize),
)?),
TaskType::GatherSticks | TaskType::ChopTree => {
town.state
.register_task_end(*task.task_type())
.map_err(|e| e.to_string())?;
worker_out_of_building(town, worker, (task.x() as usize, task.y() as usize))
}
TaskType::WelcomeAbility => {
let a = AbilityType::Welcome;
let duration = a.busy_duration();
Ok(duration)
}
TaskType::CollectReward => {
// Lookup object to be collected, then delete it in TownView
// Note: DB update is separate
let index = (task.x() as usize, task.y() as usize);
town.state.remove(&index);
Ok(Duration::milliseconds(0))
}
TaskType::Defend => Err("Task not implemented".to_owned()),
}
}
/// (Try to) apply changes to village state that happen when a worker starts a given task.
/// E.g. add unit to a building, or pay required price (only if it is TownView), ...
fn simulate_begin_task<T: WorkerAction>(
task: &T,
town: &mut TownView,
worker: &mut Worker,
) -> Result<(), String> {
match task.task_type() {
TaskType::Idle | TaskType::Walk | TaskType::CollectReward => Ok(()),
TaskType::GatherSticks | TaskType::ChopTree => {
town.state
.register_task_begin(*task.task_type())
.map_err(|e| e.to_string())?;
worker_into_building(town, worker, (task.x() as usize, task.y() as usize))
}
TaskType::WelcomeAbility => {
if let Some(mana) = &mut worker.mana {
let cost = AbilityType::Welcome.mana_cost();
if *mana >= cost {
*mana = *mana - cost;
Ok(())
} else {
Err("Not enough mana".to_owned())
}
} else {
Err("Worker has no mana but tries to use welcome ability".to_owned())
}
}
TaskType::Defend => Err("Task not implemented".to_owned()),
}
}
impl WorkerAction for NewTask {
fn x(&self) -> i32 {
self.x
}
fn y(&self) -> i32 {
self.y
}
fn task_type(&self) -> &TaskType {
&self.task_type
}
fn target(&self) -> Option<HoboKey> {
self.target_hobo_id.map(HoboKey)
}
}
impl WorkerAction for Task {
fn x(&self) -> i32 |
fn y(&self) -> i32 {
self.y
}
fn task_type(&self) -> &TaskType {
&self.task_type
}
fn target(&self) -> Option<HoboKey> {
self.target_hobo_id.map(HoboKey)
}
}
| {
self.x
} | identifier_body |
worker_actions.rs | //! Tasks and task execution of workers
//!
//! Note: This module and submodules will sooner or later need some refactoring.
//! For now, I still don't really know how I want it to look.
mod worker_abilities;
mod worker_updates;
use crate::db::DB;
use crate::game_master::event::*;
use crate::game_master::town_worker::*;
use crate::town_view::*;
use actix::prelude::*;
use chrono::offset::TimeZone;
use chrono::{DateTime, Duration, NaiveDateTime, Utc};
use paddlers_shared_lib::api::tasks::*;
use paddlers_shared_lib::game_mechanics::worker::*;
use paddlers_shared_lib::prelude::*;
use worker_abilities::*;
use worker_updates::MutWorkerDBEntity;
trait WorkerAction {
fn x(&self) -> i32;
fn y(&self) -> i32;
fn task_type(&self) -> &TaskType;
fn target(&self) -> Option<HoboKey>;
}
pub struct ValidatedTaskList {
pub new_tasks: Vec<NewTask>,
pub update_tasks: Vec<Task>,
pub village_id: VillageKey,
}
pub(crate) fn validate_task_list(
db: &DB,
tl: &TaskList,
) -> Result<ValidatedTaskList, Box<dyn std::error::Error>> {
let worker_id = tl.worker_id;
// Load relevant data into memory
let mut worker = db.worker_priv(worker_id).ok_or("Worker does not exist")?;
let village_id = VillageKey(worker.home);
let mut town = TownView::load_village(db, village_id);
// check timing and effect of current task interruption
let mut current_task = db
.current_task(worker.key())
.expect("Must have a current task");
let mut timestamp =
interrupt_task(&mut current_task, &worker).ok_or("Cannot interrupt current task.")?;
worker.x = current_task.x;
worker.y = current_task.y;
// iterate tasks and match for task types | // Validate target hobo exists if there is one
if let Some(target_id) = task.target {
db.hobo(HoboKey(target_id)).ok_or("No such hobo id")?;
}
validate_ability(db, task.task_type, worker_id, timestamp)?;
let new_task = NewTask {
worker_id: worker_id.num(),
task_type: task.task_type,
x: task.x as i32,
y: task.y as i32,
start_time: Some(timestamp),
target_hobo_id: task.target,
};
simulate_begin_task(&new_task, &mut town, &mut worker)?;
let duration = simulate_finish_task(&new_task, &mut town, &mut worker)?;
tasks.push(new_task);
timestamp += duration;
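        // Note: each task is simulated to completion in memory so the next
        // task's start_time lines up with when the worker actually gets free.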
}
Ok(ValidatedTaskList {
new_tasks: tasks,
update_tasks: vec![current_task],
village_id,
})
}
pub(crate) fn replace_worker_tasks(
db: &DB,
worker: &Addr<TownWorker>,
worker_id: WorkerKey,
tasks: &[NewTask],
village_id: VillageKey,
) {
db.flush_task_queue(worker_id);
let _inserted = db.insert_tasks(tasks);
let current_task =
execute_worker_tasks(db, worker_id, village_id).expect("Worker has no current task");
if let Some(next_task) = db.earliest_future_task(worker_id) {
let event = Event::WorkerTask {
task_id: current_task.key(),
};
worker
.send(TownWorkerEventMsg(
event,
Utc.from_utc_datetime(&next_task.start_time),
))
.wait()
.expect("Send msg to actor");
}
}
fn interrupt_task(current_task: &mut Task, worker: &Worker) -> Option<NaiveDateTime> {
match current_task.task_type {
TaskType::Idle
| TaskType::ChopTree
| TaskType::Defend
| TaskType::GatherSticks
| TaskType::CollectReward => {
let now = chrono::Utc::now().naive_utc();
Some(now)
}
TaskType::Walk => {
let speed = unit_speed_to_worker_tiles_per_second(worker.speed) as f64;
let time_so_far: Duration = Utc::now().naive_utc() - current_task.start_time;
let steps = (speed * time_so_far.num_microseconds().unwrap() as f64 / 1_000_000.0)
.ceil() as i32;
let total_time = steps as f64 / speed;
let moment = current_task.start_time
+ chrono::Duration::microseconds((total_time * 1_000_000.0) as i64);
let dx = current_task.x - worker.x;
let dy = current_task.y - worker.y;
let x = if dx == 0 {
worker.x
} else if dx < 0 {
worker.x - steps
} else {
worker.x + steps
};
let y = if dy == 0 {
worker.y
} else if dy < 0 {
worker.y - steps
} else {
worker.y + steps
};
// Walking must terminate earlier
current_task.x = x;
current_task.y = y;
Some(moment)
}
TaskType::WelcomeAbility => {
let cast_time = current_task.start_time + AbilityType::Welcome.busy_duration();
Some(cast_time)
}
}
}
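// Worked example for the Walk arm (hypothetical numbers): at a speed of
// 0.5 tiles/s with 3.2 s elapsed since start_time, steps = ceil(0.5 * 3.2) = 2
// and total_time = 2 / 0.5 = 4 s, so the walk is interrupted at
// start_time + 4 s, once the worker has fully entered the second tile.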
/// For the given worker, executes tasks on the DB that are due
fn execute_worker_tasks(db: &DB, worker_id: WorkerKey, village: VillageKey) -> Option<Task> {
let mut tasks = db.past_worker_tasks(worker_id);
let current_task = tasks.pop();
let mut town = TownView::load_village(db, village);
for task in tasks {
if let Err(e) = finish_task(db, task.key(), Some(task), Some(&mut town)) {
println!("Executing task failed: {}", e)
}
}
current_task
}
pub(crate) fn finish_task(
db: &DB,
task_id: TaskKey,
task: Option<Task>,
town: Option<&mut TownView>,
) -> Result<Option<(Event, DateTime<Utc>)>, Box<dyn std::error::Error>> {
let task = task.or_else(|| db.task(task_id));
if let Some(task) = task {
let mut worker = db
.worker_priv(task.worker())
.ok_or("Task references non-existing worker")?;
if let Some(town) = town {
crate::worker_actions::simulate_finish_task(&task, town, &mut worker)?;
apply_task_to_db(db, &task, &mut worker)?;
} else {
let mut town = TownView::load_village(db, VillageKey(worker.home));
crate::worker_actions::simulate_finish_task(&task, &mut town, &mut worker)?;
apply_task_to_db(db, &task, &mut worker)?;
}
db.update_worker(&worker);
db.update_worker_flag_timestamp_now(worker.key(), WorkerFlagType::Work);
db.delete_task(&task);
Ok(Event::load_next_worker_task(db, task.worker()))
} else {
// Already executed.
Ok(None)
}
}
fn apply_task_to_db(db: &DB, task: &Task, worker: &mut Worker) -> Result<(), String> {
match task.task_type {
TaskType::WelcomeAbility => {
let a = AbilityType::Welcome;
let (attribute, strength) = a.apply();
let ne = NewEffect {
hobo_id: task.target().ok_or("Ability must have a target")?.num(),
attribute,
strength: Some(strength),
start_time: None, // default = now
};
db.insert_effect(&ne);
db.update_ability_used_timestamp(WorkerKey(worker.id), a);
*worker.mana.as_mut().unwrap() -= AbilityType::Welcome.mana_cost();
}
TaskType::CollectReward => {
if let Some(building) = db.find_building_by_coordinates(task.x, task.y, worker.home()) {
match building.building_type.reward_exp() {
Some(exp) => {
worker.add_exp(exp);
db.delete_building(&building);
}
None => {
return Err(format!(
"Tried to collect {} as reward",
building.building_type
));
}
}
} else {
return Err(format!("No reward to collect at {},{}", task.x, task.y));
}
}
_ => { /* NOP */ }
}
Ok(())
}
/// (Try to) apply changes to village state that happen when a worker stops doing a given task.
/// E.g. remove unit from building.
/// Returns the time it takes until the task is actually finished.
fn simulate_finish_task<T: WorkerAction>(
task: &T,
town: &mut TownView,
worker: &mut Worker,
) -> Result<Duration, String> {
match task.task_type() {
TaskType::Idle => Ok(Duration::milliseconds(0)),
TaskType::Walk => Ok(worker_walk(
town,
worker,
(task.x() as usize, task.y() as usize),
)?),
TaskType::GatherSticks | TaskType::ChopTree => {
town.state
.register_task_end(*task.task_type())
.map_err(|e| e.to_string())?;
worker_out_of_building(town, worker, (task.x() as usize, task.y() as usize))
}
TaskType::WelcomeAbility => {
let a = AbilityType::Welcome;
let duration = a.busy_duration();
Ok(duration)
}
TaskType::CollectReward => {
// Lookup object to be collected, then delete it in TownView
// Note: DB update is separate
let index = (task.x() as usize, task.y() as usize);
town.state.remove(&index);
Ok(Duration::milliseconds(0))
}
TaskType::Defend => Err("Task not implemented".to_owned()),
}
}
/// (Try to) apply changes to village state that happen when a worker starts a given task.
/// E.g. add unit to a building, or pay required price (only if it is TownView), ...
fn simulate_begin_task<T: WorkerAction>(
task: &T,
town: &mut TownView,
worker: &mut Worker,
) -> Result<(), String> {
match task.task_type() {
TaskType::Idle | TaskType::Walk | TaskType::CollectReward => Ok(()),
TaskType::GatherSticks | TaskType::ChopTree => {
town.state
.register_task_begin(*task.task_type())
.map_err(|e| e.to_string())?;
worker_into_building(town, worker, (task.x() as usize, task.y() as usize))
}
TaskType::WelcomeAbility => {
if let Some(mana) = &mut worker.mana {
let cost = AbilityType::Welcome.mana_cost();
if *mana >= cost {
                    *mana -= cost;
Ok(())
} else {
Err("Not enough mana".to_owned())
}
} else {
Err("Worker has no mana but tries to use welcome ability".to_owned())
}
}
TaskType::Defend => Err("Task not implemented".to_owned()),
}
}
impl WorkerAction for NewTask {
fn x(&self) -> i32 {
self.x
}
fn y(&self) -> i32 {
self.y
}
fn task_type(&self) -> &TaskType {
&self.task_type
}
fn target(&self) -> Option<HoboKey> {
self.target_hobo_id.map(HoboKey)
}
}
impl WorkerAction for Task {
fn x(&self) -> i32 {
self.x
}
fn y(&self) -> i32 {
self.y
}
fn task_type(&self) -> &TaskType {
&self.task_type
}
fn target(&self) -> Option<HoboKey> {
self.target_hobo_id.map(HoboKey)
}
}
lockfree.rs
use crate::{
arenas::atomic_bucket::{AtomicBucket, AtomicBucketList},
Capacity, LassoError, LassoErrorKind, LassoResult, MemoryLimits,
};
use core::{
fmt::{self, Debug},
num::NonZeroUsize,
slice, str,
sync::atomic::{AtomicUsize, Ordering},
};
/// An arena allocator that dynamically grows in size when needed, allocating memory in large chunks
pub(crate) struct LockfreeArena {
/// All the internal buckets, storing all allocated and unallocated items
// TODO: We could keep around a second list of buckets to store filled buckets
// in to keep us from having to iterate over them, need more tests to
// see what the impact of that is
buckets: AtomicBucketList,
/// The default capacity of each bucket
///
/// Invariant: `bucket_capacity` must never be zero
bucket_capacity: AtomicUsize,
memory_usage: AtomicUsize,
max_memory_usage: AtomicUsize,
}
impl LockfreeArena {
/// Create a new Arena with the default bucket size of 4096 bytes
pub fn new(capacity: NonZeroUsize, max_memory_usage: usize) -> LassoResult<Self> {
Ok(Self {
// Allocate one bucket
buckets: AtomicBucketList::new(capacity)?,
bucket_capacity: AtomicUsize::new(capacity.get()),
// The current capacity is whatever size the bucket we just allocated is
memory_usage: AtomicUsize::new(capacity.get()),
max_memory_usage: AtomicUsize::new(max_memory_usage),
})
}
#[inline]
pub(crate) fn current_memory_usage(&self) -> usize {
self.memory_usage.load(Ordering::Relaxed)
}
#[inline]
pub(crate) fn set_max_memory_usage(&self, max_memory_usage: usize) {
self.max_memory_usage
.store(max_memory_usage, Ordering::Relaxed);
}
#[inline]
pub(crate) fn get_max_memory_usage(&self) -> usize {
self.max_memory_usage.load(Ordering::Relaxed)
}
fn set_bucket_capacity(&self, capacity: usize) {
debug_assert_ne!(capacity, 0);
self.bucket_capacity.store(capacity, Ordering::Relaxed);
}
    /// Doesn't actually allocate anything, but increments `self.memory_usage` and returns an
    /// error if the attempted amount surpasses `max_memory_usage`
    fn allocate_memory(&self, requested_mem: usize) -> LassoResult<()> {
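        // Note: the limit check and the increment below are two separate atomic
        // operations, so concurrent callers can race past `max_memory_usage`;
        // the limit is best-effort rather than exact.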
if self.memory_usage.load(Ordering::Relaxed) + requested_mem
> self.max_memory_usage.load(Ordering::Relaxed)
{
Err(LassoError::new(LassoErrorKind::MemoryLimitReached))
} else {
self.memory_usage
.fetch_add(requested_mem, Ordering::Relaxed);
Ok(())
}
}
    /// Store a slice in the Arena, returning an error if memory is exhausted
///
/// # Safety
///
/// The reference passed back must be dropped before the arena that created it is
///
pub unsafe fn store_str(&self, string: &str) -> LassoResult<&'static str> {
// If the string is empty, simply return an empty string.
// This ensures that only strings with lengths greater
// than zero will be allocated within the arena
if string.is_empty() {
return Ok("");
}
let slice = string.as_bytes();
debug_assert_ne!(slice.len(), 0);
// Iterate over all of the buckets within the list while attempting to find one
// that has enough space to fit our string within it
//
// This is a tradeoff between allocation speed and memory usage. As-is we prioritize
// allocation speed in exchange for potentially missing possible reuse situations
// and then allocating more memory than is strictly necessary. In practice this shouldn't
        // really matter, but it's worth noting that the opposite tradeoff can be made by adding
        // bounded retries within this loop: the worst-case performance suffers in exchange for
        // potentially better memory usage.
for bucket in self.buckets.iter() {
if let Ok(start) = bucket.try_inc_length(slice.len()) {
// Safety: We now have exclusive access to `bucket[start..start + slice.len()]`
let allocated = unsafe { bucket.slice_mut(start) };
// Copy the given slice into the allocation
unsafe { allocated.copy_from_nonoverlapping(slice.as_ptr(), slice.len()) };
// Return the successfully allocated string
let string = unsafe {
str::from_utf8_unchecked(slice::from_raw_parts(allocated, slice.len()))
};
return Ok(string);
}
// Otherwise the bucket doesn't have sufficient capacity for the string
// so we carry on searching through allocated buckets
}
// If we couldn't find a pre-existing bucket with enough room in it, allocate our own bucket
let next_capacity = self.bucket_capacity.load(Ordering::Relaxed) * 2;
debug_assert_ne!(next_capacity, 0);
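        // With the default 4096-byte buckets this doubling yields 8192, 16384, ...
        // (see the `exponential_allocations` test below).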
// If the current string's length is greater than the doubled current capacity, allocate a bucket exactly the
// size of the large string and push it back in the buckets vector. This ensures that obscenely large strings will
// not permanently affect the resource consumption of the interner
if slice.len() > next_capacity {
// Check that we haven't exhausted our memory limit
self.allocate_memory(slice.len())?;
// Safety: `len` will never be zero since we explicitly handled zero-length strings
// at the beginning of the function
let non_zero_len = unsafe { NonZeroUsize::new_unchecked(slice.len()) };
debug_assert_ne!(slice.len(), 0);
let mut bucket = AtomicBucket::with_capacity(non_zero_len)?;
// Safety: The new bucket will have exactly enough room for the string and we have
// exclusive access to the bucket since we just created it
let allocated_string = unsafe { bucket.push_slice(slice) };
self.buckets.push_front(bucket.into_ref());
Ok(allocated_string)
} else {
let memory_usage = self.current_memory_usage();
let max_memory_usage = self.get_max_memory_usage();
// If trying to use the doubled capacity will surpass our memory limit, just allocate as much as we can
if memory_usage + next_capacity > max_memory_usage {
let remaining_memory = max_memory_usage.saturating_sub(memory_usage);
// Check that we haven't exhausted our memory limit
self.allocate_memory(remaining_memory)?;
// Set the capacity to twice of what it currently is to allow for fewer allocations as more strings are interned
let mut bucket = AtomicBucket::with_capacity(
NonZeroUsize::new(remaining_memory)
.ok_or_else(|| LassoError::new(LassoErrorKind::MemoryLimitReached))?,
)?;
// Safety: The new bucket will have exactly enough room for the string and we have
// exclusive access to the bucket since we just created it
let allocated_string = unsafe { bucket.push_slice(slice) };
// TODO: Push the bucket to the back or something so that we can get it somewhat out
// of the search path, reduce the `n` in the `O(n)` list traversal
self.buckets.push_front(bucket.into_ref());
Ok(allocated_string)
// Otherwise just allocate a normal doubled bucket
} else {
// Check that we haven't exhausted our memory limit
self.allocate_memory(next_capacity)?;
// Set the capacity to twice of what it currently is to allow for fewer allocations as more strings are interned
self.set_bucket_capacity(next_capacity);
// Safety: `next_capacity` will never be zero
let capacity = unsafe { NonZeroUsize::new_unchecked(next_capacity) };
debug_assert_ne!(next_capacity, 0);
let mut bucket = AtomicBucket::with_capacity(capacity)?;
// Safety: The new bucket will have enough room for the string
let allocated_string = unsafe { bucket.push_slice(slice) };
self.buckets.push_front(bucket.into_ref());
Ok(allocated_string)
}
}
}
}
impl Default for LockfreeArena {
fn default() -> Self {
Self::new(
Capacity::default().bytes,
MemoryLimits::default().max_memory_usage,
)
.expect("failed to create default arena")
}
}
impl Debug for LockfreeArena {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct TotalBuckets(usize);
impl Debug for TotalBuckets {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0 == 1 {
f.write_str("...1 bucket")
} else {
write!(f, "...{} buckets", self.0)
}
}
}
f.debug_struct("Arena")
.field("buckets", &TotalBuckets(self.buckets.len()))
.field(
"bucket_capacity",
&self.bucket_capacity.load(Ordering::Relaxed),
)
.field("memory_usage", &self.memory_usage.load(Ordering::Relaxed))
.field(
"max_memory_usage",
&self.max_memory_usage.load(Ordering::Relaxed),
)
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn string() {
let arena = LockfreeArena::default();
unsafe {
let idx = arena.store_str("test");
assert_eq!(idx, Ok("test"));
}
}
#[test]
fn empty_str() {
let arena = LockfreeArena::default();
unsafe {
let zst = arena.store_str("");
let zst1 = arena.store_str("");
let zst2 = arena.store_str("");
assert_eq!(zst, Ok(""));
assert_eq!(zst1, Ok(""));
assert_eq!(zst2, Ok(""));
}
}
#[test]
fn exponential_allocations() {
let arena = LockfreeArena::default();
let mut len = 4096;
for _ in 0..10 {
let large_string = "a".repeat(len);
let arena_string = unsafe { arena.store_str(&large_string) };
assert_eq!(arena_string, Ok(large_string.as_str()));
len *= 2;
}
}
#[test]
fn memory_exhausted() {
let arena = LockfreeArena::new(NonZeroUsize::new(10).unwrap(), 10).unwrap();
unsafe {
assert!(arena.store_str("0123456789").is_ok());
// ZSTs take up zero bytes
arena.store_str("").unwrap();
let err = arena.store_str("a").unwrap_err();
assert!(err.kind().is_memory_limit());
let err = arena.store_str("dfgsagdfgsdf").unwrap_err();
assert!(err.kind().is_memory_limit());
}
}
#[test]
fn allocate_too_much() {
let arena = LockfreeArena::new(NonZeroUsize::new(1).unwrap(), 10).unwrap();
unsafe {
let err = arena.store_str("abcdefghijklmnopqrstuvwxyz").unwrap_err();
assert!(err.kind().is_memory_limit());
}
}
#[test]
fn allocate_more_than_double() {
let arena = LockfreeArena::new(NonZeroUsize::new(1).unwrap(), 1000).unwrap();
unsafe {
assert!(arena.store_str("abcdefghijklmnopqrstuvwxyz").is_ok());
}
}
}
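// A minimal usage sketch (not part of the original file): it exercises the
// bucket-doubling path and the memory accounting described above. The
// constants are illustrative assumptions, not values from the crate.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn doubling_growth_and_accounting() {
        // Start with a single 8-byte bucket and a generous 1 KiB memory cap.
        let arena = LockfreeArena::new(NonZeroUsize::new(8).unwrap(), 1024).unwrap();
        unsafe {
            // Fits exactly into the initial bucket.
            assert!(arena.store_str("12345678").is_ok());
            // Does not fit anywhere, so a doubled (16-byte) bucket is allocated.
            assert!(arena.store_str("123456789").is_ok());
        }
        // Accounting now covers both buckets: 8 + 16 bytes.
        assert_eq!(arena.current_memory_usage(), 24);
    }
}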
config.rs
use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
use std::net::SocketAddr;
use std::ops::Range;
use std::time::Duration;
use clap::{app_from_crate, crate_authors, crate_description, crate_name, crate_version, value_t, Arg, SubCommand};
use toml;
use serde_derive::{Deserialize, Serialize};
use raft_tokio::RaftOptions;
use crate::aggregate::AggregationMode;
use crate::management::{ConsensusAction, LeaderAction, MgmtCommand};
use crate::{ConsensusKind, ConsensusState};
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct System {
/// Logging level
pub verbosity: String,
/// Network settings
pub network: Network,
/// Internal Raft settings
pub raft: Raft,
/// Consul settings
pub consul: Consul,
/// Metric settings
pub metrics: Metrics,
/// Carbon backend settings
pub carbon: Carbon,
/// Number of networking threads, use 0 for number of CPUs
pub n_threads: usize,
/// Number of aggregating(worker) threads, set to 0 to use all CPU cores
pub w_threads: usize,
    /// Queue size for a single counting thread before packets are dropped
    pub task_queue_size: usize,
    /// Should we start with the leader state enabled or not
    pub start_as_leader: bool,
/// How often to gather own stats, in ms. Use 0 to disable (stats are still gathered, but not included in
/// metric dump)
pub stats_interval: u64,
/// Prefix to send own metrics with
pub stats_prefix: String,
/// Consensus kind to use
pub consensus: ConsensusKind,
}
impl Default for System {
fn default() -> Self {
Self {
verbosity: "warn".to_string(),
network: Network::default(),
raft: Raft::default(),
consul: Consul::default(),
metrics: Metrics::default(),
carbon: Carbon::default(),
n_threads: 4,
w_threads: 4,
stats_interval: 10000,
task_queue_size: 2048,
start_as_leader: false,
stats_prefix: "resources.monitoring.bioyino".to_string(),
consensus: ConsensusKind::None,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Metrics {
// TODO: Maximum metric array size, 0 for unlimited
// max_metrics: usize,
/// Should we provide metrics with top update numbers
pub count_updates: bool,
/// Prefix for metric update statistics
pub update_counter_prefix: String,
/// Suffix for metric update statistics
pub update_counter_suffix: String,
/// Minimal update count to be reported
pub update_counter_threshold: u32,
/// Consistent parsing
pub consistent_parsing: bool,
/// Whether we should spam parsing errors in logs
pub log_parse_errors: bool,
    /// Maximum length of data the parser can keep in the buffer before considering it trash and
    /// throwing it away
pub max_unparsed_buffer: usize,
/// Choose the way of aggregation
pub aggregation_mode: AggregationMode,
/// Number of threads when aggregating in "multi" mode
pub aggregation_threads: Option<usize>,
}
impl Default for Metrics {
fn default() -> Self {
Self {
// max_metrics: 0,
count_updates: true,
update_counter_prefix: "resources.monitoring.bioyino.updates".to_string(),
update_counter_suffix: String::new(),
update_counter_threshold: 200,
consistent_parsing: true,
log_parse_errors: false,
max_unparsed_buffer: 10000,
aggregation_mode: AggregationMode::Single,
aggregation_threads: None,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Carbon {
// TODO: will be used when multiple backends support is implemented
///// Enable sending to carbon protocol backend
//pub enabled: bool,
/// IP and port of the carbon-protocol backend to send aggregated data to
pub address: String,
/// client bind address
pub bind_address: Option<SocketAddr>,
/// How often to send metrics to this backend, ms
pub interval: u64,
/// How much to sleep when connection to backend fails, ms
pub connect_delay: u64,
    /// Multiply the delay by this value after each consecutive connection failure
pub connect_delay_multiplier: f32,
/// Maximum retry delay, ms
pub connect_delay_max: u64,
    /// How many times to retry sending data to the backend before giving up and dropping all metrics;
    /// note that 0 means 1 try
pub send_retries: usize,
    /// The whole metric array can be split into smaller chunks for each chunk to be sent
    /// in a separate connection. This is a workaround for go-carbon and carbon-c-relay doing
    /// per-connection processing and working ineffectively when lots of metrics are sent in one
/// connection
pub chunks: usize,
}
impl Default for Carbon {
fn default() -> Self {
Self {
// enabled: true,
address: "127.0.0.1:2003".to_string(),
bind_address: None,
interval: 30000,
connect_delay: 250,
connect_delay_multiplier: 2f32,
connect_delay_max: 10000,
send_retries: 30,
chunks: 1,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Network {
/// Address and UDP port to listen for statsd metrics on
pub listen: SocketAddr,
/// Address and port for replication server to listen on
pub peer_listen: SocketAddr,
/// Snapshot client bind address
pub peer_client_bind: Option<SocketAddr>,
/// Address and port for management server to listen on
pub mgmt_listen: SocketAddr,
/// UDP buffer size for single packet. Needs to be around MTU. Packet's bytes after that value
/// may be lost
pub bufsize: usize,
/// Enable multimessage(recvmmsg) mode
pub multimessage: bool,
/// Number of multimessage packets to receive at once if in multimessage mode
pub mm_packets: usize,
    /// Whether to receive packets asynchronously in multimessage mode
pub mm_async: bool,
/// A timeout to return from multimessage mode syscall
pub mm_timeout: u64,
/// A timer to flush incoming buffer making sure metrics are not stuck there
pub buffer_flush_time: u64,
/// A length of incoming buffer to flush it making sure metrics are not stuck there
pub buffer_flush_length: usize,
    /// Number of green threads for single-message mode
pub greens: usize,
/// Socket pool size for single-message mode
pub async_sockets: usize,
/// List of nodes to replicate metrics to
pub nodes: Vec<String>,
/// Interval to send snapshots to nodes, ms
pub snapshot_interval: usize,
}
impl Default for Network {
fn default() -> Self {
Self {
listen: "127.0.0.1:8125".parse().unwrap(),
peer_listen: "127.0.0.1:8136".parse().unwrap(),
peer_client_bind: None,
mgmt_listen: "127.0.0.1:8137".parse().unwrap(),
bufsize: 1500,
multimessage: false,
mm_packets: 100,
mm_async: false,
mm_timeout: 0,
buffer_flush_length: 0,
buffer_flush_time: 0,
greens: 4,
async_sockets: 4,
nodes: Vec::new(),
snapshot_interval: 1000,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Consul {
    /// Consensus state to start in (leader finding is disabled by default)
pub start_as: ConsensusState,
/// Consul agent address
pub agent: SocketAddr,
/// TTL of consul session, ms (consul cannot set it to less than 10s)
pub session_ttl: usize,
/// How often to renew consul session, ms
pub renew_time: usize,
    /// Name of the key to be locked in Consul
pub key_name: String,
}
impl Default for Consul {
fn default() -> Self {
Self { start_as: ConsensusState::Disabled, agent: "127.0.0.1:8500".parse().unwrap(), session_ttl: 11000, renew_time: 1000, key_name: "service/bioyino/lock".to_string() }
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Raft {
/// Delay raft after start (ms)
pub start_delay: u64,
/// Raft heartbeat timeout (ms)
pub heartbeat_timeout: u64,
/// Raft heartbeat timeout (ms)
pub election_timeout_min: u64,
/// Raft heartbeat timeout (ms)
pub election_timeout_max: u64,
/// Name of this node. By default is taken by resolving hostname in DNS.
pub this_node: Option<String>,
/// List of Raft nodes, may include this_node
pub nodes: HashMap<String, u64>,
/// Bind raft client to specific IP when connecting nodes
pub client_bind: Option<SocketAddr>,
}
impl Default for Raft {
fn default() -> Self {
Self { start_delay: 0, heartbeat_timeout: 250, election_timeout_min: 500, election_timeout_max: 750, this_node: None, nodes: HashMap::new(), client_bind: None }
}
}
impl Raft {
pub fn get_raft_options(&self) -> RaftOptions {
RaftOptions {
heartbeat_timeout: Duration::from_millis(self.heartbeat_timeout),
election_timeout: Range { start: Duration::from_millis(self.election_timeout_min), end: Duration::from_millis(self.election_timeout_max) },
}
}
}
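// With the defaults above, this yields a 250 ms heartbeat and an election
// timeout drawn from the 500..750 ms range.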
#[derive(Debug)]
pub enum Command {
Daemon,
Query(MgmtCommand, String),
}
impl System {
pub fn load() -> (Self, Command) {
// This is a first copy of args - with the "config" option
let app = app_from_crate!()
.long_version(concat!(crate_version!(), " ", env!("VERGEN_COMMIT_DATE"), " ", env!("VERGEN_SHA_SHORT")))
.arg(Arg::with_name("config").help("configuration file path").long("config").short("c").required(true).takes_value(true).default_value("/etc/bioyino/bioyino.toml"))
.arg(Arg::with_name("verbosity").short("v").help("logging level").takes_value(true))
            .subcommand(
                SubCommand::with_name("query")
                    .about("send a management command to running bioyino server")
                    .arg(Arg::with_name("host").short("h").default_value("127.0.0.1:8137"))
                    .subcommand(SubCommand::with_name("status").about("get server state"))
                    .subcommand(
                        SubCommand::with_name("consensus")
                            .arg(Arg::with_name("action").index(1))
                            .arg(Arg::with_name("leader_action").index(2).default_value("unchanged")),
                    ),
            )
.get_matches();
let config = value_t!(app.value_of("config"), String).expect("config file must be string");
let mut file = File::open(&config).expect(&format!("opening config file at {}", &config));
let mut config_str = String::new();
file.read_to_string(&mut config_str).expect("reading config file");
let mut system: System = toml::de::from_str(&config_str).expect("parsing config");
if let Some(v) = app.value_of("verbosity") {
system.verbosity = v.into()
}
if let Some(query) = app.subcommand_matches("query") {
let server = value_t!(query.value_of("host"), String).expect("bad server");
if let Some(_) = query.subcommand_matches("status") {
(system, Command::Query(MgmtCommand::Status, server))
            } else if let Some(args) = query.subcommand_matches("consensus") {
                let c_action = value_t!(args.value_of("action"), ConsensusAction).expect("bad consensus action");
                let l_action = value_t!(args.value_of("leader_action"), LeaderAction).expect("bad leader action");
                (system, Command::Query(MgmtCommand::ConsensusCommand(c_action, l_action), server))
            } else {
                // should be unreachable
unreachable!("clap bug?")
}
} else {
(system, Command::Daemon)
}
}
}
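// A minimal parsing sketch (not part of the original file): field names use
// serde's kebab-case rename, every field has a default, and the TOML values
// below are illustrative only.
#[cfg(test)]
mod config_sketch {
    use super::*;

    #[test]
    fn parses_minimal_toml() {
        // An empty document parses because every field is defaulted.
        let empty: System = toml::de::from_str("").expect("defaults should parse");
        assert_eq!(empty.n_threads, 4);
        // Overriding a couple of keys, including one kebab-case rename.
        let cfg: System =
            toml::de::from_str("verbosity = 'info'\nn-threads = 8\n[carbon]\ninterval = 60000\n")
                .expect("parsing sketch config");
        assert_eq!(cfg.verbosity, "info");
        assert_eq!(cfg.n_threads, 8);
        assert_eq!(cfg.carbon.interval, 60000);
    }
}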
config.rs
use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
use std::net::SocketAddr;
use std::ops::Range;
use std::time::Duration;
use clap::{app_from_crate, crate_authors, crate_description, crate_name, crate_version, value_t, Arg, SubCommand};
use toml;
use serde_derive::{Deserialize, Serialize};
use raft_tokio::RaftOptions;
use crate::aggregate::AggregationMode;
use crate::management::{ConsensusAction, LeaderAction, MgmtCommand};
use crate::{ConsensusKind, ConsensusState};
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct System {
/// Logging level
pub verbosity: String,
/// Network settings
pub network: Network,
/// Internal Raft settings
pub raft: Raft,
/// Consul settings
pub consul: Consul,
/// Metric settings
pub metrics: Metrics,
/// Carbon backend settings
pub carbon: Carbon,
/// Number of networking threads, use 0 for number of CPUs
pub n_threads: usize,
/// Number of aggregating(worker) threads, set to 0 to use all CPU cores
pub w_threads: usize,
    /// Queue size for a single counting thread before packets are dropped
    pub task_queue_size: usize,
    /// Should we start with the leader state enabled or not
    pub start_as_leader: bool,
/// How often to gather own stats, in ms. Use 0 to disable (stats are still gathered, but not included in
/// metric dump)
pub stats_interval: u64,
/// Prefix to send own metrics with
pub stats_prefix: String,
/// Consensus kind to use
pub consensus: ConsensusKind,
}
impl Default for System {
fn default() -> Self {
Self {
verbosity: "warn".to_string(),
network: Network::default(),
raft: Raft::default(),
consul: Consul::default(),
metrics: Metrics::default(),
carbon: Carbon::default(),
n_threads: 4,
w_threads: 4,
stats_interval: 10000,
task_queue_size: 2048,
start_as_leader: false,
stats_prefix: "resources.monitoring.bioyino".to_string(),
consensus: ConsensusKind::None,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Metrics {
// TODO: Maximum metric array size, 0 for unlimited
// max_metrics: usize,
/// Should we provide metrics with top update numbers
pub count_updates: bool,
/// Prefix for metric update statistics
pub update_counter_prefix: String,
/// Suffix for metric update statistics
pub update_counter_suffix: String,
/// Minimal update count to be reported
pub update_counter_threshold: u32,
/// Consistent parsing
pub consistent_parsing: bool,
/// Whether we should spam parsing errors in logs
pub log_parse_errors: bool,
/// Maximum length of data the parser can keep in its buffer before considering it trash and
/// throwing it away
pub max_unparsed_buffer: usize,
/// Choose the way of aggregation
pub aggregation_mode: AggregationMode,
/// Number of threads when aggregating in "multi" mode
pub aggregation_threads: Option<usize>,
}
impl Default for Metrics {
fn default() -> Self {
Self {
// max_metrics: 0,
count_updates: true,
update_counter_prefix: "resources.monitoring.bioyino.updates".to_string(),
update_counter_suffix: String::new(),
update_counter_threshold: 200,
consistent_parsing: true,
log_parse_errors: false,
max_unparsed_buffer: 10000,
aggregation_mode: AggregationMode::Single,
aggregation_threads: None,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Carbon {
// TODO: will be used when multiple backends support is implemented
///// Enable sending to carbon protocol backend
//pub enabled: bool,
/// IP and port of the carbon-protocol backend to send aggregated data to
pub address: String,
/// client bind address
pub bind_address: Option<SocketAddr>,
/// How often to send metrics to this backend, ms
pub interval: u64,
/// How long to sleep when the connection to the backend fails, ms
pub connect_delay: u64,
/// Multiply the delay by this value for each consecutive connection failure
pub connect_delay_multiplier: f32,
/// Maximum retry delay, ms
pub connect_delay_max: u64,
/// How many times to retry sending data to the backend before giving up and dropping all metrics;
/// note that 0 means 1 try
pub send_retries: usize,
/// The whole metric array can be split into smaller chunks for each chunk to be sent
/// in a separate connection. This is a workaround for go-carbon and carbon-c-relay doing
/// per-connection processing and working inefficiently when lots of metrics are sent in one
/// connection
pub chunks: usize,
}
impl Default for Carbon {
fn default() -> Self {
Self {
// enabled: true,
address: "127.0.0.1:2003".to_string(),
bind_address: None,
interval: 30000,
connect_delay: 250,
connect_delay_multiplier: 2f32,
connect_delay_max: 10000,
send_retries: 30,
chunks: 1,
}
}
}
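// Sketch (assumed, not from the source): how connect_delay,
// connect_delay_multiplier and connect_delay_max are presumed to combine into
// a capped exponential backoff between reconnect attempts, in ms.
fn sketch_backoff_ms(carbon: &Carbon, failures: u32) -> u64 {
    let raw = carbon.connect_delay as f32 * carbon.connect_delay_multiplier.powi(failures as i32);
    (raw as u64).min(carbon.connect_delay_max)
}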
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Network {
/// Address and UDP port to listen for statsd metrics on
pub listen: SocketAddr,
/// Address and port for replication server to listen on
pub peer_listen: SocketAddr,
/// Snapshot client bind address
pub peer_client_bind: Option<SocketAddr>,
/// Address and port for management server to listen on
pub mgmt_listen: SocketAddr,
/// UDP buffer size for a single packet. Needs to be around the MTU; packet bytes beyond that
/// value may be lost
pub bufsize: usize,
/// Enable multimessage(recvmmsg) mode
pub multimessage: bool,
/// Number of multimessage packets to receive at once if in multimessage mode
pub mm_packets: usize,
/// Use asynchronous sockets in multimessage mode
pub mm_async: bool,
/// A timeout to return from multimessage mode syscall
pub mm_timeout: u64,
/// A timer to flush the incoming buffer, making sure metrics are not stuck there
pub buffer_flush_time: u64,
/// A length of the incoming buffer at which to flush it, making sure metrics are not stuck there
pub buffer_flush_length: usize,
/// Number of green threads for single-message mode
pub greens: usize,
/// Socket pool size for single-message mode
pub async_sockets: usize,
/// List of nodes to replicate metrics to
pub nodes: Vec<String>,
/// Interval to send snapshots to nodes, ms
pub snapshot_interval: usize,
}
impl Default for Network {
fn default() -> Self {
Self {
listen: "127.0.0.1:8125".parse().unwrap(),
peer_listen: "127.0.0.1:8136".parse().unwrap(),
peer_client_bind: None,
mgmt_listen: "127.0.0.1:8137".parse().unwrap(),
bufsize: 1500,
multimessage: false,
mm_packets: 100,
mm_async: false,
mm_timeout: 0,
buffer_flush_length: 0, | async_sockets: 4,
nodes: Vec::new(),
snapshot_interval: 1000,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Consul {
/// Start in disabled leader finding mode
pub start_as: ConsensusState,
/// Consul agent address
pub agent: SocketAddr,
/// TTL of consul session, ms (consul cannot set it to less than 10s)
pub session_ttl: usize,
/// How often to renew consul session, ms
pub renew_time: usize,
/// Name of the key to be locked in Consul
pub key_name: String,
}
impl Default for Consul {
fn default() -> Self {
Self { start_as: ConsensusState::Disabled, agent: "127.0.0.1:8500".parse().unwrap(), session_ttl: 11000, renew_time: 1000, key_name: "service/bioyino/lock".to_string() }
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Raft {
/// Delay raft after start (ms)
pub start_delay: u64,
/// Raft heartbeat timeout (ms)
pub heartbeat_timeout: u64,
/// Minimum Raft election timeout (ms)
pub election_timeout_min: u64,
/// Maximum Raft election timeout (ms)
pub election_timeout_max: u64,
/// Name of this node. By default it is taken by resolving the hostname in DNS.
pub this_node: Option<String>,
/// List of Raft nodes, may include this_node
pub nodes: HashMap<String, u64>,
/// Bind raft client to specific IP when connecting nodes
pub client_bind: Option<SocketAddr>,
}
impl Default for Raft {
fn default() -> Self {
Self { start_delay: 0, heartbeat_timeout: 250, election_timeout_min: 500, election_timeout_max: 750, this_node: None, nodes: HashMap::new(), client_bind: None }
}
}
impl Raft {
pub fn get_raft_options(&self) -> RaftOptions {
RaftOptions {
heartbeat_timeout: Duration::from_millis(self.heartbeat_timeout),
election_timeout: Range { start: Duration::from_millis(self.election_timeout_min), end: Duration::from_millis(self.election_timeout_max) },
}
}
}
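// Sketch (assumed usage): the millisecond fields become Durations for
// raft-tokio; a concrete election timeout is presumably sampled from the
// half-open range, so min should stay below max.
fn sketch_raft_options(raft: &Raft) {
    let opts = raft.get_raft_options();
    assert!(opts.heartbeat_timeout < opts.election_timeout.start);
    assert!(opts.election_timeout.start < opts.election_timeout.end);
}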
#[derive(Debug)]
pub enum Command {
Daemon,
Query(MgmtCommand, String),
}
impl System {
pub fn load() -> (Self, Command) {
// This is the first copy of the args - with the "config" option
let app = app_from_crate!()
.long_version(concat!(crate_version!(), " ", env!("VERGEN_COMMIT_DATE"), " ", env!("VERGEN_SHA_SHORT")))
.arg(Arg::with_name("config").help("configuration file path").long("config").short("c").required(true).takes_value(true).default_value("/etc/bioyino/bioyino.toml"))
.arg(Arg::with_name("verbosity").short("v").help("logging level").takes_value(true))
.subcommand(SubCommand::with_name("query").about("send a management command to running bioyino server").arg(Arg::with_name("host").short("h").default_value("127.0.0.1:8137")).subcommand(SubCommand::with_name("status").about("get server state")).subcommand(SubCommand::with_name("consensus").arg(Arg::with_name("action").index(1)).arg(Arg::with_name("leader_action").index(2).default_value("unchanged"))))
.get_matches();
let config = value_t!(app.value_of("config"), String).expect("config file must be string");
let mut file = File::open(&config).expect(&format!("opening config file at {}", &config));
let mut config_str = String::new();
file.read_to_string(&mut config_str).expect("reading config file");
let mut system: System = toml::de::from_str(&config_str).expect("parsing config");
if let Some(v) = app.value_of("verbosity") {
system.verbosity = v.into()
}
if let Some(query) = app.subcommand_matches("query") {
let server = value_t!(query.value_of("host"), String).expect("bad server");
if let Some(_) = query.subcommand_matches("status") {
(system, Command::Query(MgmtCommand::Status, server))
} else if let Some(args) = query.subcommand_matches("consensus") {
let c_action = value_t!(args.value_of("action"), ConsensusAction).expect("bad consensus action");
let l_action = value_t!(args.value_of("leader_action"), LeaderAction).expect("bad leader action");
(system, Command::Query(MgmtCommand::ConsensusCommand(c_action, l_action), server))
} else {
// should be unreachable
unreachable!("clap bug?")
}
} else {
(system, Command::Daemon)
}
}
} | buffer_flush_time: 0,
greens: 4, | random_line_split |
config.rs | use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
use std::net::SocketAddr;
use std::ops::Range;
use std::time::Duration;
use clap::{app_from_crate, crate_authors, crate_description, crate_name, crate_version, value_t, Arg, SubCommand};
use toml;
use serde_derive::{Deserialize, Serialize};
use raft_tokio::RaftOptions;
use crate::aggregate::AggregationMode;
use crate::management::{ConsensusAction, LeaderAction, MgmtCommand};
use crate::{ConsensusKind, ConsensusState};
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct System {
/// Logging level
pub verbosity: String,
/// Network settings
pub network: Network,
/// Internal Raft settings
pub raft: Raft,
/// Consul settings
pub consul: Consul,
/// Metric settings
pub metrics: Metrics,
/// Carbon backend settings
pub carbon: Carbon,
/// Number of networking threads, use 0 for number of CPUs
pub n_threads: usize,
/// Number of aggregating(worker) threads, set to 0 to use all CPU cores
pub w_threads: usize,
/// queue size for single counting thread before packet is dropped
pub task_queue_size: usize,
/// Should we start as leader state enabled or not
pub start_as_leader: bool,
/// How often to gather own stats, in ms. Use 0 to disable (stats are still gathered, but not included in
/// metric dump)
pub stats_interval: u64,
/// Prefix to send own metrics with
pub stats_prefix: String,
/// Consensus kind to use
pub consensus: ConsensusKind,
}
impl Default for System {
fn default() -> Self {
Self {
verbosity: "warn".to_string(),
network: Network::default(),
raft: Raft::default(),
consul: Consul::default(),
metrics: Metrics::default(),
carbon: Carbon::default(),
n_threads: 4,
w_threads: 4,
stats_interval: 10000,
task_queue_size: 2048,
start_as_leader: false,
stats_prefix: "resources.monitoring.bioyino".to_string(),
consensus: ConsensusKind::None,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Metrics {
// TODO: Maximum metric array size, 0 for unlimited
// max_metrics: usize,
/// Should we provide metrics with top update numbers
pub count_updates: bool,
/// Prefix for metric update statistics
pub update_counter_prefix: String,
/// Suffix for metric update statistics
pub update_counter_suffix: String,
/// Minimal update count to be reported
pub update_counter_threshold: u32,
/// Consistent parsing
pub consistent_parsing: bool,
/// Whether we should spam parsing errors in logs
pub log_parse_errors: bool,
/// Maximum length of data the parser can keep in its buffer before considering it trash and
/// throwing it away
pub max_unparsed_buffer: usize,
/// Choose the way of aggregation
pub aggregation_mode: AggregationMode,
/// Number of threads when aggregating in "multi" mode
pub aggregation_threads: Option<usize>,
}
impl Default for Metrics {
fn default() -> Self {
Self {
// max_metrics: 0,
count_updates: true,
update_counter_prefix: "resources.monitoring.bioyino.updates".to_string(),
update_counter_suffix: String::new(),
update_counter_threshold: 200,
consistent_parsing: true,
log_parse_errors: false,
max_unparsed_buffer: 10000,
aggregation_mode: AggregationMode::Single,
aggregation_threads: None,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Carbon {
// TODO: will be used when multiple backends support is implemented
///// Enable sending to carbon protocol backend
//pub enabled: bool,
/// IP and port of the carbon-protocol backend to send aggregated data to
pub address: String,
/// client bind address
pub bind_address: Option<SocketAddr>,
/// How often to send metrics to this backend, ms
pub interval: u64,
/// How long to sleep when the connection to the backend fails, ms
pub connect_delay: u64,
/// Multiply the delay by this value for each consecutive connection failure
pub connect_delay_multiplier: f32,
/// Maximum retry delay, ms
pub connect_delay_max: u64,
/// How many times to retry sending data to the backend before giving up and dropping all metrics;
/// note that 0 means 1 try
pub send_retries: usize,
/// The whole metric array can be split into smaller chunks for each chunk to be sent
/// in a separate connection. This is a workaround for go-carbon and carbon-c-relay doing
/// per-connection processing and working inefficiently when lots of metrics are sent in one
/// connection
pub chunks: usize,
}
impl Default for Carbon {
fn default() -> Self {
Self {
// enabled: true,
address: "127.0.0.1:2003".to_string(),
bind_address: None,
interval: 30000,
connect_delay: 250,
connect_delay_multiplier: 2f32,
connect_delay_max: 10000,
send_retries: 30,
chunks: 1,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Network {
/// Address and UDP port to listen for statsd metrics on
pub listen: SocketAddr,
/// Address and port for replication server to listen on
pub peer_listen: SocketAddr,
/// Snapshot client bind address
pub peer_client_bind: Option<SocketAddr>,
/// Address and port for management server to listen on
pub mgmt_listen: SocketAddr,
/// UDP buffer size for a single packet. Needs to be around the MTU; packet bytes beyond that
/// value may be lost
pub bufsize: usize,
/// Enable multimessage(recvmmsg) mode
pub multimessage: bool,
/// Number of multimessage packets to receive at once if in multimessage mode
pub mm_packets: usize,
/// Use asynchronous sockets in multimessage mode
pub mm_async: bool,
/// A timeout to return from multimessage mode syscall
pub mm_timeout: u64,
/// A timer to flush the incoming buffer, making sure metrics are not stuck there
pub buffer_flush_time: u64,
/// A length of the incoming buffer at which to flush it, making sure metrics are not stuck there
pub buffer_flush_length: usize,
/// Number of green threads for single-message mode
pub greens: usize,
/// Socket pool size for single-message mode
pub async_sockets: usize,
/// List of nodes to replicate metrics to
pub nodes: Vec<String>,
/// Interval to send snapshots to nodes, ms
pub snapshot_interval: usize,
}
impl Default for Network {
fn default() -> Self {
Self {
listen: "127.0.0.1:8125".parse().unwrap(),
peer_listen: "127.0.0.1:8136".parse().unwrap(),
peer_client_bind: None,
mgmt_listen: "127.0.0.1:8137".parse().unwrap(),
bufsize: 1500,
multimessage: false,
mm_packets: 100,
mm_async: false,
mm_timeout: 0,
buffer_flush_length: 0,
buffer_flush_time: 0,
greens: 4,
async_sockets: 4,
nodes: Vec::new(),
snapshot_interval: 1000,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Consul {
/// Start in disabled leader finding mode
pub start_as: ConsensusState,
/// Consul agent address
pub agent: SocketAddr,
/// TTL of consul session, ms (consul cannot set it to less than 10s)
pub session_ttl: usize,
/// How often to renew consul session, ms
pub renew_time: usize,
/// Name of the key to be locked in Consul
pub key_name: String,
}
impl Default for Consul {
fn default() -> Self {
Self { start_as: ConsensusState::Disabled, agent: "127.0.0.1:8500".parse().unwrap(), session_ttl: 11000, renew_time: 1000, key_name: "service/bioyino/lock".to_string() }
}
}
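// Sketch (assumed invariant, not from the source): the renew interval must sit
// well below the session TTL, otherwise the Consul lock could expire between
// renewals. The defaults above (1s renew vs 11s TTL) respect this.
fn sketch_consul_sanity(consul: &Consul) {
    assert!(consul.renew_time * 2 < consul.session_ttl);
}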
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Raft {
/// Delay raft after start (ms)
pub start_delay: u64,
/// Raft heartbeat timeout (ms)
pub heartbeat_timeout: u64,
/// Minimum Raft election timeout (ms)
pub election_timeout_min: u64,
/// Maximum Raft election timeout (ms)
pub election_timeout_max: u64,
/// Name of this node. By default it is taken by resolving the hostname in DNS.
pub this_node: Option<String>,
/// List of Raft nodes, may include this_node
pub nodes: HashMap<String, u64>,
/// Bind raft client to specific IP when connecting nodes
pub client_bind: Option<SocketAddr>,
}
impl Default for Raft {
fn default() -> Self {
Self { start_delay: 0, heartbeat_timeout: 250, election_timeout_min: 500, election_timeout_max: 750, this_node: None, nodes: HashMap::new(), client_bind: None }
}
}
impl Raft {
pub fn get_raft_options(&self) -> RaftOptions {
RaftOptions {
heartbeat_timeout: Duration::from_millis(self.heartbeat_timeout),
election_timeout: Range { start: Duration::from_millis(self.election_timeout_min), end: Duration::from_millis(self.election_timeout_max) },
}
}
}
#[derive(Debug)]
pub enum | {
Daemon,
Query(MgmtCommand, String),
}
impl System {
pub fn load() -> (Self, Command) {
// This is the first copy of the args - with the "config" option
let app = app_from_crate!()
.long_version(concat!(crate_version!(), " ", env!("VERGEN_COMMIT_DATE"), " ", env!("VERGEN_SHA_SHORT")))
.arg(Arg::with_name("config").help("configuration file path").long("config").short("c").required(true).takes_value(true).default_value("/etc/bioyino/bioyino.toml"))
.arg(Arg::with_name("verbosity").short("v").help("logging level").takes_value(true))
.subcommand(SubCommand::with_name("query").about("send a management command to running bioyino server").arg(Arg::with_name("host").short("h").default_value("127.0.0.1:8137")).subcommand(SubCommand::with_name("status").about("get server state")).subcommand(SubCommand::with_name("consensus").arg(Arg::with_name("action").index(1)).arg(Arg::with_name("leader_action").index(2).default_value("unchanged"))))
.get_matches();
let config = value_t!(app.value_of("config"), String).expect("config file must be string");
let mut file = File::open(&config).expect(&format!("opening config file at {}", &config));
let mut config_str = String::new();
file.read_to_string(&mut config_str).expect("reading config file");
let mut system: System = toml::de::from_str(&config_str).expect("parsing config");
if let Some(v) = app.value_of("verbosity") {
system.verbosity = v.into()
}
if let Some(query) = app.subcommand_matches("query") {
let server = value_t!(query.value_of("host"), String).expect("bad server");
if let Some(_) = query.subcommand_matches("status") {
(system, Command::Query(MgmtCommand::Status, server))
} else if let Some(args) = query.subcommand_matches("consensus") {
let c_action = value_t!(args.value_of("action"), ConsensusAction).expect("bad consensus action");
let l_action = value_t!(args.value_of("leader_action"), LeaderAction).expect("bad leader action");
(system, Command::Query(MgmtCommand::ConsensusCommand(c_action, l_action), server))
} else {
// should be unreachable
unreachable!("clap bug?")
}
} else {
(system, Command::Daemon)
}
}
}
| Command | identifier_name |
building.rs | use std::collections::{BTreeMap, HashSet, VecDeque};
use std::fmt;
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
use strum_macros::{Display, EnumIter, EnumString};
use abstutil::{
deserialize_btreemap, deserialize_usize, serialize_btreemap, serialize_usize, Tags,
};
use geom::{Distance, PolyLine, Polygon, Pt2D};
use crate::{osm, LaneID, Map, PathConstraints, Position};
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct BuildingID(
#[serde(
serialize_with = "serialize_usize",
deserialize_with = "deserialize_usize"
)]
pub usize,
);
impl fmt::Display for BuildingID {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Building #{}", self.0)
}
}
/// A building has connections to the road and sidewalk, may contain commercial amenities, and may
/// have off-street parking.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Building {
pub id: BuildingID,
pub polygon: Polygon,
pub levels: f64,
pub address: String,
pub name: Option<NamePerLanguage>,
pub orig_id: osm::OsmID,
/// Where a text label should be centered to have the best chances of being contained within
/// the polygon.
pub label_center: Pt2D,
pub amenities: Vec<Amenity>,
pub bldg_type: BuildingType,
pub parking: OffstreetParking,
/// Depending on options while importing, these might be empty, to save file space.
pub osm_tags: Tags,
/// The building's connection for any agent can change based on map edits. Just store the one
/// for pedestrians and lazily calculate the others.
pub sidewalk_pos: Position,
/// Goes from building to sidewalk
pub driveway_geom: PolyLine,
}
/// A business located inside a building.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Amenity {
pub names: NamePerLanguage,
/// This is the specific amenity listed in OSM, not the more general `AmenityType` category.
pub amenity_type: String,
/// Depending on options while importing, these might be empty, to save file space.
pub osm_tags: Tags,
}
/// Represent no parking as Private(0, false).
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub enum OffstreetParking {
/// (Name, spots)
PublicGarage(String, usize),
/// (Spots, explicitly tagged as a garage)
Private(usize, bool),
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum BuildingType {
Residential {
num_residents: usize,
num_housing_units: usize,
},
/// An estimated number of residents, workers
ResidentialCommercial(usize, usize),
/// An estimated number of workers
Commercial(usize),
Empty,
}
impl BuildingType {
pub fn has_residents(&self) -> bool {
match self {
BuildingType::Residential { .. } | BuildingType::ResidentialCommercial(_, _) => true,
BuildingType::Commercial(_) | BuildingType::Empty => false,
}
}
}
/// None corresponds to the native name
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct NamePerLanguage(
#[serde(
serialize_with = "serialize_btreemap",
deserialize_with = "deserialize_btreemap"
)]
pub(crate) BTreeMap<Option<String>, String>,
);
impl NamePerLanguage {
pub fn get(&self, lang: Option<&String>) -> &String {
// TODO Can we avoid this clone?
let lang = lang.cloned();
if let Some(name) = self.0.get(&lang) {
return name;
}
&self.0[&None]
}
pub fn new(tags: &Tags) -> Option<NamePerLanguage> {
let native_name = tags.get(osm::NAME)?;
let mut map = BTreeMap::new();
map.insert(None, native_name.to_string());
for (k, v) in tags.inner() {
if let Some(lang) = k.strip_prefix("name:") {
map.insert(Some(lang.to_string()), v.to_string());
}
}
Some(NamePerLanguage(map))
}
pub fn unnamed() -> NamePerLanguage {
let mut map = BTreeMap::new();
map.insert(None, "unnamed".to_string());
NamePerLanguage(map)
}
}
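// Sketch (assumed usage, not from the source): lookups fall back to the
// native (None-keyed) name when the requested language has no entry.
fn sketch_name_fallback(tags: &Tags) {
    if let Some(names) = NamePerLanguage::new(tags) {
        let lang = "de".to_string();
        // Either the name:de value, or the native name if that tag is absent.
        let _label = names.get(Some(&lang));
    }
}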
impl Building {
pub fn sidewalk(&self) -> LaneID {
self.sidewalk_pos.lane()
}
pub fn house_number(&self) -> Option<String> {
let num = self.address.split(' ').next().unwrap();
if num != "???" {
Some(num.to_string())
} else {
None
}
}
/// The polyline goes from the building to the driving position
// TODO Make this handle parking_blackhole
pub fn driving_connection(&self, map: &Map) -> Option<(Position, PolyLine)> |
/// Returns (biking position, sidewalk position). Could fail if the biking graph is
/// disconnected.
pub fn biking_connection(&self, map: &Map) -> Option<(Position, Position)> {
// Easy case: the building is directly next to a usable lane
if let Some(pair) = sidewalk_to_bike(self.sidewalk_pos, map) {
return Some(pair);
}
// Floodfill the sidewalk graph until we find a sidewalk<->bike connection.
let mut queue: VecDeque<LaneID> = VecDeque::new();
let mut visited: HashSet<LaneID> = HashSet::new();
queue.push_back(self.sidewalk());
loop {
if queue.is_empty() {
return None;
}
let l = queue.pop_front().unwrap();
if visited.contains(&l) {
continue;
}
visited.insert(l);
// TODO Could search by sidewalk endpoint
if let Some(pair) = sidewalk_to_bike(Position::new(l, map.get_l(l).length() / 2.0), map)
{
return Some(pair);
}
for t in map.get_turns_from_lane(l) {
if !visited.contains(&t.id.dst) {
queue.push_back(t.id.dst);
}
}
}
}
pub fn num_parking_spots(&self) -> usize {
match self.parking {
OffstreetParking::PublicGarage(_, n) => n,
OffstreetParking::Private(n, _) => n,
}
}
/// Does this building contain any amenity matching the category?
pub fn has_amenity(&self, category: AmenityType) -> bool {
for amenity in &self.amenities {
if AmenityType::categorize(&amenity.amenity_type) == Some(category) {
return true;
}
}
false
}
}
fn sidewalk_to_bike(sidewalk_pos: Position, map: &Map) -> Option<(Position, Position)> {
let lane = map
.get_parent(sidewalk_pos.lane())
.find_closest_lane(sidewalk_pos.lane(), |l| {
!l.biking_blackhole && PathConstraints::Bike.can_use(l, map)
})?;
// No buffer needed
Some((sidewalk_pos.equiv_pos(lane, map), sidewalk_pos))
}
/// Businesses are categorized into one of these types.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, EnumString, Display, EnumIter)]
pub enum AmenityType {
Bank,
Bar,
Beauty,
Bike,
Cafe,
CarRepair,
CarShare,
Childcare,
ConvenienceStore,
Culture,
Exercise,
FastFood,
Food,
GreenSpace,
Hotel,
Laundry,
Library,
Medical,
Pet,
Playground,
Pool,
PostOffice,
Religious,
School,
Shopping,
Supermarket,
Tourism,
University,
}
impl AmenityType {
fn types(self) -> Vec<&'static str> {
match self {
AmenityType::Bank => vec!["bank"],
AmenityType::Bar => vec!["bar", "pub", "nightclub", "biergarten"],
AmenityType::Beauty => vec!["hairdresser", "beauty", "chemist", "cosmetics"],
AmenityType::Bike => vec!["bicycle"],
AmenityType::Cafe => vec!["cafe", "pastry", "coffee", "tea", "bakery"],
AmenityType::CarRepair => vec!["car_repair"],
AmenityType::CarShare => vec!["car_sharing"],
AmenityType::Childcare => vec!["childcare", "kindergarten"],
AmenityType::ConvenienceStore => vec!["convenience"],
AmenityType::Culture => vec!["arts_centre", "art", "cinema", "theatre"],
AmenityType::Exercise => vec!["fitness_centre", "sports_centre", "track", "pitch"],
AmenityType::FastFood => vec!["fast_food", "food_court"],
AmenityType::Food => vec![
"restaurant",
"farm",
"ice_cream",
"seafood",
"cheese",
"chocolate",
"deli",
"butcher",
"confectionery",
"beverages",
"alcohol",
],
AmenityType::GreenSpace => vec!["park", "garden", "nature_reserve"],
AmenityType::Hotel => vec!["hotel", "hostel", "guest_house", "motel"],
AmenityType::Laundry => vec!["dry_cleaning", "laundry", "tailor"],
AmenityType::Library => vec!["library"],
AmenityType::Medical => vec![
"clinic", "dentist", "hospital", "pharmacy", "doctors", "optician",
],
AmenityType::Pet => vec!["veterinary", "pet", "animal_boarding", "pet_grooming"],
AmenityType::Playground => vec!["playground"],
AmenityType::Pool => vec!["swimming_pool"],
AmenityType::PostOffice => vec!["post_office"],
AmenityType::Religious => vec!["place_of_worship", "religion"],
AmenityType::School => vec!["school"],
AmenityType::Shopping => vec![
"wholesale",
"bag",
"marketplace",
"second_hand",
"charity",
"clothes",
"lottery",
"shoes",
"mall",
"department_store",
"car",
"tailor",
"nutrition_supplements",
"watches",
"craft",
"fabric",
"kiosk",
"antiques",
"shoemaker",
"hardware",
"houseware",
"mobile_phone",
"photo",
"toys",
"bed",
"florist",
"electronics",
"fishing",
"garden_centre",
"frame",
"watchmaker",
"boutique",
"mobile_phone",
"party",
"car_parts",
"video",
"video_games",
"musical_instrument",
"music",
"baby_goods",
"doityourself",
"jewelry",
"variety_store",
"gift",
"carpet",
"perfumery",
"curtain",
"appliance",
"furniture",
"lighting",
"sewing",
"books",
"sports",
"travel_agency",
"interior_decoration",
"stationery",
"computer",
"tyres",
"newsagent",
"general",
],
AmenityType::Supermarket => vec!["supermarket", "greengrocer"],
AmenityType::Tourism => vec![
"gallery",
"museum",
"zoo",
"attraction",
"theme_park",
"aquarium",
],
AmenityType::University => vec!["college", "university"],
}
}
/// All types of amenities, in alphabetical order.
pub fn all() -> Vec<AmenityType> {
AmenityType::iter().collect()
}
/// Categorize an OSM amenity tag.
pub fn categorize(a: &str) -> Option<AmenityType> {
for at in AmenityType::all() {
if at.types().contains(&a) {
return Some(at);
}
}
None
}
}
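// Sketch (assumed usage): categorize() scans every variant's raw OSM type
// list, so any string appearing in exactly one list maps to that category.
fn sketch_categorize() {
    assert!(AmenityType::categorize("pub") == Some(AmenityType::Bar));
    assert!(AmenityType::categorize("spaceport").is_none());
}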
| {
let lane = map
.get_parent(self.sidewalk())
.find_closest_lane(self.sidewalk(), |l| PathConstraints::Car.can_use(l, map))?;
// TODO Do we need to insist on this buffer, now that we can make cars gradually appear?
let pos = self
.sidewalk_pos
.equiv_pos(lane, map)
.buffer_dist(Distance::meters(7.0), map)?;
Some((pos, self.driveway_geom.clone().optionally_push(pos.pt(map))))
} | identifier_body |
building.rs | use std::collections::{BTreeMap, HashSet, VecDeque};
use std::fmt;
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
use strum_macros::{Display, EnumIter, EnumString};
use abstutil::{
deserialize_btreemap, deserialize_usize, serialize_btreemap, serialize_usize, Tags,
};
use geom::{Distance, PolyLine, Polygon, Pt2D};
use crate::{osm, LaneID, Map, PathConstraints, Position};
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct BuildingID(
#[serde(
serialize_with = "serialize_usize",
deserialize_with = "deserialize_usize"
)]
pub usize,
);
impl fmt::Display for BuildingID {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Building #{}", self.0)
}
}
/// A building has connections to the road and sidewalk, may contain commercial amenities, and may
/// have off-street parking.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Building {
pub id: BuildingID,
pub polygon: Polygon,
pub levels: f64,
pub address: String,
pub name: Option<NamePerLanguage>,
pub orig_id: osm::OsmID,
/// Where a text label should be centered to have the best chances of being contained within
/// the polygon.
pub label_center: Pt2D,
pub amenities: Vec<Amenity>,
pub bldg_type: BuildingType,
pub parking: OffstreetParking,
/// Depending on options while importing, these might be empty, to save file space.
pub osm_tags: Tags,
/// The building's connection for any agent can change based on map edits. Just store the one
/// for pedestrians and lazily calculate the others.
pub sidewalk_pos: Position,
/// Goes from building to sidewalk
pub driveway_geom: PolyLine,
}
/// A business located inside a building.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Amenity {
pub names: NamePerLanguage,
/// This is the specific amenity listed in OSM, not the more general `AmenityType` category.
pub amenity_type: String,
/// Depending on options while importing, these might be empty, to save file space.
pub osm_tags: Tags,
}
/// Represent no parking as Private(0, false).
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub enum OffstreetParking {
/// (Name, spots)
PublicGarage(String, usize),
/// (Spots, explicitly tagged as a garage)
Private(usize, bool),
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum BuildingType {
Residential {
num_residents: usize,
num_housing_units: usize,
},
/// An estimated number of residents, workers
ResidentialCommercial(usize, usize),
/// An estimated number of workers
Commercial(usize),
Empty,
}
impl BuildingType {
pub fn has_residents(&self) -> bool {
match self {
BuildingType::Residential { .. } | BuildingType::ResidentialCommercial(_, _) => true,
BuildingType::Commercial(_) | BuildingType::Empty => false,
}
}
}
/// None corresponds to the native name
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct NamePerLanguage(
#[serde(
serialize_with = "serialize_btreemap",
deserialize_with = "deserialize_btreemap"
)]
pub(crate) BTreeMap<Option<String>, String>,
);
impl NamePerLanguage {
pub fn get(&self, lang: Option<&String>) -> &String {
// TODO Can we avoid this clone?
let lang = lang.cloned();
if let Some(name) = self.0.get(&lang) {
return name;
}
&self.0[&None]
}
pub fn new(tags: &Tags) -> Option<NamePerLanguage> {
let native_name = tags.get(osm::NAME)?;
let mut map = BTreeMap::new();
map.insert(None, native_name.to_string());
for (k, v) in tags.inner() {
if let Some(lang) = k.strip_prefix("name:") {
map.insert(Some(lang.to_string()), v.to_string());
}
}
Some(NamePerLanguage(map))
}
pub fn unnamed() -> NamePerLanguage {
let mut map = BTreeMap::new();
map.insert(None, "unnamed".to_string());
NamePerLanguage(map)
}
}
impl Building {
pub fn sidewalk(&self) -> LaneID {
self.sidewalk_pos.lane()
}
pub fn house_number(&self) -> Option<String> {
let num = self.address.split(' ').next().unwrap();
if num != "???" {
Some(num.to_string())
} else {
None
}
}
/// The polyline goes from the building to the driving position
// TODO Make this handle parking_blackhole
pub fn driving_connection(&self, map: &Map) -> Option<(Position, PolyLine)> {
let lane = map
.get_parent(self.sidewalk())
.find_closest_lane(self.sidewalk(), |l| PathConstraints::Car.can_use(l, map))?;
// TODO Do we need to insist on this buffer, now that we can make cars gradually appear?
let pos = self
.sidewalk_pos
.equiv_pos(lane, map)
.buffer_dist(Distance::meters(7.0), map)?;
Some((pos, self.driveway_geom.clone().optionally_push(pos.pt(map))))
}
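// Sketch (assumed usage, not from the source): a caller that prefers the
// driving connection and falls back to the sidewalk when no usable driving
// lane is found.
fn sketch_vehicle_endpoint(b: &Building, map: &Map) -> Position {
    match b.driving_connection(map) {
        Some((pos, _driveway)) => pos,
        None => b.sidewalk_pos,
    }
}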
/// Returns (biking position, sidewalk position). Could fail if the biking graph is
/// disconnected.
pub fn biking_connection(&self, map: &Map) -> Option<(Position, Position)> {
// Easy case: the building is directly next to a usable lane
if let Some(pair) = sidewalk_to_bike(self.sidewalk_pos, map) {
return Some(pair);
}
// Floodfill the sidewalk graph until we find a sidewalk<->bike connection.
let mut queue: VecDeque<LaneID> = VecDeque::new();
let mut visited: HashSet<LaneID> = HashSet::new();
queue.push_back(self.sidewalk());
loop {
if queue.is_empty() {
return None;
}
let l = queue.pop_front().unwrap();
if visited.contains(&l) {
continue;
}
visited.insert(l);
// TODO Could search by sidewalk endpoint
if let Some(pair) = sidewalk_to_bike(Position::new(l, map.get_l(l).length() / 2.0), map)
{
return Some(pair);
}
for t in map.get_turns_from_lane(l) {
if !visited.contains(&t.id.dst) {
queue.push_back(t.id.dst);
}
}
}
}
pub fn num_parking_spots(&self) -> usize {
match self.parking {
OffstreetParking::PublicGarage(_, n) => n,
OffstreetParking::Private(n, _) => n,
}
}
/// Does this building contain any amenity matching the category?
pub fn has_amenity(&self, category: AmenityType) -> bool {
for amenity in &self.amenities {
if AmenityType::categorize(&amenity.amenity_type) == Some(category) {
return true;
}
}
false
}
}
fn sidewalk_to_bike(sidewalk_pos: Position, map: &Map) -> Option<(Position, Position)> {
let lane = map
.get_parent(sidewalk_pos.lane())
.find_closest_lane(sidewalk_pos.lane(), |l| {
!l.biking_blackhole && PathConstraints::Bike.can_use(l, map)
})?;
// No buffer needed
Some((sidewalk_pos.equiv_pos(lane, map), sidewalk_pos))
}
/// Businesses are categorized into one of these types.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, EnumString, Display, EnumIter)]
pub enum | {
Bank,
Bar,
Beauty,
Bike,
Cafe,
CarRepair,
CarShare,
Childcare,
ConvenienceStore,
Culture,
Exercise,
FastFood,
Food,
GreenSpace,
Hotel,
Laundry,
Library,
Medical,
Pet,
Playground,
Pool,
PostOffice,
Religious,
School,
Shopping,
Supermarket,
Tourism,
University,
}
impl AmenityType {
fn types(self) -> Vec<&'static str> {
match self {
AmenityType::Bank => vec!["bank"],
AmenityType::Bar => vec!["bar", "pub", "nightclub", "biergarten"],
AmenityType::Beauty => vec!["hairdresser", "beauty", "chemist", "cosmetics"],
AmenityType::Bike => vec!["bicycle"],
AmenityType::Cafe => vec!["cafe", "pastry", "coffee", "tea", "bakery"],
AmenityType::CarRepair => vec!["car_repair"],
AmenityType::CarShare => vec!["car_sharing"],
AmenityType::Childcare => vec!["childcare", "kindergarten"],
AmenityType::ConvenienceStore => vec!["convenience"],
AmenityType::Culture => vec!["arts_centre", "art", "cinema", "theatre"],
AmenityType::Exercise => vec!["fitness_centre", "sports_centre", "track", "pitch"],
AmenityType::FastFood => vec!["fast_food", "food_court"],
AmenityType::Food => vec![
"restaurant",
"farm",
"ice_cream",
"seafood",
"cheese",
"chocolate",
"deli",
"butcher",
"confectionery",
"beverages",
"alcohol",
],
AmenityType::GreenSpace => vec!["park", "garden", "nature_reserve"],
AmenityType::Hotel => vec!["hotel", "hostel", "guest_house", "motel"],
AmenityType::Laundry => vec!["dry_cleaning", "laundry", "tailor"],
AmenityType::Library => vec!["library"],
AmenityType::Medical => vec![
"clinic", "dentist", "hospital", "pharmacy", "doctors", "optician",
],
AmenityType::Pet => vec!["veterinary", "pet", "animal_boarding", "pet_grooming"],
AmenityType::Playground => vec!["playground"],
AmenityType::Pool => vec!["swimming_pool"],
AmenityType::PostOffice => vec!["post_office"],
AmenityType::Religious => vec!["place_of_worship", "religion"],
AmenityType::School => vec!["school"],
AmenityType::Shopping => vec![
"wholesale",
"bag",
"marketplace",
"second_hand",
"charity",
"clothes",
"lottery",
"shoes",
"mall",
"department_store",
"car",
"tailor",
"nutrition_supplements",
"watches",
"craft",
"fabric",
"kiosk",
"antiques",
"shoemaker",
"hardware",
"houseware",
"mobile_phone",
"photo",
"toys",
"bed",
"florist",
"electronics",
"fishing",
"garden_centre",
"frame",
"watchmaker",
"boutique",
"mobile_phone",
"party",
"car_parts",
"video",
"video_games",
"musical_instrument",
"music",
"baby_goods",
"doityourself",
"jewelry",
"variety_store",
"gift",
"carpet",
"perfumery",
"curtain",
"appliance",
"furniture",
"lighting",
"sewing",
"books",
"sports",
"travel_agency",
"interior_decoration",
"stationery",
"computer",
"tyres",
"newsagent",
"general",
],
AmenityType::Supermarket => vec!["supermarket", "greengrocer"],
AmenityType::Tourism => vec![
"gallery",
"museum",
"zoo",
"attraction",
"theme_park",
"aquarium",
],
AmenityType::University => vec!["college", "university"],
}
}
/// All types of amenities, in alphabetical order.
pub fn all() -> Vec<AmenityType> {
AmenityType::iter().collect()
}
/// Categorize an OSM amenity tag.
pub fn categorize(a: &str) -> Option<AmenityType> {
for at in AmenityType::all() {
if at.types().contains(&a) {
return Some(at);
}
}
None
}
}
| AmenityType | identifier_name |
building.rs | use std::collections::{BTreeMap, HashSet, VecDeque};
use std::fmt;
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
use strum_macros::{Display, EnumIter, EnumString};
use abstutil::{
deserialize_btreemap, deserialize_usize, serialize_btreemap, serialize_usize, Tags,
};
use geom::{Distance, PolyLine, Polygon, Pt2D};
use crate::{osm, LaneID, Map, PathConstraints, Position};
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct BuildingID(
#[serde(
serialize_with = "serialize_usize",
deserialize_with = "deserialize_usize"
)]
pub usize,
);
impl fmt::Display for BuildingID {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Building #{}", self.0)
}
}
/// A building has connections to the road and sidewalk, may contain commercial amenities, and may
/// have off-street parking.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Building {
pub id: BuildingID,
pub polygon: Polygon,
pub levels: f64,
pub address: String,
pub name: Option<NamePerLanguage>,
pub orig_id: osm::OsmID,
/// Where a text label should be centered to have the best chances of being contained within
/// the polygon.
pub label_center: Pt2D,
pub amenities: Vec<Amenity>,
pub bldg_type: BuildingType,
pub parking: OffstreetParking,
/// Depending on options while importing, these might be empty, to save file space.
pub osm_tags: Tags,
/// The building's connection for any agent can change based on map edits. Just store the one
/// for pedestrians and lazily calculate the others.
pub sidewalk_pos: Position,
/// Goes from building to sidewalk
pub driveway_geom: PolyLine,
}
/// A business located inside a building.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Amenity {
pub names: NamePerLanguage,
/// This is the specific amenity listed in OSM, not the more general `AmenityType` category.
pub amenity_type: String,
/// Depending on options while importing, these might be empty, to save file space.
pub osm_tags: Tags,
}
/// Represent no parking as Private(0, false).
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub enum OffstreetParking {
/// (Name, spots)
PublicGarage(String, usize),
/// (Spots, explicitly tagged as a garage)
Private(usize, bool),
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum BuildingType {
Residential {
num_residents: usize,
num_housing_units: usize,
},
/// An estimated number of residents, workers
ResidentialCommercial(usize, usize),
/// An estimated number of workers
Commercial(usize),
Empty,
}
impl BuildingType {
pub fn has_residents(&self) -> bool {
match self {
BuildingType::Residential { .. } | BuildingType::ResidentialCommercial(_, _) => true,
BuildingType::Commercial(_) | BuildingType::Empty => false,
}
}
}
/// None corresponds to the native name
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct NamePerLanguage(
#[serde(
serialize_with = "serialize_btreemap",
deserialize_with = "deserialize_btreemap"
)]
pub(crate) BTreeMap<Option<String>, String>,
);
impl NamePerLanguage {
pub fn get(&self, lang: Option<&String>) -> &String {
// TODO Can we avoid this clone?
let lang = lang.cloned();
if let Some(name) = self.0.get(&lang) { | return name;
}
&self.0[&None]
}
pub fn new(tags: &Tags) -> Option<NamePerLanguage> {
let native_name = tags.get(osm::NAME)?;
let mut map = BTreeMap::new();
map.insert(None, native_name.to_string());
for (k, v) in tags.inner() {
if let Some(lang) = k.strip_prefix("name:") {
map.insert(Some(lang.to_string()), v.to_string());
}
}
Some(NamePerLanguage(map))
}
pub fn unnamed() -> NamePerLanguage {
let mut map = BTreeMap::new();
map.insert(None, "unnamed".to_string());
NamePerLanguage(map)
}
}
impl Building {
pub fn sidewalk(&self) -> LaneID {
self.sidewalk_pos.lane()
}
pub fn house_number(&self) -> Option<String> {
let num = self.address.split(' ').next().unwrap();
if num != "???" {
Some(num.to_string())
} else {
None
}
}
/// The polyline goes from the building to the driving position
// TODO Make this handle parking_blackhole
pub fn driving_connection(&self, map: &Map) -> Option<(Position, PolyLine)> {
let lane = map
.get_parent(self.sidewalk())
.find_closest_lane(self.sidewalk(), |l| PathConstraints::Car.can_use(l, map))?;
// TODO Do we need to insist on this buffer, now that we can make cars gradually appear?
let pos = self
.sidewalk_pos
.equiv_pos(lane, map)
.buffer_dist(Distance::meters(7.0), map)?;
Some((pos, self.driveway_geom.clone().optionally_push(pos.pt(map))))
}
/// Returns (biking position, sidewalk position). Could fail if the biking graph is
/// disconnected.
pub fn biking_connection(&self, map: &Map) -> Option<(Position, Position)> {
// Easy case: the building is directly next to a usable lane
if let Some(pair) = sidewalk_to_bike(self.sidewalk_pos, map) {
return Some(pair);
}
// Floodfill the sidewalk graph until we find a sidewalk<->bike connection.
let mut queue: VecDeque<LaneID> = VecDeque::new();
let mut visited: HashSet<LaneID> = HashSet::new();
queue.push_back(self.sidewalk());
loop {
if queue.is_empty() {
return None;
}
let l = queue.pop_front().unwrap();
if visited.contains(&l) {
continue;
}
visited.insert(l);
// TODO Could search by sidewalk endpoint
if let Some(pair) = sidewalk_to_bike(Position::new(l, map.get_l(l).length() / 2.0), map)
{
return Some(pair);
}
for t in map.get_turns_from_lane(l) {
if !visited.contains(&t.id.dst) {
queue.push_back(t.id.dst);
}
}
}
}
pub fn num_parking_spots(&self) -> usize {
match self.parking {
OffstreetParking::PublicGarage(_, n) => n,
OffstreetParking::Private(n, _) => n,
}
}
/// Does this building contain any amenity matching the category?
pub fn has_amenity(&self, category: AmenityType) -> bool {
for amenity in &self.amenities {
if AmenityType::categorize(&amenity.amenity_type) == Some(category) {
return true;
}
}
false
}
}
fn sidewalk_to_bike(sidewalk_pos: Position, map: &Map) -> Option<(Position, Position)> {
let lane = map
.get_parent(sidewalk_pos.lane())
.find_closest_lane(sidewalk_pos.lane(), |l| {
!l.biking_blackhole && PathConstraints::Bike.can_use(l, map)
})?;
// No buffer needed
Some((sidewalk_pos.equiv_pos(lane, map), sidewalk_pos))
}
/// Businesses are categorized into one of these types.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, EnumString, Display, EnumIter)]
pub enum AmenityType {
Bank,
Bar,
Beauty,
Bike,
Cafe,
CarRepair,
CarShare,
Childcare,
ConvenienceStore,
Culture,
Exercise,
FastFood,
Food,
GreenSpace,
Hotel,
Laundry,
Library,
Medical,
Pet,
Playground,
Pool,
PostOffice,
Religious,
School,
Shopping,
Supermarket,
Tourism,
University,
}
impl AmenityType {
fn types(self) -> Vec<&'static str> {
match self {
AmenityType::Bank => vec!["bank"],
AmenityType::Bar => vec!["bar", "pub", "nightclub", "biergarten"],
AmenityType::Beauty => vec!["hairdresser", "beauty", "chemist", "cosmetics"],
AmenityType::Bike => vec!["bicycle"],
AmenityType::Cafe => vec!["cafe", "pastry", "coffee", "tea", "bakery"],
AmenityType::CarRepair => vec!["car_repair"],
AmenityType::CarShare => vec!["car_sharing"],
AmenityType::Childcare => vec!["childcare", "kindergarten"],
AmenityType::ConvenienceStore => vec!["convenience"],
AmenityType::Culture => vec!["arts_centre", "art", "cinema", "theatre"],
AmenityType::Exercise => vec!["fitness_centre", "sports_centre", "track", "pitch"],
AmenityType::FastFood => vec!["fast_food", "food_court"],
AmenityType::Food => vec![
"restaurant",
"farm",
"ice_cream",
"seafood",
"cheese",
"chocolate",
"deli",
"butcher",
"confectionery",
"beverages",
"alcohol",
],
AmenityType::GreenSpace => vec!["park", "garden", "nature_reserve"],
AmenityType::Hotel => vec!["hotel", "hostel", "guest_house", "motel"],
AmenityType::Laundry => vec!["dry_cleaning", "laundry", "tailor"],
AmenityType::Library => vec!["library"],
AmenityType::Medical => vec![
"clinic", "dentist", "hospital", "pharmacy", "doctors", "optician",
],
AmenityType::Pet => vec!["veterinary", "pet", "animal_boarding", "pet_grooming"],
AmenityType::Playground => vec!["playground"],
AmenityType::Pool => vec!["swimming_pool"],
AmenityType::PostOffice => vec!["post_office"],
AmenityType::Religious => vec!["place_of_worship", "religion"],
AmenityType::School => vec!["school"],
AmenityType::Shopping => vec![
"wholesale",
"bag",
"marketplace",
"second_hand",
"charity",
"clothes",
"lottery",
"shoes",
"mall",
"department_store",
"car",
"tailor",
"nutrition_supplements",
"watches",
"craft",
"fabric",
"kiosk",
"antiques",
"shoemaker",
"hardware",
"houseware",
"mobile_phone",
"photo",
"toys",
"bed",
"florist",
"electronics",
"fishing",
"garden_centre",
"frame",
"watchmaker",
"boutique",
"mobile_phone",
"party",
"car_parts",
"video",
"video_games",
"musical_instrument",
"music",
"baby_goods",
"doityourself",
"jewelry",
"variety_store",
"gift",
"carpet",
"perfumery",
"curtain",
"appliance",
"furniture",
"lighting",
"sewing",
"books",
"sports",
"travel_agency",
"interior_decoration",
"stationery",
"computer",
"tyres",
"newsagent",
"general",
],
AmenityType::Supermarket => vec!["supermarket", "greengrocer"],
AmenityType::Tourism => vec![
"gallery",
"museum",
"zoo",
"attraction",
"theme_park",
"aquarium",
],
AmenityType::University => vec!["college", "university"],
}
}
/// All types of amenities, in alphabetical order.
pub fn all() -> Vec<AmenityType> {
AmenityType::iter().collect()
}
/// Categorize an OSM amenity tag.
pub fn categorize(a: &str) -> Option<AmenityType> {
for at in AmenityType::all() {
if at.types().contains(&a) {
return Some(at);
}
}
None
}
} | random_line_split |
|
building.rs | use std::collections::{BTreeMap, HashSet, VecDeque};
use std::fmt;
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
use strum_macros::{Display, EnumIter, EnumString};
use abstutil::{
deserialize_btreemap, deserialize_usize, serialize_btreemap, serialize_usize, Tags,
};
use geom::{Distance, PolyLine, Polygon, Pt2D};
use crate::{osm, LaneID, Map, PathConstraints, Position};
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct BuildingID(
#[serde(
serialize_with = "serialize_usize",
deserialize_with = "deserialize_usize"
)]
pub usize,
);
impl fmt::Display for BuildingID {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Building #{}", self.0)
}
}
/// A building has connections to the road and sidewalk, may contain commercial amenities, and may
/// have off-street parking.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Building {
pub id: BuildingID,
pub polygon: Polygon,
pub levels: f64,
pub address: String,
pub name: Option<NamePerLanguage>,
pub orig_id: osm::OsmID,
/// Where a text label should be centered to have the best chances of being contained within
/// the polygon.
pub label_center: Pt2D,
pub amenities: Vec<Amenity>,
pub bldg_type: BuildingType,
pub parking: OffstreetParking,
/// Depending on options while importing, these might be empty, to save file space.
pub osm_tags: Tags,
/// The building's connection for any agent can change based on map edits. Just store the one
/// for pedestrians and lazily calculate the others.
pub sidewalk_pos: Position,
/// Goes from building to sidewalk
pub driveway_geom: PolyLine,
}
/// A business located inside a building.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Amenity {
pub names: NamePerLanguage,
/// This is the specific amenity listed in OSM, not the more general `AmenityType` category.
pub amenity_type: String,
/// Depending on options while importing, these might be empty, to save file space.
pub osm_tags: Tags,
}
/// Represent no parking as Private(0, false).
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub enum OffstreetParking {
/// (Name, spots)
PublicGarage(String, usize),
/// (Spots, explicitly tagged as a garage)
Private(usize, bool),
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum BuildingType {
Residential {
num_residents: usize,
num_housing_units: usize,
},
/// An estimated number of residents, workers
ResidentialCommercial(usize, usize),
/// An estimated number of workers
Commercial(usize),
Empty,
}
impl BuildingType {
pub fn has_residents(&self) -> bool {
match self {
BuildingType::Residential { .. } | BuildingType::ResidentialCommercial(_, _) => true,
BuildingType::Commercial(_) | BuildingType::Empty => false,
}
}
}
/// None corresponds to the native name
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct NamePerLanguage(
#[serde(
serialize_with = "serialize_btreemap",
deserialize_with = "deserialize_btreemap"
)]
pub(crate) BTreeMap<Option<String>, String>,
);
impl NamePerLanguage {
pub fn get(&self, lang: Option<&String>) -> &String {
// TODO Can we avoid this clone?
let lang = lang.cloned();
if let Some(name) = self.0.get(&lang) {
return name;
}
&self.0[&None]
}
pub fn new(tags: &Tags) -> Option<NamePerLanguage> {
let native_name = tags.get(osm::NAME)?;
let mut map = BTreeMap::new();
map.insert(None, native_name.to_string());
for (k, v) in tags.inner() {
if let Some(lang) = k.strip_prefix("name:") {
map.insert(Some(lang.to_string()), v.to_string());
}
}
Some(NamePerLanguage(map))
}
pub fn unnamed() -> NamePerLanguage {
let mut map = BTreeMap::new();
map.insert(None, "unnamed".to_string());
NamePerLanguage(map)
}
}
impl Building {
pub fn sidewalk(&self) -> LaneID {
self.sidewalk_pos.lane()
}
pub fn house_number(&self) -> Option<String> {
let num = self.address.split(' ').next().unwrap();
if num != "???" {
Some(num.to_string())
} else {
None
}
}
/// The polyline goes from the building to the driving position
// TODO Make this handle parking_blackhole
pub fn driving_connection(&self, map: &Map) -> Option<(Position, PolyLine)> {
let lane = map
.get_parent(self.sidewalk())
.find_closest_lane(self.sidewalk(), |l| PathConstraints::Car.can_use(l, map))?;
// TODO Do we need to insist on this buffer, now that we can make cars gradually appear?
let pos = self
.sidewalk_pos
.equiv_pos(lane, map)
.buffer_dist(Distance::meters(7.0), map)?;
Some((pos, self.driveway_geom.clone().optionally_push(pos.pt(map))))
}
/// Returns (biking position, sidewalk position). Could fail if the biking graph is
/// disconnected.
pub fn biking_connection(&self, map: &Map) -> Option<(Position, Position)> {
// Easy case: the building is directly next to a usable lane
if let Some(pair) = sidewalk_to_bike(self.sidewalk_pos, map) {
return Some(pair);
}
// Floodfill the sidewalk graph until we find a sidewalk<->bike connection.
let mut queue: VecDeque<LaneID> = VecDeque::new();
let mut visited: HashSet<LaneID> = HashSet::new();
queue.push_back(self.sidewalk());
loop {
if queue.is_empty() {
return None;
}
let l = queue.pop_front().unwrap();
if visited.contains(&l) {
continue;
}
visited.insert(l);
// TODO Could search by sidewalk endpoint
if let Some(pair) = sidewalk_to_bike(Position::new(l, map.get_l(l).length() / 2.0), map)
{
return Some(pair);
}
for t in map.get_turns_from_lane(l) {
if !visited.contains(&t.id.dst) {
queue.push_back(t.id.dst);
}
}
}
}
pub fn num_parking_spots(&self) -> usize {
match self.parking {
OffstreetParking::PublicGarage(_, n) => n,
OffstreetParking::Private(n, _) => n,
}
}
/// Does this building contain any amenity matching the category?
pub fn has_amenity(&self, category: AmenityType) -> bool {
for amenity in &self.amenities {
if AmenityType::categorize(&amenity.amenity_type) == Some(category) |
}
false
}
}
fn sidewalk_to_bike(sidewalk_pos: Position, map: &Map) -> Option<(Position, Position)> {
let lane = map
.get_parent(sidewalk_pos.lane())
.find_closest_lane(sidewalk_pos.lane(), |l| {
!l.biking_blackhole && PathConstraints::Bike.can_use(l, map)
})?;
// No buffer needed
Some((sidewalk_pos.equiv_pos(lane, map), sidewalk_pos))
}
/// Businesses are categorized into one of these types.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, EnumString, Display, EnumIter)]
pub enum AmenityType {
Bank,
Bar,
Beauty,
Bike,
Cafe,
CarRepair,
CarShare,
Childcare,
ConvenienceStore,
Culture,
Exercise,
FastFood,
Food,
GreenSpace,
Hotel,
Laundry,
Library,
Medical,
Pet,
Playground,
Pool,
PostOffice,
Religious,
School,
Shopping,
Supermarket,
Tourism,
University,
}
impl AmenityType {
fn types(self) -> Vec<&'static str> {
match self {
AmenityType::Bank => vec!["bank"],
AmenityType::Bar => vec!["bar", "pub", "nightclub", "biergarten"],
AmenityType::Beauty => vec!["hairdresser", "beauty", "chemist", "cosmetics"],
AmenityType::Bike => vec!["bicycle"],
AmenityType::Cafe => vec!["cafe", "pastry", "coffee", "tea", "bakery"],
AmenityType::CarRepair => vec!["car_repair"],
AmenityType::CarShare => vec!["car_sharing"],
AmenityType::Childcare => vec!["childcare", "kindergarten"],
AmenityType::ConvenienceStore => vec!["convenience"],
AmenityType::Culture => vec!["arts_centre", "art", "cinema", "theatre"],
AmenityType::Exercise => vec!["fitness_centre", "sports_centre", "track", "pitch"],
AmenityType::FastFood => vec!["fast_food", "food_court"],
AmenityType::Food => vec![
"restaurant",
"farm",
"ice_cream",
"seafood",
"cheese",
"chocolate",
"deli",
"butcher",
"confectionery",
"beverages",
"alcohol",
],
AmenityType::GreenSpace => vec!["park", "garden", "nature_reserve"],
AmenityType::Hotel => vec!["hotel", "hostel", "guest_house", "motel"],
AmenityType::Laundry => vec!["dry_cleaning", "laundry", "tailor"],
AmenityType::Library => vec!["library"],
AmenityType::Medical => vec![
"clinic", "dentist", "hospital", "pharmacy", "doctors", "optician",
],
AmenityType::Pet => vec!["veterinary", "pet", "animal_boarding", "pet_grooming"],
AmenityType::Playground => vec!["playground"],
AmenityType::Pool => vec!["swimming_pool"],
AmenityType::PostOffice => vec!["post_office"],
AmenityType::Religious => vec!["place_of_worship", "religion"],
AmenityType::School => vec!["school"],
AmenityType::Shopping => vec![
"wholesale",
"bag",
"marketplace",
"second_hand",
"charity",
"clothes",
"lottery",
"shoes",
"mall",
"department_store",
"car",
"tailor",
"nutrition_supplements",
"watches",
"craft",
"fabric",
"kiosk",
"antiques",
"shoemaker",
"hardware",
"houseware",
"mobile_phone",
"photo",
"toys",
"bed",
"florist",
"electronics",
"fishing",
"garden_centre",
"frame",
"watchmaker",
"boutique",
"mobile_phone",
"party",
"car_parts",
"video",
"video_games",
"musical_instrument",
"music",
"baby_goods",
"doityourself",
"jewelry",
"variety_store",
"gift",
"carpet",
"perfumery",
"curtain",
"appliance",
"furniture",
"lighting",
"sewing",
"books",
"sports",
"travel_agency",
"interior_decoration",
"stationery",
"computer",
"tyres",
"newsagent",
"general",
],
AmenityType::Supermarket => vec!["supermarket", "greengrocer"],
AmenityType::Tourism => vec![
"gallery",
"museum",
"zoo",
"attraction",
"theme_park",
"aquarium",
],
AmenityType::University => vec!["college", "university"],
}
}
/// All types of amenities, in alphabetical order.
pub fn all() -> Vec<AmenityType> {
AmenityType::iter().collect()
}
/// Categorize an OSM amenity tag.
pub fn categorize(a: &str) -> Option<AmenityType> {
for at in AmenityType::all() {
if at.types().contains(&a) {
return Some(at);
}
}
None
}
}
| {
return true;
} | conditional_block |
main.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"io"
"net/http"
"os"
"strconv"
"sync"
"time"
"k8s.io/klog/v2"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
// meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/workqueue"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
// The HTTP port under which the scraping endpoint ("/metrics") is served.
MetricsAddr = ":9101"
// The HTTP path under which the scraping endpoint ("/metrics") is served.
MetricsPath = "/metrics"
// The namespace, subsystem and name of the histogram collected by this controller.
HistogramNamespace = "scaletest"
HistogramSubsystem = "configmaps"
CreateHistogramName = "create_latency_seconds"
UpdateHistogramName = "update_latency_seconds"
// The names of the annotations holding the client-side creation and update timestamps.
CreateTimestampAnnotation = "scaletest/createTimestamp"
UpdateTimestampAnnotation = "scaletest/updateTimestamp"
// The layout of the annotation holding the client-side creation timestamp.
CreateTimestampLayout = "2006-01-02 15:04:05.000 -0700"
)
type Controller struct {
myAddr string
compare bool
queue workqueue.RateLimitingInterface
informer cache.Controller
lister corev1listers.ConfigMapLister
csvFilename string
csvFile io.Writer
// Histograms to collect latency samples
createLatencyHistogram *prometheus.HistogramVec
updateLatencyHistogram *prometheus.HistogramVec
// Counters for unusual events
updateCounter, strangeCounter *prometheus.CounterVec
duplicateCounter prometheus.Counter
// Gauge for the latest observed ResourceVersion
rvGauge prometheus.Gauge
sync.Mutex
// Data about each object, accessed under the mutex
objects map[string]*ObjectData
}
type ObjectData struct {
sync.Mutex
actuallyExists bool
ObjectQueueData
lastSeen *corev1.ConfigMap
}
// ObjectQueueData says what has happened since the last time
// a reference to the object was dequeued for logging.
type ObjectQueueData struct {
firstEnqueue, lastEnqueue time.Time
queuedAdds, queuedUpdates, queuedDeletes uint
}
var zeroObjectQueueData ObjectQueueData
var dummyTime = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
func (c *Controller) | (key string, addIfMissing, deleteIfPresent bool) *ObjectData {
c.Lock()
defer c.Unlock()
od := c.objects[key]
if od == nil {
od = &ObjectData{}
if addIfMissing {
c.objects[key] = od
}
} else if deleteIfPresent {
delete(c.objects, key)
}
return od
}
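// Note (editorial sketch, not in the original file): the two flags let this
// one helper serve both paths. The event handlers call
// getObjectData(key, true, false) so bookkeeping exists before a key is
// enqueued, while logDequeue calls getObjectData(key, desireExist, !desireExist)
// so the entry is dropped exactly when the object is observed to be gone.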
func NewController(queue workqueue.RateLimitingInterface, informer cache.Controller, lister corev1listers.ConfigMapLister, compare bool, csvFilename, myAddr string) *Controller {
createHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: CreateHistogramName,
Help: "Configmap creation notification latency, in seconds",
Buckets: []float64{-0.1, 0, 0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8},
},
[]string{"logger"},
)
if err := prometheus.Register(createHistogram); err != nil {
klog.Error(err)
createHistogram = nil
} else {
createHistogram.With(prometheus.Labels{"logger": myAddr}).Observe(0)
}
updateHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: UpdateHistogramName,
Help: "Configmap update notification latency, in seconds",
Buckets: []float64{-0.1, 0, 0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8},
},
[]string{"logger"},
)
if err := prometheus.Register(updateHistogram); err != nil {
klog.Error(err)
updateHistogram = nil
} else {
updateHistogram.With(prometheus.Labels{"logger": myAddr}).Observe(0)
}
updateCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "updates",
Help: "number of updates dequeued",
},
[]string{"logger"},
)
if err := prometheus.Register(updateCounter); err != nil {
klog.Error(err)
updateCounter = nil
} else {
updateCounter.With(prometheus.Labels{"logger": myAddr}).Add(0)
}
strangeCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "stranges",
Help: "number of strange situations dequeued",
},
[]string{"logger"},
)
if err := prometheus.Register(strangeCounter); err != nil {
klog.Error(err)
strangeCounter = nil
} else {
strangeCounter.With(prometheus.Labels{"logger": myAddr}).Add(0)
}
duplicateCounter := prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "duplicates",
Help: "number of duplicates dequeued",
ConstLabels: map[string]string{"logger": myAddr},
})
if err := prometheus.Register(duplicateCounter); err != nil {
klog.Error(err)
duplicateCounter = nil
} else {
duplicateCounter.Add(0)
}
rvGauge := prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "resourceVersion",
Help: "latest ResourceVersion observed",
ConstLabels: map[string]string{"logger": myAddr},
})
if err := prometheus.Register(rvGauge); err != nil {
klog.Error(err)
rvGauge = nil
}
return &Controller{
myAddr: myAddr,
compare: compare,
informer: informer,
queue: queue,
lister: lister,
csvFilename: csvFilename,
createLatencyHistogram: createHistogram,
updateLatencyHistogram: updateHistogram,
updateCounter: updateCounter,
strangeCounter: strangeCounter,
duplicateCounter: duplicateCounter,
rvGauge: rvGauge,
objects: make(map[string]*ObjectData),
}
}
func (c *Controller) processNextItem() bool {
// Wait until there is a new item in the working queue
key, quit := c.queue.Get()
if quit {
return false
}
// Tell the queue that we are done with processing this key. This unblocks the key for other workers
// This allows safe parallel processing because two objects with the same key are never processed in
// parallel.
defer c.queue.Done(key)
// Invoke the method containing the business logic
err := c.logDequeue(key.(string))
// Handle the error if something went wrong during the execution of the business logic
c.handleErr(err, key)
return true
}
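// Note (editorial sketch): queue.Get blocks until an item is available or the
// queue shuts down, and Done must always run (hence the defer). While a key is
// in flight, further Adds of the same key are coalesced and re-delivered only
// after Done, which is what makes same-key processing strictly serial.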
func (c *Controller) logDequeue(key string) error {
now := time.Now()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("Failed to split key %q: %v", key, err))
return nil
}
obj, err := c.lister.ConfigMaps(namespace).Get(name)
if err != nil && !apierrors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("Fetching object with key %s from store failed with %v", key, err))
return nil
}
desireExist := err == nil
od := c.getObjectData(key, desireExist, !desireExist)
op := "delete"
var creationTime time.Time = dummyTime
if obj != nil {
creationTime = obj.ObjectMeta.CreationTimestamp.Time
}
var oqd ObjectQueueData
var lastSeen *corev1.ConfigMap
func() {
od.Lock()
defer od.Unlock()
oqd = od.ObjectQueueData
if c.compare {
lastSeen = od.lastSeen
od.lastSeen = obj.DeepCopy()
}
if desireExist {
if od.actuallyExists {
op = "update"
} else {
op = "create"
}
od.ObjectQueueData = zeroObjectQueueData
od.actuallyExists = true
}
}()
var diff int
if c.compare {
if ConfigMapQuickEqual(lastSeen, obj) {
diff = 2
if c.duplicateCounter != nil {
c.duplicateCounter.Add(1)
}
} else {
diff = 3
}
}
// Log it
if c.csvFile != nil {
_, err = c.csvFile.Write([]byte(fmt.Sprintf("%s,%s,%q,%s,%d,%d,%d,%s,%s,%d\n",
formatTime(now), op, key, formatTimeNoMillis(creationTime),
oqd.queuedAdds, oqd.queuedUpdates, oqd.queuedDeletes,
formatTime(oqd.firstEnqueue), formatTime(oqd.lastEnqueue),
diff,
)))
if err != nil {
runtime.HandleError(fmt.Errorf("Error writing to CSV file named %q: %+v", c.csvFilename, err))
}
} else {
klog.V(4).Infof("c.csvFile == nil\n")
}
if diff == 2 {
return nil
}
if oqd.queuedAdds+oqd.queuedUpdates+oqd.queuedDeletes != 1 {
if c.strangeCounter != nil {
c.strangeCounter.
With(prometheus.Labels{"logger": c.myAddr}).
Add(1)
}
} else if oqd.queuedUpdates == 1 && c.updateCounter != nil {
c.updateCounter.
With(prometheus.Labels{"logger": c.myAddr}).
Add(1)
}
if op != "delete" && obj != nil && obj.Annotations != nil {
var ctS string
var latencyHistogram *prometheus.HistogramVec
if op == "create" {
ctS = obj.Annotations[CreateTimestampAnnotation]
latencyHistogram = c.createLatencyHistogram
} else {
ctS = obj.Annotations[UpdateTimestampAnnotation]
latencyHistogram = c.updateLatencyHistogram
}
if ctS != "" && latencyHistogram != nil {
clientTime, err := time.Parse(CreateTimestampLayout, ctS)
if err != nil {
return nil
}
latency := now.Sub(clientTime)
klog.V(4).Infof("Latency = %v for op=%s, key=%s, now=%s, clientTime=%s, ts=%s\n", latency, op, key, now, clientTime, ctS)
latencyHistogram.
With(prometheus.Labels{"logger": c.myAddr}).
Observe(latency.Seconds())
}
}
return nil
}
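// Editorial sketch of the writer side this controller expects; the function
// name is illustrative, not part of the original program. A load generator
// would stamp the annotation at send time so logDequeue can derive the
// notification latency from it.
func stampCreateTime(cm *corev1.ConfigMap) *corev1.ConfigMap {
if cm.Annotations == nil {
cm.Annotations = map[string]string{}
}
// Record the client-side send time in the layout logDequeue parses.
cm.Annotations[CreateTimestampAnnotation] = time.Now().Format(CreateTimestampLayout)
return cm
}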
// handleErr checks if an error happened and makes sure we will retry later.
func (c *Controller) handleErr(err error, key interface{}) {
if err == nil {
// Forget about the #AddRateLimited history of the key on every successful synchronization.
// This ensures that future processing of updates for this key is not delayed because of
// an outdated error history.
c.queue.Forget(key)
return
}
klog.Infof("Error syncing ConfigMap %v: %v", key, err)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.
c.queue.AddRateLimited(key)
return
}
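// Editorial sketch (not in the original file): a common client-go variant
// that gives up after a bounded number of retries instead of retrying
// forever; it assumes the same queue field.
func (c *Controller) handleErrBounded(err error, key interface{}) {
if err == nil {
c.queue.Forget(key)
return
}
if c.queue.NumRequeues(key) < 5 {
c.queue.AddRateLimited(key)
return
}
// Too many failures: drop the key and surface the error once.
c.queue.Forget(key)
runtime.HandleError(err)
}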
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
defer runtime.HandleCrash()
// Let the workers stop when we are done
defer c.queue.ShutDown()
klog.Info("Starting Object Logging controller")
csvFile, err := os.Create(c.csvFilename)
if err != nil {
runtime.HandleError(fmt.Errorf("Failed to create data file named %q: %s", c.csvFilename, err))
} else {
c.csvFile = csvFile
defer csvFile.Close()
}
go c.informer.Run(stopCh)
// Wait for all involved caches to be synced, before processing items from the queue is started
if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
return
}
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
klog.Info("Stopping Object Logging controller")
}
func (c *Controller) runWorker() {
for c.processNextItem() {
}
}
func (c *Controller) ObserveResourceVersion(obj interface{}) {
switch o := obj.(type) {
case cache.DeletedFinalStateUnknown:
klog.V(5).Infof("Recursing for %#+v @ %#p\n", obj, obj)
c.ObserveResourceVersion(o.Obj)
case cache.ExplicitKey:
klog.V(5).Infof("Got ExplicitKey %q\n", o)
return
default:
meta, err := apimeta.Accessor(obj)
if err != nil {
klog.V(5).Infof("apimeta.Accessor(%#+v) threw %#+v\n", obj, err)
return
}
rvS := meta.GetResourceVersion()
rvU, err := strconv.ParseUint(rvS, 10, 64)
if err != nil {
klog.V(5).Infof("Error parsing ResourceVersion %q of %#+v: %#+v\n", rvS, obj, err)
} else if c.rvGauge != nil {
klog.V(5).Infof("Observing ResourceVersion %d of %#+v @ %#p\n", rvU, obj, obj)
c.rvGauge.Set(float64(rvU))
}
}
}
func main() {
var kubeconfig string
var master string
var useProtobuf bool
var dataFilename string
var numThreads int
var noCompare bool
klog.InitFlags(nil)
flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
flag.StringVar(&master, "master", "", "master url")
flag.BoolVar(&useProtobuf, "useProtobuf", false, "indicates whether to encode objects with protobuf (as opposed to JSON)")
flag.StringVar(&dataFilename, "data-filename", "/tmp/obj-log.csv", "name of CSV file to create")
flag.IntVar(&numThreads, "threads", 1, "number of worker threads")
flag.BoolVar(&noCompare, "no-compare", false, "omit comparing object values")
flag.Set("logtostderr", "true")
flag.Parse()
// creates the connection
config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig)
if err != nil {
klog.Fatal(err)
}
klog.Infof("Config.Host=%q\n", config.Host)
klog.Infof("Config.APIPath=%q\n", config.APIPath)
myAddr := GetHostAddr()
klog.Infof("Using %s as my host address\n", myAddr)
config.UserAgent = fmt.Sprintf("obj-logger@%s", myAddr)
if useProtobuf {
config.ContentType = "application/vnd.kubernetes.protobuf"
}
// creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
klog.Fatal(err)
}
informerFactory := informers.NewSharedInformerFactory(clientset, 0)
cfgMapInformer := informerFactory.Core().V1().ConfigMaps()
informer := cfgMapInformer.Informer()
lister := cfgMapInformer.Lister()
// create the workqueue
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
controller := NewController(queue, informer, lister, !noCompare, dataFilename, myAddr)
// Bind the workqueue to a cache with the help of an informer. This way we make sure that
// whenever the cache is updated, the object key is added to the workqueue.
// Note that when we finally process the item from the workqueue, we might see a newer version
// of the object than the version which was responsible for triggering the update.
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
now := time.Now()
klog.V(4).Infof("ADD %+v @ %#p\n", obj, obj)
controller.ObserveResourceVersion(obj)
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedAdds++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", obj, err)
}
},
UpdateFunc: func(oldobj interface{}, newobj interface{}) {
now := time.Now()
klog.V(4).Infof("UPDATE %#v @ %#p\n", newobj, newobj)
controller.ObserveResourceVersion(newobj)
key, err := cache.MetaNamespaceKeyFunc(newobj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedUpdates++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", newobj, err)
}
},
DeleteFunc: func(obj interface{}) {
now := time.Now()
klog.V(4).Infof("DELETE %#v @ %#p\n", obj, obj)
controller.ObserveResourceVersion(obj)
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function.
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedDeletes++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", obj, err)
}
},
})
// Now let's start the controller
stop := make(chan struct{})
defer close(stop)
go controller.Run(numThreads, stop)
// Serve Prometheus metrics
http.Handle("/metrics", promhttp.Handler())
go func() {
klog.Error(http.ListenAndServe(MetricsAddr, nil))
}()
// Wait forever
select {}
}
func formatTime(t time.Time) string {
t = t.UTC()
Y, M, D := t.Date()
h, m, s := t.Clock()
ms := t.Nanosecond() / 1000000
return fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d.%03d", Y, M, D, h, m, s, ms)
}
func formatTimeNoMillis(t time.Time) string {
t = t.UTC()
Y, M, D := t.Date()
h, m, s := t.Clock()
return fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d", Y, M, D, h, m, s)
}
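// Note (editorial sketch): for 2017-06-02 15:04:05.123456789 UTC, formatTime
// yields "2017-06-02 15:04:05.123" and formatTimeNoMillis "2017-06-02 15:04:05".
// Both convert to UTC first, so the CSV timestamps also sort lexically.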
func ConfigMapQuickEqual(x, y *corev1.ConfigMap) bool {
if x == y {
return true
}
if x == nil || y == nil {
return false
}
return x.Name == y.Name && x.Namespace == y.Namespace &&
x.UID == y.UID && x.ResourceVersion == y.ResourceVersion &&
MapStringStringEqual(x.Data, y.Data) &&
MapStringStringEqual(x.Labels, y.Labels) &&
MapStringStringEqual(x.Annotations, y.Annotations)
}
func MapStringStringEqual(x, y map[string]string) bool {
if x == nil {
return y == nil
} else if y == nil {
return false
}
if len(x) != len(y) {
return false
}
for k, v := range x {
if y[k] != v {
return false
}
}
return true
}
| getObjectData | identifier_name |
main.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"io"
"net/http"
"os"
"strconv"
"sync"
"time"
"k8s.io/klog/v2"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
// meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/workqueue"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
// The HTTP port under which the scraping endpoint ("/metrics") is served.
MetricsAddr = ":9101"
// The HTTP path under which the scraping endpoint ("/metrics") is served.
MetricsPath = "/metrics"
// The namespace, subsystem and name of the histogram collected by this controller.
HistogramNamespace = "scaletest"
HistogramSubsystem = "configmaps"
CreateHistogramName = "create_latency_seconds"
UpdateHistogramName = "update_latency_seconds"
// The names of the annotations holding the client-side creation and update timestamps.
CreateTimestampAnnotation = "scaletest/createTimestamp"
UpdateTimestampAnnotation = "scaletest/updateTimestamp"
// The layout of the annotation holding the client-side creation timestamp.
CreateTimestampLayout = "2006-01-02 15:04:05.000 -0700"
)
type Controller struct {
myAddr string
compare bool
queue workqueue.RateLimitingInterface
informer cache.Controller
lister corev1listers.ConfigMapLister
csvFilename string
csvFile io.Writer
// Histograms to collect latency samples
createLatencyHistogram *prometheus.HistogramVec
updateLatencyHistogram *prometheus.HistogramVec
// Counters for unusual events
updateCounter, strangeCounter *prometheus.CounterVec
duplicateCounter prometheus.Counter
// Gauge for the latest observed ResourceVersion
rvGauge prometheus.Gauge
sync.Mutex
// Data about each object, accessed under the mutex
objects map[string]*ObjectData
}
type ObjectData struct {
sync.Mutex
actuallyExists bool
ObjectQueueData
lastSeen *corev1.ConfigMap
}
// ObjectQueueData says what has happened since the last time
// a reference to the object was dequeued for logging.
type ObjectQueueData struct {
firstEnqueue, lastEnqueue time.Time
queuedAdds, queuedUpdates, queuedDeletes uint
}
var zeroObjectQueueData ObjectQueueData
var dummyTime = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
func (c *Controller) getObjectData(key string, addIfMissing, deleteIfPresent bool) *ObjectData {
c.Lock()
defer c.Unlock()
od := c.objects[key]
if od == nil {
od = &ObjectData{}
if addIfMissing {
c.objects[key] = od
}
} else if deleteIfPresent {
delete(c.objects, key)
}
return od
}
func NewController(queue workqueue.RateLimitingInterface, informer cache.Controller, lister corev1listers.ConfigMapLister, compare bool, csvFilename, myAddr string) *Controller {
createHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: CreateHistogramName,
Help: "Configmap creation notification latency, in seconds",
Buckets: []float64{-0.1, 0, 0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8},
},
[]string{"logger"},
)
if err := prometheus.Register(createHistogram); err != nil {
klog.Error(err)
createHistogram = nil
} else {
createHistogram.With(prometheus.Labels{"logger": myAddr}).Observe(0)
}
updateHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: UpdateHistogramName,
Help: "Configmap update notification latency, in seconds",
Buckets: []float64{-0.1, 0, 0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8},
},
[]string{"logger"},
)
if err := prometheus.Register(updateHistogram); err != nil {
klog.Error(err)
updateHistogram = nil
} else {
updateHistogram.With(prometheus.Labels{"logger": myAddr}).Observe(0)
}
updateCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "updates",
Help: "number of updates dequeued",
},
[]string{"logger"},
)
if err := prometheus.Register(updateCounter); err != nil {
klog.Error(err)
updateCounter = nil
} else {
updateCounter.With(prometheus.Labels{"logger": myAddr}).Add(0)
}
strangeCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "stranges",
Help: "number of strange situations dequeued",
},
[]string{"logger"},
)
if err := prometheus.Register(strangeCounter); err != nil {
klog.Error(err)
strangeCounter = nil
} else {
strangeCounter.With(prometheus.Labels{"logger": myAddr}).Add(0)
}
duplicateCounter := prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "duplicates",
Help: "number of duplicates dequeued",
ConstLabels: map[string]string{"logger": myAddr},
})
if err := prometheus.Register(duplicateCounter); err != nil {
klog.Error(err)
duplicateCounter = nil
} else {
duplicateCounter.Add(0)
}
rvGauge := prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "resourceVersion",
Help: "latest ResourceVersion observed",
ConstLabels: map[string]string{"logger": myAddr},
})
if err := prometheus.Register(rvGauge); err != nil {
klog.Error(err)
rvGauge = nil
}
return &Controller{
myAddr: myAddr,
compare: compare,
informer: informer,
queue: queue,
lister: lister,
csvFilename: csvFilename,
createLatencyHistogram: createHistogram,
updateLatencyHistogram: updateHistogram,
updateCounter: updateCounter,
strangeCounter: strangeCounter,
duplicateCounter: duplicateCounter,
rvGauge: rvGauge,
objects: make(map[string]*ObjectData),
}
}
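// Editorial sketch: the register-or-disable pattern repeated above, factored
// into a hypothetical helper (not in the original file). Callers would still
// need the concrete collector type back, so the inline if/else used above is
// arguably clearer; this only names the pattern.
func registerOrNil(c prometheus.Collector) prometheus.Collector {
if err := prometheus.Register(c); err != nil {
klog.Error(err)
return nil
}
return c
}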
func (c *Controller) processNextItem() bool |
func (c *Controller) logDequeue(key string) error {
now := time.Now()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("Failed to split key %q: %v", key, err))
return nil
}
obj, err := c.lister.ConfigMaps(namespace).Get(name)
if err != nil && !apierrors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("Fetching object with key %s from store failed with %v", key, err))
return nil
}
desireExist := err == nil
od := c.getObjectData(key, desireExist, !desireExist)
op := "delete"
var creationTime time.Time = dummyTime
if obj != nil {
creationTime = obj.ObjectMeta.CreationTimestamp.Time
}
var oqd ObjectQueueData
var lastSeen *corev1.ConfigMap
func() {
od.Lock()
defer od.Unlock()
oqd = od.ObjectQueueData
if c.compare {
lastSeen = od.lastSeen
od.lastSeen = obj.DeepCopy()
}
if desireExist {
if od.actuallyExists {
op = "update"
} else {
op = "create"
}
od.ObjectQueueData = zeroObjectQueueData
od.actuallyExists = true
}
}()
var diff int
if c.compare {
if ConfigMapQuickEqual(lastSeen, obj) {
diff = 2
if c.duplicateCounter != nil {
c.duplicateCounter.Add(1)
}
} else {
diff = 3
}
}
// Log it
if c.csvFile != nil {
_, err = c.csvFile.Write([]byte(fmt.Sprintf("%s,%s,%q,%s,%d,%d,%d,%s,%s,%d\n",
formatTime(now), op, key, formatTimeNoMillis(creationTime),
oqd.queuedAdds, oqd.queuedUpdates, oqd.queuedDeletes,
formatTime(oqd.firstEnqueue), formatTime(oqd.lastEnqueue),
diff,
)))
if err != nil {
runtime.HandleError(fmt.Errorf("Error writing to CSV file named %q: %+v", c.csvFilename, err))
}
} else {
klog.V(4).Infof("c.csvFile == nil\n")
}
if diff == 2 {
return nil
}
if oqd.queuedAdds+oqd.queuedUpdates+oqd.queuedDeletes != 1 {
if c.strangeCounter != nil {
c.strangeCounter.
With(prometheus.Labels{"logger": c.myAddr}).
Add(1)
}
} else if oqd.queuedUpdates == 1 && c.updateCounter != nil {
c.updateCounter.
With(prometheus.Labels{"logger": c.myAddr}).
Add(1)
}
if op != "delete" && obj != nil && obj.Annotations != nil {
var ctS string
var latencyHistogram *prometheus.HistogramVec
if op == "create" {
ctS = obj.Annotations[CreateTimestampAnnotation]
latencyHistogram = c.createLatencyHistogram
} else {
ctS = obj.Annotations[UpdateTimestampAnnotation]
latencyHistogram = c.updateLatencyHistogram
}
if ctS != "" && latencyHistogram != nil {
clientTime, err := time.Parse(CreateTimestampLayout, ctS)
if err != nil {
return nil
}
latency := now.Sub(clientTime)
klog.V(4).Infof("Latency = %v for op=%s, key=%s, now=%s, clientTime=%s, ts=%s\n", latency, op, key, now, clientTime, ctS)
latencyHistogram.
With(prometheus.Labels{"logger": c.myAddr}).
Observe(latency.Seconds())
}
}
return nil
}
// handleErr checks if an error happened and makes sure we will retry later.
func (c *Controller) handleErr(err error, key interface{}) {
if err == nil {
// Forget about the #AddRateLimited history of the key on every successful synchronization.
// This ensures that future processing of updates for this key is not delayed because of
// an outdated error history.
c.queue.Forget(key)
return
}
klog.Infof("Error syncing ConfigMap %v: %v", key, err)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.
c.queue.AddRateLimited(key)
return
}
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
defer runtime.HandleCrash()
// Let the workers stop when we are done
defer c.queue.ShutDown()
klog.Info("Starting Object Logging controller")
csvFile, err := os.Create(c.csvFilename)
if err != nil {
runtime.HandleError(fmt.Errorf("Failed to create data file named %q: %s", c.csvFilename, err))
} else {
c.csvFile = csvFile
defer csvFile.Close()
}
go c.informer.Run(stopCh)
// Wait for all involved caches to be synced, before processing items from the queue is started
if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
return
}
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
klog.Info("Stopping Object Logging controller")
}
func (c *Controller) runWorker() {
for c.processNextItem() {
}
}
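// Note (editorial sketch): wait.Until re-invokes runWorker only after it
// returns, and the loop above returns only once processNextItem observes the
// queue shutting down; the one-second period is therefore a restart delay,
// not a polling interval.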
func (c *Controller) ObserveResourceVersion(obj interface{}) {
switch o := obj.(type) {
case cache.DeletedFinalStateUnknown:
klog.V(5).Infof("Recursing for %#+v @ %#p\n", obj, obj)
c.ObserveResourceVersion(o.Obj)
case cache.ExplicitKey:
klog.V(5).Infof("Got ExplicitKey %q\n", o)
return
default:
meta, err := apimeta.Accessor(obj)
if err != nil {
klog.V(5).Infof("apimeta.Accessor(%#+v) threw %#+v\n", obj, err)
return
}
rvS := meta.GetResourceVersion()
rvU, err := strconv.ParseUint(rvS, 10, 64)
if err != nil {
klog.V(5).Infof("Error parsing ResourceVersion %q of %#+v: %#+v\n", rvS, obj, err)
} else if c.rvGauge != nil {
klog.V(5).Infof("Observing ResourceVersion %d of %#+v @ %#p\n", rvU, obj, obj)
c.rvGauge.Set(float64(rvU))
}
}
}
func main() {
var kubeconfig string
var master string
var useProtobuf bool
var dataFilename string
var numThreads int
var noCompare bool
klog.InitFlags(nil)
flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
flag.StringVar(&master, "master", "", "master url")
flag.BoolVar(&useProtobuf, "useProtobuf", false, "indicates whether to encode objects with protobuf (as opposed to JSON)")
flag.StringVar(&dataFilename, "data-filename", "/tmp/obj-log.csv", "name of CSV file to create")
flag.IntVar(&numThreads, "threads", 1, "number of worker threads")
flag.BoolVar(&noCompare, "no-compare", false, "omit comparing object values")
flag.Set("logtostderr", "true")
flag.Parse()
// creates the connection
config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig)
if err != nil {
klog.Fatal(err)
}
klog.Infof("Config.Host=%q\n", config.Host)
klog.Infof("Config.APIPath=%q\n", config.APIPath)
myAddr := GetHostAddr()
klog.Infof("Using %s as my host address\n", myAddr)
config.UserAgent = fmt.Sprintf("obj-logger@%s", myAddr)
if useProtobuf {
config.ContentType = "application/vnd.kubernetes.protobuf"
}
// creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
klog.Fatal(err)
}
informerFactory := informers.NewSharedInformerFactory(clientset, 0)
cfgMapInformer := informerFactory.Core().V1().ConfigMaps()
informer := cfgMapInformer.Informer()
lister := cfgMapInformer.Lister()
// create the workqueue
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
controller := NewController(queue, informer, lister, !noCompare, dataFilename, myAddr)
// Bind the workqueue to a cache with the help of an informer. This way we make sure that
// whenever the cache is updated, the object key is added to the workqueue.
// Note that when we finally process the item from the workqueue, we might see a newer version
// of the object than the version which was responsible for triggering the update.
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
now := time.Now()
klog.V(4).Infof("ADD %+v @ %#p\n", obj, obj)
controller.ObserveResourceVersion(obj)
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedAdds++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", obj, err)
}
},
UpdateFunc: func(oldobj interface{}, newobj interface{}) {
now := time.Now()
klog.V(4).Infof("UPDATE %#v @ %#p\n", newobj, newobj)
controller.ObserveResourceVersion(newobj)
key, err := cache.MetaNamespaceKeyFunc(newobj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedUpdates++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", newobj, err)
}
},
DeleteFunc: func(obj interface{}) {
now := time.Now()
klog.V(4).Infof("DELETE %#v @ %#p\n", obj, obj)
controller.ObserveResourceVersion(obj)
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function.
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedDeletes++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", obj, err)
}
},
})
// Now let's start the controller
stop := make(chan struct{})
defer close(stop)
go controller.Run(numThreads, stop)
// Serve Prometheus metrics
http.Handle("/metrics", promhttp.Handler())
go func() {
klog.Error(http.ListenAndServe(MetricsAddr, nil))
}()
// Wait forever
select {}
}
func formatTime(t time.Time) string {
t = t.UTC()
Y, M, D := t.Date()
h, m, s := t.Clock()
ms := t.Nanosecond() / 1000000
return fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d.%03d", Y, M, D, h, m, s, ms)
}
func formatTimeNoMillis(t time.Time) string {
t = t.UTC()
Y, M, D := t.Date()
h, m, s := t.Clock()
return fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d", Y, M, D, h, m, s)
}
func ConfigMapQuickEqual(x, y *corev1.ConfigMap) bool {
if x == y {
return true
}
if x == nil || y == nil {
return false
}
return x.Name == y.Name && x.Namespace == y.Namespace &&
x.UID == y.UID && x.ResourceVersion == y.ResourceVersion &&
MapStringStringEqual(x.Data, y.Data) &&
MapStringStringEqual(x.Labels, y.Labels) &&
MapStringStringEqual(x.Annotations, y.Annotations)
}
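// Note (editorial sketch): because ResourceVersion participates in this
// comparison, only a re-notification of the very same version (and payload)
// counts as a duplicate in logDequeue; any ResourceVersion change is treated
// as a real update even when Data, Labels and Annotations are unchanged.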
func MapStringStringEqual(x, y map[string]string) bool {
if x == nil {
return y == nil
} else if y == nil {
return false
}
if len(x) != len(y) {
return false
}
for k, v := range x {
if y[k] != v {
return false
}
}
return true
}
| {
// Wait until there is a new item in the working queue
key, quit := c.queue.Get()
if quit {
return false
}
// Tell the queue that we are done with processing this key. This unblocks the key for other workers
// This allows safe parallel processing because two objects with the same key are never processed in
// parallel.
defer c.queue.Done(key)
// Invoke the method containing the business logic
err := c.logDequeue(key.(string))
// Handle the error if something went wrong during the execution of the business logic
c.handleErr(err, key)
return true
} | identifier_body |
main.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"io"
"net/http"
"os"
"strconv"
"sync"
"time"
"k8s.io/klog/v2"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
// meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/workqueue"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
// The HTTP port under which the scraping endpoint ("/metrics") is served.
MetricsAddr = ":9101"
// The HTTP path under which the scraping endpoint ("/metrics") is served.
MetricsPath = "/metrics"
// The namespace, subsystem and name of the histogram collected by this controller.
HistogramNamespace = "scaletest"
HistogramSubsystem = "configmaps"
CreateHistogramName = "create_latency_seconds"
UpdateHistogramName = "update_latency_seconds"
// The names of the annotations holding the client-side creation and update timestamps.
CreateTimestampAnnotation = "scaletest/createTimestamp"
UpdateTimestampAnnotation = "scaletest/updateTimestamp"
// The layout of the annotation holding the client-side creation timestamp.
CreateTimestampLayout = "2006-01-02 15:04:05.000 -0700"
)
type Controller struct {
myAddr string
compare bool
queue workqueue.RateLimitingInterface
informer cache.Controller
lister corev1listers.ConfigMapLister
csvFilename string
csvFile io.Writer
// Histograms to collect latency samples
createLatencyHistogram *prometheus.HistogramVec
updateLatencyHistogram *prometheus.HistogramVec
// Counters for unusual events
updateCounter, strangeCounter *prometheus.CounterVec
duplicateCounter prometheus.Counter
// Gauge for the latest observed ResourceVersion
rvGauge prometheus.Gauge
sync.Mutex
// Data about each object, accessed under the mutex
objects map[string]*ObjectData
}
type ObjectData struct {
sync.Mutex
actuallyExists bool
ObjectQueueData
lastSeen *corev1.ConfigMap
}
// ObjectQueueData says what has happened since the last time
// a reference to the object was dequeued for logging.
type ObjectQueueData struct {
firstEnqueue, lastEnqueue time.Time
queuedAdds, queuedUpdates, queuedDeletes uint
}
var zeroObjectQueueData ObjectQueueData
var dummyTime = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
func (c *Controller) getObjectData(key string, addIfMissing, deleteIfPresent bool) *ObjectData {
c.Lock()
defer c.Unlock()
od := c.objects[key]
if od == nil {
od = &ObjectData{}
if addIfMissing {
c.objects[key] = od
}
} else if deleteIfPresent {
delete(c.objects, key)
}
return od
}
func NewController(queue workqueue.RateLimitingInterface, informer cache.Controller, lister corev1listers.ConfigMapLister, compare bool, csvFilename, myAddr string) *Controller {
createHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: CreateHistogramName,
Help: "Configmap creation notification latency, in seconds",
Buckets: []float64{-0.1, 0, 0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8},
},
[]string{"logger"},
)
if err := prometheus.Register(createHistogram); err != nil {
klog.Error(err)
createHistogram = nil
} else {
createHistogram.With(prometheus.Labels{"logger": myAddr}).Observe(0)
}
updateHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: UpdateHistogramName,
Help: "Configmap update notification latency, in seconds",
Buckets: []float64{-0.1, 0, 0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8},
},
[]string{"logger"},
)
if err := prometheus.Register(updateHistogram); err != nil {
klog.Error(err)
updateHistogram = nil
} else {
updateHistogram.With(prometheus.Labels{"logger": myAddr}).Observe(0)
}
updateCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "updates",
Help: "number of updates dequeued",
},
[]string{"logger"},
)
if err := prometheus.Register(updateCounter); err != nil {
klog.Error(err)
updateCounter = nil
} else {
updateCounter.With(prometheus.Labels{"logger": myAddr}).Add(0)
}
strangeCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "stranges",
Help: "number of strange situations dequeued",
},
[]string{"logger"},
)
if err := prometheus.Register(strangeCounter); err != nil {
klog.Error(err)
strangeCounter = nil
} else {
strangeCounter.With(prometheus.Labels{"logger": myAddr}).Add(0)
}
duplicateCounter := prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "duplicates",
Help: "number of duplicates dequeued",
ConstLabels: map[string]string{"logger": myAddr},
})
if err := prometheus.Register(duplicateCounter); err != nil {
klog.Error(err)
duplicateCounter = nil
} else {
duplicateCounter.Add(0)
}
rvGauge := prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "resourceVersion",
Help: "latest ResourceVersion observed",
ConstLabels: map[string]string{"logger": myAddr},
})
if err := prometheus.Register(rvGauge); err != nil {
klog.Error(err)
rvGauge = nil
}
return &Controller{
myAddr: myAddr,
compare: compare,
informer: informer,
queue: queue,
lister: lister,
csvFilename: csvFilename,
createLatencyHistogram: createHistogram,
updateLatencyHistogram: updateHistogram,
updateCounter: updateCounter,
strangeCounter: strangeCounter,
duplicateCounter: duplicateCounter,
rvGauge: rvGauge,
objects: make(map[string]*ObjectData),
}
}
func (c *Controller) processNextItem() bool {
// Wait until there is a new item in the working queue
key, quit := c.queue.Get()
if quit {
return false
}
// Tell the queue that we are done with processing this key. This unblocks the key for other workers
// This allows safe parallel processing because two objects with the same key are never processed in
// parallel.
defer c.queue.Done(key)
// Invoke the method containing the business logic
err := c.logDequeue(key.(string))
// Handle the error if something went wrong during the execution of the business logic
c.handleErr(err, key)
return true
}
func (c *Controller) logDequeue(key string) error {
now := time.Now()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("Failed to split key %q: %v", key, err))
return nil
}
obj, err := c.lister.ConfigMaps(namespace).Get(name)
if err != nil && !apierrors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("Fetching object with key %s from store failed with %v", key, err))
return nil
}
desireExist := err == nil
od := c.getObjectData(key, desireExist, !desireExist)
op := "delete"
var creationTime time.Time = dummyTime
if obj != nil {
creationTime = obj.ObjectMeta.CreationTimestamp.Time
}
var oqd ObjectQueueData
var lastSeen *corev1.ConfigMap
func() {
od.Lock()
defer od.Unlock()
oqd = od.ObjectQueueData
if c.compare {
lastSeen = od.lastSeen
od.lastSeen = obj.DeepCopy()
}
if desireExist {
if od.actuallyExists {
op = "update"
} else {
op = "create"
}
od.ObjectQueueData = zeroObjectQueueData
od.actuallyExists = true
}
}()
var diff int
if c.compare {
if ConfigMapQuickEqual(lastSeen, obj) {
diff = 2
if c.duplicateCounter != nil {
c.duplicateCounter.Add(1)
}
} else {
diff = 3
}
}
// Log it
if c.csvFile != nil {
_, err = c.csvFile.Write([]byte(fmt.Sprintf("%s,%s,%q,%s,%d,%d,%d,%s,%s,%d\n",
formatTime(now), op, key, formatTimeNoMillis(creationTime),
oqd.queuedAdds, oqd.queuedUpdates, oqd.queuedDeletes,
formatTime(oqd.firstEnqueue), formatTime(oqd.lastEnqueue),
diff,
)))
if err != nil {
runtime.HandleError(fmt.Errorf("Error writing to CSV file named %q: %+v", c.csvFilename, err))
}
} else {
klog.V(4).Infof("c.csvFile == nil\n")
}
if diff == 2 {
return nil
}
if oqd.queuedAdds+oqd.queuedUpdates+oqd.queuedDeletes != 1 {
if c.strangeCounter != nil {
c.strangeCounter.
With(prometheus.Labels{"logger": c.myAddr}).
Add(1)
}
} else if oqd.queuedUpdates == 1 && c.updateCounter != nil {
c.updateCounter.
With(prometheus.Labels{"logger": c.myAddr}).
Add(1)
}
if op != "delete" && obj != nil && obj.Annotations != nil {
var ctS string
var latencyHistogram *prometheus.HistogramVec
if op == "create" {
ctS = obj.Annotations[CreateTimestampAnnotation]
latencyHistogram = c.createLatencyHistogram
} else {
ctS = obj.Annotations[UpdateTimestampAnnotation]
latencyHistogram = c.updateLatencyHistogram
}
if ctS != "" && latencyHistogram != nil {
clientTime, err := time.Parse(CreateTimestampLayout, ctS)
if err != nil {
return nil
}
latency := now.Sub(clientTime)
klog.V(4).Infof("Latency = %v for op=%s, key=%s, now=%s, clientTime=%s, ts=%s\n", latency, op, key, now, clientTime, ctS)
latencyHistogram.
With(prometheus.Labels{"logger": c.myAddr}).
Observe(latency.Seconds())
}
}
return nil
}
// handleErr checks if an error happened and makes sure we will retry later.
func (c *Controller) handleErr(err error, key interface{}) {
if err == nil {
// Forget about the #AddRateLimited history of the key on every successful synchronization.
// This ensures that future processing of updates for this key is not delayed because of
// an outdated error history.
c.queue.Forget(key)
return
}
klog.Infof("Error syncing ConfigMap %v: %v", key, err)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.
c.queue.AddRateLimited(key)
return
}
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
defer runtime.HandleCrash()
// Let the workers stop when we are done
defer c.queue.ShutDown()
klog.Info("Starting Object Logging controller")
csvFile, err := os.Create(c.csvFilename)
if err != nil {
runtime.HandleError(fmt.Errorf("Failed to create data file named %q: %s", c.csvFilename, err))
} else {
c.csvFile = csvFile
defer csvFile.Close()
}
go c.informer.Run(stopCh)
// Wait for all involved caches to be synced, before processing items from the queue is started
if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
return
}
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
klog.Info("Stopping Object Logging controller")
}
func (c *Controller) runWorker() {
for c.processNextItem() {
}
}
func (c *Controller) ObserveResourceVersion(obj interface{}) {
switch o := obj.(type) {
case cache.DeletedFinalStateUnknown:
klog.V(5).Infof("Recursing for %#+v @ %#p\n", obj, obj)
c.ObserveResourceVersion(o.Obj)
case cache.ExplicitKey:
klog.V(5).Infof("Got ExplicitKey %q\n", o)
return
default:
meta, err := apimeta.Accessor(obj)
if err != nil {
klog.V(5).Infof("apimeta.Accessor(%#+v) threw %#+v\n", obj, err)
return
}
rvS := meta.GetResourceVersion()
rvU, err := strconv.ParseUint(rvS, 10, 64)
if err != nil {
klog.V(5).Infof("Error parsing ResourceVersion %q of %#+v: %#+v\n", rvS, obj, err)
} else if c.rvGauge != nil {
klog.V(5).Infof("Observing ResourceVersion %d of %#+v @ %#p\n", rvU, obj, obj)
c.rvGauge.Set(float64(rvU))
}
}
}
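// Note (editorial sketch): the DeletedFinalStateUnknown case matters because
// an informer that missed a delete event later delivers a tombstone wrapping
// the last known state; recursing on o.Obj still records that object's
// ResourceVersion instead of silently dropping it.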
func main() {
var kubeconfig string
var master string
var useProtobuf bool
var dataFilename string
var numThreads int
var noCompare bool
klog.InitFlags(nil)
flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
flag.StringVar(&master, "master", "", "master url")
flag.BoolVar(&useProtobuf, "useProtobuf", false, "indicates whether to encode objects with protobuf (as opposed to JSON)")
flag.StringVar(&dataFilename, "data-filename", "/tmp/obj-log.csv", "name of CSV file to create")
flag.IntVar(&numThreads, "threads", 1, "number of worker threads")
flag.BoolVar(&noCompare, "no-compare", false, "omit comparing object values")
flag.Set("logtostderr", "true")
flag.Parse()
// creates the connection
config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig)
if err != nil {
klog.Fatal(err)
}
klog.Infof("Config.Host=%q\n", config.Host)
klog.Infof("Config.APIPath=%q\n", config.APIPath)
myAddr := GetHostAddr()
klog.Infof("Using %s as my host address\n", myAddr)
config.UserAgent = fmt.Sprintf("obj-logger@%s", myAddr)
if useProtobuf {
config.ContentType = "application/vnd.kubernetes.protobuf"
}
// creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
klog.Fatal(err)
}
informerFactory := informers.NewSharedInformerFactory(clientset, 0)
cfgMapInformer := informerFactory.Core().V1().ConfigMaps()
informer := cfgMapInformer.Informer()
lister := cfgMapInformer.Lister()
// create the workqueue
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
controller := NewController(queue, informer, lister, !noCompare, dataFilename, myAddr)
// Bind the workqueue to a cache with the help of an informer. This way we make sure that
// whenever the cache is updated, the object key is added to the workqueue.
// Note that when we finally process the item from the workqueue, we might see a newer version
// of the object than the version which was responsible for triggering the update.
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
now := time.Now()
klog.V(4).Infof("ADD %+v @ %#p\n", obj, obj)
controller.ObserveResourceVersion(obj)
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedAdds++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", obj, err)
}
},
UpdateFunc: func(oldobj interface{}, newobj interface{}) {
now := time.Now()
klog.V(4).Infof("UPDATE %#v @ %#p\n", newobj, newobj)
controller.ObserveResourceVersion(newobj)
key, err := cache.MetaNamespaceKeyFunc(newobj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedUpdates++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", newobj, err)
}
},
DeleteFunc: func(obj interface{}) {
now := time.Now()
klog.V(4).Infof("DELETE %#v @ %#p\n", obj, obj)
controller.ObserveResourceVersion(obj)
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function.
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedDeletes++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", obj, err)
}
},
})
// Now let's start the controller
stop := make(chan struct{})
defer close(stop)
go controller.Run(numThreads, stop)
// Serve Prometheus metrics
http.Handle("/metrics", promhttp.Handler())
go func() {
klog.Error(http.ListenAndServe(MetricsAddr, nil))
}()
// Wait forever
select {}
}
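// Usage sketch (editorial; the binary name is illustrative, the flags are the
// ones defined above, and -v is klog's verbosity flag):
//
// ./configmap-logger -kubeconfig=$HOME/.kube/config -threads=4 \
// -data-filename=/tmp/obj-log.csv -v=4
//
// Prometheus can then scrape http://<host>:9101/metrics.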
func formatTime(t time.Time) string {
t = t.UTC()
Y, M, D := t.Date()
h, m, s := t.Clock()
ms := t.Nanosecond() / 1000000
return fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d.%03d", Y, M, D, h, m, s, ms)
}
func formatTimeNoMillis(t time.Time) string {
t = t.UTC()
Y, M, D := t.Date()
h, m, s := t.Clock()
return fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d", Y, M, D, h, m, s)
}
func ConfigMapQuickEqual(x, y *corev1.ConfigMap) bool {
if x == y {
return true
}
if x == nil || y == nil {
return false
}
return x.Name == y.Name && x.Namespace == y.Namespace &&
x.UID == y.UID && x.ResourceVersion == y.ResourceVersion &&
MapStringStringEqual(x.Data, y.Data) &&
MapStringStringEqual(x.Labels, y.Labels) &&
MapStringStringEqual(x.Annotations, y.Annotations)
}
func MapStringStringEqual(x, y map[string]string) bool {
if x == nil {
return y == nil
} else if y == nil {
return false
}
if len(x) != len(y) {
return false
}
for k, v := range x |
return true
}
| {
if y[k] != v {
return false
}
} | conditional_block |
main.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"io"
"net/http"
"os"
"strconv"
"sync"
"time"
"k8s.io/klog/v2"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
// meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/workqueue"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
// The HTTP port under which the scraping endpoint ("/metrics") is served.
MetricsAddr = ":9101"
// The HTTP path under which the scraping endpoint ("/metrics") is served.
MetricsPath = "/metrics"
// The namespace, subsystem and name of the histogram collected by this controller.
HistogramNamespace = "scaletest"
HistogramSubsystem = "configmaps"
CreateHistogramName = "create_latency_seconds"
UpdateHistogramName = "update_latency_seconds"
// The names of the annotations holding the client-side creation and update timestamps.
CreateTimestampAnnotation = "scaletest/createTimestamp"
UpdateTimestampAnnotation = "scaletest/updateTimestamp"
// The layout of the annotation holding the client-side creation timestamp.
CreateTimestampLayout = "2006-01-02 15:04:05.000 -0700"
)
type Controller struct {
myAddr string
compare bool
queue workqueue.RateLimitingInterface
informer cache.Controller
lister corev1listers.ConfigMapLister
csvFilename string
csvFile io.Writer
// Histograms to collect latency samples
createLatencyHistogram *prometheus.HistogramVec
updateLatencyHistogram *prometheus.HistogramVec
// Counters for unusual events
updateCounter, strangeCounter *prometheus.CounterVec
duplicateCounter prometheus.Counter
// Gauge for the latest observed ResourceVersion
rvGauge prometheus.Gauge
sync.Mutex
// Data about each object, accessed under the mutex
objects map[string]*ObjectData
}
type ObjectData struct {
sync.Mutex
actuallyExists bool
ObjectQueueData
lastSeen *corev1.ConfigMap
}
// ObjectQueueData says what has happened since the last time
// a reference to the object was dequeued for logging.
type ObjectQueueData struct {
firstEnqueue, lastEnqueue time.Time
queuedAdds, queuedUpdates, queuedDeletes uint
}
var zeroObjectQueueData ObjectQueueData
var dummyTime = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
func (c *Controller) getObjectData(key string, addIfMissing, deleteIfPresent bool) *ObjectData {
c.Lock()
defer c.Unlock()
od := c.objects[key]
if od == nil {
od = &ObjectData{}
if addIfMissing {
c.objects[key] = od
}
} else if deleteIfPresent {
delete(c.objects, key)
}
return od
}
func NewController(queue workqueue.RateLimitingInterface, informer cache.Controller, lister corev1listers.ConfigMapLister, compare bool, csvFilename, myAddr string) *Controller {
createHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: CreateHistogramName,
Help: "Configmap creation notification latency, in seconds",
Buckets: []float64{-0.1, 0, 0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8},
},
[]string{"logger"},
)
if err := prometheus.Register(createHistogram); err != nil {
klog.Error(err)
createHistogram = nil
} else {
createHistogram.With(prometheus.Labels{"logger": myAddr}).Observe(0)
}
updateHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: UpdateHistogramName,
Help: "Configmap update notification latency, in seconds",
Buckets: []float64{-0.1, 0, 0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8},
},
[]string{"logger"},
)
if err := prometheus.Register(updateHistogram); err != nil {
klog.Error(err)
updateHistogram = nil
} else {
updateHistogram.With(prometheus.Labels{"logger": myAddr}).Observe(0)
}
updateCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "updates",
Help: "number of updates dequeued",
},
[]string{"logger"},
)
if err := prometheus.Register(updateCounter); err != nil {
klog.Error(err)
updateCounter = nil
} else {
updateCounter.With(prometheus.Labels{"logger": myAddr}).Add(0)
}
strangeCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "stranges",
Help: "number of strange situations dequeued",
},
[]string{"logger"},
)
if err := prometheus.Register(strangeCounter); err != nil {
klog.Error(err)
strangeCounter = nil
} else {
strangeCounter.With(prometheus.Labels{"logger": myAddr}).Add(0)
}
duplicateCounter := prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "duplicates",
Help: "number of duplicates dequeued",
ConstLabels: map[string]string{"logger": myAddr},
})
if err := prometheus.Register(duplicateCounter); err != nil {
klog.Error(err)
duplicateCounter = nil
} else {
duplicateCounter.Add(0)
}
rvGauge := prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: HistogramNamespace,
Subsystem: HistogramSubsystem,
Name: "resourceVersion",
Help: "latest ResourceVersion observed",
ConstLabels: map[string]string{"logger": myAddr},
})
if err := prometheus.Register(rvGauge); err != nil {
klog.Error(err)
rvGauge = nil
}
return &Controller{
myAddr: myAddr,
compare: compare,
informer: informer,
queue: queue,
lister: lister,
csvFilename: csvFilename,
createLatencyHistogram: createHistogram,
updateLatencyHistogram: updateHistogram,
updateCounter: updateCounter,
strangeCounter: strangeCounter,
duplicateCounter: duplicateCounter,
rvGauge: rvGauge,
objects: make(map[string]*ObjectData),
}
}
func (c *Controller) processNextItem() bool {
// Wait until there is a new item in the working queue
key, quit := c.queue.Get()
if quit {
return false
}
// Tell the queue that we are done with processing this key. This unblocks the key for other workers
// This allows safe parallel processing because two objects with the same key are never processed in
// parallel.
defer c.queue.Done(key)
// Invoke the method containing the business logic
err := c.logDequeue(key.(string))
// Handle the error if something went wrong during the execution of the business logic
c.handleErr(err, key)
return true
}
func (c *Controller) logDequeue(key string) error {
now := time.Now()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("Failed to split key %q: %v", key, err))
return nil
}
obj, err := c.lister.ConfigMaps(namespace).Get(name)
if err != nil && !apierrors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("Fetching object with key %s from store failed with %v", key, err))
return nil
}
desireExist := err == nil
od := c.getObjectData(key, desireExist, !desireExist)
op := "delete"
var creationTime time.Time = dummyTime
if obj != nil {
creationTime = obj.ObjectMeta.CreationTimestamp.Time
}
var oqd ObjectQueueData
var lastSeen *corev1.ConfigMap
func() {
od.Lock()
defer od.Unlock()
oqd = od.ObjectQueueData
if c.compare {
lastSeen = od.lastSeen
od.lastSeen = obj.DeepCopy()
}
if desireExist {
if od.actuallyExists {
op = "update"
} else {
op = "create"
}
od.ObjectQueueData = zeroObjectQueueData
od.actuallyExists = true
}
}()
var diff int
if c.compare {
if ConfigMapQuickEqual(lastSeen, obj) {
diff = 2
if c.duplicateCounter != nil {
c.duplicateCounter.Add(1)
}
} else {
diff = 3
}
}
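// diff encodes the comparison outcome for the CSV row: 0 means comparing
// is disabled, 2 means the notification is a duplicate of the last seen
// object, and 3 means the object actually changed.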
// Log it
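// A row therefore looks roughly like (values are illustrative only):
//   2000-01-02 03:04:05.678,update,"default/cm-1",2000-01-02 03:04:00,0,1,0,2000-01-02 03:04:05.100,2000-01-02 03:04:05.100,3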
if c.csvFile != nil {
_, err = c.csvFile.Write([]byte(fmt.Sprintf("%s,%s,%q,%s,%d,%d,%d,%s,%s,%d\n",
formatTime(now), op, key, formatTimeNoMillis(creationTime),
oqd.queuedAdds, oqd.queuedUpdates, oqd.queuedDeletes,
formatTime(oqd.firstEnqueue), formatTime(oqd.lastEnqueue),
diff,
)))
if err != nil {
runtime.HandleError(fmt.Errorf("Error writing to CSV file named %q: %+v", c.csvFilename, err))
}
} else {
klog.V(4).Infof("c.csvFile == nil\n")
}
if diff == 2 {
return nil
}
if oqd.queuedAdds+oqd.queuedUpdates+oqd.queuedDeletes != 1 {
if c.strangeCounter != nil {
c.strangeCounter.
With(prometheus.Labels{"logger": c.myAddr}).
Add(1)
}
} else if oqd.queuedUpdates == 1 && c.updateCounter != nil {
c.updateCounter.
With(prometheus.Labels{"logger": c.myAddr}).
Add(1)
}
if op != "delete" && obj != nil && obj.Annotations != nil {
var ctS string
var latencyHistogram *prometheus.HistogramVec
if op == "create" {
ctS = obj.Annotations[CreateTimestampAnnotation]
latencyHistogram = c.createLatencyHistogram
} else {
ctS = obj.Annotations[UpdateTimestampAnnotation]
latencyHistogram = c.updateLatencyHistogram
}
if ctS != "" && latencyHistogram != nil {
clientTime, err := time.Parse(CreateTimestampLayout, ctS)
if err != nil {
return nil
}
latency := now.Sub(clientTime)
klog.V(4).Infof("Latency = %v for op=%s, key=%s, now=%s, clientTime=%s, ts=%s\n", latency, op, key, now, clientTime, ctS)
latencyHistogram.
With(prometheus.Labels{"logger": c.myAddr}).
Observe(latency.Seconds())
}
}
return nil
}
// handleErr checks if an error happened and makes sure we will retry later.
func (c *Controller) handleErr(err error, key interface{}) {
if err == nil {
// Forget about the #AddRateLimited history of the key on every successful synchronization.
// This ensures that future processing of updates for this key is not delayed because of
// an outdated error history.
c.queue.Forget(key)
return
}
klog.Infof("Error syncing ConfigMap %v: %v", key, err)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.
c.queue.AddRateLimited(key)
return
}
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
defer runtime.HandleCrash()
// Let the workers stop when we are done
defer c.queue.ShutDown()
klog.Info("Starting Object Logging controller")
csvFile, err := os.Create(c.csvFilename)
if err != nil {
runtime.HandleError(fmt.Errorf("Failed to create data file named %q: %s", c.csvFilename, err))
} else {
c.csvFile = csvFile
defer csvFile.Close()
}
go c.informer.Run(stopCh)
// Wait for all involved caches to be synced, before processing items from the queue is started
if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
return
}
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
klog.Info("Stopping Object Logging controller")
}
func (c *Controller) runWorker() {
for c.processNextItem() {
}
}
func (c *Controller) ObserveResourceVersion(obj interface{}) {
switch o := obj.(type) {
case cache.DeletedFinalStateUnknown:
klog.V(5).Infof("Recursing for %#+v @ %#p\n", obj, obj)
c.ObserveResourceVersion(o.Obj)
case cache.ExplicitKey:
klog.V(5).Infof("Got ExplicitKey %q\n", o)
return
default:
meta, err := apimeta.Accessor(obj)
if err != nil {
klog.V(5).Infof("apimeta.Accessor(%#+v) threw %#+v\n", obj, err)
return
}
rvS := meta.GetResourceVersion()
rvU, err := strconv.ParseUint(rvS, 10, 64)
if err != nil {
klog.V(5).Infof("Error parsing ResourceVersion %q of %#+v: %#+v\n", rvS, obj, err)
} else {
klog.V(5).Infof("Observing ResourceVersion %d of %#+v @ %#p\n", rvU, obj, obj)
if c.rvGauge != nil {
c.rvGauge.Set(float64(rvU))
}
}
}
}
func main() {
var kubeconfig string
var master string
var useProtobuf bool
var dataFilename string
var numThreads int
var noCompare bool
klog.InitFlags(nil)
flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
flag.StringVar(&master, "master", "", "master url")
flag.BoolVar(&useProtobuf, "useProtobuf", false, "indicates whether to encode objects with protobuf (as opposed to JSON)")
flag.StringVar(&dataFilename, "data-filename", "/tmp/obj-log.csv", "name of CSV file to create")
flag.IntVar(&numThreads, "threads", 1, "number of worker threads")
flag.BoolVar(&noCompare, "no-compare", false, "omit comparing object values")
flag.Set("logtostderr", "true")
flag.Parse()
// creates the connection
config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig)
if err != nil {
klog.Fatal(err)
}
klog.Infof("Config.Host=%q\n", config.Host)
klog.Infof("Config.APIPath=%q\n", config.APIPath)
myAddr := GetHostAddr()
klog.Infof("Using %s as my host address\n", myAddr)
config.UserAgent = fmt.Sprintf("obj-logger@%s", myAddr)
if useProtobuf {
config.ContentType = "application/vnd.kubernetes.protobuf"
}
// creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
klog.Fatal(err)
}
informerFactory := informers.NewSharedInformerFactory(clientset, 0)
cfgMapInformer := informerFactory.Core().V1().ConfigMaps()
informer := cfgMapInformer.Informer()
lister := cfgMapInformer.Lister()
// create the workqueue
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
controller := NewController(queue, informer, lister, !noCompare, dataFilename, myAddr)
// Bind the workqueue to a cache with the help of an informer. This way we make sure that
// whenever the cache is updated, the object key is added to the workqueue.
// Note that when we finally process the item from the workqueue, we might see a newer version
// of the object than the version which was responsible for triggering the update.
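// Each handler below records per-key enqueue statistics (add/update/delete
// counts plus first and last enqueue times) under the ObjectData lock
// before adding the key to the queue; logDequeue resets them on dequeue.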
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
now := time.Now()
klog.V(4).Infof("ADD %+v @ %#p\n", obj, obj)
controller.ObserveResourceVersion(obj)
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedAdds++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", obj, err)
}
},
UpdateFunc: func(oldobj interface{}, newobj interface{}) {
now := time.Now()
klog.V(4).Infof("UPDATE %#v @ %#p\n", newobj, newobj)
controller.ObserveResourceVersion(newobj)
key, err := cache.MetaNamespaceKeyFunc(newobj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedUpdates++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", newobj, err)
}
},
DeleteFunc: func(obj interface{}) {
now := time.Now()
klog.V(4).Infof("DELETE %#v @ %#p\n", obj, obj)
controller.ObserveResourceVersion(obj)
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function.
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
od := controller.getObjectData(key, true, false)
od.Lock()
defer od.Unlock()
if od.queuedAdds+od.queuedUpdates+od.queuedDeletes == 0 {
od.firstEnqueue = now
}
od.lastEnqueue = now
od.queuedDeletes++
queue.Add(key)
} else {
klog.Errorf("Failed to parse key from obj %#v: %v\n", obj, err)
}
},
})
// Now let's start the controller
stop := make(chan struct{})
defer close(stop)
go controller.Run(numThreads, stop)
// Serve Prometheus metrics
http.Handle("/metrics", promhttp.Handler())
go func() {
klog.Error(http.ListenAndServe(MetricsAddr, nil))
}()
// Wait forever
select {}
}
func formatTime(t time.Time) string {
t = t.UTC()
Y, M, D := t.Date()
h, m, s := t.Clock()
ms := t.Nanosecond() / 1000000
return fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d.%03d", Y, M, D, h, m, s, ms)
}
func formatTimeNoMillis(t time.Time) string {
t = t.UTC()
Y, M, D := t.Date()
h, m, s := t.Clock()
return fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d", Y, M, D, h, m, s)
}
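// ConfigMapQuickEqual is a cheap approximation of equality: identifying
// metadata plus Data, Labels and Annotations. That is enough to classify
// duplicate notifications here without paying for a full DeepEqual.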
func ConfigMapQuickEqual(x, y *corev1.ConfigMap) bool {
if x == y {
return true
}
if x == nil || y == nil {
return false
}
return x.Name == y.Name && x.Namespace == y.Namespace &&
x.UID == y.UID && x.ResourceVersion == y.ResourceVersion &&
MapStringStringEqual(x.Data, y.Data) &&
MapStringStringEqual(x.Labels, y.Labels) &&
MapStringStringEqual(x.Annotations, y.Annotations)
}
func MapStringStringEqual(x, y map[string]string) bool {
if x == nil {
return y == nil
} else if y == nil {
return false
}
if len(x) != len(y) {
return false
}
for k, v := range x {
if y[k] != v {
return false
}
}
return true
}
types.rs | use super::parser::{Expr, Literal, Pattern};
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Type {
Int64,
Int32,
Float,
Dataset(im::HashMap<String, Type>),
/// T -> U
TyArr(Box<Type>, Box<Type>),
/// Type variable
TyVar(String),
// Data type
TyCon(String),
}
static COUNTER: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(1);
fn get_id() -> usize {
COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
}
fn get_item_type<'a>(
items: &[Expr],
env: &im::HashMap<String, Scheme>,
) -> Result<TypeRes<'a>, String> {
let mut ty = Type::TyVar(get_id().to_string());
for x in items {
let (_subs, ty2) = x.get_type(env)?;
let subs = unify(&ty, &ty2)?;
ty = apply_sub_type(&subs, &ty);
}
Ok((im::HashMap::new(), ty))
}
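// get_item_type folds unification over a dataset column: starting from a
// fresh type variable, each row's type is unified in turn, so a column of
// Int64 rows infers Int64 while a mixed Int64/Float column fails (see
// test_multiple_rows_incompatible below).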
type TypeRes<'a> = (im::HashMap<String, Type>, Type);
pub type Scheme = (im::HashSet<String>, Type);
type Subs<'a> = &'a im::HashMap<String, Type>;
fn apply_sub_type(subs: Subs, ty: &Type) -> Type {
match ty {
Type::TyVar(name) => subs.get(name).unwrap_or_else(|| &ty).clone(),
Type::TyArr(t1, t2) => Type::TyArr(
Box::new(apply_sub_type(subs, t1)),
Box::new(apply_sub_type(subs, t2)),
),
_ => ty.clone(),
}
}
fn apply_sub_scheme(subs: Subs, scheme: Scheme) -> Scheme {
let mut subs1 = subs.clone();
for key in scheme.0.iter() {
subs1 = subs1.without(key);
}
let ty = apply_sub_type(&subs1, &scheme.1);
(scheme.0, ty)
}
fn apply_sub_env(
subs: &im::HashMap<String, Type>,
env: &im::HashMap<String, Scheme>,
) -> im::HashMap<String, Scheme> {
let mut h = im::HashMap::new();
for (key, value) in env.into_iter() {
h = h.update(key.to_string(), apply_sub_scheme(subs, value.clone()));
}
h
}
fn compose(subs: Subs, subs2: Subs) -> im::HashMap<String, Type> {
let mut h = im::HashMap::new();
for (key, value) in subs.into_iter() {
h = h.update(key.to_string(), apply_sub_type(subs, &value.clone()));
}
h.union(subs2.clone())
}
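// Note the asymmetry: `subs` is applied to its own entries before the
// union, and im's union keeps the left map's value on key conflicts, so
// entries from `subs` shadow same-named entries from `subs2`.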
fn ftv_ty(ty: &Type) -> im::HashSet<String> {
match ty {
Type::TyVar(a) => im::HashSet::unit(a.clone()),
Type::TyArr(ty1, ty2) => {
let x = ftv_ty(ty1);
let y = ftv_ty(ty2);
x.union(y)
}
_ => im::HashSet::new(),
}
}
fn ftv_env(env: &im::HashMap<String, Scheme>) -> im::HashSet<String> {
let ftvs = env.values().map(|x| ftv_ty(&x.1));
im::HashSet::unions(ftvs)
}
fn generalize(env: &im::HashMap<String, Scheme>, ty: &Type) -> Scheme {
let xs = ftv_ty(ty);
let ys = ftv_env(env);
let a = xs.difference(ys);
(a, ty.clone())
}
fn unify(ty1: &Type, ty2: &Type) -> Result<im::HashMap<String, Type>, String> {
match (ty1, ty2) {
(Type::TyArr(l, r), Type::TyArr(l1, r1)) => {
let s1 = unify(l, l1)?;
let s2 = unify(&apply_sub_type(&s1, &r), &apply_sub_type(&s1, &r1))?;
Ok(compose(&s2, &s1))
}
(Type::TyVar(a), t) => bind(&a, &t),
(t, Type::TyVar(a)) => bind(&a, &t),
(t1, t2) => {
if t1 == t2 {
Ok(im::HashMap::new())
} else {
Err("UnificationFail".to_string())
}
}
}
}
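// bind below records `var := ty` unless doing so would build an infinite
// type: the ftv containment test is the classic occurs check, e.g.
// unifying `a` with `a -> a` must fail rather than produce a = a -> a.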
fn bind(var: &str, ty: &Type) -> Result<im::HashMap<String, Type>, String> {
if let Type::TyVar(x) = ty {
if var == x {
return Ok(im::HashMap::new());
}
}
if ftv_ty(ty).contains(var) {
return Err("Infinite Type".to_string());
}
Ok(im::HashMap::new().update(var.to_string(), ty.clone()))
}
fn type_pat(
env: &im::HashMap<String, Scheme>,
case_type: &Type,
pattern: &Pattern,
) -> Result<im::HashMap<String, Type>, String> {
// todo vars / wildcards, etc
let (_s, ty) = env.get(pattern.name).unwrap();
unify(case_type, ty)
}
/// Converts inner type of dataset
fn convert_inner(
env: &im::HashMap<String, Scheme>,
key: &str,
items: &[Expr],
) -> Result<(String, Type), String> {
let (_s, ty) = get_item_type(items, env)?;
Ok((key.to_string(), ty))
}
// Type inference using http://dev.stephendiehl.com/fun/006_hindley_milner.html#substitution
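// Every arm returns a (substitution, type) pair. LetIn is the classic
// let-generalization step: infer expr1, generalize its type against the
// substituted environment, then type expr2 under the extended scheme.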
impl<'a> Expr<'_> {
pub fn get_type(&self, env: &im::HashMap<String, Scheme>) -> Result<TypeRes, String> {
match self {
Expr::Literal(l) => Ok((im::HashMap::new(), l.get_type())),
Expr::Ref(x) => {
let err = format!("Could not find reference {}", x);
let ty = env.get(*x).cloned().ok_or(err)?;
Ok((im::HashMap::new(), ty.1))
}
Expr::LetIn(x) => {
let (s1, t1) = x.expr1.expr.get_type(env)?;
let env1 = apply_sub_env(&s1, env);
let t2 = generalize(&env1, &t1);
let extended_ty = env.update(x.name.to_string(), t2);
let (s2, t2) = x.expr2.expr.get_type(&extended_ty)?;
Ok((compose(&s1, &s2), t2))
}
Expr::DataSet(items) => {
let d: im::HashMap<String, Type> = items
.iter()
.map(|(k, items)| convert_inner(env, k, items))
.flatten()
.collect();
if d.len() == items.len() {
Ok((im::HashMap::new(), Type::Dataset(d)))
} else {
Err("Not all rows matched in type".to_string())
}
}
Expr::Lambda(name, expr) => {
let type_var = Type::TyVar(get_id().to_string()); //fresh();
let env1 = env.update((*name).to_string(), (im::HashSet::new(), type_var.clone()));
let (sub, t1) = expr.expr.get_type(&env1)?;
let substituted = apply_sub_type(&sub, &type_var);
Ok((sub, Type::TyArr(Box::new(substituted), Box::new(t1))))
}
Expr::App(expr1, expr2) => {
let tv = Type::TyVar(get_id().to_string());
let (s1, t1) = expr1.get_type(env)?;
let (s2, t2) = expr2.get_type(&apply_sub_env(&s1, env))?;
let s3 = unify(
&apply_sub_type(&s2, &t1),
&Type::TyArr(Box::new(t2), Box::new(tv.clone())),
)?;
Ok((compose(&compose(&s3, &s2), &s1), apply_sub_type(&s3, &tv)))
}
Expr::Match(expr, exprs) => {
let (mut subs, case_type) = expr.get_type(env)?;
let mut branch_type = Type::TyVar(get_id().to_string());
for (p, branch) in exprs {
// TODO check, test
let pat_sub = type_pat(env, &case_type, p)?;
subs = compose(&subs, &pat_sub);
let (s, n_branch_type) = branch.get_type(env)?;
subs = compose(&subs, &s);
let cur_branch_type = apply_sub_type(&subs, &n_branch_type);
let s2 = unify(&branch_type, &cur_branch_type)?;
subs = compose(&subs, &s2);
branch_type = apply_sub_type(&subs, &branch_type);
}
Ok((subs, branch_type))
}
Expr::Projection(names, expr) => {
let from_ty = expr.get_type(env)?;
match from_ty {
(_s, Type::Dataset(items)) => {
if names
.iter()
.filter(|x| !items.contains_key(&x.to_string()))
.count()
> 0
{
// TODO; improve error
return Err("Not all fields in dataset".to_string());
}
Ok((
im::HashMap::new(),
Type::Dataset(
items
.iter()
.filter(|(k, _v)| names.contains(&&*k.to_string()))
.map(|(k, v)| (k.to_string(), v.clone()))
.collect(),
),
))
}
_ => Err("Expected dataset".to_string()),
}
}
x => Err(format!("not implemented {:?}", x)),
}
}
}
impl Literal {
fn get_type(&self) -> Type {
match self {
Literal::Int64(_) => Type::Int64,
Literal::Int32(_) => Type::Int32,
Literal::Float(_) => Type::Float,
}
}
}
#[cfg(test)]
use super::parser::{expression, Span};
#[test]
fn test_type() {
assert_eq!(Literal::Float(1.0).get_type(), Type::Float);
assert_eq!(Literal::Int64(1).get_type(), Type::Int64);
}
#[test]
fn test_type_let() {
let (_, expr) = expression(Span::new("let x = 1 in x")).unwrap();
assert_eq!(expr.get_type(&im::HashMap::new()).unwrap().1, Type::Int64);
}
#[test]
fn test_type_lam() {
let (_, expr) = expression(Span::new(r"\x -> x")).unwrap();
let ty = expr.get_type(&im::HashMap::new()).unwrap().1;
match ty {
Type::TyArr(x, y) => assert_eq!(x, y),
_ => panic!("Did not expect non-tyarr result"),
}
}
#[test]
fn test_type_lam_app() {
let (_, expr) = expression(Span::new(r"let id = \x -> x in id 1")).unwrap();
let ty = expr.get_type(&im::HashMap::new()).unwrap().1;
assert_eq!(ty, Type::Int64);
}
#[test]
fn test_type_sql() {
let (_, expr) = expression(Span::new("let t = {a\n1} in select a from t")).unwrap();
let ty = expr.get_type(&im::HashMap::new()).unwrap().1;
assert_eq!(
ty,
Type::Dataset([("a".to_string(), Type::Int64)].iter().cloned().collect())
);
}
#[test]
fn test_multiple_rows() {
let (_, expr) = expression(Span::new("let t = {a\n1\n2\n3} in t")).unwrap();
let ty = expr.get_type(&im::HashMap::new()).unwrap().1;
assert_eq!(
ty,
Type::Dataset([("a".to_string(), Type::Int64)].iter().cloned().collect())
);
}
#[test]
fn test_multiple_rows_incompatible() {
let (_, expr) = expression(Span::new("let t = {a\n1\n2\n3.0} in t")).unwrap();
let ty = expr.get_type(&im::HashMap::new());
assert!(ty.is_err());
}
util.rs | use super::{
converter::BaseConvertInfo,
flags::RuntimeHelper,
ir::{JsExpr as Js, VNodeIR},
parser::{Directive, DirectiveArg, ElemProp, Element},
scanner::Attribute,
};
use std::{
borrow::{Borrow, BorrowMut},
cell::UnsafeCell,
marker::PhantomData,
ops::Deref,
};
#[macro_export]
macro_rules! cast {
($target: expr, $pat: path) => {{
if let $pat(a, ..) = $target {
a
} else {
panic!("mismatch variant when cast to {}", stringify!($pat));
}
}};
}
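// Illustrative use: with `let e = Js::Symbol(helper);` (hypothetical
// binding), `cast!(e, Js::Symbol)` yields `helper`, while any other
// variant hits the panic branch with the mismatch message above.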
mod decode_html;
mod json;
mod named_chars;
pub mod rslint;
mod v_str;
pub use v_str::VStr;
pub fn non_whitespace(c: char) -> bool {
!c.is_ascii_whitespace()
}
pub fn get_core_component(tag: &str) -> Option<RuntimeHelper> {
use RuntimeHelper as RH;
Some(match tag {
"Teleport" | "teleport" => RH::TELEPORT,
"Suspense" | "suspense" => RH::SUSPENSE,
"KeepAlive" | "keep-alive" => RH::KEEP_ALIVE,
"BaseTransition" | "base-transition" => RH::BASE_TRANSITION,
_ => return None,
})
}
pub fn is_core_component(tag: &str) -> bool {
get_core_component(tag).is_some()
}
fn is_event_prop(prop: &str) -> bool {
let bytes = prop.as_bytes();
// equivalent to /^on[^a-z]/
bytes.len() > 2 && bytes.starts_with(b"on") && !bytes[2].is_ascii_lowercase()
}
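// e.g. "onClick" qualifies ('C' at index 2 is not lowercase), while
// "once" does not ('c' is lowercase) and a bare "on" is too short.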
pub fn is_mergeable_prop(prop: &str) -> bool {
prop == "class" || prop == "style" || is_event_prop(prop)
}
#[inline]
pub fn not_js_identifier(c: char) -> bool {
!c.is_alphanumeric() && c != '$' && c != '_'
}
pub fn is_simple_identifier(s: VStr) -> bool {
if VStr::has_affix(&s) {
return false;
}
let is_ident = |c| !not_js_identifier(c);
let raw = s.raw;
raw.chars().all(is_ident) && !raw.starts_with(|c: char| c.is_ascii_digit())
}
macro_rules! make_list {
( $($id: ident),* ) => {
&[
$(stringify!($id)),*
]
}
}
// use simple contains for small str array
// benchmark shows linear scan takes at most 10ns
// while phf or bsearch takes 30ns
const ALLOWED_GLOBALS: &[&str] = make_list![
Infinity,
undefined,
NaN,
isFinite,
isNaN,
parseFloat,
parseInt,
decodeURI,
decodeURIComponent,
encodeURI,
encodeURIComponent,
Math,
Number,
Date,
Array,
Object,
Boolean,
String,
RegExp,
Map,
Set,
JSON,
Intl,
BigInt
];
pub fn is_global_allow_listed(s: &str) -> bool {
ALLOWED_GLOBALS.contains(&s)
}
// https://github.com/vuejs/rfcs/blob/master/active-rfcs/0008-render-function-api-change.md#special-reserved-props
const RESERVED: &[&str] = make_list![
key,
ref,
onVnodeMounted,
onVnodeUpdated,
onVnodeUnmounted,
onVnodeBeforeMount,
onVnodeBeforeUpdate,
onVnodeBeforeUnmount
];
#[inline]
pub fn is_reserved_prop(tag: &str) -> bool {
RESERVED.contains(&tag)
}
pub fn is_component_tag(tag: &str) -> bool {
tag == "component" || tag == "Component"
}
pub const fn yes(_: &str) -> bool {
true
}
pub const fn no(_: &str) -> bool {
false
}
pub fn get_vnode_call_helper(v: &VNodeIR<BaseConvertInfo>) -> RuntimeHelper {
use RuntimeHelper as RH;
if v.is_block {
return if v.is_component {
RH::CREATE_BLOCK
} else {
RH::CREATE_ELEMENT_BLOCK
};
}
if v.is_component {
RH::CREATE_VNODE
} else {
RH::CREATE_ELEMENT_VNODE
}
}
pub fn is_builtin_symbol(tag: &Js, helper: RuntimeHelper) -> bool {
if let Js::Symbol(r) = tag {
r == &helper
} else {
false
}
}
pub trait PropPattern {
fn matches(&self, name: &str) -> bool;
}
impl PropPattern for &str {
fn matches(&self, name: &str) -> bool {
name == *self
}
}
impl<F> PropPattern for F
where
F: Fn(&str) -> bool,
{
fn matches(&self, name: &str) -> bool {
self(name)
}
}
impl<const N: usize> PropPattern for [&'static str; N] {
fn matches(&self, name: &str) -> bool {
self.contains(&name)
}
}
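// A pattern can be a name, a predicate, or a fixed-size list; all of the
// following are accepted by the finders below (calls are illustrative):
//     find_dir(&elem, "if");
//     find_dir(&elem, ["if", "else-if"]);
//     find_dir(&elem, |n: &str| n.starts_with("slot"));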
type NameExp<'a> = Option<(&'a str, Option<VStr<'a>>)>;
pub trait PropMatcher<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a>;
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self;
fn take(prop: ElemProp<'a>) -> Self;
fn is_match<P>(p: &ElemProp<'a>, pat: &P, allow_empty: bool) -> bool
where
P: PropPattern,
{
Self::get_name_and_exp(p).map_or(false, |(name, exp)| {
pat.matches(name) && (allow_empty || !exp.map_or(true, |v| v.is_empty()))
})
}
}
pub fn is_bind_key<'a>(arg: &Option<DirectiveArg<'a>>, name: &str) -> bool {
get_bind_key(arg).map_or(false, |v| v == name)
}
fn get_bind_key<'a>(arg: &Option<DirectiveArg<'a>>) -> Option<&'a str> {
if let DirectiveArg::Static(name) = arg.as_ref()? {
Some(name)
} else {
None
}
}
impl<'a> PropMatcher<'a> for ElemProp<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a> {
match prop {
ElemProp::Attr(Attribute { name, value, .. }) => {
let exp = value.as_ref().map(|v| v.content);
Some((name, exp))
}
ElemProp::Dir(dir @ Directive { name: "bind", .. }) => {
let name = get_bind_key(&dir.argument)?;
let exp = dir.expression.as_ref().map(|v| v.content);
Some((name, exp))
}
_ => None,
}
}
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self {
prop
}
fn take(prop: ElemProp<'a>) -> Self {
prop
}
}
impl<'a> PropMatcher<'a> for Directive<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a> {
if let ElemProp::Dir(Directive {
name, expression, ..
}) = prop
{
let exp = expression.as_ref().map(|v| v.content);
Some((name, exp))
} else {
None
}
}
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self {
if let ElemProp::Dir(dir) = prop {
return dir;
}
unreachable!("invalid call")
}
fn take(prop: ElemProp<'a>) -> Self {
if let ElemProp::Dir(dir) = prop {
return dir;
}
unreachable!("invalid call")
}
}
pub struct PropFound<'a, E, M>
where
E: Borrow<Element<'a>>,
M: PropMatcher<'a>,
{
elem: E,
pos: usize,
m: PhantomData<&'a M>,
}
impl<'a, E, M> PropFound<'a, E, M>
where
E: Borrow<Element<'a>>,
M: PropMatcher<'a>,
{
fn new(elem: E, pos: usize) -> Option<Self> {
Some(Self {
elem,
pos,
m: PhantomData,
})
}
pub fn get_ref(&self) -> &M {
M::get_ref(&self.elem.borrow().properties[self.pos])
}
}
// take is only available when access is mutable
impl<'a, E, M> PropFound<'a, E, M>
where
E: BorrowMut<Element<'a>>,
M: PropMatcher<'a>,
{
pub fn take(mut self) -> M {
// TODO: avoid O(n) behavior
M::take(self.elem.borrow_mut().properties.remove(self.pos))
}
}
type DirFound<'a, E> = PropFound<'a, E, Directive<'a>>;
// sometimes mutable access to the element is not available so
// Borrow is used to refine PropFound so `take` is optional
pub fn dir_finder<'a, E, P>(elem: E, pat: P) -> PropFinder<'a, E, P, Directive<'a>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat)
}
pub fn find_dir<'a, E, P>(elem: E, pat: P) -> Option<DirFound<'a, E>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).find()
}
pub fn find_dir_empty<'a, E, P>(elem: E, pat: P) -> Option<DirFound<'a, E>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).allow_empty().find()
}
pub struct PropFinder<'a, E, P, M = ElemProp<'a>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
M: PropMatcher<'a>,
{
elem: E,
pat: P,
allow_empty: bool,
filter: fn(&ElemProp<'a>) -> bool,
m: PhantomData<&'a M>,
}
impl<'a, E, P, M> PropFinder<'a, E, P, M>
where
E: Borrow<Element<'a>>,
P: PropPattern,
M: PropMatcher<'a>,
{
fn new(elem: E, pat: P) -> Self {
Self {
elem,
pat,
allow_empty: false,
filter: |_| true,
m: PhantomData,
}
}
fn is_match(&self, p: &ElemProp<'a>) -> bool {
M::is_match(p, &self.pat, self.allow_empty)
}
pub fn dynamic_only(self) -> Self {
Self {
filter: |p| matches!(p, ElemProp::Dir(..)),
..self
}
}
pub fn attr_only(self) -> Self {
Self {
filter: |p| matches!(p, ElemProp::Attr(..)),
..self
}
}
pub fn find(self) -> Option<PropFound<'a, E, M>> {
let pos = self
.elem
.borrow()
.properties
.iter()
.position(|p| self.is_match(p) && (self.filter)(p))?;
PropFound::new(self.elem, pos)
}
pub fn allow_empty(self) -> Self {
Self {
allow_empty: true,
..self
}
}
}
impl<'a, P> PropFinder<'a, Element<'a>, P, ElemProp<'a>>
where
P: PropPattern + Copy,
{
pub fn find_all(self) -> impl Iterator<Item = Result<ElemProp<'a>, ElemProp<'a>>> {
let PropFinder {
elem,
pat,
allow_empty,
..
} = self;
elem.properties.into_iter().map(move |p| {
if ElemProp::is_match(&p, &pat, allow_empty) {
Ok(p)
} else {
Err(p)
}
})
}
}
pub fn find_prop<'a, E, P>(elem: E, pat: P) -> Option<PropFound<'a, E, ElemProp<'a>>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).find()
}
pub fn prop_finder<'a, E, P>(elem: E, pat: P) -> PropFinder<'a, E, P>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat)
}
// since std::lazy::Lazy is not stable
// it is not thread safe, not Sync.
// it is Send if F and T is Send
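// Minimal usage sketch (`expensive` is a stand-in; see the layman_lazy
// test below for the real thing):
//     let l = Lazy::new(|| expensive());
//     let _ = *l; // closure runs here, exactly once
//     let _ = *l; // cached value is reused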
pub struct Lazy<T, F = fn() -> T>(UnsafeCell<Result<T, Option<F>>>)
where
F: FnOnce() -> T;
impl<T, F> Lazy<T, F>
where
F: FnOnce() -> T,
{
pub fn new(f: F) -> Self {
Self(UnsafeCell::new(Err(Some(f))))
}
}
impl<T, F> Deref for Lazy<T, F>
where
F: FnOnce() -> T,
{
type Target = T;
fn deref(&self) -> &Self::Target {
let m = unsafe { &mut *self.0.get() };
let f = match m {
Ok(t) => return t,
Err(f) => f,
};
*m = Ok(f.take().unwrap()());
match m {
Ok(t) => t,
_ => panic!("unwrap Ok"),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::parser::test::mock_element;
#[test]
fn test_find_dir() {
let e = mock_element("<p v-if=true/>");
let found = find_dir(&e, "if");
let found = found.expect("should found directive");
assert_eq!(found.get_ref().name, "if");
assert_eq!(e.properties.len(), 1);
}
#[test]
fn test_find_dir_mut() {
let mut e = mock_element("<p v-if=true/>");
let found = find_dir(&mut e, "if");
let found = found.expect("should found directive");
assert_eq!(found.get_ref().name, "if");
assert_eq!(found.take().name, "if");
assert!(e.properties.is_empty());
}
#[test]
fn test_find_empty_dir() {
let e = mock_element("<p v-if=true v-for>");
assert!(find_dir(&e, "if").is_some());
assert!(find_dir(&e, "for").is_none());
let found = dir_finder(&e, "for").allow_empty().find();
assert!(found.is_some());
}
#[test]
fn test_find_prop() {
let mut e = mock_element("<p :name=foo name=bar/>");
assert!(find_dir(&e, "name").is_none());
assert!(find_dir(&e, "bind").is_some());
// prop only looks at attr and v-bind
assert!(find_prop(&e, "bind").is_none());
find_prop(&mut e, "name").unwrap().take();
assert!(find_prop(&e, "bind").is_none());
find_prop(&mut e, "name").unwrap().take();
assert!(find_prop(&e, "name").is_none());
}
#[test]
fn find_prop_ignore_dynamic_bind() {
let e = mock_element("<p :[name]=foo/>");
assert!(find_dir(&e, "name").is_none());
assert!(find_dir(&e, "bind").is_some());
assert!(find_prop(&e, "name").is_none());
}
#[test]
fn find_dynamic_only_prop() {
let e = mock_element("<p name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_none());
let e = mock_element("<p v-bind:name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_some());
let e = mock_element("<p :name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_some());
let e = mock_element("<p :[name]=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_none());
}
#[test]
fn prop_find_all() {
let e = mock_element("<p :name=foo name=bar :[name]=baz/>");
let a: Vec<_> = prop_finder(e, "name").find_all().collect();
assert_eq!(a.len(), 3);
assert!(a[0].is_ok());
assert!(a[1].is_ok());
assert!(a[2].is_err());
}
#[test]
fn layman_lazy() {
let mut test = 0;
let l = Lazy::new(|| {
test += 1;
(0..=100).sum::<i32>()
});
assert_eq!(*l, 5050);
assert_eq!(*l, 5050);
assert_eq!(test, 1);
}
}
util.rs | use super::{
converter::BaseConvertInfo,
flags::RuntimeHelper,
ir::{JsExpr as Js, VNodeIR},
parser::{Directive, DirectiveArg, ElemProp, Element},
scanner::Attribute,
};
use std::{
borrow::{Borrow, BorrowMut},
cell::UnsafeCell,
marker::PhantomData,
ops::Deref,
};
#[macro_export]
macro_rules! cast {
($target: expr, $pat: path) => {{
if let $pat(a, ..) = $target {
a
} else {
panic!("mismatch variant when cast to {}", stringify!($pat));
}
}};
}
mod decode_html;
mod json;
mod named_chars;
pub mod rslint;
mod v_str;
pub use v_str::VStr;
pub fn non_whitespace(c: char) -> bool {
!c.is_ascii_whitespace()
}
pub fn get_core_component(tag: &str) -> Option<RuntimeHelper> {
use RuntimeHelper as RH;
Some(match tag {
"Teleport" | "teleport" => RH::TELEPORT,
"Suspense" | "suspense" => RH::SUSPENSE,
"KeepAlive" | "keep-alive" => RH::KEEP_ALIVE,
"BaseTransition" | "base-transition" => RH::BASE_TRANSITION,
_ => return None,
})
}
pub fn is_core_component(tag: &str) -> bool {
get_core_component(tag).is_some()
}
fn is_event_prop(prop: &str) -> bool {
let bytes = prop.as_bytes();
// equivalent to /^on[^a-z]/
bytes.len() > 2 && bytes.starts_with(b"on") && !bytes[3].is_ascii_lowercase()
}
pub fn is_mergeable_prop(prop: &str) -> bool {
prop == "class" || prop == "style" || is_event_prop(prop)
}
#[inline]
pub fn not_js_identifier(c: char) -> bool {
!c.is_alphanumeric() && c != '$' && c != '_'
}
pub fn is_simple_identifier(s: VStr) -> bool {
if VStr::has_affix(&s) {
return false;
}
let is_ident = |c| !not_js_identifier(c);
let raw = s.raw;
raw.chars().all(is_ident) && !raw.starts_with(|c: char| c.is_ascii_digit())
}
macro_rules! make_list {
( $($id: ident),* ) => {
&[
$(stringify!($id)),*
]
}
}
// use simple contains for small str array
// benchmark shows linear scan takes at most 10ns
// while phf or bsearch takes 30ns
const ALLOWED_GLOBALS: &[&str] = make_list![
Infinity,
undefined,
NaN,
isFinite,
isNaN,
parseFloat,
parseInt,
decodeURI,
decodeURIComponent,
encodeURI,
encodeURIComponent,
Math,
Number,
Date,
Array,
Object,
Boolean,
String,
RegExp,
Map,
Set,
JSON,
Intl,
BigInt
];
pub fn is_global_allow_listed(s: &str) -> bool {
ALLOWED_GLOBALS.contains(&s)
}
// https://github.com/vuejs/rfcs/blob/master/active-rfcs/0008-render-function-api-change.md#special-reserved-props
const RESERVED: &[&str] = make_list![
key,
ref,
onVnodeMounted,
onVnodeUpdated,
onVnodeUnmounted,
onVnodeBeforeMount,
onVnodeBeforeUpdate,
onVnodeBeforeUnmount
];
#[inline]
pub fn is_reserved_prop(tag: &str) -> bool {
RESERVED.contains(&tag)
}
pub fn is_component_tag(tag: &str) -> bool {
tag == "component" || tag == "Component"
}
pub const fn yes(_: &str) -> bool {
true
}
pub const fn no(_: &str) -> bool {
false
}
pub fn get_vnode_call_helper(v: &VNodeIR<BaseConvertInfo>) -> RuntimeHelper {
use RuntimeHelper as RH;
if v.is_block {
return if v.is_component {
RH::CREATE_BLOCK
} else {
RH::CREATE_ELEMENT_BLOCK
};
}
if v.is_component {
RH::CREATE_VNODE
} else {
RH::CREATE_ELEMENT_VNODE
}
}
pub fn is_builtin_symbol(tag: &Js, helper: RuntimeHelper) -> bool {
if let Js::Symbol(r) = tag {
r == &helper
} else {
false
}
}
pub trait PropPattern {
fn matches(&self, name: &str) -> bool;
}
impl PropPattern for &str {
fn matches(&self, name: &str) -> bool {
name == *self
}
}
impl<F> PropPattern for F
where
F: Fn(&str) -> bool,
{
fn matches(&self, name: &str) -> bool {
self(name)
}
}
impl<const N: usize> PropPattern for [&'static str; N] {
fn matches(&self, name: &str) -> bool {
self.contains(&name)
}
}
type NameExp<'a> = Option<(&'a str, Option<VStr<'a>>)>;
pub trait PropMatcher<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a>;
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self;
fn take(prop: ElemProp<'a>) -> Self;
fn is_match<P>(p: &ElemProp<'a>, pat: &P, allow_empty: bool) -> bool
where
P: PropPattern,
{
Self::get_name_and_exp(p).map_or(false, |(name, exp)| {
pat.matches(name) && (allow_empty || !exp.map_or(true, |v| v.is_empty()))
})
}
}
pub fn is_bind_key<'a>(arg: &Option<DirectiveArg<'a>>, name: &str) -> bool {
get_bind_key(arg).map_or(false, |v| v == name)
}
fn get_bind_key<'a>(arg: &Option<DirectiveArg<'a>>) -> Option<&'a str> {
if let DirectiveArg::Static(name) = arg.as_ref()? {
Some(name)
} else {
None
}
}
impl<'a> PropMatcher<'a> for ElemProp<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a> {
match prop {
ElemProp::Attr(Attribute { name, value, .. }) => {
let exp = value.as_ref().map(|v| v.content);
Some((name, exp))
}
ElemProp::Dir(dir @ Directive { name: "bind", .. }) => {
let name = get_bind_key(&dir.argument)?;
let exp = dir.expression.as_ref().map(|v| v.content);
Some((name, exp))
}
_ => None,
}
}
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self {
prop
}
fn take(prop: ElemProp<'a>) -> Self {
prop
}
}
impl<'a> PropMatcher<'a> for Directive<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a> {
if let ElemProp::Dir(Directive {
name, expression, ..
}) = prop
{
let exp = expression.as_ref().map(|v| v.content);
Some((name, exp))
} else {
None
}
}
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self {
if let ElemProp::Dir(dir) = prop {
return dir;
}
unreachable!("invalid call")
}
fn take(prop: ElemProp<'a>) -> Self {
if let ElemProp::Dir(dir) = prop {
return dir;
}
unreachable!("invalid call")
}
}
pub struct PropFound<'a, E, M>
where
E: Borrow<Element<'a>>,
M: PropMatcher<'a>,
{
elem: E,
pos: usize,
m: PhantomData<&'a M>,
}
impl<'a, E, M> PropFound<'a, E, M>
where
E: Borrow<Element<'a>>,
M: PropMatcher<'a>,
{
fn new(elem: E, pos: usize) -> Option<Self> {
Some(Self {
elem,
pos,
m: PhantomData,
})
}
pub fn get_ref(&self) -> &M {
M::get_ref(&self.elem.borrow().properties[self.pos])
}
}
// take is only available when access is mutable
impl<'a, E, M> PropFound<'a, E, M>
where
E: BorrowMut<Element<'a>>,
M: PropMatcher<'a>,
{
pub fn take(mut self) -> M {
// TODO: avoid O(n) behavior
M::take(self.elem.borrow_mut().properties.remove(self.pos))
}
}
type DirFound<'a, E> = PropFound<'a, E, Directive<'a>>;
// sometimes mutable access to the element is not available so
// Borrow is used to refine PropFound so `take` is optional
pub fn dir_finder<'a, E, P>(elem: E, pat: P) -> PropFinder<'a, E, P, Directive<'a>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat)
}
pub fn find_dir<'a, E, P>(elem: E, pat: P) -> Option<DirFound<'a, E>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).find()
}
pub fn find_dir_empty<'a, E, P>(elem: E, pat: P) -> Option<DirFound<'a, E>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).allow_empty().find()
}
pub struct PropFinder<'a, E, P, M = ElemProp<'a>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
M: PropMatcher<'a>,
{
elem: E,
pat: P,
allow_empty: bool,
filter: fn(&ElemProp<'a>) -> bool,
m: PhantomData<&'a M>,
}
impl<'a, E, P, M> PropFinder<'a, E, P, M>
where
E: Borrow<Element<'a>>,
P: PropPattern,
M: PropMatcher<'a>,
{
fn new(elem: E, pat: P) -> Self {
Self {
elem,
pat,
allow_empty: false,
filter: |_| true,
m: PhantomData,
}
}
fn is_match(&self, p: &ElemProp<'a>) -> bool {
M::is_match(p, &self.pat, self.allow_empty)
}
pub fn dynamic_only(self) -> Self {
Self {
filter: |p| matches!(p, ElemProp::Dir(..)),
..self
}
}
pub fn attr_only(self) -> Self {
Self {
filter: |p| matches!(p, ElemProp::Attr(..)),
..self
}
}
pub fn find(self) -> Option<PropFound<'a, E, M>> {
let pos = self
.elem
.borrow()
.properties
.iter()
.position(|p| self.is_match(p) && (self.filter)(p))?;
PropFound::new(self.elem, pos)
}
pub fn allow_empty(self) -> Self {
Self {
allow_empty: true,
..self
}
}
}
impl<'a, P> PropFinder<'a, Element<'a>, P, ElemProp<'a>>
where
P: PropPattern + Copy,
{
pub fn find_all(self) -> impl Iterator<Item = Result<ElemProp<'a>, ElemProp<'a>>> {
let PropFinder {
elem,
pat,
allow_empty,
..
} = self;
elem.properties.into_iter().map(move |p| {
if ElemProp::is_match(&p, &pat, allow_empty) {
Ok(p)
} else {
Err(p)
}
})
}
}
pub fn find_prop<'a, E, P>(elem: E, pat: P) -> Option<PropFound<'a, E, ElemProp<'a>>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).find()
}
pub fn prop_finder<'a, E, P>(elem: E, pat: P) -> PropFinder<'a, E, P>
where
E: Borrow<Element<'a>>,
P: PropPattern,
|
// since std::lazy::Lazy is not stable
// it is not thread safe, not Sync.
// it is Send if F and T is Send
pub struct Lazy<T, F = fn() -> T>(UnsafeCell<Result<T, Option<F>>>)
where
F: FnOnce() -> T;
impl<T, F> Lazy<T, F>
where
F: FnOnce() -> T,
{
pub fn new(f: F) -> Self {
Self(UnsafeCell::new(Err(Some(f))))
}
}
impl<T, F> Deref for Lazy<T, F>
where
F: FnOnce() -> T,
{
type Target = T;
fn deref(&self) -> &Self::Target {
let m = unsafe { &mut *self.0.get() };
let f = match m {
Ok(t) => return t,
Err(f) => f,
};
*m = Ok(f.take().unwrap()());
match m {
Ok(t) => t,
_ => panic!("unwrap Ok"),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::parser::test::mock_element;
#[test]
fn test_find_dir() {
let e = mock_element("<p v-if=true/>");
let found = find_dir(&e, "if");
let found = found.expect("should found directive");
assert_eq!(found.get_ref().name, "if");
assert_eq!(e.properties.len(), 1);
}
#[test]
fn test_find_dir_mut() {
let mut e = mock_element("<p v-if=true/>");
let found = find_dir(&mut e, "if");
let found = found.expect("should found directive");
assert_eq!(found.get_ref().name, "if");
assert_eq!(found.take().name, "if");
assert!(e.properties.is_empty());
}
#[test]
fn test_find_empty_dir() {
let e = mock_element("<p v-if=true v-for>");
assert!(find_dir(&e, "if").is_some());
assert!(find_dir(&e, "for").is_none());
let found = dir_finder(&e, "for").allow_empty().find();
assert!(found.is_some());
}
#[test]
fn test_find_prop() {
let mut e = mock_element("<p :name=foo name=bar/>");
assert!(find_dir(&e, "name").is_none());
assert!(find_dir(&e, "bind").is_some());
// prop only looks at attr and v-bind
assert!(find_prop(&e, "bind").is_none());
find_prop(&mut e, "name").unwrap().take();
assert!(find_prop(&e, "bind").is_none());
find_prop(&mut e, "name").unwrap().take();
assert!(find_prop(&e, "name").is_none());
}
#[test]
fn find_prop_ignore_dynamic_bind() {
let e = mock_element("<p :[name]=foo/>");
assert!(find_dir(&e, "name").is_none());
assert!(find_dir(&e, "bind").is_some());
assert!(find_prop(&e, "name").is_none());
}
#[test]
fn find_dynamic_only_prop() {
let e = mock_element("<p name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_none());
let e = mock_element("<p v-bind:name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_some());
let e = mock_element("<p :name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_some());
let e = mock_element("<p :[name]=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_none());
}
#[test]
fn prop_find_all() {
let e = mock_element("<p :name=foo name=bar :[name]=baz/>");
let a: Vec<_> = prop_finder(e, "name").find_all().collect();
assert_eq!(a.len(), 3);
assert!(a[0].is_ok());
assert!(a[1].is_ok());
assert!(a[2].is_err());
}
#[test]
fn layman_lazy() {
let mut test = 0;
let l = Lazy::new(|| {
test += 1;
(0..=100).sum::<i32>()
});
assert_eq!(*l, 5050);
assert_eq!(*l, 5050);
assert_eq!(test, 1);
}
}
| {
PropFinder::new(elem, pat)
} | identifier_body |
util.rs | use super::{
converter::BaseConvertInfo,
flags::RuntimeHelper,
ir::{JsExpr as Js, VNodeIR},
parser::{Directive, DirectiveArg, ElemProp, Element},
scanner::Attribute,
};
use std::{
borrow::{Borrow, BorrowMut},
cell::UnsafeCell,
marker::PhantomData,
ops::Deref,
};
#[macro_export]
macro_rules! cast {
($target: expr, $pat: path) => {{
if let $pat(a, ..) = $target {
a
} else {
panic!("mismatch variant when cast to {}", stringify!($pat));
}
}};
}
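// Hedged usage sketch for `cast!` (the `Demo` enum below is illustrative
// only, not part of this crate): the macro extracts the first field of a
// tuple variant and panics with a readable message on a mismatch.
#[cfg(test)]
mod cast_example {
    enum Demo {
        Num(i32),
        Pair(i32, i32),
    }
    #[test]
    fn cast_extracts_first_field() {
        assert_eq!(cast!(Demo::Num(42), Demo::Num), 42);
        // trailing fields are ignored by the `..` in the macro's pattern
        assert_eq!(cast!(Demo::Pair(1, 2), Demo::Pair), 1);
    }
}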
mod decode_html;
mod json;
mod named_chars;
pub mod rslint;
mod v_str;
pub use v_str::VStr;
pub fn non_whitespace(c: char) -> bool {
!c.is_ascii_whitespace()
}
pub fn get_core_component(tag: &str) -> Option<RuntimeHelper> {
use RuntimeHelper as RH;
Some(match tag {
"Teleport" | "teleport" => RH::TELEPORT,
"Suspense" | "suspense" => RH::SUSPENSE,
"KeepAlive" | "keep-alive" => RH::KEEP_ALIVE,
"BaseTransition" | "base-transition" => RH::BASE_TRANSITION,
_ => return None,
})
}
pub fn is_core_component(tag: &str) -> bool {
get_core_component(tag).is_some()
}
fn is_event_prop(prop: &str) -> bool {
let bytes = prop.as_bytes();
// equivalent to /^on[^a-z]/
    bytes.len() > 2 && bytes.starts_with(b"on") && !bytes[2].is_ascii_lowercase()
}
pub fn is_mergeable_prop(prop: &str) -> bool {
prop == "class" || prop == "style" || is_event_prop(prop)
}
#[inline]
pub fn not_js_identifier(c: char) -> bool {
!c.is_alphanumeric() && c != '$' && c != '_'
}
pub fn is_simple_identifier(s: VStr) -> bool {
if VStr::has_affix(&s) {
return false;
}
let is_ident = |c| !not_js_identifier(c);
let raw = s.raw;
raw.chars().all(is_ident) && !raw.starts_with(|c: char| c.is_ascii_digit())
}
macro_rules! make_list {
( $($id: ident),* ) => {
&[
$(stringify!($id)),*
]
}
}
// use a simple `contains` for this small str array:
// a benchmark shows a linear scan takes at most 10ns,
// while phf or binary search takes ~30ns
const ALLOWED_GLOBALS: &[&str] = make_list![
Infinity,
undefined,
NaN,
isFinite,
isNaN,
parseFloat,
parseInt,
decodeURI,
decodeURIComponent,
encodeURI,
encodeURIComponent,
Math,
Number,
Date,
Array,
Object,
Boolean,
String,
RegExp,
Map,
Set,
JSON,
Intl,
BigInt
];
pub fn is_global_allow_listed(s: &str) -> bool {
ALLOWED_GLOBALS.contains(&s)
}
// https://github.com/vuejs/rfcs/blob/master/active-rfcs/0008-render-function-api-change.md#special-reserved-props
const RESERVED: &[&str] = make_list![
key,
ref,
onVnodeMounted,
onVnodeUpdated,
onVnodeUnmounted,
onVnodeBeforeMount,
onVnodeBeforeUpdate,
onVnodeBeforeUnmount
];
#[inline]
pub fn is_reserved_prop(tag: &str) -> bool {
RESERVED.contains(&tag)
}
pub fn is_component_tag(tag: &str) -> bool {
tag == "component" || tag == "Component"
}
pub const fn yes(_: &str) -> bool {
true
}
pub const fn no(_: &str) -> bool {
false
}
pub fn get_vnode_call_helper(v: &VNodeIR<BaseConvertInfo>) -> RuntimeHelper {
use RuntimeHelper as RH;
if v.is_block {
return if v.is_component {
RH::CREATE_BLOCK
} else {
RH::CREATE_ELEMENT_BLOCK
};
}
if v.is_component {
RH::CREATE_VNODE
} else {
RH::CREATE_ELEMENT_VNODE
}
}
pub fn is_builtin_symbol(tag: &Js, helper: RuntimeHelper) -> bool {
if let Js::Symbol(r) = tag {
r == &helper
} else {
false
}
}
pub trait PropPattern {
fn matches(&self, name: &str) -> bool;
}
impl PropPattern for &str {
fn matches(&self, name: &str) -> bool {
name == *self
}
}
impl<F> PropPattern for F
where
F: Fn(&str) -> bool,
{
fn matches(&self, name: &str) -> bool {
self(name)
}
}
impl<const N: usize> PropPattern for [&'static str; N] {
fn matches(&self, name: &str) -> bool {
self.contains(&name)
}
}
type NameExp<'a> = Option<(&'a str, Option<VStr<'a>>)>;
pub trait PropMatcher<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a>;
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self;
fn take(prop: ElemProp<'a>) -> Self;
fn is_match<P>(p: &ElemProp<'a>, pat: &P, allow_empty: bool) -> bool
where
P: PropPattern,
{
Self::get_name_and_exp(p).map_or(false, |(name, exp)| {
pat.matches(name) && (allow_empty || !exp.map_or(true, |v| v.is_empty()))
})
}
}
pub fn is_bind_key<'a>(arg: &Option<DirectiveArg<'a>>, name: &str) -> bool {
get_bind_key(arg).map_or(false, |v| v == name)
}
fn get_bind_key<'a>(arg: &Option<DirectiveArg<'a>>) -> Option<&'a str> {
if let DirectiveArg::Static(name) = arg.as_ref()? {
Some(name)
} else |
}
impl<'a> PropMatcher<'a> for ElemProp<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a> {
match prop {
ElemProp::Attr(Attribute { name, value, .. }) => {
let exp = value.as_ref().map(|v| v.content);
Some((name, exp))
}
ElemProp::Dir(dir @ Directive { name: "bind", .. }) => {
let name = get_bind_key(&dir.argument)?;
let exp = dir.expression.as_ref().map(|v| v.content);
Some((name, exp))
}
_ => None,
}
}
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self {
prop
}
fn take(prop: ElemProp<'a>) -> Self {
prop
}
}
impl<'a> PropMatcher<'a> for Directive<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a> {
if let ElemProp::Dir(Directive {
name, expression, ..
}) = prop
{
let exp = expression.as_ref().map(|v| v.content);
Some((name, exp))
} else {
None
}
}
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self {
if let ElemProp::Dir(dir) = prop {
return dir;
}
unreachable!("invalid call")
}
fn take(prop: ElemProp<'a>) -> Self {
if let ElemProp::Dir(dir) = prop {
return dir;
}
unreachable!("invalid call")
}
}
pub struct PropFound<'a, E, M>
where
E: Borrow<Element<'a>>,
M: PropMatcher<'a>,
{
elem: E,
pos: usize,
m: PhantomData<&'a M>,
}
impl<'a, E, M> PropFound<'a, E, M>
where
E: Borrow<Element<'a>>,
M: PropMatcher<'a>,
{
fn new(elem: E, pos: usize) -> Option<Self> {
Some(Self {
elem,
pos,
m: PhantomData,
})
}
pub fn get_ref(&self) -> &M {
M::get_ref(&self.elem.borrow().properties[self.pos])
}
}
// take is only available when access is mutable
impl<'a, E, M> PropFound<'a, E, M>
where
E: BorrowMut<Element<'a>>,
M: PropMatcher<'a>,
{
pub fn take(mut self) -> M {
// TODO: avoid O(n) behavior
M::take(self.elem.borrow_mut().properties.remove(self.pos))
}
}
type DirFound<'a, E> = PropFound<'a, E, Directive<'a>>;
// Mutable access to the element is not always available, so PropFound
// is bounded by `Borrow` and `take` is only offered behind `BorrowMut`.
pub fn dir_finder<'a, E, P>(elem: E, pat: P) -> PropFinder<'a, E, P, Directive<'a>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat)
}
pub fn find_dir<'a, E, P>(elem: E, pat: P) -> Option<DirFound<'a, E>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).find()
}
pub fn find_dir_empty<'a, E, P>(elem: E, pat: P) -> Option<DirFound<'a, E>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).allow_empty().find()
}
pub struct PropFinder<'a, E, P, M = ElemProp<'a>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
M: PropMatcher<'a>,
{
elem: E,
pat: P,
allow_empty: bool,
filter: fn(&ElemProp<'a>) -> bool,
m: PhantomData<&'a M>,
}
impl<'a, E, P, M> PropFinder<'a, E, P, M>
where
E: Borrow<Element<'a>>,
P: PropPattern,
M: PropMatcher<'a>,
{
fn new(elem: E, pat: P) -> Self {
Self {
elem,
pat,
allow_empty: false,
filter: |_| true,
m: PhantomData,
}
}
fn is_match(&self, p: &ElemProp<'a>) -> bool {
M::is_match(p, &self.pat, self.allow_empty)
}
pub fn dynamic_only(self) -> Self {
Self {
filter: |p| matches!(p, ElemProp::Dir(..)),
..self
}
}
pub fn attr_only(self) -> Self {
Self {
filter: |p| matches!(p, ElemProp::Attr(..)),
..self
}
}
pub fn find(self) -> Option<PropFound<'a, E, M>> {
let pos = self
.elem
.borrow()
.properties
.iter()
.position(|p| self.is_match(p) && (self.filter)(p))?;
PropFound::new(self.elem, pos)
}
pub fn allow_empty(self) -> Self {
Self {
allow_empty: true,
..self
}
}
}
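// Hedged sketch: `attr_only` mirrors `dynamic_only` but keeps plain
// attributes; it is not exercised by the tests at the bottom of this file.
#[cfg(test)]
mod attr_only_example {
    use super::*;
    use crate::parser::test::mock_element;
    #[test]
    fn attr_only_skips_bound_props() {
        let e = mock_element("<p :name=foo/>");
        assert!(prop_finder(&e, "name").attr_only().find().is_none());
        let e = mock_element("<p name=foo/>");
        assert!(prop_finder(&e, "name").attr_only().find().is_some());
    }
}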
impl<'a, P> PropFinder<'a, Element<'a>, P, ElemProp<'a>>
where
P: PropPattern + Copy,
{
pub fn find_all(self) -> impl Iterator<Item = Result<ElemProp<'a>, ElemProp<'a>>> {
let PropFinder {
elem,
pat,
allow_empty,
..
} = self;
elem.properties.into_iter().map(move |p| {
if ElemProp::is_match(&p, &pat, allow_empty) {
Ok(p)
} else {
Err(p)
}
})
}
}
pub fn find_prop<'a, E, P>(elem: E, pat: P) -> Option<PropFound<'a, E, ElemProp<'a>>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).find()
}
pub fn prop_finder<'a, E, P>(elem: E, pat: P) -> PropFinder<'a, E, P>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat)
}
// since std::lazy::Lazy is not stable, this is a hand-rolled equivalent.
// It is not thread safe, hence not Sync.
// It is Send if both F and T are Send.
pub struct Lazy<T, F = fn() -> T>(UnsafeCell<Result<T, Option<F>>>)
where
F: FnOnce() -> T;
impl<T, F> Lazy<T, F>
where
F: FnOnce() -> T,
{
pub fn new(f: F) -> Self {
Self(UnsafeCell::new(Err(Some(f))))
}
}
impl<T, F> Deref for Lazy<T, F>
where
F: FnOnce() -> T,
{
type Target = T;
fn deref(&self) -> &Self::Target {
let m = unsafe { &mut *self.0.get() };
let f = match m {
Ok(t) => return t,
Err(f) => f,
};
*m = Ok(f.take().unwrap()());
match m {
Ok(t) => t,
_ => panic!("unwrap Ok"),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::parser::test::mock_element;
#[test]
fn test_find_dir() {
let e = mock_element("<p v-if=true/>");
let found = find_dir(&e, "if");
        let found = found.expect("should find directive");
assert_eq!(found.get_ref().name, "if");
assert_eq!(e.properties.len(), 1);
}
#[test]
fn test_find_dir_mut() {
let mut e = mock_element("<p v-if=true/>");
let found = find_dir(&mut e, "if");
        let found = found.expect("should find directive");
assert_eq!(found.get_ref().name, "if");
assert_eq!(found.take().name, "if");
assert!(e.properties.is_empty());
}
#[test]
fn test_find_empty_dir() {
let e = mock_element("<p v-if=true v-for>");
assert!(find_dir(&e, "if").is_some());
assert!(find_dir(&e, "for").is_none());
let found = dir_finder(&e, "for").allow_empty().find();
assert!(found.is_some());
}
#[test]
fn test_find_prop() {
let mut e = mock_element("<p :name=foo name=bar/>");
assert!(find_dir(&e, "name").is_none());
assert!(find_dir(&e, "bind").is_some());
// prop only looks at attr and v-bind
assert!(find_prop(&e, "bind").is_none());
find_prop(&mut e, "name").unwrap().take();
assert!(find_prop(&e, "bind").is_none());
find_prop(&mut e, "name").unwrap().take();
assert!(find_prop(&e, "name").is_none());
}
#[test]
fn find_prop_ignore_dynamic_bind() {
let e = mock_element("<p :[name]=foo/>");
assert!(find_dir(&e, "name").is_none());
assert!(find_dir(&e, "bind").is_some());
assert!(find_prop(&e, "name").is_none());
}
#[test]
fn find_dynamic_only_prop() {
let e = mock_element("<p name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_none());
let e = mock_element("<p v-bind:name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_some());
let e = mock_element("<p :name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_some());
let e = mock_element("<p :[name]=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_none());
}
#[test]
fn prop_find_all() {
let e = mock_element("<p :name=foo name=bar :[name]=baz/>");
let a: Vec<_> = prop_finder(e, "name").find_all().collect();
assert_eq!(a.len(), 3);
assert!(a[0].is_ok());
assert!(a[1].is_ok());
assert!(a[2].is_err());
}
#[test]
fn layman_lazy() {
let mut test = 0;
let l = Lazy::new(|| {
test += 1;
(0..=100).sum::<i32>()
});
assert_eq!(*l, 5050);
assert_eq!(*l, 5050);
assert_eq!(test, 1);
}
}
| {
None
} | conditional_block |
util.rs | use super::{
converter::BaseConvertInfo,
flags::RuntimeHelper,
ir::{JsExpr as Js, VNodeIR},
parser::{Directive, DirectiveArg, ElemProp, Element},
scanner::Attribute,
};
use std::{
borrow::{Borrow, BorrowMut},
cell::UnsafeCell,
marker::PhantomData,
ops::Deref,
};
#[macro_export]
macro_rules! cast {
($target: expr, $pat: path) => {{
if let $pat(a, ..) = $target {
a
} else {
panic!("mismatch variant when cast to {}", stringify!($pat));
}
}};
}
mod decode_html;
mod json;
mod named_chars;
pub mod rslint;
mod v_str;
pub use v_str::VStr;
pub fn non_whitespace(c: char) -> bool {
!c.is_ascii_whitespace()
}
pub fn get_core_component(tag: &str) -> Option<RuntimeHelper> {
use RuntimeHelper as RH;
Some(match tag {
"Teleport" | "teleport" => RH::TELEPORT,
"Suspense" | "suspense" => RH::SUSPENSE,
"KeepAlive" | "keep-alive" => RH::KEEP_ALIVE,
"BaseTransition" | "base-transition" => RH::BASE_TRANSITION,
_ => return None,
})
}
pub fn is_core_component(tag: &str) -> bool {
get_core_component(tag).is_some()
}
fn is_event_prop(prop: &str) -> bool {
let bytes = prop.as_bytes();
// equivalent to /^on[^a-z]/
    bytes.len() > 2 && bytes.starts_with(b"on") && !bytes[2].is_ascii_lowercase()
}
pub fn is_mergeable_prop(prop: &str) -> bool {
prop == "class" || prop == "style" || is_event_prop(prop)
}
#[inline]
pub fn not_js_identifier(c: char) -> bool {
!c.is_alphanumeric() && c != '$' && c != '_'
}
pub fn is_simple_identifier(s: VStr) -> bool {
if VStr::has_affix(&s) {
return false;
}
let is_ident = |c| !not_js_identifier(c);
let raw = s.raw;
raw.chars().all(is_ident) && !raw.starts_with(|c: char| c.is_ascii_digit())
}
macro_rules! make_list {
( $($id: ident),* ) => {
&[
$(stringify!($id)),*
]
}
}
// use a simple `contains` for this small str array:
// a benchmark shows a linear scan takes at most 10ns,
// while phf or binary search takes ~30ns
const ALLOWED_GLOBALS: &[&str] = make_list![
Infinity,
undefined,
NaN,
isFinite,
isNaN,
parseFloat,
parseInt,
decodeURI,
decodeURIComponent,
encodeURI,
encodeURIComponent,
Math,
Number,
Date,
Array,
Object,
Boolean,
String,
RegExp,
Map,
Set,
JSON,
Intl,
BigInt
];
pub fn is_global_allow_listed(s: &str) -> bool {
ALLOWED_GLOBALS.contains(&s)
}
// https://github.com/vuejs/rfcs/blob/master/active-rfcs/0008-render-function-api-change.md#special-reserved-props
const RESERVED: &[&str] = make_list![
key,
ref,
onVnodeMounted,
onVnodeUpdated,
onVnodeUnmounted,
onVnodeBeforeMount,
onVnodeBeforeUpdate,
onVnodeBeforeUnmount
];
#[inline]
pub fn is_reserved_prop(tag: &str) -> bool {
RESERVED.contains(&tag)
}
pub fn is_component_tag(tag: &str) -> bool {
tag == "component" || tag == "Component"
}
pub const fn yes(_: &str) -> bool {
true
}
pub const fn no(_: &str) -> bool {
false
}
pub fn get_vnode_call_helper(v: &VNodeIR<BaseConvertInfo>) -> RuntimeHelper {
use RuntimeHelper as RH;
if v.is_block {
return if v.is_component {
RH::CREATE_BLOCK
} else {
RH::CREATE_ELEMENT_BLOCK
};
}
if v.is_component {
RH::CREATE_VNODE
} else {
RH::CREATE_ELEMENT_VNODE
}
}
pub fn is_builtin_symbol(tag: &Js, helper: RuntimeHelper) -> bool {
if let Js::Symbol(r) = tag {
r == &helper
} else {
false
}
}
pub trait PropPattern {
fn matches(&self, name: &str) -> bool;
}
impl PropPattern for &str {
fn matches(&self, name: &str) -> bool {
name == *self
}
}
impl<F> PropPattern for F
where
F: Fn(&str) -> bool,
{
fn matches(&self, name: &str) -> bool {
self(name)
}
}
impl<const N: usize> PropPattern for [&'static str; N] {
fn matches(&self, name: &str) -> bool {
self.contains(&name)
}
}
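// Hedged sketch of the three `PropPattern` forms above: a literal &str,
// any `Fn(&str) -> bool` closure, and a fixed-size array of candidates.
// `mock_element` is the same helper the tests at the bottom rely on.
#[cfg(test)]
mod prop_pattern_example {
    use super::*;
    use crate::parser::test::mock_element;
    #[test]
    fn str_closure_and_array_patterns_match() {
        let e = mock_element("<p :name=foo/>");
        assert!(find_prop(&e, "name").is_some());
        assert!(find_prop(&e, |n: &str| n.starts_with("na")).is_some());
        assert!(find_prop(&e, ["id", "name"]).is_some());
    }
}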
type NameExp<'a> = Option<(&'a str, Option<VStr<'a>>)>;
pub trait PropMatcher<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a>;
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self;
fn take(prop: ElemProp<'a>) -> Self;
fn is_match<P>(p: &ElemProp<'a>, pat: &P, allow_empty: bool) -> bool
where
P: PropPattern,
{
Self::get_name_and_exp(p).map_or(false, |(name, exp)| {
pat.matches(name) && (allow_empty || !exp.map_or(true, |v| v.is_empty()))
})
}
}
pub fn is_bind_key<'a>(arg: &Option<DirectiveArg<'a>>, name: &str) -> bool {
get_bind_key(arg).map_or(false, |v| v == name)
}
fn get_bind_key<'a>(arg: &Option<DirectiveArg<'a>>) -> Option<&'a str> {
if let DirectiveArg::Static(name) = arg.as_ref()? {
Some(name)
} else {
None
}
}
impl<'a> PropMatcher<'a> for ElemProp<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a> {
match prop {
ElemProp::Attr(Attribute { name, value, .. }) => {
let exp = value.as_ref().map(|v| v.content);
Some((name, exp))
}
ElemProp::Dir(dir @ Directive { name: "bind", .. }) => {
let name = get_bind_key(&dir.argument)?;
let exp = dir.expression.as_ref().map(|v| v.content);
Some((name, exp))
}
_ => None,
}
}
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self {
prop
}
fn take(prop: ElemProp<'a>) -> Self {
prop
}
}
impl<'a> PropMatcher<'a> for Directive<'a> {
fn get_name_and_exp(prop: &ElemProp<'a>) -> NameExp<'a> {
if let ElemProp::Dir(Directive {
name, expression, ..
}) = prop
{
let exp = expression.as_ref().map(|v| v.content);
Some((name, exp))
} else {
None
}
}
fn get_ref<'b>(prop: &'b ElemProp<'a>) -> &'b Self {
if let ElemProp::Dir(dir) = prop {
return dir;
}
unreachable!("invalid call")
}
fn take(prop: ElemProp<'a>) -> Self {
if let ElemProp::Dir(dir) = prop {
return dir;
}
unreachable!("invalid call")
}
}
pub struct | <'a, E, M>
where
E: Borrow<Element<'a>>,
M: PropMatcher<'a>,
{
elem: E,
pos: usize,
m: PhantomData<&'a M>,
}
impl<'a, E, M> PropFound<'a, E, M>
where
E: Borrow<Element<'a>>,
M: PropMatcher<'a>,
{
fn new(elem: E, pos: usize) -> Option<Self> {
Some(Self {
elem,
pos,
m: PhantomData,
})
}
pub fn get_ref(&self) -> &M {
M::get_ref(&self.elem.borrow().properties[self.pos])
}
}
// take is only available when access is mutable
impl<'a, E, M> PropFound<'a, E, M>
where
E: BorrowMut<Element<'a>>,
M: PropMatcher<'a>,
{
pub fn take(mut self) -> M {
// TODO: avoid O(n) behavior
M::take(self.elem.borrow_mut().properties.remove(self.pos))
}
}
type DirFound<'a, E> = PropFound<'a, E, Directive<'a>>;
// Mutable access to the element is not always available, so PropFound
// is bounded by `Borrow` and `take` is only offered behind `BorrowMut`.
pub fn dir_finder<'a, E, P>(elem: E, pat: P) -> PropFinder<'a, E, P, Directive<'a>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat)
}
pub fn find_dir<'a, E, P>(elem: E, pat: P) -> Option<DirFound<'a, E>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).find()
}
pub fn find_dir_empty<'a, E, P>(elem: E, pat: P) -> Option<DirFound<'a, E>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).allow_empty().find()
}
pub struct PropFinder<'a, E, P, M = ElemProp<'a>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
M: PropMatcher<'a>,
{
elem: E,
pat: P,
allow_empty: bool,
filter: fn(&ElemProp<'a>) -> bool,
m: PhantomData<&'a M>,
}
impl<'a, E, P, M> PropFinder<'a, E, P, M>
where
E: Borrow<Element<'a>>,
P: PropPattern,
M: PropMatcher<'a>,
{
fn new(elem: E, pat: P) -> Self {
Self {
elem,
pat,
allow_empty: false,
filter: |_| true,
m: PhantomData,
}
}
fn is_match(&self, p: &ElemProp<'a>) -> bool {
M::is_match(p, &self.pat, self.allow_empty)
}
pub fn dynamic_only(self) -> Self {
Self {
filter: |p| matches!(p, ElemProp::Dir(..)),
..self
}
}
pub fn attr_only(self) -> Self {
Self {
filter: |p| matches!(p, ElemProp::Attr(..)),
..self
}
}
pub fn find(self) -> Option<PropFound<'a, E, M>> {
let pos = self
.elem
.borrow()
.properties
.iter()
.position(|p| self.is_match(p) && (self.filter)(p))?;
PropFound::new(self.elem, pos)
}
pub fn allow_empty(self) -> Self {
Self {
allow_empty: true,
..self
}
}
}
impl<'a, P> PropFinder<'a, Element<'a>, P, ElemProp<'a>>
where
P: PropPattern + Copy,
{
pub fn find_all(self) -> impl Iterator<Item = Result<ElemProp<'a>, ElemProp<'a>>> {
let PropFinder {
elem,
pat,
allow_empty,
..
} = self;
elem.properties.into_iter().map(move |p| {
if ElemProp::is_match(&p, &pat, allow_empty) {
Ok(p)
} else {
Err(p)
}
})
}
}
pub fn find_prop<'a, E, P>(elem: E, pat: P) -> Option<PropFound<'a, E, ElemProp<'a>>>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat).find()
}
pub fn prop_finder<'a, E, P>(elem: E, pat: P) -> PropFinder<'a, E, P>
where
E: Borrow<Element<'a>>,
P: PropPattern,
{
PropFinder::new(elem, pat)
}
// since std::lazy::Lazy is not stable, this is a hand-rolled equivalent.
// It is not thread safe, hence not Sync.
// It is Send if both F and T are Send.
pub struct Lazy<T, F = fn() -> T>(UnsafeCell<Result<T, Option<F>>>)
where
F: FnOnce() -> T;
impl<T, F> Lazy<T, F>
where
F: FnOnce() -> T,
{
pub fn new(f: F) -> Self {
Self(UnsafeCell::new(Err(Some(f))))
}
}
impl<T, F> Deref for Lazy<T, F>
where
F: FnOnce() -> T,
{
type Target = T;
fn deref(&self) -> &Self::Target {
let m = unsafe { &mut *self.0.get() };
let f = match m {
Ok(t) => return t,
Err(f) => f,
};
*m = Ok(f.take().unwrap()());
match m {
Ok(t) => t,
_ => panic!("unwrap Ok"),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::parser::test::mock_element;
#[test]
fn test_find_dir() {
let e = mock_element("<p v-if=true/>");
let found = find_dir(&e, "if");
        let found = found.expect("should find directive");
assert_eq!(found.get_ref().name, "if");
assert_eq!(e.properties.len(), 1);
}
#[test]
fn test_find_dir_mut() {
let mut e = mock_element("<p v-if=true/>");
let found = find_dir(&mut e, "if");
        let found = found.expect("should find directive");
assert_eq!(found.get_ref().name, "if");
assert_eq!(found.take().name, "if");
assert!(e.properties.is_empty());
}
#[test]
fn test_find_empty_dir() {
let e = mock_element("<p v-if=true v-for>");
assert!(find_dir(&e, "if").is_some());
assert!(find_dir(&e, "for").is_none());
let found = dir_finder(&e, "for").allow_empty().find();
assert!(found.is_some());
}
#[test]
fn test_find_prop() {
let mut e = mock_element("<p :name=foo name=bar/>");
assert!(find_dir(&e, "name").is_none());
assert!(find_dir(&e, "bind").is_some());
// prop only looks at attr and v-bind
assert!(find_prop(&e, "bind").is_none());
find_prop(&mut e, "name").unwrap().take();
assert!(find_prop(&e, "bind").is_none());
find_prop(&mut e, "name").unwrap().take();
assert!(find_prop(&e, "name").is_none());
}
#[test]
fn find_prop_ignore_dynamic_bind() {
let e = mock_element("<p :[name]=foo/>");
assert!(find_dir(&e, "name").is_none());
assert!(find_dir(&e, "bind").is_some());
assert!(find_prop(&e, "name").is_none());
}
#[test]
fn find_dynamic_only_prop() {
let e = mock_element("<p name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_none());
let e = mock_element("<p v-bind:name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_some());
let e = mock_element("<p :name=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_some());
let e = mock_element("<p :[name]=foo/>");
assert!(prop_finder(&e, "name").dynamic_only().find().is_none());
}
#[test]
fn prop_find_all() {
let e = mock_element("<p :name=foo name=bar :[name]=baz/>");
let a: Vec<_> = prop_finder(e, "name").find_all().collect();
assert_eq!(a.len(), 3);
assert!(a[0].is_ok());
assert!(a[1].is_ok());
assert!(a[2].is_err());
}
#[test]
fn layman_lazy() {
let mut test = 0;
let l = Lazy::new(|| {
test += 1;
(0..=100).sum::<i32>()
});
assert_eq!(*l, 5050);
assert_eq!(*l, 5050);
assert_eq!(test, 1);
}
}
| PropFound | identifier_name |
lib.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
#![cfg_attr(doc_cfg, feature(doc_cfg))]
pub use anyhow::Result;
use cargo_toml::{Dependency, Manifest};
use heck::AsShoutySnakeCase;
use tauri_utils::{
config::Config,
resources::{external_binaries, resource_relpath, ResourcePaths},
};
use std::path::{Path, PathBuf};
#[cfg(feature = "codegen")]
mod codegen;
mod static_vcruntime;
#[cfg(feature = "codegen")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "codegen")))]
pub use codegen::context::CodegenContext;
fn copy_file(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
if !from.exists() {
return Err(anyhow::anyhow!("{:?} does not exist", from));
}
if !from.is_file() {
return Err(anyhow::anyhow!("{:?} is not a file", from));
}
  let dest_dir = to.parent().expect("destination path has no parent");
std::fs::create_dir_all(dest_dir)?;
std::fs::copy(from, to)?;
Ok(())
}
fn copy_binaries(
binaries: ResourcePaths,
target_triple: &str,
path: &Path,
package_name: Option<&String>,
) -> Result<()> {
for src in binaries {
let src = src?;
println!("cargo:rerun-if-changed={}", src.display());
let file_name = src
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.replace(&format!("-{target_triple}"), "");
if package_name.map_or(false, |n| n == &file_name) {
return Err(anyhow::anyhow!(
"Cannot define a sidecar with the same name as the Cargo package name `{}`. Please change the sidecar name in the filesystem and the Tauri configuration.",
file_name
));
}
let dest = path.join(file_name);
if dest.exists() {
std::fs::remove_file(&dest).unwrap();
}
copy_file(&src, &dest)?;
}
Ok(())
}
/// Copies resources to a path.
fn copy_resources(resources: ResourcePaths<'_>, path: &Path) -> Result<()> {
for src in resources {
let src = src?;
println!("cargo:rerun-if-changed={}", src.display());
let dest = path.join(resource_relpath(&src));
copy_file(&src, dest)?;
}
Ok(())
}
// checks if the given Cargo feature is enabled.
fn has_feature(feature: &str) -> bool {
  // when a feature is enabled, Cargo sets the `CARGO_FEATURE_<name>` env var to 1
// https://doc.rust-lang.org/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-build-scripts
std::env::var(format!("CARGO_FEATURE_{}", AsShoutySnakeCase(feature)))
.map(|x| x == "1")
.unwrap_or(false)
}
// creates a cfg alias if `has_feature` is true.
// `alias` must be a snake case string.
fn cfg_alias(alias: &str, has_feature: bool) {
if has_feature {
println!("cargo:rustc-cfg={alias}");
}
}
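// Hedged sketch of how the pair above is consumed: `try_build` calls
// `cfg_alias("dev", !has_feature("custom-protocol"))` as well as the
// "desktop"/"mobile" aliases, so downstream crate code can gate items with
// `#[cfg(dev)]`, `#[cfg(desktop)]` or `#[cfg(mobile)]` without declaring
// those cfgs anywhere else.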
/// Attributes used on Windows.
#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct WindowsAttributes {
window_icon_path: Option<PathBuf>,
/// The path to the sdk location.
///
  /// For the GNU toolkit this has to be the path where MinGW puts windres.exe and ar.exe.
/// This could be something like: "C:\Program Files\mingw-w64\x86_64-5.3.0-win32-seh-rt_v4-rev0\mingw64\bin"
///
/// For MSVC the Windows SDK has to be installed. It comes with the resource compiler rc.exe.
/// This should be set to the root directory of the Windows SDK, e.g., "C:\Program Files (x86)\Windows Kits\10" or,
  /// if multiple Windows 10 SDK versions are installed, set it directly to the correct bin directory "C:\Program Files (x86)\Windows Kits\10\bin\10.0.14393.0\x64"
///
/// If it is left unset, it will look up a path in the registry, i.e. HKLM\SOFTWARE\Microsoft\Windows Kits\Installed Roots
sdk_dir: Option<PathBuf>,
/// A string containing an [application manifest] to be included with the application on Windows.
///
/// Defaults to:
/// ```ignore
#[doc = include_str!("window-app-manifest.xml")]
/// ```
///
/// [application manifest]: https://learn.microsoft.com/en-us/windows/win32/sbscs/application-manifests
app_manifest: Option<String>,
}
impl WindowsAttributes {
/// Creates the default attribute set.
pub fn new() -> Self {
Self::default()
}
/// Sets the icon to use on the window. Currently only used on Windows.
/// It must be in `ico` format. Defaults to `icons/icon.ico`.
#[must_use]
pub fn window_icon_path<P: AsRef<Path>>(mut self, window_icon_path: P) -> Self {
self
.window_icon_path
.replace(window_icon_path.as_ref().into());
self
}
/// Sets the sdk dir for windows. Currently only used on Windows. This must be a valid UTF-8
/// path. Defaults to whatever the `winres` crate determines is best.
#[must_use]
pub fn sdk_dir<P: AsRef<Path>>(mut self, sdk_dir: P) -> Self {
self.sdk_dir = Some(sdk_dir.as_ref().into());
self
}
/// Sets the Windows app [manifest].
///
/// # Example
///
/// The following manifest will brand the exe as requesting administrator privileges.
  /// Thus, every time it is executed, a Windows UAC dialog will appear.
///
/// Note that you can move the manifest contents to a separate file and use `include_str!("manifest.xml")`
/// instead of the inline string.
///
/// ```rust,no_run
/// let mut windows = tauri_build::WindowsAttributes::new();
/// windows = windows.app_manifest(r#"
/// <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
/// <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
/// <security>
/// <requestedPrivileges>
/// <requestedExecutionLevel level="requireAdministrator" uiAccess="false" />
/// </requestedPrivileges>
/// </security>
/// </trustInfo>
/// </assembly>
/// "#);
/// tauri_build::try_build(
/// tauri_build::Attributes::new().windows_attributes(windows)
/// ).expect("failed to run build script");
/// ```
///
/// Defaults to:
/// ```ignore
#[doc = include_str!("window-app-manifest.xml")]
  /// ```
  ///
  /// [manifest]: https://learn.microsoft.com/en-us/windows/win32/sbscs/application-manifests
#[must_use]
pub fn app_manifest<S: AsRef<str>>(mut self, manifest: S) -> Self {
self.app_manifest = Some(manifest.as_ref().to_string());
self
}
}
/// The attributes used on the build.
#[derive(Debug, Default)]
pub struct Attributes {
#[allow(dead_code)]
windows_attributes: WindowsAttributes,
}
impl Attributes {
/// Creates the default attribute set.
pub fn new() -> Self |
  /// Sets the [`WindowsAttributes`] used for the build (window icon, SDK dir, app manifest). Currently only used on Windows.
#[must_use]
pub fn windows_attributes(mut self, windows_attributes: WindowsAttributes) -> Self {
self.windows_attributes = windows_attributes;
self
}
}
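// Hedged usage sketch for a downstream `build.rs` (mirrors the
// `app_manifest` doc example above; the icon path is hypothetical):
//
// fn main() {
//     let windows = tauri_build::WindowsAttributes::new()
//         .window_icon_path("icons/app.ico")
//         .sdk_dir(r"C:\Program Files (x86)\Windows Kits\10");
//     tauri_build::try_build(
//         tauri_build::Attributes::new().windows_attributes(windows),
//     )
//     .expect("failed to run build script");
// }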
/// Run all build time helpers for your Tauri Application.
///
/// The current helpers include the following:
/// * Generates a Windows Resource file when targeting Windows.
///
/// # Platforms
///
/// [`build()`] should be called inside of `build.rs` regardless of the platform:
/// * New helpers may target more platforms in the future.
/// * Platform specific code is handled by the helpers automatically.
/// * A build script is required in order to activate some Cargo environment variables that are
///   used when generating code and embedding assets, so [`build()`] may as well be called.
///
/// In short: don't put the call to [`build()`] behind a `#[cfg(windows)]`.
///
/// # Panics
///
/// If any of the build time helpers fail, they will [`std::panic!`] with the related error message.
/// This is typically desirable when running inside a build script; see [`try_build`] for no panics.
pub fn build() {
if let Err(error) = try_build(Attributes::default()) {
let error = format!("{error:#}");
println!("{error}");
if error.starts_with("unknown field") {
print!("found an unknown configuration field. This usually means that you are using a CLI version that is newer than `tauri-build` and is incompatible. ");
println!(
"Please try updating the Rust crates by running `cargo update` in the Tauri app folder."
);
}
std::process::exit(1);
}
}
/// Non-panicking [`build()`].
#[allow(unused_variables)]
pub fn try_build(attributes: Attributes) -> Result<()> {
use anyhow::anyhow;
println!("cargo:rerun-if-env-changed=TAURI_CONFIG");
println!("cargo:rerun-if-changed=tauri.conf.json");
#[cfg(feature = "config-json5")]
println!("cargo:rerun-if-changed=tauri.conf.json5");
#[cfg(feature = "config-toml")]
println!("cargo:rerun-if-changed=Tauri.toml");
let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap();
let mobile = target_os == "ios" || target_os == "android";
cfg_alias("desktop", !mobile);
cfg_alias("mobile", mobile);
let mut config = serde_json::from_value(tauri_utils::config::parse::read_from(
std::env::current_dir().unwrap(),
)?)?;
if let Ok(env) = std::env::var("TAURI_CONFIG") {
let merge_config: serde_json::Value = serde_json::from_str(&env)?;
json_patch::merge(&mut config, &merge_config);
}
let config: Config = serde_json::from_value(config)?;
cfg_alias("dev", !has_feature("custom-protocol"));
let ws_path = get_workspace_dir()?;
let mut manifest =
Manifest::<cargo_toml::Value>::from_slice_with_metadata(&std::fs::read("Cargo.toml")?)?;
if let Ok(ws_manifest) = Manifest::from_path(ws_path.join("Cargo.toml")) {
Manifest::complete_from_path_and_workspace(
&mut manifest,
Path::new("Cargo.toml"),
Some((&ws_manifest, ws_path.as_path())),
)?;
} else {
Manifest::complete_from_path(&mut manifest, Path::new("Cargo.toml"))?;
}
if let Some(tauri_build) = manifest.build_dependencies.remove("tauri-build") {
let error_message = check_features(&config, tauri_build, true);
if !error_message.is_empty() {
return Err(anyhow!("
The `tauri-build` dependency features on the `Cargo.toml` file does not match the allowlist defined under `tauri.conf.json`.
Please run `tauri dev` or `tauri build` or {}.
", error_message));
}
}
if let Some(tauri) = manifest.dependencies.remove("tauri") {
let error_message = check_features(&config, tauri, false);
if !error_message.is_empty() {
return Err(anyhow!("
The `tauri` dependency features on the `Cargo.toml` file does not match the allowlist defined under `tauri.conf.json`.
Please run `tauri dev` or `tauri build` or {}.
", error_message));
}
}
let target_triple = std::env::var("TARGET").unwrap();
let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
// TODO: far from ideal, but there's no other way to get the target dir, see <https://github.com/rust-lang/cargo/issues/5457>
let target_dir = out_dir
.parent()
.unwrap()
.parent()
.unwrap()
.parent()
.unwrap();
if let Some(paths) = &config.tauri.bundle.external_bin {
copy_binaries(
ResourcePaths::new(external_binaries(paths, &target_triple).as_slice(), true),
&target_triple,
target_dir,
manifest.package.as_ref().map(|p| &p.name),
)?;
}
#[allow(unused_mut, clippy::redundant_clone)]
let mut resources = config.tauri.bundle.resources.clone().unwrap_or_default();
if target_triple.contains("windows") {
if let Some(fixed_webview2_runtime_path) =
&config.tauri.bundle.windows.webview_fixed_runtime_path
{
resources.push(fixed_webview2_runtime_path.display().to_string());
}
}
copy_resources(ResourcePaths::new(resources.as_slice(), true), target_dir)?;
if target_triple.contains("darwin") {
if let Some(version) = &config.tauri.bundle.macos.minimum_system_version {
println!("cargo:rustc-env=MACOSX_DEPLOYMENT_TARGET={}", version);
}
}
if target_triple.contains("windows") {
use anyhow::Context;
use semver::Version;
use tauri_winres::{VersionInfo, WindowsResource};
fn find_icon<F: Fn(&&String) -> bool>(config: &Config, predicate: F, default: &str) -> PathBuf {
let icon_path = config
.tauri
.bundle
.icon
.iter()
.find(|i| predicate(i))
.cloned()
.unwrap_or_else(|| default.to_string());
icon_path.into()
}
let window_icon_path = attributes
.windows_attributes
.window_icon_path
.unwrap_or_else(|| find_icon(&config, |i| i.ends_with(".ico"), "icons/icon.ico"));
if window_icon_path.exists() {
let mut res = WindowsResource::new();
if let Some(manifest) = attributes.windows_attributes.app_manifest {
res.set_manifest(&manifest);
} else {
res.set_manifest(include_str!("window-app-manifest.xml"));
}
if let Some(sdk_dir) = &attributes.windows_attributes.sdk_dir {
if let Some(sdk_dir_str) = sdk_dir.to_str() {
res.set_toolkit_path(sdk_dir_str);
} else {
return Err(anyhow!(
"sdk_dir path is not valid; only UTF-8 characters are allowed"
));
}
}
if let Some(version) = &config.package.version {
if let Ok(v) = Version::parse(version) {
let version = v.major << 48 | v.minor << 32 | v.patch << 16;
res.set_version_info(VersionInfo::FILEVERSION, version);
res.set_version_info(VersionInfo::PRODUCTVERSION, version);
}
res.set("FileVersion", version);
res.set("ProductVersion", version);
}
if let Some(product_name) = &config.package.product_name {
res.set("ProductName", product_name);
res.set("FileDescription", product_name);
}
res.set_icon_with_id(&window_icon_path.display().to_string(), "32512");
res.compile().with_context(|| {
format!(
"failed to compile `{}` into a Windows Resource file during tauri-build",
window_icon_path.display()
)
})?;
} else {
return Err(anyhow!(format!(
"`{}` not found; required for generating a Windows Resource file during tauri-build",
window_icon_path.display()
)));
}
let target_env = std::env::var("CARGO_CFG_TARGET_ENV").unwrap();
match target_env.as_str() {
"gnu" => {
let target_arch = match std::env::var("CARGO_CFG_TARGET_ARCH").unwrap().as_str() {
"x86_64" => Some("x64"),
"x86" => Some("x86"),
"aarch64" => Some("arm64"),
          _ => None,
};
if let Some(target_arch) = target_arch {
for entry in std::fs::read_dir(target_dir.join("build"))? {
let path = entry?.path();
let webview2_loader_path = path
.join("out")
.join(target_arch)
.join("WebView2Loader.dll");
if path.to_string_lossy().contains("webview2-com-sys") && webview2_loader_path.exists()
{
std::fs::copy(webview2_loader_path, target_dir.join("WebView2Loader.dll"))?;
break;
}
}
}
}
"msvc" => {
if std::env::var("STATIC_VCRUNTIME").map_or(false, |v| v == "true") {
static_vcruntime::build();
}
}
_ => (),
}
}
Ok(())
}
#[derive(Debug, Default, PartialEq, Eq)]
struct Diff {
remove: Vec<String>,
add: Vec<String>,
}
fn features_diff(current: &[String], expected: &[String]) -> Diff {
let mut remove = Vec::new();
let mut add = Vec::new();
for feature in current {
if !expected.contains(feature) {
remove.push(feature.clone());
}
}
for feature in expected {
if !current.contains(feature) {
add.push(feature.clone());
}
}
Diff { remove, add }
}
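// Worked example (hedged): features_diff with current = ["a", "b"] and
// expected = ["a", "c"] yields Diff { remove: ["b"], add: ["c"] };
// the `array_diff` test below asserts exactly this case.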
fn check_features(config: &Config, dependency: Dependency, is_tauri_build: bool) -> String {
use tauri_utils::config::{PatternKind, TauriConfig};
let features = match dependency {
Dependency::Simple(_) => Vec::new(),
Dependency::Detailed(dep) => dep.features,
Dependency::Inherited(dep) => dep.features,
};
let all_cli_managed_features = if is_tauri_build {
vec!["isolation"]
} else {
TauriConfig::all_features()
};
let expected = if is_tauri_build {
match config.tauri.pattern {
PatternKind::Isolation { .. } => vec!["isolation".to_string()],
_ => vec![],
}
} else {
config
.tauri
.features()
.into_iter()
.map(|f| f.to_string())
.collect::<Vec<String>>()
};
let diff = features_diff(
&features
.into_iter()
.filter(|f| all_cli_managed_features.contains(&f.as_str()))
.collect::<Vec<String>>(),
&expected,
);
let mut error_message = String::new();
if !diff.remove.is_empty() {
error_message.push_str("remove the `");
error_message.push_str(&diff.remove.join(", "));
error_message.push_str(if diff.remove.len() == 1 {
"` feature"
} else {
"` features"
});
if !diff.add.is_empty() {
error_message.push_str(" and ");
}
}
if !diff.add.is_empty() {
error_message.push_str("add the `");
error_message.push_str(&diff.add.join(", "));
error_message.push_str(if diff.add.len() == 1 {
"` feature"
} else {
"` features"
});
}
error_message
}
#[derive(serde::Deserialize)]
struct CargoMetadata {
workspace_root: PathBuf,
}
fn get_workspace_dir() -> Result<PathBuf> {
let output = std::process::Command::new("cargo")
.args(["metadata", "--no-deps", "--format-version", "1"])
.output()?;
if !output.status.success() {
return Err(anyhow::anyhow!(
"cargo metadata command exited with a non zero exit code: {}",
String::from_utf8(output.stderr)?
));
}
Ok(serde_json::from_slice::<CargoMetadata>(&output.stdout)?.workspace_root)
}
#[cfg(test)]
mod tests {
use super::Diff;
#[test]
fn array_diff() {
for (current, expected, result) in [
(vec![], vec![], Default::default()),
(
vec!["a".into()],
vec![],
Diff {
remove: vec!["a".into()],
add: vec![],
},
),
(vec!["a".into()], vec!["a".into()], Default::default()),
(
vec!["a".into(), "b".into()],
vec!["a".into()],
Diff {
remove: vec!["b".into()],
add: vec![],
},
),
(
vec!["a".into(), "b".into()],
vec!["a".into(), "c".into()],
Diff {
remove: vec!["b".into()],
add: vec!["c".into()],
},
),
] {
      assert_eq!(super::features_diff(&current, &expected), result);
}
}
}
| {
Self::default()
} | identifier_body |
lib.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
#![cfg_attr(doc_cfg, feature(doc_cfg))]
pub use anyhow::Result;
use cargo_toml::{Dependency, Manifest};
use heck::AsShoutySnakeCase;
use tauri_utils::{
config::Config,
resources::{external_binaries, resource_relpath, ResourcePaths},
};
use std::path::{Path, PathBuf};
#[cfg(feature = "codegen")]
mod codegen;
mod static_vcruntime;
#[cfg(feature = "codegen")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "codegen")))]
pub use codegen::context::CodegenContext;
fn copy_file(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
if !from.exists() {
return Err(anyhow::anyhow!("{:?} does not exist", from));
}
if !from.is_file() {
return Err(anyhow::anyhow!("{:?} is not a file", from));
}
  let dest_dir = to.parent().expect("destination path has no parent");
std::fs::create_dir_all(dest_dir)?;
std::fs::copy(from, to)?;
Ok(())
}
fn copy_binaries(
binaries: ResourcePaths,
target_triple: &str,
path: &Path,
package_name: Option<&String>,
) -> Result<()> {
for src in binaries {
let src = src?;
println!("cargo:rerun-if-changed={}", src.display());
let file_name = src
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.replace(&format!("-{target_triple}"), "");
if package_name.map_or(false, |n| n == &file_name) {
return Err(anyhow::anyhow!(
"Cannot define a sidecar with the same name as the Cargo package name `{}`. Please change the sidecar name in the filesystem and the Tauri configuration.",
file_name
));
}
let dest = path.join(file_name);
if dest.exists() {
std::fs::remove_file(&dest).unwrap();
}
copy_file(&src, &dest)?;
}
Ok(())
}
/// Copies resources to a path.
fn copy_resources(resources: ResourcePaths<'_>, path: &Path) -> Result<()> {
for src in resources {
let src = src?;
println!("cargo:rerun-if-changed={}", src.display());
let dest = path.join(resource_relpath(&src));
copy_file(&src, dest)?;
}
Ok(())
}
// checks if the given Cargo feature is enabled.
fn has_feature(feature: &str) -> bool {
  // when a feature is enabled, Cargo sets the `CARGO_FEATURE_<name>` env var to 1
// https://doc.rust-lang.org/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-build-scripts
std::env::var(format!("CARGO_FEATURE_{}", AsShoutySnakeCase(feature)))
.map(|x| x == "1")
.unwrap_or(false)
}
// creates a cfg alias if `has_feature` is true.
// `alias` must be a snake case string.
fn cfg_alias(alias: &str, has_feature: bool) {
if has_feature {
println!("cargo:rustc-cfg={alias}");
}
}
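// Hedged example: building with `--features custom-protocol` makes Cargo
// export CARGO_FEATURE_CUSTOM_PROTOCOL=1, so `has_feature("custom-protocol")`
// returns true and the "dev" cfg alias set below stays disabled.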
/// Attributes used on Windows.
#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct WindowsAttributes {
window_icon_path: Option<PathBuf>,
/// The path to the sdk location.
///
  /// For the GNU toolkit this has to be the path where MinGW puts windres.exe and ar.exe.
/// This could be something like: "C:\Program Files\mingw-w64\x86_64-5.3.0-win32-seh-rt_v4-rev0\mingw64\bin"
///
/// For MSVC the Windows SDK has to be installed. It comes with the resource compiler rc.exe.
/// This should be set to the root directory of the Windows SDK, e.g., "C:\Program Files (x86)\Windows Kits\10" or,
  /// if multiple Windows 10 SDK versions are installed, set it directly to the correct bin directory "C:\Program Files (x86)\Windows Kits\10\bin\10.0.14393.0\x64"
///
/// If it is left unset, it will look up a path in the registry, i.e. HKLM\SOFTWARE\Microsoft\Windows Kits\Installed Roots
sdk_dir: Option<PathBuf>,
/// A string containing an [application manifest] to be included with the application on Windows.
///
/// Defaults to:
/// ```ignore
#[doc = include_str!("window-app-manifest.xml")]
/// ```
///
/// [application manifest]: https://learn.microsoft.com/en-us/windows/win32/sbscs/application-manifests
app_manifest: Option<String>,
}
impl WindowsAttributes {
/// Creates the default attribute set.
pub fn new() -> Self {
Self::default()
}
/// Sets the icon to use on the window. Currently only used on Windows.
/// It must be in `ico` format. Defaults to `icons/icon.ico`.
#[must_use]
pub fn window_icon_path<P: AsRef<Path>>(mut self, window_icon_path: P) -> Self {
self
.window_icon_path
.replace(window_icon_path.as_ref().into());
self
}
/// Sets the sdk dir for windows. Currently only used on Windows. This must be a valid UTF-8
/// path. Defaults to whatever the `winres` crate determines is best.
#[must_use]
pub fn sdk_dir<P: AsRef<Path>>(mut self, sdk_dir: P) -> Self {
self.sdk_dir = Some(sdk_dir.as_ref().into());
self
}
/// Sets the Windows app [manifest].
///
/// # Example
///
/// The following manifest will brand the exe as requesting administrator privileges.
  /// Thus, every time it is executed, a Windows UAC dialog will appear.
///
/// Note that you can move the manifest contents to a separate file and use `include_str!("manifest.xml")`
/// instead of the inline string.
///
/// ```rust,no_run
/// let mut windows = tauri_build::WindowsAttributes::new();
/// windows = windows.app_manifest(r#"
/// <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
/// <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
/// <security>
/// <requestedPrivileges>
/// <requestedExecutionLevel level="requireAdministrator" uiAccess="false" />
/// </requestedPrivileges>
/// </security>
/// </trustInfo>
/// </assembly>
/// "#);
/// tauri_build::try_build(
/// tauri_build::Attributes::new().windows_attributes(windows)
/// ).expect("failed to run build script");
/// ```
///
/// Defaults to:
/// ```ignore
#[doc = include_str!("window-app-manifest.xml")]
  /// ```
  ///
  /// [manifest]: https://learn.microsoft.com/en-us/windows/win32/sbscs/application-manifests
#[must_use]
pub fn app_manifest<S: AsRef<str>>(mut self, manifest: S) -> Self {
self.app_manifest = Some(manifest.as_ref().to_string());
self
}
}
/// The attributes used on the build.
#[derive(Debug, Default)]
pub struct Attributes {
#[allow(dead_code)]
windows_attributes: WindowsAttributes,
}
impl Attributes {
/// Creates the default attribute set.
pub fn new() -> Self {
Self::default()
}
  /// Sets the [`WindowsAttributes`] used for the build (window icon, SDK dir, app manifest). Currently only used on Windows.
#[must_use]
pub fn | (mut self, windows_attributes: WindowsAttributes) -> Self {
self.windows_attributes = windows_attributes;
self
}
}
/// Run all build time helpers for your Tauri Application.
///
/// The current helpers include the following:
/// * Generates a Windows Resource file when targeting Windows.
///
/// # Platforms
///
/// [`build()`] should be called inside of `build.rs` regardless of the platform:
/// * New helpers may target more platforms in the future.
/// * Platform specific code is handled by the helpers automatically.
/// * A build script is required in order to activate some Cargo environment variables that are
///   used when generating code and embedding assets, so [`build()`] may as well be called.
///
/// In short: don't put the call to [`build()`] behind a `#[cfg(windows)]`.
///
/// # Panics
///
/// If any of the build time helpers fail, they will [`std::panic!`] with the related error message.
/// This is typically desirable when running inside a build script; see [`try_build`] for no panics.
pub fn build() {
if let Err(error) = try_build(Attributes::default()) {
let error = format!("{error:#}");
println!("{error}");
if error.starts_with("unknown field") {
print!("found an unknown configuration field. This usually means that you are using a CLI version that is newer than `tauri-build` and is incompatible. ");
println!(
"Please try updating the Rust crates by running `cargo update` in the Tauri app folder."
);
}
std::process::exit(1);
}
}
/// Non-panicking [`build()`].
#[allow(unused_variables)]
pub fn try_build(attributes: Attributes) -> Result<()> {
use anyhow::anyhow;
println!("cargo:rerun-if-env-changed=TAURI_CONFIG");
println!("cargo:rerun-if-changed=tauri.conf.json");
#[cfg(feature = "config-json5")]
println!("cargo:rerun-if-changed=tauri.conf.json5");
#[cfg(feature = "config-toml")]
println!("cargo:rerun-if-changed=Tauri.toml");
let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap();
let mobile = target_os == "ios" || target_os == "android";
cfg_alias("desktop", !mobile);
cfg_alias("mobile", mobile);
let mut config = serde_json::from_value(tauri_utils::config::parse::read_from(
std::env::current_dir().unwrap(),
)?)?;
if let Ok(env) = std::env::var("TAURI_CONFIG") {
let merge_config: serde_json::Value = serde_json::from_str(&env)?;
json_patch::merge(&mut config, &merge_config);
}
let config: Config = serde_json::from_value(config)?;
cfg_alias("dev", !has_feature("custom-protocol"));
let ws_path = get_workspace_dir()?;
let mut manifest =
Manifest::<cargo_toml::Value>::from_slice_with_metadata(&std::fs::read("Cargo.toml")?)?;
if let Ok(ws_manifest) = Manifest::from_path(ws_path.join("Cargo.toml")) {
Manifest::complete_from_path_and_workspace(
&mut manifest,
Path::new("Cargo.toml"),
Some((&ws_manifest, ws_path.as_path())),
)?;
} else {
Manifest::complete_from_path(&mut manifest, Path::new("Cargo.toml"))?;
}
if let Some(tauri_build) = manifest.build_dependencies.remove("tauri-build") {
let error_message = check_features(&config, tauri_build, true);
if !error_message.is_empty() {
return Err(anyhow!("
The `tauri-build` dependency features on the `Cargo.toml` file does not match the allowlist defined under `tauri.conf.json`.
Please run `tauri dev` or `tauri build` or {}.
", error_message));
}
}
if let Some(tauri) = manifest.dependencies.remove("tauri") {
let error_message = check_features(&config, tauri, false);
if !error_message.is_empty() {
return Err(anyhow!("
The `tauri` dependency features on the `Cargo.toml` file does not match the allowlist defined under `tauri.conf.json`.
Please run `tauri dev` or `tauri build` or {}.
", error_message));
}
}
let target_triple = std::env::var("TARGET").unwrap();
let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
// TODO: far from ideal, but there's no other way to get the target dir, see <https://github.com/rust-lang/cargo/issues/5457>
let target_dir = out_dir
.parent()
.unwrap()
.parent()
.unwrap()
.parent()
.unwrap();
if let Some(paths) = &config.tauri.bundle.external_bin {
copy_binaries(
ResourcePaths::new(external_binaries(paths, &target_triple).as_slice(), true),
&target_triple,
target_dir,
manifest.package.as_ref().map(|p| &p.name),
)?;
}
#[allow(unused_mut, clippy::redundant_clone)]
let mut resources = config.tauri.bundle.resources.clone().unwrap_or_default();
if target_triple.contains("windows") {
if let Some(fixed_webview2_runtime_path) =
&config.tauri.bundle.windows.webview_fixed_runtime_path
{
resources.push(fixed_webview2_runtime_path.display().to_string());
}
}
copy_resources(ResourcePaths::new(resources.as_slice(), true), target_dir)?;
if target_triple.contains("darwin") {
if let Some(version) = &config.tauri.bundle.macos.minimum_system_version {
println!("cargo:rustc-env=MACOSX_DEPLOYMENT_TARGET={}", version);
}
}
if target_triple.contains("windows") {
use anyhow::Context;
use semver::Version;
use tauri_winres::{VersionInfo, WindowsResource};
fn find_icon<F: Fn(&&String) -> bool>(config: &Config, predicate: F, default: &str) -> PathBuf {
let icon_path = config
.tauri
.bundle
.icon
.iter()
.find(|i| predicate(i))
.cloned()
.unwrap_or_else(|| default.to_string());
icon_path.into()
}
let window_icon_path = attributes
.windows_attributes
.window_icon_path
.unwrap_or_else(|| find_icon(&config, |i| i.ends_with(".ico"), "icons/icon.ico"));
if window_icon_path.exists() {
let mut res = WindowsResource::new();
if let Some(manifest) = attributes.windows_attributes.app_manifest {
res.set_manifest(&manifest);
} else {
res.set_manifest(include_str!("window-app-manifest.xml"));
}
if let Some(sdk_dir) = &attributes.windows_attributes.sdk_dir {
if let Some(sdk_dir_str) = sdk_dir.to_str() {
res.set_toolkit_path(sdk_dir_str);
} else {
return Err(anyhow!(
"sdk_dir path is not valid; only UTF-8 characters are allowed"
));
}
}
if let Some(version) = &config.package.version {
if let Ok(v) = Version::parse(version) {
let version = v.major << 48 | v.minor << 32 | v.patch << 16;
res.set_version_info(VersionInfo::FILEVERSION, version);
res.set_version_info(VersionInfo::PRODUCTVERSION, version);
}
res.set("FileVersion", version);
res.set("ProductVersion", version);
}
if let Some(product_name) = &config.package.product_name {
res.set("ProductName", product_name);
res.set("FileDescription", product_name);
}
res.set_icon_with_id(&window_icon_path.display().to_string(), "32512");
res.compile().with_context(|| {
format!(
"failed to compile `{}` into a Windows Resource file during tauri-build",
window_icon_path.display()
)
})?;
} else {
return Err(anyhow!(format!(
"`{}` not found; required for generating a Windows Resource file during tauri-build",
window_icon_path.display()
)));
}
let target_env = std::env::var("CARGO_CFG_TARGET_ENV").unwrap();
match target_env.as_str() {
"gnu" => {
let target_arch = match std::env::var("CARGO_CFG_TARGET_ARCH").unwrap().as_str() {
"x86_64" => Some("x64"),
"x86" => Some("x86"),
"aarch64" => Some("arm64"),
          _ => None,
};
if let Some(target_arch) = target_arch {
for entry in std::fs::read_dir(target_dir.join("build"))? {
let path = entry?.path();
let webview2_loader_path = path
.join("out")
.join(target_arch)
.join("WebView2Loader.dll");
if path.to_string_lossy().contains("webview2-com-sys") && webview2_loader_path.exists()
{
std::fs::copy(webview2_loader_path, target_dir.join("WebView2Loader.dll"))?;
break;
}
}
}
}
"msvc" => {
if std::env::var("STATIC_VCRUNTIME").map_or(false, |v| v == "true") {
static_vcruntime::build();
}
}
_ => (),
}
}
Ok(())
}
#[derive(Debug, Default, PartialEq, Eq)]
struct Diff {
remove: Vec<String>,
add: Vec<String>,
}
fn features_diff(current: &[String], expected: &[String]) -> Diff {
let mut remove = Vec::new();
let mut add = Vec::new();
for feature in current {
if !expected.contains(feature) {
remove.push(feature.clone());
}
}
for feature in expected {
if !current.contains(feature) {
add.push(feature.clone());
}
}
Diff { remove, add }
}
fn check_features(config: &Config, dependency: Dependency, is_tauri_build: bool) -> String {
use tauri_utils::config::{PatternKind, TauriConfig};
let features = match dependency {
Dependency::Simple(_) => Vec::new(),
Dependency::Detailed(dep) => dep.features,
Dependency::Inherited(dep) => dep.features,
};
let all_cli_managed_features = if is_tauri_build {
vec!["isolation"]
} else {
TauriConfig::all_features()
};
let expected = if is_tauri_build {
match config.tauri.pattern {
PatternKind::Isolation { .. } => vec!["isolation".to_string()],
_ => vec![],
}
} else {
config
.tauri
.features()
.into_iter()
.map(|f| f.to_string())
.collect::<Vec<String>>()
};
let diff = features_diff(
&features
.into_iter()
.filter(|f| all_cli_managed_features.contains(&f.as_str()))
.collect::<Vec<String>>(),
&expected,
);
let mut error_message = String::new();
if !diff.remove.is_empty() {
error_message.push_str("remove the `");
error_message.push_str(&diff.remove.join(", "));
error_message.push_str(if diff.remove.len() == 1 {
"` feature"
} else {
"` features"
});
if !diff.add.is_empty() {
error_message.push_str(" and ");
}
}
if !diff.add.is_empty() {
error_message.push_str("add the `");
error_message.push_str(&diff.add.join(", "));
error_message.push_str(if diff.add.len() == 1 {
"` feature"
} else {
"` features"
});
}
error_message
}
#[derive(serde::Deserialize)]
struct CargoMetadata {
workspace_root: PathBuf,
}
fn get_workspace_dir() -> Result<PathBuf> {
let output = std::process::Command::new("cargo")
.args(["metadata", "--no-deps", "--format-version", "1"])
.output()?;
if !output.status.success() {
return Err(anyhow::anyhow!(
"cargo metadata command exited with a non zero exit code: {}",
String::from_utf8(output.stderr)?
));
}
Ok(serde_json::from_slice::<CargoMetadata>(&output.stdout)?.workspace_root)
}
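// Hedged note: this shells out to `cargo metadata --no-deps --format-version 1`
// and reads only `workspace_root` from the JSON output; running the same
// command by hand is an easy way to debug a failure here.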
#[cfg(test)]
mod tests {
use super::Diff;
#[test]
fn array_diff() {
for (current, expected, result) in [
(vec![], vec![], Default::default()),
(
vec!["a".into()],
vec![],
Diff {
remove: vec!["a".into()],
add: vec![],
},
),
(vec!["a".into()], vec!["a".into()], Default::default()),
(
vec!["a".into(), "b".into()],
vec!["a".into()],
Diff {
remove: vec!["b".into()],
add: vec![],
},
),
(
vec!["a".into(), "b".into()],
vec!["a".into(), "c".into()],
Diff {
remove: vec!["b".into()],
add: vec!["c".into()],
},
),
] {
      assert_eq!(super::features_diff(&current, &expected), result);
}
}
}
| windows_attributes | identifier_name |
lib.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
#![cfg_attr(doc_cfg, feature(doc_cfg))]
pub use anyhow::Result;
use cargo_toml::{Dependency, Manifest};
use heck::AsShoutySnakeCase;
use tauri_utils::{
config::Config,
resources::{external_binaries, resource_relpath, ResourcePaths},
};
use std::path::{Path, PathBuf};
#[cfg(feature = "codegen")]
mod codegen;
mod static_vcruntime;
#[cfg(feature = "codegen")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "codegen")))]
pub use codegen::context::CodegenContext;
fn copy_file(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
if !from.exists() {
return Err(anyhow::anyhow!("{:?} does not exist", from));
}
if !from.is_file() {
return Err(anyhow::anyhow!("{:?} is not a file", from));
}
  let dest_dir = to.parent().expect("destination path has no parent");
std::fs::create_dir_all(dest_dir)?;
std::fs::copy(from, to)?;
Ok(())
}
fn copy_binaries(
binaries: ResourcePaths,
target_triple: &str,
path: &Path,
package_name: Option<&String>,
) -> Result<()> {
for src in binaries {
let src = src?;
println!("cargo:rerun-if-changed={}", src.display());
let file_name = src
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.replace(&format!("-{target_triple}"), "");
if package_name.map_or(false, |n| n == &file_name) {
return Err(anyhow::anyhow!(
"Cannot define a sidecar with the same name as the Cargo package name `{}`. Please change the sidecar name in the filesystem and the Tauri configuration.",
file_name
));
}
let dest = path.join(file_name);
if dest.exists() {
std::fs::remove_file(&dest).unwrap();
}
copy_file(&src, &dest)?;
}
Ok(())
}
/// Copies resources to a path.
fn copy_resources(resources: ResourcePaths<'_>, path: &Path) -> Result<()> {
for src in resources {
let src = src?;
println!("cargo:rerun-if-changed={}", src.display());
let dest = path.join(resource_relpath(&src));
copy_file(&src, dest)?;
}
Ok(())
}
// checks if the given Cargo feature is enabled.
fn has_feature(feature: &str) -> bool {
// when a feature is enabled, Cargo sets the `CARGO_FEATURE_<name>` env var to 1
// https://doc.rust-lang.org/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-build-scripts
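// e.g. with the `custom-protocol` feature enabled, Cargo exports
// `CARGO_FEATURE_CUSTOM_PROTOCOL=1`, so `has_feature("custom-protocol")` returns true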
std::env::var(format!("CARGO_FEATURE_{}", AsShoutySnakeCase(feature)))
.map(|x| x == "1")
.unwrap_or(false)
}
// creates a cfg alias if `has_feature` is true.
// `alias` must be a snake case string.
fn cfg_alias(alias: &str, has_feature: bool) {
if has_feature {
println!("cargo:rustc-cfg={alias}");
}
}
/// Attributes used on Windows.
#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct WindowsAttributes {
window_icon_path: Option<PathBuf>,
/// The path to the sdk location.
///
/// For the GNU toolkit this has to be the path where MinGW put windres.exe and ar.exe.
/// This could be something like: "C:\Program Files\mingw-w64\x86_64-5.3.0-win32-seh-rt_v4-rev0\mingw64\bin"
///
/// For MSVC the Windows SDK has to be installed. It comes with the resource compiler rc.exe.
/// This should be set to the root directory of the Windows SDK, e.g., "C:\Program Files (x86)\Windows Kits\10" or,
/// if multiple 10 versions are installed, set it directly to the correct bin directory "C:\Program Files (x86)\Windows Kits\10\bin\10.0.14393.0\x64"
///
/// If it is left unset, it will look up a path in the registry, i.e. HKLM\SOFTWARE\Microsoft\Windows Kits\Installed Roots
sdk_dir: Option<PathBuf>,
/// A string containing an [application manifest] to be included with the application on Windows.
///
/// Defaults to:
/// ```ignore
#[doc = include_str!("window-app-manifest.xml")]
/// ```
///
/// [application manifest]: https://learn.microsoft.com/en-us/windows/win32/sbscs/application-manifests
app_manifest: Option<String>,
}
impl WindowsAttributes {
/// Creates the default attribute set.
pub fn new() -> Self {
Self::default()
}
/// Sets the icon to use on the window. Currently only used on Windows.
/// It must be in `ico` format. Defaults to `icons/icon.ico`.
#[must_use]
pub fn window_icon_path<P: AsRef<Path>>(mut self, window_icon_path: P) -> Self {
self
.window_icon_path
.replace(window_icon_path.as_ref().into());
self
}
/// Sets the sdk dir for windows. Currently only used on Windows. This must be a valid UTF-8
/// path. Defaults to whatever the `winres` crate determines is best.
#[must_use]
pub fn sdk_dir<P: AsRef<Path>>(mut self, sdk_dir: P) -> Self {
self.sdk_dir = Some(sdk_dir.as_ref().into());
self
}
/// Sets the Windows app [manifest].
///
/// # Example
///
/// The following manifest will brand the exe as requesting administrator privileges.
/// Thus, every time it is executed, a Windows UAC dialog will appear.
///
/// Note that you can move the manifest contents to a separate file and use `include_str!("manifest.xml")`
/// instead of the inline string.
///
/// ```rust,no_run
/// let mut windows = tauri_build::WindowsAttributes::new();
/// windows = windows.app_manifest(r#"
/// <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
/// <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
/// <security>
/// <requestedPrivileges>
/// <requestedExecutionLevel level="requireAdministrator" uiAccess="false" />
/// </requestedPrivileges>
/// </security>
/// </trustInfo>
/// </assembly>
/// "#);
/// tauri_build::try_build(
/// tauri_build::Attributes::new().windows_attributes(windows)
/// ).expect("failed to run build script");
/// ```
///
/// Defaults to:
/// ```ignore
#[doc = include_str!("window-app-manifest.xml")]
/// ```
///
/// [manifest]: https://learn.microsoft.com/en-us/windows/win32/sbscs/application-manifests
#[must_use]
pub fn app_manifest<S: AsRef<str>>(mut self, manifest: S) -> Self {
self.app_manifest = Some(manifest.as_ref().to_string());
self
}
}
/// The attributes used on the build.
#[derive(Debug, Default)]
pub struct Attributes {
#[allow(dead_code)]
windows_attributes: WindowsAttributes,
}
impl Attributes {
/// Creates the default attribute set.
pub fn new() -> Self {
Self::default()
}
/// Sets the [`WindowsAttributes`] to use on the build. Currently only used on Windows.
#[must_use]
pub fn windows_attributes(mut self, windows_attributes: WindowsAttributes) -> Self {
self.windows_attributes = windows_attributes;
self
}
}
/// Run all build time helpers for your Tauri Application.
///
/// The current helpers include the following:
/// * Generates a Windows Resource file when targeting Windows.
///
/// # Platforms
///
/// [`build()`] should be called inside of `build.rs` regardless of the platform:
/// * New helpers may target more platforms in the future.
/// * Platform specific code is handled by the helpers automatically.
/// * A build script is required in order to activate some cargo environment variables that are
/// used when generating code and embedding assets - so [`build()`] may as well be called.
///
/// In short, this is saying don't put the call to [`build()`] behind a `#[cfg(windows)]`.
///
/// # Panics
///
/// If any of the build time helpers fail, they will [`std::panic!`] with the related error message.
/// This is typically desirable when running inside a build script; see [`try_build`] for no panics.
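///
/// # Example
///
/// A minimal `build.rs` sketch (assuming the default [`Attributes`]):
///
/// ```rust,no_run
/// fn main() {
///     tauri_build::build();
/// }
/// ```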
pub fn build() {
if let Err(error) = try_build(Attributes::default()) {
let error = format!("{error:#}");
println!("{error}");
if error.starts_with("unknown field") {
print!("found an unknown configuration field. This usually means that you are using a CLI version that is newer than `tauri-build` and is incompatible. ");
println!(
"Please try updating the Rust crates by running `cargo update` in the Tauri app folder."
);
}
std::process::exit(1);
}
}
/// Non-panicking [`build()`].
#[allow(unused_variables)]
pub fn try_build(attributes: Attributes) -> Result<()> {
use anyhow::anyhow;
println!("cargo:rerun-if-env-changed=TAURI_CONFIG");
println!("cargo:rerun-if-changed=tauri.conf.json");
#[cfg(feature = "config-json5")]
println!("cargo:rerun-if-changed=tauri.conf.json5");
#[cfg(feature = "config-toml")]
println!("cargo:rerun-if-changed=Tauri.toml");
let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap();
let mobile = target_os == "ios" || target_os == "android";
cfg_alias("desktop", !mobile);
cfg_alias("mobile", mobile);
let mut config = serde_json::from_value(tauri_utils::config::parse::read_from(
std::env::current_dir().unwrap(),
)?)?;
if let Ok(env) = std::env::var("TAURI_CONFIG") {
let merge_config: serde_json::Value = serde_json::from_str(&env)?;
json_patch::merge(&mut config, &merge_config);
}
let config: Config = serde_json::from_value(config)?;
cfg_alias("dev", !has_feature("custom-protocol"));
let ws_path = get_workspace_dir()?;
let mut manifest =
Manifest::<cargo_toml::Value>::from_slice_with_metadata(&std::fs::read("Cargo.toml")?)?;
if let Ok(ws_manifest) = Manifest::from_path(ws_path.join("Cargo.toml")) {
Manifest::complete_from_path_and_workspace(
&mut manifest,
Path::new("Cargo.toml"),
Some((&ws_manifest, ws_path.as_path())),
)?;
} else {
Manifest::complete_from_path(&mut manifest, Path::new("Cargo.toml"))?;
}
if let Some(tauri_build) = manifest.build_dependencies.remove("tauri-build") {
let error_message = check_features(&config, tauri_build, true);
if !error_message.is_empty() {
return Err(anyhow!("
The `tauri-build` dependency features in the `Cargo.toml` file do not match the allowlist defined under `tauri.conf.json`.
Please run `tauri dev` or `tauri build` or {}.
", error_message));
}
}
if let Some(tauri) = manifest.dependencies.remove("tauri") {
let error_message = check_features(&config, tauri, false);
if !error_message.is_empty() {
return Err(anyhow!("
The `tauri` dependency features in the `Cargo.toml` file do not match the allowlist defined under `tauri.conf.json`.
Please run `tauri dev` or `tauri build` or {}.
", error_message));
}
}
let target_triple = std::env::var("TARGET").unwrap();
let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
// TODO: far from ideal, but there's no other way to get the target dir, see <https://github.com/rust-lang/cargo/issues/5457>
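// e.g. (typical Cargo layout, an assumption) OUT_DIR = target/debug/build/<pkg>-<hash>/out,
// so three `parent()` calls land on target/debug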
let target_dir = out_dir
.parent()
.unwrap()
.parent()
.unwrap()
.parent()
.unwrap();
if let Some(paths) = &config.tauri.bundle.external_bin {
copy_binaries(
ResourcePaths::new(external_binaries(paths, &target_triple).as_slice(), true),
&target_triple,
target_dir,
manifest.package.as_ref().map(|p| &p.name),
)?;
}
#[allow(unused_mut, clippy::redundant_clone)]
let mut resources = config.tauri.bundle.resources.clone().unwrap_or_default();
if target_triple.contains("windows") {
if let Some(fixed_webview2_runtime_path) =
&config.tauri.bundle.windows.webview_fixed_runtime_path
{
resources.push(fixed_webview2_runtime_path.display().to_string());
}
}
copy_resources(ResourcePaths::new(resources.as_slice(), true), target_dir)?;
if target_triple.contains("darwin") {
if let Some(version) = &config.tauri.bundle.macos.minimum_system_version {
println!("cargo:rustc-env=MACOSX_DEPLOYMENT_TARGET={}", version);
}
}
if target_triple.contains("windows") {
use anyhow::Context;
use semver::Version;
use tauri_winres::{VersionInfo, WindowsResource};
fn find_icon<F: Fn(&&String) -> bool>(config: &Config, predicate: F, default: &str) -> PathBuf {
let icon_path = config
.tauri
.bundle
.icon
.iter()
.find(|i| predicate(i))
.cloned()
.unwrap_or_else(|| default.to_string());
icon_path.into()
}
let window_icon_path = attributes
.windows_attributes
.window_icon_path
.unwrap_or_else(|| find_icon(&config, |i| i.ends_with(".ico"), "icons/icon.ico"));
if window_icon_path.exists() {
let mut res = WindowsResource::new();
if let Some(manifest) = attributes.windows_attributes.app_manifest {
res.set_manifest(&manifest);
} else {
res.set_manifest(include_str!("window-app-manifest.xml"));
}
if let Some(sdk_dir) = &attributes.windows_attributes.sdk_dir {
if let Some(sdk_dir_str) = sdk_dir.to_str() {
res.set_toolkit_path(sdk_dir_str);
} else {
return Err(anyhow!(
"sdk_dir path is not valid; only UTF-8 characters are allowed"
));
}
}
if let Some(version) = &config.package.version {
if let Ok(v) = Version::parse(version) {
let version = v.major << 48 | v.minor << 32 | v.patch << 16;
res.set_version_info(VersionInfo::FILEVERSION, version);
res.set_version_info(VersionInfo::PRODUCTVERSION, version);
}
res.set("FileVersion", version);
res.set("ProductVersion", version);
}
if let Some(product_name) = &config.package.product_name {
res.set("ProductName", product_name);
res.set("FileDescription", product_name);
}
res.set_icon_with_id(&window_icon_path.display().to_string(), "32512");
res.compile().with_context(|| {
format!(
"failed to compile `{}` into a Windows Resource file during tauri-build",
window_icon_path.display()
)
})?;
} else {
return Err(anyhow!(format!(
"`{}` not found; required for generating a Windows Resource file during tauri-build",
window_icon_path.display()
)));
}
let target_env = std::env::var("CARGO_CFG_TARGET_ENV").unwrap();
match target_env.as_str() {
"gnu" => {
let target_arch = match std::env::var("CARGO_CFG_TARGET_ARCH").unwrap().as_str() {
"x86_64" => Some("x64"),
"x86" => Some("x86"),
"aarch64" => Some("arm64"),
_ => None,
};
if let Some(target_arch) = target_arch {
for entry in std::fs::read_dir(target_dir.join("build"))? {
let path = entry?.path();
let webview2_loader_path = path
.join("out")
.join(target_arch)
.join("WebView2Loader.dll");
if path.to_string_lossy().contains("webview2-com-sys") && webview2_loader_path.exists()
{
std::fs::copy(webview2_loader_path, target_dir.join("WebView2Loader.dll"))?;
break;
}
}
}
}
"msvc" => {
if std::env::var("STATIC_VCRUNTIME").map_or(false, |v| v == "true") {
static_vcruntime::build();
}
}
_ => (),
}
}
Ok(())
}
#[derive(Debug, Default, PartialEq, Eq)]
struct Diff {
remove: Vec<String>,
add: Vec<String>,
}
fn features_diff(current: &[String], expected: &[String]) -> Diff {
let mut remove = Vec::new();
let mut add = Vec::new();
for feature in current {
if !expected.contains(feature) {
remove.push(feature.clone());
}
}
for feature in expected {
if !current.contains(feature) {
add.push(feature.clone());
}
}
Diff { remove, add }
}
fn check_features(config: &Config, dependency: Dependency, is_tauri_build: bool) -> String {
use tauri_utils::config::{PatternKind, TauriConfig};
let features = match dependency {
Dependency::Simple(_) => Vec::new(),
Dependency::Detailed(dep) => dep.features,
Dependency::Inherited(dep) => dep.features,
};
let all_cli_managed_features = if is_tauri_build {
vec!["isolation"]
} else {
TauriConfig::all_features()
};
let expected = if is_tauri_build {
match config.tauri.pattern {
PatternKind::Isolation { .. } => vec!["isolation".to_string()],
_ => vec![],
}
} else | ;
let diff = features_diff(
&features
.into_iter()
.filter(|f| all_cli_managed_features.contains(&f.as_str()))
.collect::<Vec<String>>(),
&expected,
);
let mut error_message = String::new();
if !diff.remove.is_empty() {
error_message.push_str("remove the `");
error_message.push_str(&diff.remove.join(", "));
error_message.push_str(if diff.remove.len() == 1 {
"` feature"
} else {
"` features"
});
if !diff.add.is_empty() {
error_message.push_str(" and ");
}
}
if !diff.add.is_empty() {
error_message.push_str("add the `");
error_message.push_str(&diff.add.join(", "));
error_message.push_str(if diff.add.len() == 1 {
"` feature"
} else {
"` features"
});
}
error_message
}
#[derive(serde::Deserialize)]
struct CargoMetadata {
workspace_root: PathBuf,
}
fn get_workspace_dir() -> Result<PathBuf> {
let output = std::process::Command::new("cargo")
.args(["metadata", "--no-deps", "--format-version", "1"])
.output()?;
if !output.status.success() {
return Err(anyhow::anyhow!(
"cargo metadata command exited with a non-zero exit code: {}",
String::from_utf8(output.stderr)?
));
}
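// `cargo metadata --format-version 1` prints a JSON document roughly like
// {"workspace_root":"/path/to/workspace", ...}; `CargoMetadata` deserializes
// only the `workspace_root` field and ignores the rest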
Ok(serde_json::from_slice::<CargoMetadata>(&output.stdout)?.workspace_root)
}
#[cfg(test)]
mod tests {
use super::Diff;
#[test]
fn array_diff() {
for (current, expected, result) in [
(vec![], vec![], Default::default()),
(
vec!["a".into()],
vec![],
Diff {
remove: vec!["a".into()],
add: vec![],
},
),
(vec!["a".into()], vec!["a".into()], Default::default()),
(
vec!["a".into(), "b".into()],
vec!["a".into()],
Diff {
remove: vec!["b".into()],
add: vec![],
},
),
(
vec!["a".into(), "b".into()],
vec!["a".into(), "c".into()],
Diff {
remove: vec!["b".into()],
add: vec!["c".into()],
},
),
] {
assert_eq!(super::features_diff(&current, &expected), result);
}
}
}
| {
config
.tauri
.features()
.into_iter()
.map(|f| f.to_string())
.collect::<Vec<String>>()
} | conditional_block |
lib.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
#![cfg_attr(doc_cfg, feature(doc_cfg))]
pub use anyhow::Result;
use cargo_toml::{Dependency, Manifest};
use heck::AsShoutySnakeCase;
use tauri_utils::{
config::Config,
resources::{external_binaries, resource_relpath, ResourcePaths},
};
use std::path::{Path, PathBuf};
#[cfg(feature = "codegen")]
mod codegen;
mod static_vcruntime;
#[cfg(feature = "codegen")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "codegen")))]
pub use codegen::context::CodegenContext;
fn copy_file(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
if !from.exists() {
return Err(anyhow::anyhow!("{:?} does not exist", from));
}
if !from.is_file() {
return Err(anyhow::anyhow!("{:?} is not a file", from));
}
let dest_dir = to.parent().expect("destination path has no parent directory");
std::fs::create_dir_all(dest_dir)?;
std::fs::copy(from, to)?;
Ok(())
}
fn copy_binaries(
binaries: ResourcePaths,
target_triple: &str,
path: &Path,
package_name: Option<&String>,
) -> Result<()> {
for src in binaries {
let src = src?;
println!("cargo:rerun-if-changed={}", src.display());
let file_name = src
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.replace(&format!("-{target_triple}"), "");
if package_name.map_or(false, |n| n == &file_name) {
return Err(anyhow::anyhow!(
"Cannot define a sidecar with the same name as the Cargo package name `{}`. Please change the sidecar name in the filesystem and the Tauri configuration.",
file_name
));
}
let dest = path.join(file_name);
if dest.exists() {
std::fs::remove_file(&dest).unwrap();
}
copy_file(&src, &dest)?;
}
Ok(())
}
/// Copies resources to a path.
fn copy_resources(resources: ResourcePaths<'_>, path: &Path) -> Result<()> {
for src in resources {
let src = src?;
println!("cargo:rerun-if-changed={}", src.display());
let dest = path.join(resource_relpath(&src));
copy_file(&src, dest)?;
}
Ok(())
}
// checks if the given Cargo feature is enabled.
fn has_feature(feature: &str) -> bool {
// when a feature is enabled, Cargo sets the `CARGO_FEATURE_<name>` env var to 1
// https://doc.rust-lang.org/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-build-scripts
std::env::var(format!("CARGO_FEATURE_{}", AsShoutySnakeCase(feature)))
.map(|x| x == "1")
.unwrap_or(false)
}
// creates a cfg alias if `has_feature` is true.
// `alias` must be a snake case string.
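// e.g. `cfg_alias("dev", true)` prints `cargo:rustc-cfg=dev`, which lets downstream
// code gate on `#[cfg(dev)]`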
fn cfg_alias(alias: &str, has_feature: bool) {
if has_feature {
println!("cargo:rustc-cfg={alias}");
}
}
/// Attributes used on Windows.
#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct WindowsAttributes {
window_icon_path: Option<PathBuf>,
/// The path to the sdk location.
///
/// For the GNU toolkit this has to be the path where MinGW put windres.exe and ar.exe.
/// This could be something like: "C:\Program Files\mingw-w64\x86_64-5.3.0-win32-seh-rt_v4-rev0\mingw64\bin"
///
/// For MSVC the Windows SDK has to be installed. It comes with the resource compiler rc.exe.
/// This should be set to the root directory of the Windows SDK, e.g., "C:\Program Files (x86)\Windows Kits\10" or,
/// if multiple 10 versions are installed, set it directly to the correct bin directory "C:\Program Files (x86)\Windows Kits\10\bin\10.0.14393.0\x64"
///
/// If it is left unset, it will look up a path in the registry, i.e. HKLM\SOFTWARE\Microsoft\Windows Kits\Installed Roots
sdk_dir: Option<PathBuf>,
/// A string containing an [application manifest] to be included with the application on Windows.
///
/// Defaults to:
/// ```ignore
#[doc = include_str!("window-app-manifest.xml")]
/// ```
///
/// [application manifest]: https://learn.microsoft.com/en-us/windows/win32/sbscs/application-manifests
app_manifest: Option<String>,
}
impl WindowsAttributes {
/// Creates the default attribute set.
pub fn new() -> Self {
Self::default()
}
/// Sets the icon to use on the window. Currently only used on Windows.
/// It must be in `ico` format. Defaults to `icons/icon.ico`.
#[must_use]
pub fn window_icon_path<P: AsRef<Path>>(mut self, window_icon_path: P) -> Self {
self
.window_icon_path
.replace(window_icon_path.as_ref().into());
self
}
/// Sets the sdk dir for windows. Currently only used on Windows. This must be a valid UTF-8
/// path. Defaults to whatever the `winres` crate determines is best.
#[must_use]
pub fn sdk_dir<P: AsRef<Path>>(mut self, sdk_dir: P) -> Self {
self.sdk_dir = Some(sdk_dir.as_ref().into());
self
}
/// Sets the Windows app [manifest].
///
/// # Example
///
/// The following manifest will brand the exe as requesting administrator privileges.
/// Thus, every time it is executed, a Windows UAC dialog will appear.
///
/// Note that you can move the manifest contents to a separate file and use `include_str!("manifest.xml")`
/// instead of the inline string.
///
/// ```rust,no_run
/// let mut windows = tauri_build::WindowsAttributes::new();
/// windows = windows.app_manifest(r#"
/// <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
/// <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
/// <security>
/// <requestedPrivileges>
/// <requestedExecutionLevel level="requireAdministrator" uiAccess="false" />
/// </requestedPrivileges>
/// </security>
/// </trustInfo>
/// </assembly>
/// "#);
/// tauri_build::try_build(
/// tauri_build::Attributes::new().windows_attributes(windows)
/// ).expect("failed to run build script");
/// ```
///
/// Defaults to:
/// ```ignore
#[doc = include_str!("window-app-manifest.xml")]
/// ```
///
/// [manifest]: https://learn.microsoft.com/en-us/windows/win32/sbscs/application-manifests
#[must_use]
pub fn app_manifest<S: AsRef<str>>(mut self, manifest: S) -> Self {
self.app_manifest = Some(manifest.as_ref().to_string());
self
}
}
/// The attributes used on the build.
#[derive(Debug, Default)]
pub struct Attributes {
#[allow(dead_code)]
windows_attributes: WindowsAttributes,
}
impl Attributes {
/// Creates the default attribute set.
pub fn new() -> Self {
Self::default()
}
/// Sets the [`WindowsAttributes`] to use on the build. Currently only used on Windows.
#[must_use]
pub fn windows_attributes(mut self, windows_attributes: WindowsAttributes) -> Self {
self.windows_attributes = windows_attributes;
self
}
}
/// Run all build time helpers for your Tauri Application.
///
/// The current helpers include the following:
/// * Generates a Windows Resource file when targeting Windows.
///
/// # Platforms
///
/// [`build()`] should be called inside of `build.rs` regardless of the platform:
/// * New helpers may target more platforms in the future.
/// * Platform specific code is handled by the helpers automatically.
/// * A build script is required in order to activate some cargo environment variables that are
/// used when generating code and embedding assets - so [`build()`] may as well be called.
///
/// In short, this is saying don't put the call to [`build()`] behind a `#[cfg(windows)]`.
///
/// # Panics
///
/// If any of the build time helpers fail, they will [`std::panic!`] with the related error message.
/// This is typically desirable when running inside a build script; see [`try_build`] for no panics.
pub fn build() {
if let Err(error) = try_build(Attributes::default()) {
let error = format!("{error:#}");
println!("{error}");
if error.starts_with("unknown field") {
print!("found an unknown configuration field. This usually means that you are using a CLI version that is newer than `tauri-build` and is incompatible. ");
println!(
"Please try updating the Rust crates by running `cargo update` in the Tauri app folder."
);
}
std::process::exit(1);
}
}
/// Non-panicking [`build()`].
#[allow(unused_variables)]
pub fn try_build(attributes: Attributes) -> Result<()> {
use anyhow::anyhow;
println!("cargo:rerun-if-env-changed=TAURI_CONFIG");
println!("cargo:rerun-if-changed=tauri.conf.json");
#[cfg(feature = "config-json5")]
println!("cargo:rerun-if-changed=tauri.conf.json5");
#[cfg(feature = "config-toml")]
println!("cargo:rerun-if-changed=Tauri.toml");
let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap();
let mobile = target_os == "ios" || target_os == "android";
cfg_alias("desktop", !mobile);
cfg_alias("mobile", mobile);
let mut config = serde_json::from_value(tauri_utils::config::parse::read_from(
std::env::current_dir().unwrap(),
)?)?;
if let Ok(env) = std::env::var("TAURI_CONFIG") {
let merge_config: serde_json::Value = serde_json::from_str(&env)?;
json_patch::merge(&mut config, &merge_config);
}
let config: Config = serde_json::from_value(config)?;
cfg_alias("dev", !has_feature("custom-protocol"));
let ws_path = get_workspace_dir()?;
let mut manifest =
Manifest::<cargo_toml::Value>::from_slice_with_metadata(&std::fs::read("Cargo.toml")?)?;
if let Ok(ws_manifest) = Manifest::from_path(ws_path.join("Cargo.toml")) {
Manifest::complete_from_path_and_workspace(
&mut manifest,
Path::new("Cargo.toml"),
Some((&ws_manifest, ws_path.as_path())),
)?;
} else {
Manifest::complete_from_path(&mut manifest, Path::new("Cargo.toml"))?;
}
if let Some(tauri_build) = manifest.build_dependencies.remove("tauri-build") {
let error_message = check_features(&config, tauri_build, true);
if !error_message.is_empty() {
return Err(anyhow!("
The `tauri-build` dependency features in the `Cargo.toml` file do not match the allowlist defined under `tauri.conf.json`.
Please run `tauri dev` or `tauri build` or {}.
", error_message));
}
}
if let Some(tauri) = manifest.dependencies.remove("tauri") {
let error_message = check_features(&config, tauri, false);
if !error_message.is_empty() {
return Err(anyhow!("
The `tauri` dependency features in the `Cargo.toml` file do not match the allowlist defined under `tauri.conf.json`.
Please run `tauri dev` or `tauri build` or {}.
", error_message));
}
}
let target_triple = std::env::var("TARGET").unwrap();
let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
// TODO: far from ideal, but there's no other way to get the target dir, see <https://github.com/rust-lang/cargo/issues/5457>
let target_dir = out_dir
.parent()
.unwrap()
.parent()
.unwrap()
.parent()
.unwrap();
if let Some(paths) = &config.tauri.bundle.external_bin {
copy_binaries(
ResourcePaths::new(external_binaries(paths, &target_triple).as_slice(), true),
&target_triple,
target_dir,
manifest.package.as_ref().map(|p| &p.name),
)?;
}
#[allow(unused_mut, clippy::redundant_clone)]
let mut resources = config.tauri.bundle.resources.clone().unwrap_or_default();
if target_triple.contains("windows") {
if let Some(fixed_webview2_runtime_path) =
&config.tauri.bundle.windows.webview_fixed_runtime_path
{
resources.push(fixed_webview2_runtime_path.display().to_string());
}
}
copy_resources(ResourcePaths::new(resources.as_slice(), true), target_dir)?;
if target_triple.contains("darwin") {
if let Some(version) = &config.tauri.bundle.macos.minimum_system_version {
println!("cargo:rustc-env=MACOSX_DEPLOYMENT_TARGET={}", version);
}
}
if target_triple.contains("windows") {
use anyhow::Context;
use semver::Version;
use tauri_winres::{VersionInfo, WindowsResource};
fn find_icon<F: Fn(&&String) -> bool>(config: &Config, predicate: F, default: &str) -> PathBuf {
let icon_path = config | .icon
.iter()
.find(|i| predicate(i))
.cloned()
.unwrap_or_else(|| default.to_string());
icon_path.into()
}
let window_icon_path = attributes
.windows_attributes
.window_icon_path
.unwrap_or_else(|| find_icon(&config, |i| i.ends_with(".ico"), "icons/icon.ico"));
if window_icon_path.exists() {
let mut res = WindowsResource::new();
if let Some(manifest) = attributes.windows_attributes.app_manifest {
res.set_manifest(&manifest);
} else {
res.set_manifest(include_str!("window-app-manifest.xml"));
}
if let Some(sdk_dir) = &attributes.windows_attributes.sdk_dir {
if let Some(sdk_dir_str) = sdk_dir.to_str() {
res.set_toolkit_path(sdk_dir_str);
} else {
return Err(anyhow!(
"sdk_dir path is not valid; only UTF-8 characters are allowed"
));
}
}
if let Some(version) = &config.package.version {
if let Ok(v) = Version::parse(version) {
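// pack the semver into the 64-bit Windows FILEVERSION layout (major.minor.patch.0);
// e.g. "1.2.3" becomes 0x0001_0002_0003_0000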
let version = v.major << 48 | v.minor << 32 | v.patch << 16;
res.set_version_info(VersionInfo::FILEVERSION, version);
res.set_version_info(VersionInfo::PRODUCTVERSION, version);
}
res.set("FileVersion", version);
res.set("ProductVersion", version);
}
if let Some(product_name) = &config.package.product_name {
res.set("ProductName", product_name);
res.set("FileDescription", product_name);
}
res.set_icon_with_id(&window_icon_path.display().to_string(), "32512");
res.compile().with_context(|| {
format!(
"failed to compile `{}` into a Windows Resource file during tauri-build",
window_icon_path.display()
)
})?;
} else {
return Err(anyhow!(format!(
"`{}` not found; required for generating a Windows Resource file during tauri-build",
window_icon_path.display()
)));
}
let target_env = std::env::var("CARGO_CFG_TARGET_ENV").unwrap();
match target_env.as_str() {
"gnu" => {
let target_arch = match std::env::var("CARGO_CFG_TARGET_ARCH").unwrap().as_str() {
"x86_64" => Some("x64"),
"x86" => Some("x86"),
"aarch64" => Some("arm64"),
_ => None,
};
if let Some(target_arch) = target_arch {
for entry in std::fs::read_dir(target_dir.join("build"))? {
let path = entry?.path();
let webview2_loader_path = path
.join("out")
.join(target_arch)
.join("WebView2Loader.dll");
if path.to_string_lossy().contains("webview2-com-sys") && webview2_loader_path.exists()
{
std::fs::copy(webview2_loader_path, target_dir.join("WebView2Loader.dll"))?;
break;
}
}
}
}
"msvc" => {
if std::env::var("STATIC_VCRUNTIME").map_or(false, |v| v == "true") {
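// opt-in static linking of the MSVC runtime, driven by an env var;
// e.g. build with STATIC_VCRUNTIME=true to trigger the helper below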
static_vcruntime::build();
}
}
_ => (),
}
}
Ok(())
}
#[derive(Debug, Default, PartialEq, Eq)]
struct Diff {
remove: Vec<String>,
add: Vec<String>,
}
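// Computes the edits needed to turn `current` into `expected`;
// e.g. current = ["a", "b"], expected = ["a", "c"] yields Diff { remove: ["b"], add: ["c"] }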
fn features_diff(current: &[String], expected: &[String]) -> Diff {
let mut remove = Vec::new();
let mut add = Vec::new();
for feature in current {
if !expected.contains(feature) {
remove.push(feature.clone());
}
}
for feature in expected {
if !current.contains(feature) {
add.push(feature.clone());
}
}
Diff { remove, add }
}
fn check_features(config: &Config, dependency: Dependency, is_tauri_build: bool) -> String {
use tauri_utils::config::{PatternKind, TauriConfig};
let features = match dependency {
Dependency::Simple(_) => Vec::new(),
Dependency::Detailed(dep) => dep.features,
Dependency::Inherited(dep) => dep.features,
};
let all_cli_managed_features = if is_tauri_build {
vec!["isolation"]
} else {
TauriConfig::all_features()
};
let expected = if is_tauri_build {
match config.tauri.pattern {
PatternKind::Isolation { .. } => vec!["isolation".to_string()],
_ => vec![],
}
} else {
config
.tauri
.features()
.into_iter()
.map(|f| f.to_string())
.collect::<Vec<String>>()
};
let diff = features_diff(
&features
.into_iter()
.filter(|f| all_cli_managed_features.contains(&f.as_str()))
.collect::<Vec<String>>(),
&expected,
);
let mut error_message = String::new();
if !diff.remove.is_empty() {
error_message.push_str("remove the `");
error_message.push_str(&diff.remove.join(", "));
error_message.push_str(if diff.remove.len() == 1 {
"` feature"
} else {
"` features"
});
if !diff.add.is_empty() {
error_message.push_str(" and ");
}
}
if !diff.add.is_empty() {
error_message.push_str("add the `");
error_message.push_str(&diff.add.join(", "));
error_message.push_str(if diff.add.len() == 1 {
"` feature"
} else {
"` features"
});
}
error_message
}
#[derive(serde::Deserialize)]
struct CargoMetadata {
workspace_root: PathBuf,
}
fn get_workspace_dir() -> Result<PathBuf> {
let output = std::process::Command::new("cargo")
.args(["metadata", "--no-deps", "--format-version", "1"])
.output()?;
if !output.status.success() {
return Err(anyhow::anyhow!(
"cargo metadata command exited with a non-zero exit code: {}",
String::from_utf8(output.stderr)?
));
}
Ok(serde_json::from_slice::<CargoMetadata>(&output.stdout)?.workspace_root)
}
#[cfg(test)]
mod tests {
use super::Diff;
#[test]
fn array_diff() {
for (current, expected, result) in [
(vec![], vec![], Default::default()),
(
vec!["a".into()],
vec![],
Diff {
remove: vec!["a".into()],
add: vec![],
},
),
(vec!["a".into()], vec!["a".into()], Default::default()),
(
vec!["a".into(), "b".into()],
vec!["a".into()],
Diff {
remove: vec!["b".into()],
add: vec![],
},
),
(
vec!["a".into(), "b".into()],
vec!["a".into(), "c".into()],
Diff {
remove: vec!["b".into()],
add: vec!["c".into()],
},
),
] {
assert_eq!(super::features_diff(&current, &expected), result);
}
}
} | .tauri
.bundle | random_line_split |
main.rs | use clap::{App, AppSettings, Arg, SubCommand};
use default_boxed::DefaultBoxed;
#[derive(DefaultBoxed)]
struct Outer<'a, 'b> {
inner: HeapApp<'a, 'b>,
}
struct HeapApp<'a, 'b> {
app: App<'a, 'b>,
}
impl<'a, 'b> Default for HeapApp<'a, 'b> {
fn default() -> Self {
let mut app = App::new("serviceusage1_beta1")
.setting(clap::AppSettings::ColoredHelp)
.author("Sebastian Thiel <[email protected]>")
.version("0.1.0-20210317")
.about("Enables services that service consumers want to use on Google Cloud Platform, lists the available or enabled services, or disables services that service consumers no longer use.")
.after_help("All documentation details can be found at <TODO figure out URL>")
.arg(Arg::with_name("scope")
.long("scope")
.help("Specify the scope in which the authentication method should be executed. Each scope requires the user to grant this application permission to use it. If unset, it defaults to the shortest scope url for a particular method.")
.multiple(true)
.takes_value(true))
.arg(Arg::with_name("folder")
.long("config-dir")
.help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation." )
.multiple(false)
.takes_value(true))
.arg(Arg::with_name("debug")
.long("debug")
.help("Provide more output to aid with debugging")
.multiple(false)
.takes_value(false));
let mut operations0 = SubCommand::with_name("operations")
.setting(AppSettings::ColoredHelp)
.about("methods: get and list");
{
let mcmd = SubCommand::with_name("get").about("Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.");
operations0 = operations0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists operations that match the specified filter in the request. If the server doesn\'t support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.");
operations0 = operations0.subcommand(mcmd);
}
let mut services0 = SubCommand::with_name("services")
.setting(AppSettings::ColoredHelp)
.about(
"methods: batch_enable, disable, enable, generate_service_identity, get and list",
);
{
let mcmd = SubCommand::with_name("batch_enable").about("Enable multiple services on a project. The operation is atomic: if enabling any service fails, then the entire batch fails, and no state changes occur. Operation");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("disable").about("Disable a service so that it can no longer be used with a project. This prevents unintended usage that may cause unexpected billing charges or security leaks. It is not valid to call the disable method on a service that is not currently enabled. Callers will receive a `FAILED_PRECONDITION` status if the target service is not currently enabled. Operation");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("enable")
.about("Enable a service so that it can be used with a project. Operation");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("generate_service_identity")
.about("Generate service identity for service.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get")
.about("Returns the service configuration and enabled state for a given service.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("List all services available to the specified project, and the current state of those services with respect to the project. The list includes all public services, all services for which the calling user has the `servicemanagement.services.bind` permission, and all services that have already been enabled on the project. The list can be filtered to only include services in a specific state, for example to only include services enabled on the project.");
services0 = services0.subcommand(mcmd);
}
let mut consumer_quota_metrics1 = SubCommand::with_name("consumer_quota_metrics")
.setting(AppSettings::ColoredHelp)
.about("methods: get, import_admin_overrides, import_consumer_overrides and list");
{
let mcmd = SubCommand::with_name("get")
.about("Retrieves a summary of quota information for a specific quota metric");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("import_admin_overrides").about("Create or update multiple admin overrides atomically, all on the same consumer, but on many different metrics or limits. The name field in the quota override message should not be set.");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("import_consumer_overrides").about("Create or update multiple consumer overrides atomically, all on the same consumer, but on many different metrics or limits. The name field in the quota override message should not be set.");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Retrieves a summary of all quota information visible to the service consumer, organized by service metric. Each metric includes information about all of its defined limits. Each limit includes the limit configuration (quota unit, preciseness, default value), the current effective limit value, and all of the overrides applied to the limit.");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
let mut limits2 = SubCommand::with_name("limits")
.setting(AppSettings::ColoredHelp)
.about("methods: get");
{
let mcmd = SubCommand::with_name("get")
.about("Retrieves a summary of quota information for a specific quota limit.");
limits2 = limits2.subcommand(mcmd);
}
let mut admin_overrides3 = SubCommand::with_name("admin_overrides")
.setting(AppSettings::ColoredHelp)
.about("methods: create, delete, list and patch");
{
let mcmd = SubCommand::with_name("create").about("Creates an admin override. An admin override is applied by an administrator of a parent folder or parent organization of the consumer receiving the override. An admin override is intended to limit the amount of quota the consumer can use out of the total quota pool allocated to all children of the folder or organization.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("delete").about("Deletes an admin override.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
{
let mcmd =
SubCommand::with_name("list").about("Lists all admin overrides on this limit.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("patch").about("Updates an admin override.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
let mut consumer_overrides3 = SubCommand::with_name("consumer_overrides")
.setting(AppSettings::ColoredHelp)
.about("methods: create, delete, list and patch");
{
let mcmd = SubCommand::with_name("create").about("Creates a consumer override. A consumer override is applied to the consumer on its own authority to limit its own quota usage. Consumer overrides cannot be used to grant more quota than would be allowed by admin overrides, producer overrides, or the default limit of the service.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("delete").about("Deletes a consumer override.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
{
let mcmd =
SubCommand::with_name("list").about("Lists all consumer overrides on this limit.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("patch").about("Updates a consumer override.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
limits2 = limits2.subcommand(consumer_overrides3);
limits2 = limits2.subcommand(admin_overrides3);
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(limits2);
services0 = services0.subcommand(consumer_quota_metrics1);
app = app.subcommand(services0);
app = app.subcommand(operations0);
Self { app }
}
}
use google_serviceusage1_beta1 as api;
fn main() | {
// TODO: set homedir afterwards, once the address is unmovable, or use Pin for the very first time
// to allow a self-referential structure :D!
let _home_dir = dirs::config_dir()
.expect("configuration directory can be obtained")
.join("google-service-cli");
let outer = Outer::default_boxed();
let app = outer.inner.app;
let _matches = app.get_matches();
} | identifier_body |
|
main.rs | use clap::{App, AppSettings, Arg, SubCommand};
use default_boxed::DefaultBoxed;
#[derive(DefaultBoxed)]
struct Outer<'a, 'b> {
inner: HeapApp<'a, 'b>,
}
struct HeapApp<'a, 'b> {
app: App<'a, 'b>,
}
impl<'a, 'b> Default for HeapApp<'a, 'b> {
fn default() -> Self {
let mut app = App::new("serviceusage1_beta1")
.setting(clap::AppSettings::ColoredHelp)
.author("Sebastian Thiel <[email protected]>")
.version("0.1.0-20210317")
.about("Enables services that service consumers want to use on Google Cloud Platform, lists the available or enabled services, or disables services that service consumers no longer use.")
.after_help("All documentation details can be found at <TODO figure out URL>")
.arg(Arg::with_name("scope")
.long("scope")
.help("Specify the scope in which the authentication method should be executed. Each scope requires the user to grant this application permission to use it. If unset, it defaults to the shortest scope url for a particular method.")
.multiple(true)
.takes_value(true))
.arg(Arg::with_name("folder")
.long("config-dir")
.help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation." )
.multiple(false)
.takes_value(true))
.arg(Arg::with_name("debug")
.long("debug")
.help("Provide more output to aid with debugging")
.multiple(false)
.takes_value(false));
let mut operations0 = SubCommand::with_name("operations")
.setting(AppSettings::ColoredHelp)
.about("methods: get and list");
{
let mcmd = SubCommand::with_name("get").about("Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.");
operations0 = operations0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists operations that match the specified filter in the request. If the server doesn\'t support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.");
operations0 = operations0.subcommand(mcmd);
}
let mut services0 = SubCommand::with_name("services")
.setting(AppSettings::ColoredHelp)
.about(
"methods: batch_enable, disable, enable, generate_service_identity, get and list",
);
{
let mcmd = SubCommand::with_name("batch_enable").about("Enable multiple services on a project. The operation is atomic: if enabling any service fails, then the entire batch fails, and no state changes occur. Operation");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("disable").about("Disable a service so that it can no longer be used with a project. This prevents unintended usage that may cause unexpected billing charges or security leaks. It is not valid to call the disable method on a service that is not currently enabled. Callers will receive a `FAILED_PRECONDITION` status if the target service is not currently enabled. Operation");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("enable")
.about("Enable a service so that it can be used with a project. Operation");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("generate_service_identity")
.about("Generate service identity for service.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get")
.about("Returns the service configuration and enabled state for a given service.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("List all services available to the specified project, and the current state of those services with respect to the project. The list includes all public services, all services for which the calling user has the `servicemanagement.services.bind` permission, and all services that have already been enabled on the project. The list can be filtered to only include services in a specific state, for example to only include services enabled on the project.");
services0 = services0.subcommand(mcmd);
}
let mut consumer_quota_metrics1 = SubCommand::with_name("consumer_quota_metrics")
.setting(AppSettings::ColoredHelp)
.about("methods: get, import_admin_overrides, import_consumer_overrides and list");
{
let mcmd = SubCommand::with_name("get")
.about("Retrieves a summary of quota information for a specific quota metric");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("import_admin_overrides").about("Create or update multiple admin overrides atomically, all on the same consumer, but on many different metrics or limits. The name field in the quota override message should not be set.");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("import_consumer_overrides").about("Create or update multiple consumer overrides atomically, all on the same consumer, but on many different metrics or limits. The name field in the quota override message should not be set.");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Retrieves a summary of all quota information visible to the service consumer, organized by service metric. Each metric includes information about all of its defined limits. Each limit includes the limit configuration (quota unit, preciseness, default value), the current effective limit value, and all of the overrides applied to the limit.");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
let mut limits2 = SubCommand::with_name("limits")
.setting(AppSettings::ColoredHelp)
.about("methods: get");
{
let mcmd = SubCommand::with_name("get")
.about("Retrieves a summary of quota information for a specific quota limit.");
limits2 = limits2.subcommand(mcmd);
}
let mut admin_overrides3 = SubCommand::with_name("admin_overrides")
.setting(AppSettings::ColoredHelp)
.about("methods: create, delete, list and patch");
{
let mcmd = SubCommand::with_name("create").about("Creates an admin override. An admin override is applied by an administrator of a parent folder or parent organization of the consumer receiving the override. An admin override is intended to limit the amount of quota the consumer can use out of the total quota pool allocated to all children of the folder or organization.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("delete").about("Deletes an admin override.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
{
let mcmd =
SubCommand::with_name("list").about("Lists all admin overrides on this limit.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("patch").about("Updates an admin override.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
} | {
let mcmd = SubCommand::with_name("create").about("Creates a consumer override. A consumer override is applied to the consumer on its own authority to limit its own quota usage. Consumer overrides cannot be used to grant more quota than would be allowed by admin overrides, producer overrides, or the default limit of the service.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("delete").about("Deletes a consumer override.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
{
let mcmd =
SubCommand::with_name("list").about("Lists all consumer overrides on this limit.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("patch").about("Updates a consumer override.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
limits2 = limits2.subcommand(consumer_overrides3);
limits2 = limits2.subcommand(admin_overrides3);
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(limits2);
services0 = services0.subcommand(consumer_quota_metrics1);
app = app.subcommand(services0);
app = app.subcommand(operations0);
Self { app }
}
}
use google_serviceusage1_beta1 as api;
fn main() {
// TODO: set homedir afterwards, once the address is unmovable, or use Pin for the very first time
// to allow a self-referential structure :D!
let _home_dir = dirs::config_dir()
.expect("configuration directory can be obtained")
.join("google-service-cli");
let outer = Outer::default_boxed();
let app = outer.inner.app;
let _matches = app.get_matches();
} | let mut consumer_overrides3 = SubCommand::with_name("consumer_overrides")
.setting(AppSettings::ColoredHelp)
.about("methods: create, delete, list and patch"); | random_line_split |
main.rs | use clap::{App, AppSettings, Arg, SubCommand};
use default_boxed::DefaultBoxed;
#[derive(DefaultBoxed)]
struct Outer<'a, 'b> {
inner: HeapApp<'a, 'b>,
}
struct | <'a, 'b> {
app: App<'a, 'b>,
}
impl<'a, 'b> Default for HeapApp<'a, 'b> {
fn default() -> Self {
let mut app = App::new("serviceusage1_beta1")
.setting(clap::AppSettings::ColoredHelp)
.author("Sebastian Thiel <[email protected]>")
.version("0.1.0-20210317")
.about("Enables services that service consumers want to use on Google Cloud Platform, lists the available or enabled services, or disables services that service consumers no longer use.")
.after_help("All documentation details can be found at <TODO figure out URL>")
.arg(Arg::with_name("scope")
.long("scope")
.help("Specify the scope in which the authentication method should be executed. Each scope requires the user to grant this application permission to use it. If unset, it defaults to the shortest scope url for a particular method.")
.multiple(true)
.takes_value(true))
.arg(Arg::with_name("folder")
.long("config-dir")
.help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation." )
.multiple(false)
.takes_value(true))
.arg(Arg::with_name("debug")
.long("debug")
.help("Provide more output to aid with debugging")
.multiple(false)
.takes_value(false));
let mut operations0 = SubCommand::with_name("operations")
.setting(AppSettings::ColoredHelp)
.about("methods: get and list");
{
let mcmd = SubCommand::with_name("get").about("Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.");
operations0 = operations0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists operations that match the specified filter in the request. If the server doesn\'t support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.");
operations0 = operations0.subcommand(mcmd);
}
let mut services0 = SubCommand::with_name("services")
.setting(AppSettings::ColoredHelp)
.about(
"methods: batch_enable, disable, enable, generate_service_identity, get and list",
);
{
let mcmd = SubCommand::with_name("batch_enable").about("Enable multiple services on a project. The operation is atomic: if enabling any service fails, then the entire batch fails, and no state changes occur. Operation");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("disable").about("Disable a service so that it can no longer be used with a project. This prevents unintended usage that may cause unexpected billing charges or security leaks. It is not valid to call the disable method on a service that is not currently enabled. Callers will receive a `FAILED_PRECONDITION` status if the target service is not currently enabled. Operation");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("enable")
.about("Enable a service so that it can be used with a project. Operation");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("generate_service_identity")
.about("Generate service identity for service.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get")
.about("Returns the service configuration and enabled state for a given service.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("List all services available to the specified project, and the current state of those services with respect to the project. The list includes all public services, all services for which the calling user has the `servicemanagement.services.bind` permission, and all services that have already been enabled on the project. The list can be filtered to only include services in a specific state, for example to only include services enabled on the project.");
services0 = services0.subcommand(mcmd);
}
let mut consumer_quota_metrics1 = SubCommand::with_name("consumer_quota_metrics")
.setting(AppSettings::ColoredHelp)
.about("methods: get, import_admin_overrides, import_consumer_overrides and list");
{
let mcmd = SubCommand::with_name("get")
.about("Retrieves a summary of quota information for a specific quota metric");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("import_admin_overrides").about("Create or update multiple admin overrides atomically, all on the same consumer, but on many different metrics or limits. The name field in the quota override message should not be set.");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("import_consumer_overrides").about("Create or update multiple consumer overrides atomically, all on the same consumer, but on many different metrics or limits. The name field in the quota override message should not be set.");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Retrieves a summary of all quota information visible to the service consumer, organized by service metric. Each metric includes information about all of its defined limits. Each limit includes the limit configuration (quota unit, preciseness, default value), the current effective limit value, and all of the overrides applied to the limit.");
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(mcmd);
}
let mut limits2 = SubCommand::with_name("limits")
.setting(AppSettings::ColoredHelp)
.about("methods: get");
{
let mcmd = SubCommand::with_name("get")
.about("Retrieves a summary of quota information for a specific quota limit.");
limits2 = limits2.subcommand(mcmd);
}
let mut admin_overrides3 = SubCommand::with_name("admin_overrides")
.setting(AppSettings::ColoredHelp)
.about("methods: create, delete, list and patch");
{
let mcmd = SubCommand::with_name("create").about("Creates an admin override. An admin override is applied by an administrator of a parent folder or parent organization of the consumer receiving the override. An admin override is intended to limit the amount of quota the consumer can use out of the total quota pool allocated to all children of the folder or organization.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("delete").about("Deletes an admin override.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
{
let mcmd =
SubCommand::with_name("list").about("Lists all admin overrides on this limit.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("patch").about("Updates an admin override.");
admin_overrides3 = admin_overrides3.subcommand(mcmd);
}
let mut consumer_overrides3 = SubCommand::with_name("consumer_overrides")
.setting(AppSettings::ColoredHelp)
.about("methods: create, delete, list and patch");
{
let mcmd = SubCommand::with_name("create").about("Creates a consumer override. A consumer override is applied to the consumer on its own authority to limit its own quota usage. Consumer overrides cannot be used to grant more quota than would be allowed by admin overrides, producer overrides, or the default limit of the service.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("delete").about("Deletes a consumer override.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
{
let mcmd =
SubCommand::with_name("list").about("Lists all consumer overrides on this limit.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("patch").about("Updates a consumer override.");
consumer_overrides3 = consumer_overrides3.subcommand(mcmd);
}
limits2 = limits2.subcommand(consumer_overrides3);
limits2 = limits2.subcommand(admin_overrides3);
consumer_quota_metrics1 = consumer_quota_metrics1.subcommand(limits2);
services0 = services0.subcommand(consumer_quota_metrics1);
app = app.subcommand(services0);
app = app.subcommand(operations0);
Self { app }
}
}
use google_serviceusage1_beta1 as api;
fn main() {
// TODO: set homedir afterwards, once the address is unmovable, or use Pin for the very first time
// to allow a self-referential structure :D!
let _home_dir = dirs::config_dir()
.expect("configuration directory can be obtained")
.join("google-service-cli");
let outer = Outer::default_boxed();
let app = outer.inner.app;
let _matches = app.get_matches();
}
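// Illustrative sketch (not part of the generated code): dispatching on the
// subcommand tree built above using clap v2's ArgMatches API. The handler
// bodies and the `services list` path are placeholders for demonstration.
//
// match _matches.subcommand() {
//     ("services", Some(services_matches)) => match services_matches.subcommand() {
//         ("list", Some(_list_matches)) => { /* call the services.list endpoint */ }
//         _ => {}
//     },
//     ("operations", Some(_)) => { /* dispatch operations methods */ }
//     _ => {}
// }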
v3.go
/*
* Copyright IBM Corporation 2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package compose
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/compose/types"
libcomposeyaml "github.com/docker/libcompose/yaml"
"github.com/google/go-cmp/cmp"
"github.com/konveyor/move2kube/internal/common"
irtypes "github.com/konveyor/move2kube/types/ir"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cast"
"k8s.io/apimachinery/pkg/api/resource"
core "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/networking"
)
// V3Loader loads a v3 compose file
type V3Loader struct {
}
func removeNonExistentEnvFilesV3(path string, parsedComposeFile map[string]interface{}) map[string]interface{} {
	// Remove unresolvable env files so that the parser does not throw an error.
composeFileDir := filepath.Dir(path)
if val, ok := parsedComposeFile["services"]; ok {
if services, ok := val.(map[string]interface{}); ok {
for serviceName, val := range services {
if vals, ok := val.(map[string]interface{}); ok {
if envfilesvals, ok := vals[envFile]; ok {
// env_file can be a string or list of strings
// https://docs.docker.com/compose/compose-file/#env_file
if envfilesstr, ok := envfilesvals.(string); ok {
envFilePath := envfilesstr
if !filepath.IsAbs(envFilePath) {
envFilePath = filepath.Join(composeFileDir, envFilePath)
}
					finfo, err := os.Stat(envFilePath)
					// Check err != nil (not just IsNotExist) so finfo is never dereferenced while nil.
					if err != nil || finfo.IsDir() {
logrus.Warnf("Unable to find env config file %s referred in service %s in file %s. Ignoring it.", envFilePath, serviceName, path)
delete(vals, envFile)
}
} else if envfilesvalsint, ok := envfilesvals.([]interface{}); ok {
envfiles := []interface{}{}
for _, envfilesval := range envfilesvalsint {
if envfilesstr, ok := envfilesval.(string); ok {
envFilePath := envfilesstr
if !filepath.IsAbs(envFilePath) {
envFilePath = filepath.Join(composeFileDir, envFilePath)
}
							finfo, err := os.Stat(envFilePath)
							// Same guard as above: avoid calling IsDir on a nil finfo.
							if err != nil || finfo.IsDir() {
logrus.Warnf("Unable to find env config file %s referred in service %s in file %s. Ignoring it.", envFilePath, serviceName, path)
continue
}
envfiles = append(envfiles, envfilesstr)
}
}
vals[envFile] = envfiles
}
}
}
}
}
}
return parsedComposeFile
}
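// For reference, the two env_file shapes tolerated above (YAML sketch, not
// part of the original source; the file names are assumptions):
//
//	services:
//	  web:
//	    env_file: .env        # string form
//	  db:
//	    env_file:             # list form
//	      - ./common.env
//	      - ./db.env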
// ParseV3 parses version 3 compose files
func ParseV3(path string) (*types.Config, error) {
fileData, err := ioutil.ReadFile(path)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
// Parse the Compose File
parsedComposeFile, err := loader.ParseYAML(fileData)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
parsedComposeFile = removeNonExistentEnvFilesV3(path, parsedComposeFile)
// Config details
configDetails := types.ConfigDetails{
WorkingDir: filepath.Dir(path),
ConfigFiles: []types.ConfigFile{{Filename: path, Config: parsedComposeFile}},
Environment: getEnvironmentVariables(),
}
config, err := loader.Load(configDetails)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
return config, nil
}
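// Minimal usage sketch for ParseV3 (the compose file path is an assumption
// for illustration, not taken from the original source):
//
//	config, err := ParseV3("docker-compose.yaml")
//	if err != nil {
//		logrus.Fatalf("parse failed: %s", err)
//	}
//	for _, svc := range config.Services {
//		logrus.Infof("found service %s", svc.Name)
//	}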
// ConvertToIR loads a v3 compose file into IR
func (c *V3Loader) ConvertToIR(composefilepath string, serviceName string) (irtypes.IR, error) {
logrus.Debugf("About to load configuration from docker compose file at path %s", composefilepath)
config, err := ParseV3(composefilepath)
if err != nil {
logrus.Warnf("Error while loading docker compose config : %s", err)
return irtypes.IR{}, err
}
logrus.Debugf("About to start loading docker compose to intermediate rep")
return c.convertToIR(filepath.Dir(composefilepath), *config, serviceName)
}
func (c *V3Loader) convertToIR(filedir string, composeObject types.Config, serviceName string) (irtypes.IR, error) {
ir := irtypes.IR{
Services: map[string]irtypes.Service{},
}
//Secret volumes transformed to IR
ir.Storages = c.getSecretStorages(composeObject.Secrets)
//ConfigMap volumes transformed to IR
ir.Storages = append(ir.Storages, c.getConfigStorages(composeObject.Configs)...)
for _, composeServiceConfig := range composeObject.Services {
if composeServiceConfig.Name != serviceName {
continue
}
name := common.NormalizeForServiceName(composeServiceConfig.Name)
serviceConfig := irtypes.NewServiceWithName(name)
serviceContainer := core.Container{}
serviceContainer.Image = composeServiceConfig.Image
if serviceContainer.Image == "" {
serviceContainer.Image = name + ":latest"
}
serviceContainer.WorkingDir = composeServiceConfig.WorkingDir
serviceContainer.Command = composeServiceConfig.Entrypoint
serviceContainer.Args = composeServiceConfig.Command
serviceContainer.Stdin = composeServiceConfig.StdinOpen
serviceContainer.Name = strings.ToLower(composeServiceConfig.ContainerName)
if serviceContainer.Name == "" {
serviceContainer.Name = strings.ToLower(serviceConfig.Name)
}
serviceContainer.TTY = composeServiceConfig.Tty
serviceContainer.Ports = c.getPorts(composeServiceConfig.Ports, composeServiceConfig.Expose)
c.addPorts(composeServiceConfig.Ports, composeServiceConfig.Expose, &serviceConfig)
serviceConfig.Annotations = map[string]string(composeServiceConfig.Labels)
serviceConfig.Labels = common.MergeStringMaps(composeServiceConfig.Labels, composeServiceConfig.Deploy.Labels)
if composeServiceConfig.Hostname != "" {
serviceConfig.Hostname = composeServiceConfig.Hostname
}
if composeServiceConfig.DomainName != "" {
serviceConfig.Subdomain = composeServiceConfig.DomainName
}
if composeServiceConfig.Pid != "" {
if composeServiceConfig.Pid == "host" {
serviceConfig.SecurityContext.HostPID = true
} else {
logrus.Warnf("Ignoring PID key for service \"%v\". Invalid value \"%v\".", name, composeServiceConfig.Pid)
}
}
securityContext := &core.SecurityContext{}
if composeServiceConfig.Privileged {
securityContext.Privileged = &composeServiceConfig.Privileged
}
if composeServiceConfig.User != "" {
uid, err := cast.ToInt64E(composeServiceConfig.User)
if err != nil {
logrus.Warn("Ignoring user directive. User to be specified as a UID (numeric).")
} else {
securityContext.RunAsUser = &uid
}
}
capsAdd := []core.Capability{}
capsDrop := []core.Capability{}
for _, capAdd := range composeServiceConfig.CapAdd {
capsAdd = append(capsAdd, core.Capability(capAdd))
}
for _, capDrop := range composeServiceConfig.CapDrop {
capsDrop = append(capsDrop, core.Capability(capDrop))
}
//set capabilities if it is not empty
if len(capsAdd) > 0 || len(capsDrop) > 0 {
securityContext.Capabilities = &core.Capabilities{
Add: capsAdd,
Drop: capsDrop,
}
}
// update template only if securityContext is not empty
if *securityContext != (core.SecurityContext{}) {
serviceContainer.SecurityContext = securityContext
}
podSecurityContext := &core.PodSecurityContext{}
if !cmp.Equal(*podSecurityContext, core.PodSecurityContext{}) {
serviceConfig.SecurityContext = podSecurityContext
}
if composeServiceConfig.Deploy.Mode == "global" {
serviceConfig.Daemon = true
}
serviceConfig.Networks = c.getNetworks(composeServiceConfig, composeObject)
if (composeServiceConfig.Deploy.Resources != types.Resources{}) {
if composeServiceConfig.Deploy.Resources.Limits != nil {
resourceLimit := core.ResourceList{}
memLimit := libcomposeyaml.MemStringorInt(composeServiceConfig.Deploy.Resources.Limits.MemoryBytes)
if memLimit != 0 {
					resourceLimit[core.ResourceMemory] = *resource.NewQuantity(int64(memLimit), resource.BinarySI)
}
if composeServiceConfig.Deploy.Resources.Limits.NanoCPUs != "" {
cpuLimit, err := cast.ToFloat64E(composeServiceConfig.Deploy.Resources.Limits.NanoCPUs)
if err != nil {
logrus.Warnf("Unable to convert cpu limits resources value : %s", err)
}
CPULimit := int64(cpuLimit * 1000)
if CPULimit != 0 {
resourceLimit[core.ResourceCPU] = *resource.NewMilliQuantity(CPULimit, resource.DecimalSI)
}
}
serviceContainer.Resources.Limits = resourceLimit
}
if composeServiceConfig.Deploy.Resources.Reservations != nil {
resourceRequests := core.ResourceList{}
MemReservation := libcomposeyaml.MemStringorInt(composeServiceConfig.Deploy.Resources.Reservations.MemoryBytes)
if MemReservation != 0 {
					resourceRequests[core.ResourceMemory] = *resource.NewQuantity(int64(MemReservation), resource.BinarySI)
}
if composeServiceConfig.Deploy.Resources.Reservations.NanoCPUs != "" {
cpuReservation, err := cast.ToFloat64E(composeServiceConfig.Deploy.Resources.Reservations.NanoCPUs)
if err != nil {
logrus.Warnf("Unable to convert cpu limits reservation value : %s", err)
}
CPUReservation := int64(cpuReservation * 1000)
if CPUReservation != 0 {
resourceRequests[core.ResourceCPU] = *resource.NewMilliQuantity(CPUReservation, resource.DecimalSI)
}
}
serviceContainer.Resources.Requests = resourceRequests
}
}
// HealthCheck
if composeServiceConfig.HealthCheck != nil && !composeServiceConfig.HealthCheck.Disable {
probe, err := c.getHealthCheck(*composeServiceConfig.HealthCheck)
if err != nil {
logrus.Warnf("Unable to parse health check : %s", err)
} else {
serviceContainer.LivenessProbe = &probe
}
}
restart := composeServiceConfig.Restart
if composeServiceConfig.Deploy.RestartPolicy != nil {
restart = composeServiceConfig.Deploy.RestartPolicy.Condition
}
if restart == "unless-stopped" {
logrus.Warnf("Restart policy 'unless-stopped' in service %s is not supported, convert it to 'always'", name)
serviceConfig.RestartPolicy = core.RestartPolicyAlways
}
// replicas:
if composeServiceConfig.Deploy.Replicas != nil {
serviceConfig.Replicas = int(*composeServiceConfig.Deploy.Replicas)
}
serviceContainer.Env = c.getEnvs(composeServiceConfig)
vml, vl := makeVolumesFromTmpFS(name, composeServiceConfig.Tmpfs)
for _, v := range vl {
serviceConfig.AddVolume(v)
}
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, vml...)
for _, secret := range composeServiceConfig.Secrets {
target := filepath.Join(defaultSecretBasePath, secret.Source)
src := secret.Source
if secret.Target != "" {
tokens := strings.Split(secret.Source, "/")
var prefix string
if !strings.HasPrefix(secret.Target, "/") {
prefix = defaultSecretBasePath + "/"
}
if tokens[len(tokens)-1] == secret.Target {
target = prefix + secret.Source
} else {
target = prefix + strings.TrimSuffix(secret.Target, "/"+tokens[len(tokens)-1])
}
src = tokens[len(tokens)-1]
}
vSrc := core.VolumeSource{
Secret: &core.SecretVolumeSource{
SecretName: secret.Source,
Items: []core.KeyToPath{{
Key: secret.Source,
Path: src,
}},
},
}
if secret.Mode != nil {
mode := cast.ToInt32(*secret.Mode)
vSrc.Secret.DefaultMode = &mode
}
serviceConfig.AddVolume(core.Volume{
Name: secret.Source,
VolumeSource: vSrc,
})
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: secret.Source,
MountPath: target,
})
}
for _, c := range composeServiceConfig.Configs {
target := c.Target
if target == "" {
target = "/" + c.Source
}
vSrc := core.ConfigMapVolumeSource{}
vSrc.Name = common.MakeFileNameCompliant(c.Source)
if o, ok := composeObject.Configs[c.Source]; ok {
if o.External.External {
logrus.Errorf("Config metadata %s has an external source", c.Source)
} else {
srcBaseName := filepath.Base(o.File)
vSrc.Items = []core.KeyToPath{{Key: srcBaseName, Path: filepath.Base(target)}}
if c.Mode != nil {
signedMode := int32(*c.Mode)
vSrc.DefaultMode = &signedMode
}
}
} else {
logrus.Errorf("Unable to find configmap object for %s", vSrc.Name)
}
serviceConfig.AddVolume(core.Volume{
Name: vSrc.Name,
VolumeSource: core.VolumeSource{ConfigMap: &vSrc},
})
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts,
core.VolumeMount{
Name: vSrc.Name,
MountPath: target,
SubPath: filepath.Base(target),
})
}
for _, vol := range composeServiceConfig.Volumes {
if isPath(vol.Source) {
				hPath := vol.Source
				if !filepath.IsAbs(vol.Source) {
					// Assign to the outer hPath; a `:=` here would shadow it and discard the absolute path.
					var err error
					hPath, err = filepath.Abs(vol.Source)
					if err != nil {
						logrus.Debugf("Could not create an absolute path for [%s]", hPath)
					}
				}
// Generate a hash Id for the given source file path to be mounted.
hashID := getHash([]byte(hPath))
volumeName := fmt.Sprintf("%s%d", common.VolumePrefix, hashID)
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: volumeName,
MountPath: vol.Target,
})
serviceConfig.AddVolume(core.Volume{
Name: volumeName,
VolumeSource: core.VolumeSource{
HostPath: &core.HostPathVolumeSource{Path: vol.Source},
},
})
} else {
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: vol.Source,
MountPath: vol.Target,
})
serviceConfig.AddVolume(core.Volume{
Name: vol.Source,
VolumeSource: core.VolumeSource{
PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
ClaimName: vol.Source,
},
},
})
storageObj := irtypes.Storage{StorageType: irtypes.PVCKind, Name: vol.Source, Content: nil}
ir.AddStorage(storageObj)
}
}
serviceConfig.Containers = []core.Container{serviceContainer}
ir.Services[name] = serviceConfig
}
return ir, nil
}
func (c *V3Loader) getSecretStorages(secrets map[string]types.SecretConfig) []irtypes.Storage {
	storages := make([]irtypes.Storage, 0, len(secrets))
for secretName, secretObj := range secrets {
storage := irtypes.Storage{
Name: secretName,
StorageType: irtypes.SecretKind,
}
if !secretObj.External.External {
content, err := ioutil.ReadFile(secretObj.File)
if err != nil {
logrus.Warnf("Could not read the secret file [%s]", secretObj.File)
} else {
storage.Content = map[string][]byte{secretName: content}
}
}
storages = append(storages, storage)
}
return storages
}
func (c *V3Loader) getConfigStorages(configs map[string]types.ConfigObjConfig) []irtypes.Storage {
	storages := make([]irtypes.Storage, 0, len(configs))
	for cfgName, cfgObj := range configs {
		storage := irtypes.Storage{
			Name:        cfgName,
			StorageType: irtypes.ConfigMapKind,
		}
		if !cfgObj.External.External {
			fileInfo, err := os.Stat(cfgObj.File)
			if err != nil {
				logrus.Warnf("Could not identify the type of config artifact [%s]. Encountered [%s]", cfgObj.File, err)
			} else {
				if !fileInfo.IsDir() {
					content, err := ioutil.ReadFile(cfgObj.File)
					if err != nil {
						logrus.Warnf("Could not read the config file [%s]. Encountered [%s]", cfgObj.File, err)
					} else {
						storage.Content = map[string][]byte{cfgName: content}
					}
				} else {
					dataMap, err := c.getAllDirContentAsMap(cfgObj.File)
					if err != nil {
						logrus.Warnf("Could not read the config directory [%s]. Encountered [%s]", cfgObj.File, err)
					} else {
						storage.Content = dataMap
					}
				}
			}
		}
		storages = append(storages, storage)
	}
	return storages
}
func (*V3Loader) getPorts(ports []types.ServicePortConfig, expose []string) []core.ContainerPort {
containerPorts := []core.ContainerPort{}
exist := map[string]bool{}
for _, port := range ports {
proto := core.ProtocolTCP
if strings.EqualFold(string(core.ProtocolUDP), port.Protocol) {
proto = core.ProtocolUDP
}
// Add the port to the k8s pod.
containerPorts = append(containerPorts, core.ContainerPort{
ContainerPort: int32(port.Target),
Protocol: proto,
})
exist[cast.ToString(port.Target)] = true
}
for _, port := range expose {
portValue := port
protocol := core.ProtocolTCP
if strings.Contains(portValue, "/") {
splits := strings.Split(port, "/")
portValue = splits[0]
protocol = core.Protocol(strings.ToUpper(splits[1]))
}
if exist[portValue] {
continue
}
// Add the port to the k8s pod.
containerPorts = append(containerPorts, core.ContainerPort{
ContainerPort: cast.ToInt32(portValue),
Protocol: protocol,
})
}
return containerPorts
}
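// Worked example for getPorts (hypothetical inputs): ports [{Target: 8080}]
// plus expose ["8080", "9090/udp"] yields container ports 8080/TCP (the
// duplicate expose entry "8080" is skipped via the exist map) and 9090/UDP.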
func (*V3Loader) addPorts(ports []types.ServicePortConfig, expose []string, service *irtypes.Service) {
exist := map[string]bool{}
for _, port := range ports {
// Forward the port on the k8s service to the k8s pod.
podPort := networking.ServiceBackendPort{
Number: int32(port.Target),
}
servicePort := networking.ServiceBackendPort{
Number: int32(port.Published),
}
service.AddPortForwarding(servicePort, podPort, "")
exist[cast.ToString(port.Target)] = true
}
for _, port := range expose {
portValue := port
if strings.Contains(portValue, "/") {
splits := strings.Split(port, "/")
portValue = splits[0]
}
if exist[portValue] {
continue
}
// Forward the port on the k8s service to the k8s pod.
portNumber := cast.ToInt32(portValue)
podPort := networking.ServiceBackendPort{
Number: portNumber,
}
servicePort := networking.ServiceBackendPort{
Number: portNumber,
}
service.AddPortForwarding(servicePort, podPort, "")
}
}
func (c *V3Loader) getNetworks(composeServiceConfig types.ServiceConfig, composeObject types.Config) (networks []string) {
networks = []string{}
for key := range composeServiceConfig.Networks {
netName := composeObject.Networks[key].Name
if netName == "" {
netName = key
}
networks = append(networks, netName)
}
return networks
}
func (c *V3Loader) getHealthCheck(composeHealthCheck types.HealthCheckConfig) (core.Probe, error) {
	probe := core.Probe{}
	if len(composeHealthCheck.Test) > 1 {
		probe.Handler = core.Handler{
			Exec: &core.ExecAction{
				// docker/cli adds "CMD-SHELL" to the struct, hence we remove the first element of composeHealthCheck.Test
				Command: composeHealthCheck.Test[1:],
			},
		}
	} else {
		logrus.Warnf("Could not find command to execute in probe : %s", composeHealthCheck.Test)
	}
	if composeHealthCheck.Timeout != nil {
		parse, err := time.ParseDuration(composeHealthCheck.Timeout.String())
		if err != nil {
			return probe, errors.Wrap(err, "unable to parse health check timeout variable")
		}
		probe.TimeoutSeconds = int32(parse.Seconds())
	}
	if composeHealthCheck.Interval != nil {
		parse, err := time.ParseDuration(composeHealthCheck.Interval.String())
		if err != nil {
			return probe, errors.Wrap(err, "unable to parse health check interval variable")
		}
		probe.PeriodSeconds = int32(parse.Seconds())
	}
	if composeHealthCheck.Retries != nil {
		probe.FailureThreshold = int32(*composeHealthCheck.Retries)
	}
	if composeHealthCheck.StartPeriod != nil {
		parse, err := time.ParseDuration(composeHealthCheck.StartPeriod.String())
		if err != nil {
			return probe, errors.Wrap(err, "unable to parse health check startPeriod variable")
		}
		probe.InitialDelaySeconds = int32(parse.Seconds())
	}
	return probe, nil
}
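// Mapping sketch (assumed values, not from the original source): a compose
// healthcheck such as
//
//	healthcheck:
//	  test: ["CMD-SHELL", "curl -f http://localhost/ || exit 1"]
//	  interval: 30s
//	  timeout: 5s
//	  retries: 3
//	  start_period: 10s
//
// becomes a liveness probe with Command ["curl -f http://localhost/ || exit 1"]
// (the leading "CMD-SHELL" element is dropped), PeriodSeconds 30,
// TimeoutSeconds 5, FailureThreshold 3, and InitialDelaySeconds 10.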
func (c *V3Loader) getEnvs(composeServiceConfig types.ServiceConfig) (envs []core.EnvVar) {
for name, value := range composeServiceConfig.Environment {
var env core.EnvVar
if value != nil {
env = core.EnvVar{Name: name, Value: *value}
} else {
env = core.EnvVar{Name: name, Value: "unknown"}
}
envs = append(envs, env)
}
return envs
}
func (c *V3Loader) getAllDirContentAsMap(directoryPath string) (map[string][]byte, error) {
fileList, err := ioutil.ReadDir(directoryPath)
if err != nil {
return nil, err
}
dataMap := map[string][]byte{}
count := 0
for _, file := range fileList {
if file.IsDir() {
continue
}
fileName := file.Name()
logrus.Debugf("Reading file into the data map: [%s]", fileName)
data, err := ioutil.ReadFile(filepath.Join(directoryPath, fileName))
if err != nil {
logrus.Debugf("Unable to read file data : %s", fileName)
continue
}
dataMap[fileName] = data
count = count + 1
}
logrus.Debugf("Read %d files into the data map", count)
return dataMap, nil
}
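// End-to-end usage sketch (file path and service name are assumptions for
// illustration):
//
//	var loader V3Loader
//	ir, err := loader.ConvertToIR("docker-compose.yaml", "web")
//	if err != nil {
//		logrus.Fatalf("unable to convert: %s", err)
//	}
//	logrus.Infof("IR has %d services and %d storages", len(ir.Services), len(ir.Storages))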
| {
probe := core.Probe{}
if len(composeHealthCheck.Test) > 1 {
probe.Handler = core.Handler{
Exec: &core.ExecAction{
// docker/cli adds "CMD-SHELL" to the struct, hence we remove the first element of composeHealthCheck.Test
Command: composeHealthCheck.Test[1:],
},
}
} else {
logrus.Warnf("Could not find command to execute in probe : %s", composeHealthCheck.Test)
}
if composeHealthCheck.Timeout != nil {
parse, err := time.ParseDuration(composeHealthCheck.Timeout.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check timeout variable")
}
probe.TimeoutSeconds = int32(parse.Seconds())
}
if composeHealthCheck.Interval != nil {
parse, err := time.ParseDuration(composeHealthCheck.Interval.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check interval variable")
}
probe.PeriodSeconds = int32(parse.Seconds())
}
if composeHealthCheck.Retries != nil {
probe.FailureThreshold = int32(*composeHealthCheck.Retries)
}
if composeHealthCheck.StartPeriod != nil {
parse, err := time.ParseDuration(composeHealthCheck.StartPeriod.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check startPeriod variable")
}
probe.InitialDelaySeconds = int32(parse.Seconds())
}
return probe, nil
} | identifier_body |
v3.go | /*
* Copyright IBM Corporation 2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package compose
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/compose/types"
libcomposeyaml "github.com/docker/libcompose/yaml"
"github.com/google/go-cmp/cmp"
"github.com/konveyor/move2kube/internal/common"
irtypes "github.com/konveyor/move2kube/types/ir"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cast"
"k8s.io/apimachinery/pkg/api/resource"
core "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/networking"
)
// V3Loader loads a v3 compose file
type V3Loader struct {
}
func removeNonExistentEnvFilesV3(path string, parsedComposeFile map[string]interface{}) map[string]interface{} {
// Remove unresolvable env files, so that the parser does not throw error
composeFileDir := filepath.Dir(path)
if val, ok := parsedComposeFile["services"]; ok {
if services, ok := val.(map[string]interface{}); ok {
for serviceName, val := range services {
if vals, ok := val.(map[string]interface{}); ok {
if envfilesvals, ok := vals[envFile]; ok {
// env_file can be a string or list of strings
// https://docs.docker.com/compose/compose-file/#env_file
if envfilesstr, ok := envfilesvals.(string); ok {
envFilePath := envfilesstr
if !filepath.IsAbs(envFilePath) {
envFilePath = filepath.Join(composeFileDir, envFilePath)
}
finfo, err := os.Stat(envFilePath)
if os.IsNotExist(err) || finfo.IsDir() {
logrus.Warnf("Unable to find env config file %s referred in service %s in file %s. Ignoring it.", envFilePath, serviceName, path)
delete(vals, envFile)
}
} else if envfilesvalsint, ok := envfilesvals.([]interface{}); ok {
envfiles := []interface{}{}
for _, envfilesval := range envfilesvalsint {
if envfilesstr, ok := envfilesval.(string); ok {
envFilePath := envfilesstr
if !filepath.IsAbs(envFilePath) {
envFilePath = filepath.Join(composeFileDir, envFilePath)
}
finfo, err := os.Stat(envFilePath)
if os.IsNotExist(err) || finfo.IsDir() {
logrus.Warnf("Unable to find env config file %s referred in service %s in file %s. Ignoring it.", envFilePath, serviceName, path)
continue
}
envfiles = append(envfiles, envfilesstr)
}
}
vals[envFile] = envfiles
}
}
}
}
}
}
return parsedComposeFile
}
// ParseV3 parses version 3 compose files
func ParseV3(path string) (*types.Config, error) {
fileData, err := ioutil.ReadFile(path)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
// Parse the Compose File
parsedComposeFile, err := loader.ParseYAML(fileData)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
parsedComposeFile = removeNonExistentEnvFilesV3(path, parsedComposeFile)
// Config details
configDetails := types.ConfigDetails{
WorkingDir: filepath.Dir(path),
ConfigFiles: []types.ConfigFile{{Filename: path, Config: parsedComposeFile}},
Environment: getEnvironmentVariables(),
}
config, err := loader.Load(configDetails)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
return config, nil
}
// ConvertToIR loads an v3 compose file into IR
func (c *V3Loader) ConvertToIR(composefilepath string, serviceName string) (irtypes.IR, error) {
logrus.Debugf("About to load configuration from docker compose file at path %s", composefilepath)
config, err := ParseV3(composefilepath)
if err != nil {
logrus.Warnf("Error while loading docker compose config : %s", err)
return irtypes.IR{}, err
}
logrus.Debugf("About to start loading docker compose to intermediate rep")
return c.convertToIR(filepath.Dir(composefilepath), *config, serviceName)
}
func (c *V3Loader) convertToIR(filedir string, composeObject types.Config, serviceName string) (irtypes.IR, error) {
ir := irtypes.IR{
Services: map[string]irtypes.Service{},
}
//Secret volumes transformed to IR
ir.Storages = c.getSecretStorages(composeObject.Secrets)
//ConfigMap volumes transformed to IR
ir.Storages = append(ir.Storages, c.getConfigStorages(composeObject.Configs)...)
for _, composeServiceConfig := range composeObject.Services {
if composeServiceConfig.Name != serviceName {
continue
}
name := common.NormalizeForServiceName(composeServiceConfig.Name)
serviceConfig := irtypes.NewServiceWithName(name)
serviceContainer := core.Container{}
serviceContainer.Image = composeServiceConfig.Image
if serviceContainer.Image == "" {
serviceContainer.Image = name + ":latest"
}
serviceContainer.WorkingDir = composeServiceConfig.WorkingDir
serviceContainer.Command = composeServiceConfig.Entrypoint
serviceContainer.Args = composeServiceConfig.Command
serviceContainer.Stdin = composeServiceConfig.StdinOpen
serviceContainer.Name = strings.ToLower(composeServiceConfig.ContainerName)
if serviceContainer.Name == "" {
serviceContainer.Name = strings.ToLower(serviceConfig.Name)
}
serviceContainer.TTY = composeServiceConfig.Tty
serviceContainer.Ports = c.getPorts(composeServiceConfig.Ports, composeServiceConfig.Expose)
c.addPorts(composeServiceConfig.Ports, composeServiceConfig.Expose, &serviceConfig)
serviceConfig.Annotations = map[string]string(composeServiceConfig.Labels)
serviceConfig.Labels = common.MergeStringMaps(composeServiceConfig.Labels, composeServiceConfig.Deploy.Labels)
if composeServiceConfig.Hostname != "" {
serviceConfig.Hostname = composeServiceConfig.Hostname
}
if composeServiceConfig.DomainName != "" {
serviceConfig.Subdomain = composeServiceConfig.DomainName
}
if composeServiceConfig.Pid != "" {
if composeServiceConfig.Pid == "host" {
serviceConfig.SecurityContext.HostPID = true
} else {
logrus.Warnf("Ignoring PID key for service \"%v\". Invalid value \"%v\".", name, composeServiceConfig.Pid)
}
}
securityContext := &core.SecurityContext{}
if composeServiceConfig.Privileged {
securityContext.Privileged = &composeServiceConfig.Privileged
}
if composeServiceConfig.User != "" {
uid, err := cast.ToInt64E(composeServiceConfig.User)
if err != nil {
logrus.Warn("Ignoring user directive. User to be specified as a UID (numeric).")
} else {
securityContext.RunAsUser = &uid
}
}
capsAdd := []core.Capability{}
capsDrop := []core.Capability{}
for _, capAdd := range composeServiceConfig.CapAdd {
capsAdd = append(capsAdd, core.Capability(capAdd))
}
for _, capDrop := range composeServiceConfig.CapDrop {
capsDrop = append(capsDrop, core.Capability(capDrop))
}
//set capabilities if it is not empty
if len(capsAdd) > 0 || len(capsDrop) > 0 {
securityContext.Capabilities = &core.Capabilities{
Add: capsAdd,
Drop: capsDrop,
}
}
// update template only if securityContext is not empty
if *securityContext != (core.SecurityContext{}) {
serviceContainer.SecurityContext = securityContext
}
podSecurityContext := &core.PodSecurityContext{}
if !cmp.Equal(*podSecurityContext, core.PodSecurityContext{}) {
serviceConfig.SecurityContext = podSecurityContext
}
if composeServiceConfig.Deploy.Mode == "global" {
serviceConfig.Daemon = true
}
serviceConfig.Networks = c.getNetworks(composeServiceConfig, composeObject)
if (composeServiceConfig.Deploy.Resources != types.Resources{}) {
if composeServiceConfig.Deploy.Resources.Limits != nil {
resourceLimit := core.ResourceList{}
memLimit := libcomposeyaml.MemStringorInt(composeServiceConfig.Deploy.Resources.Limits.MemoryBytes)
if memLimit != 0 {
resourceLimit[core.ResourceMemory] = *resource.NewQuantity(int64(memLimit), "RandomStringForFormat")
}
if composeServiceConfig.Deploy.Resources.Limits.NanoCPUs != "" {
cpuLimit, err := cast.ToFloat64E(composeServiceConfig.Deploy.Resources.Limits.NanoCPUs)
if err != nil {
logrus.Warnf("Unable to convert cpu limits resources value : %s", err)
}
CPULimit := int64(cpuLimit * 1000)
if CPULimit != 0 {
resourceLimit[core.ResourceCPU] = *resource.NewMilliQuantity(CPULimit, resource.DecimalSI)
}
}
serviceContainer.Resources.Limits = resourceLimit
}
if composeServiceConfig.Deploy.Resources.Reservations != nil {
resourceRequests := core.ResourceList{}
MemReservation := libcomposeyaml.MemStringorInt(composeServiceConfig.Deploy.Resources.Reservations.MemoryBytes)
if MemReservation != 0 {
resourceRequests[core.ResourceMemory] = *resource.NewQuantity(int64(MemReservation), "RandomStringForFormat")
}
if composeServiceConfig.Deploy.Resources.Reservations.NanoCPUs != "" {
cpuReservation, err := cast.ToFloat64E(composeServiceConfig.Deploy.Resources.Reservations.NanoCPUs)
if err != nil {
logrus.Warnf("Unable to convert cpu limits reservation value : %s", err)
}
CPUReservation := int64(cpuReservation * 1000)
if CPUReservation != 0 {
resourceRequests[core.ResourceCPU] = *resource.NewMilliQuantity(CPUReservation, resource.DecimalSI)
}
}
serviceContainer.Resources.Requests = resourceRequests
}
}
// HealthCheck
if composeServiceConfig.HealthCheck != nil && !composeServiceConfig.HealthCheck.Disable {
probe, err := c.getHealthCheck(*composeServiceConfig.HealthCheck)
if err != nil {
logrus.Warnf("Unable to parse health check : %s", err)
} else {
serviceContainer.LivenessProbe = &probe
}
}
restart := composeServiceConfig.Restart
if composeServiceConfig.Deploy.RestartPolicy != nil {
restart = composeServiceConfig.Deploy.RestartPolicy.Condition
}
if restart == "unless-stopped" {
logrus.Warnf("Restart policy 'unless-stopped' in service %s is not supported, convert it to 'always'", name)
serviceConfig.RestartPolicy = core.RestartPolicyAlways
}
// replicas:
if composeServiceConfig.Deploy.Replicas != nil {
serviceConfig.Replicas = int(*composeServiceConfig.Deploy.Replicas)
}
serviceContainer.Env = c.getEnvs(composeServiceConfig)
vml, vl := makeVolumesFromTmpFS(name, composeServiceConfig.Tmpfs)
for _, v := range vl {
serviceConfig.AddVolume(v)
}
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, vml...)
for _, secret := range composeServiceConfig.Secrets {
target := filepath.Join(defaultSecretBasePath, secret.Source)
src := secret.Source
if secret.Target != "" {
tokens := strings.Split(secret.Source, "/")
var prefix string
if !strings.HasPrefix(secret.Target, "/") {
prefix = defaultSecretBasePath + "/"
}
if tokens[len(tokens)-1] == secret.Target {
target = prefix + secret.Source
} else {
target = prefix + strings.TrimSuffix(secret.Target, "/"+tokens[len(tokens)-1])
}
src = tokens[len(tokens)-1]
}
vSrc := core.VolumeSource{
Secret: &core.SecretVolumeSource{ | },
}
if secret.Mode != nil {
mode := cast.ToInt32(*secret.Mode)
vSrc.Secret.DefaultMode = &mode
}
serviceConfig.AddVolume(core.Volume{
Name: secret.Source,
VolumeSource: vSrc,
})
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: secret.Source,
MountPath: target,
})
}
for _, c := range composeServiceConfig.Configs {
target := c.Target
if target == "" {
target = "/" + c.Source
}
vSrc := core.ConfigMapVolumeSource{}
vSrc.Name = common.MakeFileNameCompliant(c.Source)
if o, ok := composeObject.Configs[c.Source]; ok {
if o.External.External {
logrus.Errorf("Config metadata %s has an external source", c.Source)
} else {
srcBaseName := filepath.Base(o.File)
vSrc.Items = []core.KeyToPath{{Key: srcBaseName, Path: filepath.Base(target)}}
if c.Mode != nil {
signedMode := int32(*c.Mode)
vSrc.DefaultMode = &signedMode
}
}
} else {
logrus.Errorf("Unable to find configmap object for %s", vSrc.Name)
}
serviceConfig.AddVolume(core.Volume{
Name: vSrc.Name,
VolumeSource: core.VolumeSource{ConfigMap: &vSrc},
})
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts,
core.VolumeMount{
Name: vSrc.Name,
MountPath: target,
SubPath: filepath.Base(target),
})
}
for _, vol := range composeServiceConfig.Volumes {
if isPath(vol.Source) {
hPath := vol.Source
if !filepath.IsAbs(vol.Source) {
hPath, err := filepath.Abs(vol.Source)
if err != nil {
logrus.Debugf("Could not create an absolute path for [%s]", hPath)
}
}
// Generate a hash Id for the given source file path to be mounted.
hashID := getHash([]byte(hPath))
volumeName := fmt.Sprintf("%s%d", common.VolumePrefix, hashID)
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: volumeName,
MountPath: vol.Target,
})
serviceConfig.AddVolume(core.Volume{
Name: volumeName,
VolumeSource: core.VolumeSource{
HostPath: &core.HostPathVolumeSource{Path: vol.Source},
},
})
} else {
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: vol.Source,
MountPath: vol.Target,
})
serviceConfig.AddVolume(core.Volume{
Name: vol.Source,
VolumeSource: core.VolumeSource{
PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
ClaimName: vol.Source,
},
},
})
storageObj := irtypes.Storage{StorageType: irtypes.PVCKind, Name: vol.Source, Content: nil}
ir.AddStorage(storageObj)
}
}
serviceConfig.Containers = []core.Container{serviceContainer}
ir.Services[name] = serviceConfig
}
return ir, nil
}
func (c *V3Loader) getSecretStorages(secrets map[string]types.SecretConfig) []irtypes.Storage {
storages := make([]irtypes.Storage, len(secrets))
for secretName, secretObj := range secrets {
storage := irtypes.Storage{
Name: secretName,
StorageType: irtypes.SecretKind,
}
if !secretObj.External.External {
content, err := ioutil.ReadFile(secretObj.File)
if err != nil {
logrus.Warnf("Could not read the secret file [%s]", secretObj.File)
} else {
storage.Content = map[string][]byte{secretName: content}
}
}
storages = append(storages, storage)
}
return storages
}
func (c *V3Loader) getConfigStorages(configs map[string]types.ConfigObjConfig) []irtypes.Storage {
Storages := make([]irtypes.Storage, len(configs))
for cfgName, cfgObj := range configs {
storage := irtypes.Storage{
Name: cfgName,
StorageType: irtypes.ConfigMapKind,
}
if !cfgObj.External.External {
fileInfo, err := os.Stat(cfgObj.File)
if err != nil {
logrus.Warnf("Could not identify the type of secret artifact [%s]. Encountered [%s]", cfgObj.File, err)
} else {
if !fileInfo.IsDir() {
content, err := ioutil.ReadFile(cfgObj.File)
if err != nil {
logrus.Warnf("Could not read the secret file [%s]. Encountered [%s]", cfgObj.File, err)
} else {
storage.Content = map[string][]byte{cfgName: content}
}
} else {
dataMap, err := c.getAllDirContentAsMap(cfgObj.File)
if err != nil {
logrus.Warnf("Could not read the secret directory [%s]. Encountered [%s]", cfgObj.File, err)
} else {
storage.Content = dataMap
}
}
}
}
Storages = append(Storages, storage)
}
return Storages
}
func (*V3Loader) getPorts(ports []types.ServicePortConfig, expose []string) []core.ContainerPort {
containerPorts := []core.ContainerPort{}
exist := map[string]bool{}
for _, port := range ports {
proto := core.ProtocolTCP
if strings.EqualFold(string(core.ProtocolUDP), port.Protocol) {
proto = core.ProtocolUDP
}
// Add the port to the k8s pod.
containerPorts = append(containerPorts, core.ContainerPort{
ContainerPort: int32(port.Target),
Protocol: proto,
})
exist[cast.ToString(port.Target)] = true
}
for _, port := range expose {
portValue := port
protocol := core.ProtocolTCP
if strings.Contains(portValue, "/") {
splits := strings.Split(port, "/")
portValue = splits[0]
protocol = core.Protocol(strings.ToUpper(splits[1]))
}
if exist[portValue] {
continue
}
// Add the port to the k8s pod.
containerPorts = append(containerPorts, core.ContainerPort{
ContainerPort: cast.ToInt32(portValue),
Protocol: protocol,
})
}
return containerPorts
}
func (*V3Loader) addPorts(ports []types.ServicePortConfig, expose []string, service *irtypes.Service) {
exist := map[string]bool{}
for _, port := range ports {
// Forward the port on the k8s service to the k8s pod.
podPort := networking.ServiceBackendPort{
Number: int32(port.Target),
}
servicePort := networking.ServiceBackendPort{
Number: int32(port.Published),
}
service.AddPortForwarding(servicePort, podPort, "")
exist[cast.ToString(port.Target)] = true
}
for _, port := range expose {
portValue := port
if strings.Contains(portValue, "/") {
splits := strings.Split(port, "/")
portValue = splits[0]
}
if exist[portValue] {
continue
}
// Forward the port on the k8s service to the k8s pod.
portNumber := cast.ToInt32(portValue)
podPort := networking.ServiceBackendPort{
Number: portNumber,
}
servicePort := networking.ServiceBackendPort{
Number: portNumber,
}
service.AddPortForwarding(servicePort, podPort, "")
}
}
func (c *V3Loader) getNetworks(composeServiceConfig types.ServiceConfig, composeObject types.Config) (networks []string) {
networks = []string{}
for key := range composeServiceConfig.Networks {
netName := composeObject.Networks[key].Name
if netName == "" {
netName = key
}
networks = append(networks, netName)
}
return networks
}
func (c *V3Loader) getHealthCheck(composeHealthCheck types.HealthCheckConfig) (core.Probe, error) {
probe := core.Probe{}
if len(composeHealthCheck.Test) > 1 {
probe.Handler = core.Handler{
Exec: &core.ExecAction{
// docker/cli adds "CMD-SHELL" to the struct, hence we remove the first element of composeHealthCheck.Test
Command: composeHealthCheck.Test[1:],
},
}
} else {
logrus.Warnf("Could not find command to execute in probe : %s", composeHealthCheck.Test)
}
if composeHealthCheck.Timeout != nil {
parse, err := time.ParseDuration(composeHealthCheck.Timeout.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check timeout variable")
}
probe.TimeoutSeconds = int32(parse.Seconds())
}
if composeHealthCheck.Interval != nil {
parse, err := time.ParseDuration(composeHealthCheck.Interval.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check interval variable")
}
probe.PeriodSeconds = int32(parse.Seconds())
}
if composeHealthCheck.Retries != nil {
probe.FailureThreshold = int32(*composeHealthCheck.Retries)
}
if composeHealthCheck.StartPeriod != nil {
parse, err := time.ParseDuration(composeHealthCheck.StartPeriod.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check startPeriod variable")
}
probe.InitialDelaySeconds = int32(parse.Seconds())
}
return probe, nil
}
func (c *V3Loader) getEnvs(composeServiceConfig types.ServiceConfig) (envs []core.EnvVar) {
for name, value := range composeServiceConfig.Environment {
var env core.EnvVar
if value != nil {
env = core.EnvVar{Name: name, Value: *value}
} else {
env = core.EnvVar{Name: name, Value: "unknown"}
}
envs = append(envs, env)
}
return envs
}
func (c *V3Loader) getAllDirContentAsMap(directoryPath string) (map[string][]byte, error) {
fileList, err := ioutil.ReadDir(directoryPath)
if err != nil {
return nil, err
}
dataMap := map[string][]byte{}
count := 0
for _, file := range fileList {
if file.IsDir() {
continue
}
fileName := file.Name()
logrus.Debugf("Reading file into the data map: [%s]", fileName)
data, err := ioutil.ReadFile(filepath.Join(directoryPath, fileName))
if err != nil {
logrus.Debugf("Unable to read file data : %s", fileName)
continue
}
dataMap[fileName] = data
count = count + 1
}
logrus.Debugf("Read %d files into the data map", count)
return dataMap, nil
} | SecretName: secret.Source,
Items: []core.KeyToPath{{
Key: secret.Source,
Path: src,
}}, | random_line_split |
v3.go | /*
* Copyright IBM Corporation 2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package compose
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/compose/types"
libcomposeyaml "github.com/docker/libcompose/yaml"
"github.com/google/go-cmp/cmp"
"github.com/konveyor/move2kube/internal/common"
irtypes "github.com/konveyor/move2kube/types/ir"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cast"
"k8s.io/apimachinery/pkg/api/resource"
core "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/networking"
)
// V3Loader loads a v3 compose file
type V3Loader struct {
}
func removeNonExistentEnvFilesV3(path string, parsedComposeFile map[string]interface{}) map[string]interface{} {
// Remove unresolvable env files, so that the parser does not throw error
composeFileDir := filepath.Dir(path)
if val, ok := parsedComposeFile["services"]; ok {
if services, ok := val.(map[string]interface{}); ok {
for serviceName, val := range services {
if vals, ok := val.(map[string]interface{}); ok {
if envfilesvals, ok := vals[envFile]; ok {
// env_file can be a string or list of strings
// https://docs.docker.com/compose/compose-file/#env_file
if envfilesstr, ok := envfilesvals.(string); ok {
envFilePath := envfilesstr
if !filepath.IsAbs(envFilePath) {
envFilePath = filepath.Join(composeFileDir, envFilePath)
}
finfo, err := os.Stat(envFilePath)
if os.IsNotExist(err) || finfo.IsDir() {
logrus.Warnf("Unable to find env config file %s referred in service %s in file %s. Ignoring it.", envFilePath, serviceName, path)
delete(vals, envFile)
}
} else if envfilesvalsint, ok := envfilesvals.([]interface{}); ok {
envfiles := []interface{}{}
for _, envfilesval := range envfilesvalsint {
if envfilesstr, ok := envfilesval.(string); ok {
envFilePath := envfilesstr
if !filepath.IsAbs(envFilePath) {
envFilePath = filepath.Join(composeFileDir, envFilePath)
}
finfo, err := os.Stat(envFilePath)
if os.IsNotExist(err) || finfo.IsDir() {
logrus.Warnf("Unable to find env config file %s referred in service %s in file %s. Ignoring it.", envFilePath, serviceName, path)
continue
}
envfiles = append(envfiles, envfilesstr)
}
}
vals[envFile] = envfiles
}
}
}
}
}
}
return parsedComposeFile
}
// ParseV3 parses version 3 compose files
func ParseV3(path string) (*types.Config, error) {
fileData, err := ioutil.ReadFile(path)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
// Parse the Compose File
parsedComposeFile, err := loader.ParseYAML(fileData)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
parsedComposeFile = removeNonExistentEnvFilesV3(path, parsedComposeFile)
// Config details
configDetails := types.ConfigDetails{
WorkingDir: filepath.Dir(path),
ConfigFiles: []types.ConfigFile{{Filename: path, Config: parsedComposeFile}},
Environment: getEnvironmentVariables(),
}
config, err := loader.Load(configDetails)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
return config, nil
}
// ConvertToIR loads an v3 compose file into IR
func (c *V3Loader) ConvertToIR(composefilepath string, serviceName string) (irtypes.IR, error) {
logrus.Debugf("About to load configuration from docker compose file at path %s", composefilepath)
config, err := ParseV3(composefilepath)
if err != nil {
logrus.Warnf("Error while loading docker compose config : %s", err)
return irtypes.IR{}, err
}
logrus.Debugf("About to start loading docker compose to intermediate rep")
return c.convertToIR(filepath.Dir(composefilepath), *config, serviceName)
}
func (c *V3Loader) convertToIR(filedir string, composeObject types.Config, serviceName string) (irtypes.IR, error) {
ir := irtypes.IR{
Services: map[string]irtypes.Service{},
}
//Secret volumes transformed to IR
ir.Storages = c.getSecretStorages(composeObject.Secrets)
//ConfigMap volumes transformed to IR
ir.Storages = append(ir.Storages, c.getConfigStorages(composeObject.Configs)...)
for _, composeServiceConfig := range composeObject.Services {
if composeServiceConfig.Name != serviceName {
continue
}
name := common.NormalizeForServiceName(composeServiceConfig.Name)
serviceConfig := irtypes.NewServiceWithName(name)
serviceContainer := core.Container{}
serviceContainer.Image = composeServiceConfig.Image
if serviceContainer.Image == "" {
serviceContainer.Image = name + ":latest"
}
serviceContainer.WorkingDir = composeServiceConfig.WorkingDir
serviceContainer.Command = composeServiceConfig.Entrypoint
serviceContainer.Args = composeServiceConfig.Command
serviceContainer.Stdin = composeServiceConfig.StdinOpen
serviceContainer.Name = strings.ToLower(composeServiceConfig.ContainerName)
if serviceContainer.Name == "" {
serviceContainer.Name = strings.ToLower(serviceConfig.Name)
}
serviceContainer.TTY = composeServiceConfig.Tty
serviceContainer.Ports = c.getPorts(composeServiceConfig.Ports, composeServiceConfig.Expose)
c.addPorts(composeServiceConfig.Ports, composeServiceConfig.Expose, &serviceConfig)
serviceConfig.Annotations = map[string]string(composeServiceConfig.Labels)
serviceConfig.Labels = common.MergeStringMaps(composeServiceConfig.Labels, composeServiceConfig.Deploy.Labels)
if composeServiceConfig.Hostname != "" {
serviceConfig.Hostname = composeServiceConfig.Hostname
}
if composeServiceConfig.DomainName != "" {
serviceConfig.Subdomain = composeServiceConfig.DomainName
}
if composeServiceConfig.Pid != "" {
if composeServiceConfig.Pid == "host" {
serviceConfig.SecurityContext.HostPID = true
} else {
logrus.Warnf("Ignoring PID key for service \"%v\". Invalid value \"%v\".", name, composeServiceConfig.Pid)
}
}
securityContext := &core.SecurityContext{}
if composeServiceConfig.Privileged {
securityContext.Privileged = &composeServiceConfig.Privileged
}
if composeServiceConfig.User != "" {
uid, err := cast.ToInt64E(composeServiceConfig.User)
if err != nil {
logrus.Warn("Ignoring user directive. User to be specified as a UID (numeric).")
} else {
securityContext.RunAsUser = &uid
}
}
capsAdd := []core.Capability{}
capsDrop := []core.Capability{}
for _, capAdd := range composeServiceConfig.CapAdd {
capsAdd = append(capsAdd, core.Capability(capAdd))
}
for _, capDrop := range composeServiceConfig.CapDrop {
capsDrop = append(capsDrop, core.Capability(capDrop))
}
//set capabilities if it is not empty
if len(capsAdd) > 0 || len(capsDrop) > 0 {
securityContext.Capabilities = &core.Capabilities{
Add: capsAdd,
Drop: capsDrop,
}
}
// update template only if securityContext is not empty
if *securityContext != (core.SecurityContext{}) {
serviceContainer.SecurityContext = securityContext
}
podSecurityContext := &core.PodSecurityContext{}
if !cmp.Equal(*podSecurityContext, core.PodSecurityContext{}) {
serviceConfig.SecurityContext = podSecurityContext
}
if composeServiceConfig.Deploy.Mode == "global" {
serviceConfig.Daemon = true
}
serviceConfig.Networks = c.getNetworks(composeServiceConfig, composeObject)
if (composeServiceConfig.Deploy.Resources != types.Resources{}) {
if composeServiceConfig.Deploy.Resources.Limits != nil {
resourceLimit := core.ResourceList{}
memLimit := libcomposeyaml.MemStringorInt(composeServiceConfig.Deploy.Resources.Limits.MemoryBytes)
if memLimit != 0 {
resourceLimit[core.ResourceMemory] = *resource.NewQuantity(int64(memLimit), "RandomStringForFormat")
}
if composeServiceConfig.Deploy.Resources.Limits.NanoCPUs != "" {
cpuLimit, err := cast.ToFloat64E(composeServiceConfig.Deploy.Resources.Limits.NanoCPUs)
if err != nil {
logrus.Warnf("Unable to convert cpu limits resources value : %s", err)
}
CPULimit := int64(cpuLimit * 1000)
if CPULimit != 0 {
resourceLimit[core.ResourceCPU] = *resource.NewMilliQuantity(CPULimit, resource.DecimalSI)
}
}
serviceContainer.Resources.Limits = resourceLimit
}
if composeServiceConfig.Deploy.Resources.Reservations != nil {
resourceRequests := core.ResourceList{}
MemReservation := libcomposeyaml.MemStringorInt(composeServiceConfig.Deploy.Resources.Reservations.MemoryBytes)
if MemReservation != 0 {
resourceRequests[core.ResourceMemory] = *resource.NewQuantity(int64(MemReservation), "RandomStringForFormat")
}
if composeServiceConfig.Deploy.Resources.Reservations.NanoCPUs != "" {
cpuReservation, err := cast.ToFloat64E(composeServiceConfig.Deploy.Resources.Reservations.NanoCPUs)
if err != nil {
logrus.Warnf("Unable to convert cpu limits reservation value : %s", err)
}
CPUReservation := int64(cpuReservation * 1000)
if CPUReservation != 0 {
resourceRequests[core.ResourceCPU] = *resource.NewMilliQuantity(CPUReservation, resource.DecimalSI)
}
}
serviceContainer.Resources.Requests = resourceRequests
}
}
// HealthCheck
if composeServiceConfig.HealthCheck != nil && !composeServiceConfig.HealthCheck.Disable {
probe, err := c.getHealthCheck(*composeServiceConfig.HealthCheck)
if err != nil {
logrus.Warnf("Unable to parse health check : %s", err)
} else {
serviceContainer.LivenessProbe = &probe
}
}
restart := composeServiceConfig.Restart
if composeServiceConfig.Deploy.RestartPolicy != nil {
restart = composeServiceConfig.Deploy.RestartPolicy.Condition
}
if restart == "unless-stopped" {
logrus.Warnf("Restart policy 'unless-stopped' in service %s is not supported, convert it to 'always'", name)
serviceConfig.RestartPolicy = core.RestartPolicyAlways
}
// replicas:
if composeServiceConfig.Deploy.Replicas != nil {
serviceConfig.Replicas = int(*composeServiceConfig.Deploy.Replicas)
}
serviceContainer.Env = c.getEnvs(composeServiceConfig)
vml, vl := makeVolumesFromTmpFS(name, composeServiceConfig.Tmpfs)
for _, v := range vl {
serviceConfig.AddVolume(v)
}
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, vml...)
for _, secret := range composeServiceConfig.Secrets {
target := filepath.Join(defaultSecretBasePath, secret.Source)
src := secret.Source
if secret.Target != "" {
tokens := strings.Split(secret.Source, "/")
var prefix string
if !strings.HasPrefix(secret.Target, "/") {
prefix = defaultSecretBasePath + "/"
}
if tokens[len(tokens)-1] == secret.Target {
target = prefix + secret.Source
} else {
target = prefix + strings.TrimSuffix(secret.Target, "/"+tokens[len(tokens)-1])
}
src = tokens[len(tokens)-1]
}
vSrc := core.VolumeSource{
Secret: &core.SecretVolumeSource{
SecretName: secret.Source,
Items: []core.KeyToPath{{
Key: secret.Source,
Path: src,
}},
},
}
if secret.Mode != nil {
mode := cast.ToInt32(*secret.Mode)
vSrc.Secret.DefaultMode = &mode
}
serviceConfig.AddVolume(core.Volume{
Name: secret.Source,
VolumeSource: vSrc,
})
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: secret.Source,
MountPath: target,
})
}
for _, c := range composeServiceConfig.Configs {
target := c.Target
if target == "" {
target = "/" + c.Source
}
vSrc := core.ConfigMapVolumeSource{}
vSrc.Name = common.MakeFileNameCompliant(c.Source)
if o, ok := composeObject.Configs[c.Source]; ok {
if o.External.External {
logrus.Errorf("Config metadata %s has an external source", c.Source)
} else {
srcBaseName := filepath.Base(o.File)
vSrc.Items = []core.KeyToPath{{Key: srcBaseName, Path: filepath.Base(target)}}
if c.Mode != nil {
signedMode := int32(*c.Mode)
vSrc.DefaultMode = &signedMode
}
}
} else {
logrus.Errorf("Unable to find configmap object for %s", vSrc.Name)
}
serviceConfig.AddVolume(core.Volume{
Name: vSrc.Name,
VolumeSource: core.VolumeSource{ConfigMap: &vSrc},
})
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts,
core.VolumeMount{
Name: vSrc.Name,
MountPath: target,
SubPath: filepath.Base(target),
})
}
for _, vol := range composeServiceConfig.Volumes {
if isPath(vol.Source) {
hPath := vol.Source
if !filepath.IsAbs(vol.Source) {
hPath, err := filepath.Abs(vol.Source)
if err != nil {
logrus.Debugf("Could not create an absolute path for [%s]", hPath)
}
}
// Generate a hash Id for the given source file path to be mounted.
hashID := getHash([]byte(hPath))
volumeName := fmt.Sprintf("%s%d", common.VolumePrefix, hashID)
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: volumeName,
MountPath: vol.Target,
})
serviceConfig.AddVolume(core.Volume{
Name: volumeName,
VolumeSource: core.VolumeSource{
HostPath: &core.HostPathVolumeSource{Path: vol.Source},
},
})
} else {
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: vol.Source,
MountPath: vol.Target,
})
serviceConfig.AddVolume(core.Volume{
Name: vol.Source,
VolumeSource: core.VolumeSource{
PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
ClaimName: vol.Source,
},
},
})
storageObj := irtypes.Storage{StorageType: irtypes.PVCKind, Name: vol.Source, Content: nil}
ir.AddStorage(storageObj)
}
}
serviceConfig.Containers = []core.Container{serviceContainer}
ir.Services[name] = serviceConfig
}
return ir, nil
}
func (c *V3Loader) getSecretStorages(secrets map[string]types.SecretConfig) []irtypes.Storage {
storages := make([]irtypes.Storage, len(secrets))
for secretName, secretObj := range secrets {
storage := irtypes.Storage{
Name: secretName,
StorageType: irtypes.SecretKind,
}
if !secretObj.External.External {
content, err := ioutil.ReadFile(secretObj.File)
if err != nil {
logrus.Warnf("Could not read the secret file [%s]", secretObj.File)
} else {
storage.Content = map[string][]byte{secretName: content}
}
}
storages = append(storages, storage)
}
return storages
}
func (c *V3Loader) getConfigStorages(configs map[string]types.ConfigObjConfig) []irtypes.Storage {
Storages := make([]irtypes.Storage, len(configs))
for cfgName, cfgObj := range configs {
storage := irtypes.Storage{
Name: cfgName,
StorageType: irtypes.ConfigMapKind,
}
if !cfgObj.External.External {
fileInfo, err := os.Stat(cfgObj.File)
if err != nil {
logrus.Warnf("Could not identify the type of secret artifact [%s]. Encountered [%s]", cfgObj.File, err)
} else {
if !fileInfo.IsDir() {
content, err := ioutil.ReadFile(cfgObj.File)
if err != nil {
logrus.Warnf("Could not read the secret file [%s]. Encountered [%s]", cfgObj.File, err)
} else {
storage.Content = map[string][]byte{cfgName: content}
}
} else {
dataMap, err := c.getAllDirContentAsMap(cfgObj.File)
if err != nil {
logrus.Warnf("Could not read the secret directory [%s]. Encountered [%s]", cfgObj.File, err)
} else {
storage.Content = dataMap
}
}
}
}
Storages = append(Storages, storage)
}
return Storages
}
func (*V3Loader) getPorts(ports []types.ServicePortConfig, expose []string) []core.ContainerPort {
containerPorts := []core.ContainerPort{}
exist := map[string]bool{}
for _, port := range ports {
proto := core.ProtocolTCP
if strings.EqualFold(string(core.ProtocolUDP), port.Protocol) {
proto = core.ProtocolUDP
}
// Add the port to the k8s pod.
containerPorts = append(containerPorts, core.ContainerPort{
ContainerPort: int32(port.Target),
Protocol: proto,
})
exist[cast.ToString(port.Target)] = true
}
for _, port := range expose {
portValue := port
protocol := core.ProtocolTCP
if strings.Contains(portValue, "/") {
splits := strings.Split(port, "/")
portValue = splits[0]
protocol = core.Protocol(strings.ToUpper(splits[1]))
}
if exist[portValue] {
continue
}
// Add the port to the k8s pod.
containerPorts = append(containerPorts, core.ContainerPort{
ContainerPort: cast.ToInt32(portValue),
Protocol: protocol,
})
}
return containerPorts
}
func (*V3Loader) addPorts(ports []types.ServicePortConfig, expose []string, service *irtypes.Service) {
exist := map[string]bool{}
for _, port := range ports {
// Forward the port on the k8s service to the k8s pod.
podPort := networking.ServiceBackendPort{
Number: int32(port.Target),
}
servicePort := networking.ServiceBackendPort{
Number: int32(port.Published),
}
service.AddPortForwarding(servicePort, podPort, "")
exist[cast.ToString(port.Target)] = true
}
for _, port := range expose {
portValue := port
if strings.Contains(portValue, "/") |
if exist[portValue] {
continue
}
// Forward the port on the k8s service to the k8s pod.
portNumber := cast.ToInt32(portValue)
podPort := networking.ServiceBackendPort{
Number: portNumber,
}
servicePort := networking.ServiceBackendPort{
Number: portNumber,
}
service.AddPortForwarding(servicePort, podPort, "")
}
}
func (c *V3Loader) getNetworks(composeServiceConfig types.ServiceConfig, composeObject types.Config) (networks []string) {
networks = []string{}
for key := range composeServiceConfig.Networks {
netName := composeObject.Networks[key].Name
if netName == "" {
netName = key
}
networks = append(networks, netName)
}
return networks
}
func (c *V3Loader) getHealthCheck(composeHealthCheck types.HealthCheckConfig) (core.Probe, error) {
probe := core.Probe{}
if len(composeHealthCheck.Test) > 1 {
probe.Handler = core.Handler{
Exec: &core.ExecAction{
// docker/cli adds "CMD-SHELL" to the struct, hence we remove the first element of composeHealthCheck.Test
Command: composeHealthCheck.Test[1:],
},
}
} else {
logrus.Warnf("Could not find command to execute in probe: %s", composeHealthCheck.Test)
}
if composeHealthCheck.Timeout != nil {
parse, err := time.ParseDuration(composeHealthCheck.Timeout.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check timeout variable")
}
probe.TimeoutSeconds = int32(parse.Seconds())
}
if composeHealthCheck.Interval != nil {
parse, err := time.ParseDuration(composeHealthCheck.Interval.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check interval variable")
}
probe.PeriodSeconds = int32(parse.Seconds())
}
if composeHealthCheck.Retries != nil {
probe.FailureThreshold = int32(*composeHealthCheck.Retries)
}
if composeHealthCheck.StartPeriod != nil {
parse, err := time.ParseDuration(composeHealthCheck.StartPeriod.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check startPeriod variable")
}
probe.InitialDelaySeconds = int32(parse.Seconds())
}
return probe, nil
}
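// Hedged sketch (added for clarity): compose durations arrive as strings such as
// "1m30s"; the ParseDuration round-trips above reduce them to whole seconds for
// the probe fields.
//
//	d, _ := time.ParseDuration("1m30s")
//	_ = int32(d.Seconds()) // 90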
func (c *V3Loader) getEnvs(composeServiceConfig types.ServiceConfig) (envs []core.EnvVar) {
for name, value := range composeServiceConfig.Environment {
var env core.EnvVar
if value != nil {
env = core.EnvVar{Name: name, Value: *value}
} else {
env = core.EnvVar{Name: name, Value: "unknown"}
}
envs = append(envs, env)
}
return envs
}
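// Illustrative mapping (assumption; names are hypothetical): a compose block like
//
//	environment:
//	  FOO: bar
//	  BAZ:          # no value, parsed as a nil pointer
//
// yields EnvVar{Name: "FOO", Value: "bar"} and EnvVar{Name: "BAZ", Value:
// "unknown"}, since nil values fall through to the else branch above.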
func (c *V3Loader) getAllDirContentAsMap(directoryPath string) (map[string][]byte, error) {
fileList, err := ioutil.ReadDir(directoryPath)
if err != nil {
return nil, err
}
dataMap := map[string][]byte{}
count := 0
for _, file := range fileList {
if file.IsDir() {
continue
}
fileName := file.Name()
logrus.Debugf("Reading file into the data map: [%s]", fileName)
data, err := ioutil.ReadFile(filepath.Join(directoryPath, fileName))
if err != nil {
logrus.Debugf("Unable to read file data: %s", fileName)
continue
}
dataMap[fileName] = data
count = count + 1
}
logrus.Debugf("Read %d files into the data map", count)
return dataMap, nil
}
| {
splits := strings.Split(port, "/")
portValue = splits[0]
} | conditional_block |
v3.go | /*
* Copyright IBM Corporation 2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package compose
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/compose/types"
libcomposeyaml "github.com/docker/libcompose/yaml"
"github.com/google/go-cmp/cmp"
"github.com/konveyor/move2kube/internal/common"
irtypes "github.com/konveyor/move2kube/types/ir"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cast"
"k8s.io/apimachinery/pkg/api/resource"
core "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/networking"
)
// V3Loader loads a v3 compose file
type V3Loader struct {
}
func removeNonExistentEnvFilesV3(path string, parsedComposeFile map[string]interface{}) map[string]interface{} {
// Remove unresolvable env files, so that the parser does not throw error
composeFileDir := filepath.Dir(path)
if val, ok := parsedComposeFile["services"]; ok {
if services, ok := val.(map[string]interface{}); ok {
for serviceName, val := range services {
if vals, ok := val.(map[string]interface{}); ok {
if envfilesvals, ok := vals[envFile]; ok {
// env_file can be a string or list of strings
// https://docs.docker.com/compose/compose-file/#env_file
if envfilesstr, ok := envfilesvals.(string); ok {
envFilePath := envfilesstr
if !filepath.IsAbs(envFilePath) {
envFilePath = filepath.Join(composeFileDir, envFilePath)
}
finfo, err := os.Stat(envFilePath)
// Any stat failure (not only "does not exist") makes the file unusable;
// checking err first also avoids calling IsDir on a nil FileInfo.
if err != nil || finfo.IsDir() {
logrus.Warnf("Unable to find env config file %s referred to in service %s in file %s. Ignoring it.", envFilePath, serviceName, path)
delete(vals, envFile)
}
} else if envfilesvalsint, ok := envfilesvals.([]interface{}); ok {
envfiles := []interface{}{}
for _, envfilesval := range envfilesvalsint {
if envfilesstr, ok := envfilesval.(string); ok {
envFilePath := envfilesstr
if !filepath.IsAbs(envFilePath) {
envFilePath = filepath.Join(composeFileDir, envFilePath)
}
finfo, err := os.Stat(envFilePath)
if err != nil || finfo.IsDir() {
logrus.Warnf("Unable to find env config file %s referred to in service %s in file %s. Ignoring it.", envFilePath, serviceName, path)
continue
}
envfiles = append(envfiles, envfilesstr)
}
}
vals[envFile] = envfiles
}
}
}
}
}
}
return parsedComposeFile
}
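// Sketch (not in the original source): the two env_file shapes this function
// normalizes, as they appear in a compose file.
//
//	services:
//	  web:
//	    env_file: .env                # single string
//	  api:
//	    env_file: [".env", "api.env"] # list of strings
//
// Entries that cannot be resolved on disk are dropped before loader.Load runs.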
// ParseV3 parses version 3 compose files
func ParseV3(path string) (*types.Config, error) {
fileData, err := ioutil.ReadFile(path)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
// Parse the Compose File
parsedComposeFile, err := loader.ParseYAML(fileData)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
parsedComposeFile = removeNonExistentEnvFilesV3(path, parsedComposeFile)
// Config details
configDetails := types.ConfigDetails{
WorkingDir: filepath.Dir(path),
ConfigFiles: []types.ConfigFile{{Filename: path, Config: parsedComposeFile}},
Environment: getEnvironmentVariables(),
}
config, err := loader.Load(configDetails)
if err != nil {
err := fmt.Errorf("unable to load Compose file at path %s Error: %q", path, err)
logrus.Debug(err)
return nil, err
}
return config, nil
}
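// Example caller (hypothetical, not part of this file):
//
//	config, err := ParseV3("docker-compose.yaml")
//	if err != nil {
//		logrus.Fatalf("parse failed: %s", err)
//	}
//	for _, svc := range config.Services {
//		logrus.Infof("found service %s", svc.Name)
//	}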
// ConvertToIR loads a v3 compose file into IR
func (c *V3Loader) ConvertToIR(composefilepath string, serviceName string) (irtypes.IR, error) {
logrus.Debugf("About to load configuration from docker compose file at path %s", composefilepath)
config, err := ParseV3(composefilepath)
if err != nil {
logrus.Warnf("Error while loading docker compose config : %s", err)
return irtypes.IR{}, err
}
logrus.Debugf("About to start loading docker compose to intermediate rep")
return c.convertToIR(filepath.Dir(composefilepath), *config, serviceName)
}
func (c *V3Loader) | (filedir string, composeObject types.Config, serviceName string) (irtypes.IR, error) {
ir := irtypes.IR{
Services: map[string]irtypes.Service{},
}
//Secret volumes transformed to IR
ir.Storages = c.getSecretStorages(composeObject.Secrets)
//ConfigMap volumes transformed to IR
ir.Storages = append(ir.Storages, c.getConfigStorages(composeObject.Configs)...)
for _, composeServiceConfig := range composeObject.Services {
if composeServiceConfig.Name != serviceName {
continue
}
name := common.NormalizeForServiceName(composeServiceConfig.Name)
serviceConfig := irtypes.NewServiceWithName(name)
serviceContainer := core.Container{}
serviceContainer.Image = composeServiceConfig.Image
if serviceContainer.Image == "" {
serviceContainer.Image = name + ":latest"
}
serviceContainer.WorkingDir = composeServiceConfig.WorkingDir
serviceContainer.Command = composeServiceConfig.Entrypoint
serviceContainer.Args = composeServiceConfig.Command
serviceContainer.Stdin = composeServiceConfig.StdinOpen
serviceContainer.Name = strings.ToLower(composeServiceConfig.ContainerName)
if serviceContainer.Name == "" {
serviceContainer.Name = strings.ToLower(serviceConfig.Name)
}
serviceContainer.TTY = composeServiceConfig.Tty
serviceContainer.Ports = c.getPorts(composeServiceConfig.Ports, composeServiceConfig.Expose)
c.addPorts(composeServiceConfig.Ports, composeServiceConfig.Expose, &serviceConfig)
serviceConfig.Annotations = map[string]string(composeServiceConfig.Labels)
serviceConfig.Labels = common.MergeStringMaps(composeServiceConfig.Labels, composeServiceConfig.Deploy.Labels)
if composeServiceConfig.Hostname != "" {
serviceConfig.Hostname = composeServiceConfig.Hostname
}
if composeServiceConfig.DomainName != "" {
serviceConfig.Subdomain = composeServiceConfig.DomainName
}
if composeServiceConfig.Pid != "" {
if composeServiceConfig.Pid == "host" {
serviceConfig.SecurityContext.HostPID = true
} else {
logrus.Warnf("Ignoring PID key for service \"%v\". Invalid value \"%v\".", name, composeServiceConfig.Pid)
}
}
securityContext := &core.SecurityContext{}
if composeServiceConfig.Privileged {
securityContext.Privileged = &composeServiceConfig.Privileged
}
if composeServiceConfig.User != "" {
uid, err := cast.ToInt64E(composeServiceConfig.User)
if err != nil {
logrus.Warn("Ignoring user directive. The user must be specified as a numeric UID.")
} else {
securityContext.RunAsUser = &uid
}
}
capsAdd := []core.Capability{}
capsDrop := []core.Capability{}
for _, capAdd := range composeServiceConfig.CapAdd {
capsAdd = append(capsAdd, core.Capability(capAdd))
}
for _, capDrop := range composeServiceConfig.CapDrop {
capsDrop = append(capsDrop, core.Capability(capDrop))
}
// set capabilities only if at least one add or drop entry is present
if len(capsAdd) > 0 || len(capsDrop) > 0 {
securityContext.Capabilities = &core.Capabilities{
Add: capsAdd,
Drop: capsDrop,
}
}
// update template only if securityContext is not empty
if *securityContext != (core.SecurityContext{}) {
serviceContainer.SecurityContext = securityContext
}
podSecurityContext := &core.PodSecurityContext{}
if !cmp.Equal(*podSecurityContext, core.PodSecurityContext{}) {
serviceConfig.SecurityContext = podSecurityContext
}
if composeServiceConfig.Deploy.Mode == "global" {
serviceConfig.Daemon = true
}
serviceConfig.Networks = c.getNetworks(composeServiceConfig, composeObject)
if (composeServiceConfig.Deploy.Resources != types.Resources{}) {
if composeServiceConfig.Deploy.Resources.Limits != nil {
resourceLimit := core.ResourceList{}
memLimit := libcomposeyaml.MemStringorInt(composeServiceConfig.Deploy.Resources.Limits.MemoryBytes)
if memLimit != 0 {
resourceLimit[core.ResourceMemory] = *resource.NewQuantity(int64(memLimit), resource.BinarySI)
}
if composeServiceConfig.Deploy.Resources.Limits.NanoCPUs != "" {
cpuLimit, err := cast.ToFloat64E(composeServiceConfig.Deploy.Resources.Limits.NanoCPUs)
if err != nil {
logrus.Warnf("Unable to convert cpu limits resources value : %s", err)
}
CPULimit := int64(cpuLimit * 1000)
if CPULimit != 0 {
resourceLimit[core.ResourceCPU] = *resource.NewMilliQuantity(CPULimit, resource.DecimalSI)
}
}
serviceContainer.Resources.Limits = resourceLimit
}
if composeServiceConfig.Deploy.Resources.Reservations != nil {
resourceRequests := core.ResourceList{}
MemReservation := libcomposeyaml.MemStringorInt(composeServiceConfig.Deploy.Resources.Reservations.MemoryBytes)
if MemReservation != 0 {
resourceRequests[core.ResourceMemory] = *resource.NewQuantity(int64(MemReservation), resource.BinarySI)
}
if composeServiceConfig.Deploy.Resources.Reservations.NanoCPUs != "" {
cpuReservation, err := cast.ToFloat64E(composeServiceConfig.Deploy.Resources.Reservations.NanoCPUs)
if err != nil {
logrus.Warnf("Unable to convert cpu limits reservation value : %s", err)
}
CPUReservation := int64(cpuReservation * 1000)
if CPUReservation != 0 {
resourceRequests[core.ResourceCPU] = *resource.NewMilliQuantity(CPUReservation, resource.DecimalSI)
}
}
serviceContainer.Resources.Requests = resourceRequests
}
}
// HealthCheck
if composeServiceConfig.HealthCheck != nil && !composeServiceConfig.HealthCheck.Disable {
probe, err := c.getHealthCheck(*composeServiceConfig.HealthCheck)
if err != nil {
logrus.Warnf("Unable to parse health check : %s", err)
} else {
serviceContainer.LivenessProbe = &probe
}
}
restart := composeServiceConfig.Restart
if composeServiceConfig.Deploy.RestartPolicy != nil {
restart = composeServiceConfig.Deploy.RestartPolicy.Condition
}
if restart == "unless-stopped" {
logrus.Warnf("Restart policy 'unless-stopped' in service %s is not supported, converting it to 'always'", name)
serviceConfig.RestartPolicy = core.RestartPolicyAlways
}
// replicas:
if composeServiceConfig.Deploy.Replicas != nil {
serviceConfig.Replicas = int(*composeServiceConfig.Deploy.Replicas)
}
serviceContainer.Env = c.getEnvs(composeServiceConfig)
vml, vl := makeVolumesFromTmpFS(name, composeServiceConfig.Tmpfs)
for _, v := range vl {
serviceConfig.AddVolume(v)
}
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, vml...)
for _, secret := range composeServiceConfig.Secrets {
target := filepath.Join(defaultSecretBasePath, secret.Source)
src := secret.Source
if secret.Target != "" {
tokens := strings.Split(secret.Source, "/")
var prefix string
if !strings.HasPrefix(secret.Target, "/") {
prefix = defaultSecretBasePath + "/"
}
if tokens[len(tokens)-1] == secret.Target {
target = prefix + secret.Source
} else {
target = prefix + strings.TrimSuffix(secret.Target, "/"+tokens[len(tokens)-1])
}
src = tokens[len(tokens)-1]
}
vSrc := core.VolumeSource{
Secret: &core.SecretVolumeSource{
SecretName: secret.Source,
Items: []core.KeyToPath{{
Key: secret.Source,
Path: src,
}},
},
}
if secret.Mode != nil {
mode := cast.ToInt32(*secret.Mode)
vSrc.Secret.DefaultMode = &mode
}
serviceConfig.AddVolume(core.Volume{
Name: secret.Source,
VolumeSource: vSrc,
})
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: secret.Source,
MountPath: target,
})
}
for _, c := range composeServiceConfig.Configs {
target := c.Target
if target == "" {
target = "/" + c.Source
}
vSrc := core.ConfigMapVolumeSource{}
vSrc.Name = common.MakeFileNameCompliant(c.Source)
if o, ok := composeObject.Configs[c.Source]; ok {
if o.External.External {
logrus.Errorf("Config metadata %s has an external source", c.Source)
} else {
srcBaseName := filepath.Base(o.File)
vSrc.Items = []core.KeyToPath{{Key: srcBaseName, Path: filepath.Base(target)}}
if c.Mode != nil {
signedMode := int32(*c.Mode)
vSrc.DefaultMode = &signedMode
}
}
} else {
logrus.Errorf("Unable to find configmap object for %s", vSrc.Name)
}
serviceConfig.AddVolume(core.Volume{
Name: vSrc.Name,
VolumeSource: core.VolumeSource{ConfigMap: &vSrc},
})
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts,
core.VolumeMount{
Name: vSrc.Name,
MountPath: target,
SubPath: filepath.Base(target),
})
}
for _, vol := range composeServiceConfig.Volumes {
if isPath(vol.Source) {
hPath := vol.Source
if !filepath.IsAbs(vol.Source) {
// Assign to the outer hPath; declaring a new variable here would shadow it
// and discard the absolute path.
absPath, err := filepath.Abs(vol.Source)
if err != nil {
logrus.Debugf("Could not create an absolute path for [%s]", vol.Source)
} else {
hPath = absPath
}
}
// Generate a hash Id for the given source file path to be mounted.
hashID := getHash([]byte(hPath))
volumeName := fmt.Sprintf("%s%d", common.VolumePrefix, hashID)
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: volumeName,
MountPath: vol.Target,
})
serviceConfig.AddVolume(core.Volume{
Name: volumeName,
VolumeSource: core.VolumeSource{
HostPath: &core.HostPathVolumeSource{Path: vol.Source},
},
})
} else {
serviceContainer.VolumeMounts = append(serviceContainer.VolumeMounts, core.VolumeMount{
Name: vol.Source,
MountPath: vol.Target,
})
serviceConfig.AddVolume(core.Volume{
Name: vol.Source,
VolumeSource: core.VolumeSource{
PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
ClaimName: vol.Source,
},
},
})
storageObj := irtypes.Storage{StorageType: irtypes.PVCKind, Name: vol.Source, Content: nil}
ir.AddStorage(storageObj)
}
}
serviceConfig.Containers = []core.Container{serviceContainer}
ir.Services[name] = serviceConfig
}
return ir, nil
}
func (c *V3Loader) getSecretStorages(secrets map[string]types.SecretConfig) []irtypes.Storage {
storages := make([]irtypes.Storage, 0, len(secrets))
for secretName, secretObj := range secrets {
storage := irtypes.Storage{
Name: secretName,
StorageType: irtypes.SecretKind,
}
if !secretObj.External.External {
content, err := ioutil.ReadFile(secretObj.File)
if err != nil {
logrus.Warnf("Could not read the secret file [%s]", secretObj.File)
} else {
storage.Content = map[string][]byte{secretName: content}
}
}
storages = append(storages, storage)
}
return storages
}
func (c *V3Loader) getConfigStorages(configs map[string]types.ConfigObjConfig) []irtypes.Storage {
storages := make([]irtypes.Storage, 0, len(configs))
for cfgName, cfgObj := range configs {
storage := irtypes.Storage{
Name: cfgName,
StorageType: irtypes.ConfigMapKind,
}
if !cfgObj.External.External {
fileInfo, err := os.Stat(cfgObj.File)
if err != nil {
logrus.Warnf("Could not identify the type of config artifact [%s]. Encountered [%s]", cfgObj.File, err)
} else {
if !fileInfo.IsDir() {
content, err := ioutil.ReadFile(cfgObj.File)
if err != nil {
logrus.Warnf("Could not read the config file [%s]. Encountered [%s]", cfgObj.File, err)
} else {
storage.Content = map[string][]byte{cfgName: content}
}
} else {
dataMap, err := c.getAllDirContentAsMap(cfgObj.File)
if err != nil {
logrus.Warnf("Could not read the config directory [%s]. Encountered [%s]", cfgObj.File, err)
} else {
storage.Content = dataMap
}
}
}
}
storages = append(storages, storage)
}
return storages
}
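// Illustrative result (hedged; paths are hypothetical): a config entry pointing
// at a directory such as
//
//	configs:
//	  app-config:
//	    file: ./conf.d/
//
// becomes one Storage whose Content maps each regular file name to its bytes,
// e.g. {"app.conf": ..., "extra.conf": ...}; subdirectories are skipped by
// getAllDirContentAsMap further below.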
func (*V3Loader) getPorts(ports []types.ServicePortConfig, expose []string) []core.ContainerPort {
containerPorts := []core.ContainerPort{}
exist := map[string]bool{}
for _, port := range ports {
proto := core.ProtocolTCP
if strings.EqualFold(string(core.ProtocolUDP), port.Protocol) {
proto = core.ProtocolUDP
}
// Add the port to the k8s pod.
containerPorts = append(containerPorts, core.ContainerPort{
ContainerPort: int32(port.Target),
Protocol: proto,
})
exist[cast.ToString(port.Target)] = true
}
for _, port := range expose {
portValue := port
protocol := core.ProtocolTCP
if strings.Contains(portValue, "/") {
splits := strings.Split(port, "/")
portValue = splits[0]
protocol = core.Protocol(strings.ToUpper(splits[1]))
}
if exist[portValue] {
continue
}
// Add the port to the k8s pod.
containerPorts = append(containerPorts, core.ContainerPort{
ContainerPort: cast.ToInt32(portValue),
Protocol: protocol,
})
}
return containerPorts
}
func (*V3Loader) addPorts(ports []types.ServicePortConfig, expose []string, service *irtypes.Service) {
exist := map[string]bool{}
for _, port := range ports {
// Forward the port on the k8s service to the k8s pod.
podPort := networking.ServiceBackendPort{
Number: int32(port.Target),
}
servicePort := networking.ServiceBackendPort{
Number: int32(port.Published),
}
service.AddPortForwarding(servicePort, podPort, "")
exist[cast.ToString(port.Target)] = true
}
for _, port := range expose {
portValue := port
if strings.Contains(portValue, "/") {
splits := strings.Split(port, "/")
portValue = splits[0]
}
if exist[portValue] {
continue
}
// Forward the port on the k8s service to the k8s pod.
portNumber := cast.ToInt32(portValue)
podPort := networking.ServiceBackendPort{
Number: portNumber,
}
servicePort := networking.ServiceBackendPort{
Number: portNumber,
}
service.AddPortForwarding(servicePort, podPort, "")
}
}
func (c *V3Loader) getNetworks(composeServiceConfig types.ServiceConfig, composeObject types.Config) (networks []string) {
networks = []string{}
for key := range composeServiceConfig.Networks {
netName := composeObject.Networks[key].Name
if netName == "" {
netName = key
}
networks = append(networks, netName)
}
return networks
}
func (c *V3Loader) getHealthCheck(composeHealthCheck types.HealthCheckConfig) (core.Probe, error) {
probe := core.Probe{}
if len(composeHealthCheck.Test) > 1 {
probe.Handler = core.Handler{
Exec: &core.ExecAction{
// docker/cli adds "CMD-SHELL" to the struct, hence we remove the first element of composeHealthCheck.Test
Command: composeHealthCheck.Test[1:],
},
}
} else {
logrus.Warnf("Could not find command to execute in probe: %s", composeHealthCheck.Test)
}
if composeHealthCheck.Timeout != nil {
parse, err := time.ParseDuration(composeHealthCheck.Timeout.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check timeout variable")
}
probe.TimeoutSeconds = int32(parse.Seconds())
}
if composeHealthCheck.Interval != nil {
parse, err := time.ParseDuration(composeHealthCheck.Interval.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check interval variable")
}
probe.PeriodSeconds = int32(parse.Seconds())
}
if composeHealthCheck.Retries != nil {
probe.FailureThreshold = int32(*composeHealthCheck.Retries)
}
if composeHealthCheck.StartPeriod != nil {
parse, err := time.ParseDuration(composeHealthCheck.StartPeriod.String())
if err != nil {
return probe, errors.Wrap(err, "unable to parse health check startPeriod variable")
}
probe.InitialDelaySeconds = int32(parse.Seconds())
}
return probe, nil
}
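// Hedged end-to-end sketch: a compose healthcheck such as
//
//	healthcheck:
//	  test: ["CMD-SHELL", "curl -f http://localhost/"]
//	  interval: 30s
//	  timeout: 5s
//	  retries: 3
//
// maps to an exec probe with Command ["curl -f http://localhost/"],
// PeriodSeconds 30, TimeoutSeconds 5, and FailureThreshold 3.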
func (c *V3Loader) getEnvs(composeServiceConfig types.ServiceConfig) (envs []core.EnvVar) {
for name, value := range composeServiceConfig.Environment {
var env core.EnvVar
if value != nil {
env = core.EnvVar{Name: name, Value: *value}
} else {
env = core.EnvVar{Name: name, Value: "unknown"}
}
envs = append(envs, env)
}
return envs
}
func (c *V3Loader) getAllDirContentAsMap(directoryPath string) (map[string][]byte, error) {
fileList, err := ioutil.ReadDir(directoryPath)
if err != nil {
return nil, err
}
dataMap := map[string][]byte{}
count := 0
for _, file := range fileList {
if file.IsDir() {
continue
}
fileName := file.Name()
logrus.Debugf("Reading file into the data map: [%s]", fileName)
data, err := ioutil.ReadFile(filepath.Join(directoryPath, fileName))
if err != nil {
logrus.Debugf("Unable to read file data: %s", fileName)
continue
}
dataMap[fileName] = data
count = count + 1
}
logrus.Debugf("Read %d files into the data map", count)
return dataMap, nil
}
| convertToIR | identifier_name |
fmt.rs | use std::{
borrow::Cow,
fmt::{self, Write as _},
io,
time::Duration,
};
use termcolor::{ColorSpec, WriteColor};
use unicode_width::UnicodeWidthChar;
use crate::{markup, Markup, MarkupElement};
/// A stack-allocated linked-list of [MarkupElement] slices
pub enum MarkupElements<'a> {
Root,
Node(&'a Self, &'a [MarkupElement]),
}
impl<'a> MarkupElements<'a> {
/// Iterates on all the element slices depth-first
pub fn for_each(&self, func: &mut impl FnMut(&'a [MarkupElement])) {
if let Self::Node(parent, elem) = self {
parent.for_each(func);
func(elem);
}
}
}
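// Hedged sketch (not in the original file): for_each recurses into the parent
// first, so outer elements are visited before inner ones, which is the order
// in which nested styles must be applied.
//
// let root = MarkupElements::Root;
// let outer = MarkupElements::Node(&root, &outer_elems); // hypothetical slices
// let inner = MarkupElements::Node(&outer, &inner_elems);
// inner.for_each(&mut |els| { /* sees outer_elems, then inner_elems */ });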
pub trait Write {
fn write_str(&mut self, elements: &MarkupElements, content: &str) -> io::Result<()>;
fn write_fmt(&mut self, elements: &MarkupElements, content: fmt::Arguments) -> io::Result<()>;
}
/// Applies the current format in `state` to `writer`, calls `func` to
/// print a piece of text, then resets the printing format
fn with_format<W>(
writer: &mut W,
state: &MarkupElements,
func: impl FnOnce(&mut W) -> io::Result<()>,
) -> io::Result<()>
where
W: WriteColor,
{
let mut color = ColorSpec::new();
state.for_each(&mut |elements| {
for element in elements {
element.update_color(&mut color);
}
});
if let Err(err) = writer.set_color(&color) {
writer.reset()?;
return Err(err);
}
let result = func(writer);
writer.reset()?;
result
}
/// Adapter struct implementing [Write] over types implementing [WriteColor]
pub struct Termcolor<W>(pub W);
impl<W> Write for Termcolor<W>
where
W: WriteColor,
{
fn write_str(&mut self, elements: &MarkupElements, content: &str) -> io::Result<()> {
with_format(&mut self.0, elements, |writer| {
let mut adapter = SanitizeAdapter {
writer,
error: Ok(()),
};
match adapter.write_str(content) {
Ok(()) => Ok(()),
Err(..) => {
if adapter.error.is_err() {
adapter.error
} else {
// SanitizeAdapter can only fail if the underlying
// writer returns an error
unreachable!()
}
}
}
})
}
fn write_fmt(&mut self, elements: &MarkupElements, content: fmt::Arguments) -> io::Result<()> {
with_format(&mut self.0, elements, |writer| {
let mut adapter = SanitizeAdapter {
writer,
error: Ok(()),
};
match adapter.write_fmt(content) {
Ok(()) => Ok(()),
Err(..) => {
if adapter.error.is_err() {
adapter.error
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"a Display formatter returned an error",
))
}
}
}
})
}
}
/// Adapter [fmt::Write] calls to [io::Write] with sanitization,
/// implemented as an internal struct to avoid exposing [fmt::Write] on
/// [Termcolor]
struct SanitizeAdapter<W> {
writer: W,
error: io::Result<()>,
}
impl<W: io::Write> fmt::Write for SanitizeAdapter<W> {
fn write_str(&mut self, content: &str) -> fmt::Result {
let mut buffer = [0; 4];
for item in content.chars() {
// Replace non-whitespace, zero-width characters with the Unicode replacement character
let is_whitespace = item.is_whitespace();
let is_zero_width = UnicodeWidthChar::width(item).map_or(true, |width| width == 0);
let item = if !is_whitespace && is_zero_width {
char::REPLACEMENT_CHARACTER
} else {
item
};
item.encode_utf8(&mut buffer);
if let Err(err) = self.writer.write_all(&buffer[..item.len_utf8()]) {
self.error = Err(err);
return Err(fmt::Error);
}
}
Ok(())
}
}
/// The [Formatter] is the `rome_console` equivalent to [std::fmt::Formatter]:
/// it's never constructed directly by consumers, and can only be used through
/// the mutable reference passed to implementations of the [Display] trait.
/// It manages the state of the markup to print, and implementations of
/// [Display] can call into its methods to append content into the current
/// printing session
pub struct Formatter<'fmt> {
/// Stack of markup elements currently applied to the text being printed
state: MarkupElements<'fmt>,
/// Inner IO writer this [Formatter] will print text into
writer: &'fmt mut dyn Write,
}
impl<'fmt> Formatter<'fmt> {
/// Create a new instance of the [Formatter] using the provided `writer` for printing
pub fn new(writer: &'fmt mut dyn Write) -> Self {
Self {
state: MarkupElements::Root,
writer,
}
}
/// Return a new instance of the [Formatter] with `elements` appended to its element stack
fn with_elements<'b>(&'b mut self, elements: &'b [MarkupElement]) -> Formatter<'b> {
Formatter {
state: MarkupElements::Node(&self.state, elements),
writer: self.writer,
}
}
/// Write a piece of markup into this formatter
pub fn write_markup(&mut self, markup: Markup) -> io::Result<()> {
for node in markup.0 {
let mut fmt = self.with_elements(node.elements);
node.content.fmt(&mut fmt)?;
}
Ok(())
}
/// Write a slice of text into this formatter
pub fn write_str(&mut self, content: &str) -> io::Result<()> {
self.writer.write_str(&self.state, content)
}
/// Write formatted text into this formatter
pub fn write_fmt(&mut self, content: fmt::Arguments) -> io::Result<()> {
self.writer.write_fmt(&self.state, content)
}
}
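// Usage sketch (assumption, not part of this file): driving a Formatter by hand
// with the Termcolor adapter defined above.
//
// let mut writer = Termcolor(termcolor::NoColor::new(Vec::new()));
// let mut fmt = Formatter::new(&mut writer);
// fmt.write_markup(markup! { <Emphasis>"hello"</Emphasis> }).unwrap();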
/// Formatting trait for types to be displayed as markup, the `rome_console`
/// equivalent to [std::fmt::Display]
///
/// # Example
/// Implementing `Display` on a custom struct
/// ```
/// use std::io;
/// use rome_console::{fmt::{Display, Formatter}, markup};
///
/// struct Warning(String);
///
/// impl Display for Warning {
/// fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
/// fmt.write_markup(markup! {
/// <Warn>{self.0}</Warn>
/// })
/// }
/// }
///
/// let warning = Warning(String::from("content"));
/// markup! {
/// <Emphasis>{warning}</Emphasis>
/// };
/// ```
pub trait Display {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()>;
}
// Blanket implementations of Display for reference types
impl<'a, T> Display for &'a T
where
T: Display + ?Sized,
{
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
T::fmt(self, fmt)
}
}
impl<'a, T> Display for Cow<'a, T>
where
T: Display + ToOwned + ?Sized,
{
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
T::fmt(self, fmt)
}
}
// Simple implementations of Display calling through to write_str for types
// that implement Deref<str>
impl Display for str {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_str(self)
}
}
impl Display for String {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_str(self)
}
}
// Implement Display for Markup and Rust format Arguments
impl<'a> Display for Markup<'a> {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_markup(*self)
}
}
impl<'a> Display for std::fmt::Arguments<'a> {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_fmt(*self)
}
}
/// Implement [Display] for types that implement [std::fmt::Display] by calling
/// through to [Formatter::write_fmt]
macro_rules! impl_std_display {
($ty:ty) => {
impl Display for $ty {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
write!(fmt, "{self}")
}
}
};
}
impl_std_display!(char);
impl_std_display!(i8);
impl_std_display!(i16);
impl_std_display!(i32);
impl_std_display!(i64);
impl_std_display!(i128);
impl_std_display!(isize);
impl_std_display!(u8);
impl_std_display!(u16);
impl_std_display!(u32);
impl_std_display!(u64);
impl_std_display!(u128);
impl_std_display!(usize);
impl Display for Duration {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
use crate as rome_console;
let secs = self.as_secs();
if secs > 1 {
return fmt.write_markup(markup! {
{secs}<Dim>"s"</Dim>
});
}
let millis = self.as_millis();
if millis > 1 {
return fmt.write_markup(markup! {
{millis}<Dim>"ms"</Dim>
});
}
let micros = self.as_micros();
if micros > 1 {
return fmt.write_markup(markup! {
{micros}<Dim>"µs"</Dim>
});
}
let nanos = self.as_nanos();
fmt.write_markup(markup! {
{nanos}<Dim>"ns"</Dim>
})
}
}
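// Illustrative renderings (hedged; note the strict `> 1` comparisons):
//
// Duration::from_secs(3)      -> "3s"
// Duration::from_millis(1500) -> "1500ms" (secs == 1 is not > 1)
// Duration::from_micros(250)  -> "250µs"
// Duration::from_nanos(1)     -> "1ns"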
#[cfg(test)]
mod tests {
use std::{fmt::Write, str::from_utf8};
use super::SanitizeAdapter;
#[test]
fn test_sanitize() {
// Sanitization should leave whitespace control characters (space,
// tabs, newline, ...) and non-ASCII unicode characters as-is but
// redact zero-width characters (RTL override, null character, bell,
// zero-width space, ...) | {
let mut adapter = SanitizeAdapter {
writer: &mut buffer,
error: Ok(()),
};
adapter.write_str(INPUT).unwrap();
adapter.error.unwrap();
}
assert_eq!(from_utf8(&buffer).unwrap(), OUTPUT);
}
} | const INPUT: &str = "t\tes t\r\n\u{202D}t\0es\x07t\u{202E}\nt\u{200B}es🐛t";
const OUTPUT: &str = "t\tes t\r\n\u{FFFD}t\u{FFFD}es\u{FFFD}t\u{FFFD}\nt\u{FFFD}es🐛t";
let mut buffer = Vec::new();
| random_line_split |
fmt.rs | use std::{
borrow::Cow,
fmt::{self, Write as _},
io,
time::Duration,
};
use termcolor::{ColorSpec, WriteColor};
use unicode_width::UnicodeWidthChar;
use crate::{markup, Markup, MarkupElement};
/// A stack-allocated linked-list of [MarkupElement] slices
pub enum MarkupElements<'a> {
Root,
Node(&'a Self, &'a [MarkupElement]),
}
impl<'a> MarkupElements<'a> {
/// Iterates on all the element slices depth-first
pub fn for_each(&self, func: &mut impl FnMut(&'a [MarkupElement])) {
if let Self::Node(parent, elem) = self {
parent.for_each(func);
func(elem);
}
}
}
pub trait Write {
fn write_str(&mut self, elements: &MarkupElements, content: &str) -> io::Result<()>;
fn write_fmt(&mut self, elements: &MarkupElements, content: fmt::Arguments) -> io::Result<()>;
}
/// Applies the current format in `state` to `writer`, calls `func` to
/// print a piece of text, then resets the printing format
fn with_format<W>(
writer: &mut W,
state: &MarkupElements,
func: impl FnOnce(&mut W) -> io::Result<()>,
) -> io::Result<()>
where
W: WriteColor,
{
let mut color = ColorSpec::new();
state.for_each(&mut |elements| {
for element in elements {
element.update_color(&mut color);
}
});
if let Err(err) = writer.set_color(&color) {
writer.reset()?;
return Err(err);
}
let result = func(writer);
writer.reset()?;
result
}
/// Adapter struct implementing [Write] over types implementing [WriteColor]
pub struct Termcolor<W>(pub W);
impl<W> Write for Termcolor<W>
where
W: WriteColor,
{
fn write_str(&mut self, elements: &MarkupElements, content: &str) -> io::Result<()> {
with_format(&mut self.0, elements, |writer| {
let mut adapter = SanitizeAdapter {
writer,
error: Ok(()),
};
match adapter.write_str(content) {
Ok(()) => Ok(()),
Err(..) => {
if adapter.error.is_err() {
adapter.error
} else {
// SanitizeAdapter can only fail if the underlying
// writer returns an error
unreachable!()
}
}
}
})
}
fn write_fmt(&mut self, elements: &MarkupElements, content: fmt::Arguments) -> io::Result<()> {
with_format(&mut self.0, elements, |writer| {
let mut adapter = SanitizeAdapter {
writer,
error: Ok(()),
};
match adapter.write_fmt(content) {
Ok(()) => Ok(()),
Err(..) => {
if adapter.error.is_err() {
adapter.error
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"a Display formatter returned an error",
))
}
}
}
})
}
}
/// Adapter [fmt::Write] calls to [io::Write] with sanitization,
/// implemented as an internal struct to avoid exposing [fmt::Write] on
/// [Termcolor]
struct SanitizeAdapter<W> {
writer: W,
error: io::Result<()>,
}
impl<W: io::Write> fmt::Write for SanitizeAdapter<W> {
fn write_str(&mut self, content: &str) -> fmt::Result {
let mut buffer = [0; 4];
for item in content.chars() {
// Replace non-whitespace, zero-width characters with the Unicode replacement character
let is_whitespace = item.is_whitespace();
let is_zero_width = UnicodeWidthChar::width(item).map_or(true, |width| width == 0);
let item = if !is_whitespace && is_zero_width {
char::REPLACEMENT_CHARACTER
} else {
item
};
item.encode_utf8(&mut buffer);
if let Err(err) = self.writer.write_all(&buffer[..item.len_utf8()]) {
self.error = Err(err);
return Err(fmt::Error);
}
}
Ok(())
}
}
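// Behavior sketch (assumption, mirroring the test at the bottom of this file):
// zero-width and control characters are redacted, whitespace and wide unicode
// pass through.
//
// "a\u{200B}b" -> "a\u{FFFD}b" // zero-width space replaced
// "a\tb\r\n"   -> "a\tb\r\n"   // whitespace kept as-is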
/// The [Formatter] is the `rome_console` equivalent to [std::fmt::Formatter]:
/// it's never constructed directly by consumers, and can only be used through
/// the mutable reference passed to implementations of the [Display] trait.
/// It manages the state of the markup to print, and implementations of
/// [Display] can call into its methods to append content into the current
/// printing session
pub struct Formatter<'fmt> {
/// Stack of markup elements currently applied to the text being printed
state: MarkupElements<'fmt>,
/// Inner IO writer this [Formatter] will print text into
writer: &'fmt mut dyn Write,
}
impl<'fmt> Formatter<'fmt> {
/// Create a new instance of the [Formatter] using the provided `writer` for printing
pub fn new(writer: &'fmt mut dyn Write) -> Self {
Self {
state: MarkupElements::Root,
writer,
}
}
/// Return a new instance of the [Formatter] with `elements` appended to its element stack
fn with_elements<'b>(&'b mut self, elements: &'b [MarkupElement]) -> Formatter<'b> |
/// Write a piece of markup into this formatter
pub fn write_markup(&mut self, markup: Markup) -> io::Result<()> {
for node in markup.0 {
let mut fmt = self.with_elements(node.elements);
node.content.fmt(&mut fmt)?;
}
Ok(())
}
/// Write a slice of text into this formatter
pub fn write_str(&mut self, content: &str) -> io::Result<()> {
self.writer.write_str(&self.state, content)
}
/// Write formatted text into this formatter
pub fn write_fmt(&mut self, content: fmt::Arguments) -> io::Result<()> {
self.writer.write_fmt(&self.state, content)
}
}
/// Formatting trait for types to be displayed as markup, the `rome_console`
/// equivalent to [std::fmt::Display]
///
/// # Example
/// Implementing `Display` on a custom struct
/// ```
/// use std::io;
/// use rome_console::{fmt::{Display, Formatter}, markup};
///
/// struct Warning(String);
///
/// impl Display for Warning {
/// fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
/// fmt.write_markup(markup! {
/// <Warn>{self.0}</Warn>
/// })
/// }
/// }
///
/// let warning = Warning(String::from("content"));
/// markup! {
/// <Emphasis>{warning}</Emphasis>
/// };
/// ```
pub trait Display {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()>;
}
// Blanket implementations of Display for reference types
impl<'a, T> Display for &'a T
where
T: Display + ?Sized,
{
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
T::fmt(self, fmt)
}
}
impl<'a, T> Display for Cow<'a, T>
where
T: Display + ToOwned + ?Sized,
{
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
T::fmt(self, fmt)
}
}
// Simple implementations of Display calling through to write_str for types
// that implement Deref<str>
impl Display for str {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_str(self)
}
}
impl Display for String {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_str(self)
}
}
// Implement Display for Markup and Rust format Arguments
impl<'a> Display for Markup<'a> {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_markup(*self)
}
}
impl<'a> Display for std::fmt::Arguments<'a> {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_fmt(*self)
}
}
/// Implement [Display] for types that implement [std::fmt::Display] by calling
/// through to [Formatter::write_fmt]
macro_rules! impl_std_display {
($ty:ty) => {
impl Display for $ty {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
write!(fmt, "{self}")
}
}
};
}
impl_std_display!(char);
impl_std_display!(i8);
impl_std_display!(i16);
impl_std_display!(i32);
impl_std_display!(i64);
impl_std_display!(i128);
impl_std_display!(isize);
impl_std_display!(u8);
impl_std_display!(u16);
impl_std_display!(u32);
impl_std_display!(u64);
impl_std_display!(u128);
impl_std_display!(usize);
impl Display for Duration {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
use crate as rome_console;
let secs = self.as_secs();
if secs > 1 {
return fmt.write_markup(markup! {
{secs}<Dim>"s"</Dim>
});
}
let millis = self.as_millis();
if millis > 1 {
return fmt.write_markup(markup! {
{millis}<Dim>"ms"</Dim>
});
}
let micros = self.as_micros();
if micros > 1 {
return fmt.write_markup(markup! {
{micros}<Dim>"µs"</Dim>
});
}
let nanos = self.as_nanos();
fmt.write_markup(markup! {
{nanos}<Dim>"ns"</Dim>
})
}
}
#[cfg(test)]
mod tests {
use std::{fmt::Write, str::from_utf8};
use super::SanitizeAdapter;
#[test]
fn test_sanitize() {
// Sanitization should leave whitespace control characters (space,
// tabs, newline, ...) and non-ASCII unicode characters as-is but
// redact zero-width characters (RTL override, null character, bell,
// zero-width space, ...)
const INPUT: &str = "t\tes t\r\n\u{202D}t\0es\x07t\u{202E}\nt\u{200B}es🐛t";
const OUTPUT: &str = "t\tes t\r\n\u{FFFD}t\u{FFFD}es\u{FFFD}t\u{FFFD}\nt\u{FFFD}es🐛t";
let mut buffer = Vec::new();
{
let mut adapter = SanitizeAdapter {
writer: &mut buffer,
error: Ok(()),
};
adapter.write_str(INPUT).unwrap();
adapter.error.unwrap();
}
assert_eq!(from_utf8(&buffer).unwrap(), OUTPUT);
}
}
| {
Formatter {
state: MarkupElements::Node(&self.state, elements),
writer: self.writer,
}
} | identifier_body |
fmt.rs | use std::{
borrow::Cow,
fmt::{self, Write as _},
io,
time::Duration,
};
use termcolor::{ColorSpec, WriteColor};
use unicode_width::UnicodeWidthChar;
use crate::{markup, Markup, MarkupElement};
/// A stack-allocated linked-list of [MarkupElement] slices
pub enum MarkupElements<'a> {
Root,
Node(&'a Self, &'a [MarkupElement]),
}
impl<'a> MarkupElements<'a> {
/// Iterates on all the element slices depth-first
pub fn for_each(&self, func: &mut impl FnMut(&'a [MarkupElement])) {
if let Self::Node(parent, elem) = self {
parent.for_each(func);
func(elem);
}
}
}
pub trait Write {
fn write_str(&mut self, elements: &MarkupElements, content: &str) -> io::Result<()>;
fn write_fmt(&mut self, elements: &MarkupElements, content: fmt::Arguments) -> io::Result<()>;
}
/// Applies the current format in `state` to `writer`, calls `func` to
/// print a piece of text, then resets the printing format
fn with_format<W>(
writer: &mut W,
state: &MarkupElements,
func: impl FnOnce(&mut W) -> io::Result<()>,
) -> io::Result<()>
where
W: WriteColor,
{
let mut color = ColorSpec::new();
state.for_each(&mut |elements| {
for element in elements {
element.update_color(&mut color);
}
});
if let Err(err) = writer.set_color(&color) {
writer.reset()?;
return Err(err);
}
let result = func(writer);
writer.reset()?;
result
}
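// Hedged note: with_format always resets the terminal colors, whether `func`
// succeeds or fails, so an error path cannot leave styling enabled.
//
// with_format(&mut writer, &state, |w| write!(w, "styled"))?; // hypothetical call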
/// Adapter struct implementing [Write] over types implementing [WriteColor]
pub struct Termcolor<W>(pub W);
impl<W> Write for Termcolor<W>
where
W: WriteColor,
{
fn write_str(&mut self, elements: &MarkupElements, content: &str) -> io::Result<()> {
with_format(&mut self.0, elements, |writer| {
let mut adapter = SanitizeAdapter {
writer,
error: Ok(()),
};
match adapter.write_str(content) {
Ok(()) => Ok(()),
Err(..) => {
if adapter.error.is_err() | else {
// SanitizeAdapter can only fail if the underlying
// writer returns an error
unreachable!()
}
}
}
})
}
fn write_fmt(&mut self, elements: &MarkupElements, content: fmt::Arguments) -> io::Result<()> {
with_format(&mut self.0, elements, |writer| {
let mut adapter = SanitizeAdapter {
writer,
error: Ok(()),
};
match adapter.write_fmt(content) {
Ok(()) => Ok(()),
Err(..) => {
if adapter.error.is_err() {
adapter.error
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"a Display formatter returned an error",
))
}
}
}
})
}
}
/// Adapter [fmt::Write] calls to [io::Write] with sanitization,
/// implemented as an internal struct to avoid exposing [fmt::Write] on
/// [Termcolor]
struct SanitizeAdapter<W> {
writer: W,
error: io::Result<()>,
}
impl<W: io::Write> fmt::Write for SanitizeAdapter<W> {
fn write_str(&mut self, content: &str) -> fmt::Result {
let mut buffer = [0; 4];
for item in content.chars() {
// Replace non-whitespace, zero-width characters with the Unicode replacement character
let is_whitespace = item.is_whitespace();
let is_zero_width = UnicodeWidthChar::width(item).map_or(true, |width| width == 0);
let item = if !is_whitespace && is_zero_width {
char::REPLACEMENT_CHARACTER
} else {
item
};
item.encode_utf8(&mut buffer);
if let Err(err) = self.writer.write_all(&buffer[..item.len_utf8()]) {
self.error = Err(err);
return Err(fmt::Error);
}
}
Ok(())
}
}
/// The [Formatter] is the `rome_console` equivalent to [std::fmt::Formatter]:
/// it's never constructed directly by consumers, and can only be used through
/// the mutable reference passed to implementations of the [Display] trait.
/// It manages the state of the markup to print, and implementations of
/// [Display] can call into its methods to append content into the current
/// printing session
pub struct Formatter<'fmt> {
/// Stack of markup elements currently applied to the text being printed
state: MarkupElements<'fmt>,
/// Inner IO writer this [Formatter] will print text into
writer: &'fmt mut dyn Write,
}
impl<'fmt> Formatter<'fmt> {
/// Create a new instance of the [Formatter] using the provided `writer` for printing
pub fn new(writer: &'fmt mut dyn Write) -> Self {
Self {
state: MarkupElements::Root,
writer,
}
}
/// Return a new instance of the [Formatter] with `elements` appended to its element stack
fn with_elements<'b>(&'b mut self, elements: &'b [MarkupElement]) -> Formatter<'b> {
Formatter {
state: MarkupElements::Node(&self.state, elements),
writer: self.writer,
}
}
/// Write a piece of markup into this formatter
pub fn write_markup(&mut self, markup: Markup) -> io::Result<()> {
for node in markup.0 {
let mut fmt = self.with_elements(node.elements);
node.content.fmt(&mut fmt)?;
}
Ok(())
}
/// Write a slice of text into this formatter
pub fn write_str(&mut self, content: &str) -> io::Result<()> {
self.writer.write_str(&self.state, content)
}
/// Write formatted text into this formatter
pub fn write_fmt(&mut self, content: fmt::Arguments) -> io::Result<()> {
self.writer.write_fmt(&self.state, content)
}
}
/// Formatting trait for types to be displayed as markup, the `rome_console`
/// equivalent to [std::fmt::Display]
///
/// # Example
/// Implementing `Display` on a custom struct
/// ```
/// use std::io;
/// use rome_console::{fmt::{Display, Formatter}, markup};
///
/// struct Warning(String);
///
/// impl Display for Warning {
/// fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
/// fmt.write_markup(markup! {
/// <Warn>{self.0}</Warn>
/// })
/// }
/// }
///
/// let warning = Warning(String::from("content"));
/// markup! {
/// <Emphasis>{warning}</Emphasis>
/// };
/// ```
pub trait Display {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()>;
}
// Blanket implementations of Display for reference types
impl<'a, T> Display for &'a T
where
T: Display + ?Sized,
{
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
T::fmt(self, fmt)
}
}
impl<'a, T> Display for Cow<'a, T>
where
T: Display + ToOwned + ?Sized,
{
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
T::fmt(self, fmt)
}
}
// Simple implementations of Display calling through to write_str for types
// that implement Deref<str>
impl Display for str {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_str(self)
}
}
impl Display for String {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_str(self)
}
}
// Implement Display for Markup and Rust format Arguments
impl<'a> Display for Markup<'a> {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_markup(*self)
}
}
impl<'a> Display for std::fmt::Arguments<'a> {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_fmt(*self)
}
}
/// Implement [Display] for types that implement [std::fmt::Display] by calling
/// through to [Formatter::write_fmt]
macro_rules! impl_std_display {
($ty:ty) => {
impl Display for $ty {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
write!(fmt, "{self}")
}
}
};
}
impl_std_display!(char);
impl_std_display!(i8);
impl_std_display!(i16);
impl_std_display!(i32);
impl_std_display!(i64);
impl_std_display!(i128);
impl_std_display!(isize);
impl_std_display!(u8);
impl_std_display!(u16);
impl_std_display!(u32);
impl_std_display!(u64);
impl_std_display!(u128);
impl_std_display!(usize);
impl Display for Duration {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
use crate as rome_console;
let secs = self.as_secs();
if secs > 1 {
return fmt.write_markup(markup! {
{secs}<Dim>"s"</Dim>
});
}
let millis = self.as_millis();
if millis > 1 {
return fmt.write_markup(markup! {
{millis}<Dim>"ms"</Dim>
});
}
let micros = self.as_micros();
if micros > 1 {
return fmt.write_markup(markup! {
{micros}<Dim>"µs"</Dim>
});
}
let nanos = self.as_nanos();
fmt.write_markup(markup! {
{nanos}<Dim>"ns"</Dim>
})
}
}
#[cfg(test)]
mod tests {
use std::{fmt::Write, str::from_utf8};
use super::SanitizeAdapter;
#[test]
fn test_sanitize() {
// Sanitization should leave whitespace control characters (space,
// tabs, newline, ...) and non-ASCII unicode characters as-is but
// redact zero-width characters (RTL override, null character, bell,
// zero-width space, ...)
const INPUT: &str = "t\tes t\r\n\u{202D}t\0es\x07t\u{202E}\nt\u{200B}es🐛t";
const OUTPUT: &str = "t\tes t\r\n\u{FFFD}t\u{FFFD}es\u{FFFD}t\u{FFFD}\nt\u{FFFD}es🐛t";
let mut buffer = Vec::new();
{
let mut adapter = SanitizeAdapter {
writer: &mut buffer,
error: Ok(()),
};
adapter.write_str(INPUT).unwrap();
adapter.error.unwrap();
}
assert_eq!(from_utf8(&buffer).unwrap(), OUTPUT);
}
}
| {
adapter.error
} | conditional_block |
fmt.rs | use std::{
borrow::Cow,
fmt::{self, Write as _},
io,
time::Duration,
};
use termcolor::{ColorSpec, WriteColor};
use unicode_width::UnicodeWidthChar;
use crate::{markup, Markup, MarkupElement};
/// A stack-allocated linked-list of [MarkupElement] slices
pub enum MarkupElements<'a> {
Root,
Node(&'a Self, &'a [MarkupElement]),
}
impl<'a> MarkupElements<'a> {
/// Iterates on all the element slices depth-first
pub fn for_each(&self, func: &mut impl FnMut(&'a [MarkupElement])) {
if let Self::Node(parent, elem) = self {
parent.for_each(func);
func(elem);
}
}
}
pub trait Write {
fn write_str(&mut self, elements: &MarkupElements, content: &str) -> io::Result<()>;
fn write_fmt(&mut self, elements: &MarkupElements, content: fmt::Arguments) -> io::Result<()>;
}
/// Applies the current format in `state` to `writer`, calls `func` to
/// print a piece of text, then resets the printing format
fn with_format<W>(
writer: &mut W,
state: &MarkupElements,
func: impl FnOnce(&mut W) -> io::Result<()>,
) -> io::Result<()>
where
W: WriteColor,
{
let mut color = ColorSpec::new();
state.for_each(&mut |elements| {
for element in elements {
element.update_color(&mut color);
}
});
if let Err(err) = writer.set_color(&color) {
writer.reset()?;
return Err(err);
}
let result = func(writer);
writer.reset()?;
result
}
/// Adapter struct implementing [Write] over types implementing [WriteColor]
pub struct Termcolor<W>(pub W);
impl<W> Write for Termcolor<W>
where
W: WriteColor,
{
fn write_str(&mut self, elements: &MarkupElements, content: &str) -> io::Result<()> {
with_format(&mut self.0, elements, |writer| {
let mut adapter = SanitizeAdapter {
writer,
error: Ok(()),
};
match adapter.write_str(content) {
Ok(()) => Ok(()),
Err(..) => {
if adapter.error.is_err() {
adapter.error
} else {
// SanitizeAdapter can only fail if the underlying
// writer returns an error
unreachable!()
}
}
}
})
}
fn write_fmt(&mut self, elements: &MarkupElements, content: fmt::Arguments) -> io::Result<()> {
with_format(&mut self.0, elements, |writer| {
let mut adapter = SanitizeAdapter {
writer,
error: Ok(()),
};
match adapter.write_fmt(content) {
Ok(()) => Ok(()),
Err(..) => {
if adapter.error.is_err() {
adapter.error
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"a Display formatter returned an error",
))
}
}
}
})
}
}
/// Adapter [fmt::Write] calls to [io::Write] with sanitization,
/// implemented as an internal struct to avoid exposing [fmt::Write] on
/// [Termcolor]
struct SanitizeAdapter<W> {
writer: W,
error: io::Result<()>,
}
impl<W: io::Write> fmt::Write for SanitizeAdapter<W> {
fn | (&mut self, content: &str) -> fmt::Result {
let mut buffer = [0; 4];
for item in content.chars() {
// Replace non-whitespace, zero-width characters with the Unicode replacement character
let is_whitespace = item.is_whitespace();
let is_zero_width = UnicodeWidthChar::width(item).map_or(true, |width| width == 0);
let item = if !is_whitespace && is_zero_width {
char::REPLACEMENT_CHARACTER
} else {
item
};
item.encode_utf8(&mut buffer);
if let Err(err) = self.writer.write_all(&buffer[..item.len_utf8()]) {
self.error = Err(err);
return Err(fmt::Error);
}
}
Ok(())
}
}
/// The [Formatter] is the `rome_console` equivalent to [std::fmt::Formatter]:
/// it's never constructed directly by consumers, and can only be used through
/// the mutable reference passed to implementations of the [Display] trait.
/// It manages the state of the markup to print, and implementations of
/// [Display] can call into its methods to append content into the current
/// printing session
pub struct Formatter<'fmt> {
/// Stack of markup elements currently applied to the text being printed
state: MarkupElements<'fmt>,
/// Inner IO writer this [Formatter] will print text into
writer: &'fmt mut dyn Write,
}
impl<'fmt> Formatter<'fmt> {
/// Create a new instance of the [Formatter] using the provided `writer` for printing
pub fn new(writer: &'fmt mut dyn Write) -> Self {
Self {
state: MarkupElements::Root,
writer,
}
}
/// Return a new instance of the [Formatter] with `elements` appended to its element stack
fn with_elements<'b>(&'b mut self, elements: &'b [MarkupElement]) -> Formatter<'b> {
Formatter {
state: MarkupElements::Node(&self.state, elements),
writer: self.writer,
}
}
/// Write a piece of markup into this formatter
pub fn write_markup(&mut self, markup: Markup) -> io::Result<()> {
for node in markup.0 {
let mut fmt = self.with_elements(node.elements);
node.content.fmt(&mut fmt)?;
}
Ok(())
}
/// Write a slice of text into this formatter
pub fn write_str(&mut self, content: &str) -> io::Result<()> {
self.writer.write_str(&self.state, content)
}
/// Write formatted text into this formatter
pub fn write_fmt(&mut self, content: fmt::Arguments) -> io::Result<()> {
self.writer.write_fmt(&self.state, content)
}
}
/// Formatting trait for types to be displayed as markup, the `rome_console`
/// equivalent to [std::fmt::Display]
///
/// # Example
/// Implementing `Display` on a custom struct
/// ```
/// use std::io;
/// use rome_console::{fmt::{Display, Formatter}, markup};
///
/// struct Warning(String);
///
/// impl Display for Warning {
/// fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
/// fmt.write_markup(markup! {
/// <Warn>{self.0}</Warn>
/// })
/// }
/// }
///
/// let warning = Warning(String::from("content"));
/// markup! {
/// <Emphasis>{warning}</Emphasis>
/// };
/// ```
pub trait Display {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()>;
}
// Blanket implementations of Display for reference types
impl<'a, T> Display for &'a T
where
T: Display + ?Sized,
{
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
T::fmt(self, fmt)
}
}
impl<'a, T> Display for Cow<'a, T>
where
T: Display + ToOwned + ?Sized,
{
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
T::fmt(self, fmt)
}
}
// Simple implementations of Display calling through to write_str for types
// that implement Deref<str>
impl Display for str {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_str(self)
}
}
impl Display for String {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_str(self)
}
}
// Implement Display for Markup and Rust format Arguments
impl<'a> Display for Markup<'a> {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_markup(*self)
}
}
impl<'a> Display for std::fmt::Arguments<'a> {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
fmt.write_fmt(*self)
}
}
/// Implement [Display] for types that implement [std::fmt::Display] by calling
/// through to [Formatter::write_fmt]
macro_rules! impl_std_display {
($ty:ty) => {
impl Display for $ty {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
write!(fmt, "{self}")
}
}
};
}
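// Illustrative consequence (not in the original): every type expanded below can
// appear directly as markup content, e.g.
//
// markup! { "answer: " {42usize} };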
impl_std_display!(char);
impl_std_display!(i8);
impl_std_display!(i16);
impl_std_display!(i32);
impl_std_display!(i64);
impl_std_display!(i128);
impl_std_display!(isize);
impl_std_display!(u8);
impl_std_display!(u16);
impl_std_display!(u32);
impl_std_display!(u64);
impl_std_display!(u128);
impl_std_display!(usize);
impl Display for Duration {
fn fmt(&self, fmt: &mut Formatter) -> io::Result<()> {
use crate as rome_console;
let secs = self.as_secs();
if secs > 1 {
return fmt.write_markup(markup! {
{secs}<Dim>"s"</Dim>
});
}
let millis = self.as_millis();
if millis > 1 {
return fmt.write_markup(markup! {
{millis}<Dim>"ms"</Dim>
});
}
let micros = self.as_micros();
if micros > 1 {
return fmt.write_markup(markup! {
{micros}<Dim>"µs"</Dim>
});
}
let nanos = self.as_nanos();
fmt.write_markup(markup! {
{nanos}<Dim>"ns"</Dim>
})
}
}
#[cfg(test)]
mod tests {
use std::{fmt::Write, str::from_utf8};
use super::SanitizeAdapter;
#[test]
fn test_sanitize() {
// Sanitization should leave whitespace control characters (space,
// tabs, newline, ...) and non-ASCII unicode characters as-is but
// redact zero-width characters (RTL override, null character, bell,
// zero-width space, ...)
const INPUT: &str = "t\tes t\r\n\u{202D}t\0es\x07t\u{202E}\nt\u{200B}es🐛t";
const OUTPUT: &str = "t\tes t\r\n\u{FFFD}t\u{FFFD}es\u{FFFD}t\u{FFFD}\nt\u{FFFD}es🐛t";
let mut buffer = Vec::new();
{
let mut adapter = SanitizeAdapter {
writer: &mut buffer,
error: Ok(()),
};
adapter.write_str(INPUT).unwrap();
adapter.error.unwrap();
}
assert_eq!(from_utf8(&buffer).unwrap(), OUTPUT);
}
}
| write_str | identifier_name |
serial.rs | // Copyright 2016 taskqueue developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::boxed::FnBox;
use std::fmt;
use std::thread;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
use mioco::{self, ExitStatus, Config, Mioco};
use mioco::mail::*;
use future::{Future, FutureInternal, FutureGuard};
use group::Group;
use queue::{Queue, QueueId, LoopResult};
use util::mioco_handler::{Userdata, blocking_mioco_run_loop, new_coroutine};
use util::unsafe_wrap::NotThreadSafe;
use util::stack::Stack;
static ID: AtomicUsize = ATOMIC_USIZE_INIT;
enum Command
{
Run(Box<FnBox() + Send + 'static>),
Wait(MailboxInnerEnd<ExitStatus>),
End,
}
/// Queue executing tasks serially, non-overlapping, in queued order
///
/// ## Properties
/// - executes tasks in serial order
/// - no tasks may overlap
/// - they never change their native background thread (but may share it)
/// - the tasks are executed in order of queuing
/// - safety against deadlocks from recursive queueing (see example)
///
/// Through these guarantees, SerialQueues may be bound to a type that is **not** Send or Sync
/// and provide easy thread-safe access to this critical resource.
/// Such a SerialQueue is called a [*BoundSerialQueue*](./struct.BoundSerialQueue.html).
///
/// ## Example
///
/// ```rust
/// # use taskqueue::*;
/// init_main(|main| {
/// let thread_one = SerialQueue::new();
/// let thread_two = SerialQueue::new();
///
/// let future_one = thread_one.async(|| {
/// 42
/// });
/// let future_two = thread_two.async(|| {
/// 96
/// });
///
/// println!("Although this is happening in main,");
/// main.async(|| {
/// println!("this task is running before...");
/// });
/// main.sync(|| {
/// println!("...this task and...");
/// assert_eq!(future_one.get() + future_two.get(), 138);
/// });
/// println!("...this is running last");
/// });
/// ```
pub struct SerialQueue
{
id: usize,
tx: MailboxOuterEnd<Command>,
deadlock_tx: MailboxOuterEnd<()>,
}
impl PartialEq for SerialQueue
{
fn eq(&self, other: &Self) -> bool
{
self.id == other.id
}
}
impl fmt::Debug for SerialQueue
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
{
write!(f, "SerialQueue ({})", self.id)
}
}
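// A SerialQueue handle only holds an id and mailbox sender ends; all task
// state lives inside the queue's own coroutine, which is why sharing the
// handle across threads is asserted to be safe here.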
unsafe impl Send for SerialQueue
{}
unsafe impl Sync for SerialQueue
{}
impl SerialQueue
{
/// Create a new SerialQueue and assign it to the global thread pool
pub fn new() -> SerialQueue
{
let (tx, rx) = mailbox();
let (deadlock_tx, deadlock_rx) = mailbox();
let internal_tx = tx.clone();
new_coroutine(move || {
SerialQueue::do_loop(internal_tx, rx, deadlock_rx);
Ok(())
});
let queue = SerialQueue {
id: ID.fetch_add(1, Ordering::SeqCst),
tx: tx,
deadlock_tx: deadlock_tx,
};
info!("Queue created ({:?})", queue);
queue
}
/// Create a new SerialQueue and assign it solely to a newly created OS Thread
///
/// A SerialQueue created through this method will spawn a new native OS Thread
/// and the queue will be the only one utilizing it. The thread will be destroyed
/// when the queue is dropped.
///
/// The purpose of this constructor is to provide a way to use blocking IO with TaskQueue.
/// The use of this method however is discouraged, as the new thread may influence
/// the scheduler negatively, and evented IO, where possible, performs a lot better
/// in combination with the TaskQueue library.
pub fn new_native() -> SerialQueue
{
let (tx, rx) = mailbox();
let (deadlock_tx, deadlock_rx) = mailbox();
let internal_tx = tx.clone();
thread::spawn(move || {
Mioco::new_configured({
let mut config = Config::new();
config.set_thread_num(1);
config
}).start(move || {
SerialQueue::do_loop(internal_tx, rx, deadlock_rx);
Ok(())
});
});
let queue = SerialQueue {
id: ID.fetch_add(1, Ordering::SeqCst),
tx: tx,
deadlock_tx: deadlock_tx,
};
info!("Native Queue created ({:?})", queue);
queue
}
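// Core event loop shared by both queue flavours: read Commands from `rx`,
// run each task in its own coroutine and block until it finishes. If the
// running task reports a deadlock via `deadlock_rx`, its exit notification
// is re-queued with Command::Wait so the remaining queued work runs first.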
fn do_loop(queue_tx: MailboxOuterEnd<Command>,
rx: MailboxInnerEnd<Command>,
deadlock_rx: MailboxInnerEnd<()>)
{
debug!("loop: spawing serial loop");
loop {
trace!("loop: next iteration");
match rx.read() {
Command::End => break,
Command::Wait(routine) => {
trace!("loop: handling previous deadlocked coroutine");
let tx_clone = queue_tx.clone();
loop {
select!(
routine:r => {
if routine.try_read().is_some() {
trace!("loop: task ended");
break;
} else {
continue;
}
},
deadlock_rx:r => {
if deadlock_rx.try_read().is_some() {
trace!("loop: deadlock detected");
tx_clone.send(Command::Wait(routine));
break;
} else {
continue;
}
},
);
}
}
Command::Run(task) => {
let tx_clone = queue_tx.clone();
mioco::set_children_userdata(Some(Userdata::SameThread));
let routine = mioco::spawn_ext(move || {
trace!("loop: spawned new coroutine for task");
task.call_box(());
Ok(())
})
.exit_notificator();
trace!("loop: wait for deadlock notification or coroutine finish");
loop {
select!(
routine:r => {
if routine.try_read().is_some() {
trace!("loop: task ended");
break;
} else {
continue;
}
},
deadlock_rx:r => {
if deadlock_rx.try_read().is_some() {
trace!("loop: deadlock detected");
tx_clone.send(Command::Wait(routine));
break;
} else {
continue;
}
},
);
}
}
}
}
debug!("loop: queue ended");
}
/// Bind this queue to a variable
///
/// This function allows creating a `BoundSerialQueue`.
/// Its purpose is to bind variables to a queue, so they can be used by the tasks submitted.
///
/// # Example
/// ```
/// # use taskqueue::*;
/// # let queue = SerialQueue::new();
/// let x = 5;
/// let bound = queue.with(move || x);
/// bound.scoped_with(|x| println!("{}", x));
/// // x gets dropped inside the queue's thread before the queue gets dropped
/// ```
///
/// You can create multiple bindings for the same queue.
/// Through tuples you may bind multiple variables at once.
///
/// It is even possible to move the creation of the bound variable into the queue by creating
/// it inside the passed constructor, which is then executed on the queue.
/// And because SerialQueues never change their underlying OS Thread,
/// this allows using variables that are not Send or Sync in a thread-safe but shared way.
///
/// # Example
/// ```
/// # use taskqueue::*;
/// # let queue = SerialQueue::new();
/// # fn my_ffi_function() -> *mut () { &mut () };
/// let bound = queue.with(|| {
/// let raw_ptr = my_ffi_function();
/// raw_ptr
/// });
/// bound.scoped_with(|raw_ptr| println!("{}", raw_ptr.is_null()));
/// // raw_ptr gets dropped inside the queue's thread.
/// // This way raw_ptr is never moved between threads.
/// ```
pub fn with<'queue, R: 'static, F>(&'queue self,
constructor: F)
-> BoundSerialQueue<'queue, R>
where F: FnOnce() -> R + Send
{
let binding = self.sync(move || NotThreadSafe::new(constructor()));
BoundSerialQueue {
queue: self,
binding: binding,
}
}
}
impl Queue for SerialQueue
{
fn async<R, F>(&self, operation: F) -> Future<R>
where R: Send + 'static,
F: FnOnce() -> R + Send + 'static
{
let (tx, rx) = mailbox();
let operation: Box<FnBox() + Send + 'static> = Stack::assemble(self, move || {
tx.send(operation());
});
debug!("Queue ({:?}) queued task", self);
self.tx.send(Command::Run(operation));
Future::new_from_serial(self, Some(self.deadlock_tx.clone()), rx)
}
}
/// A bound SerialQueue holding a queue-bound variable
///
/// Create a BoundSerialQueue using `SerialQueue::with`.
/// BoundSerialQueue's hold variables that may be used through
/// tasks executed on this queue through `scoped_with`, `sync_with`,
/// `foreach_with` or `loop_while_with`.
///
/// `async_with` cannot be provided, as the bound variable is
/// dropped when the BoundSerialQueue gets dropped.
///
/// Internally, BoundSerialQueues refer to the same queue they were created from.
/// Multiple BoundSerialQueues may exist for one queue at once.
pub struct BoundSerialQueue<'queue, T: 'static>
{
queue: &'queue SerialQueue,
binding: NotThreadSafe<T>,
}
impl<'queue, T: 'static> fmt::Debug for BoundSerialQueue<'queue, T>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
{
write!(f, "BoundSerialQueue ({:p})", self)
}
}
impl<'queue, T: 'static> BoundSerialQueue<'queue, T>
{
/// Like `Queue::scoped` but provides a mutable reference to the bound variable
///
/// # Safety
///
/// The same rules as for `Queue::scoped` apply.
///
/// # Example
///
/// ```
/// # use taskqueue::*;
/// # let queue = SerialQueue::new();
/// let bound = queue.with(|| { "Hello".to_string() });
/// let name = "Stack".to_string();
/// bound.scoped_with(|message| { println!("{} {}!", message, name) });
/// ```
pub fn scoped_with<R, F>(&'queue self, operation: F) -> FutureGuard<R>
where R: Send + 'static,
F: FnOnce(&'queue mut T) -> R + Send + 'queue
{
self.queue.scoped(move || operation(unsafe { self.binding.get_mut() }))
}
/// Like `Queue::sync` but provides a mutable reference to the bound variable
///
/// # Example
///
/// ```
/// # use taskqueue::*;
/// # let queue = SerialQueue::new();
/// let bound = queue.with(|| { "a bound queue".to_string() });
/// bound.sync_with(|name| { println!("Hello {}", name) });
/// ```
pub fn sync_with<R, F>(&'queue self, operation: F) -> R
where R: Send + 'static,
F: FnOnce(&'queue mut T) -> R + Send + 'queue
{
self.queue.sync(move || operation(unsafe { self.binding.get_mut() }))
}
/// Like `Queue::foreach` but provides a mutable reference to the bound variable
///
/// # Example
///
/// ```
/// # use taskqueue::*;
/// # let queue = SerialQueue::new();
/// let bound = queue.with(|| 2);
/// let doubled: Vec<i32> = bound.foreach_with((0..20), |factor, x| x*factor).wait().collect();
/// # assert_eq!((0..20).map(|x| x*2).collect::<Vec<i32>>(), doubled);
/// ```
pub fn foreach_with<B, R, I, F>(&'queue self, iter: I, operation: F) -> Group<R>
where B: Send,
R: Send + 'queue,
I: Iterator<Item = B> + Send,
F: Fn(&'queue T, B) -> R + Send + Sync + 'queue
{
let mut group = Group::new();
for x in iter {
let op = &operation;
let binding = &self.binding;
group.scoped(self, move || op(unsafe { binding.get_mut() }, x));
}
group
}
/// Like `Queue::loop_while` but provides a mutable reference to the bound variable
///
/// # Example
///
/// ```
/// # use taskqueue::*;
/// use std::mem;
///
/// let queue = SerialQueue::new();
/// let bound = queue.with(|| (15, 25));
///
/// let greatest_common_divisor = bound.loop_while_with(|tuple| {
/// let x = tuple.0;
/// let y = tuple.1;
///
/// if y == 0 {
/// LoopResult::Done(x.clone())
/// } else {
/// let remainder = x % y;
/// let mut new_tuple = (y, remainder);
/// mem::swap(tuple, &mut new_tuple);
/// LoopResult::Continue
/// }
/// }).get();
/// #
/// # assert_eq!(5, greatest_common_divisor);
/// ```
pub fn loop_while_with<R, F>(&'queue self, operation: F) -> FutureGuard<R>
where F: Fn(&'queue mut T) -> LoopResult<R> + Send + Sync + 'queue,
R: Send + 'static,
{
self.queue.loop_while(move || operation(unsafe { self.binding.get_mut() }))
}
}
impl<'a, T: 'static> Queue for BoundSerialQueue<'a, T>
{
fn async<R, F>(&self, operation: F) -> Future<R>
where R: Send + 'static,
F: FnOnce() -> R + Send + 'static
{
self.queue.async(operation)
}
}
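// Dropping a BoundSerialQueue schedules destruction of the bound value on
// the queue itself, so a value that is not Send is only ever touched on the
// thread it lives on.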
impl<'queue, T: 'static> Drop for BoundSerialQueue<'queue, T>
{
fn drop(&mut self)
{
let binding = self.binding.clone();
self.queue.async(move || {
unsafe {
binding.drop();
}
});
}
}
impl QueueId for SerialQueue
{
fn id(&self) -> usize
{
self.id
}
}
/// Convert the main thread into a SerialQueue
///
/// This function wraps the current thread in a SerialQueue, which
/// is passed to the executed function, blocking the current thread
/// until the created Queue is done.
///
/// This function is only intended to be used on the main thread and
/// library creators should never need to use it.
///
/// If you need a queue based on a newly created OS thread use `SerialQueue::new_native()`.
///
/// # Example
///
/// ```
/// # use taskqueue::*;
/// fn main() {
/// init_main(|main_queue| {
/// // start using it!
/// })
/// }
/// ```
pub fn init_main<F: FnOnce(SerialQueue) + Send + 'static>(start: F)
{
blocking_mioco_run_loop(move || {
mioco::set_children_userdata(Some(Userdata::RoundRobin));
let (tx, rx) = mailbox::<Command>();
let (deadlock_tx, deadlock_rx) = mailbox::<()>();
let new_queue = SerialQueue {
id: ID.fetch_add(1, Ordering::SeqCst),
tx: tx.clone(),
deadlock_tx: deadlock_tx.clone(),
};
tx.send(Command::Run(
Stack::assemble_main(new_queue, start)
));
info!("Main Queue ready!");
SerialQueue::do_loop(tx, rx, deadlock_rx);
Ok(())
})
}
impl Drop for SerialQueue
{
fn drop(&mut self)
{
trace!("Dropping {:?}", self);
self.tx.send(Command::End);
}
}
| {
self.queue.sync(move || operation(unsafe { self.binding.get_mut() }))
} | identifier_body |
paths.py

#!/usr/bin/env python
"""Pathspecs are methods of specifying the path on the client.
The GRR client has a number of drivers to virtualize access to different objects
to create a Virtual File System (VFS) abstraction. These are called 'VFS
Handlers' and they provide typical file-like operations (e.g. read, seek, tell
and stat). It is possible to recursively apply different drivers in the correct
order to arrive at a certain file like object. In order to specify how drivers
should be applied we use 'Path Specifications' or pathspec.
Each VFS handler is constructed from a previous handler and a pathspec. The
pathspec is just a collection of arguments which make sense to the specific VFS
handler. The type of the handler is carried by the pathtype parameter.
On the server the PathSpec is represented as a PathSpec object, and stored
as an attribute of the AFF4 object. This module defines this abstraction.
"""
import itertools
import posixpath
import re
from typing import Sequence
from grr_response_core.lib import artifact_utils
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import standard as rdf_standard
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import flows_pb2
from grr_response_proto import jobs_pb2
class PathSpec(rdf_structs.RDFProtoStruct):
"""A path specification.
The pathspec protobuf is a recursive protobuf which contains components. This
class makes it easier to manipulate these structures by providing useful
helpers.
"""
protobuf = jobs_pb2.PathSpec
rdf_deps = [
rdfvalue.ByteSize,
"PathSpec", # TODO(user): recursive definition.
]
@classmethod
def OS(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.OS, **kwargs)
@classmethod
def TSK(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.TSK, **kwargs)
@classmethod
def NTFS(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.NTFS, **kwargs)
@classmethod
def Registry(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.REGISTRY, **kwargs)
@classmethod
def Temp(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.TMPFILE, **kwargs)
def CopyConstructor(self, other):
# pylint: disable=protected-access
self.SetRawData(other._CopyRawData())
# pylint: enable=protected-access
def __len__(self):
"""Return the total number of path components."""
i = -1
# TODO(user):pytype: type checker doesn't treat self as iterable.
for i, _ in enumerate(self): # pytype: disable=wrong-arg-types
pass
return i + 1
def __getitem__(self, item):
# TODO(user):pytype: type checker doesn't treat self as iterable.
for i, element in enumerate(self): # pytype: disable=wrong-arg-types
if i == item:
return element
raise IndexError("Pathspec index (%s) out of range" % item)
def __iter__(self):
"""Only iterate over all components from the current pointer."""
element = self
while element.HasField("pathtype"):
yield element
if element.HasField("nested_path"):
element = element.nested_path
else:
break
def Insert(self, index, rdfpathspec=None, **kwarg):
"""Insert a single component at index."""
if rdfpathspec is None:
rdfpathspec = self.__class__(**kwarg)
if index == 0:
# Copy ourselves to a temp copy.
nested_proto = self.__class__()
nested_proto.SetRawData(self.GetRawData())
# Replace ourselves with the new object.
self.SetRawData(rdfpathspec.GetRawData())
# Append the temp copy to the end.
self.last.nested_path = nested_proto
else:
previous = self[index - 1]
rdfpathspec.last.nested_path = previous.nested_path
previous.nested_path = rdfpathspec
def Append(self, component=None, **kwarg):
"""Append a new pathspec component to this pathspec."""
if component is None:
component = self.__class__(**kwarg)
if self.HasField("pathtype"):
self.last.nested_path = component
else:
for k, v in kwarg.items():
setattr(self, k, v)
self.SetRawData(component.GetRawData())
return self
def CollapsePath(self):
return utils.JoinPath(*[x.path for x in self])
def Pop(self, index=0):
"""Removes and returns the pathspec at the specified index."""
if index < 0:
index += len(self)
if index == 0:
result = self.__class__()
result.SetRawData(self.GetRawData())
self.SetRawData(self.nested_path.GetRawData())
else:
# Get the raw protobufs for the previous member.
previous = self[index - 1]
result = previous.nested_path
# Manipulate the previous members protobuf to patch the next component in.
previous.nested_path = result.nested_path
result.nested_path = None
return result
@property
def first(self):
return self
@property
def last(self):
if self.HasField("pathtype") and self.pathtype != self.PathType.UNSET:
# TODO(user):pytype: type checker doesn't treat self as iterable.
return list(self)[-1] # pytype: disable=wrong-arg-types
return self
def Dirname(self):
"""Get a new copied object with only the directory path."""
result = self.Copy()
while True:
last_directory = posixpath.dirname(result.last.path)
if last_directory != "/" or len(result) <= 1:
result.last.path = last_directory
# Make sure to clear the inode information.
result.last.inode = None
break
result.Pop(-1)
return result
def Basename(self):
# TODO(user):pytype: type checker doesn't treat self as reversible.
for component in reversed(self): # pytype: disable=wrong-arg-types
basename = posixpath.basename(component.path)
if basename:
return basename
return ""
def Validate(self):
if not self.HasField("pathtype") or self.pathtype == self.PathType.UNSET:
raise ValueError("No path type set in PathSpec.")
AFF4_PREFIXES = {
0: "/fs/os", # PathSpec.PathType.OS
1: "/fs/tsk", # PathSpec.PathType.TSK
2: "/registry", # PathSpec.PathType.REGISTRY
4: "/temp", # PathSpec.PathType.TMPFILE
5: "/fs/ntfs", # PathSpec.PathType.NTFS
}
def AFF4Path(self, client_urn):
"""Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
"""
# If the first level is OS and the second level is TSK its probably a mount
# point resolution. We map it into the tsk branch. For example if we get:
# path: \\\\.\\Volume{1234}\\
# pathtype: OS
# mount_point: /c:/
# nested_path {
# path: /windows/
# pathtype: TSK
# }
# We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
# (The same applies for NTFS)
if not self.HasField("pathtype"):
raise ValueError(
"Can't determine AFF4 path without a valid pathtype for {}.".format(
self))
first_component = self[0]
dev = first_component.path
if first_component.HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":{}".format(first_component.offset // 512)
if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
self[1].pathtype in (PathSpec.PathType.TSK, PathSpec.PathType.NTFS)):
result = [self.AFF4_PREFIXES[self[1].pathtype], dev]
# Skip the top level pathspec.
start = 1
else:
# For now just map the top level prefix based on the first pathtype
result = [self.AFF4_PREFIXES[first_component.pathtype]]
start = 0
for p in self[start]:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":{}".format(p.offset // 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result))
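# A minimal usage sketch (hypothetical paths): pathspecs nest, so a file
# inside a filesystem image is an OS component with an NTFS component
# appended:
#
#   ps = PathSpec.OS(path="/dev/sda1")
#   ps.Append(PathSpec.NTFS(path="/Windows/notepad.exe"))
#   assert len(ps) == 2
#   assert ps.CollapsePath() == "/dev/sda1/Windows/notepad.exe"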
def _unique(iterable):
"""Returns a list of unique values in preserved order."""
return list(dict.fromkeys(iterable))
class GlobComponentExplanation(rdf_structs.RDFProtoStruct):
"""A sub-part of a GlobExpression with examples."""
protobuf = flows_pb2.GlobComponentExplanation
# Grouping pattern: e.g. {test.exe,foo.doc,bar.txt}
GROUPING_PATTERN = re.compile("{([^}]+,[^}]+)}")
_VAR_PATTERN = re.compile("(" + "|".join([r"%%\w+%%", r"%%\w+\.\w+%%"]) + ")")
_REGEX_SPLIT_PATTERN = re.compile(
"(" + "|".join(["{[^}]+,[^}]+}", "\\?", "\\*\\*\\/?", "\\*"]) + ")")
_COMPONENT_SPLIT_PATTERN = re.compile("(" + "|".join([
r"{[^}]+,[^}]+}", r"\?", r"\*\*\d*/?", r"\*", r"%%\w+%%", r"%%\w+\.\w+%%"
]) + ")")
class GlobExpression(rdfvalue.RDFString):
"""A glob expression for a client path.
A glob expression represents a set of regular expressions which match files on
the client. The Glob expression supports the following expansions:
1) Client attribute expansions are surrounded with %% characters. They will be
expanded from the client AFF4 object.
2) Groupings are collections of alternates. e.g. {foo.exe,bar.sys}
3) Wild cards like * and ?
"""
context_help_url = "investigating-with-grr/flows/specifying-file-paths.html"
RECURSION_REGEX = re.compile(r"\*\*(\d*)")
def Validate(self):
"""GlobExpression is valid."""
if len(self.RECURSION_REGEX.findall(self._value)) > 1:
raise ValueError("Only one ** is permitted per path: %s." % self._value)
def Interpolate(self, knowledge_base=None):
kb = knowledge_base
patterns = artifact_utils.InterpolateKbAttributes(self._value, kb)
for pattern in patterns:
# Normalize the component path (this allows us to resolve ../
# sequences).
pattern = utils.NormalizePath(pattern.replace("\\", "/"))
for p in self.InterpolateGrouping(pattern):
yield p
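# E.g. with a knowledge base where %%users.homedir%% expands to
# "C:\Users\foo", interpolating "%%users.homedir%%\*.txt" would yield
# "C:/Users/foo/*.txt" (backslashes are normalized to forward slashes).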
def InterpolateGrouping(self, pattern):
"""Interpolate inline globbing groups."""
components = []
offset = 0
for match in GROUPING_PATTERN.finditer(pattern):
components.append([pattern[offset:match.start()]])
# Expand the attribute into the set of possibilities:
alternatives = match.group(1).split(",")
components.append(_unique(alternatives))
offset = match.end()
components.append([pattern[offset:]])
# Now calculate the cartesian products of all these sets to form all
# strings.
for vector in itertools.product(*components):
yield u"".join(vector)
def _ReplaceRegExGrouping(self, grouping):
alternatives = grouping.group(1).split(",")
return "(" + "|".join(re.escape(s) for s in alternatives) + ")"
def _ReplaceRegExPart(self, part):
if part == "**/":
return "(?:.*\\/)?"
elif part == "*":
return "[^\\/]*"
elif part == "?":
return "[^\\/]"
elif GROUPING_PATTERN.match(part):
return GROUPING_PATTERN.sub(self._ReplaceRegExGrouping, part)
else:
return re.escape(part)
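# Resulting regex fragments: "**/" -> "(?:.*\/)?", "*" -> "[^\/]*",
# "?" -> "[^\/]", and "{a,b}" -> "(a|b)"; any other part is escaped
# literally.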
def ExplainComponents(self, example_count: int,
knowledge_base) -> Sequence[GlobComponentExplanation]:
"""Returns a list of GlobComponentExplanations with examples."""
parts = _COMPONENT_SPLIT_PATTERN.split(self._value)
components = []
for glob_part in parts:
if not glob_part:
continue
component = GlobComponentExplanation(glob_expression=glob_part)
if GROUPING_PATTERN.match(glob_part):
examples = self.InterpolateGrouping(glob_part)
elif _VAR_PATTERN.match(glob_part):
# Examples for variable substitutions might not be 100% accurate,
# because the scope is not shared between two variables. Thus,
# if a GlobExpression uses %%users.a%% and %%users.b%%, the underlying
# user might be different for a and b. For the sake of explaining
# possible values, this should still be enough.
try:
examples = artifact_utils.InterpolateKbAttributes(
glob_part, knowledge_base)
except artifact_utils.Error:
# Interpolation can fail for many non-critical reasons, e.g. when the
# client is missing a KB attribute.
examples = []
else:
examples = []
component.examples = list(itertools.islice(examples, example_count))
components.append(component)
return components
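# E.g. ExplainComponents(2, kb) on "/home/*/{.bashrc,.zshrc}" would return
# one GlobComponentExplanation per part ("/home/", "*", "/",
# "{.bashrc,.zshrc}"); grouping and %%var%% parts carry up to two example
# expansions each, other parts have none.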
def AsRegEx(self):
"""Return the current glob as a simple regex.
Note: No interpolation is performed.
Returns:
A RegularExpression() object.
"""
parts = _REGEX_SPLIT_PATTERN.split(self._value)
result = u"".join(self._ReplaceRegExPart(p) for p in parts)
return rdf_standard.RegularExpression(u"(?i)\\A%s\\Z" % result)
Returns:
A RegularExpression() object.
"""
parts = _REGEX_SPLIT_PATTERN.split(self._value)
result = u"".join(self._ReplaceRegExPart(p) for p in parts)
return rdf_standard.RegularExpression(u"(?i)\\A%s\\Z" % result)
| GlobComponentExplanation | identifier_name |
paths.py
#!/usr/bin/env python
"""Pathspecs are methods of specifying the path on the client.
The GRR client has a number of drivers to virtualize access to different objects
to create a Virtual File System (VFS) abstraction. These are called 'VFS
Handlers' and they provide typical file-like operations (e.g. read, seek, tell
and stat). It is possible to recursively apply different drivers in the correct
order to arrive at a certain file-like object. In order to specify how drivers
should be applied we use 'Path Specifications' or pathspec.
Each VFS handler is constructed from a previous handler and a pathspec. The
pathspec is just a collection of arguments which make sense to the specific VFS
handler. The type of the handler is carried by the pathtype parameter.
On the server the PathSpec is represented as a PathSpec object, and stored
as an attribute of the AFF4 object. This module defines this abstraction.
"""
import itertools
import posixpath
import re
from typing import Sequence
from grr_response_core.lib import artifact_utils
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import standard as rdf_standard
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import flows_pb2
from grr_response_proto import jobs_pb2
class PathSpec(rdf_structs.RDFProtoStruct):
"""A path specification.
The pathspec protobuf is a recursive protobuf which contains components. This
class makes it easier to manipulate these structures by providing useful
helpers.
"""
protobuf = jobs_pb2.PathSpec
rdf_deps = [
rdfvalue.ByteSize,
"PathSpec", # TODO(user): recursive definition.
]
@classmethod
def OS(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.OS, **kwargs)
@classmethod
def TSK(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.TSK, **kwargs)
@classmethod
def NTFS(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.NTFS, **kwargs)
@classmethod
def Registry(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.REGISTRY, **kwargs)
@classmethod
def Temp(cls, **kwargs):
return cls(pathtype=PathSpec.PathType.TMPFILE, **kwargs)
def CopyConstructor(self, other):
# pylint: disable=protected-access
self.SetRawData(other._CopyRawData())
# pylint: enable=protected-access
def __len__(self):
"""Return the total number of path components."""
i = -1
# TODO(user):pytype: type checker doesn't treat self as iterable.
for i, _ in enumerate(self): # pytype: disable=wrong-arg-types
pass
return i + 1
def __getitem__(self, item):
# TODO(user):pytype: type checker doesn't treat self as iterable.
for i, element in enumerate(self): # pytype: disable=wrong-arg-types
if i == item:
return element
raise IndexError("Pathspec index (%s) out of range" % item)
def __iter__(self):
"""Only iterate over all components from the current pointer."""
element = self
while element.HasField("pathtype"):
yield element
if element.HasField("nested_path"):
element = element.nested_path
else:
break
def Insert(self, index, rdfpathspec=None, **kwarg):
"""Insert a single component at index."""
if rdfpathspec is None:
rdfpathspec = self.__class__(**kwarg)
if index == 0:
# Copy ourselves to a temp copy.
nested_proto = self.__class__()
nested_proto.SetRawData(self.GetRawData())
# Replace ourselves with the new object.
self.SetRawData(rdfpathspec.GetRawData())
# Append the temp copy to the end.
self.last.nested_path = nested_proto
else:
previous = self[index - 1]
rdfpathspec.last.nested_path = previous.nested_path
previous.nested_path = rdfpathspec
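# Usage sketch (hypothetical values): given ps = PathSpec.OS(path="/dev/sda1"),
# ps.Insert(1, PathSpec.TSK(path="/windows")) splices the TSK component in
# right after the OS component, mirroring how VFS handlers are stacked.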
def Append(self, component=None, **kwarg):
"""Append a new pathspec component to this pathspec."""
if component is None:
component = self.__class__(**kwarg)
if self.HasField("pathtype"):
self.last.nested_path = component
else:
for k, v in kwarg.items():
setattr(self, k, v)
self.SetRawData(component.GetRawData())
return self
def CollapsePath(self):
return utils.JoinPath(*[x.path for x in self])
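# e.g. (assumed component paths) a pathspec with components "/dev/sda1" and
# "/windows/notepad.exe" collapses to "/dev/sda1/windows/notepad.exe".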
def Pop(self, index=0):
"""Removes and returns the pathspec at the specified index."""
if index < 0:
index += len(self)
if index == 0:
result = self.__class__()
result.SetRawData(self.GetRawData())
self.SetRawData(self.nested_path.GetRawData())
else:
# Get the raw protobufs for the previous member.
previous = self[index - 1]
result = previous.nested_path
# Manipulate the previous members protobuf to patch the next component in.
previous.nested_path = result.nested_path
result.nested_path = None
return result
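# Pop(0) detaches the first component and promotes the nested pathspec;
# Pop(-1) removes and returns the innermost component.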
@property
def first(self):
return self
@property
def last(self):
if self.HasField("pathtype") and self.pathtype != self.PathType.UNSET:
# TODO(user):pytype: type checker doesn't treat self as iterable.
return list(self)[-1] # pytype: disable=wrong-arg-types
return self
def Dirname(self):
"""Get a new copied object with only the directory path."""
result = self.Copy()
while True:
last_directory = posixpath.dirname(result.last.path)
if last_directory != "/" or len(result) <= 1:
result.last.path = last_directory
# Make sure to clear the inode information.
result.last.inode = None
break
result.Pop(-1)
return result
def Basename(self):
# TODO(user):pytype: type checker doesn't treat self as reversible.
for component in reversed(self): # pytype: disable=wrong-arg-types
basename = posixpath.basename(component.path)
if basename:
return basename
return ""
def Validate(self):
if not self.HasField("pathtype") or self.pathtype == self.PathType.UNSET:
raise ValueError("No path type set in PathSpec.")
AFF4_PREFIXES = {
0: "/fs/os", # PathSpec.PathType.OS
1: "/fs/tsk", # PathSpec.PathType.TSK
2: "/registry", # PathSpec.PathType.REGISTRY
4: "/temp", # PathSpec.PathType.TMPFILE
5: "/fs/ntfs", # PathSpec.PathType.NTFS
}
def AFF4Path(self, client_urn):
"""Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
"""
# If the first level is OS and the second level is TSK it's probably a mount
# point resolution. We map it into the tsk branch. For example if we get:
# path: \\\\.\\Volume{1234}\\
# pathtype: OS
# mount_point: /c:/
# nested_path {
# path: /windows/
# pathtype: TSK
# }
# We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
# (The same applies for NTFS)
if not self.HasField("pathtype"):
raise ValueError(
"Can't determine AFF4 path without a valid pathtype for {}.".format(
self))
first_component = self[0]
dev = first_component.path
if first_component.HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":{}".format(first_component.offset // 512)
if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
self[1].pathtype in (PathSpec.PathType.TSK, PathSpec.PathType.NTFS)):
result = [self.AFF4_PREFIXES[self[1].pathtype], dev]
# Skip the top level pathspec.
start = 1
else:
# For now just map the top level prefix based on the first pathtype
result = [self.AFF4_PREFIXES[first_component.pathtype]]
start = 0
for p in self[start]:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":{}".format(p.offset // 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result))
def _unique(iterable):
"""Returns a list of unique values in preserved order."""
return list(dict.fromkeys(iterable))
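# e.g. _unique(["a", "b", "a"]) == ["a", "b"]; dict.fromkeys preserves
# insertion order on Python 3.7+.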
class GlobComponentExplanation(rdf_structs.RDFProtoStruct):
"""A sub-part of a GlobExpression with examples."""
protobuf = flows_pb2.GlobComponentExplanation
# Grouping pattern: e.g. {test.exe,foo.doc,bar.txt}
GROUPING_PATTERN = re.compile("{([^}]+,[^}]+)}")
_VAR_PATTERN = re.compile("(" + "|".join([r"%%\w+%%", r"%%\w+\.\w+%%"]) + ")")
_REGEX_SPLIT_PATTERN = re.compile(
"(" + "|".join(["{[^}]+,[^}]+}", "\\?", "\\*\\*\\/?", "\\*"]) + ")")
_COMPONENT_SPLIT_PATTERN = re.compile("(" + "|".join([
r"{[^}]+,[^}]+}", r"\?", r"\*\*\d*/?", r"\*", r"%%\w+%%", r"%%\w+\.\w+%%"
]) + ")")
class GlobExpression(rdfvalue.RDFString):
"""A glob expression for a client path.
A glob expression represents a set of regular expressions which match files on
the client. The Glob expression supports the following expansions:
1) Client attribute expansions are surrounded with %% characters. They will be
expanded from the client AFF4 object.
2) Groupings are collections of alternates. e.g. {foo.exe,bar.sys}
3) Wild cards like * and ?
"""
context_help_url = "investigating-with-grr/flows/specifying-file-paths.html"
RECURSION_REGEX = re.compile(r"\*\*(\d*)")
def Validate(self):
"""GlobExpression is valid."""
if len(self.RECURSION_REGEX.findall(self._value)) > 1:
raise ValueError("Only one ** is permitted per path: %s." % self._value)
def Interpolate(self, knowledge_base=None):
kb = knowledge_base
patterns = artifact_utils.InterpolateKbAttributes(self._value, kb)
for pattern in patterns:
# Normalize the component path (this allows us to resolve ../
# sequences).
pattern = utils.NormalizePath(pattern.replace("\\", "/"))
for p in self.InterpolateGrouping(pattern):
yield p
def InterpolateGrouping(self, pattern):
"""Interpolate inline globbing groups."""
components = []
offset = 0
for match in GROUPING_PATTERN.finditer(pattern):
components.append([pattern[offset:match.start()]])
# Expand the attribute into the set of possibilities:
alternatives = match.group(1).split(",")
components.append(_unique(alternatives))
offset = match.end()
components.append([pattern[offset:]])
# Now calculate the cartesian products of all these sets to form all
# strings.
for vector in itertools.product(*components):
yield u"".join(vector)
def _ReplaceRegExGrouping(self, grouping):
alternatives = grouping.group(1).split(",")
return "(" + "|".join(re.escape(s) for s in alternatives) + ")"
def _ReplaceRegExPart(self, part):
if part == "**/":
return "(?:.*\\/)?"
elif part == "*":
return "[^\\/]*"
elif part == "?":
return "[^\\/]"
elif GROUPING_PATTERN.match(part):
return GROUPING_PATTERN.sub(self._ReplaceRegExGrouping, part)
else:
return re.escape(part)
def ExplainComponents(self, example_count: int,
knowledge_base) -> Sequence[GlobComponentExplanation]:
"""Returns a list of GlobComponentExplanations with examples."""
parts = _COMPONENT_SPLIT_PATTERN.split(self._value)
components = []
for glob_part in parts:
if not glob_part:
continue
component = GlobComponentExplanation(glob_expression=glob_part)
if GROUPING_PATTERN.match(glob_part):
examples = self.InterpolateGrouping(glob_part)
elif _VAR_PATTERN.match(glob_part):
# Examples for variable substitutions might not be 100 % accurate,
# because the scope is not shared between two variables. Thus,
# if a GlobExpression uses %%users.a%% and %%users.b%%, the underlying
# user might be different for a and b. For the sake of explaining
# possible values, this should still be enough.
try:
examples = artifact_utils.InterpolateKbAttributes(
glob_part, knowledge_base)
except artifact_utils.Error:
# Interpolation can fail for many non-critical reasons, e.g. when the
# client is missing a KB attribute.
examples = []
else:
examples = []
component.examples = list(itertools.islice(examples, example_count))
components.append(component)
return components
def AsRegEx(self):
"""Return the current glob as a simple regex.
Note: No interpolation is performed.
Returns:
A RegularExpression() object.
"""
parts = _REGEX_SPLIT_PATTERN.split(self._value)
result = u"".join(self._ReplaceRegExPart(p) for p in parts)
return rdf_standard.RegularExpression(u"(?i)\\A%s\\Z" % result)
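# For illustration, GlobExpression("/tmp/*.log").AsRegEx() yields a
# case-insensitive pattern equivalent to (?i)\A/tmp/[^/]*\.log\Z.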
images.go
// Copyright 2015 The rkt Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//+build linux
package main
import (
"bytes"
"container/list"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
docker2aci "github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/docker2aci/lib"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/docker2aci/lib/common"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/aci"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/discovery"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/schema/types"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/coreos/ioprogress"
"github.com/coreos/rkt/Godeps/_workspace/src/golang.org/x/crypto/openpgp"
pgperrors "github.com/coreos/rkt/Godeps/_workspace/src/golang.org/x/crypto/openpgp/errors"
"github.com/coreos/rkt/common/apps"
"github.com/coreos/rkt/pkg/keystore"
"github.com/coreos/rkt/rkt/config"
"github.com/coreos/rkt/store"
"github.com/coreos/rkt/version"
)
type imageActionData struct {
s *store.Store
ks *keystore.Keystore
headers map[string]config.Headerer
dockerAuth map[string]config.BasicCredentials
insecureSkipVerify bool
debug bool
}
type finder struct {
imageActionData
local bool
withDeps bool
}
// findImages uses findImage to attain a list of image hashes using discovery if necessary
func (f *finder) findImages(al *apps.Apps) error {
return al.Walk(func(app *apps.App) error {
h, err := f.findImage(app.Image, app.Asc, true)
if err != nil {
return err
}
app.ImageID = *h
return nil
})
}
// findImage will recognize an ACI hash and use that, import a local file, use
// discovery or download an ACI directly.
func (f *finder) findImage(img string, asc string, discover bool) (*types.Hash, error) {
// check if it is a valid hash, if so let it pass through
h, err := types.NewHash(img)
if err == nil {
fullKey, err := f.s.ResolveKey(img)
if err != nil {
return nil, fmt.Errorf("could not resolve key: %v", err)
}
h, err = types.NewHash(fullKey)
if err != nil {
// should never happen
panic(err)
}
return h, nil
}
// try fetching the image, potentially remotely
ft := &fetcher{
imageActionData: f.imageActionData,
local: f.local,
withDeps: f.withDeps,
}
key, err := ft.fetchImage(img, asc, discover)
if err != nil {
return nil, err
}
h, err = types.NewHash(key)
if err != nil {
// should never happen
panic(err)
}
return h, nil
}
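// Usage sketch (hypothetical arguments): findImage("coreos.com/etcd", "", true)
// goes through discovery, while passing a full "sha512-..." image hash
// short-circuits to a local store lookup.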
var errStatusAccepted = errors.New("server is still processing the request")
type fetcher struct {
imageActionData
local bool
withDeps bool
}
// fetchImage will take an image as either a URL or a name string and import it
// into the store if found. If discover is true, meta-discovery is enabled. If
// asc is not "", it must exist as a local file and will be used
// as the signature file for verification, unless verification is disabled.
// If f.withDeps is true also image dependencies are fetched.
func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {
if f.withDeps && !discover {
return "", fmt.Errorf("cannot fetch image's dependencies with discovery disabled")
}
hash, err := f.fetchSingleImage(img, asc, discover)
if err != nil {
return "", err
}
if f.withDeps {
err = f.fetchImageDeps(hash)
if err != nil {
return "", err
}
}
return hash, nil
}
func (f *fetcher) getImageDeps(hash string) (types.Dependencies, error) {
key, err := f.s.ResolveKey(hash)
if err != nil {
return nil, err
}
im, err := f.s.GetImageManifest(key)
if err != nil {
return nil, err
}
return im.Dependencies, nil
}
func (f *fetcher) addImageDeps(hash string, imgsl *list.List, seen map[string]struct{}) error {
dependencies, err := f.getImageDeps(hash)
if err != nil {
return err
}
for _, d := range dependencies {
app, err := discovery.NewApp(d.ImageName.String(), d.Labels.ToMap())
if err != nil {
return err
}
imgsl.PushBack(app.String())
if _, ok := seen[app.String()]; ok {
return fmt.Errorf("dependency %s specified multiple times in the dependency tree for imageID: %s", app.String(), hash)
}
seen[app.String()] = struct{}{}
}
return nil
}
// fetchImageDeps will recursively fetch all the image dependencies
func (f *fetcher) fetchImageDeps(hash string) error {
imgsl := list.New()
seen := map[string]struct{}{}
if err := f.addImageDeps(hash, imgsl, seen); err != nil {
return err
}
for el := imgsl.Front(); el != nil; el = el.Next() {
img := el.Value.(string)
hash, err := f.fetchSingleImage(img, "", true)
if err != nil {
return err
}
if err := f.addImageDeps(hash, imgsl, seen); err != nil {
return err
}
}
return nil
}
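// The walk above is breadth-first: each fetched image pushes its own
// dependencies onto the tail of imgsl, and a dependency seen twice in the
// tree aborts with an error.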
// fetchSingleImage will take an image as either a URL or a name string and
// import it into the store if found. If discover is true, meta-discovery is
// enabled. If asc is not "", it must exist as a local file and will be used
// as the signature file for verification, unless verification is disabled.
func (f *fetcher) fetchSingleImage(img string, asc string, discover bool) (string, error) {
var (
ascFile *os.File
err error
latest bool
)
if asc != "" && f.ks != nil {
ascFile, err = os.Open(asc)
if err != nil {
return "", fmt.Errorf("unable to open signature file: %v", err)
}
defer ascFile.Close()
}
u, err := url.Parse(img)
if err != nil {
return "", fmt.Errorf("not a valid image reference (%s)", img)
}
// if img refers to a local file, ensure the scheme is file:// and make the url path absolute
_, err = os.Stat(u.Path)
if err == nil {
u.Path, err = filepath.Abs(u.Path)
if err != nil {
return "", fmt.Errorf("unable to get abs path: %v", err)
}
u.Scheme = "file"
} else if !os.IsNotExist(err) {
return "", fmt.Errorf("unable to access %q: %v", img, err)
}
if discover && u.Scheme == "" {
if app := newDiscoveryApp(img); app != nil {
var discoveryError error
if !f.local {
stderr("rkt: searching for app image %s", img)
ep, err := discoverApp(app, true)
if err != nil {
discoveryError = err
} else {
// No specified version label, mark it as latest
if _, ok := app.Labels["version"]; !ok {
latest = true
}
return f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)
}
}
if discoveryError != nil {
stderr("discovery failed for %q: %v. Trying to find image in the store.", img, discoveryError)
}
if f.local || discoveryError != nil {
return f.fetchImageFromStore(img)
}
}
}
switch u.Scheme {
case "http", "https", "file":
case "docker":
dockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))
if dockerURL.Tag == "latest" {
latest = true
}
default:
return "", fmt.Errorf("rkt only supports http, https, docker or file URLs (%s)", img)
}
return f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)
}
func (f *fetcher) fetchImageFromStore(img string) (string, error) {
return getStoreKeyFromApp(f.s, img)
}
func (f *fetcher) fetchImageFromEndpoints(appName string, ep *discovery.Endpoints, ascFile *os.File, latest bool) (string, error) {
return f.fetchImageFrom(appName, ep.ACIEndpoints[0].ACI, ep.ACIEndpoints[0].ASC, "", ascFile, latest)
}
func (f *fetcher) fetchImageFromURL(imgurl string, scheme string, ascFile *os.File, latest bool) (string, error) {
return f.fetchImageFrom("", imgurl, ascURLFromImgURL(imgurl), scheme, ascFile, latest)
}
// fetchImageFrom fetches an image from the aciURL.
// If the aciURL is a file path (scheme == 'file'), then we bypass the on-disk store.
// If the `--local` flag is provided, then we will only fetch from the on-disk store (unless aciURL is a file path).
// If the label is 'latest', then we will bypass the on-disk store (unless '--local' is specified).
// Otherwise if '--local' is false, aciURL is not a file path, and the label is not 'latest' or empty, we will first
// try to fetch from the on-disk store, if not found, then fetch from the internet.
func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {
var rem *store.Remote
if f.insecureSkipVerify {
if f.ks != nil {
stderr("rkt: warning: TLS verification and signature verification has been disabled")
}
} else if scheme == "docker" {
return "", fmt.Errorf("signature verification for docker images is not supported (try --insecure-skip-verify)")
}
if (f.local && scheme != "file") || (scheme != "file" && !latest) {
var err error
ok := false
rem, ok, err = f.s.GetRemote(aciURL)
if err != nil {
return "", err
}
if ok {
if f.local {
stderr("rkt: using image in local store for app %s", appName)
return rem.BlobKey, nil
}
if useCached(rem.DownloadTime, rem.CacheMaxAge) {
stderr("rkt: found image in local store, skipping fetching from %s", aciURL)
return rem.BlobKey, nil
}
}
if f.local {
return "", fmt.Errorf("url %s not available in local store", aciURL)
}
}
if scheme != "file" && f.debug {
stderr("rkt: fetching image from %s", aciURL)
}
var etag string
if rem != nil {
etag = rem.ETag
}
entity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)
if err != nil {
return "", err
}
if cd != nil && cd.useCached {
if rem != nil {
return rem.BlobKey, nil
} else {
// should never happen
panic("asked to use cached image but remote is nil")
}
}
if scheme != "file" {
defer os.Remove(aciFile.Name())
}
if entity != nil && !f.insecureSkipVerify {
stderr("rkt: signature verified:")
for _, v := range entity.Identities {
stderr(" %s", v.Name)
}
}
key, err := f.s.WriteACI(aciFile, latest)
if err != nil {
return "", err
}
if scheme != "file" {
rem := store.NewRemote(aciURL, ascURL)
rem.BlobKey = key
rem.DownloadTime = time.Now()
if cd != nil {
rem.ETag = cd.etag
rem.CacheMaxAge = cd.maxAge
}
err = f.s.WriteRemote(rem)
if err != nil {
return "", err
}
}
return key, nil
}
// fetch opens/downloads and verifies the remote ACI.
// If appName is not "", it will be used to check that the manifest contain the correct appName
// If ascFile is not nil, it will be used as the signature file and ascURL will be ignored.
// If Keystore is nil signature verification will be skipped, regardless of ascFile.
// fetch returns the signer, an *os.File representing the ACI, and an error if any.
// err will be nil if the ACI fetches successfully and the ACI is verified.
func (f *fetcher) fetch(appName string, aciURL, ascURL string, ascFile *os.File, etag string) (*openpgp.Entity, *os.File, *cacheData, error) {
var (
entity *openpgp.Entity
cd *cacheData
)
u, err := url.Parse(aciURL)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing ACI url: %v", err)
}
if u.Scheme == "docker" {
registryURL := strings.TrimPrefix(aciURL, "docker://")
storeTmpDir, err := f.s.TmpDir()
if err != nil {
return nil, nil, nil, fmt.Errorf("error creating temporary dir for docker to ACI conversion: %v", err)
}
tmpDir, err := ioutil.TempDir(storeTmpDir, "docker2aci-")
if err != nil {
return nil, nil, nil, err
}
defer os.RemoveAll(tmpDir)
indexName := docker2aci.GetIndexName(registryURL)
user := ""
password := ""
if creds, ok := f.dockerAuth[indexName]; ok {
user = creds.User
password = creds.Password
}
acis, err := docker2aci.Convert(registryURL, true, tmpDir, tmpDir, user, password)
if err != nil {
return nil, nil, nil, fmt.Errorf("error converting docker image to ACI: %v", err)
}
aciFile, err := os.Open(acis[0])
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening squashed ACI file: %v", err)
}
return nil, aciFile, nil, nil
}
// attempt to automatically fetch the public key in case it is available on a TLS connection.
if globalFlags.TrustKeysFromHttps && !globalFlags.InsecureSkipVerify && appName != "" {
pkls, err := getPubKeyLocations(appName, false, globalFlags.Debug)
if err != nil {
stderr("Error determining key location: %v", err)
} else {
// no http, don't ask user for accepting the key, no overriding
if err := addKeys(pkls, appName, false, true, false); err != nil {
stderr("Error adding keys: %v", err)
}
}
}
var retrySignature bool
if f.ks != nil && ascFile == nil {
u, err := url.Parse(ascURL)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing ASC url: %v", err)
}
if u.Scheme == "file" {
ascFile, err = os.Open(u.Path)
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening signature file: %v", err)
}
} else {
stderr("Downloading signature from %v\n", ascURL)
ascFile, err = f.s.TmpFile()
if err != nil {
return nil, nil, nil, fmt.Errorf("error setting up temporary file: %v", err)
}
defer os.Remove(ascFile.Name())
err = f.downloadSignatureFile(ascURL, ascFile)
switch err {
case errStatusAccepted:
retrySignature = true
stderr("rkt: server requested deferring the signature download")
case nil:
break
default:
return nil, nil, nil, fmt.Errorf("error downloading the signature file: %v", err)
}
}
defer ascFile.Close()
}
// check if the identity used by the signature is in the store before a
// possibly expensive download. This is only an optimization and it's
// ok to skip the test if the signature will be downloaded later.
if !retrySignature && f.ks != nil && appName != "" {
if _, err := ascFile.Seek(0, 0); err != nil {
return nil, nil, nil, fmt.Errorf("error seeking signature file: %v", err)
}
if entity, err = f.ks.CheckSignature(appName, bytes.NewReader([]byte{}), ascFile); err != nil {
if _, ok := err.(pgperrors.SignatureError); !ok {
return nil, nil, nil, err
}
}
}
var aciFile *os.File
if u.Scheme == "file" {
aciFile, err = os.Open(u.Path)
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening ACI file: %v", err)
}
} else {
aciFile, err = f.s.TmpFile()
if err != nil {
return nil, aciFile, nil, fmt.Errorf("error setting up temporary file: %v", err)
}
defer os.Remove(aciFile.Name())
if cd, err = f.downloadACI(aciURL, aciFile, etag); err != nil {
return nil, nil, nil, fmt.Errorf("error downloading ACI: %v", err)
}
if cd.useCached {
return nil, nil, cd, nil
}
}
if retrySignature {
if err = f.downloadSignatureFile(ascURL, ascFile); err != nil {
return nil, aciFile, nil, fmt.Errorf("error downloading the signature file: %v", err)
}
}
manifest, err := aci.ManifestFromImage(aciFile)
if err != nil {
return nil, aciFile, nil, err
}
// Check if the downloaded ACI has the correct app name.
// The check is only performed when the aci is downloaded through the
// discovery protocol, but not with local files or full URL.
if appName != "" && manifest.Name.String() != appName {
return nil, aciFile, nil,
fmt.Errorf("error when reading the app name: %q expected but %q found",
appName, manifest.Name.String())
}
if f.ks != nil {
if _, err := aciFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking ACI file: %v", err)
}
if _, err := ascFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking signature file: %v", err)
}
if entity, err = f.ks.CheckSignature(manifest.Name.String(), aciFile, ascFile); err != nil {
return nil, aciFile, nil, err
}
}
if _, err := aciFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking ACI file: %v", err)
}
return entity, aciFile, cd, nil
}
type writeSyncer interface {
io.Writer
Sync() error
}
// downloadACI gets the aci specified at aciurl
func (f *fetcher) downloadACI(aciurl string, out writeSyncer, etag string) (*cacheData, error) {
return f.downloadHTTP(aciurl, "ACI", out, etag)
}
// downloadSignatureFile gets the signature specified at sigurl
func (f *fetcher) downloadSignatureFile(sigurl string, out writeSyncer) error {
_, err := f.downloadHTTP(sigurl, "signature", out, "")
return err
}
// downloadHTTP retrieves url, creating a temp file using getTempFile
// http:// and https:// urls supported
func (f *fetcher) downloadHTTP(url, label string, out writeSyncer, etag string) (*cacheData, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
options := make(http.Header)
// Send credentials only over secure channel
if req.URL.Scheme == "https" {
if hostOpts, ok := f.headers[req.URL.Host]; ok {
options = hostOpts.Header()
}
}
for k, v := range options {
for _, e := range v {
req.Header.Add(k, e)
}
}
transport := http.DefaultTransport
if f.insecureSkipVerify {
transport = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
if etag != "" {
req.Header.Add("If-None-Match", etag)
}
req.Header.Add("User-Agent", fmt.Sprintf("rkt/%s", version.Version))
client := &http.Client{Transport: transport}
res, err := client.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
cd := &cacheData{}
// TODO(jonboulle): handle http more robustly (redirects?)
switch res.StatusCode {
case http.StatusAccepted:
// If the server returns Status Accepted (HTTP 202), we should retry
// downloading the signature later.
return nil, errStatusAccepted
case http.StatusOK:
fallthrough
case http.StatusNotModified:
cd.etag = res.Header.Get("ETag")
cd.maxAge = getMaxAge(res.Header.Get("Cache-Control"))
cd.useCached = (res.StatusCode == http.StatusNotModified)
if cd.useCached {
return cd, nil
}
default:
return nil, fmt.Errorf("bad HTTP status code: %d", res.StatusCode)
}
prefix := "Downloading " + label
fmtBytesSize := 18
barSize := int64(80 - len(prefix) - fmtBytesSize)
bar := ioprogress.DrawTextFormatBar(barSize)
fmtfunc := func(progress, total int64) string {
// Content-Length is set to -1 when unknown.
if total == -1 {
return fmt.Sprintf(
"%s: %v of an unknown total size",
prefix,
ioprogress.ByteUnitStr(progress),
)
}
return fmt.Sprintf(
"%s: %s %s",
prefix,
bar(progress, total),
ioprogress.DrawTextFormatBytes(progress, total),
)
}
reader := &ioprogress.Reader{
Reader: res.Body,
Size: res.ContentLength,
DrawFunc: ioprogress.DrawTerminalf(os.Stdout, fmtfunc),
DrawInterval: time.Second,
}
if _, err := io.Copy(out, reader); err != nil {
return nil, fmt.Errorf("error copying %s: %v", label, err)
}
if err := out.Sync(); err != nil {
return nil, fmt.Errorf("error writing %s: %v", label, err)
}
return cd, nil
}
func ascURLFromImgURL(imgurl string) string {
s := strings.TrimSuffix(imgurl, ".aci")
return s + ".aci.asc"
}
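// e.g. ascURLFromImgURL("https://example.com/etcd.aci") returns
// "https://example.com/etcd.aci.asc".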
// newDiscoveryApp creates a discovery app if the given img is an app name and
// has a URL-like structure, for example example.com/reduce-worker.
// Or it returns nil.
func newDiscoveryApp(img string) *discovery.App {
app, err := discovery.NewAppFromString(img)
if err != nil {
return nil
}
u, err := url.Parse(app.Name.String())
if err != nil || u.Scheme != "" {
return nil
}
if _, ok := app.Labels["arch"]; !ok {
app.Labels["arch"] = defaultArch
}
if _, ok := app.Labels["os"]; !ok {
app.Labels["os"] = defaultOS
}
return app
}
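// e.g. newDiscoveryApp("example.com/reduce-worker") yields an app carrying
// the default arch/os labels, while newDiscoveryApp("https://example.com/app.aci")
// returns nil because the parsed name carries a URL scheme.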
func discoverApp(app *discovery.App, insecure bool) (*discovery.Endpoints, error) {
ep, attempts, err := discovery.DiscoverEndpoints(*app, insecure)
if globalFlags.Debug {
for _, a := range attempts {
stderr("meta tag 'ac-discovery' not found on %s: %v", a.Prefix, a.Error)
}
}
if err != nil {
return nil, err
}
if len(ep.ACIEndpoints) == 0 {
return nil, fmt.Errorf("no endpoints discovered")
}
return ep, nil
}
func getStoreKeyFromApp(s *store.Store, img string) (string, error) {
app, err := discovery.NewAppFromString(img)
if err != nil {
return "", fmt.Errorf("cannot parse the image name: %v", err)
}
labels, err := types.LabelsFromMap(app.Labels)
if err != nil {
return "", fmt.Errorf("invalid labels in the name: %v", err)
}
key, err := s.GetACI(app.Name, labels)
if err != nil {
return "", fmt.Errorf("cannot find image: %v", err)
}
return key, nil
}
func getStoreKeyFromAppOrHash(s *store.Store, input string) (string, error) {
var key string
if _, err := types.NewHash(input); err == nil {
key, err = s.ResolveKey(input)
if err != nil {
return "", fmt.Errorf("cannot resolve key: %v", err)
}
} else {
key, err = getStoreKeyFromApp(s, input)
if err != nil {
return "", fmt.Errorf("cannot find image: %v", err)
}
}
return key, nil
}
type cacheData struct {
useCached bool
etag string
maxAge int
}
func getMaxAge(headerValue string) int {
var MaxAge int = 0
if len(headerValue) > 0 {
parts := strings.Split(headerValue, " ")
for i := 0; i < len(parts); i++ {
attr, val := parts[i], ""
if j := strings.Index(attr, "="); j >= 0 {
attr, val = attr[:j], attr[j+1:]
}
lowerAttr := strings.ToLower(attr)
switch lowerAttr {
case "no-store":
MaxAge = 0
continue
case "no-cache":
MaxAge = 0
continue
case "max-age":
secs, err := strconv.Atoi(val)
if err != nil || secs != 0 && val[0] == '0' {
break
}
if secs <= 0 {
MaxAge = 0
} else {
MaxAge = secs
}
continue
}
}
}
return MaxAge
}
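// e.g. getMaxAge("max-age=3600") == 3600, while "no-cache" and "no-store"
// both yield 0.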
// useCached checks if downloadTime plus maxAge is before/after the current time.
// return true if the cached image should be used, false otherwise.
func useCached(downloadTime time.Time, maxAge int) bool {
freshnessLifetime := int(time.Now().Sub(downloadTime).Seconds())
if maxAge > 0 && freshnessLifetime < maxAge {
return true
}
return false
}
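// e.g. with maxAge=3600, an image downloaded 60 seconds ago is served from
// the local store, while one downloaded two hours ago is fetched again.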
images.go | // Copyright 2015 The rkt Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//+build linux
package main
import (
"bytes"
"container/list"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
docker2aci "github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/docker2aci/lib"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/docker2aci/lib/common"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/aci"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/discovery"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/schema/types"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/coreos/ioprogress"
"github.com/coreos/rkt/Godeps/_workspace/src/golang.org/x/crypto/openpgp"
pgperrors "github.com/coreos/rkt/Godeps/_workspace/src/golang.org/x/crypto/openpgp/errors"
"github.com/coreos/rkt/common/apps"
"github.com/coreos/rkt/pkg/keystore"
"github.com/coreos/rkt/rkt/config"
"github.com/coreos/rkt/store"
"github.com/coreos/rkt/version"
)
type imageActionData struct {
s *store.Store
ks *keystore.Keystore
headers map[string]config.Headerer
dockerAuth map[string]config.BasicCredentials
insecureSkipVerify bool
debug bool
}
type finder struct {
imageActionData
local bool
withDeps bool
}
// findImages uses findImage to attain a list of image hashes using discovery if necessary
func (f *finder) findImages(al *apps.Apps) error {
return al.Walk(func(app *apps.App) error {
h, err := f.findImage(app.Image, app.Asc, true)
if err != nil {
return err
}
app.ImageID = *h
return nil
})
}
// findImage will recognize a ACI hash and use that, import a local file, use
// discovery or download an ACI directly.
func (f *finder) findImage(img string, asc string, discover bool) (*types.Hash, error) {
// check if it is a valid hash, if so let it pass through
h, err := types.NewHash(img)
if err == nil {
fullKey, err := f.s.ResolveKey(img)
if err != nil {
return nil, fmt.Errorf("could not resolve key: %v", err)
}
h, err = types.NewHash(fullKey)
if err != nil {
// should never happen
panic(err)
}
return h, nil
}
// try fetching the image, potentially remotely
ft := &fetcher{
imageActionData: f.imageActionData,
local: f.local,
withDeps: f.withDeps,
}
key, err := ft.fetchImage(img, asc, discover)
if err != nil {
return nil, err
}
h, err = types.NewHash(key)
if err != nil {
// should never happen
panic(err)
}
return h, nil
}
var errStatusAccepted = errors.New("server is still processing the request")
type fetcher struct {
imageActionData
local bool
withDeps bool
}
// fetchImage will take an image as either a URL or a name string and import it
// into the store if found. If discover is true meta-discovery is enabled. If
// asc is not "", it must exist as a local file and will be used
// as the signature file for verification, unless verification is disabled.
// If f.withDeps is true also image dependencies are fetched.
func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {
if f.withDeps && !discover {
return "", fmt.Errorf("cannot fetch image's dependencies with discovery disabled")
}
hash, err := f.fetchSingleImage(img, asc, discover)
if err != nil {
return "", err
}
if f.withDeps {
err = f.fetchImageDeps(hash)
if err != nil {
return "", err
}
}
return hash, nil
}
func (f *fetcher) getImageDeps(hash string) (types.Dependencies, error) {
key, err := f.s.ResolveKey(hash)
if err != nil {
return nil, err
}
im, err := f.s.GetImageManifest(key)
if err != nil {
return nil, err
}
return im.Dependencies, nil
}
func (f *fetcher) addImageDeps(hash string, imgsl *list.List, seen map[string]struct{}) error {
dependencies, err := f.getImageDeps(hash)
if err != nil {
return err
}
for _, d := range dependencies {
app, err := discovery.NewApp(d.ImageName.String(), d.Labels.ToMap())
if err != nil {
return err
}
imgsl.PushBack(app.String())
if _, ok := seen[app.String()]; ok {
return fmt.Errorf("dependency %s specified multiple times in the dependency tree for imageID: %s", app.String(), hash)
}
seen[app.String()] = struct{}{}
}
return nil
}
// fetchImageDeps will recursively fetch all the image dependencies
func (f *fetcher) fetchImageDeps(hash string) error {
imgsl := list.New()
seen := map[string]struct{}{}
f.addImageDeps(hash, imgsl, seen)
for el := imgsl.Front(); el != nil; el = el.Next() {
img := el.Value.(string)
hash, err := f.fetchSingleImage(img, "", true)
if err != nil {
return err
}
f.addImageDeps(hash, imgsl, seen)
}
return nil
}
// fetchSingleImage will take an image as either a URL or a name string and
// import it into the store if found. If discover is true meta-discovery is
// enabled. If asc is not "", it must exist as a local file and will be used
// as the signature file for verification, unless verification is disabled.
func (f *fetcher) fetchSingleImage(img string, asc string, discover bool) (string, error) {
var (
ascFile *os.File
err error
latest bool
)
if asc != "" && f.ks != nil {
ascFile, err = os.Open(asc)
if err != nil {
return "", fmt.Errorf("unable to open signature file: %v", err)
}
defer ascFile.Close()
}
u, err := url.Parse(img)
if err != nil {
return "", fmt.Errorf("not a valid image reference (%s)", img)
}
// if img refers to a local file, ensure the scheme is file:// and make the url path absolute
_, err = os.Stat(u.Path)
if err == nil {
u.Path, err = filepath.Abs(u.Path)
if err != nil {
return "", fmt.Errorf("unable to get abs path: %v", err)
}
u.Scheme = "file"
} else if !os.IsNotExist(err) {
return "", fmt.Errorf("unable to access %q: %v", img, err)
}
if discover && u.Scheme == "" {
if app := newDiscoveryApp(img); app != nil {
var discoveryError error
if !f.local {
stderr("rkt: searching for app image %s", img)
ep, err := discoverApp(app, true)
if err != nil {
discoveryError = err
} else {
// No specified version label, mark it as latest
if _, ok := app.Labels["version"]; !ok {
latest = true
}
return f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)
}
}
if discoveryError != nil {
stderr("discovery failed for %q: %v. Trying to find image in the store.", img, discoveryError)
}
if f.local || discoveryError != nil {
return f.fetchImageFromStore(img)
}
}
}
switch u.Scheme {
case "http", "https", "file":
case "docker":
dockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))
if dockerURL.Tag == "latest" {
latest = true
}
default:
return "", fmt.Errorf("rkt only supports http, https, docker or file URLs (%s)", img)
}
return f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)
}
func (f *fetcher) fetchImageFromStore(img string) (string, error) {
return getStoreKeyFromApp(f.s, img)
}
func (f *fetcher) fetchImageFromEndpoints(appName string, ep *discovery.Endpoints, ascFile *os.File, latest bool) (string, error) {
return f.fetchImageFrom(appName, ep.ACIEndpoints[0].ACI, ep.ACIEndpoints[0].ASC, "", ascFile, latest)
}
func (f *fetcher) fetchImageFromURL(imgurl string, scheme string, ascFile *os.File, latest bool) (string, error) {
return f.fetchImageFrom("", imgurl, ascURLFromImgURL(imgurl), scheme, ascFile, latest)
}
// fetchImageFrom fetches an image from the aciURL.
// If the aciURL is a file path (scheme == 'file'), then we bypass the on-disk store.
// If the `--local` flag is provided, then we will only fetch from the on-disk store (unless aciURL is a file path).
// If the label is 'latest', then we will bypass the on-disk store (unless '--local' is specified).
// Otherwise if '--local' is false, aciURL is not a file path, and the label is not 'latest' or empty, we will first
// try to fetch from the on-disk store, if not found, then fetch from the internet.
func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {
var rem *store.Remote
if f.insecureSkipVerify {
if f.ks != nil {
stderr("rkt: warning: TLS verification and signature verification has been disabled")
}
} else if scheme == "docker" {
return "", fmt.Errorf("signature verification for docker images is not supported (try --insecure-skip-verify)")
}
if (f.local && scheme != "file") || (scheme != "file" && !latest) {
var err error
ok := false
rem, ok, err = f.s.GetRemote(aciURL)
if err != nil {
return "", err
}
if ok {
if f.local {
stderr("rkt: using image in local store for app %s", appName)
return rem.BlobKey, nil
}
if useCached(rem.DownloadTime, rem.CacheMaxAge) {
stderr("rkt: found image in local store, skipping fetching from %s", aciURL)
return rem.BlobKey, nil
}
}
if f.local {
return "", fmt.Errorf("url %s not available in local store", aciURL)
}
}
if scheme != "file" && f.debug {
stderr("rkt: fetching image from %s", aciURL)
}
var etag string
if rem != nil {
etag = rem.ETag
}
entity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)
if err != nil {
return "", err
}
if cd != nil && cd.useCached {
if rem != nil {
return rem.BlobKey, nil
} else {
// should never happen
panic("asked to use cached image but remote is nil")
}
}
if scheme != "file" {
defer os.Remove(aciFile.Name())
}
if entity != nil && !f.insecureSkipVerify {
stderr("rkt: signature verified:")
for _, v := range entity.Identities {
stderr(" %s", v.Name)
}
}
key, err := f.s.WriteACI(aciFile, latest)
if err != nil {
return "", err
}
if scheme != "file" {
rem := store.NewRemote(aciURL, ascURL)
rem.BlobKey = key
rem.DownloadTime = time.Now()
if cd != nil {
rem.ETag = cd.etag
rem.CacheMaxAge = cd.maxAge
}
err = f.s.WriteRemote(rem)
if err != nil {
return "", err
}
}
return key, nil
}
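// Cache-policy summary for fetchImageFrom (illustrative, for a non-"file"
// scheme; freshness is only checked when the label is not 'latest'):
//
//	--local and image in store      -> return stored blob key
//	--local and image not in store  -> error
//	cached copy still fresh         -> skip download (useCached)
//	otherwise                       -> download, verify, record remote metadata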
// fetch opens/downloads and verifies the remote ACI.
// If appName is not "", it will be used to check that the manifest contains the correct appName.
// If ascFile is not nil, it will be used as the signature file and ascURL will be ignored.
// If Keystore is nil signature verification will be skipped, regardless of ascFile.
// fetch returns the signer, an *os.File representing the ACI, and an error if any.
// err will be nil if the ACI fetches successfully and the ACI is verified.
func (f *fetcher) fetch(appName string, aciURL, ascURL string, ascFile *os.File, etag string) (*openpgp.Entity, *os.File, *cacheData, error) {
var (
entity *openpgp.Entity
cd *cacheData
)
u, err := url.Parse(aciURL)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing ACI url: %v", err)
}
if u.Scheme == "docker" {
registryURL := strings.TrimPrefix(aciURL, "docker://")
storeTmpDir, err := f.s.TmpDir()
if err != nil {
return nil, nil, nil, fmt.Errorf("error creating temporary dir for docker to ACI conversion: %v", err)
}
tmpDir, err := ioutil.TempDir(storeTmpDir, "docker2aci-")
if err != nil {
return nil, nil, nil, err
}
defer os.RemoveAll(tmpDir)
indexName := docker2aci.GetIndexName(registryURL)
user := ""
password := ""
if creds, ok := f.dockerAuth[indexName]; ok {
user = creds.User
password = creds.Password
}
acis, err := docker2aci.Convert(registryURL, true, tmpDir, tmpDir, user, password)
if err != nil {
return nil, nil, nil, fmt.Errorf("error converting docker image to ACI: %v", err)
}
aciFile, err := os.Open(acis[0])
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening squashed ACI file: %v", err)
}
return nil, aciFile, nil, nil
}
// attempt to automatically fetch the public key in case it is available on a TLS connection.
if globalFlags.TrustKeysFromHttps && !globalFlags.InsecureSkipVerify && appName != "" {
pkls, err := getPubKeyLocations(appName, false, globalFlags.Debug)
if err != nil {
stderr("Error determining key location: %v", err)
} else {
// no http, don't ask user for accepting the key, no overriding
if err := addKeys(pkls, appName, false, true, false); err != nil {
stderr("Error adding keys: %v", err)
}
}
}
var retrySignature bool
if f.ks != nil && ascFile == nil {
u, err := url.Parse(ascURL)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing ASC url: %v", err)
}
if u.Scheme == "file" {
ascFile, err = os.Open(u.Path)
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening signature file: %v", err)
}
} else {
stderr("Downloading signature from %v\n", ascURL)
ascFile, err = f.s.TmpFile()
if err != nil {
return nil, nil, nil, fmt.Errorf("error setting up temporary file: %v", err)
}
defer os.Remove(ascFile.Name())
err = f.downloadSignatureFile(ascURL, ascFile)
switch err {
case errStatusAccepted:
retrySignature = true
stderr("rkt: server requested deferring the signature download")
case nil:
break
default:
return nil, nil, nil, fmt.Errorf("error downloading the signature file: %v", err)
}
}
defer ascFile.Close()
}
// check if the identity used by the signature is in the store before a
// possibly expensive download. This is only an optimization and it's
// ok to skip the test if the signature will be downloaded later.
if !retrySignature && f.ks != nil && appName != "" {
if _, err := ascFile.Seek(0, 0); err != nil {
return nil, nil, nil, fmt.Errorf("error seeking signature file: %v", err)
}
if entity, err = f.ks.CheckSignature(appName, bytes.NewReader([]byte{}), ascFile); err != nil {
if _, ok := err.(pgperrors.SignatureError); !ok {
return nil, nil, nil, err
}
}
}
var aciFile *os.File
if u.Scheme == "file" {
aciFile, err = os.Open(u.Path)
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening ACI file: %v", err)
}
} else {
aciFile, err = f.s.TmpFile()
if err != nil {
return nil, aciFile, nil, fmt.Errorf("error setting up temporary file: %v", err)
}
defer os.Remove(aciFile.Name())
if cd, err = f.downloadACI(aciURL, aciFile, etag); err != nil {
return nil, nil, nil, fmt.Errorf("error downloading ACI: %v", err)
}
if cd.useCached {
return nil, nil, cd, nil
}
}
if retrySignature {
if err = f.downloadSignatureFile(ascURL, ascFile); err != nil {
return nil, aciFile, nil, fmt.Errorf("error downloading the signature file: %v", err)
}
}
manifest, err := aci.ManifestFromImage(aciFile)
if err != nil {
return nil, aciFile, nil, err
}
// Check if the downloaded ACI has the correct app name.
// The check is only performed when the aci is downloaded through the
// discovery protocol, not for local files or full URLs.
if appName != "" && manifest.Name.String() != appName {
return nil, aciFile, nil,
fmt.Errorf("error when reading the app name: %q expected but %q found",
appName, manifest.Name.String())
}
if f.ks != nil {
if _, err := aciFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking ACI file: %v", err)
}
if _, err := ascFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking signature file: %v", err)
}
if entity, err = f.ks.CheckSignature(manifest.Name.String(), aciFile, ascFile); err != nil {
return nil, aciFile, nil, err
}
}
if _, err := aciFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking ACI file: %v", err)
}
return entity, aciFile, cd, nil
}
type writeSyncer interface {
io.Writer
Sync() error
}
// downloadACI gets the aci specified at aciurl
func (f *fetcher) downloadACI(aciurl string, out writeSyncer, etag string) (*cacheData, error) {
return f.downloadHTTP(aciurl, "ACI", out, etag)
}
// downloadSignatureFile gets the signature specified at sigurl
func (f *fetcher) downloadSignatureFile(sigurl string, out writeSyncer) error {
_, err := f.downloadHTTP(sigurl, "signature", out, "")
return err
}
// downloadHTTP retrieves url and writes the response body to out.
// Only http:// and https:// URLs are supported.
func (f *fetcher) downloadHTTP(url, label string, out writeSyncer, etag string) (*cacheData, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
options := make(http.Header)
// Send credentials only over secure channel
if req.URL.Scheme == "https" {
if hostOpts, ok := f.headers[req.URL.Host]; ok {
options = hostOpts.Header()
}
}
for k, v := range options {
for _, e := range v {
req.Header.Add(k, e)
}
}
transport := http.DefaultTransport
if f.insecureSkipVerify {
transport = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
if etag != "" {
req.Header.Add("If-None-Match", etag)
}
req.Header.Add("User-Agent", fmt.Sprintf("rkt/%s", version.Version))
client := &http.Client{Transport: transport}
res, err := client.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
cd := &cacheData{}
// TODO(jonboulle): handle http more robustly (redirects?)
switch res.StatusCode {
case http.StatusAccepted:
// If the server returns Status Accepted (HTTP 202), we should retry
// downloading the signature later.
return nil, errStatusAccepted
case http.StatusOK:
fallthrough
case http.StatusNotModified:
cd.etag = res.Header.Get("ETag")
cd.maxAge = getMaxAge(res.Header.Get("Cache-Control"))
cd.useCached = (res.StatusCode == http.StatusNotModified)
if cd.useCached {
return cd, nil
}
default:
return nil, fmt.Errorf("bad HTTP status code: %d", res.StatusCode)
}
prefix := "Downloading " + label
fmtBytesSize := 18
barSize := int64(80 - len(prefix) - fmtBytesSize)
bar := ioprogress.DrawTextFormatBar(barSize)
fmtfunc := func(progress, total int64) string {
// Content-Length is set to -1 when unknown.
if total == -1 {
return fmt.Sprintf(
"%s: %v of an unknown total size",
prefix,
ioprogress.ByteUnitStr(progress),
)
}
return fmt.Sprintf(
"%s: %s %s",
prefix,
bar(progress, total),
ioprogress.DrawTextFormatBytes(progress, total),
)
}
reader := &ioprogress.Reader{
Reader: res.Body,
Size: res.ContentLength,
DrawFunc: ioprogress.DrawTerminalf(os.Stdout, fmtfunc),
DrawInterval: time.Second,
}
if _, err := io.Copy(out, reader); err != nil {
return nil, fmt.Errorf("error copying %s: %v", label, err)
}
if err := out.Sync(); err != nil {
return nil, fmt.Errorf("error writing %s: %v", label, err)
}
return cd, nil
}
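// Illustrative conditional-request flow (assumed): a caller that stored an
// ETag from a previous download passes it back in, and a 304 reply
// short-circuits the transfer:
//
//	cd, err := f.downloadHTTP(aciURL, "ACI", out, rem.ETag)
//	// on 304: err == nil, cd.useCached == true, and nothing was written to out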
func ascURLFromImgURL(imgurl string) string {
s := strings.TrimSuffix(imgurl, ".aci")
return s + ".aci.asc"
}
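// For example: ascURLFromImgURL("https://example.com/etcd.aci") returns
// "https://example.com/etcd.aci.asc"; a URL without an ".aci" suffix simply
// has ".aci.asc" appended.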
// newDiscoveryApp creates a discovery app if the given img is an app name and
// has a URL-like structure, for example example.com/reduce-worker.
// Or it returns nil.
func newDiscoveryApp(img string) *discovery.App {
app, err := discovery.NewAppFromString(img)
if err != nil {
return nil
}
u, err := url.Parse(app.Name.String())
if err != nil || u.Scheme != "" {
return nil
}
if _, ok := app.Labels["arch"]; !ok {
app.Labels["arch"] = defaultArch
}
if _, ok := app.Labels["os"]; !ok {
app.Labels["os"] = defaultOS
}
return app
}
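// Illustrative examples (assumed): "example.com/reduce-worker:1.0.0" yields an
// app named "example.com/reduce-worker" with a version label plus default
// "arch" and "os" labels, while "https://example.com/app.aci" returns nil
// because the parsed name carries a URL scheme.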
func discoverApp(app *discovery.App, insecure bool) (*discovery.Endpoints, error) {
ep, attempts, err := discovery.DiscoverEndpoints(*app, insecure)
if globalFlags.Debug {
for _, a := range attempts {
stderr("meta tag 'ac-discovery' not found on %s: %v", a.Prefix, a.Error)
}
}
if err != nil {
return nil, err
}
if len(ep.ACIEndpoints) == 0 {
return nil, fmt.Errorf("no endpoints discovered")
}
return ep, nil
}
func getStoreKeyFromApp(s *store.Store, img string) (string, error) {
app, err := discovery.NewAppFromString(img)
if err != nil {
return "", fmt.Errorf("cannot parse the image name: %v", err)
}
labels, err := types.LabelsFromMap(app.Labels)
if err != nil {
return "", fmt.Errorf("invalid labels in the name: %v", err)
}
key, err := s.GetACI(app.Name, labels)
if err != nil {
return "", fmt.Errorf("cannot find image: %v", err)
}
return key, nil
}
func getStoreKeyFromAppOrHash(s *store.Store, input string) (string, error) {
var key string
if _, err := types.NewHash(input); err == nil {
key, err = s.ResolveKey(input)
if err != nil {
return "", fmt.Errorf("cannot resolve key: %v", err)
}
} else {
key, err = getStoreKeyFromApp(s, input)
if err != nil {
return "", fmt.Errorf("cannot find image: %v", err)
}
}
return key, nil
}
type cacheData struct {
useCached bool
etag string
maxAge int
}
func getMaxAge(headerValue string) int {
var MaxAge int = 0
if len(headerValue) > 0 {
parts := strings.Split(headerValue, " ")
for i := 0; i < len(parts); i++ {
attr, val := parts[i], ""
if j := strings.Index(attr, "="); j >= 0 {
attr, val = attr[:j], attr[j+1:]
}
lowerAttr := strings.ToLower(attr)
switch lowerAttr {
case "no-store":
MaxAge = 0
continue
case "no-cache":
MaxAge = 0
continue
case "max-age":
secs, err := strconv.Atoi(val)
if err != nil || secs != 0 && val[0] == '0' {
break
}
if secs <= 0 {
MaxAge = 0
} else {
MaxAge = secs
}
continue
}
}
}
return MaxAge
}
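// Illustrative inputs (assumed): getMaxAge("max-age=3600") returns 3600,
// getMaxAge("no-cache") returns 0, and a value with a leading zero such as
// "max-age=007" is rejected by the parser above, leaving 0.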
// useCached checks if downloadTime plus maxAge is before/after the current time.
// return true if the cached image should be used, false otherwise.
func useCached(downloadTime time.Time, maxAge int) bool {
freshnessLifetime := int(time.Now().Sub(downloadTime).Seconds())
if maxAge > 0 && freshnessLifetime < maxAge {
return true
}
return false
}
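// exampleUseCached is an illustrative sketch (not part of the original file):
// an image downloaded 30 minutes ago with max-age 3600 is still fresh, while
// one downloaded two hours ago is not.
func exampleUseCached() {
fmt.Println(useCached(time.Now().Add(-30*time.Minute), 3600)) // true
fmt.Println(useCached(time.Now().Add(-2*time.Hour), 3600))   // false
}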
images.go | // Copyright 2015 The rkt Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//+build linux
package main
import (
"bytes"
"container/list"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
docker2aci "github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/docker2aci/lib"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/docker2aci/lib/common"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/aci"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/discovery"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/schema/types"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/coreos/ioprogress"
"github.com/coreos/rkt/Godeps/_workspace/src/golang.org/x/crypto/openpgp"
pgperrors "github.com/coreos/rkt/Godeps/_workspace/src/golang.org/x/crypto/openpgp/errors"
"github.com/coreos/rkt/common/apps"
"github.com/coreos/rkt/pkg/keystore"
"github.com/coreos/rkt/rkt/config"
"github.com/coreos/rkt/store"
"github.com/coreos/rkt/version"
)
type imageActionData struct {
s *store.Store
ks *keystore.Keystore
headers map[string]config.Headerer
dockerAuth map[string]config.BasicCredentials
insecureSkipVerify bool
debug bool
}
type finder struct {
imageActionData
local bool
withDeps bool
}
// findImages uses findImage to obtain a list of image hashes, using discovery if necessary
func (f *finder) findImages(al *apps.Apps) error {
return al.Walk(func(app *apps.App) error {
h, err := f.findImage(app.Image, app.Asc, true)
if err != nil {
return err
}
app.ImageID = *h
return nil
})
}
// findImage will recognize an ACI hash and use that, import a local file, use
// discovery or download an ACI directly.
func (f *finder) findImage(img string, asc string, discover bool) (*types.Hash, error) {
// check if it is a valid hash, if so let it pass through
h, err := types.NewHash(img)
if err == nil {
fullKey, err := f.s.ResolveKey(img)
if err != nil {
return nil, fmt.Errorf("could not resolve key: %v", err)
}
h, err = types.NewHash(fullKey)
if err != nil {
// should never happen
panic(err)
}
return h, nil
}
// try fetching the image, potentially remotely
ft := &fetcher{
imageActionData: f.imageActionData,
local: f.local,
withDeps: f.withDeps,
}
key, err := ft.fetchImage(img, asc, discover)
if err != nil {
return nil, err
}
h, err = types.NewHash(key)
if err != nil {
// should never happen
panic(err)
}
return h, nil
}
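// Illustrative behavior (assumed): findImage("sha512-86c0...") resolves the
// hash directly against the store and never touches the network, while
// findImage("example.com/app", "", true) falls through to the fetcher, which
// may consult discovery, a remote URL, or the local store.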
var errStatusAccepted = errors.New("server is still processing the request")
type fetcher struct {
imageActionData
local bool
withDeps bool
}
// fetchImage will take an image as either a URL or a name string and import it
// into the store if found. If discover is true meta-discovery is enabled. If
// asc is not "", it must exist as a local file and will be used
// as the signature file for verification, unless verification is disabled.
// If f.withDeps is true also image dependencies are fetched.
func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {
if f.withDeps && !discover {
return "", fmt.Errorf("cannot fetch image's dependencies with discovery disabled")
}
hash, err := f.fetchSingleImage(img, asc, discover)
if err != nil {
return "", err
}
if f.withDeps {
err = f.fetchImageDeps(hash)
if err != nil {
return "", err
}
}
return hash, nil
}
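// For example (illustrative): calling fetchImage with discover == false while
// f.withDeps is set fails fast, because dependency names can only be resolved
// through discovery.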
func (f *fetcher) getImageDeps(hash string) (types.Dependencies, error) {
key, err := f.s.ResolveKey(hash)
if err != nil {
return nil, err
}
im, err := f.s.GetImageManifest(key)
if err != nil {
return nil, err
}
return im.Dependencies, nil
}
func (f *fetcher) addImageDeps(hash string, imgsl *list.List, seen map[string]struct{}) error {
dependencies, err := f.getImageDeps(hash)
if err != nil {
return err
}
for _, d := range dependencies {
app, err := discovery.NewApp(d.ImageName.String(), d.Labels.ToMap())
if err != nil {
return err
}
imgsl.PushBack(app.String())
if _, ok := seen[app.String()]; ok {
return fmt.Errorf("dependency %s specified multiple times in the dependency tree for imageID: %s", app.String(), hash)
}
seen[app.String()] = struct{}{}
}
return nil
}
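// Note (illustrative): because `seen` is shared across the whole walk, a
// diamond-shaped graph (two images depending on the same base image) is
// reported as an error here rather than being fetched twice.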
// fetchImageDeps will recursively fetch all the image dependencies
func (f *fetcher) fetchImageDeps(hash string) error {
imgsl := list.New()
seen := map[string]struct{}{}
if err := f.addImageDeps(hash, imgsl, seen); err != nil {
return err
}
for el := imgsl.Front(); el != nil; el = el.Next() {
img := el.Value.(string)
hash, err := f.fetchSingleImage(img, "", true)
if err != nil {
return err
}
if err := f.addImageDeps(hash, imgsl, seen); err != nil {
return err
}
}
return nil
}
// fetchSingleImage will take an image as either a URL or a name string and
// import it into the store if found. If discover is true meta-discovery is
// enabled. If asc is not "", it must exist as a local file and will be used
// as the signature file for verification, unless verification is disabled.
func (f *fetcher) fetchSingleImage(img string, asc string, discover bool) (string, error) {
var (
ascFile *os.File
err error
latest bool
)
if asc != "" && f.ks != nil {
ascFile, err = os.Open(asc)
if err != nil {
return "", fmt.Errorf("unable to open signature file: %v", err)
}
defer ascFile.Close()
}
u, err := url.Parse(img)
if err != nil {
return "", fmt.Errorf("not a valid image reference (%s)", img)
}
// if img refers to a local file, ensure the scheme is file:// and make the url path absolute
_, err = os.Stat(u.Path)
if err == nil {
u.Path, err = filepath.Abs(u.Path)
if err != nil {
return "", fmt.Errorf("unable to get abs path: %v", err)
}
u.Scheme = "file"
} else if !os.IsNotExist(err) {
return "", fmt.Errorf("unable to access %q: %v", img, err)
}
if discover && u.Scheme == "" {
if app := newDiscoveryApp(img); app != nil {
var discoveryError error
if !f.local {
stderr("rkt: searching for app image %s", img)
ep, err := discoverApp(app, true)
if err != nil {
discoveryError = err
} else {
// No specified version label, mark it as latest
if _, ok := app.Labels["version"]; !ok {
latest = true
}
return f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)
}
}
if discoveryError != nil {
stderr("discovery failed for %q: %v. Trying to find image in the store.", img, discoveryError)
}
if f.local || discoveryError != nil {
return f.fetchImageFromStore(img)
}
}
}
switch u.Scheme {
case "http", "https", "file":
case "docker":
dockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))
if dockerURL.Tag == "latest" {
latest = true
}
default:
return "", fmt.Errorf("rkt only supports http, https, docker or file URLs (%s)", img)
}
return f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)
}
func (f *fetcher) fetchImageFromStore(img string) (string, error) {
return getStoreKeyFromApp(f.s, img)
}
func (f *fetcher) fetchImageFromEndpoints(appName string, ep *discovery.Endpoints, ascFile *os.File, latest bool) (string, error) {
return f.fetchImageFrom(appName, ep.ACIEndpoints[0].ACI, ep.ACIEndpoints[0].ASC, "", ascFile, latest)
}
func (f *fetcher) fetchImageFromURL(imgurl string, scheme string, ascFile *os.File, latest bool) (string, error) {
return f.fetchImageFrom("", imgurl, ascURLFromImgURL(imgurl), scheme, ascFile, latest)
}
// fetchImageFrom fetches an image from the aciURL.
// If the aciURL is a file path (scheme == 'file'), then we bypass the on-disk store.
// If the `--local` flag is provided, then we will only fetch from the on-disk store (unless aciURL is a file path).
// If the label is 'latest', then we will bypass the on-disk store (unless '--local' is specified).
// Otherwise if '--local' is false, aciURL is not a file path, and the label is not 'latest' or empty, we will first
// try to fetch from the on-disk store, if not found, then fetch from the internet.
func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {
var rem *store.Remote
if f.insecureSkipVerify {
if f.ks != nil {
stderr("rkt: warning: TLS verification and signature verification has been disabled")
}
} else if scheme == "docker" {
return "", fmt.Errorf("signature verification for docker images is not supported (try --insecure-skip-verify)")
}
if (f.local && scheme != "file") || (scheme != "file" && !latest) {
var err error
ok := false
rem, ok, err = f.s.GetRemote(aciURL)
if err != nil {
return "", err
}
if ok {
if f.local {
stderr("rkt: using image in local store for app %s", appName)
return rem.BlobKey, nil
}
if useCached(rem.DownloadTime, rem.CacheMaxAge) {
stderr("rkt: found image in local store, skipping fetching from %s", aciURL)
return rem.BlobKey, nil
}
}
if f.local {
return "", fmt.Errorf("url %s not available in local store", aciURL)
}
}
if scheme != "file" && f.debug {
stderr("rkt: fetching image from %s", aciURL)
}
var etag string
if rem != nil {
etag = rem.ETag
}
entity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)
if err != nil {
return "", err
}
if cd != nil && cd.useCached {
if rem != nil {
return rem.BlobKey, nil
} else {
// should never happen
panic("asked to use cached image but remote is nil")
}
}
if scheme != "file" {
defer os.Remove(aciFile.Name())
}
if entity != nil && !f.insecureSkipVerify {
stderr("rkt: signature verified:")
for _, v := range entity.Identities {
stderr(" %s", v.Name)
}
}
key, err := f.s.WriteACI(aciFile, latest)
if err != nil {
return "", err
}
if scheme != "file" {
rem := store.NewRemote(aciURL, ascURL)
rem.BlobKey = key
rem.DownloadTime = time.Now()
if cd != nil {
rem.ETag = cd.etag
rem.CacheMaxAge = cd.maxAge
}
err = f.s.WriteRemote(rem)
if err != nil {
return "", err
}
}
return key, nil
}
// fetch opens/downloads and verifies the remote ACI.
// If appName is not "", it will be used to check that the manifest contains the correct appName.
// If ascFile is not nil, it will be used as the signature file and ascURL will be ignored.
// If Keystore is nil signature verification will be skipped, regardless of ascFile.
// fetch returns the signer, an *os.File representing the ACI, and an error if any.
// err will be nil if the ACI fetches successfully and the ACI is verified.
func (f *fetcher) fetch(appName string, aciURL, ascURL string, ascFile *os.File, etag string) (*openpgp.Entity, *os.File, *cacheData, error) {
var (
entity *openpgp.Entity
cd *cacheData
)
u, err := url.Parse(aciURL)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing ACI url: %v", err)
}
if u.Scheme == "docker" {
registryURL := strings.TrimPrefix(aciURL, "docker://")
storeTmpDir, err := f.s.TmpDir()
if err != nil {
return nil, nil, nil, fmt.Errorf("error creating temporary dir for docker to ACI conversion: %v", err)
}
tmpDir, err := ioutil.TempDir(storeTmpDir, "docker2aci-")
if err != nil {
return nil, nil, nil, err
}
defer os.RemoveAll(tmpDir)
indexName := docker2aci.GetIndexName(registryURL)
user := ""
password := ""
if creds, ok := f.dockerAuth[indexName]; ok {
user = creds.User
password = creds.Password
}
acis, err := docker2aci.Convert(registryURL, true, tmpDir, tmpDir, user, password)
if err != nil {
return nil, nil, nil, fmt.Errorf("error converting docker image to ACI: %v", err)
}
aciFile, err := os.Open(acis[0])
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening squashed ACI file: %v", err)
}
return nil, aciFile, nil, nil
}
// attempt to automatically fetch the public key in case it is available on a TLS connection.
if globalFlags.TrustKeysFromHttps && !globalFlags.InsecureSkipVerify && appName != "" {
pkls, err := getPubKeyLocations(appName, false, globalFlags.Debug)
if err != nil {
stderr("Error determining key location: %v", err)
} else {
// no http, don't ask user for accepting the key, no overriding
if err := addKeys(pkls, appName, false, true, false); err != nil {
stderr("Error adding keys: %v", err)
}
}
}
var retrySignature bool
if f.ks != nil && ascFile == nil {
u, err := url.Parse(ascURL)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing ASC url: %v", err)
}
if u.Scheme == "file" {
ascFile, err = os.Open(u.Path)
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening signature file: %v", err)
}
} else {
stderr("Downloading signature from %v\n", ascURL)
ascFile, err = f.s.TmpFile()
if err != nil {
return nil, nil, nil, fmt.Errorf("error setting up temporary file: %v", err)
}
defer os.Remove(ascFile.Name())
err = f.downloadSignatureFile(ascURL, ascFile)
switch err {
case errStatusAccepted:
retrySignature = true
stderr("rkt: server requested deferring the signature download")
case nil:
break
default:
return nil, nil, nil, fmt.Errorf("error downloading the signature file: %v", err)
}
}
defer ascFile.Close()
}
// check if the identity used by the signature is in the store before a
// possibly expensive download. This is only an optimization and it's
// ok to skip the test if the signature will be downloaded later.
if !retrySignature && f.ks != nil && appName != "" {
if _, err := ascFile.Seek(0, 0); err != nil {
return nil, nil, nil, fmt.Errorf("error seeking signature file: %v", err)
}
if entity, err = f.ks.CheckSignature(appName, bytes.NewReader([]byte{}), ascFile); err != nil {
if _, ok := err.(pgperrors.SignatureError); !ok {
return nil, nil, nil, err
}
}
}
var aciFile *os.File
if u.Scheme == "file" {
aciFile, err = os.Open(u.Path)
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening ACI file: %v", err)
}
} else {
aciFile, err = f.s.TmpFile()
if err != nil {
return nil, aciFile, nil, fmt.Errorf("error setting up temporary file: %v", err)
}
defer os.Remove(aciFile.Name())
if cd, err = f.downloadACI(aciURL, aciFile, etag); err != nil {
return nil, nil, nil, fmt.Errorf("error downloading ACI: %v", err)
}
if cd.useCached {
return nil, nil, cd, nil
}
}
if retrySignature {
if err = f.downloadSignatureFile(ascURL, ascFile); err != nil {
return nil, aciFile, nil, fmt.Errorf("error downloading the signature file: %v", err)
}
}
manifest, err := aci.ManifestFromImage(aciFile)
if err != nil {
return nil, aciFile, nil, err
}
// Check if the downloaded ACI has the correct app name.
// The check is only performed when the aci is downloaded through the
// discovery protocol, not for local files or full URLs.
if appName != "" && manifest.Name.String() != appName {
return nil, aciFile, nil,
fmt.Errorf("error when reading the app name: %q expected but %q found",
appName, manifest.Name.String())
}
if f.ks != nil {
if _, err := aciFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking ACI file: %v", err)
}
if _, err := ascFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking signature file: %v", err)
}
if entity, err = f.ks.CheckSignature(manifest.Name.String(), aciFile, ascFile); err != nil {
return nil, aciFile, nil, err
}
}
if _, err := aciFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking ACI file: %v", err)
}
return entity, aciFile, cd, nil
}
type writeSyncer interface {
io.Writer
Sync() error
}
// downloadACI gets the aci specified at aciurl
func (f *fetcher) downloadACI(aciurl string, out writeSyncer, etag string) (*cacheData, error) {
return f.downloadHTTP(aciurl, "ACI", out, etag)
}
// downloadSignatureFile gets the signature specified at sigurl
func (f *fetcher) downloadSignatureFile(sigurl string, out writeSyncer) error {
_, err := f.downloadHTTP(sigurl, "signature", out, "")
return err
}
// downloadHTTP retrieves url and writes the response body to out.
// Only http:// and https:// URLs are supported.
func (f *fetcher) downloadHTTP(url, label string, out writeSyncer, etag string) (*cacheData, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
options := make(http.Header)
// Send credentials only over secure channel
if req.URL.Scheme == "https" {
if hostOpts, ok := f.headers[req.URL.Host]; ok {
options = hostOpts.Header()
}
}
for k, v := range options {
for _, e := range v {
req.Header.Add(k, e)
}
}
transport := http.DefaultTransport
if f.insecureSkipVerify {
transport = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
if etag != "" {
req.Header.Add("If-None-Match", etag)
}
req.Header.Add("User-Agent", fmt.Sprintf("rkt/%s", version.Version))
client := &http.Client{Transport: transport}
res, err := client.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
cd := &cacheData{}
// TODO(jonboulle): handle http more robustly (redirects?)
switch res.StatusCode {
case http.StatusAccepted:
// If the server returns Status Accepted (HTTP 202), we should retry
// downloading the signature later.
return nil, errStatusAccepted
case http.StatusOK:
fallthrough
case http.StatusNotModified:
cd.etag = res.Header.Get("ETag")
cd.maxAge = getMaxAge(res.Header.Get("Cache-Control"))
cd.useCached = (res.StatusCode == http.StatusNotModified)
if cd.useCached {
return cd, nil
}
default:
return nil, fmt.Errorf("bad HTTP status code: %d", res.StatusCode)
}
prefix := "Downloading " + label
fmtBytesSize := 18
barSize := int64(80 - len(prefix) - fmtBytesSize)
bar := ioprogress.DrawTextFormatBar(barSize)
fmtfunc := func(progress, total int64) string {
// Content-Length is set to -1 when unknown.
if total == -1 {
return fmt.Sprintf(
"%s: %v of an unknown total size",
prefix,
ioprogress.ByteUnitStr(progress),
)
}
return fmt.Sprintf(
"%s: %s %s",
prefix,
bar(progress, total),
ioprogress.DrawTextFormatBytes(progress, total),
)
}
reader := &ioprogress.Reader{
Reader: res.Body,
Size: res.ContentLength,
DrawFunc: ioprogress.DrawTerminalf(os.Stdout, fmtfunc),
DrawInterval: time.Second,
}
if _, err := io.Copy(out, reader); err != nil {
return nil, fmt.Errorf("error copying %s: %v", label, err)
}
if err := out.Sync(); err != nil {
return nil, fmt.Errorf("error writing %s: %v", label, err)
}
return cd, nil
}
func ascURLFromImgURL(imgurl string) string {
s := strings.TrimSuffix(imgurl, ".aci")
return s + ".aci.asc"
}
// newDiscoveryApp creates a discovery app if the given img is an app name and
// has a URL-like structure, for example example.com/reduce-worker.
// Or it returns nil.
func newDiscoveryApp(img string) *discovery.App {
app, err := discovery.NewAppFromString(img)
if err != nil {
return nil
}
u, err := url.Parse(app.Name.String())
if err != nil || u.Scheme != "" {
return nil
}
if _, ok := app.Labels["arch"]; !ok {
app.Labels["arch"] = defaultArch
}
if _, ok := app.Labels["os"]; !ok {
app.Labels["os"] = defaultOS
}
return app
}
func discoverApp(app *discovery.App, insecure bool) (*discovery.Endpoints, error) {
ep, attempts, err := discovery.DiscoverEndpoints(*app, insecure)
if globalFlags.Debug {
for _, a := range attempts {
stderr("meta tag 'ac-discovery' not found on %s: %v", a.Prefix, a.Error)
}
}
if err != nil {
return nil, err
}
if len(ep.ACIEndpoints) == 0 {
return nil, fmt.Errorf("no endpoints discovered")
}
return ep, nil
}
func getStoreKeyFromApp(s *store.Store, img string) (string, error) {
app, err := discovery.NewAppFromString(img)
if err != nil {
return "", fmt.Errorf("cannot parse the image name: %v", err)
}
labels, err := types.LabelsFromMap(app.Labels)
if err != nil {
return "", fmt.Errorf("invalid labels in the name: %v", err)
}
key, err := s.GetACI(app.Name, labels)
if err != nil {
return "", fmt.Errorf("cannot find image: %v", err)
}
return key, nil
}
func getStoreKeyFromAppOrHash(s *store.Store, input string) (string, error) {
var key string
if _, err := types.NewHash(input); err == nil {
key, err = s.ResolveKey(input)
if err != nil {
return "", fmt.Errorf("cannot resolve key: %v", err)
}
} else {
key, err = getStoreKeyFromApp(s, input)
if err != nil {
return "", fmt.Errorf("cannot find image: %v", err)
}
}
return key, nil
}
type cacheData struct {
useCached bool
etag string
maxAge int
}
func getMaxAge(headerValue string) int {
var MaxAge int = 0
if len(headerValue) > 0 {
parts := strings.Split(headerValue, " ")
for i := 0; i < len(parts); i++ {
attr, val := parts[i], ""
if j := strings.Index(attr, "="); j >= 0 {
attr, val = attr[:j], attr[j+1:]
}
lowerAttr := strings.ToLower(attr)
switch lowerAttr {
case "no-store":
MaxAge = 0
continue
case "no-cache":
MaxAge = 0
continue
case "max-age":
secs, err := strconv.Atoi(val)
if err != nil || secs != 0 && val[0] == '0' {
break
}
if secs <= 0 {
MaxAge = 0
} else {
MaxAge = secs
}
continue
}
}
}
return MaxAge
}
// useCached checks if downloadTime plus maxAge is before/after the current time.
// return true if the cached image should be used, false otherwise.
func useCached(downloadTime time.Time, maxAge int) bool {
freshnessLifetime := int(time.Now().Sub(downloadTime).Seconds())
if maxAge > 0 && freshnessLifetime < maxAge {
return true
}
return false
}
images.go | // Copyright 2015 The rkt Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//+build linux
package main
import (
"bytes"
"container/list"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
docker2aci "github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/docker2aci/lib"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/docker2aci/lib/common"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/aci"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/discovery"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/schema/types"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/coreos/ioprogress"
"github.com/coreos/rkt/Godeps/_workspace/src/golang.org/x/crypto/openpgp"
pgperrors "github.com/coreos/rkt/Godeps/_workspace/src/golang.org/x/crypto/openpgp/errors"
"github.com/coreos/rkt/common/apps"
"github.com/coreos/rkt/pkg/keystore"
"github.com/coreos/rkt/rkt/config"
"github.com/coreos/rkt/store"
"github.com/coreos/rkt/version"
)
type imageActionData struct {
s *store.Store
ks *keystore.Keystore
headers map[string]config.Headerer
dockerAuth map[string]config.BasicCredentials
insecureSkipVerify bool
debug bool
}
type finder struct {
imageActionData
local bool
withDeps bool
}
// findImages uses findImage to obtain a list of image hashes, using discovery if necessary
func (f *finder) findImages(al *apps.Apps) error {
return al.Walk(func(app *apps.App) error {
h, err := f.findImage(app.Image, app.Asc, true)
if err != nil {
return err
}
app.ImageID = *h
return nil
})
}
// findImage will recognize an ACI hash and use that, import a local file, use
// discovery or download an ACI directly.
func (f *finder) findImage(img string, asc string, discover bool) (*types.Hash, error) {
// check if it is a valid hash, if so let it pass through
h, err := types.NewHash(img)
if err == nil {
fullKey, err := f.s.ResolveKey(img)
if err != nil {
return nil, fmt.Errorf("could not resolve key: %v", err)
}
h, err = types.NewHash(fullKey)
if err != nil {
// should never happen
panic(err)
}
return h, nil
}
// try fetching the image, potentially remotely
ft := &fetcher{
imageActionData: f.imageActionData,
local: f.local,
withDeps: f.withDeps,
}
key, err := ft.fetchImage(img, asc, discover)
if err != nil {
return nil, err
}
h, err = types.NewHash(key)
if err != nil {
// should never happen
panic(err)
}
return h, nil
}
var errStatusAccepted = errors.New("server is still processing the request")
type fetcher struct {
imageActionData
local bool
withDeps bool
}
// fetchImage will take an image as either a URL or a name string and import it
// into the store if found. If discover is true meta-discovery is enabled. If
// asc is not "", it must exist as a local file and will be used
// as the signature file for verification, unless verification is disabled.
// If f.withDeps is true also image dependencies are fetched.
func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {
if f.withDeps && !discover {
return "", fmt.Errorf("cannot fetch image's dependencies with discovery disabled")
}
hash, err := f.fetchSingleImage(img, asc, discover)
if err != nil {
return "", err
}
if f.withDeps {
err = f.fetchImageDeps(hash)
if err != nil {
return "", err
}
}
return hash, nil
}
func (f *fetcher) getImageDeps(hash string) (types.Dependencies, error) {
key, err := f.s.ResolveKey(hash)
if err != nil {
return nil, err
}
im, err := f.s.GetImageManifest(key)
if err != nil {
return nil, err
}
return im.Dependencies, nil
}
func (f *fetcher) addImageDeps(hash string, imgsl *list.List, seen map[string]struct{}) error {
dependencies, err := f.getImageDeps(hash)
if err != nil {
return err
}
for _, d := range dependencies {
app, err := discovery.NewApp(d.ImageName.String(), d.Labels.ToMap())
if err != nil {
return err
}
imgsl.PushBack(app.String())
if _, ok := seen[app.String()]; ok {
return fmt.Errorf("dependency %s specified multiple times in the dependency tree for imageID: %s", app.String(), hash)
}
seen[app.String()] = struct{}{}
}
return nil
}
// fetchImageDeps will recursively fetch all the image dependencies
func (f *fetcher) fetchImageDeps(hash string) error {
imgsl := list.New()
seen := map[string]struct{}{}
if err := f.addImageDeps(hash, imgsl, seen); err != nil {
return err
}
for el := imgsl.Front(); el != nil; el = el.Next() {
img := el.Value.(string)
hash, err := f.fetchSingleImage(img, "", true)
if err != nil {
return err
}
if err := f.addImageDeps(hash, imgsl, seen); err != nil {
return err
}
}
return nil
}
// fetchSingleImage will take an image as either a URL or a name string and
// import it into the store if found. If discover is true meta-discovery is
// enabled. If asc is not "", it must exist as a local file and will be used
// as the signature file for verification, unless verification is disabled.
func (f *fetcher) fetchSingleImage(img string, asc string, discover bool) (string, error) {
var (
ascFile *os.File
err error
latest bool
)
if asc != "" && f.ks != nil {
ascFile, err = os.Open(asc)
if err != nil {
return "", fmt.Errorf("unable to open signature file: %v", err)
}
defer ascFile.Close()
}
u, err := url.Parse(img)
if err != nil {
return "", fmt.Errorf("not a valid image reference (%s)", img)
}
// if img refers to a local file, ensure the scheme is file:// and make the url path absolute
_, err = os.Stat(u.Path)
if err == nil {
u.Path, err = filepath.Abs(u.Path)
if err != nil {
return "", fmt.Errorf("unable to get abs path: %v", err)
}
u.Scheme = "file"
} else if !os.IsNotExist(err) {
return "", fmt.Errorf("unable to access %q: %v", img, err)
}
if discover && u.Scheme == "" {
if app := newDiscoveryApp(img); app != nil {
var discoveryError error
if !f.local {
stderr("rkt: searching for app image %s", img)
ep, err := discoverApp(app, true)
if err != nil {
discoveryError = err
} else {
// No specified version label, mark it as latest
if _, ok := app.Labels["version"]; !ok {
latest = true
}
return f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)
}
}
if discoveryError != nil {
stderr("discovery failed for %q: %v. Trying to find image in the store.", img, discoveryError)
}
if f.local || discoveryError != nil {
return f.fetchImageFromStore(img)
}
}
}
switch u.Scheme {
case "http", "https", "file":
case "docker":
dockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))
if dockerURL.Tag == "latest" {
latest = true
}
default:
return "", fmt.Errorf("rkt only supports http, https, docker or file URLs (%s)", img)
}
return f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)
}
func (f *fetcher) fetchImageFromStore(img string) (string, error) {
return getStoreKeyFromApp(f.s, img)
}
func (f *fetcher) fetchImageFromEndpoints(appName string, ep *discovery.Endpoints, ascFile *os.File, latest bool) (string, error) {
return f.fetchImageFrom(appName, ep.ACIEndpoints[0].ACI, ep.ACIEndpoints[0].ASC, "", ascFile, latest)
}
func (f *fetcher) fetchImageFromURL(imgurl string, scheme string, ascFile *os.File, latest bool) (string, error) {
return f.fetchImageFrom("", imgurl, ascURLFromImgURL(imgurl), scheme, ascFile, latest)
}
// fetchImageFrom fetches an image from the aciURL.
// If the aciURL is a file path (scheme == 'file'), then we bypass the on-disk store.
// If the `--local` flag is provided, then we will only fetch from the on-disk store (unless aciURL is a file path).
// If the label is 'latest', then we will bypass the on-disk store (unless '--local' is specified).
// Otherwise if '--local' is false, aciURL is not a file path, and the label is not 'latest' or empty, we will first
// try to fetch from the on-disk store, if not found, then fetch from the internet.
func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {
var rem *store.Remote
if f.insecureSkipVerify {
if f.ks != nil {
stderr("rkt: warning: TLS verification and signature verification has been disabled")
}
} else if scheme == "docker" {
return "", fmt.Errorf("signature verification for docker images is not supported (try --insecure-skip-verify)")
}
if (f.local && scheme != "file") || (scheme != "file" && !latest) {
var err error
ok := false
rem, ok, err = f.s.GetRemote(aciURL)
if err != nil {
return "", err
}
if ok {
if f.local {
stderr("rkt: using image in local store for app %s", appName)
return rem.BlobKey, nil
}
if useCached(rem.DownloadTime, rem.CacheMaxAge) {
stderr("rkt: found image in local store, skipping fetching from %s", aciURL)
return rem.BlobKey, nil
}
}
if f.local {
return "", fmt.Errorf("url %s not available in local store", aciURL)
}
}
if scheme != "file" && f.debug {
stderr("rkt: fetching image from %s", aciURL)
}
var etag string
if rem != nil {
etag = rem.ETag
}
entity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)
if err != nil {
return "", err
}
if cd != nil && cd.useCached {
if rem != nil {
return rem.BlobKey, nil
} else {
// should never happen
panic("asked to use cached image but remote is nil")
}
}
if scheme != "file" {
defer os.Remove(aciFile.Name())
}
if entity != nil && !f.insecureSkipVerify {
stderr("rkt: signature verified:")
for _, v := range entity.Identities {
stderr(" %s", v.Name)
}
}
key, err := f.s.WriteACI(aciFile, latest)
if err != nil {
return "", err
}
if scheme != "file" {
rem := store.NewRemote(aciURL, ascURL)
rem.BlobKey = key
rem.DownloadTime = time.Now()
if cd != nil {
rem.ETag = cd.etag
rem.CacheMaxAge = cd.maxAge
}
err = f.s.WriteRemote(rem)
if err != nil {
return "", err
}
}
return key, nil
}
// fetch opens/downloads and verifies the remote ACI.
// If appName is not "", it will be used to check that the manifest contains the correct appName.
// If ascFile is not nil, it will be used as the signature file and ascURL will be ignored.
// If Keystore is nil signature verification will be skipped, regardless of ascFile.
// fetch returns the signer, an *os.File representing the ACI, and an error if any.
// err will be nil if the ACI fetches successfully and the ACI is verified.
func (f *fetcher) fetch(appName string, aciURL, ascURL string, ascFile *os.File, etag string) (*openpgp.Entity, *os.File, *cacheData, error) {
var (
entity *openpgp.Entity
cd *cacheData
)
u, err := url.Parse(aciURL)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing ACI url: %v", err)
}
if u.Scheme == "docker" {
registryURL := strings.TrimPrefix(aciURL, "docker://")
storeTmpDir, err := f.s.TmpDir()
if err != nil {
return nil, nil, nil, fmt.Errorf("error creating temporary dir for docker to ACI conversion: %v", err)
}
tmpDir, err := ioutil.TempDir(storeTmpDir, "docker2aci-")
if err != nil {
return nil, nil, nil, err
}
defer os.RemoveAll(tmpDir)
indexName := docker2aci.GetIndexName(registryURL)
user := ""
password := ""
if creds, ok := f.dockerAuth[indexName]; ok {
user = creds.User
password = creds.Password
}
acis, err := docker2aci.Convert(registryURL, true, tmpDir, tmpDir, user, password)
if err != nil {
return nil, nil, nil, fmt.Errorf("error converting docker image to ACI: %v", err)
}
aciFile, err := os.Open(acis[0])
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening squashed ACI file: %v", err)
}
return nil, aciFile, nil, nil
}
// attempt to automatically fetch the public key in case it is available on a TLS connection.
if globalFlags.TrustKeysFromHttps && !globalFlags.InsecureSkipVerify && appName != "" {
pkls, err := getPubKeyLocations(appName, false, globalFlags.Debug)
if err != nil {
stderr("Error determining key location: %v", err)
} else {
// no http, don't ask user for accepting the key, no overriding
if err := addKeys(pkls, appName, false, true, false); err != nil {
stderr("Error adding keys: %v", err)
}
}
}
var retrySignature bool
if f.ks != nil && ascFile == nil {
u, err := url.Parse(ascURL)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing ASC url: %v", err)
}
if u.Scheme == "file" {
ascFile, err = os.Open(u.Path)
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening signature file: %v", err)
}
} else {
stderr("Downloading signature from %v\n", ascURL)
ascFile, err = f.s.TmpFile()
if err != nil {
return nil, nil, nil, fmt.Errorf("error setting up temporary file: %v", err)
}
defer os.Remove(ascFile.Name())
err = f.downloadSignatureFile(ascURL, ascFile)
switch err {
case errStatusAccepted:
retrySignature = true
stderr("rkt: server requested deferring the signature download")
case nil:
break
default:
return nil, nil, nil, fmt.Errorf("error downloading the signature file: %v", err)
}
}
defer ascFile.Close()
}
// check if the identity used by the signature is in the store before a
// possibly expensive download. This is only an optimization and it's
// ok to skip the test if the signature will be downloaded later.
if !retrySignature && f.ks != nil && appName != "" {
if _, err := ascFile.Seek(0, 0); err != nil {
return nil, nil, nil, fmt.Errorf("error seeking signature file: %v", err)
}
if entity, err = f.ks.CheckSignature(appName, bytes.NewReader([]byte{}), ascFile); err != nil {
if _, ok := err.(pgperrors.SignatureError); !ok {
return nil, nil, nil, err
}
}
}
var aciFile *os.File
if u.Scheme == "file" {
aciFile, err = os.Open(u.Path)
if err != nil {
return nil, nil, nil, fmt.Errorf("error opening ACI file: %v", err)
}
} else {
aciFile, err = f.s.TmpFile()
if err != nil {
return nil, aciFile, nil, fmt.Errorf("error setting up temporary file: %v", err)
}
defer os.Remove(aciFile.Name())
if cd, err = f.downloadACI(aciURL, aciFile, etag); err != nil {
return nil, nil, nil, fmt.Errorf("error downloading ACI: %v", err)
}
if cd.useCached {
return nil, nil, cd, nil
}
}
if retrySignature {
if err = f.downloadSignatureFile(ascURL, ascFile); err != nil {
return nil, aciFile, nil, fmt.Errorf("error downloading the signature file: %v", err)
}
}
manifest, err := aci.ManifestFromImage(aciFile)
if err != nil {
return nil, aciFile, nil, err
}
// Check if the downloaded ACI has the correct app name.
// The check is only performed when the aci is downloaded through the
// discovery protocol, not for local files or full URLs.
if appName != "" && manifest.Name.String() != appName {
return nil, aciFile, nil,
fmt.Errorf("error when reading the app name: %q expected but %q found",
appName, manifest.Name.String())
}
if f.ks != nil {
if _, err := aciFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking ACI file: %v", err)
}
if _, err := ascFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking signature file: %v", err)
}
if entity, err = f.ks.CheckSignature(manifest.Name.String(), aciFile, ascFile); err != nil {
return nil, aciFile, nil, err
}
}
if _, err := aciFile.Seek(0, 0); err != nil {
return nil, aciFile, nil, fmt.Errorf("error seeking ACI file: %v", err)
}
return entity, aciFile, cd, nil
}
type writeSyncer interface {
io.Writer
Sync() error
}
// downloadACI gets the aci specified at aciurl
func (f *fetcher) downloadACI(aciurl string, out writeSyncer, etag string) (*cacheData, error) {
return f.downloadHTTP(aciurl, "ACI", out, etag)
}
// downloadSignatureFile gets the signature specified at sigurl
func (f *fetcher) downloadSignatureFile(sigurl string, out writeSyncer) error {
_, err := f.downloadHTTP(sigurl, "signature", out, "")
return err
}
// downloadHTTP retrieves url and writes the response body to out.
// Only http:// and https:// URLs are supported.
func (f *fetcher) downloadHTTP(url, label string, out writeSyncer, etag string) (*cacheData, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
options := make(http.Header)
// Send credentials only over secure channel
if req.URL.Scheme == "https" {
if hostOpts, ok := f.headers[req.URL.Host]; ok {
options = hostOpts.Header()
}
}
for k, v := range options {
for _, e := range v {
req.Header.Add(k, e)
}
}
transport := http.DefaultTransport
if f.insecureSkipVerify {
transport = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
if etag != "" {
req.Header.Add("If-None-Match", etag)
}
req.Header.Add("User-Agent", fmt.Sprintf("rkt/%s", version.Version))
client := &http.Client{Transport: transport}
res, err := client.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
cd := &cacheData{}
// TODO(jonboulle): handle http more robustly (redirects?)
switch res.StatusCode {
case http.StatusAccepted:
// If the server returns Status Accepted (HTTP 202), we should retry
// downloading the signature later.
return nil, errStatusAccepted
case http.StatusOK:
fallthrough
case http.StatusNotModified:
cd.etag = res.Header.Get("ETag")
cd.maxAge = getMaxAge(res.Header.Get("Cache-Control"))
cd.useCached = (res.StatusCode == http.StatusNotModified)
if cd.useCached {
return cd, nil
}
default:
return nil, fmt.Errorf("bad HTTP status code: %d", res.StatusCode)
}
prefix := "Downloading " + label
fmtBytesSize := 18
barSize := int64(80 - len(prefix) - fmtBytesSize)
bar := ioprogress.DrawTextFormatBar(barSize)
fmtfunc := func(progress, total int64) string {
// Content-Length is set to -1 when unknown.
if total == -1 {
return fmt.Sprintf(
"%s: %v of an unknown total size",
prefix,
ioprogress.ByteUnitStr(progress),
)
}
return fmt.Sprintf(
"%s: %s %s",
prefix,
bar(progress, total),
ioprogress.DrawTextFormatBytes(progress, total),
)
}
reader := &ioprogress.Reader{
Reader: res.Body,
Size: res.ContentLength,
DrawFunc: ioprogress.DrawTerminalf(os.Stdout, fmtfunc),
DrawInterval: time.Second,
}
if _, err := io.Copy(out, reader); err != nil {
return nil, fmt.Errorf("error copying %s: %v", label, err)
}
if err := out.Sync(); err != nil {
return nil, fmt.Errorf("error writing %s: %v", label, err)
}
return cd, nil
}
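// Illustrative progress output from the formatter above (assumed widths; the
// exact bar rendering is up to ioprogress):
//
//	Downloading ACI: [==========>         ] 4.1 MB / 8.2 MB
//	Downloading ACI: 4.1 MB of an unknown total size   (no Content-Length)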
func ascURLFromImgURL(imgurl string) string {
s := strings.TrimSuffix(imgurl, ".aci")
return s + ".aci.asc"
}
// newDiscoveryApp creates a discovery app if the given img is an app name and
// has a URL-like structure, for example example.com/reduce-worker.
// Or it returns nil.
func newDiscoveryApp(img string) *discovery.App {
app, err := discovery.NewAppFromString(img)
if err != nil {
return nil
}
u, err := url.Parse(app.Name.String())
if err != nil || u.Scheme != "" {
return nil
}
if _, ok := app.Labels["arch"]; !ok {
app.Labels["arch"] = defaultArch
}
if _, ok := app.Labels["os"]; !ok {
app.Labels["os"] = defaultOS
}
return app
}
func discoverApp(app *discovery.App, insecure bool) (*discovery.Endpoints, error) {
ep, attempts, err := discovery.DiscoverEndpoints(*app, insecure)
if globalFlags.Debug {
for _, a := range attempts {
stderr("meta tag 'ac-discovery' not found on %s: %v", a.Prefix, a.Error)
}
}
if err != nil {
return nil, err
}
if len(ep.ACIEndpoints) == 0 {
return nil, fmt.Errorf("no endpoints discovered")
}
return ep, nil
}
func getStoreKeyFromApp(s *store.Store, img string) (string, error) {
app, err := discovery.NewAppFromString(img)
if err != nil {
return "", fmt.Errorf("cannot parse the image name: %v", err)
}
labels, err := types.LabelsFromMap(app.Labels)
if err != nil {
return "", fmt.Errorf("invalid labels in the name: %v", err)
}
key, err := s.GetACI(app.Name, labels)
if err != nil {
return "", fmt.Errorf("cannot find image: %v", err)
}
return key, nil
}
func getStoreKeyFromAppOrHash(s *store.Store, input string) (string, error) {
var key string
if _, err := types.NewHash(input); err == nil {
key, err = s.ResolveKey(input)
if err != nil {
return "", fmt.Errorf("cannot resolve key: %v", err)
}
} else {
key, err = getStoreKeyFromApp(s, input)
if err != nil {
return "", fmt.Errorf("cannot find image: %v", err)
}
}
return key, nil
}
type cacheData struct {
useCached bool
etag string
maxAge int
}
func getMaxAge(headerValue string) int {
var MaxAge int = 0
if len(headerValue) > 0 {
parts := strings.Split(headerValue, " ")
for i := 0; i < len(parts); i++ {
attr, val := parts[i], ""
if j := strings.Index(attr, "="); j >= 0 {
attr, val = attr[:j], attr[j+1:]
}
lowerAttr := strings.ToLower(attr)
switch lowerAttr {
case "no-store":
MaxAge = 0
continue
case "no-cache":
MaxAge = 0
continue
case "max-age":
secs, err := strconv.Atoi(val)
if err != nil || secs != 0 && val[0] == '0' {
break
}
if secs <= 0 {
MaxAge = 0
} else {
MaxAge = secs
}
continue
}
}
}
return MaxAge
}
// useCached checks if downloadTime plus maxAge is before/after the current time.
// return true if the cached image should be used, false otherwise.
func useCached(downloadTime time.Time, maxAge int) bool {
freshnessLifetime := int(time.Now().Sub(downloadTime).Seconds())
if maxAge > 0 && freshnessLifetime < maxAge {
return true
}
return false
}
query.go | /*
Copyright 2018 Iguazio Systems Ltd.
Licensed under the Apache License, Version 2.0 (the "License") with
an addition restriction as set forth herein. You may not use this
file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
In addition, you may not use the software for any purposes that are
illegal under applicable law, and the grant of the foregoing license
under the Apache 2.0 license is conditioned upon your compliance with
such restriction.
*/
package tsdbctl
import (
"context"
"strings"
"time"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/v3io/v3io-tsdb/pkg/config"
"github.com/v3io/v3io-tsdb/pkg/formatter"
"github.com/v3io/v3io-tsdb/pkg/pquerier"
"github.com/v3io/v3io-tsdb/pkg/utils"
)
type queryCommandeer struct {
cmd *cobra.Command
rootCommandeer *RootCommandeer
name string
filter string
to string
from string
last string
functions string
step string
output string
oldQuerier bool
groupBy string
usePreciseAggregations bool
aggregationWindow string
}
func newQueryCommandeer(rootCommandeer *RootCommandeer) *queryCommandeer {
commandeer := &queryCommandeer{
rootCommandeer: rootCommandeer,
}
cmd := &cobra.Command{
Aliases: []string{"get"},
Use: "query [<metrics>] [flags]",
Short: "Query a TSDB instance",
Long: `Query a TSDB instance (table).`,
Example: `The examples assume that the endpoint of the web-gateway service, the login credentials, and
the name of the data container are configured in the default configuration file (` + config.DefaultConfigurationFileName + `)
instead of using the -s|--server, -u|--username, -p|--password, and -c|--container flags.
- tsdbctl query temperature -t mytsdb
- tsdbctl query -t performance -f "starts(__name__, 'cpu') AND os=='win'"
- tsdbctl query metric2,metric3,metric4 -t pmetrics -b 0 -e now-1h -a "sum,avg" -i 20m
- tsdbctl query -t mytsdb -f "LabelA==8.1" -l 1d -o json
- tsdbctl query metric1 -t my_tsdb -l 1d -a "count,sum,avg" --groupBy LabelA,LabelB
- tsdbctl query metric1 -t my_tsdb -l 1d -a "count_all,sum_all"
Notes:
- You must set the metric-name argument (<metrics>) and/or the query-filter flag (-f|--filter).
- Queries that set the metric-name argument (<metrics>) use range scan and are therefore faster.
- To query the full TSDB content, set the -f|--filter to a query filter that always evaluates
to true (such as "1==1"), don't set the <metrics> argument, and set the -b|--begin flag to 0.
- You can use either over-time aggregates or cross-series (*_all) aggregates, but not both in the same query.
Arguments:
<metrics> (string) Comma-separated list of metric names to query. If you don't set this argument, you must
provide a query filter using the -f|--filter flag.`,
RunE: func(cmd *cobra.Command, args []string) error {
// Save the metric name if provided as a positional argument ($1)
if len(args) > 0 {
commandeer.name = args[0]
}
return commandeer.query()
},
}
cmd.Flags().StringVarP(&commandeer.to, "end", "e", "",
"End (maximum) time for the query, as a string containing an\nRFC 3339 time string, a Unix timestamp in milliseconds, or\na relative time of the format \"now\" or \"now-[0-9]+[mhd]\"\n(where 'm' = minutes, 'h' = hours, and 'd' = days).\nExamples: \"2018-09-26T14:10:20Z\"; \"1537971006000\";\n\"now-3h\"; \"now-7d\". (default \"now\")")
cmd.Flags().StringVarP(&commandeer.from, "begin", "b", "",
"Start (minimum) time for the query, as a string containing\nan RFC 3339 time, a Unix timestamp in milliseconds, a\nrelative time of the format \"now\" or \"now-[0-9]+[mhd]\"\n(where 'm' = minutes, 'h' = hours, and 'd' = days), or 0\nfor the earliest time. Examples: \"2016-01-02T15:34:26Z\";\n\"1451748866\"; \"now-90m\"; \"0\". (default = <end time> - 1h)")
cmd.Flags().StringVarP(&commandeer.output, "output", "o", formatter.DefaultOutputFormat,
"Output format in which to display the query results -\n\"text\" | \"csv\" | \"json\".")
cmd.Flags().StringVarP(&commandeer.filter, "filter", "f", "",
"Query filter, as an Iguazio Data Science Platform\nfilter expression. To reference a metric name from within\nthe query filter, use the \"__name__\" attribute.\nExamples: \"method=='get'\"; \"__name__='cpu' AND os=='win'\".")
cmd.Flags().StringVarP(&commandeer.last, "last", "l", "",
"Return data for the specified time period before the\ncurrent time, of the format \"[0-9]+[mhd]\" (where\n'm' = minutes, 'h' = hours, and 'd' = days>). When setting\nthis flag, don't set the -b|--begin or -e|--end flags.\nExamples: \"1h\"; \"15m\"; \"30d\" to return data for the last\n1 hour, 15 minutes, or 30 days.")
cmd.Flags().StringVarP(&commandeer.aggregationWindow, "aggregation-window", "w", "",
"Sliding time window for aggregation. Must be used in conjunction with `-a <aggr>`. Examples: \"1h\"; \"150m\".")
cmd.Flags().StringVarP(&commandeer.functions, "aggregates", "a", "",
"Aggregation information to return, as a comma-separated\nlist of supported aggregation functions - count | avg |\nsum | min | max | stddev | stdvar | last | rate.\nFor cross series aggregations add an \"_all\" suffix for the wanted aggregate.\nNote: you can query either over time aggregates or cross series aggregate but not both in the same query.\nExample: \"sum,min,max,count\", \"sum_all,avg_all\".")
cmd.Flags().StringVarP(&commandeer.step, "aggregation-interval", "i", "",
"Aggregation interval for applying the aggregation functions\n(if set - see the -a|--aggregates flag), of the format\n\"[0-9]+[mhd]\" (where 'm' = minutes, 'h' = hours, and\n'd' = days). Examples: \"1h\"; \"150m\". (default =\n<end time> - <start time>)")
cmd.Flags().StringVar(&commandeer.groupBy, "groupBy", "",
"Comma separated list of labels to group the result by")
cmd.Flags().BoolVar(&commandeer.usePreciseAggregations, "use-precise-aggregations", false,
"Disable server aggregation optimizations for more accurate results.")
cmd.Flags().BoolVarP(&commandeer.oldQuerier, "oldQuerier", "q", false, "use old querier")
cmd.Flags().Lookup("oldQuerier").Hidden = true
commandeer.cmd = cmd
return commandeer
}
func (qc *queryCommandeer) query() error {
if qc.name == "" && qc.filter == "" {
return errors.New("the query command must receive either a metric-name parameter (<metrics>) or a query filter (set via the -f|--filter flag)")
}
if qc.last != "" && (qc.from != "" || qc.to != "") {
return errors.New("the -l|--last flag cannot be set together with the -b|--begin and/or -e|--end flags")
}
// Initialize parameters and adapter
if err := qc.rootCommandeer.initialize(); err != nil {
return err
}
if err := qc.rootCommandeer.startAdapter(); err != nil {
return err
}
step, err := utils.Str2duration(qc.step)
if err != nil {
return err
}
// Set start & end times
to := time.Now().Unix() * 1000
if qc.to != "" {
to, err = utils.Str2unixTime(qc.to)
if err != nil {
return err
}
}
from := to - 1000*3600 // Default start time = one hour before the end time
if qc.from != "" {
from, err = utils.Str2unixTime(qc.from)
if err != nil {
return err
}
}
if qc.last != "" {
last, err := utils.Str2duration(qc.last)
if err != nil {
return err
}
from = to - last
}
qc.rootCommandeer.logger.DebugWith("Query", "from", from, "to", to, "name", qc.name,
"filter", qc.filter, "functions", qc.functions, "step", qc.step, "groupBy", qc.groupBy)
if !qc.oldQuerier {
return qc.newQuery(from, to, step)
}
return qc.oldQuery(from, to, step)
}
func (qc *queryCommandeer) newQuery(from, to, step int64) error {
qry, err := qc.rootCommandeer.adapter.QuerierV2()
if err != nil {
return errors.Wrap(err, "Failed to initialize the Querier object.")
}
aggregationWindow, err := utils.Str2duration(qc.aggregationWindow)
if err != nil |
var selectParams *pquerier.SelectParams
if strings.HasPrefix(qc.name, "select") {
selectParams, _, err = pquerier.ParseQuery(qc.name)
if err != nil {
return errors.Wrap(err, "failed to parse sql")
}
selectParams.Step = step
selectParams.From = from
selectParams.To = to
selectParams.UseOnlyClientAggr = qc.usePreciseAggregations
selectParams.AggregationWindow = aggregationWindow
} else {
selectParams = &pquerier.SelectParams{Name: qc.name, Functions: qc.functions,
Step: step, Filter: qc.filter, From: from, To: to, GroupBy: qc.groupBy,
UseOnlyClientAggr: qc.usePreciseAggregations,
AggregationWindow: aggregationWindow}
}
set, err := qry.Select(selectParams)
if err != nil {
return errors.Wrap(err, "The query selection failed.")
}
f, err := formatter.NewFormatter(qc.output, nil)
if err != nil {
return errors.Wrapf(err, "Failed to start formatter '%s'.", qc.output)
}
err = f.Write(qc.cmd.OutOrStdout(), set)
return err
}
func (qc *queryCommandeer) oldQuery(from, to, step int64) error {
qry, err := qc.rootCommandeer.adapter.Querier(context.TODO(), from, to)
if err != nil {
return errors.Wrap(err, "Failed to initialize the Querier object.")
}
var set utils.SeriesSet
set, err = qry.Select(qc.name, qc.functions, step, qc.filter)
if err != nil {
return errors.Wrap(err, "The query selection failed.")
}
f, err := formatter.NewFormatter(qc.output, nil)
if err != nil {
return errors.Wrapf(err, "Failed to start formatter '%s'.", qc.output)
}
err = f.Write(qc.cmd.OutOrStdout(), set)
return err
}
| {
return errors.Wrap(err, "Failed to parse aggregation window")
} | conditional_block |
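// Editor's aside: query() in the file above resolves the time window in a
// fixed order -- end defaults to now, begin defaults to end minus one hour,
// and -l|--last overrides begin. A compact restatement in Rust (millisecond
// timestamps; illustrative only, not the shipped logic):
fn resolve_window(now_ms: i64, begin: Option<i64>, end: Option<i64>, last_ms: Option<i64>) -> (i64, i64) {
    let to = end.unwrap_or(now_ms);
    let mut from = begin.unwrap_or(to - 3_600_000); // default: one hour before `to`
    if let Some(last) = last_ms {
        from = to - last; // -l wins (query() rejects -l combined with -b/-e earlier)
    }
    (from, to)
}

fn main() {
    let (from, to) = resolve_window(1_700_000_000_000, None, None, Some(86_400_000));
    assert_eq!(to - from, 86_400_000);
    println!("window: [{from}, {to}]");
}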
query.go | /*
Copyright 2018 Iguazio Systems Ltd.
Licensed under the Apache License, Version 2.0 (the "License") with
an addition restriction as set forth herein. You may not use this
file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
In addition, you may not use the software for any purposes that are
illegal under applicable law, and the grant of the foregoing license
under the Apache 2.0 license is conditioned upon your compliance with
such restriction.
*/
package tsdbctl
import (
"context"
"strings"
"time"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/v3io/v3io-tsdb/pkg/config"
"github.com/v3io/v3io-tsdb/pkg/formatter"
"github.com/v3io/v3io-tsdb/pkg/pquerier"
"github.com/v3io/v3io-tsdb/pkg/utils"
)
type queryCommandeer struct {
cmd *cobra.Command
rootCommandeer *RootCommandeer
name string
filter string
to string
from string
last string
functions string
step string
output string
oldQuerier bool
groupBy string
usePreciseAggregations bool
aggregationWindow string
}
func newQueryCommandeer(rootCommandeer *RootCommandeer) *queryCommandeer {
commandeer := &queryCommandeer{
rootCommandeer: rootCommandeer,
}
cmd := &cobra.Command{
Aliases: []string{"get"},
Use: "query [<metrics>] [flags]",
Short: "Query a TSDB instance",
Long: `Query a TSDB instance (table).`,
Example: `The examples assume that the endpoint of the web-gateway service, the login credentials, and
the name of the data container are configured in the default configuration file (` + config.DefaultConfigurationFileName + `)
instead of using the -s|--server, -u|--username, -p|--password, and -c|--container flags.
- tsdbctl query temperature -t mytsdb
- tsdbctl query -t performance -f "starts(__name__, 'cpu') AND os=='win'"
- tsdbctl query metric2,metric3,metric4 -t pmetrics -b 0 -e now-1h -a "sum,avg" -i 20m
- tsdbctl query -t mytsdb -f "LabelA==8.1" -l 1d -o json
- tsdbctl query metric1 -t my_tsdb -l 1d -a "count,sum,avg" --groupBy LabelA,LabelB
- tsdbctl query metric1 -t my_tsdb -l 1d -a "count_all,sum_all"
Notes:
- You must set the metric-name argument (<metrics>) and/or the query-filter flag (-f|--filter).
- Queries that set the metric-name argument (<metrics>) use range scan and are therefore faster.
- To query the full TSDB content, set the -f|--filter to a query filter that always evaluates
to true (such as "1==1"), don't set the <metrics> argument, and set the -b|--begin flag to 0.
- You can use either over-time aggregates or cross-series (*_all) aggregates, but not both in the same query.
Arguments:
<metrics> (string) Comma-separated list of metric names to query. If you don't set this argument, you must
provide a query filter using the -f|--filter flag.`,
RunE: func(cmd *cobra.Command, args []string) error {
// Save the metric name if provided as a positional argument ($1)
if len(args) > 0 {
commandeer.name = args[0]
}
return commandeer.query()
},
}
cmd.Flags().StringVarP(&commandeer.to, "end", "e", "",
"End (maximum) time for the query, as a string containing an\nRFC 3339 time string, a Unix timestamp in milliseconds, or\na relative time of the format \"now\" or \"now-[0-9]+[mhd]\"\n(where 'm' = minutes, 'h' = hours, and 'd' = days).\nExamples: \"2018-09-26T14:10:20Z\"; \"1537971006000\";\n\"now-3h\"; \"now-7d\". (default \"now\")")
cmd.Flags().StringVarP(&commandeer.from, "begin", "b", "",
"Start (minimum) time for the query, as a string containing\nan RFC 3339 time, a Unix timestamp in milliseconds, a\nrelative time of the format \"now\" or \"now-[0-9]+[mhd]\"\n(where 'm' = minutes, 'h' = hours, and 'd' = days), or 0\nfor the earliest time. Examples: \"2016-01-02T15:34:26Z\";\n\"1451748866\"; \"now-90m\"; \"0\". (default = <end time> - 1h)")
cmd.Flags().StringVarP(&commandeer.output, "output", "o", formatter.DefaultOutputFormat,
"Output format in which to display the query results -\n\"text\" | \"csv\" | \"json\".")
cmd.Flags().StringVarP(&commandeer.filter, "filter", "f", "",
"Query filter, as an Iguazio Data Science Platform\nfilter expression. To reference a metric name from within\nthe query filter, use the \"__name__\" attribute.\nExamples: \"method=='get'\"; \"__name__='cpu' AND os=='win'\".")
cmd.Flags().StringVarP(&commandeer.last, "last", "l", "",
"Return data for the specified time period before the\ncurrent time, of the format \"[0-9]+[mhd]\" (where\n'm' = minutes, 'h' = hours, and 'd' = days>). When setting\nthis flag, don't set the -b|--begin or -e|--end flags.\nExamples: \"1h\"; \"15m\"; \"30d\" to return data for the last\n1 hour, 15 minutes, or 30 days.")
cmd.Flags().StringVarP(&commandeer.aggregationWindow, "aggregation-window", "w", "",
"Sliding time window for aggregation. Must be used in conjunction with `-a <aggr>`. Examples: \"1h\"; \"150m\".")
cmd.Flags().StringVarP(&commandeer.functions, "aggregates", "a", "",
"Aggregation information to return, as a comma-separated\nlist of supported aggregation functions - count | avg |\nsum | min | max | stddev | stdvar | last | rate.\nFor cross series aggregations add an \"_all\" suffix for the wanted aggregate.\nNote: you can query either over time aggregates or cross series aggregate but not both in the same query.\nExample: \"sum,min,max,count\", \"sum_all,avg_all\".")
cmd.Flags().StringVarP(&commandeer.step, "aggregation-interval", "i", "",
"Aggregation interval for applying the aggregation functions\n(if set - see the -a|--aggregates flag), of the format\n\"[0-9]+[mhd]\" (where 'm' = minutes, 'h' = hours, and\n'd' = days). Examples: \"1h\"; \"150m\". (default =\n<end time> - <start time>)")
cmd.Flags().StringVar(&commandeer.groupBy, "groupBy", "",
"Comma separated list of labels to group the result by")
cmd.Flags().BoolVar(&commandeer.usePreciseAggregations, "use-precise-aggregations", false,
"Disable server aggregation optimizations for more accurate results.")
cmd.Flags().BoolVarP(&commandeer.oldQuerier, "oldQuerier", "q", false, "use old querier")
cmd.Flags().Lookup("oldQuerier").Hidden = true
commandeer.cmd = cmd
return commandeer
}
func (qc *queryCommandeer) query() error {
if qc.name == "" && qc.filter == "" {
return errors.New("the query command must receive either a metric-name parameter (<metrics>) or a query filter (set via the -f|--filter flag)")
}
if qc.last != "" && (qc.from != "" || qc.to != "") {
return errors.New("the -l|--last flag cannot be set together with the -b|--begin and/or -e|--end flags")
}
// Initialize parameters and adapter
if err := qc.rootCommandeer.initialize(); err != nil {
return err
}
if err := qc.rootCommandeer.startAdapter(); err != nil {
return err
}
step, err := utils.Str2duration(qc.step)
if err != nil {
return err
}
// Set start & end times
to := time.Now().Unix() * 1000
if qc.to != "" {
to, err = utils.Str2unixTime(qc.to)
if err != nil {
return err
}
}
from := to - 1000*3600 // Default start time = one hour before the end time
if qc.from != "" {
from, err = utils.Str2unixTime(qc.from)
if err != nil {
return err
}
}
if qc.last != "" {
last, err := utils.Str2duration(qc.last)
if err != nil {
return err
}
from = to - last
}
qc.rootCommandeer.logger.DebugWith("Query", "from", from, "to", to, "name", qc.name,
"filter", qc.filter, "functions", qc.functions, "step", qc.step, "groupBy", qc.groupBy)
if !qc.oldQuerier {
return qc.newQuery(from, to, step)
}
return qc.oldQuery(from, to, step)
}
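// Editor's aside: utils.Str2unixTime above accepts RFC 3339 strings, Unix
// timestamps in milliseconds, and "now"/"now-[0-9]+[mhd]" expressions. A
// simplified Rust sketch of just the relative form (the real helper lives in
// v3io-tsdb's utils package; RFC 3339 handling is omitted here):
fn rel_to_ms(expr: &str, now_ms: i64) -> Option<i64> {
    if expr == "now" {
        return Some(now_ms);
    }
    let rest = expr.strip_prefix("now-")?;
    if rest.len() < 2 {
        return None; // need at least one digit and a unit
    }
    let (num, unit) = rest.split_at(rest.len() - 1);
    let n: i64 = num.parse().ok()?;
    let unit_ms = match unit {
        "m" => 60_000,
        "h" => 3_600_000,
        "d" => 86_400_000,
        _ => return None,
    };
    Some(now_ms - n * unit_ms)
}

fn main() {
    let now = 1_700_000_000_000;
    assert_eq!(rel_to_ms("now-90m", now), Some(now - 90 * 60_000));
    assert_eq!(rel_to_ms("bogus", now), None);
    println!("relative-time sketch ok");
}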
func (qc *queryCommandeer) newQuery(from, to, step int64) error {
qry, err := qc.rootCommandeer.adapter.QuerierV2()
if err != nil {
return errors.Wrap(err, "Failed to initialize the Querier object.")
}
aggregationWindow, err := utils.Str2duration(qc.aggregationWindow)
if err != nil {
return errors.Wrap(err, "Failed to parse aggregation window")
}
var selectParams *pquerier.SelectParams
if strings.HasPrefix(qc.name, "select") {
selectParams, _, err = pquerier.ParseQuery(qc.name)
if err != nil {
return errors.Wrap(err, "failed to parse sql")
}
selectParams.Step = step
selectParams.From = from
selectParams.To = to
selectParams.UseOnlyClientAggr = qc.usePreciseAggregations
selectParams.AggregationWindow = aggregationWindow
} else {
selectParams = &pquerier.SelectParams{Name: qc.name, Functions: qc.functions,
Step: step, Filter: qc.filter, From: from, To: to, GroupBy: qc.groupBy,
UseOnlyClientAggr: qc.usePreciseAggregations,
AggregationWindow: aggregationWindow}
}
set, err := qry.Select(selectParams)
if err != nil {
return errors.Wrap(err, "The query selection failed.")
}
f, err := formatter.NewFormatter(qc.output, nil)
if err != nil {
return errors.Wrapf(err, "Failed to start formatter '%s'.", qc.output)
}
err = f.Write(qc.cmd.OutOrStdout(), set)
return err
}
func (qc *queryCommandeer) oldQuery(from, to, step int64) error {
qry, err := qc.rootCommandeer.adapter.Querier(context.TODO(), from, to)
if err != nil {
return errors.Wrap(err, "Failed to initialize the Querier object.")
}
var set utils.SeriesSet
set, err = qry.Select(qc.name, qc.functions, step, qc.filter)
if err != nil {
return errors.Wrap(err, "The query selection failed.")
}
f, err := formatter.NewFormatter(qc.output, nil)
if err != nil { | err = f.Write(qc.cmd.OutOrStdout(), set)
return err
} | return errors.Wrapf(err, "Failed to start formatter '%s'.", qc.output)
}
| random_line_split |
query.go | /*
Copyright 2018 Iguazio Systems Ltd.
Licensed under the Apache License, Version 2.0 (the "License") with
an addition restriction as set forth herein. You may not use this
file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
In addition, you may not use the software for any purposes that are
illegal under applicable law, and the grant of the foregoing license
under the Apache 2.0 license is conditioned upon your compliance with
such restriction.
*/
package tsdbctl
import (
"context"
"strings"
"time"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/v3io/v3io-tsdb/pkg/config"
"github.com/v3io/v3io-tsdb/pkg/formatter"
"github.com/v3io/v3io-tsdb/pkg/pquerier"
"github.com/v3io/v3io-tsdb/pkg/utils"
)
type queryCommandeer struct {
cmd *cobra.Command
rootCommandeer *RootCommandeer
name string
filter string
to string
from string
last string
functions string
step string
output string
oldQuerier bool
groupBy string
usePreciseAggregations bool
aggregationWindow string
}
func newQueryCommandeer(rootCommandeer *RootCommandeer) *queryCommandeer {
commandeer := &queryCommandeer{
rootCommandeer: rootCommandeer,
}
cmd := &cobra.Command{
Aliases: []string{"get"},
Use: "query [<metrics>] [flags]",
Short: "Query a TSDB instance",
Long: `Query a TSDB instance (table).`,
Example: `The examples assume that the endpoint of the web-gateway service, the login credentials, and
the name of the data container are configured in the default configuration file (` + config.DefaultConfigurationFileName + `)
instead of using the -s|--server, -u|--username, -p|--password, and -c|--container flags.
- tsdbctl query temperature -t mytsdb
- tsdbctl query -t performance -f "starts(__name__, 'cpu') AND os=='win'"
- tsdbctl query metric2,metric3,metric4 -t pmetrics -b 0 -e now-1h -a "sum,avg" -i 20m
- tsdbctl query -t mytsdb -f "LabelA==8.1" -l 1d -o json
- tsdbctl query metric1 -t my_tsdb -l 1d -a "count,sum,avg" --groupBy LabelA,LabelB
- tsdbctl query metric1 -t my_tsdb -l 1d -a "count_all,sum_all"
Notes:
- You must set the metric-name argument (<metrics>) and/or the query-filter flag (-f|--filter).
- Queries that set the metric-name argument (<metrics>) use range scan and are therefore faster.
- To query the full TSDB content, set the -f|--filter to a query filter that always evaluates
to true (such as "1==1"), don't set the <metrics> argument, and set the -b|--begin flag to 0.
- You can use either over-time aggregates or cross-series (*_all) aggregates, but not both in the same query.
Arguments:
<metrics> (string) Comma-separated list of metric names to query. If you don't set this argument, you must
provide a query filter using the -f|--filter flag.`,
RunE: func(cmd *cobra.Command, args []string) error {
// Save the metric name if provided as a positional argument ($1)
if len(args) > 0 {
commandeer.name = args[0]
}
return commandeer.query()
},
}
cmd.Flags().StringVarP(&commandeer.to, "end", "e", "",
"End (maximum) time for the query, as a string containing an\nRFC 3339 time string, a Unix timestamp in milliseconds, or\na relative time of the format \"now\" or \"now-[0-9]+[mhd]\"\n(where 'm' = minutes, 'h' = hours, and 'd' = days).\nExamples: \"2018-09-26T14:10:20Z\"; \"1537971006000\";\n\"now-3h\"; \"now-7d\". (default \"now\")")
cmd.Flags().StringVarP(&commandeer.from, "begin", "b", "",
"Start (minimum) time for the query, as a string containing\nan RFC 3339 time, a Unix timestamp in milliseconds, a\nrelative time of the format \"now\" or \"now-[0-9]+[mhd]\"\n(where 'm' = minutes, 'h' = hours, and 'd' = days), or 0\nfor the earliest time. Examples: \"2016-01-02T15:34:26Z\";\n\"1451748866\"; \"now-90m\"; \"0\". (default = <end time> - 1h)")
cmd.Flags().StringVarP(&commandeer.output, "output", "o", formatter.DefaultOutputFormat,
"Output format in which to display the query results -\n\"text\" | \"csv\" | \"json\".")
cmd.Flags().StringVarP(&commandeer.filter, "filter", "f", "",
"Query filter, as an Iguazio Data Science Platform\nfilter expression. To reference a metric name from within\nthe query filter, use the \"__name__\" attribute.\nExamples: \"method=='get'\"; \"__name__='cpu' AND os=='win'\".")
cmd.Flags().StringVarP(&commandeer.last, "last", "l", "",
"Return data for the specified time period before the\ncurrent time, of the format \"[0-9]+[mhd]\" (where\n'm' = minutes, 'h' = hours, and 'd' = days>). When setting\nthis flag, don't set the -b|--begin or -e|--end flags.\nExamples: \"1h\"; \"15m\"; \"30d\" to return data for the last\n1 hour, 15 minutes, or 30 days.")
cmd.Flags().StringVarP(&commandeer.aggregationWindow, "aggregation-window", "w", "",
"Sliding time window for aggregation. Must be used in conjunction with `-a <aggr>`. Examples: \"1h\"; \"150m\".")
cmd.Flags().StringVarP(&commandeer.functions, "aggregates", "a", "",
"Aggregation information to return, as a comma-separated\nlist of supported aggregation functions - count | avg |\nsum | min | max | stddev | stdvar | last | rate.\nFor cross series aggregations add an \"_all\" suffix for the wanted aggregate.\nNote: you can query either over time aggregates or cross series aggregate but not both in the same query.\nExample: \"sum,min,max,count\", \"sum_all,avg_all\".")
cmd.Flags().StringVarP(&commandeer.step, "aggregation-interval", "i", "",
"Aggregation interval for applying the aggregation functions\n(if set - see the -a|--aggregates flag), of the format\n\"[0-9]+[mhd]\" (where 'm' = minutes, 'h' = hours, and\n'd' = days). Examples: \"1h\"; \"150m\". (default =\n<end time> - <start time>)")
cmd.Flags().StringVar(&commandeer.groupBy, "groupBy", "",
"Comma separated list of labels to group the result by")
cmd.Flags().BoolVar(&commandeer.usePreciseAggregations, "use-precise-aggregations", false,
"Disable server aggregation optimizations for more accurate results.")
cmd.Flags().BoolVarP(&commandeer.oldQuerier, "oldQuerier", "q", false, "use old querier")
cmd.Flags().Lookup("oldQuerier").Hidden = true
commandeer.cmd = cmd
return commandeer
}
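// Editor's aside: the help text above states that over-time aggregates
// (sum, avg, ...) and cross-series "_all" aggregates cannot be mixed in one
// query. A hypothetical Rust validator for that rule (the real check happens
// inside the query engine, not in this CLI file):
fn validate_aggregates(list: &str) -> Result<(), String> {
    let names: Vec<&str> = list.split(',').map(str::trim).filter(|s| !s.is_empty()).collect();
    let cross_series = names.iter().filter(|n| n.ends_with("_all")).count();
    if cross_series != 0 && cross_series != names.len() {
        return Err("cannot mix over-time and cross-series aggregates".to_string());
    }
    Ok(())
}

fn main() {
    assert!(validate_aggregates("sum,min,max,count").is_ok());
    assert!(validate_aggregates("sum_all,avg_all").is_ok());
    assert!(validate_aggregates("sum,avg_all").is_err());
    println!("aggregate mixing rule sketch ok");
}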
func (qc *queryCommandeer) query() error {
if qc.name == "" && qc.filter == "" {
return errors.New("the query command must receive either a metric-name parameter (<metrics>) or a query filter (set via the -f|--filter flag)")
}
if qc.last != "" && (qc.from != "" || qc.to != "") {
return errors.New("the -l|--last flag cannot be set together with the -b|--begin and/or -e|--end flags")
}
// Initialize parameters and adapter
if err := qc.rootCommandeer.initialize(); err != nil {
return err
}
if err := qc.rootCommandeer.startAdapter(); err != nil {
return err
}
step, err := utils.Str2duration(qc.step)
if err != nil {
return err
}
// Set start & end times
to := time.Now().Unix() * 1000
if qc.to != "" {
to, err = utils.Str2unixTime(qc.to)
if err != nil {
return err
}
}
from := to - 1000*3600 // Default start time = one hour before the end time
if qc.from != "" {
from, err = utils.Str2unixTime(qc.from)
if err != nil {
return err
}
}
if qc.last != "" {
last, err := utils.Str2duration(qc.last)
if err != nil {
return err
}
from = to - last
}
qc.rootCommandeer.logger.DebugWith("Query", "from", from, "to", to, "name", qc.name,
"filter", qc.filter, "functions", qc.functions, "step", qc.step, "groupBy", qc.groupBy)
if !qc.oldQuerier {
return qc.newQuery(from, to, step)
}
return qc.oldQuery(from, to, step)
}
func (qc *queryCommandeer) | (from, to, step int64) error {
qry, err := qc.rootCommandeer.adapter.QuerierV2()
if err != nil {
return errors.Wrap(err, "Failed to initialize the Querier object.")
}
aggregationWindow, err := utils.Str2duration(qc.aggregationWindow)
if err != nil {
return errors.Wrap(err, "Failed to parse aggregation window")
}
var selectParams *pquerier.SelectParams
if strings.HasPrefix(qc.name, "select") {
selectParams, _, err = pquerier.ParseQuery(qc.name)
if err != nil {
return errors.Wrap(err, "failed to parse sql")
}
selectParams.Step = step
selectParams.From = from
selectParams.To = to
selectParams.UseOnlyClientAggr = qc.usePreciseAggregations
selectParams.AggregationWindow = aggregationWindow
} else {
selectParams = &pquerier.SelectParams{Name: qc.name, Functions: qc.functions,
Step: step, Filter: qc.filter, From: from, To: to, GroupBy: qc.groupBy,
UseOnlyClientAggr: qc.usePreciseAggregations,
AggregationWindow: aggregationWindow}
}
set, err := qry.Select(selectParams)
if err != nil {
return errors.Wrap(err, "The query selection failed.")
}
f, err := formatter.NewFormatter(qc.output, nil)
if err != nil {
return errors.Wrapf(err, "Failed to start formatter '%s'.", qc.output)
}
err = f.Write(qc.cmd.OutOrStdout(), set)
return err
}
func (qc *queryCommandeer) oldQuery(from, to, step int64) error {
qry, err := qc.rootCommandeer.adapter.Querier(context.TODO(), from, to)
if err != nil {
return errors.Wrap(err, "Failed to initialize the Querier object.")
}
var set utils.SeriesSet
set, err = qry.Select(qc.name, qc.functions, step, qc.filter)
if err != nil {
return errors.Wrap(err, "The query selection failed.")
}
f, err := formatter.NewFormatter(qc.output, nil)
if err != nil {
return errors.Wrapf(err, "Failed to start formatter '%s'.", qc.output)
}
err = f.Write(qc.cmd.OutOrStdout(), set)
return err
}
| newQuery | identifier_name |
query.go | /*
Copyright 2018 Iguazio Systems Ltd.
Licensed under the Apache License, Version 2.0 (the "License") with
an addition restriction as set forth herein. You may not use this
file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
In addition, you may not use the software for any purposes that are
illegal under applicable law, and the grant of the foregoing license
under the Apache 2.0 license is conditioned upon your compliance with
such restriction.
*/
package tsdbctl
import (
"context"
"strings"
"time"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/v3io/v3io-tsdb/pkg/config"
"github.com/v3io/v3io-tsdb/pkg/formatter"
"github.com/v3io/v3io-tsdb/pkg/pquerier"
"github.com/v3io/v3io-tsdb/pkg/utils"
)
type queryCommandeer struct {
cmd *cobra.Command
rootCommandeer *RootCommandeer
name string
filter string
to string
from string
last string
functions string
step string
output string
oldQuerier bool
groupBy string
usePreciseAggregations bool
aggregationWindow string
}
func newQueryCommandeer(rootCommandeer *RootCommandeer) *queryCommandeer {
commandeer := &queryCommandeer{
rootCommandeer: rootCommandeer,
}
cmd := &cobra.Command{
Aliases: []string{"get"},
Use: "query [<metrics>] [flags]",
Short: "Query a TSDB instance",
Long: `Query a TSDB instance (table).`,
Example: `The examples assume that the endpoint of the web-gateway service, the login credentials, and
the name of the data container are configured in the default configuration file (` + config.DefaultConfigurationFileName + `)
instead of using the -s|--server, -u|--username, -p|--password, and -c|--container flags.
- tsdbctl query temperature -t mytsdb
- tsdbctl query -t performance -f "starts(__name__, 'cpu') AND os=='win'"
- tsdbctl query metric2,metric3,metric4 -t pmetrics -b 0 -e now-1h -a "sum,avg" -i 20m
- tsdbctl query -t mytsdb -f "LabelA==8.1" -l 1d -o json
- tsdbctl query metric1 -t my_tsdb -l 1d -a "count,sum,avg" --groupBy LabelA,LabelB
- tsdbctl query metric1 -t my_tsdb -l 1d -a "count_all,sum_all"
Notes:
- You must set the metric-name argument (<metrics>) and/or the query-filter flag (-f|--filter).
- Queries that set the metric-name argument (<metrics>) use range scan and are therefore faster.
- To query the full TSDB content, set the -f|--filter to a query filter that always evaluates
to true (such as "1==1"), don't set the <metrics> argument, and set the -b|--begin flag to 0.
- You can use either over-time aggregates or cross-series (*_all) aggregates, but not both in the same query.
Arguments:
<metrics> (string) Comma-separated list of metric names to query. If you don't set this argument, you must
provide a query filter using the -f|--filter flag.`,
RunE: func(cmd *cobra.Command, args []string) error {
// Save the metric name if provided as a positional argument ($1)
if len(args) > 0 {
commandeer.name = args[0]
}
return commandeer.query()
},
}
cmd.Flags().StringVarP(&commandeer.to, "end", "e", "",
"End (maximum) time for the query, as a string containing an\nRFC 3339 time string, a Unix timestamp in milliseconds, or\na relative time of the format \"now\" or \"now-[0-9]+[mhd]\"\n(where 'm' = minutes, 'h' = hours, and 'd' = days).\nExamples: \"2018-09-26T14:10:20Z\"; \"1537971006000\";\n\"now-3h\"; \"now-7d\". (default \"now\")")
cmd.Flags().StringVarP(&commandeer.from, "begin", "b", "",
"Start (minimum) time for the query, as a string containing\nan RFC 3339 time, a Unix timestamp in milliseconds, a\nrelative time of the format \"now\" or \"now-[0-9]+[mhd]\"\n(where 'm' = minutes, 'h' = hours, and 'd' = days), or 0\nfor the earliest time. Examples: \"2016-01-02T15:34:26Z\";\n\"1451748866\"; \"now-90m\"; \"0\". (default = <end time> - 1h)")
cmd.Flags().StringVarP(&commandeer.output, "output", "o", formatter.DefaultOutputFormat,
"Output format in which to display the query results -\n\"text\" | \"csv\" | \"json\".")
cmd.Flags().StringVarP(&commandeer.filter, "filter", "f", "",
"Query filter, as an Iguazio Data Science Platform\nfilter expression. To reference a metric name from within\nthe query filter, use the \"__name__\" attribute.\nExamples: \"method=='get'\"; \"__name__='cpu' AND os=='win'\".")
cmd.Flags().StringVarP(&commandeer.last, "last", "l", "",
"Return data for the specified time period before the\ncurrent time, of the format \"[0-9]+[mhd]\" (where\n'm' = minutes, 'h' = hours, and 'd' = days>). When setting\nthis flag, don't set the -b|--begin or -e|--end flags.\nExamples: \"1h\"; \"15m\"; \"30d\" to return data for the last\n1 hour, 15 minutes, or 30 days.")
cmd.Flags().StringVarP(&commandeer.aggregationWindow, "aggregation-window", "w", "",
"Sliding time window for aggregation. Must be used in conjunction with `-a <aggr>`. Examples: \"1h\"; \"150m\".")
cmd.Flags().StringVarP(&commandeer.functions, "aggregates", "a", "",
"Aggregation information to return, as a comma-separated\nlist of supported aggregation functions - count | avg |\nsum | min | max | stddev | stdvar | last | rate.\nFor cross series aggregations add an \"_all\" suffix for the wanted aggregate.\nNote: you can query either over time aggregates or cross series aggregate but not both in the same query.\nExample: \"sum,min,max,count\", \"sum_all,avg_all\".")
cmd.Flags().StringVarP(&commandeer.step, "aggregation-interval", "i", "",
"Aggregation interval for applying the aggregation functions\n(if set - see the -a|--aggregates flag), of the format\n\"[0-9]+[mhd]\" (where 'm' = minutes, 'h' = hours, and\n'd' = days). Examples: \"1h\"; \"150m\". (default =\n<end time> - <start time>)")
cmd.Flags().StringVar(&commandeer.groupBy, "groupBy", "",
"Comma separated list of labels to group the result by")
cmd.Flags().BoolVar(&commandeer.usePreciseAggregations, "use-precise-aggregations", false,
"Disable server aggregation optimizations for more accurate results.")
cmd.Flags().BoolVarP(&commandeer.oldQuerier, "oldQuerier", "q", false, "use old querier")
cmd.Flags().Lookup("oldQuerier").Hidden = true
commandeer.cmd = cmd
return commandeer
}
func (qc *queryCommandeer) query() error |
func (qc *queryCommandeer) newQuery(from, to, step int64) error {
qry, err := qc.rootCommandeer.adapter.QuerierV2()
if err != nil {
return errors.Wrap(err, "Failed to initialize the Querier object.")
}
aggregationWindow, err := utils.Str2duration(qc.aggregationWindow)
if err != nil {
return errors.Wrap(err, "Failed to parse aggregation window")
}
var selectParams *pquerier.SelectParams
if strings.HasPrefix(qc.name, "select") {
selectParams, _, err = pquerier.ParseQuery(qc.name)
if err != nil {
return errors.Wrap(err, "failed to parse sql")
}
selectParams.Step = step
selectParams.From = from
selectParams.To = to
selectParams.UseOnlyClientAggr = qc.usePreciseAggregations
selectParams.AggregationWindow = aggregationWindow
} else {
selectParams = &pquerier.SelectParams{Name: qc.name, Functions: qc.functions,
Step: step, Filter: qc.filter, From: from, To: to, GroupBy: qc.groupBy,
UseOnlyClientAggr: qc.usePreciseAggregations,
AggregationWindow: aggregationWindow}
}
set, err := qry.Select(selectParams)
if err != nil {
return errors.Wrap(err, "The query selection failed.")
}
f, err := formatter.NewFormatter(qc.output, nil)
if err != nil {
return errors.Wrapf(err, "Failed to start formatter '%s'.", qc.output)
}
err = f.Write(qc.cmd.OutOrStdout(), set)
return err
}
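// Editor's aside: newQuery above routes a metric name that begins with
// "select" to pquerier.ParseQuery instead of building SelectParams field by
// field. The dispatch shape, sketched in Rust (the Go original checks the
// exact lowercase prefix; the SQL text below is a made-up example, not
// verified pquerier syntax):
enum Request {
    Sql(String),
    Simple { name: String, filter: String },
}

fn classify(name: &str, filter: &str) -> Request {
    if name.starts_with("select") {
        Request::Sql(name.to_string())
    } else {
        Request::Simple { name: name.to_string(), filter: filter.to_string() }
    }
}

fn main() {
    match classify("select avg(cpu) from mytable", "") {
        Request::Sql(q) => println!("parse as SQL: {q}"),
        Request::Simple { name, .. } => println!("range scan on {name}"),
    }
}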
func (qc *queryCommandeer) oldQuery(from, to, step int64) error {
qry, err := qc.rootCommandeer.adapter.Querier(context.TODO(), from, to)
if err != nil {
return errors.Wrap(err, "Failed to initialize the Querier object.")
}
var set utils.SeriesSet
set, err = qry.Select(qc.name, qc.functions, step, qc.filter)
if err != nil {
return errors.Wrap(err, "The query selection failed.")
}
f, err := formatter.NewFormatter(qc.output, nil)
if err != nil {
return errors.Wrapf(err, "Failed to start formatter '%s'.", qc.output)
}
err = f.Write(qc.cmd.OutOrStdout(), set)
return err
}
| {
if qc.name == "" && qc.filter == "" {
return errors.New("the query command must receive either a metric-name parameter (<metrics>) or a query filter (set via the -f|--filter flag)")
}
if qc.last != "" && (qc.from != "" || qc.to != "") {
return errors.New("the -l|--last flag cannot be set together with the -b|--begin and/or -e|--end flags")
}
// Initialize parameters and adapter
if err := qc.rootCommandeer.initialize(); err != nil {
return err
}
if err := qc.rootCommandeer.startAdapter(); err != nil {
return err
}
step, err := utils.Str2duration(qc.step)
if err != nil {
return err
}
// Set start & end times
to := time.Now().Unix() * 1000
if qc.to != "" {
to, err = utils.Str2unixTime(qc.to)
if err != nil {
return err
}
}
from := to - 1000*3600 // Default start time = one hour before the end time
if qc.from != "" {
from, err = utils.Str2unixTime(qc.from)
if err != nil {
return err
}
}
if qc.last != "" {
last, err := utils.Str2duration(qc.last)
if err != nil {
return err
}
from = to - last
}
qc.rootCommandeer.logger.DebugWith("Query", "from", from, "to", to, "name", qc.name,
"filter", qc.filter, "functions", qc.functions, "step", qc.step, "groupBy", qc.groupBy)
if !qc.oldQuerier {
return qc.newQuery(from, to, step)
}
return qc.oldQuery(from, to, step)
} | identifier_body |
main0.rs | use std::error::Error;
use mio::net::{ TcpListener, TcpStream, UdpSocket};
use mio::{Events, Interest, Poll, Token};
use std::io::{Read, Write};
use hex;
use rand::{thread_rng, Rng};
use keccak_hash::keccak;
use secp256k1::{SecretKey, PublicKey, Message, RecoveryId, Signature, sign, recover};
use rlp::{Rlp, RlpStream};
use std::collections::VecDeque;
mod message;
use message::PeerInfo;
fn print_message_type(message_type: u8) {
match message_type {
0x01 => {
println!("ping message");
},
0x02 => {
println!("pong message");
},
0x03 => {
println!("find neighbours message");
},
0x04 => {
println!("neighbours message");
},
_ => {
println!("unknow message");
},
}
}
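// Editor's aside: the read loop below carves each discovery datagram at fixed
// offsets. A helper that names those offsets (verification itself still needs
// keccak and secp256k1, exactly as the handler does; this struct is an
// annotation aid, not part of the original program):
struct Packet<'a> {
    hash: &'a [u8],        // bytes 0..32: keccak256 of everything after them
    signature: &'a [u8],   // bytes 32..96: 64-byte secp256k1 signature
    recovery_id: u8,       // byte 96
    msg_type: u8,          // byte 97: 0x01 ping .. 0x04 neighbours
    rlp_payload: &'a [u8], // bytes 98..: RLP-encoded message body
}

fn split_packet(buf: &[u8]) -> Option<Packet<'_>> {
    if buf.len() < 98 {
        return None; // too short to hold hash + signature + type byte
    }
    Some(Packet {
        hash: &buf[0..32],
        signature: &buf[32..96],
        recovery_id: buf[96],
        msg_type: buf[97],
        rlp_payload: &buf[98..],
    })
}

fn main() {
    let datagram = vec![0u8; 120];
    let p = split_packet(&datagram).unwrap();
    println!("hash {}B, sig {}B, rid {}, type 0x{:02x}, rlp {}B",
        p.hash.len(), p.signature.len(), p.recovery_id, p.msg_type, p.rlp_payload.len());
}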
fn main() -> Result<(), Box<dyn Error>> {
let mut arr = [0u8; 32];
thread_rng().fill(&mut arr[..]);
// let data = vec![0x83, b'c', b'a', b't'];
// let aa: String = rlp::decode(&data).unwrap();
// println!("aa = {:?}", aa);
// let pk = hex::decode("ee5495585eff78f2fcf95bab21ef1a598c54d1e3c672e23b3bb97a4fc7490660").unwrap();
let private_key = SecretKey::parse_slice(&arr[0..arr.len()]).unwrap();
// let private_key = SecretKey::parse_slice(&pk).unwrap();
let pubkey = PublicKey::from_secret_key(&private_key);
let id = &pubkey.serialize().to_vec()[1..];
println!("id is {:?}", hex::encode(&id));
const CLIENT: Token = Token(0);
const SENDER: Token = Token(1); // distinct token values so the UDP (SENDER) and TCP (CLIENT) arms are both reachable
let udp_server_ip = "35.180.217.147";
let udp_server_port = "30304";
let upd_server_addr = "35.180.217.147:30304";
let local_udp_addr = "192.168.31.125:30309";
let mut udp_socket = UdpSocket::bind(local_udp_addr.parse()?)?;
// let local_addr = udp_socket.local_addr()?;
//
// println!("local_addr = {:?}", local_addr);
println!("private_key is {:?}", private_key);
let mut poll = Poll::new()?;
let mut events = Events::with_capacity(1024);
let addr = "192.168.31.248:30303".parse()?;
let peer = PeerInfo::from_sock_addr(&upd_server_addr.parse()?);
let local_peer = PeerInfo::from_sock_addr(&local_udp_addr.parse()?);
let mut sent_ping = false;
// message::encode_ping(&peer, &peer, &private_key);
// println!("peer ip {:?}", peer.encode());
// let addr = "127.0.0.1:9000".parse()?;
let mut send_queue: VecDeque<Vec<u8>> = VecDeque::new();
let mut client = TcpStream::connect(addr)?;
let mut status_sent = false;
poll.registry().register(&mut client, CLIENT, Interest::READABLE | Interest::WRITABLE)?;
poll.registry().register(&mut udp_socket, SENDER, Interest::READABLE | Interest::WRITABLE)?;
let mut received_data = Vec::with_capacity(4096);
send_queue.push_back(message::encode_ping(&local_peer, &peer, &private_key));
loop {
poll.poll(&mut events, None)?;
for event in events.iter() {
match event.token() {
SENDER => {
println!("udp socket is active");
if event.is_writable() {
'inner: loop {
if let Some(buf) = send_queue.pop_front() {
match udp_socket.send_to(&buf, upd_server_addr.parse()?) {
Ok(size) => {
println!("sent {:?} bytes(total {:?})", size, buf.len());
// we have some buf remain for next time
if size < buf.len() {
if size == 0 {
send_queue.push_front(buf);
}
break 'inner;
}
},
Err(e) => {
println!("send error {:?}", e);
break 'inner;
}
}
} else {
println!("no data to send, reregister for next writable event");
break 'inner;
}
}
}
if event.is_readable() {
'read: loop {
let mut buf = [0; 1024];
match udp_socket.recv_from(&mut buf) {
Ok((size, addr)) => {
println!("read {:?} bytes from {:?}", size, addr);
if size > 0 {
let read_buf = &buf[..size];
let hash_signed = keccak(&read_buf[32..]);
println!("hash_signed = {:?}", hash_signed);
println!("check_sum = {:?}", hex::encode(&read_buf[0..32]));
// if hash_signed.as_bytes() != &read_buf[0..32] {
// // return Box::new(Err("bad protocol"));
// break;
// }
let signed = &read_buf[(32 + 65)..];
let message_type = signed[0];
print_message_type(message_type);
println!("message_type is {:?}", message_type);
let recover_id = RecoveryId::parse(read_buf[32 + 64]).expect("can not get recover id");
println!("recover_id = {:?}", recover_id);
let signature = Signature::parse_slice(&read_buf[32..(32 + 64)]).expect("can not get signature");
let hash = keccak(signed);
let pubkey = recover(&Message::parse_slice(&hash).unwrap(), &signature, &recover_id).expect("can not recover pubkey");
println!("pubkey is {:?}", hex::encode(&pubkey.serialize_compressed().to_vec()));
let rlp = Rlp::new(&signed[1..]);
if message_type == 0x01 {
// got a ping message
let version: u8 = rlp.val_at(0)?;
let from_peer = PeerInfo::decode_rlp(&rlp.at(1)?)?;
let to_peer = PeerInfo::decode_rlp(&rlp.at(2)?)?;
println!("from_peer = {:?}, to_peer = {:?}", from_peer, to_peer);
let timestamp: u64 = rlp.val_at(3)?;
println!("version = {:?}, timestamp = {:?}", version, timestamp);
// send pong message
let from = PeerInfo::from_sock_addr(&addr);
let bytes = message::encode_pong(&from, &read_buf[0..32].to_vec(), ×tamp, &private_key); |
} else if message_type == 0x02 {
// got a pong message
let from_peer = PeerInfo::decode_rlp(&rlp.at(0)?)?;
let hash_bytes = rlp.at(1)?.data()?;
let timestamp: u64 = rlp.val_at(2)?;
println!("got a pong message {:?} {:?}", from_peer, timestamp);
// start send findneighbours packet
let bytes = message::encode_find_node(&private_key);
println!("find node bytes is {:?}", bytes.len());
send_queue.push_back(bytes);
} else if message_type == 0x03 {
println!("got a find node message");
} else if message_type == 0x04 {
println!("got a node message");
}
poll.registry().reregister(&mut udp_socket, event.token(), Interest::WRITABLE)?;
// we have read all data
if size < buf.len() {
println!("no more data read");
break 'read;
}
} else {
println!("no data read");
break 'read;
}
},
Err(e) => {
println!("read error {:?}", e);
break 'read;
}
}
}
}
},
CLIENT => {
if event.is_readable() {
println!("client socket is readable");
// read buf
let mut buf = [0; 1024];
match client.read(&mut buf) {
Ok(n) => {
if n > 0 {
received_data.extend_from_slice(&buf[..n]);
println!("read data: {:?}", String::from_utf8_lossy(&received_data));
}
println!("read {:?} bytes", n);
},
Err(err) => {
println!("read data error {:?}", err);
}
}
}
if event.is_writable() {
// send auth info
let auth = "0196045fa704aa5f5a85f36c6b399b08d823083228d63c4346f382f78a18b684f3a4e64a671de498abf20cba88dd8f3f0a11443bed18248895b981e0c842e9e4fafe387cf9ad619ba89fe7dbfa6f504725bb673a804f3526df31c68a69caf9bc7a9eed62fe73dffdeae5e21f55e2a1ec28e17ad5f98bd0a61759fe25f8f96665278197413d86ab84ea2f3adbf70634b49d13b4b55037e23f393ddc2ae46e63d4c3d1b67945bcf22d03183a1b1ff3b9b74cf3d83a8093489b508759c5042ca0d7de29aa6eb024800868594f848f646f1488c7bbf2a598d411a7333db52168f53e04e28b260e218233e9641232304625ba67cbaa7b6a3703161235ab41758d466701beac1a08e5edc612e42cb7235d43cbdd51ff7bb3cbe4720dfa165f084dafce2c84795eb619016647c9aef4d6d9b31e1a4b1e3b18e856a025ab99275b8b860816259ddf86cdc20c22e0f6f70445258113fade6d38814cb88d8c0693a64880088563cb02ff15236bca24720aaaa9da219c0f2fa71f8a4b1e34793a330b31ccfbdcbaf0026c881d5761b198be428feb93b170afe95174722f";
let buf = hex::decode(auth).unwrap();
if !status_sent {
status_sent = true;
match client.write_all(&buf) {
Ok(_) => {
println!("write ok");
},
Err(err) => {
println!("read data error {:?}", err);
}
}
}
println!("client socket is writable");
}
println!("client event token {:?}", event.token());
},
_ => {
}
}
}
}
println!("Hello, world!");
} | println!("pong bytes is {:?}", bytes.len());
send_queue.push_back(bytes);
// send_queue | random_line_split |
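// Editor's aside: the writable branch above drains send_queue with a subtle
// rule -- a buffer is requeued only when zero bytes went out; a partial send
// is dropped (for UDP a datagram is all-or-nothing, so the partial case
// should not occur). The same logic isolated for testing; `send` stands in
// for udp_socket.send_to:
use std::collections::VecDeque;

fn drain(queue: &mut VecDeque<Vec<u8>>, mut send: impl FnMut(&[u8]) -> usize) {
    while let Some(buf) = queue.pop_front() {
        let sent = send(&buf);
        if sent < buf.len() {
            if sent == 0 {
                queue.push_front(buf); // nothing went out; retry on the next writable event
            }
            break;
        }
    }
}

fn main() {
    let mut queue: VecDeque<Vec<u8>> = VecDeque::from(vec![vec![1u8; 4], vec![2u8; 4]]);
    let mut budget = 4; // pretend the socket accepts only 4 bytes total
    drain(&mut queue, |b| { let n = b.len().min(budget); budget -= n; n });
    assert_eq!(queue.len(), 1); // the second datagram stays queued
    println!("drain sketch ok");
}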
main0.rs | use std::error::Error;
use mio::net::{ TcpListener, TcpStream, UdpSocket};
use mio::{Events, Interest, Poll, Token};
use std::io::{Read, Write};
use hex;
use rand::{thread_rng, Rng};
use keccak_hash::keccak;
use secp256k1::{SecretKey, PublicKey, Message, RecoveryId, Signature, sign, recover};
use rlp::{Rlp, RlpStream};
use std::collections::VecDeque;
mod message;
use message::PeerInfo;
fn print_message_type(message_type: u8) {
match message_type {
0x01 => {
println!("ping message");
},
0x02 => {
println!("pong message");
},
0x03 => {
println!("find neighbours message");
},
0x04 => {
println!("neighbours message");
},
_ => {
println!("unknow message");
},
}
}
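// Editor's aside: in the read loop below, the integrity comparison is left
// commented out. A condensed verifier with the check enforced, built from the
// same crates and calls this file already uses (a sketch assuming those crate
// versions; untested against live traffic):
use keccak_hash::keccak;
use secp256k1::{recover, Message, PublicKey, RecoveryId, Signature};

fn sender_pubkey(datagram: &[u8]) -> Option<PublicKey> {
    if datagram.len() < 98 {
        return None;
    }
    if keccak(&datagram[32..]).as_bytes() != &datagram[0..32] {
        return None; // checksum mismatch
    }
    let signature = Signature::parse_slice(&datagram[32..96]).ok()?;
    let recovery_id = RecoveryId::parse(datagram[96]).ok()?;
    let signed_hash = keccak(&datagram[97..]); // type byte + RLP body
    let message = Message::parse_slice(signed_hash.as_bytes()).ok()?;
    recover(&message, &signature, &recovery_id).ok()
}

fn main() {
    assert!(sender_pubkey(&[0u8; 120]).is_none()); // junk datagram is rejected
    println!("verification sketch ok");
}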
fn main() -> Result<(), Box<dyn Error>> {
let mut arr = [0u8; 32];
thread_rng().fill(&mut arr[..]);
// let data = vec![0x83, b'c', b'a', b't'];
// let aa: String = rlp::decode(&data).unwrap();
// println!("aa = {:?}", aa);
// let pk = hex::decode("ee5495585eff78f2fcf95bab21ef1a598c54d1e3c672e23b3bb97a4fc7490660").unwrap();
let private_key = SecretKey::parse_slice(&arr[0..arr.len()]).unwrap();
// let private_key = SecretKey::parse_slice(&pk).unwrap();
let pubkey = PublicKey::from_secret_key(&private_key);
let id = &pubkey.serialize().to_vec()[1..];
println!("id is {:?}", hex::encode(&id));
const CLIENT: Token = Token(0);
const SENDER: Token = Token(1); // distinct token values so the UDP (SENDER) and TCP (CLIENT) arms are both reachable
let udp_server_ip = "35.180.217.147";
let udp_server_port = "30304";
let upd_server_addr = "35.180.217.147:30304";
let local_udp_addr = "192.168.31.125:30309";
let mut udp_socket = UdpSocket::bind(local_udp_addr.parse()?)?;
// let local_addr = udp_socket.local_addr()?;
//
// println!("local_addr = {:?}", local_addr);
println!("private_key is {:?}", private_key);
let mut poll = Poll::new()?;
let mut events = Events::with_capacity(1024);
let addr = "192.168.31.248:30303".parse()?;
let peer = PeerInfo::from_sock_addr(&upd_server_addr.parse()?);
let local_peer = PeerInfo::from_sock_addr(&local_udp_addr.parse()?);
let mut sent_ping = false;
// message::encode_ping(&peer, &peer, &private_key);
// println!("peer ip {:?}", peer.encode());
// let addr = "127.0.0.1:9000".parse()?;
let mut send_queue: VecDeque<Vec<u8>> = VecDeque::new();
let mut client = TcpStream::connect(addr)?;
let mut status_sent = false;
poll.registry().register(&mut client, CLIENT, Interest::READABLE | Interest::WRITABLE)?;
poll.registry().register(&mut udp_socket, SENDER, Interest::READABLE | Interest::WRITABLE)?;
let mut received_data = Vec::with_capacity(4096);
send_queue.push_back(message::encode_ping(&local_peer, &peer, &private_key));
loop {
poll.poll(&mut events, None)?;
for event in events.iter() {
match event.token() {
SENDER => {
println!("udp socket is active");
if event.is_writable() {
'inner: loop {
if let Some(buf) = send_queue.pop_front() {
match udp_socket.send_to(&buf, upd_server_addr.parse()?) {
Ok(size) => {
println!("sent {:?} bytes(total {:?})", size, buf.len());
// we have some buf remain for next time
if size < buf.len() {
if size == 0 {
send_queue.push_front(buf);
}
break 'inner;
}
},
Err(e) => {
println!("send error {:?}", e);
break 'inner;
}
}
} else {
println!("no data to send, reregister for next writable event");
break 'inner;
}
}
}
if event.is_readable() {
'read: loop {
let mut buf = [0; 1024];
match udp_socket.recv_from(&mut buf) {
Ok((size, addr)) => {
println!("read {:?} bytes from {:?}", size, addr);
if size > 0 {
let read_buf = &buf[..size];
let hash_signed = keccak(&read_buf[32..]);
println!("hash_signed = {:?}", hash_signed);
println!("check_sum = {:?}", hex::encode(&read_buf[0..32]));
// if hash_signed.as_bytes() != &read_buf[0..32] {
// // return Box::new(Err("bad protocol"));
// break;
// }
let signed = &read_buf[(32 + 65)..];
let message_type = signed[0];
print_message_type(message_type);
println!("message_type is {:?}", message_type);
let recover_id = RecoveryId::parse(read_buf[32 + 64]).expect("can not get recover id");
println!("recover_id = {:?}", recover_id);
let signature = Signature::parse_slice(&read_buf[32..(32 + 64)]).expect("can not get signature");
let hash = keccak(signed);
let pubkey = recover(&Message::parse_slice(&hash).unwrap(), &signature, &recover_id).expect("can not recover pubkey");
println!("pubkey is {:?}", hex::encode(&pubkey.serialize_compressed().to_vec()));
let rlp = Rlp::new(&signed[1..]);
if message_type == 0x01 {
// got a ping message
let version: u8 = rlp.val_at(0)?;
let from_peer = PeerInfo::decode_rlp(&rlp.at(1)?)?;
let to_peer = PeerInfo::decode_rlp(&rlp.at(2)?)?;
println!("from_peer = {:?}, to_peer = {:?}", from_peer, to_peer);
let timestamp: u64 = rlp.val_at(3)?;
println!("version = {:?}, timestamp = {:?}", version, timestamp);
// send pong message
let from = PeerInfo::from_sock_addr(&addr);
let bytes = message::encode_pong(&from, &read_buf[0..32].to_vec(), ×tamp, &private_key);
println!("pong bytes is {:?}", bytes.len());
send_queue.push_back(bytes);
// send_queue
} else if message_type == 0x02 {
// got a pong message
let from_peer = PeerInfo::decode_rlp(&rlp.at(0)?)?;
let hash_bytes = rlp.at(1)?.data()?;
let timestamp: u64 = rlp.val_at(2)?;
println!("got a pong message {:?} {:?}", from_peer, timestamp);
// start send findneighbours packet
let bytes = message::encode_find_node(&private_key);
println!("find node bytes is {:?}", bytes.len());
send_queue.push_back(bytes);
} else if message_type == 0x03 {
println!("got a find node message");
} else if message_type == 0x04 {
println!("got a node message");
}
poll.registry().reregister(&mut udp_socket, event.token(), Interest::WRITABLE)?;
// we have read all data
if size < buf.len() {
println!("no more data read");
break 'read;
}
} else {
println!("no data read");
break 'read;
}
},
Err(e) => {
println!("read error {:?}", e);
break 'read;
}
}
}
}
},
CLIENT => {
if event.is_readable() {
println!("client socket is readable");
// read buf
let mut buf = [0; 1024];
match client.read(&mut buf) {
Ok(n) => {
if n > 0 {
received_data.extend_from_slice(&buf[..n]);
println!("read data: {:?}", String::from_utf8_lossy(&received_data));
}
println!("read {:?} bytes", n);
},
Err(err) => {
println!("read data error {:?}", err);
}
}
}
if event.is_writable() {
// send auth info
let auth = "0196045fa704aa5f5a85f36c6b399b08d823083228d63c4346f382f78a18b684f3a4e64a671de498abf20cba88dd8f3f0a11443bed18248895b981e0c842e9e4fafe387cf9ad619ba89fe7dbfa6f504725bb673a804f3526df31c68a69caf9bc7a9eed62fe73dffdeae5e21f55e2a1ec28e17ad5f98bd0a61759fe25f8f96665278197413d86ab84ea2f3adbf70634b49d13b4b55037e23f393ddc2ae46e63d4c3d1b67945bcf22d03183a1b1ff3b9b74cf3d83a8093489b508759c5042ca0d7de29aa6eb024800868594f848f646f1488c7bbf2a598d411a7333db52168f53e04e28b260e218233e9641232304625ba67cbaa7b6a3703161235ab41758d466701beac1a08e5edc612e42cb7235d43cbdd51ff7bb3cbe4720dfa165f084dafce2c84795eb619016647c9aef4d6d9b31e1a4b1e3b18e856a025ab99275b8b860816259ddf86cdc20c22e0f6f70445258113fade6d38814cb88d8c0693a64880088563cb02ff15236bca24720aaaa9da219c0f2fa71f8a4b1e34793a330b31ccfbdcbaf0026c881d5761b198be428feb93b170afe95174722f";
let buf = hex::decode(auth).unwrap();
if !status_sent {
status_sent = true;
match client.write_all(&buf) {
Ok(_) => | ,
Err(err) => {
println!("read data error {:?}", err);
}
}
}
println!("client socket is writable");
}
println!("client event token {:?}", event.token());
},
_ => {
}
}
}
}
println!("Hello, world!");
}
| {
println!("write ok");
} | conditional_block |
main0.rs | use std::error::Error;
use mio::net::{ TcpListener, TcpStream, UdpSocket};
use mio::{Events, Interest, Poll, Token};
use std::io::{Read, Write};
use hex;
use rand::{thread_rng, Rng};
use keccak_hash::keccak;
use secp256k1::{SecretKey, PublicKey, Message, RecoveryId, Signature, sign, recover};
use rlp::{Rlp, RlpStream};
use std::collections::VecDeque;
mod message;
use message::PeerInfo;
fn print_message_type(message_type: u8) |
fn main() -> Result<(), Box<dyn Error>> {
let mut arr = [0u8; 32];
thread_rng().fill(&mut arr[..]);
// let data = vec![0x83, b'c', b'a', b't'];
// let aa: String = rlp::decode(&data).unwrap();
// println!("aa = {:?}", aa);
// let pk = hex::decode("ee5495585eff78f2fcf95bab21ef1a598c54d1e3c672e23b3bb97a4fc7490660").unwrap();
let private_key = SecretKey::parse_slice(&arr[0..arr.len()]).unwrap();
// let private_key = SecretKey::parse_slice(&pk).unwrap();
let pubkey = PublicKey::from_secret_key(&private_key);
let id = &pubkey.serialize().to_vec()[1..];
println!("id is {:?}", hex::encode(&id));
const CLIENT: Token = Token(0);
const SENDER: Token = Token(1); // distinct token values so the UDP (SENDER) and TCP (CLIENT) arms are both reachable
let udp_server_ip = "35.180.217.147";
let udp_server_port = "30304";
let upd_server_addr = "35.180.217.147:30304";
let local_udp_addr = "192.168.31.125:30309";
let mut udp_socket = UdpSocket::bind(local_udp_addr.parse()?)?;
// let local_addr = udp_socket.local_addr()?;
//
// println!("local_addr = {:?}", local_addr);
println!("private_key is {:?}", private_key);
let mut poll = Poll::new()?;
let mut events = Events::with_capacity(1024);
let addr = "192.168.31.248:30303".parse()?;
let peer = PeerInfo::from_sock_addr(&upd_server_addr.parse()?);
let local_peer = PeerInfo::from_sock_addr(&local_udp_addr.parse()?);
let mut sent_ping = false;
// message::encode_ping(&peer, &peer, &private_key);
// println!("peer ip {:?}", peer.encode());
// let addr = "127.0.0.1:9000".parse()?;
let mut send_queue: VecDeque<Vec<u8>> = VecDeque::new();
let mut client = TcpStream::connect(addr)?;
let mut status_sent = false;
poll.registry().register(&mut client, CLIENT, Interest::READABLE | Interest::WRITABLE)?;
poll.registry().register(&mut udp_socket, SENDER, Interest::READABLE | Interest::WRITABLE)?;
let mut received_data = Vec::with_capacity(4096);
send_queue.push_back(message::encode_ping(&local_peer, &peer, &private_key));
loop {
poll.poll(&mut events, None)?;
for event in events.iter() {
match event.token() {
SENDER => {
println!("udp socket is active");
if event.is_writable() {
'inner: loop {
if let Some(buf) = send_queue.pop_front() {
match udp_socket.send_to(&buf, upd_server_addr.parse()?) {
Ok(size) => {
println!("sent {:?} bytes(total {:?})", size, buf.len());
// we have some buf remain for next time
if size < buf.len() {
if size == 0 {
send_queue.push_front(buf);
}
break 'inner;
}
},
Err(e) => {
println!("send error {:?}", e);
break 'inner;
}
}
} else {
println!("no data to send, reregister for next writable event");
break 'inner;
}
}
}
if event.is_readable() {
'read: loop {
let mut buf = [0; 1024];
match udp_socket.recv_from(&mut buf) {
Ok((size, addr)) => {
println!("read {:?} bytes from {:?}", size, addr);
if size > 0 {
let read_buf = &buf[..size];
let hash_signed = keccak(&read_buf[32..]);
println!("hash_signed = {:?}", hash_signed);
println!("check_sum = {:?}", hex::encode(&read_buf[0..32]));
// if hash_signed.as_bytes() != &read_buf[0..32] {
// // return Box::new(Err("bad protocol"));
// break;
// }
let signed = &read_buf[(32 + 65)..];
let message_type = signed[0];
print_message_type(message_type);
println!("message_type is {:?}", message_type);
let recover_id = RecoveryId::parse(read_buf[32 + 64]).expect("can not get recover id");
println!("recover_id = {:?}", recover_id);
let signature = Signature::parse_slice(&read_buf[32..(32 + 64)]).expect("can not get signature");
let hash = keccak(signed);
let pubkey = recover(&Message::parse_slice(&hash).unwrap(), &signature, &recover_id).expect("can not recover pubkey");
println!("pubkey is {:?}", hex::encode(&pubkey.serialize_compressed().to_vec()));
let rlp = Rlp::new(&signed[1..]);
if message_type == 0x01 {
// got a ping message
let version: u8 = rlp.val_at(0)?;
let from_peer = PeerInfo::decode_rlp(&rlp.at(1)?)?;
let to_peer = PeerInfo::decode_rlp(&rlp.at(2)?)?;
println!("from_peer = {:?}, to_peer = {:?}", from_peer, to_peer);
let timestamp: u64 = rlp.val_at(3)?;
println!("version = {:?}, timestamp = {:?}", version, timestamp);
// send pong message
let from = PeerInfo::from_sock_addr(&addr);
let bytes = message::encode_pong(&from, &read_buf[0..32].to_vec(), ×tamp, &private_key);
println!("pong bytes is {:?}", bytes.len());
send_queue.push_back(bytes);
// send_queue
} else if message_type == 0x02 {
// got a pong message
let from_peer = PeerInfo::decode_rlp(&rlp.at(0)?)?;
let hash_bytes = rlp.at(1)?.data()?;
let timestamp: u64 = rlp.val_at(2)?;
println!("got a pong message {:?} {:?}", from_peer, timestamp);
// start send findneighbours packet
let bytes = message::encode_find_node(&private_key);
println!("find node bytes is {:?}", bytes.len());
send_queue.push_back(bytes);
} else if message_type == 0x03 {
println!("got a find node message");
} else if message_type == 0x04 {
println!("got a node message");
}
poll.registry().reregister(&mut udp_socket, event.token(), Interest::WRITABLE)?;
// we have read all data
if size < buf.len() {
println!("no more data read");
break 'read;
}
} else {
println!("no data read");
break 'read;
}
},
Err(e) => {
println!("read error {:?}", e);
break 'read;
}
}
}
}
},
CLIENT => {
if event.is_readable() {
println!("client socket is readable");
// read buf
let mut buf = [0; 1024];
match client.read(&mut buf) {
Ok(n) => {
if n > 0 {
received_data.extend_from_slice(&buf[..n]);
println!("read data: {:?}", String::from_utf8_lossy(&received_data));
}
println!("read {:?} bytes", n);
},
Err(err) => {
println!("read data error {:?}", err);
}
}
}
if event.is_writable() {
// send auth info
let auth = "0196045fa704aa5f5a85f36c6b399b08d823083228d63c4346f382f78a18b684f3a4e64a671de498abf20cba88dd8f3f0a11443bed18248895b981e0c842e9e4fafe387cf9ad619ba89fe7dbfa6f504725bb673a804f3526df31c68a69caf9bc7a9eed62fe73dffdeae5e21f55e2a1ec28e17ad5f98bd0a61759fe25f8f96665278197413d86ab84ea2f3adbf70634b49d13b4b55037e23f393ddc2ae46e63d4c3d1b67945bcf22d03183a1b1ff3b9b74cf3d83a8093489b508759c5042ca0d7de29aa6eb024800868594f848f646f1488c7bbf2a598d411a7333db52168f53e04e28b260e218233e9641232304625ba67cbaa7b6a3703161235ab41758d466701beac1a08e5edc612e42cb7235d43cbdd51ff7bb3cbe4720dfa165f084dafce2c84795eb619016647c9aef4d6d9b31e1a4b1e3b18e856a025ab99275b8b860816259ddf86cdc20c22e0f6f70445258113fade6d38814cb88d8c0693a64880088563cb02ff15236bca24720aaaa9da219c0f2fa71f8a4b1e34793a330b31ccfbdcbaf0026c881d5761b198be428feb93b170afe95174722f";
let buf = hex::decode(auth).unwrap();
if !status_sent {
status_sent = true;
match client.write_all(&buf) {
Ok(_) => {
println!("write ok");
},
Err(err) => {
println!("read data error {:?}", err);
}
}
}
println!("client socket is writable");
}
println!("client event token {:?}", event.token());
},
_ => {
}
}
}
}
println!("Hello, world!");
}
| {
match message_type {
0x01 => {
println!("ping message");
},
0x02 => {
println!("pong message");
},
0x03 => {
println!("find neighbours message");
},
0x04 => {
println!("neighbours message");
},
_ => {
println!("unknow message");
},
}
} | identifier_body |
main0.rs | use std::error::Error;
use mio::net::{ TcpListener, TcpStream, UdpSocket};
use mio::{Events, Interest, Poll, Token};
use std::io::{Read, Write};
use hex;
use rand::{thread_rng, Rng};
use keccak_hash::keccak;
use secp256k1::{SecretKey, PublicKey, Message, RecoveryId, Signature, sign, recover};
use rlp::{Rlp, RlpStream};
use std::collections::VecDeque;
mod message;
use message::PeerInfo;
fn print_message_type(message_type: u8) {
match message_type {
0x01 => {
println!("ping message");
},
0x02 => {
println!("pong message");
},
0x03 => {
println!("find neighbours message");
},
0x04 => {
println!("neighbours message");
},
_ => {
println!("unknow message");
},
}
}
fn | () -> Result<(), Box<dyn Error>> {
let mut arr = [0u8; 32];
thread_rng().fill(&mut arr[..]);
// let data = vec![0x83, b'c', b'a', b't'];
// let aa: String = rlp::decode(&data).unwrap();
// println!("aa = {:?}", aa);
// let pk = hex::decode("ee5495585eff78f2fcf95bab21ef1a598c54d1e3c672e23b3bb97a4fc7490660").unwrap();
let private_key = SecretKey::parse_slice(&arr[0..arr.len()]).unwrap();
// let private_key = SecretKey::parse_slice(&pk).unwrap();
let pubkey = PublicKey::from_secret_key(&private_key);
let id = &pubkey.serialize().to_vec()[1..];
println!("id is {:?}", hex::encode(&id));
const CLIENT: Token = Token(0);
const SENDER: Token = Token(1); // distinct from CLIENT so the match on event.token() can tell the sockets apart
let udp_server_ip = "35.180.217.147";
let udp_server_port = "30304";
let udp_server_addr = "35.180.217.147:30304";
let local_udp_addr = "192.168.31.125:30309";
let mut udp_socket = UdpSocket::bind(local_udp_addr.parse()?)?;
// let local_addr = udp_socket.local_addr()?;
//
// println!("local_addr = {:?}", local_addr);
println!("private_key is {:?}", private_key);
let mut poll = Poll::new()?;
let mut events = Events::with_capacity(1024);
let addr = "192.168.31.248:30303".parse()?;
let peer = PeerInfo::from_sock_addr(&udp_server_addr.parse()?);
let local_peer = PeerInfo::from_sock_addr(&local_udp_addr.parse()?);
let mut sent_ping = false;
// message::encode_ping(&peer, &peer, &private_key);
// println!("peer ip {:?}", peer.encode());
// let addr = "127.0.0.1:9000".parse()?;
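// Outgoing datagrams are queued here and flushed once mio reports the UDP
// socket writable; replies generated while reading are queued the same way.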
let mut send_queue: VecDeque<Vec<u8>> = VecDeque::new();
let mut client = TcpStream::connect(addr)?;
let mut status_sent = false;
poll.registry().register(&mut client, CLIENT, Interest::READABLE | Interest::WRITABLE)?;
poll.registry().register(&mut udp_socket, SENDER, Interest::READABLE | Interest::WRITABLE)?;
let mut received_data = Vec::with_capacity(4096);
send_queue.push_back(message::encode_ping(&local_peer, &peer, &private_key));
loop {
poll.poll(&mut events, None)?;
for event in events.iter() {
match event.token() {
SENDER => {
println!("udp socket is active");
if event.is_writable() {
'inner: loop {
if let Some(buf) = send_queue.pop_front() {
match udp_socket.send_to(&buf, udp_server_addr.parse()?) {
Ok(size) => {
println!("sent {:?} bytes(total {:?})", size, buf.len());
// we have some buf remain for next time
if size < buf.len() {
if size == 0 {
send_queue.push_front(buf);
}
break 'inner;
}
},
Err(e) => {
println!("send error {:?}", e);
break 'inner;
}
}
} else {
println!("no data to send, reregister for next writable event");
break 'inner;
}
}
}
if event.is_readable() {
'read: loop {
let mut buf = [0; 1024];
match udp_socket.recv_from(&mut buf) {
Ok((size, addr)) => {
println!("read {:?} bytes from {:?}", size, addr);
if size > 0 {
let read_buf = &buf[..size];
let hash_signed = keccak(&read_buf[32..]);
println!("hash_signed = {:?}", hash_signed);
println!("check_sum = {:?}", hex::encode(&read_buf[0..32]));
// if hash_signed.as_bytes() != &read_buf[0..32] {
// // return Box::new(Err("bad protocol"));
// break;
// }
let signed = &read_buf[(32 + 65)..];
let message_type = signed[0];
print_message_type(message_type);
println!("message_type is {:?}", message_type);
let recover_id = RecoveryId::parse(read_buf[32 + 64]).expect("can not get recover id");
println!("recover_id = {:?}", recover_id);
let signature = Signature::parse_slice(&read_buf[32..(32 + 64)]).expect("can not get signature");
let hash = keccak(signed);
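// The signature is over keccak(packet-type || payload); recovering the public
// key from it yields the remote node's identity.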
let pubkey = recover(&Message::parse_slice(&hash).unwrap(), &signature, &recover_id).expect("can not recover pubkey");
println!("pubkey is {:?}", hex::encode(&pubkey.serialize_compressed().to_vec()));
let rlp = Rlp::new(&signed[1..]);
if message_type == 0x01 {
// got a ping message
let version: u8 = rlp.val_at(0)?;
let from_peer = PeerInfo::decode_rlp(&rlp.at(1)?)?;
let to_peer = PeerInfo::decode_rlp(&rlp.at(2)?)?;
println!("from_peer = {:?}, to_peer = {:?}", from_peer, to_peer);
let timestamp: u64 = rlp.val_at(3)?;
println!("version = {:?}, timestamp = {:?}", version, timestamp);
// send pong message
let from = PeerInfo::from_sock_addr(&addr);
let bytes = message::encode_pong(&from, &read_buf[0..32].to_vec(), &timestamp, &private_key);
println!("pong bytes is {:?}", bytes.len());
send_queue.push_back(bytes);
// send_queue
} else if message_type == 0x02 {
// got a pong message
let from_peer = PeerInfo::decode_rlp(&rlp.at(0)?)?;
let _hash_bytes = rlp.at(1)?.data()?;
let timestamp: u64 = rlp.val_at(2)?;
println!("got a pong message {:?} {:?}", from_peer, timestamp);
// start send findneighbours packet
let bytes = message::encode_find_node(&private_key);
println!("find node bytes is {:?}", bytes.len());
send_queue.push_back(bytes);
} else if message_type == 0x03 {
println!("got a find node message");
} else if message_type == 0x04 {
println!("got a node message");
}
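// Re-arm the UDP socket for WRITABLE so the queued reply gets flushed. Note
// that reregister replaces the interest set, so READABLE stops firing until
// the socket is registered for it again.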
poll.registry().reregister(&mut udp_socket, event.token(), Interest::WRITABLE)?;
// we have read all data
if size < buf.len() {
println!("no more data read");
break 'read;
}
} else {
println!("no data read");
break 'read;
}
},
Err(e) => {
println!("read error {:?}", e);
break 'read;
}
}
}
}
},
CLIENT => {
if event.is_readable() {
println!("client socket is readable");
// read buf
let mut buf = [0; 1024];
match client.read(&mut buf) {
Ok(n) => {
if n > 0 {
received_data.extend_from_slice(&buf[..n]);
println!("read data: {:?}", String::from_utf8_lossy(&received_data));
}
println!("read {:?} bytes", n);
},
Err(err) => {
println!("read data error {:?}", err);
}
}
}
if event.is_writable() {
// send auth info
let auth = "0196045fa704aa5f5a85f36c6b399b08d823083228d63c4346f382f78a18b684f3a4e64a671de498abf20cba88dd8f3f0a11443bed18248895b981e0c842e9e4fafe387cf9ad619ba89fe7dbfa6f504725bb673a804f3526df31c68a69caf9bc7a9eed62fe73dffdeae5e21f55e2a1ec28e17ad5f98bd0a61759fe25f8f96665278197413d86ab84ea2f3adbf70634b49d13b4b55037e23f393ddc2ae46e63d4c3d1b67945bcf22d03183a1b1ff3b9b74cf3d83a8093489b508759c5042ca0d7de29aa6eb024800868594f848f646f1488c7bbf2a598d411a7333db52168f53e04e28b260e218233e9641232304625ba67cbaa7b6a3703161235ab41758d466701beac1a08e5edc612e42cb7235d43cbdd51ff7bb3cbe4720dfa165f084dafce2c84795eb619016647c9aef4d6d9b31e1a4b1e3b18e856a025ab99275b8b860816259ddf86cdc20c22e0f6f70445258113fade6d38814cb88d8c0693a64880088563cb02ff15236bca24720aaaa9da219c0f2fa71f8a4b1e34793a330b31ccfbdcbaf0026c881d5761b198be428feb93b170afe95174722f";
let buf = hex::decode(auth).unwrap();
if !status_sent {
status_sent = true;
match client.write_all(&buf) {
Ok(_) => {
println!("write ok");
},
Err(err) => {
println!("read data error {:?}", err);
}
}
}
println!("client socket is writable");
}
println!("client event token {:?}", event.token());
},
_ => {
}
}
}
}
println!("Hello, world!");
}
| main | identifier_name |
csvload.go | // Copyright 2019 Tamás Gulácsi. All rights reserved.
package main
import (
"bytes"
"context"
"database/sql"
"flag"
"fmt"
"io"
"log"
"os"
"reflect"
"runtime"
"runtime/pprof"
"strings"
"sync"
"sync/atomic"
"time"
"unicode"
"github.com/pkg/errors"
"github.com/tgulacsi/go/dbcsv"
"golang.org/x/sync/errgroup"
_ "gopkg.in/goracle.v2"
)
func main() {
if err := Main(); err != nil {
log.Fatal(err)
}
}
var dateFormat = "2006-01-02 15:04:05"
var ForceString bool
const chunkSize = 1024
func Main() error {
encName := os.Getenv("LANG")
if i := strings.IndexByte(encName, '.'); i >= 0 {
encName = encName[i+1:]
} else if encName == "" {
encName = "UTF-8"
}
cfg := &dbcsv.Config{}
flagDB := flag.String("dsn", "$BRUNO_ID", "database to connect to")
flag.StringVar(&cfg.Charset, "charset", encName, "input charset")
flagTruncate := flag.Bool("truncate", false, "truncate table")
flagTablespace := flag.String("tablespace", "DATA", "tablespace to create table in")
flag.StringVar(&cfg.Delim, "delim", ";", "CSV separator")
flagConcurrency := flag.Int("concurrency", 8, "concurrency")
flag.StringVar(&dateFormat, "date", dateFormat, "date format, in Go notation")
flag.IntVar(&cfg.Skip, "skip", 0, "skip rows")
flag.IntVar(&cfg.Sheet, "sheet", 0, "sheet of spreadsheet")
flag.StringVar(&cfg.ColumnsString, "columns", "", "columns, comma separated indexes")
flag.BoolVar(&ForceString, "force-string", false, "force all columns to be VARCHAR2")
flagMemProf := flag.String("memprofile", "", "file to output memory profile to")
flagCPUProf := flag.String("cpuprofile", "", "file to output CPU profile to")
flagJustPrint := flag.Bool("just-print", false, "just print the INSERTs")
flag.Parse()
if flag.NArg() != 2 {
log.Fatal("Need two args: the table and the source.")
}
if *flagCPUProf != "" {
f, err := os.Create(*flagCPUProf)
if err != nil {
log.Fatal("could not create CPU profile: ", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatal("could not start CPU profile: ", err)
}
defer pprof.StopCPUProfile()
}
writeHeapProf := func() {}
if *flagMemProf != "" {
f, err := os.Create(*flagMemProf)
if err != nil {
log.Fatal("could not create memory profile: ", err)
}
defer f.Close()
writeHeapProf = func() {
log.Println("writeHeapProf")
f.Seek(0, 0)
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
log.Fatal("could not write memory profile: ", err)
}
}
}
if strings.HasPrefix(*flagDB, "$") {
*flagDB = os.ExpandEnv(*flagDB)
}
db, err := sql.Open("goracle", *flagDB)
if err != nil {
return errors.Wrap(err, *flagDB)
}
defer db.Close()
db.SetMaxIdleConns(*flagConcurrency)
tbl := strings.ToUpper(flag.Arg(0))
src := flag.Arg(1)
if ForceString {
err = cfg.OpenVolatile(flag.Arg(1))
} else {
err = cfg.Open(flag.Arg(1))
}
if err != nil {
return err
}
defer cfg.Close()
rows := make(chan dbcsv.Row)
ctx, cancel := context.WithCancel(context.Background())
go func() {
defer close(rows)
cfg.ReadRows(ctx,
func(_ string, row dbcsv.Row) error {
select {
case <-ctx.Done():
return ctx.Err()
case rows <- row:
}
return nil
},
)
}()
if *flagJustPrint {
cols, err := getColumns(ctx, db, tbl)
if err != nil {
return err
}
var buf strings.Builder
for i, col := range cols {
if i != 0 {
buf.Write([]byte{',', ' '})
}
buf.WriteString(col.Name)
}
fmt.Println("INSERT ALL")
prefix := " INTO " + tbl + " (" + buf.String() + ")"
colMap := make(map[string]Column, len(cols))
for _, col := range cols {
colMap[col.Name] = col
}
cols = cols[:0]
for _, nm := range (<-rows).Values {
cols = append(cols, colMap[strings.ToUpper(nm)])
}
dRepl := strings.NewReplacer(".", "", "-", "")
for row := range rows {
buf.Reset()
for j, s := range row.Values {
if j != 0 {
buf.Write([]byte{',', ' '})
}
col := cols[j]
if col.Type != Date {
if err = quote(&buf, s); err != nil {
return err
}
} else {
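// Date cells are normalized for TO_DATE: dots and dashes are stripped and
// six-digit values get a "20" century prefix, yielding YYYYMMDD.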
buf.WriteString("TO_DATE('")
d := dRepl.Replace(s)
if len(d) == 6 {
d = "20" + d
}
buf.WriteString(d)
buf.WriteString("','YYYYMMDD')")
}
}
fmt.Printf("%s VALUES (%s)\n", prefix, buf.String())
}
fmt.Println("SELECT 1 FROM DUAL;")
return nil
}
columns, err := CreateTable(ctx, db, tbl, rows, *flagTruncate, *flagTablespace)
cancel()
if err != nil {
return err
}
var buf strings.Builder
fmt.Fprintf(&buf, `INSERT INTO "%s" (`, tbl)
for i, c := range columns {
if i != 0 {
buf.WriteString(", ")
}
buf.WriteString(c.Name)
}
buf.WriteString(") VALUES (")
for i := range columns {
if i != 0 {
buf.WriteString(", ")
}
fmt.Fprintf(&buf, ":%d", i+1)
}
buf.WriteString(")")
qry := buf.String()
log.Println(qry)
start := time.Now()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
grp, ctx := errgroup.WithContext(ctx)
type rowsType struct {
Rows [][]string
Start int64
}
rowsCh := make(chan rowsType, *flagConcurrency)
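// Chunks of rows are recycled through a sync.Pool so the reader goroutine and
// the insert workers do not allocate a fresh [][]string per 1024-row batch.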
chunkPool := sync.Pool{New: func() interface{} { z := make([][]string, 0, chunkSize); return &z }}
var inserted int64
for i := 0; i < *flagConcurrency; i++ {
grp.Go(func() error {
tx, txErr := db.BeginTx(ctx, nil)
if txErr != nil {
return txErr
}
defer tx.Rollback()
stmt, prepErr := tx.Prepare(qry)
if prepErr != nil {
return errors.Wrap(prepErr, qry)
}
nCols := len(columns)
cols := make([][]string, nCols)
rowsI := make([]interface{}, nCols)
for rs := range rowsCh {
chunk := rs.Rows
if err = ctx.Err(); err != nil {
return err
}
if len(chunk) == 0 {
continue
}
nRows := len(chunk)
for j := range cols {
if cap(cols[j]) < nRows {
cols[j] = make([]string, nRows)
} else {
cols[j] = cols[j][:nRows]
for i := range cols[j] {
cols[j][i] = ""
}
}
}
for k, row := range chunk {
if len(row) > len(cols) {
log.Printf("%d. more elements in the row (%d) then columns (%d)!", rs.Start+int64(k), len(row), len(cols))
row = row[:len(cols)]
}
for j, v := range row {
cols[j][k] = v
}
}
for i, col := range cols {
if rowsI[i], err = columns[i].FromString(col); err != nil {
log.Printf("%d. col: %+v", i, err)
for k, row := range chunk {
if _, err = columns[i].FromString(col[k : k+1]); err != nil {
log.Printf("%d.%q %q: %q", rs.Start+int64(k), columns[i].Name, col[k:k+1], row)
break
}
}
return errors.Wrapf(err, columns[i].Name)
}
}
_, err = stmt.Exec(rowsI...)
{
z := chunk[:0]
chunkPool.Put(&z)
}
if err == nil {
atomic.AddInt64(&inserted, int64(len(chunk)))
continue
}
err = errors.Wrapf(err, "%s", qry)
log.Println(err)
rowsR := make([]reflect.Value, len(rowsI))
rowsI2 := make([]interface{}, len(rowsI))
for j, I := range rowsI {
rowsR[j] = reflect.ValueOf(I)
rowsI2[j] = ""
}
R2 := reflect.ValueOf(rowsI2)
for j := range cols[0] { // rows
for i, r := range rowsR { // cols
if r.Len() <= j {
log.Printf("%d[%q]=%d", j, columns[i].Name, r.Len())
rowsI2[i] = ""
continue
}
R2.Index(i).Set(r.Index(j))
}
if _, err = stmt.Exec(rowsI2...); err != nil {
err = errors.Wrapf(err, "%s, %q", qry, rowsI2)
log.Println(err)
return err
}
}
return err
}
return tx.Commit()
})
}
var n int64
var headerSeen bool
chunk := (*(chunkPool.Get().(*[][]string)))[:0]
if err = cfg.ReadRows(ctx,
func(_ string, row dbcsv.Row) error {
if err = ctx.Err(); err != nil {
chunk = chunk[:0]
return err
}
if !headerSeen {
headerSeen = true
return nil
} else if n%10000 == 0 {
writeHeapProf()
}
for i, s := range row.Values {
row.Values[i] = strings.TrimSpace(s)
}
chunk = append(chunk, row.Values)
if len(chunk) < chunkSize {
return nil
}
select {
case rowsCh <- rowsType{Rows: chunk, Start: n}:
n += int64(len(chunk))
case <-ctx.Done():
return ctx.Err()
}
chunk = (*chunkPool.Get().(*[][]string))[:0]
return nil
},
); err != nil {
return err
}
if len(chunk) != 0 {
rowsCh <- rowsType{Rows: chunk, Start: n}
n += int64(len(chunk))
}
close(rowsCh)
err = grp.Wait()
dur := time.Since(start)
log.Printf("Read %d, inserted %d rows from %q to %q in %s.", n, inserted, src, tbl, dur)
return err
}
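// typeOf infers a column type from a single cell: digits only -> Int, digits
// with one dot -> Float (a leading zero keeps the value String so codes like
// "007" survive), values parsing against a dateFormat prefix -> Date, else String.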
func typeOf(s string) Type {
if ForceString {
return String
}
s = strings.TrimSpace(s)
if len(s) == 0 {
return Unknown
}
var hasNonDigit bool
var dotCount int
var length int
_ = strings.Map(func(r rune) rune {
length++
if r == '.' {
dotCount++
} else if !hasNonDigit {
hasNonDigit = !('0' <= r && r <= '9')
}
return -1
},
s)
if !hasNonDigit && s[0] != '0' {
if dotCount == 1 {
return Float
}
if dotCount == 0 {
return Int
}
}
if 10 <= len(s) && len(s) <= len(dateFormat) {
if _, err := time.Parse(dateFormat[:len(s)], s); err == nil {
return Date
}
}
return String
}
func CreateTable(ctx context.Context, db *sql.DB, tbl string, rows <-chan dbcsv.Row, truncate bool, tablespace string) ([]Column, error) {
tbl = strings.ToUpper(tbl)
qry := "SELECT COUNT(0) FROM user_tables WHERE UPPER(table_name) = :1"
var n int64
var cols []Column
if err := db.QueryRowContext(ctx, qry, tbl).Scan(&n); err != nil {
return cols, errors.Wrap(err, qry)
}
if n > 0 && truncate {
qry = `TRUNCATE TABLE ` + tbl
if _, err := db.ExecContext(ctx, qry); err != nil {
return cols, errors.Wrap(err, qry)
}
}
if n == 0 {
row := <-rows
log.Printf("row: %q", row.Values)
cols = make([]Column, len(row.Values))
for i, v := range row.Values {
v = strings.Map(func(r rune) rune {
r = unicode.ToLower(r)
switch r {
case 'á':
return 'a'
case 'é':
return 'e'
case 'í':
return 'i'
case 'ö', 'ő', 'ó':
return 'o'
case 'ü', 'ű', 'ú':
return 'u'
case '_':
return '_'
default:
if 'a' <= r && r <= 'z' || '0' <= r && r <= '9' {
return r
}
return '_'
}
},
v)
if len(v) > 30 {
v = fmt.Sprintf("%s_%02d", v[:27], i)
}
cols[i].Name = v
}
if ForceString {
for i := range cols {
cols[i].Type = String
}
}
for row := range rows {
for i, v := range row.Values {
if len(v) > cols[i].Length {
cols[i].Length = len(v)
}
if cols[i].Type == String {
continue
}
typ := typeOf(v)
if cols[i].Type == Unknown {
cols[i].Type = typ
} else if typ != cols[i].Type {
cols[i].Type = String
}
}
}
var buf bytes.Buffer
buf.WriteString(`CREATE TABLE "` + tbl + `" (`)
for i, c := range cols {
if i != 0 {
buf.WriteString(",\n")
}
if c.Type == Date {
fmt.Fprintf(&buf, " %s DATE", c.Name)
continue
}
length := c.Length
if length == 0 {
length = 1
}
fmt.Fprintf(&buf, " %s %s(%d)", c.Name, c.Type.String(), length)
}
buf.WriteString("\n)")
if tablespace != "" {
buf.WriteString(" TABLESPACE ")
buf.WriteString(tablespace)
}
qry = buf.String()
log.Println(qry)
if _, err := db.Exec(qry); err != nil {
return cols, errors.Wrap(err, qry)
}
cols = cols[:0]
}
qry = `SELECT column_name, data_type, NVL(data_length, 0), NVL(data_precision, 0), NVL(data_scale, 0), nullable
FROM user_tab_cols WHERE table_name = :1
ORDER BY column_id`
tRows, err := db.QueryContext(ctx, qry, tbl)
if err != nil {
return cols, errors.Wrap(err, qry)
}
defer tRows.Close()
for tRows.Next() {
var c Column
var nullable string
if err = tRows.Scan(&c.Name, &c.DataType, &c.Length, &c.Precision, &c.Scale, &nullable); err != nil {
return cols, err
}
c.Nullable = nullable != "N"
cols = append(cols, c)
}
return cols, nil
}
type Column struct {
Length int
Name string
Type Type
DataType string
Precision, Scale int
Nullable bool
}
type Type uint8
const (
Unknown = Type(0)
String = Type(1)
Int = Type(2)
Float = Type(3)
Date = Type(4)
)
func (t Type) String() string {
switch t {
case Int, Float:
return "NUMBER"
case Date:
return "DATE"
default:
return "VARCHAR2"
}
}
func (c Column) FromString(ss []string) (interface{}, error) {
if c.DataType == "DATE" || c.Type == Date {
res := make([]time.Time, len(ss))
for i, s := range ss {
if s == "" {
continue
}
var err error
if res[i], err = time.Parse(dateFormat[:len(s)], s); err != nil {
return res, errors.Wrapf(err, "%d. %q", i, s)
}
}
return res, nil
}
if strings.HasPrefix(c.DataType, "VARCHAR2") {
for i, s := range ss {
if len(s) > c.Length {
ss[i] = s[:c.Length]
return ss, errors.Errorf("%d. %q is longer (%d) then allowed (%d) for column %v", i, s, len(s), c.Length, c)
}
}
return ss, nil
}
if c.Type == Int {
for i, s := range ss {
e := strings.Map(func(r rune) rune {
if !('0' <= r && r <= '9' || r == '-') {
return r
}
return -1
}, s)
if e != "" {
ss[i] = ""
return ss, errors.Errorf("%d. %q is not integer (%q)", i, s, e)
}
}
return ss, nil
}
if c.Type == Float {
for i, s := range ss {
e := strings.Map(func(r rune) rune {
if !('0' <= r && r <= '9' || r == '-' || r == '.') {
return r
}
return -1
}, s)
if e != "" {
ss[i] = ""
return ss, errors.Errorf("%d. %q is not float (%q)", i, s, e)
}
}
return ss, nil
}
return ss, nil
}
func getColumns(ctx context.Context, db *sql.DB, tbl string) ([]Column, error) {
// TODO(tgulacsi): this is Oracle-specific!
const qry = "SELECT column_name, data_type, data_length, data_precision, data_scale, nullable FROM user_tab_cols WHERE table_name = UPPER(:1) ORDER BY column_id"
rows, err := db.QueryContext(ctx, qry, tbl)
if err != nil {
return nil, errors.Wrap(err, qry)
}
defer rows.Close()
var cols []Column
for rows.Next() {
var c Column
var prec, scale sql.NullInt64
var nullable string
if err = rows.Scan(&c.Name, &c.DataType, &c.Length, &prec, &scale, &nullable); err != nil {
return nil, err
}
c.Nullable = nullable == "Y"
switch c.DataType {
case "DATE":
c.Type = Date
c.Length = 8
case "NUMBER":
c.Precision, c.Scale = int(prec.Int64), int(scale.Int64)
if c.Scale > 0 {
c.Type = Float
c.Length = c.Precision + 1
} else {
c.Type = Int
c.Length = c.Precision
}
default:
c.Type = String
}
cols = append(cols, c)
}
return cols, rows.Close()
}
var qRepl = strings.NewReplacer(
"'", "''",
"&", "'||CHR(38)||'",
)
func quote(w io.Writer, s string) error {
if _, err := w.Write([]byte{'\''}); err != nil {
return | := io.WriteString(w, qRepl.Replace(s)); err != nil {
return err
}
_, err := w.Write([]byte{'\''})
return err
}
// vim: set fileencoding=utf-8 noet:
| err
}
if _, err | conditional_block |
csvload.go | // Copyright 2019 Tamás Gulácsi. All rights reserved.
package main
import (
"bytes"
"context"
"database/sql"
"flag"
"fmt"
"io"
"log"
"os"
"reflect"
"runtime"
"runtime/pprof"
"strings"
"sync"
"sync/atomic"
"time"
"unicode"
"github.com/pkg/errors"
"github.com/tgulacsi/go/dbcsv"
"golang.org/x/sync/errgroup"
_ "gopkg.in/goracle.v2"
)
func main() {
if err := Main(); err != nil {
log.Fatal(err)
}
}
var dateFormat = "2006-01-02 15:04:05"
var ForceString bool
const chunkSize = 1024
func Main() error {
encName := os.Getenv("LANG")
if i := strings.IndexByte(encName, '.'); i >= 0 {
encName = encName[i+1:]
} else if encName == "" {
encName = "UTF-8"
}
cfg := &dbcsv.Config{}
flagDB := flag.String("dsn", "$BRUNO_ID", "database to connect to")
flag.StringVar(&cfg.Charset, "charset", encName, "input charset")
flagTruncate := flag.Bool("truncate", false, "truncate table")
flagTablespace := flag.String("tablespace", "DATA", "tablespace to create table in")
flag.StringVar(&cfg.Delim, "delim", ";", "CSV separator")
flagConcurrency := flag.Int("concurrency", 8, "concurrency")
flag.StringVar(&dateFormat, "date", dateFormat, "date format, in Go notation")
flag.IntVar(&cfg.Skip, "skip", 0, "skip rows")
flag.IntVar(&cfg.Sheet, "sheet", 0, "sheet of spreadsheet")
flag.StringVar(&cfg.ColumnsString, "columns", "", "columns, comma separated indexes")
flag.BoolVar(&ForceString, "force-string", false, "force all columns to be VARCHAR2")
flagMemProf := flag.String("memprofile", "", "file to output memory profile to")
flagCPUProf := flag.String("cpuprofile", "", "file to output CPU profile to")
flagJustPrint := flag.Bool("just-print", false, "just print the INSERTs")
flag.Parse()
if flag.NArg() != 2 {
log.Fatal("Need two args: the table and the source.")
}
if *flagCPUProf != "" {
f, err := os.Create(*flagCPUProf)
if err != nil {
log.Fatal("could not create CPU profile: ", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatal("could not start CPU profile: ", err)
}
defer pprof.StopCPUProfile()
}
writeHeapProf := func() {}
if *flagMemProf != "" {
f, err := os.Create(*flagMemProf)
if err != nil {
log.Fatal("could not create memory profile: ", err)
}
defer f.Close()
writeHeapProf = func() {
log.Println("writeHeapProf")
f.Seek(0, 0)
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
log.Fatal("could not write memory profile: ", err)
}
}
}
if strings.HasPrefix(*flagDB, "$") {
*flagDB = os.ExpandEnv(*flagDB)
}
db, err := sql.Open("goracle", *flagDB)
if err != nil {
return errors.Wrap(err, *flagDB)
}
defer db.Close()
db.SetMaxIdleConns(*flagConcurrency)
tbl := strings.ToUpper(flag.Arg(0))
src := flag.Arg(1)
if ForceString {
err = cfg.OpenVolatile(flag.Arg(1))
} else {
err = cfg.Open(flag.Arg(1))
}
if err != nil {
return err
}
defer cfg.Close()
rows := make(chan dbcsv.Row)
ctx, cancel := context.WithCancel(context.Background())
go func() {
defer close(rows)
cfg.ReadRows(ctx,
func(_ string, row dbcsv.Row) error {
select {
case <-ctx.Done():
return ctx.Err()
case rows <- row:
}
return nil
},
)
}()
if *flagJustPrint {
cols, err := getColumns(ctx, db, tbl)
if err != nil {
return err
}
var buf strings.Builder
for i, col := range cols {
if i != 0 {
buf.Write([]byte{',', ' '})
}
buf.WriteString(col.Name)
}
fmt.Println("INSERT ALL")
prefix := " INTO " + tbl + " (" + buf.String() + ")"
colMap := make(map[string]Column, len(cols))
for _, col := range cols {
colMap[col.Name] = col
}
cols = cols[:0]
for _, nm := range (<-rows).Values {
cols = append(cols, colMap[strings.ToUpper(nm)])
}
dRepl := strings.NewReplacer(".", "", "-", "")
for row := range rows {
buf.Reset()
for j, s := range row.Values {
if j != 0 {
buf.Write([]byte{',', ' '})
}
col := cols[j]
if col.Type != Date {
if err = quote(&buf, s); err != nil {
return err
}
} else {
buf.WriteString("TO_DATE('")
d := dRepl.Replace(s)
if len(d) == 6 {
d = "20" + d
}
buf.WriteString(d)
buf.WriteString("','YYYYMMDD')")
}
}
fmt.Printf("%s VALUES (%s)\n", prefix, buf.String())
}
fmt.Println("SELECT 1 FROM DUAL;")
return nil
}
columns, err := CreateTable(ctx, db, tbl, rows, *flagTruncate, *flagTablespace)
cancel()
if err != nil {
return err
}
var buf strings.Builder
fmt.Fprintf(&buf, `INSERT INTO "%s" (`, tbl)
for i, c := range columns {
if i != 0 {
buf.WriteString(", ")
}
buf.WriteString(c.Name)
}
buf.WriteString(") VALUES (")
for i := range columns {
if i != 0 {
buf.WriteString(", ")
}
fmt.Fprintf(&buf, ":%d", i+1)
}
buf.WriteString(")")
qry := buf.String()
log.Println(qry)
start := time.Now()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
grp, ctx := errgroup.WithContext(ctx)
type rowsType struct {
Rows [][]string
Start int64
}
rowsCh := make(chan rowsType, *flagConcurrency)
chunkPool := sync.Pool{New: func() interface{} { z := make([][]string, 0, chunkSize); return &z }}
var inserted int64
for i := 0; i < *flagConcurrency; i++ {
grp.Go(func() error {
tx, txErr := db.BeginTx(ctx, nil)
if txErr != nil {
return txErr
}
defer tx.Rollback()
stmt, prepErr := tx.Prepare(qry)
if prepErr != nil {
return errors.Wrap(prepErr, qry)
}
nCols := len(columns)
cols := make([][]string, nCols)
rowsI := make([]interface{}, nCols)
for rs := range rowsCh {
chunk := rs.Rows
if err = ctx.Err(); err != nil {
return err
}
if len(chunk) == 0 {
continue
}
nRows := len(chunk)
for j := range cols {
if cap(cols[j]) < nRows {
cols[j] = make([]string, nRows)
} else {
cols[j] = cols[j][:nRows]
for i := range cols[j] {
cols[j][i] = ""
}
}
}
for k, row := range chunk {
if len(row) > len(cols) {
log.Printf("%d. more elements in the row (%d) then columns (%d)!", rs.Start+int64(k), len(row), len(cols))
row = row[:len(cols)]
}
for j, v := range row {
cols[j][k] = v
}
}
for i, col := range cols {
if rowsI[i], err = columns[i].FromString(col); err != nil {
log.Printf("%d. col: %+v", i, err)
for k, row := range chunk {
if _, err = columns[i].FromString(col[k : k+1]); err != nil {
log.Printf("%d.%q %q: %q", rs.Start+int64(k), columns[i].Name, col[k:k+1], row)
break
}
}
return errors.Wrapf(err, columns[i].Name)
}
}
_, err = stmt.Exec(rowsI...)
{
z := chunk[:0]
chunkPool.Put(&z)
}
if err == nil {
atomic.AddInt64(&inserted, int64(len(chunk)))
continue
}
err = errors.Wrapf(err, "%s", qry)
log.Println(err)
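// The batched Exec failed: replay the chunk row by row through reflection so
// the exact offending row and column values get logged before bailing out.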
rowsR := make([]reflect.Value, len(rowsI))
rowsI2 := make([]interface{}, len(rowsI))
for j, I := range rowsI {
rowsR[j] = reflect.ValueOf(I)
rowsI2[j] = ""
}
R2 := reflect.ValueOf(rowsI2)
for j := range cols[0] { // rows
for i, r := range rowsR { // cols
if r.Len() <= j {
log.Printf("%d[%q]=%d", j, columns[i].Name, r.Len())
rowsI2[i] = ""
continue
}
R2.Index(i).Set(r.Index(j))
}
if _, err = stmt.Exec(rowsI2...); err != nil {
err = errors.Wrapf(err, "%s, %q", qry, rowsI2)
log.Println(err)
return err
}
}
return err
}
return tx.Commit()
})
}
var n int64
var headerSeen bool
chunk := (*(chunkPool.Get().(*[][]string)))[:0]
if err = cfg.ReadRows(ctx,
func(_ string, row dbcsv.Row) error {
if err = ctx.Err(); err != nil {
chunk = chunk[:0]
return err
}
if !headerSeen {
headerSeen = true
return nil
} else if n%10000 == 0 {
writeHeapProf()
}
for i, s := range row.Values {
row.Values[i] = strings.TrimSpace(s)
}
chunk = append(chunk, row.Values)
if len(chunk) < chunkSize {
return nil
}
select {
case rowsCh <- rowsType{Rows: chunk, Start: n}:
n += int64(len(chunk))
case <-ctx.Done():
return ctx.Err()
}
chunk = (*chunkPool.Get().(*[][]string))[:0]
return nil
},
); err != nil {
return err
}
if len(chunk) != 0 {
rowsCh <- rowsType{Rows: chunk, Start: n}
n += int64(len(chunk))
}
close(rowsCh)
err = grp.Wait()
dur := time.Since(start)
log.Printf("Read %d, inserted %d rows from %q to %q in %s.", n, inserted, src, tbl, dur)
return err
}
func typeOf(s string) Type {
if ForceString {
return String
}
s = strings.TrimSpace(s)
if len(s) == 0 {
return Unknown
}
var hasNonDigit bool
var dotCount int
var length int
_ = strings.Map(func(r rune) rune {
length++
if r == '.' {
dotCount++
} else if !hasNonDigit {
hasNonDigit = !('0' <= r && r <= '9')
}
return -1
},
s)
if !hasNonDigit && s[0] != '0' {
if dotCount == 1 {
return Float
}
if dotCount == 0 {
return Int
}
}
if 10 <= len(s) && len(s) <= len(dateFormat) {
if _, err := time.Parse(dateFormat[:len(s)], s); err == nil {
return Date
}
}
return String
}
func CreateTable(ctx context.Context, db *sql.DB, tbl string, rows <-chan dbcsv.Row, truncate bool, tablespace string) ([]Column, error) {
tbl = strings.ToUpper(tbl)
qry := "SELECT COUNT(0) FROM user_tables WHERE UPPER(table_name) = :1"
var n int64
var cols []Column
if err := db.QueryRowContext(ctx, qry, tbl).Scan(&n); err != nil {
return cols, errors.Wrap(err, qry)
}
if n > 0 && truncate {
qry = `TRUNCATE TABLE ` + tbl
if _, err := db.ExecContext(ctx, qry); err != nil {
return cols, errors.Wrap(err, qry)
}
}
if n == 0 {
row := <-rows
log.Printf("row: %q", row.Values)
cols = make([]Column, len(row.Values))
for i, v := range row.Values {
v = strings.Map(func(r rune) rune {
r = unicode.ToLower(r)
switch r {
case 'á':
return 'a'
case 'é':
return 'e'
case 'í':
return 'i'
case 'ö', 'ő', 'ó':
return 'o'
case 'ü', 'ű', 'ú':
return 'u'
case '_':
return '_'
default:
if 'a' <= r && r <= 'z' || '0' <= r && r <= '9' {
return r
}
return '_'
}
},
v)
if len(v) > 30 {
v = fmt.Sprintf("%s_%02d", v[:27], i)
}
cols[i].Name = v
}
if ForceString {
for i := range cols {
cols[i].Type = String
}
}
for row := range rows {
for i, v := range row.Values {
if len(v) > cols[i].Length {
cols[i].Length = len(v)
}
if cols[i].Type == String {
continue
}
typ := typeOf(v)
if cols[i].Type == Unknown {
cols[i].Type = typ
} else if typ != cols[i].Type {
cols[i].Type = String
}
}
}
var buf bytes.Buffer
buf.WriteString(`CREATE TABLE "` + tbl + `" (`)
for i, c := range cols {
if i != 0 {
buf.WriteString(",\n")
}
if c.Type == Date {
fmt.Fprintf(&buf, " %s DATE", c.Name)
continue
}
length := c.Length
if length == 0 {
length = 1
}
fmt.Fprintf(&buf, " %s %s(%d)", c.Name, c.Type.String(), length)
}
buf.WriteString("\n)")
if tablespace != "" {
buf.WriteString(" TABLESPACE ")
buf.WriteString(tablespace)
}
qry = buf.String()
log.Println(qry)
if _, err := db.Exec(qry); err != nil {
return cols, errors.Wrap(err, qry)
}
cols = cols[:0]
}
qry = `SELECT column_name, data_type, NVL(data_length, 0), NVL(data_precision, 0), NVL(data_scale, 0), nullable
FROM user_tab_cols WHERE table_name = :1
ORDER BY column_id`
tRows, err := db.QueryContext(ctx, qry, tbl)
if err != nil {
return cols, errors.Wrap(err, qry)
}
defer tRows.Close()
for tRows.Next() {
var c Column
var nullable string
if err = tRows.Scan(&c.Name, &c.DataType, &c.Length, &c.Precision, &c.Scale, &nullable); err != nil {
return cols, err
}
c.Nullable = nullable != "N"
cols = append(cols, c)
}
return cols, nil
}
type Column struct {
Length int
Name string
Type Type
DataType string
Precision, Scale int
Nullable bool
}
type Type uint8
const (
Unknown = Type(0)
String = Type(1)
Int = Type(2)
Float = Type(3)
Date = Type(4)
)
func (t Type) String() string {
switch t {
case Int, Float:
return "NUMBER"
case Date:
return "DATE"
default:
return "VARCHAR2"
}
}
func (c Column) FromString(ss []string) (interface{}, error) {
if c.DataType == "DATE" || c.Type == Date {
res := make([]time.Time, len(ss))
for i, s := range ss {
if s == "" {
continue
}
var err error
if res[i], err = time.Parse(dateFormat[:len(s)], s); err != nil {
return res, errors.Wrapf(err, "%d. %q", i, s)
}
}
return res, nil
}
if strings.HasPrefix(c.DataType, "VARCHAR2") {
for i, s := range ss {
if len(s) > c.Length {
ss[i] = s[:c.Length]
return ss, errors.Errorf("%d. %q is longer (%d) then allowed (%d) for column %v", i, s, len(s), c.Length, c)
}
}
return ss, nil
}
if c.Type == Int {
for i, s := range ss {
e := strings.Map(func(r rune) rune {
if !('0' <= r && r <= '9' || r == '-') {
return r
}
return -1
}, s)
if e != "" {
ss[i] = ""
return ss, errors.Errorf("%d. %q is not integer (%q)", i, s, e)
}
}
return ss, nil
}
if c.Type == Float {
for i, s := range ss {
e := strings.Map(func(r rune) rune {
if !('0' <= r && r <= '9' || r == '-' || r == '.') {
return r
}
return -1
}, s)
if e != "" {
ss[i] = ""
return ss, errors.Errorf("%d. %q is not float (%q)", i, s, e)
}
}
return ss, nil
}
return ss, nil
}
func getColumns(ctx context.Context, db *sql.DB, tbl string) ([]Column, error) {
// TODO(tgulacsi): this is Oracle-specific!
const qry = "SELECT column_name, data_type, data_length, data_precision, data_scale, nullable FROM user_tab_cols WHERE table_name = UPPER(:1) ORDER BY column_id"
rows, err := db.QueryContext(ctx, qry, tbl)
if err != nil {
return nil, errors.Wrap(err, qry)
}
defer rows.Close()
var cols []Column
for rows.Next() {
var c Column
var prec, scale sql.NullInt64
var nullable string
if err = rows.Scan(&c.Name, &c.DataType, &c.Length, &prec, &scale, &nullable); err != nil {
return nil, err
}
c.Nullable = nullable == "Y"
switch c.DataType {
case "DATE":
c.Type = Date
c.Length = 8
case "NUMBER":
c.Precision, c.Scale = int(prec.Int64), int(scale.Int64)
if c.Scale > 0 {
c.Type = Float
c.Length = c.Precision + 1
} else {
c.Type = Int
c.Length = c.Precision
}
default:
c.Type = String
}
cols = append(cols, c)
}
return cols, rows.Close()
}
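// qRepl doubles single quotes and splices '&' in as CHR(38) so the printed
// literals stay valid even under SQL*Plus ampersand substitution.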
var qRepl = strings.NewReplacer(
"'", "''",
"&", "'||CHR(38)||'",
)
func quote(w io. | r, s string) error {
if _, err := w.Write([]byte{'\''}); err != nil {
return err
}
if _, err := io.WriteString(w, qRepl.Replace(s)); err != nil {
return err
}
_, err := w.Write([]byte{'\''})
return err
}
// vim: set fileencoding=utf-8 noet:
| Write | identifier_name |
csvload.go | // Copyright 2019 Tamás Gulácsi. All rights reserved.
package main
import (
"bytes"
"context"
"database/sql"
"flag"
"fmt"
"io"
"log"
"os"
"reflect"
"runtime"
"runtime/pprof"
"strings"
"sync"
"sync/atomic"
"time"
"unicode"
"github.com/pkg/errors"
"github.com/tgulacsi/go/dbcsv"
"golang.org/x/sync/errgroup"
_ "gopkg.in/goracle.v2"
)
func main() {
if err := Main(); err != nil {
log.Fatal(err)
}
}
var dateFormat = "2006-01-02 15:04:05"
var ForceString bool
const chunkSize = 1024
func Main() error {
encName := os.Getenv("LANG")
if i := strings.IndexByte(encName, '.'); i >= 0 {
encName = encName[i+1:]
} else if encName == "" {
encName = "UTF-8"
}
cfg := &dbcsv.Config{}
flagDB := flag.String("dsn", "$BRUNO_ID", "database to connect to")
flag.StringVar(&cfg.Charset, "charset", encName, "input charset")
flagTruncate := flag.Bool("truncate", false, "truncate table")
flagTablespace := flag.String("tablespace", "DATA", "tablespace to create table in")
flag.StringVar(&cfg.Delim, "delim", ";", "CSV separator")
flagConcurrency := flag.Int("concurrency", 8, "concurrency")
flag.StringVar(&dateFormat, "date", dateFormat, "date format, in Go notation")
flag.IntVar(&cfg.Skip, "skip", 0, "skip rows")
flag.IntVar(&cfg.Sheet, "sheet", 0, "sheet of spreadsheet")
flag.StringVar(&cfg.ColumnsString, "columns", "", "columns, comma separated indexes")
flag.BoolVar(&ForceString, "force-string", false, "force all columns to be VARCHAR2")
flagMemProf := flag.String("memprofile", "", "file to output memory profile to")
flagCPUProf := flag.String("cpuprofile", "", "file to output CPU profile to")
flagJustPrint := flag.Bool("just-print", false, "just print the INSERTs")
flag.Parse()
if flag.NArg() != 2 {
log.Fatal("Need two args: the table and the source.")
}
if *flagCPUProf != "" {
f, err := os.Create(*flagCPUProf)
if err != nil {
log.Fatal("could not create CPU profile: ", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatal("could not start CPU profile: ", err)
}
defer pprof.StopCPUProfile()
}
writeHeapProf := func() {}
if *flagMemProf != "" {
f, err := os.Create(*flagMemProf)
if err != nil {
log.Fatal("could not create memory profile: ", err)
}
defer f.Close()
writeHeapProf = func() {
log.Println("writeHeapProf")
f.Seek(0, 0)
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
log.Fatal("could not write memory profile: ", err)
}
}
}
if strings.HasPrefix(*flagDB, "$") {
*flagDB = os.ExpandEnv(*flagDB)
}
db, err := sql.Open("goracle", *flagDB)
if err != nil {
return errors.Wrap(err, *flagDB)
}
defer db.Close()
db.SetMaxIdleConns(*flagConcurrency)
tbl := strings.ToUpper(flag.Arg(0))
src := flag.Arg(1)
if ForceString {
err = cfg.OpenVolatile(flag.Arg(1))
} else {
err = cfg.Open(flag.Arg(1))
}
if err != nil {
return err
}
defer cfg.Close()
rows := make(chan dbcsv.Row)
ctx, cancel := context.WithCancel(context.Background())
go func() {
defer close(rows)
cfg.ReadRows(ctx,
func(_ string, row dbcsv.Row) error {
select {
case <-ctx.Done():
return ctx.Err()
case rows <- row:
}
return nil
},
)
}()
if *flagJustPrint {
cols, err := getColumns(ctx, db, tbl)
if err != nil {
return err
}
var buf strings.Builder
for i, col := range cols {
if i != 0 {
buf.Write([]byte{',', ' '})
}
buf.WriteString(col.Name)
}
fmt.Println("INSERT ALL")
prefix := " INTO " + tbl + " (" + buf.String() + ")"
colMap := make(map[string]Column, len(cols))
for _, col := range cols {
colMap[col.Name] = col
}
cols = cols[:0]
for _, nm := range (<-rows).Values {
cols = append(cols, colMap[strings.ToUpper(nm)])
}
dRepl := strings.NewReplacer(".", "", "-", "")
for row := range rows {
buf.Reset()
for j, s := range row.Values {
if j != 0 {
buf.Write([]byte{',', ' '})
}
col := cols[j]
if col.Type != Date {
if err = quote(&buf, s); err != nil {
return err
}
} else {
buf.WriteString("TO_DATE('")
d := dRepl.Replace(s)
if len(d) == 6 {
d = "20" + d
}
buf.WriteString(d)
buf.WriteString("','YYYYMMDD')")
}
}
fmt.Printf("%s VALUES (%s)\n", prefix, buf.String())
}
fmt.Println("SELECT 1 FROM DUAL;")
return nil
}
columns, err := CreateTable(ctx, db, tbl, rows, *flagTruncate, *flagTablespace)
cancel()
if err != nil {
return err
}
var buf strings.Builder
fmt.Fprintf(&buf, `INSERT INTO "%s" (`, tbl)
for i, c := range columns {
if i != 0 {
buf.WriteString(", ")
}
buf.WriteString(c.Name)
}
buf.WriteString(") VALUES (")
for i := range columns {
if i != 0 {
buf.WriteString(", ")
}
fmt.Fprintf(&buf, ":%d", i+1)
}
buf.WriteString(")")
qry := buf.String()
log.Println(qry)
start := time.Now()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
grp, ctx := errgroup.WithContext(ctx)
type rowsType struct {
Rows [][]string
Start int64
}
rowsCh := make(chan rowsType, *flagConcurrency)
chunkPool := sync.Pool{New: func() interface{} { z := make([][]string, 0, chunkSize); return &z }}
var inserted int64
for i := 0; i < *flagConcurrency; i++ {
grp.Go(func() error {
tx, txErr := db.BeginTx(ctx, nil)
if txErr != nil {
return txErr
}
defer tx.Rollback()
stmt, prepErr := tx.Prepare(qry)
if prepErr != nil {
return errors.Wrap(prepErr, qry)
}
nCols := len(columns)
cols := make([][]string, nCols)
rowsI := make([]interface{}, nCols)
for rs := range rowsCh {
chunk := rs.Rows
if err = ctx.Err(); err != nil {
return err
}
if len(chunk) == 0 {
continue
}
nRows := len(chunk)
for j := range cols {
if cap(cols[j]) < nRows {
cols[j] = make([]string, nRows)
} else {
cols[j] = cols[j][:nRows]
for i := range cols[j] {
cols[j][i] = ""
}
}
}
for k, row := range chunk {
if len(row) > len(cols) {
log.Printf("%d. more elements in the row (%d) then columns (%d)!", rs.Start+int64(k), len(row), len(cols))
row = row[:len(cols)]
}
for j, v := range row {
cols[j][k] = v
}
}
for i, col := range cols {
if rowsI[i], err = columns[i].FromString(col); err != nil {
log.Printf("%d. col: %+v", i, err)
for k, row := range chunk {
if _, err = columns[i].FromString(col[k : k+1]); err != nil {
log.Printf("%d.%q %q: %q", rs.Start+int64(k), columns[i].Name, col[k:k+1], row)
break
}
}
return errors.Wrapf(err, columns[i].Name)
}
}
_, err = stmt.Exec(rowsI...)
{
z := chunk[:0]
chunkPool.Put(&z)
}
if err == nil {
atomic.AddInt64(&inserted, int64(len(chunk)))
continue
}
err = errors.Wrapf(err, "%s", qry)
log.Println(err)
rowsR := make([]reflect.Value, len(rowsI))
rowsI2 := make([]interface{}, len(rowsI))
for j, I := range rowsI {
rowsR[j] = reflect.ValueOf(I)
rowsI2[j] = ""
}
R2 := reflect.ValueOf(rowsI2)
for j := range cols[0] { // rows
for i, r := range rowsR { // cols
if r.Len() <= j {
log.Printf("%d[%q]=%d", j, columns[i].Name, r.Len())
rowsI2[i] = ""
continue
}
R2.Index(i).Set(r.Index(j))
}
if _, err = stmt.Exec(rowsI2...); err != nil {
err = errors.Wrapf(err, "%s, %q", qry, rowsI2)
log.Println(err)
return err | return tx.Commit()
})
}
var n int64
var headerSeen bool
chunk := (*(chunkPool.Get().(*[][]string)))[:0]
if err = cfg.ReadRows(ctx,
func(_ string, row dbcsv.Row) error {
if err = ctx.Err(); err != nil {
chunk = chunk[:0]
return err
}
if !headerSeen {
headerSeen = true
return nil
} else if n%10000 == 0 {
writeHeapProf()
}
for i, s := range row.Values {
row.Values[i] = strings.TrimSpace(s)
}
chunk = append(chunk, row.Values)
if len(chunk) < chunkSize {
return nil
}
select {
case rowsCh <- rowsType{Rows: chunk, Start: n}:
n += int64(len(chunk))
case <-ctx.Done():
return ctx.Err()
}
chunk = (*chunkPool.Get().(*[][]string))[:0]
return nil
},
); err != nil {
return err
}
if len(chunk) != 0 {
rowsCh <- rowsType{Rows: chunk, Start: n}
n += int64(len(chunk))
}
close(rowsCh)
err = grp.Wait()
dur := time.Since(start)
log.Printf("Read %d, inserted %d rows from %q to %q in %s.", n, inserted, src, tbl, dur)
return err
}
func typeOf(s string) Type {
if ForceString {
return String
}
s = strings.TrimSpace(s)
if len(s) == 0 {
return Unknown
}
var hasNonDigit bool
var dotCount int
var length int
_ = strings.Map(func(r rune) rune {
length++
if r == '.' {
dotCount++
} else if !hasNonDigit {
hasNonDigit = !('0' <= r && r <= '9')
}
return -1
},
s)
if !hasNonDigit && s[0] != '0' {
if dotCount == 1 {
return Float
}
if dotCount == 0 {
return Int
}
}
if 10 <= len(s) && len(s) <= len(dateFormat) {
if _, err := time.Parse(dateFormat[:len(s)], s); err == nil {
return Date
}
}
return String
}
func CreateTable(ctx context.Context, db *sql.DB, tbl string, rows <-chan dbcsv.Row, truncate bool, tablespace string) ([]Column, error) {
tbl = strings.ToUpper(tbl)
qry := "SELECT COUNT(0) FROM user_tables WHERE UPPER(table_name) = :1"
var n int64
var cols []Column
if err := db.QueryRowContext(ctx, qry, tbl).Scan(&n); err != nil {
return cols, errors.Wrap(err, qry)
}
if n > 0 && truncate {
qry = `TRUNCATE TABLE ` + tbl
if _, err := db.ExecContext(ctx, qry); err != nil {
return cols, errors.Wrap(err, qry)
}
}
if n == 0 {
row := <-rows
log.Printf("row: %q", row.Values)
cols = make([]Column, len(row.Values))
for i, v := range row.Values {
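// Header cells become column names: Hungarian accents are transliterated,
// any other non-alphanumeric rune becomes '_', and names over 30 bytes are
// cut to 27 characters plus a numeric suffix to honor Oracle's identifier limit.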
v = strings.Map(func(r rune) rune {
r = unicode.ToLower(r)
switch r {
case 'á':
return 'a'
case 'é':
return 'e'
case 'í':
return 'i'
case 'ö', 'ő', 'ó':
return 'o'
case 'ü', 'ű', 'ú':
return 'u'
case '_':
return '_'
default:
if 'a' <= r && r <= 'z' || '0' <= r && r <= '9' {
return r
}
return '_'
}
},
v)
if len(v) > 30 {
v = fmt.Sprintf("%s_%02d", v[:27], i)
}
cols[i].Name = v
}
if ForceString {
for i := range cols {
cols[i].Type = String
}
}
for row := range rows {
for i, v := range row.Values {
if len(v) > cols[i].Length {
cols[i].Length = len(v)
}
if cols[i].Type == String {
continue
}
typ := typeOf(v)
if cols[i].Type == Unknown {
cols[i].Type = typ
} else if typ != cols[i].Type {
cols[i].Type = String
}
}
}
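// Every row has been scanned, so the inferred names, types and lengths are
// final; emit the CREATE TABLE statement from them.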
var buf bytes.Buffer
buf.WriteString(`CREATE TABLE "` + tbl + `" (`)
for i, c := range cols {
if i != 0 {
buf.WriteString(",\n")
}
if c.Type == Date {
fmt.Fprintf(&buf, " %s DATE", c.Name)
continue
}
length := c.Length
if length == 0 {
length = 1
}
fmt.Fprintf(&buf, " %s %s(%d)", c.Name, c.Type.String(), length)
}
buf.WriteString("\n)")
if tablespace != "" {
buf.WriteString(" TABLESPACE ")
buf.WriteString(tablespace)
}
qry = buf.String()
log.Println(qry)
if _, err := db.Exec(qry); err != nil {
return cols, errors.Wrap(err, qry)
}
cols = cols[:0]
}
qry = `SELECT column_name, data_type, NVL(data_length, 0), NVL(data_precision, 0), NVL(data_scale, 0), nullable
FROM user_tab_cols WHERE table_name = :1
ORDER BY column_id`
tRows, err := db.QueryContext(ctx, qry, tbl)
if err != nil {
return cols, errors.Wrap(err, qry)
}
defer tRows.Close()
for tRows.Next() {
var c Column
var nullable string
if err = tRows.Scan(&c.Name, &c.DataType, &c.Length, &c.Precision, &c.Scale, &nullable); err != nil {
return cols, err
}
c.Nullable = nullable != "N"
cols = append(cols, c)
}
return cols, nil
}
type Column struct {
Length int
Name string
Type Type
DataType string
Precision, Scale int
Nullable bool
}
type Type uint8
const (
Unknown = Type(0)
String = Type(1)
Int = Type(2)
Float = Type(3)
Date = Type(4)
)
func (t Type) String() string {
switch t {
case Int, Float:
return "NUMBER"
case Date:
return "DATE"
default:
return "VARCHAR2"
}
}
func (c Column) FromString(ss []string) (interface{}, error) {
if c.DataType == "DATE" || c.Type == Date {
res := make([]time.Time, len(ss))
for i, s := range ss {
if s == "" {
continue
}
var err error
if res[i], err = time.Parse(dateFormat[:len(s)], s); err != nil {
return res, errors.Wrapf(err, "%d. %q", i, s)
}
}
return res, nil
}
if strings.HasPrefix(c.DataType, "VARCHAR2") {
for i, s := range ss {
if len(s) > c.Length {
ss[i] = s[:c.Length]
return ss, errors.Errorf("%d. %q is longer (%d) then allowed (%d) for column %v", i, s, len(s), c.Length, c)
}
}
return ss, nil
}
if c.Type == Int {
for i, s := range ss {
e := strings.Map(func(r rune) rune {
if !('0' <= r && r <= '9' || r == '-') {
return r
}
return -1
}, s)
if e != "" {
ss[i] = ""
return ss, errors.Errorf("%d. %q is not integer (%q)", i, s, e)
}
}
return ss, nil
}
if c.Type == Float {
for i, s := range ss {
e := strings.Map(func(r rune) rune {
if !('0' <= r && r <= '9' || r == '-' || r == '.') {
return r
}
return -1
}, s)
if e != "" {
ss[i] = ""
return ss, errors.Errorf("%d. %q is not float (%q)", i, s, e)
}
}
return ss, nil
}
return ss, nil
}
func getColumns(ctx context.Context, db *sql.DB, tbl string) ([]Column, error) {
// TODO(tgulacsi): this is Oracle-specific!
const qry = "SELECT column_name, data_type, data_length, data_precision, data_scale, nullable FROM user_tab_cols WHERE table_name = UPPER(:1) ORDER BY column_id"
rows, err := db.QueryContext(ctx, qry, tbl)
if err != nil {
return nil, errors.Wrap(err, qry)
}
defer rows.Close()
var cols []Column
for rows.Next() {
var c Column
var prec, scale sql.NullInt64
var nullable string
if err = rows.Scan(&c.Name, &c.DataType, &c.Length, &prec, &scale, &nullable); err != nil {
return nil, err
}
c.Nullable = nullable == "Y"
switch c.DataType {
case "DATE":
c.Type = Date
c.Length = 8
case "NUMBER":
c.Precision, c.Scale = int(prec.Int64), int(scale.Int64)
if c.Scale > 0 {
c.Type = Float
c.Length = c.Precision + 1
} else {
c.Type = Int
c.Length = c.Precision
}
default:
c.Type = String
}
cols = append(cols, c)
}
return cols, rows.Close()
}
var qRepl = strings.NewReplacer(
"'", "''",
"&", "'||CHR(38)||'",
)
func quote(w io.Writer, s string) error {
if _, err := w.Write([]byte{'\''}); err != nil {
return err
}
if _, err := io.WriteString(w, qRepl.Replace(s)); err != nil {
return err
}
_, err := w.Write([]byte{'\''})
return err
}
// vim: set fileencoding=utf-8 noet: | }
}
return err
} | random_line_split |
csvload.go | // Copyright 2019 Tamás Gulácsi. All rights reserved.
package main
import (
"bytes"
"context"
"database/sql"
"flag"
"fmt"
"io"
"log"
"os"
"reflect"
"runtime"
"runtime/pprof"
"strings"
"sync"
"sync/atomic"
"time"
"unicode"
"github.com/pkg/errors"
"github.com/tgulacsi/go/dbcsv"
"golang.org/x/sync/errgroup"
_ "gopkg.in/goracle.v2"
)
func main() {
if err := Main(); err != nil {
log.Fatal(err)
}
}
var dateFormat = "2006-01-02 15:04:05"
var ForceString bool
const chunkSize = 1024
func Main() error {
encName := os.Getenv("LANG")
if i := strings.IndexByte(encName, '.'); i >= 0 {
encName = encName[i+1:]
} else if encName == "" {
encName = "UTF-8"
}
cfg := &dbcsv.Config{}
flagDB := flag.String("dsn", "$BRUNO_ID", "database to connect to")
flag.StringVar(&cfg.Charset, "charset", encName, "input charset")
flagTruncate := flag.Bool("truncate", false, "truncate table")
flagTablespace := flag.String("tablespace", "DATA", "tablespace to create table in")
flag.StringVar(&cfg.Delim, "delim", ";", "CSV separator")
flagConcurrency := flag.Int("concurrency", 8, "concurrency")
flag.StringVar(&dateFormat, "date", dateFormat, "date format, in Go notation")
flag.IntVar(&cfg.Skip, "skip", 0, "skip rows")
flag.IntVar(&cfg.Sheet, "sheet", 0, "sheet of spreadsheet")
flag.StringVar(&cfg.ColumnsString, "columns", "", "columns, comma separated indexes")
flag.BoolVar(&ForceString, "force-string", false, "force all columns to be VARCHAR2")
flagMemProf := flag.String("memprofile", "", "file to output memory profile to")
flagCPUProf := flag.String("cpuprofile", "", "file to output CPU profile to")
flagJustPrint := flag.Bool("just-print", false, "just print the INSERTs")
flag.Parse()
if flag.NArg() != 2 {
log.Fatal("Need two args: the table and the source.")
}
if *flagCPUProf != "" {
f, err := os.Create(*flagCPUProf)
if err != nil {
log.Fatal("could not create CPU profile: ", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatal("could not start CPU profile: ", err)
}
defer pprof.StopCPUProfile()
}
writeHeapProf := func() {}
if *flagMemProf != "" {
f, err := os.Create(*flagMemProf)
if err != nil {
log.Fatal("could not create memory profile: ", err)
}
defer f.Close()
writeHeapProf = func() {
log.Println("writeHeapProf")
f.Seek(0, 0)
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
log.Fatal("could not write memory profile: ", err)
}
}
}
if strings.HasPrefix(*flagDB, "$") {
*flagDB = os.ExpandEnv(*flagDB)
}
db, err := sql.Open("goracle", *flagDB)
if err != nil {
return errors.Wrap(err, *flagDB)
}
defer db.Close()
db.SetMaxIdleConns(*flagConcurrency)
tbl := strings.ToUpper(flag.Arg(0))
src := flag.Arg(1)
if ForceString {
err = cfg.OpenVolatile(flag.Arg(1))
} else {
err = cfg.Open(flag.Arg(1))
}
if err != nil {
return err
}
defer cfg.Close()
rows := make(chan dbcsv.Row)
ctx, cancel := context.WithCancel(context.Background())
go func() {
defer close(rows)
cfg.ReadRows(ctx,
func(_ string, row dbcsv.Row) error {
select {
case <-ctx.Done():
return ctx.Err()
case rows <- row:
}
return nil
},
)
}()
if *flagJustPrint {
cols, err := getColumns(ctx, db, tbl)
if err != nil {
return err
}
var buf strings.Builder
for i, col := range cols {
if i != 0 {
buf.Write([]byte{',', ' '})
}
buf.WriteString(col.Name)
}
fmt.Println("INSERT ALL")
prefix := " INTO " + tbl + " (" + buf.String() + ")"
colMap := make(map[string]Column, len(cols))
for _, col := range cols {
colMap[col.Name] = col
}
cols = cols[:0]
for _, nm := range (<-rows).Values {
cols = append(cols, colMap[strings.ToUpper(nm)])
}
dRepl := strings.NewReplacer(".", "", "-", "")
for row := range rows {
buf.Reset()
for j, s := range row.Values {
if j != 0 {
buf.Write([]byte{',', ' '})
}
col := cols[j]
if col.Type != Date {
if err = quote(&buf, s); err != nil {
return err
}
} else {
buf.WriteString("TO_DATE('")
d := dRepl.Replace(s)
if len(d) == 6 {
d = "20" + d
}
buf.WriteString(d)
buf.WriteString("','YYYYMMDD')")
}
}
fmt.Printf("%s VALUES (%s)\n", prefix, buf.String())
}
fmt.Println("SELECT 1 FROM DUAL;")
return nil
}
columns, err := CreateTable(ctx, db, tbl, rows, *flagTruncate, *flagTablespace)
cancel()
if err != nil {
return err
}
var buf strings.Builder
fmt.Fprintf(&buf, `INSERT INTO "%s" (`, tbl)
for i, c := range columns {
if i != 0 {
buf.WriteString(", ")
}
buf.WriteString(c.Name)
}
buf.WriteString(") VALUES (")
for i := range columns {
if i != 0 {
buf.WriteString(", ")
}
fmt.Fprintf(&buf, ":%d", i+1)
}
buf.WriteString(")")
qry := buf.String()
log.Println(qry)
start := time.Now()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
grp, ctx := errgroup.WithContext(ctx)
type rowsType struct {
Rows [][]string
Start int64
}
rowsCh := make(chan rowsType, *flagConcurrency)
chunkPool := sync.Pool{New: func() interface{} { z := make([][]string, 0, chunkSize); return &z }}
var inserted int64
for i := 0; i < *flagConcurrency; i++ {
grp.Go(func() error {
tx, txErr := db.BeginTx(ctx, nil)
if txErr != nil {
return txErr
}
defer tx.Rollback()
stmt, prepErr := tx.Prepare(qry)
if prepErr != nil {
return errors.Wrap(prepErr, qry)
}
nCols := len(columns)
cols := make([][]string, nCols)
rowsI := make([]interface{}, nCols)
for rs := range rowsCh {
chunk := rs.Rows
if err = ctx.Err(); err != nil {
return err
}
if len(chunk) == 0 {
continue
}
nRows := len(chunk)
for j := range cols {
if cap(cols[j]) < nRows {
cols[j] = make([]string, nRows)
} else {
cols[j] = cols[j][:nRows]
for i := range cols[j] {
cols[j][i] = ""
}
}
}
for k, row := range chunk {
if len(row) > len(cols) {
log.Printf("%d. more elements in the row (%d) then columns (%d)!", rs.Start+int64(k), len(row), len(cols))
row = row[:len(cols)]
}
for j, v := range row {
cols[j][k] = v
}
}
for i, col := range cols {
if rowsI[i], err = columns[i].FromString(col); err != nil {
log.Printf("%d. col: %+v", i, err)
for k, row := range chunk {
if _, err = columns[i].FromString(col[k : k+1]); err != nil {
log.Printf("%d.%q %q: %q", rs.Start+int64(k), columns[i].Name, col[k:k+1], row)
break
}
}
return errors.Wrapf(err, columns[i].Name)
}
}
_, err = stmt.Exec(rowsI...)
{
z := chunk[:0]
chunkPool.Put(&z)
}
if err == nil {
atomic.AddInt64(&inserted, int64(len(chunk)))
continue
}
err = errors.Wrapf(err, "%s", qry)
log.Println(err)
rowsR := make([]reflect.Value, len(rowsI))
rowsI2 := make([]interface{}, len(rowsI))
for j, I := range rowsI {
rowsR[j] = reflect.ValueOf(I)
rowsI2[j] = ""
}
R2 := reflect.ValueOf(rowsI2)
for j := range cols[0] { // rows
for i, r := range rowsR { // cols
if r.Len() <= j {
log.Printf("%d[%q]=%d", j, columns[i].Name, r.Len())
rowsI2[i] = ""
continue
}
R2.Index(i).Set(r.Index(j))
}
if _, err = stmt.Exec(rowsI2...); err != nil {
err = errors.Wrapf(err, "%s, %q", qry, rowsI2)
log.Println(err)
return err
}
}
return err
}
return tx.Commit()
})
}
var n int64
var headerSeen bool
chunk := (*(chunkPool.Get().(*[][]string)))[:0]
if err = cfg.ReadRows(ctx,
func(_ string, row dbcsv.Row) error {
if err = ctx.Err(); err != nil {
chunk = chunk[:0]
return err
}
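// The first row repeats the header (column names were already derived from it
// in the first pass), so skip it; the heap profile hook fires every 10000 rows.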
if !headerSeen {
headerSeen = true
return nil
} else if n%10000 == 0 {
writeHeapProf()
}
for i, s := range row.Values {
row.Values[i] = strings.TrimSpace(s)
}
chunk = append(chunk, row.Values)
if len(chunk) < chunkSize {
return nil
}
select {
case rowsCh <- rowsType{Rows: chunk, Start: n}:
n += int64(len(chunk))
case <-ctx.Done():
return ctx.Err()
}
chunk = (*chunkPool.Get().(*[][]string))[:0]
return nil
},
); err != nil {
return err
}
if len(chunk) != 0 {
rowsCh <- rowsType{Rows: chunk, Start: n}
n += int64(len(chunk))
}
close(rowsCh)
err = grp.Wait()
dur := time.Since(start)
log.Printf("Read %d, inserted %d rows from %q to %q in %s.", n, inserted, src, tbl, dur)
return err
}
func typeOf(s string) Type {
if ForceString {
return String
}
s = strings.TrimSpace(s)
if len(s) == 0 {
return Unknown
}
var hasNonDigit bool
var dotCount int
var length int
_ = strings.Map(func(r rune) rune {
length++
if r == '.' {
dotCount++
} else if !hasNonDigit {
hasNonDigit = !('0' <= r && r <= '9')
}
return -1
},
s)
if !hasNonDigit && s[0] != '0' {
if dotCount == 1 {
return Float
}
if dotCount == 0 {
return Int
}
}
if 10 <= len(s) && len(s) <= len(dateFormat) {
if _, err := time.Parse(dateFormat[:len(s)], s); err == nil {
return Date
}
}
return String
}
func CreateTable(ctx context.Context, db *sql.DB, tbl string, rows <-chan dbcsv.Row, truncate bool, tablespace string) ([]Column, error) {
tbl = strings.ToUpper(tbl)
qry := "SELECT COUNT(0) FROM user_tables WHERE UPPER(table_name) = :1"
var n int64
var cols []Column
if err := db.QueryRowContext(ctx, qry, tbl).Scan(&n); err != nil {
return cols, errors.Wrap(err, qry)
}
if n > 0 && truncate {
qry = `TRUNCATE TABLE ` + tbl
if _, err := db.ExecContext(ctx, qry); err != nil {
return cols, errors.Wrap(err, qry)
}
}
if n == 0 {
row := <-rows
log.Printf("row: %q", row.Values)
cols = make([]Column, len(row.Values))
for i, v := range row.Values {
v = strings.Map(func(r rune) rune {
r = unicode.ToLower(r)
switch r {
case 'á':
return 'a'
case 'é':
return 'e'
case 'í':
return 'i'
case 'ö', 'ő', 'ó':
return 'o'
case 'ü', 'ű', 'ú':
return 'u'
case '_':
return '_'
default:
if 'a' <= r && r <= 'z' || '0' <= r && r <= '9' {
return r
}
return '_'
}
},
v)
if len(v) > 30 {
v = fmt.Sprintf("%s_%02d", v[:27], i)
}
cols[i].Name = v
}
if ForceString {
for i := range cols {
cols[i].Type = String
}
}
for row := range rows {
for i, v := range row.Values {
if len(v) > cols[i].Length {
cols[i].Length = len(v)
}
if cols[i].Type == String {
continue
}
typ := typeOf(v)
if cols[i].Type == Unknown {
cols[i].Type = typ
} else if typ != cols[i].Type {
cols[i].Type = String
}
}
}
var buf bytes.Buffer
buf.WriteString(`CREATE TABLE "` + tbl + `" (`)
for i, c := range cols {
if i != 0 {
buf.WriteString(",\n")
}
if c.Type == Date {
fmt.Fprintf(&buf, " %s DATE", c.Name)
continue
}
length := c.Length
if length == 0 {
length = 1
}
fmt.Fprintf(&buf, " %s %s(%d)", c.Name, c.Type.String(), length)
}
buf.WriteString("\n)")
if tablespace != "" {
buf.WriteString(" TABLESPACE ")
buf.WriteString(tablespace)
}
qry = buf.String()
log.Println(qry)
if _, err := db.Exec(qry); err != nil {
return cols, errors.Wrap(err, qry)
}
cols = cols[:0]
}
qry = `SELECT column_name, data_type, NVL(data_length, 0), NVL(data_precision, 0), NVL(data_scale, 0), nullable
FROM user_tab_cols WHERE table_name = :1
ORDER BY column_id`
tRows, err := db.QueryContext(ctx, qry, tbl)
if err != nil {
return cols, errors.Wrap(err, qry)
}
defer tRows.Close()
for tRows.Next() {
var c Column
var nullable string
if err = tRows.Scan(&c.Name, &c.DataType, &c.Length, &c.Precision, &c.Scale, &nullable); err != nil {
return cols, err
}
c.Nullable = nullable != "N"
cols = append(cols, c)
}
return cols, nil
}
type Column struct {
Length int
Name string
Type Type
DataType string
Precision, Scale int
Nullable bool
}
type Type uint8
const (
Unknown = Type(0)
String = Type(1)
Int = Type(2)
Float = Type(3)
Date = Type(4)
)
func (t Type) String() string {
switch t {
case Int, Float:
return "NUMBER"
case Date:
return "DATE"
default:
return "VARCHAR2"
}
}
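// FromString converts one column's raw strings into the typed slice expected
// by the batch Exec, validating each value against the column's type.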
func (c Column) FromString(ss []string) (interface{}, error) {
if c.Dat | olumns(ctx context.Context, db *sql.DB, tbl string) ([]Column, error) {
// TODO(tgulacsi): this is Oracle-specific!
const qry = "SELECT column_name, data_type, data_length, data_precision, data_scale, nullable FROM user_tab_cols WHERE table_name = UPPER(:1) ORDER BY column_id"
rows, err := db.QueryContext(ctx, qry, tbl)
if err != nil {
return nil, errors.Wrap(err, qry)
}
defer rows.Close()
var cols []Column
for rows.Next() {
var c Column
var prec, scale sql.NullInt64
var nullable string
if err = rows.Scan(&c.Name, &c.DataType, &c.Length, &prec, &scale, &nullable); err != nil {
return nil, err
}
c.Nullable = nullable == "Y"
switch c.DataType {
case "DATE":
c.Type = Date
c.Length = 8
case "NUMBER":
c.Precision, c.Scale = int(prec.Int64), int(scale.Int64)
if c.Scale > 0 {
c.Type = Float
c.Length = c.Precision + 1
} else {
c.Type = Int
c.Length = c.Precision
}
default:
c.Type = String
}
cols = append(cols, c)
}
return cols, rows.Close()
}
var qRepl = strings.NewReplacer(
"'", "''",
"&", "'||CHR(38)||'",
)
func quote(w io.Writer, s string) error {
if _, err := w.Write([]byte{'\''}); err != nil {
return err
}
if _, err := io.WriteString(w, qRepl.Replace(s)); err != nil {
return err
}
_, err := w.Write([]byte{'\''})
return err
}
// vim: set fileencoding=utf-8 noet:
| aType == "DATE" || c.Type == Date {
res := make([]time.Time, len(ss))
for i, s := range ss {
if s == "" {
continue
}
var err error
if res[i], err = time.Parse(dateFormat[:len(s)], s); err != nil {
return res, errors.Wrapf(err, "%d. %q", i, s)
}
}
return res, nil
}
if strings.HasPrefix(c.DataType, "VARCHAR2") {
for i, s := range ss {
if len(s) > c.Length {
ss[i] = s[:c.Length]
			return ss, errors.Errorf("%d. %q is longer (%d) than allowed (%d) for column %v", i, s, len(s), c.Length, c)
}
}
return ss, nil
}
if c.Type == Int {
for i, s := range ss {
e := strings.Map(func(r rune) rune {
if !('0' <= r && r <= '9' || r == '-') {
return r
}
return -1
}, s)
if e != "" {
ss[i] = ""
return ss, errors.Errorf("%d. %q is not integer (%q)", i, s, e)
}
}
return ss, nil
}
if c.Type == Float {
for i, s := range ss {
e := strings.Map(func(r rune) rune {
if !('0' <= r && r <= '9' || r == '-' || r == '.') {
return r
}
return -1
}, s)
if e != "" {
ss[i] = ""
return ss, errors.Errorf("%d. %q is not float (%q)", i, s, e)
}
}
return ss, nil
}
return ss, nil
}
func getC | identifier_body |
prediction.py | # -*- coding: utf-8 -*-
"""prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1lcvaJYf5k-Y0mmlCYAF8kWQlr2P-eTwr
"""
# Load model
from joblib import dump, load
# model_name = "DTR_model"
# model = load('../models/' + model_name + '.joblib')
# from datetime import datetime
# # model = load("DTR_model.joblib")
"""# Load CMEMS data
Try to use WMS or other more flexible data retrieval
"""
import ftplib
import os
import numpy as np
import netCDF4 as nc
import pandas as pd
from datetime import datetime
def download(url, user, passwd, ftp_path, filename):
with ftplib.FTP(url) as ftp:
try:
ftp.login(user, passwd)
# Change directory
ftp.cwd(ftp_path)
# Download file (if there is not yet a local copy)
if os.path.isfile(filename):
print("There is already a local copy for this date ({})".format(filename))
else:
with open(filename, 'wb') as fp:
print("Downloading ... ({})".format(filename))
ftp.retrbinary('RETR {}'.format(filename), fp.write)
except ftplib.all_errors as e:
print('FTP error:', e)
# Check contents
"""
with ftplib.FTP('nrt.cmems-du.eu') as ftp:
try:
ftp.login(UN_CMEMS, PW_CMEMS)
# Change directory
ftp.cwd('Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/2021/07')
# List directory contents with additional information
ftp.retrlines('LIST')
# Get list of directory contents without additional information
files = []
ftp.retrlines('NLST', files.append)
print(files)
# Check file size
print("{} MB".format(ftp.size('mfwamglocep_2020120100_R20201202.nc')/1000000))
except ftplib.all_errors as e:
print('FTP error:', e)
"""
def calc_relative_direction(ship_dir, ww_dir):
|
def concatenate_cmems(cm_wave, cm_phy, ship_param, ship_dir):
"""
concatenate the variables from cmems wave and physics datasets
Parameters
----------
    cm_wave : netCDF4 dataset
        netcdf file cmems wave
    cm_phy : netCDF4 dataset
        netcdf file cmems physics
    ship_param : int
        ship variable that is used in model later (e.g. draft or length)
    ship_dir : str, in ("N", "E", "S", "W")
        direction the ship is going
"""
array = (np.flipud(cm_wave["VHM0"][0, :, :]).data) # extract data from CMEMS
dim = array.shape
l = np.prod(dim) # get number of "pixel"
# extract parameters from cmems dataset and reshape to array with dimension of 1 x number of pixel
vhm = (np.flipud(cm_wave["VHM0"][0, :, :])).reshape(l, 1)
vtm = (np.flipud(cm_wave["VTPK"][0, :, :])).reshape(l, 1)
temp = (np.flipud(cm_phy["thetao"][0, 1, :, :])).reshape(l, 1)
sal = (np.flipud(cm_phy["so"][0, 1, :, :])).reshape(l, 1)
# create column for ship parameter
ship = np.full((l, 1), ship_param)
# calculate relative direction of wind depending on ship direction
dir = calc_relative_direction(ship_dir, (np.flipud(cm_wave["VMDR_WW"][0, :, :])).reshape(l, 1))
# concatenate parameters
a = np.concatenate((ship, vhm, vtm, temp, sal, dir), axis=1)
# create pd df from array
X_pred = pd.DataFrame(data=a, # values
index=list(range(0, l)), # 1st column as index
columns=["Draft", "VHM0", "VTPK", "thetao", "so", "dir_4"]) # 1st row as the column names
return X_pred
def prepare_grid(cm_wave, cm_phy, ship_param, ship_dir, model):
"""
prepare grid of SOGs
Parameters
----------
    cm_wave : netCDF4 dataset
        netcdf file cmems wave
    cm_phy : netCDF4 dataset
        netcdf file cmems physics
    ship_param : int
        ship variable that is used in model later (e.g. draft or length)
    ship_dir : str, in ("N", "E", "S", "W")
        direction the ship is going
"""
X_pred = concatenate_cmems(cm_wave, cm_phy, ship_param, ship_dir)
# extract shape from cmems data
input = (np.flipud(cm_wave["VHM0"][0, :, :]))
dim = input.shape
# predict SOG
# model = load('cms_routing/models/DTR_model.joblib') # import model
SOG_pred = model.predict(X_pred)
SOG_pred = SOG_pred.reshape(dim) # reshape to 'coordinates'
SOG_pred[input < -30000] = -5 # -32767.0 # mask data with negative value
return SOG_pred
def calculateTimeGrid(SOG_E, SOG_N, SOG_S, SOG_W, AOI):
kmGridEW = np.load("lengthGridEW.npy")
kmGridEW = kmGridEW[AOI[2]:AOI[3], AOI[0]:AOI[1]]
kmGridNS = np.load("lengthGridNS.npy")
kmGridNS = kmGridNS[AOI[2]:AOI[3], AOI[0]:AOI[1]]
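    # The SOG grids are assumed to hold speeds in knots at ~70% engine power;
    # the cube law (power ~ speed**3) rescales them to 80% and 60% power.
    # 1 knot = 30.87 m/min, so crossing time [min] = distance [m] / (SOG * 30.87).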
timeGridE = SOG_E
constE = 70 / np.power(timeGridE, 3)
timeGridE80 = np.cbrt(80 / constE)
timeGridE60 = np.cbrt(60 / constE)
timeGridE = timeGridE[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE80 = timeGridE80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE60 = timeGridE60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE = np.where(timeGridE < 0, 10000, (kmGridEW * 1000) / (timeGridE * 30.87))
timeGridE80 = np.where(timeGridE80 < 0, 10000, (kmGridEW * 1000) / (timeGridE80 * 30.87))
timeGridE60 = np.where(timeGridE60 < 0, 10000, (kmGridEW * 1000) / (timeGridE60 * 30.87))
timeGridN = SOG_N
constN = 70 / np.power(timeGridN, 3)
timeGridN80 = np.cbrt(80 / constN)
timeGridN60 = np.cbrt(60 / constN)
timeGridN = timeGridN[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN80 = timeGridN80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN60 = timeGridN60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN = np.where(timeGridN < 0, 10000, (kmGridNS * 1000) / (timeGridN * 30.87))
timeGridN80 = np.where(timeGridN80 < 0, 10000, (kmGridNS * 1000) / (timeGridN80 * 30.87))
timeGridN60 = np.where(timeGridN60 < 0, 10000, (kmGridNS * 1000) / (timeGridN60 * 30.87))
timeGridS = SOG_S
constS = 70 / np.power(timeGridS, 3)
timeGridS80 = np.cbrt(80 / constS)
timeGridS60 = np.cbrt(60 / constS)
timeGridS = timeGridS[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS80 = timeGridS80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS60 = timeGridS60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS = np.where(timeGridS < 0, 10000, (kmGridNS * 1000) / (timeGridS * 30.87))
timeGridS80 = np.where(timeGridS80 < 0, 10000, (kmGridNS * 1000) / (timeGridS80 * 30.87))
timeGridS60 = np.where(timeGridS60 < 0, 10000, (kmGridNS * 1000) / (timeGridS60 * 30.87))
timeGridW = SOG_W
constW = 70 / np.power(timeGridW, 3)
timeGridW80 = np.cbrt(80 / constW)
timeGridW60 = np.cbrt(60 / constW)
timeGridW = timeGridW[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW80 = timeGridW80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW60 = timeGridW60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW = np.where(timeGridW < 0, 10000, (kmGridEW * 1000) / (timeGridW * 30.87))
timeGridW80 = np.where(timeGridW80 < 0, 10000, (kmGridEW * 1000) / (timeGridW80 * 30.87))
timeGridW60 = np.where(timeGridW60 < 0, 10000, (kmGridEW * 1000) / (timeGridW60 * 30.87))
timeGrids = [[timeGridN80, timeGridS80, timeGridE80, timeGridW80], [timeGridN, timeGridS, timeGridE, timeGridW],
[timeGridN60, timeGridS60, timeGridE60, timeGridW60]]
return timeGrids
'''
# created masked array
import numpy.ma as ma
SOG_pred = np.ma.masked_where(np.flipud(np.ma.getmask(ds[parameter][0, :, :])), SOG_pred.reshape(dim))
SOG_pred.fill_value = -32767
# SOG_pred =np.flipud(SOG_pred)
'''
# # create actual grids for different ship directions
# ship_param = 12
# SOG_N = prepare_grid(model, ds, ds_p, ship_param, "N")
# SOG_E = prepare_grid(model, ds, ds_p, ship_param, "E")
# SOG_S = prepare_grid(model, ds, ds_p, ship_param, "S")
# SOG_W = prepare_grid(model, ds, ds_p, ship_param, "W")
# def cmems_paths(date):
def get_cmems(date_start, date_end, UN_CMEMS, PW_CMEMS):
date_s = datetime.strptime(date_start, "%d.%m.%Y %H:%M")
date_e = datetime.strptime(date_end, "%d.%m.%Y %H:%M")
date_m = date_s + (date_e - date_s) / 2
date = date_m.strftime("%Y%m%d")
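    # The midpoint of the requested time window determines which daily forecast files to fetch.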
today = datetime.now().strftime("%Y%m%d")
path_date = date[0:4] + "/" + date[4:6]
url = 'nrt.cmems-du.eu'
path_w = 'Core/GLOBAL_ANALYSIS_FORECAST_WAV_001_027/global-analysis-forecast-wav-001-027/' + path_date
path_p = 'Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/' + path_date
with ftplib.FTP(url) as ftp:
try:
ftp.login(UN_CMEMS, PW_CMEMS)
ftp.cwd(path_w)
files = ftp.nlst()
files = [i for i in files if date in i]
filename_w = files[0]
ftp.cwd('/')
ftp.cwd(path_p)
files = ftp.nlst()
files = [i for i in files if date in i]
filename_p = files[0]
except ftplib.all_errors as e:
print('FTP error:', e)
download(url, UN_CMEMS, PW_CMEMS, path_w, filename_w)
download(url, UN_CMEMS, PW_CMEMS, path_p, filename_p)
ds_w = nc.Dataset(filename_w)
ds_p = nc.Dataset(filename_p)
return (ds_w, ds_p)
""""
# set CMEMS credentials
UN_CMEMS = "jstenkamp"
PW_CMEMS = ""
# cmems wave data download
url = 'nrt.cmems-du.eu'
path = 'Core/GLOBAL_ANALYSIS_FORECAST_WAV_001_027/global-analysis-forecast-wav-001-027/2021/07'
filename = 'mfwamglocep_2021070200_R20210703.nc'
download(url, UN_CMEMS, PW_CMEMS, path, filename)
# cmems physics download
url = 'nrt.cmems-du.eu'
path = 'Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/2021/07'
filename_p = 'mercatorpsy4v3r1_gl12_mean_20210702_R20210703.nc'
download(url, UN_CMEMS, PW_CMEMS, path, filename_p)
# load files as netcdf dataset
ds = nc.Dataset(filename)
ds_p = nc.Dataset(filename_p)
# ds
"""
| """
determine relative wind direction for ships going north, east, south or west
Parameters
----------
ship_dir : str, in ("N", "E", "S", "W")
direction the ship is going
ww_dir : array, float
array of relative wind directions [0 - 360]
"""
if ship_dir not in ("N", "E", "S", "W"):
raise Exception("Direction not accepted.")
    ww_360 = ww_dir
    ww_360[ww_360 < 0] = 360 + ww_360[ww_360 < 0]  # normalize negatives into [0, 360)
if ship_dir in ("N"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir < 45) | (ww_dir > 315)] = 1
dir_4[(ww_dir > 135) & (ww_dir < 225)] = 3
if ship_dir in ("E"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir > 45) & (ww_dir < 135)] = 1
dir_4[(ww_dir > 225) & (ww_dir < 315)] = 3
if ship_dir in ("W"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir > 45) & (ww_dir < 135)] = 3
dir_4[(ww_dir > 225) & (ww_dir < 315)] = 1
if ship_dir in ("S"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir < 45) | (ww_dir > 315)] = 3
dir_4[(ww_dir > 135) & (ww_dir < 225)] = 1
return dir_4 | identifier_body |
prediction.py | # -*- coding: utf-8 -*-
"""prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1lcvaJYf5k-Y0mmlCYAF8kWQlr2P-eTwr
"""
# Load model
from joblib import dump, load
# model_name = "DTR_model"
# model = load('../models/' + model_name + '.joblib')
# from datetime import datetime
# # model = load("DTR_model.joblib")
"""# Load CMEMS data
Try to use WMS or other more flexible data retrieval
"""
import ftplib
import os
import numpy as np
import netCDF4 as nc
import pandas as pd
from datetime import datetime
def | (url, user, passwd, ftp_path, filename):
with ftplib.FTP(url) as ftp:
try:
ftp.login(user, passwd)
# Change directory
ftp.cwd(ftp_path)
# Download file (if there is not yet a local copy)
if os.path.isfile(filename):
print("There is already a local copy for this date ({})".format(filename))
else:
with open(filename, 'wb') as fp:
print("Downloading ... ({})".format(filename))
ftp.retrbinary('RETR {}'.format(filename), fp.write)
except ftplib.all_errors as e:
print('FTP error:', e)
# Check contents
"""
with ftplib.FTP('nrt.cmems-du.eu') as ftp:
try:
ftp.login(UN_CMEMS, PW_CMEMS)
# Change directory
ftp.cwd('Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/2021/07')
# List directory contents with additional information
ftp.retrlines('LIST')
# Get list of directory contents without additional information
files = []
ftp.retrlines('NLST', files.append)
print(files)
# Check file size
print("{} MB".format(ftp.size('mfwamglocep_2020120100_R20201202.nc')/1000000))
except ftplib.all_errors as e:
print('FTP error:', e)
"""
def calc_relative_direction(ship_dir, ww_dir):
"""
determine relative wind direction for ships going north, east, south or west
Parameters
----------
ship_dir : str, in ("N", "E", "S", "W")
direction the ship is going
ww_dir : array, float
array of relative wind directions [0 - 360]
"""
if ship_dir not in ("N", "E", "S", "W"):
raise Exception("Direction not accepted.")
    ww_360 = ww_dir
    ww_360[ww_360 < 0] = 360 + ww_360[ww_360 < 0]  # normalize negatives into [0, 360)
if ship_dir in ("N"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir < 45) | (ww_dir > 315)] = 1
dir_4[(ww_dir > 135) & (ww_dir < 225)] = 3
if ship_dir in ("E"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir > 45) & (ww_dir < 135)] = 1
dir_4[(ww_dir > 225) & (ww_dir < 315)] = 3
if ship_dir in ("W"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir > 45) & (ww_dir < 135)] = 3
dir_4[(ww_dir > 225) & (ww_dir < 315)] = 1
if ship_dir in ("S"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir < 45) | (ww_dir > 315)] = 3
dir_4[(ww_dir > 135) & (ww_dir < 225)] = 1
return dir_4
def concatenate_cmems(cm_wave, cm_phy, ship_param, ship_dir):
"""
concatenate the variables from cmems wave and physics datasets
Parameters
----------
    cm_wave : netCDF4 dataset
        netcdf file cmems wave
    cm_phy : netCDF4 dataset
        netcdf file cmems physics
    ship_param : int
        ship variable that is used in model later (e.g. draft or length)
    ship_dir : str, in ("N", "E", "S", "W")
        direction the ship is going
"""
array = (np.flipud(cm_wave["VHM0"][0, :, :]).data) # extract data from CMEMS
dim = array.shape
l = np.prod(dim) # get number of "pixel"
# extract parameters from cmems dataset and reshape to array with dimension of 1 x number of pixel
vhm = (np.flipud(cm_wave["VHM0"][0, :, :])).reshape(l, 1)
vtm = (np.flipud(cm_wave["VTPK"][0, :, :])).reshape(l, 1)
temp = (np.flipud(cm_phy["thetao"][0, 1, :, :])).reshape(l, 1)
sal = (np.flipud(cm_phy["so"][0, 1, :, :])).reshape(l, 1)
# create column for ship parameter
ship = np.full((l, 1), ship_param)
# calculate relative direction of wind depending on ship direction
dir = calc_relative_direction(ship_dir, (np.flipud(cm_wave["VMDR_WW"][0, :, :])).reshape(l, 1))
# concatenate parameters
a = np.concatenate((ship, vhm, vtm, temp, sal, dir), axis=1)
# create pd df from array
X_pred = pd.DataFrame(data=a, # values
index=list(range(0, l)), # 1st column as index
columns=["Draft", "VHM0", "VTPK", "thetao", "so", "dir_4"]) # 1st row as the column names
return X_pred
def prepare_grid(cm_wave, cm_phy, ship_param, ship_dir, model):
"""
prepare grid of SOGs
Parameters
----------
    cm_wave : netCDF4 dataset
        netcdf file cmems wave
    cm_phy : netCDF4 dataset
        netcdf file cmems physics
    ship_param : int
        ship variable that is used in model later (e.g. draft or length)
    ship_dir : str, in ("N", "E", "S", "W")
        direction the ship is going
"""
X_pred = concatenate_cmems(cm_wave, cm_phy, ship_param, ship_dir)
# extract shape from cmems data
input = (np.flipud(cm_wave["VHM0"][0, :, :]))
dim = input.shape
# predict SOG
# model = load('cms_routing/models/DTR_model.joblib') # import model
SOG_pred = model.predict(X_pred)
SOG_pred = SOG_pred.reshape(dim) # reshape to 'coordinates'
SOG_pred[input < -30000] = -5 # -32767.0 # mask data with negative value
return SOG_pred
def calculateTimeGrid(SOG_E, SOG_N, SOG_S, SOG_W, AOI):
kmGridEW = np.load("lengthGridEW.npy")
kmGridEW = kmGridEW[AOI[2]:AOI[3], AOI[0]:AOI[1]]
kmGridNS = np.load("lengthGridNS.npy")
kmGridNS = kmGridNS[AOI[2]:AOI[3], AOI[0]:AOI[1]]
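    # The SOG grids are assumed to hold speeds in knots at ~70% engine power;
    # the cube law (power ~ speed**3) rescales them to 80% and 60% power.
    # 1 knot = 30.87 m/min, so crossing time [min] = distance [m] / (SOG * 30.87).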
timeGridE = SOG_E
constE = 70 / np.power(timeGridE, 3)
timeGridE80 = np.cbrt(80 / constE)
timeGridE60 = np.cbrt(60 / constE)
timeGridE = timeGridE[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE80 = timeGridE80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE60 = timeGridE60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE = np.where(timeGridE < 0, 10000, (kmGridEW * 1000) / (timeGridE * 30.87))
timeGridE80 = np.where(timeGridE80 < 0, 10000, (kmGridEW * 1000) / (timeGridE80 * 30.87))
timeGridE60 = np.where(timeGridE60 < 0, 10000, (kmGridEW * 1000) / (timeGridE60 * 30.87))
timeGridN = SOG_N
constN = 70 / np.power(timeGridN, 3)
timeGridN80 = np.cbrt(80 / constN)
timeGridN60 = np.cbrt(60 / constN)
timeGridN = timeGridN[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN80 = timeGridN80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN60 = timeGridN60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN = np.where(timeGridN < 0, 10000, (kmGridNS * 1000) / (timeGridN * 30.87))
timeGridN80 = np.where(timeGridN80 < 0, 10000, (kmGridNS * 1000) / (timeGridN80 * 30.87))
timeGridN60 = np.where(timeGridN60 < 0, 10000, (kmGridNS * 1000) / (timeGridN60 * 30.87))
timeGridS = SOG_S
constS = 70 / np.power(timeGridS, 3)
timeGridS80 = np.cbrt(80 / constS)
timeGridS60 = np.cbrt(60 / constS)
timeGridS = timeGridS[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS80 = timeGridS80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS60 = timeGridS60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS = np.where(timeGridS < 0, 10000, (kmGridNS * 1000) / (timeGridS * 30.87))
timeGridS80 = np.where(timeGridS80 < 0, 10000, (kmGridNS * 1000) / (timeGridS80 * 30.87))
timeGridS60 = np.where(timeGridS60 < 0, 10000, (kmGridNS * 1000) / (timeGridS60 * 30.87))
timeGridW = SOG_W
constW = 70 / np.power(timeGridW, 3)
timeGridW80 = np.cbrt(80 / constW)
timeGridW60 = np.cbrt(60 / constW)
timeGridW = timeGridW[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW80 = timeGridW80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW60 = timeGridW60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW = np.where(timeGridW < 0, 10000, (kmGridEW * 1000) / (timeGridW * 30.87))
timeGridW80 = np.where(timeGridW80 < 0, 10000, (kmGridEW * 1000) / (timeGridW80 * 30.87))
timeGridW60 = np.where(timeGridW60 < 0, 10000, (kmGridEW * 1000) / (timeGridW60 * 30.87))
timeGrids = [[timeGridN80, timeGridS80, timeGridE80, timeGridW80], [timeGridN, timeGridS, timeGridE, timeGridW],
[timeGridN60, timeGridS60, timeGridE60, timeGridW60]]
return timeGrids
'''
# created masked array
import numpy.ma as ma
SOG_pred = np.ma.masked_where(np.flipud(np.ma.getmask(ds[parameter][0, :, :])), SOG_pred.reshape(dim))
SOG_pred.fill_value = -32767
# SOG_pred =np.flipud(SOG_pred)
'''
# # create actual grids for different ship directions
# ship_param = 12
# SOG_N = prepare_grid(model, ds, ds_p, ship_param, "N")
# SOG_E = prepare_grid(model, ds, ds_p, ship_param, "E")
# SOG_S = prepare_grid(model, ds, ds_p, ship_param, "S")
# SOG_W = prepare_grid(model, ds, ds_p, ship_param, "W")
# def cmems_paths(date):
def get_cmems(date_start, date_end, UN_CMEMS, PW_CMEMS):
date_s = datetime.strptime(date_start, "%d.%m.%Y %H:%M")
date_e = datetime.strptime(date_end, "%d.%m.%Y %H:%M")
date_m = date_s + (date_e - date_s) / 2
date = date_m.strftime("%Y%m%d")
today = datetime.now().strftime("%Y%m%d")
path_date = date[0:4] + "/" + date[4:6]
url = 'nrt.cmems-du.eu'
path_w = 'Core/GLOBAL_ANALYSIS_FORECAST_WAV_001_027/global-analysis-forecast-wav-001-027/' + path_date
path_p = 'Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/' + path_date
with ftplib.FTP(url) as ftp:
try:
ftp.login(UN_CMEMS, PW_CMEMS)
ftp.cwd(path_w)
files = ftp.nlst()
files = [i for i in files if date in i]
filename_w = files[0]
ftp.cwd('/')
ftp.cwd(path_p)
files = ftp.nlst()
files = [i for i in files if date in i]
filename_p = files[0]
except ftplib.all_errors as e:
print('FTP error:', e)
download(url, UN_CMEMS, PW_CMEMS, path_w, filename_w)
download(url, UN_CMEMS, PW_CMEMS, path_p, filename_p)
ds_w = nc.Dataset(filename_w)
ds_p = nc.Dataset(filename_p)
return (ds_w, ds_p)
""""
# set CMEMS credentials
UN_CMEMS = "jstenkamp"
PW_CMEMS = ""
# cmems wave data download
url = 'nrt.cmems-du.eu'
path = 'Core/GLOBAL_ANALYSIS_FORECAST_WAV_001_027/global-analysis-forecast-wav-001-027/2021/07'
filename = 'mfwamglocep_2021070200_R20210703.nc'
download(url, UN_CMEMS, PW_CMEMS, path, filename)
# cmems physics download
url = 'nrt.cmems-du.eu'
path = 'Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/2021/07'
filename_p = 'mercatorpsy4v3r1_gl12_mean_20210702_R20210703.nc'
download(url, UN_CMEMS, PW_CMEMS, path, filename_p)
# load files as netcdf dataset
ds = nc.Dataset(filename)
ds_p = nc.Dataset(filename_p)
# ds
"""
| download | identifier_name |
prediction.py | # -*- coding: utf-8 -*-
"""prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1lcvaJYf5k-Y0mmlCYAF8kWQlr2P-eTwr
"""
# Load model
from joblib import dump, load
# model_name = "DTR_model"
# model = load('../models/' + model_name + '.joblib')
# from datetime import datetime
# # model = load("DTR_model.joblib")
"""# Load CMEMS data
Try to use WMS or other more flexible data retrieval
"""
import ftplib
import os
import numpy as np
import netCDF4 as nc
import pandas as pd
from datetime import datetime |
try:
ftp.login(user, passwd)
# Change directory
ftp.cwd(ftp_path)
# Download file (if there is not yet a local copy)
if os.path.isfile(filename):
print("There is already a local copy for this date ({})".format(filename))
else:
with open(filename, 'wb') as fp:
print("Downloading ... ({})".format(filename))
ftp.retrbinary('RETR {}'.format(filename), fp.write)
except ftplib.all_errors as e:
print('FTP error:', e)
# Check contents
"""
with ftplib.FTP('nrt.cmems-du.eu') as ftp:
try:
ftp.login(UN_CMEMS, PW_CMEMS)
# Change directory
ftp.cwd('Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/2021/07')
# List directory contents with additional information
ftp.retrlines('LIST')
# Get list of directory contents without additional information
files = []
ftp.retrlines('NLST', files.append)
print(files)
# Check file size
print("{} MB".format(ftp.size('mfwamglocep_2020120100_R20201202.nc')/1000000))
except ftplib.all_errors as e:
print('FTP error:', e)
"""
def calc_relative_direction(ship_dir, ww_dir):
"""
determine relative wind direction for ships going north, east, south or west
Parameters
----------
ship_dir : str, in ("N", "E", "S", "W")
direction the ship is going
ww_dir : array, float
array of relative wind directions [0 - 360]
"""
if ship_dir not in ("N", "E", "S", "W"):
raise Exception("Direction not accepted.")
    ww_360 = ww_dir
    ww_360[ww_360 < 0] = 360 + ww_360[ww_360 < 0]  # normalize negatives into [0, 360)
if ship_dir in ("N"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir < 45) | (ww_dir > 315)] = 1
dir_4[(ww_dir > 135) & (ww_dir < 225)] = 3
if ship_dir in ("E"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir > 45) & (ww_dir < 135)] = 1
dir_4[(ww_dir > 225) & (ww_dir < 315)] = 3
if ship_dir in ("W"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir > 45) & (ww_dir < 135)] = 3
dir_4[(ww_dir > 225) & (ww_dir < 315)] = 1
if ship_dir in ("S"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir < 45) | (ww_dir > 315)] = 3
dir_4[(ww_dir > 135) & (ww_dir < 225)] = 1
return dir_4
def concatenate_cmems(cm_wave, cm_phy, ship_param, ship_dir):
"""
concatenate the variables from cmems wave and physics datasets
Parameters
----------
    cm_wave : netCDF4 dataset
        netcdf file cmems wave
    cm_phy : netCDF4 dataset
        netcdf file cmems physics
    ship_param : int
        ship variable that is used in model later (e.g. draft or length)
    ship_dir : str, in ("N", "E", "S", "W")
        direction the ship is going
"""
array = (np.flipud(cm_wave["VHM0"][0, :, :]).data) # extract data from CMEMS
dim = array.shape
l = np.prod(dim) # get number of "pixel"
# extract parameters from cmems dataset and reshape to array with dimension of 1 x number of pixel
vhm = (np.flipud(cm_wave["VHM0"][0, :, :])).reshape(l, 1)
vtm = (np.flipud(cm_wave["VTPK"][0, :, :])).reshape(l, 1)
temp = (np.flipud(cm_phy["thetao"][0, 1, :, :])).reshape(l, 1)
sal = (np.flipud(cm_phy["so"][0, 1, :, :])).reshape(l, 1)
# create column for ship parameter
ship = np.full((l, 1), ship_param)
# calculate relative direction of wind depending on ship direction
dir = calc_relative_direction(ship_dir, (np.flipud(cm_wave["VMDR_WW"][0, :, :])).reshape(l, 1))
# concatenate parameters
a = np.concatenate((ship, vhm, vtm, temp, sal, dir), axis=1)
# create pd df from array
X_pred = pd.DataFrame(data=a, # values
index=list(range(0, l)), # 1st column as index
columns=["Draft", "VHM0", "VTPK", "thetao", "so", "dir_4"]) # 1st row as the column names
return X_pred
def prepare_grid(cm_wave, cm_phy, ship_param, ship_dir, model):
"""
prepare grid of SOGs
Parameters
----------
    cm_wave : netCDF4 dataset
        netcdf file cmems wave
    cm_phy : netCDF4 dataset
        netcdf file cmems physics
    ship_param : int
        ship variable that is used in model later (e.g. draft or length)
    ship_dir : str, in ("N", "E", "S", "W")
        direction the ship is going
"""
X_pred = concatenate_cmems(cm_wave, cm_phy, ship_param, ship_dir)
# extract shape from cmems data
input = (np.flipud(cm_wave["VHM0"][0, :, :]))
dim = input.shape
# predict SOG
# model = load('cms_routing/models/DTR_model.joblib') # import model
SOG_pred = model.predict(X_pred)
SOG_pred = SOG_pred.reshape(dim) # reshape to 'coordinates'
SOG_pred[input < -30000] = -5 # -32767.0 # mask data with negative value
return SOG_pred
def calculateTimeGrid(SOG_E, SOG_N, SOG_S, SOG_W, AOI):
kmGridEW = np.load("lengthGridEW.npy")
kmGridEW = kmGridEW[AOI[2]:AOI[3], AOI[0]:AOI[1]]
kmGridNS = np.load("lengthGridNS.npy")
kmGridNS = kmGridNS[AOI[2]:AOI[3], AOI[0]:AOI[1]]
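    # The SOG grids are assumed to hold speeds in knots at ~70% engine power;
    # the cube law (power ~ speed**3) rescales them to 80% and 60% power.
    # 1 knot = 30.87 m/min, so crossing time [min] = distance [m] / (SOG * 30.87).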
timeGridE = SOG_E
constE = 70 / np.power(timeGridE, 3)
timeGridE80 = np.cbrt(80 / constE)
timeGridE60 = np.cbrt(60 / constE)
timeGridE = timeGridE[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE80 = timeGridE80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE60 = timeGridE60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE = np.where(timeGridE < 0, 10000, (kmGridEW * 1000) / (timeGridE * 30.87))
timeGridE80 = np.where(timeGridE80 < 0, 10000, (kmGridEW * 1000) / (timeGridE80 * 30.87))
timeGridE60 = np.where(timeGridE60 < 0, 10000, (kmGridEW * 1000) / (timeGridE60 * 30.87))
timeGridN = SOG_N
constN = 70 / np.power(timeGridN, 3)
timeGridN80 = np.cbrt(80 / constN)
timeGridN60 = np.cbrt(60 / constN)
timeGridN = timeGridN[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN80 = timeGridN80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN60 = timeGridN60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN = np.where(timeGridN < 0, 10000, (kmGridNS * 1000) / (timeGridN * 30.87))
timeGridN80 = np.where(timeGridN80 < 0, 10000, (kmGridNS * 1000) / (timeGridN80 * 30.87))
timeGridN60 = np.where(timeGridN60 < 0, 10000, (kmGridNS * 1000) / (timeGridN60 * 30.87))
timeGridS = SOG_S
constS = 70 / np.power(timeGridS, 3)
timeGridS80 = np.cbrt(80 / constS)
timeGridS60 = np.cbrt(60 / constS)
timeGridS = timeGridS[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS80 = timeGridS80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS60 = timeGridS60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS = np.where(timeGridS < 0, 10000, (kmGridNS * 1000) / (timeGridS * 30.87))
timeGridS80 = np.where(timeGridS80 < 0, 10000, (kmGridNS * 1000) / (timeGridS80 * 30.87))
timeGridS60 = np.where(timeGridS60 < 0, 10000, (kmGridNS * 1000) / (timeGridS60 * 30.87))
timeGridW = SOG_W
constW = 70 / np.power(timeGridW, 3)
timeGridW80 = np.cbrt(80 / constW)
timeGridW60 = np.cbrt(60 / constW)
timeGridW = timeGridW[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW80 = timeGridW80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW60 = timeGridW60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW = np.where(timeGridW < 0, 10000, (kmGridEW * 1000) / (timeGridW * 30.87))
timeGridW80 = np.where(timeGridW80 < 0, 10000, (kmGridEW * 1000) / (timeGridW80 * 30.87))
timeGridW60 = np.where(timeGridW60 < 0, 10000, (kmGridEW * 1000) / (timeGridW60 * 30.87))
timeGrids = [[timeGridN80, timeGridS80, timeGridE80, timeGridW80], [timeGridN, timeGridS, timeGridE, timeGridW],
[timeGridN60, timeGridS60, timeGridE60, timeGridW60]]
return timeGrids
'''
# created masked array
import numpy.ma as ma
SOG_pred = np.ma.masked_where(np.flipud(np.ma.getmask(ds[parameter][0, :, :])), SOG_pred.reshape(dim))
SOG_pred.fill_value = -32767
# SOG_pred =np.flipud(SOG_pred)
'''
# # create actual grids for different ship directions
# ship_param = 12
# SOG_N = prepare_grid(model, ds, ds_p, ship_param, "N")
# SOG_E = prepare_grid(model, ds, ds_p, ship_param, "E")
# SOG_S = prepare_grid(model, ds, ds_p, ship_param, "S")
# SOG_W = prepare_grid(model, ds, ds_p, ship_param, "W")
# def cmems_paths(date):
def get_cmems(date_start, date_end, UN_CMEMS, PW_CMEMS):
date_s = datetime.strptime(date_start, "%d.%m.%Y %H:%M")
date_e = datetime.strptime(date_end, "%d.%m.%Y %H:%M")
date_m = date_s + (date_e - date_s) / 2
date = date_m.strftime("%Y%m%d")
today = datetime.now().strftime("%Y%m%d")
path_date = date[0:4] + "/" + date[4:6]
url = 'nrt.cmems-du.eu'
path_w = 'Core/GLOBAL_ANALYSIS_FORECAST_WAV_001_027/global-analysis-forecast-wav-001-027/' + path_date
path_p = 'Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/' + path_date
with ftplib.FTP(url) as ftp:
try:
ftp.login(UN_CMEMS, PW_CMEMS)
ftp.cwd(path_w)
files = ftp.nlst()
files = [i for i in files if date in i]
filename_w = files[0]
ftp.cwd('/')
ftp.cwd(path_p)
files = ftp.nlst()
files = [i for i in files if date in i]
filename_p = files[0]
except ftplib.all_errors as e:
print('FTP error:', e)
download(url, UN_CMEMS, PW_CMEMS, path_w, filename_w)
download(url, UN_CMEMS, PW_CMEMS, path_p, filename_p)
ds_w = nc.Dataset(filename_w)
ds_p = nc.Dataset(filename_p)
return (ds_w, ds_p)
""""
# set CMEMS credentials
UN_CMEMS = "jstenkamp"
PW_CMEMS = ""
# cmems wave data download
url = 'nrt.cmems-du.eu'
path = 'Core/GLOBAL_ANALYSIS_FORECAST_WAV_001_027/global-analysis-forecast-wav-001-027/2021/07'
filename = 'mfwamglocep_2021070200_R20210703.nc'
download(url, UN_CMEMS, PW_CMEMS, path, filename)
# cmems physics download
url = 'nrt.cmems-du.eu'
path = 'Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/2021/07'
filename_p = 'mercatorpsy4v3r1_gl12_mean_20210702_R20210703.nc'
download(url, UN_CMEMS, PW_CMEMS, path, filename_p)
# load files as netcdf dataset
ds = nc.Dataset(filename)
ds_p = nc.Dataset(filename_p)
# ds
""" |
def download(url, user, passwd, ftp_path, filename):
with ftplib.FTP(url) as ftp: | random_line_split |
prediction.py | # -*- coding: utf-8 -*-
"""prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1lcvaJYf5k-Y0mmlCYAF8kWQlr2P-eTwr
"""
# Load model
from joblib import dump, load
# model_name = "DTR_model"
# model = load('../models/' + model_name + '.joblib')
# from datetime import datetime
# # model = load("DTR_model.joblib")
"""# Load CMEMS data
Try to use WMS or other more flexibel data retrieval
"""
import ftplib
import os
import numpy as np
import netCDF4 as nc
import pandas as pd
from datetime import datetime
def download(url, user, passwd, ftp_path, filename):
with ftplib.FTP(url) as ftp:
try:
ftp.login(user, passwd)
# Change directory
ftp.cwd(ftp_path)
# Download file (if there is not yet a local copy)
if os.path.isfile(filename):
print("There is already a local copy for this date ({})".format(filename))
else:
with open(filename, 'wb') as fp:
print("Downloading ... ({})".format(filename))
ftp.retrbinary('RETR {}'.format(filename), fp.write)
except ftplib.all_errors as e:
print('FTP error:', e)
# Check contents
"""
with ftplib.FTP('nrt.cmems-du.eu') as ftp:
try:
ftp.login(UN_CMEMS, PW_CMEMS)
# Change directory
ftp.cwd('Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/2021/07')
# List directory contents with additional information
ftp.retrlines('LIST')
# Get list of directory contents without additional information
files = []
ftp.retrlines('NLST', files.append)
print(files)
# Check file size
print("{} MB".format(ftp.size('mfwamglocep_2020120100_R20201202.nc')/1000000))
except ftplib.all_errors as e:
print('FTP error:', e)
"""
def calc_relative_direction(ship_dir, ww_dir):
"""
determine relative wind direction for ships going north, east, south or west
Parameters
----------
ship_dir : str, in ("N", "E", "S", "W")
direction the ship is going
ww_dir : array, float
array of relative wind directions [0 - 360]
"""
if ship_dir not in ("N", "E", "S", "W"):
raise Exception("Direction not accepted.")
    ww_360 = ww_dir
    ww_360[ww_360 < 0] = 360 + ww_360[ww_360 < 0]  # normalize negatives into [0, 360)
if ship_dir in ("N"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir < 45) | (ww_dir > 315)] = 1
dir_4[(ww_dir > 135) & (ww_dir < 225)] = 3
if ship_dir in ("E"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir > 45) & (ww_dir < 135)] = 1
dir_4[(ww_dir > 225) & (ww_dir < 315)] = 3
if ship_dir in ("W"):
|
if ship_dir in ("S"):
dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir < 45) | (ww_dir > 315)] = 3
dir_4[(ww_dir > 135) & (ww_dir < 225)] = 1
return dir_4
def concatenate_cmems(cm_wave, cm_phy, ship_param, ship_dir):
"""
concatenate the variables from cmems wave and physics datasets
Parameters
----------
    cm_wave : netCDF4 dataset
        netcdf file cmems wave
    cm_phy : netCDF4 dataset
        netcdf file cmems physics
    ship_param : int
        ship variable that is used in model later (e.g. draft or length)
    ship_dir : str, in ("N", "E", "S", "W")
        direction the ship is going
"""
array = (np.flipud(cm_wave["VHM0"][0, :, :]).data) # extract data from CMEMS
dim = array.shape
l = np.prod(dim) # get number of "pixel"
# extract parameters from cmems dataset and reshape to array with dimension of 1 x number of pixel
vhm = (np.flipud(cm_wave["VHM0"][0, :, :])).reshape(l, 1)
vtm = (np.flipud(cm_wave["VTPK"][0, :, :])).reshape(l, 1)
temp = (np.flipud(cm_phy["thetao"][0, 1, :, :])).reshape(l, 1)
sal = (np.flipud(cm_phy["so"][0, 1, :, :])).reshape(l, 1)
# create column for ship parameter
ship = np.full((l, 1), ship_param)
# calculate relative direction of wind depending on ship direction
dir = calc_relative_direction(ship_dir, (np.flipud(cm_wave["VMDR_WW"][0, :, :])).reshape(l, 1))
# concatenate parameters
a = np.concatenate((ship, vhm, vtm, temp, sal, dir), axis=1)
# create pd df from array
X_pred = pd.DataFrame(data=a, # values
index=list(range(0, l)), # 1st column as index
columns=["Draft", "VHM0", "VTPK", "thetao", "so", "dir_4"]) # 1st row as the column names
return X_pred
def prepare_grid(cm_wave, cm_phy, ship_param, ship_dir, model):
"""
prepare grid of SOGs
Parameters
----------
    cm_wave : netCDF4 dataset
        netcdf file cmems wave
    cm_phy : netCDF4 dataset
        netcdf file cmems physics
    ship_param : int
        ship variable that is used in model later (e.g. draft or length)
    ship_dir : str, in ("N", "E", "S", "W")
        direction the ship is going
"""
X_pred = concatenate_cmems(cm_wave, cm_phy, ship_param, ship_dir)
# extract shape from cmems data
input = (np.flipud(cm_wave["VHM0"][0, :, :]))
dim = input.shape
# predict SOG
# model = load('cms_routing/models/DTR_model.joblib') # import model
SOG_pred = model.predict(X_pred)
SOG_pred = SOG_pred.reshape(dim) # reshape to 'coordinates'
SOG_pred[input < -30000] = -5 # -32767.0 # mask data with negative value
return SOG_pred
def calculateTimeGrid(SOG_E, SOG_N, SOG_S, SOG_W, AOI):
kmGridEW = np.load("lengthGridEW.npy")
kmGridEW = kmGridEW[AOI[2]:AOI[3], AOI[0]:AOI[1]]
kmGridNS = np.load("lengthGridNS.npy")
kmGridNS = kmGridNS[AOI[2]:AOI[3], AOI[0]:AOI[1]]
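    # The SOG grids are assumed to hold speeds in knots at ~70% engine power;
    # the cube law (power ~ speed**3) rescales them to 80% and 60% power.
    # 1 knot = 30.87 m/min, so crossing time [min] = distance [m] / (SOG * 30.87).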
timeGridE = SOG_E
constE = 70 / np.power(timeGridE, 3)
timeGridE80 = np.cbrt(80 / constE)
timeGridE60 = np.cbrt(60 / constE)
timeGridE = timeGridE[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE80 = timeGridE80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE60 = timeGridE60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridE = np.where(timeGridE < 0, 10000, (kmGridEW * 1000) / (timeGridE * 30.87))
timeGridE80 = np.where(timeGridE80 < 0, 10000, (kmGridEW * 1000) / (timeGridE80 * 30.87))
timeGridE60 = np.where(timeGridE60 < 0, 10000, (kmGridEW * 1000) / (timeGridE60 * 30.87))
timeGridN = SOG_N
constN = 70 / np.power(timeGridN, 3)
timeGridN80 = np.cbrt(80 / constN)
timeGridN60 = np.cbrt(60 / constN)
timeGridN = timeGridN[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN80 = timeGridN80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN60 = timeGridN60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridN = np.where(timeGridN < 0, 10000, (kmGridNS * 1000) / (timeGridN * 30.87))
timeGridN80 = np.where(timeGridN80 < 0, 10000, (kmGridNS * 1000) / (timeGridN80 * 30.87))
timeGridN60 = np.where(timeGridN60 < 0, 10000, (kmGridNS * 1000) / (timeGridN60 * 30.87))
timeGridS = SOG_S
constS = 70 / np.power(timeGridS, 3)
timeGridS80 = np.cbrt(80 / constS)
timeGridS60 = np.cbrt(60 / constS)
timeGridS = timeGridS[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS80 = timeGridS80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS60 = timeGridS60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridS = np.where(timeGridS < 0, 10000, (kmGridNS * 1000) / (timeGridS * 30.87))
timeGridS80 = np.where(timeGridS80 < 0, 10000, (kmGridNS * 1000) / (timeGridS80 * 30.87))
timeGridS60 = np.where(timeGridS60 < 0, 10000, (kmGridNS * 1000) / (timeGridS60 * 30.87))
timeGridW = SOG_W
constW = 70 / np.power(timeGridW, 3)
timeGridW80 = np.cbrt(80 / constW)
timeGridW60 = np.cbrt(60 / constW)
timeGridW = timeGridW[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW80 = timeGridW80[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW60 = timeGridW60[AOI[2]:AOI[3], AOI[0]:AOI[1]]
timeGridW = np.where(timeGridW < 0, 10000, (kmGridEW * 1000) / (timeGridW * 30.87))
timeGridW80 = np.where(timeGridW80 < 0, 10000, (kmGridEW * 1000) / (timeGridW80 * 30.87))
timeGridW60 = np.where(timeGridW60 < 0, 10000, (kmGridEW * 1000) / (timeGridW60 * 30.87))
timeGrids = [[timeGridN80, timeGridS80, timeGridE80, timeGridW80], [timeGridN, timeGridS, timeGridE, timeGridW],
[timeGridN60, timeGridS60, timeGridE60, timeGridW60]]
return timeGrids
'''
# created masked array
import numpy.ma as ma
SOG_pred = np.ma.masked_where(np.flipud(np.ma.getmask(ds[parameter][0, :, :])), SOG_pred.reshape(dim))
SOG_pred.fill_value = -32767
# SOG_pred =np.flipud(SOG_pred)
'''
# # create actual grids for different ship directions
# ship_param = 12
# SOG_N = prepare_grid(model, ds, ds_p, ship_param, "N")
# SOG_E = prepare_grid(model, ds, ds_p, ship_param, "E")
# SOG_S = prepare_grid(model, ds, ds_p, ship_param, "S")
# SOG_W = prepare_grid(model, ds, ds_p, ship_param, "W")
# def cmems_paths(date):
def get_cmems(date_start, date_end, UN_CMEMS, PW_CMEMS):
date_s = datetime.strptime(date_start, "%d.%m.%Y %H:%M")
date_e = datetime.strptime(date_end, "%d.%m.%Y %H:%M")
date_m = date_s + (date_e - date_s) / 2
date = date_m.strftime("%Y%m%d")
today = datetime.now().strftime("%Y%m%d")
path_date = date[0:4] + "/" + date[4:6]
url = 'nrt.cmems-du.eu'
path_w = 'Core/GLOBAL_ANALYSIS_FORECAST_WAV_001_027/global-analysis-forecast-wav-001-027/' + path_date
path_p = 'Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/' + path_date
with ftplib.FTP(url) as ftp:
try:
ftp.login(UN_CMEMS, PW_CMEMS)
ftp.cwd(path_w)
files = ftp.nlst()
files = [i for i in files if date in i]
filename_w = files[0]
ftp.cwd('/')
ftp.cwd(path_p)
files = ftp.nlst()
files = [i for i in files if date in i]
filename_p = files[0]
except ftplib.all_errors as e:
print('FTP error:', e)
download(url, UN_CMEMS, PW_CMEMS, path_w, filename_w)
download(url, UN_CMEMS, PW_CMEMS, path_p, filename_p)
ds_w = nc.Dataset(filename_w)
ds_p = nc.Dataset(filename_p)
return (ds_w, ds_p)
""""
# set CMEMS credentials
UN_CMEMS = "jstenkamp"
PW_CMEMS = ""
# cmems wave data download
url = 'nrt.cmems-du.eu'
path = 'Core/GLOBAL_ANALYSIS_FORECAST_WAV_001_027/global-analysis-forecast-wav-001-027/2021/07'
filename = 'mfwamglocep_2021070200_R20210703.nc'
download(url, UN_CMEMS, PW_CMEMS, path, filename)
# cmems physics download
url = 'nrt.cmems-du.eu'
path = 'Core/GLOBAL_ANALYSIS_FORECAST_PHY_001_024/global-analysis-forecast-phy-001-024/2021/07'
filename_p = 'mercatorpsy4v3r1_gl12_mean_20210702_R20210703.nc'
download(url, UN_CMEMS, PW_CMEMS, path, filename_p)
# load files as netcdf dataset
ds = nc.Dataset(filename)
ds_p = nc.Dataset(filename_p)
# ds
"""
| dir_4 = np.full((len(ww_dir), 1), 2)
dir_4[(ww_dir > 45) & (ww_dir < 135)] = 3
dir_4[(ww_dir > 225) & (ww_dir < 315)] = 1 | conditional_block |
iconnect.js | "use strict";
var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
/*
 * Used for communication between the inside and outside of an iframe.
* iconnect.trigger('insertIntoPublishTop', 'aaa', function(){'callback'});
*/
(function (window, document, undefined) {
//STK custom events
var cEvt = function () {
//=== dependencies for the custom-event implementation ===
var isArray = function isArray(o) {
return Object.prototype.toString.call(o) === '[object Array]';
};
var getType = function getType(oObject) {
var _t;
return ((_t = typeof oObject === "undefined" ? "undefined" : _typeof(oObject)) == "object" ? oObject == null && "null" || Object.prototype.toString.call(oObject).slice(8, -1) : _t).toLowerCase();
};
//====================
var custEventAttribute = "__custEventKey__",
custEventKey = 1,
custEventCache = {},
/**
* Look up the related object in the cache.
* When the event is already defined:
*   returns the cached handler list when type is given, otherwise the cached object.
* Returns false when it is not defined.
* @param {Object|number} obj object reference or its cache key
* @param {String} type custom event name
*/
findCache = function findCache(obj, type) {
var _key = typeof obj == "number" ? obj : obj[custEventAttribute];
return _key && custEventCache[_key] && {
obj: typeof type == "string" ? custEventCache[_key][type] : custEventCache[_key],
key: _key
};
};
////
//event migration (hook/unhook) support
var hookCache = {}; //arr key -> {origtype-> {fn, desttype}}
//
var _add = function _add(obj, type, fn, data, once) {
if (obj && typeof type == "string" && fn) {
var _cache = findCache(obj, type);
if (!_cache || !_cache.obj) {
throw "custEvent (" + type + ") is undefined !";
}
_cache.obj.push({ fn: fn, data: data, once: once });
return _cache.key;
}
};
var _fire = function _fire(obj, type, args, defaultAction) {
//lets handlers prevent the event's default action
var preventDefaultFlag = true;
var preventDefault = function preventDefault() {
preventDefaultFlag = false;
};
if (obj && typeof type == "string") {
var _cache = findCache(obj, type),
_obj;
if (_cache && (_obj = _cache.obj)) {
args = typeof args != 'undefined' && [].concat(args) || [];
for (var i = _obj.length - 1; i > -1 && _obj[i]; i--) {
var fn = _obj[i].fn;
var isOnce = _obj[i].once;
if (fn && fn.apply) {
try {
fn.apply(obj, [{ obj: obj, type: type, data: _obj[i].data, preventDefault: preventDefault }].concat(args));
if (isOnce) {
_obj.splice(i, 1);
}
} catch (e) {
window.console && console.log("[error][custEvent]" + e.message, e, e.stack);
}
}
}
if (preventDefaultFlag && getType(defaultAction) === 'function') {
defaultAction();
}
return _cache.key;
}
}
};
var that = {
/**
* Define custom events on an object; undefined events must not be bound.
* @method define
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String|Array} type custom event name(s); required
* @return {number} key cache index
*/
define: function define(obj, type) {
if (obj && type) {
var _key = typeof obj == "number" ? obj : obj[custEventAttribute] || (obj[custEventAttribute] = custEventKey++),
_cache = custEventCache[_key] || (custEventCache[_key] = {});
type = [].concat(type);
for (var i = 0; i < type.length; i++) {
_cache[type[i]] || (_cache[type[i]] = []);
}
return _key;
}
},
/**
* Undefine an object's custom events.
* When all of an object's event definitions are removed, the reference to the object is dropped.
* @method undefine
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; optional - omit to undefine all events
*/
undefine: function undefine(obj, type) {
if (obj) {
var _key = typeof obj == "number" ? obj : obj[custEventAttribute];
if (_key && custEventCache[_key]) {
if (type) {
type = [].concat(type);
for (var i = 0; i < type.length; i++) {
if (type[i] in custEventCache[_key]) delete custEventCache[_key][type[i]];
}
} else {
delete custEventCache[_key];
}
}
}
},
/**
* Add (bind) an event handler.
* @method add
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; required
* @param {Function} fn event handler; required
* @param {Any} data extra data of any type; optional
* @return {number} key cache index
*/
add: function add(obj, type, fn, data) {
return _add(obj, type, fn, data, false);
},
/**
* Bind an event handler that runs only once.
* @method once
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; required
* @param {Function} fn event handler; required
* @param {Any} data extra data of any type; optional
* @return {number} key cache index
*/
once: function once(obj, type, fn, data) {
return _add(obj, type, fn, data, true);
},
/**
* Remove (unbind) event handlers.
* @method remove
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; optional - when empty, removes all bindings on the object
* @param {Function} fn event handler; optional - when empty and type is given, removes all handlers of that type
* @return {number} key cache index
*/
remove: function remove(obj, type, fn) {
if (obj) {
var _cache = findCache(obj, type),
_obj,
index;
if (_cache && (_obj = _cache.obj)) {
if (isArray(_obj)) {
if (fn) {
//for (var i = 0; i < _obj.length && _obj[i].fn !== fn; i++);
var i = 0;
while (_obj[i]) {
if (_obj[i].fn === fn) {
break;
}
i++;
}
_obj.splice(i, 1);
} else {
_obj.splice(0, _obj.length);
}
} else {
for (var i in _obj) {
_obj[i] = [];
}
}
return _cache.key;
}
}
},
/**
* Fire (trigger) an event.
* @method fire
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; required
* @param {Any|Array} args argument array or a single value; optional
* @param {Function} defaultAction default Function run after the handler list finishes; optional - when args is not needed, pass undefined/null so this stays the fourth argument
* @return {number} key cache index
*/
fire: function fire(obj, type, args, defaultAction) {
return _fire(obj, type, args, defaultAction);
},
/**
* Migrate events from a source object to a destination object.
* @method hook
* @static
* @param {Object} orig source object
* @param {Object} dest destination object
* @param {Object} typeMap event-name mapping table
* {
*   source event name -> destination event name
* }
*/
hook: function hook(orig, dest, typeMap) {
if (!orig || !dest || !typeMap) {
return;
}
var destTypes = [],
origKey = orig[custEventAttribute],
origKeyCache = origKey && custEventCache[origKey],
origTypeCache,
destKey = dest[custEventAttribute] || (dest[custEventAttribute] = custEventKey++),
keyHookCache;
if (origKeyCache) {
keyHookCache = hookCache[origKey + '_' + destKey] || (hookCache[origKey + '_' + destKey] = {});
var fn = function fn(event) {
var preventDefaultFlag = true;
_fire(dest, keyHookCache[event.type].type, Array.prototype.slice.apply(arguments, [1, arguments.length]), function () {
preventDefaultFlag = false;
});
preventDefaultFlag && event.preventDefault();
};
for (var origType in typeMap) {
var destType = typeMap[origType];
if (!keyHookCache[origType]) {
if (origTypeCache = origKeyCache[origType]) {
origTypeCache.push({ fn: fn, data: undefined });
keyHookCache[origType] = {
fn: fn,
type: destType
};
destTypes.push(destType);
}
}
}
that.define(dest, destTypes);
}
},
/**
* Cancel an event migration.
* @method unhook
* @static
* @param {Object} orig source object
* @param {Object} dest destination object
* @param {Object} typeMap event-name mapping table
* {
*   source event name -> destination event name
* }
*/
unhook: function unhook(orig, dest, typeMap) {
if (!orig || !dest || !typeMap) {
return;
}
var origKey = orig[custEventAttribute],
destKey = dest[custEventAttribute],
keyHookCache = hookCache[origKey + '_' + destKey];
if (keyHookCache) {
for (var origType in typeMap) {
var destType = typeMap[origType];
if (keyHookCache[origType]) {
that.remove(orig, origType, keyHookCache[origType].fn);
}
}
}
},
/**
* Destroy (reset all event caches).
* @method destroy
* @static
*/
destroy: function destroy() {
custEventCache = {};
custEventKey = 1;
hookCache = {};
}
};
return that;
}();
var utils = {
count: 0,
getUniqueKey: function getUniqueKey() {
return +new Date() + (Math.random() + '').replace('.', '') + utils.count++;
},
json2str: function () {
function f(n) {
// Format integers to have at least two digits.
return n < 10 ? '0' + n : n;
}
if (typeof Date.prototype.toJSON !== 'function') {
Date.prototype.toJSON = function (key) {
return isFinite(this.valueOf()) ? this.getUTCFullYear() + '-' + f(this.getUTCMonth() + 1) + '-' + f(this.getUTCDate()) + 'T' + f(this.getUTCHours()) + ':' + f(this.getUTCMinutes()) + ':' + f(this.getUTCSeconds()) + 'Z' : null;
};
String.prototype.toJSON = Number.prototype.toJSON = Boolean.prototype.toJSON = function (key) {
return this.valueOf();
};
}
var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
gap,
indent,
meta = { // table of character substitutions
'\b': '\\b',
'\t': '\\t',
'\n': '\\n',
'\f': '\\f',
'\r': '\\r',
'"': '\\"',
| '\\': '\\\\'
},
rep;
function quote(string) {
// If the string contains no control characters, no quote characters, and no
// backslash characters, then we can safely slap some quotes around it.
// Otherwise we must also replace the offending characters with safe escape
// sequences.
escapable.lastIndex = 0;
return escapable.test(string) ? '"' + string.replace(escapable, function (a) {
var c = meta[a];
return typeof c === 'string' ? c : "\\u" + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
}) + '"' : '"' + string + '"';
}
function str(key, holder) {
// Produce a string from holder[key].
var i,
// The loop counter.
k,
// The member key.
v,
// The member value.
length,
mind = gap,
partial,
value = holder[key];
// If the value has a toJSON method, call it to obtain a replacement value.
if (value && (typeof value === "undefined" ? "undefined" : _typeof(value)) === 'object' && typeof value.toJSON === 'function') {
iconnect.js | "use strict";
var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
/*
* Handles communication between an iframe and its parent page.
* e.g. iconnect.trigger('insertIntoPublishTop', 'aaa', function(){'callback'});
*/
(function (window, document, undefined) {
// STK custom event module
var cEvt = function () {
// === helpers the custom-event module depends on ===
var isArray = function isArray(o) {
return Object.prototype.toString.call(o) === '[object Array]';
};
var getType = function getType(oObject) {
var _t;
return ((_t = typeof oObject === "undefined" ? "undefined" : _typeof(oObject)) == "object" ? oObject == null && "null" || Object.prototype.toString.call(oObject).slice(8, -1) : _t).toLowerCase();
};
//====================
var custEventAttribute = "__custEventKey__",
custEventKey = 1,
custEventCache = {},
/**
* Looks up the related entry in the cache.
* If the object has been defined:
*   with a type, returns that type's handler list; without one, returns the whole cached entry.
* Returns false when nothing has been defined.
* @param {Object|number} obj object reference, or the numeric key obtained earlier
* @param {String} type custom event name
*/
findCache = function findCache(obj, type) {
var _key = typeof obj == "number" ? obj : obj[custEventAttribute];
return _key && custEventCache[_key] && {
obj: typeof type == "string" ? custEventCache[_key][type] : custEventCache[_key],
key: _key
};
};
////
// event hooking (migrating events between objects)
var hookCache = {}; //arr key -> {origtype-> {fn, desttype}}
//
var _add = function _add(obj, type, fn, data, once) {
if (obj && typeof type == "string" && fn) {
var _cache = findCache(obj, type);
if (!_cache || !_cache.obj) {
throw "custEvent (" + type + ") is undefined !";
}
_cache.obj.push({ fn: fn, data: data, once: once });
return _cache.key;
}
};
var _fire = function _fire(obj, type, args, defaultAction) {
// lets handlers cancel the default action
var preventDefaultFlag = true;
var preventDefault = function preventDefault() {
preventDefaultFlag = false;
};
if (obj && typeof type == "string") {
var _cache = findCache(obj, type),
_obj;
if (_cache && (_obj = _cache.obj)) {
args = typeof args != 'undefined' && [].concat(args) || [];
for (var i = _obj.length - 1; i > -1 && _obj[i]; i--) {
var fn = _obj[i].fn;
var isOnce = _obj[i].once;
if (fn && fn.apply) {
try {
fn.apply(obj, [{ obj: obj, type: type, data: _obj[i].data, preventDefault: preventDefault }].concat(args));
if (isOnce) {
_obj.splice(i, 1);
}
} catch (e) {
window.console && console.log("[error][custEvent]" + e.message, e, e.stack);
}
}
}
if (preventDefaultFlag && getType(defaultAction) === 'function') {
defaultAction();
}
return _cache.key;
}
}
};
var that = {
/**
* Defines custom events on an object; undefined events cannot be bound.
* @method define
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String|Array} type custom event name(s); required
* @return {number} key cache index for the object
*/
define: function define(obj, type) {
if (obj && type) {
var _key = typeof obj == "number" ? obj : obj[custEventAttribute] || (obj[custEventAttribute] = custEventKey++),
_cache = custEventCache[_key] || (custEventCache[_key] = {});
type = [].concat(type);
for (var i = 0; i < type.length; i++) {
_cache[type[i]] || (_cache[type[i]] = []);
}
return _key;
}
},
/**
* Removes custom event definitions from an object.
* When all of an object's definitions are gone, the reference to the object is dropped.
* @method undefine
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; optional, omit to undefine all events
*/
undefine: function undefine(obj, type) {
if (obj) {
var _key = typeof obj == "number" ? obj : obj[custEventAttribute];
if (_key && custEventCache[_key]) {
if (type) {
type = [].concat(type);
for (var i = 0; i < type.length; i++) {
if (type[i] in custEventCache[_key]) delete custEventCache[_key][type[i]];
}
} else {
delete custEventCache[_key];
}
}
}
},
/**
* Binds an event handler.
* @method add
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; required
* @param {Function} fn handler; required
* @param {Any} data extra data of any type; optional
* @return {number} key cache index for the object
*/
add: function add(obj, type, fn, data) {
return _add(obj, type, fn, data, false);
},
/**
* Binds a handler that runs only once.
* @method once
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; required
* @param {Function} fn handler; required
* @param {Any} data extra data of any type; optional
* @return {number} key cache index for the object
*/
once: function once(obj, type, fn, data) {
return _add(obj, type, fn, data, true);
},
/**
* Unbinds event handlers.
* @method remove
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; optional, omit to unbind everything on the object
* @param {Function} fn handler; optional, omit (with type set) to unbind all handlers for that type
* @return {number} key cache index for the object
*/
remove: function remove(obj, type, fn) {
if (obj) {
var _cache = findCache(obj, type),
_obj,
index;
if (_cache && (_obj = _cache.obj)) {
if (isArray(_obj)) {
if (fn) {
//for (var i = 0; i < _obj.length && _obj[i].fn !== fn; i++);
var i = 0;
while (_obj[i]) {
if (_obj[i].fn === fn) {
break;
}
i++;
}
_obj.splice(i, 1);
} else {
_obj.splice(0, _obj.length);
}
} else {
for (var i in _obj) {
_obj[i] = [];
}
}
return _cache.key;
}
}
},
/**
* Fires an event.
* @method fire
* @static
* @param {Object|number} obj object reference or its cache key; required
* @param {String} type custom event name; required
* @param {Any|Array} args argument array, or a single value; optional
* @param {Function} defaultAction default action run after the handler list; optional.
*        Note: pass undefined/null for args when it is unused, so this stays the fourth argument.
* @return {number} key cache index for the object
*/
fire: function fire(obj, type, args, defaultAction) {
return _fire(obj, type, args, defaultAction);
},
/**
* Migrates events from a source object to a destination object.
* @method hook
* @static
* @param {Object} orig source object
* @param {Object} dest destination object
* @param {Object} typeMap event-name mapping table
* {
*     source event name -> destination event name
* }
*/
hook: function hook(orig, dest, typeMap) {
if (!orig || !dest || !typeMap) {
return;
}
var destTypes = [],
origKey = orig[custEventAttribute],
origKeyCache = origKey && custEventCache[origKey],
origTypeCache,
destKey = dest[custEventAttribute] || (dest[custEventAttribute] = custEventKey++),
keyHookCache;
if (origKeyCache) {
keyHookCache = hookCache[origKey + '_' + destKey] || (hookCache[origKey + '_' + destKey] = {});
var fn = function fn(event) {
var preventDefaultFlag = true;
_fire(dest, keyHookCache[event.type].type, Array.prototype.slice.apply(arguments, [1, arguments.length]), function () {
preventDefaultFlag = false;
});
preventDefaultFlag && event.preventDefault();
};
for (var origType in typeMap) {
var destType = typeMap[origType];
if (!keyHookCache[origType]) {
if (origTypeCache = origKeyCache[origType]) {
origTypeCache.push({ fn: fn, data: undefined });
keyHookCache[origType] = {
fn: fn,
type: destType
};
destTypes.push(destType);
}
}
}
that.define(dest, destTypes);
}
},
/**
* Cancels an event migration.
* @method unhook
* @static
* @param {Object} orig source object
* @param {Object} dest destination object
* @param {Object} typeMap event-name mapping table
* {
*     source event name -> destination event name
* }
*/
unhook: function unhook(orig, dest, typeMap) {
if (!orig || !dest || !typeMap) {
return;
}
var origKey = orig[custEventAttribute],
destKey = dest[custEventAttribute],
keyHookCache = hookCache[origKey + '_' + destKey];
if (keyHookCache) {
for (var origType in typeMap) {
var destType = typeMap[origType];
if (keyHookCache[origType]) {
that.remove(orig, origType, keyHookCache[origType].fn);
}
}
}
},
/**
* Destroys the module state, clearing every cache.
* @method destroy
* @static
*/
destroy: function destroy() {
custEventCache = {};
custEventKey = 1;
hookCache = {};
}
};
return that;
}();
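// Usage sketch (illustrative only; 'ready' and the handler are made-up names):
//   var obj = {};
//   cEvt.define(obj, 'ready');
//   cEvt.add(obj, 'ready', function (e, msg) { console.log(msg); });
//   cEvt.fire(obj, 'ready', ['hello']);   // handlers run last-added first
//   cEvt.remove(obj, 'ready');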
var utils = {
count: 0,
getUniqueKey: function getUniqueKey() {
return +new Date() + (Math.random() + '').replace('.', '') + utils.count++;
},
json2str: function () {
function f(n) {
// Format integers to have at least two digits.
return n < 10 ? '0' + n : n;
}
if (typeof Date.prototype.toJSON !== 'function') {
Date.prototype.toJSON = function (key) {
return isFinite(this.valueOf()) ? this.getUTCFullYear() + '-' + f(this.getUTCMonth() + 1) + '-' + f(this.getUTCDate()) + 'T' + f(this.getUTCHours()) + ':' + f(this.getUTCMinutes()) + ':' + f(this.getUTCSeconds()) + 'Z' : null;
};
String.prototype.toJSON = Number.prototype.toJSON = Boolean.prototype.toJSON = function (key) {
return this.valueOf();
};
}
var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
gap,
indent,
meta = { // table of character substitutions
'\b': '\\b',
'\t': '\\t',
'\n': '\\n',
'\f': '\\f',
'\r': '\\r',
'"': '\\"',
'\\': '\\\\'
},
rep;
function quote(string) {
// If the string contains no control characters, no quote characters, and no
// backslash characters, then we can safely slap some quotes around it.
// Otherwise we must also replace the offending characters with safe escape
// sequences.
escapable.lastIndex = 0;
return escapable.test(string) ? '"' + string.replace(escapable, function (a) {
var c = meta[a];
return typeof c === 'string' ? c : "\\u" + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
}) + '"' : '"' + string + '"';
}
function str(key, holder) {
// Produce a string from holder[key].
var i,
// The loop counter.
k,
// The member key.
v,
// The member value.
length,
mind = gap,
partial,
value = holder[key];
// If the value has a toJSON method, call it to obtain a replacement value.
if (value && (typeof value === "undefined" ? "undefined" : _typeof(value)) === 'object' && typeof value.toJSON === 'function') {
value = value.toJSON(key);
}
// If we were called with a replacer function, then call the replacer to
// obtain a replacement value.
if (typeof rep === 'function') {
value = rep.call(holder, key, value);
}
// What happens next depends on the value's type.
switch (typeof value === "undefined" ? "undefined" : _typeof(value)) {
case 'string':
return quote(value);
case 'number':
// JSON numbers must be finite. Encode non-finite numbers as null.
return isFinite(value) ? String(value) : 'null';
case 'boolean':
case 'null':
// If the value is a boolean or null, convert it to a string. Note:
// typeof null does not produce 'null'. The case is included here in
// the remote chance that this gets fixed someday.
return String(value);
// If the type is 'object', we might be dealing with an object or an array or
// null.
case 'object':
// Due to a specification blunder in ECMAScript, typeof null is 'object',
// so watch out for that case.
if (!value) {
return 'null';
}
// Make an array to hold the partial results of stringifying this object value.
gap += indent;
partial = [];
// Is the value an array?
if (Object.prototype.toString.apply(value) === '[object Array]') {
// The value is an array. Stringify every element. Use null as a placeholder
// for non-JSON values.
length = value.length;
for (i = 0; i < length; i += 1) {
partial[i] = str(i, value) || 'null';
}
// Join all of the elements together, separated with commas, and wrap them in
// brackets.
v = partial.length === 0 ? '[]' : gap ? '[\n' + gap + partial.join(',\n' + gap) + '\n' + mind + ']' : '[' + partial.join(',') + ']';
gap = mind;
return v;
}
// If the replacer is an array, use it to select the members to be stringified.
if (rep && (typeof rep === "undefined" ? "undefined" : _typeof(rep)) === 'object') {
length = rep.length;
for (i = 0; i < length; i += 1) {
k = rep[i];
if (typeof k === 'string') {
v = str(k, value);
if (v) {
partial.push(quote(k) + (gap ? ': ' : ':') + v);
}
}
}
} else {
// Otherwise, iterate through all of the keys in the object.
for (k in value) {
if (Object.hasOwnProperty.call(value, k)) {
v = str(k, value);
if (v) {
partial.push(quote(k) + (gap ? ': ' : ':') + v);
}
}
}
}
// Join all of the member texts together, separated with commas,
// and wrap them in braces.
v = partial.length === 0 ? '{}' : gap ? '{\n' + gap + partial.join(',\n' + gap) + '\n' + mind + '}' : '{' + partial.join(',') + '}';
gap = mind;
return v;
}
}
return function (value, replacer, space) {
if (window.JSON && window.JSON.stringify) {
return window.JSON.stringify(value, replacer, space);
}
// The stringify method takes a value and an optional replacer, and an optional
// space parameter, and returns a JSON text. The replacer can be a function
// that can replace values, or an array of strings that will select the keys.
// A default replacer method can be provided. Use of the space parameter can
// produce text that is more easily readable.
var i;
gap = '';
indent = '';
// If the space parameter is a number, make an indent string containing that
// many spaces.
if (typeof space === 'number') {
for (i = 0; i < space; i += 1) {
indent += ' ';
}
// If the space parameter is a string, it will be used as the indent string.
} else if (typeof space === 'string') {
indent = space;
}
// If there is a replacer, it must be a function or an array.
// Otherwise, throw an error.
rep = replacer;
if (replacer && typeof replacer !== 'function' && ((typeof replacer === "undefined" ? "undefined" : _typeof(replacer)) !== 'object' || typeof replacer.length !== 'number')) {
throw new Error('JSON.stringify');
}
// Make a fake root object containing our value under the key of ''.
// Return the result of stringifying the value.
return str('', {
'': value
});
};
}(),
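// Example (illustrative): utils.json2str({ a: [1, 2] }) returns '{"a":[1,2]}'.
// NOTE: str2json below parses with eval(), so it must only ever see trusted input.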
str2json: function str2json(str) {
try {
return eval('(' + str + ')');
} catch (e) {
return null;
}
},
getUrlParam: function getUrlParam(name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)"); // build a regexp that captures the target parameter
var r = window.location.search.substr(1).match(reg); // match it against the query string
if (r != null) {
return unescape(r[2]);
}
return null;
}
};
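// Example (illustrative): with location.search === '?uid=42&lang=en',
// utils.getUrlParam('uid') returns '42'; a missing name returns null.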
var iframeConnect = function iframeConnect() {
var cidList = {};
var event = {};
var iid = window.name;
var post = function post(cid, cmd, param) {
// iid: id of this iframe
// cid: id of this task
// cmd: name of the callback method invoked on the client side
// param: arguments for this task, as an array
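// e.g. msg becomes '{"iid":"ifr_1","cid":"<unique>","cmd":"setHeight","param":[300]}' (illustrative values)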
var msg = utils.json2str(param === undefined ? { iid: iid, cid: cid, cmd: cmd } : { iid: iid, cid: cid, cmd: cmd, param: param });
if (window.parent.postMessage) {
window.parent.postMessage(msg, '*');
} else {
window.navigator['STK_IFRAME_CONNECT_OUT'](msg);
}
};
var listener = function listener(evt) {
try {
var data = utils.str2json(evt.data);
if (data.cid && data.cid == '_EVENT_') {
cEvt.define(event, data.call);
cEvt.fire(event, data.call, data.rs);
} else if (data.cid && data.cid in cidList) {
try {
var call = data.call == 'callback' ? cidList[data.cid] : cidList[data.cid][data.call];
call(data.rs);
delete cidList[data.cid];
} catch (e) {}
}
} catch (e) {}
};
if (window.postMessage) {
if (window.addEventListener) {
window.addEventListener('message', listener, false);
} else {
window.attachEvent('onmessage', listener);
}
} else {
window.navigator['STK_IFRAME_CONNECT_' + iid] = function (data) {
listener({ data: data });
};
}
return {
trigger: function trigger(cmd, param, callback) {
if ((typeof param === "undefined" ? "undefined" : _typeof(param)).toUpperCase() === 'FUNCTION') {
callback = param;
param = undefined;
}
var cid = utils.getUniqueKey();
callback && (cidList[cid] = callback);
post(cid, cmd, param);
},
on: function on(cmd, fn) {
cEvt.define(event, cmd);
cEvt.add(event, cmd, fn);
},
off: function off(cmd, fn) {
cEvt.remove(event, cmd, fn);
}
};
};
// backward compatibility with the original API
var iconnect = window.iconnect = iframeConnect();
window.iframeConnect = function () {
return iconnect;
};
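// Usage sketch (illustrative; 'refreshList' is a made-up command name):
//   iconnect.on('refreshList', function (e, rs) { /* parent pushed an event */ });
//   iconnect.trigger('refreshList', [1, 2], function (rs) { console.log(rs); });
//   iconnect.off('refreshList');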
iconnect.setHeight = function (num, callback) {
iconnect.trigger('setHeight', num, callback);
};
iconnect.getLayoutInfo = function (callback) {
iconnect.trigger('getLayoutInfo', callback);
};
iconnect.oauth = function (options) {
if (options.appkey == null || options.callback == null) {
return;
}
var doc = document,
iframe_id = "page_app_oauth_iframe";
var url = "https://api.weibo.com/2/oauth2/authorize?client_id=" + options.appkey + "&response_type=code&redirect_uri=" + encodeURIComponent(options.callback) + "&quick_auth=true";
if (doc.getElementById(iframe_id) == null) {
var iframe = doc.createElement('iframe');
iframe.id = iframe_id;
// iframe.setAttribute("style", "position:absolute;left:-1000px;top:-1000px;display:none");
iframe.style.cssText = "position:absolute;left:-1000px;top:-1000px;display:none";
iframe.src = url;
document.body.appendChild(iframe);
} else {
doc.getElementById(iframe_id).src = url;
}
};
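// Example (illustrative; the appkey and callback URL are placeholders):
//   iconnect.oauth({ appkey: '123456', callback: 'http://example.com/oauth/cb' });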
})(window, document);
//# sourceMappingURL=iconnect.js.map
binary.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package binary implements simple translation between numbers and byte
// sequences and encoding and decoding of varints.
//
// Numbers are translated by reading and writing fixed-size values.
// A fixed-size value is either a fixed-size arithmetic
// type (int8, uint8, int16, float32, complex64, ...)
// or an array or struct containing only fixed-size values.
//
// The varint functions encode and decode single integer values using
// a variable-length encoding; smaller values require fewer bytes.
// For a specification, see
// http://code.google.com/apis/protocolbuffers/docs/encoding.html.
//
// This package favors simplicity over efficiency. Clients that require
// high-performance serialization, especially for large data structures,
// should look at more advanced solutions such as the encoding/gob
// package or protocol buffers.
package common
import (
"errors"
"io"
"math"
"reflect"
)
// A ByteOrder specifies how to convert byte sequences into
// 16-, 32-, or 64-bit unsigned integers.
type ByteOrder interface {
Uint16([]byte) uint16
Uint32([]byte) uint32
Uint64([]byte) uint64
PutUint16([]byte, uint16)
PutUint32([]byte, uint32)
PutUint64([]byte, uint64)
String() string
}
// LittleEndian is the little-endian implementation of ByteOrder.
var LittleEndian littleEndian
// BigEndian is the big-endian implementation of ByteOrder.
var BigEndian bigEndian
type littleEndian struct{}
func (littleEndian) Uint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 }
func (littleEndian) PutUint16(b []byte, v uint16) {
b[0] = byte(v)
b[1] = byte(v >> 8)
}
func (littleEndian) Uint32(b []byte) uint32 {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func (littleEndian) PutUint32(b []byte, v uint32) {
b[0] = byte(v)
b[1] = byte(v >> 8)
b[2] = byte(v >> 16)
b[3] = byte(v >> 24)
}
func (littleEndian) Uint64(b []byte) uint64 {
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
func (littleEndian) PutUint64(b []byte, v uint64) {
b[0] = byte(v)
b[1] = byte(v >> 8)
b[2] = byte(v >> 16)
b[3] = byte(v >> 24)
b[4] = byte(v >> 32)
b[5] = byte(v >> 40)
b[6] = byte(v >> 48)
b[7] = byte(v >> 56)
}
func (littleEndian) String() string { return "LittleEndian" }
func (littleEndian) GoString() string { return "binary.LittleEndian" }
type bigEndian struct{}
func (bigEndian) Uint16(b []byte) uint16 { return uint16(b[1]) | uint16(b[0])<<8 }
func (bigEndian) PutUint16(b []byte, v uint16) {
b[0] = byte(v >> 8)
b[1] = byte(v)
}
func (bigEndian) Uint32(b []byte) uint32 {
return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
}
func (bigEndian) PutUint32(b []byte, v uint32) {
b[0] = byte(v >> 24)
b[1] = byte(v >> 16)
b[2] = byte(v >> 8)
b[3] = byte(v)
}
func (bigEndian) Uint64(b []byte) uint64 {
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
}
func (bigEndian) PutUint64(b []byte, v uint64) {
b[0] = byte(v >> 56)
b[1] = byte(v >> 48)
b[2] = byte(v >> 40)
b[3] = byte(v >> 32)
b[4] = byte(v >> 24)
b[5] = byte(v >> 16)
b[6] = byte(v >> 8)
b[7] = byte(v)
}
func (bigEndian) String() string { return "BigEndian" }
func (bigEndian) GoString() string { return "binary.BigEndian" }
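// Round-trip sketch (illustrative):
//   var b [4]byte
//   LittleEndian.PutUint32(b[:], 0xDEADBEEF) // b == [0xEF 0xBE 0xAD 0xDE]
//   v := LittleEndian.Uint32(b[:])           // v == 0xDEADBEEF again
//   BigEndian.PutUint32(b[:], 0xDEADBEEF)    // b == [0xDE 0xAD 0xBE 0xEF]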
// Read reads structured binary data from r into data.
// Data must be a pointer to a fixed-size value or a slice
// of fixed-size values.
// Bytes read from r are decoded using the specified byte order
// and written to successive fields of the data.
// When reading into structs, the field data for fields with
// blank (_) field names is skipped; i.e., blank field names
// may be used for padding.
// When reading into a struct, all non-blank fields must be exported.
func Read(r io.Reader, order ByteOrder, data interface{}) error {
// Fast path for basic types and slices.
if n := intDataSize(data); n != 0 {
var b [8]byte
var bs []byte
if n > len(b) {
bs = make([]byte, n)
} else {
bs = b[:n]
}
if _, err := io.ReadFull(r, bs); err != nil {
return err
}
switch data := data.(type) {
case *int8:
*data = int8(b[0])
case *uint8:
*data = b[0]
case *int16:
*data = int16(order.Uint16(bs))
case *uint16:
*data = order.Uint16(bs)
case *int32:
*data = int32(order.Uint32(bs))
case *uint32:
*data = order.Uint32(bs)
case *int64:
*data = int64(order.Uint64(bs))
case *uint64:
*data = order.Uint64(bs)
case []int8:
for i, x := range bs { // Easier to loop over the input for 8-bit values.
data[i] = int8(x)
}
case []uint8:
copy(data, bs)
case []int16:
for i := range data {
data[i] = int16(order.Uint16(bs[2*i:]))
}
case []uint16:
for i := range data {
data[i] = order.Uint16(bs[2*i:])
}
case []int32:
for i := range data {
data[i] = int32(order.Uint32(bs[4*i:]))
}
case []uint32:
for i := range data {
data[i] = order.Uint32(bs[4*i:])
}
case []int64:
for i := range data {
data[i] = int64(order.Uint64(bs[8*i:]))
}
case []uint64:
for i := range data {
data[i] = order.Uint64(bs[8*i:])
}
}
return nil
}
// Fallback to reflect-based decoding.
v := reflect.ValueOf(data)
size := -1
switch v.Kind() {
case reflect.Ptr:
v = v.Elem()
size = dataSize(v)
case reflect.Slice:
size = dataSize(v)
}
if size < 0 {
return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String())
}
d := &decoder{order: order, buf: make([]byte, size)}
if _, err := io.ReadFull(r, d.buf); err != nil {
return err
}
d.value(v)
return nil
}
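// Illustrative sketch (not part of the original file): decoding a fixed-size
// struct from a stream. Blank (_) fields consume their bytes but are not
// stored, which is the padding behavior documented above. The header layout
// is invented for this example.
func exampleRead(r io.Reader) error {
	var hdr struct {
		Magic   uint32
		_       uint16 // padding: read and discarded
		Version uint16
	}
	// Reads 8 bytes and fills the exported fields in declaration order.
	return Read(r, BigEndian, &hdr)
}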
// Write writes the binary representation of data into w.
// Data must be a fixed-size value or a slice of fixed-size
// values, or a pointer to such data.
// Bytes written to w are encoded using the specified byte order
// and read from successive fields of the data.
// When writing structs, zero values are written for fields
// with blank (_) field names.
func Write(w io.Writer, order ByteOrder, data interface{}) error {
// Fast path for basic types and slices.
if n := intDataSize(data); n != 0 {
var b [8]byte
var bs []byte
if n > len(b) {
bs = make([]byte, n)
} else {
bs = b[:n]
}
switch v := data.(type) {
case *int8:
bs = b[:1]
b[0] = byte(*v)
case int8:
bs = b[:1]
b[0] = byte(v)
case []int8:
for i, x := range v {
bs[i] = byte(x)
}
case *uint8:
bs = b[:1]
b[0] = *v
case uint8:
bs = b[:1]
b[0] = byte(v)
case []uint8:
bs = v
case *int16:
bs = b[:2]
order.PutUint16(bs, uint16(*v))
case int16:
bs = b[:2]
order.PutUint16(bs, uint16(v))
case []int16:
for i, x := range v {
order.PutUint16(bs[2*i:], uint16(x))
}
case *uint16:
bs = b[:2]
order.PutUint16(bs, *v)
case uint16:
bs = b[:2]
order.PutUint16(bs, v)
case []uint16:
for i, x := range v {
order.PutUint16(bs[2*i:], x)
}
case *int32:
bs = b[:4]
order.PutUint32(bs, uint32(*v))
case int32:
bs = b[:4]
order.PutUint32(bs, uint32(v))
case []int32:
for i, x := range v {
order.PutUint32(bs[4*i:], uint32(x))
}
case *uint32:
bs = b[:4]
order.PutUint32(bs, *v)
case uint32:
bs = b[:4]
order.PutUint32(bs, v)
case []uint32:
for i, x := range v {
order.PutUint32(bs[4*i:], x)
}
case *int64:
bs = b[:8]
order.PutUint64(bs, uint64(*v))
case int64:
bs = b[:8]
order.PutUint64(bs, uint64(v))
case []int64:
for i, x := range v {
order.PutUint64(bs[8*i:], uint64(x))
}
case *uint64:
bs = b[:8]
order.PutUint64(bs, *v)
case uint64:
bs = b[:8]
order.PutUint64(bs, v)
case []uint64:
for i, x := range v {
order.PutUint64(bs[8*i:], x)
}
}
_, err := w.Write(bs)
return err
}
// Fallback to reflect-based encoding.
v := reflect.Indirect(reflect.ValueOf(data))
size := dataSize(v)
if size < 0 {
return errors.New("binary.Write: invalid type " + reflect.TypeOf(data).String())
}
buf := make([]byte, size)
e := &encoder{order: order, buf: buf}
e.value(v)
_, err := w.Write(buf)
return err
}
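// Illustrative sketch (not part of the original file): both calls below hit
// the fast path in Write, so no reflection is involved. The values are
// arbitrary.
func exampleWrite(w io.Writer) error {
	if err := Write(w, LittleEndian, uint16(0xcafe)); err != nil {
		return err
	}
	// Slices of fixed-size integers are also handled without reflection.
	return Write(w, LittleEndian, []uint32{1, 2, 3})
}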
// Size returns how many bytes Write would generate to encode the value v, which
// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data.
// If v is neither of these, Size returns -1.
func Size(v interface{}) int {
return dataSize(reflect.Indirect(reflect.ValueOf(v)))
}
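// Illustrative sketch (not part of the original file): Size mirrors what
// Write would produce, and flags unsupported types with -1.
func exampleSize() {
	_ = Size(uint64(0))        // 8
	_ = Size([]uint16{1, 2})   // 4: element size times length
	_ = Size("not fixed-size") // -1: strings are not fixed-size values
}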
// dataSize returns the number of bytes the actual data represented by v occupies in memory.
// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
// it returns the length of the slice times the element size and does not count the memory
// occupied by the header. If the type of v is not acceptable, dataSize returns -1.
func dataSize(v reflect.Value) int {
if v.Kind() == reflect.Slice {
if s := sizeof(v.Type().Elem()); s >= 0 {
return s * v.Len()
}
return -1
}
return sizeof(v.Type())
}
// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable.
func sizeof(t reflect.Type) int {
switch t.Kind() {
case reflect.Array:
if s := sizeof(t.Elem()); s >= 0 {
return s * t.Len()
}
case reflect.Struct:
sum := 0
for i, n := 0, t.NumField(); i < n; i++ {
s := sizeof(t.Field(i).Type)
if s < 0 {
return -1
}
sum += s
}
return sum
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.Ptr:
return int(t.Size())
}
return -1
}
type coder struct {
order ByteOrder
buf []byte
}
type (
decoder coder
encoder coder
)
func (d *decoder) uint8() uint8 {
x := d.buf[0]
d.buf = d.buf[1:]
return x
}
func (e *encoder) uint8(x uint8) {
e.buf[0] = x
e.buf = e.buf[1:]
}
func (d *decoder) uint16() uint16 {
x := d.order.Uint16(d.buf[0:2])
d.buf = d.buf[2:]
return x
}
func (e *encoder) uint16(x uint16) {
e.order.PutUint16(e.buf[0:2], x)
e.buf = e.buf[2:]
}
func (d *decoder) uint32() uint32 {
x := d.order.Uint32(d.buf[0:4])
d.buf = d.buf[4:]
return x
}
func (e *encoder) uint32(x uint32) {
e.order.PutUint32(e.buf[0:4], x)
e.buf = e.buf[4:]
}
func (d *decoder) uint64() uint64 {
x := d.order.Uint64(d.buf[0:8])
d.buf = d.buf[8:]
return x
}
func (e *encoder) uint64(x uint64) {
e.order.PutUint64(e.buf[0:8], x)
e.buf = e.buf[8:]
}
func (d *decoder) int8() int8 { return int8(d.uint8()) }
func (e *encoder) int8(x int8) { e.uint8(uint8(x)) }
func (d *decoder) int16() int16 { return int16(d.uint16()) }
func (e *encoder) int16(x int16) { e.uint16(uint16(x)) }
func (d *decoder) int32() int32 { return int32(d.uint32()) }
func (e *encoder) int32(x int32) { e.uint32(uint32(x)) }
func (d *decoder) int64() int64 { return int64(d.uint64()) }
func (e *encoder) int64(x int64) { e.uint64(uint64(x)) }
func (d *decoder) value(v reflect.Value) {
switch v.Kind() {
case reflect.Array:
l := v.Len()
for i := 0; i < l; i++ {
d.value(v.Index(i))
}
case reflect.Struct:
t := v.Type()
l := v.NumField()
for i := 0; i < l; i++ {
// Note: Calling v.CanSet() below is an optimization.
// It would be sufficient to check the field name,
// but creating the StructField info for each field is
// costly (run "go test -bench=ReadStruct" and compare
// results when making changes to this code).
if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
d.value(v)
} else {
d.skip(v)
}
}
case reflect.Slice:
l := v.Len()
for i := 0; i < l; i++ {
d.value(v.Index(i))
}
case reflect.Int8:
v.SetInt(int64(d.int8()))
case reflect.Int16:
v.SetInt(int64(d.int16()))
case reflect.Int32:
v.SetInt(int64(d.int32()))
case reflect.Int64:
v.SetInt(d.int64())
case reflect.Uint8:
v.SetUint(uint64(d.uint8()))
case reflect.Uint16:
v.SetUint(uint64(d.uint16()))
case reflect.Uint32:
v.SetUint(uint64(d.uint32()))
case reflect.Uint64:
v.SetUint(d.uint64())
case reflect.Float32:
v.SetFloat(float64(math.Float32frombits(d.uint32())))
case reflect.Float64:
v.SetFloat(math.Float64frombits(d.uint64()))
case reflect.Complex64:
v.SetComplex(complex(
float64(math.Float32frombits(d.uint32())),
float64(math.Float32frombits(d.uint32())),
))
case reflect.Complex128:
v.SetComplex(complex(
math.Float64frombits(d.uint64()),
math.Float64frombits(d.uint64()),
))
}
}
func (e *encoder) value(v reflect.Value) {
switch v.Kind() {
case reflect.Array:
l := v.Len()
for i := 0; i < l; i++ {
e.value(v.Index(i))
}
case reflect.Struct:
t := v.Type()
l := v.NumField()
for i := 0; i < l; i++ {
// see comment for corresponding code in decoder.value()
if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
e.value(v)
} else {
e.skip(v)
}
}
case reflect.Slice:
l := v.Len()
for i := 0; i < l; i++ {
e.value(v.Index(i))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch v.Type().Kind() {
case reflect.Int8:
e.int8(int8(v.Int()))
case reflect.Int16:
e.int16(int16(v.Int()))
case reflect.Int32:
e.int32(int32(v.Int()))
case reflect.Int64:
e.int64(v.Int())
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch v.Type().Kind() {
case reflect.Uint8:
e.uint8(uint8(v.Uint()))
case reflect.Uint16:
e.uint16(uint16(v.Uint()))
case reflect.Uint32:
e.uint32(uint32(v.Uint()))
case reflect.Uint64:
e.uint64(v.Uint())
}
case reflect.Float32, reflect.Float64:
switch v.Type().Kind() {
case reflect.Float32:
e.uint32(math.Float32bits(float32(v.Float())))
case reflect.Float64:
e.uint64(math.Float64bits(v.Float()))
}
case reflect.Complex64, reflect.Complex128:
switch v.Type().Kind() {
case reflect.Complex64:
x := v.Complex()
e.uint32(math.Float32bits(float32(real(x))))
e.uint32(math.Float32bits(float32(imag(x))))
case reflect.Complex128:
x := v.Complex()
e.uint64(math.Float64bits(real(x)))
e.uint64(math.Float64bits(imag(x)))
}
}
}
func (d *decoder) skip(v reflect.Value) {
d.buf = d.buf[dataSize(v):]
}
func (e *encoder) skip(v reflect.Value) {
n := dataSize(v)
for i := range e.buf[0:n] {
e.buf[i] = 0
}
e.buf = e.buf[n:]
}
// intDataSize returns the size of the data required to represent the data when encoded.
// It returns zero if the type cannot be implemented by the fast path in Read or Write.
func intDataSize(data interface{}) int {
switch data := data.(type) {
case int8, *int8, *uint8:
return 1
case []int8:
return len(data)
case []uint8:
return len(data)
case int16, *int16, *uint16:
return 2
case []int16:
return 2 * len(data)
case []uint16:
return 2 * len(data)
case int32, *int32, *uint32:
return 4
case []int32:
return 4 * len(data)
case []uint32:
return 4 * len(data)
case int64, *int64, *uint64:
return 8
case []int64:
return 8 * len(data)
case []uint64:
return 8 * len(data)
}
return 0
}
// tx.rs
use std::convert::TryInto;
use std::io::Write;
use clap;
use bitcoin::hashes::Hash;
use bitcoin;
use elements::encode::{deserialize, serialize};
use elements::{
confidential, AssetIssuance, OutPoint, Transaction, TxIn, TxInWitness, TxOut, TxOutWitness,
Script,
};
use elements::secp256k1_zkp::{
Generator, PedersenCommitment, PublicKey, RangeProof, SurjectionProof, Tweak,
};
use cmd;
use hal_elements::Network;
use hal_elements::confidential::{
ConfidentialAssetInfo, ConfidentialNonceInfo, ConfidentialType, ConfidentialValueInfo,
};
use hal_elements::tx::{
AssetIssuanceInfo, InputInfo, InputWitnessInfo, OutputInfo, OutputWitnessInfo, PeginDataInfo,
PegoutDataInfo, TransactionInfo, InputScriptInfo, OutputScriptInfo,
};
pub fn subcommand<'a>() -> clap::App<'a, 'a> {
cmd::subcommand_group("tx", "manipulate transactions")
.subcommand(cmd_create())
.subcommand(cmd_decode())
}
pub fn execute<'a>(matches: &clap::ArgMatches<'a>) {
match matches.subcommand() {
("create", Some(ref m)) => exec_create(&m),
("decode", Some(ref m)) => exec_decode(&m),
(_, _) => unreachable!("clap prints help"),
};
}
fn cmd_create<'a>() -> clap::App<'a, 'a> {
cmd::subcommand("create", "create a raw transaction from JSON").args(&[
cmd::arg("tx-info", "the transaction info in JSON").required(false),
cmd::opt("raw-stdout", "output the raw bytes of the result to stdout")
.short("r")
.required(false),
])
}
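// (Added note.) A typical invocation might look as follows, assuming the
// usual hal binary naming and that TransactionInfo serializes its fields as
// "inputs"/"outputs" JSON keys; pass --raw-stdout to get raw bytes instead
// of the default hex output:
//   hal-elements tx create '{"inputs": [...], "outputs": [...]}'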
/// Check both ways to specify the outpoint and panic if conflicting.
fn outpoint_from_input_info(input: &InputInfo) -> OutPoint {
let op1: Option<OutPoint> =
input.prevout.as_ref().map(|ref op| op.parse().expect("invalid prevout format"));
let op2 = match input.txid {
Some(txid) => match input.vout {
Some(vout) => Some(OutPoint {
txid: txid,
vout: vout,
}),
None => panic!("\"txid\" field given in input without \"vout\" field"),
},
None => None,
};
match (op1, op2) {
(Some(op1), Some(op2)) => {
if op1 != op2 {
panic!("Conflicting prevout information in input.");
}
op1
}
(Some(op), None) => op,
(None, Some(op)) => op,
(None, None) => panic!("No previous output provided in input."),
}
}
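/// (Doc comment added.) Copy a byte slice into a fixed 32-byte array,
/// returning `None` when the slice has a different length.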
fn bytes_32(bytes: &[u8]) -> Option<[u8; 32]> {
if bytes.len() != 32 {
None
} else {
let mut array = [0; 32];
for (x, y) in bytes.iter().zip(array.iter_mut()) {
*y = *x;
}
Some(array)
}
}
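// (Added note.) With the `TryInto` import at the top of this file, the loop
// above could be written as `bytes.try_into().ok()`.

/// (Doc comment added.) Convert the JSON value info into a
/// `confidential::Value`, panicking when a required field is missing or when
/// the commitment bytes are invalid.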
fn create_confidential_value(info: ConfidentialValueInfo) -> confidential::Value {
match info.type_ {
ConfidentialType::Null => confidential::Value::Null,
ConfidentialType::Explicit => confidential::Value::Explicit(
info.value.expect("Field \"value\" is required for explicit values."),
),
ConfidentialType::Confidential => {
let comm = PedersenCommitment::from_slice(
&info.commitment
.expect("Field \"commitment\" is required for confidential values.")
.0[..]
).expect("invalid confidential commitment");
confidential::Value::Confidential(comm)
}
}
}
fn create_confidential_asset(info: ConfidentialAssetInfo) -> confidential::Asset {
match info.type_ {
ConfidentialType::Null => confidential::Asset::Null,
ConfidentialType::Explicit => confidential::Asset::Explicit(
info.asset.expect("Field \"asset\" is required for explicit assets."),
),
ConfidentialType::Confidential => {
let gen = Generator::from_slice(
&info.commitment
.expect("Field \"commitment\" is required for confidential values.")
.0[..]
).expect("invalid confidential commitment");
confidential::Asset::Confidential(gen)
}
}
}
fn create_confidential_nonce(info: ConfidentialNonceInfo) -> confidential::Nonce {
match info.type_ {
ConfidentialType::Null => confidential::Nonce::Null,
ConfidentialType::Explicit => confidential::Nonce::Explicit(bytes_32(
&info.nonce
.expect("Field \"nonce\" is required for asset issuances.")
.0[..],
).expect("wrong size of \"nonce\" field")),
ConfidentialType::Confidential => {
let pubkey = PublicKey::from_slice(
&info.commitment
.expect("Field \"commitment\" is required for confidential values.")
.0[..]
).expect("invalid confidential commitment");
confidential::Nonce::Confidential(pubkey)
}
}
}
fn create_asset_issuance(info: AssetIssuanceInfo) -> AssetIssuance {
AssetIssuance {
asset_blinding_nonce: Tweak::from_slice(
&info.asset_blinding_nonce
.expect("Field \"asset_blinding_nonce\" is required for asset issuances.")
.0[..]
).expect("Invalid \"asset_blinding_nonce\"."),
asset_entropy: bytes_32(
&info.asset_entropy
.expect("Field \"asset_entropy\" is required for asset issuances.")
.0[..],
).expect("Invalid size of \"asset_entropy\"."),
amount: create_confidential_value(
info.amount.expect("Field \"amount\" is required for asset issuances."),
),
inflation_keys: create_confidential_value(
info.inflation_keys.expect("Field \"inflation_keys\" is required for asset issuances."),
),
}
}
fn create_script_sig(ss: InputScriptInfo) -> Script {
if let Some(hex) = ss.hex {
if ss.asm.is_some() {
warn!("Field \"asm\" of input is ignored.");
}
hex.0.into()
} else if let Some(_) = ss.asm {
panic!("Decoding script assembly is not yet supported.");
} else {
panic!("No scriptSig info provided.");
}
}
fn create_pegin_witness(pd: PeginDataInfo, prevout: bitcoin::OutPoint) -> Vec<Vec<u8>> {
if prevout != pd.outpoint.parse().expect("Invalid outpoint in field \"pegin_data\".") {
panic!("Outpoint in \"pegin_data\" does not correspond to input value.");
}
let asset = match create_confidential_asset(pd.asset) {
confidential::Asset::Explicit(asset) => asset,
_ => panic!("Asset in \"pegin_data\" should be explicit."),
};
vec![
serialize(&pd.value),
serialize(&asset),
serialize(&pd.genesis_hash),
serialize(&pd.claim_script.0),
serialize(&pd.mainchain_tx_hex.0),
serialize(&pd.merkle_proof.0),
]
}
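// The resulting pegin witness stack is, in order: value, asset, mainchain genesis
// hash, claim script, serialized mainchain transaction, and the Merkle proof.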
fn convert_outpoint_to_btc(p: elements::OutPoint) -> bitcoin::OutPoint {
bitcoin::OutPoint {
txid: bitcoin::Txid::from_inner(p.txid.into_inner()),
vout: p.vout,
}
}
fn create_input_witness(
info: Option<InputWitnessInfo>,
pd: Option<PeginDataInfo>,
prevout: OutPoint,
) -> TxInWitness {
let pegin_witness = if info.is_some() && info.as_ref().unwrap().pegin_witness.is_some() {
if pd.is_some() {
warn!("Field \"pegin_data\" of input is ignored.");
}
info.as_ref().unwrap().pegin_witness.clone().unwrap().iter().map(|h| h.clone().0).collect()
} else if let Some(pd) = pd {
create_pegin_witness(pd, convert_outpoint_to_btc(prevout))
} else {
Default::default()
};
if let Some(wi) = info {
TxInWitness {
amount_rangeproof: wi.amount_rangeproof
.map(|b| Box::new(RangeProof::from_slice(&b.0).expect("invalid rangeproof"))),
inflation_keys_rangeproof: wi.inflation_keys_rangeproof
.map(|b| Box::new(RangeProof::from_slice(&b.0).expect("invalid rangeproof"))),
script_witness: match wi.script_witness {
Some(ref w) => w.iter().map(|h| h.clone().0).collect(),
None => Vec::new(),
},
pegin_witness: pegin_witness,
}
} else {
TxInWitness {
pegin_witness: pegin_witness,
..Default::default()
}
}
}
fn create_input(input: InputInfo) -> TxIn {
	let has_issuance = input.has_issuance.unwrap_or(input.asset_issuance.is_some());
	let is_pegin = input.is_pegin.unwrap_or(input.pegin_data.is_some());
	let prevout = outpoint_from_input_info(&input);
	TxIn {
		previous_output: prevout,
		script_sig: input.script_sig.map(create_script_sig).unwrap_or_default(),
		sequence: elements::Sequence::from_height(input.sequence.unwrap_or_default().try_into().unwrap()),
		is_pegin: is_pegin,
		asset_issuance: if has_issuance {
			input.asset_issuance.map(create_asset_issuance).unwrap_or_default()
		} else {
			if input.asset_issuance.is_some() {
				warn!("Field \"asset_issuance\" of input is ignored.");
			}
			Default::default()
		},
		witness: create_input_witness(input.witness, input.pegin_data, prevout),
	}
}
fn create_script_pubkey(spk: OutputScriptInfo, used_network: &mut Option<Network>) -> Script {
if spk.type_.is_some() {
warn!("Field \"type\" of output is ignored.");
}
if let Some(hex) = spk.hex {
if spk.asm.is_some() {
warn!("Field \"asm\" of output is ignored.");
}
if spk.address.is_some() {
warn!("Field \"address\" of output is ignored.");
}
//TODO(stevenroose) do script sanity check to avoid blackhole?
hex.0.into()
} else if let Some(_) = spk.asm {
if spk.address.is_some() {
warn!("Field \"address\" of output is ignored.");
}
panic!("Decoding script assembly is not yet supported.");
} else if let Some(address) = spk.address {
// Error if another network had already been used.
if let Some(network) = Network::from_params(address.params) {
if used_network.replace(network).unwrap_or(network) != network {
panic!("Addresses for different networks are used in the output scripts.");
}
}
address.script_pubkey()
} else {
panic!("No scriptPubKey info provided.");
}
}
fn create_bitcoin_script_pubkey(spk: hal::tx::OutputScriptInfo) -> bitcoin::Script {
if spk.type_.is_some() {
warn!("Field \"type\" of output is ignored.");
}
if let Some(hex) = spk.hex {
if spk.asm.is_some() {
warn!("Field \"asm\" of output is ignored.");
}
if spk.address.is_some() {
warn!("Field \"address\" of output is ignored.");
}
//TODO(stevenroose) do script sanity check to avoid blackhole?
hex.0.into()
} else if let Some(_) = spk.asm {
if spk.address.is_some() {
warn!("Field \"address\" of output is ignored.");
}
panic!("Decoding script assembly is not yet supported.");
} else if let Some(address) = spk.address {
address.script_pubkey()
} else {
panic!("No scriptPubKey info provided.");
}
}
fn create_output_witness(w: OutputWitnessInfo) -> TxOutWitness {
TxOutWitness {
surjection_proof: w.surjection_proof.map(|b| {
Box::new(SurjectionProof::from_slice(&b.0[..]).expect("invalid surjection proof"))
}),
rangeproof: w.rangeproof.map(|b| {
Box::new(RangeProof::from_slice(&b.0[..]).expect("invalid rangeproof"))
}),
}
}
fn create_script_pubkey_from_pegout_data(
pd: PegoutDataInfo,
) -> Script {
let mut builder = elements::script::Builder::new()
.push_opcode(elements::opcodes::all::OP_RETURN)
.push_slice(&pd.genesis_hash.into_inner()[..])
.push_slice(&create_bitcoin_script_pubkey(pd.script_pub_key)[..]);
for d in pd.extra_data {
builder = builder.push_slice(&d.0);
}
builder.into_script()
}
fn create_output(output: OutputInfo) -> TxOut {
// Keep track of which network has been used in addresses and error if two different networks
// are used.
let mut used_network = None;
let value = output
.value
.map(create_confidential_value)
.expect("Field \"value\" is required for outputs.");
let asset = output
.asset
.map(create_confidential_asset)
.expect("Field \"asset\" is required for outputs.");
TxOut {
asset: asset,
value: value,
nonce: output.nonce.map(create_confidential_nonce).unwrap_or(confidential::Nonce::Null),
script_pubkey: if let Some(spk) = output.script_pub_key {
if output.pegout_data.is_some() {
warn!("Field \"pegout_data\" of output is ignored.");
}
create_script_pubkey(spk, &mut used_network)
} else if let Some(pd) = output.pegout_data {
match value {
confidential::Value::Explicit(v) => {
if v != pd.value {
panic!("Value in \"pegout_data\" does not correspond to output value.");
}
}
_ => panic!("Explicit value is required for pegout data."),
}
if asset != create_confidential_asset(pd.asset.clone()) {
panic!("Asset in \"pegout_data\" does not correspond to output value.");
}
create_script_pubkey_from_pegout_data(pd)
} else {
Default::default()
},
witness: output.witness.map(create_output_witness).unwrap_or_default(),
}
}
pub fn create_transaction(info: TransactionInfo) -> Transaction {
// Fields that are ignored.
if info.txid.is_some() {
warn!("Field \"txid\" is ignored.");
}
if info.hash.is_some() {
warn!("Field \"hash\" is ignored.");
}
if info.size.is_some() {
warn!("Field \"size\" is ignored.");
}
if info.weight.is_some() {
warn!("Field \"weight\" is ignored.");
}
if info.vsize.is_some() {
warn!("Field \"vsize\" is ignored.");
}
Transaction {
version: info.version.expect("Field \"version\" is required."),
lock_time: elements::PackedLockTime(info.locktime.expect("Field \"locktime\" is required.")),
input: info
.inputs
.expect("Field \"inputs\" is required.")
.into_iter()
.map(create_input)
.collect(),
output: info
.outputs
.expect("Field \"outputs\" is required.")
.into_iter()
.map(create_output)
.collect(),
}
}
fn exec_create<'a>(matches: &clap::ArgMatches<'a>) {
let info = serde_json::from_str::<TransactionInfo>(&cmd::arg_or_stdin(matches, "tx-info"))
.expect("invalid JSON provided");
let tx = create_transaction(info);
let tx_bytes = serialize(&tx);
if matches.is_present("raw-stdout") {
::std::io::stdout().write_all(&tx_bytes).unwrap();
} else {
print!("{}", hex::encode(&tx_bytes));
}
}
fn cmd_decode<'a>() -> clap::App<'a, 'a> {
cmd::subcommand("decode", "decode a raw transaction to JSON")
.args(&cmd::opts_networks())
.args(&[cmd::opt_yaml(), cmd::arg("raw-tx", "the raw transaction in hex").required(false)])
}
fn exec_decode<'a>(matches: &clap::ArgMatches<'a>) {
let hex_tx = cmd::arg_or_stdin(matches, "raw-tx");
let raw_tx = hex::decode(hex_tx.as_ref()).expect("could not decode raw tx");
let tx: Transaction = deserialize(&raw_tx).expect("invalid tx format");
let info = ::GetInfo::get_info(&tx, cmd::network(matches));
cmd::print_output(matches, &info)
}

# RHD_Load_Filter.py
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 28 16:06:17 2023
@author: Gilles.DELBECQ
"""
import sys, struct, math, os, time
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sp
def read_data(filename):
"""Reads Intan Technologies RHD2000 data file generated by evaluation board GUI.
Data are returned in a dictionary, for future extensibility.
"""
from intanutil.read_header import read_header
from intanutil.get_bytes_per_data_block import get_bytes_per_data_block
from intanutil.read_one_data_block import read_one_data_block
from intanutil.notch_filter import notch_filter
from intanutil.data_to_result import data_to_result
tic = time.time()
fid = open(filename, 'rb')
filesize = os.path.getsize(filename)
header = read_header(fid)
print('Found {} amplifier channel{}.'.format(header['num_amplifier_channels'], plural(header['num_amplifier_channels'])))
print('Found {} auxiliary input channel{}.'.format(header['num_aux_input_channels'], plural(header['num_aux_input_channels'])))
print('Found {} supply voltage channel{}.'.format(header['num_supply_voltage_channels'], plural(header['num_supply_voltage_channels'])))
print('Found {} board ADC channel{}.'.format(header['num_board_adc_channels'], plural(header['num_board_adc_channels'])))
print('Found {} board digital input channel{}.'.format(header['num_board_dig_in_channels'], plural(header['num_board_dig_in_channels'])))
print('Found {} board digital output channel{}.'.format(header['num_board_dig_out_channels'], plural(header['num_board_dig_out_channels'])))
print('Found {} temperature sensors channel{}.'.format(header['num_temp_sensor_channels'], plural(header['num_temp_sensor_channels'])))
print('')
# Determine how many samples the data file contains.
bytes_per_block = get_bytes_per_data_block(header)
# How many data blocks remain in this file?
data_present = False
bytes_remaining = filesize - fid.tell()
if bytes_remaining > 0:
data_present = True
if bytes_remaining % bytes_per_block != 0:
raise Exception('Something is wrong with file size : should have a whole number of data blocks')
num_data_blocks = int(bytes_remaining / bytes_per_block)
num_amplifier_samples = header['num_samples_per_data_block'] * num_data_blocks
num_aux_input_samples = int((header['num_samples_per_data_block'] / 4) * num_data_blocks)
num_supply_voltage_samples = 1 * num_data_blocks
num_board_adc_samples = header['num_samples_per_data_block'] * num_data_blocks
num_board_dig_in_samples = header['num_samples_per_data_block'] * num_data_blocks
num_board_dig_out_samples = header['num_samples_per_data_block'] * num_data_blocks
record_time = num_amplifier_samples / header['sample_rate']
if data_present:
print('File contains {:0.3f} seconds of data. Amplifiers were sampled at {:0.2f} kS/s.'.format(record_time, header['sample_rate'] / 1000))
else:
print('Header file contains no data. Amplifiers were sampled at {:0.2f} kS/s.'.format(header['sample_rate'] / 1000))
if data_present:
# Pre-allocate memory for data.
print('')
print('Allocating memory for data...')
data = {}
if (header['version']['major'] == 1 and header['version']['minor'] >= 2) or (header['version']['major'] > 1):
data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.int_)
else:
data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.uint)
data['amplifier_data'] = np.zeros([header['num_amplifier_channels'], num_amplifier_samples], dtype=np.uint)
data['aux_input_data'] = np.zeros([header['num_aux_input_channels'], num_aux_input_samples], dtype=np.uint)
data['supply_voltage_data'] = np.zeros([header['num_supply_voltage_channels'], num_supply_voltage_samples], dtype=np.uint)
data['temp_sensor_data'] = np.zeros([header['num_temp_sensor_channels'], num_supply_voltage_samples], dtype=np.uint)
data['board_adc_data'] = np.zeros([header['num_board_adc_channels'], num_board_adc_samples], dtype=np.uint)
# by default, this script interprets digital events (digital inputs and outputs) as booleans
# if unsigned int values are preferred(0 for False, 1 for True), replace the 'dtype=np.bool_' argument with 'dtype=np.uint' as shown
# the commented line below illustrates this for digital input data; the same can be done for digital out
#data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.uint)
data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.bool_)
data['board_dig_in_raw'] = np.zeros(num_board_dig_in_samples, dtype=np.uint)
data['board_dig_out_data'] = np.zeros([header['num_board_dig_out_channels'], num_board_dig_out_samples], dtype=np.bool_)
data['board_dig_out_raw'] = np.zeros(num_board_dig_out_samples, dtype=np.uint)
# Read sampled data from file.
print('Reading data from file...')
# Initialize indices used in looping
indices = {}
indices['amplifier'] = 0
indices['aux_input'] = 0
indices['supply_voltage'] = 0
indices['board_adc'] = 0
indices['board_dig_in'] = 0
indices['board_dig_out'] = 0
print_increment = 10
percent_done = print_increment
for i in range(num_data_blocks):
read_one_data_block(data, header, indices, fid)
# Increment indices
indices['amplifier'] += header['num_samples_per_data_block']
indices['aux_input'] += int(header['num_samples_per_data_block'] / 4)
indices['supply_voltage'] += 1
indices['board_adc'] += header['num_samples_per_data_block']
indices['board_dig_in'] += header['num_samples_per_data_block']
indices['board_dig_out'] += header['num_samples_per_data_block']
fraction_done = 100 * (1.0 * i / num_data_blocks)
if fraction_done >= percent_done:
print('{}% done...'.format(percent_done))
percent_done = percent_done + print_increment
# Make sure we have read exactly the right amount of data.
bytes_remaining = filesize - fid.tell()
if bytes_remaining != 0: raise Exception('Error: End of file not reached.')
# Close data file.
fid.close()
if (data_present):
print('Parsing data...')
# Extract digital input channels to separate variables.
for i in range(header['num_board_dig_in_channels']):
data['board_dig_in_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_in_raw'], (1 << header['board_dig_in_channels'][i]['native_order'])), 0)
# Extract digital output channels to separate variables.
for i in range(header['num_board_dig_out_channels']):
data['board_dig_out_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_out_raw'], (1 << header['board_dig_out_channels'][i]['native_order'])), 0)
# Scale voltage levels appropriately.
data['amplifier_data'] = np.multiply(0.195, (data['amplifier_data'].astype(np.int32) - 32768)) # units = microvolts
data['aux_input_data'] = np.multiply(37.4e-6, data['aux_input_data']) # units = volts
data['supply_voltage_data'] = np.multiply(74.8e-6, data['supply_voltage_data']) # units = volts
if header['eval_board_mode'] == 1:
data['board_adc_data'] = np.multiply(152.59e-6, (data['board_adc_data'].astype(np.int32) - 32768)) # units = volts
elif header['eval_board_mode'] == 13:
data['board_adc_data'] = np.multiply(312.5e-6, (data['board_adc_data'].astype(np.int32) - 32768)) # units = volts
else:
data['board_adc_data'] = np.multiply(50.354e-6, data['board_adc_data']) # units = volts
data['temp_sensor_data'] = np.multiply(0.01, data['temp_sensor_data']) # units = deg C
# Check for gaps in timestamps.
num_gaps = np.sum(np.not_equal(data['t_amplifier'][1:]-data['t_amplifier'][:-1], 1))
if num_gaps == 0:
print('No missing timestamps in data.')
else:
print('Warning: {0} gaps in timestamp data found. Time scale will not be uniform!'.format(num_gaps))
# Scale time steps (units = seconds).
data['t_amplifier'] = data['t_amplifier'] / header['sample_rate']
data['t_aux_input'] = data['t_amplifier'][range(0, len(data['t_amplifier']), 4)]
data['t_supply_voltage'] = data['t_amplifier'][range(0, len(data['t_amplifier']), header['num_samples_per_data_block'])]
data['t_board_adc'] = data['t_amplifier']
data['t_dig'] = data['t_amplifier']
data['t_temp_sensor'] = data['t_supply_voltage']
# If the software notch filter was selected during the recording, apply the
# same notch filter to amplifier data here.
if header['notch_filter_frequency'] > 0 and header['version']['major'] < 3:
print('Applying notch filter...')
print_increment = 10
percent_done = print_increment
for i in range(header['num_amplifier_channels']):
data['amplifier_data'][i,:] = notch_filter(data['amplifier_data'][i,:], header['sample_rate'], header['notch_filter_frequency'], 10)
fraction_done = 100 * (i / header['num_amplifier_channels'])
if fraction_done >= percent_done:
print('{}% done...'.format(percent_done))
percent_done += print_increment
else:
data = [];
# Move variables to result struct.
result = data_to_result(header, data, data_present)
print('Done! Elapsed time: {0:0.1f} seconds'.format(time.time() - tic))
return result
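# Minimal usage sketch (the file path is hypothetical; the keys mirror the code below):
#   rec = read_data('recording.rhd')
#   fs = rec['frequency_parameters']['amplifier_sample_rate']
#   traces = rec['amplifier_data']   # shape (n_channels, n_samples), in microvolts
#   t = rec['t_amplifier']           # timestamps in seconds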
def plural(n):
"""Utility function to optionally pluralize words based on the value of n.
"""
if n == 1:
return ''
else:
return 's'
path="//equipe2-nas1/Gilles.DELBECQ/Data/ePhy/Février2023/Test_Gustave/raw/raw intan/Test_Gustave_15_03_230315_182841/Test_Gustave_15_03_230315_182841.rhd"
reader=read_data(path)
sampling_rate = reader['frequency_parameters']['amplifier_sample_rate']
time_vector=reader['t_amplifier']
signal=reader['amplifier_data']
selected_channels=['2','3','4','9','10','11','12','14','15']
#Filtering parameters
freq_low = 300
freq_high = 3000
order = 4
# Noise parameters
std_threshold = 5 #Times the std
noise_window = 5 #window for the noise calculation in sec
distance = 50 # distance between 2 spikes
#waveform window
waveform_window=5 #ms
Waveforms = True
def extract_spike_waveform(signal, spike_idx, left_width=(waveform_window/1000)*20000/2, right_width=(waveform_window/1000)*20000/2):
'''
    Function to extract spike waveforms (originally written for spike2 recordings)
    INPUTS :
        signal (1-d array) : the ephy signal
        spike_idx (1-d array or integer list) : array containing the spike indexes (in points)
        left_width, right_width (int) : half-widths of the spike window, in samples
    OUTPUTS :
        SPIKES (list) : a list containing the waveform of each spike
'''
SPIKES = []
left_width = int(left_width)
right_width = int(right_width)
for i in range(len(spike_idx)):
index = spike_idx[i]
spike_wf = signal[index-left_width : index+right_width]
SPIKES.append(spike_wf)
return SPIKES
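# With the defaults above (waveform_window = 5 ms at 20 kHz), left_width and
# right_width both equal (5/1000)*20000/2 = 50 samples, i.e. a 100-sample (5 ms)
# window centred on each spike index.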
def filter_signal(signal, order=order, sample_rate=sampling_rate, freq_low=freq_low, freq_high=freq_high, axis=0):
"""
From Théo G.
Filtering with scipy
inputs raw signal (array)
returns filtered signal (array)
"""
import scipy.signal
Wn = [freq_low / (sample_rate / 2), freq_high / (sample_rate / 2)]
sos_coeff = scipy.signal.iirfilter(order, Wn, btype="band", ftype="butter", output="sos")
filtered_signal = scipy.signal.sosfiltfilt(sos_coeff, signal, axis=axis)
return filtered_signal
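# For example, with sample_rate = 20000 and the 300-3000 Hz band used here,
# Wn = [300/10000, 3000/10000] = [0.03, 0.3] (cutoffs normalised to Nyquist).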
# def notch_filter(signal, order=4, sample_rate=20000, freq_low=48, freq_high=52, axis=0):
# import scipy.signal
# Wn = [freq_low / (sample_rate / 2), freq_high / (sample_rate / 2)]
# notch_coeff = scipy.signal.iirfilter(order, Wn, btype="bandstop", ftype="butter", output="sos")
# notch_signal = scipy.signal.sosfiltfilt(notch_coeff, signal, axis=axis)
# return notch_signal
filtered_signals=[]
for i in selected_channels:
filtered_signal=filter_signal(signal[int(i),:])
filtered_signals.append(filtered_signal)
# plt.figure()
# # plt.plot(time_vector,signal[0,:])
# plt.plot(time_vector,filtered_signal)
# plt.title(rf'channel {int(i)}')
filtered_signals = np.array(filtered_signals)
median = np.median(filtered_signals, axis=0)#compute median on all
cmr_signals = filtered_signals-median #compute common ref removal median on all
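# The per-sample median across channels estimates noise common to all electrodes;
# subtracting it (common median referencing) suppresses shared artefacts while
# leaving channel-specific spikes largely intact.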
for i in range(len(cmr_signals)):
    plt.figure()
    plt.plot(time_vector,cmr_signals[i])
    plt.title(rf'channel {int(selected_channels[i])}')
"""
Spike detection
"""
thresholds=[]
spikes_list=[]
spikes_list_y=[]
wfs=[]
waveforms=[]
for signal in cmr_signals:
# Threshold calculation
noise = signal[0:int(noise_window*sampling_rate)] #noise window taken from individual channel signal
threshold = np.median(noise)+std_threshold*np.std(noise) #threshold calculation for the channel
thresholds.append(threshold) #append it to the list regrouping threshold for each channel
#Detect the spike indexes
spike_idx, _ = sp.find_peaks(-signal,height=threshold,distance=distance)
#Convert to spike times
spike_times = spike_idx*1./sampling_rate
#Get spikes peak
spike_y = signal[spike_idx]
#Append spikes times to the list of all channels spikes
spikes_list.append(spike_times)
spikes_list_y.append(spike_y)
if Waveforms == True :
wfs = extract_spike_waveform(signal,spike_idx)
waveforms.append(wfs)
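# Illustration (hypothetical numbers): if a channel's 5 s noise window has median
# 0 uV and std 4 uV, the threshold is 0 + 5*4 = 20 uV; peaks are detected on the
# inverted trace, i.e. negative deflections crossing -20 uV.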
for index,i in np.ndenumerate(waveforms):
plt.figure()
# plt.title(rf'waveform_chan_{selected_chan[index[0]]}')
time_axis=np.array(range(int(-(waveform_window/1000)*20000/2),int(waveform_window/1000*20000/2)))/20000*1000
for j in i:
plt.plot(j*1000)
    # plt.savefig(rf'{save_path}\waveform_chan_{selected_chan[index[0]]}.svg')
RHD_Load_Filter.py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 28 16:06:17 2023
@author: Gilles.DELBECQ
"""
import sys, struct, math, os, time
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sp
def read_data(filename):
"""Reads Intan Technologies RHD2000 data file generated by evaluation board GUI.
Data are returned in a dictionary, for future extensibility.
"""
from intanutil.read_header import read_header
from intanutil.get_bytes_per_data_block import get_bytes_per_data_block
from intanutil.read_one_data_block import read_one_data_block
from intanutil.notch_filter import notch_filter
from intanutil.data_to_result import data_to_result
tic = time.time()
fid = open(filename, 'rb')
filesize = os.path.getsize(filename)
header = read_header(fid)
print('Found {} amplifier channel{}.'.format(header['num_amplifier_channels'], plural(header['num_amplifier_channels'])))
print('Found {} auxiliary input channel{}.'.format(header['num_aux_input_channels'], plural(header['num_aux_input_channels'])))
print('Found {} supply voltage channel{}.'.format(header['num_supply_voltage_channels'], plural(header['num_supply_voltage_channels'])))
print('Found {} board ADC channel{}.'.format(header['num_board_adc_channels'], plural(header['num_board_adc_channels'])))
print('Found {} board digital input channel{}.'.format(header['num_board_dig_in_channels'], plural(header['num_board_dig_in_channels'])))
print('Found {} board digital output channel{}.'.format(header['num_board_dig_out_channels'], plural(header['num_board_dig_out_channels'])))
print('Found {} temperature sensors channel{}.'.format(header['num_temp_sensor_channels'], plural(header['num_temp_sensor_channels'])))
print('')
# Determine how many samples the data file contains.
bytes_per_block = get_bytes_per_data_block(header)
# How many data blocks remain in this file?
data_present = False
bytes_remaining = filesize - fid.tell()
if bytes_remaining > 0:
data_present = True
if bytes_remaining % bytes_per_block != 0:
raise Exception('Something is wrong with file size : should have a whole number of data blocks')
num_data_blocks = int(bytes_remaining / bytes_per_block)
num_amplifier_samples = header['num_samples_per_data_block'] * num_data_blocks
num_aux_input_samples = int((header['num_samples_per_data_block'] / 4) * num_data_blocks)
num_supply_voltage_samples = 1 * num_data_blocks
num_board_adc_samples = header['num_samples_per_data_block'] * num_data_blocks
num_board_dig_in_samples = header['num_samples_per_data_block'] * num_data_blocks
num_board_dig_out_samples = header['num_samples_per_data_block'] * num_data_blocks
record_time = num_amplifier_samples / header['sample_rate']
if data_present:
print('File contains {:0.3f} seconds of data. Amplifiers were sampled at {:0.2f} kS/s.'.format(record_time, header['sample_rate'] / 1000))
else:
print('Header file contains no data. Amplifiers were sampled at {:0.2f} kS/s.'.format(header['sample_rate'] / 1000))
if data_present:
# Pre-allocate memory for data.
print('')
print('Allocating memory for data...')
data = {}
if (header['version']['major'] == 1 and header['version']['minor'] >= 2) or (header['version']['major'] > 1):
data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.int_)
else:
data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.uint)
data['amplifier_data'] = np.zeros([header['num_amplifier_channels'], num_amplifier_samples], dtype=np.uint)
data['aux_input_data'] = np.zeros([header['num_aux_input_channels'], num_aux_input_samples], dtype=np.uint)
data['supply_voltage_data'] = np.zeros([header['num_supply_voltage_channels'], num_supply_voltage_samples], dtype=np.uint)
data['temp_sensor_data'] = np.zeros([header['num_temp_sensor_channels'], num_supply_voltage_samples], dtype=np.uint)
data['board_adc_data'] = np.zeros([header['num_board_adc_channels'], num_board_adc_samples], dtype=np.uint)
# by default, this script interprets digital events (digital inputs and outputs) as booleans
# if unsigned int values are preferred(0 for False, 1 for True), replace the 'dtype=np.bool_' argument with 'dtype=np.uint' as shown
# the commented line below illustrates this for digital input data; the same can be done for digital out
#data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.uint)
data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.bool_)
data['board_dig_in_raw'] = np.zeros(num_board_dig_in_samples, dtype=np.uint)
data['board_dig_out_data'] = np.zeros([header['num_board_dig_out_channels'], num_board_dig_out_samples], dtype=np.bool_)
data['board_dig_out_raw'] = np.zeros(num_board_dig_out_samples, dtype=np.uint)
# Read sampled data from file.
print('Reading data from file...')
# Initialize indices used in looping
indices = {}
indices['amplifier'] = 0
indices['aux_input'] = 0
indices['supply_voltage'] = 0
indices['board_adc'] = 0
indices['board_dig_in'] = 0
indices['board_dig_out'] = 0
print_increment = 10
percent_done = print_increment
for i in range(num_data_blocks):
read_one_data_block(data, header, indices, fid)
# Increment indices
indices['amplifier'] += header['num_samples_per_data_block']
indices['aux_input'] += int(header['num_samples_per_data_block'] / 4)
indices['supply_voltage'] += 1
indices['board_adc'] += header['num_samples_per_data_block']
indices['board_dig_in'] += header['num_samples_per_data_block']
indices['board_dig_out'] += header['num_samples_per_data_block']
fraction_done = 100 * (1.0 * i / num_data_blocks)
if fraction_done >= percent_done:
print('{}% done...'.format(percent_done))
percent_done = percent_done + print_increment
# Make sure we have read exactly the right amount of data.
bytes_remaining = filesize - fid.tell()
if bytes_remaining != 0: raise Exception('Error: End of file not reached.')
# Close data file.
fid.close()
if (data_present):
print('Parsing data...')
# Extract digital input channels to separate variables.
for i in range(header['num_board_dig_in_channels']):
data['board_dig_in_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_in_raw'], (1 << header['board_dig_in_channels'][i]['native_order'])), 0)
# Extract digital output channels to separate variables.
for i in range(header['num_board_dig_out_channels']):
data['board_dig_out_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_out_raw'], (1 << header['board_dig_out_channels'][i]['native_order'])), 0)
# Scale voltage levels appropriately.
data['amplifier_data'] = np.multiply(0.195, (data['amplifier_data'].astype(np.int32) - 32768)) # units = microvolts
data['aux_input_data'] = np.multiply(37.4e-6, data['aux_input_data']) # units = volts
data['supply_voltage_data'] = np.multiply(74.8e-6, data['supply_voltage_data']) # units = volts
if header['eval_board_mode'] == 1:
data['board_adc_data'] = np.multiply(152.59e-6, (data['board_adc_data'].astype(np.int32) - 32768)) # units = volts
elif header['eval_board_mode'] == 13:
data['board_adc_data'] = np.multiply(312.5e-6, (data['board_adc_data'].astype(np.int32) - 32768)) # units = volts
else:
data['board_adc_data'] = np.multiply(50.354e-6, data['board_adc_data']) # units = volts
data['temp_sensor_data'] = np.multiply(0.01, data['temp_sensor_data']) # units = deg C
# Check for gaps in timestamps.
num_gaps = np.sum(np.not_equal(data['t_amplifier'][1:]-data['t_amplifier'][:-1], 1))
if num_gaps == 0:
print('No missing timestamps in data.')
else:
print('Warning: {0} gaps in timestamp data found. Time scale will not be uniform!'.format(num_gaps))
# Scale time steps (units = seconds).
data['t_amplifier'] = data['t_amplifier'] / header['sample_rate']
data['t_aux_input'] = data['t_amplifier'][range(0, len(data['t_amplifier']), 4)]
data['t_supply_voltage'] = data['t_amplifier'][range(0, len(data['t_amplifier']), header['num_samples_per_data_block'])]
data['t_board_adc'] = data['t_amplifier']
data['t_dig'] = data['t_amplifier']
data['t_temp_sensor'] = data['t_supply_voltage']
# If the software notch filter was selected during the recording, apply the
# same notch filter to amplifier data here.
if header['notch_filter_frequency'] > 0 and header['version']['major'] < 3:
print('Applying notch filter...')
print_increment = 10
percent_done = print_increment
for i in range(header['num_amplifier_channels']):
data['amplifier_data'][i,:] = notch_filter(data['amplifier_data'][i,:], header['sample_rate'], header['notch_filter_frequency'], 10)
fraction_done = 100 * (i / header['num_amplifier_channels'])
if fraction_done >= percent_done:
print('{}% done...'.format(percent_done))
percent_done += print_increment
else:
data = [];
# Move variables to result struct.
result = data_to_result(header, data, data_present)
print('Done! Elapsed time: {0:0.1f} seconds'.format(time.time() - tic))
return result
def plural(n):
"""Utility function to optionally pluralize words based on the value of n.
"""
if n == 1:
return ''
else:
return 's'
path="//equipe2-nas1/Gilles.DELBECQ/Data/ePhy/Février2023/Test_Gustave/raw/raw intan/Test_Gustave_15_03_230315_182841/Test_Gustave_15_03_230315_182841.rhd"
reader=read_data(path)
sampling_rate = reader['frequency_parameters']['amplifier_sample_rate']
time_vector=reader['t_amplifier']
signal=reader['amplifier_data']
selected_channels=['2','3','4','9','10','11','12','14','15']
#Filtering parameters
freq_low = 300
freq_high = 3000
order = 4
# Noise parameters
std_threshold = 5 #Times the std
noise_window = 5 #window for the noise calculation in sec
distance = 50 # distance between 2 spikes
#waveform window
waveform_window=5 #ms
Waveforms = True
def extract_spike_waveform(signal, spike_idx, left_width=(waveform_window/1000)*20000/2, right_width=(waveform_window/1000)*20000/2):
'''
Extract spike waveforms from an ephy recording.
INPUTS :
signal (1-d array) : the ephy signal
spike_idx (1-d array or integer list) : array containing the spike indexes (in points)
left_width / right_width (int) : half-width of the spike window, in samples
(the defaults assume the 20 kS/s sampling rate used in this script)
OUTPUTS :
SPIKES (list) : a list containing the waveform of each spike
'''
SPIKES = []
left_width = int(left_width)
right_width = int(right_width)
for i in range(len(spike_idx)):
index = spike_idx[i]
spike_wf = signal[index-left_width : index+right_width]
SPIKES.append(spike_wf)
return SPIKES
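# Worked example (hypothetical numbers): with waveform_window = 5 ms at 20 kS/s,
# left_width = right_width = (5/1000)*20000/2 = 50 samples, so each extracted
# waveform spans 100 samples, i.e. 2.5 ms on either side of the peak.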
def filter_signal(signal, order=order, sample_rate=sampling_rate, freq_low=freq_low, freq_high=freq_high, axis=0):
"""
From Théo G.
Filtering with scipy
inputs raw signal (array)
returns filtered signal (array)
"""
import scipy.signal
Wn = [freq_low / (sample_rate / 2), freq_high / (sample_rate / 2)]
sos_coeff = scipy.signal.iirfilter(order, Wn, btype="band", ftype="butter", output="sos")
filtered_signal = scipy.signal.sosfiltfilt(sos_coeff, signal, axis=axis)
return filtered_signal
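# Note: Wn normalises the cut-offs by the Nyquist frequency (sample_rate / 2);
# e.g. the 300-3000 Hz band at 20 kS/s becomes Wn = [0.03, 0.3]. sosfiltfilt
# runs the filter forward and backward, so the result is zero-phase.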
# def notch_filter(signal, order=4, sample_rate=20000, freq_low=48, freq_high=52, axis=0):
# import scipy.signal
# Wn = [freq_low / (sample_rate / 2), freq_high / (sample_rate / 2)]
# notch_coeff = scipy.signal.iirfilter(order, Wn, btype="bandstop", ftype="butter", output="sos")
# notch_signal = scipy.signal.sosfiltfilt(notch_coeff, signal, axis=axis)
# return notch_signal
filtered_signals=[]
for i in selected_channels:
filtered_signal=filter_signal(signal[int(i),:])
filtered_signals.append(filtered_signal)
# plt.figure()
# # plt.plot(time_vector,signal[0,:])
# plt.plot(time_vector,filtered_signal)
# plt.title(rf'channel {int(i)}')
filtered_signals = np.array(filtered_signals)
median = np.median(filtered_signals, axis=0) # median across channels, per sample
cmr_signals = filtered_signals - median # common median reference removal
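# The median across channels estimates noise shared by all electrodes
# (movement artefacts, line pickup); subtracting it leaves mostly
# channel-specific activity such as spikes.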
for i in range(len(cmr_signals)):
plt.figure()
plt.plot(time_vector,cmr_signals[i])
plt.title(rf'channel {int(selected_channels[i])}')
"""
Spike detection
"""
thresholds=[]
spikes_list=[]
spikes_list_y=[]
wfs=[]
waveforms=[]
for signal in cmr_signals: # NB: shadows the raw `signal` array loaded above
# Threshold calculation
noise = signal[0:int(noise_window*sampling_rate)] #noise window taken from individual channel signal
threshold = np.median(noise)+std_threshold*np.std(noise) #threshold calculation for the channel
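# (A common robust alternative, not used here, estimates the noise level as
# median(|signal|)/0.6745, e.g. Quiroga et al. 2004, which is less biased by
# the spikes themselves.)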
thresholds.append(threshold) # append it to the per-channel threshold list
# Detect the spike indexes
spike_idx, _ = sp.find_peaks(-signal,height=threshold,distance=distance)
#Convert to spike times
spike_times = spike_idx*1./sampling_rate
#Get spikes peak
spike_y = signal[spike_idx]
#Append spikes times to the list of all channels spikes
spikes_list.append(spike_times)
spikes_list_y.append(spike_y)
if Waveforms:
wfs = extract_spike_waveform(signal,spike_idx)
waveforms.append(wfs)
for index, channel_waveforms in enumerate(waveforms):
plt.figure()
# plt.title(rf'waveform_chan_{selected_channels[index]}')
# Time axis in ms around the peak (same hard-coded 20 kS/s rate as extract_spike_waveform).
time_axis = np.array(range(int(-(waveform_window/1000)*20000/2), int((waveform_window/1000)*20000/2)))/20000*1000
for wf in channel_waveforms:
# Edge spikes give truncated windows; only use the time axis when lengths match.
# (Data were already scaled to microvolts above, so no extra scaling here.)
if len(wf) == len(time_axis):
plt.plot(time_axis, wf)
else:
plt.plot(wf)
# plt.savefig(rf'{save_path}\waveform_chan_{selected_channels[index]}.svg')
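# A minimal sketch (hypothetical file names) for persisting the detection
# results; dtype=object because channels have different spike counts:
# np.save('spike_times.npy', np.array(spikes_list, dtype=object), allow_pickle=True)
# np.save('thresholds.npy', np.array(thresholds))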
term_gui.rs
// Copyright (c) The Swiboe development team. All rights reserved.
// Licensed under the Apache License, Version 2.0. See LICENSE.txt
// in the project root for license information.
#[macro_use]
extern crate clap;
extern crate rustbox;
extern crate serde_json;
extern crate subsequence_match;
extern crate swiboe;
extern crate swiboe_gui as gui;
extern crate time;
extern crate serde;
extern crate uuid;
use gui::buffer_views;
use serde::{Serialize, Deserialize};
use gui::keymap_handler;
use rustbox::{Color, RustBox};
use std::cmp;
use std::env;
use std::net;
use std::path;
use std::str::FromStr;
use std::sync::mpsc;
use std::sync::{RwLock, Arc};
use swiboe::client::{self, RpcCaller};
use uuid::Uuid;
fn clamp<T: Copy + cmp::Ord + std::fmt::Debug>(min: T, max: T, v: &mut T) {
let new_value = cmp::min(max, cmp::max(min, *v));
*v = new_value;
}
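// Example: clamp(0, 9, &mut i) pins i into [0, 9]; used below to keep the
// completer's selection index inside the bounds of the result list.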
struct CompleterWidget {
candidates: subsequence_match::CandidateSet,
rpc: Option<client::rpc::client::Context>,
query: String,
results: Vec<subsequence_match::QueryResult>,
selection_index: isize,
}
enum CompleterState {
Running,
Canceled,
Selected(String),
}
impl CompleterWidget {
fn new(client: &mut client::Client) -> swiboe::Result<Self> {
// TODO(sirver): This should use the current work directory of the server, since the server
// might run on a different machine than the client - and certainly in a different
// directory.
let current_dir = env::current_dir().unwrap();
let rpc = try!(client.call("list_files", &swiboe::plugin::list_files::ListFilesRequest {
directory: current_dir.to_string_lossy().into_owned(),
}));
Ok(CompleterWidget {
candidates: subsequence_match::CandidateSet::new(),
rpc: Some(rpc),
query: "".into(),
results: Vec::new(),
selection_index: 0,
})
}
fn on_key(&mut self, key: rustbox::Key) -> CompleterState {
match key {
rustbox::Key::Char(c) => {
self.query.push(c);
self.results.clear();
CompleterState::Running
},
rustbox::Key::Backspace => {
self.query.pop();
self.results.clear();
CompleterState::Running
},
rustbox::Key::Down => {
self.selection_index += 1;
CompleterState::Running
},
rustbox::Key::Up => {
self.selection_index -= 1;
CompleterState::Running
},
rustbox::Key::Esc => {
self.rpc.take().unwrap().cancel().unwrap();
CompleterState::Canceled
},
rustbox::Key::Enter => {
self.rpc.take().unwrap().cancel().unwrap();
if self.results.is_empty() {
CompleterState::Canceled
} else {
clamp(0, self.results.len() as isize - 1, &mut self.selection_index);
CompleterState::Selected(self.results[self.selection_index as usize].text.clone())
}
}
_ => CompleterState::Running,
}
}
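// The completer acts as a small state machine: each keystroke either keeps it
// Running, cancels it (Esc, or Enter with no results), or resolves it to the
// Selected file path, which handle_events() then opens via the "buffer.open" RPC.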
fn draw(&mut self, rustbox: &rustbox::RustBox) {
while let Some(b) = self.rpc.as_mut().unwrap().try_recv().unwrap() {
self.results.clear();
let b: swiboe::plugin::list_files::ListFilesUpdate = serde_json::from_value(b).unwrap();
for file in &b.files {
self.candidates.insert(file);
}
}
if self.results.is_empty() {
let query_to_use: String = self.query.chars().filter(|c| !c.is_whitespace()).collect();
self.candidates.query(&query_to_use, subsequence_match::MatchCase::No, &mut self.results);
}
if !self.results.is_empty() {
clamp(0, self.results.len() as isize - 1, &mut self.selection_index);
}
rustbox.print(0, 0, rustbox::RB_BOLD, Color::Yellow, Color::Default, &self.query);
let len_string = format!("{}/{} matching ({})", self.results.len(), self.candidates.len(),
if self.rpc.as_ref().unwrap().done() { "done" } else { "scanning" } );
rustbox.print(rustbox.width() - len_string.len() - 1, 0, rustbox::RB_BOLD, Color::Blue, Color::Default, &len_string);
let mut row = 1usize;
for result in &self.results {
let mut matching_indices = result.matching_indices.iter().peekable();
for (col, c) in result.text.chars().enumerate() {
let matches = match matching_indices.peek() {
Some(val) if **val == col => true,
_ => false,
};
let mut style = if matches {
matching_indices.next();
rustbox::RB_BOLD
} else {
rustbox::RB_NORMAL
};
if row as isize == self.selection_index + 1 {
style = style | rustbox::RB_REVERSE;
}
rustbox.print_char(col, row, style, Color::Default, Color::Default, c);
}
row += 1;
if row > rustbox.height() {
break;
}
}
}
}
struct BufferViewWidget {
view_id: String,
client: client::ThinClient,
cursor_id: String,
}
impl BufferViewWidget {
pub fn new(view_id: String, client: client::ThinClient) -> Self {
BufferViewWidget {
view_id: view_id,
client: client,
cursor_id: String::new(),
}
}
fn draw(&mut self, buffer_view: &buffer_views::BufferView, rustbox: &rustbox::RustBox) {
let mut row = 0;
let top_line_index = buffer_view.top_line_index as usize;
self.cursor_id = buffer_view.cursor.id().to_string();
let mut cursor_drawn = false;
while row < rustbox.height() {
let line_index = top_line_index + row;
if let Some(line) = buffer_view.lines.get(line_index) {
for (col, c) in line.chars().enumerate() {
if col >= rustbox.width() {
break;
}
let bg = if buffer_view.cursor.position.line_index == line_index as isize &&
buffer_view.cursor.position.column_index as usize == col {
cursor_drawn = true;
Color::Red
} else {
Color::Default
};
rustbox.print_char(col, row, rustbox::RB_NORMAL, Color::Default, bg, c);
}
}
row += 1;
}
if !cursor_drawn {
let row = buffer_view.cursor.position.line_index - top_line_index as isize;
rustbox.print_char(buffer_view.cursor.position.column_index as usize,
row as usize, rustbox::RB_NORMAL,
Color::Default, Color::Red, ' ');
}
}
}
#[derive(Debug)]
struct Options {
socket: String,
config_file: path::PathBuf,
}
struct TerminalGui {
config_file_runner: Box<gui::config_file::ConfigFileRunner>,
client: client::Client,
rustbox: rustbox::RustBox,
buffer_views: Arc<RwLock<gui::buffer_views::BufferViews>>,
last_key_down_event: time::PreciseTime,
completer: Option<CompleterWidget>,
buffer_view_widget: Option<BufferViewWidget>,
// NOCOM(#sirver): GuiCommand in namespace gui is very duplicated
gui_commands: mpsc::Receiver<gui::command::GuiCommand>,
}
impl TerminalGui {
fn new(options: &Options) -> swiboe::Result<Self> {
let mut client = match net::SocketAddr::from_str(&options.socket) {
Ok(value) => {
client::Client::connect_tcp(&value).unwrap()
}
Err(_) => {
let socket_path = path::PathBuf::from(&options.socket);
client::Client::connect_unix(&socket_path).unwrap()
}
};
let mut config_file_runner = gui::config_file::ConfigFileRunner::new(
try!(client.clone()));
config_file_runner.run(&options.config_file);
let rustbox = match RustBox::init(rustbox::InitOptions {
input_mode: rustbox::InputMode::Current,
buffer_stderr: true,
}) {
Result::Ok(v) => v,
Result::Err(e) => panic!("{}", e),
};
let gui_id: String = Uuid::new_v4().to_hyphenated_string();
let (gui_commands_tx, gui_commands_rx) = mpsc::channel();
let buffer_views = try!(gui::buffer_views::BufferViews::new(&gui_id, gui_commands_tx, &mut client));
Ok(TerminalGui {
config_file_runner: config_file_runner,
client: client,
rustbox: rustbox,
buffer_views: buffer_views,
last_key_down_event: time::PreciseTime::now(),
completer: None,
buffer_view_widget: None,
gui_commands: gui_commands_rx,
})
}
fn handle_events(&mut self) -> swiboe::Result<bool> {
match self.rustbox.peek_event(std::time::Duration::from_millis(5), false) {
Ok(rustbox::Event::KeyEvent(key)) => {
if self.completer.is_some() {
let rv = self.completer.as_mut().unwrap().on_key(key);
match rv {
CompleterState::Running => (),
CompleterState::Canceled => {
self.completer = None;
},
CompleterState::Selected(result) => {
self.completer = None;
let mut rpc = try!(self.client.call("buffer.open", &swiboe::plugin::buffer::open::Request {
uri: format!("file://{}", result),
}));
let response: swiboe::plugin::buffer::open::Response = rpc.wait_for().unwrap();
let mut buffer_views = self.buffer_views.write().unwrap();
let view_id = buffer_views.new_view(response.buffer_index, self.rustbox.width(), self.rustbox.height());
self.buffer_view_widget = Some(BufferViewWidget::new(view_id, try!(self.client.clone())));
},
}
} else {
if !try!(self.handle_key(key)) {
return Ok(false);
}
}
},
Err(e) => panic!("{}", e),
_ => { }
}
while let Ok(command) = self.gui_commands.try_recv() {
match command {
gui::command::GuiCommand::Quit => return Ok(false),
gui::command::GuiCommand::Redraw => (),
}
}
return Ok(true);
}
fn handle_key(&mut self, key: rustbox::Key) -> swiboe::Result<bool> {
let delta_t = {
let now = time::PreciseTime::now();
let delta_t = self.last_key_down_event.to(now);
self.last_key_down_event = now;
delta_t
};
let delta_t_in_seconds = delta_t.num_nanoseconds().unwrap() as f64 / 1e9;
match key {
// NOCOM(#sirver): should be handled through plugins.
rustbox::Key::Char('q') => return Ok(false),
rustbox::Key::Ctrl('t') => {
self.completer = Some(try!(CompleterWidget::new(&mut self.client)))
},
rustbox::Key::Esc => {
self.config_file_runner.keymap_handler.timeout();
},
rustbox::Key::Char(a) => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Char(a));
},
rustbox::Key::Up => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Up);
},
rustbox::Key::Down => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Down);
},
rustbox::Key::Left => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Left);
},
rustbox::Key::Right => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Right);
},
rustbox::Key::Tab => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Tab);
},
rustbox::Key::Ctrl(some_other_key) => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Ctrl);
try!(self.handle_key(rustbox::Key::Char(some_other_key)));
}
_ => (),
}
Ok(true)
}
fn draw(&mut self) {
self.rustbox.clear();
if let Some(ref mut widget) = self.buffer_view_widget {
let buffer_views = self.buffer_views.read().unwrap();
let buffer_view = buffer_views.get(&widget.view_id).unwrap();
widget.draw(&buffer_view, &self.rustbox);
}
if let Some(ref mut completer) = self.completer {
completer.draw(&self.rustbox);
}
self.rustbox.present();
}
}
fn parse_options() -> Options {
let matches = clap::App::new("term_gui")
.about("Terminal client for Swiboe")
.version(&crate_version!()[..])
.arg(clap::Arg::with_name("SOCKET")
.short("s")
.long("socket")
.help("Socket at which the master listens.")
.required(true)
.takes_value(true))
.arg(clap::Arg::with_name("CONFIG_FILE")
.short("c")
.long("config_file")
.help("The config file to run when the GUI starts up.")
.takes_value(true))
.get_matches();
Options {
config_file: path::PathBuf::from(matches.value_of("CONFIG_FILE").unwrap_or("config.lua")),
socket: matches.value_of("SOCKET").unwrap().into(),
}
}
fn main() {
let options = parse_options();
let mut gui = TerminalGui::new(&options).unwrap();
while gui.handle_events().unwrap() {
gui.draw();
}
}
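// Usage sketch (socket path is hypothetical; flag names are defined in
// parse_options above):
//   term_gui --socket /tmp/swiboe.sock --config_file config.lua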