import cv2 as cv
import numpy as np

W = 400

## [my_ellipse]
def my_ellipse(img, angle):
    thickness = 2
    line_type = 8
    cv.ellipse(img, (W // 2, W // 2), (W // 4, W // 16), angle, 0, 360, (255, 0, 0), thickness, line_type)
## [my_ellipse]

## [my_filled_circle]
def my_filled_circle(img, center):
    thickness = -1
    line_type = 8
    cv.circle(img, center, W // 32, (0, 0, 255), thickness, line_type)
## [my_filled_circle]

## [my_polygon]
def my_polygon(img):
    line_type = 8

    # Create some points
    ppt = np.array([[W / 4, 7 * W / 8], [3 * W / 4, 7 * W / 8], [3 * W / 4, 13 * W / 16], [11 * W / 16, 13 * W / 16],
                    [19 * W / 32, 3 * W / 8], [3 * W / 4, 3 * W / 8], [3 * W / 4, W / 8], [26 * W / 40, W / 8],
                    [26 * W / 40, W / 4], [22 * W / 40, W / 4], [22 * W / 40, W / 8], [18 * W / 40, W / 8],
                    [18 * W / 40, W / 4], [14 * W / 40, W / 4], [14 * W / 40, W / 8], [W / 4, W / 8],
                    [W / 4, 3 * W / 8], [13 * W / 32, 3 * W / 8], [5 * W / 16, 13 * W / 16], [W / 4, 13 * W / 16]], np.int32)
    ppt = ppt.reshape((-1, 1, 2))
    cv.fillPoly(img, [ppt], (255, 255, 255), line_type)
    # Only drawing the lines would be:
    # cv.polylines(img, [ppt], True, (255, 0, 255), line_type)
## [my_polygon]

## [my_line]
def my_line(img, start, end):
    thickness = 2
    line_type = 8
    cv.line(img, start, end, (0, 0, 0), thickness, line_type)
## [my_line]

## [create_images]
# Window names
atom_window = "Drawing 1: Atom"
rook_window = "Drawing 2: Rook"

# Create black empty images
size = W, W, 3
atom_image = np.zeros(size, dtype=np.uint8)
rook_image = np.zeros(size, dtype=np.uint8)
## [create_images]

## [draw_atom]
# 1. Draw a simple atom:
# -----------------------
# 1.a. Creating ellipses
my_ellipse(atom_image, 90)
my_ellipse(atom_image, 0)
my_ellipse(atom_image, 45)
my_ellipse(atom_image, -45)

# 1.b. Creating circles
my_filled_circle(atom_image, (W // 2, W // 2))
## [draw_atom]

## [draw_rook]
# 2. Draw a rook
# ------------------
# 2.a. Create a convex polygon
my_polygon(rook_image)

## [rectangle]
# 2.b. Creating rectangles
cv.rectangle(rook_image, (0, 7 * W // 8), (W, W), (0, 255, 255), -1, 8)
## [rectangle]

# 2.c. Create a few lines
my_line(rook_image, (0, 15 * W // 16), (W, 15 * W // 16))
my_line(rook_image, (W // 4, 7 * W // 8), (W // 4, W))
my_line(rook_image, (W // 2, 7 * W // 8), (W // 2, W))
my_line(rook_image, (3 * W // 4, 7 * W // 8), (3 * W // 4, W))
## [draw_rook]

cv.imshow(atom_window, atom_image)
cv.moveWindow(atom_window, 0, 200)
cv.imshow(rook_window, rook_image)
cv.moveWindow(rook_window, W, 200)

cv.waitKey(0)
cv.destroyAllWindows()
import sys
import cv2 as cv


def main(argv):
    print("""
    Zoom In-Out demo
    ------------------
    * [i] -> Zoom [i]n
    * [o] -> Zoom [o]ut
    * [ESC] -> Close program
    """)

    ## [load]
    filename = argv[0] if len(argv) > 0 else 'chicky_512.png'

    # Load the image
    src = cv.imread(cv.samples.findFile(filename))

    # Check if image is loaded fine
    if src is None:
        print('Error opening image!')
        print('Usage: pyramids.py [image_name -- default ../data/chicky_512.png] \n')
        return -1
    ## [load]

    ## [loop]
    while 1:
        rows, cols, _channels = map(int, src.shape)

        ## [show_image]
        cv.imshow('Pyramids Demo', src)
        ## [show_image]

        k = cv.waitKey(0)
        if k == 27:
            break
        ## [pyrup]
        elif chr(k) == 'i':
            src = cv.pyrUp(src, dstsize=(2 * cols, 2 * rows))
            print('** Zoom In: Image x 2')
        ## [pyrup]
        ## [pyrdown]
        elif chr(k) == 'o':
            src = cv.pyrDown(src, dstsize=(cols // 2, rows // 2))
            print('** Zoom Out: Image / 2')
        ## [pyrdown]
    ## [loop]

    cv.destroyAllWindows()
    return 0


if __name__ == "__main__":
    main(sys.argv[1:])
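A side note on the zoom demo above: cv.pyrUp and cv.pyrDown accept an explicit dstsize only within a small tolerance of double/half the source size, so the explicit (cols // 2, rows // 2) can fail on odd dimensions. The simplest robust pattern is to let OpenCV pick the default size. A minimal sketch of building a whole Gaussian pyramid this way (the level count and image are arbitrary illustration choices, not part of the tutorial):

import cv2 as cv

# Build a 4-level Gaussian pyramid; each level is roughly half the previous size.
img = cv.imread(cv.samples.findFile('chicky_512.png'))
pyramid = [img]
for _ in range(3):
    # Default dstsize is ((cols + 1) // 2, (rows + 1) // 2), which is always valid.
    pyramid.append(cv.pyrDown(pyramid[-1]))

for level, im in enumerate(pyramid):
    print('level', level, 'size:', im.shape[1], 'x', im.shape[0])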
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

erosion_size = 0
max_elem = 2
max_kernel_size = 21
title_trackbar_element_type = 'Element:\n 0: Rect \n 1: Cross \n 2: Ellipse'
title_trackbar_kernel_size = 'Kernel size:\n 2n +1'
title_erosion_window = 'Erosion Demo'
title_dilatation_window = 'Dilation Demo'


def erosion(val):
    erosion_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_erosion_window)
    erosion_type = 0
    val_type = cv.getTrackbarPos(title_trackbar_element_type, title_erosion_window)
    if val_type == 0:
        erosion_type = cv.MORPH_RECT
    elif val_type == 1:
        erosion_type = cv.MORPH_CROSS
    elif val_type == 2:
        erosion_type = cv.MORPH_ELLIPSE

    element = cv.getStructuringElement(erosion_type, (2*erosion_size + 1, 2*erosion_size + 1),
                                       (erosion_size, erosion_size))
    erosion_dst = cv.erode(src, element)
    cv.imshow(title_erosion_window, erosion_dst)


def dilatation(val):
    dilatation_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_dilatation_window)
    dilatation_type = 0
    val_type = cv.getTrackbarPos(title_trackbar_element_type, title_dilatation_window)
    if val_type == 0:
        dilatation_type = cv.MORPH_RECT
    elif val_type == 1:
        dilatation_type = cv.MORPH_CROSS
    elif val_type == 2:
        dilatation_type = cv.MORPH_ELLIPSE

    element = cv.getStructuringElement(dilatation_type, (2*dilatation_size + 1, 2*dilatation_size + 1),
                                       (dilatation_size, dilatation_size))
    dilatation_dst = cv.dilate(src, element)
    cv.imshow(title_dilatation_window, dilatation_dst)


parser = argparse.ArgumentParser(description='Code for Eroding and Dilating tutorial.')
parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image: ', args.input)
    exit(0)

cv.namedWindow(title_erosion_window)
cv.createTrackbar(title_trackbar_element_type, title_erosion_window, 0, max_elem, erosion)
cv.createTrackbar(title_trackbar_kernel_size, title_erosion_window, 0, max_kernel_size, erosion)

cv.namedWindow(title_dilatation_window)
cv.createTrackbar(title_trackbar_element_type, title_dilatation_window, 0, max_elem, dilatation)
cv.createTrackbar(title_trackbar_kernel_size, title_dilatation_window, 0, max_kernel_size, dilatation)

erosion(0)
dilatation(0)
cv.waitKey()
""" @file morph_lines_detection.py @brief Use morphology transformations for extracting horizontal and vertical lines sample code """ import numpy as np import sys import cv2 as cv def show_wait_destroy(winname, img): cv.imshow(winname, img) cv.moveWindow(winname, 500, 0) cv.waitKey(0) cv.destroyWindow(winname) def main(argv): # [load_image] # Check number of arguments if len(argv) < 1: print ('Not enough parameters') print ('Usage:\nmorph_lines_detection.py < path_to_image >') return -1 # Load the image src = cv.imread(argv[0], cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: print ('Error opening image: ' + argv[0]) return -1 # Show source image cv.imshow("src", src) # [load_image] # [gray] # Transform source image to gray if it is not already if len(src.shape) != 2: gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) else: gray = src # Show gray image show_wait_destroy("gray", gray) # [gray] # [bin] # Apply adaptiveThreshold at the bitwise_not of gray, notice the ~ symbol gray = cv.bitwise_not(gray) bw = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, \ cv.THRESH_BINARY, 15, -2) # Show binary image show_wait_destroy("binary", bw) # [bin] # [init] # Create the images that will use to extract the horizontal and vertical lines horizontal = np.copy(bw) vertical = np.copy(bw) # [init] # [horiz] # Specify size on horizontal axis cols = horizontal.shape[1] horizontal_size = cols // 30 # Create structure element for extracting horizontal lines through morphology operations horizontalStructure = cv.getStructuringElement(cv.MORPH_RECT, (horizontal_size, 1)) # Apply morphology operations horizontal = cv.erode(horizontal, horizontalStructure) horizontal = cv.dilate(horizontal, horizontalStructure) # Show extracted horizontal lines show_wait_destroy("horizontal", horizontal) # [horiz] # [vert] # Specify size on vertical axis rows = vertical.shape[0] verticalsize = rows // 30 # Create structure element for extracting vertical lines through morphology operations verticalStructure = cv.getStructuringElement(cv.MORPH_RECT, (1, verticalsize)) # Apply morphology operations vertical = cv.erode(vertical, verticalStructure) vertical = cv.dilate(vertical, verticalStructure) # Show extracted vertical lines show_wait_destroy("vertical", vertical) # [vert] # [smooth] # Inverse vertical image vertical = cv.bitwise_not(vertical) show_wait_destroy("vertical_bit", vertical) ''' Extract edges and smooth image according to the logic 1. extract edges 2. dilate(edges) 3. src.copyTo(smooth) 4. blur smooth img 5. smooth.copyTo(src, edges) ''' # Step 1 edges = cv.adaptiveThreshold(vertical, 255, cv.ADAPTIVE_THRESH_MEAN_C, \ cv.THRESH_BINARY, 3, -2) show_wait_destroy("edges", edges) # Step 2 kernel = np.ones((2, 2), np.uint8) edges = cv.dilate(edges, kernel) show_wait_destroy("dilate", edges) # Step 3 smooth = np.copy(vertical) # Step 4 smooth = cv.blur(smooth, (2, 2)) # Step 5 (rows, cols) = np.where(edges != 0) vertical[rows, cols] = smooth[rows, cols] # Show final result show_wait_destroy("smooth - final", vertical) # [smooth] return 0 if __name__ == "__main__": main(sys.argv[1:])
import cv2 as cv
import numpy as np
import argparse

W = 52          # window size is WxW
C_Thr = 0.43    # threshold for coherency
LowThr = 35     # threshold1 for orientation, it ranges from 0 to 180
HighThr = 57    # threshold2 for orientation, it ranges from 0 to 180

## [calcGST]
## [calcJ_header]
## [calcGST_proto]
def calcGST(inputIMG, w):
## [calcGST_proto]
    img = inputIMG.astype(np.float32)

    # GST components calculation (start)
    # J = (J11 J12; J12 J22) - GST
    imgDiffX = cv.Sobel(img, cv.CV_32F, 1, 0, ksize=3)
    imgDiffY = cv.Sobel(img, cv.CV_32F, 0, 1, ksize=3)
    imgDiffXY = cv.multiply(imgDiffX, imgDiffY)
    ## [calcJ_header]

    imgDiffXX = cv.multiply(imgDiffX, imgDiffX)
    imgDiffYY = cv.multiply(imgDiffY, imgDiffY)

    J11 = cv.boxFilter(imgDiffXX, cv.CV_32F, (w, w))
    J22 = cv.boxFilter(imgDiffYY, cv.CV_32F, (w, w))
    J12 = cv.boxFilter(imgDiffXY, cv.CV_32F, (w, w))
    # GST components calculation (stop)

    # eigenvalue calculation (start)
    # lambda1 = J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2)
    # lambda2 = J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2)
    tmp1 = J11 + J22
    tmp2 = J11 - J22
    tmp2 = cv.multiply(tmp2, tmp2)
    tmp3 = cv.multiply(J12, J12)
    tmp4 = np.sqrt(tmp2 + 4.0 * tmp3)

    lambda1 = tmp1 + tmp4    # biggest eigenvalue
    lambda2 = tmp1 - tmp4    # smallest eigenvalue
    # eigenvalue calculation (stop)

    # Coherency calculation (start)
    # Coherency = (lambda1 - lambda2)/(lambda1 + lambda2) - measure of anisotropy
    # Coherency is anisotropy degree (consistency of local orientation)
    imgCoherencyOut = cv.divide(lambda1 - lambda2, lambda1 + lambda2)
    # Coherency calculation (stop)

    # orientation angle calculation (start)
    # tan(2*Alpha) = 2*J12/(J22 - J11)
    # Alpha = 0.5 * atan2(2*J12, J22 - J11)
    imgOrientationOut = cv.phase(J22 - J11, 2.0 * J12, angleInDegrees=True)
    imgOrientationOut = 0.5 * imgOrientationOut
    # orientation angle calculation (stop)

    return imgCoherencyOut, imgOrientationOut
## [calcGST]


parser = argparse.ArgumentParser(description='Code for Anisotropic image segmentation tutorial.')
parser.add_argument('-i', '--input', help='Path to input image.', required=True)
args = parser.parse_args()

imgIn = cv.imread(args.input, cv.IMREAD_GRAYSCALE)
if imgIn is None:
    print('Could not open or find the image: {}'.format(args.input))
    exit(0)

## [main_extra]
## [main]
imgCoherency, imgOrientation = calcGST(imgIn, W)

## [thresholding]
_, imgCoherencyBin = cv.threshold(imgCoherency, C_Thr, 255, cv.THRESH_BINARY)
_, imgOrientationBin = cv.threshold(imgOrientation, LowThr, HighThr, cv.THRESH_BINARY)
## [thresholding]

## [combining]
imgBin = cv.bitwise_and(imgCoherencyBin, imgOrientationBin)
## [combining]
## [main]

imgCoherency = cv.normalize(imgCoherency, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
imgOrientation = cv.normalize(imgOrientation, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)

cv.imshow('result.jpg', np.uint8(0.5*(imgIn + imgBin)))
cv.imshow('Coherency.jpg', imgCoherency)
cv.imshow('Orientation.jpg', imgOrientation)
cv.waitKey(0)
## [main_extra]
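One subtlety in the thresholding step of the script above: with cv.THRESH_BINARY, the third argument of cv.threshold is the value written for pixels above LowThr, not an upper bound, so HighThr never acts as a cap there. If the goal is to keep only orientations inside the [LowThr, HighThr] band, cv.inRange is the natural tool; a minimal sketch under that assumption (the random array only stands in for imgOrientation):

import cv2 as cv
import numpy as np

LowThr, HighThr = 35, 57
imgOrientation = np.random.uniform(0, 180, (52, 52)).astype(np.float32)  # stand-in data

# 255 where LowThr <= pixel <= HighThr, 0 elsewhere (single-channel input)
imgOrientationBin = cv.inRange(imgOrientation, LowThr, HighThr)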
print('Not showing this text because it is outside the snippet')

## [hello_world]
print('Hello world!')
## [hello_world]
from __future__ import print_function
import cv2 as cv
import argparse

def detectAndDisplay(frame):
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    frame_gray = cv.equalizeHist(frame_gray)

    #-- Detect faces
    faces = face_cascade.detectMultiScale(frame_gray)
    for (x, y, w, h) in faces:
        center = (x + w//2, y + h//2)
        frame = cv.ellipse(frame, center, (w//2, h//2), 0, 0, 360, (255, 0, 255), 4)

        faceROI = frame_gray[y:y+h, x:x+w]
        #-- In each face, detect eyes
        eyes = eyes_cascade.detectMultiScale(faceROI)
        for (x2, y2, w2, h2) in eyes:
            eye_center = (x + x2 + w2//2, y + y2 + h2//2)
            radius = int(round((w2 + h2)*0.25))
            frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4)

    cv.imshow('Capture - Face detection', frame)

parser = argparse.ArgumentParser(description='Code for Cascade Classifier tutorial.')
parser.add_argument('--face_cascade', help='Path to face cascade.', default='data/haarcascades/haarcascade_frontalface_alt.xml')
parser.add_argument('--eyes_cascade', help='Path to eyes cascade.', default='data/haarcascades/haarcascade_eye_tree_eyeglasses.xml')
parser.add_argument('--camera', help='Camera device number.', type=int, default=0)
args = parser.parse_args()

face_cascade_name = args.face_cascade
eyes_cascade_name = args.eyes_cascade

face_cascade = cv.CascadeClassifier()
eyes_cascade = cv.CascadeClassifier()

#-- 1. Load the cascades
if not face_cascade.load(cv.samples.findFile(face_cascade_name)):
    print('--(!)Error loading face cascade')
    exit(0)
if not eyes_cascade.load(cv.samples.findFile(eyes_cascade_name)):
    print('--(!)Error loading eyes cascade')
    exit(0)

camera_device = args.camera
#-- 2. Read the video stream
cap = cv.VideoCapture(camera_device)
if not cap.isOpened():
    print('--(!)Error opening video capture')
    exit(0)

while True:
    ret, frame = cap.read()
    if frame is None:
        print('--(!) No captured frame -- Break!')
        break

    detectAndDisplay(frame)

    if cv.waitKey(10) == 27:
        break
""" @file copy_make_border.py @brief Sample code that shows the functionality of copyMakeBorder """ import sys from random import randint import cv2 as cv def main(argv): ## [variables] # First we declare the variables we are going to use borderType = cv.BORDER_CONSTANT window_name = "copyMakeBorder Demo" ## [variables] ## [load] imageName = argv[0] if len(argv) > 0 else 'lena.jpg' # Loads an image src = cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: print ('Error opening image!') print ('Usage: copy_make_border.py [image_name -- default lena.jpg] \n') return -1 ## [load] # Brief how-to for this program print ('\n' '\t copyMakeBorder Demo: \n' ' -------------------- \n' ' ** Press \'c\' to set the border to a random constant value \n' ' ** Press \'r\' to set the border to be replicated \n' ' ** Press \'ESC\' to exit the program ') ## [create_window] cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE) ## [create_window] ## [init_arguments] # Initialize arguments for the filter top = int(0.05 * src.shape[0]) # shape[0] = rows bottom = top left = int(0.05 * src.shape[1]) # shape[1] = cols right = left ## [init_arguments] while 1: ## [update_value] value = [randint(0, 255), randint(0, 255), randint(0, 255)] ## [update_value] ## [copymakeborder] dst = cv.copyMakeBorder(src, top, bottom, left, right, borderType, None, value) ## [copymakeborder] ## [display] cv.imshow(window_name, dst) ## [display] ## [check_keypress] c = cv.waitKey(500) if c == 27: break elif c == 99: # 99 = ord('c') borderType = cv.BORDER_CONSTANT elif c == 114: # 114 = ord('r') borderType = cv.BORDER_REPLICATE ## [check_keypress] return 0 if __name__ == "__main__": main(sys.argv[1:])
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng

rng.seed(12345)

## [load_image]
# Load the image
parser = argparse.ArgumentParser(description='Code for Image Segmentation with Distance Transform and Watershed Algorithm.\
    Sample code showing how to segment overlapping objects using Laplacian filtering, \
    in addition to Watershed and Distance Transformation')
parser.add_argument('--input', help='Path to input image.', default='cards.png')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# Show source image
cv.imshow('Source Image', src)
## [load_image]

## [black_bg]
# Change the background from white to black, since that will help later to extract
# better results during the use of Distance Transform
src[np.all(src == 255, axis=2)] = 0

# Show output image
cv.imshow('Black Background Image', src)
## [black_bg]

## [sharp]
# Create a kernel that we will use to sharpen our image:
# an approximation of the second derivative, a quite strong kernel
kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]], dtype=np.float32)

# Do the Laplacian filtering as it is.
# We need to convert everything to something deeper than CV_8U,
# because the kernel has some negative values,
# and we can expect in general to have a Laplacian image with negative values,
# BUT an 8-bit unsigned int (the one we are working with) can only contain values from 0 to 255,
# so the possible negative numbers would be truncated.
imgLaplacian = cv.filter2D(src, cv.CV_32F, kernel)
sharp = np.float32(src)
imgResult = sharp - imgLaplacian

# convert back to 8bits gray scale
imgResult = np.clip(imgResult, 0, 255)
imgResult = imgResult.astype('uint8')
imgLaplacian = np.clip(imgLaplacian, 0, 255)
imgLaplacian = np.uint8(imgLaplacian)

#cv.imshow('Laplace Filtered Image', imgLaplacian)
cv.imshow('New Sharpened Image', imgResult)
## [sharp]

## [bin]
# Create binary image from source image
bw = cv.cvtColor(imgResult, cv.COLOR_BGR2GRAY)
_, bw = cv.threshold(bw, 40, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
cv.imshow('Binary Image', bw)
## [bin]

## [dist]
# Perform the distance transform algorithm
dist = cv.distanceTransform(bw, cv.DIST_L2, 3)

# Normalize the distance image for range = {0.0, 1.0}
# so we can visualize and threshold it
cv.normalize(dist, dist, 0, 1.0, cv.NORM_MINMAX)
cv.imshow('Distance Transform Image', dist)
## [dist]

## [peaks]
# Threshold to obtain the peaks
# This will be the markers for the foreground objects
_, dist = cv.threshold(dist, 0.4, 1.0, cv.THRESH_BINARY)

# Dilate a bit the dist image
kernel1 = np.ones((3, 3), dtype=np.uint8)
dist = cv.dilate(dist, kernel1)
cv.imshow('Peaks', dist)
## [peaks]

## [seeds]
# Create the CV_8U version of the distance image
# It is needed for findContours()
dist_8u = dist.astype('uint8')

# Find total markers
contours, _ = cv.findContours(dist_8u, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

# Create the marker image for the watershed algorithm
markers = np.zeros(dist.shape, dtype=np.int32)

# Draw the foreground markers
for i in range(len(contours)):
    cv.drawContours(markers, contours, i, (i+1), -1)

# Draw the background marker
cv.circle(markers, (5, 5), 3, (255, 255, 255), -1)
cv.imshow('Markers', markers*10000)
## [seeds]

## [watershed]
# Perform the watershed algorithm
cv.watershed(imgResult, markers)

#mark = np.zeros(markers.shape, dtype=np.uint8)
mark = markers.astype('uint8')
mark = cv.bitwise_not(mark)
# uncomment this if you want to see how the mark
# image looks like at that point
#cv.imshow('Markers_v2', mark)

# Generate random colors
colors = []
for contour in contours:
    colors.append((rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)))

# Create the result image
dst = np.zeros((markers.shape[0], markers.shape[1], 3), dtype=np.uint8)

# Fill labeled objects with random colors
for i in range(markers.shape[0]):
    for j in range(markers.shape[1]):
        index = markers[i, j]
        if index > 0 and index <= len(contours):
            dst[i, j, :] = colors[index-1]

# Visualize the final image
cv.imshow('Final Result', dst)
## [watershed]

cv.waitKey()
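The per-pixel double loop that colors the watershed labels above is correct but slow in Python. A vectorized NumPy equivalent, written here as a small helper (a sketch that assumes the same markers and colors values produced by the script), does the same fill in a few array operations:

import numpy as np

def colorize_labels(markers, colors):
    # markers: int32 label image from cv.watershed; colors: one BGR tuple per label.
    lut = np.zeros((len(colors) + 1, 3), dtype=np.uint8)   # label 0 stays black
    lut[1:] = np.array(colors, dtype=np.uint8)
    valid = (markers > 0) & (markers <= len(colors))
    dst = np.zeros((markers.shape[0], markers.shape[1], 3), dtype=np.uint8)
    dst[valid] = lut[markers[valid]]                        # look up each label's color
    return dst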
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse


## [Update]
def update_map(ind, map_x, map_y):
    if ind == 0:
        for i in range(map_x.shape[0]):
            for j in range(map_x.shape[1]):
                if j > map_x.shape[1]*0.25 and j < map_x.shape[1]*0.75 and i > map_x.shape[0]*0.25 and i < map_x.shape[0]*0.75:
                    map_x[i, j] = 2 * (j - map_x.shape[1]*0.25) + 0.5
                    map_y[i, j] = 2 * (i - map_y.shape[0]*0.25) + 0.5
                else:
                    map_x[i, j] = 0
                    map_y[i, j] = 0
    elif ind == 1:
        for i in range(map_x.shape[0]):
            map_x[i, :] = [x for x in range(map_x.shape[1])]
        for j in range(map_y.shape[1]):
            map_y[:, j] = [map_y.shape[0] - y for y in range(map_y.shape[0])]
    elif ind == 2:
        for i in range(map_x.shape[0]):
            map_x[i, :] = [map_x.shape[1] - x for x in range(map_x.shape[1])]
        for j in range(map_y.shape[1]):
            map_y[:, j] = [y for y in range(map_y.shape[0])]
    elif ind == 3:
        for i in range(map_x.shape[0]):
            map_x[i, :] = [map_x.shape[1] - x for x in range(map_x.shape[1])]
        for j in range(map_y.shape[1]):
            map_y[:, j] = [map_y.shape[0] - y for y in range(map_y.shape[0])]
## [Update]

parser = argparse.ArgumentParser(description='Code for Remapping tutorial.')
parser.add_argument('--input', help='Path to input image.', default='chicky_512.png')
args = parser.parse_args()

## [Load]
src = cv.imread(cv.samples.findFile(args.input), cv.IMREAD_COLOR)
if src is None:
    print('Could not open or find the image: ', args.input)
    exit(0)
## [Load]

## [Create]
map_x = np.zeros((src.shape[0], src.shape[1]), dtype=np.float32)
map_y = np.zeros((src.shape[0], src.shape[1]), dtype=np.float32)
## [Create]

## [Window]
window_name = 'Remap demo'
cv.namedWindow(window_name)
## [Window]

## [Loop]
ind = 0
while True:
    update_map(ind, map_x, map_y)
    ind = (ind + 1) % 4
    dst = cv.remap(src, map_x, map_y, cv.INTER_LINEAR)
    cv.imshow(window_name, dst)
    c = cv.waitKey(1000)
    if c == 27:
        break
## [Loop]
from __future__ import print_function
import cv2 as cv
import argparse

max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3


def CannyThreshold(val):
    low_threshold = val
    img_blur = cv.blur(src_gray, (3, 3))
    detected_edges = cv.Canny(img_blur, low_threshold, low_threshold*ratio, kernel_size)
    mask = detected_edges != 0
    # broadcast the edge mask over all three channels, keeping only edge pixels
    dst = src * (mask[:, :, None].astype(src.dtype))
    cv.imshow(window_name, dst)


parser = argparse.ArgumentParser(description='Code for Canny Edge Detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='fruits.jpg')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image: ', args.input)
    exit(0)

src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

cv.namedWindow(window_name)
cv.createTrackbar(title_trackbar, window_name, 0, max_lowThreshold, CannyThreshold)

CannyThreshold(0)
cv.waitKey()
""" @file filter2D.py @brief Sample code that shows how to implement your own linear filters by using filter2D function """ import sys import cv2 as cv import numpy as np def main(argv): window_name = 'filter2D Demo' ## [load] imageName = argv[0] if len(argv) > 0 else 'lena.jpg' # Loads an image src = cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: print ('Error opening image!') print ('Usage: filter2D.py [image_name -- default lena.jpg] \n') return -1 ## [load] ## [init_arguments] # Initialize ddepth argument for the filter ddepth = -1 ## [init_arguments] # Loop - Will filter the image with different kernel sizes each 0.5 seconds ind = 0 while True: ## [update_kernel] # Update kernel size for a normalized box filter kernel_size = 3 + 2 * (ind % 5) kernel = np.ones((kernel_size, kernel_size), dtype=np.float32) kernel /= (kernel_size * kernel_size) ## [update_kernel] ## [apply_filter] # Apply filter dst = cv.filter2D(src, ddepth, kernel) ## [apply_filter] cv.imshow(window_name, dst) c = cv.waitKey(500) if c == 27: break ind += 1 return 0 if __name__ == "__main__": main(sys.argv[1:])
""" @file laplace_demo.py @brief Sample code showing how to detect edges using the Laplace operator """ import sys import cv2 as cv def main(argv): # [variables] # Declare the variables we are going to use ddepth = cv.CV_16S kernel_size = 3 window_name = "Laplace Demo" # [variables] # [load] imageName = argv[0] if len(argv) > 0 else 'lena.jpg' src = cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR) # Load an image # Check if image is loaded fine if src is None: print ('Error opening image') print ('Program Arguments: [image_name -- default lena.jpg]') return -1 # [load] # [reduce_noise] # Remove noise by blurring with a Gaussian filter src = cv.GaussianBlur(src, (3, 3), 0) # [reduce_noise] # [convert_to_gray] # Convert the image to grayscale src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) # [convert_to_gray] # Create Window cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE) # [laplacian] # Apply Laplace function dst = cv.Laplacian(src_gray, ddepth, ksize=kernel_size) # [laplacian] # [convert] # converting back to uint8 abs_dst = cv.convertScaleAbs(dst) # [convert] # [display] cv.imshow(window_name, abs_dst) cv.waitKey(0) # [display] return 0 if __name__ == "__main__": main(sys.argv[1:])
""" @file sobel_demo.py @brief Sample code using Sobel and/or Scharr OpenCV functions to make a simple Edge Detector """ import sys import cv2 as cv def main(argv): ## [variables] # First we declare the variables we are going to use window_name = ('Sobel Demo - Simple Edge Detector') scale = 1 delta = 0 ddepth = cv.CV_16S ## [variables] ## [load] # As usual we load our source image (src) # Check number of arguments if len(argv) < 1: print ('Not enough parameters') print ('Usage:\nmorph_lines_detection.py < path_to_image >') return -1 # Load the image src = cv.imread(argv[0], cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: print ('Error opening image: ' + argv[0]) return -1 ## [load] ## [reduce_noise] # Remove noise by blurring with a Gaussian filter ( kernel size = 3 ) src = cv.GaussianBlur(src, (3, 3), 0) ## [reduce_noise] ## [convert_to_gray] # Convert the image to grayscale gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) ## [convert_to_gray] ## [sobel] # Gradient-X # grad_x = cv.Scharr(gray,ddepth,1,0) grad_x = cv.Sobel(gray, ddepth, 1, 0, ksize=3, scale=scale, delta=delta, borderType=cv.BORDER_DEFAULT) # Gradient-Y # grad_y = cv.Scharr(gray,ddepth,0,1) grad_y = cv.Sobel(gray, ddepth, 0, 1, ksize=3, scale=scale, delta=delta, borderType=cv.BORDER_DEFAULT) ## [sobel] ## [convert] # converting back to uint8 abs_grad_x = cv.convertScaleAbs(grad_x) abs_grad_y = cv.convertScaleAbs(grad_y) ## [convert] ## [blend] ## Total Gradient (approximate) grad = cv.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0) ## [blend] ## [display] cv.imshow(window_name, grad) cv.waitKey(0) ## [display] return 0 if __name__ == "__main__": main(sys.argv[1:])
""" @file hough_lines.py @brief This program demonstrates line finding with the Hough transform """ import sys import math import cv2 as cv import numpy as np def main(argv): ## [load] default_file = 'sudoku.png' filename = argv[0] if len(argv) > 0 else default_file # Loads an image src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE) # Check if image is loaded fine if src is None: print ('Error opening image!') print ('Usage: hough_lines.py [image_name -- default ' + default_file + '] \n') return -1 ## [load] ## [edge_detection] # Edge detection dst = cv.Canny(src, 50, 200, None, 3) ## [edge_detection] # Copy edges to the images that will display the results in BGR cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR) cdstP = np.copy(cdst) ## [hough_lines] # Standard Hough Line Transform lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0) ## [hough_lines] ## [draw_lines] # Draw the lines if lines is not None: for i in range(0, len(lines)): rho = lines[i][0][0] theta = lines[i][0][1] a = math.cos(theta) b = math.sin(theta) x0 = a * rho y0 = b * rho pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a))) pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a))) cv.line(cdst, pt1, pt2, (0,0,255), 3, cv.LINE_AA) ## [draw_lines] ## [hough_lines_p] # Probabilistic Line Transform linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10) ## [hough_lines_p] ## [draw_lines_p] # Draw the lines if linesP is not None: for i in range(0, len(linesP)): l = linesP[i][0] cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0,0,255), 3, cv.LINE_AA) ## [draw_lines_p] ## [imshow] # Show results cv.imshow("Source", src) cv.imshow("Detected Lines (in red) - Standard Hough Line Transform", cdst) cv.imshow("Detected Lines (in red) - Probabilistic Line Transform", cdstP) ## [imshow] ## [exit] # Wait and Exit cv.waitKey() return 0 ## [exit] if __name__ == "__main__": main(sys.argv[1:])
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

## [Load the image]
parser = argparse.ArgumentParser(description='Code for Affine Transformations tutorial.')
parser.add_argument('--input', help='Path to input image.', default='lena.jpg')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Load the image]

## [Set your 3 points to calculate the Affine Transform]
srcTri = np.array([[0, 0], [src.shape[1] - 1, 0], [0, src.shape[0] - 1]]).astype(np.float32)
dstTri = np.array([[0, src.shape[1]*0.33], [src.shape[1]*0.85, src.shape[0]*0.25], [src.shape[1]*0.15, src.shape[0]*0.7]]).astype(np.float32)
## [Set your 3 points to calculate the Affine Transform]

## [Get the Affine Transform]
warp_mat = cv.getAffineTransform(srcTri, dstTri)
## [Get the Affine Transform]

## [Apply the Affine Transform just found to the src image]
warp_dst = cv.warpAffine(src, warp_mat, (src.shape[1], src.shape[0]))
## [Apply the Affine Transform just found to the src image]

# Rotating the image after Warp

## [Compute a rotation matrix with respect to the center of the image]
center = (warp_dst.shape[1]//2, warp_dst.shape[0]//2)
angle = -50
scale = 0.6
## [Compute a rotation matrix with respect to the center of the image]

## [Get the rotation matrix with the specifications above]
rot_mat = cv.getRotationMatrix2D(center, angle, scale)
## [Get the rotation matrix with the specifications above]

## [Rotate the warped image]
warp_rotate_dst = cv.warpAffine(warp_dst, rot_mat, (warp_dst.shape[1], warp_dst.shape[0]))
## [Rotate the warped image]

## [Show what you got]
cv.imshow('Source image', src)
cv.imshow('Warp', warp_dst)
cv.imshow('Warp + Rotate', warp_rotate_dst)
## [Show what you got]

## [Wait until user exits the program]
cv.waitKey()
## [Wait until user exits the program]
import sys
import cv2 as cv
import numpy as np


def main(argv):
    ## [load]
    default_file = 'smarties.png'
    filename = argv[0] if len(argv) > 0 else default_file

    # Loads an image
    src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_COLOR)

    # Check if image is loaded fine
    if src is None:
        print('Error opening image!')
        print('Usage: hough_circle.py [image_name -- default ' + default_file + '] \n')
        return -1
    ## [load]

    ## [convert_to_gray]
    # Convert it to gray
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    ## [convert_to_gray]

    ## [reduce_noise]
    # Reduce the noise to avoid false circle detection
    gray = cv.medianBlur(gray, 5)
    ## [reduce_noise]

    ## [houghcircles]
    rows = gray.shape[0]
    # param1: upper threshold of the internal Canny edge detector
    # param2: accumulator threshold for circle centers (smaller -> more false circles)
    circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, rows / 8,
                              param1=100, param2=30,
                              minRadius=1, maxRadius=30)
    ## [houghcircles]

    ## [draw]
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            center = (i[0], i[1])
            # circle center
            cv.circle(src, center, 1, (0, 100, 100), 3)
            # circle outline
            radius = i[2]
            cv.circle(src, center, radius, (255, 0, 255), 3)
    ## [draw]

    ## [display]
    cv.imshow("detected circles", src)
    cv.waitKey(0)
    ## [display]

    return 0


if __name__ == "__main__":
    main(sys.argv[1:])
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng

rng.seed(12345)


def thresh_callback(val):
    threshold = val

    # Detect edges using Canny
    canny_output = cv.Canny(src_gray, threshold, threshold * 2)

    # Find contours
    contours, hierarchy = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)

    # Draw contours
    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
    for i in range(len(contours)):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        cv.drawContours(drawing, contours, i, color, 2, cv.LINE_8, hierarchy, 0)

    # Show in a window
    cv.imshow('Contours', drawing)


# Load source image
parser = argparse.ArgumentParser(description='Code for Finding contours in your image tutorial.')
parser.add_argument('--input', help='Path to input image.', default='HappyFish.jpg')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# Convert image to gray and blur it
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
src_gray = cv.blur(src_gray, (3, 3))

# Create Window
source_window = 'Source'
cv.namedWindow(source_window)
cv.imshow(source_window, src)

max_thresh = 255
thresh = 100  # initial threshold
cv.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback)
thresh_callback(thresh)

cv.waitKey()
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
import random as rng

rng.seed(12345)


def thresh_callback(val):
    threshold = val

    ## [Canny]
    # Detect edges using Canny
    canny_output = cv.Canny(src_gray, threshold, threshold * 2)
    ## [Canny]

    ## [findContours]
    # Find contours
    contours, _ = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    ## [findContours]

    # Get the moments
    mu = [None]*len(contours)
    for i in range(len(contours)):
        mu[i] = cv.moments(contours[i])

    # Get the mass centers
    mc = [None]*len(contours)
    for i in range(len(contours)):
        # add 1e-5 to avoid division by zero
        mc[i] = (mu[i]['m10'] / (mu[i]['m00'] + 1e-5), mu[i]['m01'] / (mu[i]['m00'] + 1e-5))

    # Draw contours
    ## [zeroMat]
    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
    ## [zeroMat]
    ## [forContour]
    for i in range(len(contours)):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        cv.drawContours(drawing, contours, i, color, 2)
        cv.circle(drawing, (int(mc[i][0]), int(mc[i][1])), 4, color, -1)
    ## [forContour]

    ## [showDrawings]
    # Show in a window
    cv.imshow('Contours', drawing)
    ## [showDrawings]

    # Calculate the area with the moments 00 and compare with the result of the OpenCV function
    for i in range(len(contours)):
        print(' * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f' %
              (i, mu[i]['m00'], cv.contourArea(contours[i]), cv.arcLength(contours[i], True)))


## [setup]
# Load source image
parser = argparse.ArgumentParser(description='Code for Image Moments tutorial.')
parser.add_argument('--input', help='Path to input image.', default='stuff.jpg')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# Convert image to gray and blur it
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
src_gray = cv.blur(src_gray, (3, 3))
## [setup]

## [createWindow]
# Create Window
source_window = 'Source'
cv.namedWindow(source_window)
cv.imshow(source_window, src)
## [createWindow]

## [trackbar]
max_thresh = 255
thresh = 100  # initial threshold
cv.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback)
thresh_callback(thresh)
## [trackbar]

cv.waitKey()
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng

rng.seed(12345)


def thresh_callback(val):
    threshold = val

    ## [Canny]
    # Detect edges using Canny
    canny_output = cv.Canny(src_gray, threshold, threshold * 2)
    ## [Canny]

    ## [findContours]
    # Find contours
    contours, _ = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    ## [findContours]

    # Find the rotated rectangles and ellipses for each contour
    minRect = [None]*len(contours)
    minEllipse = [None]*len(contours)
    for i, c in enumerate(contours):
        minRect[i] = cv.minAreaRect(c)
        if c.shape[0] > 5:
            minEllipse[i] = cv.fitEllipse(c)

    # Draw contours + rotated rects + ellipses
    ## [zeroMat]
    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
    ## [zeroMat]
    ## [forContour]
    for i, c in enumerate(contours):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        # contour
        cv.drawContours(drawing, contours, i, color)
        # ellipse
        if c.shape[0] > 5:
            cv.ellipse(drawing, minEllipse[i], color, 2)
        # rotated rectangle
        box = cv.boxPoints(minRect[i])
        box = np.intp(box)  # np.intp: Integer used for indexing (same as C ssize_t; normally either int32 or int64)
        cv.drawContours(drawing, [box], 0, color)
    ## [forContour]

    ## [showDrawings]
    # Show in a window
    cv.imshow('Contours', drawing)
    ## [showDrawings]


## [setup]
# Load source image
parser = argparse.ArgumentParser(description='Code for Creating Bounding rotated boxes and ellipses for contours tutorial.')
parser.add_argument('--input', help='Path to input image.', default='stuff.jpg')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# Convert image to gray and blur it
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
src_gray = cv.blur(src_gray, (3, 3))
## [setup]

## [createWindow]
# Create Window
source_window = 'Source'
cv.namedWindow(source_window)
cv.imshow(source_window, src)
## [createWindow]

## [trackbar]
max_thresh = 255
thresh = 100  # initial threshold
cv.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback)
thresh_callback(thresh)
## [trackbar]

cv.waitKey()
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng

rng.seed(12345)


def thresh_callback(val):
    threshold = val

    # Detect edges using Canny
    canny_output = cv.Canny(src_gray, threshold, threshold * 2)

    # Find contours
    contours, _ = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)

    # Find the convex hull object for each contour
    hull_list = []
    for i in range(len(contours)):
        hull = cv.convexHull(contours[i])
        hull_list.append(hull)

    # Draw contours + hull results
    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
    for i in range(len(contours)):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        cv.drawContours(drawing, contours, i, color)
        cv.drawContours(drawing, hull_list, i, color)

    # Show in a window
    cv.imshow('Contours', drawing)


# Load source image
parser = argparse.ArgumentParser(description='Code for Convex Hull tutorial.')
parser.add_argument('--input', help='Path to input image.', default='stuff.jpg')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# Convert image to gray and blur it
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
src_gray = cv.blur(src_gray, (3, 3))

# Create Window
source_window = 'Source'
cv.namedWindow(source_window)
cv.imshow(source_window, src)

max_thresh = 255
thresh = 100  # initial threshold
cv.createTrackbar('Canny thresh:', source_window, thresh, max_thresh, thresh_callback)
thresh_callback(thresh)

cv.waitKey()
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np

# Create an image
r = 100
src = np.zeros((4*r, 4*r), dtype=np.uint8)

# Create a sequence of points to make a contour
vert = [None]*6
vert[0] = (3*r//2, int(1.34*r))
vert[1] = (1*r, 2*r)
vert[2] = (3*r//2, int(2.866*r))
vert[3] = (5*r//2, int(2.866*r))
vert[4] = (3*r, 2*r)
vert[5] = (5*r//2, int(1.34*r))

# Draw it in src
for i in range(6):
    cv.line(src, vert[i], vert[(i+1)%6], (255), 3)

# Get the contours
contours, _ = cv.findContours(src, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)

# Calculate the distances to the contour
raw_dist = np.empty(src.shape, dtype=np.float32)
for i in range(src.shape[0]):
    for j in range(src.shape[1]):
        raw_dist[i, j] = cv.pointPolygonTest(contours[0], (j, i), True)

minVal, maxVal, _, maxDistPt = cv.minMaxLoc(raw_dist)
minVal = abs(minVal)
maxVal = abs(maxVal)

# Depicting the distances graphically
drawing = np.zeros((src.shape[0], src.shape[1], 3), dtype=np.uint8)
for i in range(src.shape[0]):
    for j in range(src.shape[1]):
        if raw_dist[i, j] < 0:
            drawing[i, j, 0] = 255 - abs(raw_dist[i, j]) * 255 / minVal
        elif raw_dist[i, j] > 0:
            drawing[i, j, 2] = 255 - raw_dist[i, j] * 255 / maxVal
        else:
            drawing[i, j, 0] = 255
            drawing[i, j, 1] = 255
            drawing[i, j, 2] = 255

cv.circle(drawing, maxDistPt, int(maxVal), (255, 255, 255), 1, cv.LINE_8, 0)

cv.imshow('Source', src)
cv.imshow('Distance and inscribed circle', drawing)
cv.waitKey()
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng

rng.seed(12345)


def thresh_callback(val):
    threshold = val

    ## [Canny]
    # Detect edges using Canny
    canny_output = cv.Canny(src_gray, threshold, threshold * 2)
    ## [Canny]

    ## [findContours]
    # Find contours
    contours, _ = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    ## [findContours]

    ## [allthework]
    # Approximate contours to polygons + get bounding rects and circles
    contours_poly = [None]*len(contours)
    boundRect = [None]*len(contours)
    centers = [None]*len(contours)
    radius = [None]*len(contours)
    for i, c in enumerate(contours):
        contours_poly[i] = cv.approxPolyDP(c, 3, True)
        boundRect[i] = cv.boundingRect(contours_poly[i])
        centers[i], radius[i] = cv.minEnclosingCircle(contours_poly[i])
    ## [allthework]

    ## [zeroMat]
    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
    ## [zeroMat]

    ## [forContour]
    # Draw polygonal contour + bounding rects + circles
    for i in range(len(contours)):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        cv.drawContours(drawing, contours_poly, i, color)
        cv.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])), \
                     (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)
        cv.circle(drawing, (int(centers[i][0]), int(centers[i][1])), int(radius[i]), color, 2)
    ## [forContour]

    ## [showDrawings]
    # Show in a window
    cv.imshow('Contours', drawing)
    ## [showDrawings]


## [setup]
# Load source image
parser = argparse.ArgumentParser(description='Code for Creating Bounding boxes and circles for contours tutorial.')
parser.add_argument('--input', help='Path to input image.', default='stuff.jpg')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# Convert image to gray and blur it
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
src_gray = cv.blur(src_gray, (3, 3))
## [setup]

## [createWindow]
# Create Window
source_window = 'Source'
cv.namedWindow(source_window)
cv.imshow(source_window, src)
## [createWindow]

## [trackbar]
max_thresh = 255
thresh = 100  # initial threshold
cv.createTrackbar('Canny thresh:', source_window, thresh, max_thresh, thresh_callback)
thresh_callback(thresh)
## [trackbar]

cv.waitKey()
from __future__ import print_function
import cv2 as cv
import numpy as np
import random as rng

NTRAINING_SAMPLES = 100  # Number of training samples per class
FRAC_LINEAR_SEP = 0.9    # Fraction of samples which compose the linear separable part

# Data for visual representation
WIDTH = 512
HEIGHT = 512
I = np.zeros((HEIGHT, WIDTH, 3), dtype=np.uint8)

# --------------------- 1. Set up training data randomly ---------------------------------------
trainData = np.empty((2*NTRAINING_SAMPLES, 2), dtype=np.float32)
labels = np.empty((2*NTRAINING_SAMPLES, 1), dtype=np.int32)

rng.seed(100)  # Random value generation class

# Set up the linearly separable part of the training data
nLinearSamples = int(FRAC_LINEAR_SEP * NTRAINING_SAMPLES)

## [setup1]
# Generate random points for the class 1
trainClass = trainData[0:nLinearSamples, :]
# The x coordinate of the points is in [0, 0.4)
c = trainClass[:, 0:1]
c[:] = np.random.uniform(0.0, 0.4 * WIDTH, c.shape)
# The y coordinate of the points is in [0, 1)
c = trainClass[:, 1:2]
c[:] = np.random.uniform(0.0, HEIGHT, c.shape)

# Generate random points for the class 2
trainClass = trainData[2*NTRAINING_SAMPLES-nLinearSamples:2*NTRAINING_SAMPLES, :]
# The x coordinate of the points is in [0.6, 1]
c = trainClass[:, 0:1]
c[:] = np.random.uniform(0.6*WIDTH, WIDTH, c.shape)
# The y coordinate of the points is in [0, 1)
c = trainClass[:, 1:2]
c[:] = np.random.uniform(0.0, HEIGHT, c.shape)
## [setup1]

#------------------ Set up the non-linearly separable part of the training data ---------------
## [setup2]
# Generate random points for the classes 1 and 2
trainClass = trainData[nLinearSamples:2*NTRAINING_SAMPLES-nLinearSamples, :]
# The x coordinate of the points is in [0.4, 0.6)
c = trainClass[:, 0:1]
c[:] = np.random.uniform(0.4*WIDTH, 0.6*WIDTH, c.shape)
# The y coordinate of the points is in [0, 1)
c = trainClass[:, 1:2]
c[:] = np.random.uniform(0.0, HEIGHT, c.shape)
## [setup2]

#------------------------- Set up the labels for the classes ----------------------------------
labels[0:NTRAINING_SAMPLES, :] = 1                    # Class 1
labels[NTRAINING_SAMPLES:2*NTRAINING_SAMPLES, :] = 2  # Class 2

#------------------------ 2. Set up the support vector machines parameters --------------------
print('Starting training process')
## [init]
svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setC(0.1)
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, int(1e7), 1e-6))
## [init]

#------------------------ 3. Train the svm ----------------------------------------------------
## [train]
svm.train(trainData, cv.ml.ROW_SAMPLE, labels)
## [train]
print('Finished training process')

#------------------------ 4. Show the decision regions ----------------------------------------
## [show]
green = (0, 100, 0)
blue = (100, 0, 0)
for i in range(I.shape[0]):
    for j in range(I.shape[1]):
        sampleMat = np.matrix([[j, i]], dtype=np.float32)
        response = svm.predict(sampleMat)[1]
        if response == 1:
            I[i, j] = green
        elif response == 2:
            I[i, j] = blue
## [show]

#----------------------- 5. Show the training data --------------------------------------------
## [show_data]
thick = -1
# Class 1
for i in range(NTRAINING_SAMPLES):
    px = trainData[i, 0]
    py = trainData[i, 1]
    cv.circle(I, (int(px), int(py)), 3, (0, 255, 0), thick)
# Class 2
for i in range(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES):
    px = trainData[i, 0]
    py = trainData[i, 1]
    cv.circle(I, (int(px), int(py)), 3, (255, 0, 0), thick)
## [show_data]

#------------------------- 6. Show support vectors --------------------------------------------
## [show_vectors]
thick = 2
sv = svm.getUncompressedSupportVectors()

for i in range(sv.shape[0]):
    cv.circle(I, (int(sv[i, 0]), int(sv[i, 1])), 6, (128, 128, 128), thick)
## [show_vectors]

cv.imwrite('result.png', I)                       # save the Image
cv.imshow('SVM for Non-Linear Training Data', I)  # show it to the user
cv.waitKey()
import cv2 as cv
import numpy as np

# Set up training data
## [setup1]
labels = np.array([1, -1, -1, -1])
trainingData = np.matrix([[501, 10], [255, 10], [501, 255], [10, 501]], dtype=np.float32)
## [setup1]

# Train the SVM
## [init]
svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))
## [init]
## [train]
svm.train(trainingData, cv.ml.ROW_SAMPLE, labels)
## [train]

# Data for visual representation
width = 512
height = 512
image = np.zeros((height, width, 3), dtype=np.uint8)

# Show the decision regions given by the SVM
## [show]
green = (0, 255, 0)
blue = (255, 0, 0)
for i in range(image.shape[0]):
    for j in range(image.shape[1]):
        sampleMat = np.matrix([[j, i]], dtype=np.float32)
        response = svm.predict(sampleMat)[1]
        if response == 1:
            image[i, j] = green
        elif response == -1:
            image[i, j] = blue
## [show]

# Show the training data
## [show_data]
thickness = -1
cv.circle(image, (501, 10), 5, (0, 0, 0), thickness)
cv.circle(image, (255, 10), 5, (255, 255, 255), thickness)
cv.circle(image, (501, 255), 5, (255, 255, 255), thickness)
cv.circle(image, (10, 501), 5, (255, 255, 255), thickness)
## [show_data]

# Show support vectors
## [show_vectors]
thickness = 2
sv = svm.getUncompressedSupportVectors()

for i in range(sv.shape[0]):
    cv.circle(image, (int(sv[i, 0]), int(sv[i, 1])), 6, (128, 128, 128), thickness)
## [show_vectors]

cv.imwrite('result.png', image)         # save the image
cv.imshow('SVM Simple Example', image)  # show it to the user
cv.waitKey()
#!/usr/bin/env python

import cv2 as cv
import numpy as np

SZ = 20
bin_n = 16  # Number of bins

affine_flags = cv.WARP_INVERSE_MAP | cv.INTER_LINEAR

## [deskew]
def deskew(img):
    m = cv.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11']/m['mu02']
    M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
    img = cv.warpAffine(img, M, (SZ, SZ), flags=affine_flags)
    return img
## [deskew]

## [hog]
def hog(img):
    gx = cv.Sobel(img, cv.CV_32F, 1, 0)
    gy = cv.Sobel(img, cv.CV_32F, 0, 1)
    mag, ang = cv.cartToPolar(gx, gy)
    bins = np.int32(bin_n*ang/(2*np.pi))  # quantizing binvalues in (0...16)
    bin_cells = bins[:10, :10], bins[10:, :10], bins[:10, 10:], bins[10:, 10:]
    mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
    hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists)  # hist is a 64-element vector
    return hist
## [hog]

img = cv.imread('digits.png', 0)
if img is None:
    raise Exception("we need the digits.png image from samples/data here !")

cells = [np.hsplit(row, 100) for row in np.vsplit(img, 50)]

# First half is trainData, remaining is testData
train_cells = [i[:50] for i in cells]
test_cells = [i[50:] for i in cells]

######     Now training      ########################

deskewed = [list(map(deskew, row)) for row in train_cells]
hogdata = [list(map(hog, row)) for row in deskewed]
trainData = np.float32(hogdata).reshape(-1, 64)
responses = np.repeat(np.arange(10), 250)[:, np.newaxis]

svm = cv.ml.SVM_create()
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setType(cv.ml.SVM_C_SVC)
svm.setC(2.67)
svm.setGamma(5.383)

svm.train(trainData, cv.ml.ROW_SAMPLE, responses)
svm.save('svm_data.dat')

######     Now testing      ########################

deskewed = [list(map(deskew, row)) for row in test_cells]
hogdata = [list(map(hog, row)) for row in deskewed]
testData = np.float32(hogdata).reshape(-1, bin_n*4)
result = svm.predict(testData)[1]

#######   Check Accuracy   ########################
mask = result == responses
correct = np.count_nonzero(mask)
print(correct*100.0/result.size)
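Since the script above persists the model with svm.save('svm_data.dat'), it can be reloaded later without retraining. A minimal sketch (the zero vector is only a placeholder; in practice the input would come from hog(deskew(cell)) as above):

import cv2 as cv
import numpy as np

svm = cv.ml.SVM_load('svm_data.dat')          # reload the trained model from disk
sample = np.zeros((1, 64), dtype=np.float32)  # placeholder 64-element HOG feature
_, response = svm.predict(sample)
print('predicted digit:', int(response[0, 0]))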
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
from math import atan2, cos, sin, sqrt, pi


def drawAxis(img, p_, q_, colour, scale):
    p = list(p_)
    q = list(q_)

    ## [visualization1]
    angle = atan2(p[1] - q[1], p[0] - q[0])  # angle in radians
    hypotenuse = sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0]))

    # Here we lengthen the arrow by a factor of scale
    q[0] = p[0] - scale * hypotenuse * cos(angle)
    q[1] = p[1] - scale * hypotenuse * sin(angle)
    cv.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv.LINE_AA)

    # create the arrow hooks
    p[0] = q[0] + 9 * cos(angle + pi / 4)
    p[1] = q[1] + 9 * sin(angle + pi / 4)
    cv.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv.LINE_AA)

    p[0] = q[0] + 9 * cos(angle - pi / 4)
    p[1] = q[1] + 9 * sin(angle - pi / 4)
    cv.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv.LINE_AA)
    ## [visualization1]


def getOrientation(pts, img):
    ## [pca]
    # Construct a buffer used by the pca analysis
    sz = len(pts)
    data_pts = np.empty((sz, 2), dtype=np.float64)
    for i in range(data_pts.shape[0]):
        data_pts[i, 0] = pts[i, 0, 0]
        data_pts[i, 1] = pts[i, 0, 1]

    # Perform PCA analysis
    mean = np.empty((0))
    mean, eigenvectors, eigenvalues = cv.PCACompute2(data_pts, mean)

    # Store the center of the object
    cntr = (int(mean[0, 0]), int(mean[0, 1]))
    ## [pca]

    ## [visualization]
    # Draw the principal components
    cv.circle(img, cntr, 3, (255, 0, 255), 2)
    p1 = (cntr[0] + 0.02 * eigenvectors[0, 0] * eigenvalues[0, 0],
          cntr[1] + 0.02 * eigenvectors[0, 1] * eigenvalues[0, 0])
    p2 = (cntr[0] - 0.02 * eigenvectors[1, 0] * eigenvalues[1, 0],
          cntr[1] - 0.02 * eigenvectors[1, 1] * eigenvalues[1, 0])
    drawAxis(img, cntr, p1, (0, 255, 0), 1)
    drawAxis(img, cntr, p2, (255, 255, 0), 5)

    angle = atan2(eigenvectors[0, 1], eigenvectors[0, 0])  # orientation in radians
    ## [visualization]

    return angle


## [pre-process]
# Load image
parser = argparse.ArgumentParser(description='Code for Introduction to Principal Component Analysis (PCA) tutorial.\
    This program demonstrates how to use OpenCV PCA to extract the orientation of an object.')
parser.add_argument('--input', help='Path to input image.', default='pca_test1.jpg')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input))
# Check if image is loaded successfully
if src is None:
    print('Could not open or find the image: ', args.input)
    exit(0)

cv.imshow('src', src)

# Convert image to grayscale
gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Convert image to binary
_, bw = cv.threshold(gray, 50, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
## [pre-process]

## [contours]
# Find all the contours in the thresholded image
contours, _ = cv.findContours(bw, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)

for i, c in enumerate(contours):
    # Calculate the area of each contour
    area = cv.contourArea(c)
    # Ignore contours that are too small or too large
    if area < 1e2 or 1e5 < area:
        continue

    # Draw each contour only for visualisation purposes
    cv.drawContours(src, contours, i, (0, 0, 255), 2)
    # Find the orientation of each shape
    getOrientation(c, src)
## [contours]

cv.imshow('output', src)
cv.waitKey()
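getOrientation in the PCA sample returns the angle in radians straight from atan2, which lies in (-pi, pi]. If a display-friendly value is wanted, a small helper (an illustrative addition, not part of the tutorial) can map it to degrees in [0, 360):

from math import pi

def orientation_degrees(angle_radians):
    # Map an atan2 result from (-pi, pi] radians to [0, 360) degrees.
    return (angle_radians * 180.0 / pi) % 360.0

print(orientation_degrees(-pi / 4))  # 315.0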
from __future__ import print_function
from __future__ import division
import cv2 as cv
import argparse

alpha_slider_max = 100
title_window = 'Linear Blend'

## [on_trackbar]
def on_trackbar(val):
    alpha = val / alpha_slider_max
    beta = (1.0 - alpha)
    dst = cv.addWeighted(src1, alpha, src2, beta, 0.0)
    cv.imshow(title_window, dst)
## [on_trackbar]

parser = argparse.ArgumentParser(description='Code for Adding a Trackbar to our applications tutorial.')
parser.add_argument('--input1', help='Path to the first input image.', default='LinuxLogo.jpg')
parser.add_argument('--input2', help='Path to the second input image.', default='WindowsLogo.jpg')
args = parser.parse_args()

## [load]
# Read images ( both have to be of the same size and type )
src1 = cv.imread(cv.samples.findFile(args.input1))
src2 = cv.imread(cv.samples.findFile(args.input2))
## [load]
if src1 is None:
    print('Could not open or find the image: ', args.input1)
    exit(0)
if src2 is None:
    print('Could not open or find the image: ', args.input2)
    exit(0)

## [window]
cv.namedWindow(title_window)
## [window]

## [create_trackbar]
trackbar_name = 'Alpha x %d' % alpha_slider_max
cv.createTrackbar(trackbar_name, title_window, 0, alpha_slider_max, on_trackbar)
## [create_trackbar]

# Show some stuff
on_trackbar(0)

# Wait until user press some key
cv.waitKey()
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.')
parser.add_argument('--input', help='Path to input image.', default='box.png')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input), cv.IMREAD_GRAYSCALE)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints = detector.detect(src)

#-- Draw keypoints
img_keypoints = np.empty((src.shape[0], src.shape[1], 3), dtype=np.uint8)
cv.drawKeypoints(src, keypoints, img_keypoints)

#-- Show detected (drawn) keypoints
cv.imshow('SURF Keypoints', img_keypoints)

cv.waitKey()
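cv.xfeatures2d_SURF is only available in builds that include the contrib modules with non-free algorithms enabled. When such a build isn't available, the same detect-and-draw pipeline works with the patent-free ORB detector from the core modules; a sketch under that substitution (the image path mirrors the sample's default):

import cv2 as cv
import numpy as np

src = cv.imread(cv.samples.findFile('box.png'), cv.IMREAD_GRAYSCALE)

detector = cv.ORB_create()        # ships with the core build, no contrib needed
keypoints = detector.detect(src)

img_keypoints = np.empty((src.shape[0], src.shape[1], 3), dtype=np.uint8)
cv.drawKeypoints(src, keypoints, img_keypoints)
cv.imshow('ORB Keypoints', img_keypoints)
cv.waitKey()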
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png')
args = parser.parse_args()

img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)

#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2)

#-- Filter matches using Lowe's ratio test
ratio_thresh = 0.7
good_matches = []
for m, n in knn_matches:
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)

#-- Draw matches
img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

#-- Show detected matches
cv.imshow('Good Matches', img_matches)

cv.waitKey()
from __future__ import print_function import cv2 as cv import numpy as np import argparse parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.') parser.add_argument('--input1', help='Path to input image 1.', default='box.png') parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png') args = parser.parse_args() img_object = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) img_scene = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) if img_object is None or img_scene is None: print('Could not open or find the images!') exit(0) #-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors minHessian = 400 detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian) keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None) keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None) #-- Step 2: Matching descriptor vectors with a FLANN based matcher # Since SURF is a floating-point descriptor NORM_L2 is used matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED) knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2) #-- Filter matches using the Lowe's ratio test ratio_thresh = 0.75 good_matches = [] for m,n in knn_matches: if m.distance < ratio_thresh * n.distance: good_matches.append(m) #-- Draw matches img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1]+img_scene.shape[1], 3), dtype=np.uint8) cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) #-- Localize the object obj = np.empty((len(good_matches),2), dtype=np.float32) scene = np.empty((len(good_matches),2), dtype=np.float32) for i in range(len(good_matches)): #-- Get the keypoints from the good matches obj[i,0] = keypoints_obj[good_matches[i].queryIdx].pt[0] obj[i,1] = keypoints_obj[good_matches[i].queryIdx].pt[1] scene[i,0] = keypoints_scene[good_matches[i].trainIdx].pt[0] scene[i,1] = keypoints_scene[good_matches[i].trainIdx].pt[1] H, _ = cv.findHomography(obj, scene, cv.RANSAC) #-- Get the corners from the image_1 ( the object to be "detected" ) obj_corners = np.empty((4,1,2), dtype=np.float32) obj_corners[0,0,0] = 0 obj_corners[0,0,1] = 0 obj_corners[1,0,0] = img_object.shape[1] obj_corners[1,0,1] = 0 obj_corners[2,0,0] = img_object.shape[1] obj_corners[2,0,1] = img_object.shape[0] obj_corners[3,0,0] = 0 obj_corners[3,0,1] = img_object.shape[0] scene_corners = cv.perspectiveTransform(obj_corners, H) #-- Draw lines between the corners (the mapped object in the scene - image_2 ) cv.line(img_matches, (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])),\ (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4) cv.line(img_matches, (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])),\ (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4) cv.line(img_matches, (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])),\ (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4) cv.line(img_matches, (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])),\ (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4) #-- Show detected matches cv.imshow('Good Matches & Object detection', img_matches) 
cv.waitKey()
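# perspectiveTransform, used above to map the object corners into the scene,
# applies H to homogeneous points and renormalizes by the third coordinate.
# A sketch with a pure-translation homography, so the output is easy to verify:
import numpy as np
import cv2 as cv

H = np.array([[1, 0, 10],
              [0, 1, 5],
              [0, 0, 1]], dtype=np.float64)
corners = np.array([[[0, 0]], [[100, 0]], [[100, 50]], [[0, 50]]], dtype=np.float32)
print(cv.perspectiveTransform(corners, H).reshape(-1, 2))  # each point shifted by (10, 5)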
#!/usr/bin/env python # -*- coding: utf-8 -*- # Python 2/3 compatibility from __future__ import print_function import numpy as np import cv2 as cv def basicPanoramaStitching(img1Path, img2Path): img1 = cv.imread(cv.samples.findFile(img1Path)) img2 = cv.imread(cv.samples.findFile(img2Path)) # [camera-pose-from-Blender-at-location-1] c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112], [ 0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443], [-0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654], [0, 0, 0, 1]],dtype=np.float64) # [camera-pose-from-Blender-at-location-1] # [camera-pose-from-Blender-at-location-2] c2Mo = np.array([[0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112], [-0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443], [0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654], [0, 0, 0, 1]],dtype=np.float64) # [camera-pose-from-Blender-at-location-2] # [camera-intrinsics-from-Blender] cameraMatrix = np.array([[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0, 0, 1]], dtype=np.float32) # [camera-intrinsics-from-Blender] # [extract-rotation] R1 = c1Mo[0:3, 0:3] R2 = c2Mo[0:3, 0:3] #[extract-rotation] # [compute-rotation-displacement] R2 = R2.transpose() R_2to1 = np.dot(R1,R2) # [compute-rotation-displacement] # [compute-homography] H = cameraMatrix.dot(R_2to1).dot(np.linalg.inv(cameraMatrix)) H = H / H[2][2] # [compute-homography] # [stitch] img_stitch = cv.warpPerspective(img2, H, (img2.shape[1]*2, img2.shape[0])) img_stitch[0:img1.shape[0], 0:img1.shape[1]] = img1 # [stitch] img_space = np.zeros((img1.shape[0],50,3), dtype=np.uint8) img_compare = cv.hconcat([img1,img_space, img2]) cv.imshow("Final", img_compare) cv.imshow("Panorama", img_stitch) cv.waitKey(0) def main(): import argparse parser = argparse.ArgumentParser(description="Code for homography tutorial. Example 5: basic panorama stitching from a rotating camera.") parser.add_argument("-I1","--image1", help = "path to first image", default="Blender_Suzanne1.jpg") parser.add_argument("-I2","--image2", help = "path to second image", default="Blender_Suzanne2.jpg") args = parser.parse_args() print("Panorama Stitching Started") basicPanoramaStitching(args.image1, args.image2) print("Panorama Stitching Completed Successfully") if __name__ == '__main__': main()
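# For a purely rotating camera, the stitcher above composes the image-to-image
# homography as H = K * R_2to1 * K^-1. The same composition on a toy 5-degree
# roll about the optical axis (no Blender poses or image files needed):
import numpy as np

K = np.array([[700.0, 0.0, 320.0],
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])
theta = np.deg2rad(5.0)
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0, 0.0, 1.0]])
H = K @ R @ np.linalg.inv(K)
print(H / H[2, 2])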
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv
import sys

def randomColor():
    color = np.random.randint(0, 255, (1, 3))
    return color[0].tolist()

def perspectiveCorrection(img1Path, img2Path, patternSize):
    img1 = cv.imread(cv.samples.findFile(img1Path))
    img2 = cv.imread(cv.samples.findFile(img2Path))

    # [find-corners]
    ret1, corners1 = cv.findChessboardCorners(img1, patternSize)
    ret2, corners2 = cv.findChessboardCorners(img2, patternSize)
    # [find-corners]

    if not ret1 or not ret2:
        print("Error, cannot find the chessboard corners in both images.")
        sys.exit(-1)

    # [estimate-homography]
    H, _ = cv.findHomography(corners1, corners2)
    print(H)
    # [estimate-homography]

    # [warp-chessboard]
    img1_warp = cv.warpPerspective(img1, H, (img1.shape[1], img1.shape[0]))
    # [warp-chessboard]

    img_draw_warp = cv.hconcat([img2, img1_warp])
    cv.imshow("Desired chessboard view / Warped source chessboard view", img_draw_warp)

    corners1 = corners1.tolist()
    corners1 = [a[0] for a in corners1]

    # [compute-transformed-corners]
    img_draw_matches = cv.hconcat([img1, img2])
    for i in range(len(corners1)):
        pt1 = np.array([corners1[i][0], corners1[i][1], 1])
        pt1 = pt1.reshape(3, 1)
        pt2 = np.dot(H, pt1)
        pt2 = pt2 / pt2[2]
        end = (int(img1.shape[1] + pt2[0]), int(pt2[1]))
        cv.line(img_draw_matches, tuple([int(j) for j in corners1[i]]), end, randomColor(), 2)

    cv.imshow("Draw matches", img_draw_matches)
    cv.waitKey(0)
    # [compute-transformed-corners]

def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-I1', "--image1", help="Path to the first image", default="left02.jpg")
    parser.add_argument('-I2', "--image2", help="Path to the second image", default="left01.jpg")
    # Parse the pattern size as integers; without type=int a value passed on the
    # command line would reach findChessboardCorners as a string and fail.
    parser.add_argument('-H', "--height", help="Height of pattern size", type=int, default=6)
    parser.add_argument('-W', "--width", help="Width of pattern size", type=int, default=9)
    args = parser.parse_args()

    img1Path = args.image1
    img2Path = args.image2
    h = args.height
    w = args.width
    perspectiveCorrection(img1Path, img2Path, (w, h))

if __name__ == "__main__":
    main()
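# findHomography, as used above on matched chessboard corners, recovers the 3x3
# map between two point sets. A sketch with four synthetic correspondences under
# a known translation, so the recovered matrix is predictable:
import numpy as np
import cv2 as cv

src = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=np.float32)
dst = src + np.float32([2, 3])
H, _ = cv.findHomography(src, dst)
print(np.round(H, 3))  # identity upper-left block with a (2, 3) translation column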
from __future__ import print_function import cv2 as cv import numpy as np import argparse from math import sqrt ## [load] parser = argparse.ArgumentParser(description='Code for AKAZE local features matching tutorial.') parser.add_argument('--input1', help='Path to input image 1.', default='graf1.png') parser.add_argument('--input2', help='Path to input image 2.', default='graf3.png') parser.add_argument('--homography', help='Path to the homography matrix.', default='H1to3p.xml') args = parser.parse_args() img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) if img1 is None or img2 is None: print('Could not open or find the images!') exit(0) fs = cv.FileStorage(cv.samples.findFile(args.homography), cv.FILE_STORAGE_READ) homography = fs.getFirstTopLevelNode().mat() ## [load] ## [AKAZE] akaze = cv.AKAZE_create() kpts1, desc1 = akaze.detectAndCompute(img1, None) kpts2, desc2 = akaze.detectAndCompute(img2, None) ## [AKAZE] ## [2-nn matching] matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE_HAMMING) nn_matches = matcher.knnMatch(desc1, desc2, 2) ## [2-nn matching] ## [ratio test filtering] matched1 = [] matched2 = [] nn_match_ratio = 0.8 # Nearest neighbor matching ratio for m, n in nn_matches: if m.distance < nn_match_ratio * n.distance: matched1.append(kpts1[m.queryIdx]) matched2.append(kpts2[m.trainIdx]) ## [ratio test filtering] ## [homography check] inliers1 = [] inliers2 = [] good_matches = [] inlier_threshold = 2.5 # Distance threshold to identify inliers with homography check for i, m in enumerate(matched1): col = np.ones((3,1), dtype=np.float64) col[0:2,0] = m.pt col = np.dot(homography, col) col /= col[2,0] dist = sqrt(pow(col[0,0] - matched2[i].pt[0], 2) +\ pow(col[1,0] - matched2[i].pt[1], 2)) if dist < inlier_threshold: good_matches.append(cv.DMatch(len(inliers1), len(inliers2), 0)) inliers1.append(matched1[i]) inliers2.append(matched2[i]) ## [homography check] ## [draw final matches] res = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8) cv.drawMatches(img1, inliers1, img2, inliers2, good_matches, res) cv.imwrite("akaze_result.png", res) inlier_ratio = len(inliers1) / float(len(matched1)) print('A-KAZE Matching Results') print('*******************************') print('# Keypoints 1: \t', len(kpts1)) print('# Keypoints 2: \t', len(kpts2)) print('# Matches: \t', len(matched1)) print('# Inliers: \t', len(inliers1)) print('# Inliers Ratio: \t', inlier_ratio) cv.imshow('result', res) cv.waitKey() ## [draw final matches]
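# The homography check above reprojects each matched keypoint with the
# ground-truth H and keeps pairs within inlier_threshold pixels. That distance
# computation on one synthetic pair, with H = identity for clarity:
import numpy as np

homography = np.eye(3)
col = np.ones((3, 1))
col[0:2, 0] = (10.0, 20.0)          # matched point in image 1
col = homography @ col
col /= col[2, 0]
pt2 = (11.0, 21.5)                  # its match in image 2
dist = np.hypot(col[0, 0] - pt2[0], col[1, 0] - pt2[1])
print(dist, dist < 2.5)             # ~1.80, an inlier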
from __future__ import print_function import cv2 as cv import numpy as np import argparse parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.') parser.add_argument('--input1', help='Path to input image 1.', default='box.png') parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png') args = parser.parse_args() img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) if img1 is None or img2 is None: print('Could not open or find the images!') exit(0) #-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors minHessian = 400 detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian) keypoints1, descriptors1 = detector.detectAndCompute(img1, None) keypoints2, descriptors2 = detector.detectAndCompute(img2, None) #-- Step 2: Matching descriptor vectors with a brute force matcher # Since SURF is a floating-point descriptor NORM_L2 is used matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE) matches = matcher.match(descriptors1, descriptors2) #-- Draw matches img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8) cv.drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches) #-- Show detected matches cv.imshow('Matches', img_matches) cv.waitKey()
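# The matcher above uses NORM_L2 because SURF descriptors are floating point;
# binary descriptors (ORB, BRISK, AKAZE) pair with NORM_HAMMING instead. The
# two norms compared on a pair of toy one-byte descriptors:
import numpy as np
import cv2 as cv

a = np.array([[0b11110000]], dtype=np.uint8)
b = np.array([[0b00001111]], dtype=np.uint8)
print(cv.norm(a, b, cv.NORM_HAMMING))  # 8.0 -- all eight bits differ
print(cv.norm(a, b, cv.NORM_L2))       # 225.0 -- Euclidean distance on raw bytes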
from __future__ import print_function from __future__ import division import cv2 as cv import numpy as np import argparse import os def loadExposureSeq(path): images = [] times = [] with open(os.path.join(path, 'list.txt')) as f: content = f.readlines() for line in content: tokens = line.split() images.append(cv.imread(os.path.join(path, tokens[0]))) times.append(1 / float(tokens[1])) return images, np.asarray(times, dtype=np.float32) parser = argparse.ArgumentParser(description='Code for High Dynamic Range Imaging tutorial.') parser.add_argument('--input', type=str, help='Path to the directory that contains images and exposure times.') args = parser.parse_args() if not args.input: parser.print_help() exit(0) ## [Load images and exposure times] images, times = loadExposureSeq(args.input) ## [Load images and exposure times] ## [Estimate camera response] calibrate = cv.createCalibrateDebevec() response = calibrate.process(images, times) ## [Estimate camera response] ## [Make HDR image] merge_debevec = cv.createMergeDebevec() hdr = merge_debevec.process(images, times, response) ## [Make HDR image] ## [Tonemap HDR image] tonemap = cv.createTonemap(2.2) ldr = tonemap.process(hdr) ## [Tonemap HDR image] ## [Perform exposure fusion] merge_mertens = cv.createMergeMertens() fusion = merge_mertens.process(images) ## [Perform exposure fusion] ## [Write results] cv.imwrite('fusion.png', fusion * 255) cv.imwrite('ldr.png', ldr * 255) cv.imwrite('hdr.hdr', hdr) ## [Write results]
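# Unlike the Debevec path above, exposure fusion (MergeMertens) needs neither
# exposure times nor a camera response curve. A self-contained sketch on two
# synthetic "exposures" of the same random scene:
import numpy as np
import cv2 as cv

base = np.random.randint(0, 256, (32, 32, 3)).astype(np.uint8)
dark = (base * 0.3).astype(np.uint8)
bright = np.clip(base * 1.5, 0, 255).astype(np.uint8)
fusion = cv.createMergeMertens().process([dark, bright])
print(fusion.dtype, fusion.min(), fusion.max())  # float32, roughly within [0, 1]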
#!/usr/bin/env python ''' You can download the converted pb model from https://www.dropbox.com/s/qag9vzambhhkvxr/lip_jppnet_384.pb?dl=0 or convert the model yourself. Follow these steps if you want to convert the original model yourself: To get original .meta pre-trained model download https://drive.google.com/file/d/1BFVXgeln-bek8TCbRjN6utPAgRE0LJZg/view For correct convert .meta to .pb model download original repository https://github.com/Engineering-Course/LIP_JPPNet Change script evaluate_parsing_JPPNet-s2.py for human parsing 1. Remove preprocessing to create image_batch_origin: with tf.name_scope("create_inputs"): ... Add image_batch_origin = tf.placeholder(tf.float32, shape=(2, None, None, 3), name='input') 2. Create input image = cv2.imread(path/to/image) image_rev = np.flip(image, axis=1) input = np.stack([image, image_rev], axis=0) 3. Hardcode image_h and image_w shapes to determine output shapes. We use default INPUT_SIZE = (384, 384) from evaluate_parsing_JPPNet-s2.py. parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_100, INPUT_SIZE), tf.image.resize_images(parsing_out1_075, INPUT_SIZE), tf.image.resize_images(parsing_out1_125, INPUT_SIZE)]), axis=0) Do similarly with parsing_out2, parsing_out3 4. Remove postprocessing. Last net operation: raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0) Change: parsing_ = sess.run(raw_output, feed_dict={'input:0': input}) 5. To save model after sess.run(...) add: input_graph_def = tf.get_default_graph().as_graph_def() output_node = "Mean_3" output_graph_def = tf.graph_util.convert_variables_to_constants(sess, input_graph_def, output_node) output_graph = "LIP_JPPNet.pb" with tf.gfile.GFile(output_graph, "wb") as f: f.write(output_graph_def.SerializeToString())' ''' import argparse import os.path import numpy as np import cv2 as cv backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD) def preprocess(image): """ Create 4-dimensional blob from image and flip image :param image: input image """ image_rev = np.flip(image, axis=1) input = cv.dnn.blobFromImages([image, image_rev], mean=(104.00698793, 116.66876762, 122.67891434)) return input def run_net(input, model_path, backend, target): """ Read network and infer model :param model_path: path to JPPNet model :param backend: computation backend :param target: computation device """ net = cv.dnn.readNet(model_path) net.setPreferableBackend(backend) net.setPreferableTarget(target) net.setInput(input) out = net.forward() return out def postprocess(out, input_shape): """ Create a grayscale human segmentation :param out: network output :param input_shape: input image width and height """ # LIP classes # 0 Background # 1 Hat # 2 Hair # 3 Glove # 4 Sunglasses # 5 UpperClothes # 6 Dress # 7 Coat # 8 Socks # 9 Pants # 10 Jumpsuits # 11 Scarf # 12 Skirt # 13 Face # 14 LeftArm # 15 RightArm # 16 LeftLeg # 17 RightLeg # 18 LeftShoe # 19 RightShoe head_output, tail_output = np.split(out, indices_or_sections=[1], axis=0) head_output = head_output.squeeze(0) tail_output = tail_output.squeeze(0) head_output = np.stack([cv.resize(img, dsize=input_shape) for img in head_output[:, ...]]) tail_output = np.stack([cv.resize(img, dsize=input_shape) for img in tail_output[:, ...]]) tail_list = np.split(tail_output, indices_or_sections=list(range(1, 20)), axis=0) tail_list = [arr.squeeze(0) 
for arr in tail_list] tail_list_rev = [tail_list[i] for i in range(14)] tail_list_rev.extend([tail_list[15], tail_list[14], tail_list[17], tail_list[16], tail_list[19], tail_list[18]]) tail_output_rev = np.stack(tail_list_rev, axis=0) tail_output_rev = np.flip(tail_output_rev, axis=2) raw_output_all = np.mean(np.stack([head_output, tail_output_rev], axis=0), axis=0, keepdims=True) raw_output_all = np.argmax(raw_output_all, axis=1) raw_output_all = raw_output_all.transpose(1, 2, 0) return raw_output_all def decode_labels(gray_image): """ Colorize image according to labels :param gray_image: grayscale human segmentation result """ height, width, _ = gray_image.shape colors = [(0, 0, 0), (128, 0, 0), (255, 0, 0), (0, 85, 0), (170, 0, 51), (255, 85, 0), (0, 0, 85), (0, 119, 221), (85, 85, 0), (0, 85, 85), (85, 51, 0), (52, 86, 128), (0, 128, 0), (0, 0, 255), (51, 170, 221), (0, 255, 255),(85, 255, 170), (170, 255, 85), (255, 255, 0), (255, 170, 0)] segm = np.stack([colors[idx] for idx in gray_image.flatten()]) segm = segm.reshape(height, width, 3).astype(np.uint8) segm = cv.cvtColor(segm, cv.COLOR_BGR2RGB) return segm def parse_human(image, model_path, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU): """ Prepare input for execution, run net and postprocess output to parse human. :param image: input image :param model_path: path to JPPNet model :param backend: name of computation backend :param target: name of computation target """ input = preprocess(image) input_h, input_w = input.shape[2:] output = run_net(input, model_path, backend, target) grayscale_out = postprocess(output, (input_w, input_h)) segmentation = decode_labels(grayscale_out) return segmentation if __name__ == '__main__': parser = argparse.ArgumentParser(description='Use this script to run human parsing using JPPNet', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--input', '-i', required=True, help='Path to input image.') parser.add_argument('--model', '-m', default='lip_jppnet_384.pb', help='Path to pb model.') parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: " "%d: automatically (by default), " "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " "%d: OpenCV implementation" % backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' '%d: CPU target (by default), ' '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: VPU' % targets) args, _ = parser.parse_known_args() if not os.path.isfile(args.model): raise OSError("Model not exist") image = cv.imread(args.input) output = parse_human(image, args.model, args.backend, args.target) winName = 'Deep learning human parsing in OpenCV' cv.namedWindow(winName, cv.WINDOW_AUTOSIZE) cv.imshow(winName, output) cv.waitKey()
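# decode_labels above maps each class index to a fixed color via a per-pixel
# Python loop. The same lookup sketched with numpy fancy indexing, the usual
# vectorized form (three-class toy palette):
import numpy as np

colors = np.array([(0, 0, 0), (128, 0, 0), (255, 0, 0)], dtype=np.uint8)
labels = np.array([[0, 1],
                   [2, 1]])
print(colors[labels].shape)  # (2, 2, 3): an H x W label map becomes a color image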
# Import required modules import cv2 as cv import math import argparse ############ Add argument parser for command line arguments ############ parser = argparse.ArgumentParser(description='Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2)') parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.') parser.add_argument('--model', required=True, help='Path to a binary .pb file of model contains trained weights.') parser.add_argument('--width', type=int, default=320, help='Preprocess input image by resizing to a specific width. It should be multiple by 32.') parser.add_argument('--height',type=int, default=320, help='Preprocess input image by resizing to a specific height. It should be multiple by 32.') parser.add_argument('--thr',type=float, default=0.5, help='Confidence threshold.') parser.add_argument('--nms',type=float, default=0.4, help='Non-maximum suppression threshold.') args = parser.parse_args() ############ Utility functions ############ def decode(scores, geometry, scoreThresh): detections = [] confidences = [] ############ CHECK DIMENSIONS AND SHAPES OF geometry AND scores ############ assert len(scores.shape) == 4, "Incorrect dimensions of scores" assert len(geometry.shape) == 4, "Incorrect dimensions of geometry" assert scores.shape[0] == 1, "Invalid dimensions of scores" assert geometry.shape[0] == 1, "Invalid dimensions of geometry" assert scores.shape[1] == 1, "Invalid dimensions of scores" assert geometry.shape[1] == 5, "Invalid dimensions of geometry" assert scores.shape[2] == geometry.shape[2], "Invalid dimensions of scores and geometry" assert scores.shape[3] == geometry.shape[3], "Invalid dimensions of scores and geometry" height = scores.shape[2] width = scores.shape[3] for y in range(0, height): # Extract data from scores scoresData = scores[0][0][y] x0_data = geometry[0][0][y] x1_data = geometry[0][1][y] x2_data = geometry[0][2][y] x3_data = geometry[0][3][y] anglesData = geometry[0][4][y] for x in range(0, width): score = scoresData[x] # If score is lower than threshold score, move to next x if(score < scoreThresh): continue # Calculate offset offsetX = x * 4.0 offsetY = y * 4.0 angle = anglesData[x] # Calculate cos and sin of angle cosA = math.cos(angle) sinA = math.sin(angle) h = x0_data[x] + x2_data[x] w = x1_data[x] + x3_data[x] # Calculate offset offset = ([offsetX + cosA * x1_data[x] + sinA * x2_data[x], offsetY - sinA * x1_data[x] + cosA * x2_data[x]]) # Find points for rectangle p1 = (-sinA * h + offset[0], -cosA * h + offset[1]) p3 = (-cosA * w + offset[0], sinA * w + offset[1]) center = (0.5*(p1[0]+p3[0]), 0.5*(p1[1]+p3[1])) detections.append((center, (w,h), -1*angle * 180.0 / math.pi)) confidences.append(float(score)) # Return detections and confidences return [detections, confidences] def main(): # Read and store arguments confThreshold = args.thr nmsThreshold = args.nms inpWidth = args.width inpHeight = args.height model = args.model # Load network net = cv.dnn.readNet(model) # Create a new named window kWinName = "EAST: An Efficient and Accurate Scene Text Detector" cv.namedWindow(kWinName, cv.WINDOW_NORMAL) outNames = [] outNames.append("feature_fusion/Conv_7/Sigmoid") outNames.append("feature_fusion/concat_3") # Open a video file or an image file or a camera stream cap = cv.VideoCapture(args.input if args.input else 0) while cv.waitKey(1) < 0: # Read frame 
hasFrame, frame = cap.read() if not hasFrame: cv.waitKey() break # Get frame height and width height_ = frame.shape[0] width_ = frame.shape[1] rW = width_ / float(inpWidth) rH = height_ / float(inpHeight) # Create a 4D blob from frame. blob = cv.dnn.blobFromImage(frame, 1.0, (inpWidth, inpHeight), (123.68, 116.78, 103.94), True, False) # Run the model net.setInput(blob) outs = net.forward(outNames) t, _ = net.getPerfProfile() label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency()) # Get scores and geometry scores = outs[0] geometry = outs[1] [boxes, confidences] = decode(scores, geometry, confThreshold) # Apply NMS indices = cv.dnn.NMSBoxesRotated(boxes, confidences, confThreshold,nmsThreshold) for i in indices: # get 4 corners of the rotated rect vertices = cv.boxPoints(boxes[i[0]]) # scale the bounding box coordinates based on the respective ratios for j in range(4): vertices[j][0] *= rW vertices[j][1] *= rH for j in range(4): p1 = (vertices[j][0], vertices[j][1]) p2 = (vertices[(j + 1) % 4][0], vertices[(j + 1) % 4][1]) cv.line(frame, p1, p2, (0, 255, 0), 1) # Put efficiency information cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) # Display the frame cv.imshow(kWinName,frame) if __name__ == "__main__": main()
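# EAST predicts boxes on a 4x-downsampled grid at network resolution; the loop
# above rescales each rotated rect's corners back to frame coordinates. That
# scale-back step in isolation, on one hand-made rotated rect:
import numpy as np
import cv2 as cv

box = ((160.0, 120.0), (80.0, 20.0), -15.0)   # center, size, angle at 320x320 input
vertices = cv.boxPoints(box)                  # 4 x 2 corner array
rW, rH = 640 / 320.0, 480 / 320.0             # frame size / network input size
vertices *= np.float32([rW, rH])
print(vertices)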
import cv2 as cv import argparse import numpy as np from common import * backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD) parser = argparse.ArgumentParser(add_help=False) parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'), help='An optional path to file with preprocessing parameters.') parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.') parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet'], help='Optional name of an origin framework of the model. ' 'Detect it automatically if it does not set.') parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: " "%d: automatically (by default), " "%d: Halide language (http://halide-lang.org/), " "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " "%d: OpenCV implementation" % backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' '%d: CPU target (by default), ' '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: VPU' % targets) args, _ = parser.parse_known_args() add_preproc_args(args.zoo, parser, 'classification') parser = argparse.ArgumentParser(parents=[parser], description='Use this script to run classification deep learning networks using OpenCV.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) args = parser.parse_args() args.model = findFile(args.model) args.config = findFile(args.config) args.classes = findFile(args.classes) # Load names of classes classes = None if args.classes: with open(args.classes, 'rt') as f: classes = f.read().rstrip('\n').split('\n') # Load a network net = cv.dnn.readNet(args.model, args.config, args.framework) net.setPreferableBackend(args.backend) net.setPreferableTarget(args.target) winName = 'Deep learning image classification in OpenCV' cv.namedWindow(winName, cv.WINDOW_NORMAL) cap = cv.VideoCapture(args.input if args.input else 0) while cv.waitKey(1) < 0: hasFrame, frame = cap.read() if not hasFrame: cv.waitKey() break # Create a 4D blob from a frame. inpWidth = args.width if args.width else frame.shape[1] inpHeight = args.height if args.height else frame.shape[0] blob = cv.dnn.blobFromImage(frame, args.scale, (inpWidth, inpHeight), args.mean, args.rgb, crop=False) # Run a model net.setInput(blob) out = net.forward() # Get a class with a highest score. out = out.flatten() classId = np.argmax(out) confidence = out[classId] # Put efficiency information. t, _ = net.getPerfProfile() label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency()) cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) # Print predicted class. label = '%s: %.4f' % (classes[classId] if classes else 'Class #%d' % classId, confidence) cv.putText(frame, label, (0, 40), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) cv.imshow(winName, frame)
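# blobFromImage above packs an H x W x 3 frame into the N x C x H x W tensor the
# network expects, applying scaling, mean subtraction and an optional channel
# swap. Shape check on a synthetic frame:
import numpy as np
import cv2 as cv

frame = np.zeros((240, 320, 3), dtype=np.uint8)
blob = cv.dnn.blobFromImage(frame, 1.0, (224, 224), (0, 0, 0), swapRB=True, crop=False)
print(blob.shape)  # (1, 3, 224, 224)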
#!/usr/bin/env python3 ''' You can download the Geometric Matching Module model from https://www.dropbox.com/s/tyhc73xa051grjp/cp_vton_gmm.onnx?dl=0 You can download the Try-On Module model from https://www.dropbox.com/s/q2x97ve2h53j66k/cp_vton_tom.onnx?dl=0 You can download the cloth segmentation model from https://www.dropbox.com/s/qag9vzambhhkvxr/lip_jppnet_384.pb?dl=0 You can find the OpenPose proto in opencv_extra/testdata/dnn/openpose_pose_coco.prototxt and get .caffemodel using opencv_extra/testdata/dnn/download_models.py ''' import argparse import os.path import numpy as np import cv2 as cv from numpy import linalg from common import findFile from human_parsing import parse_human backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD) parser = argparse.ArgumentParser(description='Use this script to run virtial try-on using CP-VTON', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--input_image', '-i', required=True, help='Path to image with person.') parser.add_argument('--input_cloth', '-c', required=True, help='Path to target cloth image') parser.add_argument('--gmm_model', '-gmm', default='cp_vton_gmm.onnx', help='Path to Geometric Matching Module .onnx model.') parser.add_argument('--tom_model', '-tom', default='cp_vton_tom.onnx', help='Path to Try-On Module .onnx model.') parser.add_argument('--segmentation_model', default='lip_jppnet_384.pb', help='Path to cloth segmentation .pb model.') parser.add_argument('--openpose_proto', default='openpose_pose_coco.prototxt', help='Path to OpenPose .prototxt model was trained on COCO dataset.') parser.add_argument('--openpose_model', default='openpose_pose_coco.caffemodel', help='Path to OpenPose .caffemodel model was trained on COCO dataset.') parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: " "%d: automatically (by default), " "%d: Halide language (http://halide-lang.org/), " "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " "%d: OpenCV implementation" % backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' '%d: CPU target (by default), ' '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: VPU' % targets) args, _ = parser.parse_known_args() def get_pose_map(image, proto_path, model_path, backend, target, height=256, width=192): radius = 5 inp = cv.dnn.blobFromImage(image, 1.0 / 255, (width, height)) net = cv.dnn.readNet(proto_path, model_path) net.setPreferableBackend(backend) net.setPreferableTarget(target) net.setInput(inp) out = net.forward() threshold = 0.1 _, out_c, out_h, out_w = out.shape pose_map = np.zeros((height, width, out_c - 1)) # last label: Background for i in range(0, out.shape[1] - 1): heatMap = out[0, i, :, :] keypoint = np.full((height, width), -1) _, conf, _, point = cv.minMaxLoc(heatMap) x = width * point[0] // out_w y = height * point[1] // out_h if conf > threshold and x > 0 and y > 0: keypoint[y - radius:y + radius, x - radius:x + radius] = 1 pose_map[:, :, i] = keypoint pose_map = pose_map.transpose(2, 0, 1) return pose_map class BilinearFilter(object): """ PIL bilinear resize implementation image = image.resize((image_width // 16, image_height // 16), 
Image.BILINEAR) """ def _precompute_coeffs(self, inSize, outSize): filterscale = max(1.0, inSize / outSize) ksize = int(np.ceil(filterscale)) * 2 + 1 kk = np.zeros(shape=(outSize * ksize, ), dtype=np.float32) bounds = np.empty(shape=(outSize * 2, ), dtype=np.int32) centers = (np.arange(outSize) + 0.5) * filterscale + 0.5 bounds[::2] = np.where(centers - filterscale < 0, 0, centers - filterscale) bounds[1::2] = np.where(centers + filterscale > inSize, inSize, centers + filterscale) - bounds[::2] xmins = bounds[::2] - centers + 1 points = np.array([np.arange(row) + xmins[i] for i, row in enumerate(bounds[1::2])]) / filterscale for xx in range(0, outSize): point = points[xx] bilinear = np.where(point < 1.0, 1.0 - abs(point), 0.0) ww = np.sum(bilinear) kk[xx * ksize : xx * ksize + bilinear.size] = np.where(ww == 0.0, bilinear, bilinear / ww) return bounds, kk, ksize def _resample_horizontal(self, out, img, ksize, bounds, kk): for yy in range(0, out.shape[0]): for xx in range(0, out.shape[1]): xmin = bounds[xx * 2 + 0] xmax = bounds[xx * 2 + 1] k = kk[xx * ksize : xx * ksize + xmax] out[yy, xx] = np.round(np.sum(img[yy, xmin : xmin + xmax] * k)) def _resample_vertical(self, out, img, ksize, bounds, kk): for yy in range(0, out.shape[0]): ymin = bounds[yy * 2 + 0] ymax = bounds[yy * 2 + 1] k = kk[yy * ksize: yy * ksize + ymax] out[yy] = np.round(np.sum(img[ymin : ymin + ymax, 0:out.shape[1]] * k[:, np.newaxis], axis=0)) def imaging_resample(self, img, xsize, ysize): height, width, *args = img.shape bounds_horiz, kk_horiz, ksize_horiz = self._precompute_coeffs(width, xsize) bounds_vert, kk_vert, ksize_vert = self._precompute_coeffs(height, ysize) out_hor = np.empty((img.shape[0], xsize), dtype=np.uint8) self._resample_horizontal(out_hor, img, ksize_horiz, bounds_horiz, kk_horiz) out = np.empty((ysize, xsize), dtype=np.uint8) self._resample_vertical(out, out_hor, ksize_vert, bounds_vert, kk_vert) return out class CpVton(object): def __init__(self, gmm_model, tom_model, backend, target): super(CpVton, self).__init__() self.gmm_net = cv.dnn.readNet(gmm_model) self.tom_net = cv.dnn.readNet(tom_model) self.gmm_net.setPreferableBackend(backend) self.gmm_net.setPreferableTarget(target) self.tom_net.setPreferableBackend(backend) self.tom_net.setPreferableTarget(target) def prepare_agnostic(self, segm_image, input_image, pose_map, height=256, width=192): palette = { 'Background' : (0, 0, 0), 'Hat' : (128, 0, 0), 'Hair' : (255, 0, 0), 'Glove' : (0, 85, 0), 'Sunglasses' : (170, 0, 51), 'UpperClothes' : (255, 85, 0), 'Dress' : (0, 0, 85), 'Coat' : (0, 119, 221), 'Socks' : (85, 85, 0), 'Pants' : (0, 85, 85), 'Jumpsuits' : (85, 51, 0), 'Scarf' : (52, 86, 128), 'Skirt' : (0, 128, 0), 'Face' : (0, 0, 255), 'Left-arm' : (51, 170, 221), 'Right-arm' : (0, 255, 255), 'Left-leg' : (85, 255, 170), 'Right-leg' : (170, 255, 85), 'Left-shoe' : (255, 255, 0), 'Right-shoe' : (255, 170, 0) } color2label = {val: key for key, val in palette.items()} head_labels = ['Hat', 'Hair', 'Sunglasses', 'Face', 'Pants', 'Skirt'] segm_image = cv.cvtColor(segm_image, cv.COLOR_BGR2RGB) phead = np.zeros((1, height, width), dtype=np.float32) pose_shape = np.zeros((height, width), dtype=np.uint8) for r in range(height): for c in range(width): pixel = tuple(segm_image[r, c]) if tuple(pixel) in color2label: if color2label[pixel] in head_labels: phead[0, r, c] = 1 if color2label[pixel] != 'Background': pose_shape[r, c] = 255 input_image = cv.dnn.blobFromImage(input_image, 1.0 / 127.5, (width, height), mean=(127.5, 127.5, 127.5), swapRB=True) 
input_image = input_image.squeeze(0) img_head = input_image * phead - (1 - phead) downsample = BilinearFilter() down = downsample.imaging_resample(pose_shape, width // 16, height // 16) res_shape = cv.resize(down, (width, height), cv.INTER_LINEAR) res_shape = cv.dnn.blobFromImage(res_shape, 1.0 / 127.5, mean=(127.5, 127.5, 127.5), swapRB=True) res_shape = res_shape.squeeze(0) agnostic = np.concatenate((res_shape, img_head, pose_map), axis=0) agnostic = np.expand_dims(agnostic, axis=0) return agnostic def get_warped_cloth(self, cloth_img, agnostic, height=256, width=192): cloth = cv.dnn.blobFromImage(cloth_img, 1.0 / 127.5, (width, height), mean=(127.5, 127.5, 127.5), swapRB=True) self.gmm_net.setInput(agnostic, "input.1") self.gmm_net.setInput(cloth, "input.18") theta = self.gmm_net.forward() grid = self._generate_grid(theta) warped_cloth = self._bilinear_sampler(cloth, grid).astype(np.float32) return warped_cloth def get_tryon(self, agnostic, warp_cloth): inp = np.concatenate([agnostic, warp_cloth], axis=1) self.tom_net.setInput(inp) out = self.tom_net.forward() p_rendered, m_composite = np.split(out, [3], axis=1) p_rendered = np.tanh(p_rendered) m_composite = 1 / (1 + np.exp(-m_composite)) p_tryon = warp_cloth * m_composite + p_rendered * (1 - m_composite) rgb_p_tryon = cv.cvtColor(p_tryon.squeeze(0).transpose(1, 2, 0), cv.COLOR_BGR2RGB) rgb_p_tryon = (rgb_p_tryon + 1) / 2 return rgb_p_tryon def _compute_L_inverse(self, X, Y): N = X.shape[0] Xmat = np.tile(X, (1, N)) Ymat = np.tile(Y, (1, N)) P_dist_squared = np.power(Xmat - Xmat.transpose(1, 0), 2) + np.power(Ymat - Ymat.transpose(1, 0), 2) P_dist_squared[P_dist_squared == 0] = 1 K = np.multiply(P_dist_squared, np.log(P_dist_squared)) O = np.ones([N, 1], dtype=np.float32) Z = np.zeros([3, 3], dtype=np.float32) P = np.concatenate([O, X, Y], axis=1) first = np.concatenate((K, P), axis=1) second = np.concatenate((P.transpose(1, 0), Z), axis=1) L = np.concatenate((first, second), axis=0) Li = linalg.inv(L) return Li def _prepare_to_transform(self, out_h=256, out_w=192, grid_size=5): grid = np.zeros([out_h, out_w, 3], dtype=np.float32) grid_X, grid_Y = np.meshgrid(np.linspace(-1, 1, out_w), np.linspace(-1, 1, out_h)) grid_X = np.expand_dims(np.expand_dims(grid_X, axis=0), axis=3) grid_Y = np.expand_dims(np.expand_dims(grid_Y, axis=0), axis=3) axis_coords = np.linspace(-1, 1, grid_size) N = grid_size ** 2 P_Y, P_X = np.meshgrid(axis_coords, axis_coords) P_X = np.reshape(P_X,(-1, 1)) P_Y = np.reshape(P_Y,(-1, 1)) P_X = np.expand_dims(np.expand_dims(np.expand_dims(P_X, axis=2), axis=3), axis=4).transpose(4, 1, 2, 3, 0) P_Y = np.expand_dims(np.expand_dims(np.expand_dims(P_Y, axis=2), axis=3), axis=4).transpose(4, 1, 2, 3, 0) return grid_X, grid_Y, N, P_X, P_Y def _expand_torch(self, X, shape): if len(X.shape) != len(shape): return X.flatten().reshape(shape) else: axis = [1 if src == dst else dst for src, dst in zip(X.shape, shape)] return np.tile(X, axis) def _apply_transformation(self, theta, points, N, P_X, P_Y): if len(theta.shape) == 2: theta = np.expand_dims(np.expand_dims(theta, axis=2), axis=3) batch_size = theta.shape[0] P_X_base = np.copy(P_X) P_Y_base = np.copy(P_Y) Li = self._compute_L_inverse(np.reshape(P_X, (N, -1)), np.reshape(P_Y, (N, -1))) Li = np.expand_dims(Li, axis=0) # split theta into point coordinates Q_X = np.squeeze(theta[:, :N, :, :], axis=3) Q_Y = np.squeeze(theta[:, N:, :, :], axis=3) Q_X += self._expand_torch(P_X_base, Q_X.shape) Q_Y += self._expand_torch(P_Y_base, Q_Y.shape) points_b = points.shape[0] points_h = 
points.shape[1] points_w = points.shape[2] P_X = self._expand_torch(P_X, (1, points_h, points_w, 1, N)) P_Y = self._expand_torch(P_Y, (1, points_h, points_w, 1, N)) W_X = self._expand_torch(Li[:,:N,:N], (batch_size, N, N)) @ Q_X W_Y = self._expand_torch(Li[:,:N,:N], (batch_size, N, N)) @ Q_Y W_X = np.expand_dims(np.expand_dims(W_X, axis=3), axis=4).transpose(0, 4, 2, 3, 1) W_X = np.repeat(W_X, points_h, axis=1) W_X = np.repeat(W_X, points_w, axis=2) W_Y = np.expand_dims(np.expand_dims(W_Y, axis=3), axis=4).transpose(0, 4, 2, 3, 1) W_Y = np.repeat(W_Y, points_h, axis=1) W_Y = np.repeat(W_Y, points_w, axis=2) A_X = self._expand_torch(Li[:, N:, :N], (batch_size, 3, N)) @ Q_X A_Y = self._expand_torch(Li[:, N:, :N], (batch_size, 3, N)) @ Q_Y A_X = np.expand_dims(np.expand_dims(A_X, axis=3), axis=4).transpose(0, 4, 2, 3, 1) A_X = np.repeat(A_X, points_h, axis=1) A_X = np.repeat(A_X, points_w, axis=2) A_Y = np.expand_dims(np.expand_dims(A_Y, axis=3), axis=4).transpose(0, 4, 2, 3, 1) A_Y = np.repeat(A_Y, points_h, axis=1) A_Y = np.repeat(A_Y, points_w, axis=2) points_X_for_summation = np.expand_dims(np.expand_dims(points[:, :, :, 0], axis=3), axis=4) points_X_for_summation = self._expand_torch(points_X_for_summation, points[:, :, :, 0].shape + (1, N)) points_Y_for_summation = np.expand_dims(np.expand_dims(points[:, :, :, 1], axis=3), axis=4) points_Y_for_summation = self._expand_torch(points_Y_for_summation, points[:, :, :, 0].shape + (1, N)) if points_b == 1: delta_X = points_X_for_summation - P_X delta_Y = points_Y_for_summation - P_Y else: delta_X = points_X_for_summation - self._expand_torch(P_X, points_X_for_summation.shape) delta_Y = points_Y_for_summation - self._expand_torch(P_Y, points_Y_for_summation.shape) dist_squared = np.power(delta_X, 2) + np.power(delta_Y, 2) dist_squared[dist_squared == 0] = 1 U = np.multiply(dist_squared, np.log(dist_squared)) points_X_batch = np.expand_dims(points[:,:,:,0], axis=3) points_Y_batch = np.expand_dims(points[:,:,:,1], axis=3) if points_b == 1: points_X_batch = self._expand_torch(points_X_batch, (batch_size, ) + points_X_batch.shape[1:]) points_Y_batch = self._expand_torch(points_Y_batch, (batch_size, ) + points_Y_batch.shape[1:]) points_X_prime = A_X[:,:,:,:,0]+ \ np.multiply(A_X[:,:,:,:,1], points_X_batch) + \ np.multiply(A_X[:,:,:,:,2], points_Y_batch) + \ np.sum(np.multiply(W_X, self._expand_torch(U, W_X.shape)), 4) points_Y_prime = A_Y[:,:,:,:,0]+ \ np.multiply(A_Y[:,:,:,:,1], points_X_batch) + \ np.multiply(A_Y[:,:,:,:,2], points_Y_batch) + \ np.sum(np.multiply(W_Y, self._expand_torch(U, W_Y.shape)), 4) return np.concatenate((points_X_prime, points_Y_prime), 3) def _generate_grid(self, theta): grid_X, grid_Y, N, P_X, P_Y = self._prepare_to_transform() warped_grid = self._apply_transformation(theta, np.concatenate((grid_X, grid_Y), axis=3), N, P_X, P_Y) return warped_grid def _bilinear_sampler(self, img, grid): x, y = grid[:,:,:,0], grid[:,:,:,1] H = img.shape[2] W = img.shape[3] max_y = H - 1 max_x = W - 1 # rescale x and y to [0, W-1/H-1] x = 0.5 * (x + 1.0) * (max_x - 1) y = 0.5 * (y + 1.0) * (max_y - 1) # grab 4 nearest corner points for each (x_i, y_i) x0 = np.floor(x).astype(int) x1 = x0 + 1 y0 = np.floor(y).astype(int) y1 = y0 + 1 # calculate deltas wa = (x1 - x) * (y1 - y) wb = (x1 - x) * (y - y0) wc = (x - x0) * (y1 - y) wd = (x - x0) * (y - y0) # clip to range [0, H-1/W-1] to not violate img boundaries x0 = np.clip(x0, 0, max_x) x1 = np.clip(x1, 0, max_x) y0 = np.clip(y0, 0, max_y) y1 = np.clip(y1, 0, max_y) # get pixel value at corner 
coords img = img.reshape(-1, H, W) Ia = img[:, y0, x0].swapaxes(0, 1) Ib = img[:, y1, x0].swapaxes(0, 1) Ic = img[:, y0, x1].swapaxes(0, 1) Id = img[:, y1, x1].swapaxes(0, 1) wa = np.expand_dims(wa, axis=0) wb = np.expand_dims(wb, axis=0) wc = np.expand_dims(wc, axis=0) wd = np.expand_dims(wd, axis=0) # compute output out = wa*Ia + wb*Ib + wc*Ic + wd*Id return out class CorrelationLayer(object): def __init__(self, params, blobs): super(CorrelationLayer, self).__init__() def getMemoryShapes(self, inputs): fetureAShape = inputs[0] b, c, h, w = fetureAShape return [[b, h * w, h, w]] def forward(self, inputs): feature_A, feature_B = inputs b, c, h, w = feature_A.shape feature_A = feature_A.transpose(0, 1, 3, 2) feature_A = np.reshape(feature_A, (b, c, h * w)) feature_B = np.reshape(feature_B, (b, c, h * w)) feature_B = feature_B.transpose(0, 2, 1) feature_mul = feature_B @ feature_A feature_mul= np.reshape(feature_mul, (b, h, w, h * w)) feature_mul = feature_mul.transpose(0, 1, 3, 2) correlation_tensor = feature_mul.transpose(0, 2, 1, 3) correlation_tensor = np.ascontiguousarray(correlation_tensor) return [correlation_tensor] if __name__ == "__main__": if not os.path.isfile(args.gmm_model): raise OSError("GMM model not exist") if not os.path.isfile(args.tom_model): raise OSError("TOM model not exist") if not os.path.isfile(args.segmentation_model): raise OSError("Segmentation model not exist") if not os.path.isfile(findFile(args.openpose_proto)): raise OSError("OpenPose proto not exist") if not os.path.isfile(findFile(args.openpose_model)): raise OSError("OpenPose model not exist") person_img = cv.imread(args.input_image) ratio = 256 / 192 inp_h, inp_w, _ = person_img.shape current_ratio = inp_h / inp_w if current_ratio > ratio: center_h = inp_h // 2 out_h = inp_w * ratio start = int(center_h - out_h // 2) end = int(center_h + out_h // 2) person_img = person_img[start:end, ...] else: center_w = inp_w // 2 out_w = inp_h / ratio start = int(center_w - out_w // 2) end = int(center_w + out_w // 2) person_img = person_img[:, start:end, :] cloth_img = cv.imread(args.input_cloth) pose = get_pose_map(person_img, findFile(args.openpose_proto), findFile(args.openpose_model), args.backend, args.target) segm_image = parse_human(person_img, args.segmentation_model) segm_image = cv.resize(segm_image, (192, 256), cv.INTER_LINEAR) cv.dnn_registerLayer('Correlation', CorrelationLayer) model = CpVton(args.gmm_model, args.tom_model, args.backend, args.target) agnostic = model.prepare_agnostic(segm_image, person_img, pose) warped_cloth = model.get_warped_cloth(cloth_img, agnostic) output = model.get_tryon(agnostic, warped_cloth) cv.dnn_unregisterLayer('Correlation') winName = 'Virtual Try-On' cv.namedWindow(winName, cv.WINDOW_AUTOSIZE) cv.imshow(winName, output) cv.waitKey()
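# _bilinear_sampler above interpolates each output pixel from its four nearest
# corners with weights wa..wd, which sum to 1 for any in-bounds sample point.
# A one-point check of that identity:
import numpy as np

x, y = 2.3, 4.7
x0, y0 = np.floor(x), np.floor(y)
x1, y1 = x0 + 1, y0 + 1
wa = (x1 - x) * (y1 - y)
wb = (x1 - x) * (y - y0)
wc = (x - x0) * (y1 - y)
wd = (x - x0) * (y - y0)
print(wa + wb + wc + wd)  # 1.0 (up to floating-point rounding)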
import cv2 as cv import argparse parser = argparse.ArgumentParser( description='This sample shows how to define custom OpenCV deep learning layers in Python. ' 'Holistically-Nested Edge Detection (https://arxiv.org/abs/1504.06375) neural network ' 'is used as an example model. Find a pre-trained model at https://github.com/s9xie/hed.') parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera') parser.add_argument('--prototxt', help='Path to deploy.prototxt', required=True) parser.add_argument('--caffemodel', help='Path to hed_pretrained_bsds.caffemodel', required=True) parser.add_argument('--width', help='Resize input image to a specific width', default=500, type=int) parser.add_argument('--height', help='Resize input image to a specific height', default=500, type=int) args = parser.parse_args() #! [CropLayer] class CropLayer(object): def __init__(self, params, blobs): self.xstart = 0 self.xend = 0 self.ystart = 0 self.yend = 0 # Our layer receives two inputs. We need to crop the first input blob # to match a shape of the second one (keeping batch size and number of channels) def getMemoryShapes(self, inputs): inputShape, targetShape = inputs[0], inputs[1] batchSize, numChannels = inputShape[0], inputShape[1] height, width = targetShape[2], targetShape[3] self.ystart = (inputShape[2] - targetShape[2]) // 2 self.xstart = (inputShape[3] - targetShape[3]) // 2 self.yend = self.ystart + height self.xend = self.xstart + width return [[batchSize, numChannels, height, width]] def forward(self, inputs): return [inputs[0][:,:,self.ystart:self.yend,self.xstart:self.xend]] #! [CropLayer] #! [Register] cv.dnn_registerLayer('Crop', CropLayer) #! [Register] # Load the model. net = cv.dnn.readNet(cv.samples.findFile(args.prototxt), cv.samples.findFile(args.caffemodel)) kWinName = 'Holistically-Nested Edge Detection' cv.namedWindow('Input', cv.WINDOW_NORMAL) cv.namedWindow(kWinName, cv.WINDOW_NORMAL) cap = cv.VideoCapture(args.input if args.input else 0) while cv.waitKey(1) < 0: hasFrame, frame = cap.read() if not hasFrame: cv.waitKey() break cv.imshow('Input', frame) inp = cv.dnn.blobFromImage(frame, scalefactor=1.0, size=(args.width, args.height), mean=(104.00698793, 116.66876762, 122.67891434), swapRB=False, crop=False) net.setInput(inp) out = net.forward() out = out[0, 0] out = cv.resize(out, (frame.shape[1], frame.shape[0])) cv.imshow(kWinName, out)
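# CropLayer.getMemoryShapes above center-crops the first input to the second
# input's spatial size. The offset arithmetic on example shapes, in isolation:
inputShape = [1, 16, 500, 500]    # blob to crop
targetShape = [1, 16, 480, 480]   # reference blob
ystart = (inputShape[2] - targetShape[2]) // 2
xstart = (inputShape[3] - targetShape[3]) // 2
print(ystart, xstart)  # 10 10: a symmetric 10-pixel border is removed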
# This file is a part of OpenCV project. # It is a subject to the license terms in the LICENSE file found in the top-level directory # of this distribution and at http://opencv.org/license.html. # # Copyright (C) 2018, Intel Corporation, all rights reserved. # Third party copyrights are property of their respective owners. # # Use this script to get the text graph representation (.pbtxt) of SSD-based # deep learning network trained in TensorFlow Object Detection API. # Then you can import it with a binary frozen graph (.pb) using readNetFromTensorflow() function. # See details and examples on the following wiki page: https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API import argparse import re from math import sqrt from tf_text_graph_common import * class SSDAnchorGenerator: def __init__(self, min_scale, max_scale, num_layers, aspect_ratios, reduce_boxes_in_lowest_layer, image_width, image_height): self.min_scale = min_scale self.aspect_ratios = aspect_ratios self.reduce_boxes_in_lowest_layer = reduce_boxes_in_lowest_layer self.image_width = image_width self.image_height = image_height self.scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1) for i in range(num_layers)] + [1.0] def get(self, layer_id): if layer_id == 0 and self.reduce_boxes_in_lowest_layer: widths = [0.1, self.min_scale * sqrt(2.0), self.min_scale * sqrt(0.5)] heights = [0.1, self.min_scale / sqrt(2.0), self.min_scale / sqrt(0.5)] else: widths = [self.scales[layer_id] * sqrt(ar) for ar in self.aspect_ratios] heights = [self.scales[layer_id] / sqrt(ar) for ar in self.aspect_ratios] widths += [sqrt(self.scales[layer_id] * self.scales[layer_id + 1])] heights += [sqrt(self.scales[layer_id] * self.scales[layer_id + 1])] min_size = min(self.image_width, self.image_height) widths = [w * min_size for w in widths] heights = [h * min_size for h in heights] return widths, heights class MultiscaleAnchorGenerator: def __init__(self, min_level, aspect_ratios, scales_per_octave, anchor_scale): self.min_level = min_level self.aspect_ratios = aspect_ratios self.anchor_scale = anchor_scale self.scales = [2**(float(s) / scales_per_octave) for s in range(scales_per_octave)] def get(self, layer_id): widths = [] heights = [] for a in self.aspect_ratios: for s in self.scales: base_anchor_size = 2**(self.min_level + layer_id) * self.anchor_scale ar = sqrt(a) heights.append(base_anchor_size * s / ar) widths.append(base_anchor_size * s * ar) return widths, heights def createSSDGraph(modelPath, configPath, outputPath): # Nodes that should be kept. keepOps = ['Conv2D', 'BiasAdd', 'Add', 'AddV2', 'Relu', 'Relu6', 'Placeholder', 'FusedBatchNorm', 'DepthwiseConv2dNative', 'ConcatV2', 'Mul', 'MaxPool', 'AvgPool', 'Identity', 'Sub', 'ResizeNearestNeighbor', 'Pad', 'FusedBatchNormV3'] # Node with which prefixes should be removed prefixesToRemove = ('MultipleGridAnchorGenerator/', 'Concatenate/', 'Postprocessor/', 'Preprocessor/map') # Load a config file. 
config = readTextMessage(configPath) config = config['model'][0]['ssd'][0] num_classes = int(config['num_classes'][0]) fixed_shape_resizer = config['image_resizer'][0]['fixed_shape_resizer'][0] image_width = int(fixed_shape_resizer['width'][0]) image_height = int(fixed_shape_resizer['height'][0]) box_predictor = 'convolutional' if 'convolutional_box_predictor' in config['box_predictor'][0] else 'weight_shared_convolutional' anchor_generator = config['anchor_generator'][0] if 'ssd_anchor_generator' in anchor_generator: ssd_anchor_generator = anchor_generator['ssd_anchor_generator'][0] min_scale = float(ssd_anchor_generator['min_scale'][0]) max_scale = float(ssd_anchor_generator['max_scale'][0]) num_layers = int(ssd_anchor_generator['num_layers'][0]) aspect_ratios = [float(ar) for ar in ssd_anchor_generator['aspect_ratios']] reduce_boxes_in_lowest_layer = True if 'reduce_boxes_in_lowest_layer' in ssd_anchor_generator: reduce_boxes_in_lowest_layer = ssd_anchor_generator['reduce_boxes_in_lowest_layer'][0] == 'true' priors_generator = SSDAnchorGenerator(min_scale, max_scale, num_layers, aspect_ratios, reduce_boxes_in_lowest_layer, image_width, image_height) print('Scale: [%f-%f]' % (min_scale, max_scale)) print('Aspect ratios: %s' % str(aspect_ratios)) print('Reduce boxes in the lowest layer: %s' % str(reduce_boxes_in_lowest_layer)) elif 'multiscale_anchor_generator' in anchor_generator: multiscale_anchor_generator = anchor_generator['multiscale_anchor_generator'][0] min_level = int(multiscale_anchor_generator['min_level'][0]) max_level = int(multiscale_anchor_generator['max_level'][0]) anchor_scale = float(multiscale_anchor_generator['anchor_scale'][0]) aspect_ratios = [float(ar) for ar in multiscale_anchor_generator['aspect_ratios']] scales_per_octave = int(multiscale_anchor_generator['scales_per_octave'][0]) num_layers = max_level - min_level + 1 priors_generator = MultiscaleAnchorGenerator(min_level, aspect_ratios, scales_per_octave, anchor_scale) print('Levels: [%d-%d]' % (min_level, max_level)) print('Anchor scale: %f' % anchor_scale) print('Scales per octave: %d' % scales_per_octave) print('Aspect ratios: %s' % str(aspect_ratios)) else: print('Unknown anchor_generator') exit(0) print('Number of classes: %d' % num_classes) print('Number of layers: %d' % num_layers) print('box predictor: %s' % box_predictor) print('Input image size: %dx%d' % (image_width, image_height)) # Read the graph. _inpNames = ['image_tensor'] outNames = ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes'] writeTextGraph(modelPath, outputPath, outNames) graph_def = parseTextGraph(outputPath) def getUnconnectedNodes(): unconnected = [] for node in graph_def.node: unconnected.append(node.name) for inp in node.input: if inp in unconnected: unconnected.remove(inp) return unconnected def fuse_nodes(nodesToKeep): # Detect unfused batch normalization nodes and fuse them. # Add_0 <-- moving_variance, add_y # Rsqrt <-- Add_0 # Mul_0 <-- Rsqrt, gamma # Mul_1 <-- input, Mul_0 # Mul_2 <-- moving_mean, Mul_0 # Sub_0 <-- beta, Mul_2 # Add_1 <-- Mul_1, Sub_0 nodesMap = {node.name: node for node in graph_def.node} subgraphBatchNorm = ['Add', ['Mul', 'input', ['Mul', ['Rsqrt', ['Add', 'moving_variance', 'add_y']], 'gamma']], ['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]] subgraphBatchNormV2 = ['AddV2', ['Mul', 'input', ['Mul', ['Rsqrt', ['AddV2', 'moving_variance', 'add_y']], 'gamma']], ['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]] # Detect unfused nearest neighbor resize. 
subgraphResizeNN = ['Reshape', ['Mul', ['Reshape', 'input', ['Pack', 'shape_1', 'shape_2', 'shape_3', 'shape_4', 'shape_5']], 'ones'], ['Pack', ['StridedSlice', ['Shape', 'input'], 'stack', 'stack_1', 'stack_2'], 'out_height', 'out_width', 'out_channels']] def checkSubgraph(node, targetNode, inputs, fusedNodes): op = targetNode[0] if node.op == op and (len(node.input) >= len(targetNode) - 1): fusedNodes.append(node) for i, inpOp in enumerate(targetNode[1:]): if isinstance(inpOp, list): if not node.input[i] in nodesMap or \ not checkSubgraph(nodesMap[node.input[i]], inpOp, inputs, fusedNodes): return False else: inputs[inpOp] = node.input[i] return True else: return False nodesToRemove = [] for node in graph_def.node: inputs = {} fusedNodes = [] if checkSubgraph(node, subgraphBatchNorm, inputs, fusedNodes) or \ checkSubgraph(node, subgraphBatchNormV2, inputs, fusedNodes): name = node.name node.Clear() node.name = name node.op = 'FusedBatchNorm' node.input.append(inputs['input']) node.input.append(inputs['gamma']) node.input.append(inputs['beta']) node.input.append(inputs['moving_mean']) node.input.append(inputs['moving_variance']) node.addAttr('epsilon', 0.001) nodesToRemove += fusedNodes[1:] inputs = {} fusedNodes = [] if checkSubgraph(node, subgraphResizeNN, inputs, fusedNodes): name = node.name node.Clear() node.name = name node.op = 'ResizeNearestNeighbor' node.input.append(inputs['input']) node.input.append(name + '/output_shape') out_height_node = nodesMap[inputs['out_height']] out_width_node = nodesMap[inputs['out_width']] out_height = int(out_height_node.attr['value']['tensor'][0]['int_val'][0]) out_width = int(out_width_node.attr['value']['tensor'][0]['int_val'][0]) shapeNode = NodeDef() shapeNode.name = name + '/output_shape' shapeNode.op = 'Const' shapeNode.addAttr('value', [out_height, out_width]) graph_def.node.insert(graph_def.node.index(node), shapeNode) nodesToKeep.append(shapeNode.name) nodesToRemove += fusedNodes[1:] for node in nodesToRemove: graph_def.node.remove(node) nodesToKeep = [] fuse_nodes(nodesToKeep) removeIdentity(graph_def) def to_remove(name, op): return (not name in nodesToKeep) and \ (op == 'Const' or (not op in keepOps) or name.startswith(prefixesToRemove)) removeUnusedNodesAndAttrs(to_remove, graph_def) # Connect input node to the first layer assert(graph_def.node[0].op == 'Placeholder') # assert(graph_def.node[1].op == 'Conv2D') weights = graph_def.node[1].input[0] for i in range(len(graph_def.node[1].input)): graph_def.node[1].input.pop() graph_def.node[1].input.append(graph_def.node[0].name) graph_def.node[1].input.append(weights) # Create SSD postprocessing head ############################################### # Concatenate predictions of classes, predictions of bounding boxes and proposals. 
def addConcatNode(name, inputs, axisNodeName):
    concat = NodeDef()
    concat.name = name
    concat.op = 'ConcatV2'
    for inp in inputs:
        concat.input.append(inp)
    concat.input.append(axisNodeName)
    graph_def.node.extend([concat])

addConstNode('concat/axis_flatten', [-1], graph_def)
addConstNode('PriorBox/concat/axis', [-2], graph_def)

for label in ['ClassPredictor', 'BoxEncodingPredictor' if box_predictor == 'convolutional' else 'BoxPredictor']:
    concatInputs = []
    for i in range(num_layers):
        # Flatten predictions
        flatten = NodeDef()
        if box_predictor == 'convolutional':
            inpName = 'BoxPredictor_%d/%s/BiasAdd' % (i, label)
        else:
            if i == 0:
                inpName = 'WeightSharedConvolutionalBoxPredictor/%s/BiasAdd' % label
            else:
                inpName = 'WeightSharedConvolutionalBoxPredictor_%d/%s/BiasAdd' % (i, label)
        flatten.input.append(inpName)
        flatten.name = inpName + '/Flatten'
        flatten.op = 'Flatten'

        concatInputs.append(flatten.name)
        graph_def.node.extend([flatten])
    addConcatNode('%s/concat' % label, concatInputs, 'concat/axis_flatten')

num_matched_layers = 0
for node in graph_def.node:
    if re.match(r'BoxPredictor_\d/BoxEncodingPredictor/convolution', node.name) or \
       re.match(r'BoxPredictor_\d/BoxEncodingPredictor/Conv2D', node.name) or \
       re.match(r'WeightSharedConvolutionalBoxPredictor(_\d)*/BoxPredictor/Conv2D', node.name):
        node.addAttr('loc_pred_transposed', True)
        num_matched_layers += 1
assert(num_matched_layers == num_layers)

# Add layers that generate anchors (bounding boxes proposals).
priorBoxes = []

boxCoder = config['box_coder'][0]
fasterRcnnBoxCoder = boxCoder['faster_rcnn_box_coder'][0]
boxCoderVariance = [1.0/float(fasterRcnnBoxCoder['x_scale'][0]), 1.0/float(fasterRcnnBoxCoder['y_scale'][0]),
                    1.0/float(fasterRcnnBoxCoder['width_scale'][0]), 1.0/float(fasterRcnnBoxCoder['height_scale'][0])]

for i in range(num_layers):
    priorBox = NodeDef()
    priorBox.name = 'PriorBox_%d' % i
    priorBox.op = 'PriorBox'
    if box_predictor == 'convolutional':
        priorBox.input.append('BoxPredictor_%d/BoxEncodingPredictor/BiasAdd' % i)
    else:
        if i == 0:
            priorBox.input.append('WeightSharedConvolutionalBoxPredictor/BoxPredictor/Conv2D')
        else:
            priorBox.input.append('WeightSharedConvolutionalBoxPredictor_%d/BoxPredictor/BiasAdd' % i)
    priorBox.input.append(graph_def.node[0].name)  # image_tensor

    priorBox.addAttr('flip', False)
    priorBox.addAttr('clip', False)

    widths, heights = priors_generator.get(i)

    priorBox.addAttr('width', widths)
    priorBox.addAttr('height', heights)
    priorBox.addAttr('variance', boxCoderVariance)

    graph_def.node.extend([priorBox])
    priorBoxes.append(priorBox.name)

# Compare this layer's output with Postprocessor/Reshape
addConcatNode('PriorBox/concat', priorBoxes, 'concat/axis_flatten')

# Sigmoid for classes predictions and DetectionOutput layer
addReshape('ClassPredictor/concat', 'ClassPredictor/concat3d', [0, -1, num_classes + 1], graph_def)

sigmoid = NodeDef()
sigmoid.name = 'ClassPredictor/concat/sigmoid'
sigmoid.op = 'Sigmoid'
sigmoid.input.append('ClassPredictor/concat3d')
graph_def.node.extend([sigmoid])

addFlatten(sigmoid.name, sigmoid.name + '/Flatten', graph_def)

detectionOut = NodeDef()
detectionOut.name = 'detection_out'
detectionOut.op = 'DetectionOutput'

if box_predictor == 'convolutional':
    detectionOut.input.append('BoxEncodingPredictor/concat')
else:
    detectionOut.input.append('BoxPredictor/concat')
detectionOut.input.append(sigmoid.name + '/Flatten')
detectionOut.input.append('PriorBox/concat')

detectionOut.addAttr('num_classes', num_classes + 1)
detectionOut.addAttr('share_location', True)
detectionOut.addAttr('background_label_id', 0)

postProcessing = config['post_processing'][0]
batchNMS = postProcessing['batch_non_max_suppression'][0]

if 'iou_threshold' in batchNMS:
    detectionOut.addAttr('nms_threshold', float(batchNMS['iou_threshold'][0]))
else:
    detectionOut.addAttr('nms_threshold', 0.6)

if 'score_threshold' in batchNMS:
    detectionOut.addAttr('confidence_threshold', float(batchNMS['score_threshold'][0]))
else:
    detectionOut.addAttr('confidence_threshold', 0.01)

if 'max_detections_per_class' in batchNMS:
    detectionOut.addAttr('top_k', int(batchNMS['max_detections_per_class'][0]))
else:
    detectionOut.addAttr('top_k', 100)

if 'max_total_detections' in batchNMS:
    detectionOut.addAttr('keep_top_k', int(batchNMS['max_total_detections'][0]))
else:
    detectionOut.addAttr('keep_top_k', 100)

detectionOut.addAttr('code_type', "CENTER_SIZE")

graph_def.node.extend([detectionOut])

while True:
    unconnectedNodes = getUnconnectedNodes()
    unconnectedNodes.remove(detectionOut.name)
    if not unconnectedNodes:
        break

    for name in unconnectedNodes:
        for i in range(len(graph_def.node)):
            if graph_def.node[i].name == name:
                del graph_def.node[i]
                break

# Save as text.
graph_def.save(outputPath)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run this script to get a text graph of '
                                                 'SSD model from TensorFlow Object Detection API. '
                                                 'Then pass it with .pb file to cv::dnn::readNetFromTensorflow function.')
    parser.add_argument('--input', required=True, help='Path to frozen TensorFlow graph.')
    parser.add_argument('--output', required=True, help='Path to output text graph.')
    parser.add_argument('--config', required=True, help='Path to a *.config file that was used for training.')
    args = parser.parse_args()

    createSSDGraph(args.input, args.config, args.output)
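# A minimal, self-contained sketch (not part of the original sample) of how SSD
# priors are conventionally derived from the min_scale/max_scale pair read above.
# The SSDAnchorGenerator used in this script is defined elsewhere in the file;
# the linear-interpolation formula below is the standard one from the SSD paper
# and is an assumption here, not an extract of that class. Needs num_layers >= 2.
import math

def ssd_scales(min_scale, max_scale, num_layers):
    # Scale of layer k is interpolated linearly between min_scale and max_scale.
    return [min_scale + (max_scale - min_scale) * k / (num_layers - 1)
            for k in range(num_layers)]

def ssd_boxes(scale, aspect_ratios, image_width, image_height):
    # One box per aspect ratio: width grows with sqrt(ar), height shrinks with it.
    return [(scale * math.sqrt(ar) * image_width,
             scale / math.sqrt(ar) * image_height) for ar in aspect_ratios]

# Example: ssd_scales(0.2, 0.95, 6) starts at 0.2 and ends at 0.95, and
# ssd_boxes(0.2, [1.0, 2.0, 0.5], 300, 300) yields a 60x60 square plus
# a wide and a tall box of the same area.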
import os
import numpy as np
import cv2 as cv
import argparse
from common import findFile

parser = argparse.ArgumentParser(description='Use this script to run action recognition using 3D ResNet34',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', help='Path to input video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', required=True, help='Path to model.')
parser.add_argument('--classes', default=findFile('action_recongnition_kinetics.txt'), help='Path to classes list.')

# To get the net, download the original repository https://github.com/kenshohara/video-classification-3d-cnn-pytorch
# For a correct ONNX export, modify the file video-classification-3d-cnn-pytorch/models/resnet.py
# change
# - def downsample_basic_block(x, planes, stride):
# -     out = F.avg_pool3d(x, kernel_size=1, stride=stride)
# -     zero_pads = torch.Tensor(out.size(0), planes - out.size(1),
# -                              out.size(2), out.size(3),
# -                              out.size(4)).zero_()
# -     if isinstance(out.data, torch.cuda.FloatTensor):
# -         zero_pads = zero_pads.cuda()
# -
# -     out = Variable(torch.cat([out.data, zero_pads], dim=1))
# -     return out
# To
# + def downsample_basic_block(x, planes, stride):
# +     out = F.avg_pool3d(x, kernel_size=1, stride=stride)
# +     out = F.pad(out, (0, 0, 0, 0, 0, 0, 0, int(planes - out.size(1)), 0, 0), "constant", 0)
# +     return out
# To export to ONNX, use torch.onnx.export(model, inputs, model_name)

def get_class_names(path):
    class_names = []
    with open(path) as f:
        for row in f:
            class_names.append(row[:-1])
    return class_names

def classify_video(video_path, net_path):
    SAMPLE_DURATION = 16
    SAMPLE_SIZE = 112
    mean = (114.7748, 107.7354, 99.4750)
    class_names = get_class_names(args.classes)

    net = cv.dnn.readNet(net_path)
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

    winName = 'Deep learning action recognition in OpenCV'
    cv.namedWindow(winName, cv.WINDOW_AUTOSIZE)

    cap = cv.VideoCapture(video_path)
    while cv.waitKey(1) < 0:
        frames = []
        for _ in range(SAMPLE_DURATION):
            hasFrame, frame = cap.read()
            if not hasFrame:
                exit(0)
            frames.append(frame)

        inputs = cv.dnn.blobFromImages(frames, 1, (SAMPLE_SIZE, SAMPLE_SIZE), mean, True, crop=True)
        inputs = np.transpose(inputs, (1, 0, 2, 3))
        inputs = np.expand_dims(inputs, axis=0)
        net.setInput(inputs)
        outputs = net.forward()
        class_pred = np.argmax(outputs)
        label = class_names[class_pred]

        for frame in frames:
            labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv.rectangle(frame, (0, 10 - labelSize[1]), (labelSize[0], 10 + baseLine), (255, 255, 255), cv.FILLED)
            cv.putText(frame, label, (0, 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
            cv.imshow(winName, frame)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break

if __name__ == "__main__":
    args, _ = parser.parse_known_args()
    classify_video(args.input if args.input else 0, args.model)
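# A numpy-only sketch (added for illustration, with hypothetical sizes) of the
# blob rearrangement done above: blobFromImages yields a TxCxHxW stack of frames,
# while a 3D CNN expects a single 1xCxTxHxW clip, so the time and channel axes
# are swapped and a batch axis is prepended.
import numpy as np

def to_3d_input(blob):
    clip = np.transpose(blob, (1, 0, 2, 3))  # (T, C, H, W) -> (C, T, H, W)
    return np.expand_dims(clip, axis=0)      # -> (1, C, T, H, W)

_demo = np.zeros((16, 3, 112, 112), np.float32)  # 16 stand-in 112x112 frames
assert to_3d_input(_demo).shape == (1, 3, 16, 112, 112)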
from __future__ import print_function # Script to evaluate MobileNet-SSD object detection model trained in TensorFlow # using both TensorFlow and OpenCV. Example: # # python mobilenet_ssd_accuracy.py \ # --weights=frozen_inference_graph.pb \ # --prototxt=ssd_mobilenet_v1_coco.pbtxt \ # --images=val2017 \ # --annotations=annotations/instances_val2017.json # # Tested on COCO 2017 object detection dataset, http://cocodataset.org/#download import os import cv2 as cv import json import argparse parser = argparse.ArgumentParser( description='Evaluate MobileNet-SSD model using both TensorFlow and OpenCV. ' 'COCO evaluation framework is required: http://cocodataset.org') parser.add_argument('--weights', required=True, help='Path to frozen_inference_graph.pb of MobileNet-SSD model. ' 'Download it from http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_11_06_2017.tar.gz') parser.add_argument('--prototxt', help='Path to ssd_mobilenet_v1_coco.pbtxt from opencv_extra.', required=True) parser.add_argument('--images', help='Path to COCO validation images directory.', required=True) parser.add_argument('--annotations', help='Path to COCO annotations file.', required=True) args = parser.parse_args() ### Get OpenCV predictions ##################################################### net = cv.dnn.readNetFromTensorflow(cv.samples.findFile(args.weights), cv.samples.findFile(args.prototxt)) net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) detections = [] for imgName in os.listdir(args.images): inp = cv.imread(cv.samples.findFile(os.path.join(args.images, imgName))) rows = inp.shape[0] cols = inp.shape[1] inp = cv.resize(inp, (300, 300)) net.setInput(cv.dnn.blobFromImage(inp, 1.0/127.5, (300, 300), (127.5, 127.5, 127.5), True)) out = net.forward() for i in range(out.shape[2]): score = float(out[0, 0, i, 2]) # Confidence threshold is in prototxt. 
classId = int(out[0, 0, i, 1])
        x = out[0, 0, i, 3] * cols
        y = out[0, 0, i, 4] * rows
        w = out[0, 0, i, 5] * cols - x
        h = out[0, 0, i, 6] * rows - y
        detections.append({
          "image_id": int(imgName[:imgName.rfind('.')]),
          "category_id": classId,
          "bbox": [x, y, w, h],
          "score": score
        })

with open('cv_result.json', 'wt') as f:
    json.dump(detections, f)

### Get TensorFlow predictions #################################################
import tensorflow as tf

with tf.gfile.FastGFile(args.weights, 'rb') as f:
    # Load the model in binary mode: ParseFromString expects raw bytes.
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Session() as sess:
    # Restore session
    sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')

    detections = []
    for imgName in os.listdir(args.images):
        inp = cv.imread(os.path.join(args.images, imgName))
        rows = inp.shape[0]
        cols = inp.shape[1]
        inp = cv.resize(inp, (300, 300))
        inp = inp[:, :, [2, 1, 0]]  # BGR2RGB
        out = sess.run([sess.graph.get_tensor_by_name('num_detections:0'),
                        sess.graph.get_tensor_by_name('detection_scores:0'),
                        sess.graph.get_tensor_by_name('detection_boxes:0'),
                        sess.graph.get_tensor_by_name('detection_classes:0')],
                       feed_dict={'image_tensor:0': inp.reshape(1, inp.shape[0], inp.shape[1], 3)})
        num_detections = int(out[0][0])
        for i in range(num_detections):
            classId = int(out[3][0][i])
            score = float(out[1][0][i])
            bbox = [float(v) for v in out[2][0][i]]
            if score > 0.01:
                x = bbox[1] * cols
                y = bbox[0] * rows
                w = bbox[3] * cols - x
                h = bbox[2] * rows - y
                detections.append({
                  "image_id": int(imgName[:imgName.rfind('.')]),
                  "category_id": classId,
                  "bbox": [x, y, w, h],
                  "score": score
                })

with open('tf_result.json', 'wt') as f:
    json.dump(detections, f)

### Evaluation part ############################################################

# %matplotlib inline
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)

annType = ['segm', 'bbox', 'keypoints']
annType = annType[1]  # specify type here
prefix = 'person_keypoints' if annType == 'keypoints' else 'instances'
print('Running demo for *%s* results.' % (annType))

# initialize COCO ground truth api
cocoGt = COCO(args.annotations)

# initialize COCO detections api
for resFile in ['tf_result.json', 'cv_result.json']:
    print(resFile)
    cocoDt = cocoGt.loadRes(resFile)

    cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
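# Both evaluation branches above repeat the same conversion from normalized box
# corners to the absolute [x, y, width, height] format COCO expects. A small
# standalone sketch of that arithmetic (the function name is illustrative):

def to_coco_bbox(xmin, ymin, xmax, ymax, cols, rows):
    # Normalized corners -> absolute top-left corner plus width and height.
    x = xmin * cols
    y = ymin * rows
    return [x, y, xmax * cols - x, ymax * rows - y]

# Example: to_coco_bbox(0.25, 0.5, 0.75, 1.0, 640, 480) == [160.0, 240.0, 320.0, 240.0]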
import argparse import numpy as np from tf_text_graph_common import * def createFasterRCNNGraph(modelPath, configPath, outputPath): scopesToKeep = ('FirstStageFeatureExtractor', 'Conv', 'FirstStageBoxPredictor/BoxEncodingPredictor', 'FirstStageBoxPredictor/ClassPredictor', 'CropAndResize', 'MaxPool2D', 'SecondStageFeatureExtractor', 'SecondStageBoxPredictor', 'Preprocessor/sub', 'Preprocessor/mul', 'image_tensor') scopesToIgnore = ('FirstStageFeatureExtractor/Assert', 'FirstStageFeatureExtractor/Shape', 'FirstStageFeatureExtractor/strided_slice', 'FirstStageFeatureExtractor/GreaterEqual', 'FirstStageFeatureExtractor/LogicalAnd') # Load a config file. config = readTextMessage(configPath) config = config['model'][0]['faster_rcnn'][0] num_classes = int(config['num_classes'][0]) grid_anchor_generator = config['first_stage_anchor_generator'][0]['grid_anchor_generator'][0] scales = [float(s) for s in grid_anchor_generator['scales']] aspect_ratios = [float(ar) for ar in grid_anchor_generator['aspect_ratios']] width_stride = float(grid_anchor_generator['width_stride'][0]) height_stride = float(grid_anchor_generator['height_stride'][0]) feature_extractor = config['feature_extractor'][0] if 'type' in feature_extractor and feature_extractor['type'][0] == 'faster_rcnn_nas': features_stride = 16.0 else: features_stride = float(feature_extractor['first_stage_features_stride'][0]) first_stage_nms_iou_threshold = float(config['first_stage_nms_iou_threshold'][0]) first_stage_max_proposals = int(config['first_stage_max_proposals'][0]) print('Number of classes: %d' % num_classes) print('Scales: %s' % str(scales)) print('Aspect ratios: %s' % str(aspect_ratios)) print('Width stride: %f' % width_stride) print('Height stride: %f' % height_stride) print('Features stride: %f' % features_stride) # Read the graph. writeTextGraph(modelPath, outputPath, ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes']) graph_def = parseTextGraph(outputPath) removeIdentity(graph_def) nodesToKeep = [] def to_remove(name, op): if name in nodesToKeep: return False return op == 'Const' or name.startswith(scopesToIgnore) or not name.startswith(scopesToKeep) or \ (name.startswith('CropAndResize') and op != 'CropAndResize') # Fuse atrous convolutions (with dilations). nodesMap = {node.name: node for node in graph_def.node} for node in reversed(graph_def.node): if node.op == 'BatchToSpaceND': del node.input[2] conv = nodesMap[node.input[0]] spaceToBatchND = nodesMap[conv.input[0]] # Extract paddings stridedSlice = nodesMap[spaceToBatchND.input[2]] assert(stridedSlice.op == 'StridedSlice') pack = nodesMap[stridedSlice.input[0]] assert(pack.op == 'Pack') padNodeH = nodesMap[nodesMap[pack.input[0]].input[0]] padNodeW = nodesMap[nodesMap[pack.input[1]].input[0]] padH = int(padNodeH.attr['value']['tensor'][0]['int_val'][0]) padW = int(padNodeW.attr['value']['tensor'][0]['int_val'][0]) paddingsNode = NodeDef() paddingsNode.name = conv.name + '/paddings' paddingsNode.op = 'Const' paddingsNode.addAttr('value', [padH, padH, padW, padW]) graph_def.node.insert(graph_def.node.index(spaceToBatchND), paddingsNode) nodesToKeep.append(paddingsNode.name) spaceToBatchND.input[2] = paddingsNode.name removeUnusedNodesAndAttrs(to_remove, graph_def) # Connect input node to the first layer assert(graph_def.node[0].op == 'Placeholder') graph_def.node[1].input.insert(0, graph_def.node[0].name) # Temporarily remove top nodes. 
topNodes = [] while True: node = graph_def.node.pop() topNodes.append(node) if node.op == 'CropAndResize': break addReshape('FirstStageBoxPredictor/ClassPredictor/BiasAdd', 'FirstStageBoxPredictor/ClassPredictor/reshape_1', [0, -1, 2], graph_def) addSoftMax('FirstStageBoxPredictor/ClassPredictor/reshape_1', 'FirstStageBoxPredictor/ClassPredictor/softmax', graph_def) # Compare with Reshape_4 addFlatten('FirstStageBoxPredictor/ClassPredictor/softmax', 'FirstStageBoxPredictor/ClassPredictor/softmax/flatten', graph_def) # Compare with FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd addFlatten('FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd', 'FirstStageBoxPredictor/BoxEncodingPredictor/flatten', graph_def) proposals = NodeDef() proposals.name = 'proposals' # Compare with ClipToWindow/Gather/Gather (NOTE: normalized) proposals.op = 'PriorBox' proposals.input.append('FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd') proposals.input.append(graph_def.node[0].name) # image_tensor proposals.addAttr('flip', False) proposals.addAttr('clip', True) proposals.addAttr('step', features_stride) proposals.addAttr('offset', 0.0) proposals.addAttr('variance', [0.1, 0.1, 0.2, 0.2]) widths = [] heights = [] for a in aspect_ratios: for s in scales: ar = np.sqrt(a) heights.append((height_stride**2) * s / ar) widths.append((width_stride**2) * s * ar) proposals.addAttr('width', widths) proposals.addAttr('height', heights) graph_def.node.extend([proposals]) # Compare with Reshape_5 detectionOut = NodeDef() detectionOut.name = 'detection_out' detectionOut.op = 'DetectionOutput' detectionOut.input.append('FirstStageBoxPredictor/BoxEncodingPredictor/flatten') detectionOut.input.append('FirstStageBoxPredictor/ClassPredictor/softmax/flatten') detectionOut.input.append('proposals') detectionOut.addAttr('num_classes', 2) detectionOut.addAttr('share_location', True) detectionOut.addAttr('background_label_id', 0) detectionOut.addAttr('nms_threshold', first_stage_nms_iou_threshold) detectionOut.addAttr('top_k', 6000) detectionOut.addAttr('code_type', "CENTER_SIZE") detectionOut.addAttr('keep_top_k', first_stage_max_proposals) detectionOut.addAttr('clip', False) graph_def.node.extend([detectionOut]) addConstNode('clip_by_value/lower', [0.0], graph_def) addConstNode('clip_by_value/upper', [1.0], graph_def) clipByValueNode = NodeDef() clipByValueNode.name = 'detection_out/clip_by_value' clipByValueNode.op = 'ClipByValue' clipByValueNode.input.append('detection_out') clipByValueNode.input.append('clip_by_value/lower') clipByValueNode.input.append('clip_by_value/upper') graph_def.node.extend([clipByValueNode]) # Save as text. for node in reversed(topNodes): graph_def.node.extend([node]) addSoftMax('SecondStageBoxPredictor/Reshape_1', 'SecondStageBoxPredictor/Reshape_1/softmax', graph_def) addSlice('SecondStageBoxPredictor/Reshape_1/softmax', 'SecondStageBoxPredictor/Reshape_1/slice', [0, 0, 1], [-1, -1, -1], graph_def) addReshape('SecondStageBoxPredictor/Reshape_1/slice', 'SecondStageBoxPredictor/Reshape_1/Reshape', [1, -1], graph_def) # Replace Flatten subgraph onto a single node. 
cropAndResizeNodeName = '' for i in reversed(range(len(graph_def.node))): if graph_def.node[i].op == 'CropAndResize': graph_def.node[i].input.insert(1, 'detection_out/clip_by_value') cropAndResizeNodeName = graph_def.node[i].name if graph_def.node[i].name == 'SecondStageBoxPredictor/Reshape': addConstNode('SecondStageBoxPredictor/Reshape/shape2', [1, -1, 4], graph_def) graph_def.node[i].input.pop() graph_def.node[i].input.append('SecondStageBoxPredictor/Reshape/shape2') if graph_def.node[i].name in ['SecondStageBoxPredictor/Flatten/flatten/Shape', 'SecondStageBoxPredictor/Flatten/flatten/strided_slice', 'SecondStageBoxPredictor/Flatten/flatten/Reshape/shape', 'SecondStageBoxPredictor/Flatten_1/flatten/Shape', 'SecondStageBoxPredictor/Flatten_1/flatten/strided_slice', 'SecondStageBoxPredictor/Flatten_1/flatten/Reshape/shape']: del graph_def.node[i] for node in graph_def.node: if node.name == 'SecondStageBoxPredictor/Flatten/flatten/Reshape' or \ node.name == 'SecondStageBoxPredictor/Flatten_1/flatten/Reshape': node.op = 'Flatten' node.input.pop() if node.name in ['FirstStageBoxPredictor/BoxEncodingPredictor/Conv2D', 'SecondStageBoxPredictor/BoxEncodingPredictor/MatMul']: node.addAttr('loc_pred_transposed', True) if node.name.startswith('MaxPool2D'): assert(node.op == 'MaxPool') assert(cropAndResizeNodeName) node.input = [cropAndResizeNodeName] ################################################################################ ### Postprocessing ################################################################################ addSlice('detection_out/clip_by_value', 'detection_out/slice', [0, 0, 0, 3], [-1, -1, -1, 4], graph_def) variance = NodeDef() variance.name = 'proposals/variance' variance.op = 'Const' variance.addAttr('value', [0.1, 0.1, 0.2, 0.2]) graph_def.node.extend([variance]) varianceEncoder = NodeDef() varianceEncoder.name = 'variance_encoded' varianceEncoder.op = 'Mul' varianceEncoder.input.append('SecondStageBoxPredictor/Reshape') varianceEncoder.input.append(variance.name) varianceEncoder.addAttr('axis', 2) graph_def.node.extend([varianceEncoder]) addReshape('detection_out/slice', 'detection_out/slice/reshape', [1, 1, -1], graph_def) addFlatten('variance_encoded', 'variance_encoded/flatten', graph_def) detectionOut = NodeDef() detectionOut.name = 'detection_out_final' detectionOut.op = 'DetectionOutput' detectionOut.input.append('variance_encoded/flatten') detectionOut.input.append('SecondStageBoxPredictor/Reshape_1/Reshape') detectionOut.input.append('detection_out/slice/reshape') detectionOut.addAttr('num_classes', num_classes) detectionOut.addAttr('share_location', False) detectionOut.addAttr('background_label_id', num_classes + 1) detectionOut.addAttr('nms_threshold', 0.6) detectionOut.addAttr('code_type', "CENTER_SIZE") detectionOut.addAttr('keep_top_k', 100) detectionOut.addAttr('clip', True) detectionOut.addAttr('variance_encoded_in_target', True) graph_def.node.extend([detectionOut]) def getUnconnectedNodes(): unconnected = [node.name for node in graph_def.node] for node in graph_def.node: for inp in node.input: if inp in unconnected: unconnected.remove(inp) return unconnected while True: unconnectedNodes = getUnconnectedNodes() unconnectedNodes.remove(detectionOut.name) if not unconnectedNodes: break for name in unconnectedNodes: for i in range(len(graph_def.node)): if graph_def.node[i].name == name: del graph_def.node[i] break # Save as text. 
graph_def.save(outputPath)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run this script to get a text graph of '
                                                 'Faster-RCNN model from TensorFlow Object Detection API. '
                                                 'Then pass it with .pb file to cv::dnn::readNetFromTensorflow function.')
    parser.add_argument('--input', required=True, help='Path to frozen TensorFlow graph.')
    parser.add_argument('--output', required=True, help='Path to output text graph.')
    parser.add_argument('--config', required=True, help='Path to a *.config file that was used for training.')
    args = parser.parse_args()

    createFasterRCNNGraph(args.input, args.config, args.output)
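# A standalone sketch (added for illustration) of the anchor arithmetic attached
# to the 'proposals' PriorBox above. With the usual 16-pixel stride,
# stride**2 = 256 is the base anchor side of the classic Faster R-CNN setup;
# each scale multiplies it and each aspect ratio stretches it by sqrt(ratio).
import numpy as np

def rcnn_anchors(scales, aspect_ratios, width_stride=16.0, height_stride=16.0):
    widths, heights = [], []
    for a in aspect_ratios:
        ar = np.sqrt(a)
        for s in scales:
            heights.append(height_stride ** 2 * s / ar)
            widths.append(width_stride ** 2 * s * ar)
    return widths, heights

# Example: scales [0.25, 0.5, 1.0, 2.0] with ratios [0.5, 1.0, 2.0] give 12
# anchors; ratio 1.0 and scale 1.0 produce a 256x256 box.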
# Script is based on https://github.com/richzhang/colorization/blob/master/colorization/colorize.py
# To download the caffemodel and the prototxt, see: https://github.com/richzhang/colorization/tree/master/colorization/models
# To download pts_in_hull.npy, see: https://github.com/richzhang/colorization/blob/master/colorization/resources/pts_in_hull.npy
import numpy as np
import argparse
import cv2 as cv

def parse_args():
    parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
    parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
    parser.add_argument('--prototxt', help='Path to colorization_deploy_v2.prototxt', required=True)
    parser.add_argument('--caffemodel', help='Path to colorization_release_v2.caffemodel', required=True)
    parser.add_argument('--kernel', help='Path to pts_in_hull.npy', required=True)
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    W_in = 224
    H_in = 224
    imshowSize = (640, 480)

    args = parse_args()

    # Select desired model
    net = cv.dnn.readNetFromCaffe(args.prototxt, args.caffemodel)

    pts_in_hull = np.load(args.kernel)  # load cluster centers

    # populate cluster centers as 1x1 convolution kernel
    pts_in_hull = pts_in_hull.transpose().reshape(2, 313, 1, 1)
    net.getLayer(net.getLayerId('class8_ab')).blobs = [pts_in_hull.astype(np.float32)]
    net.getLayer(net.getLayerId('conv8_313_rh')).blobs = [np.full([1, 313], 2.606, np.float32)]

    if args.input:
        cap = cv.VideoCapture(args.input)
    else:
        cap = cv.VideoCapture(0)

    while cv.waitKey(1) < 0:
        hasFrame, frame = cap.read()
        if not hasFrame:
            cv.waitKey()
            break

        img_rgb = (frame[:, :, [2, 1, 0]] * 1.0 / 255).astype(np.float32)  # BGR -> RGB, scaled to [0, 1]
        img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab)
        img_l = img_lab[:, :, 0]  # pull out L channel
        (H_orig, W_orig) = img_rgb.shape[:2]  # original image size

        # resize image to network input size
        img_rs = cv.resize(img_rgb, (W_in, H_in))
        img_lab_rs = cv.cvtColor(img_rs, cv.COLOR_RGB2Lab)
        img_l_rs = img_lab_rs[:, :, 0]
        img_l_rs -= 50  # subtract 50 for mean-centering

        net.setInput(cv.dnn.blobFromImage(img_l_rs))
        ab_dec = net.forward()[0, :, :, :].transpose((1, 2, 0))  # this is our result

        (H_out, W_out) = ab_dec.shape[:2]
        ab_dec_us = cv.resize(ab_dec, (W_orig, H_orig))
        img_lab_out = np.concatenate((img_l[:, :, np.newaxis], ab_dec_us), axis=2)  # concatenate with original image L
        img_bgr_out = np.clip(cv.cvtColor(img_lab_out, cv.COLOR_Lab2BGR), 0, 1)

        frame = cv.resize(frame, imshowSize)
        cv.imshow('origin', frame)
        cv.imshow('gray', cv.cvtColor(frame, cv.COLOR_BGR2GRAY))  # frame is BGR, so convert with BGR2GRAY
        cv.imshow('colorized', cv.resize(img_bgr_out, imshowSize))
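# The colorization pipeline above boils down to: keep the full-resolution L
# channel, predict low-resolution ab, upsample ab and re-fuse the channels.
# A synthetic-input sketch of that re-fusion step (random arrays stand in for
# the camera frame and the network output; sizes are illustrative):
import numpy as np
import cv2 as cv

_rgb = np.random.rand(480, 640, 3).astype(np.float32)      # stand-in input frame
_L = cv.cvtColor(_rgb, cv.COLOR_RGB2Lab)[:, :, 0]          # full-resolution L
_ab_small = np.random.rand(56, 56, 2).astype(np.float32)   # stand-in net output
_ab = cv.resize(_ab_small, (640, 480))                     # upsample to frame size
_lab = np.concatenate((_L[:, :, np.newaxis], _ab), axis=2) # re-fuse L with ab
_bgr = np.clip(cv.cvtColor(_lab, cv.COLOR_Lab2BGR), 0, 1)  # back to displayable BGR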
import sys
import os
import cv2 as cv

def add_argument(zoo, parser, name, help, required=False, default=None, type=None, action=None, nargs=None):
    if len(sys.argv) <= 1:
        return

    modelName = sys.argv[1]

    if os.path.isfile(zoo):
        fs = cv.FileStorage(zoo, cv.FILE_STORAGE_READ)
        node = fs.getNode(modelName)
        if not node.empty():
            value = node.getNode(name)
            if not value.empty():
                if value.isReal():
                    default = value.real()
                elif value.isString():
                    default = value.string()
                elif value.isInt():
                    default = int(value.real())
                elif value.isSeq():
                    default = []
                    for i in range(value.size()):
                        v = value.at(i)
                        if v.isInt():
                            default.append(int(v.real()))
                        elif v.isReal():
                            default.append(v.real())
                        else:
                            print('Unexpected value format')
                            exit(0)
                else:
                    print('Unexpected field format')
                    exit(0)
                required = False

    if action == 'store_true':
        default = 1 if default == 'true' else (0 if default == 'false' else default)
        assert(default is None or default == 0 or default == 1)
        parser.add_argument('--' + name, required=required, help=help, default=bool(default),
                            action=action)
    else:
        parser.add_argument('--' + name, required=required, help=help, default=default,
                            action=action, nargs=nargs, type=type)

def add_preproc_args(zoo, parser, sample):
    aliases = []
    if os.path.isfile(zoo):
        fs = cv.FileStorage(zoo, cv.FILE_STORAGE_READ)
        root = fs.root()
        for name in root.keys():
            model = root.getNode(name)
            if model.getNode('sample').string() == sample:
                aliases.append(name)

    parser.add_argument('alias', nargs='?', choices=aliases,
                        help='An alias name of model to extract preprocessing parameters from models.yml file.')
    add_argument(zoo, parser, 'model', required=True,
                 help='Path to a binary file of the model that contains trained weights. '
                      'It could be a file with extensions .caffemodel (Caffe), '
                      '.pb (TensorFlow), .t7 or .net (Torch), .weights (Darknet), .bin (OpenVINO)')
    add_argument(zoo, parser, 'config',
                 help='Path to a text file of the model that contains network configuration. '
                      'It could be a file with extensions .prototxt (Caffe), .pbtxt or .config (TensorFlow), .cfg (Darknet), .xml (OpenVINO)')
    add_argument(zoo, parser, 'mean', nargs='+', type=float, default=[0, 0, 0],
                 help='Preprocess input image by subtracting mean values. '
                      'Mean values should be in BGR order.')
    add_argument(zoo, parser, 'scale', type=float, default=1.0,
                 help='Preprocess input image by multiplying by a scale factor.')
    add_argument(zoo, parser, 'width', type=int,
                 help='Preprocess input image by resizing to a specific width.')
    add_argument(zoo, parser, 'height', type=int,
                 help='Preprocess input image by resizing to a specific height.')
    add_argument(zoo, parser, 'rgb', action='store_true',
                 help='Indicate that model works with RGB input images instead of BGR ones.')
    add_argument(zoo, parser, 'classes',
                 help='Optional path to a text file with names of classes to label detected objects.')

def findFile(filename):
    if filename:
        if os.path.exists(filename):
            return filename

        fpath = cv.samples.findFile(filename, False)
        if fpath:
            return fpath

        samplesDataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                      '..', 'data', 'dnn')
        if os.path.exists(os.path.join(samplesDataDir, filename)):
            return os.path.join(samplesDataDir, filename)

        for path in ['OPENCV_DNN_TEST_DATA_PATH', 'OPENCV_TEST_DATA_PATH']:
            try:
                extraPath = os.environ[path]
                absPath = os.path.join(extraPath, 'dnn', filename)
                if os.path.exists(absPath):
                    return absPath
            except KeyError:
                pass

        print('File ' + filename + ' not found! Please specify a path to '
              '/opencv_extra/testdata in OPENCV_DNN_TEST_DATA_PATH environment '
              'variable or pass a full path to model.')
        exit(0)
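# A minimal in-memory sketch of the FileStorage lookup that add_argument above
# performs against models.yml. The 'mynet' entry and its fields are made up
# purely for illustration; reading a YAML string via FILE_STORAGE_MEMORY is
# assumed to be available in this OpenCV build.
import cv2 as cv

_yml = ('%YAML:1.0\n'
        'mynet:\n'
        '  width: 416\n'
        '  scale: 0.00392\n'
        '  sample: "object_detection"\n')
_fs = cv.FileStorage(_yml, cv.FILE_STORAGE_READ | cv.FILE_STORAGE_MEMORY)
_node = _fs.getNode('mynet')
print(int(_node.getNode('width').real()))   # 416
print(_node.getNode('scale').real())        # 0.00392
print(_node.getNode('sample').string())     # object_detection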
import argparse import numpy as np from tf_text_graph_common import * parser = argparse.ArgumentParser(description='Run this script to get a text graph of ' 'Mask-RCNN model from TensorFlow Object Detection API. ' 'Then pass it with .pb file to cv::dnn::readNetFromTensorflow function.') parser.add_argument('--input', required=True, help='Path to frozen TensorFlow graph.') parser.add_argument('--output', required=True, help='Path to output text graph.') parser.add_argument('--config', required=True, help='Path to a *.config file is used for training.') args = parser.parse_args() scopesToKeep = ('FirstStageFeatureExtractor', 'Conv', 'FirstStageBoxPredictor/BoxEncodingPredictor', 'FirstStageBoxPredictor/ClassPredictor', 'CropAndResize', 'MaxPool2D', 'SecondStageFeatureExtractor', 'SecondStageBoxPredictor', 'Preprocessor/sub', 'Preprocessor/mul', 'image_tensor') scopesToIgnore = ('FirstStageFeatureExtractor/Assert', 'FirstStageFeatureExtractor/Shape', 'FirstStageFeatureExtractor/strided_slice', 'FirstStageFeatureExtractor/GreaterEqual', 'FirstStageFeatureExtractor/LogicalAnd', 'Conv/required_space_to_batch_paddings') # Load a config file. config = readTextMessage(args.config) config = config['model'][0]['faster_rcnn'][0] num_classes = int(config['num_classes'][0]) grid_anchor_generator = config['first_stage_anchor_generator'][0]['grid_anchor_generator'][0] scales = [float(s) for s in grid_anchor_generator['scales']] aspect_ratios = [float(ar) for ar in grid_anchor_generator['aspect_ratios']] width_stride = float(grid_anchor_generator['width_stride'][0]) height_stride = float(grid_anchor_generator['height_stride'][0]) features_stride = float(config['feature_extractor'][0]['first_stage_features_stride'][0]) first_stage_nms_iou_threshold = float(config['first_stage_nms_iou_threshold'][0]) first_stage_max_proposals = int(config['first_stage_max_proposals'][0]) print('Number of classes: %d' % num_classes) print('Scales: %s' % str(scales)) print('Aspect ratios: %s' % str(aspect_ratios)) print('Width stride: %f' % width_stride) print('Height stride: %f' % height_stride) print('Features stride: %f' % features_stride) # Read the graph. writeTextGraph(args.input, args.output, ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes', 'detection_masks']) graph_def = parseTextGraph(args.output) removeIdentity(graph_def) nodesToKeep = [] def to_remove(name, op): if name in nodesToKeep: return False return op == 'Const' or name.startswith(scopesToIgnore) or not name.startswith(scopesToKeep) or \ (name.startswith('CropAndResize') and op != 'CropAndResize') # Fuse atrous convolutions (with dilations). nodesMap = {node.name: node for node in graph_def.node} for node in reversed(graph_def.node): if node.op == 'BatchToSpaceND': del node.input[2] conv = nodesMap[node.input[0]] spaceToBatchND = nodesMap[conv.input[0]] paddingsNode = NodeDef() paddingsNode.name = conv.name + '/paddings' paddingsNode.op = 'Const' paddingsNode.addAttr('value', [2, 2, 2, 2]) graph_def.node.insert(graph_def.node.index(spaceToBatchND), paddingsNode) nodesToKeep.append(paddingsNode.name) spaceToBatchND.input[2] = paddingsNode.name removeUnusedNodesAndAttrs(to_remove, graph_def) # Connect input node to the first layer assert(graph_def.node[0].op == 'Placeholder') graph_def.node[1].input.insert(0, graph_def.node[0].name) # Temporarily remove top nodes. 
topNodes = [] numCropAndResize = 0 while True: node = graph_def.node.pop() topNodes.append(node) if node.op == 'CropAndResize': numCropAndResize += 1 if numCropAndResize == 2: break addReshape('FirstStageBoxPredictor/ClassPredictor/BiasAdd', 'FirstStageBoxPredictor/ClassPredictor/reshape_1', [0, -1, 2], graph_def) addSoftMax('FirstStageBoxPredictor/ClassPredictor/reshape_1', 'FirstStageBoxPredictor/ClassPredictor/softmax', graph_def) # Compare with Reshape_4 addFlatten('FirstStageBoxPredictor/ClassPredictor/softmax', 'FirstStageBoxPredictor/ClassPredictor/softmax/flatten', graph_def) # Compare with FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd addFlatten('FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd', 'FirstStageBoxPredictor/BoxEncodingPredictor/flatten', graph_def) proposals = NodeDef() proposals.name = 'proposals' # Compare with ClipToWindow/Gather/Gather (NOTE: normalized) proposals.op = 'PriorBox' proposals.input.append('FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd') proposals.input.append(graph_def.node[0].name) # image_tensor proposals.addAttr('flip', False) proposals.addAttr('clip', True) proposals.addAttr('step', features_stride) proposals.addAttr('offset', 0.0) proposals.addAttr('variance', [0.1, 0.1, 0.2, 0.2]) widths = [] heights = [] for a in aspect_ratios: for s in scales: ar = np.sqrt(a) heights.append((height_stride**2) * s / ar) widths.append((width_stride**2) * s * ar) proposals.addAttr('width', widths) proposals.addAttr('height', heights) graph_def.node.extend([proposals]) # Compare with Reshape_5 detectionOut = NodeDef() detectionOut.name = 'detection_out' detectionOut.op = 'DetectionOutput' detectionOut.input.append('FirstStageBoxPredictor/BoxEncodingPredictor/flatten') detectionOut.input.append('FirstStageBoxPredictor/ClassPredictor/softmax/flatten') detectionOut.input.append('proposals') detectionOut.addAttr('num_classes', 2) detectionOut.addAttr('share_location', True) detectionOut.addAttr('background_label_id', 0) detectionOut.addAttr('nms_threshold', first_stage_nms_iou_threshold) detectionOut.addAttr('top_k', 6000) detectionOut.addAttr('code_type', "CENTER_SIZE") detectionOut.addAttr('keep_top_k', first_stage_max_proposals) detectionOut.addAttr('clip', True) graph_def.node.extend([detectionOut]) # Save as text. cropAndResizeNodesNames = [] for node in reversed(topNodes): if node.op != 'CropAndResize': graph_def.node.extend([node]) topNodes.pop() else: cropAndResizeNodesNames.append(node.name) if numCropAndResize == 1: break else: graph_def.node.extend([node]) topNodes.pop() numCropAndResize -= 1 addSoftMax('SecondStageBoxPredictor/Reshape_1', 'SecondStageBoxPredictor/Reshape_1/softmax', graph_def) addSlice('SecondStageBoxPredictor/Reshape_1/softmax', 'SecondStageBoxPredictor/Reshape_1/slice', [0, 0, 1], [-1, -1, -1], graph_def) addReshape('SecondStageBoxPredictor/Reshape_1/slice', 'SecondStageBoxPredictor/Reshape_1/Reshape', [1, -1], graph_def) # Replace Flatten subgraph onto a single node. 
for i in reversed(range(len(graph_def.node))): if graph_def.node[i].op == 'CropAndResize': graph_def.node[i].input.insert(1, 'detection_out') if graph_def.node[i].name == 'SecondStageBoxPredictor/Reshape': addConstNode('SecondStageBoxPredictor/Reshape/shape2', [1, -1, 4], graph_def) graph_def.node[i].input.pop() graph_def.node[i].input.append('SecondStageBoxPredictor/Reshape/shape2') if graph_def.node[i].name in ['SecondStageBoxPredictor/Flatten/flatten/Shape', 'SecondStageBoxPredictor/Flatten/flatten/strided_slice', 'SecondStageBoxPredictor/Flatten/flatten/Reshape/shape', 'SecondStageBoxPredictor/Flatten_1/flatten/Shape', 'SecondStageBoxPredictor/Flatten_1/flatten/strided_slice', 'SecondStageBoxPredictor/Flatten_1/flatten/Reshape/shape']: del graph_def.node[i] for node in graph_def.node: if node.name == 'SecondStageBoxPredictor/Flatten/flatten/Reshape' or \ node.name == 'SecondStageBoxPredictor/Flatten_1/flatten/Reshape': node.op = 'Flatten' node.input.pop() if node.name in ['FirstStageBoxPredictor/BoxEncodingPredictor/Conv2D', 'SecondStageBoxPredictor/BoxEncodingPredictor/MatMul']: node.addAttr('loc_pred_transposed', True) if node.name.startswith('MaxPool2D'): assert(node.op == 'MaxPool') assert(len(cropAndResizeNodesNames) == 2) node.input = [cropAndResizeNodesNames[0]] del cropAndResizeNodesNames[0] ################################################################################ ### Postprocessing ################################################################################ addSlice('detection_out', 'detection_out/slice', [0, 0, 0, 3], [-1, -1, -1, 4], graph_def) variance = NodeDef() variance.name = 'proposals/variance' variance.op = 'Const' variance.addAttr('value', [0.1, 0.1, 0.2, 0.2]) graph_def.node.extend([variance]) varianceEncoder = NodeDef() varianceEncoder.name = 'variance_encoded' varianceEncoder.op = 'Mul' varianceEncoder.input.append('SecondStageBoxPredictor/Reshape') varianceEncoder.input.append(variance.name) varianceEncoder.addAttr('axis', 2) graph_def.node.extend([varianceEncoder]) addReshape('detection_out/slice', 'detection_out/slice/reshape', [1, 1, -1], graph_def) addFlatten('variance_encoded', 'variance_encoded/flatten', graph_def) detectionOut = NodeDef() detectionOut.name = 'detection_out_final' detectionOut.op = 'DetectionOutput' detectionOut.input.append('variance_encoded/flatten') detectionOut.input.append('SecondStageBoxPredictor/Reshape_1/Reshape') detectionOut.input.append('detection_out/slice/reshape') detectionOut.addAttr('num_classes', num_classes) detectionOut.addAttr('share_location', False) detectionOut.addAttr('background_label_id', num_classes + 1) detectionOut.addAttr('nms_threshold', 0.6) detectionOut.addAttr('code_type', "CENTER_SIZE") detectionOut.addAttr('keep_top_k',100) detectionOut.addAttr('clip', True) detectionOut.addAttr('variance_encoded_in_target', True) detectionOut.addAttr('confidence_threshold', 0.3) detectionOut.addAttr('group_by_classes', False) graph_def.node.extend([detectionOut]) for node in reversed(topNodes): graph_def.node.extend([node]) if node.name.startswith('MaxPool2D'): assert(node.op == 'MaxPool') assert(len(cropAndResizeNodesNames) == 1) node.input = [cropAndResizeNodesNames[0]] for i in reversed(range(len(graph_def.node))): if graph_def.node[i].op == 'CropAndResize': graph_def.node[i].input.insert(1, 'detection_out_final') break graph_def.node[-1].name = 'detection_masks' graph_def.node[-1].op = 'Sigmoid' graph_def.node[-1].input.pop() def getUnconnectedNodes(): unconnected = [node.name for node in graph_def.node] 
for node in graph_def.node: for inp in node.input: if inp in unconnected: unconnected.remove(inp) return unconnected while True: unconnectedNodes = getUnconnectedNodes() unconnectedNodes.remove(graph_def.node[-1].name) if not unconnectedNodes: break for name in unconnectedNodes: for i in range(len(graph_def.node)): if graph_def.node[i].name == name: del graph_def.node[i] break # Save as text. graph_def.save(args.output)
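# The pruning loop above repeatedly deletes every node whose output feeds
# nothing, keeping only the declared graph output. The same idea on a toy graph
# of name -> list-of-inputs pairs (a standalone sketch, not part of the sample):

def prune(nodes, output):
    # nodes: dict mapping node name to its list of input names.
    while True:
        consumed = {inp for inputs in nodes.values() for inp in inputs}
        dead = [n for n in nodes if n != output and n not in consumed]
        if not dead:
            return nodes
        for n in dead:
            del nodes[n]

_toy = {'a': [], 'b': ['a'], 'dangling': ['a'], 'out': ['b']}
print(sorted(prune(_toy, 'out')))  # ['a', 'b', 'out']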
import cv2 as cv
import argparse
import numpy as np

parser = argparse.ArgumentParser(description=
    'Use this script to run Mask-RCNN object detection and semantic '
    'segmentation network from TensorFlow Object Detection API.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', required=True, help='Path to a .pb file with weights.')
parser.add_argument('--config', required=True, help='Path to a .pbtxt file that contains network configuration.')
parser.add_argument('--classes', help='Optional path to a text file with names of classes.')
parser.add_argument('--colors', help='Optional path to a text file with colors for every class. '
                                     'Every color is represented with three values from 0 to 255 in BGR channels order.')
parser.add_argument('--width', type=int, default=800,
                    help='Preprocess input image by resizing to a specific width.')
parser.add_argument('--height', type=int, default=800,
                    help='Preprocess input image by resizing to a specific height.')
parser.add_argument('--thr', type=float, default=0.5, help='Confidence threshold')
args = parser.parse_args()

np.random.seed(324)

# Load names of classes
classes = None
if args.classes:
    with open(args.classes, 'rt') as f:
        classes = f.read().rstrip('\n').split('\n')

# Load colors
colors = None
if args.colors:
    with open(args.colors, 'rt') as f:
        colors = [np.array(color.split(' '), np.uint8) for color in f.read().rstrip('\n').split('\n')]

legend = None
def showLegend(classes):
    global legend
    if not classes is None and legend is None:
        blockHeight = 30
        assert(len(classes) == len(colors))

        legend = np.zeros((blockHeight * len(colors), 200, 3), np.uint8)
        for i in range(len(classes)):
            block = legend[i * blockHeight:(i + 1) * blockHeight]
            block[:, :] = colors[i]
            # putText expects integer coordinates, hence the floor division.
            cv.putText(block, classes[i], (0, blockHeight // 2), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))

        cv.namedWindow('Legend', cv.WINDOW_NORMAL)
        cv.imshow('Legend', legend)

def drawBox(frame, classId, conf, left, top, right, bottom):
    # Draw a bounding box.
    cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))

    label = '%.2f' % conf

    # Print a label of class.
    if classes:
        assert(classId < len(classes))
        label = '%s: %s' % (classes[classId], label)

    labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    top = max(top, labelSize[1])
    cv.rectangle(frame, (left, top - labelSize[1]), (left + labelSize[0], top + baseLine), (255, 255, 255), cv.FILLED)
    cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

# Load a network
net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config))
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)

winName = 'Mask-RCNN in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)

cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0)

legend = None
while cv.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv.waitKey()
        break

    frameH = frame.shape[0]
    frameW = frame.shape[1]

    # Create a 4D blob from a frame.
blob = cv.dnn.blobFromImage(frame, size=(args.width, args.height), swapRB=True, crop=False) # Run a model net.setInput(blob) boxes, masks = net.forward(['detection_out_final', 'detection_masks']) numClasses = masks.shape[1] numDetections = boxes.shape[2] # Draw segmentation if not colors: # Generate colors colors = [np.array([0, 0, 0], np.uint8)] for i in range(1, numClasses + 1): colors.append((colors[i - 1] + np.random.randint(0, 256, [3], np.uint8)) / 2) del colors[0] boxesToDraw = [] for i in range(numDetections): box = boxes[0, 0, i] mask = masks[i] score = box[2] if score > args.thr: classId = int(box[1]) left = int(frameW * box[3]) top = int(frameH * box[4]) right = int(frameW * box[5]) bottom = int(frameH * box[6]) left = max(0, min(left, frameW - 1)) top = max(0, min(top, frameH - 1)) right = max(0, min(right, frameW - 1)) bottom = max(0, min(bottom, frameH - 1)) boxesToDraw.append([frame, classId, score, left, top, right, bottom]) classMask = mask[classId] classMask = cv.resize(classMask, (right - left + 1, bottom - top + 1)) mask = (classMask > 0.5) roi = frame[top:bottom+1, left:right+1][mask] frame[top:bottom+1, left:right+1][mask] = (0.7 * colors[classId] + 0.3 * roi).astype(np.uint8) for box in boxesToDraw: drawBox(*box) # Put efficiency information. t, _ = net.getPerfProfile() label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency()) cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) showLegend(classes) cv.imshow(winName, frame)
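# The per-instance masks above are low-resolution probability maps that get
# resized to the detected box and alpha-blended into the frame. A synthetic
# sketch of that step (all sizes, the box and the color are stand-ins):
import numpy as np
import cv2 as cv

_frame = np.zeros((120, 160, 3), np.uint8)
_classMask = np.random.rand(15, 15).astype(np.float32)  # stand-in 15x15 mask
_left, _top, _right, _bottom = 40, 30, 99, 79
_color = np.array([0, 200, 0], np.uint8)

_resized = cv.resize(_classMask, (_right - _left + 1, _bottom - _top + 1))
_keep = _resized > 0.5                                  # binarize probabilities
_roi = _frame[_top:_bottom + 1, _left:_right + 1][_keep]
_frame[_top:_bottom + 1, _left:_right + 1][_keep] = (0.7 * _color + 0.3 * _roi).astype(np.uint8)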
def tokenize(s): tokens = [] token = "" isString = False isComment = False for symbol in s: isComment = (isComment and symbol != '\n') or (not isString and symbol == '#') if isComment: continue if symbol == ' ' or symbol == '\t' or symbol == '\r' or symbol == '\'' or \ symbol == '\n' or symbol == ':' or symbol == '\"' or symbol == ';' or \ symbol == ',': if (symbol == '\"' or symbol == '\'') and isString: tokens.append(token) token = "" else: if isString: token += symbol elif token: tokens.append(token) token = "" isString = (symbol == '\"' or symbol == '\'') ^ isString elif symbol == '{' or symbol == '}' or symbol == '[' or symbol == ']': if token: tokens.append(token) token = "" tokens.append(symbol) else: token += symbol if token: tokens.append(token) return tokens def parseMessage(tokens, idx): msg = {} assert(tokens[idx] == '{') isArray = False while True: if not isArray: idx += 1 if idx < len(tokens): fieldName = tokens[idx] else: return None if fieldName == '}': break idx += 1 fieldValue = tokens[idx] if fieldValue == '{': embeddedMsg, idx = parseMessage(tokens, idx) if fieldName in msg: msg[fieldName].append(embeddedMsg) else: msg[fieldName] = [embeddedMsg] elif fieldValue == '[': isArray = True elif fieldValue == ']': isArray = False else: if fieldName in msg: msg[fieldName].append(fieldValue) else: msg[fieldName] = [fieldValue] return msg, idx def readTextMessage(filePath): if not filePath: return {} with open(filePath, 'rt') as f: content = f.read() tokens = tokenize('{' + content + '}') msg = parseMessage(tokens, 0) return msg[0] if msg else {} def listToTensor(values): if all([isinstance(v, float) for v in values]): dtype = 'DT_FLOAT' field = 'float_val' elif all([isinstance(v, int) for v in values]): dtype = 'DT_INT32' field = 'int_val' else: raise Exception('Wrong values types') msg = { 'tensor': { 'dtype': dtype, 'tensor_shape': { 'dim': { 'size': len(values) } } } } msg['tensor'][field] = values return msg def addConstNode(name, values, graph_def): node = NodeDef() node.name = name node.op = 'Const' node.addAttr('value', values) graph_def.node.extend([node]) def addSlice(inp, out, begins, sizes, graph_def): beginsNode = NodeDef() beginsNode.name = out + '/begins' beginsNode.op = 'Const' beginsNode.addAttr('value', begins) graph_def.node.extend([beginsNode]) sizesNode = NodeDef() sizesNode.name = out + '/sizes' sizesNode.op = 'Const' sizesNode.addAttr('value', sizes) graph_def.node.extend([sizesNode]) sliced = NodeDef() sliced.name = out sliced.op = 'Slice' sliced.input.append(inp) sliced.input.append(beginsNode.name) sliced.input.append(sizesNode.name) graph_def.node.extend([sliced]) def addReshape(inp, out, shape, graph_def): shapeNode = NodeDef() shapeNode.name = out + '/shape' shapeNode.op = 'Const' shapeNode.addAttr('value', shape) graph_def.node.extend([shapeNode]) reshape = NodeDef() reshape.name = out reshape.op = 'Reshape' reshape.input.append(inp) reshape.input.append(shapeNode.name) graph_def.node.extend([reshape]) def addSoftMax(inp, out, graph_def): softmax = NodeDef() softmax.name = out softmax.op = 'Softmax' softmax.addAttr('axis', -1) softmax.input.append(inp) graph_def.node.extend([softmax]) def addFlatten(inp, out, graph_def): flatten = NodeDef() flatten.name = out flatten.op = 'Flatten' flatten.input.append(inp) graph_def.node.extend([flatten]) class NodeDef: def __init__(self): self.input = [] self.name = "" self.op = "" self.attr = {} def addAttr(self, key, value): assert(not key in self.attr) if isinstance(value, bool): self.attr[key] = {'b': value} 
elif isinstance(value, int): self.attr[key] = {'i': value} elif isinstance(value, float): self.attr[key] = {'f': value} elif isinstance(value, str): self.attr[key] = {'s': value} elif isinstance(value, list): self.attr[key] = listToTensor(value) else: raise Exception('Unknown type of attribute ' + key) def Clear(self): self.input = [] self.name = "" self.op = "" self.attr = {} class GraphDef: def __init__(self): self.node = [] def save(self, filePath): with open(filePath, 'wt') as f: def printAttr(d, indent): indent = ' ' * indent for key, value in sorted(d.items(), key=lambda x:x[0].lower()): value = value if isinstance(value, list) else [value] for v in value: if isinstance(v, dict): f.write(indent + key + ' {\n') printAttr(v, len(indent) + 2) f.write(indent + '}\n') else: isString = False if isinstance(v, str) and not v.startswith('DT_'): try: float(v) except: isString = True if isinstance(v, bool): printed = 'true' if v else 'false' elif v == 'true' or v == 'false': printed = 'true' if v == 'true' else 'false' elif isString: printed = '\"%s\"' % v else: printed = str(v) f.write(indent + key + ': ' + printed + '\n') for node in self.node: f.write('node {\n') f.write(' name: \"%s\"\n' % node.name) f.write(' op: \"%s\"\n' % node.op) for inp in node.input: f.write(' input: \"%s\"\n' % inp) for key, value in sorted(node.attr.items(), key=lambda x:x[0].lower()): f.write(' attr {\n') f.write(' key: \"%s\"\n' % key) f.write(' value {\n') printAttr(value, 6) f.write(' }\n') f.write(' }\n') f.write('}\n') def parseTextGraph(filePath): msg = readTextMessage(filePath) graph = GraphDef() for node in msg['node']: graphNode = NodeDef() graphNode.name = node['name'][0] graphNode.op = node['op'][0] graphNode.input = node['input'] if 'input' in node else [] if 'attr' in node: for attr in node['attr']: graphNode.attr[attr['key'][0]] = attr['value'][0] graph.node.append(graphNode) return graph # Removes Identity nodes def removeIdentity(graph_def): identities = {} for node in graph_def.node: if node.op == 'Identity': identities[node.name] = node.input[0] graph_def.node.remove(node) for node in graph_def.node: for i in range(len(node.input)): if node.input[i] in identities: node.input[i] = identities[node.input[i]] def removeUnusedNodesAndAttrs(to_remove, graph_def): unusedAttrs = ['T', 'Tshape', 'N', 'Tidx', 'Tdim', 'use_cudnn_on_gpu', 'Index', 'Tperm', 'is_training', 'Tpaddings'] removedNodes = [] for i in reversed(range(len(graph_def.node))): op = graph_def.node[i].op name = graph_def.node[i].name if to_remove(name, op): if op != 'Const': removedNodes.append(name) del graph_def.node[i] else: for attr in unusedAttrs: if attr in graph_def.node[i].attr: del graph_def.node[i].attr[attr] # Remove references to removed nodes except Const nodes. for node in graph_def.node: for i in reversed(range(len(node.input))): if node.input[i] in removedNodes: del node.input[i] def writeTextGraph(modelPath, outputPath, outNodes): try: import cv2 as cv cv.dnn.writeTextGraph(modelPath, outputPath) except: import tensorflow as tf from tensorflow.tools.graph_transforms import TransformGraph with tf.gfile.FastGFile(modelPath, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) graph_def = TransformGraph(graph_def, ['image_tensor'], outNodes, ['sort_by_execution_order']) for node in graph_def.node: if node.op == 'Const': if 'value' in node.attr and node.attr['value'].tensor.tensor_content: node.attr['value'].tensor.tensor_content = b'' tf.train.write_graph(graph_def, "", outputPath, as_text=True)
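# A quick usage sketch of the text-message parser defined above (added for
# illustration): readTextMessage wraps file contents in braces exactly like
# this before handing them to parseMessage. Underscored names keep the demo
# out of star-imports of this module.
_demo = 'model { ssd { num_classes: 90 } } score_threshold: 0.3'
_msg, _ = parseMessage(tokenize('{' + _demo + '}'), 0)
assert _msg['model'][0]['ssd'][0]['num_classes'] == ['90']
assert _msg['score_threshold'] == ['0.3']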
# To use Inference Engine backend, specify location of plugins: # source /opt/intel/computer_vision_sdk/bin/setupvars.sh import cv2 as cv import numpy as np import argparse parser = argparse.ArgumentParser( description='This script is used to demonstrate OpenPose human pose estimation network ' 'from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. ' 'The sample and model are simplified and could be used for a single person on the frame.') parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera') parser.add_argument('--proto', help='Path to .prototxt') parser.add_argument('--model', help='Path to .caffemodel') parser.add_argument('--dataset', help='Specify what kind of model was trained. ' 'It could be (COCO, MPI, HAND) depends on dataset.') parser.add_argument('--thr', default=0.1, type=float, help='Threshold value for pose parts heat map') parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.') parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.') parser.add_argument('--scale', default=0.003922, type=float, help='Scale for blob.') args = parser.parse_args() if args.dataset == 'COCO': BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4, "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9, "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14, "LEye": 15, "REar": 16, "LEar": 17, "Background": 18 } POSE_PAIRS = [ ["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"], ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"], ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"], ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"] ] elif args.dataset == 'MPI': BODY_PARTS = { "Head": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4, "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9, "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "Chest": 14, "Background": 15 } POSE_PAIRS = [ ["Head", "Neck"], ["Neck", "RShoulder"], ["RShoulder", "RElbow"], ["RElbow", "RWrist"], ["Neck", "LShoulder"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"], ["Neck", "Chest"], ["Chest", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Chest", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"] ] else: assert(args.dataset == 'HAND') BODY_PARTS = { "Wrist": 0, "ThumbMetacarpal": 1, "ThumbProximal": 2, "ThumbMiddle": 3, "ThumbDistal": 4, "IndexFingerMetacarpal": 5, "IndexFingerProximal": 6, "IndexFingerMiddle": 7, "IndexFingerDistal": 8, "MiddleFingerMetacarpal": 9, "MiddleFingerProximal": 10, "MiddleFingerMiddle": 11, "MiddleFingerDistal": 12, "RingFingerMetacarpal": 13, "RingFingerProximal": 14, "RingFingerMiddle": 15, "RingFingerDistal": 16, "LittleFingerMetacarpal": 17, "LittleFingerProximal": 18, "LittleFingerMiddle": 19, "LittleFingerDistal": 20, } POSE_PAIRS = [ ["Wrist", "ThumbMetacarpal"], ["ThumbMetacarpal", "ThumbProximal"], ["ThumbProximal", "ThumbMiddle"], ["ThumbMiddle", "ThumbDistal"], ["Wrist", "IndexFingerMetacarpal"], ["IndexFingerMetacarpal", "IndexFingerProximal"], ["IndexFingerProximal", "IndexFingerMiddle"], ["IndexFingerMiddle", "IndexFingerDistal"], ["Wrist", "MiddleFingerMetacarpal"], ["MiddleFingerMetacarpal", "MiddleFingerProximal"], ["MiddleFingerProximal", "MiddleFingerMiddle"], ["MiddleFingerMiddle", "MiddleFingerDistal"], ["Wrist", 
"RingFingerMetacarpal"], ["RingFingerMetacarpal", "RingFingerProximal"], ["RingFingerProximal", "RingFingerMiddle"], ["RingFingerMiddle", "RingFingerDistal"], ["Wrist", "LittleFingerMetacarpal"], ["LittleFingerMetacarpal", "LittleFingerProximal"], ["LittleFingerProximal", "LittleFingerMiddle"], ["LittleFingerMiddle", "LittleFingerDistal"] ] inWidth = args.width inHeight = args.height inScale = args.scale net = cv.dnn.readNet(cv.samples.findFile(args.proto), cv.samples.findFile(args.model)) cap = cv.VideoCapture(args.input if args.input else 0) while cv.waitKey(1) < 0: hasFrame, frame = cap.read() if not hasFrame: cv.waitKey() break frameWidth = frame.shape[1] frameHeight = frame.shape[0] inp = cv.dnn.blobFromImage(frame, inScale, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False) net.setInput(inp) out = net.forward() assert(len(BODY_PARTS) <= out.shape[1]) points = [] for i in range(len(BODY_PARTS)): # Slice heatmap of corresponding body's part. heatMap = out[0, i, :, :] # Originally, we try to find all the local maximums. To simplify a sample # we just find a global one. However only a single pose at the same time # could be detected this way. _, conf, _, point = cv.minMaxLoc(heatMap) x = (frameWidth * point[0]) / out.shape[3] y = (frameHeight * point[1]) / out.shape[2] # Add a point if it's confidence is higher than threshold. points.append((int(x), int(y)) if conf > args.thr else None) for pair in POSE_PAIRS: partFrom = pair[0] partTo = pair[1] assert(partFrom in BODY_PARTS) assert(partTo in BODY_PARTS) idFrom = BODY_PARTS[partFrom] idTo = BODY_PARTS[partTo] if points[idFrom] and points[idTo]: cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3) cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED) cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED) t, _ = net.getPerfProfile() freq = cv.getTickFrequency() / 1000 cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0)) cv.imshow('OpenPose using OpenCV', frame)
from __future__ import print_function import cv2 as cv import numpy as np import argparse parser = argparse.ArgumentParser( description='This script is used to run style transfer models from ' 'https://github.com/jcjohnson/fast-neural-style using OpenCV') parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera') parser.add_argument('--model', help='Path to .t7 model') parser.add_argument('--width', default=-1, type=int, help='Resize input to specific width.') parser.add_argument('--height', default=-1, type=int, help='Resize input to specific height.') parser.add_argument('--median_filter', default=0, type=int, help='Kernel size of postprocessing blurring.') args = parser.parse_args() net = cv.dnn.readNetFromTorch(cv.samples.findFile(args.model)) net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) if args.input: cap = cv.VideoCapture(args.input) else: cap = cv.VideoCapture(0) cv.namedWindow('Styled image', cv.WINDOW_NORMAL) while cv.waitKey(1) < 0: hasFrame, frame = cap.read() if not hasFrame: cv.waitKey() break inWidth = args.width if args.width != -1 else frame.shape[1] inHeight = args.height if args.height != -1 else frame.shape[0] inp = cv.dnn.blobFromImage(frame, 1.0, (inWidth, inHeight), (103.939, 116.779, 123.68), swapRB=False, crop=False) net.setInput(inp) out = net.forward() out = out.reshape(3, out.shape[2], out.shape[3]) out[0] += 103.939 out[1] += 116.779 out[2] += 123.68 out /= 255 out = out.transpose(1, 2, 0) t, _ = net.getPerfProfile() freq = cv.getTickFrequency() / 1000 print(t / freq, 'ms') if args.median_filter: out = cv.medianBlur(out, args.median_filter) cv.imshow('Styled image', out)
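# Note: (103.939, 116.779, 123.68) are the standard ImageNet BGR channel means;
# they are subtracted inside blobFromImage and added back to the network output
# before the result is rescaled to [0, 1] for display.
#
# Example run, assuming the script is saved as fast_neural_style.py and a
# fast-neural-style .t7 checkpoint has been downloaded (paths are assumptions):
#
#   python fast_neural_style.py --model instance_norm/candy.t7 --input clip.mp4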
import cv2 as cv import argparse import numpy as np import sys import time from threading import Thread if sys.version_info[0] == 2: import Queue as queue else: import queue from common import * from tf_text_graph_common import readTextMessage from tf_text_graph_ssd import createSSDGraph from tf_text_graph_faster_rcnn import createFasterRCNNGraph backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD) parser = argparse.ArgumentParser(add_help=False) parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'), help='An optional path to file with preprocessing parameters.') parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.') parser.add_argument('--out_tf_graph', default='graph.pbtxt', help='For models from TensorFlow Object Detection API, you may ' 'pass a .config file which was used for training through --config ' 'argument. This way an additional .pbtxt file with TensorFlow graph will be created.') parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet', 'dldt'], help='Optional name of an origin framework of the model. ' 'Detect it automatically if it does not set.') parser.add_argument('--thr', type=float, default=0.5, help='Confidence threshold') parser.add_argument('--nms', type=float, default=0.4, help='Non-maximum suppression threshold') parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: " "%d: automatically (by default), " "%d: Halide language (http://halide-lang.org/), " "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " "%d: OpenCV implementation" % backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' '%d: CPU target (by default), ' '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: VPU' % targets) parser.add_argument('--async', type=int, default=0, dest='asyncN', help='Number of asynchronous forwards at the same time. ' 'Choose 0 for synchronous mode') args, _ = parser.parse_known_args() add_preproc_args(args.zoo, parser, 'object_detection') parser = argparse.ArgumentParser(parents=[parser], description='Use this script to run object detection deep learning networks using OpenCV.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) args = parser.parse_args() args.model = findFile(args.model) args.config = findFile(args.config) args.classes = findFile(args.classes) # If config specified, try to load it as TensorFlow Object Detection API's pipeline. 
config = readTextMessage(args.config) if 'model' in config: print('TensorFlow Object Detection API config detected') if 'ssd' in config['model'][0]: print('Preparing text graph representation for SSD model: ' + args.out_tf_graph) createSSDGraph(args.model, args.config, args.out_tf_graph) args.config = args.out_tf_graph elif 'faster_rcnn' in config['model'][0]: print('Preparing text graph representation for Faster-RCNN model: ' + args.out_tf_graph) createFasterRCNNGraph(args.model, args.config, args.out_tf_graph) args.config = args.out_tf_graph # Load names of classes classes = None if args.classes: with open(args.classes, 'rt') as f: classes = f.read().rstrip('\n').split('\n') # Load a network net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config), args.framework) net.setPreferableBackend(args.backend) net.setPreferableTarget(args.target) outNames = net.getUnconnectedOutLayersNames() confThreshold = args.thr nmsThreshold = args.nms def postprocess(frame, outs): frameHeight = frame.shape[0] frameWidth = frame.shape[1] def drawPred(classId, conf, left, top, right, bottom): # Draw a bounding box. cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0)) label = '%.2f' % conf # Print a label of class. if classes: assert(classId < len(classes)) label = '%s: %s' % (classes[classId], label) labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1) top = max(top, labelSize[1]) cv.rectangle(frame, (left, top - labelSize[1]), (left + labelSize[0], top + baseLine), (255, 255, 255), cv.FILLED) cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0)) layerNames = net.getLayerNames() lastLayerId = net.getLayerId(layerNames[-1]) lastLayer = net.getLayer(lastLayerId) classIds = [] confidences = [] boxes = [] if lastLayer.type == 'DetectionOutput': # Network produces output blob with a shape 1x1xNx7 where N is a number of # detections and an every detection is a vector of values # [batchId, classId, confidence, left, top, right, bottom] for out in outs: for detection in out[0, 0]: confidence = detection[2] if confidence > confThreshold: left = int(detection[3]) top = int(detection[4]) right = int(detection[5]) bottom = int(detection[6]) width = right - left + 1 height = bottom - top + 1 if width <= 2 or height <= 2: left = int(detection[3] * frameWidth) top = int(detection[4] * frameHeight) right = int(detection[5] * frameWidth) bottom = int(detection[6] * frameHeight) width = right - left + 1 height = bottom - top + 1 classIds.append(int(detection[1]) - 1) # Skip background label confidences.append(float(confidence)) boxes.append([left, top, width, height]) elif lastLayer.type == 'Region': # Network produces output blob with a shape NxC where N is a number of # detected objects and C is a number of classes + 4 where the first 4 # numbers are [center_x, center_y, width, height] classIds = [] confidences = [] boxes = [] for out in outs: for detection in out: scores = detection[5:] classId = np.argmax(scores) confidence = scores[classId] if confidence > confThreshold: center_x = int(detection[0] * frameWidth) center_y = int(detection[1] * frameHeight) width = int(detection[2] * frameWidth) height = int(detection[3] * frameHeight) left = int(center_x - width / 2) top = int(center_y - height / 2) classIds.append(classId) confidences.append(float(confidence)) boxes.append([left, top, width, height]) else: print('Unknown output layer type: ' + lastLayer.type) exit() indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, 
nmsThreshold) for i in indices: i = i[0] box = boxes[i] left = box[0] top = box[1] width = box[2] height = box[3] drawPred(classIds[i], confidences[i], left, top, left + width, top + height) # Process inputs winName = 'Deep learning object detection in OpenCV' cv.namedWindow(winName, cv.WINDOW_NORMAL) def callback(pos): global confThreshold confThreshold = pos / 100.0 cv.createTrackbar('Confidence threshold, %', winName, int(confThreshold * 100), 99, callback) cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0) class QueueFPS(queue.Queue): def __init__(self): queue.Queue.__init__(self) self.startTime = 0 self.counter = 0 def put(self, v): queue.Queue.put(self, v) self.counter += 1 if self.counter == 1: self.startTime = time.time() def getFPS(self): return self.counter / (time.time() - self.startTime) process = True # # Frames capturing thread # framesQueue = QueueFPS() def framesThreadBody(): global framesQueue, process while process: hasFrame, frame = cap.read() if not hasFrame: break framesQueue.put(frame) # # Frames processing thread # processedFramesQueue = queue.Queue() predictionsQueue = QueueFPS() def processingThreadBody(): global processedFramesQueue, predictionsQueue, args, process futureOutputs = [] while process: # Get a next frame frame = None try: frame = framesQueue.get_nowait() if args.asyncN: if len(futureOutputs) == args.asyncN: frame = None # Skip the frame else: framesQueue.queue.clear() # Skip the rest of frames except queue.Empty: pass if not frame is None: frameHeight = frame.shape[0] frameWidth = frame.shape[1] # Create a 4D blob from a frame. inpWidth = args.width if args.width else frameWidth inpHeight = args.height if args.height else frameHeight blob = cv.dnn.blobFromImage(frame, size=(inpWidth, inpHeight), swapRB=args.rgb, ddepth=cv.CV_8U) processedFramesQueue.put(frame) # Run a model net.setInput(blob, scalefactor=args.scale, mean=args.mean) if net.getLayer(0).outputNameToIndex('im_info') != -1: # Faster-RCNN or R-FCN frame = cv.resize(frame, (inpWidth, inpHeight)) net.setInput(np.array([[inpHeight, inpWidth, 1.6]], dtype=np.float32), 'im_info') if args.asyncN: futureOutputs.append(net.forwardAsync()) else: outs = net.forward(outNames) predictionsQueue.put(np.copy(outs)) while futureOutputs and futureOutputs[0].wait_for(0): out = futureOutputs[0].get() predictionsQueue.put(np.copy([out])) del futureOutputs[0] framesThread = Thread(target=framesThreadBody) framesThread.start() processingThread = Thread(target=processingThreadBody) processingThread.start() # # Postprocessing and rendering loop # while cv.waitKey(1) < 0: try: # Request prediction first because they put after frames outs = predictionsQueue.get_nowait() frame = processedFramesQueue.get_nowait() postprocess(frame, outs) # Put efficiency information. if predictionsQueue.counter > 1: label = 'Camera: %.2f FPS' % (framesQueue.getFPS()) cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) label = 'Network: %.2f FPS' % (predictionsQueue.getFPS()) cv.putText(frame, label, (0, 30), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) label = 'Skipped frames: %d' % (framesQueue.counter - predictionsQueue.counter) cv.putText(frame, label, (0, 45), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) cv.imshow(winName, frame) except queue.Empty: pass process = False framesThread.join() processingThread.join()
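# Sketch of a typical run, assuming the script is saved as object_detection.py.
# The model/config/preprocessing options are normally filled in from an alias
# in models.yml via add_preproc_args; the file names below are placeholders:
#
#   python object_detection.py --input road.mp4 --thr 0.5 --nms 0.4 \
#          --model frozen_inference_graph.pb --config graph.pbtxt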
# This file is part of OpenCV project.
# It is subject to the license terms in the LICENSE file found in the top-level directory
# of this distribution and at http://opencv.org/license.html.
#
# Copyright (C) 2017, Intel Corporation, all rights reserved.
# Third party copyrights are property of their respective owners.
import tensorflow as tf
import struct
import argparse
import numpy as np

parser = argparse.ArgumentParser(description='Convert weights of a frozen TensorFlow graph to fp16.')
parser.add_argument('--input', required=True, help='Path to frozen graph.')
parser.add_argument('--output', required=True, help='Path to output graph.')
parser.add_argument('--ops', default=['Conv2D', 'MatMul'], nargs='+',
                    help='List of ops whose weights are converted.')
args = parser.parse_args()

DT_FLOAT = 1
DT_HALF = 19

# In frozen graphs, every node that uses weights is connected to a Const node
# through an Identity node (usually named after the weights with a '/read'
# suffix). We replace those Identity nodes with Cast nodes.

# Load the model (binary protobuf, so read in 'rb' mode).
with tf.gfile.FastGFile(args.input, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

# Collect all inputs of the desired ops.
inputs = []
for node in graph_def.node:
    if node.op in args.ops:
        inputs += node.input

weightsNodes = []
for node in graph_def.node:
    # Among the collected inputs, keep only the Identity nodes.
    if node.name in inputs and node.op == 'Identity' and node.attr['T'].type == DT_FLOAT:
        weightsNodes.append(node.input[0])
        # Replace the Identity node with a Cast (fp16 -> fp32).
        node.op = 'Cast'
        node.attr['DstT'].type = DT_FLOAT
        node.attr['SrcT'].type = DT_HALF
        del node.attr['T']
        del node.attr['_class']

# Convert the weights themselves to half precision.
for node in graph_def.node:
    if node.name in weightsNodes:
        node.attr['dtype'].type = DT_HALF
        node.attr['value'].tensor.dtype = DT_HALF
        floats = node.attr['value'].tensor.tensor_content
        floats = struct.unpack('f' * (len(floats) // 4), floats)  # 4 bytes per fp32 value
        halfs = np.array(floats).astype(np.float16).view(np.uint16)
        node.attr['value'].tensor.tensor_content = struct.pack('H' * len(halfs), *halfs)

tf.train.write_graph(graph_def, "", args.output, as_text=False)
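# Standalone sanity check of the fp32 -> fp16 repacking performed above; an
# illustrative sketch that needs only struct and numpy, not TensorFlow:
#
#   import struct
#   import numpy as np
#   raw = struct.pack('f' * 3, 0.5, -1.25, 3.0)           # fp32 tensor_content
#   floats = struct.unpack('f' * (len(raw) // 4), raw)
#   halfs = np.array(floats).astype(np.float16).view(np.uint16)
#   packed = struct.pack('H' * len(halfs), *halfs)        # fp16 tensor_content
#   assert np.frombuffer(packed, np.uint16).view(np.float16).tolist() == [0.5, -1.25, 3.0]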
import cv2 as cv
import argparse
import numpy as np
import sys

from common import *

backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE,
            cv.dnn.DNN_BACKEND_OPENCV)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)

parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
                    help='An optional path to a file with preprocessing parameters.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet'],
                    help='Optional name of the origin framework of the model. '
                         'It is detected automatically if not set.')
parser.add_argument('--colors', help='Optional path to a text file with a color for every class. '
                                     'Every color is represented by three values from 0 to 255 in BGR channel order.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
                    help="Choose one of computation backends: "
                         "%d: automatically (by default), "
                         "%d: Halide language (http://halide-lang.org/), "
                         "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
                         "%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
                    help='Choose one of target computation devices: '
                         '%d: CPU target (by default), '
                         '%d: OpenCL, '
                         '%d: OpenCL fp16 (half-float precision), '
                         '%d: VPU' % targets)
args, _ = parser.parse_known_args()
add_preproc_args(args.zoo, parser, 'segmentation')
parser = argparse.ArgumentParser(parents=[parser],
                                 description='Use this script to run semantic segmentation deep learning networks using OpenCV.',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args()

args.model = findFile(args.model)
args.config = findFile(args.config)
args.classes = findFile(args.classes)

np.random.seed(324)

# Load names of classes
classes = None
if args.classes:
    with open(args.classes, 'rt') as f:
        classes = f.read().rstrip('\n').split('\n')

# Load colors
colors = None
if args.colors:
    with open(args.colors, 'rt') as f:
        colors = [np.array(color.split(' '), np.uint8) for color in f.read().rstrip('\n').split('\n')]

legend = None
def showLegend(classes):
    global legend
    if classes is not None and legend is None:
        blockHeight = 30
        assert(len(classes) == len(colors))

        legend = np.zeros((blockHeight * len(colors), 200, 3), np.uint8)
        for i in range(len(classes)):
            block = legend[i * blockHeight:(i + 1) * blockHeight]
            block[:, :] = colors[i]
            # putText expects integer coordinates, hence the floor division.
            cv.putText(block, classes[i], (0, blockHeight // 2), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))

        cv.namedWindow('Legend', cv.WINDOW_NORMAL)
        cv.imshow('Legend', legend)
        classes = None

# Load a network
net = cv.dnn.readNet(args.model, args.config, args.framework)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)

winName = 'Deep learning semantic segmentation in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)

cap = cv.VideoCapture(args.input if args.input else 0)
legend = None
while cv.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv.waitKey()
        break

    frameHeight = frame.shape[0]
    frameWidth = frame.shape[1]

    # Create a 4D blob from a frame.
inpWidth = args.width if args.width else frameWidth inpHeight = args.height if args.height else frameHeight blob = cv.dnn.blobFromImage(frame, args.scale, (inpWidth, inpHeight), args.mean, args.rgb, crop=False) # Run a model net.setInput(blob) score = net.forward() numClasses = score.shape[1] height = score.shape[2] width = score.shape[3] # Draw segmentation if not colors: # Generate colors colors = [np.array([0, 0, 0], np.uint8)] for i in range(1, numClasses): colors.append((colors[i - 1] + np.random.randint(0, 256, [3], np.uint8)) / 2) classIds = np.argmax(score[0], axis=0) segm = np.stack([colors[idx] for idx in classIds.flatten()]) segm = segm.reshape(height, width, 3) segm = cv.resize(segm, (frameWidth, frameHeight), interpolation=cv.INTER_NEAREST) frame = (0.1 * frame + 0.9 * segm).astype(np.uint8) # Put efficiency information. t, _ = net.getPerfProfile() label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency()) cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) showLegend(classes) cv.imshow(winName, frame)
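# Example run, assuming the script is saved as segmentation.py; the model,
# config and class/color file names are placeholders (preprocessing values
# normally come from an alias in models.yml):
#
#   python segmentation.py --model fcn8s-heavy-pascal.caffemodel \
#          --config fcn8s-heavy-pascal.prototxt \
#          --classes pascal-classes.txt --colors pascal-colors.txt \
#          --input street.png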
#!/usr/bin/env python from __future__ import print_function import hashlib import time import sys import xml.etree.ElementTree as ET if sys.version_info[0] < 3: from urllib2 import urlopen else: from urllib.request import urlopen class HashMismatchException(Exception): def __init__(self, expected, actual): Exception.__init__(self) self.expected = expected self.actual = actual def __str__(self): return 'Hash mismatch: {} vs {}'.format(self.expected, self.actual) class MetalinkDownloader(object): BUFSIZE = 10*1024*1024 NS = {'ml': 'urn:ietf:params:xml:ns:metalink'} tick = 0 def download(self, metalink_file): status = True for file_elem in ET.parse(metalink_file).getroot().findall('ml:file', self.NS): url = file_elem.find('ml:url', self.NS).text fname = file_elem.attrib['name'] hash_sum = file_elem.find('ml:hash', self.NS).text print('*** {}'.format(fname)) try: self.verify(hash_sum, fname) except Exception as ex: print(' {}'.format(ex)) try: print(' {}'.format(url)) with open(fname, 'wb') as file_stream: self.buffered_read(urlopen(url), file_stream.write) self.verify(hash_sum, fname) except Exception as ex: print(' {}'.format(ex)) print(' FAILURE') status = False continue print(' SUCCESS') return status def print_progress(self, msg, timeout = 0): if time.time() - self.tick > timeout: print(msg, end='') sys.stdout.flush() self.tick = time.time() def buffered_read(self, in_stream, processing): self.print_progress(' >') while True: buf = in_stream.read(self.BUFSIZE) if not buf: break processing(buf) self.print_progress('>', 5) print(' done') def verify(self, hash_sum, fname): sha = hashlib.sha1() with open(fname, 'rb') as file_stream: self.buffered_read(file_stream, sha.update) if hash_sum != sha.hexdigest(): raise HashMismatchException(hash_sum, sha.hexdigest()) if __name__ == '__main__': sys.exit(0 if MetalinkDownloader().download('weights.meta4') else 1)
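# The downloader above expects a Metalink v4 (RFC 5854) file named
# weights.meta4 in the working directory; a minimal sketch of its structure
# (file name, URL and hash are placeholders):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <metalink xmlns="urn:ietf:params:xml:ns:metalink">
#     <file name="example.caffemodel">
#       <hash type="sha-1">da39a3ee5e6b4b0d3255bfef95601890afd80709</hash>
#       <url>http://example.com/example.caffemodel</url>
#     </file>
#   </metalink>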
""" This code adds Python/Java signatures to the docs. TODO: Do the same thing for Java * using javadoc/ get all the methods/classes/constants to a json file TODO: * clarify when there are several C++ signatures corresponding to a single Python function. i.e: calcHist(): http://docs.opencv.org/3.2.0/d6/dc7/group__imgproc__hist.html#ga4b2b5fd75503ff9e6844cc4dcdaed35d * clarify special case: http://docs.opencv.org/3.2.0/db/de0/group__core__utils.html#ga4910d7f86336cd4eff9dd05575667e41 """ from __future__ import print_function import sys sys.dont_write_bytecode = True # Don't generate .pyc files / __pycache__ directories import os from pprint import pprint import re import logging import json import html_functions import doxygen_scan loglevel=os.environ.get("LOGLEVEL", None) if loglevel: logging.basicConfig(level=loglevel) ROOT_DIR = sys.argv[1] PYTHON_SIGNATURES_FILE = sys.argv[2] JAVA_OR_PYTHON = sys.argv[3] ADD_JAVA = False ADD_PYTHON = False if JAVA_OR_PYTHON == "python": ADD_PYTHON = True python_signatures = dict() with open(PYTHON_SIGNATURES_FILE, "rt") as f: python_signatures = json.load(f) print("Loaded Python signatures: %d" % len(python_signatures)) import xml.etree.ElementTree as ET root = ET.parse(ROOT_DIR + 'opencv.tag') files_dict = {} # constants and function from opencv.tag namespaces = root.findall("./compound[@kind='namespace']") #print("Found {} namespaces".format(len(namespaces))) for ns in namespaces: ns_name = ns.find("./name").text #print('NS: {}'.format(ns_name)) doxygen_scan.scan_namespace_constants(ns, ns_name, files_dict) doxygen_scan.scan_namespace_functions(ns, ns_name, files_dict) # class methods from opencv.tag classes = root.findall("./compound[@kind='class']") #print("Found {} classes".format(len(classes))) for c in classes: c_name = c.find("./name").text file = c.find("./filename").text #print('Class: {} => {}'.format(c_name, file)) doxygen_scan.scan_class_methods(c, c_name, files_dict) print('Doxygen files to scan: %s' % len(files_dict)) files_processed = 0 files_skipped = 0 symbols_processed = 0 for file in files_dict: #if file != "dd/d9e/classcv_1_1VideoWriter.html": #if file != "d4/d86/group__imgproc__filter.html": #if file != "df/dfb/group__imgproc__object.html": # continue #print('File: ' + file) anchor_list = files_dict[file] active_anchors = [a for a in anchor_list if a.cppname in python_signatures] if len(active_anchors) == 0: # no linked Python symbols #print('Skip: ' + file) files_skipped = files_skipped + 1 continue active_anchors_dict = {a.anchor: a for a in active_anchors} if len(active_anchors_dict) != len(active_anchors): logging.info('Duplicate entries detected: %s -> %s (%s)' % (len(active_anchors), len(active_anchors_dict), file)) files_processed = files_processed + 1 #pprint(active_anchors) symbols_processed = symbols_processed + len(active_anchors_dict) logging.info('File: %r' % file) html_functions.insert_python_signatures(python_signatures, active_anchors_dict, ROOT_DIR + file) print('Done (processed files %d, symbols %d, skipped %d files)' % (files_processed, symbols_processed, files_skipped))
from __future__ import print_function import sys import logging import os import re from pprint import pprint import traceback try: import bs4 from bs4 import BeautifulSoup except ImportError: raise ImportError('Error: ' 'Install BeautifulSoup (bs4) for adding' ' Python & Java signatures documentation') def load_html_file(file_dir): """ Uses BeautifulSoup to load an html """ with open(file_dir, 'rb') as fp: data = fp.read() if os.name == 'nt' or sys.version_info[0] == 3: data = data.decode(encoding='utf-8', errors='strict') data = re.sub(r'(\>)([ ]+)', lambda match: match.group(1) + ('!space!' * len(match.group(2))), data) data = re.sub(r'([ ]+)(\<)', lambda match: ('!space!' * len(match.group(1))) + match.group(2), data) if os.name == 'nt' or sys.version_info[0] == 3: data = data.encode('utf-8', 'ignore') soup = BeautifulSoup(data, 'html.parser') return soup def update_html(file, soup): s = str(soup) s = s.replace('!space!', ' ') if os.name == 'nt' or sys.version_info[0] == 3: s = s.encode('utf-8', 'ignore') with open(file, 'wb') as f: f.write(s) def insert_python_signatures(python_signatures, symbols_dict, filepath): soup = load_html_file(filepath) entries = soup.find_all(lambda tag: tag.name == "a" and tag.has_attr('id')) for e in entries: anchor = e['id'] if anchor in symbols_dict: s = symbols_dict[anchor] logging.info('Process: %r' % s) if s.type == 'fn' or s.type == 'method': process_fn(soup, e, python_signatures[s.cppname], s) elif s.type == 'const': process_const(soup, e, python_signatures[s.cppname], s) else: logging.error('unsupported type: %s' % s); update_html(filepath, soup) def process_fn(soup, anchor, python_signature, symbol): try: r = anchor.find_next_sibling(class_='memitem').find(class_='memproto').find('table') insert_python_fn_signature(soup, r, python_signature, symbol) except: logging.error("Can't process: %s" % symbol) traceback.print_exc() pprint(anchor) def process_const(soup, anchor, python_signature, symbol): try: #pprint(anchor.parent) description = append(soup.new_tag('div', **{'class' : ['python_language']}), 'Python: ' + python_signature[0]['name']) old = anchor.find_next_sibling('div', class_='python_language') if old is None: anchor.parent.append(description) else: old.replace_with(description) #pprint(anchor.parent) except: logging.error("Can't process: %s" % symbol) traceback.print_exc() pprint(anchor) def insert_python_fn_signature(soup, table, variants, symbol): description = create_python_fn_description(soup, variants) description['class'] = 'python_language' soup = insert_or_replace(table, description, 'table', 'python_language') return soup def create_python_fn_description(soup, variants): language = 'Python:' table = soup.new_tag('table') heading_row = soup.new_tag('th') table.append( append(soup.new_tag('tr'), append(soup.new_tag('th', colspan=999, style="text-align:left"), language))) for v in variants: #logging.debug(v) add_signature_to_table(soup, table, v, language, type) #print(table) return table def add_signature_to_table(soup, table, signature, language, type): """ Add a signature to an html table""" row = soup.new_tag('tr') row.append(soup.new_tag('td', style='width: 20px;')) if 'ret' in signature: row.append(append(soup.new_tag('td'), signature['ret'])) row.append(append(soup.new_tag('td'), '=')) else: row.append(soup.new_tag('td')) # return values row.append(soup.new_tag('td')) # '=' row.append(append(soup.new_tag('td'), signature['name'] + '(')) row.append(append(soup.new_tag('td', **{'class': 'paramname'}), signature['arg'])) 
    row.append(append(soup.new_tag('td'), ')')))
    table.append(row)

def append(target, obj):
    """Append obj to target and return target (helper for chained tag building)."""
    target.append(obj)
    return target

def insert_or_replace(element_before, new_element, tag, tag_class):
    """Insert new_element right after element_before, or replace an existing
    sibling that already has the given tag and class."""
    old = element_before.find_next_sibling(tag, class_=tag_class)
    if old is None:
        element_before.insert_after(new_element)
    else:
        old.replace_with(new_element)
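# Tiny illustration of insert_or_replace() above (a sketch; requires bs4):
#
#   soup = BeautifulSoup('<div><table class="memname"></table></div>', 'html.parser')
#   new = soup.new_tag('table', **{'class': 'python_language'})
#   insert_or_replace(soup.table, new, 'table', 'python_language')
#   # the first call inserts the new table; a second call would replace it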
import traceback class Symbol(object): def __init__(self, anchor, type, cppname): self.anchor = anchor self.type = type self.cppname = cppname #if anchor == 'ga586ebfb0a7fb604b35a23d85391329be': # print(repr(self)) # traceback.print_stack() def __repr__(self): return '%s:%s@%s' % (self.type, self.cppname, self.anchor) def add_to_file(files_dict, file, anchor): anchors = files_dict.setdefault(file, []) anchors.append(anchor) def scan_namespace_constants(ns, ns_name, files_dict): constants = ns.findall("./member[@kind='enumvalue']") for c in constants: c_name = c.find("./name").text name = ns_name + '::' + c_name file = c.find("./anchorfile").text anchor = c.find("./anchor").text #print(' CONST: {} => {}#{}'.format(name, file, anchor)) add_to_file(files_dict, file, Symbol(anchor, "const", name)) def scan_namespace_functions(ns, ns_name, files_dict): functions = ns.findall("./member[@kind='function']") for f in functions: f_name = f.find("./name").text name = ns_name + '::' + f_name file = f.find("./anchorfile").text anchor = f.find("./anchor").text #print(' FN: {} => {}#{}'.format(name, file, anchor)) add_to_file(files_dict, file, Symbol(anchor, "fn", name)) def scan_class_methods(c, c_name, files_dict): methods = c.findall("./member[@kind='function']") for m in methods: m_name = m.find("./name").text name = c_name + '::' + m_name file = m.find("./anchorfile").text anchor = m.find("./anchor").text #print(' Method: {} => {}#{}'.format(name, file, anchor)) add_to_file(files_dict, file, Symbol(anchor, "method", name))
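# These helpers are driven from a doxygen tag file, mirroring the main script:
#
#   import xml.etree.ElementTree as ET
#   root = ET.parse('opencv.tag').getroot()
#   files_dict = {}
#   for ns in root.findall("./compound[@kind='namespace']"):
#       scan_namespace_functions(ns, ns.find('./name').text, files_dict)
#   # files_dict now maps each HTML file to its list of Symbol anchors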
#!/usr/bin/env python """gen_pattern.py Usage example: python gen_pattern.py -o out.svg -r 11 -c 8 -T circles -s 20.0 -R 5.0 -u mm -w 216 -h 279 -o, --output - output file (default out.svg) -r, --rows - pattern rows (default 11) -c, --columns - pattern columns (default 8) -T, --type - type of pattern, circles, acircles, checkerboard (default circles) -s, --square_size - size of squares in pattern (default 20.0) -R, --radius_rate - circles_radius = square_size/radius_rate (default 5.0) -u, --units - mm, inches, px, m (default mm) -w, --page_width - page width in units (default 216) -h, --page_height - page height in units (default 279) -a, --page_size - page size (default A4), supersedes -h -w arguments -H, --help - show help """ from svgfig import * import sys import getopt class PatternMaker: def __init__(self, cols,rows,output,units,square_size,radius_rate,page_width,page_height): self.cols = cols self.rows = rows self.output = output self.units = units self.square_size = square_size self.radius_rate = radius_rate self.width = page_width self.height = page_height self.g = SVG("g") # the svg group container def makeCirclesPattern(self): spacing = self.square_size r = spacing / self.radius_rate for x in range(1,self.cols+1): for y in range(1,self.rows+1): dot = SVG("circle", cx=x * spacing, cy=y * spacing, r=r, fill="black", stroke="none") self.g.append(dot) def makeACirclesPattern(self): spacing = self.square_size r = spacing / self.radius_rate for i in range(0,self.rows): for j in range(0,self.cols): dot = SVG("circle", cx= ((j*2 + i%2)*spacing) + spacing, cy=self.height - (i * spacing + spacing), r=r, fill="black", stroke="none") self.g.append(dot) def makeCheckerboardPattern(self): spacing = self.square_size xspacing = (self.width - self.cols * self.square_size) / 2.0 yspacing = (self.height - self.rows * self.square_size) / 2.0 for x in range(0,self.cols): for y in range(0,self.rows): if x%2 == y%2: square = SVG("rect", x=x * spacing + xspacing, y=y * spacing + yspacing, width=spacing, height=spacing, fill="black", stroke="none") self.g.append(square) def save(self): c = canvas(self.g,width="%d%s"%(self.width,self.units),height="%d%s"%(self.height,self.units),viewBox="0 0 %d %d"%(self.width,self.height)) c.save(self.output) def main(): # parse command line options, TODO use argparse for better doc try: opts, args = getopt.getopt(sys.argv[1:], "Ho:c:r:T:u:s:R:w:h:a:", ["help","output=","columns=","rows=", "type=","units=","square_size=","radius_rate=", "page_width=","page_height=", "page_size="]) except getopt.error as msg: print(msg) print("for help use --help") sys.exit(2) output = "out.svg" columns = 8 rows = 11 p_type = "circles" units = "mm" square_size = 20.0 radius_rate = 5.0 page_size = "A4" # page size dict (ISO standard, mm) for easy lookup. 
    # format - size: [width, height]
    page_sizes = {"A0": [840, 1188], "A1": [594, 840], "A2": [420, 594],
                  "A3": [297, 420], "A4": [210, 297], "A5": [148, 210]}
    page_width = page_sizes[page_size.upper()][0]
    page_height = page_sizes[page_size.upper()][1]
    # process options
    for o, a in opts:
        if o in ("-H", "--help"):
            print(__doc__)
            sys.exit(0)
        elif o in ("-r", "--rows"):
            rows = int(a)
        elif o in ("-c", "--columns"):
            columns = int(a)
        elif o in ("-o", "--output"):
            output = a
        elif o in ("-T", "--type"):
            p_type = a
        elif o in ("-u", "--units"):
            units = a
        elif o in ("-s", "--square_size"):
            square_size = float(a)
        elif o in ("-R", "--radius_rate"):
            radius_rate = float(a)
        elif o in ("-w", "--page_width"):
            page_width = float(a)
        elif o in ("-h", "--page_height"):
            page_height = float(a)
        elif o in ("-a", "--page_size"):
            units = "mm"
            page_size = a.upper()
            page_width = page_sizes[page_size][0]
            page_height = page_sizes[page_size][1]
    pm = PatternMaker(columns, rows, output, units, square_size, radius_rate, page_width, page_height)
    # dict for easy lookup of pattern type
    mp = {"circles": pm.makeCirclesPattern, "acircles": pm.makeACirclesPattern,
          "checkerboard": pm.makeCheckerboardPattern}
    mp[p_type]()
    # this should save the pattern to the output file
    pm.save()

if __name__ == "__main__":
    main()
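# PatternMaker can also be used programmatically; this mirrors the CLI defaults
# (an A4 page in mm with an 8x11 checkerboard of 20 mm squares):
#
#   pm = PatternMaker(8, 11, "out.svg", "mm", 20.0, 5.0, 210, 297)
#   pm.makeCheckerboardPattern()
#   pm.save()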
# svgfig.py copyright (C) 2008 Jim Pivarski <[email protected]> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # # Full licence is in the file COPYING and at http://www.gnu.org/copyleft/gpl.html import re, codecs, os, platform, copy, itertools, math, cmath, random, sys, copy _epsilon = 1e-5 if sys.version_info >= (3,0): long = int basestring = (str,bytes) # Fix Python 2.x. try: UNICODE_EXISTS = bool(type(unicode)) except NameError: unicode = lambda s: str(s) try: xrange # Python 2 except NameError: xrange = range # Python 3 if re.search("windows", platform.system(), re.I): try: import _winreg _default_directory = _winreg.QueryValueEx(_winreg.OpenKey(_winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\Current Version\Explorer\Shell Folders"), "Desktop")[0] # tmpdir = _winreg.QueryValueEx(_winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Environment"), "TEMP")[0] # if tmpdir[0:13] != "%USERPROFILE%": # tmpdir = os.path.expanduser("~") + tmpdir[13:] except: _default_directory = os.path.expanduser("~") + os.sep + "Desktop" _default_fileName = "tmp.svg" _hacks = {} _hacks["inkscape-text-vertical-shift"] = False def rgb(r, g, b, maximum=1.): """Create an SVG color string "#xxyyzz" from r, g, and b. r,g,b = 0 is black and r,g,b = maximum is white. """ return "#%02x%02x%02x" % (max(0, min(r*255./maximum, 255)), max(0, min(g*255./maximum, 255)), max(0, min(b*255./maximum, 255))) def attr_preprocess(attr): attrCopy = attr.copy() for name in attr.keys(): name_colon = re.sub("__", ":", name) if name_colon != name: attrCopy[name_colon] = attrCopy[name] del attrCopy[name] name = name_colon name_dash = re.sub("_", "-", name) if name_dash != name: attrCopy[name_dash] = attrCopy[name] del attrCopy[name] name = name_dash return attrCopy class SVG: """A tree representation of an SVG image or image fragment. SVG(t, sub, sub, sub..., attribute=value) t required SVG type name sub optional list nested SVG elements or text/Unicode attribute=value pairs optional keywords SVG attributes In attribute names, "__" becomes ":" and "_" becomes "-". SVG in XML <g id="mygroup" fill="blue"> <rect x="1" y="1" width="2" height="2" /> <rect x="3" y="3" width="2" height="2" /> </g> SVG in Python >>> svg = SVG("g", SVG("rect", x=1, y=1, width=2, height=2), \ ... SVG("rect", x=3, y=3, width=2, height=2), \ ... id="mygroup", fill="blue") Sub-elements and attributes may be accessed through tree-indexing: >>> svg = SVG("text", SVG("tspan", "hello there"), stroke="none", fill="black") >>> svg[0] <tspan (1 sub) /> >>> svg[0, 0] 'hello there' >>> svg["fill"] 'black' Iteration is depth-first: >>> svg = SVG("g", SVG("g", SVG("line", x1=0, y1=0, x2=1, y2=1)), \ ... SVG("text", SVG("tspan", "hello again"))) ... >>> for ti, s in svg: ... print ti, repr(s) ... 
(0,) <g (1 sub) /> (0, 0) <line x2=1 y1=0 x1=0 y2=1 /> (0, 0, 'x2') 1 (0, 0, 'y1') 0 (0, 0, 'x1') 0 (0, 0, 'y2') 1 (1,) <text (1 sub) /> (1, 0) <tspan (1 sub) /> (1, 0, 0) 'hello again' Use "print" to navigate: >>> print svg None <g (2 sub) /> [0] <g (1 sub) /> [0, 0] <line x2=1 y1=0 x1=0 y2=1 /> [1] <text (1 sub) /> [1, 0] <tspan (1 sub) /> """ def __init__(self, *t_sub, **attr): if len(t_sub) == 0: raise TypeError( "SVG element must have a t (SVG type)") # first argument is t (SVG type) self.t = t_sub[0] # the rest are sub-elements self.sub = list(t_sub[1:]) # keyword arguments are attributes # need to preprocess to handle differences between SVG and Python syntax self.attr = attr_preprocess(attr) def __getitem__(self, ti): """Index is a list that descends tree, returning a sub-element if it ends with a number and an attribute if it ends with a string.""" obj = self if isinstance(ti, (list, tuple)): for i in ti[:-1]: obj = obj[i] ti = ti[-1] if isinstance(ti, (int, long, slice)): return obj.sub[ti] else: return obj.attr[ti] def __setitem__(self, ti, value): """Index is a list that descends tree, returning a sub-element if it ends with a number and an attribute if it ends with a string.""" obj = self if isinstance(ti, (list, tuple)): for i in ti[:-1]: obj = obj[i] ti = ti[-1] if isinstance(ti, (int, long, slice)): obj.sub[ti] = value else: obj.attr[ti] = value def __delitem__(self, ti): """Index is a list that descends tree, returning a sub-element if it ends with a number and an attribute if it ends with a string.""" obj = self if isinstance(ti, (list, tuple)): for i in ti[:-1]: obj = obj[i] ti = ti[-1] if isinstance(ti, (int, long, slice)): del obj.sub[ti] else: del obj.attr[ti] def __contains__(self, value): """x in svg == True iff x is an attribute in svg.""" return value in self.attr def __eq__(self, other): """x == y iff x represents the same SVG as y.""" if id(self) == id(other): return True return (isinstance(other, SVG) and self.t == other.t and self.sub == other.sub and self.attr == other.attr) def __ne__(self, other): """x != y iff x does not represent the same SVG as y.""" return not (self == other) def append(self, x): """Appends x to the list of sub-elements (drawn last, overlaps other primitives).""" self.sub.append(x) def prepend(self, x): """Prepends x to the list of sub-elements (drawn first may be overlapped by other primitives).""" self.sub[0:0] = [x] def extend(self, x): """Extends list of sub-elements by a list x.""" self.sub.extend(x) def clone(self, shallow=False): """Deep copy of SVG tree. 
Set shallow=True for a shallow copy.""" if shallow: return copy.copy(self) else: return copy.deepcopy(self) ### nested class class SVGDepthIterator: """Manages SVG iteration.""" def __init__(self, svg, ti, depth_limit): self.svg = svg self.ti = ti self.shown = False self.depth_limit = depth_limit def __iter__(self): return self def next(self): if not self.shown: self.shown = True if self.ti != (): return self.ti, self.svg if not isinstance(self.svg, SVG): raise StopIteration if self.depth_limit is not None and len(self.ti) >= self.depth_limit: raise StopIteration if "iterators" not in self.__dict__: self.iterators = [] for i, s in enumerate(self.svg.sub): self.iterators.append(self.__class__(s, self.ti + (i,), self.depth_limit)) for k, s in self.svg.attr.items(): self.iterators.append(self.__class__(s, self.ti + (k,), self.depth_limit)) self.iterators = itertools.chain(*self.iterators) return self.iterators.next() ### end nested class def depth_first(self, depth_limit=None): """Returns a depth-first generator over the SVG. If depth_limit is a number, stop recursion at that depth.""" return self.SVGDepthIterator(self, (), depth_limit) def breadth_first(self, depth_limit=None): """Not implemented yet. Any ideas on how to do it? Returns a breadth-first generator over the SVG. If depth_limit is a number, stop recursion at that depth.""" raise NotImplementedError( "Got an algorithm for breadth-first searching a tree without effectively copying the tree?") def __iter__(self): return self.depth_first() def items(self, sub=True, attr=True, text=True): """Get a recursively-generated list of tree-index, sub-element/attribute pairs. If sub == False, do not show sub-elements. If attr == False, do not show attributes. If text == False, do not show text/Unicode sub-elements. """ output = [] for ti, s in self: show = False if isinstance(ti[-1], (int, long)): if isinstance(s, basestring): show = text else: show = sub else: show = attr if show: output.append((ti, s)) return output def keys(self, sub=True, attr=True, text=True): """Get a recursively-generated list of tree-indexes. If sub == False, do not show sub-elements. If attr == False, do not show attributes. If text == False, do not show text/Unicode sub-elements. """ return [ti for ti, s in self.items(sub, attr, text)] def values(self, sub=True, attr=True, text=True): """Get a recursively-generated list of sub-elements and attributes. If sub == False, do not show sub-elements. If attr == False, do not show attributes. If text == False, do not show text/Unicode sub-elements. """ return [s for ti, s in self.items(sub, attr, text)] def __repr__(self): return self.xml(depth_limit=0) def __str__(self): """Print (actually, return a string of) the tree in a form useful for browsing.""" return self.tree(sub=True, attr=False, text=False) def tree(self, depth_limit=None, sub=True, attr=True, text=True, tree_width=20, obj_width=80): """Print (actually, return a string of) the tree in a form useful for browsing. If depth_limit == a number, stop recursion at that depth. If sub == False, do not show sub-elements. If attr == False, do not show attributes. If text == False, do not show text/Unicode sub-elements. tree_width is the number of characters reserved for printing tree indexes. obj_width is the number of characters reserved for printing sub-elements/attributes. 
""" output = [] line = "%s %s" % (("%%-%ds" % tree_width) % repr(None), ("%%-%ds" % obj_width) % (repr(self))[0:obj_width]) output.append(line) for ti, s in self.depth_first(depth_limit): show = False if isinstance(ti[-1], (int, long)): if isinstance(s, basestring): show = text else: show = sub else: show = attr if show: line = "%s %s" % (("%%-%ds" % tree_width) % repr(list(ti)), ("%%-%ds" % obj_width) % (" "*len(ti) + repr(s))[0:obj_width]) output.append(line) return "\n".join(output) def xml(self, indent=u" ", newl=u"\n", depth_limit=None, depth=0): """Get an XML representation of the SVG. indent string used for indenting newl string used for newlines If depth_limit == a number, stop recursion at that depth. depth starting depth (not useful for users) print svg.xml() """ attrstr = [] for n, v in self.attr.items(): if isinstance(v, dict): v = u"; ".join([u"%s:%s" % (ni, vi) for ni, vi in v.items()]) elif isinstance(v, (list, tuple)): v = u", ".join(v) attrstr.append(u" %s=%s" % (n, repr(v))) attrstr = u"".join(attrstr) if len(self.sub) == 0: return u"%s<%s%s />" % (indent * depth, self.t, attrstr) if depth_limit is None or depth_limit > depth: substr = [] for s in self.sub: if isinstance(s, SVG): substr.append(s.xml(indent, newl, depth_limit, depth + 1) + newl) elif isinstance(s, basestring): substr.append(u"%s%s%s" % (indent * (depth + 1), s, newl)) else: substr.append("%s%s%s" % (indent * (depth + 1), repr(s), newl)) substr = u"".join(substr) return u"%s<%s%s>%s%s%s</%s>" % (indent * depth, self.t, attrstr, newl, substr, indent * depth, self.t) else: return u"%s<%s (%d sub)%s />" % (indent * depth, self.t, len(self.sub), attrstr) def standalone_xml(self, indent=u" ", newl=u"\n", encoding=u"utf-8"): """Get an XML representation of the SVG that can be saved/rendered. indent string used for indenting newl string used for newlines """ if self.t == "svg": top = self else: top = canvas(self) return u"""\ <?xml version="1.0" encoding="%s" standalone="no"?> <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> """ % encoding + (u"".join(top.__standalone_xml(indent, newl))) # end of return statement def __standalone_xml(self, indent, newl): output = [u"<%s" % self.t] for n, v in self.attr.items(): if isinstance(v, dict): v = u"; ".join([u"%s:%s" % (ni, vi) for ni, vi in v.items()]) elif isinstance(v, (list, tuple)): v = u", ".join(v) output.append(u' %s="%s"' % (n, v)) if len(self.sub) == 0: output.append(u" />%s%s" % (newl, newl)) return output elif self.t == "text" or self.t == "tspan" or self.t == "style": output.append(u">") else: output.append(u">%s%s" % (newl, newl)) for s in self.sub: if isinstance(s, SVG): output.extend(s.__standalone_xml(indent, newl)) else: output.append(unicode(s)) if self.t == "tspan": output.append(u"</%s>" % self.t) else: output.append(u"</%s>%s%s" % (self.t, newl, newl)) return output def interpret_fileName(self, fileName=None): if fileName is None: fileName = _default_fileName if re.search("windows", platform.system(), re.I) and not os.path.isabs(fileName): fileName = _default_directory + os.sep + fileName return fileName def save(self, fileName=None, encoding="utf-8", compresslevel=None): """Save to a file for viewing. Note that svg.save() overwrites the file named _default_fileName. fileName default=None note that _default_fileName will be overwritten if no fileName is specified. 
If the extension is ".svgz" or ".gz", the output will be gzipped encoding default="utf-8" file encoding compresslevel default=None if a number, the output will be gzipped with that compression level (1-9, 1 being fastest and 9 most thorough) """ fileName = self.interpret_fileName(fileName) if compresslevel is not None or re.search(r"\.svgz$", fileName, re.I) or re.search(r"\.gz$", fileName, re.I): import gzip if compresslevel is None: f = gzip.GzipFile(fileName, "w") else: f = gzip.GzipFile(fileName, "w", compresslevel) f = codecs.EncodedFile(f, "utf-8", encoding) f.write(self.standalone_xml(encoding=encoding)) f.close() else: f = codecs.open(fileName, "w", encoding=encoding) f.write(self.standalone_xml(encoding=encoding)) f.close() def inkview(self, fileName=None, encoding="utf-8"): """View in "inkview", assuming that program is available on your system. fileName default=None note that any file named _default_fileName will be overwritten if no fileName is specified. If the extension is ".svgz" or ".gz", the output will be gzipped encoding default="utf-8" file encoding """ fileName = self.interpret_fileName(fileName) self.save(fileName, encoding) os.spawnvp(os.P_NOWAIT, "inkview", ("inkview", fileName)) def inkscape(self, fileName=None, encoding="utf-8"): """View in "inkscape", assuming that program is available on your system. fileName default=None note that any file named _default_fileName will be overwritten if no fileName is specified. If the extension is ".svgz" or ".gz", the output will be gzipped encoding default="utf-8" file encoding """ fileName = self.interpret_fileName(fileName) self.save(fileName, encoding) os.spawnvp(os.P_NOWAIT, "inkscape", ("inkscape", fileName)) def firefox(self, fileName=None, encoding="utf-8"): """View in "firefox", assuming that program is available on your system. fileName default=None note that any file named _default_fileName will be overwritten if no fileName is specified. If the extension is ".svgz" or ".gz", the output will be gzipped encoding default="utf-8" file encoding """ fileName = self.interpret_fileName(fileName) self.save(fileName, encoding) os.spawnvp(os.P_NOWAIT, "firefox", ("firefox", fileName)) ###################################################################### _canvas_defaults = {"width": "400px", "height": "400px", "viewBox": "0 0 100 100", "xmlns": "http://www.w3.org/2000/svg", "xmlns:xlink": "http://www.w3.org/1999/xlink", "version": "1.1", "style": {"stroke": "black", "fill": "none", "stroke-width": "0.5pt", "stroke-linejoin": "round", "text-anchor": "middle", }, "font-family": ["Helvetica", "Arial", "FreeSans", "Sans", "sans", "sans-serif"], } def canvas(*sub, **attr): """Creates a top-level SVG object, allowing the user to control the image size and aspect ratio. 
canvas(sub, sub, sub..., attribute=value) sub optional list nested SVG elements or text/Unicode attribute=value pairs optional keywords SVG attributes Default attribute values: width "400px" height "400px" viewBox "0 0 100 100" xmlns "http://www.w3.org/2000/svg" xmlns:xlink "http://www.w3.org/1999/xlink" version "1.1" style "stroke:black; fill:none; stroke-width:0.5pt; stroke-linejoin:round; text-anchor:middle" font-family "Helvetica,Arial,FreeSans?,Sans,sans,sans-serif" """ attributes = dict(_canvas_defaults) attributes.update(attr) if sub is None or sub == (): return SVG("svg", **attributes) else: return SVG("svg", *sub, **attributes) def canvas_outline(*sub, **attr): """Same as canvas(), but draws an outline around the drawable area, so that you know how close your image is to the edges.""" svg = canvas(*sub, **attr) match = re.match(r"[, \t]*([0-9e.+\-]+)[, \t]+([0-9e.+\-]+)[, \t]+([0-9e.+\-]+)[, \t]+([0-9e.+\-]+)[, \t]*", svg["viewBox"]) if match is None: raise ValueError( "canvas viewBox is incorrectly formatted") x, y, width, height = [float(x) for x in match.groups()] svg.prepend(SVG("rect", x=x, y=y, width=width, height=height, stroke="none", fill="cornsilk")) svg.append(SVG("rect", x=x, y=y, width=width, height=height, stroke="black", fill="none")) return svg def template(fileName, svg, replaceme="REPLACEME"): """Loads an SVG image from a file, replacing instances of <REPLACEME /> with a given svg object. fileName required name of the template SVG svg required SVG object for replacement replaceme default="REPLACEME" fake SVG element to be replaced by the given object >>> print load("template.svg") None <svg (2 sub) style=u'stroke:black; fill:none; stroke-width:0.5pt; stroke-linejoi [0] <rect height=u'100' width=u'100' stroke=u'none' y=u'0' x=u'0' fill=u'yellow' [1] <REPLACEME /> >>> >>> print template("template.svg", SVG("circle", cx=50, cy=50, r=30)) None <svg (2 sub) style=u'stroke:black; fill:none; stroke-width:0.5pt; stroke-linejoi [0] <rect height=u'100' width=u'100' stroke=u'none' y=u'0' x=u'0' fill=u'yellow' [1] <circle cy=50 cx=50 r=30 /> """ output = load(fileName) for ti, s in output: if isinstance(s, SVG) and s.t == replaceme: output[ti] = svg return output ###################################################################### def load(fileName): """Loads an SVG image from a file.""" return load_stream(open(fileName)) def load_stream(stream): """Loads an SVG image from a stream (can be a string or a file object).""" from xml.sax import handler, make_parser from xml.sax.handler import feature_namespaces, feature_external_ges, feature_external_pes class ContentHandler(handler.ContentHandler): def __init__(self): self.stack = [] self.output = None self.all_whitespace = re.compile(r"^\s*$") def startElement(self, name, attr): s = SVG(name) s.attr = dict(attr.items()) if len(self.stack) > 0: last = self.stack[-1] last.sub.append(s) self.stack.append(s) def characters(self, ch): if not isinstance(ch, basestring) or self.all_whitespace.match(ch) is None: if len(self.stack) > 0: last = self.stack[-1] if len(last.sub) > 0 and isinstance(last.sub[-1], basestring): last.sub[-1] = last.sub[-1] + "\n" + ch else: last.sub.append(ch) def endElement(self, name): if len(self.stack) > 0: last = self.stack[-1] if (isinstance(last, SVG) and last.t == "style" and "type" in last.attr and last.attr["type"] == "text/css" and len(last.sub) == 1 and isinstance(last.sub[0], basestring)): last.sub[0] = "<![CDATA[\n" + last.sub[0] + "]]>" self.output = self.stack.pop() ch = ContentHandler() parser 
= make_parser() parser.setContentHandler(ch) parser.setFeature(feature_namespaces, 0) parser.setFeature(feature_external_ges, 0) parser.parse(stream) return ch.output ###################################################################### def set_func_name(f, name): """try to patch the function name string into a function object""" try: f.func_name = name except TypeError: # py 2.3 raises: TypeError: readonly attribute pass def totrans(expr, vars=("x", "y"), globals=None, locals=None): """Converts to a coordinate transformation (a function that accepts two arguments and returns two values). expr required a string expression or a function of two real or one complex value vars default=("x", "y") independent variable names; a singleton ("z",) is interpreted as complex globals default=None dict of global variables locals default=None dict of local variables """ if locals is None: locals = {} # python 2.3's eval() won't accept None if callable(expr): if expr.func_code.co_argcount == 2: return expr elif expr.func_code.co_argcount == 1: split = lambda z: (z.real, z.imag) output = lambda x, y: split(expr(x + y*1j)) set_func_name(output, expr.func_name) return output else: raise TypeError( "must be a function of 2 or 1 variables") if len(vars) == 2: g = math.__dict__ if globals is not None: g.update(globals) output = eval("lambda %s, %s: (%s)" % (vars[0], vars[1], expr), g, locals) set_func_name(output, "%s,%s -> %s" % (vars[0], vars[1], expr)) return output elif len(vars) == 1: g = cmath.__dict__ if globals is not None: g.update(globals) output = eval("lambda %s: (%s)" % (vars[0], expr), g, locals) split = lambda z: (z.real, z.imag) output2 = lambda x, y: split(output(x + y*1j)) set_func_name(output2, "%s -> %s" % (vars[0], expr)) return output2 else: raise TypeError( "vars must have 2 or 1 elements") def window(xmin, xmax, ymin, ymax, x=0, y=0, width=100, height=100, xlogbase=None, ylogbase=None, minusInfinity=-1000, flipx=False, flipy=True): """Creates and returns a coordinate transformation (a function that accepts two arguments and returns two values) that transforms from (xmin, ymin), (xmax, ymax) to (x, y), (x + width, y + height). xlogbase, ylogbase default=None, None if a number, transform logarithmically with given base minusInfinity default=-1000 what to return if log(0 or negative) is attempted flipx default=False if true, reverse the direction of x flipy default=True if true, reverse the direction of y (When composing windows, be sure to set flipy=False.) """ if flipx: ox1 = x + width ox2 = x else: ox1 = x ox2 = x + width if flipy: oy1 = y + height oy2 = y else: oy1 = y oy2 = y + height ix1 = xmin iy1 = ymin ix2 = xmax iy2 = ymax if xlogbase is not None and (ix1 <= 0. or ix2 <= 0.): raise ValueError ("x range incompatible with log scaling: (%g, %g)" % (ix1, ix2)) if ylogbase is not None and (iy1 <= 0. 
or iy2 <= 0.): raise ValueError ("y range incompatible with log scaling: (%g, %g)" % (iy1, iy2)) def maybelog(t, it1, it2, ot1, ot2, logbase): if t <= 0.: return minusInfinity else: return ot1 + 1.*(math.log(t, logbase) - math.log(it1, logbase))/(math.log(it2, logbase) - math.log(it1, logbase)) * (ot2 - ot1) xlogstr, ylogstr = "", "" if xlogbase is None: xfunc = lambda x: ox1 + 1.*(x - ix1)/(ix2 - ix1) * (ox2 - ox1) else: xfunc = lambda x: maybelog(x, ix1, ix2, ox1, ox2, xlogbase) xlogstr = " xlog=%g" % xlogbase if ylogbase is None: yfunc = lambda y: oy1 + 1.*(y - iy1)/(iy2 - iy1) * (oy2 - oy1) else: yfunc = lambda y: maybelog(y, iy1, iy2, oy1, oy2, ylogbase) ylogstr = " ylog=%g" % ylogbase output = lambda x, y: (xfunc(x), yfunc(y)) set_func_name(output, "(%g, %g), (%g, %g) -> (%g, %g), (%g, %g)%s%s" % ( ix1, ix2, iy1, iy2, ox1, ox2, oy1, oy2, xlogstr, ylogstr)) return output def rotate(angle, cx=0, cy=0): """Creates and returns a coordinate transformation which rotates around (cx,cy) by "angle" degrees.""" angle *= math.pi/180. return lambda x, y: (cx + math.cos(angle)*(x - cx) - math.sin(angle)*(y - cy), cy + math.sin(angle)*(x - cx) + math.cos(angle)*(y - cy)) class Fig: """Stores graphics primitive objects and applies a single coordinate transformation to them. To compose coordinate systems, nest Fig objects. Fig(obj, obj, obj..., trans=function) obj optional list a list of drawing primitives trans default=None a coordinate transformation function >>> fig = Fig(Line(0,0,1,1), Rect(0.2,0.2,0.8,0.8), trans="2*x, 2*y") >>> print fig.SVG().xml() <g> <path d='M0 0L2 2' /> <path d='M0.4 0.4L1.6 0.4ZL1.6 1.6ZL0.4 1.6ZL0.4 0.4ZZ' /> </g> >>> print Fig(fig, trans="x/2., y/2.").SVG().xml() <g> <path d='M0 0L1 1' /> <path d='M0.2 0.2L0.8 0.2ZL0.8 0.8ZL0.2 0.8ZL0.2 0.2ZZ' /> </g> """ def __repr__(self): if self.trans is None: return "<Fig (%d items)>" % len(self.d) elif isinstance(self.trans, basestring): return "<Fig (%d items) x,y -> %s>" % (len(self.d), self.trans) else: return "<Fig (%d items) %s>" % (len(self.d), self.trans.func_name) def __init__(self, *d, **kwds): self.d = list(d) defaults = {"trans": None, } defaults.update(kwds) kwds = defaults self.trans = kwds["trans"]; del kwds["trans"] if len(kwds) != 0: raise TypeError ("Fig() got unexpected keyword arguments %s" % kwds.keys()) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object. Coordinate transformations in nested Figs will be composed. """ if trans is None: trans = self.trans if isinstance(trans, basestring): trans = totrans(trans) output = SVG("g") for s in self.d: if isinstance(s, SVG): output.append(s) elif isinstance(s, Fig): strans = s.trans if isinstance(strans, basestring): strans = totrans(strans) if trans is None: subtrans = strans elif strans is None: subtrans = trans else: subtrans = lambda x, y: trans(*strans(x, y)) output.sub += s.SVG(subtrans).sub elif s is None: pass else: output.append(s.SVG(trans)) return output class Plot: """Acts like Fig, but draws a coordinate axis. You also need to supply plot ranges. Plot(xmin, xmax, ymin, ymax, obj, obj, obj..., keyword options...) 
xmin, xmax required minimum and maximum x values (in the objs' coordinates) ymin, ymax required minimum and maximum y values (in the objs' coordinates) obj optional list drawing primitives keyword options keyword list options defined below The following are keyword options, with their default values: trans None transformation function x, y 5, 5 upper-left corner of the Plot in SVG coordinates width, height 90, 90 width and height of the Plot in SVG coordinates flipx, flipy False, True flip the sign of the coordinate axis minusInfinity -1000 if an axis is logarithmic and an object is plotted at 0 or a negative value, -1000 will be used as a stand-in for NaN atx, aty 0, 0 the place where the coordinate axes cross xticks -10 request ticks according to the standard tick specification (see help(Ticks)) xminiticks True request miniticks according to the standard minitick specification xlabels True request tick labels according to the standard tick label specification xlogbase None if a number, the axis and transformation are logarithmic with ticks at the given base (10 being the most common) (same for y) arrows None if a new identifier, create arrow markers and draw them at the ends of the coordinate axes text_attr {} a dictionary of attributes for label text axis_attr {} a dictionary of attributes for the axis lines """ def __repr__(self): if self.trans is None: return "<Plot (%d items)>" % len(self.d) else: return "<Plot (%d items) %s>" % (len(self.d), self.trans.func_name) def __init__(self, xmin, xmax, ymin, ymax, *d, **kwds): self.xmin, self.xmax, self.ymin, self.ymax = xmin, xmax, ymin, ymax self.d = list(d) defaults = {"trans": None, "x": 5, "y": 5, "width": 90, "height": 90, "flipx": False, "flipy": True, "minusInfinity": -1000, "atx": 0, "xticks": -10, "xminiticks": True, "xlabels": True, "xlogbase": None, "aty": 0, "yticks": -10, "yminiticks": True, "ylabels": True, "ylogbase": None, "arrows": None, "text_attr": {}, "axis_attr": {}, } defaults.update(kwds) kwds = defaults self.trans = kwds["trans"]; del kwds["trans"] self.x = kwds["x"]; del kwds["x"] self.y = kwds["y"]; del kwds["y"] self.width = kwds["width"]; del kwds["width"] self.height = kwds["height"]; del kwds["height"] self.flipx = kwds["flipx"]; del kwds["flipx"] self.flipy = kwds["flipy"]; del kwds["flipy"] self.minusInfinity = kwds["minusInfinity"]; del kwds["minusInfinity"] self.atx = kwds["atx"]; del kwds["atx"] self.xticks = kwds["xticks"]; del kwds["xticks"] self.xminiticks = kwds["xminiticks"]; del kwds["xminiticks"] self.xlabels = kwds["xlabels"]; del kwds["xlabels"] self.xlogbase = kwds["xlogbase"]; del kwds["xlogbase"] self.aty = kwds["aty"]; del kwds["aty"] self.yticks = kwds["yticks"]; del kwds["yticks"] self.yminiticks = kwds["yminiticks"]; del kwds["yminiticks"] self.ylabels = kwds["ylabels"]; del kwds["ylabels"] self.ylogbase = kwds["ylogbase"]; del kwds["ylogbase"] self.arrows = kwds["arrows"]; del kwds["arrows"] self.text_attr = kwds["text_attr"]; del kwds["text_attr"] self.axis_attr = kwds["axis_attr"]; del kwds["axis_attr"] if len(kwds) != 0: raise TypeError ("Plot() got unexpected keyword arguments %s" % kwds.keys()) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" if trans is None: trans = self.trans if isinstance(trans, basestring): trans = totrans(trans) self.last_window = window(self.xmin, self.xmax, self.ymin, self.ymax, x=self.x, y=self.y, width=self.width, height=self.height, xlogbase=self.xlogbase, ylogbase=self.ylogbase, minusInfinity=self.minusInfinity, 
flipx=self.flipx, flipy=self.flipy) d = ([Axes(self.xmin, self.xmax, self.ymin, self.ymax, self.atx, self.aty, self.xticks, self.xminiticks, self.xlabels, self.xlogbase, self.yticks, self.yminiticks, self.ylabels, self.ylogbase, self.arrows, self.text_attr, **self.axis_attr)] + self.d) return Fig(Fig(*d, **{"trans": trans})).SVG(self.last_window) class Frame: text_defaults = {"stroke": "none", "fill": "black", "font-size": 5, } axis_defaults = {} tick_length = 1.5 minitick_length = 0.75 text_xaxis_offset = 1. text_yaxis_offset = 2. text_xtitle_offset = 6. text_ytitle_offset = 12. def __repr__(self): return "<Frame (%d items)>" % len(self.d) def __init__(self, xmin, xmax, ymin, ymax, *d, **kwds): """Acts like Fig, but draws a coordinate frame around the data. You also need to supply plot ranges. Frame(xmin, xmax, ymin, ymax, obj, obj, obj..., keyword options...) xmin, xmax required minimum and maximum x values (in the objs' coordinates) ymin, ymax required minimum and maximum y values (in the objs' coordinates) obj optional list drawing primitives keyword options keyword list options defined below The following are keyword options, with their default values: x, y 20, 5 upper-left corner of the Frame in SVG coordinates width, height 75, 80 width and height of the Frame in SVG coordinates flipx, flipy False, True flip the sign of the coordinate axis minusInfinity -1000 if an axis is logarithmic and an object is plotted at 0 or a negative value, -1000 will be used as a stand-in for NaN xtitle None if a string, label the x axis xticks -10 request ticks according to the standard tick specification (see help(Ticks)) xminiticks True request miniticks according to the standard minitick specification xlabels True request tick labels according to the standard tick label specification xlogbase None if a number, the axis and transformation are logarithmic with ticks at the given base (10 being the most common) (same for y) text_attr {} a dictionary of attributes for label text axis_attr {} a dictionary of attributes for the axis lines """ self.xmin, self.xmax, self.ymin, self.ymax = xmin, xmax, ymin, ymax self.d = list(d) defaults = {"x": 20, "y": 5, "width": 75, "height": 80, "flipx": False, "flipy": True, "minusInfinity": -1000, "xtitle": None, "xticks": -10, "xminiticks": True, "xlabels": True, "x2labels": None, "xlogbase": None, "ytitle": None, "yticks": -10, "yminiticks": True, "ylabels": True, "y2labels": None, "ylogbase": None, "text_attr": {}, "axis_attr": {}, } defaults.update(kwds) kwds = defaults self.x = kwds["x"]; del kwds["x"] self.y = kwds["y"]; del kwds["y"] self.width = kwds["width"]; del kwds["width"] self.height = kwds["height"]; del kwds["height"] self.flipx = kwds["flipx"]; del kwds["flipx"] self.flipy = kwds["flipy"]; del kwds["flipy"] self.minusInfinity = kwds["minusInfinity"]; del kwds["minusInfinity"] self.xtitle = kwds["xtitle"]; del kwds["xtitle"] self.xticks = kwds["xticks"]; del kwds["xticks"] self.xminiticks = kwds["xminiticks"]; del kwds["xminiticks"] self.xlabels = kwds["xlabels"]; del kwds["xlabels"] self.x2labels = kwds["x2labels"]; del kwds["x2labels"] self.xlogbase = kwds["xlogbase"]; del kwds["xlogbase"] self.ytitle = kwds["ytitle"]; del kwds["ytitle"] self.yticks = kwds["yticks"]; del kwds["yticks"] self.yminiticks = kwds["yminiticks"]; del kwds["yminiticks"] self.ylabels = kwds["ylabels"]; del kwds["ylabels"] self.y2labels = kwds["y2labels"]; del kwds["y2labels"] self.ylogbase = kwds["ylogbase"]; del kwds["ylogbase"] self.text_attr = dict(self.text_defaults) 
self.text_attr.update(kwds["text_attr"]); del kwds["text_attr"] self.axis_attr = dict(self.axis_defaults) self.axis_attr.update(kwds["axis_attr"]); del kwds["axis_attr"] if len(kwds) != 0: raise TypeError( "Frame() got unexpected keyword arguments %s" % kwds.keys()) def SVG(self): """Apply the window transformation and return an SVG object.""" self.last_window = window(self.xmin, self.xmax, self.ymin, self.ymax, x=self.x, y=self.y, width=self.width, height=self.height, xlogbase=self.xlogbase, ylogbase=self.ylogbase, minusInfinity=self.minusInfinity, flipx=self.flipx, flipy=self.flipy) left = YAxis(self.ymin, self.ymax, self.xmin, self.yticks, self.yminiticks, self.ylabels, self.ylogbase, None, None, None, self.text_attr, **self.axis_attr) right = YAxis(self.ymin, self.ymax, self.xmax, self.yticks, self.yminiticks, self.y2labels, self.ylogbase, None, None, None, self.text_attr, **self.axis_attr) bottom = XAxis(self.xmin, self.xmax, self.ymin, self.xticks, self.xminiticks, self.xlabels, self.xlogbase, None, None, None, self.text_attr, **self.axis_attr) top = XAxis(self.xmin, self.xmax, self.ymax, self.xticks, self.xminiticks, self.x2labels, self.xlogbase, None, None, None, self.text_attr, **self.axis_attr) left.tick_start = -self.tick_length left.tick_end = 0 left.minitick_start = -self.minitick_length left.minitick_end = 0. left.text_start = self.text_yaxis_offset right.tick_start = 0. right.tick_end = self.tick_length right.minitick_start = 0. right.minitick_end = self.minitick_length right.text_start = -self.text_yaxis_offset right.text_attr["text-anchor"] = "start" bottom.tick_start = 0. bottom.tick_end = self.tick_length bottom.minitick_start = 0. bottom.minitick_end = self.minitick_length bottom.text_start = -self.text_xaxis_offset top.tick_start = -self.tick_length top.tick_end = 0. top.minitick_start = -self.minitick_length top.minitick_end = 0. top.text_start = self.text_xaxis_offset top.text_attr["dominant-baseline"] = "text-after-edge" output = Fig(*self.d).SVG(self.last_window) output.prepend(left.SVG(self.last_window)) output.prepend(bottom.SVG(self.last_window)) output.prepend(right.SVG(self.last_window)) output.prepend(top.SVG(self.last_window)) if self.xtitle is not None: output.append(SVG("text", self.xtitle, transform="translate(%g, %g)" % ((self.x + self.width/2.), (self.y + self.height + self.text_xtitle_offset)), dominant_baseline="text-before-edge", **self.text_attr)) if self.ytitle is not None: output.append(SVG("text", self.ytitle, transform="translate(%g, %g) rotate(-90)" % ((self.x - self.text_ytitle_offset), (self.y + self.height/2.)), **self.text_attr)) return output ###################################################################### def pathtoPath(svg): """Converts SVG("path", d="...") into Path(d=[...]).""" if not isinstance(svg, SVG) or svg.t != "path": raise TypeError ("Only SVG <path /> objects can be converted into Paths") attr = dict(svg.attr) d = attr["d"] del attr["d"] for key in attr.keys(): if not isinstance(key, str): value = attr[key] del attr[key] attr[str(key)] = value return Path(d, **attr) class Path: """Path represents an SVG path, an arbitrary set of curves and straight segments. Unlike SVG("path", d="..."), Path stores coordinates as a list of numbers, rather than a string, so that it is transformable in a Fig. Path(d, attribute=value) d required path data attribute=value pairs keyword list SVG attributes See http://www.w3.org/TR/SVG/paths.html for specification of paths from text. 
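For example, a sketch of round-tripping path data through the parser (the tuple definitions follow below):
>>> p = Path("M 0 0 L 100 100")
>>> p.d  # [('M', 0.0, 0.0, False), ('L', 100.0, 100.0, False)]
>>> svg = p.SVG()  # back to an SVG <path> element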
Internally, Path data is a list of tuples with these definitions: * ("Z/z",): close the current path * ("H/h", x) or ("V/v", y): a horizontal or vertical line segment to x or y * ("M/m/L/l/T/t", x, y, global): moveto, lineto, or smooth quadratic curveto point (x, y). If global=True, (x, y) should not be transformed. * ("S/s/Q/q", cx, cy, cglobal, x, y, global): polybezier or smooth quadratic curveto point (x, y) using (cx, cy) as a control point. If cglobal or global=True, (cx, cy) or (x, y) should not be transformed. * ("C/c", c1x, c1y, c1global, c2x, c2y, c2global, x, y, global): cubic curveto point (x, y) using (c1x, c1y) and (c2x, c2y) as control points. If c1global, c2global, or global=True, (c1x, c1y), (c2x, c2y), or (x, y) should not be transformed. * ("A/a", rx, ry, rglobal, angle, large-arc-flag, sweep-flag, x, y, global): arcto point (x, y) using the aforementioned parameters (angle is the SVG x-axis-rotation). * (",/.", rx, ry, rglobal, angle, x, y, global): an ellipse at point (x, y) with radii (rx, ry), rotated by "angle" degrees; the whole ellipse is always drawn ("," is the relative form, "." the absolute form). """ defaults = {} def __repr__(self): return "<Path (%d nodes) %s>" % (len(self.d), self.attr) def __init__(self, d=[], **attr): if isinstance(d, basestring): self.d = self.parse(d) else: self.d = list(d) self.attr = dict(self.defaults) self.attr.update(attr) def parse_whitespace(self, index, pathdata): """Part of Path's text-command parsing algorithm; used internally.""" while index < len(pathdata) and pathdata[index] in (" ", "\t", "\r", "\n", ","): index += 1 return index, pathdata def parse_command(self, index, pathdata): """Part of Path's text-command parsing algorithm; used internally.""" index, pathdata = self.parse_whitespace(index, pathdata) if index >= len(pathdata): return None, index, pathdata command = pathdata[index] if "A" <= command <= "Z" or "a" <= command <= "z": index += 1 return command, index, pathdata else: return None, index, pathdata def parse_number(self, index, pathdata): """Part of Path's text-command parsing algorithm; used internally.""" index, pathdata = self.parse_whitespace(index, pathdata) if index >= len(pathdata): return None, index, pathdata first_digit = pathdata[index] if "0" <= first_digit <= "9" or first_digit in ("-", "+", "."): start = index while index < len(pathdata) and ("0" <= pathdata[index] <= "9" or pathdata[index] in ("-", "+", ".", "e", "E")): index += 1 end = index index = end return float(pathdata[start:end]), index, pathdata else: return None, index, pathdata def parse_boolean(self, index, pathdata): """Part of Path's text-command parsing algorithm; used internally.""" index, pathdata = self.parse_whitespace(index, pathdata) if index >= len(pathdata): return None, index, pathdata first_digit = pathdata[index] if first_digit in ("0", "1"): index += 1 return int(first_digit), index, pathdata else: return None, index, pathdata def parse(self, pathdata): """Parses text-commands, converting them into a list of tuples.
Called by the constructor.""" output = [] index = 0 while True: command, index, pathdata = self.parse_command(index, pathdata) index, pathdata = self.parse_whitespace(index, pathdata) if command is None and index == len(pathdata): break # this is the normal way out of the loop if command in ("Z", "z"): output.append((command,)) ###################### elif command in ("H", "h", "V", "v"): errstring = "Path command \"%s\" requires a number at index %d" % (command, index) num1, index, pathdata = self.parse_number(index, pathdata) if num1 is None: raise ValueError ( errstring) while num1 is not None: output.append((command, num1)) num1, index, pathdata = self.parse_number(index, pathdata) ###################### elif command in ("M", "m", "L", "l", "T", "t"): errstring = "Path command \"%s\" requires an x,y pair at index %d" % (command, index) num1, index, pathdata = self.parse_number(index, pathdata) num2, index, pathdata = self.parse_number(index, pathdata) if num1 is None: raise ValueError ( errstring) while num1 is not None: if num2 is None: raise ValueError ( errstring) output.append((command, num1, num2, False)) num1, index, pathdata = self.parse_number(index, pathdata) num2, index, pathdata = self.parse_number(index, pathdata) ###################### elif command in ("S", "s", "Q", "q"): errstring = "Path command \"%s\" requires a cx,cy,x,y quadruplet at index %d" % (command, index) num1, index, pathdata = self.parse_number(index, pathdata) num2, index, pathdata = self.parse_number(index, pathdata) num3, index, pathdata = self.parse_number(index, pathdata) num4, index, pathdata = self.parse_number(index, pathdata) if num1 is None: raise ValueError ( errstring ) while num1 is not None: if num2 is None or num3 is None or num4 is None: raise ValueError (errstring) output.append((command, num1, num2, False, num3, num4, False)) num1, index, pathdata = self.parse_number(index, pathdata) num2, index, pathdata = self.parse_number(index, pathdata) num3, index, pathdata = self.parse_number(index, pathdata) num4, index, pathdata = self.parse_number(index, pathdata) ###################### elif command in ("C", "c"): errstring = "Path command \"%s\" requires a c1x,c1y,c2x,c2y,x,y sextuplet at index %d" % (command, index) num1, index, pathdata = self.parse_number(index, pathdata) num2, index, pathdata = self.parse_number(index, pathdata) num3, index, pathdata = self.parse_number(index, pathdata) num4, index, pathdata = self.parse_number(index, pathdata) num5, index, pathdata = self.parse_number(index, pathdata) num6, index, pathdata = self.parse_number(index, pathdata) if num1 is None: raise ValueError(errstring) while num1 is not None: if num2 is None or num3 is None or num4 is None or num5 is None or num6 is None: raise ValueError(errstring) output.append((command, num1, num2, False, num3, num4, False, num5, num6, False)) num1, index, pathdata = self.parse_number(index, pathdata) num2, index, pathdata = self.parse_number(index, pathdata) num3, index, pathdata = self.parse_number(index, pathdata) num4, index, pathdata = self.parse_number(index, pathdata) num5, index, pathdata = self.parse_number(index, pathdata) num6, index, pathdata = self.parse_number(index, pathdata) ###################### elif command in ("A", "a"): errstring = "Path command \"%s\" requires a rx,ry,angle,large-arc-flag,sweep-flag,x,y septuplet at index %d" % (command, index) num1, index, pathdata = self.parse_number(index, pathdata) num2, index, pathdata = self.parse_number(index, pathdata) num3, index, pathdata = 
self.parse_number(index, pathdata) num4, index, pathdata = self.parse_boolean(index, pathdata) num5, index, pathdata = self.parse_boolean(index, pathdata) num6, index, pathdata = self.parse_number(index, pathdata) num7, index, pathdata = self.parse_number(index, pathdata) if num1 is None: raise ValueError(errstring) while num1 is not None: if num2 is None or num3 is None or num4 is None or num5 is None or num6 is None or num7 is None: raise ValueError(errstring) output.append((command, num1, num2, False, num3, num4, num5, num6, num7, False)) num1, index, pathdata = self.parse_number(index, pathdata) num2, index, pathdata = self.parse_number(index, pathdata) num3, index, pathdata = self.parse_number(index, pathdata) num4, index, pathdata = self.parse_boolean(index, pathdata) num5, index, pathdata = self.parse_boolean(index, pathdata) num6, index, pathdata = self.parse_number(index, pathdata) num7, index, pathdata = self.parse_number(index, pathdata) return output def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" if isinstance(trans, basestring): trans = totrans(trans) x, y, X, Y = None, None, None, None output = [] for datum in self.d: if not isinstance(datum, (tuple, list)): raise TypeError("pathdata elements must be tuples/lists") command = datum[0] ###################### if command in ("Z", "z"): x, y, X, Y = None, None, None, None output.append("Z") ###################### elif command in ("H", "h", "V", "v"): command, num1 = datum if command == "H" or (command == "h" and x is None): x = num1 elif command == "h": x += num1 elif command == "V" or (command == "v" and y is None): y = num1 elif command == "v": y += num1 if trans is None: X, Y = x, y else: X, Y = trans(x, y) output.append("L%g %g" % (X, Y)) ###################### elif command in ("M", "m", "L", "l", "T", "t"): command, num1, num2, isglobal12 = datum if trans is None or isglobal12: if command.isupper() or X is None or Y is None: X, Y = num1, num2 else: X += num1 Y += num2 x, y = X, Y else: if command.isupper() or x is None or y is None: x, y = num1, num2 else: x += num1 y += num2 X, Y = trans(x, y) COMMAND = command.capitalize() output.append("%s%g %g" % (COMMAND, X, Y)) ###################### elif command in ("S", "s", "Q", "q"): command, num1, num2, isglobal12, num3, num4, isglobal34 = datum if trans is None or isglobal12: if command.isupper() or X is None or Y is None: CX, CY = num1, num2 else: CX = X + num1 CY = Y + num2 else: if command.isupper() or x is None or y is None: cx, cy = num1, num2 else: cx = x + num1 cy = y + num2 CX, CY = trans(cx, cy) if trans is None or isglobal34: if command.isupper() or X is None or Y is None: X, Y = num3, num4 else: X += num3 Y += num4 x, y = X, Y else: if command.isupper() or x is None or y is None: x, y = num3, num4 else: x += num3 y += num4 X, Y = trans(x, y) COMMAND = command.capitalize() output.append("%s%g %g %g %g" % (COMMAND, CX, CY, X, Y)) ###################### elif command in ("C", "c"): command, num1, num2, isglobal12, num3, num4, isglobal34, num5, num6, isglobal56 = datum if trans is None or isglobal12: if command.isupper() or X is None or Y is None: C1X, C1Y = num1, num2 else: C1X = X + num1 C1Y = Y + num2 else: if command.isupper() or x is None or y is None: c1x, c1y = num1, num2 else: c1x = x + num1 c1y = y + num2 C1X, C1Y = trans(c1x, c1y) if trans is None or isglobal34: if command.isupper() or X is None or Y is None: C2X, C2Y = num3, num4 else: C2X = X + num3 C2Y = Y + num4 else: if command.isupper() or x is None or y is None: 
c2x, c2y = num3, num4 else: c2x = x + num3 c2y = y + num4 C2X, C2Y = trans(c2x, c2y) if trans is None or isglobal56: if command.isupper() or X is None or Y is None: X, Y = num5, num6 else: X += num5 Y += num6 x, y = X, Y else: if command.isupper() or x is None or y is None: x, y = num5, num6 else: x += num5 y += num6 X, Y = trans(x, y) COMMAND = command.capitalize() output.append("%s%g %g %g %g %g %g" % (COMMAND, C1X, C1Y, C2X, C2Y, X, Y)) ###################### elif command in ("A", "a"): command, num1, num2, isglobal12, angle, large_arc_flag, sweep_flag, num3, num4, isglobal34 = datum oldx, oldy = x, y OLDX, OLDY = X, Y if trans is None or isglobal34: if command.isupper() or X is None or Y is None: X, Y = num3, num4 else: X += num3 Y += num4 x, y = X, Y else: if command.isupper() or x is None or y is None: x, y = num3, num4 else: x += num3 y += num4 X, Y = trans(x, y) if x is not None and y is not None: centerx, centery = (x + oldx)/2., (y + oldy)/2. CENTERX, CENTERY = (X + OLDX)/2., (Y + OLDY)/2. if trans is None or isglobal12: RX = CENTERX + num1 RY = CENTERY + num2 else: rx = centerx + num1 ry = centery + num2 RX, RY = trans(rx, ry) COMMAND = command.capitalize() output.append("%s%g %g %g %d %d %g %g" % (COMMAND, RX - CENTERX, RY - CENTERY, angle, large_arc_flag, sweep_flag, X, Y)) elif command in (",", "."): command, num1, num2, isglobal12, angle, num3, num4, isglobal34 = datum if trans is None or isglobal34: if command == "." or X is None or Y is None: X, Y = num3, num4 else: X += num3 Y += num4 x, y = None, None else: if command == "." or x is None or y is None: x, y = num3, num4 else: x += num3 y += num4 X, Y = trans(x, y) if trans is None or isglobal12: RX = X + num1 RY = Y + num2 else: rx = x + num1 ry = y + num2 RX, RY = trans(rx, ry) RX, RY = RX - X, RY - Y X1, Y1 = X + RX * math.cos(angle*math.pi/180.), Y + RX * math.sin(angle*math.pi/180.) X2, Y2 = X + RY * math.sin(angle*math.pi/180.), Y - RY * math.cos(angle*math.pi/180.) X3, Y3 = X - RX * math.cos(angle*math.pi/180.), Y - RX * math.sin(angle*math.pi/180.) X4, Y4 = X - RY * math.sin(angle*math.pi/180.), Y + RY * math.cos(angle*math.pi/180.) output.append("M%g %gA%g %g %g 0 0 %g %gA%g %g %g 0 0 %g %gA%g %g %g 0 0 %g %gA%g %g %g 0 0 %g %g" % ( X1, Y1, RX, RY, angle, X2, Y2, RX, RY, angle, X3, Y3, RX, RY, angle, X4, Y4, RX, RY, angle, X1, Y1)) return SVG("path", d="".join(output), **self.attr) ###################################################################### def funcRtoC(expr, var="t", globals=None, locals=None): """Converts a complex "z(t)" string to a function acceptable for Curve. expr required string in the form "z(t)" var default="t" name of the independent variable globals default=None dict of global variables used in the expression; you may want to use Python's builtin globals() locals default=None dict of local variables """ if locals is None: locals = {} # python 2.3's eval() won't accept None g = cmath.__dict__ if globals is not None: g.update(globals) output = eval("lambda %s: (%s)" % (var, expr), g, locals) split = lambda z: (z.real, z.imag) output2 = lambda t: split(output(t)) set_func_name(output2, "%s -> %s" % (var, expr)) return output2 def funcRtoR2(expr, var="t", globals=None, locals=None): """Converts a "f(t), g(t)" string to a function acceptable for Curve. 
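For example (the parameters are documented below):
>>> f = funcRtoR2("cos(t), sin(t)")
>>> f(0.)  # (1.0, 0.0)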
expr required string in the form "f(t), g(t)" var default="t" name of the independent variable globals default=None dict of global variables used in the expression; you may want to use Python's builtin globals() locals default=None dict of local variables """ if locals is None: locals = {} # python 2.3's eval() won't accept None g = math.__dict__ if globals is not None: g.update(globals) output = eval("lambda %s: (%s)" % (var, expr), g, locals) set_func_name(output, "%s -> %s" % (var, expr)) return output def funcRtoR(expr, var="x", globals=None, locals=None): """Converts a "f(x)" string to a function acceptable for Curve. expr required string in the form "f(x)" var default="x" name of the independent variable globals default=None dict of global variables used in the expression; you may want to use Python's builtin globals() locals default=None dict of local variables """ if locals is None: locals = {} # python 2.3's eval() won't accept None g = math.__dict__ if globals is not None: g.update(globals) output = eval("lambda %s: (%s, %s)" % (var, var, expr), g, locals) set_func_name(output, "%s -> %s" % (var, expr)) return output class Curve: """Draws a parametric function as a path. Curve(f, low, high, loop, attribute=value) f required a Python callable or string in the form "f(t), g(t)" low, high required left and right endpoints loop default=False if True, connect the endpoints attribute=value pairs keyword list SVG attributes """ defaults = {} random_sampling = True recursion_limit = 15 linearity_limit = 0.05 discontinuity_limit = 5. def __repr__(self): return "<Curve %s [%s, %s] %s>" % (self.f, self.low, self.high, self.attr) def __init__(self, f, low, high, loop=False, **attr): self.f = f self.low = low self.high = high self.loop = loop self.attr = dict(self.defaults) self.attr.update(attr) ### nested class Sample class Sample: def __repr__(self): t, x, y, X, Y = self.t, self.x, self.y, self.X, self.Y if t is not None: t = "%g" % t if x is not None: x = "%g" % x if y is not None: y = "%g" % y if X is not None: X = "%g" % X if Y is not None: Y = "%g" % Y return "<Curve.Sample t=%s x=%s y=%s X=%s Y=%s>" % (t, x, y, X, Y) def __init__(self, t): self.t = t def link(self, left, right): self.left, self.right = left, right def evaluate(self, f, trans): self.x, self.y = f(self.t) if trans is None: self.X, self.Y = self.x, self.y else: self.X, self.Y = trans(self.x, self.y) ### end Sample ### nested class Samples class Samples: def __repr__(self): return "<Curve.Samples (%d samples)>" % len(self) def __init__(self, left, right): self.left, self.right = left, right def __len__(self): count = 0 current = self.left while current is not None: count += 1 current = current.right return count def __iter__(self): self.current = self.left return self def next(self): current = self.current if current is None: raise StopIteration self.current = self.current.right return current ### end nested class def sample(self, trans=None): """Adaptive-sampling algorithm that chooses the best sample points for a parametric curve between two endpoints and detects discontinuities. 
Called by SVG().""" oldrecursionlimit = sys.getrecursionlimit() sys.setrecursionlimit(self.recursion_limit + 100) try: # the best way to keep all the information while sampling is to make a linked list if not (self.low < self.high): raise ValueError("low must be less than high") low, high = self.Sample(float(self.low)), self.Sample(float(self.high)) low.link(None, high) high.link(low, None) low.evaluate(self.f, trans) high.evaluate(self.f, trans) # adaptive sampling between the low and high points self.subsample(low, high, 0, trans) # Prune excess points where the curve is nearly linear left = low while left.right is not None: # increment mid and right mid = left.right right = mid.right if (right is not None and left.X is not None and left.Y is not None and mid.X is not None and mid.Y is not None and right.X is not None and right.Y is not None): numer = left.X*(right.Y - mid.Y) + mid.X*(left.Y - right.Y) + right.X*(mid.Y - left.Y) denom = math.sqrt((left.X - right.X)**2 + (left.Y - right.Y)**2) if denom != 0. and abs(numer/denom) < self.linearity_limit: # drop mid (the garbage collector will get it) left.right = right right.left = left else: # increment left left = left.right else: left = left.right self.last_samples = self.Samples(low, high) finally: sys.setrecursionlimit(oldrecursionlimit) def subsample(self, left, right, depth, trans=None): """Part of the adaptive-sampling algorithm that chooses the best sample points. Called by sample().""" if self.random_sampling: mid = self.Sample(left.t + random.uniform(0.3, 0.7) * (right.t - left.t)) else: mid = self.Sample(left.t + 0.5 * (right.t - left.t)) left.right = mid right.left = mid mid.link(left, right) mid.evaluate(self.f, trans) # calculate the distance of closest approach of mid to the line between left and right numer = left.X*(right.Y - mid.Y) + mid.X*(left.Y - right.Y) + right.X*(mid.Y - left.Y) denom = math.sqrt((left.X - right.X)**2 + (left.Y - right.Y)**2) # if we haven't sampled enough or left fails to be close enough to right, or mid fails to be linear enough... if (depth < 3 or (denom == 0 and left.t != right.t) or denom > self.discontinuity_limit or (denom != 0. and abs(numer/denom) > self.linearity_limit)): # and we haven't sampled too many points if depth < self.recursion_limit: self.subsample(left, mid, depth+1, trans) self.subsample(mid, right, depth+1, trans) else: # We've sampled many points and yet it's still not a small linear gap. # Break the line: it's a discontinuity mid.y = mid.Y = None def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" return self.Path(trans).SVG() def Path(self, trans=None, local=False): """Apply the transformation "trans" and return a Path object in global coordinates. If local=True, return a Path in local coordinates (which must be transformed again).""" if isinstance(trans, basestring): trans = totrans(trans) if isinstance(self.f, basestring): self.f = funcRtoR2(self.f) self.sample(trans) output = [] for s in self.last_samples: if s.X is not None and s.Y is not None: if s.left is None or s.left.Y is None: command = "M" else: command = "L" if local: output.append((command, s.x, s.y, False)) else: output.append((command, s.X, s.Y, True)) if self.loop: output.append(("Z",)) return Path(output, **self.attr) ###################################################################### class Poly: """Draws a curve specified by a sequence of points. The curve may be piecewise linear, like a polygon, or a Bezier curve. 
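For example, a sketch of a smooth curve through three points (the modes are documented below):
>>> poly = Poly([(0., 0.), (1., 1.), (2., 0.)], mode="smooth")
>>> svg = poly.SVG()  # a <path> made of cubic Bezier segments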
Poly(d, mode, loop, attribute=value) d required list of tuples representing points and possibly control points mode default="L" "lines", "bezier", "velocity", "foreback", "smooth", or an abbreviation loop default=False if True, connect the first and last point, closing the loop attribute=value pairs keyword list SVG attributes The format of the tuples in d depends on the mode. "lines"/"L" d=[(x,y), (x,y), ...] piecewise-linear segments joining the (x,y) points "bezier"/"B" d=[(x, y, c1x, c1y, c2x, c2y), ...] Bezier curve with two control points (control points precede (x,y), as in SVG paths). If (c1x,c1y) and (c2x,c2y) both equal (x,y), you get a linear interpolation ("lines") "velocity"/"V" d=[(x, y, vx, vy), ...] curve that passes through (x,y) with velocity (vx,vy) (one unit of arclength per unit time); in other words, (vx,vy) is the tangent vector at (x,y). If (vx,vy) is (0,0), you get a linear interpolation ("lines"). "foreback"/"F" d=[(x, y, bx, by, fx, fy), ...] like "velocity" except that there is a left derivative (bx,by) and a right derivative (fx,fy). If (bx,by) equals (fx,fy) (with no minus sign), you get a "velocity" curve "smooth"/"S" d=[(x,y), (x,y), ...] a "velocity" interpolation with (vx,vy)[i] equal to ((x,y)[i+1] - (x,y)[i-1])/2: the minimal derivative """ defaults = {} def __repr__(self): return "<Poly (%d nodes) mode=%s loop=%s %s>" % ( len(self.d), self.mode, repr(self.loop), self.attr) def __init__(self, d=[], mode="L", loop=False, **attr): self.d = list(d) self.mode = mode self.loop = loop self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" return self.Path(trans).SVG() def Path(self, trans=None, local=False): """Apply the transformation "trans" and return a Path object in global coordinates. If local=True, return a Path in local coordinates (which must be transformed again).""" if isinstance(trans, basestring): trans = totrans(trans) if self.mode[0] == "L" or self.mode[0] == "l": mode = "L" elif self.mode[0] == "B" or self.mode[0] == "b": mode = "B" elif self.mode[0] == "V" or self.mode[0] == "v": mode = "V" elif self.mode[0] == "F" or self.mode[0] == "f": mode = "F" elif self.mode[0] == "S" or self.mode[0] == "s": mode = "S" vx, vy = [0.]*len(self.d), [0.]*len(self.d) for i in xrange(len(self.d)): inext = (i+1) % len(self.d) iprev = (i-1) % len(self.d) vx[i] = (self.d[inext][0] - self.d[iprev][0])/2. vy[i] = (self.d[inext][1] - self.d[iprev][1])/2. if not self.loop and (i == 0 or i == len(self.d)-1): vx[i], vy[i] = 0., 0. 
else: raise ValueError("mode must be \"lines\", \"bezier\", \"velocity\", \"foreback\", \"smooth\", or an abbreviation") d = [] indexes = list(range(len(self.d))) if self.loop and len(self.d) > 0: indexes.append(0) for i in indexes: inext = (i+1) % len(self.d) iprev = (i-1) % len(self.d) x, y = self.d[i][0], self.d[i][1] if trans is None: X, Y = x, y else: X, Y = trans(x, y) if d == []: if local: d.append(("M", x, y, False)) else: d.append(("M", X, Y, True)) elif mode == "L": if local: d.append(("L", x, y, False)) else: d.append(("L", X, Y, True)) elif mode == "B": c1x, c1y = self.d[i][2], self.d[i][3] if trans is None: C1X, C1Y = c1x, c1y else: C1X, C1Y = trans(c1x, c1y) c2x, c2y = self.d[i][4], self.d[i][5] if trans is None: C2X, C2Y = c2x, c2y else: C2X, C2Y = trans(c2x, c2y) if local: d.append(("C", c1x, c1y, False, c2x, c2y, False, x, y, False)) else: d.append(("C", C1X, C1Y, True, C2X, C2Y, True, X, Y, True)) elif mode == "V": c1x, c1y = self.d[iprev][2]/3. + self.d[iprev][0], self.d[iprev][3]/3. + self.d[iprev][1] c2x, c2y = self.d[i][2]/-3. + x, self.d[i][3]/-3. + y if trans is None: C1X, C1Y = c1x, c1y else: C1X, C1Y = trans(c1x, c1y) if trans is None: C2X, C2Y = c2x, c2y else: C2X, C2Y = trans(c2x, c2y) if local: d.append(("C", c1x, c1y, False, c2x, c2y, False, x, y, False)) else: d.append(("C", C1X, C1Y, True, C2X, C2Y, True, X, Y, True)) elif mode == "F": c1x, c1y = self.d[iprev][4]/3. + self.d[iprev][0], self.d[iprev][5]/3. + self.d[iprev][1] c2x, c2y = self.d[i][2]/-3. + x, self.d[i][3]/-3. + y if trans is None: C1X, C1Y = c1x, c1y else: C1X, C1Y = trans(c1x, c1y) if trans is None: C2X, C2Y = c2x, c2y else: C2X, C2Y = trans(c2x, c2y) if local: d.append(("C", c1x, c1y, False, c2x, c2y, False, x, y, False)) else: d.append(("C", C1X, C1Y, True, C2X, C2Y, True, X, Y, True)) elif mode == "S": c1x, c1y = vx[iprev]/3. + self.d[iprev][0], vy[iprev]/3. + self.d[iprev][1] c2x, c2y = vx[i]/-3. + x, vy[i]/-3. + y if trans is None: C1X, C1Y = c1x, c1y else: C1X, C1Y = trans(c1x, c1y) if trans is None: C2X, C2Y = c2x, c2y else: C2X, C2Y = trans(c2x, c2y) if local: d.append(("C", c1x, c1y, False, c2x, c2y, False, x, y, False)) else: d.append(("C", C1X, C1Y, True, C2X, C2Y, True, X, Y, True)) if self.loop and len(self.d) > 0: d.append(("Z",)) return Path(d, **self.attr) ###################################################################### class Text: """Draws a text string at a specified point in local coordinates. x, y required location of the point in local coordinates d required text/Unicode string attribute=value pairs keyword list SVG attributes """ defaults = {"stroke": "none", "fill": "black", "font-size": 5, } def __repr__(self): return "<Text %s at (%g, %g) %s>" % (repr(self.d), self.x, self.y, self.attr) def __init__(self, x, y, d, **attr): self.x = x self.y = y self.d = unicode(d) self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" if isinstance(trans, basestring): trans = totrans(trans) X, Y = self.x, self.y if trans is not None: X, Y = trans(X, Y) return SVG("text", self.d, x=X, y=Y, **self.attr) class TextGlobal: """Draws a text string at a specified point in global coordinates. 
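For example, a sketch of an annotation that stays put under any transformation:
>>> title = TextGlobal(50., 5., "title")
>>> svg = title.SVG("2*x, 2*y")  # the transformation is ignored; the text stays at (50, 5)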
x, y required location of the point in global coordinates d required text/Unicode string attribute=value pairs keyword list SVG attributes """ defaults = {"stroke": "none", "fill": "black", "font-size": 5, } def __repr__(self): return "<TextGlobal %s at (%s, %s) %s>" % (repr(self.d), str(self.x), str(self.y), self.attr) def __init__(self, x, y, d, **attr): self.x = x self.y = y self.d = unicode(d) self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" return SVG("text", self.d, x=self.x, y=self.y, **self.attr) ###################################################################### _symbol_templates = {"dot": SVG("symbol", SVG("circle", cx=0, cy=0, r=1, stroke="none", fill="black"), viewBox="0 0 1 1", overflow="visible"), "box": SVG("symbol", SVG("rect", x1=-1, y1=-1, x2=1, y2=1, stroke="none", fill="black"), viewBox="0 0 1 1", overflow="visible"), "uptri": SVG("symbol", SVG("path", d="M -1 0.866 L 1 0.866 L 0 -0.866 Z", stroke="none", fill="black"), viewBox="0 0 1 1", overflow="visible"), "downtri": SVG("symbol", SVG("path", d="M -1 -0.866 L 1 -0.866 L 0 0.866 Z", stroke="none", fill="black"), viewBox="0 0 1 1", overflow="visible"), } def make_symbol(id, shape="dot", **attr): """Creates a new instance of an SVG symbol to avoid cross-linking objects. id required a new identifier (string/Unicode) shape default="dot" the shape name from _symbol_templates attribute=value list keyword list modify the SVG attributes of the new symbol """ output = copy.deepcopy(_symbol_templates[shape]) for i in output.sub: i.attr.update(attr_preprocess(attr)) output["id"] = id return output _circular_dot = make_symbol("circular_dot") class Dots: """Dots draws SVG symbols at a set of points. 
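For example (a sketch; the parameters are documented below):
>>> dots = Dots([(0., 0.), (1., 1.), (2., 4.)])
>>> svg = dots.SVG("10*x, 10*y")  # a <defs> plus one <use> per transformed point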
d required list of (x,y) points symbol default=None SVG symbol or a new identifier to label an auto-generated symbol; if None, use pre-defined _circular_dot width, height default=1, 1 width and height of the symbols in SVG coordinates attribute=value pairs keyword list SVG attributes """ defaults = {} def __repr__(self): return "<Dots (%d nodes) %s>" % (len(self.d), self.attr) def __init__(self, d=[], symbol=None, width=1., height=1., **attr): self.d = list(d) self.width = width self.height = height self.attr = dict(self.defaults) self.attr.update(attr) if symbol is None: self.symbol = _circular_dot elif isinstance(symbol, SVG): self.symbol = symbol else: self.symbol = make_symbol(symbol) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" if isinstance(trans, basestring): trans = totrans(trans) output = SVG("g", SVG("defs", self.symbol)) id = "#%s" % self.symbol["id"] for p in self.d: x, y = p[0], p[1] if trans is None: X, Y = x, y else: X, Y = trans(x, y) item = SVG("use", x=X, y=Y, xlink__href=id) if self.width is not None: item["width"] = self.width if self.height is not None: item["height"] = self.height output.append(item) return output ###################################################################### _marker_templates = {"arrow_start": SVG("marker", SVG("path", d="M 9 3.6 L 10.5 0 L 0 3.6 L 10.5 7.2 L 9 3.6 Z"), viewBox="0 0 10.5 7.2", refX="9", refY="3.6", markerWidth="10.5", markerHeight="7.2", markerUnits="strokeWidth", orient="auto", stroke="none", fill="black"), "arrow_end": SVG("marker", SVG("path", d="M 1.5 3.6 L 0 0 L 10.5 3.6 L 0 7.2 L 1.5 3.6 Z"), viewBox="0 0 10.5 7.2", refX="1.5", refY="3.6", markerWidth="10.5", markerHeight="7.2", markerUnits="strokeWidth", orient="auto", stroke="none", fill="black"), } def make_marker(id, shape, **attr): """Creates a new instance of an SVG marker to avoid cross-linking objects. id required a new identifier (string/Unicode) shape required the shape name from _marker_templates attribute=value list keyword list modify the SVG attributes of the new marker """ output = copy.deepcopy(_marker_templates[shape]) for i in output.sub: i.attr.update(attr_preprocess(attr)) output["id"] = id return output class Line(Curve): """Draws a line between two points. 
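For example, a sketch of a line with an arrowhead at its far end ("tip" is an arbitrary new marker identifier):
>>> line = Line(0., 0., 100., 100., arrow_end="tip")
>>> svg = line.SVG()  # a <g> holding a <defs> marker and the <path>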
Line(x1, y1, x2, y2, arrow_start, arrow_end, attribute=value) x1, y1 required the starting point x2, y2 required the ending point arrow_start default=None if an identifier string/Unicode, draw a new arrow object at the beginning of the line; if a marker, draw that marker instead arrow_end default=None same for the end of the line attribute=value pairs keyword list SVG attributes """ defaults = {} def __repr__(self): return "<Line (%g, %g) to (%g, %g) %s>" % ( self.x1, self.y1, self.x2, self.y2, self.attr) def __init__(self, x1, y1, x2, y2, arrow_start=None, arrow_end=None, **attr): self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2 self.arrow_start, self.arrow_end = arrow_start, arrow_end self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" line = self.Path(trans).SVG() if ((self.arrow_start != False and self.arrow_start is not None) or (self.arrow_end != False and self.arrow_end is not None)): defs = SVG("defs") if self.arrow_start != False and self.arrow_start is not None: if isinstance(self.arrow_start, SVG): defs.append(self.arrow_start) line.attr["marker-start"] = "url(#%s)" % self.arrow_start["id"] elif isinstance(self.arrow_start, basestring): defs.append(make_marker(self.arrow_start, "arrow_start")) line.attr["marker-start"] = "url(#%s)" % self.arrow_start else: raise TypeError("arrow_start must be False/None or an id string for the new marker") if self.arrow_end != False and self.arrow_end is not None: if isinstance(self.arrow_end, SVG): defs.append(self.arrow_end) line.attr["marker-end"] = "url(#%s)" % self.arrow_end["id"] elif isinstance(self.arrow_end, basestring): defs.append(make_marker(self.arrow_end, "arrow_end")) line.attr["marker-end"] = "url(#%s)" % self.arrow_end else: raise TypeError("arrow_end must be False/None or an id string for the new marker") return SVG("g", defs, line) return line def Path(self, trans=None, local=False): """Apply the transformation "trans" and return a Path object in global coordinates. If local=True, return a Path in local coordinates (which must be transformed again).""" self.f = lambda t: (self.x1 + t*(self.x2 - self.x1), self.y1 + t*(self.y2 - self.y1)) self.low = 0. self.high = 1. self.loop = False if trans is None: return Path([("M", self.x1, self.y1, not local), ("L", self.x2, self.y2, not local)], **self.attr) else: return Curve.Path(self, trans, local) class LineGlobal: """Draws a line between two points, one or both of which is in global coordinates. 
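For example, a sketch of a line pinned to the canvas (the parameters are documented below):
>>> border = LineGlobal(0., 0., 100., 100.)
>>> svg = border.SVG("2*x, 2*y")  # unaffected, because both points are global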
LineGlobal(x1, y1, x2, y2, local1, local2, arrow_start, arrow_end, attribute=value) x1, y1 required the starting point x2, y2 required the ending point local1 default=False if True, interpret first point as a local coordinate (apply transform) local2 default=False if True, interpret second point as a local coordinate (apply transform) arrow_start default=None if an identifier string/Unicode, draw a new arrow object at the beginning of the line; if a marker, draw that marker instead arrow_end default=None same for the end of the line attribute=value pairs keyword list SVG attributes """ defaults = {} def __repr__(self): local1, local2 = "", "" if self.local1: local1 = "L" if self.local2: local2 = "L" return "<LineGlobal %s(%s, %s) to %s(%s, %s) %s>" % ( local1, str(self.x1), str(self.y1), local2, str(self.x2), str(self.y2), self.attr) def __init__(self, x1, y1, x2, y2, local1=False, local2=False, arrow_start=None, arrow_end=None, **attr): self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2 self.local1, self.local2 = local1, local2 self.arrow_start, self.arrow_end = arrow_start, arrow_end self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" if isinstance(trans, basestring): trans = totrans(trans) X1, Y1, X2, Y2 = self.x1, self.y1, self.x2, self.y2 if self.local1: X1, Y1 = trans(X1, Y1) if self.local2: X2, Y2 = trans(X2, Y2) line = SVG("path", d="M%s %s L%s %s" % (X1, Y1, X2, Y2), **self.attr) if ((self.arrow_start != False and self.arrow_start is not None) or (self.arrow_end != False and self.arrow_end is not None)): defs = SVG("defs") if self.arrow_start != False and self.arrow_start is not None: if isinstance(self.arrow_start, SVG): defs.append(self.arrow_start) line.attr["marker-start"] = "url(#%s)" % self.arrow_start["id"] elif isinstance(self.arrow_start, basestring): defs.append(make_marker(self.arrow_start, "arrow_start")) line.attr["marker-start"] = "url(#%s)" % self.arrow_start else: raise TypeError("arrow_start must be False/None or an id string for the new marker") if self.arrow_end != False and self.arrow_end is not None: if isinstance(self.arrow_end, SVG): defs.append(self.arrow_end) line.attr["marker-end"] = "url(#%s)" % self.arrow_end["id"] elif isinstance(self.arrow_end, basestring): defs.append(make_marker(self.arrow_end, "arrow_end")) line.attr["marker-end"] = "url(#%s)" % self.arrow_end else: raise TypeError("arrow_end must be False/None or an id string for the new marker") return SVG("g", defs, line) return line class VLine(Line): """Draws a vertical line. VLine(y1, y2, x, attribute=value) y1, y2 required y range x required x position attribute=value pairs keyword list SVG attributes """ defaults = {} def __repr__(self): return "<VLine (%g, %g) at x=%s %s>" % (self.y1, self.y2, self.x, self.attr) def __init__(self, y1, y2, x, **attr): self.x = x self.attr = dict(self.defaults) self.attr.update(attr) Line.__init__(self, x, y1, x, y2, **self.attr) def Path(self, trans=None, local=False): """Apply the transformation "trans" and return a Path object in global coordinates. If local=True, return a Path in local coordinates (which must be transformed again).""" self.x1 = self.x self.x2 = self.x return Line.Path(self, trans, local) class HLine(Line): """Draws a horizontal line.
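For example (a sketch):
>>> h = HLine(0., 10., 5.)  # from (0, 5) to (10, 5)
>>> svg = h.SVG()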
HLine(x1, x2, y, attribute=value) x1, x2 required x range y required y position attribute=value pairs keyword list SVG attributes """ defaults = {} def __repr__(self): return "<HLine (%g, %g) at y=%s %s>" % (self.x1, self.x2, self.y, self.attr) def __init__(self, x1, x2, y, **attr): self.y = y self.attr = dict(self.defaults) self.attr.update(attr) Line.__init__(self, x1, y, x2, y, **self.attr) def Path(self, trans=None, local=False): """Apply the transformation "trans" and return a Path object in global coordinates. If local=True, return a Path in local coordinates (which must be transformed again).""" self.y1 = self.y self.y2 = self.y return Line.Path(self, trans, local) ###################################################################### class Rect(Curve): """Draws a rectangle. Rect(x1, y1, x2, y2, attribute=value) x1, y1 required the starting point x2, y2 required the ending point attribute=value pairs keyword list SVG attributes """ defaults = {} def __repr__(self): return "<Rect (%g, %g), (%g, %g) %s>" % ( self.x1, self.y1, self.x2, self.y2, self.attr) def __init__(self, x1, y1, x2, y2, **attr): self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2 self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" return self.Path(trans).SVG() def Path(self, trans=None, local=False): """Apply the transformation "trans" and return a Path object in global coordinates. If local=True, return a Path in local coordinates (which must be transformed again).""" if trans is None: return Path([("M", self.x1, self.y1, not local), ("L", self.x2, self.y1, not local), ("L", self.x2, self.y2, not local), ("L", self.x1, self.y2, not local), ("Z",)], **self.attr) else: self.low = 0. self.high = 1. self.loop = False self.f = lambda t: (self.x1 + t*(self.x2 - self.x1), self.y1) d1 = Curve.Path(self, trans, local).d self.f = lambda t: (self.x2, self.y1 + t*(self.y2 - self.y1)) d2 = Curve.Path(self, trans, local).d del d2[0] self.f = lambda t: (self.x2 + t*(self.x1 - self.x2), self.y2) d3 = Curve.Path(self, trans, local).d del d3[0] self.f = lambda t: (self.x1, self.y2 + t*(self.y1 - self.y2)) d4 = Curve.Path(self, trans, local).d del d4[0] return Path(d=(d1 + d2 + d3 + d4 + [("Z",)]), **self.attr) ###################################################################### class Ellipse(Curve): """Draws an ellipse from a semimajor vector (ax,ay) and a semiminor length (b). Ellipse(x, y, ax, ay, b, attribute=value) x, y required the center of the ellipse/circle ax, ay required a vector indicating the length and direction of the semimajor axis b required the length of the semiminor axis. If equal to sqrt(ax**2 + ay**2), the ellipse is a circle attribute=value pairs keyword list SVG attributes (If sqrt(ax**2 + ay**2) is less than b, then (ax,ay) is actually the semiminor axis.) """ defaults = {} def __repr__(self): return "<Ellipse (%g, %g) a=(%g, %g), b=%g %s>" % ( self.x, self.y, self.ax, self.ay, self.b, self.attr) def __init__(self, x, y, ax, ay, b, **attr): self.x, self.y, self.ax, self.ay, self.b = x, y, ax, ay, b self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" return self.Path(trans).SVG() def Path(self, trans=None, local=False): """Apply the transformation "trans" and return a Path object in global coordinates.
If local=True, return a Path in local coordinates (which must be transformed again).""" angle = math.atan2(self.ay, self.ax) + math.pi/2. bx = self.b * math.cos(angle) by = self.b * math.sin(angle) self.f = lambda t: (self.x + self.ax*math.cos(t) + bx*math.sin(t), self.y + self.ay*math.cos(t) + by*math.sin(t)) self.low = -math.pi self.high = math.pi self.loop = True return Curve.Path(self, trans, local) ###################################################################### def unumber(x): """Converts numbers to a Unicode string, taking advantage of special Unicode characters to make nice minus signs and scientific notation. """ output = u"%g" % x if output[0] == u"-": output = u"\u2013" + output[1:] index = output.find(u"e") if index != -1: uniout = unicode(output[:index]) + u"\u00d710" saw_nonzero = False for n in output[index+1:]: if n == u"+": pass # uniout += u"\u207a" elif n == u"-": uniout += u"\u207b" elif n == u"0": if saw_nonzero: uniout += u"\u2070" elif n == u"1": saw_nonzero = True uniout += u"\u00b9" elif n == u"2": saw_nonzero = True uniout += u"\u00b2" elif n == u"3": saw_nonzero = True uniout += u"\u00b3" elif u"4" <= n <= u"9": saw_nonzero = True if saw_nonzero: uniout += eval("u\"\\u%x\"" % (0x2070 + ord(n) - ord(u"0"))) else: uniout += n if uniout[:2] == u"1\u00d7": uniout = uniout[2:] return uniout return output class Ticks: """Superclass for all graphics primitives that draw ticks, miniticks, and tick labels. This class only draws the ticks. Ticks(f, low, high, ticks, miniticks, labels, logbase, arrow_start, arrow_end, text_attr, attribute=value) f required parametric function along which ticks will be drawn; has the same format as the function used in Curve low, high required range of the independent variable ticks default=-10 request ticks according to the standard tick specification (see below) miniticks default=True request miniticks according to the standard minitick specification (below) labels True request tick labels according to the standard tick label specification (below) logbase default=None if a number, the axis is logarithmic with ticks at the given base (usually 10) arrow_start default=None if a new string identifier, draw an arrow at the low-end of the axis, referenced by that identifier; if an SVG marker object, use that marker arrow_end default=None if a new string identifier, draw an arrow at the high-end of the axis, referenced by that identifier; if an SVG marker object, use that marker text_attr default={} SVG attributes for the text labels attribute=value pairs keyword list SVG attributes for the tick marks Standard tick specification: * True: same as -10 (below). * Positive number N: draw exactly N ticks, including the endpoints. To subdivide an axis into 10 equal-sized segments, ask for 11 ticks. * Negative number -N: draw at least N ticks. Ticks will be chosen with "natural" values, multiples of 2 or 5. * List of values: draw a tick mark at each value. * Dict of value, label pairs: draw a tick mark at each value, labeling it with the given string. This lets you say things like {3.14159: "pi"}. * False or None: no ticks. Standard minitick specification: * True: draw miniticks with "natural" values, more closely spaced than the ticks. * Positive number N: draw exactly N miniticks, including the endpoints. To subdivide an axis into 100 equal-sized segments, ask for 101 miniticks. * Negative number -N: draw at least N miniticks. * List of values: draw a minitick mark at each value. * False or None: no miniticks. 
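For example, a sketch of explicit ticks with a custom label (the tick label specification follows below):
>>> axis = Ticks(lambda t: (t, 0.), 0., 6.3, ticks={3.14159: "pi"}, miniticks=False)
>>> svg = axis.SVG()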
Standard tick label specification: * True: use the unumber function (described below) * Format string: standard format strings, e.g. "%5.2f" for 12.34 * Python callable: function that converts numbers to strings * False or None: no labels """ defaults = {"stroke-width": "0.25pt", } text_defaults = {"stroke": "none", "fill": "black", "font-size": 5, } tick_start = -1.5 tick_end = 1.5 minitick_start = -0.75 minitick_end = 0.75 text_start = 2.5 text_angle = 0. def __repr__(self): return "<Ticks %s from %s to %s ticks=%s labels=%s %s>" % ( self.f, self.low, self.high, str(self.ticks), str(self.labels), self.attr) def __init__(self, f, low, high, ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, text_attr={}, **attr): self.f = f self.low = low self.high = high self.ticks = ticks self.miniticks = miniticks self.labels = labels self.logbase = logbase self.arrow_start = arrow_start self.arrow_end = arrow_end self.attr = dict(self.defaults) self.attr.update(attr) self.text_attr = dict(self.text_defaults) self.text_attr.update(text_attr) def orient_tickmark(self, t, trans=None): """Return the position, normalized local x vector, normalized local y vector, and angle of a tick at position t. Normally only used internally. """ if isinstance(trans, basestring): trans = totrans(trans) if trans is None: f = self.f else: f = lambda t: trans(*self.f(t)) eps = _epsilon * abs(self.high - self.low) X, Y = f(t) Xprime, Yprime = f(t + eps) xhatx, xhaty = (Xprime - X)/eps, (Yprime - Y)/eps norm = math.sqrt(xhatx**2 + xhaty**2) if norm != 0: xhatx, xhaty = xhatx/norm, xhaty/norm else: xhatx, xhaty = 1., 0. angle = math.atan2(xhaty, xhatx) + math.pi/2. yhatx, yhaty = math.cos(angle), math.sin(angle) return (X, Y), (xhatx, xhaty), (yhatx, yhaty), angle def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" if isinstance(trans, basestring): trans = totrans(trans) self.last_ticks, self.last_miniticks = self.interpret() tickmarks = Path([], **self.attr) minitickmarks = Path([], **self.attr) output = SVG("g") if ((self.arrow_start != False and self.arrow_start is not None) or (self.arrow_end != False and self.arrow_end is not None)): defs = SVG("defs") if self.arrow_start != False and self.arrow_start is not None: if isinstance(self.arrow_start, SVG): defs.append(self.arrow_start) elif isinstance(self.arrow_start, basestring): defs.append(make_marker(self.arrow_start, "arrow_start")) else: raise TypeError("arrow_start must be False/None or an id string for the new marker") if self.arrow_end != False and self.arrow_end is not None: if isinstance(self.arrow_end, SVG): defs.append(self.arrow_end) elif isinstance(self.arrow_end, basestring): defs.append(make_marker(self.arrow_end, "arrow_end")) else: raise TypeError("arrow_end must be False/None or an id string for the new marker") output.append(defs) eps = _epsilon * (self.high - self.low) for t, label in self.last_ticks.items(): (X, Y), (xhatx, xhaty), (yhatx, yhaty), angle = self.orient_tickmark(t, trans) if ((not self.arrow_start or abs(t - self.low) > eps) and (not self.arrow_end or abs(t - self.high) > eps)): tickmarks.d.append(("M", X - yhatx*self.tick_start, Y - yhaty*self.tick_start, True)) tickmarks.d.append(("L", X - yhatx*self.tick_end, Y - yhaty*self.tick_end, True)) angle = (angle - math.pi/2.)*180./math.pi + self.text_angle ########### a HACK! 
############ (to be removed when Inkscape handles baselines) if _hacks["inkscape-text-vertical-shift"]: if self.text_start > 0: X += math.cos(angle*math.pi/180. + math.pi/2.) * 2. Y += math.sin(angle*math.pi/180. + math.pi/2.) * 2. else: X += math.cos(angle*math.pi/180. + math.pi/2.) * 2. * 2.5 Y += math.sin(angle*math.pi/180. + math.pi/2.) * 2. * 2.5 ########### end hack ########### if label != "": output.append(SVG("text", label, transform="translate(%g, %g) rotate(%g)" % (X - yhatx*self.text_start, Y - yhaty*self.text_start, angle), **self.text_attr)) for t in self.last_miniticks: skip = False for tt in self.last_ticks.keys(): if abs(t - tt) < eps: skip = True break if not skip: (X, Y), (xhatx, xhaty), (yhatx, yhaty), angle = self.orient_tickmark(t, trans) if ((not self.arrow_start or abs(t - self.low) > eps) and (not self.arrow_end or abs(t - self.high) > eps)): minitickmarks.d.append(("M", X - yhatx*self.minitick_start, Y - yhaty*self.minitick_start, True)) minitickmarks.d.append(("L", X - yhatx*self.minitick_end, Y - yhaty*self.minitick_end, True)) output.prepend(tickmarks.SVG(trans)) output.prepend(minitickmarks.SVG(trans)) return output def interpret(self): """Evaluate and return optimal ticks and miniticks according to the standard minitick specification. Normally only used internally. """ if self.labels is None or self.labels == False: format = lambda x: "" elif self.labels == True: format = unumber elif isinstance(self.labels, basestring): format = lambda x: (self.labels % x) elif callable(self.labels): format = self.labels else: raise TypeError("labels must be None/False, True, a format string, or a number->string function") # Now for the ticks ticks = self.ticks # Case 1: ticks is None/False if ticks is None or ticks == False: return {}, [] # Case 2: ticks is the number of desired ticks elif isinstance(ticks, (int, long)): if ticks == True: ticks = -10 if self.logbase is None: ticks = self.compute_ticks(ticks, format) else: ticks = self.compute_logticks(self.logbase, ticks, format) # Now for the miniticks if self.miniticks == True: if self.logbase is None: return ticks, self.compute_miniticks(ticks) else: return ticks, self.compute_logminiticks(self.logbase) elif isinstance(self.miniticks, (int, long)): return ticks, self.regular_miniticks(self.miniticks) elif getattr(self.miniticks, "__iter__", False): return ticks, self.miniticks elif self.miniticks == False or self.miniticks is None: return ticks, [] else: raise TypeError("miniticks must be None/False, True, a number of desired miniticks, or a list of numbers") # Cases 3 & 4: ticks is iterable elif getattr(ticks, "__iter__", False): # Case 3: ticks is some kind of list if not isinstance(ticks, dict): output = {} eps = _epsilon * (self.high - self.low) for x in ticks: if format == unumber and abs(x) < eps: output[x] = u"0" else: output[x] = format(x) ticks = output # Case 4: ticks is a dict else: pass # Now for the miniticks if self.miniticks == True: if self.logbase is None: return ticks, self.compute_miniticks(ticks) else: return ticks, self.compute_logminiticks(self.logbase) elif isinstance(self.miniticks, (int, long)): return ticks, self.regular_miniticks(self.miniticks) elif getattr(self.miniticks, "__iter__", False): return ticks, self.miniticks elif self.miniticks == False or self.miniticks is None: return ticks, [] else: raise TypeError("miniticks must be None/False, True, a number of desired miniticks, or a list of numbers") else: raise TypeError("ticks must be None/False, a number of desired ticks, a list of 
numbers, or a dictionary of explicit markers") def compute_ticks(self, N, format): """Return less than -N or exactly N optimal linear ticks. Normally only used internally. """ if self.low >= self.high: raise ValueError("low must be less than high") if N == 1: raise ValueError("N can be 0 or >1 to specify the exact number of ticks or negative to specify a maximum") eps = _epsilon * (self.high - self.low) if N >= 0: output = {} x = self.low for i in xrange(N): if format == unumber and abs(x) < eps: label = u"0" else: label = format(x) output[x] = label x += (self.high - self.low)/(N-1.) return output N = -N counter = 0 granularity = 10**math.ceil(math.log10(max(abs(self.low), abs(self.high)))) lowN = math.ceil(1.*self.low / granularity) highN = math.floor(1.*self.high / granularity) while lowN > highN: countermod3 = counter % 3 if countermod3 == 0: granularity *= 0.5 elif countermod3 == 1: granularity *= 0.4 else: granularity *= 0.5 counter += 1 lowN = math.ceil(1.*self.low / granularity) highN = math.floor(1.*self.high / granularity) last_granularity = granularity last_trial = None while True: trial = {} for n in range(int(lowN), int(highN)+1): x = n * granularity if format == unumber and abs(x) < eps: label = u"0" else: label = format(x) trial[x] = label if int(highN)+1 - int(lowN) >= N: if last_trial is None: v1, v2 = self.low, self.high return {v1: format(v1), v2: format(v2)} else: low_in_ticks, high_in_ticks = False, False for t in last_trial.keys(): if 1.*abs(t - self.low)/last_granularity < _epsilon: low_in_ticks = True if 1.*abs(t - self.high)/last_granularity < _epsilon: high_in_ticks = True lowN = 1.*self.low / last_granularity highN = 1.*self.high / last_granularity if abs(lowN - round(lowN)) < _epsilon and not low_in_ticks: last_trial[self.low] = format(self.low) if abs(highN - round(highN)) < _epsilon and not high_in_ticks: last_trial[self.high] = format(self.high) return last_trial last_granularity = granularity last_trial = trial countermod3 = counter % 3 if countermod3 == 0: granularity *= 0.5 elif countermod3 == 1: granularity *= 0.4 else: granularity *= 0.5 counter += 1 lowN = math.ceil(1.*self.low / granularity) highN = math.floor(1.*self.high / granularity) def regular_miniticks(self, N): """Return exactly N linear ticks. Normally only used internally. """ output = [] x = self.low for i in xrange(N): output.append(x) x += (self.high - self.low)/(N-1.) return output def compute_miniticks(self, original_ticks): """Return optimal linear miniticks, given a set of ticks. Normally only used internally. """ if len(original_ticks) < 2: original_ticks = ticks(self.low, self.high) # XXX ticks is undefined! 
original_ticks = original_ticks.keys() original_ticks.sort() if self.low > original_ticks[0] + _epsilon or self.high < original_ticks[-1] - _epsilon: raise ValueError("original_ticks {%g...%g} extend beyond [%g, %g]" % (original_ticks[0], original_ticks[-1], self.low, self.high)) granularities = [] for i in range(len(original_ticks)-1): granularities.append(original_ticks[i+1] - original_ticks[i]) spacing = 10**(math.ceil(math.log10(min(granularities)) - 1)) output = [] x = original_ticks[0] - math.ceil(1.*(original_ticks[0] - self.low) / spacing) * spacing while x <= self.high: if x >= self.low: already_in_ticks = False for t in original_ticks: if abs(x-t) < _epsilon * (self.high - self.low): already_in_ticks = True if not already_in_ticks: output.append(x) x += spacing return output def compute_logticks(self, base, N, format): """Return less than -N or exactly N optimal logarithmic ticks. Normally only used internally. """ if self.low >= self.high: raise ValueError("low must be less than high") if N == 1: raise ValueError("N can be 0 or >1 to specify the exact number of ticks or negative to specify a maximum") eps = _epsilon * (self.high - self.low) if N >= 0: output = {} x = self.low for i in xrange(N): if format == unumber and abs(x) < eps: label = u"0" else: label = format(x) output[x] = label x += (self.high - self.low)/(N-1.) return output N = -N lowN = math.floor(math.log(self.low, base)) highN = math.ceil(math.log(self.high, base)) output = {} for n in range(int(lowN), int(highN)+1): x = base**n label = format(x) if self.low <= x <= self.high: output[x] = label for i in range(1, len(output)): keys = output.keys() keys.sort() keys = keys[::i] values = map(lambda k: output[k], keys) if len(values) <= N: for k in output.keys(): if k not in keys: output[k] = "" break if len(output) <= 2: output2 = self.compute_ticks(N=-int(math.ceil(N/2.)), format=format) lowest = min(output2) for k in output: if k < lowest: output2[k] = output[k] output = output2 return output def compute_logminiticks(self, base): """Return optimal logarithmic miniticks, given a set of ticks. Normally only used internally. """ if self.low >= self.high: raise ValueError("low must be less than high") lowN = math.floor(math.log(self.low, base)) highN = math.ceil(math.log(self.high, base)) output = [] num_ticks = 0 for n in range(int(lowN), int(highN)+1): x = base**n if self.low <= x <= self.high: num_ticks += 1 for m in range(2, int(math.ceil(base))): minix = m * x if self.low <= minix <= self.high: output.append(minix) if num_ticks <= 2: return [] else: return output ###################################################################### class CurveAxis(Curve, Ticks): """Draw an axis with tick marks along a parametric curve. 
CurveAxis(f, low, high, ticks, miniticks, labels, logbase, arrow_start, arrow_end, text_attr, attribute=value) f required a Python callable or string in the form "f(t), g(t)", just like Curve low, high required left and right endpoints ticks default=-10 request ticks according to the standard tick specification (see help(Ticks)) miniticks default=True request miniticks according to the standard minitick specification labels True request tick labels according to the standard tick label specification logbase default=None if a number, the x axis is logarithmic with ticks at the given base (10 being the most common) arrow_start default=None if a new string identifier, draw an arrow at the low-end of the axis, referenced by that identifier; if an SVG marker object, use that marker arrow_end default=None if a new string identifier, draw an arrow at the high-end of the axis, referenced by that identifier; if an SVG marker object, use that marker text_attr default={} SVG attributes for the text labels attribute=value pairs keyword list SVG attributes """ defaults = {"stroke-width": "0.25pt", } text_defaults = {"stroke": "none", "fill": "black", "font-size": 5, } def __repr__(self): return "<CurveAxis %s [%s, %s] ticks=%s labels=%s %s>" % ( self.f, self.low, self.high, str(self.ticks), str(self.labels), self.attr) def __init__(self, f, low, high, ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, text_attr={}, **attr): tattr = dict(self.text_defaults) tattr.update(text_attr) Curve.__init__(self, f, low, high) Ticks.__init__(self, f, low, high, ticks, miniticks, labels, logbase, arrow_start, arrow_end, tattr, **attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" func = Curve.SVG(self, trans) ticks = Ticks.SVG(self, trans) # returns a <g /> if self.arrow_start != False and self.arrow_start is not None: if isinstance(self.arrow_start, basestring): func.attr["marker-start"] = "url(#%s)" % self.arrow_start else: func.attr["marker-start"] = "url(#%s)" % self.arrow_start.id if self.arrow_end != False and self.arrow_end is not None: if isinstance(self.arrow_end, basestring): func.attr["marker-end"] = "url(#%s)" % self.arrow_end else: func.attr["marker-end"] = "url(#%s)" % self.arrow_end.id ticks.append(func) return ticks class LineAxis(Line, Ticks): """Draws an axis with tick marks along a line. 
LineAxis(x1, y1, x2, y2, start, end, ticks, miniticks, labels, logbase, arrow_start, arrow_end, text_attr, attribute=value) x1, y1 required starting point x2, y2 required ending point start, end default=0, 1 values to start and end labeling ticks default=-10 request ticks according to the standard tick specification (see help(Ticks)) miniticks default=True request miniticks according to the standard minitick specification labels True request tick labels according to the standard tick label specification logbase default=None if a number, the x axis is logarithmic with ticks at the given base (usually 10) arrow_start default=None if a new string identifier, draw an arrow at the low-end of the axis, referenced by that identifier; if an SVG marker object, use that marker arrow_end default=None if a new string identifier, draw an arrow at the high-end of the axis, referenced by that identifier; if an SVG marker object, use that marker text_attr default={} SVG attributes for the text labels attribute=value pairs keyword list SVG attributes """ defaults = {"stroke-width": "0.25pt", } text_defaults = {"stroke": "none", "fill": "black", "font-size": 5, } def __repr__(self): return "<LineAxis (%g, %g) to (%g, %g) ticks=%s labels=%s %s>" % ( self.x1, self.y1, self.x2, self.y2, str(self.ticks), str(self.labels), self.attr) def __init__(self, x1, y1, x2, y2, start=0., end=1., ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, exclude=None, text_attr={}, **attr): self.start = start self.end = end self.exclude = exclude tattr = dict(self.text_defaults) tattr.update(text_attr) Line.__init__(self, x1, y1, x2, y2, **attr) Ticks.__init__(self, None, None, None, ticks, miniticks, labels, logbase, arrow_start, arrow_end, tattr, **attr) def interpret(self): if self.exclude is not None and not (isinstance(self.exclude, (tuple, list)) and len(self.exclude) == 2 and isinstance(self.exclude[0], (int, long, float)) and isinstance(self.exclude[1], (int, long, float))): raise TypeError("exclude must either be None or (low, high)") ticks, miniticks = Ticks.interpret(self) if self.exclude is None: return ticks, miniticks ticks2 = {} for loc, label in ticks.items(): if self.exclude[0] <= loc <= self.exclude[1]: ticks2[loc] = "" else: ticks2[loc] = label return ticks2, miniticks def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" line = Line.SVG(self, trans) # must be evaluated first, to set self.f, self.low, self.high f01 = self.f self.f = lambda t: f01(1. * (t - self.start) / (self.end - self.start)) self.low = self.start self.high = self.end if self.arrow_start != False and self.arrow_start is not None: if isinstance(self.arrow_start, basestring): line.attr["marker-start"] = "url(#%s)" % self.arrow_start else: line.attr["marker-start"] = "url(#%s)" % self.arrow_start.id if self.arrow_end != False and self.arrow_end is not None: if isinstance(self.arrow_end, basestring): line.attr["marker-end"] = "url(#%s)" % self.arrow_end else: line.attr["marker-end"] = "url(#%s)" % self.arrow_end.id ticks = Ticks.SVG(self, trans) # returns a <g /> ticks.append(line) return ticks class XAxis(LineAxis): """Draws an x axis with tick marks. 
XAxis(xmin, xmax, aty, ticks, miniticks, labels, logbase, arrow_start, arrow_end, exclude, text_attr, attribute=value) xmin, xmax required the x range aty default=0 y position to draw the axis ticks default=-10 request ticks according to the standard tick specification (see help(Ticks)) miniticks default=True request miniticks according to the standard minitick specification labels True request tick labels according to the standard tick label specification logbase default=None if a number, the x axis is logarithmic with ticks at the given base (usually 10) arrow_start default=None if a new string identifier, draw an arrow at the low-end of the axis, referenced by that identifier; if an SVG marker object, use that marker arrow_end default=None if a new string identifier, draw an arrow at the high-end of the axis, referenced by that identifier; if an SVG marker object, use that marker exclude default=None if a (low, high) pair, don't draw text labels within this range text_attr default={} SVG attributes for the text labels attribute=value pairs keyword list SVG attributes for all lines The exclude option is provided for Axes to keep text from overlapping where the axes cross. Normal users are not likely to need it. """ defaults = {"stroke-width": "0.25pt", } text_defaults = {"stroke": "none", "fill": "black", "font-size": 5, "dominant-baseline": "text-before-edge", } text_start = -1. text_angle = 0. def __repr__(self): return "<XAxis (%g, %g) at y=%g ticks=%s labels=%s %s>" % ( self.xmin, self.xmax, self.aty, str(self.ticks), str(self.labels), self.attr) # XXX self.xmin/xmax undefd! def __init__(self, xmin, xmax, aty=0, ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, exclude=None, text_attr={}, **attr): self.aty = aty tattr = dict(self.text_defaults) tattr.update(text_attr) LineAxis.__init__(self, xmin, aty, xmax, aty, xmin, xmax, ticks, miniticks, labels, logbase, arrow_start, arrow_end, exclude, tattr, **attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" self.y1 = self.aty self.y2 = self.aty return LineAxis.SVG(self, trans) class YAxis(LineAxis): """Draws a y axis with tick marks. YAxis(ymin, ymax, atx, ticks, miniticks, labels, logbase, arrow_start, arrow_end, exclude, text_attr, attribute=value) ymin, ymax required the y range atx default=0 x position to draw the axis ticks default=-10 request ticks according to the standard tick specification (see help(Ticks)) miniticks default=True request miniticks according to the standard minitick specification labels True request tick labels according to the standard tick label specification logbase default=None if a number, the y axis is logarithmic with ticks at the given base (usually 10) arrow_start default=None if a new string identifier, draw an arrow at the low-end of the axis, referenced by that identifier; if an SVG marker object, use that marker arrow_end default=None if a new string identifier, draw an arrow at the high-end of the axis, referenced by that identifier; if an SVG marker object, use that marker exclude default=None if a (low, high) pair, don't draw text labels within this range text_attr default={} SVG attributes for the text labels attribute=value pairs keyword list SVG attributes for all lines The exclude option is provided for Axes to keep text from overlapping where the axes cross. Normal users are not likely to need it. 
""" defaults = {"stroke-width": "0.25pt", } text_defaults = {"stroke": "none", "fill": "black", "font-size": 5, "text-anchor": "end", "dominant-baseline": "middle", } text_start = 2.5 text_angle = 90. def __repr__(self): return "<YAxis (%g, %g) at x=%g ticks=%s labels=%s %s>" % ( self.ymin, self.ymax, self.atx, str(self.ticks), str(self.labels), self.attr) # XXX self.ymin/ymax undefd! def __init__(self, ymin, ymax, atx=0, ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, exclude=None, text_attr={}, **attr): self.atx = atx tattr = dict(self.text_defaults) tattr.update(text_attr) LineAxis.__init__(self, atx, ymin, atx, ymax, ymin, ymax, ticks, miniticks, labels, logbase, arrow_start, arrow_end, exclude, tattr, **attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" self.x1 = self.atx self.x2 = self.atx return LineAxis.SVG(self, trans) class Axes: """Draw a pair of intersecting x-y axes. Axes(xmin, xmax, ymin, ymax, atx, aty, xticks, xminiticks, xlabels, xlogbase, yticks, yminiticks, ylabels, ylogbase, arrows, text_attr, attribute=value) xmin, xmax required the x range ymin, ymax required the y range atx, aty default=0, 0 point where the axes try to cross; if outside the range, the axes will cross at the closest corner xticks default=-10 request ticks according to the standard tick specification (see help(Ticks)) xminiticks default=True request miniticks according to the standard minitick specification xlabels True request tick labels according to the standard tick label specification xlogbase default=None if a number, the x axis is logarithmic with ticks at the given base (usually 10) yticks default=-10 request ticks according to the standard tick specification yminiticks default=True request miniticks according to the standard minitick specification ylabels True request tick labels according to the standard tick label specification ylogbase default=None if a number, the y axis is logarithmic with ticks at the given base (usually 10) arrows default=None if a new string identifier, draw arrows referenced by that identifier text_attr default={} SVG attributes for the text labels attribute=value pairs keyword list SVG attributes for all lines """ defaults = {"stroke-width": "0.25pt", } text_defaults = {"stroke": "none", "fill": "black", "font-size": 5, } def __repr__(self): return "<Axes x=(%g, %g) y=(%g, %g) at (%g, %g) %s>" % ( self.xmin, self.xmax, self.ymin, self.ymax, self.atx, self.aty, self.attr) def __init__(self, xmin, xmax, ymin, ymax, atx=0, aty=0, xticks=-10, xminiticks=True, xlabels=True, xlogbase=None, yticks=-10, yminiticks=True, ylabels=True, ylogbase=None, arrows=None, text_attr={}, **attr): self.xmin, self.xmax = xmin, xmax self.ymin, self.ymax = ymin, ymax self.atx, self.aty = atx, aty self.xticks, self.xminiticks, self.xlabels, self.xlogbase = xticks, xminiticks, xlabels, xlogbase self.yticks, self.yminiticks, self.ylabels, self.ylogbase = yticks, yminiticks, ylabels, ylogbase self.arrows = arrows self.text_attr = dict(self.text_defaults) self.text_attr.update(text_attr) self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" atx, aty = self.atx, self.aty if atx < self.xmin: atx = self.xmin if atx > self.xmax: atx = self.xmax if aty < self.ymin: aty = self.ymin if aty > self.ymax: aty = self.ymax xmargin = 0.1 * abs(self.ymin - self.ymax) xexclude = atx - xmargin, atx + xmargin ymargin = 0.1 * 
abs(self.xmin - self.xmax) yexclude = aty - ymargin, aty + ymargin if self.arrows is not None and self.arrows != False: xarrow_start = self.arrows + ".xstart" xarrow_end = self.arrows + ".xend" yarrow_start = self.arrows + ".ystart" yarrow_end = self.arrows + ".yend" else: xarrow_start = xarrow_end = yarrow_start = yarrow_end = None xaxis = XAxis(self.xmin, self.xmax, aty, self.xticks, self.xminiticks, self.xlabels, self.xlogbase, xarrow_start, xarrow_end, exclude=xexclude, text_attr=self.text_attr, **self.attr).SVG(trans) yaxis = YAxis(self.ymin, self.ymax, atx, self.yticks, self.yminiticks, self.ylabels, self.ylogbase, yarrow_start, yarrow_end, exclude=yexclude, text_attr=self.text_attr, **self.attr).SVG(trans) return SVG("g", *(xaxis.sub + yaxis.sub)) ###################################################################### class HGrid(Ticks): """Draws the horizontal lines of a grid over a specified region using the standard tick specification (see help(Ticks)) to place the grid lines. HGrid(xmin, xmax, low, high, ticks, miniticks, logbase, mini_attr, attribute=value) xmin, xmax required the x range low, high required the y range ticks default=-10 request ticks according to the standard tick specification (see help(Ticks)) miniticks default=False request miniticks according to the standard minitick specification logbase default=None if a number, the axis is logarithmic with ticks at the given base (usually 10) mini_attr default={} SVG attributes for the minitick-lines (if miniticks != False) attribute=value pairs keyword list SVG attributes for the major tick lines """ defaults = {"stroke-width": "0.25pt", "stroke": "gray", } mini_defaults = {"stroke-width": "0.25pt", "stroke": "lightgray", "stroke-dasharray": "1,1", } def __repr__(self): return "<HGrid x=(%g, %g) %g <= y <= %g ticks=%s miniticks=%s %s>" % ( self.xmin, self.xmax, self.low, self.high, str(self.ticks), str(self.miniticks), self.attr) def __init__(self, xmin, xmax, low, high, ticks=-10, miniticks=False, logbase=None, mini_attr={}, **attr): self.xmin, self.xmax = xmin, xmax self.mini_attr = dict(self.mini_defaults) self.mini_attr.update(mini_attr) Ticks.__init__(self, None, low, high, ticks, miniticks, None, logbase) self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" self.last_ticks, self.last_miniticks = Ticks.interpret(self) ticksd = [] for t in self.last_ticks.keys(): ticksd += Line(self.xmin, t, self.xmax, t).Path(trans).d miniticksd = [] for t in self.last_miniticks: miniticksd += Line(self.xmin, t, self.xmax, t).Path(trans).d return SVG("g", Path(d=ticksd, **self.attr).SVG(), Path(d=miniticksd, **self.mini_attr).SVG()) class VGrid(Ticks): """Draws the vertical lines of a grid over a specified region using the standard tick specification (see help(Ticks)) to place the grid lines. 
HGrid(ymin, ymax, low, high, ticks, miniticks, logbase, mini_attr, attribute=value) ymin, ymax required the y range low, high required the x range ticks default=-10 request ticks according to the standard tick specification (see help(Ticks)) miniticks default=False request miniticks according to the standard minitick specification logbase default=None if a number, the axis is logarithmic with ticks at the given base (usually 10) mini_attr default={} SVG attributes for the minitick-lines (if miniticks != False) attribute=value pairs keyword list SVG attributes for the major tick lines """ defaults = {"stroke-width": "0.25pt", "stroke": "gray", } mini_defaults = {"stroke-width": "0.25pt", "stroke": "lightgray", "stroke-dasharray": "1,1", } def __repr__(self): return "<VGrid y=(%g, %g) %g <= x <= %g ticks=%s miniticks=%s %s>" % ( self.ymin, self.ymax, self.low, self.high, str(self.ticks), str(self.miniticks), self.attr) def __init__(self, ymin, ymax, low, high, ticks=-10, miniticks=False, logbase=None, mini_attr={}, **attr): self.ymin, self.ymax = ymin, ymax self.mini_attr = dict(self.mini_defaults) self.mini_attr.update(mini_attr) Ticks.__init__(self, None, low, high, ticks, miniticks, None, logbase) self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" self.last_ticks, self.last_miniticks = Ticks.interpret(self) ticksd = [] for t in self.last_ticks.keys(): ticksd += Line(t, self.ymin, t, self.ymax).Path(trans).d miniticksd = [] for t in self.last_miniticks: miniticksd += Line(t, self.ymin, t, self.ymax).Path(trans).d return SVG("g", Path(d=ticksd, **self.attr).SVG(), Path(d=miniticksd, **self.mini_attr).SVG()) class Grid(Ticks): """Draws a grid over a specified region using the standard tick specification (see help(Ticks)) to place the grid lines. 
Grid(xmin, xmax, ymin, ymax, ticks, miniticks, logbase, mini_attr, attribute=value) xmin, xmax required the x range ymin, ymax required the y range ticks default=-10 request ticks according to the standard tick specification (see help(Ticks)) miniticks default=False request miniticks according to the standard minitick specification logbase default=None if a number, the axis is logarithmic with ticks at the given base (usually 10) mini_attr default={} SVG attributes for the minitick-lines (if miniticks != False) attribute=value pairs keyword list SVG attributes for the major tick lines """ defaults = {"stroke-width": "0.25pt", "stroke": "gray", } mini_defaults = {"stroke-width": "0.25pt", "stroke": "lightgray", "stroke-dasharray": "1,1", } def __repr__(self): return "<Grid x=(%g, %g) y=(%g, %g) ticks=%s miniticks=%s %s>" % ( self.xmin, self.xmax, self.ymin, self.ymax, str(self.ticks), str(self.miniticks), self.attr) def __init__(self, xmin, xmax, ymin, ymax, ticks=-10, miniticks=False, logbase=None, mini_attr={}, **attr): self.xmin, self.xmax = xmin, xmax self.ymin, self.ymax = ymin, ymax self.mini_attr = dict(self.mini_defaults) self.mini_attr.update(mini_attr) Ticks.__init__(self, None, None, None, ticks, miniticks, None, logbase) self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" self.low, self.high = self.xmin, self.xmax self.last_xticks, self.last_xminiticks = Ticks.interpret(self) self.low, self.high = self.ymin, self.ymax self.last_yticks, self.last_yminiticks = Ticks.interpret(self) ticksd = [] for t in self.last_xticks.keys(): ticksd += Line(t, self.ymin, t, self.ymax).Path(trans).d for t in self.last_yticks.keys(): ticksd += Line(self.xmin, t, self.xmax, t).Path(trans).d miniticksd = [] for t in self.last_xminiticks: miniticksd += Line(t, self.ymin, t, self.ymax).Path(trans).d for t in self.last_yminiticks: miniticksd += Line(self.xmin, t, self.xmax, t).Path(trans).d return SVG("g", Path(d=ticksd, **self.attr).SVG(), Path(d=miniticksd, **self.mini_attr).SVG()) ###################################################################### class XErrorBars: """Draws x error bars at a set of points. This is usually used before (under) a set of Dots at the same points. XErrorBars(d, attribute=value) d required list of (x,y,xerr...) points attribute=value pairs keyword list SVG attributes If points in d have * 3 elements, the third is the symmetric error bar * 4 elements, the third and fourth are the asymmetric lower and upper error bar. The third element should be negative, e.g. (5, 5, -1, 2) is a bar from 4 to 7. * more than 4, a tick mark is placed at each value. This lets you nest errors from different sources, correlated and uncorrelated, statistical and systematic, etc. """ defaults = {"stroke-width": "0.25pt", } def __repr__(self): return "<XErrorBars (%d nodes)>" % len(self.d) def __init__(self, d=[], **attr): self.d = list(d) self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" if isinstance(trans, basestring): trans = totrans(trans) # only once output = SVG("g") for p in self.d: x, y = p[0], p[1] if len(p) == 3: bars = [x - p[2], x + p[2]] else: bars = [x + pi for pi in p[2:]] start, end = min(bars), max(bars) output.append(LineAxis(start, y, end, y, start, end, bars, False, False, **self.attr).SVG(trans)) return output class YErrorBars: """Draws y error bars at a set of points. 
This is usually used before (under) a set of Dots at the same points. YErrorBars(d, attribute=value) d required list of (x,y,yerr...) points attribute=value pairs keyword list SVG attributes If points in d have * 3 elements, the third is the symmetric error bar * 4 elements, the third and fourth are the asymmetric lower and upper error bar. The third element should be negative, e.g. (5, 5, -1, 2) is a bar from 4 to 7. * more than 4, a tick mark is placed at each value. This lets you nest errors from different sources, correlated and uncorrelated, statistical and systematic, etc. """ defaults = {"stroke-width": "0.25pt", } def __repr__(self): return "<YErrorBars (%d nodes)>" % len(self.d) def __init__(self, d=[], **attr): self.d = list(d) self.attr = dict(self.defaults) self.attr.update(attr) def SVG(self, trans=None): """Apply the transformation "trans" and return an SVG object.""" if isinstance(trans, basestring): trans = totrans(trans) # only once output = SVG("g") for p in self.d: x, y = p[0], p[1] if len(p) == 3: bars = [y - p[2], y + p[2]] else: bars = [y + pi for pi in p[2:]] start, end = min(bars), max(bars) output.append(LineAxis(x, start, x, end, start, end, bars, False, False, **self.attr).SVG(trans)) return output
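# ----------------------------------------------------------------------
# Notes on the two "XXX" flags in the axis/tick code above (hedged
# sketches, not upstream fixes):
#
# 1. Ticks.compute_miniticks falls back on an undefined free function
#    `ticks` when fewer than two major ticks are supplied, so that
#    branch raises NameError.  Since compute_ticks(N, format) already
#    returns a {position: label} dict and only the positions are used
#    here, a minimal repair would be:
#
#        if len(original_ticks) < 2:
#            original_ticks = self.compute_ticks(-10, lambda x: "")
#
# 2. XAxis.__repr__ and YAxis.__repr__ read self.xmin/self.xmax and
#    self.ymin/self.ymax, which no constructor in this file ever sets,
#    so repr() on those objects raises AttributeError.  Storing the
#    range in __init__ (e.g. `self.xmin, self.xmax = xmin, xmax` for
#    XAxis) would be enough.
# ----------------------------------------------------------------------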
#!/usr/bin/env python ''' Lucas-Kanade tracker ==================== Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack for track initialization and back-tracking for match verification between frames. ''' # Python 2/3 compatibility from __future__ import print_function import numpy as np import cv2 as cv #local modules from tst_scene_render import TestSceneRender from tests_common import NewOpenCVTests, intersectionRate, isPointInRect lk_params = dict( winSize = (15, 15), maxLevel = 2, criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) feature_params = dict( maxCorners = 500, qualityLevel = 0.3, minDistance = 7, blockSize = 7 ) def getRectFromPoints(points): distances = [] for point in points: distances.append(cv.norm(point, cv.NORM_L2)) x0, y0 = points[np.argmin(distances)] x1, y1 = points[np.argmax(distances)] return np.array([x0, y0, x1, y1]) class lk_track_test(NewOpenCVTests): track_len = 10 detect_interval = 5 tracks = [] frame_idx = 0 render = None def test_lk_track(self): self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'), self.get_sample('samples/data/box.png')) self.runTracker() def runTracker(self): foregroundPointsNum = 0 while True: frame = self.render.getNextFrame() frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) if len(self.tracks) > 0: img0, img1 = self.prev_gray, frame_gray p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2) p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) d = abs(p0-p0r).reshape(-1, 2).max(-1) good = d < 1 new_tracks = [] for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good): if not good_flag: continue tr.append([(x, y), self.frame_idx]) if len(tr) > self.track_len: del tr[0] new_tracks.append(tr) self.tracks = new_tracks if self.frame_idx % self.detect_interval == 0: goodTracksCount = 0 for tr in self.tracks: oldRect = self.render.getRectInTime(self.render.timeStep * tr[0][1]) newRect = self.render.getRectInTime(self.render.timeStep * tr[-1][1]) if isPointInRect(tr[0][0], oldRect) and isPointInRect(tr[-1][0], newRect): goodTracksCount += 1 if self.frame_idx == self.detect_interval: foregroundPointsNum = goodTracksCount fgIndex = float(foregroundPointsNum) / (foregroundPointsNum + 1) fgRate = float(goodTracksCount) / (len(self.tracks) + 1) if self.frame_idx > 0: self.assertGreater(fgIndex, 0.9) self.assertGreater(fgRate, 0.2) mask = np.zeros_like(frame_gray) mask[:] = 255 for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]: cv.circle(mask, (x, y), 5, 0, -1) p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params) if p is not None: for x, y in np.float32(p).reshape(-1, 2): self.tracks.append([[(x, y), self.frame_idx]]) self.frame_idx += 1 self.prev_gray = frame_gray if self.frame_idx > 300: break if __name__ == '__main__': NewOpenCVTests.bootstrap()
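# ----------------------------------------------------------------------
# Minimal standalone sketch of the forward-backward check the test above
# relies on, with no TestSceneRender dependency: track corners between
# two synthetic frames and keep only points whose back-tracked position
# agrees with the original within 1 pixel.  The frame contents are made
# up purely for illustration.
import numpy as np
import cv2 as cv

frame0 = np.zeros((240, 320), np.uint8)
cv.rectangle(frame0, (100, 80), (180, 160), 255, -1)
frame1 = np.roll(frame0, 3, axis=1)  # shift the scene 3 px to the right

p0 = cv.goodFeaturesToTrack(frame0, maxCorners=50, qualityLevel=0.3, minDistance=7)
p1, _st, _err = cv.calcOpticalFlowPyrLK(frame0, frame1, p0, None)
p0r, _st, _err = cv.calcOpticalFlowPyrLK(frame1, frame0, p1, None)
good = abs(p0 - p0r).reshape(-1, 2).max(-1) < 1  # forward-backward error
print('%d of %d points passed the forward-backward check' % (good.sum(), len(good)))
# ----------------------------------------------------------------------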
#!/usr/bin/env python ''' Lucas-Kanade homography tracker test =============================== Uses goodFeaturesToTrack for track initialization and back-tracking for match verification between frames. Finds homography between reference and current views. ''' # Python 2/3 compatibility from __future__ import print_function import numpy as np import cv2 as cv #local modules from tst_scene_render import TestSceneRender from tests_common import NewOpenCVTests, isPointInRect lk_params = dict( winSize = (19, 19), maxLevel = 2, criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) feature_params = dict( maxCorners = 1000, qualityLevel = 0.01, minDistance = 8, blockSize = 19 ) def checkedTrace(img0, img1, p0, back_threshold = 1.0): p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) d = abs(p0-p0r).reshape(-1, 2).max(-1) status = d < back_threshold return p1, status class lk_homography_test(NewOpenCVTests): render = None framesCounter = 0 frame = frame0 = None p0 = None p1 = None gray0 = gray1 = None numFeaturesInRectOnStart = 0 def test_lk_homography(self): self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'), self.get_sample('samples/data/box.png'), noise = 0.1, speed = 1.0) frame = self.render.getNextFrame() frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) self.frame0 = frame.copy() self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params) isForegroundHomographyFound = False if self.p0 is not None: self.p1 = self.p0 self.gray0 = frame_gray self.gray1 = frame_gray currRect = self.render.getCurrentRect() for (x,y) in self.p0[:,0]: if isPointInRect((x,y), currRect): self.numFeaturesInRectOnStart += 1 while self.framesCounter < 200: self.framesCounter += 1 frame = self.render.getNextFrame() frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) if self.p0 is not None: p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1) self.p1 = p2[trace_status].copy() self.p0 = self.p0[trace_status].copy() self.gray1 = frame_gray if len(self.p0) < 4: self.p0 = None continue _H, status = cv.findHomography(self.p0, self.p1, cv.RANSAC, 5.0) goodPointsInRect = 0 goodPointsOutsideRect = 0 for (_x0, _y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]): if good: if isPointInRect((x1,y1), self.render.getCurrentRect()): goodPointsInRect += 1 else: goodPointsOutsideRect += 1 if goodPointsOutsideRect < goodPointsInRect: isForegroundHomographyFound = True self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6) else: self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params) self.assertEqual(isForegroundHomographyFound, True) if __name__ == '__main__': NewOpenCVTests.bootstrap()
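# ----------------------------------------------------------------------
# Hedged sketch of the RANSAC verification idea used above, with
# synthetic correspondences instead of tracked features: project points
# through a known homography, corrupt a few, and let findHomography
# mark the outliers via its status vector.  All numbers are made up.
import numpy as np
import cv2 as cv

np.random.seed(0)
H_true = np.array([[1.0, 0.02, 5.0],
                   [0.01, 1.0, -3.0],
                   [1e-4, 0.0, 1.0]])
pts0 = (np.random.rand(50, 1, 2) * 200).astype(np.float32)
pts1 = cv.perspectiveTransform(pts0, H_true)
pts1[:5] += 40.0                       # five gross outliers
H, status = cv.findHomography(pts0, pts1, cv.RANSAC, 5.0)
print('inliers: %d / %d' % (int(status.sum()), len(status)))
# ----------------------------------------------------------------------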
#!/bin/python # usage: # cat clAmdBlas.h | $0 from __future__ import print_function import sys, re; from common import remove_comments, getTokens, getParameters, postProcessParameters try: if len(sys.argv) > 1: f = open(sys.argv[1], "r") else: f = sys.stdin except: sys.exit("ERROR. Can't open input file") fns = [] while True: line = f.readline() if len(line) == 0: break assert isinstance(line, str) line = line.strip() parts = line.split(); if (line.startswith('clAmd') or line.startswith('cl_') or line == 'void') and len(line.split()) == 1 and line.find('(') == -1: fn = {} modifiers = [] ret = [] calling = [] i = 0 while (i < len(parts)): if parts[i].startswith('CL_'): modifiers.append(parts[i]) else: break i += 1 while (i < len(parts)): if not parts[i].startswith('CL_'): ret.append(parts[i]) else: break i += 1 while (i < len(parts)): calling.append(parts[i]) i += 1 fn['modifiers'] = [] # modifiers fn['ret'] = ret fn['calling'] = calling # print 'modifiers='+' '.join(modifiers) # print 'ret='+' '.join(type) # print 'calling='+' '.join(calling) # read block of lines line = f.readline() while True: nl = f.readline() nl = nl.strip() nl = re.sub(r'\n', r'', nl) if len(nl) == 0: break; line += ' ' + nl line = remove_comments(line) parts = getTokens(line) i = 0; name = parts[i]; i += 1; fn['name'] = name print('name=' + name) params = getParameters(i, parts) fn['params'] = params # print 'params="'+','.join(params)+'"' fns.append(fn) f.close() print('Found %d functions' % len(fns)) postProcessParameters(fns) from pprint import pprint pprint(fns) from common import * filterFileName='./filter/opencl_clamdblas_functions.list' numEnabled = readFunctionFilter(fns, filterFileName) functionsFilter = generateFilterNames(fns) filter_file = open(filterFileName, 'wb') filter_file.write(functionsFilter) ctx = {} ctx['CLAMDBLAS_REMAP_ORIGIN'] = generateRemapOrigin(fns) ctx['CLAMDBLAS_REMAP_DYNAMIC'] = generateRemapDynamic(fns) ctx['CLAMDBLAS_FN_DECLARATIONS'] = generateFnDeclaration(fns) sys.stdout = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/opencl_clamdblas.hpp', 'wb') ProcessTemplate('template/opencl_clamdblas.hpp.in', ctx) ctx['CL_FN_ENUMS'] = generateEnums(fns, 'OPENCLAMDBLAS_FN', ) ctx['CL_FN_SWITCH'] = generateTemplates(23, 'openclamdblas_fn', 'openclamdblas_check_fn', '') ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns, 'openclamdblas_fn', 'OPENCLAMDBLAS_FN') ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns, 'openclamdblas_fn') ctx['CL_NUMBER_OF_ENABLED_FUNCTIONS'] = '// number of enabled functions: %d' % (numEnabled) sys.stdout = open('../autogenerated/opencl_clamdblas_impl.hpp', 'wb') ProcessTemplate('template/opencl_clamdblas_impl.hpp.in', ctx)
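# ----------------------------------------------------------------------
# Quick self-contained check of the tokenizer helpers this script leans
# on (remove_comments / getTokens / getParameters from common.py below).
# The prototype is a made-up example, not a real clAmdBlas signature.
from common import remove_comments, getTokens, getParameters

decl = 'clAmdBlasSetup( cl_context ctx /* context */, cl_uint flags );'
tokens = getTokens(remove_comments(decl))
name = tokens[0]
params = getParameters(1, tokens)      # tokens[1] must be '('
print(name, params)  # -> clAmdBlasSetup ['cl_context ctx', 'cl_uint flags']
# ----------------------------------------------------------------------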
from __future__ import print_function import sys, os, re # # Parser helpers # def remove_comments(s): def replacer(match): s = match.group(0) if s.startswith('/'): return "" else: return s pattern = re.compile( r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', re.DOTALL | re.MULTILINE ) return re.sub(pattern, replacer, s) def getTokens(s): return re.findall(r'[a-z_A-Z0-9_]+|[^[a-z_A-Z0-9_ \n\r\t]', s) def getParameter(pos, tokens): deep = 0 p = [] while True: if pos >= len(tokens): break if (tokens[pos] == ')' or tokens[pos] == ',') and deep == 0: if tokens[pos] == ')': pos = len(tokens) else: pos += 1 break if tokens[pos] == '(': deep += 1 if tokens[pos] == ')': deep -= 1 p.append(tokens[pos]) pos += 1 return (' '.join(p), pos) def getParameters(i, tokens): assert tokens[i] == '(' i += 1 params = [] while True: if i >= len(tokens) or tokens[i] == ')': break (param, i) = getParameter(i, tokens) if len(param) > 0: params.append(param) else: assert False break if len(params) > 0 and params[0] == 'void': del params[0] return params def postProcessParameters(fns): fns.sort(key=lambda x: x['name']) for fn in fns: fn['params_full'] = list(fn['params']) for i in range(len(fn['params'])): p = fn['params'][i] if p.find('(') != -1: p = re.sub(r'\* *([a-zA-Z0-9_]*) ?\)', '*)', p, 1) fn['params'][i] = p continue parts = re.findall(r'[a-z_A-Z0-9]+|\*', p) if len(parts) > 1: if parts[-1].find('*') == -1: del parts[-1] fn['params'][i] = ' '.join(parts) def readFunctionFilter(fns, fileName): try: f = open(fileName, "r") except: print("ERROR: Can't open filter file: %s" % fileName) return 0 count = 0 while f: line = f.readline() if not line: break assert isinstance(line, str) if line.startswith('#') or line.startswith('//'): continue line = line.replace('\n', '') if len(line) == 0: continue found = False for fn in fns: if fn['name'] == line: found = True fn['enabled'] = True if not found: sys.exit("FATAL ERROR: Unknown function: %s" % line) count = count + 1 f.close() return count # # Generator helpers # def outputToString(f): def wrapped(*args, **kwargs): from cStringIO import StringIO old_stdout = sys.stdout sys.stdout = str_stdout = StringIO() res = f(*args, **kwargs) assert res is None sys.stdout = old_stdout result = str_stdout.getvalue() result = re.sub(r'([^\n /]) [ ]+', r'\1 ', result) # don't remove spaces at start of line result = re.sub(r' ,', ',', result) result = re.sub(r' \*', '*', result) result = re.sub(r'\( ', '(', result) result = re.sub(r' \)', ')', result) return result return wrapped @outputToString def generateFilterNames(fns): for fn in fns: print('%s%s' % ('' if 'enabled' in fn else '//', fn['name'])) print('#total %d' % len(fns)) callback_check = re.compile(r'([^\(]*\(.*)(\* *)(\).*\(.*\))') def getTypeWithParam(t, p): if callback_check.match(t): return callback_check.sub(r'\1 *' + p + r'\3', t) return t + ' ' + p @outputToString def generateStructDefinitions(fns, lprefix='opencl_fn', enumprefix='OPENCL_FN'): print('// generated by %s' % os.path.basename(sys.argv[0])) for fn in fns: commentStr = '' if 'enabled' in fn else '//' decl_args = [] for (i, t) in enumerate(fn['params']): decl_args.append(getTypeWithParam(t, 'p%d' % (i+1))) decl_args_str = '(' + (', '.join(decl_args)) + ')' print('%s%s%d(%s_%s, %s, %s)' % \ (commentStr, lprefix, len(fn['params']), enumprefix, fn['name'], \ ' '.join(fn['ret']), decl_args_str)) print(commentStr + ('%s%s (%s *%s)(%s) =\n%s %s_%s_switch_fn;' % \ ((' '.join(fn['modifiers'] + ' ') if len(fn['modifiers']) > 0 else ''), ' 
'.join(fn['ret']), ' '.join(fn['calling']), fn['name'], ', '.join(fn['params']), \ commentStr, enumprefix, fn['name']))) print(commentStr + ('static const struct DynamicFnEntry %s_definition = { "%s", (void**)&%s};' % (fn['name'], fn['name'], fn['name']))) print() @outputToString def generateStaticDefinitions(fns): print('// generated by %s' % os.path.basename(sys.argv[0])) for fn in fns: commentStr = '' if 'enabled' in fn else '//' decl_args = [] for (i, t) in enumerate(fn['params']): decl_args.append(getTypeWithParam(t, 'p%d' % (i+1))) decl_args_str = '(' + (', '.join(decl_args)) + ')' print(commentStr + ('CL_RUNTIME_EXPORT %s%s (%s *%s_pfn)(%s) = %s;' % \ ((' '.join(fn['modifiers'] + ' ') if len(fn['modifiers']) > 0 else ''), ' '.join(fn['ret']), ' '.join(fn['calling']), fn['name'], ', '.join(fn['params']), \ fn['name']))) @outputToString def generateListOfDefinitions(fns, name='opencl_fn_list'): print('// generated by %s' % os.path.basename(sys.argv[0])) print('static const struct DynamicFnEntry* %s[] = {' % (name)) for fn in fns: commentStr = '' if 'enabled' in fn else '//' if 'enabled' in fn: print(' &%s_definition,' % (fn['name'])) else: print(' NULL/*&%s_definition*/,' % (fn['name'])) first = False print('};') @outputToString def generateEnums(fns, prefix='OPENCL_FN'): print('// generated by %s' % os.path.basename(sys.argv[0])) print('enum %s_ID {' % prefix) for (i, fn) in enumerate(fns): commentStr = '' if 'enabled' in fn else '//' print(commentStr + (' %s_%s = %d,' % (prefix, fn['name'], i))) print('};') @outputToString def generateRemapOrigin(fns): print('// generated by %s' % os.path.basename(sys.argv[0])) for fn in fns: print('#define %s %s_' % (fn['name'], fn['name'])) @outputToString def generateRemapDynamic(fns): print('// generated by %s' % os.path.basename(sys.argv[0])) for fn in fns: print('#undef %s' % (fn['name'])) commentStr = '' if 'enabled' in fn else '//' print(commentStr + ('#define %s %s_pfn' % (fn['name'], fn['name']))) @outputToString def generateFnDeclaration(fns): print('// generated by %s' % os.path.basename(sys.argv[0])) for fn in fns: commentStr = '' if 'enabled' in fn else '//' print(commentStr + ('extern CL_RUNTIME_EXPORT %s %s (%s *%s)(%s);' % (' '.join(fn['modifiers']), ' '.join(fn['ret']), ' '.join(fn['calling']), fn['name'], ', '.join(fn['params'] if 'params_full' not in fn else fn['params_full'])))) @outputToString def generateTemplates(total, lprefix, switch_name, calling_convention=''): print('// generated by %s' % os.path.basename(sys.argv[0])) for sz in range(total): template_params = ['ID', '_R', 'decl_args'] params = ['p%d' % (i + 1) for i in range(0, sz)] print('#define %s%d(%s) \\' % (lprefix, sz, ', '.join(template_params))) print(' typedef _R (%s *ID##FN)decl_args; \\' % (calling_convention)) print(' static _R %s ID##_switch_fn decl_args \\' % (calling_convention)) print(' { return ((ID##FN)%s(ID))(%s); } \\' % (switch_name, ', '.join(params))) print('') @outputToString def generateInlineWrappers(fns): print('// generated by %s' % os.path.basename(sys.argv[0])) for fn in fns: commentStr = '' if 'enabled' in fn else '//' print('#undef %s' % (fn['name'])) print(commentStr + ('#define %s %s_fn' % (fn['name'], fn['name']))) params = [] call_params = [] for i in range(0, len(fn['params'])): t = fn['params'][i] if t.find('*)') >= 0: p = re.sub(r'\*\)', (' *p%d)' % i), t, 1) params.append(p) else: params.append('%s p%d' % (t, i)) call_params.append('p%d' % (i)) if len(fn['ret']) == 1 and fn['ret'][0] == 'void': print(commentStr + ('inline void 
%s(%s) { %s_pfn(%s); }' \ % (fn['name'], ', '.join(params), fn['name'], ', '.join(call_params)))) else: print(commentStr + ('inline %s %s(%s) { return %s_pfn(%s); }' \ % (' '.join(fn['ret']), fn['name'], ', '.join(params), fn['name'], ', '.join(call_params)))) def ProcessTemplate(inputFile, ctx, noteLine='//\n// AUTOGENERATED, DO NOT EDIT\n//'): f = open(inputFile, "r") if noteLine: print(noteLine) for line in f: if line.startswith('@'): assert line[-1] == '\n' line = line[:-1] # remove '\n' assert line[-1] == '@' name = line[1:-1] assert name in ctx, name line = ctx[name] + ('\n' if len(ctx[name]) > 0 and ctx[name][-1] != '\n' else '') sys.stdout.write(line) f.close()
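# ----------------------------------------------------------------------
# Two hedged observations on the helpers above (the parser scripts
# always set fn['modifiers'] = [], so neither bites in practice):
#
# * generateStructDefinitions and generateStaticDefinitions build the
#   modifier prefix with `' '.join(fn['modifiers'] + ' ')`, which is a
#   TypeError (list + str) as soon as modifiers is non-empty; the
#   intended expression is presumably `' '.join(fn['modifiers']) + ' '`.
# * generateListOfDefinitions contains a leftover `first = False` that
#   is never read.
#
# ProcessTemplate replaces whole lines of the form "@NAME@" with
# ctx['NAME'] and streams everything else through.  A minimal round
# trip (the template file name is a throwaway):
from common import ProcessTemplate

with open('demo.hpp.in', 'w') as tf:
    tf.write('// header\n@BODY@\n// footer\n')
ProcessTemplate('demo.hpp.in', {'BODY': 'int answer = 42;'}, noteLine=None)
# prints: // header / int answer = 42; / // footer
# ----------------------------------------------------------------------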
#!/bin/python # usage: # cat opencl11/cl.h | $0 cl_runtime_opencl11 # cat opencl12/cl.h | $0 cl_runtime_opencl12 from __future__ import print_function import sys, re; from common import remove_comments, getTokens, getParameters, postProcessParameters try: if len(sys.argv) > 1: module_name = sys.argv[1] outfile = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/%s.hpp' % module_name, 'wb') outfile_impl = open('../autogenerated/%s_impl.hpp' % module_name, 'wb') outfile_static_impl = open('../autogenerated/%s_static_impl.hpp' % module_name, 'wb') outfile_wrappers = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/%s_wrappers.hpp' % module_name, 'wb') if len(sys.argv) > 2: f = open(sys.argv[2], "r") else: f = sys.stdin else: sys.exit("ERROR. Specify output file") except: sys.exit("ERROR. Can't open input/output file, check parameters") fns = [] while True: line = f.readline() if len(line) == 0: break assert isinstance(line, str) parts = line.split(); if line.startswith('extern') and line.find('CL_API_CALL') != -1: # read block of lines while True: nl = f.readline() nl = nl.strip() nl = re.sub(r'\n', r'', nl) if len(nl) == 0: break; line += ' ' + nl line = remove_comments(line) parts = getTokens(line) fn = {} modifiers = [] ret = [] calling = [] i = 1 while (i < len(parts)): if parts[i].startswith('CL_'): modifiers.append(parts[i]) else: break i += 1 while (i < len(parts)): if not parts[i].startswith('CL_'): ret.append(parts[i]) else: break i += 1 while (i < len(parts)): calling.append(parts[i]) i += 1 if parts[i - 1] == 'CL_API_CALL': break fn['modifiers'] = [] # modifiers fn['ret'] = ret fn['calling'] = calling # print 'modifiers='+' '.join(modifiers) # print 'ret='+' '.join(type) # print 'calling='+' '.join(calling) name = parts[i]; i += 1; fn['name'] = name print('name=' + name) params = getParameters(i, parts) fn['params'] = params # print 'params="'+','.join(params)+'"' fns.append(fn) f.close() print('Found %d functions' % len(fns)) postProcessParameters(fns) from pprint import pprint pprint(fns) from common import * filterFileName = './filter/%s_functions.list' % module_name numEnabled = readFunctionFilter(fns, filterFileName) functionsFilter = generateFilterNames(fns) filter_file = open(filterFileName, 'wb') filter_file.write(functionsFilter) ctx = {} ctx['CL_REMAP_ORIGIN'] = generateRemapOrigin(fns) ctx['CL_REMAP_DYNAMIC'] = generateRemapDynamic(fns) ctx['CL_FN_DECLARATIONS'] = generateFnDeclaration(fns) sys.stdout = outfile ProcessTemplate('template/%s.hpp.in' % module_name, ctx) ctx['CL_FN_INLINE_WRAPPERS'] = generateInlineWrappers(fns) sys.stdout = outfile_wrappers ProcessTemplate('template/%s_wrappers.hpp.in' % module_name, ctx) if module_name == 'opencl_core': ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns) ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns) ctx['CL_FN_ENUMS'] = generateEnums(fns) ctx['CL_FN_SWITCH'] = generateTemplates(15, 'opencl_fn', 'opencl_check_fn', 'CL_API_CALL') else: lprefix = module_name + '_fn' enumprefix = module_name.upper() + '_FN' fn_list_name = module_name + '_fn_list' ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns, lprefix=lprefix, enumprefix=enumprefix) ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns, fn_list_name) ctx['CL_FN_ENUMS'] = generateEnums(fns, prefix=enumprefix) ctx['CL_FN_SWITCH'] = generateTemplates(15, lprefix, '%s_check_fn' % module_name, 'CL_API_CALL') ctx['CL_NUMBER_OF_ENABLED_FUNCTIONS'] = '// number of enabled functions: %d' % (numEnabled) 
sys.stdout = outfile_impl ProcessTemplate('template/%s_impl.hpp.in' % module_name, ctx) sys.stdout = outfile_static_impl ProcessTemplate('template/static_impl.hpp.in', dict(CL_STATIC_DEFINITIONS=generateStaticDefinitions(fns)))
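# ----------------------------------------------------------------------
# The CL_FN_SWITCH block above comes from generateTemplates in
# common.py; one macro expansion can be inspected without running the
# whole generator (Python 2, since outputToString uses cStringIO):
from common import generateTemplates

print(generateTemplates(2, 'opencl_fn', 'opencl_check_fn', 'CL_API_CALL'))
# ----------------------------------------------------------------------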
#!/bin/python # usage: # cat clAmdFft.h | $0 from __future__ import print_function import sys, re; from common import remove_comments, getTokens, getParameters, postProcessParameters try: if len(sys.argv) > 1: f = open(sys.argv[1], "r") else: f = sys.stdin except: sys.exit("ERROR. Can't open input file") fns = [] while True: line = f.readline() if len(line) == 0: break assert isinstance(line, str) line = line.strip() if line.startswith('CLAMDFFTAPI'): line = re.sub(r'\n', r'', line) while True: nl = f.readline() nl = nl.strip() nl = re.sub(r'\n', r'', nl) if len(nl) == 0: break; line += ' ' + nl line = remove_comments(line) parts = getTokens(line) fn = {} modifiers = [] ret = [] calling = [] i = 0 while True: if parts[i] == "CLAMDFFTAPI": modifiers.append(parts[i]) else: break i += 1 while (i < len(parts)): if not parts[i] == '(': ret.append(parts[i]) else: del ret[-1] i -= 1 break i += 1 fn['modifiers'] = [] # modifiers fn['ret'] = ret fn['calling'] = calling name = parts[i]; i += 1; fn['name'] = name print('name=' + name) params = getParameters(i, parts) if len(params) > 0 and params[0] == 'void': del params[0] fn['params'] = params # print 'params="'+','.join(params)+'"' fns.append(fn) f.close() print('Found %d functions' % len(fns)) postProcessParameters(fns) from pprint import pprint pprint(fns) from common import * filterFileName='./filter/opencl_clamdfft_functions.list' numEnabled = readFunctionFilter(fns, filterFileName) functionsFilter = generateFilterNames(fns) filter_file = open(filterFileName, 'wb') filter_file.write(functionsFilter) ctx = {} ctx['CLAMDFFT_REMAP_ORIGIN'] = generateRemapOrigin(fns) ctx['CLAMDFFT_REMAP_DYNAMIC'] = generateRemapDynamic(fns) ctx['CLAMDFFT_FN_DECLARATIONS'] = generateFnDeclaration(fns) sys.stdout = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/opencl_clamdfft.hpp', 'wb') ProcessTemplate('template/opencl_clamdfft.hpp.in', ctx) ctx['CL_FN_ENUMS'] = generateEnums(fns, 'OPENCLAMDFFT_FN') ctx['CL_FN_SWITCH'] = generateTemplates(23, 'openclamdfft_fn', 'openclamdfft_check_fn', '') ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns, 'openclamdfft_fn', 'OPENCLAMDFFT_FN') ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns, 'openclamdfft_fn') ctx['CL_NUMBER_OF_ENABLED_FUNCTIONS'] = '// number of enabled functions: %d' % (numEnabled) sys.stdout = open('../autogenerated/opencl_clamdfft_impl.hpp', 'wb') ProcessTemplate('template/opencl_clamdfft_impl.hpp.in', ctx)
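# ----------------------------------------------------------------------
# All three generator scripts funnel through the same filter mechanism:
# readFunctionFilter (common.py) reads one function name per line,
# skips '#'/'//' lines, and treats unknown names as a fatal error,
# while generateFilterNames writes the list back with disabled entries
# commented out.  A filter file therefore looks like this (the names
# shown are placeholders):
#
#     clAmdFftSetup           <- enabled
#     //clAmdFftTeardown      <- parsed from the header but disabled
#     #total 2
# ----------------------------------------------------------------------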
#!/usr/bin/env python
import cv2 as cv

from tests_common import NewOpenCVTests

class stitching_test(NewOpenCVTests):

    def test_simple(self):
        img1 = self.get_sample('stitching/a1.png')
        img2 = self.get_sample('stitching/a2.png')

        stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)
        (_result, pano) = stitcher.stitch((img1, img2))

        #cv.imshow("pano", pano)
        #cv.waitKey()

        self.assertAlmostEqual(pano.shape[0], 685, delta=100, msg="rows: %r" % list(pano.shape))
        self.assertAlmostEqual(pano.shape[1], 1025, delta=100, msg="cols: %r" % list(pano.shape))

class stitching_detail_test(NewOpenCVTests):

    def test_simple(self):
        img = self.get_sample('stitching/a1.png')
        finder = cv.ORB.create()
        imgFea = cv.detail.computeImageFeatures2(finder, img)
        self.assertIsNotNone(imgFea)

        matcher = cv.detail_BestOf2NearestMatcher(False, 0.3)
        self.assertIsNotNone(matcher)
        matcher = cv.detail_AffineBestOf2NearestMatcher(False, False, 0.3)
        self.assertIsNotNone(matcher)
        matcher = cv.detail_BestOf2NearestRangeMatcher(2, False, 0.3)
        self.assertIsNotNone(matcher)

        estimator = cv.detail_AffineBasedEstimator()
        self.assertIsNotNone(estimator)
        estimator = cv.detail_HomographyBasedEstimator()
        self.assertIsNotNone(estimator)

        adjuster = cv.detail_BundleAdjusterReproj()
        self.assertIsNotNone(adjuster)
        adjuster = cv.detail_BundleAdjusterRay()
        self.assertIsNotNone(adjuster)
        adjuster = cv.detail_BundleAdjusterAffinePartial()
        self.assertIsNotNone(adjuster)
        adjuster = cv.detail_NoBundleAdjuster()
        self.assertIsNotNone(adjuster)

        compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_NO)
        self.assertIsNotNone(compensator)
        compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN)
        self.assertIsNotNone(compensator)
        compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN_BLOCKS)
        self.assertIsNotNone(compensator)

        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR")
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail_DpSeamFinder("COLOR")
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail_DpSeamFinder("COLOR_GRAD")
        self.assertIsNotNone(seam_finder)

        blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
        self.assertIsNotNone(blender)
        blender = cv.detail.Blender_createDefault(cv.detail.Blender_FEATHER)
        self.assertIsNotNone(blender)
        blender = cv.detail.Blender_createDefault(cv.detail.Blender_MULTI_BAND)
        self.assertIsNotNone(blender)

        timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_AS_IS)
        self.assertIsNotNone(timelapser)
        timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_CROP)
        self.assertIsNotNone(timelapser)

if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
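# ----------------------------------------------------------------------
# Usage sketch of the high-level API exercised by stitching_test: stitch
# an arbitrary image list and check the status code before trusting
# `pano`.  The file names are placeholders.
import cv2 as cv

imgs = [cv.imread(p) for p in ('a1.png', 'a2.png')]
stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)
status, pano = stitcher.stitch(imgs)
if status == cv.Stitcher_OK:
    cv.imwrite('pano.png', pano)
else:
    print('stitching failed, status = %d' % status)
# ----------------------------------------------------------------------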
#!/usr/bin/env python

'''
Robust line fitting.
==================

Example of using the cv.fitLine function for fitting a line to points in
the presence of outliers.

Iterates over the different M-estimator functions and checks how well
each robust function fits the line (this test contaminates ~5% of the
points with outliers).
'''

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests

w, h = 512, 256

def toint(p):
    return tuple(map(int, p))

def sample_line(p1, p2, n, noise=0.0):
    np.random.seed(10)
    p1 = np.float32(p1)
    t = np.random.rand(n, 1)
    return p1 + (p2 - p1) * t + np.random.normal(size=(n, 2)) * noise

dist_func_names = ['DIST_L2', 'DIST_L1', 'DIST_L12', 'DIST_FAIR', 'DIST_WELSCH', 'DIST_HUBER']

class fitline_test(NewOpenCVTests):

    def test_fitline(self):

        noise = 5
        n = 200
        r = 5 / 100.0
        outn = int(n * r)

        p0, p1 = (90, 80), (w - 90, h - 80)
        line_points = sample_line(p0, p1, n - outn, noise)
        outliers = np.random.rand(outn, 2) * (w, h)
        points = np.vstack([line_points, outliers])

        lines = []

        for name in dist_func_names:
            func = getattr(cv, name)
            vx, vy, cx, cy = cv.fitLine(np.float32(points), func, 0, 0.01, 0.01)
            line = [float(vx), float(vy), float(cx), float(cy)]
            lines.append(line)

        eps = 0.05

        refVec = (np.float32(p1) - p0) / cv.norm(np.float32(p1) - p0)

        for i in range(len(lines)):
            self.assertLessEqual(cv.norm(refVec - lines[i][0:2], cv.NORM_L2), eps)

if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
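# ----------------------------------------------------------------------
# fitLine returns a unit direction (vx, vy) and a point (x0, y0) on the
# fitted line, which is the convention the assertions above depend on.
# A quick sanity check on noise-free points from y = 2x + 1:
import numpy as np
import cv2 as cv

pts = np.float32([(i, 2 * i + 1) for i in range(10)])
vx, vy, x0, y0 = cv.fitLine(pts, cv.DIST_L2, 0, 0.01, 0.01).ravel()
print('slope ~ 2:', vy / vx, ' passes through:', (x0, y0))
# ----------------------------------------------------------------------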
#!/usr/bin/env python """Algorithm serialization test.""" import tempfile import os import cv2 as cv from tests_common import NewOpenCVTests class algorithm_rw_test(NewOpenCVTests): def test_algorithm_rw(self): fd, fname = tempfile.mkstemp(prefix="opencv_python_algorithm_", suffix=".yml") os.close(fd) # some arbitrary non-default parameters gold = cv.AKAZE_create(descriptor_size=1, descriptor_channels=2, nOctaves=3, threshold=4.0) gold.write(cv.FileStorage(fname, cv.FILE_STORAGE_WRITE), "AKAZE") fs = cv.FileStorage(fname, cv.FILE_STORAGE_READ) algorithm = cv.AKAZE_create() algorithm.read(fs.getNode("AKAZE")) self.assertEqual(algorithm.getDescriptorSize(), 1) self.assertEqual(algorithm.getDescriptorChannels(), 2) self.assertEqual(algorithm.getNOctaves(), 3) self.assertEqual(algorithm.getThreshold(), 4.0) os.remove(fname)
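# ----------------------------------------------------------------------
# The test above round-trips algorithm parameters through FileStorage;
# the same mechanism works for plain matrices, which makes the API easy
# to poke at interactively (the file name is a placeholder):
import numpy as np
import cv2 as cv

fs = cv.FileStorage('params.yml', cv.FILE_STORAGE_WRITE)
fs.write('M', np.eye(3))
fs.release()

fs = cv.FileStorage('params.yml', cv.FILE_STORAGE_READ)
M = fs.getNode('M').mat()
fs.release()
assert np.allclose(M, np.eye(3))
# ----------------------------------------------------------------------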
#!/usr/bin/env python
from __future__ import print_function

import ctypes
from functools import partial

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests, unittest


def is_numeric(dtype):
    return np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.floating)


def get_limits(dtype):
    if not is_numeric(dtype):
        return None, None

    if np.issubdtype(dtype, np.integer):
        info = np.iinfo(dtype)
    else:
        info = np.finfo(dtype)
    return info.min, info.max


def get_conversion_error_msg(value, expected, actual):
    return 'Conversion "{}" of type "{}" failed\nExpected: "{}" vs Actual "{}"'.format(
        value, type(value).__name__, expected, actual
    )


def get_no_exception_msg(value):
    return 'Exception is not raised for {} of type {}'.format(value, type(value).__name__)


class Bindings(NewOpenCVTests):

    def test_inheritance(self):
        bm = cv.StereoBM_create()
        bm.getPreFilterCap()  # from StereoBM
        bm.getBlockSize()  # from StereoMatcher

        boost = cv.ml.Boost_create()
        boost.getBoostType()  # from ml::Boost
        boost.getMaxDepth()  # from ml::DTrees
        boost.isClassifier()  # from ml::StatModel

    def test_redirectError(self):
        try:
            cv.imshow("", None)  # This causes an assert
            self.assertEqual("Dead code", 0)
        except cv.error as _e:
            pass

        handler_called = [False]

        def test_error_handler(status, func_name, err_msg, file_name, line):
            handler_called[0] = True

        cv.redirectError(test_error_handler)
        try:
            cv.imshow("", None)  # This causes an assert
            self.assertEqual("Dead code", 0)
        except cv.error as _e:
            self.assertEqual(handler_called[0], True)
            pass

        cv.redirectError(None)
        try:
            cv.imshow("", None)  # This causes an assert
            self.assertEqual("Dead code", 0)
        except cv.error as _e:
            pass


class Arguments(NewOpenCVTests):

    def _try_to_convert(self, conversion, value):
        try:
            result = conversion(value).lower()
        except Exception as e:
            self.fail(
                '{} "{}" is raised for conversion {} of type {}'.format(
                    type(e).__name__, e, value, type(value).__name__
                )
            )
        else:
            return result

    def test_InputArray(self):
        res1 = cv.utils.dumpInputArray(None)
        # self.assertEqual(res1, "InputArray: noArray()")  # not supported
        self.assertEqual(res1, "InputArray: empty()=true kind=0x00010000 flags=0x01010000 total(-1)=0 dims(-1)=0 size(-1)=0x0 type(-1)=CV_8UC1")
        res2_1 = cv.utils.dumpInputArray((1, 2))
        self.assertEqual(res2_1, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=2 dims(-1)=2 size(-1)=1x2 type(-1)=CV_64FC1")
        res2_2 = cv.utils.dumpInputArray(1.5)  # Scalar(1.5, 1.5, 1.5, 1.5)
        self.assertEqual(res2_2, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=4 dims(-1)=2 size(-1)=1x4 type(-1)=CV_64FC1")
        a = np.array([[1, 2], [3, 4], [5, 6]])
        res3 = cv.utils.dumpInputArray(a)  # 32SC1
        self.assertEqual(res3, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=6 dims(-1)=2 size(-1)=2x3 type(-1)=CV_32SC1")
        a = np.array([[[1, 2], [3, 4], [5, 6]]], dtype='f')
        res4 = cv.utils.dumpInputArray(a)  # 32FC2
        self.assertEqual(res4, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=3 dims(-1)=2 size(-1)=3x1 type(-1)=CV_32FC2")
        a = np.array([[[1, 2]], [[3, 4]], [[5, 6]]], dtype=float)
        res5 = cv.utils.dumpInputArray(a)  # 64FC2
        self.assertEqual(res5, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=3 dims(-1)=2 size(-1)=1x3 type(-1)=CV_64FC2")

    def test_InputArrayOfArrays(self):
        res1 = cv.utils.dumpInputArrayOfArrays(None)
        # self.assertEqual(res1, "InputArray: noArray()")  # not supported
        self.assertEqual(res1, "InputArrayOfArrays: empty()=true kind=0x00050000 flags=0x01050000 total(-1)=0 dims(-1)=1 size(-1)=0x0")
        res2_1 = cv.utils.dumpInputArrayOfArrays((1, 2))  # { Scalar::all(1), Scalar::all(2) }
        self.assertEqual(res2_1, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_64FC1 dims(0)=2 size(0)=1x4 type(0)=CV_64FC1")
        res2_2 = cv.utils.dumpInputArrayOfArrays([1.5])
        self.assertEqual(res2_2, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=1 dims(-1)=1 size(-1)=1x1 type(0)=CV_64FC1 dims(0)=2 size(0)=1x4 type(0)=CV_64FC1")
        a = np.array([[1, 2], [3, 4], [5, 6]])
        b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        res3 = cv.utils.dumpInputArrayOfArrays([a, b])
        self.assertEqual(res3, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_32SC1 dims(0)=2 size(0)=2x3 type(0)=CV_32SC1")
        c = np.array([[[1, 2], [3, 4], [5, 6]]], dtype='f')
        res4 = cv.utils.dumpInputArrayOfArrays([c, a, b])
        self.assertEqual(res4, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=3 dims(-1)=1 size(-1)=3x1 type(0)=CV_32FC2 dims(0)=2 size(0)=3x1 type(0)=CV_32FC2")

    def test_parse_to_bool_convertible(self):
        try_to_convert = partial(self._try_to_convert, cv.utils.dumpBool)
        for convertible_true in (True, 1, 64, np.bool(1), np.int8(123), np.int16(11), np.int32(2),
                                 np.int64(1), np.bool_(3), np.bool8(12)):
            actual = try_to_convert(convertible_true)
            self.assertEqual('bool: true', actual,
                             msg=get_conversion_error_msg(convertible_true, 'bool: true', actual))

        for convertible_false in (False, 0, np.uint8(0), np.bool_(0), np.int_(0)):
            actual = try_to_convert(convertible_false)
            self.assertEqual('bool: false', actual,
                             msg=get_conversion_error_msg(convertible_false, 'bool: false', actual))

    def test_parse_to_bool_not_convertible(self):
        for not_convertible in (1.2, np.float(2.3), 's', 'str', (1, 2), [1, 2], complex(1, 1),
                                complex(imag=2), complex(1.1), np.array([1, 0], dtype=np.bool)):
            with self.assertRaises((TypeError, OverflowError),
                                   msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpBool(not_convertible)

    def test_parse_to_bool_convertible_extra(self):
        try_to_convert = partial(self._try_to_convert, cv.utils.dumpBool)
        _, max_size_t = get_limits(ctypes.c_size_t)
        for convertible_true in (-1, max_size_t):
            actual = try_to_convert(convertible_true)
            self.assertEqual('bool: true', actual,
                             msg=get_conversion_error_msg(convertible_true, 'bool: true', actual))

    def test_parse_to_bool_not_convertible_extra(self):
        for not_convertible in (np.array([False]), np.array([True], dtype=np.bool)):
            with self.assertRaises((TypeError, OverflowError),
                                   msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpBool(not_convertible)

    def test_parse_to_int_convertible(self):
        try_to_convert = partial(self._try_to_convert, cv.utils.dumpInt)
        min_int, max_int = get_limits(ctypes.c_int)
        for convertible in (-10, -1, 2, int(43.2), np.uint8(15), np.int8(33), np.int16(-13),
                            np.int32(4), np.int64(345), (23), min_int, max_int, np.int_(33)):
            expected = 'int: {0:d}'.format(convertible)
            actual = try_to_convert(convertible)
            self.assertEqual(expected, actual,
                             msg=get_conversion_error_msg(convertible, expected, actual))

    def test_parse_to_int_not_convertible(self):
        min_int, max_int = get_limits(ctypes.c_int)
        for not_convertible in (1.2, np.float(4), float(3), np.double(45), 's', 'str',
                                np.array([1, 2]), (1,), [1, 2], min_int - 1, max_int + 1,
                                complex(1, 1), complex(imag=2), complex(1.1)):
            with self.assertRaises((TypeError, OverflowError, ValueError),
                                   msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpInt(not_convertible)

    def test_parse_to_int_not_convertible_extra(self):
        for not_convertible in (np.bool_(True), True, False, np.float32(2.3),
                                np.array([3, ], dtype=int), np.array([-2, ], dtype=np.int32),
                                np.array([1, ], dtype=np.int), np.array([11, ], dtype=np.uint8)):
            with self.assertRaises((TypeError, OverflowError),
                                   msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpInt(not_convertible)

    def test_parse_to_size_t_convertible(self):
        try_to_convert = partial(self._try_to_convert, cv.utils.dumpSizeT)
        _, max_uint = get_limits(ctypes.c_uint)
        for convertible in (2, max_uint, (12), np.uint8(34), np.int8(12), np.int16(23),
                            np.int32(123), np.int64(344), np.uint64(3), np.uint16(2), np.uint32(5),
                            np.uint(44)):
            expected = 'size_t: {0:d}'.format(convertible).lower()
            actual = try_to_convert(convertible)
            self.assertEqual(expected, actual,
                             msg=get_conversion_error_msg(convertible, expected, actual))

    def test_parse_to_size_t_not_convertible(self):
        min_long, _ = get_limits(ctypes.c_long)
        for not_convertible in (1.2, True, False, np.bool_(True), np.float(4), float(3),
                                np.double(45), 's', 'str', np.array([1, 2]), (1,), [1, 2],
                                np.float64(6), complex(1, 1), complex(imag=2), complex(1.1),
                                -1, min_long, np.int8(-35)):
            with self.assertRaises((TypeError, OverflowError),
                                   msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpSizeT(not_convertible)

    def test_parse_to_size_t_convertible_extra(self):
        try_to_convert = partial(self._try_to_convert, cv.utils.dumpSizeT)
        _, max_size_t = get_limits(ctypes.c_size_t)
        for convertible in (max_size_t,):
            expected = 'size_t: {0:d}'.format(convertible).lower()
            actual = try_to_convert(convertible)
            self.assertEqual(expected, actual,
                             msg=get_conversion_error_msg(convertible, expected, actual))

    def test_parse_to_size_t_not_convertible_extra(self):
        for not_convertible in (np.bool_(True), True, False, np.array([123, ], dtype=np.uint8),):
            with self.assertRaises((TypeError, OverflowError),
                                   msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpSizeT(not_convertible)

    def test_parse_to_float_convertible(self):
        try_to_convert = partial(self._try_to_convert, cv.utils.dumpFloat)
        min_float, max_float = get_limits(ctypes.c_float)
        for convertible in (2, -13, 1.24, float(32), np.float(32.45), np.double(12.23),
                            np.float32(-12.3), np.float64(3.22), np.float_(-1.5), min_float,
                            max_float, np.inf, -np.inf, float('Inf'), -float('Inf'),
                            np.double(np.inf), np.double(-np.inf), np.double(float('Inf')),
                            np.double(-float('Inf'))):
            expected = 'Float: {0:.2f}'.format(convertible).lower()
            actual = try_to_convert(convertible)
            self.assertEqual(expected, actual,
                             msg=get_conversion_error_msg(convertible, expected, actual))

        # Workaround for Windows NaN tests due to Visual C runtime
        # special floating point values (indefinite NaN)
        for nan in (float('NaN'), np.nan, np.float32(np.nan), np.double(np.nan),
                    np.double(float('NaN'))):
            actual = try_to_convert(nan)
            self.assertIn('nan', actual,
                          msg="Can't convert nan of type {} to float. "
                              "Actual: {}".format(type(nan).__name__, actual))

        min_double, max_double = get_limits(ctypes.c_double)
        for inf in (min_float * 10, max_float * 10, min_double, max_double):
            expected = 'float: {}inf'.format('-' if inf < 0 else '')
            actual = try_to_convert(inf)
            self.assertEqual(expected, actual,
                             msg=get_conversion_error_msg(inf, expected, actual))

    def test_parse_to_float_not_convertible(self):
        for not_convertible in ('s', 'str', (12,), [1, 2], np.array([1, 2], dtype=np.float),
                                np.array([1, 2], dtype=np.double), complex(1, 1), complex(imag=2),
                                complex(1.1)):
            with self.assertRaises((TypeError), msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpFloat(not_convertible)

    def test_parse_to_float_not_convertible_extra(self):
        for not_convertible in (np.bool_(False), True, False, np.array([123, ], dtype=int),
                                np.array([1., ]), np.array([False]),
                                np.array([True], dtype=np.bool)):
            with self.assertRaises((TypeError, OverflowError),
                                   msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpFloat(not_convertible)

    def test_parse_to_double_convertible(self):
        try_to_convert = partial(self._try_to_convert, cv.utils.dumpDouble)
        min_float, max_float = get_limits(ctypes.c_float)
        min_double, max_double = get_limits(ctypes.c_double)
        for convertible in (2, -13, 1.24, np.float(32.45), float(2), np.double(12.23),
                            np.float32(-12.3), np.float64(3.22), np.float_(-1.5), min_float,
                            max_float, min_double, max_double, np.inf, -np.inf, float('Inf'),
                            -float('Inf'), np.double(np.inf), np.double(-np.inf),
                            np.double(float('Inf')), np.double(-float('Inf'))):
            expected = 'Double: {0:.2f}'.format(convertible).lower()
            actual = try_to_convert(convertible)
            self.assertEqual(expected, actual,
                             msg=get_conversion_error_msg(convertible, expected, actual))

        # Workaround for Windows NaN tests due to Visual C runtime
        # special floating point values (indefinite NaN)
        for nan in (float('NaN'), np.nan, np.double(np.nan), np.double(float('NaN'))):
            actual = try_to_convert(nan)
            self.assertIn('nan', actual,
                          msg="Can't convert nan of type {} to double. "
                              "Actual: {}".format(type(nan).__name__, actual))

    def test_parse_to_double_not_convertible(self):
        for not_convertible in ('s', 'str', (12,), [1, 2], np.array([1, 2], dtype=np.float),
                                np.array([1, 2], dtype=np.double), complex(1, 1), complex(imag=2),
                                complex(1.1)):
            with self.assertRaises((TypeError), msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpDouble(not_convertible)

    def test_parse_to_double_not_convertible_extra(self):
        for not_convertible in (np.bool_(False), True, False, np.array([123, ], dtype=int),
                                np.array([1., ]), np.array([False]),
                                np.array([12.4], dtype=np.double),
                                np.array([True], dtype=np.bool)):
            with self.assertRaises((TypeError, OverflowError),
                                   msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpDouble(not_convertible)

    def test_parse_to_cstring_convertible(self):
        try_to_convert = partial(self._try_to_convert, cv.utils.dumpCString)
        for convertible in ('s', 'str', str(123), ('char'), np.str('test1'), np.str_('test2')):
            expected = 'string: ' + convertible
            actual = try_to_convert(convertible)
            self.assertEqual(expected, actual,
                             msg=get_conversion_error_msg(convertible, expected, actual))

    def test_parse_to_cstring_not_convertible(self):
        for not_convertible in ((12,), ('t', 'e', 's', 't'), np.array(['123', ]),
                                np.array(['t', 'e', 's', 't']), 1, -1.4, True, False, None):
            with self.assertRaises((TypeError), msg=get_no_exception_msg(not_convertible)):
                _ = cv.utils.dumpCString(not_convertible)


class SamplesFindFile(NewOpenCVTests):

    def test_ExistedFile(self):
        res = cv.samples.findFile('lena.jpg', False)
        self.assertNotEqual(res, '')

    def test_MissingFile(self):
        res = cv.samples.findFile('non_existed.file', False)
        self.assertEqual(res, '')

    def test_MissingFileException(self):
        try:
            _res = cv.samples.findFile('non_existed.file', True)
            self.assertEqual("Dead code", 0)
        except cv.error as _e:
            pass


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
'''
Camshift tracker
================

This is a demo that shows mean-shift based tracking.
You select a colored object, such as your face, and it tracks it.
This reads from video camera (0 by default, or the camera number the user enters)

http://www.robinhewitt.com/research/track/camshift.html
'''

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

if PY3:
    xrange = range

import numpy as np
import cv2 as cv

from tst_scene_render import TestSceneRender
from tests_common import NewOpenCVTests, intersectionRate


class camshift_test(NewOpenCVTests):

    framesNum = 300
    frame = None
    selection = None
    drag_start = None
    show_backproj = False
    track_window = None
    render = None
    errors = 0

    def prepareRender(self):
        self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg'), deformation=True)

    def runTracker(self):
        framesCounter = 0
        self.selection = True

        xmin, ymin, xmax, ymax = self.render.getCurrentRect()

        self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)

        while True:
            framesCounter += 1
            self.frame = self.render.getNextFrame()
            hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
            mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

            if self.selection:
                x0, y0, x1, y1 = self.render.getCurrentRect() + 50
                x0 -= 100
                y0 -= 100

                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
                cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                self.selection = False

            if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
                self.selection = None
                prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
                _track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit)

            trackingRect = np.array(self.track_window)
            trackingRect[2] += trackingRect[0]
            trackingRect[3] += trackingRect[1]

            if intersectionRate(self.render.getCurrentRect(), trackingRect) < 0.4:
                self.errors += 1

            if framesCounter > self.framesNum:
                break

        self.assertLess(float(self.errors) / self.framesNum, 0.4)

    def test_camshift(self):
        self.prepareRender()
        self.runTracker()


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
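# --- Minimal sketch (illustrative, synthetic data) -----------------------------
# cv.CamShift above adapts the search window's size and orientation; plain
# cv.meanShift keeps the window size fixed and only relocates it. Any 8-bit
# single-channel probability image works as input.
import numpy as np
import cv2 as cv

prob = np.zeros((240, 320), np.uint8)
cv.circle(prob, (200, 120), 30, 255, -1)  # bright blob to converge on
window = (150, 80, 80, 80)                # x, y, w, h, overlapping the blob
crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
_ret, window = cv.meanShift(prob, window, crit)
print(window)  # the window has shifted toward the blob's centroid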
#!/usr/bin/python
'''
This example illustrates how to use the Hough Transform to find lines.
'''

# Python 2/3 compatibility
from __future__ import print_function

import cv2 as cv
import numpy as np
import sys
import math

from tests_common import NewOpenCVTests


def linesDiff(line1, line2):
    norm1 = cv.norm(line1 - line2, cv.NORM_L2)
    # line1 is a plain list here, so '+' concatenates: line3 is the same
    # segment with its endpoints swapped
    line3 = line1[2:4] + line1[0:2]
    norm2 = cv.norm(line3 - line2, cv.NORM_L2)
    return min(norm1, norm2)


class houghlines_test(NewOpenCVTests):

    def test_houghlines(self):
        fn = "/samples/data/pic1.png"
        src = self.get_sample(fn)
        dst = cv.Canny(src, 50, 200)

        lines = cv.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)[:, 0, :]

        eps = 5
        testLines = [
            # rect1
            [232,  25,  43,  25],
            [ 43, 129, 232, 129],
            [ 43, 129,  43,  25],
            [232, 129, 232,  25],
            # rect2
            [251,  86, 314, 183],
            [252,  86, 323,  40],
            [315, 183, 386, 137],
            [324,  40, 386, 136],
            # triangle
            [245, 205, 377, 205],
            [244, 206, 305, 278],
            [306, 279, 377, 205],
            # rect3
            [153, 177, 196, 177],
            [153, 277, 153, 179],
            [153, 277, 196, 277],
            [196, 177, 196, 277]]

        matches_counter = 0

        for i in range(len(testLines)):
            for j in range(len(lines)):
                if linesDiff(testLines[i], lines[j]) < eps:
                    matches_counter += 1

        self.assertGreater(float(matches_counter) / len(testLines), .7)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
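# --- Worked example (illustrative) ---------------------------------------------
# linesDiff() above treats a segment and its endpoint-reversed copy as the same
# line. Note that testLines entries are plain Python lists, so line1[2:4] +
# line1[0:2] concatenates (swapping endpoints) rather than adding element-wise
# as it would for a numpy array.
import numpy as np

line_a = [10, 20, 30, 40]            # (x1, y1) -> (x2, y2), as a list
line_b = np.array([30, 40, 10, 20])  # the same segment, endpoints swapped
print(linesDiff(line_a, line_b))     # 0.0 -- matched via the reversed copy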
#!/usr/bin/env python
'''
Watershed segmentation test
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests


class watershed_test(NewOpenCVTests):
    def test_watershed(self):
        img = self.get_sample('cv/inpaint/orig.png')
        markers = self.get_sample('cv/watershed/wshed_exp.png', 0)
        refSegments = self.get_sample('cv/watershed/wshed_segments.png')

        if img is None or markers is None:
            self.assertEqual(0, 1, 'Missing test data')

        colors = np.int32(list(np.ndindex(3, 3, 3))) * 122

        # cv.watershed() updates the marker image in place, so keep a
        # reference to the int32 copy instead of discarding it
        markers = np.int32(markers)
        cv.watershed(img, markers)
        segments = colors[np.maximum(markers, 0)]

        if refSegments is None:
            refSegments = segments.copy()
            cv.imwrite(self.extraTestDataPath + '/cv/watershed/wshed_segments.png', refSegments)

        self.assertLess(cv.norm(segments - refSegments, cv.NORM_L1) / 255.0, 50)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
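# --- Minimal sketch (illustrative, synthetic markers) ---------------------------
# Markers for cv.watershed() are typically seeded with cv.connectedComponents():
# each blob gets a positive label and 0 marks the "unknown" area to be flooded.
import numpy as np
import cv2 as cv

gray = np.zeros((100, 100), np.uint8)
cv.circle(gray, (30, 50), 10, 255, -1)
cv.circle(gray, (70, 50), 10, 255, -1)

_n, markers = cv.connectedComponents(gray)    # int32 labels, 0 = unknown
color = cv.cvtColor(gray, cv.COLOR_GRAY2BGR)  # watershed wants 8-bit 3-channel
cv.watershed(color, markers)                  # boundaries become -1, in place
print(np.unique(markers))                     # e.g. [-1  1  2] after flooding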
#!/usr/bin/env python
from __future__ import print_function

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests


class AsyncTest(NewOpenCVTests):

    def test_async_simple(self):
        m = np.array([[1, 2], [3, 4], [5, 6]])
        async_result = cv.utils.testAsyncArray(m)
        self.assertTrue(async_result.valid())
        ret, result = async_result.get(timeoutNs=10**6)  # 1ms
        self.assertTrue(ret)
        self.assertFalse(async_result.valid())
        self.assertEqual(cv.norm(m, result, cv.NORM_INF), 0)

    def test_async_exception(self):
        async_result = cv.utils.testAsyncException()
        self.assertTrue(async_result.valid())
        try:
            _ret, _result = async_result.get(timeoutNs=10**6)  # 1ms
            self.fail("Exception expected")
        except cv.error as e:
            self.assertEqual(cv.Error.StsOk, e.code)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
'''
Test for the discrete Fourier transform (dft)
'''

# Python 2/3 compatibility
from __future__ import print_function

import cv2 as cv
import numpy as np
import sys

from tests_common import NewOpenCVTests


class dft_test(NewOpenCVTests):
    def test_dft(self):
        img = self.get_sample('samples/data/rubberwhale1.png', 0)
        eps = 0.001

        # test direct transform
        refDft = np.fft.fft2(img)
        refDftShift = np.fft.fftshift(refDft)
        refMagnitude = np.log(1.0 + np.abs(refDftShift))

        testDft = cv.dft(np.float32(img), flags=cv.DFT_COMPLEX_OUTPUT)
        testDftShift = np.fft.fftshift(testDft)
        testMagnitude = np.log(1.0 + cv.magnitude(testDftShift[:, :, 0], testDftShift[:, :, 1]))

        # dst=None: let cv.normalize allocate the output; alpha/beta are the
        # target range for NORM_MINMAX
        refMagnitude = cv.normalize(refMagnitude, None, 0.0, 1.0, cv.NORM_MINMAX)
        testMagnitude = cv.normalize(testMagnitude, None, 0.0, 1.0, cv.NORM_MINMAX)

        self.assertLess(cv.norm(refMagnitude - testMagnitude), eps)

        # test inverse transform
        img_back = np.fft.ifft2(refDft)
        img_back = np.abs(img_back)

        img_backTest = cv.idft(testDft)
        img_backTest = cv.magnitude(img_backTest[:, :, 0], img_backTest[:, :, 1])

        img_backTest = cv.normalize(img_backTest, None, 0.0, 1.0, cv.NORM_MINMAX)
        img_back = cv.normalize(img_back, None, 0.0, 1.0, cv.NORM_MINMAX)

        self.assertLess(cv.norm(img_back - img_backTest), eps)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
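# --- Quick check (illustrative) --------------------------------------------------
# cv.dft with DFT_COMPLEX_OUTPUT matches np.fft.fft2 on a small random array,
# which is the property the test above relies on.
import numpy as np
import cv2 as cv

a = np.random.rand(8, 8).astype(np.float32)
ref = np.fft.fft2(a)
got = cv.dft(a, flags=cv.DFT_COMPLEX_OUTPUT)  # shape (8, 8, 2): re/im planes
err = np.abs(ref - (got[:, :, 0] + 1j * got[:, :, 1])).max()
print(err)  # on the order of 1e-5 for float32 input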
#!/usr/bin/env python
'''
Morphology operations.
'''

# Python 2/3 compatibility
from __future__ import print_function

import sys
PY3 = sys.version_info[0] == 3

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests


class morphology_test(NewOpenCVTests):

    def test_morphology(self):
        fn = 'samples/data/rubberwhale1.png'
        img = self.get_sample(fn)

        modes = ['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient']
        str_modes = ['ellipse', 'rect', 'cross']

        referenceHashes = {modes[0]: '071a526425b79e45b4d0d71ef51b0562',
                           modes[1]: '071a526425b79e45b4d0d71ef51b0562',
                           modes[2]: '427e89f581b7df1b60a831b1ed4c8618',
                           modes[3]: '0dd8ad251088a63d0dd022bcdc57361c'}

        def update(cur_mode):
            cur_str_mode = str_modes[0]
            sz = 10
            iters = 1
            opers = cur_mode.split('/')
            if len(opers) > 1:
                # sz's sign selects which of the two operations to apply
                sz = sz - 10
                op = opers[sz > 0]
                sz = abs(sz)
            else:
                op = opers[0]
            sz = sz*2+1

            str_name = 'MORPH_' + cur_str_mode.upper()
            oper_name = 'MORPH_' + op.upper()

            st = cv.getStructuringElement(getattr(cv, str_name), (sz, sz))
            return cv.morphologyEx(img, getattr(cv, oper_name), st, iterations=iters)

        for mode in modes:
            res = update(mode)
            self.assertEqual(self.hashimg(res), referenceHashes[mode])


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
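# --- Illustrative identity --------------------------------------------------------
# The 'gradient' mode above is defined as dilation minus erosion with the same
# structuring element, which is easy to verify directly:
import numpy as np
import cv2 as cv

img = np.zeros((32, 32), np.uint8)
cv.rectangle(img, (8, 8), (23, 23), 255, -1)
st = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
grad = cv.morphologyEx(img, cv.MORPH_GRADIENT, st)
manual = cv.subtract(cv.dilate(img, st), cv.erode(img, st))
print(cv.norm(grad, manual, cv.NORM_INF))  # 0.0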
#!/usr/bin/env python
'''
Test for copyto with mask
'''

# Python 2/3 compatibility
from __future__ import print_function

import cv2 as cv
import numpy as np
import sys

from tests_common import NewOpenCVTests


class copytomask_test(NewOpenCVTests):
    def test_copytomask(self):
        img = self.get_sample('python/images/baboon.png', cv.IMREAD_COLOR)
        eps = 0.

        # Create mask using inRange
        valeurBGRinf = np.array([0, 0, 100])
        valeurBGRSup = np.array([70, 70, 255])
        maskRed = cv.inRange(img, valeurBGRinf, valeurBGRSup)

        # New binding
        dstcv = np.full(np.array((2, 2, 1)) * img.shape, 255, dtype=img.dtype)
        cv.copyTo(img, maskRed, dstcv[:img.shape[0], :img.shape[1], :])

        # Using numpy
        dstnp = np.full(np.array((2, 2, 1)) * img.shape, 255, dtype=img.dtype)
        mask2 = maskRed.astype(bool)
        _, mask_b = np.broadcast_arrays(img, mask2[..., None])
        np.copyto(dstnp[:img.shape[0], :img.shape[1], :], img, where=mask_b)

        self.assertEqual(cv.norm(dstnp, dstcv), eps)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
"""Core serialization tests."""
import tempfile
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests


class persistence_test(NewOpenCVTests):
    def test_yml_rw(self):
        fd, fname = tempfile.mkstemp(prefix="opencv_python_persistence_", suffix=".yml")
        os.close(fd)

        # Writing ...
        expected = np.array([[[0, 1, 2, 3, 4]]])
        expected_str = ("Hello", "World", "!")
        fs = cv.FileStorage(fname, cv.FILE_STORAGE_WRITE)
        fs.write("test", expected)
        fs.write("strings", expected_str)
        fs.release()

        # Reading ...
        fs = cv.FileStorage(fname, cv.FILE_STORAGE_READ)
        root = fs.getFirstTopLevelNode()
        self.assertEqual(root.name(), "test")

        test = fs.getNode("test")
        self.assertEqual(test.empty(), False)
        self.assertEqual(test.name(), "test")
        self.assertEqual(test.type(), cv.FILE_NODE_MAP)
        self.assertEqual(test.isMap(), True)
        actual = test.mat()
        self.assertEqual(actual.shape, expected.shape)
        self.assertEqual(np.array_equal(expected, actual), True)

        strings = fs.getNode("strings")
        self.assertEqual(strings.isSeq(), True)
        self.assertEqual(strings.size(), len(expected_str))
        self.assertEqual(all(strings.at(i).isString() for i in range(strings.size())), True)
        self.assertSequenceEqual([strings.at(i).string() for i in range(strings.size())], expected_str)
        fs.release()
        os.remove(fname)
#!/usr/bin/env python
from itertools import product
from functools import reduce

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests


def norm_inf(x, y=None):
    def norm(vec):
        return np.linalg.norm(vec.flatten(), np.inf)

    x = x.astype(np.float64)
    return norm(x) if y is None else norm(x - y.astype(np.float64))


def norm_l1(x, y=None):
    def norm(vec):
        return np.linalg.norm(vec.flatten(), 1)

    x = x.astype(np.float64)
    return norm(x) if y is None else norm(x - y.astype(np.float64))


def norm_l2(x, y=None):
    def norm(vec):
        return np.linalg.norm(vec.flatten())

    x = x.astype(np.float64)
    return norm(x) if y is None else norm(x - y.astype(np.float64))


def norm_l2sqr(x, y=None):
    def norm(vec):
        return np.square(vec).sum()

    x = x.astype(np.float64)
    return norm(x) if y is None else norm(x - y.astype(np.float64))


def norm_hamming(x, y=None):
    def norm(vec):
        return sum(bin(i).count('1') for i in vec.flatten())

    return norm(x) if y is None else norm(np.bitwise_xor(x, y))


def norm_hamming2(x, y=None):
    def norm(vec):
        def element_norm(element):
            binary_str = bin(element).split('b')[-1]
            # pad to an even length so the string splits into whole bit pairs
            if len(binary_str) % 2 == 1:
                binary_str = '0' + binary_str
            gen = filter(lambda p: p != '00',
                         (binary_str[i:i+2] for i in range(0, len(binary_str), 2)))
            return sum(1 for _ in gen)

        return sum(element_norm(element) for element in vec.flatten())

    return norm(x) if y is None else norm(np.bitwise_xor(x, y))


norm_type_under_test = {
    cv.NORM_INF: norm_inf,
    cv.NORM_L1: norm_l1,
    cv.NORM_L2: norm_l2,
    cv.NORM_L2SQR: norm_l2sqr,
    cv.NORM_HAMMING: norm_hamming,
    cv.NORM_HAMMING2: norm_hamming2
}

norm_name = {
    cv.NORM_INF: 'inf',
    cv.NORM_L1: 'L1',
    cv.NORM_L2: 'L2',
    cv.NORM_L2SQR: 'L2SQR',
    cv.NORM_HAMMING: 'Hamming',
    cv.NORM_HAMMING2: 'Hamming2'
}


def get_element_types(norm_type):
    if norm_type in (cv.NORM_HAMMING, cv.NORM_HAMMING2):
        return (np.uint8,)
    else:
        return (np.uint8, np.int8, np.uint16, np.int16, np.int32, np.float32,
                np.float64)


def generate_vector(shape, dtype):
    if np.issubdtype(dtype, np.integer):
        return np.random.randint(0, 100, shape).astype(dtype)
    else:
        return np.random.normal(10., 12.5, shape).astype(dtype)


shapes = (1, 2, 3, 5, 7, 16, (1, 1), (2, 2), (3, 5), (1, 7))


class norm_test(NewOpenCVTests):

    def test_norm_for_one_array(self):
        np.random.seed(123)
        for norm_type, norm in norm_type_under_test.items():
            element_types = get_element_types(norm_type)
            for shape, element_type in product(shapes, element_types):
                array = generate_vector(shape, element_type)
                expected = norm(array)
                actual = cv.norm(array, norm_type)
                self.assertAlmostEqual(
                    expected, actual, places=2,
                    msg='Array {0} of {1} and norm {2}'.format(
                        array, element_type.__name__, norm_name[norm_type]
                    )
                )

    def test_norm_for_two_arrays(self):
        np.random.seed(456)
        for norm_type, norm in norm_type_under_test.items():
            element_types = get_element_types(norm_type)
            for shape, element_type in product(shapes, element_types):
                first = generate_vector(shape, element_type)
                second = generate_vector(shape, element_type)
                expected = norm(first, second)
                actual = cv.norm(first, second, norm_type)
                self.assertAlmostEqual(
                    expected, actual, places=2,
                    msg='Arrays {0} {1} of type {2} and norm {3}'.format(
                        first, second, element_type.__name__, norm_name[norm_type]
                    )
                )

    def test_norm_fails_for_wrong_type(self):
        for norm_type in (cv.NORM_HAMMING, cv.NORM_HAMMING2):
            with self.assertRaises(Exception,
                                   msg='Type is not checked {0}'.format(norm_name[norm_type])):
                cv.norm(np.array([1, 2], dtype=np.int32), norm_type)

    def test_norm_fails_for_array_and_scalar(self):
        for norm_type in norm_type_under_test:
            with self.assertRaises(Exception,
                                   msg='Exception is not thrown for {0}'.format(norm_name[norm_type])):
                cv.norm(np.array([1, 2], dtype=np.uint8), 123, norm_type)

    def test_norm_fails_for_scalar_and_array(self):
        for norm_type in norm_type_under_test:
            with self.assertRaises(Exception,
                                   msg='Exception is not thrown for {0}'.format(norm_name[norm_type])):
                cv.norm(4, np.array([1, 2], dtype=np.uint8), norm_type)

    def test_norm_fails_for_array_and_norm_type_as_scalar(self):
        for norm_type in norm_type_under_test:
            with self.assertRaises(Exception,
                                   msg='Exception is not thrown for {0}'.format(norm_name[norm_type])):
                cv.norm(np.array([3, 4, 5], dtype=np.uint8), norm_type, normType=norm_type)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
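# --- Worked example (illustrative) -----------------------------------------------
# NORM_HAMMING counts set bits; NORM_HAMMING2 counts non-zero *bit pairs*.
# 0b01100011 splits into pairs 01|10|00|11, of which three differ from 00.
import numpy as np
import cv2 as cv

v = np.array([0b01100011], dtype=np.uint8)
print(cv.norm(v, cv.NORM_HAMMING))   # 4.0 -- four set bits
print(cv.norm(v, cv.NORM_HAMMING2))  # 3.0 -- three non-zero bit pairs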
#!/usr/bin/env python
'''
MSER detector test
'''
# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests


class mser_test(NewOpenCVTests):
    def test_mser(self):

        img = self.get_sample('cv/mser/puzzle.png', 0)
        smallImg = [
            [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
            [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
            [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
            [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
            [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
            [255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255],
            [255, 255, 255, 255, 255,   0,   0,   0,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255],
            [255, 255, 255, 255, 255,   0,   0,   0,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255],
            [255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255],
            [255, 255, 255, 255, 255, 255,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0, 255, 255, 255, 255, 255],
            [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
            [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
            [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
            [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]
        ]
        thresharr = [0, 70, 120, 180, 255]
        kDelta = 5
        mserExtractor = cv.MSER_create()
        mserExtractor.setDelta(kDelta)
        np.random.seed(10)

        for _i in range(100):
            use_big_image = int(np.random.rand(1, 1)*7) != 0
            invert = int(np.random.rand(1, 1)*2) != 0
            binarize = int(np.random.rand(1, 1)*5) != 0 if use_big_image else False
            blur = int(np.random.rand(1, 1)*2) != 0
            thresh = thresharr[int(np.random.rand(1, 1)*5)]
            src0 = img if use_big_image else np.array(smallImg).astype('uint8')
            src = src0.copy()

            kMinArea = 256 if use_big_image else 10
            kMaxArea = int(src.shape[0]*src.shape[1]/4)

            mserExtractor.setMinArea(kMinArea)
            mserExtractor.setMaxArea(kMaxArea)
            if invert:
                cv.bitwise_not(src, src)
            if binarize:
                _, src = cv.threshold(src, thresh, 255, cv.THRESH_BINARY)
            if blur:
                src = cv.GaussianBlur(src, (5, 5), 1.5, 1.5)
            minRegs = 7 if use_big_image else 2
            maxRegs = 1000 if use_big_image else 20
            if binarize and (thresh == 0 or thresh == 255):
                minRegs = maxRegs = 0
            msers, boxes = mserExtractor.detectRegions(src)
            nmsers = len(msers)
            self.assertEqual(nmsers, len(boxes))
            self.assertLessEqual(minRegs, nmsers)
            self.assertGreaterEqual(maxRegs, nmsers)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
'''
Location of tests:
- <opencv_src>/modules/python/test
- <opencv_src>/modules/<module>/misc/python/test/
'''
from __future__ import print_function

import sys
sys.dont_write_bytecode = True  # Don't generate .pyc files / __pycache__ directories

import os
import unittest

# Python 3 moved urlopen to urllib.requests
try:
    from urllib.request import urlopen
except ImportError:
    from urllib import urlopen

from tests_common import NewOpenCVTests

basedir = os.path.abspath(os.path.dirname(__file__))


def load_tests(loader, tests, pattern):
    cwd = os.getcwd()
    config_file = 'opencv_python_tests.cfg'
    locations = [cwd, basedir]
    if os.path.exists(config_file):
        with open(config_file, 'r') as f:
            locations += [str(s).strip() for s in f.readlines()]
    else:
        print('WARNING: OpenCV tests config file ({}) is missing, running subset of tests'.format(config_file))

    tests_pattern = os.environ.get('OPENCV_PYTEST_FILTER', 'test_') + '*.py'
    if tests_pattern != 'test_*.py':
        print('Tests filter: {}'.format(tests_pattern))

    processed = set()
    for l in locations:
        if not os.path.isabs(l):
            l = os.path.normpath(os.path.join(cwd, l))
        if l in processed:
            continue
        processed.add(l)
        print('Discovering python tests from: {}'.format(l))
        sys_path_modify = l not in sys.path
        if sys_path_modify:
            sys.path.append(l)  # Hack python loader
        discovered_tests = loader.discover(l, pattern=tests_pattern, top_level_dir=l)
        print('    found {} tests'.format(discovered_tests.countTestCases()))
        tests.addTests(discovered_tests)
        if sys_path_modify:
            sys.path.remove(l)
    return tests


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
'''
Simple "Square Detector" program.

Loads several images sequentially and tries to find squares in each image.
'''

# Python 2/3 compatibility
import sys
PY3 = sys.version_info[0] == 3

if PY3:
    xrange = range

import numpy as np
import cv2 as cv


def angle_cos(p0, p1, p2):
    d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1)*np.dot(d2, d2)))


def find_squares(img):
    img = cv.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv.Canny(gray, 0, 50, apertureSize=5)
                bin = cv.dilate(bin, None)
            else:
                _retval, bin = cv.threshold(gray, thrs, 255, cv.THRESH_BINARY)
            contours, _hierarchy = cv.findContours(bin, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv.arcLength(cnt, True)
                cnt = cv.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4]) for i in xrange(4)])
                    if max_cos < 0.1 and filterSquares(squares, cnt):
                        squares.append(cnt)

    return squares


def intersectionRate(s1, s2):
    area, _intersection = cv.intersectConvexConvex(np.array(s1), np.array(s2))
    return 2 * area / (cv.contourArea(np.array(s1)) + cv.contourArea(np.array(s2)))


def filterSquares(squares, square):
    for i in range(len(squares)):
        if intersectionRate(squares[i], square) > 0.95:
            return False
    return True


from tests_common import NewOpenCVTests


class squares_test(NewOpenCVTests):

    def test_squares(self):
        img = self.get_sample('samples/data/pic1.png')
        squares = find_squares(img)
        testSquares = [
            [[43, 25], [43, 129], [232, 129], [232, 25]],
            [[252, 87], [324, 40], [387, 137], [315, 184]],
            [[154, 178], [196, 180], [198, 278], [154, 278]],
            [[0, 0], [400, 0], [400, 300], [0, 300]]
        ]

        matches_counter = 0
        for i in range(len(squares)):
            for j in range(len(testSquares)):
                if intersectionRate(squares[i], testSquares[j]) > 0.9:
                    matches_counter += 1

        # float() keeps the ratios correct under Python 2 integer division
        self.assertGreater(float(matches_counter) / len(testSquares), 0.9)
        self.assertLess(float(len(squares) - matches_counter) / len(squares), 0.2)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
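# --- Worked example (illustrative) -----------------------------------------------
# angle_cos() above returns |cos| of the corner angle at p1, so a perfect right
# angle yields 0; the detector's max_cos < 0.1 keeps only near-rectangular quads.
import numpy as np

p0, p1, p2 = np.array([1, 0]), np.array([0, 0]), np.array([0, 1])
print(angle_cos(p0, p1, p2))  # 0.0 for a 90 degree corner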
#!/usr/bin/env python
'''
CUDA-accelerated Computer Vision functions
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv
import os

from tests_common import NewOpenCVTests, unittest


class cuda_test(NewOpenCVTests):
    def setUp(self):
        super(cuda_test, self).setUp()
        if not cv.cuda.getCudaEnabledDeviceCount():
            self.skipTest("No CUDA-capable device is detected")

    def test_cuda_upload_download(self):
        npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
        cuMat = cv.cuda_GpuMat()
        cuMat.upload(npMat)

        self.assertTrue(np.allclose(cuMat.download(), npMat))


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
from __future__ import print_function

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests


class UMat(NewOpenCVTests):

    def test_umat_construct(self):
        data = np.random.random([512, 512])
        # UMat constructors
        data_um = cv.UMat(data)  # from ndarray
        data_sub_um = cv.UMat(data_um, (128, 256), (128, 256))  # from UMat
        data_dst_um = cv.UMat(128, 128, cv.CV_64F)  # from size/type

        # test continuous and submatrix flags
        assert data_um.isContinuous() and not data_um.isSubmatrix()
        assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()

        # test operation on submatrix
        cv.multiply(data_sub_um, 2., dst=data_dst_um)
        assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())

    def test_umat_handle(self):
        a_um = cv.UMat(256, 256, cv.CV_32F)
        _ctx_handle = cv.UMat.context()  # obtain context handle
        _queue_handle = cv.UMat.queue()  # obtain queue handle
        _a_handle = a_um.handle(cv.ACCESS_READ)  # obtain buffer handle
        _offset = a_um.offset  # obtain buffer offset

    def test_umat_matching(self):
        img1 = self.get_sample("samples/data/right01.jpg")
        img2 = self.get_sample("samples/data/right02.jpg")

        orb = cv.ORB_create()

        img1, img2 = cv.UMat(img1), cv.UMat(img2)
        ps1, descs_umat1 = orb.detectAndCompute(img1, None)
        ps2, descs_umat2 = orb.detectAndCompute(img2, None)

        self.assertIsInstance(descs_umat1, cv.UMat)
        self.assertIsInstance(descs_umat2, cv.UMat)
        self.assertGreater(len(ps1), 0)
        self.assertGreater(len(ps2), 0)

        bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)

        res_umats = bf.match(descs_umat1, descs_umat2)
        res = bf.match(descs_umat1.get(), descs_umat2.get())

        self.assertGreater(len(res), 0)
        self.assertEqual(len(res_umats), len(res))

    def test_umat_optical_flow(self):
        img1 = self.get_sample("samples/data/right01.jpg", cv.IMREAD_GRAYSCALE)
        img2 = self.get_sample("samples/data/right02.jpg", cv.IMREAD_GRAYSCALE)
        # Note, that if you want to see performance boost by OCL implementation - you need enough data
        # For example you can increase maxCorners param to 10000 and increase img1 and img2 in such way:
        # img = np.hstack([np.vstack([img] * 6)] * 6)

        feature_params = dict(maxCorners=239,
                              qualityLevel=0.3,
                              minDistance=7,
                              blockSize=7)

        p0 = cv.goodFeaturesToTrack(img1, mask=None, **feature_params)
        p0_umat = cv.goodFeaturesToTrack(cv.UMat(img1), mask=None, **feature_params)
        self.assertEqual(p0_umat.get().shape, p0.shape)

        p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
        p0_umat = cv.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
        self.assertTrue(np.allclose(p0_umat.get(), p0))

        _p1_mask_err = cv.calcOpticalFlowPyrLK(img1, img2, p0, None)

        _p1_mask_err_umat0 = list(map(lambda umat: umat.get(), cv.calcOpticalFlowPyrLK(img1, img2, p0_umat, None)))
        _p1_mask_err_umat1 = list(map(lambda umat: umat.get(), cv.calcOpticalFlowPyrLK(cv.UMat(img1), img2, p0_umat, None)))
        _p1_mask_err_umat2 = list(map(lambda umat: umat.get(), cv.calcOpticalFlowPyrLK(img1, cv.UMat(img2), p0_umat, None)))

        for _p1_mask_err_umat in [_p1_mask_err_umat0, _p1_mask_err_umat1, _p1_mask_err_umat2]:
            for data, data_umat in zip(_p1_mask_err, _p1_mask_err_umat):
                self.assertEqual(data.shape, data_umat.shape)
                self.assertEqual(data.dtype, data_umat.dtype)
        for _p1_mask_err_umat in [_p1_mask_err_umat1, _p1_mask_err_umat2]:
            for data_umat0, data_umat in zip(_p1_mask_err_umat0[:2], _p1_mask_err_umat[:2]):
                self.assertTrue(np.allclose(data_umat0, data_umat))


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
from __future__ import print_function

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests


class Hackathon244Tests(NewOpenCVTests):

    def test_int_array(self):
        a = np.array([-1, 2, -3, 4, -5])
        absa0 = np.abs(a)
        self.assertTrue(cv.norm(a, cv.NORM_L1) == 15)
        absa1 = cv.absdiff(a, 0)
        self.assertEqual(cv.norm(absa1, absa0, cv.NORM_INF), 0)

    def test_imencode(self):
        a = np.zeros((480, 640), dtype=np.uint8)
        flag, ajpg = cv.imencode("img_q90.jpg", a, [cv.IMWRITE_JPEG_QUALITY, 90])
        self.assertEqual(flag, True)
        self.assertEqual(ajpg.dtype, np.uint8)
        self.assertGreater(ajpg.shape[0], 1)
        self.assertEqual(ajpg.shape[1], 1)

    def test_projectPoints(self):
        objpt = np.float64([[1, 2, 3]])
        imgpt0, jac0 = cv.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
        imgpt1, jac1 = cv.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
        self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
        self.assertEqual(imgpt1.shape, imgpt0.shape)
        self.assertEqual(jac0.shape, jac1.shape)
        self.assertEqual(jac0.shape[0], 2*objpt.shape[0])

    def test_estimateAffine3D(self):
        pattern_size = (11, 8)
        pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
        pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
        pattern_points *= 10
        (retval, out, inliers) = cv.estimateAffine3D(pattern_points, pattern_points)
        self.assertEqual(retval, 1)
        if cv.norm(out[2, :]) < 1e-3:
            out[2, 2] = 1
        self.assertLess(cv.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
        self.assertEqual(cv.countNonZero(inliers), pattern_size[0]*pattern_size[1])

    def test_fast(self):
        fd = cv.FastFeatureDetector_create(30, True)
        img = self.get_sample("samples/data/right02.jpg", 0)
        img = cv.medianBlur(img, 3)
        keypoints = fd.detect(img)
        self.assertTrue(600 <= len(keypoints) <= 700)
        for kpt in keypoints:
            self.assertNotEqual(kpt.response, 0)

    def check_close_angles(self, a, b, angle_delta):
        self.assertTrue(abs(a - b) <= angle_delta or
                        abs(360 - abs(a - b)) <= angle_delta)

    def check_close_pairs(self, a, b, delta):
        self.assertLessEqual(abs(a[0] - b[0]), delta)
        self.assertLessEqual(abs(a[1] - b[1]), delta)

    def check_close_boxes(self, a, b, delta, angle_delta):
        self.check_close_pairs(a[0], b[0], delta)
        self.check_close_pairs(a[1], b[1], delta)
        self.check_close_angles(a[2], b[2], angle_delta)

    def test_geometry(self):
        npt = 100
        np.random.seed(244)
        a = np.random.randn(npt, 2).astype('float32')*50 + 150

        be = cv.fitEllipse(a)
        br = cv.minAreaRect(a)
        mc, mr = cv.minEnclosingCircle(a)

        be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
        br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)
        mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977

        self.check_close_boxes(be, be0, 5, 15)
        self.check_close_boxes(br, br0, 5, 15)
        self.check_close_pairs(mc, mc0, 5)
        self.assertLessEqual(abs(mr - mr0), 5)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
from __future__ import print_function

import os
import sys
import unittest
import hashlib
import random
import argparse

import numpy as np
import cv2 as cv

# Python 3 moved urlopen to urllib.requests
try:
    from urllib.request import urlopen
except ImportError:
    from urllib import urlopen


class NewOpenCVTests(unittest.TestCase):

    # path to local repository folder containing 'samples' folder
    repoPath = None
    extraTestDataPath = None
    # github repository url
    repoUrl = 'https://raw.github.com/opencv/opencv/master'

    def find_file(self, filename, searchPaths=[], required=True):
        searchPaths = searchPaths if searchPaths else [self.repoPath, self.extraTestDataPath]
        for path in searchPaths:
            if path is not None:
                candidate = path + '/' + filename
                if os.path.isfile(candidate):
                    return candidate
        if required:
            self.fail('File ' + filename + ' not found')
        return None

    def get_sample(self, filename, iscolor=None):
        if iscolor is None:
            iscolor = cv.IMREAD_COLOR
        if not filename in self.image_cache:
            filepath = self.find_file(filename)
            with open(filepath, 'rb') as f:
                filedata = f.read()
            self.image_cache[filename] = cv.imdecode(np.frombuffer(filedata, dtype=np.uint8), iscolor)
        return self.image_cache[filename]

    def setUp(self):
        cv.setRNGSeed(10)
        self.image_cache = {}

    def hashimg(self, im):
        """ Compute a hash for an image, useful for image comparisons """
        return hashlib.md5(im.tobytes()).hexdigest()

    if sys.version_info[:2] == (2, 6):
        def assertLess(self, a, b, msg=None):
            if not a < b:
                self.fail('%s not less than %s' % (repr(a), repr(b)))

        def assertLessEqual(self, a, b, msg=None):
            if not a <= b:
                self.fail('%s not less than or equal to %s' % (repr(a), repr(b)))

        def assertGreater(self, a, b, msg=None):
            if not a > b:
                self.fail('%s not greater than %s' % (repr(a), repr(b)))

    @staticmethod
    def bootstrap():
        parser = argparse.ArgumentParser(description='run OpenCV python tests')
        parser.add_argument('--repo', help='use sample image files from local git repository (path to folder), '
                                           'if not set, samples will be downloaded from github.com')
        parser.add_argument('--data', help='<not used> use data files from local folder (path to folder), '
                                           'if not set, data files will be downloaded from docs.opencv.org')
        args, other = parser.parse_known_args()
        print("Testing OpenCV", cv.__version__)
        print("Local repo path:", args.repo)
        NewOpenCVTests.repoPath = args.repo
        try:
            NewOpenCVTests.extraTestDataPath = os.environ['OPENCV_TEST_DATA_PATH']
        except KeyError:
            print('Missing opencv extra repository. Some of tests may fail.')
        random.seed(0)
        unit_argv = [sys.argv[0]] + other
        unittest.main(argv=unit_argv)


def intersectionRate(s1, s2):
    x1, y1, x2, y2 = s1
    s1 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])

    x1, y1, x2, y2 = s2
    s2 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])

    area, _intersection = cv.intersectConvexConvex(s1, s2)
    return 2 * area / (cv.contourArea(s1) + cv.contourArea(s2))


def isPointInRect(p, rect):
    if rect[0] <= p[0] and rect[1] <= p[1] and p[0] <= rect[2] and p[1] <= rect[3]:
        return True
    else:
        return False
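# --- Worked example (illustrative) -----------------------------------------------
# intersectionRate() above is a Dice-style overlap score,
# 2 * overlap_area / (area1 + area2), over (x1, y1, x2, y2) corner rectangles:
# identical boxes score 1.0 and disjoint boxes score 0.0.
box = (0, 0, 10, 10)
print(intersectionRate(box, box))               # 1.0
print(intersectionRate(box, (20, 20, 30, 30)))  # 0.0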
# Python 2/3 compatibility
#!/usr/bin/env python

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
from numpy import pi, sin, cos

import cv2 as cv

defaultSize = 512


class TestSceneRender():

    def __init__(self, bgImg=None, fgImg=None, deformation=False, noise=0.0, speed=0.25, **params):
        self.time = 0.0
        self.timeStep = 1.0 / 30.0
        self.foreground = fgImg
        self.deformation = deformation
        self.noise = noise
        self.speed = speed

        if bgImg is not None:
            self.sceneBg = bgImg.copy()
        else:
            # default background must be 3-channel: the drawing and noise code
            # below works with color images
            self.sceneBg = np.zeros((defaultSize, defaultSize, 3), np.uint8)

        self.w = self.sceneBg.shape[0]
        self.h = self.sceneBg.shape[1]

        if fgImg is not None:
            self.foreground = fgImg.copy()
            self.center = self.currentCenter = (int(self.w/2 - fgImg.shape[0]/2),
                                                int(self.h/2 - fgImg.shape[1]/2))

            self.xAmpl = self.sceneBg.shape[0] - (self.center[0] + fgImg.shape[0])
            self.yAmpl = self.sceneBg.shape[1] - (self.center[1] + fgImg.shape[1])

        self.initialRect = np.array([(self.h/2, self.w/2), (self.h/2, self.w/2 + self.w/10),
                                     (self.h/2 + self.h/10, self.w/2 + self.w/10),
                                     (self.h/2 + self.h/10, self.w/2)]).astype(int)
        self.currentRect = self.initialRect
        np.random.seed(10)

    def getXOffset(self, time):
        return int(self.xAmpl*cos(time*self.speed))

    def getYOffset(self, time):
        return int(self.yAmpl*sin(time*self.speed))

    def setInitialRect(self, rect):
        self.initialRect = rect

    def getRectInTime(self, time):
        if self.foreground is not None:
            tmp = np.array(self.center) + np.array((self.getXOffset(time), self.getYOffset(time)))
            x0, y0 = tmp
            x1, y1 = tmp + self.foreground.shape[0:2]
            return np.array([y0, x0, y1, x1])
        else:
            x0, y0 = self.initialRect[0] + np.array((self.getXOffset(time), self.getYOffset(time)))
            x1, y1 = self.initialRect[2] + np.array((self.getXOffset(time), self.getYOffset(time)))
            return np.array([y0, x0, y1, x1])

    def getCurrentRect(self):
        if self.foreground is not None:
            x0 = self.currentCenter[0]
            y0 = self.currentCenter[1]
            x1 = self.currentCenter[0] + self.foreground.shape[0]
            y1 = self.currentCenter[1] + self.foreground.shape[1]
            return np.array([y0, x0, y1, x1])
        else:
            x0, y0 = self.currentRect[0]
            x1, y1 = self.currentRect[2]
            return np.array([x0, y0, x1, y1])

    def getNextFrame(self):
        img = self.sceneBg.copy()

        if self.foreground is not None:
            self.currentCenter = (self.center[0] + self.getXOffset(self.time),
                                  self.center[1] + self.getYOffset(self.time))
            img[self.currentCenter[0]:self.currentCenter[0]+self.foreground.shape[0],
                self.currentCenter[1]:self.currentCenter[1]+self.foreground.shape[1]] = self.foreground
        else:
            self.currentRect = self.initialRect + int(30*cos(self.time) + 50*sin(self.time/3))
            if self.deformation:
                self.currentRect[1:3] += int(self.h/20*cos(self.time))
            cv.fillConvexPoly(img, self.currentRect, (0, 0, 255))

        self.time += self.timeStep

        if self.noise:
            noise = np.zeros(self.sceneBg.shape, np.int8)
            cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            img = cv.add(img, noise, dtype=cv.CV_8UC3)
        return img

    def resetTime(self):
        self.time = 0.0


if __name__ == '__main__':

    backGr = cv.imread('../../../samples/data/lena.jpg')

    render = TestSceneRender(backGr, noise=0.5)

    while True:
        img = render.getNextFrame()
        cv.imshow('img', img)

        ch = cv.waitKey(3)
        if ch == 27:
            break

    cv.destroyAllWindows()
#!/usr/bin/env python
from __future__ import print_function

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests


class Features2D_Tests(NewOpenCVTests):

    def test_issue_13406(self):
        self.assertEqual(True, hasattr(cv, 'drawKeypoints'))
        self.assertEqual(True, hasattr(cv, 'DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS'))
        self.assertEqual(True, hasattr(cv, 'DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS'))


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
'''
===============================================================================
Interactive Image Segmentation using GrabCut algorithm.
===============================================================================
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv
import sys

from tests_common import NewOpenCVTests


class grabcut_test(NewOpenCVTests):

    def verify(self, mask, exp):
        maxDiffRatio = 0.02
        expArea = np.count_nonzero(exp)
        nonIntersectArea = np.count_nonzero(mask != exp)
        curRatio = float(nonIntersectArea) / expArea
        return curRatio < maxDiffRatio

    def scaleMask(self, mask):
        return np.where((mask == cv.GC_FGD) + (mask == cv.GC_PR_FGD), 255, 0).astype('uint8')

    def test_grabcut(self):
        img = self.get_sample('cv/shared/airplane.png')
        mask_prob = self.get_sample("cv/grabcut/mask_probpy.png", 0)
        exp_mask1 = self.get_sample("cv/grabcut/exp_mask1py.png", 0)
        exp_mask2 = self.get_sample("cv/grabcut/exp_mask2py.png", 0)

        if img is None:
            self.assertTrue(False, 'Missing test data')

        rect = (24, 126, 459, 168)
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)

        cv.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv.GC_INIT_WITH_RECT)
        cv.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv.GC_EVAL)

        if mask_prob is None:
            mask_prob = mask.copy()
            cv.imwrite(self.extraTestDataPath + '/cv/grabcut/mask_probpy.png', mask_prob)
        if exp_mask1 is None:
            exp_mask1 = self.scaleMask(mask)
            cv.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask1py.png', exp_mask1)

        self.assertEqual(self.verify(self.scaleMask(mask), exp_mask1), True)

        mask = mask_prob
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)

        cv.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv.GC_INIT_WITH_MASK)
        cv.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv.GC_EVAL)

        if exp_mask2 is None:
            exp_mask2 = self.scaleMask(mask)
            cv.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask2py.png', exp_mask2)

        self.assertEqual(self.verify(self.scaleMask(mask), exp_mask2), True)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/python
'''
This example illustrates how to use the cv.HoughCircles() function.
'''

# Python 2/3 compatibility
from __future__ import print_function

import cv2 as cv
import numpy as np
import sys
from numpy import pi, sin, cos

from tests_common import NewOpenCVTests


def circleApproximation(circle):
    nPoints = 30
    dPhi = 2*pi / nPoints
    contour = []
    for i in range(nPoints):
        contour.append(([circle[0] + circle[2]*cos(i*dPhi),
                         circle[1] + circle[2]*sin(i*dPhi)]))

    return np.array(contour).astype(int)


def convContoursIntersectionRate(c1, c2):
    s1 = cv.contourArea(c1)
    s2 = cv.contourArea(c2)

    s, _ = cv.intersectConvexConvex(c1, c2)

    return 2*s/(s1+s2)


class houghcircles_test(NewOpenCVTests):

    def test_houghcircles(self):
        fn = "samples/data/board.jpg"

        src = self.get_sample(fn, 1)
        img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
        img = cv.medianBlur(img, 5)

        circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0]

        testCircles = [[38, 181, 17.6],
                       [99.7, 166, 13.12],
                       [142.7, 160, 13.52],
                       [223.6, 110, 8.62],
                       [79.1, 206.7, 8.62],
                       [47.5, 351.6, 11.64],
                       [189.5, 354.4, 11.64],
                       [189.8, 298.9, 10.64],
                       [189.5, 252.4, 14.62],
                       [252.5, 393.4, 15.62],
                       [602.9, 467.5, 11.42],
                       [222, 210.4, 9.12],
                       [263.1, 216.7, 9.12],
                       [359.8, 222.6, 9.12],
                       [518.9, 120.9, 9.12],
                       [413.8, 113.4, 9.12],
                       [489, 127.2, 9.12],
                       [448.4, 121.3, 9.12],
                       [384.6, 128.9, 8.62]]

        matches_counter = 0

        for i in range(len(testCircles)):
            for j in range(len(circles)):

                tstCircle = circleApproximation(testCircles[i])
                circle = circleApproximation(circles[j])
                if convContoursIntersectionRate(tstCircle, circle) > 0.6:
                    matches_counter += 1

        self.assertGreater(float(matches_counter) / len(testCircles), .5)
        self.assertLess(float(len(circles) - matches_counter) / len(circles), .75)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
'''
Texture flow direction estimation.

Sample shows how cv.cornerEigenValsAndVecs function can be used
to estimate image texture flow direction.
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv
import sys

from tests_common import NewOpenCVTests


class texture_flow_test(NewOpenCVTests):

    def test_texture_flow(self):

        img = self.get_sample('samples/data/chessboard.png')

        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        h, w = img.shape[:2]

        eigen = cv.cornerEigenValsAndVecs(gray, 5, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        flow = eigen[:, :, 2]

        d = 300
        eps = d / 30

        points = np.dstack(np.mgrid[d/2:w:d, d/2:h:d]).reshape(-1, 2)

        textureVectors = []

        for x, y in np.int32(points):
            textureVectors.append(np.int32(flow[y, x]*d))

        for i in range(len(textureVectors)):
            self.assertTrue(cv.norm(textureVectors[i], cv.NORM_L2) < eps
                            or abs(cv.norm(textureVectors[i], cv.NORM_L2) - d) < eps)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
'''
K-means clustering test
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv
from numpy import random
import sys
PY3 = sys.version_info[0] == 3
if PY3:
    xrange = range

from tests_common import NewOpenCVTests


def make_gaussians(cluster_n, img_size):
    points = []
    ref_distrs = []
    sizes = []
    for _ in xrange(cluster_n):
        mean = (0.1 + 0.8*random.rand(2)) * img_size
        a = (random.rand(2, 2)-0.5)*img_size*0.1
        cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
        n = 100 + random.randint(900)
        pts = random.multivariate_normal(mean, cov, n)
        points.append(pts)
        ref_distrs.append((mean, cov))
        sizes.append(n)
    points = np.float32(np.vstack(points))
    return points, ref_distrs, sizes


def getMainLabelConfidence(labels, nLabels):
    n = len(labels)
    labelsDict = dict.fromkeys(range(nLabels), 0)
    labelsConfDict = dict.fromkeys(range(nLabels))
    for i in range(n):
        labelsDict[labels[i][0]] += 1
    for i in range(nLabels):
        labelsConfDict[i] = float(labelsDict[i]) / n
    return max(labelsConfDict.values())


class kmeans_test(NewOpenCVTests):

    def test_kmeans(self):

        np.random.seed(10)

        cluster_n = 5
        img_size = 512

        points, _, clusterSizes = make_gaussians(cluster_n, img_size)

        term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1)
        _ret, labels, centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0)

        self.assertEqual(len(centers), cluster_n)

        offset = 0
        for i in range(cluster_n):
            confidence = getMainLabelConfidence(labels[offset: (offset + clusterSizes[i])], cluster_n)
            offset += clusterSizes[i]
            self.assertGreater(confidence, 0.9)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
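# --- Minimal usage sketch (illustrative) -------------------------------------------
# cv.kmeans on tiny 1-D float32 data; labels come back as an (N, 1) int32
# column, which is why getMainLabelConfidence() above indexes labels[i][0].
import numpy as np
import cv2 as cv

pts = np.float32([[0], [1], [9], [10]])
crit = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_compactness, labels, centers = cv.kmeans(pts, 2, None, crit, 5, cv.KMEANS_RANDOM_CENTERS)
print(labels.ravel(), centers.ravel())  # two clusters with centers near 0.5 and 9.5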
# Python 2/3 compatibility
#!/usr/bin/env python

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

if PY3:
    xrange = range

import numpy as np
from numpy import random
import cv2 as cv


def make_gaussians(cluster_n, img_size):
    points = []
    ref_distrs = []
    for _ in xrange(cluster_n):
        mean = (0.1 + 0.8*random.rand(2)) * img_size
        a = (random.rand(2, 2)-0.5)*img_size*0.1
        cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
        n = 100 + random.randint(900)
        pts = random.multivariate_normal(mean, cov, n)
        points.append(pts)
        ref_distrs.append((mean, cov))
    points = np.float32(np.vstack(points))
    return points, ref_distrs


from tests_common import NewOpenCVTests


class gaussian_mix_test(NewOpenCVTests):

    def test_gaussian_mix(self):

        np.random.seed(10)
        cluster_n = 5
        img_size = 512

        points, ref_distrs = make_gaussians(cluster_n, img_size)

        em = cv.ml.EM_create()
        em.setClustersNumber(cluster_n)
        em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)
        em.trainEM(points)
        means = em.getMeans()
        covs = em.getCovs()  # Known bug: https://github.com/opencv/opencv/pull/4232
        #found_distrs = zip(means, covs)

        matches_count = 0

        meanEps = 0.05
        covEps = 0.1

        for i in range(cluster_n):
            for j in range(cluster_n):
                if (cv.norm(means[i] - ref_distrs[j][0], cv.NORM_L2) / cv.norm(ref_distrs[j][0], cv.NORM_L2) < meanEps and
                        cv.norm(covs[i] - ref_distrs[j][1], cv.NORM_L2) / cv.norm(ref_distrs[j][1], cv.NORM_L2) < covEps):
                    matches_count += 1

        self.assertEqual(matches_count, cluster_n)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
#!/usr/bin/env python
from __future__ import print_function
import os, sys, re, string, io

# the list only for debugging. The real list, used in the real OpenCV build, is specified in CMakeLists.txt
opencv_hdr_list = [
    "../../core/include/opencv2/core.hpp",
    "../../core/include/opencv2/core/mat.hpp",
    "../../core/include/opencv2/core/ocl.hpp",
    "../../flann/include/opencv2/flann/miniflann.hpp",
    "../../ml/include/opencv2/ml.hpp",
    "../../imgproc/include/opencv2/imgproc.hpp",
    "../../calib3d/include/opencv2/calib3d.hpp",
    "../../features2d/include/opencv2/features2d.hpp",
    "../../video/include/opencv2/video/tracking.hpp",
    "../../video/include/opencv2/video/background_segm.hpp",
    "../../objdetect/include/opencv2/objdetect.hpp",
    "../../imgcodecs/include/opencv2/imgcodecs.hpp",
    "../../videoio/include/opencv2/videoio.hpp",
    "../../highgui/include/opencv2/highgui.hpp",
]

"""
Each declaration is [funcname, return_value_type /* in C, not in Python */, <list_of_modifiers>, <list_of_arguments>, original_return_type, docstring],
where each element of <list_of_arguments> is a 4-element list itself:
[argtype, argname, default_value /* or "" if none */, <list_of_modifiers>]
where the list of modifiers is yet another nested list of strings
   (currently recognized are "/O" for output argument, "/S" for static (i.e. class) methods
   and "/A value" for the plain C arrays with counters)
original_return_type is None if the original_return_type is the same as return_value_type
"""


class CppHeaderParser(object):

    def __init__(self, generate_umat_decls=False, generate_gpumat_decls=False):
        self._generate_umat_decls = generate_umat_decls
        self._generate_gpumat_decls = generate_gpumat_decls

        self.BLOCK_TYPE = 0
        self.BLOCK_NAME = 1
        self.PROCESS_FLAG = 2
        self.PUBLIC_SECTION = 3
        self.CLASS_DECL = 4

        self.namespaces = set()

    def batch_replace(self, s, pairs):
        for before, after in pairs:
            s = s.replace(before, after)
        return s

    def get_macro_arg(self, arg_str, npos):
        npos2 = npos3 = arg_str.find("(", npos)
        if npos2 < 0:
            print("Error: no arguments for the macro at %d" % (self.lineno,))
            sys.exit(-1)
        balance = 1
        while 1:
            t, npos3 = self.find_next_token(arg_str, ['(', ')'], npos3+1)
            if npos3 < 0:
                print("Error: no matching ')' in the macro call at %d" % (self.lineno,))
                sys.exit(-1)
            if t == '(':
                balance += 1
            if t == ')':
                balance -= 1
                if balance == 0:
                    break

        return arg_str[npos2+1:npos3].strip(), npos3

    def parse_arg(self, arg_str, argno):
        """
        Parses <arg_type> [arg_name]
        Returns arg_type, arg_name, modlist, argno, where
        modlist is the list of wrapper-related modifiers (such as "output argument", "has counter", ...)
        and argno is the new index of an anonymous argument.
        That is, if arg_str is just an argument type without an argument name, the argument name is set to
        "arg" + str(argno), and then argno is incremented.
        """
        modlist = []

        # pass 0: extracts the modifiers
        if "CV_OUT" in arg_str:
            modlist.append("/O")
            arg_str = arg_str.replace("CV_OUT", "")

        if "CV_IN_OUT" in arg_str:
            modlist.append("/IO")
            arg_str = arg_str.replace("CV_IN_OUT", "")

        isarray = False
        npos = arg_str.find("CV_CARRAY")
        if npos >= 0:
            isarray = True
            macro_arg, npos3 = self.get_macro_arg(arg_str, npos)

            modlist.append("/A " + macro_arg)
            arg_str = arg_str[:npos] + arg_str[npos3+1:]

        npos = arg_str.find("CV_CUSTOM_CARRAY")
        if npos >= 0:
            isarray = True
            macro_arg, npos3 = self.get_macro_arg(arg_str, npos)

            modlist.append("/CA " + macro_arg)
            arg_str = arg_str[:npos] + arg_str[npos3+1:]

        npos = arg_str.find("const")
        if npos >= 0:
            modlist.append("/C")

        npos = arg_str.find("&")
        if npos >= 0:
            modlist.append("/Ref")

        arg_str = arg_str.strip()
        word_start = 0
        word_list = []
        npos = -1

        #print self.lineno, ":\t", arg_str

        # pass 1: split argument type into tokens
        while 1:
            npos += 1
            t, npos = self.find_next_token(arg_str, [" ", "&", "*", "<", ">", ","], npos)
            w = arg_str[word_start:npos].strip()
            if w == "operator":
                word_list.append("operator " + arg_str[npos:].strip())
                break
            if w not in ["", "const"]:
                word_list.append(w)
            if t not in ["", " ", "&"]:
                word_list.append(t)
            if not t:
                break
            word_start = npos+1
            npos = word_start - 1

        arg_type = ""
        arg_name = ""
        angle_stack = []

        #print self.lineno, ":\t", word_list

        # pass 2: decrypt the list
        wi = -1
        prev_w = ""
        for w in word_list:
            wi += 1
            if w == "*":
                if prev_w == "char" and not isarray:
                    arg_type = arg_type[:-len("char")] + "c_string"
                else:
                    arg_type += w
                continue
            elif w == "<":
                arg_type += "_"
                angle_stack.append(0)
            elif w == "," or w == '>':
                if not angle_stack:
                    print("Error at %d: argument contains ',' or '>' not within template arguments" % (self.lineno,))
                    sys.exit(-1)
                if w == ",":
                    arg_type += "_and_"
                elif w == ">":
                    if angle_stack[0] == 0:
                        print("Error at %s:%d: template has no arguments" % (self.hname, self.lineno))
                        sys.exit(-1)
                    if angle_stack[0] > 1:
                        arg_type += "_end_"
                    angle_stack[-1:] = []
            elif angle_stack:
                arg_type += w
                angle_stack[-1] += 1
            elif arg_type == "struct":
                arg_type += " " + w
            elif arg_type and arg_type != "~":
                arg_name = " ".join(word_list[wi:])
                break
            else:
                arg_type += w
            prev_w = w

        counter_str = ""
        add_star = False
        if ("[" in arg_name) and not ("operator" in arg_str):
            #print arg_str
            p1 = arg_name.find("[")
            p2 = arg_name.find("]", p1+1)
            if p2 < 0:
                print("Error at %d: no closing ]" % (self.lineno,))
                sys.exit(-1)
            counter_str = arg_name[p1+1:p2].strip()
            if counter_str == "":
                counter_str = "?"
            if not isarray:
                modlist.append("/A " + counter_str.strip())
            arg_name = arg_name[:p1]
            add_star = True

        if not arg_name:
            if arg_type.startswith("operator"):
                arg_type, arg_name = "", arg_type
            else:
                arg_name = "arg" + str(argno)
                argno += 1

        while arg_type.endswith("_end_"):
            arg_type = arg_type[:-len("_end_")]

        if add_star:
            arg_type += "*"

        arg_type = self.batch_replace(arg_type, [("std::", ""), ("cv::", ""), ("::", "_")])

        return arg_type, arg_name, modlist, argno

    def parse_enum(self, decl_str):
        l = decl_str
        ll = l.split(",")
        if ll[-1].strip() == "":
            ll = ll[:-1]
        prev_val = ""
        prev_val_delta = -1
        decl = []
        for pair in ll:
            pv = pair.split("=")
            if len(pv) == 1:
                prev_val_delta += 1
                val = ""
                if prev_val:
                    val = prev_val + "+"
                val += str(prev_val_delta)
            else:
                prev_val_delta = 0
                prev_val = val = pv[1].strip()
            decl.append(["const " + self.get_dotted_name(pv[0].strip()), val, [], [], None, ""])
        return decl

    def parse_class_decl(self, decl_str):
        """
        Parses class/struct declaration start in the form:
            {class|struct} [CV_EXPORTS] <class_name> [: public <base_class1> [, ...]]
        Returns class_name1, <list of base_classes>
        """
        l = decl_str
        modlist = []
        if "CV_EXPORTS_W_MAP" in l:
            l = l.replace("CV_EXPORTS_W_MAP", "")
            modlist.append("/Map")
        if "CV_EXPORTS_W_SIMPLE" in l:
            l = l.replace("CV_EXPORTS_W_SIMPLE", "")
            modlist.append("/Simple")
        npos = l.find("CV_EXPORTS_AS")
        if npos >= 0:
            macro_arg, npos3 = self.get_macro_arg(l, npos)
            modlist.append("=" + macro_arg)
            l = l[:npos] + l[npos3+1:]

        l = self.batch_replace(l, [("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("public virtual ", " "), ("public ", " "), ("::", ".")]).strip()
        ll = re.split(r'\s+|\s*[,:]\s*', l)
        ll = [le for le in ll if le]
        classname = ll[1]
        bases = ll[2:]
        return classname, bases, modlist

    def parse_func_decl_no_wrap(self, decl_str, static_method=False, docstring=""):
        decl_str = (decl_str or "").strip()
        virtual_method = False
        explicit_method = False
        if decl_str.startswith("explicit"):
            decl_str = decl_str[len("explicit"):].lstrip()
            explicit_method = True
        if decl_str.startswith("virtual"):
            decl_str = decl_str[len("virtual"):].lstrip()
            virtual_method = True
        if decl_str.startswith("static"):
            decl_str = decl_str[len("static"):].lstrip()
            static_method = True

        fdecl = decl_str.replace("CV_OUT", "").replace("CV_IN_OUT", "")
        fdecl = fdecl.strip().replace("\t", " ")
        # collapse runs of spaces so the name/type split below is reliable
        while "  " in fdecl:
            fdecl = fdecl.replace("  ", " ")
        fname = fdecl[:fdecl.find("(")].strip()
        fnpos = fname.rfind(" ")
        if fnpos < 0:
            fnpos = 0
        fname = fname[fnpos:].strip()
        rettype = fdecl[:fnpos].strip()

        if rettype.endswith("operator"):
            fname = ("operator " + fname).strip()
            rettype = rettype[:rettype.rfind("operator")].strip()
            if rettype.endswith("::"):
                rpos = rettype.rfind(" ")
                if rpos >= 0:
                    fname = rettype[rpos+1:].strip() + fname
                    rettype = rettype[:rpos].strip()
                else:
                    fname = rettype + fname
                    rettype = ""

        apos = fdecl.find("(")
        if fname.endswith("operator"):
            fname += " ()"
            apos = fdecl.find("(", apos+1)

        fname = "cv."
+ fname.replace("::", ".") decl = [fname, rettype, [], [], None, docstring] # inline constructor implementation implmatch = re.match(r"(\(.*?\))\s*:\s*(\w+\(.*?\),?\s*)+", fdecl[apos:]) if bool(implmatch): fdecl = fdecl[:apos] + implmatch.group(1) args0str = fdecl[apos+1:fdecl.rfind(")")].strip() if args0str != "" and args0str != "void": args0str = re.sub(r"\([^)]*\)", lambda m: m.group(0).replace(',', "@comma@"), args0str) args0 = args0str.split(",") args = [] narg = "" for arg in args0: narg += arg.strip() balance_paren = narg.count("(") - narg.count(")") balance_angle = narg.count("<") - narg.count(">") if balance_paren == 0 and balance_angle == 0: args.append(narg.strip()) narg = "" for arg in args: dfpos = arg.find("=") defval = "" if dfpos >= 0: defval = arg[dfpos+1:].strip() else: dfpos = arg.find("CV_DEFAULT") if dfpos >= 0: defval, pos3 = self.get_macro_arg(arg, dfpos) else: dfpos = arg.find("CV_WRAP_DEFAULT") if dfpos >= 0: defval, pos3 = self.get_macro_arg(arg, dfpos) if dfpos >= 0: defval = defval.replace("@comma@", ",") arg = arg[:dfpos].strip() pos = len(arg)-1 while pos >= 0 and (arg[pos] in "_[]" or arg[pos].isalpha() or arg[pos].isdigit()): pos -= 1 if pos >= 0: aname = arg[pos+1:].strip() atype = arg[:pos+1].strip() if aname.endswith("&") or aname.endswith("*") or (aname in ["int", "String", "Mat"]): atype = (atype + " " + aname).strip() aname = "" else: atype = arg aname = "" if aname.endswith("]"): bidx = aname.find('[') atype += aname[bidx:] aname = aname[:bidx] decl[3].append([atype, aname, defval, []]) if static_method: decl[2].append("/S") if virtual_method: decl[2].append("/V") if explicit_method: decl[2].append("/E") if bool(re.match(r".*\)\s*(const)?\s*=\s*0", decl_str)): decl[2].append("/A") if bool(re.match(r".*\)\s*const(\s*=\s*0)?", decl_str)): decl[2].append("/C") return decl def parse_func_decl(self, decl_str, mat="Mat", docstring=""): """ Parses the function or method declaration in the form: [([CV_EXPORTS] <rettype>) | CVAPI(rettype)] [~]<function_name> (<arg_type1> <arg_name1>[=<default_value1>] [, <arg_type2> <arg_name2>[=<default_value2>] ...]) [const] {; | <function_body>} Returns the function declaration entry: [<func name>, <return value C-type>, <list of modifiers>, <list of arguments>, <original return type>, <docstring>] (see above) """ if self.wrap_mode: if not (("CV_EXPORTS_AS" in decl_str) or ("CV_EXPORTS_W" in decl_str) or ("CV_WRAP" in decl_str)): return [] # ignore old API in the documentation check (for now) if "CVAPI(" in decl_str and self.wrap_mode: return [] top = self.block_stack[-1] func_modlist = [] npos = decl_str.find("CV_EXPORTS_AS") if npos >= 0: arg, npos3 = self.get_macro_arg(decl_str, npos) func_modlist.append("="+arg) decl_str = decl_str[:npos] + decl_str[npos3+1:] npos = decl_str.find("CV_WRAP_AS") if npos >= 0: arg, npos3 = self.get_macro_arg(decl_str, npos) func_modlist.append("="+arg) decl_str = decl_str[:npos] + decl_str[npos3+1:] npos = decl_str.find("CV_WRAP_PHANTOM") if npos >= 0: decl_str, _ = self.get_macro_arg(decl_str, npos) func_modlist.append("/phantom") npos = decl_str.find("CV_WRAP_MAPPABLE") if npos >= 0: mappable, npos3 = self.get_macro_arg(decl_str, npos) func_modlist.append("/mappable="+mappable) classname = top[1] return ['.'.join([classname, classname]), None, func_modlist, [], None, None] virtual_method = False pure_virtual_method = False const_method = False # filter off some common prefixes, which are meaningless for Python wrappers. 
# note that we do not strip "static" prefix, which does matter; # it means class methods, not instance methods decl_str = self.batch_replace(decl_str, [("static inline", ""), ("inline", ""),\ ("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("CV_CDECL", ""), ("CV_WRAP ", " "), ("CV_INLINE", ""), ("CV_DEPRECATED", ""), ("CV_DEPRECATED_EXTERNAL", "")]).strip() if decl_str.strip().startswith('virtual'): virtual_method = True decl_str = decl_str.replace('virtual' , '') end_tokens = decl_str[decl_str.rfind(')'):].split() const_method = 'const' in end_tokens pure_virtual_method = '=' in end_tokens and '0' in end_tokens static_method = False context = top[0] if decl_str.startswith("static") and (context == "class" or context == "struct"): decl_str = decl_str[len("static"):].lstrip() static_method = True args_begin = decl_str.find("(") if decl_str.startswith("CVAPI"): rtype_end = decl_str.find(")", args_begin+1) if rtype_end < 0: print("Error at %d. no terminating ) in CVAPI() macro: %s" % (self.lineno, decl_str)) sys.exit(-1) decl_str = decl_str[args_begin+1:rtype_end] + " " + decl_str[rtype_end+1:] args_begin = decl_str.find("(") if args_begin < 0: print("Error at %d: no args in '%s'" % (self.lineno, decl_str)) sys.exit(-1) decl_start = decl_str[:args_begin].strip() # handle operator () case if decl_start.endswith("operator"): args_begin = decl_str.find("(", args_begin+1) if args_begin < 0: print("Error at %d: no args in '%s'" % (self.lineno, decl_str)) sys.exit(-1) decl_start = decl_str[:args_begin].strip() # TODO: normalize all type of operators if decl_start.endswith("()"): decl_start = decl_start[0:-2].rstrip() + " ()" # constructor/destructor case if bool(re.match(r'^(\w+::)*(?P<x>\w+)::~?(?P=x)$', decl_start)): decl_start = "void " + decl_start rettype, funcname, modlist, argno = self.parse_arg(decl_start, -1) # determine original return type, hack for return types with underscore original_type = None i = decl_start.rfind(funcname) if i > 0: original_type = decl_start[:i].replace("&", "").replace("const", "").strip() if argno >= 0: classname = top[1] if rettype == classname or rettype == "~" + classname: rettype, funcname = "", rettype else: if bool(re.match('\w+\s+\(\*\w+\)\s*\(.*\)', decl_str)): return [] # function typedef elif bool(re.match('\w+\s+\(\w+::\*\w+\)\s*\(.*\)', decl_str)): return [] # class method typedef elif bool(re.match('[A-Z_]+', decl_start)): return [] # it seems to be a macro instantiation elif "__declspec" == decl_start: return [] elif bool(re.match(r'\w+\s+\(\*\w+\)\[\d+\]', decl_str)): return [] # exotic - dynamic 2d array else: #print rettype, funcname, modlist, argno print("Error at %s:%d the function/method name is missing: '%s'" % (self.hname, self.lineno, decl_start)) sys.exit(-1) if self.wrap_mode and (("::" in funcname) or funcname.startswith("~")): # if there is :: in function name (and this is in the header file), # it means, this is inline implementation of a class method. # Thus the function has been already declared within the class and we skip this repeated # declaration. 
# Also, skip the destructors, as they are always wrapped return [] funcname = self.get_dotted_name(funcname) if not self.wrap_mode: decl = self.parse_func_decl_no_wrap(decl_str, static_method, docstring) decl[0] = funcname return decl arg_start = args_begin+1 npos = arg_start-1 balance = 1 angle_balance = 0 # scan the argument list; handle nested parentheses args_decls = [] args = [] argno = 1 while balance > 0: npos += 1 t, npos = self.find_next_token(decl_str, ["(", ")", ",", "<", ">"], npos) if not t: print("Error: no closing ')' at %d" % (self.lineno,)) sys.exit(-1) if t == "<": angle_balance += 1 if t == ">": angle_balance -= 1 if t == "(": balance += 1 if t == ")": balance -= 1 if (t == "," and balance == 1 and angle_balance == 0) or balance == 0: # process next function argument a = decl_str[arg_start:npos].strip() #print "arg = ", a arg_start = npos+1 if a: eqpos = a.find("=") defval = "" modlist = [] if eqpos >= 0: defval = a[eqpos+1:].strip() else: eqpos = a.find("CV_DEFAULT") if eqpos >= 0: defval, pos3 = self.get_macro_arg(a, eqpos) else: eqpos = a.find("CV_WRAP_DEFAULT") if eqpos >= 0: defval, pos3 = self.get_macro_arg(a, eqpos) if defval == "NULL": defval = "0" if eqpos >= 0: a = a[:eqpos].strip() arg_type, arg_name, modlist, argno = self.parse_arg(a, argno) if self.wrap_mode: # TODO: Vectors should contain UMat, but this is not very easy to support and not very needed vector_mat = "vector_{}".format("Mat") vector_mat_template = "vector<{}>".format("Mat") if arg_type == "InputArray": arg_type = mat elif arg_type == "InputOutputArray": arg_type = mat modlist.append("/IO") elif arg_type == "OutputArray": arg_type = mat modlist.append("/O") elif arg_type == "InputArrayOfArrays": arg_type = vector_mat elif arg_type == "InputOutputArrayOfArrays": arg_type = vector_mat modlist.append("/IO") elif arg_type == "OutputArrayOfArrays": arg_type = vector_mat modlist.append("/O") defval = self.batch_replace(defval, [("InputArrayOfArrays", vector_mat_template), ("InputOutputArrayOfArrays", vector_mat_template), ("OutputArrayOfArrays", vector_mat_template), ("InputArray", mat), ("InputOutputArray", mat), ("OutputArray", mat), ("noArray", arg_type)]).strip() args.append([arg_type, arg_name, defval, modlist]) npos = arg_start-1 if static_method: func_modlist.append("/S") if const_method: func_modlist.append("/C") if virtual_method: func_modlist.append("/V") if pure_virtual_method: func_modlist.append("/PV") return [funcname, rettype, func_modlist, args, original_type, docstring] def get_dotted_name(self, name): """ adds the dot-separated container class/namespace names to the bare function/class name, e.g. when we have namespace cv { class A { public: f(int); }; } the function will convert "A" to "cv.A" and "f" to "cv.A.f". """ if not self.block_stack: return name if name.startswith("cv."): return name qualified_name = (("." in name) or ("::" in name)) n = "" for b in self.block_stack: block_type, block_name = b[self.BLOCK_TYPE], b[self.BLOCK_NAME] if block_type in ["file", "enum"]: continue if block_type in ["enum struct", "enum class"] and block_name == name: continue if block_type not in ["struct", "class", "namespace", "enum struct", "enum class"]: print("Error at %d: there are non-valid entries in the current block stack %s" % (self.lineno, self.block_stack)) sys.exit(-1) if block_name and (block_type == "namespace" or not qualified_name): n += block_name + "." 
n += name.replace("::", ".") if n.endswith(".Algorithm"): n = "cv.Algorithm" return n def parse_stmt(self, stmt, end_token, mat="Mat", docstring=""): """ parses the statement (ending with ';' or '}') or a block head (ending with '{') The function calls parse_class_decl or parse_func_decl when necessary. It returns <block_type>, <block_name>, <parse_flag>, <declaration> where the first 3 values only make sense for blocks (i.e. code blocks, namespaces, classes, enums and such) """ stack_top = self.block_stack[-1] context = stack_top[self.BLOCK_TYPE] stmt_type = "" if end_token == "{": stmt_type = "block" if context == "block": print("Error at %d: should not call parse_stmt inside blocks" % (self.lineno,)) sys.exit(-1) if context == "class" or context == "struct": while 1: colon_pos = stmt.find(":") if colon_pos < 0: break w = stmt[:colon_pos].strip() if w in ["public", "protected", "private"]: if w == "public" or (not self.wrap_mode and w == "protected"): stack_top[self.PUBLIC_SECTION] = True else: stack_top[self.PUBLIC_SECTION] = False stmt = stmt[colon_pos+1:].strip() break # do not process hidden class members and template classes/functions if not stack_top[self.PUBLIC_SECTION] or stmt.startswith("template"): return stmt_type, "", False, None if end_token == "{": if not self.wrap_mode and stmt.startswith("typedef struct"): stmt_type = "struct" try: classname, bases, modlist = self.parse_class_decl(stmt[len("typedef "):]) except: print("Error at %s:%d" % (self.hname, self.lineno)) exit(1) if classname.startswith("_Ipl"): classname = classname[1:] decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, [], None, docstring] if bases: decl[1] = ": " + ", ".join([self.get_dotted_name(b).replace(".","::") for b in bases]) return stmt_type, classname, True, decl if stmt.startswith("class") or stmt.startswith("struct"): stmt_type = stmt.split()[0] if stmt.strip() != stmt_type: try: classname, bases, modlist = self.parse_class_decl(stmt) except: print("Error at %s:%d" % (self.hname, self.lineno)) exit(1) decl = [] if ("CV_EXPORTS_W" in stmt) or ("CV_EXPORTS_AS" in stmt) or (not self.wrap_mode):# and ("CV_EXPORTS" in stmt)): decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, [], None, docstring] if bases: decl[1] = ": " + ", ".join([self.get_dotted_name(b).replace(".","::") for b in bases]) return stmt_type, classname, True, decl if stmt.startswith("enum") or stmt.startswith("namespace"): stmt_list = stmt.rsplit(" ", 1) if len(stmt_list) < 2: stmt_list.append("<unnamed>") return stmt_list[0], stmt_list[1], True, None if stmt.startswith("extern") and "\"C\"" in stmt: return "namespace", "", True, None if end_token == "}" and context.startswith("enum"): decl = self.parse_enum(stmt) name = stack_top[self.BLOCK_NAME] return context, name, False, decl if end_token == ";" and stmt.startswith("typedef"): # TODO: handle typedef's more intelligently return stmt_type, "", False, None paren_pos = stmt.find("(") if paren_pos >= 0: # assume it's function or method declaration, # since we filtered off the other places where '(' can normally occur: # - code blocks # - function pointer typedef's decl = self.parse_func_decl(stmt, mat=mat, docstring=docstring) # we return parse_flag == False to prevent the parser to look inside function/method bodies # (except for tracking the nested blocks) return stmt_type, "", False, decl if (context == "struct" or context == "class") and end_token == ";" and stmt: # looks like it's member declaration; append the members to the class 
declaration class_decl = stack_top[self.CLASS_DECL] if ("CV_PROP" in stmt): # or (class_decl and ("/Map" in class_decl[2])): var_modlist = [] if "CV_PROP_RW" in stmt: var_modlist.append("/RW") stmt = self.batch_replace(stmt, [("CV_PROP_RW", ""), ("CV_PROP", "")]).strip() var_list = stmt.split(",") var_type, var_name1, modlist, argno = self.parse_arg(var_list[0], -1) var_list = [var_name1] + [i.strip() for i in var_list[1:]] for v in var_list: class_decl[3].append([var_type, v, "", var_modlist]) return stmt_type, "", False, None # something unknown return stmt_type, "", False, None def find_next_token(self, s, tlist, p=0): """ Finds the next token from the 'tlist' in the input 's', starting from position 'p'. Returns the first occurred token and its position, or ("", len(s)) when no token is found """ token = "" tpos = len(s) for t in tlist: pos = s.find(t, p) if pos < 0: continue if pos < tpos: tpos = pos token = t return token, tpos def parse(self, hname, wmode=True): """ The main method. Parses the input file. Returns the list of declarations (that can be print using print_decls) """ self.hname = hname decls = [] f = io.open(hname, 'rt', encoding='utf-8') linelist = list(f.readlines()) f.close() # states: SCAN = 0 # outside of a comment or preprocessor directive COMMENT = 1 # inside a multi-line comment DIRECTIVE = 2 # inside a multi-line preprocessor directive DOCSTRING = 3 # inside a multi-line docstring DIRECTIVE_IF_0 = 4 # inside a '#if 0' directive state = SCAN self.block_stack = [["file", hname, True, True, None]] block_head = "" docstring = "" self.lineno = 0 self.wrap_mode = wmode depth_if_0 = 0 for l0 in linelist: self.lineno += 1 #print(state, self.lineno, l0) l = l0.strip() if state == SCAN and l.startswith("#"): state = DIRECTIVE # fall through to the if state == DIRECTIVE check if state == DIRECTIVE: if l.endswith("\\"): continue state = SCAN l = re.sub(r'//(.+)?', '', l).strip() # drop // comment if l == '#if 0' or l == '#if defined(__OPENCV_BUILD)' or l == '#ifdef __OPENCV_BUILD': state = DIRECTIVE_IF_0 depth_if_0 = 1 continue if state == DIRECTIVE_IF_0: if l.startswith('#'): l = l[1:].strip() if l.startswith("if"): depth_if_0 += 1 continue if l.startswith("endif"): depth_if_0 -= 1 if depth_if_0 == 0: state = SCAN else: # print('---- {:30s}:{:5d}: {}'.format(hname[-30:], self.lineno, l)) pass continue if state == COMMENT: pos = l.find("*/") if pos < 0: continue l = l[pos+2:] state = SCAN if state == DOCSTRING: pos = l.find("*/") if pos < 0: docstring += l0 continue docstring += l[:pos] + "\n" l = l[pos+2:] state = SCAN if l.startswith('CV__') or l.startswith('__CV_'): # just ignore these lines #print('IGNORE: ' + l) state = SCAN continue if state != SCAN: print("Error at %d: invalid state = %d" % (self.lineno, state)) sys.exit(-1) while 1: token, pos = self.find_next_token(l, [";", "\"", "{", "}", "//", "/*"]) if not token: block_head += " " + l block_head = block_head.strip() if len(block_head) > 0 and block_head[-1] == ')' and block_head.startswith('CV_ENUM_FLAGS('): l = '' token = ';' else: break if token == "//": block_head += " " + l[:pos] l = '' continue if token == "/*": block_head += " " + l[:pos] end_pos = l.find("*/", pos+2) if len(l) > pos + 2 and l[pos+2] == "*": # '/**', it's a docstring if end_pos < 0: state = DOCSTRING docstring = l[pos+3:] + "\n" break else: docstring = l[pos+3:end_pos] elif end_pos < 0: state = COMMENT break l = l[end_pos+2:] continue if token == "\"": pos2 = pos + 1 while 1: t2, pos2 = self.find_next_token(l, ["\\", "\""], pos2) if t2 == 
"": print("Error at %d: no terminating '\"'" % (self.lineno,)) sys.exit(-1) if t2 == "\"": break pos2 += 2 block_head += " " + l[:pos2+1] l = l[pos2+1:] continue stmt = (block_head + " " + l[:pos]).strip() stmt = " ".join(stmt.split()) # normalize the statement #print(stmt) stack_top = self.block_stack[-1] if stmt.startswith("@"): # Objective C ? break decl = None if stack_top[self.PROCESS_FLAG]: # even if stack_top[PUBLIC_SECTION] is False, we still try to process the statement, # since it can start with "public:" docstring = docstring.strip() stmt_type, name, parse_flag, decl = self.parse_stmt(stmt, token, docstring=docstring) if decl: if stmt_type.startswith("enum"): decls.append([stmt_type + " " + self.get_dotted_name(name), "", [], decl, None, ""]) else: decls.append(decl) if self._generate_gpumat_decls and "cv.cuda" in decl[0]: # If function takes as one of arguments Mat or vector<Mat> - we want to create the # same declaration working with GpuMat args = decl[3] has_mat = len(list(filter(lambda x: x[0] in {"Mat", "vector_Mat"}, args))) > 0 if has_mat: _, _, _, gpumat_decl = self.parse_stmt(stmt, token, mat="cuda::GpuMat", docstring=docstring) decls.append(gpumat_decl) if self._generate_umat_decls: # If function takes as one of arguments Mat or vector<Mat> - we want to create the # same declaration working with UMat (this is important for T-Api access) args = decl[3] has_mat = len(list(filter(lambda x: x[0] in {"Mat", "vector_Mat"}, args))) > 0 if has_mat: _, _, _, umat_decl = self.parse_stmt(stmt, token, mat="UMat", docstring=docstring) decls.append(umat_decl) docstring = "" if stmt_type == "namespace": chunks = [block[1] for block in self.block_stack if block[0] == 'namespace'] + [name] self.namespaces.add('.'.join(chunks)) else: stmt_type, name, parse_flag = "block", "", False if token == "{": if stmt_type == "class": public_section = False else: public_section = True self.block_stack.append([stmt_type, name, parse_flag, public_section, decl]) if token == "}": if not self.block_stack: print("Error at %d: the block stack is empty" % (self.lineno,)) self.block_stack[-1:] = [] if pos+1 < len(l) and l[pos+1] == ';': pos += 1 block_head = "" l = l[pos+1:] return decls def print_decls(self, decls): """ Prints the list of declarations, retrieived by the parse() method """ for d in decls: print(d[0], d[1], ";".join(d[2])) # Uncomment below line to see docstrings # print('"""\n' + d[5] + '\n"""') for a in d[3]: print(" ", a[0], a[1], a[2], end="") if a[3]: print("; ".join(a[3])) else: print() if __name__ == '__main__': parser = CppHeaderParser(generate_umat_decls=True, generate_gpumat_decls=True) decls = [] for hname in opencv_hdr_list: decls += parser.parse(hname) #for hname in sys.argv[1:]: #decls += parser.parse(hname, wmode=False) parser.print_decls(decls) print(len(decls)) print("namespaces:", " ".join(sorted(parser.namespaces)))
#!/usr/bin/env python from __future__ import print_function import hdr_parser, sys, re, os from string import Template from pprint import pprint from collections import namedtuple if sys.version_info[0] >= 3: from io import StringIO else: from cStringIO import StringIO forbidden_arg_types = ["void*"] ignored_arg_types = ["RNG*"] pass_by_val_types = ["Point*", "Point2f*", "Rect*", "String*", "double*", "float*", "int*"] gen_template_check_self = Template(""" ${cname} * self1 = 0; if (!pyopencv_${name}_getp(self, self1)) return failmsgp("Incorrect type of self (must be '${name}' or its derivative)"); ${pname} _self_ = ${cvt}(self1); """) gen_template_call_constructor_prelude = Template("""new (&(self->v)) Ptr<$cname>(); // init Ptr with placement new if(self) """) gen_template_call_constructor = Template("""self->v.reset(new ${cname}${args})""") gen_template_simple_call_constructor_prelude = Template("""if(self) """) gen_template_simple_call_constructor = Template("""new (&(self->v)) ${cname}${args}""") gen_template_parse_args = Template("""const char* keywords[] = { $kw_list, NULL }; if( PyArg_ParseTupleAndKeywords(args, kw, "$fmtspec", (char**)keywords, $parse_arglist)$code_cvt )""") gen_template_func_body = Template("""$code_decl $code_parse { ${code_prelude}ERRWRAP2($code_fcall); $code_ret; } """) gen_template_mappable = Template(""" { ${mappable} _src; if (pyopencv_to(src, _src, info)) { return cv_mappable_to(_src, dst); } } """) gen_template_type_decl = Template(""" // Converter (${name}) template<> struct PyOpenCV_Converter< ${cname} > { static PyObject* from(const ${cname}& r) { return pyopencv_${name}_Instance(r); } static bool to(PyObject* src, ${cname}& dst, const ArgInfo& info) { if(!src || src == Py_None) return true; ${cname} * dst_; if (pyopencv_${name}_getp(src, dst_)) { dst = *dst_; return true; } ${mappable_code} failmsg("Expected ${cname} for argument '%s'", info.name); return false; } }; """) gen_template_map_type_cvt = Template(""" template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const ArgInfo& info); """) gen_template_set_prop_from_map = Template(""" if( PyMapping_HasKeyString(src, (char*)"$propname") ) { tmp = PyMapping_GetItemString(src, (char*)"$propname"); ok = tmp && pyopencv_to(tmp, dst.$propname, ArgInfo("$propname", false)); Py_DECREF(tmp); if(!ok) return false; }""") gen_template_type_impl = Template(""" // GetSet (${name}) ${getset_code} // Methods (${name}) ${methods_code} // Tables (${name}) static PyGetSetDef pyopencv_${name}_getseters[] = {${getset_inits} {NULL} /* Sentinel */ }; static PyMethodDef pyopencv_${name}_methods[] = { ${methods_inits} {NULL, NULL} }; """) gen_template_get_prop = Template(""" static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure) { return pyopencv_from(p->v${access}${member}); } """) gen_template_get_prop_algo = Template(""" static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure) { $cname* _self_ = dynamic_cast<$cname*>(p->v.get()); if (!_self_) return failmsgp("Incorrect type of object (must be '${name}' or its derivative)"); return pyopencv_from(_self_${access}${member}); } """) gen_template_set_prop = Template(""" static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure) { if (!value) { PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute"); return -1; } return pyopencv_to(value, p->v${access}${member}, ArgInfo("value", false)) ? 
0 : -1; } """) gen_template_set_prop_algo = Template(""" static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure) { if (!value) { PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute"); return -1; } $cname* _self_ = dynamic_cast<$cname*>(p->v.get()); if (!_self_) { failmsgp("Incorrect type of object (must be '${name}' or its derivative)"); return -1; } return pyopencv_to(value, _self_${access}${member}, ArgInfo("value", false)) ? 0 : -1; } """) gen_template_prop_init = Template(""" {(char*)"${member}", (getter)pyopencv_${name}_get_${member}, NULL, (char*)"${member}", NULL},""") gen_template_rw_prop_init = Template(""" {(char*)"${member}", (getter)pyopencv_${name}_get_${member}, (setter)pyopencv_${name}_set_${member}, (char*)"${member}", NULL},""") class FormatStrings: string = 's' unsigned_char = 'b' short_int = 'h' int = 'i' unsigned_int = 'I' long = 'l' unsigned_long = 'k' long_long = 'L' unsigned_long_long = 'K' size_t = 'n' float = 'f' double = 'd' object = 'O' ArgTypeInfo = namedtuple('ArgTypeInfo', ['atype', 'format_str', 'default_value', 'strict_conversion']) # strict_conversion is False by default ArgTypeInfo.__new__.__defaults__ = (False,) simple_argtype_mapping = { "bool": ArgTypeInfo("bool", FormatStrings.unsigned_char, "0", True), "size_t": ArgTypeInfo("size_t", FormatStrings.unsigned_long_long, "0", True), "int": ArgTypeInfo("int", FormatStrings.int, "0", True), "float": ArgTypeInfo("float", FormatStrings.float, "0.f", True), "double": ArgTypeInfo("double", FormatStrings.double, "0", True), "c_string": ArgTypeInfo("char*", FormatStrings.string, '(char*)""') } def normalize_class_name(name): return re.sub(r"^cv\.", "", name).replace(".", "_") def get_type_format_string(arg_type_info): if arg_type_info.strict_conversion: return FormatStrings.object else: return arg_type_info.format_str class ClassProp(object): def __init__(self, decl): self.tp = decl[0].replace("*", "_ptr") self.name = decl[1] self.readonly = True if "/RW" in decl[3]: self.readonly = False class ClassInfo(object): def __init__(self, name, decl=None): self.cname = name.replace(".", "::") self.name = self.wname = normalize_class_name(name) self.sname = name[name.rfind('.') + 1:] self.ismap = False self.issimple = False self.isalgorithm = False self.methods = {} self.props = [] self.mappables = [] self.consts = {} self.base = None self.constructor = None customname = False if decl: bases = decl[1].split()[1:] if len(bases) > 1: print("Note: Class %s has more than 1 base class (not supported by Python C extensions)" % (self.name,)) print(" Bases: ", " ".join(bases)) print(" Only the first base class will be used") #return sys.exit(-1) elif len(bases) == 1: self.base = bases[0].strip(",") if self.base.startswith("cv::"): self.base = self.base[4:] if self.base == "Algorithm": self.isalgorithm = True self.base = self.base.replace("::", "_") for m in decl[2]: if m.startswith("="): self.wname = m[1:] customname = True elif m == "/Map": self.ismap = True elif m == "/Simple": self.issimple = True self.props = [ClassProp(p) for p in decl[3]] if not customname and self.wname.startswith("Cv"): self.wname = self.wname[2:] def gen_map_code(self, codegen): all_classes = codegen.classes code = "static bool pyopencv_to(PyObject* src, %s& dst, const ArgInfo& info)\n{\n PyObject* tmp;\n bool ok;\n" % (self.cname) code += "".join([gen_template_set_prop_from_map.substitute(propname=p.name,proptype=p.tp) for p in self.props]) if self.base: code += "\n return pyopencv_to(src, 
(%s&)dst, info);\n}\n" % all_classes[self.base].cname else: code += "\n return true;\n}\n" return code def gen_code(self, codegen): all_classes = codegen.classes if self.ismap: return self.gen_map_code(codegen) getset_code = StringIO() getset_inits = StringIO() sorted_props = [(p.name, p) for p in self.props] sorted_props.sort() access_op = "->" if self.issimple: access_op = "." for pname, p in sorted_props: if self.isalgorithm: getset_code.write(gen_template_get_prop_algo.substitute(name=self.name, cname=self.cname, member=pname, membertype=p.tp, access=access_op)) else: getset_code.write(gen_template_get_prop.substitute(name=self.name, member=pname, membertype=p.tp, access=access_op)) if p.readonly: getset_inits.write(gen_template_prop_init.substitute(name=self.name, member=pname)) else: if self.isalgorithm: getset_code.write(gen_template_set_prop_algo.substitute(name=self.name, cname=self.cname, member=pname, membertype=p.tp, access=access_op)) else: getset_code.write(gen_template_set_prop.substitute(name=self.name, member=pname, membertype=p.tp, access=access_op)) getset_inits.write(gen_template_rw_prop_init.substitute(name=self.name, member=pname)) methods_code = StringIO() methods_inits = StringIO() sorted_methods = list(self.methods.items()) sorted_methods.sort() if self.constructor is not None: methods_code.write(self.constructor.gen_code(codegen)) for mname, m in sorted_methods: methods_code.write(m.gen_code(codegen)) methods_inits.write(m.get_tab_entry()) code = gen_template_type_impl.substitute(name=self.name, wname=self.wname, cname=self.cname, getset_code=getset_code.getvalue(), getset_inits=getset_inits.getvalue(), methods_code=methods_code.getvalue(), methods_inits=methods_inits.getvalue()) return code def gen_def(self, codegen): all_classes = codegen.classes baseptr = "NoBase" if self.base and self.base in all_classes: baseptr = all_classes[self.base].name constructor_name = "0" if self.constructor is not None: constructor_name = self.constructor.get_wrapper_name() return "CVPY_TYPE({}, {}, {}, {}, {});\n".format( self.name, self.cname if self.issimple else "Ptr<{}>".format(self.cname), self.sname if self.issimple else "Ptr", baseptr, constructor_name ) def handle_ptr(tp): if tp.startswith('Ptr_'): tp = 'Ptr<' + "::".join(tp.split('_')[1:]) + '>' return tp class ArgInfo(object): def __init__(self, arg_tuple): self.tp = handle_ptr(arg_tuple[0]) self.name = arg_tuple[1] self.defval = arg_tuple[2] self.isarray = False self.arraylen = 0 self.arraycvt = None self.inputarg = True self.outputarg = False self.returnarg = False for m in arg_tuple[3]: if m == "/O": self.inputarg = False self.outputarg = True self.returnarg = True elif m == "/IO": self.inputarg = True self.outputarg = True self.returnarg = True elif m.startswith("/A"): self.isarray = True self.arraylen = m[2:].strip() elif m.startswith("/CA"): self.isarray = True self.arraycvt = m[2:].strip() self.py_inputarg = False self.py_outputarg = False def isbig(self): return self.tp in ["Mat", "vector_Mat", "cuda::GpuMat", "GpuMat", "vector_GpuMat", "UMat", "vector_UMat"] # or self.tp.startswith("vector") def crepr(self): return "ArgInfo(\"%s\", %d)" % (self.name, self.outputarg) class FuncVariant(object): def __init__(self, classname, name, decl, isconstructor, isphantom=False): self.classname = classname self.name = self.wname = name self.isconstructor = isconstructor self.isphantom = isphantom self.docstring = decl[5] self.rettype = decl[4] or handle_ptr(decl[1]) if self.rettype == "void": self.rettype = "" self.args = [] 
self.array_counters = {} for a in decl[3]: ainfo = ArgInfo(a) if ainfo.isarray and not ainfo.arraycvt: c = ainfo.arraylen c_arrlist = self.array_counters.get(c, []) if c_arrlist: c_arrlist.append(ainfo.name) else: self.array_counters[c] = [ainfo.name] self.args.append(ainfo) self.init_pyproto() def init_pyproto(self): # string representation of argument list, with '[', ']' symbols denoting optional arguments, e.g. # "src1, src2[, dst[, mask]]" for cv.add argstr = "" # list of all input arguments of the Python function, with the argument numbers: # [("src1", 0), ("src2", 1), ("dst", 2), ("mask", 3)] # we keep an argument number to find the respective argument quickly, because # some of the arguments of C function may not present in the Python function (such as array counters) # or even go in a different order ("heavy" output parameters of the C function # become the first optional input parameters of the Python function, and thus they are placed right after # non-optional input parameters) arglist = [] # the list of "heavy" output parameters. Heavy parameters are the parameters # that can be expensive to allocate each time, such as vectors and matrices (see isbig). outarr_list = [] # the list of output parameters. Also includes input/output parameters. outlist = [] firstoptarg = 1000000 argno = -1 for a in self.args: argno += 1 if a.name in self.array_counters: continue assert not a.tp in forbidden_arg_types, 'Forbidden type "{}" for argument "{}" in "{}" ("{}")'.format(a.tp, a.name, self.name, self.classname) if a.tp in ignored_arg_types: continue if a.returnarg: outlist.append((a.name, argno)) if (not a.inputarg) and a.isbig(): outarr_list.append((a.name, argno)) continue if not a.inputarg: continue if not a.defval: arglist.append((a.name, argno)) else: firstoptarg = min(firstoptarg, len(arglist)) # if there are some array output parameters before the first default parameter, they # are added as optional parameters before the first optional parameter if outarr_list: arglist += outarr_list outarr_list = [] arglist.append((a.name, argno)) if outarr_list: firstoptarg = min(firstoptarg, len(arglist)) arglist += outarr_list firstoptarg = min(firstoptarg, len(arglist)) noptargs = len(arglist) - firstoptarg argnamelist = [aname for aname, argno in arglist] argstr = ", ".join(argnamelist[:firstoptarg]) argstr = "[, ".join([argstr] + argnamelist[firstoptarg:]) argstr += "]" * noptargs if self.rettype: outlist = [("retval", -1)] + outlist elif self.isconstructor: assert outlist == [] outlist = [("self", -1)] if self.isconstructor: classname = self.classname if classname.startswith("Cv"): classname=classname[2:] outstr = "<%s object>" % (classname,) elif outlist: outstr = ", ".join([o[0] for o in outlist]) else: outstr = "None" self.py_arg_str = argstr self.py_return_str = outstr self.py_prototype = "%s(%s) -> %s" % (self.wname, argstr, outstr) self.py_noptargs = noptargs self.py_arglist = arglist for aname, argno in arglist: self.args[argno].py_inputarg = True for aname, argno in outlist: if argno >= 0: self.args[argno].py_outputarg = True self.py_outlist = outlist class FuncInfo(object): def __init__(self, classname, name, cname, isconstructor, namespace, is_static): self.classname = classname self.name = name self.cname = cname self.isconstructor = isconstructor self.namespace = namespace self.is_static = is_static self.variants = [] def add_variant(self, decl, isphantom=False): self.variants.append(FuncVariant(self.classname, self.name, decl, self.isconstructor, isphantom)) def 
get_wrapper_name(self): name = self.name if self.classname: classname = self.classname + "_" if "[" in name: name = "getelem" else: classname = "" if self.is_static: name += "_static" return "pyopencv_" + self.namespace.replace('.','_') + '_' + classname + name def get_wrapper_prototype(self, codegen): full_fname = self.get_wrapper_name() if self.isconstructor: return "static int {fn_name}(pyopencv_{type_name}_t* self, PyObject* args, PyObject* kw)".format( fn_name=full_fname, type_name=codegen.classes[self.classname].name) if self.classname: self_arg = "self" else: self_arg = "" return "static PyObject* %s(PyObject* %s, PyObject* args, PyObject* kw)" % (full_fname, self_arg) def get_tab_entry(self): prototype_list = [] docstring_list = [] have_empty_constructor = False for v in self.variants: s = v.py_prototype if (not v.py_arglist) and self.isconstructor: have_empty_constructor = True if s not in prototype_list: prototype_list.append(s) docstring_list.append(v.docstring) # if there are just 2 constructors: default one and some other, # we simplify the notation. # Instead of ClassName(args ...) -> object or ClassName() -> object # we write ClassName([args ...]) -> object if have_empty_constructor and len(self.variants) == 2: idx = self.variants[1].py_arglist != [] s = self.variants[idx].py_prototype p1 = s.find("(") p2 = s.rfind(")") prototype_list = [s[:p1+1] + "[" + s[p1+1:p2] + "]" + s[p2:]] # The final docstring will be: Each prototype, followed by # their relevant doxygen comment full_docstring = "" for prototype, body in zip(prototype_list, docstring_list): full_docstring += Template("$prototype\n$docstring\n\n\n\n").substitute( prototype=prototype, docstring='\n'.join( ['. ' + line for line in body.split('\n')] ) ) # Escape backslashes, newlines, and double quotes full_docstring = full_docstring.strip().replace("\\", "\\\\").replace('\n', '\\n').replace("\"", "\\\"") # Convert unicode chars to xml representation, but keep as string instead of bytes full_docstring = full_docstring.encode('ascii', errors='xmlcharrefreplace').decode() return Template(' {"$py_funcname", CV_PY_FN_WITH_KW_($wrap_funcname, $flags), "$py_docstring"},\n' ).substitute(py_funcname = self.variants[0].wname, wrap_funcname=self.get_wrapper_name(), flags = 'METH_STATIC' if self.is_static else '0', py_docstring = full_docstring) def gen_code(self, codegen): all_classes = codegen.classes proto = self.get_wrapper_prototype(codegen) code = "%s\n{\n" % (proto,) code += " using namespace %s;\n\n" % self.namespace.replace('.', '::') selfinfo = None ismethod = self.classname != "" and not self.isconstructor # full name is needed for error diagnostic in PyArg_ParseTupleAndKeywords fullname = self.name if self.classname: selfinfo = all_classes[self.classname] if not self.isconstructor: if not self.is_static: code += gen_template_check_self.substitute( name=selfinfo.name, cname=selfinfo.cname if selfinfo.issimple else "Ptr<{}>".format(selfinfo.cname), pname=(selfinfo.cname + '*') if selfinfo.issimple else "Ptr<{}>".format(selfinfo.cname), cvt='' if selfinfo.issimple else '*' ) fullname = selfinfo.wname + "." 
+ fullname all_code_variants = [] for v in self.variants: code_decl = "" code_ret = "" code_cvt_list = [] code_args = "(" all_cargs = [] if v.isphantom and ismethod and not self.is_static: code_args += "_self_" # declare all the C function arguments, # add necessary conversions from Python objects to code_cvt_list, # form the function/method call, # for the list of type mappings for a in v.args: if a.tp in ignored_arg_types: defval = a.defval if not defval and a.tp.endswith("*"): defval = "0" assert defval if not code_args.endswith("("): code_args += ", " code_args += defval all_cargs.append([[None, ""], ""]) continue tp1 = tp = a.tp amp = "" defval0 = "" if tp in pass_by_val_types: tp = tp1 = tp[:-1] amp = "&" if tp.endswith("*"): defval0 = "0" tp1 = tp.replace("*", "_ptr") tp_candidates = [a.tp, normalize_class_name(self.namespace + "." + a.tp)] if any(tp in codegen.enums.keys() for tp in tp_candidates): defval0 = "static_cast<%s>(%d)" % (a.tp, 0) arg_type_info = simple_argtype_mapping.get(tp, ArgTypeInfo(tp, FormatStrings.object, defval0, True)) parse_name = a.name if a.py_inputarg: if arg_type_info.strict_conversion: code_decl += " PyObject* pyobj_%s = NULL;\n" % (a.name,) parse_name = "pyobj_" + a.name if a.tp == 'char': code_cvt_list.append("convert_to_char(pyobj_%s, &%s, %s)" % (a.name, a.name, a.crepr())) else: code_cvt_list.append("pyopencv_to(pyobj_%s, %s, %s)" % (a.name, a.name, a.crepr())) all_cargs.append([arg_type_info, parse_name]) defval = a.defval if not defval: defval = arg_type_info.default_value else: if "UMat" in tp: if "Mat" in defval and "UMat" not in defval: defval = defval.replace("Mat", "UMat") if "cuda::GpuMat" in tp: if "Mat" in defval and "GpuMat" not in defval: defval = defval.replace("Mat", "cuda::GpuMat") # "tp arg = tp();" is equivalent to "tp arg;" in the case of complex types if defval == tp + "()" and arg_type_info.format_str == FormatStrings.object: defval = "" if a.outputarg and not a.inputarg: defval = "" if defval: code_decl += " %s %s=%s;\n" % (arg_type_info.atype, a.name, defval) else: code_decl += " %s %s;\n" % (arg_type_info.atype, a.name) if not code_args.endswith("("): code_args += ", " code_args += amp + a.name code_args += ")" if self.isconstructor: if selfinfo.issimple: templ_prelude = gen_template_simple_call_constructor_prelude templ = gen_template_simple_call_constructor else: templ_prelude = gen_template_call_constructor_prelude templ = gen_template_call_constructor code_prelude = templ_prelude.substitute(name=selfinfo.name, cname=selfinfo.cname) code_fcall = templ.substitute(name=selfinfo.name, cname=selfinfo.cname, args=code_args) if v.isphantom: code_fcall = code_fcall.replace("new " + selfinfo.cname, self.cname.replace("::", "_")) else: code_prelude = "" code_fcall = "" if v.rettype: code_decl += " " + v.rettype + " retval;\n" code_fcall += "retval = " if not v.isphantom and ismethod and not self.is_static: code_fcall += "_self_->" + self.cname else: code_fcall += self.cname code_fcall += code_args if code_cvt_list: code_cvt_list = [""] + code_cvt_list # add info about return value, if any, to all_cargs. if there non-void return value, # it is encoded in v.py_outlist as ("retval", -1) pair. # As [-1] in Python accesses the last element of a list, we automatically handle the return value by # adding the necessary info to the end of all_cargs list. 
if v.rettype: tp = v.rettype tp1 = tp.replace("*", "_ptr") default_info = ArgTypeInfo(tp, FormatStrings.object, "0") arg_type_info = simple_argtype_mapping.get(tp, default_info) all_cargs.append(arg_type_info) if v.args and v.py_arglist: # form the format spec for PyArg_ParseTupleAndKeywords fmtspec = "".join([ get_type_format_string(all_cargs[argno][0]) for aname, argno in v.py_arglist ]) if v.py_noptargs > 0: fmtspec = fmtspec[:-v.py_noptargs] + "|" + fmtspec[-v.py_noptargs:] fmtspec += ":" + fullname # form the argument parse code that: # - declares the list of keyword parameters # - calls PyArg_ParseTupleAndKeywords # - converts complex arguments from PyObject's to native OpenCV types code_parse = gen_template_parse_args.substitute( kw_list = ", ".join(['"' + aname + '"' for aname, argno in v.py_arglist]), fmtspec = fmtspec, parse_arglist = ", ".join(["&" + all_cargs[argno][1] for aname, argno in v.py_arglist]), code_cvt = " &&\n ".join(code_cvt_list)) else: code_parse = "if(PyObject_Size(args) == 0 && (!kw || PyObject_Size(kw) == 0))" if len(v.py_outlist) == 0: code_ret = "Py_RETURN_NONE" elif len(v.py_outlist) == 1: if self.isconstructor: code_ret = "return 0" else: aname, argno = v.py_outlist[0] code_ret = "return pyopencv_from(%s)" % (aname,) else: # there is more than 1 return parameter; form the tuple out of them fmtspec = "N"*len(v.py_outlist) code_ret = "return Py_BuildValue(\"(%s)\", %s)" % \ (fmtspec, ", ".join(["pyopencv_from(" + aname + ")" for aname, argno in v.py_outlist])) all_code_variants.append(gen_template_func_body.substitute(code_decl=code_decl, code_parse=code_parse, code_prelude=code_prelude, code_fcall=code_fcall, code_ret=code_ret)) if len(all_code_variants)==1: # if the function/method has only 1 signature, then just put it code += all_code_variants[0] else: # try to execute each signature code += " PyErr_Clear();\n\n".join([" {\n" + v + " }\n" for v in all_code_variants]) def_ret = "NULL" if self.isconstructor: def_ret = "-1" code += "\n return %s;\n}\n\n" % def_ret cname = self.cname classinfo = None #dump = False #if dump: pprint(vars(self)) #if dump: pprint(vars(self.variants[0])) if self.classname: classinfo = all_classes[self.classname] #if dump: pprint(vars(classinfo)) if self.isconstructor: py_name = 'cv.' + classinfo.wname elif self.is_static: py_name = '.'.join([self.namespace, classinfo.sname + '_' + self.variants[0].wname]) else: cname = classinfo.cname + '::' + cname py_name = 'cv.' + classinfo.wname + '.' 
+ self.variants[0].wname else: py_name = '.'.join([self.namespace, self.variants[0].wname]) #if dump: print(cname + " => " + py_name) py_signatures = codegen.py_signatures.setdefault(cname, []) for v in self.variants: s = dict(name=py_name, arg=v.py_arg_str, ret=v.py_return_str) for old in py_signatures: if s == old: break else: py_signatures.append(s) return code class Namespace(object): def __init__(self): self.funcs = {} self.consts = {} class PythonWrapperGenerator(object): def __init__(self): self.clear() def clear(self): self.classes = {} self.namespaces = {} self.consts = {} self.enums = {} self.code_include = StringIO() self.code_enums = StringIO() self.code_types = StringIO() self.code_funcs = StringIO() self.code_ns_reg = StringIO() self.code_ns_init = StringIO() self.code_type_publish = StringIO() self.py_signatures = dict() self.class_idx = 0 def add_class(self, stype, name, decl): classinfo = ClassInfo(name, decl) classinfo.decl_idx = self.class_idx self.class_idx += 1 if classinfo.name in self.classes: print("Generator error: class %s (cname=%s) already exists" \ % (classinfo.name, classinfo.cname)) sys.exit(-1) self.classes[classinfo.name] = classinfo # Add Class to json file. namespace, classes, name = self.split_decl_name(name) namespace = '.'.join(namespace) name = '_'.join(classes+[name]) py_name = 'cv.' + classinfo.wname # use wrapper name py_signatures = self.py_signatures.setdefault(classinfo.cname, []) py_signatures.append(dict(name=py_name)) #print('class: ' + classinfo.cname + " => " + py_name) def split_decl_name(self, name): chunks = name.split('.') namespace = chunks[:-1] classes = [] while namespace and '.'.join(namespace) not in self.parser.namespaces: classes.insert(0, namespace.pop()) return namespace, classes, chunks[-1] def add_const(self, name, decl): cname = name.replace('.','::') namespace, classes, name = self.split_decl_name(name) namespace = '.'.join(namespace) name = '_'.join(classes+[name]) ns = self.namespaces.setdefault(namespace, Namespace()) if name in ns.consts: print("Generator error: constant %s (cname=%s) already exists" \ % (name, cname)) sys.exit(-1) ns.consts[name] = cname value = decl[1] py_name = '.'.join([namespace, name]) py_signatures = self.py_signatures.setdefault(cname, []) py_signatures.append(dict(name=py_name, value=value)) #print(cname + ' => ' + str(py_name) + ' (value=' + value + ')') def add_enum(self, name, decl): wname = normalize_class_name(name) if wname.endswith("<unnamed>"): wname = None else: self.enums[wname] = name const_decls = decl[3] for decl in const_decls: name = decl[0] self.add_const(name.replace("const ", "").strip(), decl) def add_func(self, decl): namespace, classes, barename = self.split_decl_name(decl[0]) cname = "::".join(namespace+classes+[barename]) name = barename classname = '' bareclassname = '' if classes: classname = normalize_class_name('.'.join(namespace+classes)) bareclassname = classes[-1] namespace = '.'.join(namespace) isconstructor = name == bareclassname is_static = False isphantom = False mappable = None for m in decl[2]: if m == "/S": is_static = True elif m == "/phantom": isphantom = True cname = cname.replace("::", "_") elif m.startswith("="): name = m[1:] elif m.startswith("/mappable="): mappable = m[10:] self.classes[classname].mappables.append(mappable) return if isconstructor: name = "_".join(classes[:-1]+[name]) if is_static: # Add it as a method to the class func_map = self.classes[classname].methods func = func_map.setdefault(name, FuncInfo(classname, name, cname, 
                                                      isconstructor, namespace, is_static))
            func.add_variant(decl, isphantom)

            # Add it as a global function
            g_name = "_".join(classes+[name])
            func_map = self.namespaces.setdefault(namespace, Namespace()).funcs
            func = func_map.setdefault(g_name, FuncInfo("", g_name, cname,
                                                        isconstructor, namespace, False))
            func.add_variant(decl, isphantom)
        else:
            if classname and not isconstructor:
                if not isphantom:
                    cname = barename
                func_map = self.classes[classname].methods
            else:
                func_map = self.namespaces.setdefault(namespace, Namespace()).funcs

            func = func_map.setdefault(name, FuncInfo(classname, name, cname,
                                                      isconstructor, namespace, is_static))
            func.add_variant(decl, isphantom)

        if classname and isconstructor:
            self.classes[classname].constructor = func

    def gen_namespace(self, ns_name):
        ns = self.namespaces[ns_name]
        wname = normalize_class_name(ns_name)

        self.code_ns_reg.write('static PyMethodDef methods_%s[] = {\n'%wname)
        for name, func in sorted(ns.funcs.items()):
            if func.isconstructor:
                continue
            self.code_ns_reg.write(func.get_tab_entry())
        self.code_ns_reg.write('    {NULL, NULL}\n};\n\n')

        self.code_ns_reg.write('static ConstDef consts_%s[] = {\n'%wname)
        for name, cname in sorted(ns.consts.items()):
            self.code_ns_reg.write('    {"%s", static_cast<long>(%s)},\n'%(name, cname))
            compat_name = re.sub(r"([a-z])([A-Z])", r"\1_\2", name).upper()
            if name != compat_name:
                self.code_ns_reg.write('    {"%s", static_cast<long>(%s)},\n'%(compat_name, cname))
        self.code_ns_reg.write('    {NULL, 0}\n};\n\n')

    def gen_enum_reg(self, enum_name):
        name_seg = enum_name.split(".")
        is_enum_class = False
        if len(name_seg) >= 2 and name_seg[-1] == name_seg[-2]:
            enum_name = ".".join(name_seg[:-1])
            is_enum_class = True

        wname = normalize_class_name(enum_name)
        cname = enum_name.replace(".", "::")
        code = ""
        if re.sub(r"^cv\.", "", enum_name) != wname:
            code += "typedef {0} {1};\n".format(cname, wname)
        code += "CV_PY_FROM_ENUM({0});\nCV_PY_TO_ENUM({0});\n\n".format(wname)
        self.code_enums.write(code)

    def save(self, path, name, buf):
        with open(path + "/" + name, "wt") as f:
            f.write(buf.getvalue())

    def save_json(self, path, name, value):
        import json
        with open(path + "/" + name, "wt") as f:
            json.dump(value, f)

    def gen(self, srcfiles, output_path):
        self.clear()
        self.parser = hdr_parser.CppHeaderParser(generate_umat_decls=True, generate_gpumat_decls=True)

        # step 1: scan the headers and build more descriptive maps of classes, consts, functions
        for hdr in srcfiles:
            decls = self.parser.parse(hdr)
            if len(decls) == 0:
                continue

            if hdr.find('opencv2/') >= 0:
                # Avoid including the shadow files
                self.code_include.write('#include "{0}"\n'.format(hdr[hdr.rindex('opencv2/'):]))

            for decl in decls:
                name = decl[0]
                if name.startswith("struct") or name.startswith("class"):
                    # class/struct
                    p = name.find(" ")
                    stype = name[:p]
                    name = name[p+1:].strip()
                    self.add_class(stype, name, decl)
                elif name.startswith("const"):
                    # constant
                    self.add_const(name.replace("const ", "").strip(), decl)
                elif name.startswith("enum"):
                    # enum
                    self.add_enum(name.rsplit(" ", 1)[1], decl)
                else:
                    # function
                    self.add_func(decl)

        # step 1.5: check that all base classes exist
        for name, classinfo in self.classes.items():
            if classinfo.base:
                chunks = classinfo.base.split('_')
                base = '_'.join(chunks)
                while base not in self.classes and len(chunks) > 1:
                    del chunks[-2]
                    base = '_'.join(chunks)
                if base not in self.classes:
                    print("Generator error: unable to resolve base %s for %s" % (classinfo.base, classinfo.name))
                    sys.exit(-1)
                base_instance = self.classes[base]
                classinfo.base = base
                classinfo.isalgorithm |= base_instance.isalgorithm
                # Note: this direct OR-based propagation of the 'isalgorithm' flag is
                # not sufficient on its own: it does not work for inheritance trees
                # (graphs) with depth > 2, hence the tree-based pass below.
                self.classes[name] = classinfo

        # tree-based propagation of 'isalgorithm'
        processed = dict()
        def process_isalgorithm(classinfo):
            if classinfo.isalgorithm or classinfo in processed:
                return classinfo.isalgorithm
            res = False
            if classinfo.base:
                res = process_isalgorithm(self.classes[classinfo.base])
                #assert not (res == True or classinfo.isalgorithm is False), "Internal error: " + classinfo.name + " => " + classinfo.base
                classinfo.isalgorithm |= res
                res = classinfo.isalgorithm
            processed[classinfo] = True
            return res
        for name, classinfo in self.classes.items():
            process_isalgorithm(classinfo)

        # step 2: generate code for the classes and their methods
        classlist = list(self.classes.items())
        classlist.sort()
        for name, classinfo in classlist:
            self.code_types.write("//{}\n".format(80*"="))
            self.code_types.write("// {} ({})\n".format(name, 'Map' if classinfo.ismap else 'Generic'))
            self.code_types.write("//{}\n".format(80*"="))
            self.code_types.write(classinfo.gen_code(self))
            if classinfo.ismap:
                self.code_types.write(gen_template_map_type_cvt.substitute(name=classinfo.name, cname=classinfo.cname))
            else:
                mappable_code = "\n".join([
                    gen_template_mappable.substitute(cname=classinfo.cname, mappable=mappable)
                    for mappable in classinfo.mappables])
                code = gen_template_type_decl.substitute(
                    name=classinfo.name,
                    cname=classinfo.cname if classinfo.issimple else "Ptr<{}>".format(classinfo.cname),
                    mappable_code=mappable_code
                )
                self.code_types.write(code)

        # register classes in the same order as they have been declared.
        # This way, base classes will be registered in Python before their derivatives.
        classlist1 = [(classinfo.decl_idx, name, classinfo) for name, classinfo in classlist]
        classlist1.sort()

        for decl_idx, name, classinfo in classlist1:
            if classinfo.ismap:
                continue
            self.code_type_publish.write(classinfo.gen_def(self))

        # step 3: generate the code for all the global functions
        for ns_name, ns in sorted(self.namespaces.items()):
            if ns_name.split('.')[0] != 'cv':
                continue
            for name, func in sorted(ns.funcs.items()):
                if func.isconstructor:
                    continue
                code = func.gen_code(self)
                self.code_funcs.write(code)
            self.gen_namespace(ns_name)
            self.code_ns_init.write('CVPY_MODULE("{}", {});\n'.format(ns_name[2:], normalize_class_name(ns_name)))

        # step 4: generate the code for enum types
        enumlist = list(self.enums.values())
        enumlist.sort()
        for name in enumlist:
            self.gen_enum_reg(name)

        # step 5: generate the code for constants
        constlist = list(self.consts.items())
        constlist.sort()
        for name, constinfo in constlist:
            self.gen_const_reg(constinfo)

        # That's it. Now save all the files
        self.save(output_path, "pyopencv_generated_include.h", self.code_include)
        self.save(output_path, "pyopencv_generated_funcs.h", self.code_funcs)
        self.save(output_path, "pyopencv_generated_enums.h", self.code_enums)
        self.save(output_path, "pyopencv_generated_types.h", self.code_type_publish)
        self.save(output_path, "pyopencv_generated_types_content.h", self.code_types)
        self.save(output_path, "pyopencv_generated_modules.h", self.code_ns_init)
        self.save(output_path, "pyopencv_generated_modules_content.h", self.code_ns_reg)
        self.save_json(output_path, "pyopencv_signatures.json", self.py_signatures)


if __name__ == "__main__":
    srcfiles = hdr_parser.opencv_hdr_list
    dstdir = "/Users/vp/tmp"
    if len(sys.argv) > 1:
        dstdir = sys.argv[1]
    if len(sys.argv) > 2:
        with open(sys.argv[2], 'r') as f:
            srcfiles = [l.strip() for l in f.readlines()]
    generator = PythonWrapperGenerator()
    generator.gen(srcfiles, dstdir)
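A brief usage sketch follows; it is an illustration rather than part of the generator file, and it assumes the code above lives in gen2.py next to hdr_parser.py with the listed OpenCV headers present locally. The output directory name is hypothetical.

# Hypothetical driver for the generator above; mirrors the __main__ block but
# uses the programmatic entry point directly. All paths here are illustrative.
import gen2
import hdr_parser

generator = gen2.PythonWrapperGenerator()
# Writes pyopencv_generated_*.h and pyopencv_signatures.json into ./generated
generator.gen(hdr_parser.opencv_hdr_list, "./generated")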
import os
import sys
import platform
import setuptools

SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))


def main():
    os.chdir(SCRIPT_DIR)

    package_name = 'opencv'
    package_version = os.environ.get('OPENCV_VERSION', '4.2.0')  # TODO
    long_description = 'Open Source Computer Vision Library Python bindings'  # TODO

    setuptools.setup(
        name=package_name,
        version=package_version,
        url='https://github.com/opencv/opencv',
        license='BSD',
        description='OpenCV python bindings',
        long_description=long_description,
        long_description_content_type="text/markdown",
        packages=setuptools.find_packages(),
        maintainer="OpenCV Team",
        install_requires=['numpy'],
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Intended Audience :: Developers',
            'Intended Audience :: Education',
            'Intended Audience :: Information Technology',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: BSD License',
            'Operating System :: MacOS',
            'Operating System :: Microsoft :: Windows',
            'Operating System :: POSIX',
            'Operating System :: Unix',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: C++',
            'Programming Language :: Python :: Implementation :: CPython',
            'Topic :: Scientific/Engineering',
            'Topic :: Scientific/Engineering :: Image Recognition',
            'Topic :: Software Development',
            'Topic :: Software Development :: Libraries',
        ],
    )


if __name__ == '__main__':
    main()
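A minimal sketch of driving the packaging script above, assuming it is saved as setup.py at the package root; the version value and the 'sdist' command are illustrative choices, not prescribed by the script.

# Hypothetical build driver for the setup script above.
import os
import subprocess
import sys

os.environ['OPENCV_VERSION'] = '4.2.0'  # illustrative; overrides the default read by setup.py
subprocess.check_call([sys.executable, 'setup.py', 'sdist'])  # build a source distribution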
'''
OpenCV Python binary extension loader
'''
import os
import sys

try:
    import numpy
    import numpy.core.multiarray
except ImportError:
    print('OpenCV bindings require the "numpy" package.')
    print('Install it via command:')
    print('    pip install numpy')
    raise

# TODO
# is_x64 = sys.maxsize > 2**32


def bootstrap():
    import sys

    if hasattr(sys, 'OpenCV_LOADER'):
        print(sys.path)
        raise ImportError('ERROR: recursion is detected during loading of "cv2" binary extensions. Check OpenCV installation.')
    sys.OpenCV_LOADER = True

    DEBUG = False
    if hasattr(sys, 'OpenCV_LOADER_DEBUG'):
        DEBUG = True

    import platform
    if DEBUG:
        print('OpenCV loader: os.name="{}" platform.system()="{}"'.format(os.name, str(platform.system())))

    LOADER_DIR = os.path.dirname(os.path.abspath(__file__))

    PYTHON_EXTENSIONS_PATHS = []
    BINARIES_PATHS = []

    g_vars = globals()
    l_vars = locals()

    if sys.version_info[:2] < (3, 0):
        from . load_config_py2 import exec_file_wrapper
    else:
        from . load_config_py3 import exec_file_wrapper

    def load_first_config(fnames, required=True):
        for fname in fnames:
            fpath = os.path.join(LOADER_DIR, fname)
            if not os.path.exists(fpath):
                if DEBUG:
                    print('OpenCV loader: config not found, skip: {}'.format(fpath))
                continue
            if DEBUG:
                print('OpenCV loader: loading config: {}'.format(fpath))
            exec_file_wrapper(fpath, g_vars, l_vars)
            return True
        if required:
            raise ImportError('OpenCV loader: missing configuration file: {}. Check OpenCV installation.'.format(fnames))

    load_first_config(['config.py'], True)
    load_first_config([
        'config-{}.{}.py'.format(sys.version_info[0], sys.version_info[1]),
        'config-{}.py'.format(sys.version_info[0])
    ], True)

    if DEBUG:
        print('OpenCV loader: PYTHON_EXTENSIONS_PATHS={}'.format(str(l_vars['PYTHON_EXTENSIONS_PATHS'])))
    if DEBUG:
        print('OpenCV loader: BINARIES_PATHS={}'.format(str(l_vars['BINARIES_PATHS'])))

    for p in reversed(l_vars['PYTHON_EXTENSIONS_PATHS']):
        sys.path.insert(1, p)

    if os.name == 'nt':
        if sys.version_info[:2] >= (3, 8):  # https://github.com/python/cpython/pull/12302
            for p in l_vars['BINARIES_PATHS']:
                try:
                    os.add_dll_directory(p)
                except Exception as e:
                    if DEBUG:
                        print('Failed os.add_dll_directory(): ' + str(e))
        os.environ['PATH'] = ';'.join(l_vars['BINARIES_PATHS']) + ';' + os.environ.get('PATH', '')
        if DEBUG:
            print('OpenCV loader: PATH={}'.format(str(os.environ['PATH'])))
    else:
        # amending LD_LIBRARY_PATH works for sub-processes only
        os.environ['LD_LIBRARY_PATH'] = ':'.join(l_vars['BINARIES_PATHS']) + ':' + os.environ.get('LD_LIBRARY_PATH', '')

    if DEBUG:
        print('OpenCV loader: replacing cv2 module')
    del sys.modules['cv2']
    import cv2

    try:
        del sys.OpenCV_LOADER
    except AttributeError:
        pass

    if DEBUG:
        print('OpenCV loader: DONE')


bootstrap()
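The bootstrap above exec()s config files from its own directory and then reads PYTHON_EXTENSIONS_PATHS and BINARIES_PATHS back out of l_vars. A minimal sketch of such a config.py follows; the subdirectory names are hypothetical, while LOADER_DIR and the two lists are injected by the namespaces the loader passes in.

# Hypothetical config.py consumed by exec_file_wrapper() in the loader above.
# LOADER_DIR, PYTHON_EXTENSIONS_PATHS and BINARIES_PATHS come from the
# globals/locals the loader provides; this file only extends the two lists.
import os

PYTHON_EXTENSIONS_PATHS = [
    os.path.join(LOADER_DIR, 'python-3')  # illustrative location of the cv2 extension
] + PYTHON_EXTENSIONS_PATHS

BINARIES_PATHS = [
    os.path.join(LOADER_DIR, 'bin')  # illustrative location of native libraries
] + BINARIES_PATHS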
# flake8: noqa
import os
import sys

if sys.version_info[:2] >= (3, 0):
    def exec_file_wrapper(fpath, g_vars, l_vars):
        with open(fpath) as f:
            code = compile(f.read(), os.path.basename(fpath), 'exec')
            exec(code, g_vars, l_vars)
# flake8: noqa
import sys

if sys.version_info[:2] < (3, 0):
    def exec_file_wrapper(fpath, g_vars, l_vars):
        execfile(fpath, g_vars, l_vars)
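For illustration, the two shims above give the loader a single exec_file_wrapper signature on both Python 2 and 3; a standalone sketch of the same dispatch, with a hypothetical config path, could read:

# Hypothetical standalone use of the version-specific shims above.
import sys

if sys.version_info[:2] < (3, 0):
    from load_config_py2 import exec_file_wrapper
else:
    from load_config_py3 import exec_file_wrapper

g_vars, l_vars = {}, {}
exec_file_wrapper('config.py', g_vars, l_vars)  # 'config.py' is illustrative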