Columns (type, value range):
- file_name: large_string, lengths 4–140
- prefix: large_string, lengths 0–12.1k
- suffix: large_string, lengths 0–12k
- middle: large_string, lengths 0–7.51k
- fim_type: large_string, 4 classes
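The columns above suggest a fill-in-the-middle layout: each row carries a `prefix`, a `middle`, and a `suffix` cut from the file named in `file_name`, plus a `fim_type` label. A minimal sketch, assuming that reading of the schema (the row contents below are made up for illustration, not taken from the table):

```python
# Hypothetical row: prefix + middle + suffix reconstructs the original file text,
# assuming the three columns are contiguous pieces of `file_name`.
row = {
    "file_name": "simulation2_01.py",
    "prefix": "def calc_distance():\n    g = 1.6249\n    ",
    "middle": "ejectionangle = uniform(angle[0], angle[1])",
    "suffix": "\n    ejectionangle *= math.pi / 180\n",
    "fim_type": "random_line_split",
}

reconstructed = row["prefix"] + row["middle"] + row["suffix"]
print(reconstructed)
```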
simulation2_01.py
if distance[1] == True: x = (distance[0] * math.sin(azimuth * math.pi/180)) y = (distance[0] * math.cos(azimuth* math.pi/180)) #Convert back to degrees x /= 100 x *= 0.003297790480378 y /= 100 y *= 0.003297790480378 else: pass xarr[i][index] = x+xorigin yarr[i][index] = y+yorigin def calc_height(distance, ejectionangle, g, ejectionvelocity): ''' height@x = initital_height + distance(tan(theta)) - ((g(x^2))/(2(v(cos(theta))^2)) initial_height = 0, a planar surface is fit to some reference elevation. distance is in meters angle is in radians ''' trajectory = numpy.linspace(0,distance, distance/100,endpoint=True ) elevation = (trajectory * math.tan(ejectionangle)) - ((g*(trajectory**2)) / (2*((ejectionvelocity * math.cos(ejectionangle))**2))) return elevation def calc_distance(): g = 1.6249 #Calculate the ejection angle randomly from a range ejectionangle = uniform(angle[0],angle[1]) ejectionangle *= math.pi/180 #Convert to radians theta = math.sin(2*ejectionangle) #Determine the ejection velocity randomly from a range ejectionvelocity = uniform(velocity[0], velocity[1]) v2 = ejectionvelocity * ejectionvelocity #Calculate total theoretical travel distance distance = (v2 * theta) / g #Calculate the elevation over a planar surface elevation = calc_height(distance, ejectionangle, g, ejectionvelocity) return distance, ejectionangle, elevation def stromboli2(): '''distance = (velocity^2*(sin(2theta))) / gravity''' p = 0 while p <= num: p+=1 g = 1.6249 #Gravitational acceleration on the moon distance, angle, elevation = calc_distance() azimuth = random_azimuth() Xcoordinate = distance * math.sin(azimuth * math.pi/180) #Conversion to radians Ycoordinate = distance * math.cos(azimuth* math.pi/180) #The WAC visible spectrum data is 100mpp or 0.003297790480378 degrees / pixel. Xcoordinate /= 100 Xcoordinate *= 0.003297790480378 Ycoordinate /= 100 Ycoordinate *= 0.003297790480378 yield Xcoordinate, Ycoordinate, angle, azimuth, elevation, distance if p > num: done = False yield done def check_topography(dtm, originx, originy, destx, desty, distance,elevation, dev, gtinv): ''' This function checks for impact due to variation in topography by mimicing the functionality of a topographic profile from polyline. 1. Generate 2 arrays. One of X coordinates and one of Y coordinates 2. Transform these from GCS to PCS 3. Create a new array with the elevations extracted from the dtm 4. Compare it to the analytical trajectory heights 5. If the impact occurs before total potential travel distance, drop the projectile there. If not, place it at the total possible travel distance. Parameters ---------- dtm: A digital terrain model, in 16bit, storing terrain elevation, ndarray originx: The x coord of the projectile launch, scalar originy: The y coord of the projectile launch, scalar destx: The x landing coordinate on a flat plane, scalar desty: The y landing coordinate on a flat plane, scalar distance: The total possible distance traveled, scalar elevation: An array storing heights above 0 of the projectile at some interval (100m by default) dev: Geotransform parameters gtinv: Inverse geotransform parameters Returns ------- distance: The new distance the projectile has traveled if it impacts the topography. ToDo: I should grab an elevation line longer than total possible distance. On a planar surface the object lands at total length. On a surface with increasing slope it lands early;later on a downward slope. We do not test for downward slope. 
''' #Extract the elevation from the dtm along the vector #We add 5km to distance as total theoretical distance may be exceeded by # downward sloping terrain xpt = numpy.linspace(originx,destx,num=(distance)/100, endpoint=True) ypt = numpy.linspace(originy,desty,num=(distance)/100, endpoint=True) xpt -= geotransform[0] ypt -= geotransform[3] xsam = numpy.round_((gtinv[1] *xpt + gtinv[2] * ypt), decimals=0) ylin = numpy.round_((gtinv[4] *xpt + gtinv[5] * ypt), decimals=0) try: dtmvector = dtm[ylin.astype(int),xsam.astype(int)] #Compute elevation of projectile from a plane at the origin height dtmvectormin = dtmvector.min() elevation -= abs(dtmvector[0]) #Compare the projectile elevation to the dtm dtmvector += abs(dtmvectormin) elevation -= dtmvector elevation += dtmvectormin #Ignore the first 2.5km of ejection distance to ensure that we get a valid elevation check. impact = numpy.where(elevation[250:] <= 0) try: #We are working at 100mpp, so the new distance is index +1 return ((impact[0][0])+1) * 100, True except: return False except: print "Total distance travel exceeds model dimensions." def density(m, xdata, ydata, shapefile, ppg): ''' This function converts the lat/lon of the input map to meters assuming an equirectangular projection. It then creates a grid at 100mpp, bins the input data into the grid (density) and creates a histogram. Finally, a mesh grid is created and the histogram is plotted in 2D over the basemap. If the shapefile flag is set to true a shapefile is created by calling the shapefile function. Parameters: m: A basemap mapping object xdata: An array of x landing coordinates, ndarray ydata: An array of y landing coordinates, ndarray shapefile: A flag on whether or not to generate a shapefile ppg: The number of meters per grid cell * 100 ''' #Convert from DD to m to create a mesh grid. xmax = (m.xmax) / 0.003297790480378 xmin = (m.xmin) / 0.003297790480378 ymax = (m.ymax) / 0.003297790480378 ymin = (m.ymin) / 0.003297790480378 #Base 100mpp nx = 1516 / int(ppg) ny = 2123 / int(ppg
for index in range(len(xarr[i])): #distance and coordinates distance, angle, elevation = calc_distance() azimuth = random_azimuth() Xcoordinate = distance * math.sin(azimuth * math.pi/180) #Conversion to radians Ycoordinate = distance * math.cos(azimuth* math.pi/180) #The WAC visible spectrum data is 100mpp or 0.003297790480378 degrees / pixel. Xcoordinate /= 100 Xcoordinate *= 0.003297790480378 Ycoordinate /= 100 Ycoordinate *= 0.003297790480378 x = float(Xcoordinate) y = float(Ycoordinate) #Randomly select the origin point along the linear vent rand_index = randrange(0,10) xorigin, yorigin = (xpt[rand_index], ypt[rand_index]) distance = check_topography(dtm, xorigin, yorigin, x+xorigin, y+yorigin, distance,elevation, dev, gtinv)
identifier_body
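The docstrings in the row above give the closed-form trajectory used by `calc_height` and `calc_distance`: range on a plane is v² sin(2θ)/g and the height profile is x·tan(θ) − g·x²/(2(v·cos θ)²). A standalone sketch of those formulas with illustrative velocity and angle values; the 100 m sampling step and g = 1.6249 m/s² follow the source:

```python
import math
import numpy

g = 1.6249                      # lunar gravitational acceleration used in the source, m/s^2
v = 400.0                       # ejection velocity, m/s (inside the default 350-425 range)
theta = math.radians(45.0)      # ejection angle, converted to radians as in calc_distance

# Total travel distance on a planar surface, then the height profile along it.
distance = v ** 2 * math.sin(2 * theta) / g
x = numpy.linspace(0, distance, int(distance / 100), endpoint=True)   # 100 m steps
elevation = x * math.tan(theta) - (g * x ** 2) / (2 * (v * math.cos(theta)) ** 2)

print(round(distance, 1), round(elevation.max(), 1))   # range in m, peak height in m
```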
simulation2_01.py
.round_((gtinv[4] *xpt + gtinv[5] * ypt), decimals=0) try: dtmvector = dtm[ylin.astype(int),xsam.astype(int)] #Compute elevation of projectile from a plane at the origin height dtmvectormin = dtmvector.min() elevation -= abs(dtmvector[0]) #Compare the projectile elevation to the dtm dtmvector += abs(dtmvectormin) elevation -= dtmvector elevation += dtmvectormin #Ignore the first 2.5km of ejection distance to ensure that we get a valid elevation check. impact = numpy.where(elevation[250:] <= 0) try: #We are working at 100mpp, so the new distance is index +1 return ((impact[0][0])+1) * 100, True except: return False except: print "Total distance travel exceeds model dimensions." def density(m, xdata, ydata, shapefile, ppg): ''' This function converts the lat/lon of the input map to meters assuming an equirectangular projection. It then creates a grid at 100mpp, bins the input data into the grid (density) and creates a histogram. Finally, a mesh grid is created and the histogram is plotted in 2D over the basemap. If the shapefile flag is set to true a shapefile is created by calling the shapefile function. Parameters: m: A basemap mapping object xdata: An array of x landing coordinates, ndarray ydata: An array of y landing coordinates, ndarray shapefile: A flag on whether or not to generate a shapefile ppg: The number of meters per grid cell * 100 ''' #Convert from DD to m to create a mesh grid. xmax = (m.xmax) / 0.003297790480378 xmin = (m.xmin) / 0.003297790480378 ymax = (m.ymax) / 0.003297790480378 ymin = (m.ymin) / 0.003297790480378 #Base 100mpp nx = 1516 / int(ppg) ny = 2123 / int(ppg) #Convert to numpy arrays xdata = numpy.asarray(xdata) ydata = numpy.asarray(ydata) #Bin the data & calculate the density lon_bins = numpy.linspace(xdata.min(), xdata.max(), nx+1) lat_bins = numpy.linspace(ydata.min(), ydata.max(), ny+1) density, _, _ = numpy.histogram2d(ydata, xdata, [lat_bins, lon_bins]) #If the user wants a shapefile, pass the numpy arrays if shapefile != None: print "Writing model output to a shapefile." create_shapefile(xdata, ydata, shapefile) #Create a grid of equally spaced polygons lon_bins_2d, lat_bins_2d = numpy.meshgrid(lon_bins, lat_bins) if density.max() <= 3: maxden = 5 else: maxden = density.max() #Mask the density array so that 0 is not plotted density = numpy.ma.masked_where(density <=0, density) plt.pcolormesh(lon_bins_2d,lat_bins_2d, density, cmap=cm.RdYlGn_r, vmin=0, vmax=maxden, alpha=0.5) plt.colorbar(orientation='horizontal') if __name__ == '__main__': '''This is the main section which handles program flow.''' #Parse all of the arguments. parser = argparse.ArgumentParser(description='Stromboli Ejection Simulation Tool v1') parser.add_argument('--velocity', '-v', action='store',nargs='+',default=[350,425], dest='velocity', help='A range of ejection velocities. ') parser.add_argument('--angle','-a', action='store', nargs='+',default=[30, 60], dest='angle', help='Optional: A range of ejection angles. Example: -a 30 60') parser.add_argument('-i', '--iterations', action='store', type=int, dest='i',default=500, help='The number of ejection iterations to perform.') parser.add_argument('--shapefile', action='store',nargs=1, default=None, dest='shapefile', help='Use this flag to generate a shapefile, in Moon_2000GCS, of the point data.') parser.add_argument('--fast', action='store', default=None, nargs=1, dest='multi', help='Use this flag to forgo creating a visualization and just create a shapefile. 
This uses all available processing cores and is substantially faster.') parser.add_argument('--ppg', action='store', default=10, dest='ppg', help='The number of pixels per grid cell. Default is 10, which generates a 1000m grid square using 100mpp WAC Vis.') args = parser.parse_args() #Assign the user variables to the globals, not great form, but it works. try: velocity = [float(args.velocity[0]),float(args.velocity[1])] except: velocity = [float(args.velocity[0]),float(args.velocity[0])] num = args.i try: angle = [float(args.angle[0]),float(args.angle[1])] except: angle = [float(args.angle[0]),float(args.angle[0])] #Read the input DTM and get geotransformation info ds = gdal.Open('wac_dtm.tif') dtm = ds.ReadAsArray() geotransform = ds.GetGeoTransform() dev = (geotransform[1]*geotransform[5] - geotransform[2]*geotransform[4]) gtinv = ( geotransform[0] , geotransform[5]/dev, - geotransform[2]/dev, geotransform[3], - geotransform[4]/dev, geotransform[1]/dev) #Set the approximate ejection coordinates xpt = numpy.linspace(-97.788,-97.855,num=10, endpoint=True) ypt = numpy.linspace(-30.263,-29.851,num=10, endpoint=True) #If the user wants to process quickly then we omit the visualization and multiprocess to generate a shapefile if args.multi is not None: import multiprocessing cores = multiprocessing.cpu_count() cores *= 2 step = num // cores xarray = numpy.frombuffer(multiprocessing.RawArray(ctypes.c_double, num)) yarray = numpy.frombuffer(multiprocessing.RawArray(ctypes.c_double, num)) init(xarray,yarray) jobs = [] for i in range(0, num+1, step): p = multiprocessing.Process(target=strom_multi, args=(xarr,yarr,slice(i, i+step)), ) jobs.append(p) for job in jobs: job.start() for job in jobs: job.join() create_shapefile(xarr, yarr, args.multi) else: #Visualization - setup the plot fig = plt.figure(figsize=(15,10)) ax1 = fig.add_subplot(1,2,1) #Points that hit underlying topography pt, = ax1.plot([], [],'ro', markersize=3) xdata, ydata = [], [] #Points that travel the total theoretical distance ptmax, = ax1.plot([],[], 'yo', markersize=3) datamax, ydatamax = [],[] #Map lon_min = -102.5 lon_max = -93.5 lat_min = -34.5 lat_max = -25.5 m = Basemap(projection='cyl',llcrnrlat=lat_min,urcrnrlat=lat_max, llcrnrlon=lon_min,urcrnrlon=lon_max,resolution=None, rsphere=(1737400.0,1737400.0)) m.drawmeridians(numpy.arange(lon_min+0.5, lon_max+1, 1), labels=[0,0,0,1], fontsize=10) m.drawparallels(numpy.arange(lat_min+0.5,lat_max+1, 1), labels=[1,0,0,0], fontsize=10) #Read the input image im = imread('wac_global_vis.png') m.imshow(im, origin='upper', cmap=cm.Greys_r, alpha=0.9) def
run
identifier_name
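The `density()` docstring in this row describes binning the landing coordinates onto a grid with `numpy.histogram2d` and masking empty cells before plotting. A self-contained sketch of that binning step, using random placeholder points in place of model output (the 151 x 212 grid matches the source's default ppg of 10):

```python
import numpy

rng = numpy.random.default_rng(0)
xdata = rng.uniform(-98.0, -97.5, 1000)   # longitudes of landing points (placeholder)
ydata = rng.uniform(-30.3, -29.8, 1000)   # latitudes of landing points (placeholder)
nx, ny = 151, 212                         # grid size at the chosen cell size

lon_bins = numpy.linspace(xdata.min(), xdata.max(), nx + 1)
lat_bins = numpy.linspace(ydata.min(), ydata.max(), ny + 1)
density, _, _ = numpy.histogram2d(ydata, xdata, [lat_bins, lon_bins])

# Mask empty cells so they are not drawn, as in the original pcolormesh call.
density = numpy.ma.masked_where(density <= 0, density)
print(density.shape, density.max())
```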
graph.go
return -1 } func (this *Graph) PrintAllCircle() { fmt.Println("打印环") for _, sli := range this.allCircle { fmt.Println(sli) } } // 遍历整个图使用深度优先算法 func (this *Graph) Dfs(v int) { if this.visted[v] == 1 { return } this.visted[v] = 1 edge := this.vertNode[v].edgeTableNode for edge != nil { this.Dfs(edge.index) edge = edge.edgeTableNode } } // 遍历图 func (this *Graph) DfsAll() { for i := 0; i < this.vertNum; i++ { this.Dfs(i) } } // 通过迭代的方式遍历 --栈结构 // func (this *Graph) DfsStatck() { var stack = CreateStack() for i := 0; i < this.vertNum; i++ { if this.visted[i] == 1 { continue } stack.Push(i) this.visted[i] = 1 for !stack.IsEmpty() { elemet := stack.Get() node := this.vertNode[elemet.(int)].edgeTableNode for node != nil && this.visted[node.index] == 1 { node = node.edgeTableNode } if node != nil && this.visted[node.index] == 0 { this.visted[node.index] = 1 stack.Push(node.index) } else { stack.Pop() } } } } // 查找所有换 func (this *Graph) FindCircle() { // 初始化0 表示没有被查找过 this.DfsCircle(0) } // 遍历查找环 func (this *Graph) DfsCircle(v int) { j := FindArray(arrayList, v) if j != -1 { this.hasCircle = true tempSlice := make([]int, len(arrayList)-j) copy(tempSlice, arrayList[j:]) this.allCircle = append(this.allCircle, tempSlice) return } arrayList = append(arrayList, v) edge := this.vertNode[v].edgeTableNode for edge != nil { this.DfsCircle(edge.index) edge = edge.edgeTableNode } // 如果走到这里说明当前这个节点不是环路点 // 移除掉 arrayList = arrayList[0 : len(arrayList)-1] } func GrapBfs() { var grap = NewGraph(5, 6, 1) grap.InitGrap() grap.BfsNearPath(0, 4) } func GrapTuoPuSort() { var grap = NewGraph(5, 6, 1) grap.InitGrap() grap.TuoPuSort() } func GrapTest() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.FindCircle() grap.PrintAllCircle() } func GrapDfs() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.DfsAll() } // 利用栈进行遍历图 func GrapStack() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.DfsStatck() } func GrapDijstra() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.Dijkstra(0, 3) } func main() { //GrapDfs() //GrapTuoPuSort() //GrapBfs() GrapDijstra() } // 队列 /* 队列的特性较为单一,基本操作即初始化、获取大小、添加元素、移除元素等。 最重要的特性就是满足先进先出 */ type linkNode struct { value MapParent next *linkNode } type linkedList struct { head *linkNode tail *linkNode count int } func NewLinkList() *linkedList { return &linkedList{head: nil, tail: nil, count: 0} } func (this *linkedList) IsEmpty() bool { return this.count == 0 } func (this *linkedList) Add(value MapParent) { node := new(linkNode) node.value = value this.count++ if this.tail == nil { this.head = node this.tail = node node.next = nil return } this.tail.next = node node.next = nil this.tail = node } func (this *linkedList) Delete() *linkNode { if this.head == nil { return nil } this.count-- if this.head == this.tail { node := this.head this.head = nil this.tail = nil return node
node := this.head this.head = this.head.next return node } type Queue struct { link *linkedList } func NewQueue() *Queue { return &Queue{link: NewLinkList()} } //加入队列 func (this *Queue) Put(value MapParent) { this.link.Add(value) } //pop出队列 func (this *Queue) Pop() *linkNode { return this.link.Delete() } //获得队列的长度 func (this *Queue) GetSize() int { return this.link.count } func (this *Queue) IsEmpty() bool { return this.GetSize() == 0 } // 初始化队列 var queue *Queue = NewQueue() type MapParent struct { parent int son int } // 查找最短路径 func (this *Graph) BfsNearPath(start, end int) []int { if start == end { return []int{start} } // 用来存储的是找到终点之前所有出队列的元素 mapParent := make([]MapParent, 0) // 根据初始节点 把他的邻接点放入队列 node := this.vertNode[start] for node.edgeTableNode != nil { index := node.edgeTableNode.index queue.Put(MapParent{parent: start, son: index}) node.edgeTableNode = node.edgeTableNode.edgeTableNode } var find = false for !queue.IsEmpty() { // 检测队列的元素 node := queue.Pop() // 已经被查看过得元素不需要再次查询 防止出现死循环 if this.visted[node.value.son] == 1 { continue } // 记录出队的元素 mapParent = append(mapParent, node.value) if node.value.son == end { find = true break } // 节点的邻接点入队列 grapNode := this.vertNode[node.value.son] for grapNode.edgeTableNode != nil { index := grapNode.edgeTableNode.index // 记录父节点与子节点的映射 queue.Put(MapParent{parent: node.value.son, son: index}) grapNode.edgeTableNode = grapNode.edgeTableNode.edgeTableNode } // 记录被查询过 this.visted = append(this.visted, node.value.son) } // 逆序查找路径 path := []int{} if find == true { path = append(path, end) son := end for i := len(mapParent) - 1; i >= 0; i-- { if son == mapParent[i].son { path = append(path, mapParent[i].parent) son = mapParent[i].parent } } } // 打印查找的路径 fmt.Println(path) return path } // 拓扑排序 func (this *Graph) TuoPuSort() ([]int, bool) { mapQianXIang := make(map[int]int) sortList := []int{} for i := 0; i < this.vertNum; i++ { node := this.vertNode[i] if node == nil { continue } // 获取邻接链表 edgeNode := node.edgeTableNode // 记录每个子节点的被指向的次数 for edgeNode != nil { mapQianXIang[edgeNode.index]++ edgeNode = edgeNode.edgeTableNode } } // 拓扑排序就是把没有前项节点的先放入队列中 for i := 0; i < this.vertNum; i++ { if mapQianXIang[i] == 0 { queue.Put(MapParent{parent: -1, son: i}) } } for !queue.IsEmpty() { node := queue.Pop() sortList = append(sortList, node.value.son) // 获取邻接链表 edgeNode := this.vertNode[node.value.son].edgeTableNode // 递减指向次数 其实就是斩断的过程 for edgeNode != nil { mapQianXIang[edgeNode.index]-- if mapQianXIang[edgeNode.index] == 0 { // 忽略parent 我们son queue.Put(MapParent{parent: -
}
identifier_name
graph.go
tempNode.weight = weight tempNode.index = endVert tempNode.edgeTableNode = nil this.vertNode[startVert].edgeTableNode = tempNode continue } for edgeNode != nil { // 单链表尾插节点 if edgeNode.edgeTableNode == nil { tempNode := new(EdgeTableNode) tempNode.weight = weight tempNode.index = endVert tempNode.edgeTableNode = nil edgeNode.edgeTableNode = tempNode break } edgeNode = edgeNode.edgeTableNode } } } func FindArray(arr []int, v int) int { for index, value := range arr { if v == value { return index } } return -1 } func (this *Graph) PrintAllCircle() { fmt.Println("打印环") for _, sli := range this.allCircle { fmt.Println(sli) } } // 遍历整个图使用深度优先算法 func (this *Graph) Dfs(v int) { if this.visted[v] == 1 { return } this.visted[v] = 1 edge := this.vertNode[v].edgeTableNode for edge != nil { this.Dfs(edge.index) edge = edge.edgeTableNode } } // 遍历图 func (this *Graph) DfsAll() { for i := 0; i < this.vertNum; i++ { this.Dfs(i) } } // 通过迭代的方式遍历 --栈结构 // func (this *Graph) DfsStatck() { var stack = CreateStack() for i := 0; i < this.vertNum; i++ { if this.visted[i] == 1 { continue } stack.Push(i) this.visted[i] = 1 for !stack.IsEmpty() { elemet := stack.Get() node := this.vertNode[elemet.(int)].edgeTableNode for node != nil && this.visted[node.index] == 1 { node = node.edgeTableNode } if node != nil && this.visted[node.index] == 0 { this.visted[node.index] = 1 stack.Push(node.index) } else { stack.Pop() } } } } // 查找所有换 func (this *Graph) FindCircle() { // 初始化0 表示没有被查找过 this.DfsCircle(0) } // 遍历查找环 func (this *Graph) DfsCircle(v int) { j := FindArray(arrayList, v) if j != -1 { this.hasCircle = true tempSlice := make([]int, len(arrayList)-j) copy(tempSlice, arrayList[j:]) this.allCircle = append(this.allCircle, tempSlice) return } arrayList = append(arrayList, v) edge := this.vertNode[v].edgeTableNode for edge != nil { this.DfsCircle(edge.index) edge = edge.edgeTableNode } // 如果走到这里说明当前这个节点不是环路点 // 移除掉 arrayList = arrayList[0 : len(arrayList)-1] } func GrapBfs() { var grap = NewGraph(5, 6, 1) grap.InitGrap() grap.BfsNearPath(0, 4) } func GrapTuoPuSort() { var grap = NewGraph(5, 6, 1) grap.InitGrap() grap.TuoPuSort() } func GrapTest() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.FindCircle() grap.PrintAllCircle() } func GrapDfs() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.DfsAll() } // 利用栈进行遍历图 func GrapStack() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.DfsStatck() } func GrapDijstra() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.Dijkstra(0, 3) } func main() { //GrapDfs() //GrapTuoPuSort() //GrapBfs() GrapDijstra() } // 队列 /* 队列的特性较为单一,基本操作即初始化、获取大小、添加元素、移除元素等。 最重要的特性就是满足先进先出 */ type linkNode struct { value MapParent next *linkNode } type linkedList struct { head *linkNode tail *linkNode count int } func NewLinkList() *linkedList { return &linkedList{head: nil, tail: nil, count: 0} } func (this *linkedList) IsEmpty() bool { return this.count == 0 } func (this *linkedList) Add(value MapParent) { node := new(linkNode) node.value = value this.count++ if this.tail == nil { this.head = node this.tail = node node.next = nil return } this.tail.next = node node.next = nil this.tail = node } func (this *linkedList) Delete() *linkNode { if this.head == nil { return nil } this.count-- if this.head == this.tail { node := this.head this.head = nil this.tail = nil return node } node := this.head this.head = this.head.next return node } type Queue struct { link *linkedList } func NewQueue() *Queue { return &Queue{link: NewLinkList()} } //加入队列 func (this *Queue) Put(value MapParent) { 
this.link.Add(value) } //pop出队列 func (this *Queue) Pop() *linkNode { return this.link.Delete() } //获得队列的长度 func (this *Queue) GetSize() int { return this.link.count } func (this *Queue) IsEmpty() bool { return this.GetSize() == 0 } // 初始化队列 var queue *Queue = NewQueue() type MapParent struct { parent int son int } // 查找最短路径 func (this *Graph) BfsNearPath(start, end int) []int { if start == end { return []int{start} } // 用来存储的是找到终点之前所有出队列的元素 mapParent := make([]MapParent, 0) // 根据初始节点 把他的邻接点放入队列 node := this.vertNode[start] for node.edgeTableNode != nil { index := node.edgeTableNode.index queue.Put(MapParent{parent: start, son: index}) node.edgeTableNode = node.edgeTableNode.edgeTableNode } var find = false for !queue.IsEmpty() { // 检测队列的元素 node := queue.Pop() // 已经被查看过得元素不需要再次查询 防止出现死循环 if this.visted[node.value.son] == 1 { continue } // 记录出队的元素 mapParent = append(mapParent, node.value) if node.value.son == end { find = true break } // 节点的邻接点入队列 grapNode := this.vertNode[node.value.son] for grapNode.edgeTableNode != nil { index := grapNode.edgeTableNode.index // 记录父节点与子节点的映射 queue.Put(MapParent{parent: node.value.son, son: index}) grapNode.edgeTableNode = grapNode.edgeTableNode.edgeTableNode } // 记录被查询过 this.visted = append(this.visted, node.value.son) } // 逆序查找路径 path := []int{} if find == true { path = append(path, end) son := end for i := len(mapParent) - 1; i >= 0; i-- { if son == mapParent[i].son { path = append(path, mapParent[i].parent) son = mapParent[i].parent } } } // 打印查找的路径 fmt.Println(path) return path } // 拓扑排序 func (this *Graph) TuoPuSort
.Itoa(i) fmt.Println(*vert) this.vertNode = append(this.vertNode, vert) } // 边初始化 var startVert int var endVert int var weight int var n int for i := 0; i < this.edgeNum; i++ { n, _ = fmt.Scanf("%d %d %d", &startVert, &endVert, &weight) fmt.Printf("%d %d %d\n", startVert, endVert, n) var edgeNode = this.vertNode[startVert].edgeTableNode if edgeNode == nil { tempNode := new(EdgeTableNode)
identifier_body
graph.go
迭代的方式遍历 --栈结构 // func (this *Graph) DfsStatck() { var stack = CreateStack() for i := 0; i < this.vertNum; i++ { if this.visted[i] == 1 { continue } stack.Push(i) this.visted[i] = 1 for !stack.IsEmpty() { elemet := stack.Get() node := this.vertNode[elemet.(int)].edgeTableNode for node != nil && this.visted[node.index] == 1 { node = node.edgeTableNode } if node != nil && this.visted[node.index] == 0 { this.visted[node.index] = 1 stack.Push(node.index) } else { stack.Pop() } } } } // 查找所有换 func (this *Graph) FindCircle() { // 初始化0 表示没有被查找过 this.DfsCircle(0) } // 遍历查找环 func (this *Graph) DfsCircle(v int) { j := FindArray(arrayList, v) if j != -1 { this.hasCircle = true tempSlice := make([]int, len(arrayList)-j) copy(tempSlice, arrayList[j:]) this.allCircle = append(this.allCircle, tempSlice) return } arrayList = append(arrayList, v) edge := this.vertNode[v].edgeTableNode for edge != nil { this.DfsCircle(edge.index) edge = edge.edgeTableNode } // 如果走到这里说明当前这个节点不是环路点 // 移除掉 arrayList = arrayList[0 : len(arrayList)-1] } func GrapBfs() { var grap = NewGraph(5, 6, 1) grap.InitGrap() grap.BfsNearPath(0, 4) } func GrapTuoPuSort() { var grap = NewGraph(5, 6, 1) grap.InitGrap() grap.TuoPuSort() } func GrapTest() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.FindCircle() grap.PrintAllCircle() } func GrapDfs() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.DfsAll() } // 利用栈进行遍历图 func GrapStack() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.DfsStatck() } func GrapDijstra() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.Dijkstra(0, 3) } func main() { //GrapDfs() //GrapTuoPuSort() //GrapBfs() GrapDijstra() } // 队列 /* 队列的特性较为单一,基本操作即初始化、获取大小、添加元素、移除元素等。 最重要的特性就是满足先进先出 */ type linkNode struct { value MapParent next *linkNode } type linkedList struct { head *linkNode tail *linkNode count int } func NewLinkList() *linkedList { return &linkedList{head: nil, tail: nil, count: 0} } func (this *linkedList) IsEmpty() bool { return this.count == 0 } func (this *linkedList) Add(value MapParent) { node := new(linkNode) node.value = value this.count++ if this.tail == nil { this.head = node this.tail = node node.next = nil return } this.tail.next = node node.next = nil this.tail = node } func (this *linkedList) Delete() *linkNode { if this.head == nil { return nil } this.count-- if this.head == this.tail { node := this.head this.head = nil this.tail = nil return node } node := this.head this.head = this.head.next return node } type Queue struct { link *linkedList } func NewQueue() *Queue { return &Queue{link: NewLinkList()} } //加入队列 func (this *Queue) Put(value MapParent) { this.link.Add(value) } //pop出队列 func (this *Queue) Pop() *linkNode { return this.link.Delete() } //获得队列的长度 func (this *Queue) GetSize() int { return this.link.count } func (this *Queue) IsEmpty() bool { return this.GetSize() == 0 } // 初始化队列 var queue *Queue = NewQueue() type MapParent struct { parent int son int } // 查找最短路径 func (this *Graph) BfsNearPath(start, end int) []int { if start == end { return []int{start} } // 用来存储的是找到终点之前所有出队列的元素 mapParent := make([]MapParent, 0) // 根据初始节点 把他的邻接点放入队列 node := this.vertNode[start] for node.edgeTableNode != nil { index := node.edgeTableNode.index queue.Put(MapParent{parent: start, son: index}) node.edgeTableNode = node.edgeTableNode.edgeTableNode } var find = false for !queue.IsEmpty() { // 检测队列的元素 node := queue.Pop() // 已经被查看过得元素不需要再次查询 防止出现死循环 if this.visted[node.value.son] == 1 { continue } // 记录出队的元素 mapParent = append(mapParent, node.value) if node.value.son == end { find = true 
break } // 节点的邻接点入队列 grapNode := this.vertNode[node.value.son] for grapNode.edgeTableNode != nil { index := grapNode.edgeTableNode.index // 记录父节点与子节点的映射 queue.Put(MapParent{parent: node.value.son, son: index}) grapNode.edgeTableNode = grapNode.edgeTableNode.edgeTableNode } // 记录被查询过 this.visted = append(this.visted, node.value.son) } // 逆序查找路径 path := []int{} if find == true { path = append(path, end) son := end for i := len(mapParent) - 1; i >= 0; i-- { if son == mapParent[i].son { path = append(path, mapParent[i].parent) son = mapParent[i].parent } } } // 打印查找的路径 fmt.Println(path) return path } // 拓扑排序 func (this *Graph) TuoPuSort() ([]int, bool) { mapQianXIang := make(map[int]int) sortList := []int{} for i := 0; i < this.vertNum; i++ { node := this.vertNode[i] if node == nil { continue } // 获取邻接链表 edgeNode := node.edgeTableNode // 记录每个子节点的被指向的次数 for edgeNode != nil { mapQianXIang[edgeNode.index]++ edgeNode = edgeNode.edgeTableNode } } // 拓扑排序就是把没有前项节点的先放入队列中 for i := 0; i < this.vertNum; i++ { if mapQianXIang[i] == 0 { queue.Put(MapParent{parent: -1, son: i}) } } for !queue.IsEmpty() { node := queue.Pop() sortList = append(sortList, node.value.son) // 获取邻接链表 edgeNode := this.vertNode[node.value.son].edgeTableNode // 递减指向次数 其实就是斩断的过程 for edgeNode != nil { mapQianXIang[edgeNode.index]-- if mapQianXIang[edgeNode.index] == 0 { // 忽略parent 我们son queue.Put(MapParent{parent: -1, son: edgeNode.index}) } edgeNode = edgeNode.edgeTableNode } } fmt.Println(sortList, " ", len(sortList) != this.vertNum) // bool 表示有没有环 return sortList, len(sortList) != this.vertNum } // 获取权重最低的节点 // 不能包含已经搜索过的 func (this *Graph) MinCostsNode(costs map[int]float64) int { minCostIndex := -1 inf := math.Inf(1) for key, value := range costs { // 已经被查找过 if this.visted[key] == 1 { continue } if value < inf { minCostIndex = key inf = value
random_line_split
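`TuoPuSort` in the row above is a topological sort: count how often each vertex is pointed at, enqueue the vertices with no incoming edges, and flag a cycle when the output list is shorter than the vertex count. A compact Python re-expression of that idea; the adjacency-list dict shape is an assumption, not the Go struct used above:

```python
from collections import deque

def topo_sort(adj):
    """Kahn's algorithm over an adjacency-list dict {vertex: [neighbours]}.
    Returns (order, has_cycle), mirroring the (sortList, bool) pair of TuoPuSort:
    a cycle exists when not every vertex makes it into the order."""
    indegree = {v: 0 for v in adj}
    for v in adj:
        for w in adj[v]:
            indegree[w] += 1
    queue = deque(v for v in adj if indegree[v] == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for w in adj[v]:
            indegree[w] -= 1
            if indegree[w] == 0:
                queue.append(w)
    return order, len(order) != len(adj)

print(topo_sort({0: [1, 2], 1: [3], 2: [3], 3: []}))  # ([0, 1, 2, 3], False)
```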
graph.go
return -1 } func (this *Graph) PrintAllCircle() { fmt.Println("打印环") for _, sli := range this.allCircle { fmt.Println(sli) } } // 遍历整个图使用深度优先算法 func (this *Graph) Dfs(v int) { if this.visted[v] == 1 { return } this.visted[v] = 1 edge := this.vertNode[v].edgeTableNode for edge != nil { this.Dfs(edge.index) edge = edge.edgeTableNode } } // 遍历图 func (this *Graph) DfsAll() { for i := 0; i < this.vertNum; i++ { this.Dfs(i) } } // 通过迭代的方式遍历 --栈结构 // func (this *Graph) DfsStatck() { var stack = CreateStack() for i := 0; i < this.vertNum; i++ { if this.visted[i] == 1 { continue } stack.Push(i)
for !stack.IsEmpty() { elemet := stack.Get() node := this.vertNode[elemet.(int)].edgeTableNode for node != nil && this.visted[node.index] == 1 { node = node.edgeTableNode } if node != nil && this.visted[node.index] == 0 { this.visted[node.index] = 1 stack.Push(node.index) } else { stack.Pop() } } } } // 查找所有换 func (this *Graph) FindCircle() { // 初始化0 表示没有被查找过 this.DfsCircle(0) } // 遍历查找环 func (this *Graph) DfsCircle(v int) { j := FindArray(arrayList, v) if j != -1 { this.hasCircle = true tempSlice := make([]int, len(arrayList)-j) copy(tempSlice, arrayList[j:]) this.allCircle = append(this.allCircle, tempSlice) return } arrayList = append(arrayList, v) edge := this.vertNode[v].edgeTableNode for edge != nil { this.DfsCircle(edge.index) edge = edge.edgeTableNode } // 如果走到这里说明当前这个节点不是环路点 // 移除掉 arrayList = arrayList[0 : len(arrayList)-1] } func GrapBfs() { var grap = NewGraph(5, 6, 1) grap.InitGrap() grap.BfsNearPath(0, 4) } func GrapTuoPuSort() { var grap = NewGraph(5, 6, 1) grap.InitGrap() grap.TuoPuSort() } func GrapTest() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.FindCircle() grap.PrintAllCircle() } func GrapDfs() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.DfsAll() } // 利用栈进行遍历图 func GrapStack() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.DfsStatck() } func GrapDijstra() { var grap = NewGraph(4, 5, 1) grap.InitGrap() grap.Dijkstra(0, 3) } func main() { //GrapDfs() //GrapTuoPuSort() //GrapBfs() GrapDijstra() } // 队列 /* 队列的特性较为单一,基本操作即初始化、获取大小、添加元素、移除元素等。 最重要的特性就是满足先进先出 */ type linkNode struct { value MapParent next *linkNode } type linkedList struct { head *linkNode tail *linkNode count int } func NewLinkList() *linkedList { return &linkedList{head: nil, tail: nil, count: 0} } func (this *linkedList) IsEmpty() bool { return this.count == 0 } func (this *linkedList) Add(value MapParent) { node := new(linkNode) node.value = value this.count++ if this.tail == nil { this.head = node this.tail = node node.next = nil return } this.tail.next = node node.next = nil this.tail = node } func (this *linkedList) Delete() *linkNode { if this.head == nil { return nil } this.count-- if this.head == this.tail { node := this.head this.head = nil this.tail = nil return node } node := this.head this.head = this.head.next return node } type Queue struct { link *linkedList } func NewQueue() *Queue { return &Queue{link: NewLinkList()} } //加入队列 func (this *Queue) Put(value MapParent) { this.link.Add(value) } //pop出队列 func (this *Queue) Pop() *linkNode { return this.link.Delete() } //获得队列的长度 func (this *Queue) GetSize() int { return this.link.count } func (this *Queue) IsEmpty() bool { return this.GetSize() == 0 } // 初始化队列 var queue *Queue = NewQueue() type MapParent struct { parent int son int } // 查找最短路径 func (this *Graph) BfsNearPath(start, end int) []int { if start == end { return []int{start} } // 用来存储的是找到终点之前所有出队列的元素 mapParent := make([]MapParent, 0) // 根据初始节点 把他的邻接点放入队列 node := this.vertNode[start] for node.edgeTableNode != nil { index := node.edgeTableNode.index queue.Put(MapParent{parent: start, son: index}) node.edgeTableNode = node.edgeTableNode.edgeTableNode } var find = false for !queue.IsEmpty() { // 检测队列的元素 node := queue.Pop() // 已经被查看过得元素不需要再次查询 防止出现死循环 if this.visted[node.value.son] == 1 { continue } // 记录出队的元素 mapParent = append(mapParent, node.value) if node.value.son == end { find = true break } // 节点的邻接点入队列 grapNode := this.vertNode[node.value.son] for grapNode.edgeTableNode != nil { index := grapNode.edgeTableNode.index // 记录父节点与子节点的映射 queue.Put(MapParent{parent: 
node.value.son, son: index}) grapNode.edgeTableNode = grapNode.edgeTableNode.edgeTableNode } // 记录被查询过 this.visted = append(this.visted, node.value.son) } // 逆序查找路径 path := []int{} if find == true { path = append(path, end) son := end for i := len(mapParent) - 1; i >= 0; i-- { if son == mapParent[i].son { path = append(path, mapParent[i].parent) son = mapParent[i].parent } } } // 打印查找的路径 fmt.Println(path) return path } // 拓扑排序 func (this *Graph) TuoPuSort() ([]int, bool) { mapQianXIang := make(map[int]int) sortList := []int{} for i := 0; i < this.vertNum; i++ { node := this.vertNode[i] if node == nil { continue } // 获取邻接链表 edgeNode := node.edgeTableNode // 记录每个子节点的被指向的次数 for edgeNode != nil { mapQianXIang[edgeNode.index]++ edgeNode = edgeNode.edgeTableNode } } // 拓扑排序就是把没有前项节点的先放入队列中 for i := 0; i < this.vertNum; i++ { if mapQianXIang[i] == 0 { queue.Put(MapParent{parent: -1, son: i}) } } for !queue.IsEmpty() { node := queue.Pop() sortList = append(sortList, node.value.son) // 获取邻接链表 edgeNode := this.vertNode[node.value.son].edgeTableNode // 递减指向次数 其实就是斩断的过程 for edgeNode != nil { mapQianXIang[edgeNode.index]-- if mapQianXIang[edgeNode.index] == 0 { // 忽略parent 我们son queue.Put(MapParent{parent: -1
this.visted[i] = 1
conditional_block
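`BfsNearPath` above records (parent, son) pairs as vertices leave the queue and then walks those records backwards from the target to rebuild the shortest path by edge count. The sketch below restates that idea in Python with a dict-based parent map; it is a re-expression of the approach, not a translation of the Go listing:

```python
from collections import deque

def bfs_path(adj, start, end):
    """Shortest path by edge count: BFS records which vertex first reached each
    neighbour, then the path is rebuilt backwards from `end`."""
    if start == end:
        return [start]
    parent = {start: None}
    queue = deque([start])
    while queue:
        v = queue.popleft()
        for w in adj[v]:
            if w in parent:          # already visited; skip to avoid cycles
                continue
            parent[w] = v
            if w == end:             # target found; walk the parent map back
                path = [end]
                while parent[path[-1]] is not None:
                    path.append(parent[path[-1]])
                return path[::-1]
            queue.append(w)
    return []                        # no route between start and end

print(bfs_path({0: [1, 2], 1: [3], 2: [3, 4], 3: [4], 4: []}, 0, 4))  # [0, 2, 4]
```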
lesson2-rf_interpretation.py
def get_scores(m, config=None): res = { 'config': [config], 'rmse_train': [rmse(m.predict(X_train), y_train)], 'rmse_dev': [rmse(m.predict(X_valid), y_valid)], 'r2_train': [m.score(X_train, y_train)], 'r2_dev': [m.score(X_valid, y_valid)], 'r2_oob': [None], 'n_trees':[m.n_estimators], 'train_size': [len(y_train)], 'dev_size': [len(y_valid)], } if hasattr(m, 'oob_score_'): res['r2_oob'][0] = m.oob_score_ return pd.DataFrame(res) # - df_raw # # Confidence based on tree variance # For model interpretation, there's no need to use the full dataset on each tree - using a subset will be both faster, and also provide better interpretability (since an overfit model will not provide much variance across trees). set_rf_samples(50000) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) results = get_scores(m, 'baseline-subsample-tuning') results # We saw how the model averages predictions across the trees to get an estimate - but how can we know the confidence of the estimate? One simple way is to use the standard deviation of predictions, instead of just the mean. This tells us the *relative* confidence of predictions - that is, for rows where the trees give very different results, you would want to be more cautious of using those results, compared to cases where they are more consistent. Using the same example as in the last lesson when we looked at bagging: # %time preds = np.stack([t.predict(X_valid) for t in m.estimators_]) np.mean(preds[:,0]), np.std(preds[:,0]) # When we use python to loop through trees like this, we're calculating each in series, which is slow! We can use parallel processing to speed things up: def get_preds(t): return t.predict(X_valid) # %time preds = np.stack(parallel_trees(m, get_preds)) np.mean(preds[:,0]), np.std(preds[:,0]) # We can see that different trees are giving different estimates this this auction. In order to see how prediction confidence varies, we can add this into our dataset. x = raw_valid.copy() x['pred_std'] = np.std(preds, axis=0) x['pred'] = np.mean(preds, axis=0) x.Enclosure.value_counts().plot.barh(); flds = ['Enclosure', 'SalePrice', 'pred', 'pred_std'] enc_summ = x[flds].groupby('Enclosure', as_index=False).mean() enc_summ enc_summ = enc_summ[~pd.isnull(enc_summ.SalePrice)] enc_summ.plot('Enclosure', 'SalePrice', 'barh', xlim=(0,11)); enc_summ.plot('Enclosure', 'pred', 'barh', xerr='pred_std', alpha=0.6, xlim=(0,11)); # *Question*: Why are the predictions nearly exactly right, but the error bars are quite wide? raw_valid.ProductSize.value_counts().plot.barh(); flds = ['ProductSize', 'SalePrice', 'pred', 'pred_std'] summ = x[flds].groupby(flds[0]).mean() summ (summ.pred_std/summ.pred).sort_values(ascending=False) # # Feature importance # It's not normally enough to just to know that a model can make accurate predictions - we also want to know *how* it's making predictions. The most important way to see this is with *feature importance*. 
fi = rf_feat_importance(m, df_trn); fi[:10] fi.plot('cols', 'imp', figsize=(10,6), legend=False); def plot_fi(fi): return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False) plot_fi(fi[:30]); to_keep = fi[fi.imp>0.005].cols; len(to_keep) df_keep = df_trn[to_keep].copy() X_train, X_valid = split_vals(df_keep, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) tmp = get_scores(m, 'fi') tmp results = pd.concat([tmp, results]) results[::-1] fi = rf_feat_importance(m, df_keep) plot_fi(fi); # ## One-hot encoding # proc_df's optional *max_n_cat* argument will turn some categorical variables into new columns. # # For example, the column **ProductSize** which has 6 categories: # # * Large # * Large / Medium # * Medium # * Compact # * Small # * Mini # # gets turned into 6 new columns: # # * ProductSize_Large # * ProductSize_Large / Medium # * ProductSize_Medium # * ProductSize_Compact # * ProductSize_Small # * ProductSize_Mini # # and the column **ProductSize** gets removed. # # It will only happen to columns whose number of categories is no bigger than the value of the *max_n_cat* argument. # # Now some of these new columns may prove to have more important features than in the earlier situation, where all categories were in one column. # + df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7) X_train, X_valid = split_vals(df_trn2, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) # - tmp = get_scores(m, 'one-hot') tmp results = pd.concat([tmp, results]) results[::-1] fi = rf_feat_importance(m, df_trn2) plot_fi(fi[:25]); # # Removing redundant features # One thing that makes this harder to interpret is that there seem to be some variables with very similar meanings. Let's try to remove redundent features. from scipy.cluster import hierarchy as hc corr = np.round(scipy.stats.spearmanr(df_keep).correlation, 4) corr_condensed = hc.distance.squareform(1-corr) z = hc.linkage(corr_condensed, method='average') fig = plt.figure(figsize=(16,10)) dendrogram = hc.dendrogram(z, labels=df_keep.columns, orientation='left', leaf_font_size=16) plt.show() sorted(list(df_keep.columns)) len(corr), len(corr[0]) # Let's try removing some of these related features to see if the model can be simplified without impacting the accuracy. def get_oob(df): m = RandomForestRegressor(n_estimators=30, min_samples_leaf=5, max_features=0.6, n_jobs=-1, oob_score=True) x, _ = split_vals(df, n_trn) m.fit(x, y_train) return m.oob_score_ # Here's our baseline. get_oob(df_keep) # Now we try removing each variable one at a time. for c in ('saleYear', 'saleElapsed', 'fiModelDesc', 'fiBaseModel', 'Grouser_Tracks', 'Coupler_System'): print(c, get_oob(df_keep.drop(c, axis=1))) # It looks like we can try one from each group for removal. Let's see what that does. to_drop = ['saleYear', 'fiBaseModel', 'Grouser_Tracks'] get_oob(df_keep.drop(to_drop, axis=1)) # Looking good! Let's use this dataframe from here. We'll save the list of columns so we can reuse it later. df_keep.drop(to_drop, axis=1, inplace=True) X_train, X_valid = split_vals(df_keep, n_trn) # + # np.save('tmp/keep_cols.npy', np.array(df_keep.columns)) # - # keep_cols = np.load('tmp/keep_cols.npy') # df_keep = df_trn[keep_cols] keep_cols = df_keep.columns sorted(list(keep_cols)) # And let's see how this model looks on the full dataset. 
reset_rf_samples() m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) tmp = get_scores(m, 'baseline-slow-dedup') tmp results = pd.concat([tmp, results]) results[::-1] # # Partial dependence from pdpbox import pdp from plotnine import * set_rf_samples(50000) # This next analysis will be a little easier if we use the 1-hot encoded categorical variables, so let's load them up again. df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7) X_train, X_valid = split_vals(df_trn2, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6,
return math.sqrt(((x-y)**2).mean())
identifier_body
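The notebook text in this row explains using the spread of per-tree predictions as a relative confidence measure: stack each tree's predictions and take the standard deviation alongside the mean. A self-contained sketch of that idea on synthetic data; the lesson's bulldozer dataframes are not reproduced here:

```python
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor

# Synthetic stand-in for the lesson's training/validation split.
X, y = make_regression(n_samples=2000, n_features=10, noise=10.0, random_state=0)
X_train, X_valid, y_train, y_valid = X[:1500], X[1500:], y[:1500], y[1500:]

m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, n_jobs=-1)
m.fit(X_train, y_train)

# One row of predictions per tree; a wide spread means low confidence for that row.
preds = np.stack([t.predict(X_valid) for t in m.estimators_])   # shape (n_trees, n_rows)
print(np.mean(preds[:, 0]), np.std(preds[:, 0]))                 # estimate and spread for row 0
```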
lesson2-rf_interpretation.py
m.fit(X_train, y_train) results = get_scores(m, 'baseline-subsample-tuning') results # We saw how the model averages predictions across the trees to get an estimate - but how can we know the confidence of the estimate? One simple way is to use the standard deviation of predictions, instead of just the mean. This tells us the *relative* confidence of predictions - that is, for rows where the trees give very different results, you would want to be more cautious of using those results, compared to cases where they are more consistent. Using the same example as in the last lesson when we looked at bagging: # %time preds = np.stack([t.predict(X_valid) for t in m.estimators_]) np.mean(preds[:,0]), np.std(preds[:,0]) # When we use python to loop through trees like this, we're calculating each in series, which is slow! We can use parallel processing to speed things up: def get_preds(t): return t.predict(X_valid) # %time preds = np.stack(parallel_trees(m, get_preds)) np.mean(preds[:,0]), np.std(preds[:,0]) # We can see that different trees are giving different estimates this this auction. In order to see how prediction confidence varies, we can add this into our dataset. x = raw_valid.copy() x['pred_std'] = np.std(preds, axis=0) x['pred'] = np.mean(preds, axis=0) x.Enclosure.value_counts().plot.barh(); flds = ['Enclosure', 'SalePrice', 'pred', 'pred_std'] enc_summ = x[flds].groupby('Enclosure', as_index=False).mean() enc_summ enc_summ = enc_summ[~pd.isnull(enc_summ.SalePrice)] enc_summ.plot('Enclosure', 'SalePrice', 'barh', xlim=(0,11)); enc_summ.plot('Enclosure', 'pred', 'barh', xerr='pred_std', alpha=0.6, xlim=(0,11)); # *Question*: Why are the predictions nearly exactly right, but the error bars are quite wide? raw_valid.ProductSize.value_counts().plot.barh(); flds = ['ProductSize', 'SalePrice', 'pred', 'pred_std'] summ = x[flds].groupby(flds[0]).mean() summ (summ.pred_std/summ.pred).sort_values(ascending=False) # # Feature importance # It's not normally enough to just to know that a model can make accurate predictions - we also want to know *how* it's making predictions. The most important way to see this is with *feature importance*. fi = rf_feat_importance(m, df_trn); fi[:10] fi.plot('cols', 'imp', figsize=(10,6), legend=False); def plot_fi(fi): return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False) plot_fi(fi[:30]); to_keep = fi[fi.imp>0.005].cols; len(to_keep) df_keep = df_trn[to_keep].copy() X_train, X_valid = split_vals(df_keep, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) tmp = get_scores(m, 'fi') tmp results = pd.concat([tmp, results]) results[::-1] fi = rf_feat_importance(m, df_keep) plot_fi(fi); # ## One-hot encoding # proc_df's optional *max_n_cat* argument will turn some categorical variables into new columns. # # For example, the column **ProductSize** which has 6 categories: # # * Large # * Large / Medium # * Medium # * Compact # * Small # * Mini # # gets turned into 6 new columns: # # * ProductSize_Large # * ProductSize_Large / Medium # * ProductSize_Medium # * ProductSize_Compact # * ProductSize_Small # * ProductSize_Mini # # and the column **ProductSize** gets removed. # # It will only happen to columns whose number of categories is no bigger than the value of the *max_n_cat* argument. # # Now some of these new columns may prove to have more important features than in the earlier situation, where all categories were in one column. 
# + df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7) X_train, X_valid = split_vals(df_trn2, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) # - tmp = get_scores(m, 'one-hot') tmp results = pd.concat([tmp, results]) results[::-1] fi = rf_feat_importance(m, df_trn2) plot_fi(fi[:25]); # # Removing redundant features # One thing that makes this harder to interpret is that there seem to be some variables with very similar meanings. Let's try to remove redundent features. from scipy.cluster import hierarchy as hc corr = np.round(scipy.stats.spearmanr(df_keep).correlation, 4) corr_condensed = hc.distance.squareform(1-corr) z = hc.linkage(corr_condensed, method='average') fig = plt.figure(figsize=(16,10)) dendrogram = hc.dendrogram(z, labels=df_keep.columns, orientation='left', leaf_font_size=16) plt.show() sorted(list(df_keep.columns)) len(corr), len(corr[0]) # Let's try removing some of these related features to see if the model can be simplified without impacting the accuracy. def get_oob(df): m = RandomForestRegressor(n_estimators=30, min_samples_leaf=5, max_features=0.6, n_jobs=-1, oob_score=True) x, _ = split_vals(df, n_trn) m.fit(x, y_train) return m.oob_score_ # Here's our baseline. get_oob(df_keep) # Now we try removing each variable one at a time. for c in ('saleYear', 'saleElapsed', 'fiModelDesc', 'fiBaseModel', 'Grouser_Tracks', 'Coupler_System'): print(c, get_oob(df_keep.drop(c, axis=1))) # It looks like we can try one from each group for removal. Let's see what that does. to_drop = ['saleYear', 'fiBaseModel', 'Grouser_Tracks'] get_oob(df_keep.drop(to_drop, axis=1)) # Looking good! Let's use this dataframe from here. We'll save the list of columns so we can reuse it later. df_keep.drop(to_drop, axis=1, inplace=True) X_train, X_valid = split_vals(df_keep, n_trn) # + # np.save('tmp/keep_cols.npy', np.array(df_keep.columns)) # - # keep_cols = np.load('tmp/keep_cols.npy') # df_keep = df_trn[keep_cols] keep_cols = df_keep.columns sorted(list(keep_cols)) # And let's see how this model looks on the full dataset. reset_rf_samples() m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) tmp = get_scores(m, 'baseline-slow-dedup') tmp results = pd.concat([tmp, results]) results[::-1] # # Partial dependence from pdpbox import pdp from plotnine import * set_rf_samples(50000) # This next analysis will be a little easier if we use the 1-hot encoded categorical variables, so let's load them up again. df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7) X_train, X_valid = split_vals(df_trn2, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1) m.fit(X_train, y_train); plot_fi(rf_feat_importance(m, df_trn2)[:10]); df_raw.plot('YearMade', 'saleElapsed', 'scatter', alpha=0.01, figsize=(10,8)); x_all = get_sample(df_raw[df_raw.YearMade>1930], 500) ggplot(x_all, aes('YearMade', 'SalePrice'))+stat_smooth(se=True, method='loess')
def plot_pdp_old(feat, clusters=None, feat_name=None): feat_name = feat_name or feat p = pdp.pdp_isolate(m, x, feat) return pdp.pdp_plot(p, feat_name, plot_lines=True, cluster=clusters is not None, n_cluster_centers=clusters) def plot_pdp(feat, clusters = None, feat_name = None): feat_name = feat_name or feat p = pdp.pdp_isolate(m, x, feature = feat, model_features = x.columns) return pdp.pdp_plot(p, feat_name, plot_lines = True, cluster = clusters is not None
x = get_sample(X_train[X_train.YearMade>1930], 500)
random_line_split
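This row's "Removing redundant features" section rank-correlates the columns with `scipy.stats.spearmanr` and clusters them with a dendrogram so near-duplicate features sit together. A runnable sketch on a small synthetic frame; the column names only loosely echo the lesson's, and the near-copy is planted deliberately:

```python
import numpy as np
import pandas as pd
import scipy.stats
from scipy.cluster import hierarchy as hc
from scipy.spatial.distance import squareform
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "saleYear": rng.integers(1990, 2012, 500),
    "YearMade": rng.integers(1960, 2012, 500),
    "MachineHours": rng.normal(3000, 800, 500),
})
df["saleElapsed"] = df["saleYear"] * 365 + rng.integers(0, 365, 500)  # nearly redundant with saleYear

# Spearman rank correlation -> distance -> average-linkage dendrogram.
corr = np.round(scipy.stats.spearmanr(df).correlation, 4)
z = hc.linkage(squareform(1 - corr), method="average")
hc.dendrogram(z, labels=df.columns.tolist(), orientation="left", leaf_font_size=12)
plt.show()
```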
lesson2-rf_interpretation.py
(m, config=None): res = { 'config': [config], 'rmse_train': [rmse(m.predict(X_train), y_train)], 'rmse_dev': [rmse(m.predict(X_valid), y_valid)], 'r2_train': [m.score(X_train, y_train)], 'r2_dev': [m.score(X_valid, y_valid)], 'r2_oob': [None], 'n_trees':[m.n_estimators], 'train_size': [len(y_train)], 'dev_size': [len(y_valid)], } if hasattr(m, 'oob_score_'): res['r2_oob'][0] = m.oob_score_ return pd.DataFrame(res) # - df_raw # # Confidence based on tree variance # For model interpretation, there's no need to use the full dataset on each tree - using a subset will be both faster, and also provide better interpretability (since an overfit model will not provide much variance across trees). set_rf_samples(50000) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) results = get_scores(m, 'baseline-subsample-tuning') results # We saw how the model averages predictions across the trees to get an estimate - but how can we know the confidence of the estimate? One simple way is to use the standard deviation of predictions, instead of just the mean. This tells us the *relative* confidence of predictions - that is, for rows where the trees give very different results, you would want to be more cautious of using those results, compared to cases where they are more consistent. Using the same example as in the last lesson when we looked at bagging: # %time preds = np.stack([t.predict(X_valid) for t in m.estimators_]) np.mean(preds[:,0]), np.std(preds[:,0]) # When we use python to loop through trees like this, we're calculating each in series, which is slow! We can use parallel processing to speed things up: def get_preds(t): return t.predict(X_valid) # %time preds = np.stack(parallel_trees(m, get_preds)) np.mean(preds[:,0]), np.std(preds[:,0]) # We can see that different trees are giving different estimates this this auction. In order to see how prediction confidence varies, we can add this into our dataset. x = raw_valid.copy() x['pred_std'] = np.std(preds, axis=0) x['pred'] = np.mean(preds, axis=0) x.Enclosure.value_counts().plot.barh(); flds = ['Enclosure', 'SalePrice', 'pred', 'pred_std'] enc_summ = x[flds].groupby('Enclosure', as_index=False).mean() enc_summ enc_summ = enc_summ[~pd.isnull(enc_summ.SalePrice)] enc_summ.plot('Enclosure', 'SalePrice', 'barh', xlim=(0,11)); enc_summ.plot('Enclosure', 'pred', 'barh', xerr='pred_std', alpha=0.6, xlim=(0,11)); # *Question*: Why are the predictions nearly exactly right, but the error bars are quite wide? raw_valid.ProductSize.value_counts().plot.barh(); flds = ['ProductSize', 'SalePrice', 'pred', 'pred_std'] summ = x[flds].groupby(flds[0]).mean() summ (summ.pred_std/summ.pred).sort_values(ascending=False) # # Feature importance # It's not normally enough to just to know that a model can make accurate predictions - we also want to know *how* it's making predictions. The most important way to see this is with *feature importance*. 
fi = rf_feat_importance(m, df_trn); fi[:10] fi.plot('cols', 'imp', figsize=(10,6), legend=False); def plot_fi(fi): return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False) plot_fi(fi[:30]); to_keep = fi[fi.imp>0.005].cols; len(to_keep) df_keep = df_trn[to_keep].copy() X_train, X_valid = split_vals(df_keep, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) tmp = get_scores(m, 'fi') tmp results = pd.concat([tmp, results]) results[::-1] fi = rf_feat_importance(m, df_keep) plot_fi(fi); # ## One-hot encoding # proc_df's optional *max_n_cat* argument will turn some categorical variables into new columns. # # For example, the column **ProductSize** which has 6 categories: # # * Large # * Large / Medium # * Medium # * Compact # * Small # * Mini # # gets turned into 6 new columns: # # * ProductSize_Large # * ProductSize_Large / Medium # * ProductSize_Medium # * ProductSize_Compact # * ProductSize_Small # * ProductSize_Mini # # and the column **ProductSize** gets removed. # # It will only happen to columns whose number of categories is no bigger than the value of the *max_n_cat* argument. # # Now some of these new columns may prove to have more important features than in the earlier situation, where all categories were in one column. # + df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7) X_train, X_valid = split_vals(df_trn2, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) # - tmp = get_scores(m, 'one-hot') tmp results = pd.concat([tmp, results]) results[::-1] fi = rf_feat_importance(m, df_trn2) plot_fi(fi[:25]); # # Removing redundant features # One thing that makes this harder to interpret is that there seem to be some variables with very similar meanings. Let's try to remove redundent features. from scipy.cluster import hierarchy as hc corr = np.round(scipy.stats.spearmanr(df_keep).correlation, 4) corr_condensed = hc.distance.squareform(1-corr) z = hc.linkage(corr_condensed, method='average') fig = plt.figure(figsize=(16,10)) dendrogram = hc.dendrogram(z, labels=df_keep.columns, orientation='left', leaf_font_size=16) plt.show() sorted(list(df_keep.columns)) len(corr), len(corr[0]) # Let's try removing some of these related features to see if the model can be simplified without impacting the accuracy. def get_oob(df): m = RandomForestRegressor(n_estimators=30, min_samples_leaf=5, max_features=0.6, n_jobs=-1, oob_score=True) x, _ = split_vals(df, n_trn) m.fit(x, y_train) return m.oob_score_ # Here's our baseline. get_oob(df_keep) # Now we try removing each variable one at a time. for c in ('saleYear', 'saleElapsed', 'fiModelDesc', 'fiBaseModel', 'Grouser_Tracks', 'Coupler_System'): print(c, get_oob(df_keep.drop(c, axis=1))) # It looks like we can try one from each group for removal. Let's see what that does. to_drop = ['saleYear', 'fiBaseModel', 'Grouser_Tracks'] get_oob(df_keep.drop(to_drop, axis=1)) # Looking good! Let's use this dataframe from here. We'll save the list of columns so we can reuse it later. df_keep.drop(to_drop, axis=1, inplace=True) X_train, X_valid = split_vals(df_keep, n_trn) # + # np.save('tmp/keep_cols.npy', np.array(df_keep.columns)) # - # keep_cols = np.load('tmp/keep_cols.npy') # df_keep = df_trn[keep_cols] keep_cols = df_keep.columns sorted(list(keep_cols)) # And let's see how this model looks on the full dataset. 
reset_rf_samples() m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) tmp = get_scores(m, 'baseline-slow-dedup') tmp results = pd.concat([tmp, results]) results[::-1] # # Partial dependence from pdpbox import pdp from plotnine import * set_rf_samples(50000) # This next analysis will be a little easier if we use the 1-hot encoded categorical variables, so let's load them up again. df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7) X_train, X_valid = split_vals(df_trn2, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1) m.fit(X_train, y_train);
get_scores
identifier_name
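The `get_oob` loop in this row drops one candidate column at a time, refits with `oob_score=True`, and keeps the drop only if the out-of-bag R² barely moves. A self-contained sketch of that comparison on synthetic data; the column names and sizes are illustrative, not the lesson's:

```python
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(2000, 4)), columns=["a", "b", "b_copy", "c"])
df["b_copy"] = df["b"] + rng.normal(scale=0.01, size=2000)   # deliberately redundant feature
y = 3 * df["a"] + 2 * df["b"] + rng.normal(size=2000)

def get_oob(frame):
    # Same hyperparameters as the lesson's get_oob; returns the out-of-bag R^2.
    m = RandomForestRegressor(n_estimators=30, min_samples_leaf=5,
                              max_features=0.6, n_jobs=-1, oob_score=True)
    m.fit(frame, y)
    return m.oob_score_

print("baseline", get_oob(df))
for c in ["b", "b_copy"]:
    print(c, get_oob(df.drop(c, axis=1)))   # dropping the copy should cost almost nothing
```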
lesson2-rf_interpretation.py
return pd.DataFrame(res) # - df_raw # # Confidence based on tree variance # For model interpretation, there's no need to use the full dataset on each tree - using a subset will be both faster, and also provide better interpretability (since an overfit model will not provide much variance across trees). set_rf_samples(50000) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) results = get_scores(m, 'baseline-subsample-tuning') results # We saw how the model averages predictions across the trees to get an estimate - but how can we know the confidence of the estimate? One simple way is to use the standard deviation of predictions, instead of just the mean. This tells us the *relative* confidence of predictions - that is, for rows where the trees give very different results, you would want to be more cautious of using those results, compared to cases where they are more consistent. Using the same example as in the last lesson when we looked at bagging: # %time preds = np.stack([t.predict(X_valid) for t in m.estimators_]) np.mean(preds[:,0]), np.std(preds[:,0]) # When we use python to loop through trees like this, we're calculating each in series, which is slow! We can use parallel processing to speed things up: def get_preds(t): return t.predict(X_valid) # %time preds = np.stack(parallel_trees(m, get_preds)) np.mean(preds[:,0]), np.std(preds[:,0]) # We can see that different trees are giving different estimates this this auction. In order to see how prediction confidence varies, we can add this into our dataset. x = raw_valid.copy() x['pred_std'] = np.std(preds, axis=0) x['pred'] = np.mean(preds, axis=0) x.Enclosure.value_counts().plot.barh(); flds = ['Enclosure', 'SalePrice', 'pred', 'pred_std'] enc_summ = x[flds].groupby('Enclosure', as_index=False).mean() enc_summ enc_summ = enc_summ[~pd.isnull(enc_summ.SalePrice)] enc_summ.plot('Enclosure', 'SalePrice', 'barh', xlim=(0,11)); enc_summ.plot('Enclosure', 'pred', 'barh', xerr='pred_std', alpha=0.6, xlim=(0,11)); # *Question*: Why are the predictions nearly exactly right, but the error bars are quite wide? raw_valid.ProductSize.value_counts().plot.barh(); flds = ['ProductSize', 'SalePrice', 'pred', 'pred_std'] summ = x[flds].groupby(flds[0]).mean() summ (summ.pred_std/summ.pred).sort_values(ascending=False) # # Feature importance # It's not normally enough to just to know that a model can make accurate predictions - we also want to know *how* it's making predictions. The most important way to see this is with *feature importance*. fi = rf_feat_importance(m, df_trn); fi[:10] fi.plot('cols', 'imp', figsize=(10,6), legend=False); def plot_fi(fi): return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False) plot_fi(fi[:30]); to_keep = fi[fi.imp>0.005].cols; len(to_keep) df_keep = df_trn[to_keep].copy() X_train, X_valid = split_vals(df_keep, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) tmp = get_scores(m, 'fi') tmp results = pd.concat([tmp, results]) results[::-1] fi = rf_feat_importance(m, df_keep) plot_fi(fi); # ## One-hot encoding # proc_df's optional *max_n_cat* argument will turn some categorical variables into new columns. 
# # For example, the column **ProductSize** which has 6 categories: # # * Large # * Large / Medium # * Medium # * Compact # * Small # * Mini # # gets turned into 6 new columns: # # * ProductSize_Large # * ProductSize_Large / Medium # * ProductSize_Medium # * ProductSize_Compact # * ProductSize_Small # * ProductSize_Mini # # and the column **ProductSize** gets removed. # # It will only happen to columns whose number of categories is no bigger than the value of the *max_n_cat* argument. # # Now some of these new columns may prove to have more important features than in the earlier situation, where all categories were in one column. # + df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7) X_train, X_valid = split_vals(df_trn2, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) # - tmp = get_scores(m, 'one-hot') tmp results = pd.concat([tmp, results]) results[::-1] fi = rf_feat_importance(m, df_trn2) plot_fi(fi[:25]); # # Removing redundant features # One thing that makes this harder to interpret is that there seem to be some variables with very similar meanings. Let's try to remove redundent features. from scipy.cluster import hierarchy as hc corr = np.round(scipy.stats.spearmanr(df_keep).correlation, 4) corr_condensed = hc.distance.squareform(1-corr) z = hc.linkage(corr_condensed, method='average') fig = plt.figure(figsize=(16,10)) dendrogram = hc.dendrogram(z, labels=df_keep.columns, orientation='left', leaf_font_size=16) plt.show() sorted(list(df_keep.columns)) len(corr), len(corr[0]) # Let's try removing some of these related features to see if the model can be simplified without impacting the accuracy. def get_oob(df): m = RandomForestRegressor(n_estimators=30, min_samples_leaf=5, max_features=0.6, n_jobs=-1, oob_score=True) x, _ = split_vals(df, n_trn) m.fit(x, y_train) return m.oob_score_ # Here's our baseline. get_oob(df_keep) # Now we try removing each variable one at a time. for c in ('saleYear', 'saleElapsed', 'fiModelDesc', 'fiBaseModel', 'Grouser_Tracks', 'Coupler_System'): print(c, get_oob(df_keep.drop(c, axis=1))) # It looks like we can try one from each group for removal. Let's see what that does. to_drop = ['saleYear', 'fiBaseModel', 'Grouser_Tracks'] get_oob(df_keep.drop(to_drop, axis=1)) # Looking good! Let's use this dataframe from here. We'll save the list of columns so we can reuse it later. df_keep.drop(to_drop, axis=1, inplace=True) X_train, X_valid = split_vals(df_keep, n_trn) # + # np.save('tmp/keep_cols.npy', np.array(df_keep.columns)) # - # keep_cols = np.load('tmp/keep_cols.npy') # df_keep = df_trn[keep_cols] keep_cols = df_keep.columns sorted(list(keep_cols)) # And let's see how this model looks on the full dataset. reset_rf_samples() m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) m.fit(X_train, y_train) tmp = get_scores(m, 'baseline-slow-dedup') tmp results = pd.concat([tmp, results]) results[::-1] # # Partial dependence from pdpbox import pdp from plotnine import * set_rf_samples(50000) # This next analysis will be a little easier if we use the 1-hot encoded categorical variables, so let's load them up again. 
df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7) X_train, X_valid = split_vals(df_trn2, n_trn) m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1) m.fit(X_train, y_train); plot_fi(rf_feat_importance(m, df_trn2)[:10]); df_raw.plot('YearMade', 'saleElapsed', 'scatter', alpha=0.01, figsize=(10,8)); x_all = get_sample(df_raw[df_raw.YearMade>1930], 500) ggplot(x_all, aes('YearMade', 'SalePrice'))+stat_smooth(se=True, method='loess') x = get_sample(X_train[X_train.YearMade>1930], 500) def plot_pdp_old(feat, clusters=None, feat_name=None): feat_name =
res['r2_oob'][0] = m.oob_score_
conditional_block
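The notebook text in the preceding rows measures prediction confidence by stacking each tree's predictions and taking the per-row standard deviation, using fastai's `parallel_trees` to avoid the slow serial Python loop. As a rough, library-free stand-in for that idea (not the fastai implementation), the same computation can be sketched with `concurrent.futures`; `m` and `X_valid` are assumed to be the fitted forest and validation frame from the notebook.

from concurrent.futures import ThreadPoolExecutor

import numpy as np

def tree_predictions(m, X, n_workers=8):
    # Run every tree of the fitted forest over X concurrently and stack the
    # results into an (n_trees, n_rows) array.
    with ThreadPoolExecutor(max_workers=n_workers) as ex:
        per_tree = list(ex.map(lambda t: t.predict(X), m.estimators_))
    return np.stack(per_tree)

preds = tree_predictions(m, X_valid)
pred_mean = preds.mean(axis=0)  # the forest's prediction (mean over trees)
pred_std = preds.std(axis=0)    # large std = the trees disagree = lower confidence

Threads keep the sketch simple; swapping in a process pool would sidestep the GIL entirely at the cost of pickling each tree to the worker processes.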
app.js
.title) { map.panTo(self.mapMarkers()[key].marker.position); map.setZoom(14); infowindow.setContent(self.mapMarkers()[key].content); infowindow.open(map, self.mapMarkers()[key].marker); map.panBy(0, -150); self.mobileShow(false); self.searchStatus(''); } } }; // AutoComplete input of city and state this.doAutoComplete = function() { var inputLocation = new google.maps.places.Autocomplete( (document.getElementById('autocomplete')), { types: ['geocode'] }); google.maps.event.addListener(inputLocation, 'place_changed', function() { var place = inputLocation.getPlace(); inputLan= place.geometry.location.lat(); inputLon = place.geometry.location.lng(); }); /* if you use the event binding to capture the keypress event of an input tag, the browser will only call your handler function and will not add the value of the key to the input element’s value. if you do want to let the default action proceed, just return true from your event handler function.*/ return true; } // Handle the input given when user searches for events in a location this.processLocationSearch = function() { //Need to use a jQuery selector instead of KO binding because this field is affected by the autocomplete plugin. The value inputted does not //seem to register via KO. self.searchStatus(''); self.searchStatus('Searching...'); var radius = 30; //var category= 25; //https://api.meetup.com/find/groups?key=6f4c634b253677752b591d6a67327&lat=38.5815719&lon=-121.49439960000001&radius=30&order=members var combine = "lat=" + inputLan + "&lon=" + inputLon + "&radius=" + radius; //clear current events and markers clearMarkers(); self.meetupEvents([]); self.filteredList([]); self.eventStatus('Loading...'); self.loadImg('<img src="img/ajax-loader.gif">'); //perform new meetup search and center map to new location getMeetups(combine); }; //Compare search keyword against event tag of all events. Return a filtered list and map markers of request. this.filterResults = function() { var searchWord = self.filterKeyword().toLowerCase(); var array = self.meetupEvents(); if(!searchWord) { return; } else { //first clear out all entries in the filteredList array self.filteredList([]); //Loop through the meetupEvents array and see if the search keyword matches //with event tag in the list, if so push that object to the filteredList //array and place the marker on the map. for(var i=0; i < array.length; i++) { if(array[i].eventTag.toLowerCase().indexOf(searchWord) != -1) { self.mapMarkers()[i].marker.setMap(map); self.filteredList.push(array[i]); } else self.mapMarkers()[i].marker.setMap(null); } self.eventStatus(self.numEvents() + ' events found for ' + self.filterKeyword()); } }; //Clear keyword from filter and show all active events in current location again. this.clearFilter = function() { self.filteredList(self.meetupEvents()); self.eventStatus(self.numEvents() + ' events found near ' + self.searchLocation()); self.filterKeyword(''); for(var i = 0; i < self.mapMarkers().length; i++) { self.mapMarkers()[i].marker.setMap(map); } }; //toggles the list view this.listToggle = function() { if(self.toggleSymbol() === 'hide') { self.toggleSymbol('show'); } else { self.toggleSymbol('hide'); } }; //Error handling if Google Maps fails to load this.mapRequestTimeout = setTimeout(function() { $('#map-canvas').html('We had trouble loading Google Maps. Please refresh your browser and try again.'); }, 8000); // Initialize Google map, perform initial events search on a city. 
function mapInitialize() { city = new google.maps.LatLng(37.70, -122.10); map = new google.maps.Map(document.getElementById('map-canvas'), { center: city, zoom: 10, zoomControlOptions: { position: google.maps.ControlPosition.LEFT_CENTER, style: google.maps.ZoomControlStyle.SMALL }, streetViewControlOptions: { position: google.maps.ControlPosition.LEFT_BOTTOM }, mapTypeControl: false, panControl: false }); clearTimeout(self.mapRequestTimeout); google.maps.event.addDomListener(window, "resize", function() { var center = map.getCenter(); google.maps.event.trigger(map, "resize"); map.setCenter(center); }); infowindow = new google.maps.InfoWindow({maxWidth: 300}); } // Use API to get events data and store the info as objects in an array function getMeetups(location) { var meetupUrl = "https://api.meetup.com/find/groups?key=6f4c634b253677752b591d6a67327&"; var order = "&order=members"; var query = meetupUrl + location + order; $.ajax({ url: query, dataType: 'jsonp', success: function(data) { console.log(data); var len = data.data.length; map.panTo({lat: data.data[0].lat, lng: data.data[0].lon}); for(var i = 0; i < len; i++) { var info = data.data[i]; //console.log(info); //this line filters out events that don't have a physical location to redeem if (info === undefined || info.name == undefined || info.lat == undefined || info.lon == undefined || info.link == undefined || info.group_photo == undefined|| info.city == undefined || info.state == undefined || info.members == undefined|| info.category == undefined || info.who == undefined) continue; var muName = info.name; var muLat = info.lat; var muLon = info.lon; var muLink = info.link; var muImg = info.group_photo.photo_link; var mucity = info.city; var mustate = info.state; var mumembers = info.members; var mutag = info.category.shortname; var mugroup = info.who; self.meetupEvents.push({ eventName: muName, eventLat: muLat, eventLon: muLon, eventLink: muLink, eventImg: muImg, eventAddress: mucity + ", " + mustate, eventTag: mutag, eventGroup: mugroup }); } self.filteredList(self.meetupEvents()); mapMarkers(self.meetupEvents()); self.searchStatus(''); self.loadImg(''); }, error: function() { self.eventStatus('Oops, something was wrong, please refresh and try again.'); self.loadImg(''); } }); } // Create and place markers and info windows on the map based on data from API function mapMarkers(array) { $.each(array, function(index, value) { var latitude = value.eventLat, longitude = value.eventLon, geoLoc = new google.maps.LatLng(latitude, longitude), thisEvent = value.eventName; var infoContentString = '<div id="infowindow">' + '<img src="' + value.eventImg + '">' + '<h4 class = "infoName">' + value.eventName + '</h4>' + '<div class = "clear"></div>' + '<p class = "infoAddress">' + value.eventAddress + '</p>' + '<p>Group: ' + value.eventGroup + '</p>' + '<p><a href="' + value.eventLink + '" target="_blank">Click to view event details</a></p>' + '</div>'; // Custormize marker var iconBase = 'img/meetup.png'; var marker = new google.maps.Marker({ position: geoLoc, title: thisEvent, map: map, icon: iconBase }); self.mapMarkers.push({marker: marker, content: infoContentString}); self.eventStatus(self.numEvents() + ' events found near ' + self.searchLocation()); //generate infowindows for each event google.maps.event.addListener(marker, 'click', function() { self.searchStatus(''); infowindow.setContent(infoContentString); map.setZoom(12); map.setCenter(marker.position); infowindow.open(map, marker); map.panBy(0, -150); }); }); } // Clear markers from map 
and array function clearMarkers() {
$.each(self.mapMarkers(), function(key, value) { value.marker.setMap(null); }); self.mapMarkers([]); }
identifier_body
app.js
map.panBy(0, -150); self.mobileShow(false); self.searchStatus(''); } } }; // AutoComplete input of city and state this.doAutoComplete = function() { var inputLocation = new google.maps.places.Autocomplete( (document.getElementById('autocomplete')), { types: ['geocode'] }); google.maps.event.addListener(inputLocation, 'place_changed', function() { var place = inputLocation.getPlace(); inputLan= place.geometry.location.lat(); inputLon = place.geometry.location.lng(); }); /* if you use the event binding to capture the keypress event of an input tag, the browser will only call your handler function and will not add the value of the key to the input element’s value. if you do want to let the default action proceed, just return true from your event handler function.*/ return true; } // Handle the input given when user searches for events in a location this.processLocationSearch = function() { //Need to use a jQuery selector instead of KO binding because this field is affected by the autocomplete plugin. The value inputted does not //seem to register via KO. self.searchStatus(''); self.searchStatus('Searching...'); var radius = 30; //var category= 25; //https://api.meetup.com/find/groups?key=6f4c634b253677752b591d6a67327&lat=38.5815719&lon=-121.49439960000001&radius=30&order=members var combine = "lat=" + inputLan + "&lon=" + inputLon + "&radius=" + radius; //clear current events and markers clearMarkers(); self.meetupEvents([]); self.filteredList([]); self.eventStatus('Loading...'); self.loadImg('<img src="img/ajax-loader.gif">'); //perform new meetup search and center map to new location getMeetups(combine); }; //Compare search keyword against event tag of all events. Return a filtered list and map markers of request. this.filterResults = function() { var searchWord = self.filterKeyword().toLowerCase(); var array = self.meetupEvents(); if(!searchWord) { return; } else { //first clear out all entries in the filteredList array self.filteredList([]); //Loop through the meetupEvents array and see if the search keyword matches //with event tag in the list, if so push that object to the filteredList //array and place the marker on the map. for(var i=0; i < array.length; i++) { if(array[i].eventTag.toLowerCase().indexOf(searchWord) != -1) { self.mapMarkers()[i].marker.setMap(map); self.filteredList.push(array[i]); } else self.mapMarkers()[i].marker.setMap(null); } self.eventStatus(self.numEvents() + ' events found for ' + self.filterKeyword()); } }; //Clear keyword from filter and show all active events in current location again. this.clearFilter = function() { self.filteredList(self.meetupEvents()); self.eventStatus(self.numEvents() + ' events found near ' + self.searchLocation()); self.filterKeyword(''); for(var i = 0; i < self.mapMarkers().length; i++) { self.mapMarkers()[i].marker.setMap(map); } }; //toggles the list view this.listToggle = function() { if(self.toggleSymbol() === 'hide') { self.toggleSymbol('show'); } else { self.toggleSymbol('hide'); } }; //Error handling if Google Maps fails to load this.mapRequestTimeout = setTimeout(function() { $('#map-canvas').html('We had trouble loading Google Maps. Please refresh your browser and try again.'); }, 8000); // Initialize Google map, perform initial events search on a city. 
function mapInitialize() { city = new google.maps.LatLng(37.70, -122.10); map = new google.maps.Map(document.getElementById('map-canvas'), { center: city, zoom: 10, zoomControlOptions: { position: google.maps.ControlPosition.LEFT_CENTER, style: google.maps.ZoomControlStyle.SMALL }, streetViewControlOptions: { position: google.maps.ControlPosition.LEFT_BOTTOM }, mapTypeControl: false, panControl: false }); clearTimeout(self.mapRequestTimeout); google.maps.event.addDomListener(window, "resize", function() { var center = map.getCenter(); google.maps.event.trigger(map, "resize"); map.setCenter(center); }); infowindow = new google.maps.InfoWindow({maxWidth: 300}); } // Use API to get events data and store the info as objects in an array function getMeetups(location) { var meetupUrl = "https://api.meetup.com/find/groups?key=6f4c634b253677752b591d6a67327&"; var order = "&order=members"; var query = meetupUrl + location + order; $.ajax({ url: query, dataType: 'jsonp', success: function(data) { console.log(data); var len = data.data.length; map.panTo({lat: data.data[0].lat, lng: data.data[0].lon}); for(var i = 0; i < len; i++) { var info = data.data[i]; //console.log(info); //this line filters out events that don't have a physical location to redeem if (info === undefined || info.name == undefined || info.lat == undefined || info.lon == undefined || info.link == undefined || info.group_photo == undefined|| info.city == undefined || info.state == undefined || info.members == undefined|| info.category == undefined || info.who == undefined) continue; var muName = info.name; var muLat = info.lat; var muLon = info.lon; var muLink = info.link; var muImg = info.group_photo.photo_link; var mucity = info.city; var mustate = info.state; var mumembers = info.members; var mutag = info.category.shortname; var mugroup = info.who; self.meetupEvents.push({ eventName: muName, eventLat: muLat, eventLon: muLon, eventLink: muLink, eventImg: muImg, eventAddress: mucity + ", " + mustate, eventTag: mutag, eventGroup: mugroup }); } self.filteredList(self.meetupEvents()); mapMarkers(self.meetupEvents()); self.searchStatus(''); self.loadImg(''); }, error: function() { self.eventStatus('Oops, something was wrong, please refresh and try again.'); self.loadImg(''); } }); } // Create and place markers and info windows on the map based on data from API function mapMarkers(array) { $.each(array, function(index, value) { var latitude = value.eventLat, longitude = value.eventLon, geoLoc = new google.maps.LatLng(latitude, longitude), thisEvent = value.eventName; var infoContentString = '<div id="infowindow">' + '<img src="' + value.eventImg + '">' + '<h4 class = "infoName">' + value.eventName + '</h4>' + '<div class = "clear"></div>' + '<p class = "infoAddress">' + value.eventAddress + '</p>' + '<p>Group: ' + value.eventGroup + '</p>' + '<p><a href="' + value.eventLink + '" target="_blank">Click to view event details</a></p>' + '</div>'; // Custormize marker var iconBase = 'img/meetup.png'; var marker = new google.maps.Marker({ position: geoLoc, title: thisEvent, map: map, icon: iconBase }); self.mapMarkers.push({marker: marker, content: infoContentString}); self.eventStatus(self.numEvents() + ' events found near ' + self.searchLocation()); //generate infowindows for each event google.maps.event.addListener(marker, 'click', function() { self.searchStatus(''); infowindow.setContent(infoContentString); map.setZoom(12); map.setCenter(marker.position); infowindow.open(map, marker); map.panBy(0, -150); }); }); } // Clear markers from map 
and array function clearMarkers() { $.each(self.mapMarkers(), function(key, value) { value.marker.setMap(null); }); self.mapMarkers([]); } //Manages the toggling of the list view, location centering, and search bar on a mobile device. this.mobileShow = ko.observable(false); this.searchBarShow = ko.observable(true); this.mobileToggleList = function() {
random_line_split
app.js
’s value. if you do want to let the default action proceed, just return true from your event handler function.*/ return true; } // Handle the input given when user searches for events in a location this.processLocationSearch = function() { //Need to use a jQuery selector instead of KO binding because this field is affected by the autocomplete plugin. The value inputted does not //seem to register via KO. self.searchStatus(''); self.searchStatus('Searching...'); var radius = 30; //var category= 25; //https://api.meetup.com/find/groups?key=6f4c634b253677752b591d6a67327&lat=38.5815719&lon=-121.49439960000001&radius=30&order=members var combine = "lat=" + inputLan + "&lon=" + inputLon + "&radius=" + radius; //clear current events and markers clearMarkers(); self.meetupEvents([]); self.filteredList([]); self.eventStatus('Loading...'); self.loadImg('<img src="img/ajax-loader.gif">'); //perform new meetup search and center map to new location getMeetups(combine); }; //Compare search keyword against event tag of all events. Return a filtered list and map markers of request. this.filterResults = function() { var searchWord = self.filterKeyword().toLowerCase(); var array = self.meetupEvents(); if(!searchWord) { return; } else { //first clear out all entries in the filteredList array self.filteredList([]); //Loop through the meetupEvents array and see if the search keyword matches //with event tag in the list, if so push that object to the filteredList //array and place the marker on the map. for(var i=0; i < array.length; i++) { if(array[i].eventTag.toLowerCase().indexOf(searchWord) != -1) { self.mapMarkers()[i].marker.setMap(map); self.filteredList.push(array[i]); } else self.mapMarkers()[i].marker.setMap(null); } self.eventStatus(self.numEvents() + ' events found for ' + self.filterKeyword()); } }; //Clear keyword from filter and show all active events in current location again. this.clearFilter = function() { self.filteredList(self.meetupEvents()); self.eventStatus(self.numEvents() + ' events found near ' + self.searchLocation()); self.filterKeyword(''); for(var i = 0; i < self.mapMarkers().length; i++) { self.mapMarkers()[i].marker.setMap(map); } }; //toggles the list view this.listToggle = function() { if(self.toggleSymbol() === 'hide') { self.toggleSymbol('show'); } else { self.toggleSymbol('hide'); } }; //Error handling if Google Maps fails to load this.mapRequestTimeout = setTimeout(function() { $('#map-canvas').html('We had trouble loading Google Maps. Please refresh your browser and try again.'); }, 8000); // Initialize Google map, perform initial events search on a city. 
function mapInitialize() { city = new google.maps.LatLng(37.70, -122.10); map = new google.maps.Map(document.getElementById('map-canvas'), { center: city, zoom: 10, zoomControlOptions: { position: google.maps.ControlPosition.LEFT_CENTER, style: google.maps.ZoomControlStyle.SMALL }, streetViewControlOptions: { position: google.maps.ControlPosition.LEFT_BOTTOM }, mapTypeControl: false, panControl: false }); clearTimeout(self.mapRequestTimeout); google.maps.event.addDomListener(window, "resize", function() { var center = map.getCenter(); google.maps.event.trigger(map, "resize"); map.setCenter(center); }); infowindow = new google.maps.InfoWindow({maxWidth: 300}); } // Use API to get events data and store the info as objects in an array function getMeetups(location) { var meetupUrl = "https://api.meetup.com/find/groups?key=6f4c634b253677752b591d6a67327&"; var order = "&order=members"; var query = meetupUrl + location + order; $.ajax({ url: query, dataType: 'jsonp', success: function(data) { console.log(data); var len = data.data.length; map.panTo({lat: data.data[0].lat, lng: data.data[0].lon}); for(var i = 0; i < len; i++) { var info = data.data[i]; //console.log(info); //this line filters out events that don't have a physical location to redeem if (info === undefined || info.name == undefined || info.lat == undefined || info.lon == undefined || info.link == undefined || info.group_photo == undefined|| info.city == undefined || info.state == undefined || info.members == undefined|| info.category == undefined || info.who == undefined) continue; var muName = info.name; var muLat = info.lat; var muLon = info.lon; var muLink = info.link; var muImg = info.group_photo.photo_link; var mucity = info.city; var mustate = info.state; var mumembers = info.members; var mutag = info.category.shortname; var mugroup = info.who; self.meetupEvents.push({ eventName: muName, eventLat: muLat, eventLon: muLon, eventLink: muLink, eventImg: muImg, eventAddress: mucity + ", " + mustate, eventTag: mutag, eventGroup: mugroup }); } self.filteredList(self.meetupEvents()); mapMarkers(self.meetupEvents()); self.searchStatus(''); self.loadImg(''); }, error: function() { self.eventStatus('Oops, something was wrong, please refresh and try again.'); self.loadImg(''); } }); } // Create and place markers and info windows on the map based on data from API function mapMarkers(array) { $.each(array, function(index, value) { var latitude = value.eventLat, longitude = value.eventLon, geoLoc = new google.maps.LatLng(latitude, longitude), thisEvent = value.eventName; var infoContentString = '<div id="infowindow">' + '<img src="' + value.eventImg + '">' + '<h4 class = "infoName">' + value.eventName + '</h4>' + '<div class = "clear"></div>' + '<p class = "infoAddress">' + value.eventAddress + '</p>' + '<p>Group: ' + value.eventGroup + '</p>' + '<p><a href="' + value.eventLink + '" target="_blank">Click to view event details</a></p>' + '</div>'; // Custormize marker var iconBase = 'img/meetup.png'; var marker = new google.maps.Marker({ position: geoLoc, title: thisEvent, map: map, icon: iconBase }); self.mapMarkers.push({marker: marker, content: infoContentString}); self.eventStatus(self.numEvents() + ' events found near ' + self.searchLocation()); //generate infowindows for each event google.maps.event.addListener(marker, 'click', function() { self.searchStatus(''); infowindow.setContent(infoContentString); map.setZoom(12); map.setCenter(marker.position); infowindow.open(map, marker); map.panBy(0, -150); }); }); } // Clear markers from map 
and array function clearMarkers() { $.each(self.mapMarkers(), function(key, value) { value.marker.setMap(null); }); self.mapMarkers([]); } //Manages the toggling of the list view, location centering, and search bar on a mobile device. this.mobileShow = ko.observable(false); this.searchBarShow = ko.observable(true); this.mobileToggleList = function() { if(self.mobileShow() === false) { self.mobileShow(true); } else { self.mobileShow(false); } }; this.searchToggle = function() { if(self.searchBarShow() === true) { self.searchBarShow(false); } else { self.searchBarShow(true); } }; //Re-center map to current city if you're viewing events that are further away this.centerMap = function() { infowindow.close(); var currCenter = map.getCenter(); var cityCenter = new google.maps.LatLng(self.currentLat(), self.currentLng()); if((cityCenter.k == currCenter.A) && (cityCenter.D == currCenter.F)) {
self.searchStatus('Map is already centered.'); } e
conditional_block
app.js
('hide'); //Hold the current location's lat & lng - useful for re-centering map this.currentLat = ko.observable(37.39); this.currentLng = ko.observable(-122.40); // When an event on the list is clicked, go to corresponding marker and open its info window. this.goToMarker = function(clickedEvent) { var clickedEventName = clickedEvent.eventName; for(var key in self.mapMarkers()) { if(clickedEventName === self.mapMarkers()[key].marker.title) { map.panTo(self.mapMarkers()[key].marker.position); map.setZoom(14); infowindow.setContent(self.mapMarkers()[key].content); infowindow.open(map, self.mapMarkers()[key].marker); map.panBy(0, -150); self.mobileShow(false); self.searchStatus(''); } } }; // AutoComplete input of city and state this.doAutoComplete = function() { var inputLocation = new google.maps.places.Autocomplete( (document.getElementById('autocomplete')), { types: ['geocode'] }); google.maps.event.addListener(inputLocation, 'place_changed', function() { var place = inputLocation.getPlace(); inputLan= place.geometry.location.lat(); inputLon = place.geometry.location.lng(); }); /* if you use the event binding to capture the keypress event of an input tag, the browser will only call your handler function and will not add the value of the key to the input element’s value. if you do want to let the default action proceed, just return true from your event handler function.*/ return true; } // Handle the input given when user searches for events in a location this.processLocationSearch = function() { //Need to use a jQuery selector instead of KO binding because this field is affected by the autocomplete plugin. The value inputted does not //seem to register via KO. self.searchStatus(''); self.searchStatus('Searching...'); var radius = 30; //var category= 25; //https://api.meetup.com/find/groups?key=6f4c634b253677752b591d6a67327&lat=38.5815719&lon=-121.49439960000001&radius=30&order=members var combine = "lat=" + inputLan + "&lon=" + inputLon + "&radius=" + radius; //clear current events and markers clearMarkers(); self.meetupEvents([]); self.filteredList([]); self.eventStatus('Loading...'); self.loadImg('<img src="img/ajax-loader.gif">'); //perform new meetup search and center map to new location getMeetups(combine); }; //Compare search keyword against event tag of all events. Return a filtered list and map markers of request. this.filterResults = function() { var searchWord = self.filterKeyword().toLowerCase(); var array = self.meetupEvents(); if(!searchWord) { return; } else { //first clear out all entries in the filteredList array self.filteredList([]); //Loop through the meetupEvents array and see if the search keyword matches //with event tag in the list, if so push that object to the filteredList //array and place the marker on the map. for(var i=0; i < array.length; i++) { if(array[i].eventTag.toLowerCase().indexOf(searchWord) != -1) { self.mapMarkers()[i].marker.setMap(map); self.filteredList.push(array[i]); } else self.mapMarkers()[i].marker.setMap(null); } self.eventStatus(self.numEvents() + ' events found for ' + self.filterKeyword()); } }; //Clear keyword from filter and show all active events in current location again. 
this.clearFilter = function() { self.filteredList(self.meetupEvents()); self.eventStatus(self.numEvents() + ' events found near ' + self.searchLocation()); self.filterKeyword(''); for(var i = 0; i < self.mapMarkers().length; i++) { self.mapMarkers()[i].marker.setMap(map); } }; //toggles the list view this.listToggle = function() { if(self.toggleSymbol() === 'hide') { self.toggleSymbol('show'); } else { self.toggleSymbol('hide'); } }; //Error handling if Google Maps fails to load this.mapRequestTimeout = setTimeout(function() { $('#map-canvas').html('We had trouble loading Google Maps. Please refresh your browser and try again.'); }, 8000); // Initialize Google map, perform initial events search on a city. function mapInitialize() { city = new google.maps.LatLng(37.70, -122.10); map = new google.maps.Map(document.getElementById('map-canvas'), { center: city, zoom: 10, zoomControlOptions: { position: google.maps.ControlPosition.LEFT_CENTER, style: google.maps.ZoomControlStyle.SMALL }, streetViewControlOptions: { position: google.maps.ControlPosition.LEFT_BOTTOM }, mapTypeControl: false, panControl: false }); clearTimeout(self.mapRequestTimeout); google.maps.event.addDomListener(window, "resize", function() { var center = map.getCenter(); google.maps.event.trigger(map, "resize"); map.setCenter(center); }); infowindow = new google.maps.InfoWindow({maxWidth: 300}); } // Use API to get events data and store the info as objects in an array function ge
ocation) { var meetupUrl = "https://api.meetup.com/find/groups?key=6f4c634b253677752b591d6a67327&"; var order = "&order=members"; var query = meetupUrl + location + order; $.ajax({ url: query, dataType: 'jsonp', success: function(data) { console.log(data); var len = data.data.length; map.panTo({lat: data.data[0].lat, lng: data.data[0].lon}); for(var i = 0; i < len; i++) { var info = data.data[i]; //console.log(info); //this line filters out events that don't have a physical location to redeem if (info === undefined || info.name == undefined || info.lat == undefined || info.lon == undefined || info.link == undefined || info.group_photo == undefined|| info.city == undefined || info.state == undefined || info.members == undefined|| info.category == undefined || info.who == undefined) continue; var muName = info.name; var muLat = info.lat; var muLon = info.lon; var muLink = info.link; var muImg = info.group_photo.photo_link; var mucity = info.city; var mustate = info.state; var mumembers = info.members; var mutag = info.category.shortname; var mugroup = info.who; self.meetupEvents.push({ eventName: muName, eventLat: muLat, eventLon: muLon, eventLink: muLink, eventImg: muImg, eventAddress: mucity + ", " + mustate, eventTag: mutag, eventGroup: mugroup }); } self.filteredList(self.meetupEvents()); mapMarkers(self.meetupEvents()); self.searchStatus(''); self.loadImg(''); }, error: function() { self.eventStatus('Oops, something was wrong, please refresh and try again.'); self.loadImg(''); } }); } // Create and place markers and info windows on the map based on data from API function mapMarkers(array) { $.each(array, function(index, value) { var latitude = value.eventLat, longitude = value.eventLon, geoLoc = new google.maps.LatLng(latitude, longitude), thisEvent = value.eventName; var infoContentString = '<div id="infowindow">' + '<img src="' + value.eventImg + '">' + '<h4 class = "infoName">' + value.eventName + '</h4>' + '<div class = "clear"></div>' + '<p class = "infoAddress">' + value.eventAddress + '</p>' + '<p>Group: ' + value.eventGroup + '</p>' + '<p><a href="' + value.eventLink + '" target="_blank">Click to view event details</a></p>' + '</div>'; // Custormize marker var iconBase = 'img/meetup.png'; var marker = new google.maps.Marker({ position: geoLoc, title: thisEvent, map: map, icon: iconBase }); self.mapMarkers.push({marker: marker, content: infoContentString}); self.eventStatus(self.numEvents() + ' events found near ' + self.searchLocation()); //generate infowindows
tMeetups(l
identifier_name
Router.js
'bundle-loader?lazy&name=alert!./pages/alert'; import modal from 'bundle-loader?lazy&name=modal!./pages/modal'; import message from 'bundle-loader?lazy&name=message!./pages/message'; import notification from 'bundle-loader?lazy&name=notification!./pages/notification'; import carousel from 'bundle-loader?lazy&name=carousel!./pages/carousel'; import loading from 'bundle-loader?lazy&name=loading!./pages/loading'; import transition from 'bundle-loader?lazy&name=transition!./pages/transition'; /* eslint import/no-webpack-loader-syntax: off */ const getLang = (key) => { let locale = localStorage.getItem('WUI_LANG') || 'cn'; const map = locales[locale] || {}; return key.split('.').reduce((a, b) => { const parent = map[a]; if (b) { return (parent || {})[b]; } return parent; }); } const asyncComponent = (comp) => (props) => { return ( <Bundle load={comp}> {(About) => { return <About locale={{ show: getLang('markdown.show'), hide: getLang('markdown.hide') }} {...props} /> }} </Bundle> ) } const routes = { documents: [ { path: "/:lang/quick-start", exact: true, component: asyncComponent(QuickStart) }, { path: "/:lang/theme", component: asyncComponent(Theme) }, ], components: { 'Basic': [ { path: "/:lang/color", component: asyncComponent(Color) }, { path: "/:lang/layout", component: asyncComponent(Layout) }, { path: "/:lang/icon", component: asyncComponent(icon) }, { path: "/:lang/button", component: asyncComponent(button) }, { path: "/:lang/hotkeys", component: asyncComponent(hotkeys) }, ], 'Form': [ { path: "/:lang/form", component: asyncComponent(FormCom) }, { path: "/:lang/radio", component: asyncComponent(radio) }, { path: "/:lang/checkbox", component: asyncComponent(checkbox) }, { path: "/:lang/select", component: asyncComponent(select) }, { path: "/:lang/slider", component: asyncComponent(slider) }, { path: "/:lang/switch", component: asyncComponent(SwitchCom) }, { path: "/:lang/input", component: asyncComponent(input) }, { path: "/:lang/input-number", component: asyncComponent(inputNumber) }, { path: "/:lang/time-picker", component: asyncComponent(timePicker) }, { path: "/:lang/date-picker", component: asyncComponent(datePicker) }, ], 'Data Display': [ { path: "/:lang/avatar", component: asyncComponent(Avatar) }, { path: "/:lang/badge", component: asyncComponent(badge) }, { path: "/:lang/calendar", component: asyncComponent(calendar) }, { path: "/:lang/carousel", component: asyncComponent(carousel) }, { path: "/:lang/card", component: asyncComponent(card) }, { path: "/:lang/progress", component: asyncComponent(progress) }, { path: "/:lang/rate", component: asyncComponent(rate) }, { path: "/:lang/table", component: asyncComponent(table) }, { path: "/:lang/tag", component: asyncComponent(tag) }, { path: "/:lang/tooltip", component: asyncComponent(tooltip) }, ], 'Navigation': [ { path: "/:lang/menu", component: asyncComponent(menu) }, { path: "/:lang/tabs", component: asyncComponent(tabs) }, { path: "/:lang/paging", component: asyncComponent(paging) }, { path: "/:lang/breadcrumb", component: asyncComponent(breadcrumb) }, { path: "/:lang/dropdown", component: asyncComponent(dropdown) }, { path: "/:lang/steps", component: asyncComponent(steps) }, ], 'Feedback': [ { path: "/:lang/alert", component: asyncComponent(alert) }, { path: "/:lang/modal", component: asyncComponent(modal) }, { path: "/:lang/message", component: asyncComponent(message) }, { path: "/:lang/notification", component: asyncComponent(notification) }, { path: "/:lang/loading", component: asyncComponent(loading) }, { path: 
"/:lang/transition", component: asyncComponent(transition) }, ] }, redirect: [ //重定向到 quick start 页面 { path: "", redirect: "/cn/quick-start" } ] } // 获取所有路由 const getRoutesTotal = (obj) => { let _obj = obj || routes; let arr = []; for (let a in _obj) { if (_obj[a] instanceof Array) { arr = arr.concat(_obj[a]) } else { arr = arr.concat(getRoutesTotal(_obj[a])) } } return arr } // 路由实例化 const getRoutes = () => { let routes = getRoutesTotal(); return routes.map((item, idx) => { let COM = item.component; if (!item.path) { return <Redirect key={idx} push to={{ pathname: item.redirect }} /> } if (item.exact) { return <Route exact key={idx} path={item.path} component={COM} /> } else { return <Route key={idx} path={item.path} component={COM} /> } }) } const getPageName = (location) => { const routes = location.match(/(?:\/(.+))?(\/(.+)\?|\/(.+))/); if (routes) { return routes[3] || routes[4]; } return 'quick-start'; } const getLangName = () => localStorage.getItem('WUI_LANG') || 'cn'; const renderMenuLi = (item, idx) => { if (!item.path) return null; if (getPageName(window.location.href) === getPageName(item.path)) { return <li key={`${idx}`} className="active" key={idx}>{getLang(`page.${getPageName(item.path)}`)}</li> } return ( <li key={`${idx}`}> <Link to={`/${getLangName()}/${getPageName(item.path)}`}> {getLang(`page.${getPageName(item.path)}`)} </Link> </li> ) } const renderMenu = (obj) => { let _obj = obj || routes; let html = [] for (let a in _obj) { if (_obj[a] instanceof Array) { html = html.concat(_obj[a].map((item, idx) => renderMenuLi(item, idx))) } else if (_obj[a] instanceof Object) { for (let e in _obj[a]) { if (_obj[a][e] instanceof Array) { html = html.concat( <ul key={`${e}`}> <li className="title">{getLang(`category.${e}`)}</li> {_obj[a][e].map((item, item_idx) => renderMenuLi(item, item_idx))} </ul> ) } } } } return html } const RoutersContainer = withRouter(({ history, location, ...props }) => { const prefixCls = 'w-docs'; return ( <div className={`${prefixCls}`}> <div className={`${prefixCls}-menu-warpper`}> <div className={`${prefixCls}-menu-content`}> <div className={`${prefixCls}-logo`}> <a href="https://uiw-react.github.io"> <img src={Logo} /> <span>uiw <sup>beta</sup></span> </a> </div> <ul className={`${prefixCls}-menu-list`}> {renderMenu()} </ul> <div className={`${prefixCls}-info`}> <a target="_blank" rel="noopener noreferrer" href="https://github.com/uiw-react/uiw/issues"><Icon type="message" /> 反馈建议</a> <a target="_blank" rel="noopener noreferrer" href="https://github.com/uiw-react/uiw/issues/new"><Icon type="question-circle" /> 提交bug</a> <a target="_blank" rel="noopener noreferrer" href="https://github.com/uiw-react/uiw"><Icon type="github" /> Github</a> </div> </div> </div> <div className={`${prefixCls}-content`} ref={(elm) => { if (elm) { elm.scrollTop = 0 } }}> <Switch> {getRoutes()} </Switch> <ScrollToTop showUnder={160} style={{ bottom: 20 }}> <div className={`${prefixCls}-totop`}></div> </ScrollToTop> </div> </div> ) }) export default class Router extends Component { constructor(props) { super(props); this.state = {}; } componentDidMount() { this.setPage(() => { if (!this.sta
te.locale) { this.setLocale(localStorage.getItem('WUI_LANG') || 'cn'); } }); } componentWillMount() { window.addEventL
identifier_body
Router.js
'bundle-loader?lazy&name=checkbox!./pages/checkbox'; import card from 'bundle-loader?lazy&name=card!./pages/card'; import select from 'bundle-loader?lazy&name=select!./pages/select'; import SwitchCom from 'bundle-loader?lazy&name=switch!./pages/switch'; import slider from 'bundle-loader?lazy&name=slider!./pages/slider'; import input from 'bundle-loader?lazy&name=input!./pages/input'; import inputNumber from 'bundle-loader?lazy&name=input-number!./pages/input-number'; import timePicker from 'bundle-loader?lazy&name=time-picker!./pages/time-picker'; import datePicker from 'bundle-loader?lazy&name=date-picker!./pages/date-picker'; import calendar from 'bundle-loader?lazy&name=calendar!./pages/calendar'; import table from 'bundle-loader?lazy&name=table!./pages/table'; import tabs from 'bundle-loader?lazy&name=tabs!./pages/tabs'; import tooltip from 'bundle-loader?lazy&name=tooltip!./pages/tooltip'; import tag from 'bundle-loader?lazy&name=tag!./pages/tag'; import rate from 'bundle-loader?lazy&name=rate!./pages/rate'; import badge from 'bundle-loader?lazy&name=badge!./pages/badge'; import menu from 'bundle-loader?lazy&name=menu!./pages/menu'; import paging from 'bundle-loader?lazy&name=paging!./pages/paging'; import progress from 'bundle-loader?lazy&name=progress!./pages/progress'; import breadcrumb from 'bundle-loader?lazy&name=breadcrumb!./pages/breadcrumb'; import dropdown from 'bundle-loader?lazy&name=dropdown!./pages/dropdown'; import steps from 'bundle-loader?lazy&name=steps!./pages/steps'; import alert from 'bundle-loader?lazy&name=alert!./pages/alert'; import modal from 'bundle-loader?lazy&name=modal!./pages/modal'; import message from 'bundle-loader?lazy&name=message!./pages/message'; import notification from 'bundle-loader?lazy&name=notification!./pages/notification'; import carousel from 'bundle-loader?lazy&name=carousel!./pages/carousel'; import loading from 'bundle-loader?lazy&name=loading!./pages/loading'; import transition from 'bundle-loader?lazy&name=transition!./pages/transition'; /* eslint import/no-webpack-loader-syntax: off */ const getLang = (key) => { let locale = localStorage.getItem('WUI_LANG') || 'cn'; const map = locales[locale] || {}; return key.split('.').reduce((a, b) => { const parent = map[a]; if (b) { return (parent || {})[b]; } return parent; }); } const asyncComponent = (comp) => (props) => { return ( <Bundle load={comp}> {(About) => { return <About locale={{ show: getLang('markdown.show'), hide: getLang('markdown.hide') }} {...props} /> }} </Bundle> ) } const routes = { documents: [ { path: "/:lang/quick-start", exact: true, component: asyncComponent(QuickStart) }, { path: "/:lang/theme", component: asyncComponent(Theme) }, ], components: { 'Basic': [ { path: "/:lang/color", component: asyncComponent(Color) }, { path: "/:lang/layout", component: asyncComponent(Layout) }, { path: "/:lang/icon", component: asyncComponent(icon) }, { path: "/:lang/button", component: asyncComponent(button) }, { path: "/:lang/hotkeys", component: asyncComponent(hotkeys) }, ], 'Form': [ { path: "/:lang/form", component: asyncComponent(FormCom) }, { path: "/:lang/radio", component: asyncComponent(radio) }, { path: "/:lang/checkbox", component: asyncComponent(checkbox) }, { path: "/:lang/select", component: asyncComponent(select) }, { path: "/:lang/slider", component: asyncComponent(slider) }, { path: "/:lang/switch", component: asyncComponent(SwitchCom) }, { path: "/:lang/input", component: asyncComponent(input) }, { path: "/:lang/input-number", component: 
asyncComponent(inputNumber) }, { path: "/:lang/time-picker", component: asyncComponent(timePicker) }, { path: "/:lang/date-picker", component: asyncComponent(datePicker) }, ], 'Data Display': [ { path: "/:lang/avatar", component: asyncComponent(Avatar) }, { path: "/:lang/badge", component: asyncComponent(badge) }, { path: "/:lang/calendar", component: asyncComponent(calendar) }, { path: "/:lang/carousel", component: asyncComponent(carousel) }, { path: "/:lang/card", component: asyncComponent(card) }, { path: "/:lang/progress", component: asyncComponent(progress) }, { path: "/:lang/rate", component: asyncComponent(rate) }, { path: "/:lang/table", component: asyncComponent(table) }, { path: "/:lang/tag", component: asyncComponent(tag) }, { path: "/:lang/tooltip", component: asyncComponent(tooltip) }, ], 'Navigation': [ { path: "/:lang/menu", component: asyncComponent(menu) }, { path: "/:lang/tabs", component: asyncComponent(tabs) }, { path: "/:lang/paging", component: asyncComponent(paging) }, { path: "/:lang/breadcrumb", component: asyncComponent(breadcrumb) }, { path: "/:lang/dropdown", component: asyncComponent(dropdown) }, { path: "/:lang/steps", component: asyncComponent(steps) }, ], 'Feedback': [ { path: "/:lang/alert", component: asyncComponent(alert) }, { path: "/:lang/modal", component: asyncComponent(modal) }, { path: "/:lang/message", component: asyncComponent(message) }, { path: "/:lang/notification", component: asyncComponent(notification) }, { path: "/:lang/loading", component: asyncComponent(loading) }, { path: "/:lang/transition", component: asyncComponent(transition) }, ] }, redirect: [ //重定向到 quick start 页面 { path: "", redirect: "/cn/quick-start" } ] } // 获取所有路由 const getRoutesTotal = (obj) => { let _obj = obj || routes; let arr = []; for (let a in _obj) { if (_obj[a] instanceof Array) { arr = arr.concat(_obj[a]) } else { arr = arr.concat(getRoutesTotal(_obj[a])) } } return arr } // 路由实例化 const getRoutes = () => { let routes = getRoutesTotal(); return routes.map((item, idx) => { let COM = item.component; if (!item.path) { return <Redirect key={idx} push to={{ pathname: item.redirect }} /> } if (item.exact) { return <Route exact key={idx} path={item.path} component={COM} /> } else { return <Route key={idx} path={item.path} component={COM} /> } }) }
if (routes) { return routes[3] || routes[4]; } return 'quick-start'; } const getLangName = () => localStorage.getItem('WUI_LANG') || 'cn'; const renderMenuLi = (item, idx) => { if (!item.path) return null; if (getPageName(window.location.href) === getPageName(item.path)) { return <li key={`${idx}`} className="active" key={idx}>{getLang(`page.${getPageName(item.path)}`)}</li> } return ( <li key={`${idx}`}> <Link to={`/${getLangName()}/${getPageName(item.path)}`}> {getLang(`page.${getPageName(item.path)}`)} </Link> </li> ) } const renderMenu = (obj) => { let _obj = obj || routes; let html = [] for (let a in _obj) { if (_obj[a] instanceof Array) { html = html.concat(_obj[a].map((item, idx) => renderMenuLi(item, idx))) } else if (_obj[a] instanceof Object) { for (let e in _obj[a]) { if (_obj[a][e] instanceof Array) { html = html.concat( <ul key={`${e}`}> <li className="title">{getLang(`category.${e}`)}</li> {_obj[a][e].map((item, item_idx) => renderMenuLi(item, item_idx))} </ul> ) } } } } return html } const RoutersContainer = withRouter(({ history, location, ...props }) => { const prefixCls = 'w-docs'; return ( <div className={`${prefixCls}`}> <div className={`${prefixCls}-menu-warpper`}> <
const getPageName = (location) => { const routes = location.match(/(?:\/(.+))?(\/(.+)\?|\/(.+))/);
random_line_split
Router.js
pages/progress'; import breadcrumb from 'bundle-loader?lazy&name=breadcrumb!./pages/breadcrumb'; import dropdown from 'bundle-loader?lazy&name=dropdown!./pages/dropdown'; import steps from 'bundle-loader?lazy&name=steps!./pages/steps'; import alert from 'bundle-loader?lazy&name=alert!./pages/alert'; import modal from 'bundle-loader?lazy&name=modal!./pages/modal'; import message from 'bundle-loader?lazy&name=message!./pages/message'; import notification from 'bundle-loader?lazy&name=notification!./pages/notification'; import carousel from 'bundle-loader?lazy&name=carousel!./pages/carousel'; import loading from 'bundle-loader?lazy&name=loading!./pages/loading'; import transition from 'bundle-loader?lazy&name=transition!./pages/transition'; /* eslint import/no-webpack-loader-syntax: off */ const getLang = (key) => { let locale = localStorage.getItem('WUI_LANG') || 'cn'; const map = locales[locale] || {}; return key.split('.').reduce((a, b) => { const parent = map[a]; if (b) { return (parent || {})[b]; } return parent; }); } const asyncComponent = (comp) => (props) => { return ( <Bundle load={comp}> {(About) => { return <About locale={{ show: getLang('markdown.show'), hide: getLang('markdown.hide') }} {...props} /> }} </Bundle> ) } const routes = { documents: [ { path: "/:lang/quick-start", exact: true, component: asyncComponent(QuickStart) }, { path: "/:lang/theme", component: asyncComponent(Theme) }, ], components: { 'Basic': [ { path: "/:lang/color", component: asyncComponent(Color) }, { path: "/:lang/layout", component: asyncComponent(Layout) }, { path: "/:lang/icon", component: asyncComponent(icon) }, { path: "/:lang/button", component: asyncComponent(button) }, { path: "/:lang/hotkeys", component: asyncComponent(hotkeys) }, ], 'Form': [ { path: "/:lang/form", component: asyncComponent(FormCom) }, { path: "/:lang/radio", component: asyncComponent(radio) }, { path: "/:lang/checkbox", component: asyncComponent(checkbox) }, { path: "/:lang/select", component: asyncComponent(select) }, { path: "/:lang/slider", component: asyncComponent(slider) }, { path: "/:lang/switch", component: asyncComponent(SwitchCom) }, { path: "/:lang/input", component: asyncComponent(input) }, { path: "/:lang/input-number", component: asyncComponent(inputNumber) }, { path: "/:lang/time-picker", component: asyncComponent(timePicker) }, { path: "/:lang/date-picker", component: asyncComponent(datePicker) }, ], 'Data Display': [ { path: "/:lang/avatar", component: asyncComponent(Avatar) }, { path: "/:lang/badge", component: asyncComponent(badge) }, { path: "/:lang/calendar", component: asyncComponent(calendar) }, { path: "/:lang/carousel", component: asyncComponent(carousel) }, { path: "/:lang/card", component: asyncComponent(card) }, { path: "/:lang/progress", component: asyncComponent(progress) }, { path: "/:lang/rate", component: asyncComponent(rate) }, { path: "/:lang/table", component: asyncComponent(table) }, { path: "/:lang/tag", component: asyncComponent(tag) }, { path: "/:lang/tooltip", component: asyncComponent(tooltip) }, ], 'Navigation': [ { path: "/:lang/menu", component: asyncComponent(menu) }, { path: "/:lang/tabs", component: asyncComponent(tabs) }, { path: "/:lang/paging", component: asyncComponent(paging) }, { path: "/:lang/breadcrumb", component: asyncComponent(breadcrumb) }, { path: "/:lang/dropdown", component: asyncComponent(dropdown) }, { path: "/:lang/steps", component: asyncComponent(steps) }, ], 'Feedback': [ { path: "/:lang/alert", component: asyncComponent(alert) }, { path: "/:lang/modal", 
component: asyncComponent(modal) }, { path: "/:lang/message", component: asyncComponent(message) }, { path: "/:lang/notification", component: asyncComponent(notification) }, { path: "/:lang/loading", component: asyncComponent(loading) }, { path: "/:lang/transition", component: asyncComponent(transition) }, ] }, redirect: [ //重定向到 quick start 页面 { path: "", redirect: "/cn/quick-start" } ] } // 获取所有路由 const getRoutesTotal = (obj) => { let _obj = obj || routes; let arr = []; for (let a in _obj) { if (_obj[a] instanceof Array) { arr = arr.concat(_obj[a]) } else { arr = arr.concat(getRoutesTotal(_obj[a])) } } return arr } // 路由实例化 const getRoutes = () => { let routes = getRoutesTotal(); return routes.map((item, idx) => { let COM = item.component; if (!item.path) { return <Redirect key={idx} push to={{ pathname: item.redirect }} /> } if (item.exact) { return <Route exact key={idx} path={item.path} component={COM} /> } else { return <Route key={idx} path={item.path} component={COM} /> } }) } const getPageName = (location) => { const routes = location.match(/(?:\/(.+))?(\/(.+)\?|\/(.+))/); if (routes) { return routes[3] || routes[4]; } return 'quick-start'; } const getLangName = () => localStorage.getItem('WUI_LANG') || 'cn'; const renderMenuLi = (item, idx) => { if (!item.path) return null; if (getPageName(window.location.href) === getPageName(item.path)) { return <li key={`${idx}`} className="active" key={idx}>{getLang(`page.${getPageName(item.path)}`)}</li> } return ( <li key={`${idx}`}> <Link to={`/${getLangName()}/${getPageName(item.path)}`}> {getLang(`page.${getPageName(item.path)}`)} </Link> </li> ) } const renderMenu = (obj) => { let _obj = obj || routes; let html = [] for (let a in _obj) { if (_obj[a] instanceof Array) { html = html.concat(_obj[a].map((item, idx) => renderMenuLi(item, idx))) } else if (_obj[a] instanceof Object) { for (let e in _obj[a]) { if (_obj[a][e] instanceof Array) { html = html.concat( <ul key={`${e}`}> <li className="title">{getLang(`category.${e}`)}</li> {_obj[a][e].map((item, item_idx) => renderMenuLi(item, item_idx))} </ul> ) } } } } return html } const RoutersContainer = withRouter(({ history, location, ...props }) => { const prefixCls = 'w-docs'; return ( <div className={`${prefixCls}`}> <div className={`${prefixCls}-menu-warpper`}> <div className={`${prefixCls}-menu-content`}> <div className={`${prefixCls}-logo`}> <a href="https://uiw-react.github.io"> <img src={Logo} /> <span>uiw <sup>beta</sup></span> </a> </div> <ul className={`${prefixCls}-menu-list`}> {renderMenu()} </ul> <div className={`${prefixCls}-info`}> <a target="_blank" rel="noopener noreferrer" href="https://github.com/uiw-react/uiw/issues"><Icon type="message" /> 反馈建议</a> <a target="_blank" rel="noopener noreferrer" href="https://github.com/uiw-react/uiw/issues/new"><Icon type="question-circle" /> 提交bug</a> <a target="_blank" rel="noopener noreferrer" href="https://github.com/uiw-react/uiw"><Icon type="github" /> Github</a> </div> </div> </div> <div className={`${prefixCls}-content`} ref={(elm) => { if (elm) { elm.scrollTop = 0 } }}> <Switch> {getRoutes()} </Switch> <ScrollToTop showUnder={160} style={{ bottom: 20 }}> <div className={`${prefixCls}-totop`}></div> </ScrollToTop> </div> </div> ) }) export default class Router extends Component { constructor(props
) {
identifier_name
Router.js
'bundle-loader?lazy&name=checkbox!./pages/checkbox'; import card from 'bundle-loader?lazy&name=card!./pages/card'; import select from 'bundle-loader?lazy&name=select!./pages/select'; import SwitchCom from 'bundle-loader?lazy&name=switch!./pages/switch'; import slider from 'bundle-loader?lazy&name=slider!./pages/slider'; import input from 'bundle-loader?lazy&name=input!./pages/input'; import inputNumber from 'bundle-loader?lazy&name=input-number!./pages/input-number'; import timePicker from 'bundle-loader?lazy&name=time-picker!./pages/time-picker'; import datePicker from 'bundle-loader?lazy&name=date-picker!./pages/date-picker'; import calendar from 'bundle-loader?lazy&name=calendar!./pages/calendar'; import table from 'bundle-loader?lazy&name=table!./pages/table'; import tabs from 'bundle-loader?lazy&name=tabs!./pages/tabs'; import tooltip from 'bundle-loader?lazy&name=tooltip!./pages/tooltip'; import tag from 'bundle-loader?lazy&name=tag!./pages/tag'; import rate from 'bundle-loader?lazy&name=rate!./pages/rate'; import badge from 'bundle-loader?lazy&name=badge!./pages/badge'; import menu from 'bundle-loader?lazy&name=menu!./pages/menu'; import paging from 'bundle-loader?lazy&name=paging!./pages/paging'; import progress from 'bundle-loader?lazy&name=progress!./pages/progress'; import breadcrumb from 'bundle-loader?lazy&name=breadcrumb!./pages/breadcrumb'; import dropdown from 'bundle-loader?lazy&name=dropdown!./pages/dropdown'; import steps from 'bundle-loader?lazy&name=steps!./pages/steps'; import alert from 'bundle-loader?lazy&name=alert!./pages/alert'; import modal from 'bundle-loader?lazy&name=modal!./pages/modal'; import message from 'bundle-loader?lazy&name=message!./pages/message'; import notification from 'bundle-loader?lazy&name=notification!./pages/notification'; import carousel from 'bundle-loader?lazy&name=carousel!./pages/carousel'; import loading from 'bundle-loader?lazy&name=loading!./pages/loading'; import transition from 'bundle-loader?lazy&name=transition!./pages/transition'; /* eslint import/no-webpack-loader-syntax: off */ const getLang = (key) => { let locale = localStorage.getItem('WUI_LANG') || 'cn'; const map = locales[locale] || {}; return key.split('.').reduce((a, b) => { const parent = map[a]; if (b) { return (parent || {})[b]; } return parent; }); } const asyncComponent = (comp) => (props) => { return ( <Bundle load={comp}> {(About) => { return <About locale={{ show: getLang('markdown.show'), hide: getLang('markdown.hide') }} {...props} /> }} </Bundle> ) } const routes = { documents: [ { path: "/:lang/quick-start", exact: true, component: asyncComponent(QuickStart) }, { path: "/:lang/theme", component: asyncComponent(Theme) }, ], components: { 'Basic': [ { path: "/:lang/color", component: asyncComponent(Color) }, { path: "/:lang/layout", component: asyncComponent(Layout) }, { path: "/:lang/icon", component: asyncComponent(icon) }, { path: "/:lang/button", component: asyncComponent(button) }, { path: "/:lang/hotkeys", component: asyncComponent(hotkeys) }, ], 'Form': [ { path: "/:lang/form", component: asyncComponent(FormCom) }, { path: "/:lang/radio", component: asyncComponent(radio) }, { path: "/:lang/checkbox", component: asyncComponent(checkbox) }, { path: "/:lang/select", component: asyncComponent(select) }, { path: "/:lang/slider", component: asyncComponent(slider) }, { path: "/:lang/switch", component: asyncComponent(SwitchCom) }, { path: "/:lang/input", component: asyncComponent(input) }, { path: "/:lang/input-number", component: 
asyncComponent(inputNumber) }, { path: "/:lang/time-picker", component: asyncComponent(timePicker) }, { path: "/:lang/date-picker", component: asyncComponent(datePicker) }, ], 'Data Display': [ { path: "/:lang/avatar", component: asyncComponent(Avatar) }, { path: "/:lang/badge", component: asyncComponent(badge) }, { path: "/:lang/calendar", component: asyncComponent(calendar) }, { path: "/:lang/carousel", component: asyncComponent(carousel) }, { path: "/:lang/card", component: asyncComponent(card) }, { path: "/:lang/progress", component: asyncComponent(progress) }, { path: "/:lang/rate", component: asyncComponent(rate) }, { path: "/:lang/table", component: asyncComponent(table) }, { path: "/:lang/tag", component: asyncComponent(tag) }, { path: "/:lang/tooltip", component: asyncComponent(tooltip) }, ], 'Navigation': [ { path: "/:lang/menu", component: asyncComponent(menu) }, { path: "/:lang/tabs", component: asyncComponent(tabs) }, { path: "/:lang/paging", component: asyncComponent(paging) }, { path: "/:lang/breadcrumb", component: asyncComponent(breadcrumb) }, { path: "/:lang/dropdown", component: asyncComponent(dropdown) }, { path: "/:lang/steps", component: asyncComponent(steps) }, ], 'Feedback': [ { path: "/:lang/alert", component: asyncComponent(alert) }, { path: "/:lang/modal", component: asyncComponent(modal) }, { path: "/:lang/message", component: asyncComponent(message) }, { path: "/:lang/notification", component: asyncComponent(notification) }, { path: "/:lang/loading", component: asyncComponent(loading) }, { path: "/:lang/transition", component: asyncComponent(transition) }, ] }, redirect: [ //重定向到 quick start 页面 { path: "", redirect: "/cn/quick-start" } ] } // 获取所有路由 const getRoutesTotal = (obj) => { let _obj = obj || routes; let arr = []; for (let a in _obj) { if (_obj[a] instanceof Array) { arr = arr.concat(_obj[a]) } else { arr = arr.concat(getRoutesTotal(_obj[a])) } } return arr } // 路由实例化 const getRoutes = () => { let routes = getRoutesTotal(); return routes.map((item, idx) => { let COM = item.component; if (!item.path) { return <Redirect key={idx} push to={{ pathname: item.redirect }} /> } if (item.exact) { return <Route exact key={idx} path={item.path} component={COM} /> } else { return <Route key={idx} path={item.path} component={COM} /> } }) } const getPageName = (location) => { const routes = location.match(/(?:\/(.+))?(\/(.+)\?|\/(.+))/); if (routes) { return routes[3] || routes[4]; } return 'quick-start'; } const getLangName = () => localStorage.getItem('WUI_LANG') || 'cn'; const renderMenuLi = (item, idx) => { if (!item.path) return null; if (getPageName(window.location.href) === getPageName(item.path)) { return <li key={`${idx}`} className="active" key={idx}>{getLang(`page.${getPageName(item.path)}`)}</li> } return ( <li key={`${idx}`}> <Link to={`/${getLangName()}/${getPageName(item.path)}`}> {getLang(`page.${getPageName(item.path)}`)} </Link> </li> ) } const renderMenu = (obj) => { let _obj = obj || routes; let html = [] for (let a in _obj) { if (_obj[a] instanceof Array) { html = html.concat(_obj[a].map((item, idx) => renderMenuLi(item, idx))) } else if (_obj[a] instanceof Object) { for (let e in _obj[a]) { if (_obj[a][e] instanceof Array) { html = html.concat(
const RoutersContainer = withRouter(({ history, location, ...props }) => { const prefixCls = 'w-docs'; return ( <div className={`${prefixCls}`}> <div className={`${prefixCls}-menu-warpper`}>
<ul key={`${e}`}> <li className="title">{getLang(`category.${e}`)}</li> {_obj[a][e].map((item, item_idx) => renderMenuLi(item, item_idx))} </ul> ) } } } } return html }
conditional_block
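Editor's note: each row in this dump pairs a file name with the code before a masked span, the code after it, the masked span itself, and a category label (here "conditional_block"). A minimal reassembly sketch is given below; the field names "prefix", "middle" and "suffix" and the dict-style record are assumptions for illustration, not the dataset's actual schema, and the placeholder strings stand in for the real cell contents.

# Minimal reassembly sketch for one fill-in-the-middle (FIM) record.
# Assumption: the three text fields are exposed as "prefix", "middle" and
# "suffix"; the dataset's real column names and storage format may differ.

def reassemble(record: dict) -> str:
    """Concatenate the masked span back between its surrounding context."""
    return record["prefix"] + record["middle"] + record["suffix"]

record = {
    "file_name": "Router.js",
    "prefix": "<code before the masked span>",
    "middle": "<the masked span, e.g. a conditional block>",
    "suffix": "<code after the masked span>",
    "fim_type": "conditional_block",
}

original_source = reassemble(record)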
mod.rs
Subscriber> Layer<S> for FooLayer {} //! # impl<S: Subscriber> Layer<S> for BarLayer {} //! # impl FooLayer { //! # fn new() -> Self { Self {} } //! # } //! # impl BarLayer { //! # fn new() -> Self { Self {} } //! # } //! //! let subscriber = Registry::default() //! .with(FooLayer::new()) //! .with(BarLayer::new()); //! ``` //! //! If a type implementing `Layer` depends on the functionality of a `Registry` //! implementation, it should bound its `Subscriber` type parameter with the //! [`LookupSpan`] trait, like so: //! //! ```rust //! use tracing_subscriber::{registry, Layer}; //! use tracing_core::Subscriber; //! //! pub struct MyLayer { //! // ... //! } //! //! impl<S> Layer<S> for MyLayer //! where //! S: Subscriber + for<'a> registry::LookupSpan<'a>, //! { //! // ... //! } //! ``` //! When this bound is added, the `Layer` implementation will be guaranteed //! access to the [`Context`][ctx] methods, such as [`Context::span`][lookup], that //! require the root subscriber to be a registry. //! //! [`Layer`]: ../layer/trait.Layer.html //! [`Subscriber`]: //! https://docs.rs/tracing-core/latest/tracing_core/subscriber/trait.Subscriber.html //! [`Registry`]: struct.Registry.html //! [ctx]: ../layer/struct.Context.html //! [lookup]: ../layer/struct.Context.html#method.span //! [`LookupSpan`]: trait.LookupSpan.html //! [`SpanData`]: trait.SpanData.html use tracing_core::{field::FieldSet, span::Id, Metadata}; /// A module containing a type map of span extensions. mod extensions; #[cfg(feature = "registry")] mod sharded; #[cfg(feature = "registry")] mod stack; pub use extensions::{Extensions, ExtensionsMut}; #[cfg(feature = "registry")] #[cfg_attr(docsrs, doc(cfg(feature = "registry")))] pub use sharded::Data; #[cfg(feature = "registry")] #[cfg_attr(docsrs, doc(cfg(feature = "registry")))] pub use sharded::Registry; /// Provides access to stored span data. /// /// Subscribers which store span data and associate it with span IDs should /// implement this trait; if they do, any [`Layer`]s wrapping them can look up /// metadata via the [`Context`] type's [`span()`] method. /// /// [`Layer`]: ../layer/trait.Layer.html /// [`Context`]: ../layer/struct.Context.html /// [`span()`]: ../layer/struct.Context.html#method.metadata pub trait LookupSpan<'a> { /// The type of span data stored in this registry. type Data: SpanData<'a>; /// Returns the [`SpanData`] for a given `Id`, if it exists. /// /// <div class="information"> /// <div class="tooltip ignore" style="">ⓘ<span class="tooltiptext">Note</span></div> /// </div> /// <div class="example-wrap" style="display:inline-block"> /// <pre class="ignore" style="white-space:normal;font:inherit;"> /// <strong>Note</strong>: users of the <code>LookupSpan<code> trait should /// typically call the <a href="#method.span"><code>span</code> method rather /// than this method. The <code>span</code> method is implemented by /// <em>calling</em> <code>span_data</code>, but returns a reference which is /// capable of performing more sophisiticated queries. /// </pre></div> /// /// [`SpanData`]: trait.SpanData.html fn span_data(&'a self, id: &Id) -> Option<Self::Data>; /// Returns a [`SpanRef`] for the span with the given `Id`, if it exists. /// /// A `SpanRef` is similar to [`SpanData`], but it allows performing /// additional lookups against the registryr that stores the wrapped data. 
/// /// In general, _users_ of the `LookupSpan` trait should use this method /// rather than the [`span_data`] method; while _implementors_ of this trait /// should only implement `span_data`. /// /// [`SpanRef`]: struct.SpanRef.html /// [`SpanData`]: trait.SpanData.html /// [`span_data`]: #method.span_data fn span(&'a self, id: &Id) -> Option<SpanRef<'_, Self>> where Self: Sized, { let data = self.span_data(&id)?; Some(SpanRef { registry: self, data, }) } } /// A stored representation of data associated with a span. pub trait SpanData<'a> { /// Returns this span's ID. fn id(&self) -> Id; /// Returns a reference to the span's `Metadata`. fn metadata(&self) -> &'static Metadata<'static>; /// Returns a reference to the ID fn parent(&self) -> Option<&Id>; /// Returns a reference to this span's `Extensions`. /// /// The extensions may be used by `Layer`s to store additional data /// describing the span. fn extensions(&self) -> Extensions<'_>; /// Returns a mutable reference to this span's `Extensions`. /// /// The extensions may be used by `Layer`s to store additional data /// describing the span. fn extensions_mut(&self) -> ExtensionsMut<'_>; } /// A reference to [span data] and the associated [registry]. /// /// This type implements all the same methods as [`SpanData`][span data], and /// provides additional methods for querying the registry based on values from /// the span. /// /// [span data]: trait.SpanData.html /// [registry]: trait.LookupSpan.html #[derive(Debug)] pub struct SpanRef<'a, R: LookupSpan<'a>> { registry: &'a R, data: R::Data, } /// An iterator over the parents of a span. /// /// This is returned by the [`SpanRef::parents`] method. /// /// [`SpanRef::parents`]: struct.SpanRef.html#method.parents #[derive(Debug)] pub struct Parents<'a, R> { registry: &'a R, next: Option<Id>, } /// An iterator over a span's parents, starting with the root of the trace /// tree. /// /// For additonal details, see [`SpanRef::from_root`]. /// /// [`Span::from_root`]: struct.SpanRef.html#method.from_root pub struct FromRoot<'a, R: LookupSpan<'a>> { #[cfg(feature = "smallvec")] inner: std::iter::Rev<smallvec::IntoIter<SpanRefVecArray<'a, R>>>, #[cfg(not(feature = "smallvec"))] inner: std::iter::Rev<std::vec::IntoIter<SpanRef<'a, R>>>, } #[cfg(feature = "smallvec")] type SpanRefVecArray<'span, L> = [SpanRef<'span, L>; 16]; impl<'a, R> SpanRef<'a, R> where R: LookupSpan<'a>, { /// Returns this span's ID. pub fn id(&self) -> Id {
/// Returns a static reference to the span's metadata. pub fn metadata(&self) -> &'static Metadata<'static> { self.data.metadata() } /// Returns the span's name, pub fn name(&self) -> &'static str { self.data.metadata().name() } /// Returns a list of [fields] defined by the span. /// /// [fields]: https://docs.rs/tracing-core/latest/tracing_core/field/index.html pub fn fields(&self) -> &FieldSet { self.data.metadata().fields() } /// Returns the ID of this span's parent, or `None` if this span is the root /// of its trace tree. pub fn parent_id(&self) -> Option<&Id> { self.data.parent() } /// Returns a `SpanRef` describing this span's parent, or `None` if this /// span is the root of its trace tree. pub fn parent(&self) -> Option<Self> { let id = self.data.parent()?; let data = self.registry.span_data(id)?; Some(Self { registry: self.registry, data, }) } /// Returns an iterator over all parents of this span, starting with the /// immediate parent. /// /// The iterator will first return the span's immediate parent, followed by /// that span's parent, followed by _that_ span's parent, and so on, until a /// it reaches a root span. pub fn parents(&self) -> Parents<'a, R> { Parents { registry: self.registry, next: self.parent().map(|parent| parent.id()), } } /// Returns an iterator over all parents of this span, starting with the /// root of the trace tree. /// /// The iterator will return the root of the trace tree, followed by the /// next span, and then the next, until this span's immediate parent is /// returned. /// /// **Note**: if
self.data.id() }
identifier_body
mod.rs
the `Layer` implementation will be guaranteed //! access to the [`Context`][ctx] methods, such as [`Context::span`][lookup], that //! require the root subscriber to be a registry. //! //! [`Layer`]: ../layer/trait.Layer.html //! [`Subscriber`]: //! https://docs.rs/tracing-core/latest/tracing_core/subscriber/trait.Subscriber.html //! [`Registry`]: struct.Registry.html //! [ctx]: ../layer/struct.Context.html //! [lookup]: ../layer/struct.Context.html#method.span //! [`LookupSpan`]: trait.LookupSpan.html //! [`SpanData`]: trait.SpanData.html use tracing_core::{field::FieldSet, span::Id, Metadata}; /// A module containing a type map of span extensions. mod extensions; #[cfg(feature = "registry")] mod sharded; #[cfg(feature = "registry")] mod stack; pub use extensions::{Extensions, ExtensionsMut}; #[cfg(feature = "registry")] #[cfg_attr(docsrs, doc(cfg(feature = "registry")))] pub use sharded::Data; #[cfg(feature = "registry")] #[cfg_attr(docsrs, doc(cfg(feature = "registry")))] pub use sharded::Registry; /// Provides access to stored span data. /// /// Subscribers which store span data and associate it with span IDs should /// implement this trait; if they do, any [`Layer`]s wrapping them can look up /// metadata via the [`Context`] type's [`span()`] method. /// /// [`Layer`]: ../layer/trait.Layer.html /// [`Context`]: ../layer/struct.Context.html /// [`span()`]: ../layer/struct.Context.html#method.metadata pub trait LookupSpan<'a> { /// The type of span data stored in this registry. type Data: SpanData<'a>; /// Returns the [`SpanData`] for a given `Id`, if it exists. /// /// <div class="information"> /// <div class="tooltip ignore" style="">ⓘ<span class="tooltiptext">Note</span></div> /// </div> /// <div class="example-wrap" style="display:inline-block"> /// <pre class="ignore" style="white-space:normal;font:inherit;"> /// <strong>Note</strong>: users of the <code>LookupSpan<code> trait should /// typically call the <a href="#method.span"><code>span</code> method rather /// than this method. The <code>span</code> method is implemented by /// <em>calling</em> <code>span_data</code>, but returns a reference which is /// capable of performing more sophisiticated queries. /// </pre></div> /// /// [`SpanData`]: trait.SpanData.html fn span_data(&'a self, id: &Id) -> Option<Self::Data>; /// Returns a [`SpanRef`] for the span with the given `Id`, if it exists. /// /// A `SpanRef` is similar to [`SpanData`], but it allows performing /// additional lookups against the registryr that stores the wrapped data. /// /// In general, _users_ of the `LookupSpan` trait should use this method /// rather than the [`span_data`] method; while _implementors_ of this trait /// should only implement `span_data`. /// /// [`SpanRef`]: struct.SpanRef.html /// [`SpanData`]: trait.SpanData.html /// [`span_data`]: #method.span_data fn span(&'a self, id: &Id) -> Option<SpanRef<'_, Self>> where Self: Sized, { let data = self.span_data(&id)?; Some(SpanRef { registry: self, data, }) } } /// A stored representation of data associated with a span. pub trait SpanData<'a> { /// Returns this span's ID. fn id(&self) -> Id; /// Returns a reference to the span's `Metadata`. fn metadata(&self) -> &'static Metadata<'static>; /// Returns a reference to the ID fn parent(&self) -> Option<&Id>; /// Returns a reference to this span's `Extensions`. /// /// The extensions may be used by `Layer`s to store additional data /// describing the span. fn extensions(&self) -> Extensions<'_>; /// Returns a mutable reference to this span's `Extensions`. 
/// /// The extensions may be used by `Layer`s to store additional data /// describing the span. fn extensions_mut(&self) -> ExtensionsMut<'_>; } /// A reference to [span data] and the associated [registry]. /// /// This type implements all the same methods as [`SpanData`][span data], and /// provides additional methods for querying the registry based on values from /// the span. /// /// [span data]: trait.SpanData.html /// [registry]: trait.LookupSpan.html #[derive(Debug)] pub struct SpanRef<'a, R: LookupSpan<'a>> { registry: &'a R, data: R::Data, } /// An iterator over the parents of a span. /// /// This is returned by the [`SpanRef::parents`] method. /// /// [`SpanRef::parents`]: struct.SpanRef.html#method.parents #[derive(Debug)] pub struct Parents<'a, R> { registry: &'a R, next: Option<Id>, } /// An iterator over a span's parents, starting with the root of the trace /// tree. /// /// For additonal details, see [`SpanRef::from_root`]. /// /// [`Span::from_root`]: struct.SpanRef.html#method.from_root pub struct FromRoot<'a, R: LookupSpan<'a>> { #[cfg(feature = "smallvec")] inner: std::iter::Rev<smallvec::IntoIter<SpanRefVecArray<'a, R>>>, #[cfg(not(feature = "smallvec"))] inner: std::iter::Rev<std::vec::IntoIter<SpanRef<'a, R>>>, } #[cfg(feature = "smallvec")] type SpanRefVecArray<'span, L> = [SpanRef<'span, L>; 16]; impl<'a, R> SpanRef<'a, R> where R: LookupSpan<'a>, { /// Returns this span's ID. pub fn id(&self) -> Id { self.data.id() } /// Returns a static reference to the span's metadata. pub fn metadata(&self) -> &'static Metadata<'static> { self.data.metadata() } /// Returns the span's name, pub fn name(&self) -> &'static str { self.data.metadata().name() } /// Returns a list of [fields] defined by the span. /// /// [fields]: https://docs.rs/tracing-core/latest/tracing_core/field/index.html pub fn fields(&self) -> &FieldSet { self.data.metadata().fields() } /// Returns the ID of this span's parent, or `None` if this span is the root /// of its trace tree. pub fn parent_id(&self) -> Option<&Id> { self.data.parent() } /// Returns a `SpanRef` describing this span's parent, or `None` if this /// span is the root of its trace tree. pub fn parent(&self) -> Option<Self> { let id = self.data.parent()?; let data = self.registry.span_data(id)?; Some(Self { registry: self.registry, data, }) } /// Returns an iterator over all parents of this span, starting with the /// immediate parent. /// /// The iterator will first return the span's immediate parent, followed by /// that span's parent, followed by _that_ span's parent, and so on, until a /// it reaches a root span. pub fn parents(&self) -> Parents<'a, R> { Parents { registry: self.registry, next: self.parent().map(|parent| parent.id()), } } /// Returns an iterator over all parents of this span, starting with the /// root of the trace tree. /// /// The iterator will return the root of the trace tree, followed by the /// next span, and then the next, until this span's immediate parent is /// returned. /// /// **Note**: if the "smallvec" feature flag is not enabled, this may /// allocate. pub fn from_root(&self) -> FromRoot<'a, R> { #[cfg(feature = "smallvec")] type SpanRefVec<'span, L> = smallvec::SmallVec<SpanRefVecArray<'span, L>>; #[cfg(not(feature = "smallvec"))] type SpanRefVec<'span, L> = Vec<SpanRef<'span, L>>; // an alternative way to handle this would be to the recursive approach that // `fmt` uses that _does not_ entail any allocation in this fmt'ing // spans path. 
let parents = self.parents().collect::<SpanRefVec<'a, _>>(); let inner = parents.into_iter().rev(); FromRoot { inner } } /// Returns a reference to this span's `Extensions`. /// /// The extensions may be used by `Layer`s to store additional data /// describing the span. pub fn ex
tensions(&
identifier_name
mod.rs
Subscriber> Layer<S> for FooLayer {} //! # impl<S: Subscriber> Layer<S> for BarLayer {} //! # impl FooLayer { //! # fn new() -> Self { Self {} } //! # } //! # impl BarLayer { //! # fn new() -> Self { Self {} } //! # } //! //! let subscriber = Registry::default() //! .with(FooLayer::new()) //! .with(BarLayer::new()); //! ``` //! //! If a type implementing `Layer` depends on the functionality of a `Registry` //! implementation, it should bound its `Subscriber` type parameter with the //! [`LookupSpan`] trait, like so: //! //! ```rust //! use tracing_subscriber::{registry, Layer}; //! use tracing_core::Subscriber; //! //! pub struct MyLayer { //! // ... //! } //! //! impl<S> Layer<S> for MyLayer //! where //! S: Subscriber + for<'a> registry::LookupSpan<'a>, //! { //! // ... //! } //! ``` //! When this bound is added, the `Layer` implementation will be guaranteed //! access to the [`Context`][ctx] methods, such as [`Context::span`][lookup], that //! require the root subscriber to be a registry. //! //! [`Layer`]: ../layer/trait.Layer.html //! [`Subscriber`]: //! https://docs.rs/tracing-core/latest/tracing_core/subscriber/trait.Subscriber.html //! [`Registry`]: struct.Registry.html //! [ctx]: ../layer/struct.Context.html //! [lookup]: ../layer/struct.Context.html#method.span //! [`LookupSpan`]: trait.LookupSpan.html //! [`SpanData`]: trait.SpanData.html use tracing_core::{field::FieldSet, span::Id, Metadata}; /// A module containing a type map of span extensions. mod extensions; #[cfg(feature = "registry")] mod sharded; #[cfg(feature = "registry")] mod stack; pub use extensions::{Extensions, ExtensionsMut}; #[cfg(feature = "registry")] #[cfg_attr(docsrs, doc(cfg(feature = "registry")))] pub use sharded::Data; #[cfg(feature = "registry")]
pub use sharded::Registry; /// Provides access to stored span data. /// /// Subscribers which store span data and associate it with span IDs should /// implement this trait; if they do, any [`Layer`]s wrapping them can look up /// metadata via the [`Context`] type's [`span()`] method. /// /// [`Layer`]: ../layer/trait.Layer.html /// [`Context`]: ../layer/struct.Context.html /// [`span()`]: ../layer/struct.Context.html#method.metadata pub trait LookupSpan<'a> { /// The type of span data stored in this registry. type Data: SpanData<'a>; /// Returns the [`SpanData`] for a given `Id`, if it exists. /// /// <div class="information"> /// <div class="tooltip ignore" style="">ⓘ<span class="tooltiptext">Note</span></div> /// </div> /// <div class="example-wrap" style="display:inline-block"> /// <pre class="ignore" style="white-space:normal;font:inherit;"> /// <strong>Note</strong>: users of the <code>LookupSpan<code> trait should /// typically call the <a href="#method.span"><code>span</code> method rather /// than this method. The <code>span</code> method is implemented by /// <em>calling</em> <code>span_data</code>, but returns a reference which is /// capable of performing more sophisiticated queries. /// </pre></div> /// /// [`SpanData`]: trait.SpanData.html fn span_data(&'a self, id: &Id) -> Option<Self::Data>; /// Returns a [`SpanRef`] for the span with the given `Id`, if it exists. /// /// A `SpanRef` is similar to [`SpanData`], but it allows performing /// additional lookups against the registryr that stores the wrapped data. /// /// In general, _users_ of the `LookupSpan` trait should use this method /// rather than the [`span_data`] method; while _implementors_ of this trait /// should only implement `span_data`. /// /// [`SpanRef`]: struct.SpanRef.html /// [`SpanData`]: trait.SpanData.html /// [`span_data`]: #method.span_data fn span(&'a self, id: &Id) -> Option<SpanRef<'_, Self>> where Self: Sized, { let data = self.span_data(&id)?; Some(SpanRef { registry: self, data, }) } } /// A stored representation of data associated with a span. pub trait SpanData<'a> { /// Returns this span's ID. fn id(&self) -> Id; /// Returns a reference to the span's `Metadata`. fn metadata(&self) -> &'static Metadata<'static>; /// Returns a reference to the ID fn parent(&self) -> Option<&Id>; /// Returns a reference to this span's `Extensions`. /// /// The extensions may be used by `Layer`s to store additional data /// describing the span. fn extensions(&self) -> Extensions<'_>; /// Returns a mutable reference to this span's `Extensions`. /// /// The extensions may be used by `Layer`s to store additional data /// describing the span. fn extensions_mut(&self) -> ExtensionsMut<'_>; } /// A reference to [span data] and the associated [registry]. /// /// This type implements all the same methods as [`SpanData`][span data], and /// provides additional methods for querying the registry based on values from /// the span. /// /// [span data]: trait.SpanData.html /// [registry]: trait.LookupSpan.html #[derive(Debug)] pub struct SpanRef<'a, R: LookupSpan<'a>> { registry: &'a R, data: R::Data, } /// An iterator over the parents of a span. /// /// This is returned by the [`SpanRef::parents`] method. /// /// [`SpanRef::parents`]: struct.SpanRef.html#method.parents #[derive(Debug)] pub struct Parents<'a, R> { registry: &'a R, next: Option<Id>, } /// An iterator over a span's parents, starting with the root of the trace /// tree. /// /// For additonal details, see [`SpanRef::from_root`]. 
/// /// [`Span::from_root`]: struct.SpanRef.html#method.from_root pub struct FromRoot<'a, R: LookupSpan<'a>> { #[cfg(feature = "smallvec")] inner: std::iter::Rev<smallvec::IntoIter<SpanRefVecArray<'a, R>>>, #[cfg(not(feature = "smallvec"))] inner: std::iter::Rev<std::vec::IntoIter<SpanRef<'a, R>>>, } #[cfg(feature = "smallvec")] type SpanRefVecArray<'span, L> = [SpanRef<'span, L>; 16]; impl<'a, R> SpanRef<'a, R> where R: LookupSpan<'a>, { /// Returns this span's ID. pub fn id(&self) -> Id { self.data.id() } /// Returns a static reference to the span's metadata. pub fn metadata(&self) -> &'static Metadata<'static> { self.data.metadata() } /// Returns the span's name, pub fn name(&self) -> &'static str { self.data.metadata().name() } /// Returns a list of [fields] defined by the span. /// /// [fields]: https://docs.rs/tracing-core/latest/tracing_core/field/index.html pub fn fields(&self) -> &FieldSet { self.data.metadata().fields() } /// Returns the ID of this span's parent, or `None` if this span is the root /// of its trace tree. pub fn parent_id(&self) -> Option<&Id> { self.data.parent() } /// Returns a `SpanRef` describing this span's parent, or `None` if this /// span is the root of its trace tree. pub fn parent(&self) -> Option<Self> { let id = self.data.parent()?; let data = self.registry.span_data(id)?; Some(Self { registry: self.registry, data, }) } /// Returns an iterator over all parents of this span, starting with the /// immediate parent. /// /// The iterator will first return the span's immediate parent, followed by /// that span's parent, followed by _that_ span's parent, and so on, until a /// it reaches a root span. pub fn parents(&self) -> Parents<'a, R> { Parents { registry: self.registry, next: self.parent().map(|parent| parent.id()), } } /// Returns an iterator over all parents of this span, starting with the /// root of the trace tree. /// /// The iterator will return the root of the trace tree, followed by the /// next span, and then the next, until this span's immediate parent is /// returned. /// /// **Note**: if
#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
random_line_split
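Editor's note: the "random_line_split" label on this row suggests the masked span was cut at arbitrary line boundaries rather than at a syntactic unit (contrast the "identifier_name" and "identifier_body" rows above). The sketch below shows one plausible way such a mask could be produced; it is an assumption about the generation process, not a description of how this dataset was actually built.

# Sketch (assumption): build a random_line_split FIM sample by choosing a
# contiguous run of lines as the masked middle.
import random

def random_line_split(source: str, max_middle_lines: int = 3) -> dict:
    lines = source.splitlines(keepends=True)
    if len(lines) < 3:
        raise ValueError("source too short to split")
    start = random.randrange(1, len(lines) - 1)
    end = min(len(lines) - 1, start + random.randint(1, max_middle_lines))
    return {
        "prefix": "".join(lines[:start]),
        "middle": "".join(lines[start:end]),
        "suffix": "".join(lines[end:]),
        "fim_type": "random_line_split",
    }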
manualtest.py
SCENARIO_TWO_DATA_FILE_KEY="Scenario Two" ANSWER_KEY_NAME="answer_key.yml" USER_ANSWER_CASE_A="A" USER_ANSWER_CASE_B="B" ANSWER_KEY_SCENARIO_ONE="scenario one" ANSWER_KEY_SCENARIO_TWO="scenario two" ANSWER_KEY_QUESTION_KEY="Q_" MAX_CASE_NUM=24 ADJUSTED_AUDIO_SUBDIR="adjusted_audio" SCENARIO_ONE_SUBDIR="scenario_one" SCENARIO_TWO_SUBDIR="scenario_two" class Answer(): """ Wrapper for A_B_X directory containing all associated attributes. Populate all fields of the class and call grade to determine if the question was correct **user_answers user_answer either "A" or "B" indicating which file sounded better user_preference_weight numeric value between 1-5 indicating how much better the preferred value was. 5 being significant and 1 minimal user_X_value either "A" or "B" denoting which file the user believes X was a duplicate of user_answer_confidence numeric value between 1-5 indicating how easy it was to distinguish between A and B and pick X x_answer_alpha the answer to which file X was a duplicate of. Either "A" or "B" A_value String field denoting which scenario A belonged to. Either scenario_one or SCENARIO_TWO_SUBDIR B_value String field denoting which scenario B belonged to. Either scenario_one or SCENARIO_TWO_SUBDIR correct Call self.grade to populate this field. Compares user_X_value and x_answer_alpha to determine if question was correct. Populates with boolean """ def __init__(self, question_num, **user_answers): self.question_num=question_num self.correct = None try: self.user_answer=user_answers[USER_ANSWER_KEY] except KeyError: self.user_answer=None try: self.user_preference_weight=user_answers[USER_PREFERENCE_KEY] except KeyError: self.user_preference_weight=None try: self.user_X_value=user_answers[USER_X_VALUE_KEY] except KeyError: self.user_X_value=None try: self.user_answer_confidence=user_answers[USER_CONFIDENCE_KEY] except KeyError: self.user_answer_confidence=None try: self.x_answer_alpha=user_answers[X_ANSWER_KEY] except KeyError: self.x_answer_alpha=None try: self.A_value=user_answers[A_VALUE_KEY] except KeyError: self.A_value=None try: self.B_value=user_answers[B_VALUE_KEY] except KeyError: self.B_value=None def grade(self): if self.x_answer_alpha==self.user_X_value: self.correct=True else: self.correct=False def _collect_locations(): # Method to pair all the files for comparison in the two scenarios the user has elected to compare logging.info("Enter: _collect_locations") global scenario_one global scenario_two global output_base_path if not os.path.exists(scenario_one): print("Scenario One file path does not exist. Exiting") sys.exit() if not os.path.exists(scenario_two): print("Scenario Two file path does not exist. Exiting") sys.exit() print("Creating listening test...") logging.info("Exit: _collect_locations") return scenario_one, scenario_two, output_base_path def _cleanup_scenarios(adjusted_file_path): # Delete the adjusted audio created for this module try: shutil.rmtree(adjusted_file_path) except: print("The system could not delete the temporary audio files that " "were created for this test. 
This directory can be removed " "at {}".format(adjusted_file_path)) def _create_output_directory(output_base_path): # From the base path create a testcases subdirectory # Return the subdirectory full path logging.info("Enter: _create_output_directory") global output_path output_path = os.path.join(output_base_path, TESTCASES_SUBDIR) if os.path.exists(output_path): try: input("Please note there is already a Testcases directory at - {} .\nPress enter to continue and remove it. Press CNTRL-C to exit.".format(output_path)) shutil.rmtree(output_path) except PermissionError: print("There is a test directory located in the same location as the test directory location you specified") print("It cannot be removed becase another process is still using it. Please close the process or delete yourself.") sys.exit() except KeyboardInterrupt: print("Exiting...") sys.exit() os.mkdir(output_path) logging.info("Exit: _create_output_directory") return output_path def _create_answer_key(output_path): # Parse the data file from scenario one and two if it exists and add too answer key # Dump data from processes to ANSWER_KEY_NAME in output_path logging.info("Enter: _create_answer_key") global answer_key global scenario_one global scenario_two scenario_one_latency_data={} if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)): with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)) as output_data: scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY]=yaml.load(output_data) scenario_two_latency_data={} if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)): with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)) as output_data: scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY]=yaml.load(output_data) with open(os.path.join(output_path, ANSWER_KEY_NAME), "w") as answer_key_yml: yaml.dump(scenario_one_latency_data, answer_key_yml, default_flow_style=False) yaml.dump(scenario_two_latency_data, answer_key_yml, default_flow_style=False) for question in answer_key: yaml_dict={} Key = str(ANSWER_KEY_QUESTION_KEY+str(question.question_num)) yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha,A_VALUE_KEY: question.A_value,B_VALUE_KEY: question.B_value} yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False) logging.info("Exit: _create_answer_key") def _create_temp_dir(root_directory, scenario_one, scenario_two): logging.info("Enter: _create_temp_dir") # Will create exact copies of both directories specified so files may be altered later adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR) scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR) scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR) try: os.mkdir(adjusted_file_path) except FileExistsError: print("To properly create ABX tests, the audio files are modified so audio begins play at the same time") print("In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.") input("This directory already exists. 
Press enter to remove and continue or CTRL-C to quit") shutil.rmtree(adjusted_file_path) os.mkdir(adjusted_file_path) shutil.copytree(scenario_one, scenario_one_temp) shutil.copytree(scenario_two, scenario_two_temp) logging.info("Exit: _create_temp_dir") return adjusted_file_path, scenario_one_temp, scenario_one_temp def create_A_B_X_cases(A_B_cases_zip_list, output_path): """ Method to create A_B_X testing directories and return the corresponding answer key An A file is chosen from either the scenario one or two with a 50/50 probability. The B file is then from the scenario not chosen for A. An X file is then created with a 50/50 probability of being either a duplicate of A or B Parameters: A_B_cases_zip_list: A list containing absolute file pairs [[scenario_one, scenario_two]...] output_path: absolute file path to store testcase directory Returns: None """ logging.info("Enter: create_A_B_X_cases ") global scenario_one global scenario_two global answer_key # create listening directories and record answer to each in answer_log for case_num, case in enumerate(A_B_cases_zip_list): #MRR I really don't like silently dropping audio pairs. Please just create multiple ABX tests, each with up to 25. Up to you whether you have 3 of 25 and one of 21 or 4 of 24. if case_num > MAX_CASE_NUM: logging.info("The amount of cases has exceeded 25. Please note that " "the accompanying excel sheet only has 25 answer slots and that it will need to " "be restructured") print("The amount of cases has exceeded 25. Please note that " "the accompanying excel sheet only has 25 answer slots and that it will need to " "be restructured") test_case_path = os.path.join(output_path, str(case_num)) try: os.mkdir(test_case_path)
except FileExistsError: logging.debug("Could not create test case directory at {} - encountered FileExistsError".format(test_case_path)) print("Could not create test case directory at {} - encountered FileExistsError".format(test_case_path))
random_line_split
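Editor's note: the Answer wrapper in this sample records a listener's responses for one A/B/X case, and grade() simply compares user_X_value with x_answer_alpha. A short usage sketch follows; it assumes the Answer class shown above is in scope, the two instances and their values are invented, and the keyword names follow the constants in the sample (USER_X_VALUE_KEY == "user_X_value", X_ANSWER_KEY == "x_answer_alpha").

# Usage sketch for the Answer wrapper shown in this sample (hypothetical data).
responses = [
    Answer(0, user_X_value="A", x_answer_alpha="A"),  # listener matched X correctly
    Answer(1, user_X_value="B", x_answer_alpha="A"),  # listener picked the wrong file
]

for answer in responses:
    answer.grade()  # populates answer.correct with True/False

score = sum(1 for a in responses if a.correct)
print("{}/{} correct".format(score, len(responses)))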
manualtest.py
REFERENCE_KEY="user_preference_weight" USER_X_VALUE_KEY="user_X_value" USER_CONFIDENCE_KEY="user_answer_confidence" X_ANSWER_KEY="x_answer_alpha" A_VALUE_KEY="A_value" B_VALUE_KEY="B_value" TESTCASES_SUBDIR="testcases" A_CASE_NAME="A_" B_CASE_NAME="B_" X_CASE_NAME="X_" WNDWS_COPY_CMD="copy" AUDIO_TYPE=".wav" SCNEARIO_ONE_DATA_FILE="output_data.yml" SCENARIO_ONE_DATA_FILE_KEY="Scenario One" SCENARIO_TWO_DATA_FILE="output_data.yml" SCENARIO_TWO_DATA_FILE_KEY="Scenario Two" ANSWER_KEY_NAME="answer_key.yml" USER_ANSWER_CASE_A="A" USER_ANSWER_CASE_B="B" ANSWER_KEY_SCENARIO_ONE="scenario one" ANSWER_KEY_SCENARIO_TWO="scenario two" ANSWER_KEY_QUESTION_KEY="Q_" MAX_CASE_NUM=24 ADJUSTED_AUDIO_SUBDIR="adjusted_audio" SCENARIO_ONE_SUBDIR="scenario_one" SCENARIO_TWO_SUBDIR="scenario_two" class Answer(): """ Wrapper for A_B_X directory containing all associated attributes. Populate all fields of the class and call grade to determine if the question was correct **user_answers user_answer either "A" or "B" indicating which file sounded better user_preference_weight numeric value between 1-5 indicating how much better the preferred value was. 5 being significant and 1 minimal user_X_value either "A" or "B" denoting which file the user believes X was a duplicate of user_answer_confidence numeric value between 1-5 indicating how easy it was to distinguish between A and B and pick X x_answer_alpha the answer to which file X was a duplicate of. Either "A" or "B" A_value String field denoting which scenario A belonged to. Either scenario_one or SCENARIO_TWO_SUBDIR B_value String field denoting which scenario B belonged to. Either scenario_one or SCENARIO_TWO_SUBDIR correct Call self.grade to populate this field. Compares user_X_value and x_answer_alpha to determine if question was correct. Populates with boolean """ def __init__(self, question_num, **user_answers): self.question_num=question_num self.correct = None try: self.user_answer=user_answers[USER_ANSWER_KEY] except KeyError: self.user_answer=None try: self.user_preference_weight=user_answers[USER_PREFERENCE_KEY] except KeyError: self.user_preference_weight=None try: self.user_X_value=user_answers[USER_X_VALUE_KEY] except KeyError: self.user_X_value=None try: self.user_answer_confidence=user_answers[USER_CONFIDENCE_KEY] except KeyError: self.user_answer_confidence=None try: self.x_answer_alpha=user_answers[X_ANSWER_KEY] except KeyError: self.x_answer_alpha=None try: self.A_value=user_answers[A_VALUE_KEY] except KeyError: self.A_value=None try: self.B_value=user_answers[B_VALUE_KEY] except KeyError: self.B_value=None def grade(self): if self.x_answer_alpha==self.user_X_value: self.correct=True else: self.correct=False def _collect_locations(): # Method to pair all the files for comparison in the two scenarios the user has elected to compare logging.info("Enter: _collect_locations") global scenario_one global scenario_two global output_base_path if not os.path.exists(scenario_one): print("Scenario One file path does not exist. Exiting") sys.exit() if not os.path.exists(scenario_two): print("Scenario Two file path does not exist. Exiting") sys.exit() print("Creating listening test...") logging.info("Exit: _collect_locations") return scenario_one, scenario_two, output_base_path def _cleanup_scenarios(adjusted_file_path): # Delete the adjusted audio created for this module try: shutil.rmtree(adjusted_file_path) except: print("The system could not delete the temporary audio files that " "were created for this test. 
This directory can be removed " "at {}".format(adjusted_file_path)) def _create_output_directory(output_base_path): # From the base path create a testcases subdirectory # Return the subdirectory full path logging.info("Enter: _create_output_directory") global output_path output_path = os.path.join(output_base_path, TESTCASES_SUBDIR) if os.path.exists(output_path): try: input("Please note there is already a Testcases directory at - {} .\nPress enter to continue and remove it. Press CNTRL-C to exit.".format(output_path)) shutil.rmtree(output_path) except PermissionError: print("There is a test directory located in the same location as the test directory location you specified") print("It cannot be removed becase another process is still using it. Please close the process or delete yourself.") sys.exit() except KeyboardInterrupt: print("Exiting...") sys.exit() os.mkdir(output_path) logging.info("Exit: _create_output_directory") return output_path def _create_answer_key(output_path): # Parse the data file from scenario one and two if it exists and add too answer key # Dump data from processes to ANSWER_KEY_NAME in output_path logging.info("Enter: _create_answer_key") global answer_key global scenario_one global scenario_two scenario_one_latency_data={} if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)): with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)) as output_data: scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY]=yaml.load(output_data) scenario_two_latency_data={} if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)): with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)) as output_data: scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY]=yaml.load(output_data) with open(os.path.join(output_path, ANSWER_KEY_NAME), "w") as answer_key_yml: yaml.dump(scenario_one_latency_data, answer_key_yml, default_flow_style=False) yaml.dump(scenario_two_latency_data, answer_key_yml, default_flow_style=False) for question in answer_key: yaml_dict={} Key = str(ANSWER_KEY_QUESTION_KEY+str(question.question_num)) yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha,A_VALUE_KEY: question.A_value,B_VALUE_KEY: question.B_value} yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False) logging.info("Exit: _create_answer_key") def
(root_directory, scenario_one, scenario_two): logging.info("Enter: _create_temp_dir") # Will create exact copies of both directories specified so files may be altered later adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR) scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR) scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR) try: os.mkdir(adjusted_file_path) except FileExistsError: print("To properly create ABX tests, the audio files are modified so audio begins play at the same time") print("In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.") input("This directory already exists. Press enter to remove and continue or CTRL-C to quit") shutil.rmtree(adjusted_file_path) os.mkdir(adjusted_file_path) shutil.copytree(scenario_one, scenario_one_temp) shutil.copytree(scenario_two, scenario_two_temp) logging.info("Exit: _create_temp_dir") return adjusted_file_path, scenario_one_temp, scenario_one_temp def create_A_B_X_cases(A_B_cases_zip_list, output_path): """ Method to create A_B_X testing directories and return the corresponding answer key An A file is chosen from either the scenario one or two with a 50/50 probability. The B file is then from the scenario not chosen for A. An X file is then created with a 50/50 probability of being either a duplicate of A or B Parameters: A_B_cases_zip_list: A list containing absolute file pairs [[scenario_one, scenario_two]...] output_path: absolute file path to store testcase directory Returns: None """ logging.info("Enter: create_A_B_X_cases ") global scenario_one global scenario_two global answer_key # create listening directories and record answer to each in answer_log for case_num, case in enumerate(A_B_cases_zip_list): #MRR I really don't like silently dropping audio pairs. Please just create multiple ABX tests, each with up to 25. Up to you whether you have 3 of 25 and one of 21 or 4 of 24. if case_num > MAX_CASE_NUM: logging.info("The amount of cases has exceeded 25. Please note that " "the accompanying excel sheet only has 25 answer slots and that it will need to " "be
_create_temp_dir
identifier_name
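Editor's note: the create_A_B_X_cases docstring in this sample describes the randomization: A is drawn from scenario one or two with equal probability, B comes from the scenario not chosen for A, and X duplicates A or B with equal probability. A small sketch of that assignment is below; the helper name and return shape are hypothetical, and only the 50/50 rules and the scenario labels come from the sample.

# Sketch (assumption): the 50/50 A/B/X assignment described in the
# create_A_B_X_cases docstring. 'pair' is (scenario_one_file, scenario_two_file).
import random

def assign_a_b_x(pair):
    one_file, two_file = pair
    if random.random() < 0.5:
        a_file, a_label = one_file, "scenario_one"
        b_file, b_label = two_file, "scenario_two"
    else:
        a_file, a_label = two_file, "scenario_two"
        b_file, b_label = one_file, "scenario_one"
    x_answer = random.choice(["A", "B"])           # which file X duplicates
    x_file = a_file if x_answer == "A" else b_file
    return a_file, b_file, x_file, x_answer, a_label, b_label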
manualtest.py
REFERENCE_KEY="user_preference_weight" USER_X_VALUE_KEY="user_X_value" USER_CONFIDENCE_KEY="user_answer_confidence" X_ANSWER_KEY="x_answer_alpha" A_VALUE_KEY="A_value" B_VALUE_KEY="B_value" TESTCASES_SUBDIR="testcases" A_CASE_NAME="A_" B_CASE_NAME="B_" X_CASE_NAME="X_" WNDWS_COPY_CMD="copy" AUDIO_TYPE=".wav" SCNEARIO_ONE_DATA_FILE="output_data.yml" SCENARIO_ONE_DATA_FILE_KEY="Scenario One" SCENARIO_TWO_DATA_FILE="output_data.yml" SCENARIO_TWO_DATA_FILE_KEY="Scenario Two" ANSWER_KEY_NAME="answer_key.yml" USER_ANSWER_CASE_A="A" USER_ANSWER_CASE_B="B" ANSWER_KEY_SCENARIO_ONE="scenario one" ANSWER_KEY_SCENARIO_TWO="scenario two" ANSWER_KEY_QUESTION_KEY="Q_" MAX_CASE_NUM=24 ADJUSTED_AUDIO_SUBDIR="adjusted_audio" SCENARIO_ONE_SUBDIR="scenario_one" SCENARIO_TWO_SUBDIR="scenario_two" class Answer(): """ Wrapper for A_B_X directory containing all associated attributes. Populate all fields of the class and call grade to determine if the question was correct **user_answers user_answer either "A" or "B" indicating which file sounded better user_preference_weight numeric value between 1-5 indicating how much better the preferred value was. 5 being significant and 1 minimal user_X_value either "A" or "B" denoting which file the user believes X was a duplicate of user_answer_confidence numeric value between 1-5 indicating how easy it was to distinguish between A and B and pick X x_answer_alpha the answer to which file X was a duplicate of. Either "A" or "B" A_value String field denoting which scenario A belonged to. Either scenario_one or SCENARIO_TWO_SUBDIR B_value String field denoting which scenario B belonged to. Either scenario_one or SCENARIO_TWO_SUBDIR correct Call self.grade to populate this field. Compares user_X_value and x_answer_alpha to determine if question was correct. Populates with boolean """ def __init__(self, question_num, **user_answers): self.question_num=question_num self.correct = None try: self.user_answer=user_answers[USER_ANSWER_KEY] except KeyError: self.user_answer=None try: self.user_preference_weight=user_answers[USER_PREFERENCE_KEY] except KeyError: self.user_preference_weight=None try: self.user_X_value=user_answers[USER_X_VALUE_KEY] except KeyError: self.user_X_value=None try: self.user_answer_confidence=user_answers[USER_CONFIDENCE_KEY] except KeyError: self.user_answer_confidence=None try: self.x_answer_alpha=user_answers[X_ANSWER_KEY] except KeyError: self.x_answer_alpha=None try: self.A_value=user_answers[A_VALUE_KEY] except KeyError: self.A_value=None try: self.B_value=user_answers[B_VALUE_KEY] except KeyError: self.B_value=None def grade(self): if self.x_answer_alpha==self.user_X_value: self.correct=True else: self.correct=False def _collect_locations(): # Method to pair all the files for comparison in the two scenarios the user has elected to compare logging.info("Enter: _collect_locations") global scenario_one global scenario_two global output_base_path if not os.path.exists(scenario_one): print("Scenario One file path does not exist. Exiting") sys.exit() if not os.path.exists(scenario_two): print("Scenario Two file path does not exist. Exiting") sys.exit() print("Creating listening test...") logging.info("Exit: _collect_locations") return scenario_one, scenario_two, output_base_path def _cleanup_scenarios(adjusted_file_path): # Delete the adjusted audio created for this module try: shutil.rmtree(adjusted_file_path) except: print("The system could not delete the temporary audio files that " "were created for this test. 
This directory can be removed " "at {}".format(adjusted_file_path)) def _create_output_directory(output_base_path): # From the base path create a testcases subdirectory # Return the subdirectory full path logging.info("Enter: _create_output_directory") global output_path output_path = os.path.join(output_base_path, TESTCASES_SUBDIR) if os.path.exists(output_path): try: input("Please note there is already a Testcases directory at - {} .\nPress enter to continue and remove it. Press CNTRL-C to exit.".format(output_path)) shutil.rmtree(output_path) except PermissionError: print("There is a test directory located in the same location as the test directory location you specified") print("It cannot be removed becase another process is still using it. Please close the process or delete yourself.") sys.exit() except KeyboardInterrupt: print("Exiting...") sys.exit() os.mkdir(output_path) logging.info("Exit: _create_output_directory") return output_path def _create_answer_key(output_path): # Parse the data file from scenario one and two if it exists and add too answer key # Dump data from processes to ANSWER_KEY_NAME in output_path logging.info("Enter: _create_answer_key") global answer_key global scenario_one global scenario_two scenario_one_latency_data={} if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)): with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)) as output_data: scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY]=yaml.load(output_data) scenario_two_latency_data={} if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)): with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)) as output_data: scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY]=yaml.load(output_data) with open(os.path.join(output_path, ANSWER_KEY_NAME), "w") as answer_key_yml: yaml.dump(scenario_one_latency_data, answer_key_yml, default_flow_style=False) yaml.dump(scenario_two_latency_data, answer_key_yml, default_flow_style=False) for question in answer_key: yaml_dict={} Key = str(ANSWER_KEY_QUESTION_KEY+str(question.question_num)) yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha,A_VALUE_KEY: question.A_value,B_VALUE_KEY: question.B_value} yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False) logging.info("Exit: _create_answer_key") def _create_temp_dir(root_directory, scenario_one, scenario_two): logging.info("Enter: _create_temp_dir") # Will create exact copies of both directories specified so files may be altered later adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR) scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR) scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR) try: os.mkdir(adjusted_file_path) except FileExistsError: print("To properly create ABX tests, the audio files are modified so audio begins play at the same time") print("In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.") input("This directory already exists. Press enter to remove and continue or CTRL-C to quit") shutil.rmtree(adjusted_file_path) os.mkdir(adjusted_file_path) shutil.copytree(scenario_one, scenario_one_temp) shutil.copytree(scenario_two, scenario_two_temp) logging.info("Exit: _create_temp_dir") return adjusted_file_path, scenario_one_temp, scenario_one_temp def create_A_B_X_cases(A_B_cases_zip_list, output_path):
if case_num > MAX_CASE_NUM: logging.info("The amount of cases has exceeded 25. Please note that " "the accompanying excel sheet only has 25 answer slots and that it will need to " "be
""" Method to create A_B_X testing directories and return the corresponding answer key An A file is chosen from either the scenario one or two with a 50/50 probability. The B file is then from the scenario not chosen for A. An X file is then created with a 50/50 probability of being either a duplicate of A or B Parameters: A_B_cases_zip_list: A list containing absolute file pairs [[scenario_one, scenario_two]...] output_path: absolute file path to store testcase directory Returns: None """ logging.info("Enter: create_A_B_X_cases ") global scenario_one global scenario_two global answer_key # create listening directories and record answer to each in answer_log for case_num, case in enumerate(A_B_cases_zip_list): #MRR I really don't like silently dropping audio pairs. Please just create multiple ABX tests, each with up to 25. Up to you whether you have 3 of 25 and one of 21 or 4 of 24.
identifier_body
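Editor's note: the inline review comment in this sample asks for audio pairs beyond the 25-case limit to be split into additional ABX tests rather than silently dropped. A possible chunking helper is sketched below; the helper name and batch layout are assumptions, and only the per-test cap (MAX_CASE_NUM + 1 == 25 cases) comes from the sample.

# Sketch (assumption): split the full list of A/B pairs into batches of at
# most 25 so each batch can become its own ABX test directory.
def chunk_cases(pairs, cases_per_test=25):
    return [pairs[i:i + cases_per_test] for i in range(0, len(pairs), cases_per_test)]

# e.g. 96 pairs -> tests of 25, 25, 25 and 21 cases
batches = chunk_cases(list(range(96)))
print([len(b) for b in batches])  # [25, 25, 25, 21]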
manualtest.py
B_value" TESTCASES_SUBDIR="testcases" A_CASE_NAME="A_" B_CASE_NAME="B_" X_CASE_NAME="X_" WNDWS_COPY_CMD="copy" AUDIO_TYPE=".wav" SCNEARIO_ONE_DATA_FILE="output_data.yml" SCENARIO_ONE_DATA_FILE_KEY="Scenario One" SCENARIO_TWO_DATA_FILE="output_data.yml" SCENARIO_TWO_DATA_FILE_KEY="Scenario Two" ANSWER_KEY_NAME="answer_key.yml" USER_ANSWER_CASE_A="A" USER_ANSWER_CASE_B="B" ANSWER_KEY_SCENARIO_ONE="scenario one" ANSWER_KEY_SCENARIO_TWO="scenario two" ANSWER_KEY_QUESTION_KEY="Q_" MAX_CASE_NUM=24 ADJUSTED_AUDIO_SUBDIR="adjusted_audio" SCENARIO_ONE_SUBDIR="scenario_one" SCENARIO_TWO_SUBDIR="scenario_two" class Answer(): """ Wrapper for A_B_X directory containing all associated attributes. Populate all fields of the class and call grade to determine if the question was correct **user_answers user_answer either "A" or "B" indicating which file sounded better user_preference_weight numeric value between 1-5 indicating how much better the preferred value was. 5 being significant and 1 minimal user_X_value either "A" or "B" denoting which file the user believes X was a duplicate of user_answer_confidence numeric value between 1-5 indicating how easy it was to distinguish between A and B and pick X x_answer_alpha the answer to which file X was a duplicate of. Either "A" or "B" A_value String field denoting which scenario A belonged to. Either scenario_one or SCENARIO_TWO_SUBDIR B_value String field denoting which scenario B belonged to. Either scenario_one or SCENARIO_TWO_SUBDIR correct Call self.grade to populate this field. Compares user_X_value and x_answer_alpha to determine if question was correct. Populates with boolean """ def __init__(self, question_num, **user_answers): self.question_num=question_num self.correct = None try: self.user_answer=user_answers[USER_ANSWER_KEY] except KeyError: self.user_answer=None try: self.user_preference_weight=user_answers[USER_PREFERENCE_KEY] except KeyError: self.user_preference_weight=None try: self.user_X_value=user_answers[USER_X_VALUE_KEY] except KeyError: self.user_X_value=None try: self.user_answer_confidence=user_answers[USER_CONFIDENCE_KEY] except KeyError: self.user_answer_confidence=None try: self.x_answer_alpha=user_answers[X_ANSWER_KEY] except KeyError: self.x_answer_alpha=None try: self.A_value=user_answers[A_VALUE_KEY] except KeyError: self.A_value=None try: self.B_value=user_answers[B_VALUE_KEY] except KeyError: self.B_value=None def grade(self): if self.x_answer_alpha==self.user_X_value: self.correct=True else: self.correct=False def _collect_locations(): # Method to pair all the files for comparison in the two scenarios the user has elected to compare logging.info("Enter: _collect_locations") global scenario_one global scenario_two global output_base_path if not os.path.exists(scenario_one): print("Scenario One file path does not exist. Exiting") sys.exit() if not os.path.exists(scenario_two): print("Scenario Two file path does not exist. Exiting") sys.exit() print("Creating listening test...") logging.info("Exit: _collect_locations") return scenario_one, scenario_two, output_base_path def _cleanup_scenarios(adjusted_file_path): # Delete the adjusted audio created for this module try: shutil.rmtree(adjusted_file_path) except: print("The system could not delete the temporary audio files that " "were created for this test. 
This directory can be removed " "at {}".format(adjusted_file_path)) def _create_output_directory(output_base_path): # From the base path create a testcases subdirectory # Return the subdirectory full path logging.info("Enter: _create_output_directory") global output_path output_path = os.path.join(output_base_path, TESTCASES_SUBDIR) if os.path.exists(output_path): try: input("Please note there is already a Testcases directory at - {} .\nPress enter to continue and remove it. Press CNTRL-C to exit.".format(output_path)) shutil.rmtree(output_path) except PermissionError: print("There is a test directory located in the same location as the test directory location you specified") print("It cannot be removed becase another process is still using it. Please close the process or delete yourself.") sys.exit() except KeyboardInterrupt: print("Exiting...") sys.exit() os.mkdir(output_path) logging.info("Exit: _create_output_directory") return output_path def _create_answer_key(output_path): # Parse the data file from scenario one and two if it exists and add too answer key # Dump data from processes to ANSWER_KEY_NAME in output_path logging.info("Enter: _create_answer_key") global answer_key global scenario_one global scenario_two scenario_one_latency_data={} if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)): with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)) as output_data: scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY]=yaml.load(output_data) scenario_two_latency_data={} if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)): with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)) as output_data: scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY]=yaml.load(output_data) with open(os.path.join(output_path, ANSWER_KEY_NAME), "w") as answer_key_yml: yaml.dump(scenario_one_latency_data, answer_key_yml, default_flow_style=False) yaml.dump(scenario_two_latency_data, answer_key_yml, default_flow_style=False) for question in answer_key: yaml_dict={} Key = str(ANSWER_KEY_QUESTION_KEY+str(question.question_num)) yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha,A_VALUE_KEY: question.A_value,B_VALUE_KEY: question.B_value} yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False) logging.info("Exit: _create_answer_key") def _create_temp_dir(root_directory, scenario_one, scenario_two): logging.info("Enter: _create_temp_dir") # Will create exact copies of both directories specified so files may be altered later adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR) scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR) scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR) try: os.mkdir(adjusted_file_path) except FileExistsError: print("To properly create ABX tests, the audio files are modified so audio begins play at the same time") print("In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.") input("This directory already exists. 
Press enter to remove and continue or CTRL-C to quit") shutil.rmtree(adjusted_file_path) os.mkdir(adjusted_file_path) shutil.copytree(scenario_one, scenario_one_temp) shutil.copytree(scenario_two, scenario_two_temp) logging.info("Exit: _create_temp_dir") return adjusted_file_path, scenario_one_temp, scenario_one_temp def create_A_B_X_cases(A_B_cases_zip_list, output_path): """ Method to create A_B_X testing directories and return the corresponding answer key An A file is chosen from either the scenario one or two with a 50/50 probability. The B file is then from the scenario not chosen for A. An X file is then created with a 50/50 probability of being either a duplicate of A or B Parameters: A_B_cases_zip_list: A list containing absolute file pairs [[scenario_one, scenario_two]...] output_path: absolute file path to store testcase directory Returns: None """ logging.info("Enter: create_A_B_X_cases ") global scenario_one global scenario_two global answer_key # create listening directories and record answer to each in answer_log for case_num, case in enumerate(A_B_cases_zip_list): #MRR I really don't like silently dropping audio pairs. Please just create multiple ABX tests, each with up to 25. Up to you whether you have 3 of 25 and one of 21 or 4 of 24. if case_num > MAX_CASE_NUM:
logging.info("The amount of cases has exceeded 25. Please note that " "the accompanying excel sheet only has 25 answer slots and that it will need to " "be restructured") print("The amount of cases has exceeded 25. Please note that " "the accompanying excel sheet only has 25 answer slots and that it will need to " "be restructured")
conditional_block
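The create_A_B_X_cases docstring above describes the selection rule only in prose. The following is a minimal, hypothetical sketch of that rule, not the script's actual implementation; the helper name pick_abx and its arguments are illustrative assumptions.

import random

def pick_abx(scenario_one_file, scenario_two_file):
    # Hypothetical sketch of the A/B/X rule described in the docstring above.
    # A is drawn from scenario one or scenario two with 50/50 probability.
    if random.random() < 0.5:
        a_file, b_file = scenario_one_file, scenario_two_file
    else:
        a_file, b_file = scenario_two_file, scenario_one_file
    # X duplicates either A or B, again with 50/50 probability.
    x_file = a_file if random.random() < 0.5 else b_file
    return a_file, b_file, x_file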
docker_compose.rs
async_trait; use eyre::{eyre, Context, Result}; use std::ffi::OsStr; use std::fmt::Debug; use std::path::PathBuf; use std::process::Command; use std::process::Output; use url::Url; /// Each instance of this struct represents a particular FHIR Server implementation, where the implementation /// is launched and managed via Docker Compose. #[derive(Clone, Debug)] pub struct DockerComposeServerPlugin { server_name: ServerName, server_script: PathBuf, base_url: Url, request_builder_factory: fn(client: reqwest::Client, method: http::Method, url: Url) -> reqwest::RequestBuilder, } impl DockerComposeServerPlugin { /// Returns the [PathBuf] to the `docker compose` wrapper script for this server. fn server_script(&self) -> PathBuf { self.server_script.clone() } /// Returns the base [Url] that the server will use, once launched. fn base_url(&self) -> &Url { &self.base_url } } impl DockerComposeServerPlugin { /// Constructs a new `DockerComposeServerPlugin` instance that will represent a particular FHIR Server /// implementation. /// /// Parameters: /// * `server_name`: the [ServerName] that will uniquely identify the FHIR Server implemenation /// * `server_script`: a [PathBuf] to the shell script that wraps the `docker compose` command for this /// particular FHIR Server implementation /// * `base_url`: the base [Url] that should be used for all requests to the FHIR Server, once launched /// * `request_builder_factory`: a function that can produce the [reqwest::RequestBuilder] to use when /// querying the FHIR Server, once launched pub fn new( server_name: ServerName, server_script: PathBuf, base_url: Url, request_builder_factory: fn( client: reqwest::Client, method: http::Method, url: Url, ) -> reqwest::RequestBuilder, ) -> DockerComposeServerPlugin { DockerComposeServerPlugin { server_name, server_script, base_url, request_builder_factory, } } } #[async_trait] impl ServerPlugin for DockerComposeServerPlugin { fn server_name(&self) -> &ServerName { &self.server_name } async fn launch(&self, app_state: &AppState) -> Result<Box<dyn ServerHandle>> { launch_server(app_state, self).await } } /// Runs the specified Docker Compose subcommand with the specified argument, for the specified FHIR Server /// implementation. /// /// Parameters: /// * `server_plugin`: the [DockerComposeServerPlugin] that represents the FHIR Server implementation to run /// the command for/against /// * `args`: the Docker Compose subcommand and options to run, e.g. `["up", "--detach"]` #[tracing::instrument(level = "info", skip(server_plugin))] fn run_docker_compose<I, S>(server_plugin: &DockerComposeServerPlugin, args: I) -> Result<Output> where I: IntoIterator<Item = S> + Debug, S: AsRef<OsStr>, { /* * Build and launch the FHIR server. */ let docker_compose_output = Command::new(server_plugin.server_script()) .args(args) .output() .with_context(|| { format!( "Error returned by control command for the '{}' FHIR server.", server_plugin.server_name() ) })?; if !docker_compose_output.status.success() { return Err(eyre!(crate::errors::AppError::ChildProcessFailure( docker_compose_output.status, format!( "Error returned by control command for the '{}' FHIR server.", server_plugin.server_name() ), String::from_utf8_lossy(&docker_compose_output.stdout).into(), String::from_utf8_lossy(&docker_compose_output.stderr).into() ))); } Ok(docker_compose_output) } /// Launches the server, producing a boxed [SparkFhirServerHandle]. 
/// /// Parameters: /// * `app_state`: the application's [AppState] /// * `server_plugin`: the [DockerComposeServerPlugin] for the server to launch async fn launch_server( app_state: &AppState, server_plugin: &DockerComposeServerPlugin, ) -> Result<Box<dyn ServerHandle>> { /* * Build and launch the server. */ run_docker_compose(server_plugin, &["up", "--detach"]).with_context(|| { format!( "Running '{} up --detach' failed.", server_plugin .server_script() .file_name() .expect("Unable to get control script name.") .to_string_lossy() ) })?; /* * The server containers have now been started, though they're not necessarily ready yet. Build a * handle for it, copying any fields from the plugin that will be needed (as we can't safely downcast * the plugin, so this is the only way to have access to those fields from the handle). */ let server_plugin = app_state .find_server_plugin(server_plugin.server_name().as_str()) .expect("Unable to find server plugin"); let http_client = super::client_default()?; let server_handle = DockerComposeServerHandle { server_plugin: server_plugin.clone(), http_client, }; // Wait (up to a timeout) for the server to be ready. match wait_for_ready(app_state, &server_handle).await { Err(err) => { server_handle.emit_logs_info()?; Err(err) } Ok(_) => { let server_handle: Box<dyn ServerHandle> = Box::new(server_handle); Ok(server_handle) } } } /// Checks the specified server repeatedly to see if it is ready, up to a hardcoded timeout. /// /// Parameters: /// * `app_state`: the application's [AppState] /// * `server_handle`: the [DockerComposeServerPlugin] to test /// /// Returns an empty [Result], where an error indicates that the server was not ready. #[tracing::instrument(level = "debug", skip(app_state, server_handle))] async fn wait_for_ready( app_state: &AppState, server_handle: &DockerComposeServerHandle, ) -> Result<()> { let probe_result = tokio::time::timeout(std::time::Duration::from_secs(60 * 5), async { let mut ready = false; let mut probe = None; while !ready { probe = Some( crate::test_framework::metadata::check_metadata_operation(app_state, server_handle) .await, ); ready = probe.as_ref().expect("probe result missing").is_ok(); if !ready { tokio::time::sleep(std::time::Duration::from_millis(500)).await; } } probe.expect("probe results missing") }) .await .with_context(|| { format!( "Timed out while waiting for server '{}' to launch.", server_handle.plugin().server_name() ) })?; match probe_result { Err(err) => { server_handle.emit_logs_info()?; Err(err) } Ok(_) => Ok(()), } } /// Represents a running instance of a [DockerComposeServerPlugin] instance. struct DockerComposeServerHandle { server_plugin: ServerPluginWrapper, http_client: reqwest::Client, } #[async_trait] impl ServerHandle for DockerComposeServerHandle { fn plugin(&self) -> &ServerPluginWrapper { &self.server_plugin } fn base_url(&self) -> url::Url { let server_plugin = server_plugin_downcast(self); server_plugin.base_url().clone() } fn client(&self) -> Result<reqwest::Client> { Ok(self.http_client.clone()) } fn request_builder( &self, client: reqwest::Client, method: http::Method, url: Url, ) -> reqwest::RequestBuilder { let server_plugin = server_plugin_downcast(self); (server_plugin.request_builder_factory)(client, method, url) } fn
(&self) -> Result<String> { let server_plugin = server_plugin_downcast(self); match run_docker_compose(server_plugin, &["logs", "--no-color"]).with_context(|| { format!( "Running '{} up --detach' failed.", server_plugin .server_script() .file_name() .expect("Unable to get control script name.") .to_string_lossy() ) }) { Ok(output) => Ok(String::from_utf8_lossy(&output.stdout).to_string()), Err(err) => Err(err), } } #[tracing::instrument(level = "debug", skip(self, app_state))] async fn expunge_all_content(&self, app_state: &AppState) -> Result<()> { self.shutdown()?; let server_plugin = server_plugin_downcast(self); launch_server(app_state, server_plugin).await?; Ok(()) } #[tracing::instrument(level = "debug", skip(self))] fn shutdown(&self) -> Result<()> { let server_plugin = server_plugin_downcast(self); let docker_down_output = run_docker_compose(server_plugin, &["down"]).with_context(||
emit_logs
identifier_name
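run_docker_compose in the Rust source above wraps the per-server control script, runs the requested docker compose subcommand, and surfaces stdout/stderr when the exit status is non-zero. A rough Python sketch of that same pattern, purely as an illustration (the function and argument names are assumptions, not part of the project):

import subprocess

def run_control_script(server_script, args):
    # Illustrative only: run the server's wrapper script with a docker compose
    # subcommand (e.g. ["up", "--detach"]), capture output, and fail loudly on
    # a non-zero exit status, including the captured output in the error.
    result = subprocess.run([server_script, *args], capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError(
            "control command failed (exit {}):\nstdout:\n{}\nstderr:\n{}".format(
                result.returncode, result.stdout, result.stderr
            )
        )
    return result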
docker_compose.rs
_trait; use eyre::{eyre, Context, Result}; use std::ffi::OsStr; use std::fmt::Debug; use std::path::PathBuf; use std::process::Command; use std::process::Output; use url::Url; /// Each instance of this struct represents a particular FHIR Server implementation, where the implementation /// is launched and managed via Docker Compose. #[derive(Clone, Debug)] pub struct DockerComposeServerPlugin { server_name: ServerName, server_script: PathBuf, base_url: Url, request_builder_factory: fn(client: reqwest::Client, method: http::Method, url: Url) -> reqwest::RequestBuilder, } impl DockerComposeServerPlugin { /// Returns the [PathBuf] to the `docker compose` wrapper script for this server. fn server_script(&self) -> PathBuf { self.server_script.clone() } /// Returns the base [Url] that the server will use, once launched. fn base_url(&self) -> &Url { &self.base_url } } impl DockerComposeServerPlugin { /// Constructs a new `DockerComposeServerPlugin` instance that will represent a particular FHIR Server /// implementation. /// /// Parameters: /// * `server_name`: the [ServerName] that will uniquely identify the FHIR Server implemenation /// * `server_script`: a [PathBuf] to the shell script that wraps the `docker compose` command for this /// particular FHIR Server implementation /// * `base_url`: the base [Url] that should be used for all requests to the FHIR Server, once launched /// * `request_builder_factory`: a function that can produce the [reqwest::RequestBuilder] to use when /// querying the FHIR Server, once launched pub fn new( server_name: ServerName, server_script: PathBuf, base_url: Url, request_builder_factory: fn( client: reqwest::Client, method: http::Method, url: Url, ) -> reqwest::RequestBuilder, ) -> DockerComposeServerPlugin { DockerComposeServerPlugin { server_name, server_script, base_url, request_builder_factory, } } } #[async_trait] impl ServerPlugin for DockerComposeServerPlugin { fn server_name(&self) -> &ServerName { &self.server_name } async fn launch(&self, app_state: &AppState) -> Result<Box<dyn ServerHandle>> { launch_server(app_state, self).await } } /// Runs the specified Docker Compose subcommand with the specified argument, for the specified FHIR Server /// implementation. /// /// Parameters: /// * `server_plugin`: the [DockerComposeServerPlugin] that represents the FHIR Server implementation to run /// the command for/against /// * `args`: the Docker Compose subcommand and options to run, e.g. `["up", "--detach"]` #[tracing::instrument(level = "info", skip(server_plugin))] fn run_docker_compose<I, S>(server_plugin: &DockerComposeServerPlugin, args: I) -> Result<Output> where I: IntoIterator<Item = S> + Debug, S: AsRef<OsStr>, { /* * Build and launch the FHIR server. */ let docker_compose_output = Command::new(server_plugin.server_script()) .args(args) .output() .with_context(|| { format!( "Error returned by control command for the '{}' FHIR server.", server_plugin.server_name() ) })?; if !docker_compose_output.status.success() { return Err(eyre!(crate::errors::AppError::ChildProcessFailure( docker_compose_output.status, format!( "Error returned by control command for the '{}' FHIR server.", server_plugin.server_name() ), String::from_utf8_lossy(&docker_compose_output.stdout).into(), String::from_utf8_lossy(&docker_compose_output.stderr).into() ))); } Ok(docker_compose_output) } /// Launches the server, producing a boxed [SparkFhirServerHandle]. 
/// /// Parameters: /// * `app_state`: the application's [AppState] /// * `server_plugin`: the [DockerComposeServerPlugin] for the server to launch async fn launch_server( app_state: &AppState, server_plugin: &DockerComposeServerPlugin, ) -> Result<Box<dyn ServerHandle>> { /* * Build and launch the server. */ run_docker_compose(server_plugin, &["up", "--detach"]).with_context(|| { format!( "Running '{} up --detach' failed.", server_plugin .server_script() .file_name() .expect("Unable to get control script name.") .to_string_lossy() ) })?; /* * The server containers have now been started, though they're not necessarily ready yet. Build a * handle for it, copying any fields from the plugin that will be needed (as we can't safely downcast * the plugin, so this is the only way to have access to those fields from the handle). */ let server_plugin = app_state .find_server_plugin(server_plugin.server_name().as_str()) .expect("Unable to find server plugin"); let http_client = super::client_default()?;
// Wait (up to a timeout) for the server to be ready. match wait_for_ready(app_state, &server_handle).await { Err(err) => { server_handle.emit_logs_info()?; Err(err) } Ok(_) => { let server_handle: Box<dyn ServerHandle> = Box::new(server_handle); Ok(server_handle) } } } /// Checks the specified server repeatedly to see if it is ready, up to a hardcoded timeout. /// /// Parameters: /// * `app_state`: the application's [AppState] /// * `server_handle`: the [DockerComposeServerPlugin] to test /// /// Returns an empty [Result], where an error indicates that the server was not ready. #[tracing::instrument(level = "debug", skip(app_state, server_handle))] async fn wait_for_ready( app_state: &AppState, server_handle: &DockerComposeServerHandle, ) -> Result<()> { let probe_result = tokio::time::timeout(std::time::Duration::from_secs(60 * 5), async { let mut ready = false; let mut probe = None; while !ready { probe = Some( crate::test_framework::metadata::check_metadata_operation(app_state, server_handle) .await, ); ready = probe.as_ref().expect("probe result missing").is_ok(); if !ready { tokio::time::sleep(std::time::Duration::from_millis(500)).await; } } probe.expect("probe results missing") }) .await .with_context(|| { format!( "Timed out while waiting for server '{}' to launch.", server_handle.plugin().server_name() ) })?; match probe_result { Err(err) => { server_handle.emit_logs_info()?; Err(err) } Ok(_) => Ok(()), } } /// Represents a running instance of a [DockerComposeServerPlugin] instance. struct DockerComposeServerHandle { server_plugin: ServerPluginWrapper, http_client: reqwest::Client, } #[async_trait] impl ServerHandle for DockerComposeServerHandle { fn plugin(&self) -> &ServerPluginWrapper { &self.server_plugin } fn base_url(&self) -> url::Url { let server_plugin = server_plugin_downcast(self); server_plugin.base_url().clone() } fn client(&self) -> Result<reqwest::Client> { Ok(self.http_client.clone()) } fn request_builder( &self, client: reqwest::Client, method: http::Method, url: Url, ) -> reqwest::RequestBuilder { let server_plugin = server_plugin_downcast(self); (server_plugin.request_builder_factory)(client, method, url) } fn emit_logs(&self) -> Result<String> { let server_plugin = server_plugin_downcast(self); match run_docker_compose(server_plugin, &["logs", "--no-color"]).with_context(|| { format!( "Running '{} up --detach' failed.", server_plugin .server_script() .file_name() .expect("Unable to get control script name.") .to_string_lossy() ) }) { Ok(output) => Ok(String::from_utf8_lossy(&output.stdout).to_string()), Err(err) => Err(err), } } #[tracing::instrument(level = "debug", skip(self, app_state))] async fn expunge_all_content(&self, app_state: &AppState) -> Result<()> { self.shutdown()?; let server_plugin = server_plugin_downcast(self); launch_server(app_state, server_plugin).await?; Ok(()) } #[tracing::instrument(level = "debug", skip(self))] fn shutdown(&self) -> Result<()> { let server_plugin = server_plugin_downcast(self); let docker_down_output = run_docker_compose(server_plugin, &["down"]).with_context(||
let server_handle = DockerComposeServerHandle { server_plugin: server_plugin.clone(), http_client, };
random_line_split
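wait_for_ready above polls the server's metadata endpoint every 500 ms until it responds, giving up after a five-minute timeout. A simplified synchronous sketch of that loop, under the assumption that probe is any callable returning True once the server answers (the names here are placeholders, not the crate's API):

import time

def wait_until_ready(probe, timeout_s=300.0, poll_interval_s=0.5):
    # Placeholder sketch: retry the readiness probe until it succeeds or the
    # deadline passes, mirroring the 5-minute timeout and 500 ms sleep above.
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        if probe():
            return
        time.sleep(poll_interval_s)
    raise TimeoutError("server did not become ready before the timeout")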
docker_compose.rs
_trait; use eyre::{eyre, Context, Result}; use std::ffi::OsStr; use std::fmt::Debug; use std::path::PathBuf; use std::process::Command; use std::process::Output; use url::Url; /// Each instance of this struct represents a particular FHIR Server implementation, where the implementation /// is launched and managed via Docker Compose. #[derive(Clone, Debug)] pub struct DockerComposeServerPlugin { server_name: ServerName, server_script: PathBuf, base_url: Url, request_builder_factory: fn(client: reqwest::Client, method: http::Method, url: Url) -> reqwest::RequestBuilder, } impl DockerComposeServerPlugin { /// Returns the [PathBuf] to the `docker compose` wrapper script for this server. fn server_script(&self) -> PathBuf { self.server_script.clone() } /// Returns the base [Url] that the server will use, once launched. fn base_url(&self) -> &Url { &self.base_url } } impl DockerComposeServerPlugin { /// Constructs a new `DockerComposeServerPlugin` instance that will represent a particular FHIR Server /// implementation. /// /// Parameters: /// * `server_name`: the [ServerName] that will uniquely identify the FHIR Server implemenation /// * `server_script`: a [PathBuf] to the shell script that wraps the `docker compose` command for this /// particular FHIR Server implementation /// * `base_url`: the base [Url] that should be used for all requests to the FHIR Server, once launched /// * `request_builder_factory`: a function that can produce the [reqwest::RequestBuilder] to use when /// querying the FHIR Server, once launched pub fn new( server_name: ServerName, server_script: PathBuf, base_url: Url, request_builder_factory: fn( client: reqwest::Client, method: http::Method, url: Url, ) -> reqwest::RequestBuilder, ) -> DockerComposeServerPlugin { DockerComposeServerPlugin { server_name, server_script, base_url, request_builder_factory, } } } #[async_trait] impl ServerPlugin for DockerComposeServerPlugin { fn server_name(&self) -> &ServerName { &self.server_name } async fn launch(&self, app_state: &AppState) -> Result<Box<dyn ServerHandle>>
} /// Runs the specified Docker Compose subcommand with the specified argument, for the specified FHIR Server /// implementation. /// /// Parameters: /// * `server_plugin`: the [DockerComposeServerPlugin] that represents the FHIR Server implementation to run /// the command for/against /// * `args`: the Docker Compose subcommand and options to run, e.g. `["up", "--detach"]` #[tracing::instrument(level = "info", skip(server_plugin))] fn run_docker_compose<I, S>(server_plugin: &DockerComposeServerPlugin, args: I) -> Result<Output> where I: IntoIterator<Item = S> + Debug, S: AsRef<OsStr>, { /* * Build and launch the FHIR server. */ let docker_compose_output = Command::new(server_plugin.server_script()) .args(args) .output() .with_context(|| { format!( "Error returned by control command for the '{}' FHIR server.", server_plugin.server_name() ) })?; if !docker_compose_output.status.success() { return Err(eyre!(crate::errors::AppError::ChildProcessFailure( docker_compose_output.status, format!( "Error returned by control command for the '{}' FHIR server.", server_plugin.server_name() ), String::from_utf8_lossy(&docker_compose_output.stdout).into(), String::from_utf8_lossy(&docker_compose_output.stderr).into() ))); } Ok(docker_compose_output) } /// Launches the server, producing a boxed [SparkFhirServerHandle]. /// /// Parameters: /// * `app_state`: the application's [AppState] /// * `server_plugin`: the [DockerComposeServerPlugin] for the server to launch async fn launch_server( app_state: &AppState, server_plugin: &DockerComposeServerPlugin, ) -> Result<Box<dyn ServerHandle>> { /* * Build and launch the server. */ run_docker_compose(server_plugin, &["up", "--detach"]).with_context(|| { format!( "Running '{} up --detach' failed.", server_plugin .server_script() .file_name() .expect("Unable to get control script name.") .to_string_lossy() ) })?; /* * The server containers have now been started, though they're not necessarily ready yet. Build a * handle for it, copying any fields from the plugin that will be needed (as we can't safely downcast * the plugin, so this is the only way to have access to those fields from the handle). */ let server_plugin = app_state .find_server_plugin(server_plugin.server_name().as_str()) .expect("Unable to find server plugin"); let http_client = super::client_default()?; let server_handle = DockerComposeServerHandle { server_plugin: server_plugin.clone(), http_client, }; // Wait (up to a timeout) for the server to be ready. match wait_for_ready(app_state, &server_handle).await { Err(err) => { server_handle.emit_logs_info()?; Err(err) } Ok(_) => { let server_handle: Box<dyn ServerHandle> = Box::new(server_handle); Ok(server_handle) } } } /// Checks the specified server repeatedly to see if it is ready, up to a hardcoded timeout. /// /// Parameters: /// * `app_state`: the application's [AppState] /// * `server_handle`: the [DockerComposeServerPlugin] to test /// /// Returns an empty [Result], where an error indicates that the server was not ready. 
#[tracing::instrument(level = "debug", skip(app_state, server_handle))] async fn wait_for_ready( app_state: &AppState, server_handle: &DockerComposeServerHandle, ) -> Result<()> { let probe_result = tokio::time::timeout(std::time::Duration::from_secs(60 * 5), async { let mut ready = false; let mut probe = None; while !ready { probe = Some( crate::test_framework::metadata::check_metadata_operation(app_state, server_handle) .await, ); ready = probe.as_ref().expect("probe result missing").is_ok(); if !ready { tokio::time::sleep(std::time::Duration::from_millis(500)).await; } } probe.expect("probe results missing") }) .await .with_context(|| { format!( "Timed out while waiting for server '{}' to launch.", server_handle.plugin().server_name() ) })?; match probe_result { Err(err) => { server_handle.emit_logs_info()?; Err(err) } Ok(_) => Ok(()), } } /// Represents a running instance of a [DockerComposeServerPlugin] instance. struct DockerComposeServerHandle { server_plugin: ServerPluginWrapper, http_client: reqwest::Client, } #[async_trait] impl ServerHandle for DockerComposeServerHandle { fn plugin(&self) -> &ServerPluginWrapper { &self.server_plugin } fn base_url(&self) -> url::Url { let server_plugin = server_plugin_downcast(self); server_plugin.base_url().clone() } fn client(&self) -> Result<reqwest::Client> { Ok(self.http_client.clone()) } fn request_builder( &self, client: reqwest::Client, method: http::Method, url: Url, ) -> reqwest::RequestBuilder { let server_plugin = server_plugin_downcast(self); (server_plugin.request_builder_factory)(client, method, url) } fn emit_logs(&self) -> Result<String> { let server_plugin = server_plugin_downcast(self); match run_docker_compose(server_plugin, &["logs", "--no-color"]).with_context(|| { format!( "Running '{} up --detach' failed.", server_plugin .server_script() .file_name() .expect("Unable to get control script name.") .to_string_lossy() ) }) { Ok(output) => Ok(String::from_utf8_lossy(&output.stdout).to_string()), Err(err) => Err(err), } } #[tracing::instrument(level = "debug", skip(self, app_state))] async fn expunge_all_content(&self, app_state: &AppState) -> Result<()> { self.shutdown()?; let server_plugin = server_plugin_downcast(self); launch_server(app_state, server_plugin).await?; Ok(()) } #[tracing::instrument(level = "debug", skip(self))] fn shutdown(&self) -> Result<()> { let server_plugin = server_plugin_downcast(self); let docker_down_output = run_docker_compose(server_plugin, &["down"]).with_context(||
{ launch_server(app_state, self).await }
identifier_body
common.go
erThumbprint provides a constant to capture our env variable "IMPORTER_THUMBPRINT" ImporterThumbprint = "IMPORTER_THUMBPRINT" // ImporterCurrentCheckpoint provides a constant to capture our env variable "IMPORTER_CURRENT_CHECKPOINT" ImporterCurrentCheckpoint = "IMPORTER_CURRENT_CHECKPOINT" // ImporterPreviousCheckpoint provides a constant to capture our env variable "IMPORTER_PREVIOUS_CHECKPOINT" ImporterPreviousCheckpoint = "IMPORTER_PREVIOUS_CHECKPOINT" // ImporterFinalCheckpoint provides a constant to capture our env variable "IMPORTER_FINAL_CHECKPOINT" ImporterFinalCheckpoint = "IMPORTER_FINAL_CHECKPOINT" // Preallocation provides a constant to capture out env variable "PREALLOCATION" Preallocation = "PREALLOCATION" // ImportProxyHTTP provides a constant to capture our env variable "http_proxy" ImportProxyHTTP = "http_proxy" // ImportProxyHTTPS provides a constant to capture our env variable "https_proxy" ImportProxyHTTPS = "https_proxy" // ImportProxyNoProxy provides a constant to capture our env variable "no_proxy" ImportProxyNoProxy = "no_proxy" // ImporterProxyCertDirVar provides a constant to capture our env variable "IMPORTER_PROXY_CERT_DIR" ImporterProxyCertDirVar = "IMPORTER_PROXY_CERT_DIR" // InstallerPartOfLabel provides a constant to capture our env variable "INSTALLER_PART_OF_LABEL" InstallerPartOfLabel = "INSTALLER_PART_OF_LABEL" // InstallerVersionLabel provides a constant to capture our env variable "INSTALLER_VERSION_LABEL" InstallerVersionLabel = "INSTALLER_VERSION_LABEL" // ImporterExtraHeader provides a constant to include extra HTTP headers, as the prefix to a format string ImporterExtraHeader = "IMPORTER_EXTRA_HEADER_" // ImporterSecretExtraHeadersDir is where the secrets containing extra HTTP headers will be mounted ImporterSecretExtraHeadersDir = "/extraheaders" // ImporterGoogleCredentialFileVar provides a constant to capture our env variable "GOOGLE_APPLICATION_CREDENTIALS" ImporterGoogleCredentialFileVar = "GOOGLE_APPLICATION_CREDENTIALS" // ImporterGoogleCredentialDir provides a constant to capture our secret mount Dir ImporterGoogleCredentialDir = "/google" // ImporterGoogleCredentialFile provides a constant to capture our credentials.json file ImporterGoogleCredentialFile = "/google/credentials.json" // CloningLabelValue provides a constant to use as a label value for pod affinity (controller pkg only) CloningLabelValue = "host-assisted-cloning" // CloningTopologyKey (controller pkg only) CloningTopologyKey = "kubernetes.io/hostname" // ClonerSourcePodName (controller pkg only) ClonerSourcePodName = "cdi-clone-source" // ClonerMountPath (controller pkg only) ClonerMountPath = "/var/run/cdi/clone/source" // ClonerSourcePodNameSuffix (controller pkg only) ClonerSourcePodNameSuffix = "-source-pod" // KubeVirtAnnKey is part of a kubevirt.io key. KubeVirtAnnKey = "kubevirt.io/" // CDIAnnKey is part of a kubevirt.io key. 
CDIAnnKey = "cdi.kubevirt.io/" // SmartClonerCDILabel is the label applied to resources created by the smart-clone controller SmartClonerCDILabel = "cdi-smart-clone" // CloneFromSnapshotFallbackPVCCDILabel is the label applied to the temp host assisted PVC used for fallback in cloning from volumesnapshot CloneFromSnapshotFallbackPVCCDILabel = "cdi-clone-from-snapshot-source-host-assisted-fallback-pvc" // UploadPodName (controller pkg only) UploadPodName = "cdi-upload" // UploadServerCDILabel is the label applied to upload server resources UploadServerCDILabel = "cdi-upload-server" // UploadServerPodname is name of the upload server pod container UploadServerPodname = UploadServerCDILabel // UploadServerDataDir is the destination directoryfor uploads UploadServerDataDir = ImporterDataDir // UploadServerServiceLabel is the label selector for upload server services UploadServerServiceLabel = "service" // UploadImageSize provides a constant to capture our env variable "UPLOAD_IMAGE_SIZE" UploadImageSize = "UPLOAD_IMAGE_SIZE" // FilesystemOverheadVar provides a constant to capture our env variable "FILESYSTEM_OVERHEAD" FilesystemOverheadVar = "FILESYSTEM_OVERHEAD" // DefaultGlobalOverhead is the amount of space reserved on Filesystem volumes by default DefaultGlobalOverhead = "0.055" // ConfigName is the name of default CDI Config ConfigName = "config" // OwnerUID provides the UID of the owner entity (either PVC or DV) OwnerUID = "OWNER_UID" // KeyAccess provides a constant to the accessKeyId label using in controller pkg and transport_test.go KeyAccess = "accessKeyId" // KeySecret provides a constant to the secretKey label using in controller pkg and transport_test.go KeySecret = "secretKey" // DefaultResyncPeriod sets a 10 minute resync period, used in the controller pkg and the controller cmd executable DefaultResyncPeriod = 10 * time.Minute // ScratchSpaceNeededExitCode is the exit code that indicates the importer pod requires scratch space to function properly. 
ScratchSpaceNeededExitCode = 42 // ScratchNameSuffix (controller pkg only) ScratchNameSuffix = "scratch" // UploadTokenIssuer is the JWT issuer of upload tokens UploadTokenIssuer = "cdi-apiserver" // CloneTokenIssuer is the JWT issuer for clone tokens CloneTokenIssuer = "cdi-apiserver" // ExtendedCloneTokenIssuer is the JWT issuer for clone tokens ExtendedCloneTokenIssuer = "cdi-deployment" // QemuSubGid is the gid used as the qemu group in fsGroup QemuSubGid = int64(107) // ControllerServiceAccountName is the name of the CDI controller service account ControllerServiceAccountName = "cdi-sa" // CronJobServiceAccountName is the name of the CDI cron job service account CronJobServiceAccountName = "cdi-cronjob" // VddkConfigMap is the name of the ConfigMap with a reference to the VDDK image VddkConfigMap = "v2v-vmware" // VddkConfigDataKey is the name of the ConfigMap key of the VDDK image reference VddkConfigDataKey = "vddk-init-image" // AwaitingVDDK is a Pending condition reason that indicates the PVC is waiting for a VDDK image AwaitingVDDK = "AwaitingVDDK" // UploadContentTypeHeader is the header upload clients may use to set the content type explicitly UploadContentTypeHeader = "x-cdi-content-type" // FilesystemCloneContentType is the content type when cloning a filesystem FilesystemCloneContentType = "filesystem-clone" // BlockdeviceClone is the content type when cloning a block device BlockdeviceClone = "blockdevice-clone" // UploadPathSync is the path to POST CDI uploads UploadPathSync = "/v1beta1/upload" // UploadPathAsync is the path to POST CDI uploads in async mode UploadPathAsync = "/v1beta1/upload-async" // UploadArchivePath is the path to POST CDI archive uploads UploadArchivePath = "/v1beta1/upload-archive" // UploadArchiveAlphaPath is the path to POST CDI alpha archive uploads UploadArchiveAlphaPath = "/v1alpha1/upload-archive" // UploadFormSync is the path to POST CDI uploads as form data UploadFormSync = "/v1beta1/upload-form" // UploadFormAsync is the path to POST CDI uploads as form data in async mode UploadFormAsync = "/v1beta1/upload-form-async" // PreallocationApplied is a string inserted into importer's/uploader's exit message PreallocationApplied = "Preallocation applied" // SecretHeader is the key in a secret containing a sensitive extra header for HTTP data sources SecretHeader = "secretHeader" // UnusualRestartCountThreshold is the number of pod restarts that we consider unusual and would like to alert about UnusualRestartCountThreshold = 3 // GenericError is a generic error string GenericError = "Error" // CDIControllerLeaderElectionHelperName is the name of the configmap that is used as a helper for controller leader election CDIControllerLeaderElectionHelperName = "cdi-controller-leader-election-helper" ) // ProxyPaths are all supported paths var ProxyPaths = append( append(SyncUploadPaths, AsyncUploadPaths...), append(SyncUploadFormPaths, AsyncUploadFormPaths...)..., ) // SyncUploadPaths are paths to POST CDI uploads var SyncUploadPaths = []string{ UploadPathSync, "/v1alpha1/upload", } // AsyncUploadPaths are paths to POST CDI uploads in async mode var AsyncUploadPaths = []string{ UploadPathAsync, "/v1alpha1/upload-async", } // ArchiveUploadPaths are paths to POST CDI uploads of archive var ArchiveUploadPaths = []string{ UploadArchivePath, UploadArchiveAlphaPath,
}
random_line_split
zsum_mulcov_rate.py
parent rates and subgroup covariate multipliers use a grid with # one point in age and two points in time. Thus there are six model variables # for each rate, two for the parent rates and four for the # covariate multipliers. # The resulting rates will be constant # in age and constant in time except between the two time grid points # where it is linear. # # Source Code # *********** # {xrst_literal # BEGIN PYTHON # END PYTHON # } # # {xrst_end user_zsum_mulcov_rate.py} # --------------------------------------------------------------------------- # BEGIN PYTHON # ------------------------------------------------------------------------ # begin problem parameters number_data = 50 iota_parent = 1e-2 rho_parent = 2e-2 subgroup_mulcov = 0.2; measurement_cv = 0.01 # end problem parameters # ------------------------------------------------------------------------ import sys import os import copy import math import random import time test_program = 'example/user/zsum_mulcov_rate.py' if sys.argv[0] != test_program or len(sys.argv) != 1 : usage = 'python3 ' + test_program + '\n' usage += 'where python3 is the python 3 program on your system\n' usage += 'and working directory is the dismod_at distribution directory\n' sys.exit(usage) print(test_program) # # import dismod_at local_dir = os.getcwd() + '/python' if( os.path.isdir( local_dir + '/dismod_at' ) ) : sys.path.insert(0, local_dir) import dismod_at # # change into the build/example/user directory if not os.path.exists('build/example/user') : os.makedirs('build/example/user') os.chdir('build/example/user') # ------------------------------------------------------------------------ python_seed = int( time.time() ) random.seed( python_seed ) # ------------------------------------------------------------------------ # Note that the a, t values are not used for this example def example_db (file_name) : def fun_rate_subgroup(a, t) : return ('prior_rate_subgroup', None, 'prior_gauss_diff') def fun_rate_parent(a, t) : return ('prior_rate_parent', None, 'prior_gauss_diff') import dismod_at # ---------------------------------------------------------------------- # age list age_list = [ 0.0, 50.0, 100.0 ] # # time list time_list = [ 1990.0, 2010.0 ] # # integrand table integrand_table = [ { 'name':'Sincidence' }, { 'name':'remission' } ] # # node table: north_america -> (united_states, canada) node_table = [ { 'name':'north_america', 'parent':'' }, { 'name':'united_states', 'parent':'north_america' }, { 'name':'canada', 'parent':'north_america' } ] # # subgroup_table subgroup_table = [ { 'subgroup':'none', 'group':'none' }, { 'subgroup':'united_states', 'group':'north_america' }, { 'subgroup':'canada', 'group':'north_america' }, ] # # mulcov table mulcov_table = [ { # subgroup covariate multiplers effecting iota 'covariate':'one', 'type':'rate_value', 'effected':'iota', 'group':'north_america', 'smooth':None, 'subsmooth':'smooth_rate_subgroup' },{ # subgroup covariate multipliers effecting rho 'covariate':'one', 'type':'rate_value', 'effected':'rho', 'group':'north_america', 'smooth':None, 'subsmooth':'smooth_rate_subgroup' } ] # # weight table: weight_table = list() # # covariate table: no covriates covariate_table = [ { 'name':'one', 'reference':0.0, 'max_difference':None } ] # # avgint table: same order as list of integrands avgint_table = list() # # nslist_dict: nslist_dict = dict() # ---------------------------------------------------------------------- # data table: same order as list of integrands data_table = list() # write out data row = { 
'density': 'gaussian', 'weight': '', 'hold_out': False, 'age_lower': 50.0, 'age_upper': 50.0, 'one': 1.0, } for data_id in range(number_data) : if data_id % 3 == 0 : row['node'] = 'north_america' row['subgroup'] = 'none' row['data_name'] = 'na_' + str( data_id / 2 ) effect_true = 0.0 if data_id % 3 == 1 : row['node'] = 'united_states' row['subgroup'] = 'united_states' row['data_name'] = 'us_' + str( data_id / 2 ) effect_true = - subgroup_mulcov if data_id % 3 == 2 : row['node'] = 'canada' row['subgroup'] = 'canada' row['data_name'] = 'ca_' + str( data_id / 2 ) effect_true = + subgroup_mulcov if data_id % 2 == 0 : row['time_lower'] = 1990.0 row['time_upper'] = 1990.0 else : row['time_lower'] = 2010.0 row['time_upper'] = 2010.0 # if data_id < number_data / 2 : iota_true = math.exp(effect_true) * iota_parent row['integrand'] = 'Sincidence' row['meas_std'] = iota_true * measurement_cv noise = iota_true * random.gauss(0.0, measurement_cv) row['meas_value'] = iota_true + noise else : rho_true = math.exp(effect_true) * rho_parent row['integrand'] = 'remission' row['meas_std'] = rho_true * measurement_cv noise = rho_true * random.gauss(0.0, measurement_cv) row['meas_value'] = rho_true + noise # data_table.append( copy.copy(row) ) # # ---------------------------------------------------------------------- # prior_table prior_table = [ { # prior_rate_parent 'name': 'prior_rate_parent', 'density': 'uniform', 'lower': min(iota_true, rho_true) / 100.0, 'upper': max(iota_true, rho_true) * 100.0, 'mean': (iota_true + rho_true), },{ # prior_rate_subgroup 'name': 'prior_rate_subgroup', 'density': 'gaussian', 'mean': 0.0, 'std': 100.0, # very large so like a uniform distribution },{ # prior_gauss_diff 'name': 'prior_gauss_diff', 'density': 'gaussian', 'mean': 0.0, 'std': 100.0, # very large so like uniform
{ # smooth_rate_subgroup 'name': 'smooth_rate_subgroup', 'age_id': [ 0 ], 'time_id': [ 0, 1 ], 'fun': fun_rate_subgroup },{ # smooth_rate_parent 'name': 'smooth_rate_parent', 'age_id': [ 0 ], 'time_id': [ 0, 1 ], 'fun': fun_rate_parent } ] # ---------------------------------------------------------------------- # rate table rate_table = [ { 'name': 'iota', 'parent_smooth': 'smooth_rate_parent', 'child_smooth': None, },{ 'name': 'rho', 'parent_smooth': 'smooth_rate_parent', 'child_smooth': None, } ] # ---------------------------------------------------------------------- # option_table option_table = [ { 'name':'parent_node_name', 'value':'north_america' }, { 'name':'zero_sum_mulcov_group', 'value':'north_america' }, { 'name':'random_seed', 'value':'0' }, { 'name':'ode_step_size', 'value':'10.0' }, { 'name':'rate_case', 'value':'iota_pos_rho_pos' }, { 'name':'quasi_fixed', 'value':'true' }, { 'name':'derivative_test_fixed', '
} ] # ---------------------------------------------------------------------- # smooth table smooth_table = [
random_line_split
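The data table built above simulates each measurement by scaling the parent rate by exp(subgroup effect) and adding Gaussian noise proportional to the measurement CV. A small sketch of that calculation (the function name simulate_measurement is illustrative, not part of the example):

import math
import random

def simulate_measurement(parent_rate, subgroup_effect, measurement_cv):
    # True value: parent rate scaled by the exponentiated subgroup effect.
    true_value = math.exp(subgroup_effect) * parent_rate
    # Reported value: true value plus Gaussian noise whose standard deviation
    # is cv * true value, matching meas_std and random.gauss in the example.
    meas_std = true_value * measurement_cv
    meas_value = true_value + random.gauss(0.0, meas_std)
    return meas_value, meas_std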
zsum_mulcov_rate.py
rates and subgroup covariate multipliers use a grid with # one point in age and two points in time. Thus there are six model variables # for each rate, two for the parent rates and four for the # covariate multipliers. # The resulting rates will be constant # in age and constant in time except between the two time grid points # where it is linear. # # Source Code # *********** # {xrst_literal # BEGIN PYTHON # END PYTHON # } # # {xrst_end user_zsum_mulcov_rate.py} # --------------------------------------------------------------------------- # BEGIN PYTHON # ------------------------------------------------------------------------ # begin problem parameters number_data = 50 iota_parent = 1e-2 rho_parent = 2e-2 subgroup_mulcov = 0.2; measurement_cv = 0.01 # end problem parameters # ------------------------------------------------------------------------ import sys import os import copy import math import random import time test_program = 'example/user/zsum_mulcov_rate.py' if sys.argv[0] != test_program or len(sys.argv) != 1 : usage = 'python3 ' + test_program + '\n' usage += 'where python3 is the python 3 program on your system\n' usage += 'and working directory is the dismod_at distribution directory\n' sys.exit(usage) print(test_program) # # import dismod_at local_dir = os.getcwd() + '/python' if( os.path.isdir( local_dir + '/dismod_at' ) ) : sys.path.insert(0, local_dir) import dismod_at # # change into the build/example/user directory if not os.path.exists('build/example/user') : os.makedirs('build/example/user') os.chdir('build/example/user') # ------------------------------------------------------------------------ python_seed = int( time.time() ) random.seed( python_seed ) # ------------------------------------------------------------------------ # Note that the a, t values are not used for this example def example_db (file_name) : def fun_rate_subgroup(a, t) :
def fun_rate_parent(a, t) : return ('prior_rate_parent', None, 'prior_gauss_diff') import dismod_at # ---------------------------------------------------------------------- # age list age_list = [ 0.0, 50.0, 100.0 ] # # time list time_list = [ 1990.0, 2010.0 ] # # integrand table integrand_table = [ { 'name':'Sincidence' }, { 'name':'remission' } ] # # node table: north_america -> (united_states, canada) node_table = [ { 'name':'north_america', 'parent':'' }, { 'name':'united_states', 'parent':'north_america' }, { 'name':'canada', 'parent':'north_america' } ] # # subgroup_table subgroup_table = [ { 'subgroup':'none', 'group':'none' }, { 'subgroup':'united_states', 'group':'north_america' }, { 'subgroup':'canada', 'group':'north_america' }, ] # # mulcov table mulcov_table = [ { # subgroup covariate multiplers effecting iota 'covariate':'one', 'type':'rate_value', 'effected':'iota', 'group':'north_america', 'smooth':None, 'subsmooth':'smooth_rate_subgroup' },{ # subgroup covariate multipliers effecting rho 'covariate':'one', 'type':'rate_value', 'effected':'rho', 'group':'north_america', 'smooth':None, 'subsmooth':'smooth_rate_subgroup' } ] # # weight table: weight_table = list() # # covariate table: no covriates covariate_table = [ { 'name':'one', 'reference':0.0, 'max_difference':None } ] # # avgint table: same order as list of integrands avgint_table = list() # # nslist_dict: nslist_dict = dict() # ---------------------------------------------------------------------- # data table: same order as list of integrands data_table = list() # write out data row = { 'density': 'gaussian', 'weight': '', 'hold_out': False, 'age_lower': 50.0, 'age_upper': 50.0, 'one': 1.0, } for data_id in range(number_data) : if data_id % 3 == 0 : row['node'] = 'north_america' row['subgroup'] = 'none' row['data_name'] = 'na_' + str( data_id / 2 ) effect_true = 0.0 if data_id % 3 == 1 : row['node'] = 'united_states' row['subgroup'] = 'united_states' row['data_name'] = 'us_' + str( data_id / 2 ) effect_true = - subgroup_mulcov if data_id % 3 == 2 : row['node'] = 'canada' row['subgroup'] = 'canada' row['data_name'] = 'ca_' + str( data_id / 2 ) effect_true = + subgroup_mulcov if data_id % 2 == 0 : row['time_lower'] = 1990.0 row['time_upper'] = 1990.0 else : row['time_lower'] = 2010.0 row['time_upper'] = 2010.0 # if data_id < number_data / 2 : iota_true = math.exp(effect_true) * iota_parent row['integrand'] = 'Sincidence' row['meas_std'] = iota_true * measurement_cv noise = iota_true * random.gauss(0.0, measurement_cv) row['meas_value'] = iota_true + noise else : rho_true = math.exp(effect_true) * rho_parent row['integrand'] = 'remission' row['meas_std'] = rho_true * measurement_cv noise = rho_true * random.gauss(0.0, measurement_cv) row['meas_value'] = rho_true + noise # data_table.append( copy.copy(row) ) # # ---------------------------------------------------------------------- # prior_table prior_table = [ { # prior_rate_parent 'name': 'prior_rate_parent', 'density': 'uniform', 'lower': min(iota_true, rho_true) / 100.0, 'upper': max(iota_true, rho_true) * 100.0, 'mean': (iota_true + rho_true), },{ # prior_rate_subgroup 'name': 'prior_rate_subgroup', 'density': 'gaussian', 'mean': 0.0, 'std': 100.0, # very large so like a uniform distribution },{ # prior_gauss_diff 'name': 'prior_gauss_diff', 'density': 'gaussian', 'mean': 0.0, 'std': 100.0, # very large so like uniform } ] # ---------------------------------------------------------------------- # smooth table smooth_table = [ { # smooth_rate_subgroup 'name': 
'smooth_rate_subgroup', 'age_id': [ 0 ], 'time_id': [ 0, 1 ], 'fun': fun_rate_subgroup },{ # smooth_rate_parent 'name': 'smooth_rate_parent', 'age_id': [ 0 ], 'time_id': [ 0, 1 ], 'fun': fun_rate_parent } ] # ---------------------------------------------------------------------- # rate table rate_table = [ { 'name': 'iota', 'parent_smooth': 'smooth_rate_parent', 'child_smooth': None, },{ 'name': 'rho', 'parent_smooth': 'smooth_rate_parent', 'child_smooth': None, } ] # ---------------------------------------------------------------------- # option_table option_table = [ { 'name':'parent_node_name', 'value':'north_america' }, { 'name':'zero_sum_mulcov_group', 'value':'north_america' }, { 'name':'random_seed', 'value':'0' }, { 'name':'ode_step_size', 'value':'10.0' }, { 'name':'rate_case', 'value':'iota_pos_rho_pos' }, { 'name':'quasi_fixed', 'value':'true' }, { 'name':'derivative_test_fixed',
return ('prior_rate_subgroup', None, 'prior_gauss_diff')
identifier_body
zsum_mulcov_rate.py
rates and subgroup covariate multipliers use a grid with # one point in age and two points in time. Thus there are six model variables # for each rate, two for the parent rates and four for the # covariate multipliers. # The resulting rates will be constant # in age and constant in time except between the two time grid points # where it is linear. # # Source Code # *********** # {xrst_literal # BEGIN PYTHON # END PYTHON # } # # {xrst_end user_zsum_mulcov_rate.py} # --------------------------------------------------------------------------- # BEGIN PYTHON # ------------------------------------------------------------------------ # begin problem parameters number_data = 50 iota_parent = 1e-2 rho_parent = 2e-2 subgroup_mulcov = 0.2; measurement_cv = 0.01 # end problem parameters # ------------------------------------------------------------------------ import sys import os import copy import math import random import time test_program = 'example/user/zsum_mulcov_rate.py' if sys.argv[0] != test_program or len(sys.argv) != 1 : usage = 'python3 ' + test_program + '\n' usage += 'where python3 is the python 3 program on your system\n' usage += 'and working directory is the dismod_at distribution directory\n' sys.exit(usage) print(test_program) # # import dismod_at local_dir = os.getcwd() + '/python' if( os.path.isdir( local_dir + '/dismod_at' ) ) :
import dismod_at # # change into the build/example/user directory if not os.path.exists('build/example/user') : os.makedirs('build/example/user') os.chdir('build/example/user') # ------------------------------------------------------------------------ python_seed = int( time.time() ) random.seed( python_seed ) # ------------------------------------------------------------------------ # Note that the a, t values are not used for this example def example_db (file_name) : def fun_rate_subgroup(a, t) : return ('prior_rate_subgroup', None, 'prior_gauss_diff') def fun_rate_parent(a, t) : return ('prior_rate_parent', None, 'prior_gauss_diff') import dismod_at # ---------------------------------------------------------------------- # age list age_list = [ 0.0, 50.0, 100.0 ] # # time list time_list = [ 1990.0, 2010.0 ] # # integrand table integrand_table = [ { 'name':'Sincidence' }, { 'name':'remission' } ] # # node table: north_america -> (united_states, canada) node_table = [ { 'name':'north_america', 'parent':'' }, { 'name':'united_states', 'parent':'north_america' }, { 'name':'canada', 'parent':'north_america' } ] # # subgroup_table subgroup_table = [ { 'subgroup':'none', 'group':'none' }, { 'subgroup':'united_states', 'group':'north_america' }, { 'subgroup':'canada', 'group':'north_america' }, ] # # mulcov table mulcov_table = [ { # subgroup covariate multiplers effecting iota 'covariate':'one', 'type':'rate_value', 'effected':'iota', 'group':'north_america', 'smooth':None, 'subsmooth':'smooth_rate_subgroup' },{ # subgroup covariate multipliers effecting rho 'covariate':'one', 'type':'rate_value', 'effected':'rho', 'group':'north_america', 'smooth':None, 'subsmooth':'smooth_rate_subgroup' } ] # # weight table: weight_table = list() # # covariate table: no covriates covariate_table = [ { 'name':'one', 'reference':0.0, 'max_difference':None } ] # # avgint table: same order as list of integrands avgint_table = list() # # nslist_dict: nslist_dict = dict() # ---------------------------------------------------------------------- # data table: same order as list of integrands data_table = list() # write out data row = { 'density': 'gaussian', 'weight': '', 'hold_out': False, 'age_lower': 50.0, 'age_upper': 50.0, 'one': 1.0, } for data_id in range(number_data) : if data_id % 3 == 0 : row['node'] = 'north_america' row['subgroup'] = 'none' row['data_name'] = 'na_' + str( data_id / 2 ) effect_true = 0.0 if data_id % 3 == 1 : row['node'] = 'united_states' row['subgroup'] = 'united_states' row['data_name'] = 'us_' + str( data_id / 2 ) effect_true = - subgroup_mulcov if data_id % 3 == 2 : row['node'] = 'canada' row['subgroup'] = 'canada' row['data_name'] = 'ca_' + str( data_id / 2 ) effect_true = + subgroup_mulcov if data_id % 2 == 0 : row['time_lower'] = 1990.0 row['time_upper'] = 1990.0 else : row['time_lower'] = 2010.0 row['time_upper'] = 2010.0 # if data_id < number_data / 2 : iota_true = math.exp(effect_true) * iota_parent row['integrand'] = 'Sincidence' row['meas_std'] = iota_true * measurement_cv noise = iota_true * random.gauss(0.0, measurement_cv) row['meas_value'] = iota_true + noise else : rho_true = math.exp(effect_true) * rho_parent row['integrand'] = 'remission' row['meas_std'] = rho_true * measurement_cv noise = rho_true * random.gauss(0.0, measurement_cv) row['meas_value'] = rho_true + noise # data_table.append( copy.copy(row) ) # # ---------------------------------------------------------------------- # prior_table prior_table = [ { # prior_rate_parent 'name': 'prior_rate_parent', 
'density': 'uniform', 'lower': min(iota_true, rho_true) / 100.0, 'upper': max(iota_true, rho_true) * 100.0, 'mean': (iota_true + rho_true), },{ # prior_rate_subgroup 'name': 'prior_rate_subgroup', 'density': 'gaussian', 'mean': 0.0, 'std': 100.0, # very large so like a uniform distribution },{ # prior_gauss_diff 'name': 'prior_gauss_diff', 'density': 'gaussian', 'mean': 0.0, 'std': 100.0, # very large so like uniform } ] # ---------------------------------------------------------------------- # smooth table smooth_table = [ { # smooth_rate_subgroup 'name': 'smooth_rate_subgroup', 'age_id': [ 0 ], 'time_id': [ 0, 1 ], 'fun': fun_rate_subgroup },{ # smooth_rate_parent 'name': 'smooth_rate_parent', 'age_id': [ 0 ], 'time_id': [ 0, 1 ], 'fun': fun_rate_parent } ] # ---------------------------------------------------------------------- # rate table rate_table = [ { 'name': 'iota', 'parent_smooth': 'smooth_rate_parent', 'child_smooth': None, },{ 'name': 'rho', 'parent_smooth': 'smooth_rate_parent', 'child_smooth': None, } ] # ---------------------------------------------------------------------- # option_table option_table = [ { 'name':'parent_node_name', 'value':'north_america' }, { 'name':'zero_sum_mulcov_group', 'value':'north_america' }, { 'name':'random_seed', 'value':'0' }, { 'name':'ode_step_size', 'value':'10.0' }, { 'name':'rate_case', 'value':'iota_pos_rho_pos' }, { 'name':'quasi_fixed', 'value':'true' }, { 'name':'derivative_test_fixed',
sys.path.insert(0, local_dir)
conditional_block
zsum_mulcov_rate.py
rates and subgroup covariate multipliers use a grid with # one point in age and two points in time. Thus there are six model variables # for each rate, two for the parent rates and four for the # covariate multipliers. # The resulting rates will be constant # in age and constant in time except between the two time grid points # where it is linear. # # Source Code # *********** # {xrst_literal # BEGIN PYTHON # END PYTHON # } # # {xrst_end user_zsum_mulcov_rate.py} # --------------------------------------------------------------------------- # BEGIN PYTHON # ------------------------------------------------------------------------ # begin problem parameters number_data = 50 iota_parent = 1e-2 rho_parent = 2e-2 subgroup_mulcov = 0.2; measurement_cv = 0.01 # end problem parameters # ------------------------------------------------------------------------ import sys import os import copy import math import random import time test_program = 'example/user/zsum_mulcov_rate.py' if sys.argv[0] != test_program or len(sys.argv) != 1 : usage = 'python3 ' + test_program + '\n' usage += 'where python3 is the python 3 program on your system\n' usage += 'and working directory is the dismod_at distribution directory\n' sys.exit(usage) print(test_program) # # import dismod_at local_dir = os.getcwd() + '/python' if( os.path.isdir( local_dir + '/dismod_at' ) ) : sys.path.insert(0, local_dir) import dismod_at # # change into the build/example/user directory if not os.path.exists('build/example/user') : os.makedirs('build/example/user') os.chdir('build/example/user') # ------------------------------------------------------------------------ python_seed = int( time.time() ) random.seed( python_seed ) # ------------------------------------------------------------------------ # Note that the a, t values are not used for this example def
(file_name) : def fun_rate_subgroup(a, t) : return ('prior_rate_subgroup', None, 'prior_gauss_diff') def fun_rate_parent(a, t) : return ('prior_rate_parent', None, 'prior_gauss_diff') import dismod_at # ---------------------------------------------------------------------- # age list age_list = [ 0.0, 50.0, 100.0 ] # # time list time_list = [ 1990.0, 2010.0 ] # # integrand table integrand_table = [ { 'name':'Sincidence' }, { 'name':'remission' } ] # # node table: north_america -> (united_states, canada) node_table = [ { 'name':'north_america', 'parent':'' }, { 'name':'united_states', 'parent':'north_america' }, { 'name':'canada', 'parent':'north_america' } ] # # subgroup_table subgroup_table = [ { 'subgroup':'none', 'group':'none' }, { 'subgroup':'united_states', 'group':'north_america' }, { 'subgroup':'canada', 'group':'north_america' }, ] # # mulcov table mulcov_table = [ { # subgroup covariate multiplers effecting iota 'covariate':'one', 'type':'rate_value', 'effected':'iota', 'group':'north_america', 'smooth':None, 'subsmooth':'smooth_rate_subgroup' },{ # subgroup covariate multipliers effecting rho 'covariate':'one', 'type':'rate_value', 'effected':'rho', 'group':'north_america', 'smooth':None, 'subsmooth':'smooth_rate_subgroup' } ] # # weight table: weight_table = list() # # covariate table: no covriates covariate_table = [ { 'name':'one', 'reference':0.0, 'max_difference':None } ] # # avgint table: same order as list of integrands avgint_table = list() # # nslist_dict: nslist_dict = dict() # ---------------------------------------------------------------------- # data table: same order as list of integrands data_table = list() # write out data row = { 'density': 'gaussian', 'weight': '', 'hold_out': False, 'age_lower': 50.0, 'age_upper': 50.0, 'one': 1.0, } for data_id in range(number_data) : if data_id % 3 == 0 : row['node'] = 'north_america' row['subgroup'] = 'none' row['data_name'] = 'na_' + str( data_id / 2 ) effect_true = 0.0 if data_id % 3 == 1 : row['node'] = 'united_states' row['subgroup'] = 'united_states' row['data_name'] = 'us_' + str( data_id / 2 ) effect_true = - subgroup_mulcov if data_id % 3 == 2 : row['node'] = 'canada' row['subgroup'] = 'canada' row['data_name'] = 'ca_' + str( data_id / 2 ) effect_true = + subgroup_mulcov if data_id % 2 == 0 : row['time_lower'] = 1990.0 row['time_upper'] = 1990.0 else : row['time_lower'] = 2010.0 row['time_upper'] = 2010.0 # if data_id < number_data / 2 : iota_true = math.exp(effect_true) * iota_parent row['integrand'] = 'Sincidence' row['meas_std'] = iota_true * measurement_cv noise = iota_true * random.gauss(0.0, measurement_cv) row['meas_value'] = iota_true + noise else : rho_true = math.exp(effect_true) * rho_parent row['integrand'] = 'remission' row['meas_std'] = rho_true * measurement_cv noise = rho_true * random.gauss(0.0, measurement_cv) row['meas_value'] = rho_true + noise # data_table.append( copy.copy(row) ) # # ---------------------------------------------------------------------- # prior_table prior_table = [ { # prior_rate_parent 'name': 'prior_rate_parent', 'density': 'uniform', 'lower': min(iota_true, rho_true) / 100.0, 'upper': max(iota_true, rho_true) * 100.0, 'mean': (iota_true + rho_true), },{ # prior_rate_subgroup 'name': 'prior_rate_subgroup', 'density': 'gaussian', 'mean': 0.0, 'std': 100.0, # very large so like a uniform distribution },{ # prior_gauss_diff 'name': 'prior_gauss_diff', 'density': 'gaussian', 'mean': 0.0, 'std': 100.0, # very large so like uniform } ] # 
---------------------------------------------------------------------- # smooth table smooth_table = [ { # smooth_rate_subgroup 'name': 'smooth_rate_subgroup', 'age_id': [ 0 ], 'time_id': [ 0, 1 ], 'fun': fun_rate_subgroup },{ # smooth_rate_parent 'name': 'smooth_rate_parent', 'age_id': [ 0 ], 'time_id': [ 0, 1 ], 'fun': fun_rate_parent } ] # ---------------------------------------------------------------------- # rate table rate_table = [ { 'name': 'iota', 'parent_smooth': 'smooth_rate_parent', 'child_smooth': None, },{ 'name': 'rho', 'parent_smooth': 'smooth_rate_parent', 'child_smooth': None, } ] # ---------------------------------------------------------------------- # option_table option_table = [ { 'name':'parent_node_name', 'value':'north_america' }, { 'name':'zero_sum_mulcov_group', 'value':'north_america' }, { 'name':'random_seed', 'value':'0' }, { 'name':'ode_step_size', 'value':'10.0' }, { 'name':'rate_case', 'value':'iota_pos_rho_pos' }, { 'name':'quasi_fixed', 'value':'true' }, { 'name':'derivative_test_fixed',
example_db
identifier_name
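The dismod_at record above simulates each measurement by scaling the parent rate with the exponentiated subgroup effect and adding Gaussian noise proportional to the measurement coefficient of variation. The following is a minimal standalone sketch of that simulation step; the numeric values of iota_parent, subgroup_mulcov and measurement_cv are illustrative assumptions, not the ones defined earlier in the example script.

import math
import random

# Illustrative values (assumptions for this sketch, not taken from the example script).
iota_parent = 0.01
subgroup_mulcov = 0.2
measurement_cv = 0.1

def simulate_sincidence(subgroup):
    # The subgroup covariate multiplier shifts the parent rate on the log scale.
    effect_true = {'none': 0.0, 'united_states': -subgroup_mulcov, 'canada': +subgroup_mulcov}[subgroup]
    iota_true = math.exp(effect_true) * iota_parent
    noise = iota_true * random.gauss(0.0, measurement_cv)
    return {
        'integrand': 'Sincidence',
        'subgroup': subgroup,
        'meas_std': iota_true * measurement_cv,
        'meas_value': iota_true + noise,
    }

if __name__ == '__main__':
    random.seed(0)
    for subgroup in ('none', 'united_states', 'canada'):
        print(simulate_sincidence(subgroup))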
lookup.rs
} else { high = mid; } } mid } /// Convert a [TValue] to a parametric `t`-value. pub(crate) fn t_value_to_parametric(&self, t: TValue) -> f64 { match t { TValue::Parametric(t) => { assert!((0.0..=1.).contains(&t)); t } TValue::Euclidean(t) => { assert!((0.0..=1.).contains(&t)); self.euclidean_to_parametric(t, DEFAULT_EUCLIDEAN_ERROR_BOUND) } TValue::EuclideanWithinError { t, error } => { assert!((0.0..=1.).contains(&t)); self.euclidean_to_parametric(t, error) } } } /// Calculate the point on the curve based on the `t`-value provided. pub(crate) fn unrestricted_parametric_evaluate(&self, t: f64) -> DVec2 { // Basis code based off of pseudocode found here: <https://pomax.github.io/bezierinfo/#explanation>. let t_squared = t * t; let one_minus_t = 1. - t; let squared_one_minus_t = one_minus_t * one_minus_t; match self.handles { BezierHandles::Linear => self.start.lerp(self.end, t), BezierHandles::Quadratic { handle } => squared_one_minus_t * self.start + 2. * one_minus_t * t * handle + t_squared * self.end, BezierHandles::Cubic { handle_start, handle_end } => { let t_cubed = t_squared * t; let cubed_one_minus_t = squared_one_minus_t * one_minus_t; cubed_one_minus_t * self.start + 3. * squared_one_minus_t * t * handle_start + 3. * one_minus_t * t_squared * handle_end + t_cubed * self.end } } } /// Calculate the coordinates of the point `t` along the curve. /// Expects `t` to be within the inclusive range `[0, 1]`. /// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/evaluate/solo" title="Evaluate Demo"></iframe> pub fn evaluate(&self, t: TValue) -> DVec2 { let t = self.t_value_to_parametric(t); self.unrestricted_parametric_evaluate(t) } /// Return a selection of equidistant points on the bezier curve. /// If no value is provided for `steps`, then the function will default `steps` to be 10. /// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/lookup-table/solo" title="Lookup-Table Demo"></iframe> pub fn compute_lookup_table(&self, steps: Option<usize>, tvalue_type: Option<TValueType>) -> Vec<DVec2> { let steps = steps.unwrap_or(DEFAULT_LUT_STEP_SIZE); let tvalue_type = tvalue_type.unwrap_or(TValueType::Parametric); (0..=steps) .map(|t| { let tvalue = match tvalue_type { TValueType::Parametric => TValue::Parametric(t as f64 / steps as f64), TValueType::Euclidean => TValue::Euclidean(t as f64 / steps as f64), }; self.evaluate(tvalue) }) .collect() } /// Return an approximation of the length of the bezier curve. /// - `num_subdivisions` - Number of subdivisions used to approximate the curve. The default value is 1000. /// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/length/solo" title="Length Demo"></iframe> pub fn length(&self, num_subdivisions: Option<usize>) -> f64 { match self.handles { BezierHandles::Linear => self.start.distance(self.end), _ => { // Code example from <https://gamedev.stackexchange.com/questions/5373/moving-ships-between-two-planets-along-a-bezier-missing-some-equations-for-acce/5427#5427>. 
// We will use an approximate approach where we split the curve into many subdivisions // and calculate the euclidean distance between the two endpoints of the subdivision let lookup_table = self.compute_lookup_table(Some(num_subdivisions.unwrap_or(DEFAULT_LENGTH_SUBDIVISIONS)), Some(TValueType::Parametric)); let mut approx_curve_length = 0.; let mut previous_point = lookup_table[0]; // Calculate approximate distance between subdivision for current_point in lookup_table.iter().skip(1) { // Calculate distance of subdivision approx_curve_length += (*current_point - previous_point).length(); // Update the previous point previous_point = *current_point; } approx_curve_length } } } /// Returns the parametric `t`-value that corresponds to the closest point on the curve to the provided point. /// Uses a searching algorithm akin to binary search that can be customized using the optional [ProjectionOptions] struct. /// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/project/solo" title="Project Demo"></iframe> pub fn project(&self, point: DVec2, options: Option<ProjectionOptions>) -> f64 { let options = options.unwrap_or_default(); let ProjectionOptions { lut_size, convergence_epsilon, convergence_limit, iteration_limit, } = options; // TODO: Consider optimizations from precomputing useful values, or using the GPU // First find the closest point from the results of a lookup table let lut = self.compute_lookup_table(Some(lut_size), Some(TValueType::Parametric)); let (minimum_position, minimum_distance) = utils::get_closest_point_in_lut(&lut, point); // Get the t values to the left and right of the closest result in the lookup table let lut_size_f64 = lut_size as f64; let minimum_position_f64 = minimum_position as f64; let mut left_t = (minimum_position_f64 - 1.).max(0.) / lut_size_f64; let mut right_t = (minimum_position_f64 + 1.).min(lut_size_f64) / lut_size_f64; // Perform a finer search by finding closest t from 5 points between [left_t, right_t] inclusive // Choose new left_t and right_t for a smaller range around the closest t and repeat the process let mut final_t = left_t; let mut distance; // Increment minimum_distance to ensure that the distance < minimum_distance comparison will be true for at least one iteration let mut new_minimum_distance = minimum_distance + 1.; // Maintain the previous distance to identify convergence let mut previous_distance; // Counter to limit the number of iterations let mut iteration_count = 0; // Counter to identify how many iterations have had a similar result. Used for convergence test let mut convergence_count = 0; // Store calculated distances to minimize unnecessary recomputations let mut distances: [f64; NUM_DISTANCES] = [ point.distance(lut[(minimum_position as i64 - 1).max(0) as usize]), 0., 0., 0., point.distance(lut[lut_size.min(minimum_position + 1)]), ]; while left_t <= right_t && convergence_count < convergence_limit && iteration_count < iteration_limit { previous_distance = new_minimum_distance; let step = (right_t - left_t) / (NUM_DISTANCES as f64 - 1.); let mut iterator_t = left_t; let mut target_index = 0; // Iterate through first 4 points and will handle the right most point later for (step_index, table_distance) in distances.iter_mut().enumerate().take(4) { // Use previously computed distance for the left most point, and compute new values for the
{ if ratio < error { return 0.; } if 1. - ratio < error { return 1.; } let mut low = 0.; let mut mid = 0.; let mut high = 1.; let total_length = self.length(None); while low < high { mid = (low + high) / 2.; let test_ratio = self.trim(TValue::Parametric(0.), TValue::Parametric(mid)).length(None) / total_length; if f64_compare(test_ratio, ratio, error) { break; } else if test_ratio < ratio { low = mid;
identifier_body
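The length routine in the lookup.rs excerpt above is documented as splitting the curve into many subdivisions and summing the Euclidean distances between consecutive sample points. Here is a minimal Python sketch of that idea for a cubic Bezier curve; it illustrates the approach, not the Rust implementation, and the control points and subdivision count are made-up values.

import math

def cubic_bezier_point(p0, p1, p2, p3, t):
    # Bernstein-basis evaluation, mirroring the cubic case of the evaluate code above.
    u = 1.0 - t
    x = u**3 * p0[0] + 3 * u**2 * t * p1[0] + 3 * u * t**2 * p2[0] + t**3 * p3[0]
    y = u**3 * p0[1] + 3 * u**2 * t * p1[1] + 3 * u * t**2 * p2[1] + t**3 * p3[1]
    return (x, y)

def approximate_length(p0, p1, p2, p3, num_subdivisions=1000):
    # Sum the chord lengths of a uniform parametric sampling of the curve.
    total = 0.0
    prev = cubic_bezier_point(p0, p1, p2, p3, 0.0)
    for i in range(1, num_subdivisions + 1):
        cur = cubic_bezier_point(p0, p1, p2, p3, i / num_subdivisions)
        total += math.dist(prev, cur)
        prev = cur
    return total

if __name__ == '__main__':
    # A cubic that approximates a quarter of a unit circle; the result is close to pi / 2.
    print(approximate_length((1, 0), (1, 0.5523), (0.5523, 1), (0, 1)))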
lookup.rs
(&self, ratio: f64, error: f64) -> f64 { if ratio < error { return 0.; } if 1. - ratio < error { return 1.; } let mut low = 0.; let mut mid = 0.; let mut high = 1.; let total_length = self.length(None); while low < high { mid = (low + high) / 2.; let test_ratio = self.trim(TValue::Parametric(0.), TValue::Parametric(mid)).length(None) / total_length; if f64_compare(test_ratio, ratio, error) { break; } else if test_ratio < ratio { low = mid; } else { high = mid; } } mid } /// Convert a [TValue] to a parametric `t`-value. pub(crate) fn t_value_to_parametric(&self, t: TValue) -> f64 { match t { TValue::Parametric(t) => { assert!((0.0..=1.).contains(&t)); t } TValue::Euclidean(t) => { assert!((0.0..=1.).contains(&t)); self.euclidean_to_parametric(t, DEFAULT_EUCLIDEAN_ERROR_BOUND) } TValue::EuclideanWithinError { t, error } => { assert!((0.0..=1.).contains(&t)); self.euclidean_to_parametric(t, error) } } } /// Calculate the point on the curve based on the `t`-value provided. pub(crate) fn unrestricted_parametric_evaluate(&self, t: f64) -> DVec2 { // Basis code based off of pseudocode found here: <https://pomax.github.io/bezierinfo/#explanation>. let t_squared = t * t; let one_minus_t = 1. - t; let squared_one_minus_t = one_minus_t * one_minus_t; match self.handles { BezierHandles::Linear => self.start.lerp(self.end, t), BezierHandles::Quadratic { handle } => squared_one_minus_t * self.start + 2. * one_minus_t * t * handle + t_squared * self.end, BezierHandles::Cubic { handle_start, handle_end } => { let t_cubed = t_squared * t; let cubed_one_minus_t = squared_one_minus_t * one_minus_t; cubed_one_minus_t * self.start + 3. * squared_one_minus_t * t * handle_start + 3. * one_minus_t * t_squared * handle_end + t_cubed * self.end } } } /// Calculate the coordinates of the point `t` along the curve. /// Expects `t` to be within the inclusive range `[0, 1]`. /// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/evaluate/solo" title="Evaluate Demo"></iframe> pub fn evaluate(&self, t: TValue) -> DVec2 { let t = self.t_value_to_parametric(t); self.unrestricted_parametric_evaluate(t) } /// Return a selection of equidistant points on the bezier curve. /// If no value is provided for `steps`, then the function will default `steps` to be 10. /// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/lookup-table/solo" title="Lookup-Table Demo"></iframe> pub fn compute_lookup_table(&self, steps: Option<usize>, tvalue_type: Option<TValueType>) -> Vec<DVec2> { let steps = steps.unwrap_or(DEFAULT_LUT_STEP_SIZE); let tvalue_type = tvalue_type.unwrap_or(TValueType::Parametric); (0..=steps) .map(|t| { let tvalue = match tvalue_type { TValueType::Parametric => TValue::Parametric(t as f64 / steps as f64), TValueType::Euclidean => TValue::Euclidean(t as f64 / steps as f64), }; self.evaluate(tvalue) }) .collect() } /// Return an approximation of the length of the bezier curve. /// - `num_subdivisions` - Number of subdivisions used to approximate the curve. The default value is 1000. 
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/length/solo" title="Length Demo"></iframe> pub fn length(&self, num_subdivisions: Option<usize>) -> f64 { match self.handles { BezierHandles::Linear => self.start.distance(self.end), _ => { // Code example from <https://gamedev.stackexchange.com/questions/5373/moving-ships-between-two-planets-along-a-bezier-missing-some-equations-for-acce/5427#5427>. // We will use an approximate approach where we split the curve into many subdivisions // and calculate the euclidean distance between the two endpoints of the subdivision let lookup_table = self.compute_lookup_table(Some(num_subdivisions.unwrap_or(DEFAULT_LENGTH_SUBDIVISIONS)), Some(TValueType::Parametric)); let mut approx_curve_length = 0.; let mut previous_point = lookup_table[0]; // Calculate approximate distance between subdivision for current_point in lookup_table.iter().skip(1) { // Calculate distance of subdivision approx_curve_length += (*current_point - previous_point).length(); // Update the previous point previous_point = *current_point; } approx_curve_length } } } /// Returns the parametric `t`-value that corresponds to the closest point on the curve to the provided point. /// Uses a searching algorithm akin to binary search that can be customized using the optional [ProjectionOptions] struct. /// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/project/solo" title="Project Demo"></iframe> pub fn project(&self, point: DVec2, options: Option<ProjectionOptions>) -> f64 { let options = options.unwrap_or_default(); let ProjectionOptions { lut_size, convergence_epsilon, convergence_limit, iteration_limit, } = options; // TODO: Consider optimizations from precomputing useful values, or using the GPU // First find the closest point from the results of a lookup table let lut = self.compute_lookup_table(Some(lut_size), Some(TValueType::Parametric)); let (minimum_position, minimum_distance) = utils::get_closest_point_in_lut(&lut, point); // Get the t values to the left and right of the closest result in the lookup table let lut_size_f64 = lut_size as f64; let minimum_position_f64 = minimum_position as f64; let mut left_t = (minimum_position_f64 - 1.).max(0.) / lut_size_f64; let mut right_t = (minimum_position_f64 + 1.).min(lut_size_f64) / lut_size_f64; // Perform a finer search by finding closest t from 5 points between [left_t, right_t] inclusive // Choose new left_t and right_t for a smaller range around the closest t and repeat the process let mut final_t = left_t; let mut distance; // Increment minimum_distance to ensure that the distance < minimum_distance comparison will be true for at least one iteration let mut new_minimum_distance = minimum_distance + 1.; // Maintain the previous distance to identify convergence let mut previous_distance; // Counter to limit the number of iterations let mut iteration_count = 0; // Counter to identify how many iterations have had a similar result. 
Used for convergence test let mut convergence_count = 0; // Store calculated distances to minimize unnecessary recomputations let mut distances: [f64; NUM_DISTANCES] = [ point.distance(lut[(minimum_position as i64 - 1).max(0) as usize]), 0., 0., 0., point.distance(lut[lut_size.min(minimum_position + 1)]), ]; while left_t <= right_t && convergence_count < convergence_limit && iteration_count < iteration_limit { previous_distance = new_minimum_distance; let step = (right_t - left_t) / (NUM_DISTANCES as f64 - 1.); let mut iterator_t = left_t; let mut target_index = 0; // Iterate through first 4 points and will handle the right most point later for (step_index, table_distance) in distances.iter_mut().enumerate().
euclidean_to_parametric
identifier_name
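The euclidean_to_parametric body shown in the record above maps an arc-length ratio to a parametric t by bisection: measure the length of the trimmed curve [0, mid], compare that ratio with the requested one, and narrow the bracket. The sketch below restates the idea in Python with a chord-sum length estimate; the sampling density, tolerance and test curve are assumptions for illustration, and the loop uses a simple interval-width stopping test rather than the f64_compare check.

import math

def cubic_point(p0, p1, p2, p3, t):
    u = 1.0 - t
    return (u**3 * p0[0] + 3 * u**2 * t * p1[0] + 3 * u * t**2 * p2[0] + t**3 * p3[0],
            u**3 * p0[1] + 3 * u**2 * t * p1[1] + 3 * u * t**2 * p2[1] + t**3 * p3[1])

def partial_length(curve, t_end, samples=200):
    # Arc length from t = 0 to t = t_end, approximated by summing chord lengths.
    total, prev = 0.0, cubic_point(*curve, 0.0)
    for i in range(1, samples + 1):
        cur = cubic_point(*curve, t_end * i / samples)
        total += math.dist(prev, cur)
        prev = cur
    return total

def euclidean_to_parametric(curve, ratio, error=1e-4):
    # Bisection on t until the partial-length ratio matches the requested ratio.
    if ratio < error:
        return 0.0
    if 1.0 - ratio < error:
        return 1.0
    total = partial_length(curve, 1.0)
    low, high, mid = 0.0, 1.0, 0.5
    while high - low > error:
        mid = (low + high) / 2.0
        if partial_length(curve, mid) / total < ratio:
            low = mid
        else:
            high = mid
    return mid

if __name__ == '__main__':
    curve = ((0, 0), (0, 1), (1, 1), (1, 0))
    print(euclidean_to_parametric(curve, 0.5))  # t-value at half the arc length, ~0.5 by symmetry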
lookup.rs
} mid } /// Convert a [TValue] to a parametric `t`-value. pub(crate) fn t_value_to_parametric(&self, t: TValue) -> f64 { match t { TValue::Parametric(t) => { assert!((0.0..=1.).contains(&t)); t } TValue::Euclidean(t) => { assert!((0.0..=1.).contains(&t)); self.euclidean_to_parametric(t, DEFAULT_EUCLIDEAN_ERROR_BOUND) } TValue::EuclideanWithinError { t, error } => { assert!((0.0..=1.).contains(&t)); self.euclidean_to_parametric(t, error) } } } /// Calculate the point on the curve based on the `t`-value provided. pub(crate) fn unrestricted_parametric_evaluate(&self, t: f64) -> DVec2 { // Basis code based off of pseudocode found here: <https://pomax.github.io/bezierinfo/#explanation>. let t_squared = t * t; let one_minus_t = 1. - t; let squared_one_minus_t = one_minus_t * one_minus_t; match self.handles { BezierHandles::Linear => self.start.lerp(self.end, t), BezierHandles::Quadratic { handle } => squared_one_minus_t * self.start + 2. * one_minus_t * t * handle + t_squared * self.end, BezierHandles::Cubic { handle_start, handle_end } => { let t_cubed = t_squared * t; let cubed_one_minus_t = squared_one_minus_t * one_minus_t; cubed_one_minus_t * self.start + 3. * squared_one_minus_t * t * handle_start + 3. * one_minus_t * t_squared * handle_end + t_cubed * self.end } } } /// Calculate the coordinates of the point `t` along the curve. /// Expects `t` to be within the inclusive range `[0, 1]`. /// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/evaluate/solo" title="Evaluate Demo"></iframe> pub fn evaluate(&self, t: TValue) -> DVec2 { let t = self.t_value_to_parametric(t); self.unrestricted_parametric_evaluate(t) } /// Return a selection of equidistant points on the bezier curve. /// If no value is provided for `steps`, then the function will default `steps` to be 10. /// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/lookup-table/solo" title="Lookup-Table Demo"></iframe> pub fn compute_lookup_table(&self, steps: Option<usize>, tvalue_type: Option<TValueType>) -> Vec<DVec2> { let steps = steps.unwrap_or(DEFAULT_LUT_STEP_SIZE); let tvalue_type = tvalue_type.unwrap_or(TValueType::Parametric); (0..=steps) .map(|t| { let tvalue = match tvalue_type { TValueType::Parametric => TValue::Parametric(t as f64 / steps as f64), TValueType::Euclidean => TValue::Euclidean(t as f64 / steps as f64), }; self.evaluate(tvalue) }) .collect() } /// Return an approximation of the length of the bezier curve. /// - `num_subdivisions` - Number of subdivisions used to approximate the curve. The default value is 1000. /// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/length/solo" title="Length Demo"></iframe> pub fn length(&self, num_subdivisions: Option<usize>) -> f64 { match self.handles { BezierHandles::Linear => self.start.distance(self.end), _ => { // Code example from <https://gamedev.stackexchange.com/questions/5373/moving-ships-between-two-planets-along-a-bezier-missing-some-equations-for-acce/5427#5427>. 
// We will use an approximate approach where we split the curve into many subdivisions // and calculate the euclidean distance between the two endpoints of the subdivision let lookup_table = self.compute_lookup_table(Some(num_subdivisions.unwrap_or(DEFAULT_LENGTH_SUBDIVISIONS)), Some(TValueType::Parametric)); let mut approx_curve_length = 0.; let mut previous_point = lookup_table[0]; // Calculate approximate distance between subdivision for current_point in lookup_table.iter().skip(1) { // Calculate distance of subdivision approx_curve_length += (*current_point - previous_point).length(); // Update the previous point previous_point = *current_point; } approx_curve_length } } } /// Returns the parametric `t`-value that corresponds to the closest point on the curve to the provided point. /// Uses a searching algorithm akin to binary search that can be customized using the optional [ProjectionOptions] struct. /// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/project/solo" title="Project Demo"></iframe> pub fn project(&self, point: DVec2, options: Option<ProjectionOptions>) -> f64 { let options = options.unwrap_or_default(); let ProjectionOptions { lut_size, convergence_epsilon, convergence_limit, iteration_limit, } = options; // TODO: Consider optimizations from precomputing useful values, or using the GPU // First find the closest point from the results of a lookup table let lut = self.compute_lookup_table(Some(lut_size), Some(TValueType::Parametric)); let (minimum_position, minimum_distance) = utils::get_closest_point_in_lut(&lut, point); // Get the t values to the left and right of the closest result in the lookup table let lut_size_f64 = lut_size as f64; let minimum_position_f64 = minimum_position as f64; let mut left_t = (minimum_position_f64 - 1.).max(0.) / lut_size_f64; let mut right_t = (minimum_position_f64 + 1.).min(lut_size_f64) / lut_size_f64; // Perform a finer search by finding closest t from 5 points between [left_t, right_t] inclusive // Choose new left_t and right_t for a smaller range around the closest t and repeat the process let mut final_t = left_t; let mut distance; // Increment minimum_distance to ensure that the distance < minimum_distance comparison will be true for at least one iteration let mut new_minimum_distance = minimum_distance + 1.; // Maintain the previous distance to identify convergence let mut previous_distance; // Counter to limit the number of iterations let mut iteration_count = 0; // Counter to identify how many iterations have had a similar result. 
Used for convergence test let mut convergence_count = 0; // Store calculated distances to minimize unnecessary recomputations let mut distances: [f64; NUM_DISTANCES] = [ point.distance(lut[(minimum_position as i64 - 1).max(0) as usize]), 0., 0., 0., point.distance(lut[lut_size.min(minimum_position + 1)]), ]; while left_t <= right_t && convergence_count < convergence_limit && iteration_count < iteration_limit { previous_distance = new_minimum_distance; let step = (right_t - left_t) / (NUM_DISTANCES as f64 - 1.); let mut iterator_t = left_t; let mut target_index = 0; // Iterate through first 4 points and will handle the right most point later for (step_index, table_distance) in distances.iter_mut().enumerate().take(4) { // Use previously computed distance for the left most point, and compute new values for the others if step_index == 0 { distance = *table_distance; } else { distance = point.distance(self.evaluate(TValue::Parametric(iterator_t))); *table_distance = distance; } if distance < new_minimum_distance { new_minimum_distance = distance; target_index = step_index; final_t = iterator_t } iterator_t += step; } // Check right most edge separately since step may not perfectly add up to it (floating point errors) if distances[NUM_DISTANCES - 1] < new_minimum_distance { new_minimum_distance = distances[NUM_DISTANCES - 1]; final_t = right_t;
{ high = mid; }
conditional_block
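project is described above as a two-stage search: first a coarse scan of a lookup table of sampled points, then a finer search between the neighbours of the best sample. This sketch shows only the coarse stage, assuming the curve is any callable mapping t in [0, 1] to an (x, y) pair; the name closest_in_lut and the half-circle test curve are invented for the example.

import math

def closest_in_lut(evaluate, point, lut_size=20):
    """Sample the curve at lut_size + 1 uniform t-values and return the index of the
    sample closest to point, together with the bracketing [left_t, right_t] range."""
    lut = [evaluate(i / lut_size) for i in range(lut_size + 1)]
    distances = [math.dist(p, point) for p in lut]
    minimum_position = min(range(len(lut)), key=distances.__getitem__)
    left_t = max(minimum_position - 1, 0) / lut_size
    right_t = min(minimum_position + 1, lut_size) / lut_size
    return minimum_position, left_t, right_t

if __name__ == '__main__':
    # Illustrative curve: a unit half circle parametrised by t in [0, 1].
    half_circle = lambda t: (math.cos(math.pi * t), math.sin(math.pi * t))
    print(closest_in_lut(half_circle, (0.0, 1.2)))  # closest sample is near t = 0.5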
lookup.rs
// Basis code based off of pseudocode found here: <https://pomax.github.io/bezierinfo/#explanation>. let t_squared = t * t; let one_minus_t = 1. - t; let squared_one_minus_t = one_minus_t * one_minus_t; match self.handles { BezierHandles::Linear => self.start.lerp(self.end, t), BezierHandles::Quadratic { handle } => squared_one_minus_t * self.start + 2. * one_minus_t * t * handle + t_squared * self.end, BezierHandles::Cubic { handle_start, handle_end } => { let t_cubed = t_squared * t; let cubed_one_minus_t = squared_one_minus_t * one_minus_t; cubed_one_minus_t * self.start + 3. * squared_one_minus_t * t * handle_start + 3. * one_minus_t * t_squared * handle_end + t_cubed * self.end } } } /// Calculate the coordinates of the point `t` along the curve. /// Expects `t` to be within the inclusive range `[0, 1]`. /// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/evaluate/solo" title="Evaluate Demo"></iframe> pub fn evaluate(&self, t: TValue) -> DVec2 { let t = self.t_value_to_parametric(t); self.unrestricted_parametric_evaluate(t) } /// Return a selection of equidistant points on the bezier curve. /// If no value is provided for `steps`, then the function will default `steps` to be 10. /// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/lookup-table/solo" title="Lookup-Table Demo"></iframe> pub fn compute_lookup_table(&self, steps: Option<usize>, tvalue_type: Option<TValueType>) -> Vec<DVec2> { let steps = steps.unwrap_or(DEFAULT_LUT_STEP_SIZE); let tvalue_type = tvalue_type.unwrap_or(TValueType::Parametric); (0..=steps) .map(|t| { let tvalue = match tvalue_type { TValueType::Parametric => TValue::Parametric(t as f64 / steps as f64), TValueType::Euclidean => TValue::Euclidean(t as f64 / steps as f64), }; self.evaluate(tvalue) }) .collect() } /// Return an approximation of the length of the bezier curve. /// - `num_subdivisions` - Number of subdivisions used to approximate the curve. The default value is 1000. /// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/length/solo" title="Length Demo"></iframe> pub fn length(&self, num_subdivisions: Option<usize>) -> f64 { match self.handles { BezierHandles::Linear => self.start.distance(self.end), _ => { // Code example from <https://gamedev.stackexchange.com/questions/5373/moving-ships-between-two-planets-along-a-bezier-missing-some-equations-for-acce/5427#5427>. // We will use an approximate approach where we split the curve into many subdivisions // and calculate the euclidean distance between the two endpoints of the subdivision let lookup_table = self.compute_lookup_table(Some(num_subdivisions.unwrap_or(DEFAULT_LENGTH_SUBDIVISIONS)), Some(TValueType::Parametric)); let mut approx_curve_length = 0.; let mut previous_point = lookup_table[0]; // Calculate approximate distance between subdivision for current_point in lookup_table.iter().skip(1) { // Calculate distance of subdivision approx_curve_length += (*current_point - previous_point).length(); // Update the previous point previous_point = *current_point; } approx_curve_length } } } /// Returns the parametric `t`-value that corresponds to the closest point on the curve to the provided point. /// Uses a searching algorithm akin to binary search that can be customized using the optional [ProjectionOptions] struct. 
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/project/solo" title="Project Demo"></iframe> pub fn project(&self, point: DVec2, options: Option<ProjectionOptions>) -> f64 { let options = options.unwrap_or_default(); let ProjectionOptions { lut_size, convergence_epsilon, convergence_limit, iteration_limit, } = options; // TODO: Consider optimizations from precomputing useful values, or using the GPU // First find the closest point from the results of a lookup table let lut = self.compute_lookup_table(Some(lut_size), Some(TValueType::Parametric)); let (minimum_position, minimum_distance) = utils::get_closest_point_in_lut(&lut, point); // Get the t values to the left and right of the closest result in the lookup table let lut_size_f64 = lut_size as f64; let minimum_position_f64 = minimum_position as f64; let mut left_t = (minimum_position_f64 - 1.).max(0.) / lut_size_f64; let mut right_t = (minimum_position_f64 + 1.).min(lut_size_f64) / lut_size_f64; // Perform a finer search by finding closest t from 5 points between [left_t, right_t] inclusive // Choose new left_t and right_t for a smaller range around the closest t and repeat the process let mut final_t = left_t; let mut distance; // Increment minimum_distance to ensure that the distance < minimum_distance comparison will be true for at least one iteration let mut new_minimum_distance = minimum_distance + 1.; // Maintain the previous distance to identify convergence let mut previous_distance; // Counter to limit the number of iterations let mut iteration_count = 0; // Counter to identify how many iterations have had a similar result. Used for convergence test let mut convergence_count = 0; // Store calculated distances to minimize unnecessary recomputations let mut distances: [f64; NUM_DISTANCES] = [ point.distance(lut[(minimum_position as i64 - 1).max(0) as usize]), 0., 0., 0., point.distance(lut[lut_size.min(minimum_position + 1)]), ]; while left_t <= right_t && convergence_count < convergence_limit && iteration_count < iteration_limit { previous_distance = new_minimum_distance; let step = (right_t - left_t) / (NUM_DISTANCES as f64 - 1.); let mut iterator_t = left_t; let mut target_index = 0; // Iterate through first 4 points and will handle the right most point later for (step_index, table_distance) in distances.iter_mut().enumerate().take(4) { // Use previously computed distance for the left most point, and compute new values for the others if step_index == 0 { distance = *table_distance; } else { distance = point.distance(self.evaluate(TValue::Parametric(iterator_t))); *table_distance = distance; } if distance < new_minimum_distance { new_minimum_distance = distance; target_index = step_index; final_t = iterator_t } iterator_t += step; } // Check right most edge separately since step may not perfectly add up to it (floating point errors) if distances[NUM_DISTANCES - 1] < new_minimum_distance { new_minimum_distance = distances[NUM_DISTANCES - 1]; final_t = right_t; } // Update left_t and right_t to be the t values (final_t +/- step), while handling the edges (i.e. 
if final_t is 0, left_t will be 0 instead of -step) // Ensure that the t values never exceed the [0, 1] range left_t = (final_t - step).max(0.); right_t = (final_t + step).min(1.); // Re-use the corresponding computed distances (target_index is the index corresponding to final_t) // Since target_index is a u_size, can't subtract one if it is zero distances[0] = distances[if target_index == 0 { 0 } else { target_index - 1 }]; distances[NUM_DISTANCES - 1] = distances[(target_index + 1).min(NUM_DISTANCES - 1)]; iteration_count += 1; // update count for consecutive iterations of similar minimum distances if previous_distance - new_minimum_distance < convergence_epsilon {
convergence_count += 1;
random_line_split
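The refinement stage counts consecutive iterations whose improvement falls below a convergence threshold, which is what the convergence_count bookkeeping above records. The following simplified sketch pairs with the coarse lookup-table search sketched earlier; it is not the library code, and the default limits are illustrative.

import math

def refine_projection(evaluate_distance, left_t, right_t, num_points=5,
                      convergence_epsilon=1e-6, convergence_limit=4, iteration_limit=20):
    """Shrink [left_t, right_t] around the best of num_points evenly spaced samples,
    counting iterations whose improvement stays below convergence_epsilon."""
    final_t = left_t
    new_minimum_distance = evaluate_distance(left_t) + 1.0  # guarantees the first comparison succeeds
    convergence_count = 0
    iteration_count = 0
    while left_t <= right_t and convergence_count < convergence_limit and iteration_count < iteration_limit:
        previous_distance = new_minimum_distance
        step = (right_t - left_t) / (num_points - 1)
        for i in range(num_points):
            t = left_t + i * step
            distance = evaluate_distance(t)
            if distance < new_minimum_distance:
                new_minimum_distance = distance
                final_t = t
        left_t = max(final_t - step, 0.0)
        right_t = min(final_t + step, 1.0)
        iteration_count += 1
        if previous_distance - new_minimum_distance < convergence_epsilon:
            convergence_count += 1  # another pass with essentially the same minimum
    return final_t

if __name__ == '__main__':
    point = (0.0, 1.2)
    curve = lambda t: (math.cos(math.pi * t), math.sin(math.pi * t))
    distance_to_point = lambda t: math.dist(curve(t), point)
    print(refine_projection(distance_to_point, 0.4, 0.6))  # converges toward t = 0.5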
error_handle.py
while last: i+=1 tbo = last last = last.tb_next if i>100: print() return (1, "bad recursion") if not tbo: tbo = sys.last_traceback linum = sys.last_traceback.tb_lineno# first linum message += f'from line {str(linum)}\n' frame = str(tbo.tb_frame) if frame: if 'file ' in frame: # frame = 'file: ' + frame.split('file ')[1]
else: print() return (1, 'No error traceback found by sys module') if hasattr(sys, "last_type") and sys.last_type: error_type = str(sys.last_type) error_type = error_type.replace("<class '", "").replace("'>","") message = f'type {error_type}\n{message}' if hasattr(sys, "last_value") and sys.last_value: message += f'error : {str(sys.last_value)}\n' if not linum and hasattr(sys.last_value, "lineno"):# maybe not usefull print('use "last_value" line num') message += f'line {str(sys.last_value.lineno)}\n' if not message : print() return (1, 'No message to display') if message and to_clipboad: bpy.context.window_manager.clipboard = message return (0, message) def get_traceback_stack(tb=None): if tb is None: tb = sys.last_traceback stack = [] if tb and tb.tb_frame is None: tb = tb.tb_next while tb is not None: stack.append((tb.tb_frame, tb.tb_lineno)) tb = tb.tb_next return stack class DEV_OT_copy_last_traceback(bpy.types.Operator): bl_idname = "devtools.copy_last_traceback" bl_label = "Copy Last Traceback" bl_description = "Copy last traceback error in clipboard" bl_options = {"REGISTER"} def execute(self, context): error, content = get_last_traceback(to_clipboad=True) if error: self.report({'ERROR'}, content) return {"CANCELLED"} return {"FINISHED"} class DEV_OT_artificial_error(bpy.types.Operator): bl_idname = "devtools.artificial_error" bl_label = "Artificial Error" bl_description = "Generate an artificial Error" bl_options = {"REGISTER"} def execute(self, context): ## Trigger zero Division Error provoked_error = 2/0 return {"FINISHED"} class DEV_OT_clear_last_traceback(bpy.types.Operator): bl_idname = "devtools.clear_last_traceback" bl_label = "Clear Last Traceback" bl_description = "Clear last traceback infos (deleting sys.last_traceback, etc)" bl_options = {"REGISTER", "INTERNAL"} def execute(self, context): if hasattr(sys, 'last_traceback') and sys.last_traceback is not None: del sys.last_traceback if hasattr(sys, 'last_value') and sys.last_value is not None: del sys.last_value if hasattr(sys, 'last_type') and sys.last_type is not None: del sys.last_type return {"FINISHED"} class DEV_OT_open_error_file(bpy.types.Operator): bl_idname = "devtools.open_error_file" bl_label = "Open Traceback Errors" bl_description = "Open the file where there as been a traceback error" bl_options = {"REGISTER"} path_line : bpy.props.StringProperty(options={'SKIP_SAVE'}) use_external : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'}) from_clipboard : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'}) def invoke(self, context, event): if self.path_line: # print('self.path_line: ', self.path_line)#Dbg if self.use_external: editor = fn.get_addon_prefs().external_editor if not editor: mess = fn.missing_external_editor() self.report({'WARNING'}, mess) return {"CANCELLED"} ## Use passed line direcly when recalling operator cmd = [editor, '--goto', self.path_line] print('cmd: ', cmd) ## Note: Never get what happen with the shell argument ## True on windows and False on linux seem to work empirically... 
subprocess.Popen(cmd, shell=sys.platform.startswith('win')) else: # Open file in blender path, linum = self.path_line.rsplit(':', 1) linum = int(linum) fn.set_file_in_text_editor(path, linum=linum, context=context) return {"FINISHED"} pattern = r'[Ff]ile [\'\"](.*?)[\'\"], line (\d+),' self.error_desc = None self.error_list = [] if self.from_clipboard: clip = context.window_manager.clipboard try: self.error_list = re.findall(pattern, clip) except: self.report({'ERROR'}, 'Failed to parse clipboard for filepath and line number') return {"CANCELLED"} else: if not hasattr(sys, "last_traceback"): self.report({'ERROR'}, 'No last traceback found with sys"') return {"CANCELLED"} '''## old method stack = get_traceback_stack() if stack is None: self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"') return {"CANCELLED"} # first result of findall with pattern of first element of error (traceback frame) self.error_list = [re.findall(pattern, str(error[0]))[0] for error in stack] ''' tb_list = traceback.extract_tb(sys.last_traceback) if not tb_list: self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"') return {"CANCELLED"} ## TODO: Handle case when started from Blender and have a script ## sometimes resolve() give a too long -not needed- url. ## Always resolve with list comprehension # self.error_list = [(str(Path(t.filename).resolve()), t.lineno, t.line, t.name) for t in tb_list] always_resolve = False # Only resolve on symlink for t in tb_list: # if bpy.data.filepath and t.filename.startswith(bpy.data.filepath): file_path = Path(t.filename) current_blend = Path(bpy.data.filepath).name # Case when script executed from blend and is loaded externally if file_path.parent.name == current_blend: txt = bpy.data.texts.get(file_path.name) if txt: if txt.filepath: file_path = Path(os.path.abspath(bpy.path.abspath(txt.filepath))) if always_resolve or (file_path.exists() and file_path.is_symlink()): file_path = file_path.resolve() # resolve symlink self.error_list.append((str(file_path), t.lineno, t.line, t.name)) ## add error type and description error_type = str(sys.last_type) error_type = error_type if error_type else "Error" error_type = error_type.replace("<class '", "").replace("'>","") error_value = sys.last_value if error_value: self.error_desc = f'{error_type} : {str(error_value)}\n' if not self.error_list: self.report({'ERROR'}, 'No filepath and line number found in clipboard') return {"CANCELLED"} return context.window_manager.invoke_props_dialog(self, width=500) def draw(self, context): layout = self.layout col = layout.column() for item in self.error_list: path, line = item[0], item[1] # print(path, ' ', line) goto_line = f'{path}:{line}' box = col.box() boxcol = box.column() boxcol.alignment = 'LEFT' button_row = boxcol.row(align=True) op = button_row.operator('devtools.open_error_file', text=f'{Path(path).name} : {line}', icon='MENU_PANEL') op.path_line = goto_line op.use_external = False op = button_row.operator('devtools.open_error_file', text='', icon='TEXT') op.path_line = goto_line op.use_external = True boxcol.label(text=path) if len(item) > 3 and item[3]: boxcol.label(text=f'in: {item[3]}') if len(item) > 2 and item[2]: boxcol.label(text=item[2]) col.separator() row = layout.row() row.alignment = 'LEFT' row.operator('devtools.clear_last_traceback', text='Clear Traceback', icon='CANCEL') if self.error_desc: for l in self.error_desc.split('\n'): row = col.row() row.alignment = 'LEFT' row.label(text=l) def execute(self, context): if self.path_line: 
return {"FINISHED"} return {"FINISHED"} def help_error_top_bar(self, context): layout = self.layout if hasattr(sys, 'last_traceback') and sys.last_traceback: region = context.region if region.alignment == 'RIGHT
frame = '\n'.join(frame.split(', ')[1:3]) message += f'{frame}\n'
random_line_split
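When reading from the clipboard, the operator above extracts file paths and line numbers with the regular expression [Ff]ile ['\"](.*?)['\"], line (\d+), taken directly from the code. The same pattern can be exercised outside Blender on any formatted traceback text; the sample traceback string below is invented for illustration.

import re

pattern = r'[Ff]ile [\'\"](.*?)[\'\"], line (\d+),'

sample_traceback = (
    'Traceback (most recent call last):\n'
    '  File "/tmp/example_addon.py", line 42, in execute\n'
    '    value = 1 / 0\n'
    'ZeroDivisionError: division by zero\n'
)

# Each match yields a (path, line number) pair, mirroring the error_list built above.
for path, lineno in re.findall(pattern, sample_traceback):
    print(f'{path}:{lineno}')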
error_handle.py
while last: i+=1 tbo = last last = last.tb_next if i>100: print() return (1, "bad recursion") if not tbo: tbo = sys.last_traceback linum = sys.last_traceback.tb_lineno# first linum message += f'from line {str(linum)}\n' frame = str(tbo.tb_frame) if frame: if 'file ' in frame: # frame = 'file: ' + frame.split('file ')[1] frame = '\n'.join(frame.split(', ')[1:3]) message += f'{frame}\n' else: print() return (1, 'No error traceback found by sys module') if hasattr(sys, "last_type") and sys.last_type: error_type = str(sys.last_type) error_type = error_type.replace("<class '", "").replace("'>","") message = f'type {error_type}\n{message}' if hasattr(sys, "last_value") and sys.last_value: message += f'error : {str(sys.last_value)}\n' if not linum and hasattr(sys.last_value, "lineno"):# maybe not usefull
if not message : print() return (1, 'No message to display') if message and to_clipboad: bpy.context.window_manager.clipboard = message return (0, message) def get_traceback_stack(tb=None): if tb is None: tb = sys.last_traceback stack = [] if tb and tb.tb_frame is None: tb = tb.tb_next while tb is not None: stack.append((tb.tb_frame, tb.tb_lineno)) tb = tb.tb_next return stack class DEV_OT_copy_last_traceback(bpy.types.Operator): bl_idname = "devtools.copy_last_traceback" bl_label = "Copy Last Traceback" bl_description = "Copy last traceback error in clipboard" bl_options = {"REGISTER"} def execute(self, context): error, content = get_last_traceback(to_clipboad=True) if error: self.report({'ERROR'}, content) return {"CANCELLED"} return {"FINISHED"} class DEV_OT_artificial_error(bpy.types.Operator): bl_idname = "devtools.artificial_error" bl_label = "Artificial Error" bl_description = "Generate an artificial Error" bl_options = {"REGISTER"} def execute(self, context): ## Trigger zero Division Error provoked_error = 2/0 return {"FINISHED"} class DEV_OT_clear_last_traceback(bpy.types.Operator): bl_idname = "devtools.clear_last_traceback" bl_label = "Clear Last Traceback" bl_description = "Clear last traceback infos (deleting sys.last_traceback, etc)" bl_options = {"REGISTER", "INTERNAL"} def execute(self, context): if hasattr(sys, 'last_traceback') and sys.last_traceback is not None: del sys.last_traceback if hasattr(sys, 'last_value') and sys.last_value is not None: del sys.last_value if hasattr(sys, 'last_type') and sys.last_type is not None: del sys.last_type return {"FINISHED"} class DEV_OT_open_error_file(bpy.types.Operator): bl_idname = "devtools.open_error_file" bl_label = "Open Traceback Errors" bl_description = "Open the file where there as been a traceback error" bl_options = {"REGISTER"} path_line : bpy.props.StringProperty(options={'SKIP_SAVE'}) use_external : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'}) from_clipboard : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'}) def invoke(self, context, event): if self.path_line: # print('self.path_line: ', self.path_line)#Dbg if self.use_external: editor = fn.get_addon_prefs().external_editor if not editor: mess = fn.missing_external_editor() self.report({'WARNING'}, mess) return {"CANCELLED"} ## Use passed line direcly when recalling operator cmd = [editor, '--goto', self.path_line] print('cmd: ', cmd) ## Note: Never get what happen with the shell argument ## True on windows and False on linux seem to work empirically... 
subprocess.Popen(cmd, shell=sys.platform.startswith('win')) else: # Open file in blender path, linum = self.path_line.rsplit(':', 1) linum = int(linum) fn.set_file_in_text_editor(path, linum=linum, context=context) return {"FINISHED"} pattern = r'[Ff]ile [\'\"](.*?)[\'\"], line (\d+),' self.error_desc = None self.error_list = [] if self.from_clipboard: clip = context.window_manager.clipboard try: self.error_list = re.findall(pattern, clip) except: self.report({'ERROR'}, 'Failed to parse clipboard for filepath and line number') return {"CANCELLED"} else: if not hasattr(sys, "last_traceback"): self.report({'ERROR'}, 'No last traceback found with sys"') return {"CANCELLED"} '''## old method stack = get_traceback_stack() if stack is None: self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"') return {"CANCELLED"} # first result of findall with pattern of first element of error (traceback frame) self.error_list = [re.findall(pattern, str(error[0]))[0] for error in stack] ''' tb_list = traceback.extract_tb(sys.last_traceback) if not tb_list: self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"') return {"CANCELLED"} ## TODO: Handle case when started from Blender and have a script ## sometimes resolve() give a too long -not needed- url. ## Always resolve with list comprehension # self.error_list = [(str(Path(t.filename).resolve()), t.lineno, t.line, t.name) for t in tb_list] always_resolve = False # Only resolve on symlink for t in tb_list: # if bpy.data.filepath and t.filename.startswith(bpy.data.filepath): file_path = Path(t.filename) current_blend = Path(bpy.data.filepath).name # Case when script executed from blend and is loaded externally if file_path.parent.name == current_blend: txt = bpy.data.texts.get(file_path.name) if txt: if txt.filepath: file_path = Path(os.path.abspath(bpy.path.abspath(txt.filepath))) if always_resolve or (file_path.exists() and file_path.is_symlink()): file_path = file_path.resolve() # resolve symlink self.error_list.append((str(file_path), t.lineno, t.line, t.name)) ## add error type and description error_type = str(sys.last_type) error_type = error_type if error_type else "Error" error_type = error_type.replace("<class '", "").replace("'>","") error_value = sys.last_value if error_value: self.error_desc = f'{error_type} : {str(error_value)}\n' if not self.error_list: self.report({'ERROR'}, 'No filepath and line number found in clipboard') return {"CANCELLED"} return context.window_manager.invoke_props_dialog(self, width=500) def draw(self, context): layout = self.layout col = layout.column() for item in self.error_list: path, line = item[0], item[1] # print(path, ' ', line) goto_line = f'{path}:{line}' box = col.box() boxcol = box.column() boxcol.alignment = 'LEFT' button_row = boxcol.row(align=True) op = button_row.operator('devtools.open_error_file', text=f'{Path(path).name} : {line}', icon='MENU_PANEL') op.path_line = goto_line op.use_external = False op = button_row.operator('devtools.open_error_file', text='', icon='TEXT') op.path_line = goto_line op.use_external = True boxcol.label(text=path) if len(item) > 3 and item[3]: boxcol.label(text=f'in: {item[3]}') if len(item) > 2 and item[2]: boxcol.label(text=item[2]) col.separator() row = layout.row() row.alignment = 'LEFT' row.operator('devtools.clear_last_traceback', text='Clear Traceback', icon='CANCEL') if self.error_desc: for l in self.error_desc.split('\n'): row = col.row() row.alignment = 'LEFT' row.label(text=l) def execute(self, context): if self.path_line: 
return {"FINISHED"} return {"FINISHED"} def help_error_top_bar(self, context): layout = self.layout if hasattr(sys, 'last_traceback') and sys.last_traceback: region = context.region if region.alignment == '
print('use "last_value" line num') message += f'line {str(sys.last_value.lineno)}\n'
conditional_block
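get_last_traceback walks tb_next until it reaches the innermost frame of sys.last_traceback, which only exists inside Blender after an unhandled script error. The sketch below performs the same walk on a freshly caught exception via sys.exc_info(), so it can run anywhere; it illustrates the traversal and is not the add-on function itself.

import sys

def innermost_frame_info():
    """Walk the current exception's traceback to its deepest frame and report file / line."""
    tb = sys.exc_info()[2]
    if tb is None:
        return None
    while tb.tb_next is not None:
        tb = tb.tb_next
    return tb.tb_frame.f_code.co_filename, tb.tb_lineno

if __name__ == '__main__':
    try:
        1 / 0  # provoke an error, much like DEV_OT_artificial_error does
    except ZeroDivisionError:
        print(innermost_frame_info())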
error_handle.py
(to_clipboad=False) -> Tuple[int, str]: '''Get last traceback error details summed in string return a tuple''' import sys message = '' linum = '' if hasattr(sys, "last_traceback") and sys.last_traceback: i = 0 last=sys.last_traceback.tb_next tbo = None while last: i+=1 tbo = last last = last.tb_next if i>100: print() return (1, "bad recursion") if not tbo: tbo = sys.last_traceback linum = sys.last_traceback.tb_lineno# first linum message += f'from line {str(linum)}\n' frame = str(tbo.tb_frame) if frame: if 'file ' in frame: # frame = 'file: ' + frame.split('file ')[1] frame = '\n'.join(frame.split(', ')[1:3]) message += f'{frame}\n' else: print() return (1, 'No error traceback found by sys module') if hasattr(sys, "last_type") and sys.last_type: error_type = str(sys.last_type) error_type = error_type.replace("<class '", "").replace("'>","") message = f'type {error_type}\n{message}' if hasattr(sys, "last_value") and sys.last_value: message += f'error : {str(sys.last_value)}\n' if not linum and hasattr(sys.last_value, "lineno"):# maybe not usefull print('use "last_value" line num') message += f'line {str(sys.last_value.lineno)}\n' if not message : print() return (1, 'No message to display') if message and to_clipboad: bpy.context.window_manager.clipboard = message return (0, message) def get_traceback_stack(tb=None): if tb is None: tb = sys.last_traceback stack = [] if tb and tb.tb_frame is None: tb = tb.tb_next while tb is not None: stack.append((tb.tb_frame, tb.tb_lineno)) tb = tb.tb_next return stack class DEV_OT_copy_last_traceback(bpy.types.Operator): bl_idname = "devtools.copy_last_traceback" bl_label = "Copy Last Traceback" bl_description = "Copy last traceback error in clipboard" bl_options = {"REGISTER"} def execute(self, context): error, content = get_last_traceback(to_clipboad=True) if error: self.report({'ERROR'}, content) return {"CANCELLED"} return {"FINISHED"} class DEV_OT_artificial_error(bpy.types.Operator): bl_idname = "devtools.artificial_error" bl_label = "Artificial Error" bl_description = "Generate an artificial Error" bl_options = {"REGISTER"} def execute(self, context): ## Trigger zero Division Error provoked_error = 2/0 return {"FINISHED"} class DEV_OT_clear_last_traceback(bpy.types.Operator): bl_idname = "devtools.clear_last_traceback" bl_label = "Clear Last Traceback" bl_description = "Clear last traceback infos (deleting sys.last_traceback, etc)" bl_options = {"REGISTER", "INTERNAL"} def execute(self, context): if hasattr(sys, 'last_traceback') and sys.last_traceback is not None: del sys.last_traceback if hasattr(sys, 'last_value') and sys.last_value is not None: del sys.last_value if hasattr(sys, 'last_type') and sys.last_type is not None: del sys.last_type return {"FINISHED"} class DEV_OT_open_error_file(bpy.types.Operator): bl_idname = "devtools.open_error_file" bl_label = "Open Traceback Errors" bl_description = "Open the file where there as been a traceback error" bl_options = {"REGISTER"} path_line : bpy.props.StringProperty(options={'SKIP_SAVE'}) use_external : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'}) from_clipboard : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'}) def invoke(self, context, event): if self.path_line: # print('self.path_line: ', self.path_line)#Dbg if self.use_external: editor = fn.get_addon_prefs().external_editor if not editor: mess = fn.missing_external_editor() self.report({'WARNING'}, mess) return {"CANCELLED"} ## Use passed line direcly when recalling operator cmd = [editor, '--goto', self.path_line] 
print('cmd: ', cmd) ## Note: Never get what happen with the shell argument ## True on windows and False on linux seem to work empirically... subprocess.Popen(cmd, shell=sys.platform.startswith('win')) else: # Open file in blender path, linum = self.path_line.rsplit(':', 1) linum = int(linum) fn.set_file_in_text_editor(path, linum=linum, context=context) return {"FINISHED"} pattern = r'[Ff]ile [\'\"](.*?)[\'\"], line (\d+),' self.error_desc = None self.error_list = [] if self.from_clipboard: clip = context.window_manager.clipboard try: self.error_list = re.findall(pattern, clip) except: self.report({'ERROR'}, 'Failed to parse clipboard for filepath and line number') return {"CANCELLED"} else: if not hasattr(sys, "last_traceback"): self.report({'ERROR'}, 'No last traceback found with sys"') return {"CANCELLED"} '''## old method stack = get_traceback_stack() if stack is None: self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"') return {"CANCELLED"} # first result of findall with pattern of first element of error (traceback frame) self.error_list = [re.findall(pattern, str(error[0]))[0] for error in stack] ''' tb_list = traceback.extract_tb(sys.last_traceback) if not tb_list: self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"') return {"CANCELLED"} ## TODO: Handle case when started from Blender and have a script ## sometimes resolve() give a too long -not needed- url. ## Always resolve with list comprehension # self.error_list = [(str(Path(t.filename).resolve()), t.lineno, t.line, t.name) for t in tb_list] always_resolve = False # Only resolve on symlink for t in tb_list: # if bpy.data.filepath and t.filename.startswith(bpy.data.filepath): file_path = Path(t.filename) current_blend = Path(bpy.data.filepath).name # Case when script executed from blend and is loaded externally if file_path.parent.name == current_blend: txt = bpy.data.texts.get(file_path.name) if txt: if txt.filepath: file_path = Path(os.path.abspath(bpy.path.abspath(txt.filepath))) if always_resolve or (file_path.exists() and file_path.is_symlink()): file_path = file_path.resolve() # resolve symlink self.error_list.append((str(file_path), t.lineno, t.line, t.name)) ## add error type and description error_type = str(sys.last_type) error_type = error_type if error_type else "Error" error_type = error_type.replace("<class '", "").replace("'>","") error_value = sys.last_value if error_value: self.error_desc = f'{error_type} : {str(error_value)}\n' if not self.error_list: self.report({'ERROR'}, 'No filepath and line number found in clipboard') return {"CANCELLED"} return context.window_manager.invoke_props_dialog(self, width=500) def draw(self, context): layout = self.layout col = layout.column() for item in self.error_list: path, line = item[0], item[1] # print(path, ' ', line) goto_line = f'{path}:{line}' box = col.box() boxcol = box.column() boxcol.alignment = 'LEFT' button_row = boxcol.row(align=True) op = button_row.operator('devtools.open_error_file', text=f'{Path(path).name} : {line}', icon='MENU_PANEL') op.path_line = goto_line op.use_external = False op = button_row.operator('devtools.open_error_file', text='', icon='TEXT') op.path_line = goto_line op.use_external = True boxcol.label(text=path) if len(item) > 3 and item[3]: boxcol.label(text=f'in: {item[3]}') if len(item) > 2 and item[2]: boxcol.label(text=item[2]) col.separator() row = layout.row() row.alignment = 'LEFT' row.operator('devtools.clear_last_traceback', text='Clear Traceback', icon='CANCEL') if self.error_desc: 
for l in self.error_desc.split('\n'): row = col.row()
get_last_traceback
identifier_name
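The newer code path above builds self.error_list from traceback.extract_tb, whose FrameSummary entries expose filename, lineno, name and line. A minimal standalone demonstration, using a locally raised error in place of Blender's sys.last_traceback:

import sys
import traceback
from pathlib import Path

def collect_error_list(tb):
    # Mirror the (path, lineno, source line, function name) tuples built above.
    return [(str(Path(t.filename)), t.lineno, t.line, t.name) for t in traceback.extract_tb(tb)]

if __name__ == '__main__':
    try:
        int('not a number')
    except ValueError:
        for path, lineno, line, name in collect_error_list(sys.exc_info()[2]):
            print(f'{Path(path).name} : {lineno}  in: {name}  -> {line}')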
error_handle.py
while last: i+=1 tbo = last last = last.tb_next if i>100: print() return (1, "bad recursion") if not tbo: tbo = sys.last_traceback linum = sys.last_traceback.tb_lineno# first linum message += f'from line {str(linum)}\n' frame = str(tbo.tb_frame) if frame: if 'file ' in frame: # frame = 'file: ' + frame.split('file ')[1] frame = '\n'.join(frame.split(', ')[1:3]) message += f'{frame}\n' else: print() return (1, 'No error traceback found by sys module') if hasattr(sys, "last_type") and sys.last_type: error_type = str(sys.last_type) error_type = error_type.replace("<class '", "").replace("'>","") message = f'type {error_type}\n{message}' if hasattr(sys, "last_value") and sys.last_value: message += f'error : {str(sys.last_value)}\n' if not linum and hasattr(sys.last_value, "lineno"):# maybe not usefull print('use "last_value" line num') message += f'line {str(sys.last_value.lineno)}\n' if not message : print() return (1, 'No message to display') if message and to_clipboad: bpy.context.window_manager.clipboard = message return (0, message) def get_traceback_stack(tb=None): if tb is None: tb = sys.last_traceback stack = [] if tb and tb.tb_frame is None: tb = tb.tb_next while tb is not None: stack.append((tb.tb_frame, tb.tb_lineno)) tb = tb.tb_next return stack class DEV_OT_copy_last_traceback(bpy.types.Operator): bl_idname = "devtools.copy_last_traceback" bl_label = "Copy Last Traceback" bl_description = "Copy last traceback error in clipboard" bl_options = {"REGISTER"} def execute(self, context): error, content = get_last_traceback(to_clipboad=True) if error: self.report({'ERROR'}, content) return {"CANCELLED"} return {"FINISHED"} class DEV_OT_artificial_error(bpy.types.Operator): bl_idname = "devtools.artificial_error" bl_label = "Artificial Error" bl_description = "Generate an artificial Error" bl_options = {"REGISTER"} def execute(self, context): ## Trigger zero Division Error provoked_error = 2/0 return {"FINISHED"} class DEV_OT_clear_last_traceback(bpy.types.Operator): bl_idname = "devtools.clear_last_traceback" bl_label = "Clear Last Traceback" bl_description = "Clear last traceback infos (deleting sys.last_traceback, etc)" bl_options = {"REGISTER", "INTERNAL"} def execute(self, context): if hasattr(sys, 'last_traceback') and sys.last_traceback is not None: del sys.last_traceback if hasattr(sys, 'last_value') and sys.last_value is not None: del sys.last_value if hasattr(sys, 'last_type') and sys.last_type is not None: del sys.last_type return {"FINISHED"} class DEV_OT_open_error_file(bpy.types.Operator): bl_idname = "devtools.open_error_file" bl_label = "Open Traceback Errors" bl_description = "Open the file where there as been a traceback error" bl_options = {"REGISTER"} path_line : bpy.props.StringProperty(options={'SKIP_SAVE'}) use_external : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'}) from_clipboard : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'}) def invoke(self, context, event): if self.path_line: # print('self.path_line: ', self.path_line)#Dbg if self.use_external: editor = fn.get_addon_prefs().external_editor if not editor: mess = fn.missing_external_editor() self.report({'WARNING'}, mess) return {"CANCELLED"} ## Use passed line direcly when recalling operator cmd = [editor, '--goto', self.path_line] print('cmd: ', cmd) ## Note: Never get what happen with the shell argument ## True on windows and False on linux seem to work empirically... 
subprocess.Popen(cmd, shell=sys.platform.startswith('win')) else: # Open file in blender path, linum = self.path_line.rsplit(':', 1) linum = int(linum) fn.set_file_in_text_editor(path, linum=linum, context=context) return {"FINISHED"} pattern = r'[Ff]ile [\'\"](.*?)[\'\"], line (\d+),' self.error_desc = None self.error_list = [] if self.from_clipboard: clip = context.window_manager.clipboard try: self.error_list = re.findall(pattern, clip) except: self.report({'ERROR'}, 'Failed to parse clipboard for filepath and line number') return {"CANCELLED"} else: if not hasattr(sys, "last_traceback"): self.report({'ERROR'}, 'No last traceback found with sys"') return {"CANCELLED"} '''## old method stack = get_traceback_stack() if stack is None: self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"') return {"CANCELLED"} # first result of findall with pattern of first element of error (traceback frame) self.error_list = [re.findall(pattern, str(error[0]))[0] for error in stack] ''' tb_list = traceback.extract_tb(sys.last_traceback) if not tb_list: self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"') return {"CANCELLED"} ## TODO: Handle case when started from Blender and have a script ## sometimes resolve() give a too long -not needed- url. ## Always resolve with list comprehension # self.error_list = [(str(Path(t.filename).resolve()), t.lineno, t.line, t.name) for t in tb_list] always_resolve = False # Only resolve on symlink for t in tb_list: # if bpy.data.filepath and t.filename.startswith(bpy.data.filepath): file_path = Path(t.filename) current_blend = Path(bpy.data.filepath).name # Case when script executed from blend and is loaded externally if file_path.parent.name == current_blend: txt = bpy.data.texts.get(file_path.name) if txt: if txt.filepath: file_path = Path(os.path.abspath(bpy.path.abspath(txt.filepath))) if always_resolve or (file_path.exists() and file_path.is_symlink()): file_path = file_path.resolve() # resolve symlink self.error_list.append((str(file_path), t.lineno, t.line, t.name)) ## add error type and description error_type = str(sys.last_type) error_type = error_type if error_type else "Error" error_type = error_type.replace("<class '", "").replace("'>","") error_value = sys.last_value if error_value: self.error_desc = f'{error_type} : {str(error_value)}\n' if not self.error_list: self.report({'ERROR'}, 'No filepath and line number found in clipboard') return {"CANCELLED"} return context.window_manager.invoke_props_dialog(self, width=500) def draw(self, context):
if len(item) > 2 and item[2]: boxcol.label(text=item[2]) col.separator() row = layout.row() row.alignment = 'LEFT' row.operator('devtools.clear_last_traceback', text='Clear Traceback', icon='CANCEL') if self.error_desc: for l in self.error_desc.split('\n'): row = col.row() row.alignment = 'LEFT' row.label(text=l) def execute(self, context): if self.path_line: return {"FINISHED"} return {"FINISHED"} def help_error_top_bar(self, context): layout = self.layout if hasattr(sys, 'last_traceback') and sys.last_traceback: region = context.region if region.alignment == 'RIGHT
layout = self.layout col = layout.column() for item in self.error_list: path, line = item[0], item[1] # print(path, ' ', line) goto_line = f'{path}:{line}' box = col.box() boxcol = box.column() boxcol.alignment = 'LEFT' button_row = boxcol.row(align=True) op = button_row.operator('devtools.open_error_file', text=f'{Path(path).name} : {line}', icon='MENU_PANEL') op.path_line = goto_line op.use_external = False op = button_row.operator('devtools.open_error_file', text='', icon='TEXT') op.path_line = goto_line op.use_external = True boxcol.label(text=path) if len(item) > 3 and item[3]: boxcol.label(text=f'in: {item[3]}')
identifier_body
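The operator split across the row above recovers (file, line) locations from the last Python error in two ways: a regex over pasted traceback text, and `traceback.extract_tb(sys.last_traceback)` for the structured frames. A minimal standalone sketch of both extraction paths outside Blender (no `bpy`; the helper names are made up):

```python
import re
import sys
import traceback

# Same pattern the operator above applies to clipboard text:
# matches `File "<path>", line <number>,` fragments of a printed traceback.
TB_PATTERN = r'[Ff]ile [\'\"](.*?)[\'\"], line (\d+),'

def frames_from_text(text):
    """Regex path: pull (filepath, lineno) pairs out of a pasted traceback."""
    return [(path, int(num)) for path, num in re.findall(TB_PATTERN, text)]

def frames_from_exception(exc):
    """Structured path: walk the traceback object as extract_tb does above."""
    frames = traceback.extract_tb(exc.__traceback__)
    # Each FrameSummary exposes filename, lineno, line and name,
    # matching the (path, lineno, line, name) tuples built by the operator.
    return [(f.filename, f.lineno, f.line, f.name) for f in frames]

if __name__ == "__main__":
    try:
        1 / 0
    except ZeroDivisionError as exc:
        printed = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
        print(frames_from_text(printed))
        print(frames_from_exception(exc))
```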
assembler.py
# - Negative immediate values must have a preceeding '-' with no space # - between it and the number. # # Language definition: # # LOAD D A - load from address A to destination D # LOADA D A - load using the address register from address A + RE to destination D # STORE S A - store value in S to address A # STOREA S A - store using the address register the value in S to address A + RE # BRA L - branch to label A # BRAZ L - branch to label A if the CR zero flag is set # BRAN L - branch to label L if the CR negative flag is set # BRAO L - branch to label L if the CR overflow flag is set # BRAC L - branch to label L if the CR carry flag is set # CALL L - call the routine at label L # RETURN - return from a routine # HALT - execute the halt/exit instruction # PUSH S - push source value S to the stack # POP D - pop form the stack and put in destination D # OPORT S - output to the global port from source S # IPORT D - input from the global port to destination D # ADD A B C - execute C <= A + B # SUB A B C - execute C <= A - B # AND A B C - execute C <= A and B bitwise # OR A B C - execute C <= A or B bitwise # XOR A B C - execute C <= A xor B bitwise # SHIFTL A C - execute C <= A shift left by 1 # SHIFTR A C - execute C <= A shift right by 1 # ROTL A C - execute C <= A rotate left by 1 # ROTR A C - execute C <= A rotate right by 1 # MOVE A C - execute C <= A where A is a source register # MOVEI V C - execute C <= value V # # 2-pass assembler # pass 1: read through the instructions and put numbers on each instruction location # calculate the label values # # pass 2: read through the instructions and build the machine instructions # # Mkhanyisi Gamedze # CS232 Project 8 : Assembler # import sys # NO NEED TO CHANGE # converts d to an 8-bit 2-s complement binary value def dec2comp8( d, linenum ): try: if d > 0: l = d.bit_length() v = "00000000" v = v[0:8-l] + format( d, 'b') elif d < 0: dt = 128 + d l = dt.bit_length() v = "10000000" v = v[0:8-l] + format( dt, 'b')[:] else: v = "00000000" except: print 'Invalid decimal number on line %d' % (linenum) exit() return v # DEFAULT NO NEED TO CHANGE # converts d to an 8-bit unsigned binary value def dec2bin8( d, linenum ): if d > 0: l = d.bit_length() v = "00000000" v = v[0:8-l] + format( d, 'b' ) elif d == 0: v = "00000000" else: print 'Invalid address on line %d: value is negative' % (linenum) exit() return v # Tokenizes the input data, discarding white space and comments # returns the tokens as a list of lists, one list for each line. # # The tokenizer also converts each character to lower case. 
def tokenize( fp ): tokens = [] # start of the file fp.seek(0) lines = fp.readlines() # strip white space and comments from each line for line in lines: ls = line.strip() uls = '' for c in ls: if c != '#': uls = uls + c else: break # skip blank lines if len(uls) == 0: continue # split on white space words = uls.split() newwords = [] for word in words: newwords.append( word.lower() ) tokens.append( newwords ) print "done tokenizing\n" return tokens # reads through the file and returns a dictionary of all location # labels with their line numbers def pass1( tokens ): # figure out what line number corresponds to each symbol # function variables tkns = [] # actual instructions dict = {} index = 0 print "branching addresses" for t in tokens: # check symbol ID that characterizes branch symbol ":" if t[0][-1] == ":" : # capture that token excluding ":" dict[t[0][:-1]]=index print " "+t[0]+" \n" else: tkns.append(t) index+=1 print "*****" # return (tokens, labels dictionary) return tkns , dict # More thorough. Reads through tokens and creates machine assembly code def pass2( tokens, labels ): # Register Symbol tables dictionary (case insensitive, so all lower case) (3 bit representation) table_b ={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101'} table_c={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','pc':'110','cr':'111'} table_d={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','pc':'110','ir':'111'} table_e={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','zeros':'110','ones':'111'} # the machine instructions # setup format ass_txt = "DEPTH=256;\nWIDTH=16;\nADDRESS_RADIX=HEX;\nDATA_RADIX=BIN;\nCONTENT\nBEGIN\n" # begin writing instructions for each token index=0 for token in tokens: ass_txt+="\n"+("%02X" % index)+":" # opcode(mnemonic) specific command if token[0]== 'load': # Load from address A to register D. A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0000'+'0'+table_b[token[1]]+dec2bin8(int(token[2]), index) else: exit() # destination source invalid elif token[0]== 'loada': # Load from address [A + RE] to register D. A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0000'+'1'+table_b[token[1]]+dec2comp8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='store': # Store the value in register S to address A. A is in [0,255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0001'+'0'+table_b[token[1]]+dec2bin8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='storea': # Store the value in register S to address [A + RE]. A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0001'+'1'+table_b[token[1]]+dec2comp8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='bra': # Unconditional branch to label L if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0010'+'0000'+dec2bin8(labels[token[1]], index) else: exit() # destination source invalid elif token[0]==
#
random_line_split
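The `dec2comp8`/`dec2bin8` helpers above build the 8-bit strings by slicing a template of zeros or ones around `format(d, 'b')`. A hedged sketch of the same conversions using a bit mask, handy for cross-checking the hand-rolled version (function names are made up):

```python
def to_comp8(d):
    """8-bit two's complement string for d in [-128, 127] (same result as dec2comp8 above)."""
    if not -128 <= d <= 127:
        raise ValueError("out of 8-bit two's complement range")
    return format(d & 0xFF, '08b')   # masking handles negatives: -3 -> 0b11111101

def to_bin8(d):
    """8-bit unsigned string for d in [0, 255] (same result as dec2bin8 above)."""
    if not 0 <= d <= 255:
        raise ValueError("address must fit in 8 unsigned bits")
    return format(d, '08b')

assert to_comp8(5)  == '00000101'
assert to_comp8(-3) == '11111101'   # 128 + (-3) = 125 -> '1' + '1111101'
assert to_bin8(10)  == '00001010'
```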
assembler.py
_length() v = "00000000" v = v[0:8-l] + format( d, 'b' ) elif d == 0: v = "00000000" else: print 'Invalid address on line %d: value is negative' % (linenum) exit() return v # Tokenizes the input data, discarding white space and comments # returns the tokens as a list of lists, one list for each line. # # The tokenizer also converts each character to lower case. def tokenize( fp ): tokens = [] # start of the file fp.seek(0) lines = fp.readlines() # strip white space and comments from each line for line in lines: ls = line.strip() uls = '' for c in ls: if c != '#': uls = uls + c else: break # skip blank lines if len(uls) == 0: continue # split on white space words = uls.split() newwords = [] for word in words: newwords.append( word.lower() ) tokens.append( newwords ) print "done tokenizing\n" return tokens # reads through the file and returns a dictionary of all location # labels with their line numbers def pass1( tokens ): # figure out what line number corresponds to each symbol # function variables tkns = [] # actual instructions dict = {} index = 0 print "branching addresses" for t in tokens: # check symbol ID that characterizes branch symbol ":" if t[0][-1] == ":" : # capture that token excluding ":" dict[t[0][:-1]]=index print " "+t[0]+" \n" else: tkns.append(t) index+=1 print "*****" # return (tokens, labels dictionary) return tkns , dict # More thorough. Reads through tokens and creates machine assembly code def pass2( tokens, labels ): # Register Symbol tables dictionary (case insensitive, so all lower case) (3 bit representation) table_b ={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101'} table_c={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','pc':'110','cr':'111'} table_d={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','pc':'110','ir':'111'} table_e={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','zeros':'110','ones':'111'} # the machine instructions # setup format ass_txt = "DEPTH=256;\nWIDTH=16;\nADDRESS_RADIX=HEX;\nDATA_RADIX=BIN;\nCONTENT\nBEGIN\n" # begin writing instructions for each token index=0 for token in tokens: ass_txt+="\n"+("%02X" % index)+":" # opcode(mnemonic) specific command if token[0]== 'load': # Load from address A to register D. A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0000'+'0'+table_b[token[1]]+dec2bin8(int(token[2]), index) else: exit() # destination source invalid elif token[0]== 'loada': # Load from address [A + RE] to register D. A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0000'+'1'+table_b[token[1]]+dec2comp8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='store': # Store the value in register S to address A. A is in [0,255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0001'+'0'+table_b[token[1]]+dec2bin8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='storea': # Store the value in register S to address [A + RE]. 
A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0001'+'1'+table_b[token[1]]+dec2comp8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='bra': # Unconditional branch to label L if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0010'+'0000'+dec2bin8(labels[token[1]], index) else: exit() # destination source invalid elif token[0]== 'braz': # Branch to label L if the CR zero flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0000'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]== 'bran': # Branch to label L if the CR negative flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0010'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='brao': # Branch to label L if the CR overflow flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0001'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='brac': # Branch to label L if the CR carry flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0011'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='call': # Call the routine at label L # validate label L if token[1] in labels: ass_txt+='0011'+'01'+'00'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='return': # return from a routine ass_txt+='0011100000000000' elif token[0]=='exit' or token[0]=='halt': # execute the halt/exit instruction ass_txt+='0011110000000000' elif token[0]=='push': # Push register S onto the stack and increment SP if token[1] in table_c: ass_txt+='0100'+table_c[token[1]]+'000000000' else: exit() elif token[0]=='pop': # Decrement SP and put the top value on the stack into register S if token[1] in table_c: ass_txt+='0101'+table_c[token[1]]+'000000000' else: exit() elif token[0]=='oport': # Send register S to the output port if token[1] in table_d: ass_txt+='0110'+table_d[token[1]]+'000000000' else: exit() elif token[0]=='iport': # Assign to register D the value of the input port if token[1] in table_b:
ass_txt+='0111'+table_d[token[1]]+'000000000'
conditional_block
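`pass2` above assembles each 16-bit word by concatenating a 4-bit opcode, an addressing bit, a 3-bit register code from the lookup tables, and an 8-bit address from `dec2bin8`/`dec2comp8`. A small sketch of that layout for two of the opcodes (a hypothetical mini-encoder, not the project's full `pass2`):

```python
# Register field encodings copied from the tables in pass2 above.
TABLE_B = {'ra': '000', 'rb': '001', 'rc': '010', 'rd': '011', 're': '100', 'sp': '101'}

def encode_load(reg, addr):
    """LOAD D A -> opcode '0000', direct-addressing bit '0', 3-bit register, 8-bit address."""
    if reg not in TABLE_B or not 0 <= addr <= 255:
        raise ValueError("bad operand")
    return '0000' + '0' + TABLE_B[reg] + format(addr, '08b')

def encode_store(reg, addr):
    """STORE S A -> opcode '0001', direct-addressing bit '0', 3-bit register, 8-bit address."""
    if reg not in TABLE_B or not 0 <= addr <= 255:
        raise ValueError("bad operand")
    return '0001' + '0' + TABLE_B[reg] + format(addr, '08b')

word = encode_load('ra', 10)
assert word == '0000' + '0' + '000' + '00001010' and len(word) == 16
assert len(encode_store('rb', 200)) == 16
```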
assembler.py
instructions # # Mkhanyisi Gamedze # CS232 Project 8 : Assembler # import sys # NO NEED TO CHANGE # converts d to an 8-bit 2-s complement binary value def dec2comp8( d, linenum ): try: if d > 0: l = d.bit_length() v = "00000000" v = v[0:8-l] + format( d, 'b') elif d < 0: dt = 128 + d l = dt.bit_length() v = "10000000" v = v[0:8-l] + format( dt, 'b')[:] else: v = "00000000" except: print 'Invalid decimal number on line %d' % (linenum) exit() return v # DEFAULT NO NEED TO CHANGE # converts d to an 8-bit unsigned binary value def dec2bin8( d, linenum ): if d > 0: l = d.bit_length() v = "00000000" v = v[0:8-l] + format( d, 'b' ) elif d == 0: v = "00000000" else: print 'Invalid address on line %d: value is negative' % (linenum) exit() return v # Tokenizes the input data, discarding white space and comments # returns the tokens as a list of lists, one list for each line. # # The tokenizer also converts each character to lower case. def tokenize( fp ):
# split on white space words = uls.split() newwords = [] for word in words: newwords.append( word.lower() ) tokens.append( newwords ) print "done tokenizing\n" return tokens # reads through the file and returns a dictionary of all location # labels with their line numbers def pass1( tokens ): # figure out what line number corresponds to each symbol # function variables tkns = [] # actual instructions dict = {} index = 0 print "branching addresses" for t in tokens: # check symbol ID that characterizes branch symbol ":" if t[0][-1] == ":" : # capture that token excluding ":" dict[t[0][:-1]]=index print " "+t[0]+" \n" else: tkns.append(t) index+=1 print "*****" # return (tokens, labels dictionary) return tkns , dict # More thorough. Reads through tokens and creates machine assembly code def pass2( tokens, labels ): # Register Symbol tables dictionary (case insensitive, so all lower case) (3 bit representation) table_b ={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101'} table_c={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','pc':'110','cr':'111'} table_d={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','pc':'110','ir':'111'} table_e={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','zeros':'110','ones':'111'} # the machine instructions # setup format ass_txt = "DEPTH=256;\nWIDTH=16;\nADDRESS_RADIX=HEX;\nDATA_RADIX=BIN;\nCONTENT\nBEGIN\n" # begin writing instructions for each token index=0 for token in tokens: ass_txt+="\n"+("%02X" % index)+":" # opcode(mnemonic) specific command if token[0]== 'load': # Load from address A to register D. A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0000'+'0'+table_b[token[1]]+dec2bin8(int(token[2]), index) else: exit() # destination source invalid elif token[0]== 'loada': # Load from address [A + RE] to register D. A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0000'+'1'+table_b[token[1]]+dec2comp8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='store': # Store the value in register S to address A. A is in [0,255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0001'+'0'+table_b[token[1]]+dec2bin8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='storea': # Store the value in register S to address [A + RE]. 
A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0001'+'1'+table_b[token[1]]+dec2comp8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='bra': # Unconditional branch to label L if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0010'+'0000'+dec2bin8(labels[token[1]], index) else: exit() # destination source invalid elif token[0]== 'braz': # Branch to label L if the CR zero flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0000'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]== 'bran': # Branch to label L if the CR negative flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0010'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='brao': # Branch to label L if the CR overflow flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0001'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='brac': # Branch to label L if the CR carry flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0011'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='call': # Call the routine at label L # validate label L if token[1] in labels: ass_txt+='0011'+'01'+'00'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='return': # return from a routine ass_txt+='0011100000000000' elif token[0]=='exit' or token[0]=='halt': # execute the halt/exit instruction ass_txt+='0011110000000000' elif token[0]=='push': #
tokens = [] # start of the file fp.seek(0) lines = fp.readlines() # strip white space and comments from each line for line in lines: ls = line.strip() uls = '' for c in ls: if c != '#': uls = uls + c else: break # skip blank lines if len(uls) == 0: continue
identifier_body
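`pass1` above turns `label:` lines into a dictionary from label name to instruction index and keeps the remaining instruction tokens for `pass2`. A short worked sketch of that label-resolution idea on a made-up program (the helper name is hypothetical):

```python
def resolve_labels(token_lines):
    """Two-pass assembler, pass 1: map 'name:' lines to the index of the next
    instruction and return the remaining instruction tokens."""
    instructions, labels = [], {}
    index = 0
    for tokens in token_lines:
        if tokens[0].endswith(':'):
            labels[tokens[0][:-1]] = index      # label points at the upcoming instruction
        else:
            instructions.append(tokens)
            index += 1
    return instructions, labels

program = [['movei', '0', 'ra'],
           ['loop:'],
           ['add', 'ra', 'rb', 'ra'],
           ['bra', 'loop'],
           ['halt']]
insns, labels = resolve_labels(program)
assert labels == {'loop': 1}          # 'loop:' labels the ADD at instruction index 1
assert len(insns) == 4
```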
assembler.py
instructions # # Mkhanyisi Gamedze # CS232 Project 8 : Assembler # import sys # NO NEED TO CHANGE # converts d to an 8-bit 2-s complement binary value def dec2comp8( d, linenum ): try: if d > 0: l = d.bit_length() v = "00000000" v = v[0:8-l] + format( d, 'b') elif d < 0: dt = 128 + d l = dt.bit_length() v = "10000000" v = v[0:8-l] + format( dt, 'b')[:] else: v = "00000000" except: print 'Invalid decimal number on line %d' % (linenum) exit() return v # DEFAULT NO NEED TO CHANGE # converts d to an 8-bit unsigned binary value def
( d, linenum ): if d > 0: l = d.bit_length() v = "00000000" v = v[0:8-l] + format( d, 'b' ) elif d == 0: v = "00000000" else: print 'Invalid address on line %d: value is negative' % (linenum) exit() return v # Tokenizes the input data, discarding white space and comments # returns the tokens as a list of lists, one list for each line. # # The tokenizer also converts each character to lower case. def tokenize( fp ): tokens = [] # start of the file fp.seek(0) lines = fp.readlines() # strip white space and comments from each line for line in lines: ls = line.strip() uls = '' for c in ls: if c != '#': uls = uls + c else: break # skip blank lines if len(uls) == 0: continue # split on white space words = uls.split() newwords = [] for word in words: newwords.append( word.lower() ) tokens.append( newwords ) print "done tokenizing\n" return tokens # reads through the file and returns a dictionary of all location # labels with their line numbers def pass1( tokens ): # figure out what line number corresponds to each symbol # function variables tkns = [] # actual instructions dict = {} index = 0 print "branching addresses" for t in tokens: # check symbol ID that characterizes branch symbol ":" if t[0][-1] == ":" : # capture that token excluding ":" dict[t[0][:-1]]=index print " "+t[0]+" \n" else: tkns.append(t) index+=1 print "*****" # return (tokens, labels dictionary) return tkns , dict # More thorough. Reads through tokens and creates machine assembly code def pass2( tokens, labels ): # Register Symbol tables dictionary (case insensitive, so all lower case) (3 bit representation) table_b ={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101'} table_c={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','pc':'110','cr':'111'} table_d={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','pc':'110','ir':'111'} table_e={'ra' : '000', 'rb' : '001', 'rc':'010','rd': '011','re':'100','sp': '101','zeros':'110','ones':'111'} # the machine instructions # setup format ass_txt = "DEPTH=256;\nWIDTH=16;\nADDRESS_RADIX=HEX;\nDATA_RADIX=BIN;\nCONTENT\nBEGIN\n" # begin writing instructions for each token index=0 for token in tokens: ass_txt+="\n"+("%02X" % index)+":" # opcode(mnemonic) specific command if token[0]== 'load': # Load from address A to register D. A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0000'+'0'+table_b[token[1]]+dec2bin8(int(token[2]), index) else: exit() # destination source invalid elif token[0]== 'loada': # Load from address [A + RE] to register D. A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0000'+'1'+table_b[token[1]]+dec2comp8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='store': # Store the value in register S to address A. A is in [0,255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0001'+'0'+table_b[token[1]]+dec2bin8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='storea': # Store the value in register S to address [A + RE]. 
A is in [0, 255] if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0001'+'1'+table_b[token[1]]+dec2comp8(int(token[2]), index) else: exit() # destination source invalid elif token[0]=='bra': # Unconditional branch to label L if token[1] in table_b and int(token[2])<255 and int(token[2])>=0: ass_txt+='0010'+'0000'+dec2bin8(labels[token[1]], index) else: exit() # destination source invalid elif token[0]== 'braz': # Branch to label L if the CR zero flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0000'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]== 'bran': # Branch to label L if the CR negative flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0010'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='brao': # Branch to label L if the CR overflow flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0001'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='brac': # Branch to label L if the CR carry flag is set # validate label L if token[1] in labels: ass_txt+='0011'+'0011'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='call': # Call the routine at label L # validate label L if token[1] in labels: ass_txt+='0011'+'01'+'00'+dec2bin8(labels[token[1]], index) else: # branch destination not valid exit() elif token[0]=='return': # return from a routine ass_txt+='0011100000000000' elif token[0]=='exit' or token[0]=='halt': # execute the halt/exit instruction ass_txt+='0011110000000000' elif token[0]=='push':
dec2bin8
identifier_name
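The output of `pass2` is an Altera MIF image: the header fixes `DEPTH=256; WIDTH=16;`, each subsequent line pairs a two-digit hex address with a 16-bit word, and branch words embed the address resolved in `pass1`. A hedged end-to-end sketch for a two-instruction program (only BRA and HALT are encoded here, and the closing `END;` keyword is assumed from the MIF format rather than shown above):

```python
def assemble_mif(instructions, labels):
    """Emit a minimal MIF body in the same line format pass2 above uses:
    two-digit hex address, colon, 16-bit binary word."""
    lines = ["DEPTH=256;", "WIDTH=16;", "ADDRESS_RADIX=HEX;", "DATA_RADIX=BIN;",
             "CONTENT", "BEGIN"]
    for index, tokens in enumerate(instructions):
        if tokens[0] == 'bra':
            word = '0010' + '0000' + format(labels[tokens[1]], '08b')
        elif tokens[0] == 'halt':
            word = '0011110000000000'
        else:
            raise NotImplementedError(tokens[0])
        lines.append(f"{index:02X}:{word}")
    lines.append("END;")                 # assumed closing keyword of the MIF format
    return "\n".join(lines)

print(assemble_mif([['bra', 'start'], ['halt']], {'start': 0}))
```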
agreement.rs
use untrusted; //! //! let rng = rand::SystemRandom::new(); //! //! let my_private_key = //! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?; //! //! // Make `my_public_key` a byte slice containing my public key. In a real //! // application, this would be sent to the peer in an encoded protocol //! // message. //! let my_public_key = my_private_key.compute_public_key()?; //! let my_public_key = my_public_key.as_ref(); //! //! // In a real application, the peer public key would be parsed out of a //! // protocol message. Here we just generate one. //! let mut peer_public_key_buf = [0u8; agreement::PUBLIC_KEY_MAX_LEN]; //! let peer_public_key = { //! let peer_private_key = //! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?; //! peer_private_key.compute_public_key()? //! }; //! let peer_public_key = untrusted::Input::from(peer_public_key.as_ref()); //! //! // In a real application, the protocol specifies how to determine what //! // algorithm was used to generate the peer's private key. Here, we know it //! // is X25519 since we just generated it. //! let peer_public_key_alg = &agreement::X25519; //! //! let input_keying_material = my_private_key.agree(peer_public_key_alg, peer_public_key)?; //! input_keying_material.derive(|_key_material| { //! // In a real application, we'd apply a KDF to the key material and the //! // public keys (as recommended in RFC 7748) and then derive session //! // keys from the result. We omit all that here. //! Ok(()) //! }) //! # } //! # fn main() { x25519_agreement_example().unwrap() } //! ``` // The "NSA Guide" steps here are from from section 3.1, "Ephemeral Unified // Model." use crate::{ec, error, rand}; use untrusted; pub use crate::ec::{ curve25519::x25519::X25519, suite_b::ecdh::{ECDH_P256, ECDH_P384}, PUBLIC_KEY_MAX_LEN, }; use core::marker::PhantomData; /// A key agreement algorithm. pub struct Algorithm { pub(crate) curve: &'static ec::Curve, pub(crate) ecdh: fn( out: &mut [u8], private_key: &ec::PrivateKey, peer_public_key: untrusted::Input, ) -> Result<(), error::Unspecified>, } derive_debug_via_self!(Algorithm, self.curve); impl Eq for Algorithm {} impl PartialEq for Algorithm { fn eq(&self, other: &Algorithm) -> bool { self.curve.id == other.curve.id } } /// How many times the key may be used. pub trait Lifetime: self::sealed::Sealed {} /// The key may be used at most once. pub struct Ephemeral {} impl Lifetime for Ephemeral {} impl self::sealed::Sealed for Ephemeral {} /// The key may be used more than once. pub struct Static {} impl Lifetime for Static {} impl self::sealed::Sealed for Static {} /// A key pair for key agreement. pub struct KeyPair<U: Lifetime> { private_key: PrivateKey<U>, public_key: PublicKey, } impl<U: Lifetime> KeyPair<U> { /// Generate a new key pair for the given algorithm. /// /// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`. pub fn generate( alg: &'static Algorithm, rng: &rand::SecureRandom, ) -> Result<Self, error::Unspecified>
/// The private key. pub fn private_key(&self) -> &PrivateKey<U> { &self.private_key } /// The public key. pub fn public_key(&self) -> &PublicKey { &self.public_key } /// Split the key pair apart. pub fn split(self) -> (PrivateKey<U>, PublicKey) { (self.private_key, self.public_key) } } /// A public key for key agreement. pub struct PublicKey { bytes: [u8; PUBLIC_KEY_MAX_LEN], alg: &'static Algorithm, } impl AsRef<[u8]> for PublicKey { #[inline] fn as_ref(&self) -> &[u8] { &self.bytes[..self.alg.curve.public_key_len] } } /// A private key for key agreement. pub struct PrivateKey<U: Lifetime> { private_key: ec::PrivateKey, alg: &'static Algorithm, usage: PhantomData<U>, } impl<U: Lifetime> PrivateKey<U> { /// Generate a new private key for the given algorithm. /// /// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`. pub fn generate( alg: &'static Algorithm, rng: &rand::SecureRandom, ) -> Result<Self, error::Unspecified> { // NSA Guide Step 1. // // This only handles the key generation part of step 1. The rest of // step one is done by `compute_public_key()`. let private_key = ec::PrivateKey::generate(&alg.curve, rng)?; Ok(Self { private_key, alg, usage: PhantomData, }) } /// The key exchange algorithm. #[inline] pub fn algorithm(&self) -> &'static Algorithm { self.alg } /// Computes the public key from the private key's value and fills `out` /// with the public point encoded in the standard form for the algorithm. /// /// `out.len()` must be equal to the value returned by `public_key_len`. #[inline(always)] pub fn compute_public_key(&self) -> Result<PublicKey, error::Unspecified> { // NSA Guide Step 1. // // Obviously, this only handles the part of Step 1 between the private // key generation and the sending of the public key to the peer. `out` // is what shouPrivateKeyld be sent to the peer. let mut public_key = PublicKey { bytes: [0; PUBLIC_KEY_MAX_LEN], alg: self.alg, }; self.private_key .compute_public_key(&self.alg.curve, &mut public_key.bytes)?; Ok(public_key) } /// Performs a key agreement with an private key and the given public key. /// /// Since `self` is consumed, it will not be usable after calling `agree`. /// /// `peer_public_key_alg` is the algorithm/curve for the peer's public key /// point; `agree` will return `Err(error_value)` if it does not match this /// private key's algorithm/curve. /// /// `peer_public_key` is the peer's public key. `agree` verifies that it is /// encoded in the standard form for the algorithm and that the key is /// *valid*; see the algorithm's documentation for details on how keys are /// to be encoded and what constitutes a valid key for that algorithm. /// /// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`. pub fn agree( self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { agree_( &self.private_key, self.alg, peer_public_key_alg, peer_public_key, ) } #[cfg(test)] pub(crate) fn bytes(&self, curve: &ec::Curve) -> &[u8] { self.private_key.bytes(curve) } } impl PrivateKey<Static> { /// Performs a key agreement with a static private key and the given /// public key. /// /// `peer_public_key_alg` is the algorithm/curve for the peer's public key /// point; `agree_static` will return `Err(error_value)` if it does not /// match `my_private_key's` algorithm/curve. /// /// `peer_public_key` is the peer's public key. 
`agree_static` verifies /// that it is encoded in the standard form for the algorithm and that /// the key is *valid*; see the algorithm's documentation for details on /// how keys are to be encoded and what constitutes a valid key for that /// algorithm. /// /// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`. pub fn agree_static( &self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { agree_( &self
{ // NSA Guide Step 1. let private_key = ec::PrivateKey::generate(&alg.curve, rng)?; let mut public_key = PublicKey { bytes: [0; PUBLIC_KEY_MAX_LEN], alg, }; private_key.compute_public_key(&alg.curve, &mut public_key.bytes)?; Ok(Self { private_key: PrivateKey { private_key, alg, usage: PhantomData, }, public_key, }) }
identifier_body
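The doc example above generates two ephemeral X25519 keys, swaps the raw public keys, and has each side compute the same input keying material with `agree`. A rough Python analogue of that flow using the third-party `cryptography` package (an assumption; it is not this crate's API):

```python
# Rough Python analogue of the X25519 example above, using the third-party
# `cryptography` package (pip install cryptography) instead of this crate.
from cryptography.hazmat.primitives.asymmetric.x25519 import (
    X25519PrivateKey, X25519PublicKey)
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat

# Each side generates an ephemeral private key and publishes 32 raw public bytes.
my_private = X25519PrivateKey.generate()
my_public_bytes = my_private.public_key().public_bytes(Encoding.Raw, PublicFormat.Raw)

peer_private = X25519PrivateKey.generate()          # stand-in for the real peer
peer_public_bytes = peer_private.public_key().public_bytes(Encoding.Raw, PublicFormat.Raw)

# Agreement: both sides compute the same 32-byte input keying material.
my_ikm = my_private.exchange(X25519PublicKey.from_public_bytes(peer_public_bytes))
peer_ikm = peer_private.exchange(X25519PublicKey.from_public_bytes(my_public_bytes))
assert my_ikm == peer_ikm and len(my_ikm) == 32
```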
agreement.rs
algorithm. pub struct Algorithm { pub(crate) curve: &'static ec::Curve, pub(crate) ecdh: fn( out: &mut [u8], private_key: &ec::PrivateKey, peer_public_key: untrusted::Input, ) -> Result<(), error::Unspecified>, } derive_debug_via_self!(Algorithm, self.curve); impl Eq for Algorithm {} impl PartialEq for Algorithm { fn eq(&self, other: &Algorithm) -> bool { self.curve.id == other.curve.id } } /// How many times the key may be used. pub trait Lifetime: self::sealed::Sealed {} /// The key may be used at most once. pub struct Ephemeral {} impl Lifetime for Ephemeral {} impl self::sealed::Sealed for Ephemeral {} /// The key may be used more than once. pub struct Static {} impl Lifetime for Static {} impl self::sealed::Sealed for Static {} /// A key pair for key agreement. pub struct KeyPair<U: Lifetime> { private_key: PrivateKey<U>, public_key: PublicKey, } impl<U: Lifetime> KeyPair<U> { /// Generate a new key pair for the given algorithm. /// /// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`. pub fn generate( alg: &'static Algorithm, rng: &rand::SecureRandom, ) -> Result<Self, error::Unspecified> { // NSA Guide Step 1. let private_key = ec::PrivateKey::generate(&alg.curve, rng)?; let mut public_key = PublicKey { bytes: [0; PUBLIC_KEY_MAX_LEN], alg, }; private_key.compute_public_key(&alg.curve, &mut public_key.bytes)?; Ok(Self { private_key: PrivateKey { private_key, alg, usage: PhantomData, }, public_key, }) } /// The private key. pub fn private_key(&self) -> &PrivateKey<U> { &self.private_key } /// The public key. pub fn public_key(&self) -> &PublicKey { &self.public_key } /// Split the key pair apart. pub fn split(self) -> (PrivateKey<U>, PublicKey) { (self.private_key, self.public_key) } } /// A public key for key agreement. pub struct PublicKey { bytes: [u8; PUBLIC_KEY_MAX_LEN], alg: &'static Algorithm, } impl AsRef<[u8]> for PublicKey { #[inline] fn as_ref(&self) -> &[u8] { &self.bytes[..self.alg.curve.public_key_len] } } /// A private key for key agreement. pub struct PrivateKey<U: Lifetime> { private_key: ec::PrivateKey, alg: &'static Algorithm, usage: PhantomData<U>, } impl<U: Lifetime> PrivateKey<U> { /// Generate a new private key for the given algorithm. /// /// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`. pub fn generate( alg: &'static Algorithm, rng: &rand::SecureRandom, ) -> Result<Self, error::Unspecified> { // NSA Guide Step 1. // // This only handles the key generation part of step 1. The rest of // step one is done by `compute_public_key()`. let private_key = ec::PrivateKey::generate(&alg.curve, rng)?; Ok(Self { private_key, alg, usage: PhantomData, }) } /// The key exchange algorithm. #[inline] pub fn algorithm(&self) -> &'static Algorithm { self.alg } /// Computes the public key from the private key's value and fills `out` /// with the public point encoded in the standard form for the algorithm. /// /// `out.len()` must be equal to the value returned by `public_key_len`. #[inline(always)] pub fn compute_public_key(&self) -> Result<PublicKey, error::Unspecified> { // NSA Guide Step 1. // // Obviously, this only handles the part of Step 1 between the private // key generation and the sending of the public key to the peer. `out` // is what shouPrivateKeyld be sent to the peer. let mut public_key = PublicKey { bytes: [0; PUBLIC_KEY_MAX_LEN], alg: self.alg, }; self.private_key .compute_public_key(&self.alg.curve, &mut public_key.bytes)?; Ok(public_key) } /// Performs a key agreement with an private key and the given public key. 
/// /// Since `self` is consumed, it will not be usable after calling `agree`. /// /// `peer_public_key_alg` is the algorithm/curve for the peer's public key /// point; `agree` will return `Err(error_value)` if it does not match this /// private key's algorithm/curve. /// /// `peer_public_key` is the peer's public key. `agree` verifies that it is /// encoded in the standard form for the algorithm and that the key is /// *valid*; see the algorithm's documentation for details on how keys are /// to be encoded and what constitutes a valid key for that algorithm. /// /// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`. pub fn agree( self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { agree_( &self.private_key, self.alg, peer_public_key_alg, peer_public_key, ) } #[cfg(test)] pub(crate) fn bytes(&self, curve: &ec::Curve) -> &[u8] { self.private_key.bytes(curve) } } impl PrivateKey<Static> { /// Performs a key agreement with a static private key and the given /// public key. /// /// `peer_public_key_alg` is the algorithm/curve for the peer's public key /// point; `agree_static` will return `Err(error_value)` if it does not /// match `my_private_key's` algorithm/curve. /// /// `peer_public_key` is the peer's public key. `agree_static` verifies /// that it is encoded in the standard form for the algorithm and that /// the key is *valid*; see the algorithm's documentation for details on /// how keys are to be encoded and what constitutes a valid key for that /// algorithm. /// /// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`. pub fn agree_static( &self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { agree_( &self.private_key, self.alg, peer_public_key_alg, peer_public_key, ) } pub fn from_bytes( alg: &'static Algorithm, bytes: untrusted::Input ) -> Result<Self, error::Unspecified> { let private_key = ec::PrivateKey::from_bytes(&alg.curve, bytes)?; Ok(Self { private_key, alg, usage: PhantomData, }) } pub fn bytes( &self, alg: &'static Algorithm ) -> &[u8] { self.private_key.bytes(&alg.curve) } } fn agree_( my_private_key: &ec::PrivateKey, my_alg: &Algorithm, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { let alg = &my_alg; // NSA Guide Prerequisite 1. // // The domain parameters are hard-coded. This check verifies that the // peer's public key's domain parameters match the domain parameters of // this private key. if peer_public_key_alg != *alg { return Err(error::Unspecified); } // NSA Guide Prerequisite 2, regarding which KDFs are allowed, is delegated // to the caller. // NSA Guide Prerequisite 3, "Prior to or during the key-agreement process, // each party shall obtain the identifier associated with the other party // during the key-agreement scheme," is delegated to the caller. // NSA Guide Step 1 is handled by `Self::generate()` and // `Self::compute_public_key()`. // NSA Guide Steps 2, 3, and 4. // // We have a pretty liberal interpretation of the NIST's spec's "Destroy" // that doesn't meet the NSA requirement to "zeroize." let mut ikm = InputKeyMaterial { bytes: [0; ec::ELEM_MAX_BYTES], len: alg.curve.elem_and_scalar_len, }; (alg.ecdh)(&mut ikm.bytes[..ikm.len], my_private_key, peer_public_key)?; // NSA Guide Steps 5 and 6 are deferred to `InputKeyMaterial::derive`. Ok(ikm) } /// The result of a key agreement operation, to be fed into a KDF. 
/// /// Intentionally not `Clone` or `Copy` since the value should only be /// used once. #[must_use] pub struct
InputKeyMaterial
identifier_name
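`PrivateKey<Static>` above adds `from_bytes`/`bytes` so a long-lived key can be stored and reloaded for `agree_static`. A rough Python analogue of that raw 32-byte round trip, again assuming the third-party `cryptography` package rather than this crate:

```python
# Rough analogue of PrivateKey::<Static>::from_bytes / bytes: persist a long-lived
# X25519 key as its 32 raw bytes and reload it later.
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
from cryptography.hazmat.primitives.serialization import (
    Encoding, PrivateFormat, PublicFormat, NoEncryption)

static_key = X25519PrivateKey.generate()
raw = static_key.private_bytes(Encoding.Raw, PrivateFormat.Raw, NoEncryption())
assert len(raw) == 32                      # X25519 private scalars are 32 bytes

reloaded = X25519PrivateKey.from_private_bytes(raw)

# Reloading preserves the key: both copies publish the same public point,
# so agree_static-style reuse across sessions sees a stable identity.
orig_pub = static_key.public_key().public_bytes(Encoding.Raw, PublicFormat.Raw)
new_pub = reloaded.public_key().public_bytes(Encoding.Raw, PublicFormat.Raw)
assert orig_pub == new_pub
```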
agreement.rs
use untrusted; //! //! let rng = rand::SystemRandom::new(); //! //! let my_private_key = //! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?; //! //! // Make `my_public_key` a byte slice containing my public key. In a real //! // application, this would be sent to the peer in an encoded protocol //! // message. //! let my_public_key = my_private_key.compute_public_key()?; //! let my_public_key = my_public_key.as_ref(); //! //! // In a real application, the peer public key would be parsed out of a //! // protocol message. Here we just generate one. //! let mut peer_public_key_buf = [0u8; agreement::PUBLIC_KEY_MAX_LEN]; //! let peer_public_key = { //! let peer_private_key = //! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?; //! peer_private_key.compute_public_key()? //! }; //! let peer_public_key = untrusted::Input::from(peer_public_key.as_ref()); //! //! // In a real application, the protocol specifies how to determine what //! // algorithm was used to generate the peer's private key. Here, we know it //! // is X25519 since we just generated it. //! let peer_public_key_alg = &agreement::X25519; //! //! let input_keying_material = my_private_key.agree(peer_public_key_alg, peer_public_key)?; //! input_keying_material.derive(|_key_material| { //! // In a real application, we'd apply a KDF to the key material and the //! // public keys (as recommended in RFC 7748) and then derive session //! // keys from the result. We omit all that here. //! Ok(()) //! }) //! # } //! # fn main() { x25519_agreement_example().unwrap() } //! ``` // The "NSA Guide" steps here are from from section 3.1, "Ephemeral Unified // Model." use crate::{ec, error, rand}; use untrusted; pub use crate::ec::{ curve25519::x25519::X25519, suite_b::ecdh::{ECDH_P256, ECDH_P384}, PUBLIC_KEY_MAX_LEN, }; use core::marker::PhantomData; /// A key agreement algorithm. pub struct Algorithm { pub(crate) curve: &'static ec::Curve, pub(crate) ecdh: fn( out: &mut [u8], private_key: &ec::PrivateKey, peer_public_key: untrusted::Input, ) -> Result<(), error::Unspecified>, } derive_debug_via_self!(Algorithm, self.curve); impl Eq for Algorithm {} impl PartialEq for Algorithm { fn eq(&self, other: &Algorithm) -> bool { self.curve.id == other.curve.id } } /// How many times the key may be used. pub trait Lifetime: self::sealed::Sealed {} /// The key may be used at most once. pub struct Ephemeral {} impl Lifetime for Ephemeral {} impl self::sealed::Sealed for Ephemeral {} /// The key may be used more than once. pub struct Static {} impl Lifetime for Static {} impl self::sealed::Sealed for Static {} /// A key pair for key agreement. pub struct KeyPair<U: Lifetime> { private_key: PrivateKey<U>, public_key: PublicKey, } impl<U: Lifetime> KeyPair<U> { /// Generate a new key pair for the given algorithm. /// /// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`. pub fn generate( alg: &'static Algorithm, rng: &rand::SecureRandom, ) -> Result<Self, error::Unspecified> { // NSA Guide Step 1. let private_key = ec::PrivateKey::generate(&alg.curve, rng)?; let mut public_key = PublicKey { bytes: [0; PUBLIC_KEY_MAX_LEN], alg, }; private_key.compute_public_key(&alg.curve, &mut public_key.bytes)?; Ok(Self { private_key: PrivateKey { private_key, alg, usage: PhantomData, }, public_key, }) } /// The private key. pub fn private_key(&self) -> &PrivateKey<U> { &self.private_key } /// The public key. pub fn public_key(&self) -> &PublicKey { &self.public_key } /// Split the key pair apart. 
pub fn split(self) -> (PrivateKey<U>, PublicKey) { (self.private_key, self.public_key) } } /// A public key for key agreement. pub struct PublicKey { bytes: [u8; PUBLIC_KEY_MAX_LEN], alg: &'static Algorithm, } impl AsRef<[u8]> for PublicKey { #[inline] fn as_ref(&self) -> &[u8] { &self.bytes[..self.alg.curve.public_key_len] } } /// A private key for key agreement. pub struct PrivateKey<U: Lifetime> { private_key: ec::PrivateKey, alg: &'static Algorithm, usage: PhantomData<U>, } impl<U: Lifetime> PrivateKey<U> { /// Generate a new private key for the given algorithm. /// /// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`. pub fn generate( alg: &'static Algorithm, rng: &rand::SecureRandom, ) -> Result<Self, error::Unspecified> { // NSA Guide Step 1. // // This only handles the key generation part of step 1. The rest of // step one is done by `compute_public_key()`. let private_key = ec::PrivateKey::generate(&alg.curve, rng)?; Ok(Self { private_key, alg, usage: PhantomData, }) } /// The key exchange algorithm. #[inline] pub fn algorithm(&self) -> &'static Algorithm { self.alg } /// Computes the public key from the private key's value and fills `out` /// with the public point encoded in the standard form for the algorithm. /// /// `out.len()` must be equal to the value returned by `public_key_len`. #[inline(always)] pub fn compute_public_key(&self) -> Result<PublicKey, error::Unspecified> { // NSA Guide Step 1. // // Obviously, this only handles the part of Step 1 between the private // key generation and the sending of the public key to the peer. `out` // is what shouPrivateKeyld be sent to the peer. let mut public_key = PublicKey { bytes: [0; PUBLIC_KEY_MAX_LEN],
}; self.private_key .compute_public_key(&self.alg.curve, &mut public_key.bytes)?; Ok(public_key) } /// Performs a key agreement with an private key and the given public key. /// /// Since `self` is consumed, it will not be usable after calling `agree`. /// /// `peer_public_key_alg` is the algorithm/curve for the peer's public key /// point; `agree` will return `Err(error_value)` if it does not match this /// private key's algorithm/curve. /// /// `peer_public_key` is the peer's public key. `agree` verifies that it is /// encoded in the standard form for the algorithm and that the key is /// *valid*; see the algorithm's documentation for details on how keys are /// to be encoded and what constitutes a valid key for that algorithm. /// /// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`. pub fn agree( self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { agree_( &self.private_key, self.alg, peer_public_key_alg, peer_public_key, ) } #[cfg(test)] pub(crate) fn bytes(&self, curve: &ec::Curve) -> &[u8] { self.private_key.bytes(curve) } } impl PrivateKey<Static> { /// Performs a key agreement with a static private key and the given /// public key. /// /// `peer_public_key_alg` is the algorithm/curve for the peer's public key /// point; `agree_static` will return `Err(error_value)` if it does not /// match `my_private_key's` algorithm/curve. /// /// `peer_public_key` is the peer's public key. `agree_static` verifies /// that it is encoded in the standard form for the algorithm and that /// the key is *valid*; see the algorithm's documentation for details on /// how keys are to be encoded and what constitutes a valid key for that /// algorithm. /// /// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`. pub fn agree_static( &self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { agree_( &self.private_key
alg: self.alg,
random_line_split
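The comments above deliberately stop at the raw key material: NSA Guide steps 5 and 6 are deferred to `InputKeyMaterial::derive`, and the doc example recommends feeding the material plus both public keys through a KDF. A hedged sketch of that derive step with HKDF from the third-party `cryptography` package (the protocol label and key ordering are assumptions):

```python
# The derive(...) step left to the caller above: run the agreed key material
# (plus both public keys, as the doc comment suggests) through a KDF before use.
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

def derive_session_key(input_keying_material: bytes,
                       my_public: bytes, peer_public: bytes) -> bytes:
    hkdf = HKDF(
        algorithm=hashes.SHA256(),
        length=32,                              # one 256-bit session key
        salt=None,
        # Binds both public keys into the derivation; both sides must agree on
        # the same byte order for the two keys (e.g. initiator first).
        info=b"example-protocol v1" + my_public + peer_public,
    )
    return hkdf.derive(input_keying_material)   # HKDF objects are single-use

# session_key = derive_session_key(my_ikm, my_public_bytes, peer_public_bytes)
```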
agreement.rs
what //! // algorithm was used to generate the peer's private key. Here, we know it //! // is X25519 since we just generated it. //! let peer_public_key_alg = &agreement::X25519; //! //! let input_keying_material = my_private_key.agree(peer_public_key_alg, peer_public_key)?; //! input_keying_material.derive(|_key_material| { //! // In a real application, we'd apply a KDF to the key material and the //! // public keys (as recommended in RFC 7748) and then derive session //! // keys from the result. We omit all that here. //! Ok(()) //! }) //! # } //! # fn main() { x25519_agreement_example().unwrap() } //! ``` // The "NSA Guide" steps here are from from section 3.1, "Ephemeral Unified // Model." use crate::{ec, error, rand}; use untrusted; pub use crate::ec::{ curve25519::x25519::X25519, suite_b::ecdh::{ECDH_P256, ECDH_P384}, PUBLIC_KEY_MAX_LEN, }; use core::marker::PhantomData; /// A key agreement algorithm. pub struct Algorithm { pub(crate) curve: &'static ec::Curve, pub(crate) ecdh: fn( out: &mut [u8], private_key: &ec::PrivateKey, peer_public_key: untrusted::Input, ) -> Result<(), error::Unspecified>, } derive_debug_via_self!(Algorithm, self.curve); impl Eq for Algorithm {} impl PartialEq for Algorithm { fn eq(&self, other: &Algorithm) -> bool { self.curve.id == other.curve.id } } /// How many times the key may be used. pub trait Lifetime: self::sealed::Sealed {} /// The key may be used at most once. pub struct Ephemeral {} impl Lifetime for Ephemeral {} impl self::sealed::Sealed for Ephemeral {} /// The key may be used more than once. pub struct Static {} impl Lifetime for Static {} impl self::sealed::Sealed for Static {} /// A key pair for key agreement. pub struct KeyPair<U: Lifetime> { private_key: PrivateKey<U>, public_key: PublicKey, } impl<U: Lifetime> KeyPair<U> { /// Generate a new key pair for the given algorithm. /// /// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`. pub fn generate( alg: &'static Algorithm, rng: &rand::SecureRandom, ) -> Result<Self, error::Unspecified> { // NSA Guide Step 1. let private_key = ec::PrivateKey::generate(&alg.curve, rng)?; let mut public_key = PublicKey { bytes: [0; PUBLIC_KEY_MAX_LEN], alg, }; private_key.compute_public_key(&alg.curve, &mut public_key.bytes)?; Ok(Self { private_key: PrivateKey { private_key, alg, usage: PhantomData, }, public_key, }) } /// The private key. pub fn private_key(&self) -> &PrivateKey<U> { &self.private_key } /// The public key. pub fn public_key(&self) -> &PublicKey { &self.public_key } /// Split the key pair apart. pub fn split(self) -> (PrivateKey<U>, PublicKey) { (self.private_key, self.public_key) } } /// A public key for key agreement. pub struct PublicKey { bytes: [u8; PUBLIC_KEY_MAX_LEN], alg: &'static Algorithm, } impl AsRef<[u8]> for PublicKey { #[inline] fn as_ref(&self) -> &[u8] { &self.bytes[..self.alg.curve.public_key_len] } } /// A private key for key agreement. pub struct PrivateKey<U: Lifetime> { private_key: ec::PrivateKey, alg: &'static Algorithm, usage: PhantomData<U>, } impl<U: Lifetime> PrivateKey<U> { /// Generate a new private key for the given algorithm. /// /// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`. pub fn generate( alg: &'static Algorithm, rng: &rand::SecureRandom, ) -> Result<Self, error::Unspecified> { // NSA Guide Step 1. // // This only handles the key generation part of step 1. The rest of // step one is done by `compute_public_key()`. 
let private_key = ec::PrivateKey::generate(&alg.curve, rng)?; Ok(Self { private_key, alg, usage: PhantomData, }) } /// The key exchange algorithm. #[inline] pub fn algorithm(&self) -> &'static Algorithm { self.alg } /// Computes the public key from the private key's value and fills `out` /// with the public point encoded in the standard form for the algorithm. /// /// `out.len()` must be equal to the value returned by `public_key_len`. #[inline(always)] pub fn compute_public_key(&self) -> Result<PublicKey, error::Unspecified> { // NSA Guide Step 1. // // Obviously, this only handles the part of Step 1 between the private // key generation and the sending of the public key to the peer. `out` // is what shouPrivateKeyld be sent to the peer. let mut public_key = PublicKey { bytes: [0; PUBLIC_KEY_MAX_LEN], alg: self.alg, }; self.private_key .compute_public_key(&self.alg.curve, &mut public_key.bytes)?; Ok(public_key) } /// Performs a key agreement with an private key and the given public key. /// /// Since `self` is consumed, it will not be usable after calling `agree`. /// /// `peer_public_key_alg` is the algorithm/curve for the peer's public key /// point; `agree` will return `Err(error_value)` if it does not match this /// private key's algorithm/curve. /// /// `peer_public_key` is the peer's public key. `agree` verifies that it is /// encoded in the standard form for the algorithm and that the key is /// *valid*; see the algorithm's documentation for details on how keys are /// to be encoded and what constitutes a valid key for that algorithm. /// /// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`. pub fn agree( self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { agree_( &self.private_key, self.alg, peer_public_key_alg, peer_public_key, ) } #[cfg(test)] pub(crate) fn bytes(&self, curve: &ec::Curve) -> &[u8] { self.private_key.bytes(curve) } } impl PrivateKey<Static> { /// Performs a key agreement with a static private key and the given /// public key. /// /// `peer_public_key_alg` is the algorithm/curve for the peer's public key /// point; `agree_static` will return `Err(error_value)` if it does not /// match `my_private_key's` algorithm/curve. /// /// `peer_public_key` is the peer's public key. `agree_static` verifies /// that it is encoded in the standard form for the algorithm and that /// the key is *valid*; see the algorithm's documentation for details on /// how keys are to be encoded and what constitutes a valid key for that /// algorithm. /// /// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`. pub fn agree_static( &self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { agree_( &self.private_key, self.alg, peer_public_key_alg, peer_public_key, ) } pub fn from_bytes( alg: &'static Algorithm, bytes: untrusted::Input ) -> Result<Self, error::Unspecified> { let private_key = ec::PrivateKey::from_bytes(&alg.curve, bytes)?; Ok(Self { private_key, alg, usage: PhantomData, }) } pub fn bytes( &self, alg: &'static Algorithm ) -> &[u8] { self.private_key.bytes(&alg.curve) } } fn agree_( my_private_key: &ec::PrivateKey, my_alg: &Algorithm, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input, ) -> Result<InputKeyMaterial, error::Unspecified> { let alg = &my_alg; // NSA Guide Prerequisite 1. // // The domain parameters are hard-coded. 
This check verifies that the // peer's public key's domain parameters match the domain parameters of // this private key. if peer_public_key_alg != *alg
{ return Err(error::Unspecified); }
conditional_block
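`agree_` above refuses to run ECDH when the peer key's algorithm/curve does not match the private key's (NSA Guide Prerequisite 1). A rough Python parallel of that guard using the third-party `cryptography` package, where the check becomes a type/curve match before `exchange`:

```python
# Parallel to the `peer_public_key_alg != *alg` check above: refuse to run the
# exchange when the peer key is not on the same curve as our private key.
from cryptography.hazmat.primitives.asymmetric import x25519, x448


def agree(my_private, peer_public) -> bytes:
    pairs = [
        (x25519.X25519PrivateKey, x25519.X25519PublicKey),
        (x448.X448PrivateKey, x448.X448PublicKey),
    ]
    for priv_type, pub_type in pairs:
        if isinstance(my_private, priv_type):
            if not isinstance(peer_public, pub_type):
                raise ValueError("peer public key is on a different curve")
            return my_private.exchange(peer_public)
    raise ValueError("unsupported private key type")

# agree(x25519.X25519PrivateKey.generate(), x448.X448PrivateKey.generate().public_key())
# -> ValueError, mirroring the error::Unspecified returned above.
```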
transformer_dynsparse.py
def embedding(self, x, src, compute_dense_grad=False, sparse_embeddings=False): # x[batch_size*sequence_length] -> x[batch_size*sequence_length, embedding_length] inshape = x.shape.as_list() assert len(inshape) == 2, f"Input to embedding lookup has shape {inshape}, but should be a 2D tensor" vocab_len = self.source_vocab_length if src else self.target_vocab_length assert vocab_len is not None, "Embedding vocab length must be defined" sequence_length = self.source_sequence_length if src else self.target_sequence_length with self.namescope('embedding'): with self.namescope('token_lut'): if sparse_embeddings: # Embeddings are created using these projection weights later. # Currently sparse embeddings only support 1x1 sparsity so we # must force the partials type to float: self.sparse_projection = self.getOrCreateSparseLinear(x_shape=[inshape[0] * inshape[1], self.embedding_length], x_dtype = self.dtype, sparsity=self.sparsity, dense_length=vocab_len, block_size=1, use_bias=self.include_projection_bias, override_partials_type="float") sparse_embeddings_layer = SparseTiedEmbedding.from_sparse_projection("tied_embedding", self.sparse_projection) x = sparse_embeddings_layer(x) embedding_dict = None else: # Embedding gets reused later on in the prediction layer dict_name = "source_" if src else "target_" dict_name = dict_name + "embedding_dict" embedding_dict = tf.get_variable(dict_name, (vocab_len, self.embedding_length), self.dtype, embedding_initializer) x = ipu.embedding_ops.embedding_lookup(embedding_dict, x) x = self.norm(x) # Add the positional encodings x = self.position_encoder(x, sequence_length) # normalize before the projection to hidden length x = self.norm(x) # The embedding length is decoupled from the hidden length with self.namescope("up_project"): x = self.sparseLinear(x, self.sparsity, self.hidden_length, compute_dense_grad, use_bias=True, disable_outlining=True) # no non-linearity here according to ALBERT return x, embedding_dict def projection(self, x, compute_dense_grad=False, sparse_embeddings=False): # x[..,embedding_length] -> x[..,target_vocab_length] with self.namescope('projection'): with self.namescope('down_project'): # Project from hidden_length to embedding_length so we can reuse the # embedding look-up table x = self.sparseLinear(x, self.sparsity, self.embedding_length, compute_dense_grad, use_bias=True, disable_outlining=True) x = ipu.nn_ops.gelu(x) x = self.norm(x) with self.namescope('decoder_logits'): # The look-up table is shared with the dense embedding layer if sparse_embeddings: if self.exclude_embedding: x = self.sparseLinear(x, self.sparsity, self.target_vocab_length, compute_dense_grad, use_bias=self.include_projection_bias, disable_outlining=True) else: x = self.applySparseLinear(x, self.sparse_projection, self.target_vocab_length, compute_dense_grad, disable_outlining=True) else: if not self.exclude_embedding: decoder_w = tf.transpose(self.tied_embedding, name="decoder_w") else: decoder_w = tf.get_variable("decoder_w", (self.embedding_length, self.target_vocab_length), x.dtype, init_glorot) # Optionally attach a bias to each token if self.include_projection_bias: decoder_b = np.zeros([self.target_vocab_length], dtype=x.dtype) + 1.0 / self.target_vocab_length # no chance of start token (small) if self.target_bos_id is not None: decoder_b[self.target_bos_id] = 1.0 / (self.target_vocab_length * 2) # every sequence has an end token, but most # sequences are shorter than sequence length if self.target_eos_id is not None: decoder_b[self.target_eos_id] = 2.0 
/ self.target_sequence_length decoder_b = np.log(decoder_b) decoder_b = tf.get_variable("bias", (self.target_vocab_length), x.dtype, decoder_b.astype(x.dtype)) x = tf.nn.xw_plus_b(x, decoder_w, decoder_b) else: x = tf.matmul(x, decoder_w) return x def norm(self, x): # -> x with self.namescope('layernorm'): param_initializers = { "beta": tf.initializers.constant(0.0, x.dtype), "gamma": tf.initializers.constant(0.1, x.dtype) } x = ipu.normalization_ops.group_norm(x, groups=1, param_initializers=param_initializers) return x def feed_forward(self, x, compute_dense_grad=False): # -> x with self.namescope('ffn'): with self.namescope('1'): x = self.sparseLinear(x, self.sparsity, self.ff_length, compute_dense_grad, use_bias=True) with self.namescope('activation'): x = ipu.nn_ops.gelu(x) x = self.dropout(x) with self.namescope('2'): x = self.sparseLinear(x, self.sparsity, self.hidden_length, compute_dense_grad, use_bias=True) return x def attention(self, in_q, in_k, in_v, mask=None, is_self_attention=False, compute_dense_grad=False): """ [batch_size, sequence_length, hidden_length] -> [B, S, 1 ,H] """ # Parameter dimensions b_shape = self.attention_heads * self.qkv_length use_bias = not self.exclude_attention_biases if self.disable_concat_qkv or not is_self_attention: # Prepend (head) dimension in_q_r = tf.expand_dims(in_q, axis=-2) in_k_r = tf.expand_dims(in_k, axis=-2) in_v_r = tf.expand_dims(in_v, axis=-2) # Queries with self.namescope('q'): q = self.sparseLinear(in_q_r, self.sparsity, b_shape, compute_dense_grad, use_bias) # Keys with self.namescope('k'): k = self.sparseLinear(in_k_r, self.sparsity, b_shape, compute_dense_grad, use_bias) # Values with self.namescope('v'): v = self.sparseLinear(in_v_r, self.sparsity, b_shape, compute_dense_grad, use_bias) else: with self.namescope('qkv'): # Prepend (head) dimension in_qkv_r = tf.expand_dims(in_q, axis=-2) qkv = self.sparseLinear(in_qkv_r, self.sparsity, 3 * b_shape, compute_dense_grad, use_bias) # Extract q, k and v q, k, v = tf.split(qkv, 3, axis=-1) # Extract heads and transpose [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] batch_size, sequence_length, _ = in_q.shape.as_list() with self.namescope('q'): q = tf.reshape(q, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) q = tf.transpose(q, perm=[0, 2, 1, 3]) # [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] with self.namescope('k'): k = tf.reshape(k, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) kt = tf.transpose(k, perm=[0, 2, 3, 1]) # [B, S, heads, qkv_len] -> [B, heads, qkv_len, S] with self.namescope('v'): v = tf.reshape(v, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) v = tf.transpose(v, perm=[0, 2, 1, 3]) # [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] # Dense attention calculation with self.namescope('interaction'): if self.use_static_sparse_autoregressive_attention and is_self_attention: z = static_sparse_attention.autoregressive_self_attention(q, kt, v) else: # Dense interaction x = tf.matmul(q, kt, name="token_to_token") c = tf.constant(1 / np.sqrt(self.qkv_length), x.dtype) x = tf.multiply(x, c) # "Memory mask" e.g. causal if mask is not None and (in_q == in_k): # only in self attention x = tf.add(x, mask, name="attention_mask") # Take softmax across the last axis (B, heads, S1, S2) pick (0, 1, 2,
self.encoder_k = None
        self.encoder_v = None
        # Dynsparse transformer is both a transformer and a sparse model
        Transformer.__init__(self, params, *args, **kwargs)
        SparseModel.__init__(self, params, *args, **kwargs)
identifier_body
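The dense interaction in the attention code above is the standard scaled dot-product pattern: q·kᵀ scaled by 1/sqrt(qkv_length), an additive "memory mask" for the causal case, and a softmax over the last axis. A minimal NumPy sketch of that computation follows; the tensor shapes and the -1e9 mask value are illustrative assumptions, not the IPU implementation.

import numpy as np

def scaled_dot_product_attention(q, k, v, causal=True):
    """q, k, v: [batch, heads, seq, qkv_len]; returns [batch, heads, seq, qkv_len]."""
    qkv_len = q.shape[-1]
    # Token-to-token scores, scaled by 1 / sqrt(qkv_len) as in the code above
    scores = np.matmul(q, np.swapaxes(k, -1, -2)) / np.sqrt(qkv_len)
    if causal:
        seq = q.shape[-2]
        # Additive causal mask: large negative values strictly above the diagonal
        mask = np.triu(np.full((seq, seq), -1e9), k=1)
        scores = scores + mask
    # Softmax across the last axis (the key dimension), numerically stabilised
    scores = scores - scores.max(axis=-1, keepdims=True)
    weights = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)
    return np.matmul(weights, v)

# Example with arbitrary sizes
q = np.random.randn(2, 4, 8, 16)
k = np.random.randn(2, 4, 8, 16)
v = np.random.randn(2, 4, 8, 16)
print(scaled_dot_product_attention(q, k, v).shape)  # (2, 4, 8, 16)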
transformer_dynsparse.py
params, *args, **kwargs) SparseModel.__init__(self, params, *args, **kwargs) def embedding(self, x, src, compute_dense_grad=False, sparse_embeddings=False): # x[batch_size*sequence_length] -> x[batch_size*sequence_length, embedding_length] inshape = x.shape.as_list() assert len(inshape) == 2, f"Input to embedding lookup has shape {inshape}, but should be a 2D tensor" vocab_len = self.source_vocab_length if src else self.target_vocab_length assert vocab_len is not None, "Embedding vocab length must be defined" sequence_length = self.source_sequence_length if src else self.target_sequence_length with self.namescope('embedding'): with self.namescope('token_lut'): if sparse_embeddings: # Embeddings are created using these projection weights later. # Currently sparse embeddings only support 1x1 sparsity so we # must force the partials type to float: self.sparse_projection = self.getOrCreateSparseLinear(x_shape=[inshape[0] * inshape[1], self.embedding_length], x_dtype = self.dtype, sparsity=self.sparsity, dense_length=vocab_len, block_size=1, use_bias=self.include_projection_bias, override_partials_type="float") sparse_embeddings_layer = SparseTiedEmbedding.from_sparse_projection("tied_embedding", self.sparse_projection) x = sparse_embeddings_layer(x) embedding_dict = None else: # Embedding gets reused later on in the prediction layer dict_name = "source_" if src else "target_" dict_name = dict_name + "embedding_dict" embedding_dict = tf.get_variable(dict_name, (vocab_len, self.embedding_length), self.dtype, embedding_initializer) x = ipu.embedding_ops.embedding_lookup(embedding_dict, x) x = self.norm(x) # Add the positional encodings x = self.position_encoder(x, sequence_length) # normalize before the projection to hidden length x = self.norm(x) # The embedding length is decoupled from the hidden length with self.namescope("up_project"): x = self.sparseLinear(x, self.sparsity, self.hidden_length, compute_dense_grad, use_bias=True, disable_outlining=True) # no non-linearity here according to ALBERT return x, embedding_dict def projection(self, x, compute_dense_grad=False, sparse_embeddings=False): # x[..,embedding_length] -> x[..,target_vocab_length] with self.namescope('projection'): with self.namescope('down_project'): # Project from hidden_length to embedding_length so we can reuse the # embedding look-up table x = self.sparseLinear(x, self.sparsity, self.embedding_length, compute_dense_grad, use_bias=True, disable_outlining=True) x = ipu.nn_ops.gelu(x) x = self.norm(x) with self.namescope('decoder_logits'):
compute_dense_grad, use_bias=self.include_projection_bias, disable_outlining=True) else: x = self.applySparseLinear(x, self.sparse_projection, self.target_vocab_length, compute_dense_grad, disable_outlining=True) else: if not self.exclude_embedding: decoder_w = tf.transpose(self.tied_embedding, name="decoder_w") else: decoder_w = tf.get_variable("decoder_w", (self.embedding_length, self.target_vocab_length), x.dtype, init_glorot) # Optionally attach a bias to each token if self.include_projection_bias: decoder_b = np.zeros([self.target_vocab_length], dtype=x.dtype) + 1.0 / self.target_vocab_length # no chance of start token (small) if self.target_bos_id is not None: decoder_b[self.target_bos_id] = 1.0 / (self.target_vocab_length * 2) # every sequence has an end token, but most # sequences are shorter than sequence length if self.target_eos_id is not None: decoder_b[self.target_eos_id] = 2.0 / self.target_sequence_length decoder_b = np.log(decoder_b) decoder_b = tf.get_variable("bias", (self.target_vocab_length), x.dtype, decoder_b.astype(x.dtype)) x = tf.nn.xw_plus_b(x, decoder_w, decoder_b) else: x = tf.matmul(x, decoder_w) return x def norm(self, x): # -> x with self.namescope('layernorm'): param_initializers = { "beta": tf.initializers.constant(0.0, x.dtype), "gamma": tf.initializers.constant(0.1, x.dtype) } x = ipu.normalization_ops.group_norm(x, groups=1, param_initializers=param_initializers) return x def feed_forward(self, x, compute_dense_grad=False): # -> x with self.namescope('ffn'): with self.namescope('1'): x = self.sparseLinear(x, self.sparsity, self.ff_length, compute_dense_grad, use_bias=True) with self.namescope('activation'): x = ipu.nn_ops.gelu(x) x = self.dropout(x) with self.namescope('2'): x = self.sparseLinear(x, self.sparsity, self.hidden_length, compute_dense_grad, use_bias=True) return x def attention(self, in_q, in_k, in_v, mask=None, is_self_attention=False, compute_dense_grad=False): """ [batch_size, sequence_length, hidden_length] -> [B, S, 1 ,H] """ # Parameter dimensions b_shape = self.attention_heads * self.qkv_length use_bias = not self.exclude_attention_biases if self.disable_concat_qkv or not is_self_attention: # Prepend (head) dimension in_q_r = tf.expand_dims(in_q, axis=-2) in_k_r = tf.expand_dims(in_k, axis=-2) in_v_r = tf.expand_dims(in_v, axis=-2) # Queries with self.namescope('q'): q = self.sparseLinear(in_q_r, self.sparsity, b_shape, compute_dense_grad, use_bias) # Keys with self.namescope('k'): k = self.sparseLinear(in_k_r, self.sparsity, b_shape, compute_dense_grad, use_bias) # Values with self.namescope('v'): v = self.sparseLinear(in_v_r, self.sparsity, b_shape, compute_dense_grad, use_bias) else: with self.namescope('qkv'): # Prepend (head) dimension in_qkv_r = tf.expand_dims(in_q, axis=-2) qkv = self.sparseLinear(in_qkv_r, self.sparsity, 3 * b_shape, compute_dense_grad, use_bias) # Extract q, k and v q, k, v = tf.split(qkv, 3, axis=-1) # Extract heads and transpose [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] batch_size, sequence_length, _ = in_q.shape.as_list() with self.namescope('q'): q = tf.reshape(q, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) q = tf.transpose(q, perm=[0, 2, 1, 3]) # [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] with self.namescope('k'): k = tf.reshape(k, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) kt = tf.transpose(k, perm=[0, 2, 3, 1]) # [B, S, heads, qkv_len] -> [B, heads, qkv_len, S] with self.namescope('v'): v = tf.reshape(v, [batch_size, sequence_length, 
self.attention_heads, self.qkv_length]) v = tf.transpose(v, perm=[0, 2, 1, 3]) # [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] # Dense attention calculation with self.namescope('interaction'): if self.use_static_sparse_autoregressive_attention and is_self_attention: z = static_sparse_attention.autoregressive_self_attention(q, kt, v) else: # Dense interaction x = tf.matmul(q, kt, name="token_to_token") c = tf.constant(1 / np.sqrt(self.qkv_length), x.dtype) x = tf.multiply(x, c) # "Memory mask" e.g. causal if mask is not None and (in_q == in_k): # only in self attention x = tf.add(x, mask, name="attention_mask") # Take softmax across the last axis (B, heads, S1, S2) pick (0, 1, 2, 3<-) x = tf.nn.softmax(x, axis=3) # Pick up the values # x[B, heads, seq_len1, seq
# The look-up table is shared with the dense embedding layer if sparse_embeddings: if self.exclude_embedding: x = self.sparseLinear(x, self.sparsity, self.target_vocab_length,
random_line_split
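The projection code above initialises the decoder bias as the log of a prior over the target vocabulary: roughly uniform mass per token, half the uniform mass for the start token, and 2 / sequence_length for the end token. A small NumPy sketch of that initialisation; the vocabulary size, sequence length and token ids below are made-up example values.

import numpy as np

def make_decoder_bias(vocab_len, seq_len, bos_id=None, eos_id=None):
    """Log-prior bias over the target vocabulary, mirroring the rule above."""
    prior = np.full(vocab_len, 1.0 / vocab_len)   # uniform prior per token
    if bos_id is not None:
        prior[bos_id] = 1.0 / (vocab_len * 2)     # start token is unlikely
    if eos_id is not None:
        prior[eos_id] = 2.0 / seq_len             # one EOS per (usually shorter) sequence
    return np.log(prior)                          # used as an additive logit bias

bias = make_decoder_bias(vocab_len=32000, seq_len=128, bos_id=1, eos_id=2)
print(bias.shape, bias[1], bias[2])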
transformer_dynsparse.py
params, *args, **kwargs) SparseModel.__init__(self, params, *args, **kwargs) def embedding(self, x, src, compute_dense_grad=False, sparse_embeddings=False): # x[batch_size*sequence_length] -> x[batch_size*sequence_length, embedding_length] inshape = x.shape.as_list() assert len(inshape) == 2, f"Input to embedding lookup has shape {inshape}, but should be a 2D tensor" vocab_len = self.source_vocab_length if src else self.target_vocab_length assert vocab_len is not None, "Embedding vocab length must be defined" sequence_length = self.source_sequence_length if src else self.target_sequence_length with self.namescope('embedding'): with self.namescope('token_lut'): if sparse_embeddings: # Embeddings are created using these projection weights later. # Currently sparse embeddings only support 1x1 sparsity so we # must force the partials type to float: self.sparse_projection = self.getOrCreateSparseLinear(x_shape=[inshape[0] * inshape[1], self.embedding_length], x_dtype = self.dtype, sparsity=self.sparsity, dense_length=vocab_len, block_size=1, use_bias=self.include_projection_bias, override_partials_type="float") sparse_embeddings_layer = SparseTiedEmbedding.from_sparse_projection("tied_embedding", self.sparse_projection) x = sparse_embeddings_layer(x) embedding_dict = None else: # Embedding gets reused later on in the prediction layer dict_name = "source_" if src else "target_" dict_name = dict_name + "embedding_dict" embedding_dict = tf.get_variable(dict_name, (vocab_len, self.embedding_length), self.dtype, embedding_initializer) x = ipu.embedding_ops.embedding_lookup(embedding_dict, x) x = self.norm(x) # Add the positional encodings x = self.position_encoder(x, sequence_length) # normalize before the projection to hidden length x = self.norm(x) # The embedding length is decoupled from the hidden length with self.namescope("up_project"): x = self.sparseLinear(x, self.sparsity, self.hidden_length, compute_dense_grad, use_bias=True, disable_outlining=True) # no non-linearity here according to ALBERT return x, embedding_dict def projection(self, x, compute_dense_grad=False, sparse_embeddings=False): # x[..,embedding_length] -> x[..,target_vocab_length] with self.namescope('projection'): with self.namescope('down_project'): # Project from hidden_length to embedding_length so we can reuse the # embedding look-up table x = self.sparseLinear(x, self.sparsity, self.embedding_length, compute_dense_grad, use_bias=True, disable_outlining=True) x = ipu.nn_ops.gelu(x) x = self.norm(x) with self.namescope('decoder_logits'): # The look-up table is shared with the dense embedding layer if sparse_embeddings: if self.exclude_embedding: x = self.sparseLinear(x, self.sparsity, self.target_vocab_length, compute_dense_grad, use_bias=self.include_projection_bias, disable_outlining=True) else: x = self.applySparseLinear(x, self.sparse_projection, self.target_vocab_length, compute_dense_grad, disable_outlining=True) else: if not self.exclude_embedding: decoder_w = tf.transpose(self.tied_embedding, name="decoder_w") else: decoder_w = tf.get_variable("decoder_w", (self.embedding_length, self.target_vocab_length), x.dtype, init_glorot) # Optionally attach a bias to each token if self.include_projection_bias: decoder_b = np.zeros([self.target_vocab_length], dtype=x.dtype) + 1.0 / self.target_vocab_length # no chance of start token (small) if self.target_bos_id is not None: decoder_b[self.target_bos_id] = 1.0 / (self.target_vocab_length * 2) # every sequence has an end token, but most # sequences are shorter than sequence 
length if self.target_eos_id is not None: decoder_b[self.target_eos_id] = 2.0 / self.target_sequence_length decoder_b = np.log(decoder_b) decoder_b = tf.get_variable("bias", (self.target_vocab_length), x.dtype, decoder_b.astype(x.dtype)) x = tf.nn.xw_plus_b(x, decoder_w, decoder_b) else: x = tf.matmul(x, decoder_w) return x def norm(self, x): # -> x with self.namescope('layernorm'): param_initializers = { "beta": tf.initializers.constant(0.0, x.dtype), "gamma": tf.initializers.constant(0.1, x.dtype) } x = ipu.normalization_ops.group_norm(x, groups=1, param_initializers=param_initializers) return x def feed_forward(self, x, compute_dense_grad=False): # -> x with self.namescope('ffn'): with self.namescope('1'): x = self.sparseLinear(x, self.sparsity, self.ff_length, compute_dense_grad, use_bias=True) with self.namescope('activation'): x = ipu.nn_ops.gelu(x) x = self.dropout(x) with self.namescope('2'): x = self.sparseLinear(x, self.sparsity, self.hidden_length, compute_dense_grad, use_bias=True) return x def attention(self, in_q, in_k, in_v, mask=None, is_self_attention=False, compute_dense_grad=False): """ [batch_size, sequence_length, hidden_length] -> [B, S, 1 ,H] """ # Parameter dimensions b_shape = self.attention_heads * self.qkv_length use_bias = not self.exclude_attention_biases if self.disable_concat_qkv or not is_self_attention: # Prepend (head) dimension in_q_r = tf.expand_dims(in_q, axis=-2) in_k_r = tf.expand_dims(in_k, axis=-2) in_v_r = tf.expand_dims(in_v, axis=-2) # Queries with self.namescope('q'): q = self.sparseLinear(in_q_r, self.sparsity, b_shape, compute_dense_grad, use_bias) # Keys with self.namescope('k'): k = self.sparseLinear(in_k_r, self.sparsity, b_shape, compute_dense_grad, use_bias) # Values with self.namescope('v'): v = self.sparseLinear(in_v_r, self.sparsity, b_shape, compute_dense_grad, use_bias) else:
# Extract heads and transpose [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] batch_size, sequence_length, _ = in_q.shape.as_list() with self.namescope('q'): q = tf.reshape(q, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) q = tf.transpose(q, perm=[0, 2, 1, 3]) # [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] with self.namescope('k'): k = tf.reshape(k, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) kt = tf.transpose(k, perm=[0, 2, 3, 1]) # [B, S, heads, qkv_len] -> [B, heads, qkv_len, S] with self.namescope('v'): v = tf.reshape(v, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) v = tf.transpose(v, perm=[0, 2, 1, 3]) # [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] # Dense attention calculation with self.namescope('interaction'): if self.use_static_sparse_autoregressive_attention and is_self_attention: z = static_sparse_attention.autoregressive_self_attention(q, kt, v) else: # Dense interaction x = tf.matmul(q, kt, name="token_to_token") c = tf.constant(1 / np.sqrt(self.qkv_length), x.dtype) x = tf.multiply(x, c) # "Memory mask" e.g. causal if mask is not None and (in_q == in_k): # only in self attention x = tf.add(x, mask, name="attention_mask") # Take softmax across the last axis (B, heads, S1, S2) pick (0, 1, 2, 3<-) x = tf.nn.softmax(x, axis=3) # Pick up the values # x[B, heads, seq_len1,
with self.namescope('qkv'): # Prepend (head) dimension in_qkv_r = tf.expand_dims(in_q, axis=-2) qkv = self.sparseLinear(in_qkv_r, self.sparsity, 3 * b_shape, compute_dense_grad, use_bias) # Extract q, k and v q, k, v = tf.split(qkv, 3, axis=-1)
conditional_block
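The middle fragment above shows the fused QKV path: a single sparse linear layer produces 3 * attention_heads * qkv_length outputs which are then split into q, k and v. A dense NumPy sketch of the same idea, with arbitrary example sizes (this is an illustration of the fusion, not the sparse IPU layer).

import numpy as np

def fused_qkv(x, w, b):
    """x: [batch, seq, hidden]; w: [hidden, 3 * heads * qkv_len]; returns q, k, v."""
    qkv = np.matmul(x, w) + b               # one projection instead of three
    q, k, v = np.split(qkv, 3, axis=-1)     # same effect as tf.split(qkv, 3, axis=-1)
    return q, k, v

hidden, heads, qkv_len = 64, 4, 16
x = np.random.randn(2, 8, hidden)
w = np.random.randn(hidden, 3 * heads * qkv_len)
b = np.zeros(3 * heads * qkv_len)
q, k, v = fused_qkv(x, w, b)
print(q.shape, k.shape, v.shape)            # (2, 8, 64) each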
transformer_dynsparse.py
params, *args, **kwargs) SparseModel.__init__(self, params, *args, **kwargs) def embedding(self, x, src, compute_dense_grad=False, sparse_embeddings=False): # x[batch_size*sequence_length] -> x[batch_size*sequence_length, embedding_length] inshape = x.shape.as_list() assert len(inshape) == 2, f"Input to embedding lookup has shape {inshape}, but should be a 2D tensor" vocab_len = self.source_vocab_length if src else self.target_vocab_length assert vocab_len is not None, "Embedding vocab length must be defined" sequence_length = self.source_sequence_length if src else self.target_sequence_length with self.namescope('embedding'): with self.namescope('token_lut'): if sparse_embeddings: # Embeddings are created using these projection weights later. # Currently sparse embeddings only support 1x1 sparsity so we # must force the partials type to float: self.sparse_projection = self.getOrCreateSparseLinear(x_shape=[inshape[0] * inshape[1], self.embedding_length], x_dtype = self.dtype, sparsity=self.sparsity, dense_length=vocab_len, block_size=1, use_bias=self.include_projection_bias, override_partials_type="float") sparse_embeddings_layer = SparseTiedEmbedding.from_sparse_projection("tied_embedding", self.sparse_projection) x = sparse_embeddings_layer(x) embedding_dict = None else: # Embedding gets reused later on in the prediction layer dict_name = "source_" if src else "target_" dict_name = dict_name + "embedding_dict" embedding_dict = tf.get_variable(dict_name, (vocab_len, self.embedding_length), self.dtype, embedding_initializer) x = ipu.embedding_ops.embedding_lookup(embedding_dict, x) x = self.norm(x) # Add the positional encodings x = self.position_encoder(x, sequence_length) # normalize before the projection to hidden length x = self.norm(x) # The embedding length is decoupled from the hidden length with self.namescope("up_project"): x = self.sparseLinear(x, self.sparsity, self.hidden_length, compute_dense_grad, use_bias=True, disable_outlining=True) # no non-linearity here according to ALBERT return x, embedding_dict def projection(self, x, compute_dense_grad=False, sparse_embeddings=False): # x[..,embedding_length] -> x[..,target_vocab_length] with self.namescope('projection'): with self.namescope('down_project'): # Project from hidden_length to embedding_length so we can reuse the # embedding look-up table x = self.sparseLinear(x, self.sparsity, self.embedding_length, compute_dense_grad, use_bias=True, disable_outlining=True) x = ipu.nn_ops.gelu(x) x = self.norm(x) with self.namescope('decoder_logits'): # The look-up table is shared with the dense embedding layer if sparse_embeddings: if self.exclude_embedding: x = self.sparseLinear(x, self.sparsity, self.target_vocab_length, compute_dense_grad, use_bias=self.include_projection_bias, disable_outlining=True) else: x = self.applySparseLinear(x, self.sparse_projection, self.target_vocab_length, compute_dense_grad, disable_outlining=True) else: if not self.exclude_embedding: decoder_w = tf.transpose(self.tied_embedding, name="decoder_w") else: decoder_w = tf.get_variable("decoder_w", (self.embedding_length, self.target_vocab_length), x.dtype, init_glorot) # Optionally attach a bias to each token if self.include_projection_bias: decoder_b = np.zeros([self.target_vocab_length], dtype=x.dtype) + 1.0 / self.target_vocab_length # no chance of start token (small) if self.target_bos_id is not None: decoder_b[self.target_bos_id] = 1.0 / (self.target_vocab_length * 2) # every sequence has an end token, but most # sequences are shorter than sequence 
length if self.target_eos_id is not None: decoder_b[self.target_eos_id] = 2.0 / self.target_sequence_length decoder_b = np.log(decoder_b) decoder_b = tf.get_variable("bias", (self.target_vocab_length), x.dtype, decoder_b.astype(x.dtype)) x = tf.nn.xw_plus_b(x, decoder_w, decoder_b) else: x = tf.matmul(x, decoder_w) return x def
(self, x): # -> x with self.namescope('layernorm'): param_initializers = { "beta": tf.initializers.constant(0.0, x.dtype), "gamma": tf.initializers.constant(0.1, x.dtype) } x = ipu.normalization_ops.group_norm(x, groups=1, param_initializers=param_initializers) return x def feed_forward(self, x, compute_dense_grad=False): # -> x with self.namescope('ffn'): with self.namescope('1'): x = self.sparseLinear(x, self.sparsity, self.ff_length, compute_dense_grad, use_bias=True) with self.namescope('activation'): x = ipu.nn_ops.gelu(x) x = self.dropout(x) with self.namescope('2'): x = self.sparseLinear(x, self.sparsity, self.hidden_length, compute_dense_grad, use_bias=True) return x def attention(self, in_q, in_k, in_v, mask=None, is_self_attention=False, compute_dense_grad=False): """ [batch_size, sequence_length, hidden_length] -> [B, S, 1 ,H] """ # Parameter dimensions b_shape = self.attention_heads * self.qkv_length use_bias = not self.exclude_attention_biases if self.disable_concat_qkv or not is_self_attention: # Prepend (head) dimension in_q_r = tf.expand_dims(in_q, axis=-2) in_k_r = tf.expand_dims(in_k, axis=-2) in_v_r = tf.expand_dims(in_v, axis=-2) # Queries with self.namescope('q'): q = self.sparseLinear(in_q_r, self.sparsity, b_shape, compute_dense_grad, use_bias) # Keys with self.namescope('k'): k = self.sparseLinear(in_k_r, self.sparsity, b_shape, compute_dense_grad, use_bias) # Values with self.namescope('v'): v = self.sparseLinear(in_v_r, self.sparsity, b_shape, compute_dense_grad, use_bias) else: with self.namescope('qkv'): # Prepend (head) dimension in_qkv_r = tf.expand_dims(in_q, axis=-2) qkv = self.sparseLinear(in_qkv_r, self.sparsity, 3 * b_shape, compute_dense_grad, use_bias) # Extract q, k and v q, k, v = tf.split(qkv, 3, axis=-1) # Extract heads and transpose [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] batch_size, sequence_length, _ = in_q.shape.as_list() with self.namescope('q'): q = tf.reshape(q, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) q = tf.transpose(q, perm=[0, 2, 1, 3]) # [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] with self.namescope('k'): k = tf.reshape(k, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) kt = tf.transpose(k, perm=[0, 2, 3, 1]) # [B, S, heads, qkv_len] -> [B, heads, qkv_len, S] with self.namescope('v'): v = tf.reshape(v, [batch_size, sequence_length, self.attention_heads, self.qkv_length]) v = tf.transpose(v, perm=[0, 2, 1, 3]) # [B, S, heads, qkv_len] -> [B, heads, S, qkv_len] # Dense attention calculation with self.namescope('interaction'): if self.use_static_sparse_autoregressive_attention and is_self_attention: z = static_sparse_attention.autoregressive_self_attention(q, kt, v) else: # Dense interaction x = tf.matmul(q, kt, name="token_to_token") c = tf.constant(1 / np.sqrt(self.qkv_length), x.dtype) x = tf.multiply(x, c) # "Memory mask" e.g. causal if mask is not None and (in_q == in_k): # only in self attention x = tf.add(x, mask, name="attention_mask") # Take softmax across the last axis (B, heads, S1, S2) pick (0, 1, 2, 3<-) x = tf.nn.softmax(x, axis=3) # Pick up the values # x[B, heads, seq_len1,
norm
identifier_name
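The norm() method above uses group normalisation with a single group and gamma/beta initialised to 0.1/0.0, which for these activations behaves essentially like layer normalisation over the hidden axis. A rough NumPy sketch under that reading; the shapes are example values.

import numpy as np

def layer_norm(x, gamma=0.1, beta=0.0, eps=1e-5):
    """Single-group (i.e. layer) normalisation over the last axis, using the
    gamma=0.1 / beta=0.0 initial values from the norm() method above."""
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return gamma * (x - mean) / np.sqrt(var + eps) + beta

x = np.random.randn(2, 8, 64)
y = layer_norm(x)
print(y.mean(axis=-1)[0, 0], y.std(axis=-1)[0, 0])  # ~0 mean, ~0.1 std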
read.rs
parse::linebreak(&mut buf)?; name }; Some(solid_name) } else { None }; // In the binary case, we still need to skip the remaining header. let triangle_count = if is_binary { buf.skip(80 - buf.offset())?; Some(buf.read_u32::<LittleEndian>()?) } else { None }; Ok(Self { buf, solid_name, triangle_count, _dummy: PhantomData, }) } /// Configures the reader to not unify vertices with the exact same /// position into one. /// /// An STL file is a simple list of triangles. Each triangle specifies the /// position of its three vertices. This means that vertices of adjacent /// triangles are stored once per triangle. When reading the file, we only /// know the vertex positions and have no idea which vertices are actually /// the same one and which are two different vertices that have the same /// position. /// /// It's common to unify vertices when reading an STL file to get a real /// mesh and not just a collection of unconnected triangles. You only need /// to disable unification in very special cases, mainly because: /// - Your mesh has vertices that have the exact same position but should /// be treated as separate vertices (this is very rare) /// - Unifying the vertices is too slow for you (unifying makes the whole /// read process a couple of times slower) /// /// But if any of this is a problem for you, you should rather use a better /// file format instead of STL. /// /// When vertices are unified, `NaN` values in vertices are not allowed. So /// in that case, if your file contains `NaN` values, the reading method /// will panic. pub fn without_vertex_unification(self) -> Reader<R, VerbatimVertices> { Reader { buf: self.buf, solid_name: self.solid_name, triangle_count: self.triangle_count, _dummy: PhantomData, } } } impl<R: io::Read, U: UnifyingMarker> Reader<R, U> { /// Returns the name of the solid. If no solid name was stored in the file, /// `None` is returned. pub fn solid_name(&self) -> Option<&str> { self.solid_name.as_ref().map(|s| s.as_str()) } /// Returns whether or not the file is a binary STL file (as opposed to /// ASCII). pub fn is_binary(&self) -> bool { self.triangle_count.is_some() } /// Returns the encoding of this STL file. pub fn encoding(&self) -> Encoding { if self.is_binary() { Encoding::Binary } else { Encoding::Ascii } } /// Returns the triangle count stored in the file. That number is stored if /// and only if the file is binary. pub fn triangle_count(&self) -> Option<u32> { self.triangle_count } /// Reads the whole file into a [`RawStorage`]. /// /// Usually you either want to use a higher level interface (via /// [`StreamSource`]) or the method [`Reader::read_raw`]. The latter is the /// streaming version of this method which doesn't require a temporary /// storage ([`RawStorage`]). pub fn into_raw_storage(self) -> Result<RawStorage, Error> { // Prepare the raw result with metadata and memory allocations. let mut out = RawStorage::empty(); out.solid_name = self.solid_name.clone(); if let Some(tri_count) = self.triangle_count { out.triangles.reserve(tri_count as usize); } // Read the all triangles into the raw result self.read_raw(|tri| { out.triangles.push(tri); Ok(()) })?; Ok(out) } /// Reads the whole file, passing each triangle to the `add_triangle` /// callback. /// /// This is a low level building block that you usually don't want to use /// directly. In particular, **this method itself never performs any vertex /// unification** (regardless of the type parameter `U`). 
You usually want /// to use the [`StreamSource`]) interface to actually read meshes from /// this reader. #[inline(never)] pub fn read_raw<F>(self, add_triangle: F) -> Result<(), Error> where F: FnMut(RawTriangle) -> Result<(), Error>, { if let Some(triangle_count) = self.triangle_count { self.read_raw_binary(triangle_count, add_triangle) } else { self.read_raw_ascii(add_triangle) } } /// The `read_raw` implementation for binary bodies. #[inline(never)] fn read_raw_binary<F>(self, triangle_count: u32, mut add_triangle: F) -> Result<(), Error> where F: FnMut(RawTriangle) -> Result<(), Error>, { const BYTES_PER_TRI: usize = 4 * 3 * 4 + 2; let mut buf = self.buf; // We attempt to read as many triangles as specified. If the // specified number was too high and we reach EOF early, we will // return an error. for _ in 0..triangle_count { // We don't use `with_bytes` here because it isn't inlined and // we can improve performance significantly by doing it // manually here. buf.prepare(BYTES_PER_TRI)?; let data = &buf.raw_buf()[..BYTES_PER_TRI]; /// Reads three consecutive `f32`s. #[inline(always)] fn vec3(data: &[u8]) -> [f32; 3] { [ LittleEndian::read_f32(&data[0..4]), LittleEndian::read_f32(&data[4..8]), LittleEndian::read_f32(&data[8..12]), ] } let triangle = RawTriangle { normal: vec3(&data[0..12]), vertices: [ vec3(&data[12..24]), vec3(&data[24..36]), vec3(&data[36..48]), ], attribute_byte_count: LittleEndian::read_u16(&data[48..50]), }; add_triangle(triangle)?; buf.consume(BYTES_PER_TRI); } // If the specified number of triangles was too small and there is // still data left, we also error. buf.assert_eof()?; Ok(()) } /// The `read_raw` implementation for ASCII bodies. #[inline(never)] pub fn read_raw_ascii<F>(self, mut add_triangle: F) -> Result<(), Error> where F: FnMut(RawTriangle) -> Result<(), Error>, { /// Parses three floats separated by whitespace. No leading or trailing /// whitespace is handled. fn vec3(buf: &mut impl ParseBuf) -> Result<[f32; 3], Error> { let x = parse::ascii_f32(buf)?; parse::whitespace(buf)?; let y = parse::ascii_f32(buf)?; parse::whitespace(buf)?; let z = parse::ascii_f32(buf)?; Ok([x, y, z]) } /// Parses one ASCII line with a vertex (e.g. `vertex 2.0 0.1 1`) fn vertex(buf: &mut impl ParseBuf) -> Result<[f32; 3], Error> { parse::line(buf, |buf| { buf.expect_tag(b"vertex")?; parse::whitespace(buf)?; vec3(buf) }) } let mut buf = self.buf; // Parse facets loop { // First line (`facet normal 0.0 1.0 0.0`) let normal = parse::line(&mut buf, |buf| { buf.expect_tag(b"facet normal")?; parse::whitespace(buf)?; vec3(buf) })?; // Parse vertices parse::line(&mut buf, |buf| buf.expect_tag(b"outer loop"))?; let vertices = [ vertex(&mut buf)?, vertex(&mut buf)?, vertex(&mut buf)?, ]; parse::line(&mut buf, |buf| buf.expect_tag(b"endloop"))?; // Pass parsed triangle to callback add_triangle(RawTriangle { normal, vertices, attribute_byte_count: 0, })?; // Parse last line (`endfacet`) parse::line(&mut buf, |buf| buf.expect_tag(b"endfacet"))?; // Check if the next line starts with `endsolid` and break loop // in that case. parse::opt_whitespace(&mut buf)?; if buf.is_next(b"endsolid")? { // We've seen `endsolid`: we just stop here. There could be // junk afterwards, but we don't care. break; } } Ok(()) }
} impl<R: io::Read, U: UnifyingMarker> fmt::Debug for Reader<R, U> {
random_line_split
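The comments in the STL reader above describe the binary layout: an 80-byte header, a little-endian u32 triangle count, then 50 bytes per triangle (three normal floats, nine vertex floats, and a u16 "attribute byte count"). A minimal Python sketch that parses that layout, assuming a well-formed file; the path argument is hypothetical.

import struct

# 12 little-endian f32s (normal + three vertices) followed by a u16: 50 bytes total
TRI_STRUCT = struct.Struct("<12fH")

def read_binary_stl(path):
    with open(path, "rb") as f:
        header = f.read(80)
        (count,) = struct.unpack("<I", f.read(4))
        triangles = []
        for _ in range(count):
            values = TRI_STRUCT.unpack(f.read(TRI_STRUCT.size))
            normal = values[0:3]
            vertices = (values[3:6], values[6:9], values[9:12])
            attribute_byte_count = values[12]
            triangles.append((normal, vertices, attribute_byte_count))
    return header, triangles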
read.rs
fn check_length(&mut self) -> Result<CountMatch, Error>; } impl<R: io::Read> SeekHelper for R { default fn check_length(&mut self) -> Result<CountMatch, Error> { Ok(CountMatch::NoInfo) } } impl<R: io::Read + io::Seek> SeekHelper for R { fn check_length(&mut self) -> Result<CountMatch, Error> { // Determine length of input. let input_len = self.seek(io::SeekFrom::End(0))?; if input_len < 84 { return Ok(CountMatch::TooShort); } // Pretend the file is binary and read the number of triangles // at offset 80 . self.seek(io::SeekFrom::Start(80))?; let num_triangles = self.read_u32::<LittleEndian>()?; self.seek(io::SeekFrom::Start(0))?; // Jump back to the start // In binary format, each triangle is stored with 50 bytes: // - 3 * 3 = 9 position floats => 36 bytes // - 3 normal floats => 12 bytes // - 2 bytes "attribute byte count" // // The binary header is 84 bytes long. let expected_len_if_binary = num_triangles as u64 * 50 + 84; if expected_len_if_binary == input_len { Ok(CountMatch::Match) } else { Ok(CountMatch::Mismatch) } } } let count_match = reader.check_length()?; // Wrap reader into parse buffer. let mut buf = Buffer::new(reader)?; // Load the first 1K bytes (or the whole file, if the file is shorter // than that). We want to inspect those bytes. buf.saturating_prepare(1024)?; let starts_with_solid = buf.raw_buf().starts_with(b"solid"); let non_ascii_bytes = buf.raw_buf().iter().take(1024).any(|b| !b.is_ascii()); let is_binary = match (starts_with_solid, non_ascii_bytes, count_match) { // ----- Binary -------------------------------------------------- // Even if we have no length info, non-ASCII bytes are strong // indicator. (true, true, CountMatch::NoInfo) => true, (false, true, CountMatch::NoInfo) => true, // A count/length match is a very strong indicator and we don't // cary about anything else. (_, _, CountMatch::Match) => true, // Is binary or corrupted -> we assume binary. (false, false, CountMatch::NoInfo) => false, // ----- ASCII --------------------------------------------------- (true, false, CountMatch::NoInfo) => false, (true, false, CountMatch::TooShort) => false, (true, false, CountMatch::Mismatch) => false, // ----- Assume binary, but error -------------------------------- (_, _, CountMatch::TooShort) => { return Err(ParseError::Custom( "corrupted binary STL file: file is shorter than 84 bytes".into(), Span::new(0, 0), ).into()); } (_, _, CountMatch::Mismatch) => { return Err(ParseError::Custom( "corrupted binary STL file: triangle count at offset 80 disagrees with \ file length".into(), Span::new(80, 84), ).into()); } }; // Check if the file starts with `solid`. If yes, a string (the solid // name) is stored next. let solid_name = if buf.is_next(b"solid")? { // Consume `solid` buf.consume(5); // Read the solid name (until line break in ASCII case, 80 chars in // binary case). let solid_name = if is_binary { buf.with_bytes( 80 - buf.offset(), |sd| { sd.assert_ascii() .map(|name| name.trim().to_string()) .map_err(|e| e.into()) }, )? } else { let name = buf.take_until(b'\n', |sd| { sd.assert_ascii() .map(|name| name.trim().to_string()) .map_err(|e| e.into()) })?; parse::linebreak(&mut buf)?; name }; Some(solid_name) } else { None }; // In the binary case, we still need to skip the remaining header. let triangle_count = if is_binary { buf.skip(80 - buf.offset())?; Some(buf.read_u32::<LittleEndian>()?) 
} else { None }; Ok(Self { buf, solid_name, triangle_count, _dummy: PhantomData, }) } /// Configures the reader to not unify vertices with the exact same /// position into one. /// /// An STL file is a simple list of triangles. Each triangle specifies the /// position of its three vertices. This means that vertices of adjacent /// triangles are stored once per triangle. When reading the file, we only /// know the vertex positions and have no idea which vertices are actually /// the same one and which are two different vertices that have the same /// position. /// /// It's common to unify vertices when reading an STL file to get a real /// mesh and not just a collection of unconnected triangles. You only need /// to disable unification in very special cases, mainly because: /// - Your mesh has vertices that have the exact same position but should /// be treated as separate vertices (this is very rare) /// - Unifying the vertices is too slow for you (unifying makes the whole /// read process a couple of times slower) /// /// But if any of this is a problem for you, you should rather use a better /// file format instead of STL. /// /// When vertices are unified, `NaN` values in vertices are not allowed. So /// in that case, if your file contains `NaN` values, the reading method /// will panic. pub fn without_vertex_unification(self) -> Reader<R, VerbatimVertices> { Reader { buf: self.buf, solid_name: self.solid_name, triangle_count: self.triangle_count, _dummy: PhantomData, } } } impl<R: io::Read, U: UnifyingMarker> Reader<R, U> { /// Returns the name of the solid. If no solid name was stored in the file, /// `None` is returned. pub fn solid_name(&self) -> Option<&str> { self.solid_name.as_ref().map(|s| s.as_str()) } /// Returns whether or not the file is a binary STL file (as opposed to /// ASCII). pub fn is_binary(&self) -> bool { self.triangle_count.is_some() } /// Returns the encoding of this STL file. pub fn encoding(&self) -> Encoding { if self.is_binary() { Encoding::Binary } else { Encoding::Ascii } } /// Returns the triangle count stored in the file. That number is stored if /// and only if the file is binary. pub fn triangle_count(&self) -> Option<u32> { self.triangle_count } /// Reads the whole file into a [`RawStorage`]. /// /// Usually you either want to use a higher level interface (via /// [`StreamSource`]) or the method [`Reader::read_raw`]. The latter is the /// streaming version of this method which doesn't require a temporary /// storage ([`RawStorage`]). pub fn into_raw_storage(self) -> Result<RawStorage, Error> { // Prepare the raw result with metadata and memory allocations. let mut out = RawStorage::empty(); out.solid_name = self.solid_name.clone(); if let Some(tri_count) = self.triangle_count { out.triangles.reserve(tri_count as usize); } // Read the all triangles into the raw result self.read_raw(|tri| { out.triangles.push(tri); Ok(()) })?; Ok(out) } /// Reads the whole file, passing each triangle to the `add_triangle` /// callback. /// /// This is a low level building block that you usually don't want to use /// directly. In particular, **this method itself never performs any vertex /// unification** (regardless of the type parameter `U`). You usually want /// to use the [`StreamSource`]) interface to actually read meshes from /// this reader. 
#[inline(never)] pub fn read_raw<F>(self, add_triangle: F) -> Result<(), Error> where F: FnMut(RawTriangle) -> Result<(), Error>, { if let Some(triangle_count) = self.triangle_count { self.read_raw_binary(triangle_count, add_triangle) } else { self.read_raw_ascii(add_triangle) } } /// The `read_raw` implementation for binary bodies. #[inline(never)] fn
<F
read_raw_binary
identifier_name
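check_length() above decides whether a file can be binary STL by comparing its size with 84 + 50 * (the u32 triangle count stored at offset 80). The same heuristic sketched in Python; the path argument is hypothetical.

import os
import struct

def looks_binary_by_length(path):
    """Return True on a count/length match, False on a mismatch, None if the
    file is too short to even hold the 84-byte binary header."""
    size = os.path.getsize(path)
    if size < 84:
        return None
    with open(path, "rb") as f:
        f.seek(80)
        (count,) = struct.unpack("<I", f.read(4))
    return size == 84 + 50 * count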
read.rs
fn check_length(&mut self) -> Result<CountMatch, Error>; } impl<R: io::Read> SeekHelper for R { default fn check_length(&mut self) -> Result<CountMatch, Error> { Ok(CountMatch::NoInfo) } } impl<R: io::Read + io::Seek> SeekHelper for R { fn check_length(&mut self) -> Result<CountMatch, Error> { // Determine length of input. let input_len = self.seek(io::SeekFrom::End(0))?; if input_len < 84 { return Ok(CountMatch::TooShort); } // Pretend the file is binary and read the number of triangles // at offset 80 . self.seek(io::SeekFrom::Start(80))?; let num_triangles = self.read_u32::<LittleEndian>()?; self.seek(io::SeekFrom::Start(0))?; // Jump back to the start // In binary format, each triangle is stored with 50 bytes: // - 3 * 3 = 9 position floats => 36 bytes // - 3 normal floats => 12 bytes // - 2 bytes "attribute byte count" // // The binary header is 84 bytes long. let expected_len_if_binary = num_triangles as u64 * 50 + 84; if expected_len_if_binary == input_len { Ok(CountMatch::Match) } else { Ok(CountMatch::Mismatch) } } } let count_match = reader.check_length()?; // Wrap reader into parse buffer. let mut buf = Buffer::new(reader)?; // Load the first 1K bytes (or the whole file, if the file is shorter // than that). We want to inspect those bytes. buf.saturating_prepare(1024)?; let starts_with_solid = buf.raw_buf().starts_with(b"solid"); let non_ascii_bytes = buf.raw_buf().iter().take(1024).any(|b| !b.is_ascii()); let is_binary = match (starts_with_solid, non_ascii_bytes, count_match) { // ----- Binary -------------------------------------------------- // Even if we have no length info, non-ASCII bytes are strong // indicator. (true, true, CountMatch::NoInfo) => true, (false, true, CountMatch::NoInfo) => true, // A count/length match is a very strong indicator and we don't // cary about anything else. (_, _, CountMatch::Match) => true, // Is binary or corrupted -> we assume binary. (false, false, CountMatch::NoInfo) => false, // ----- ASCII --------------------------------------------------- (true, false, CountMatch::NoInfo) => false, (true, false, CountMatch::TooShort) => false, (true, false, CountMatch::Mismatch) => false, // ----- Assume binary, but error -------------------------------- (_, _, CountMatch::TooShort) => { return Err(ParseError::Custom( "corrupted binary STL file: file is shorter than 84 bytes".into(), Span::new(0, 0), ).into()); } (_, _, CountMatch::Mismatch) => { return Err(ParseError::Custom( "corrupted binary STL file: triangle count at offset 80 disagrees with \ file length".into(), Span::new(80, 84), ).into()); } }; // Check if the file starts with `solid`. If yes, a string (the solid // name) is stored next. let solid_name = if buf.is_next(b"solid")? { // Consume `solid` buf.consume(5); // Read the solid name (until line break in ASCII case, 80 chars in // binary case). let solid_name = if is_binary { buf.with_bytes( 80 - buf.offset(), |sd| { sd.assert_ascii() .map(|name| name.trim().to_string()) .map_err(|e| e.into()) }, )? } else { let name = buf.take_until(b'\n', |sd| { sd.assert_ascii() .map(|name| name.trim().to_string()) .map_err(|e| e.into()) })?; parse::linebreak(&mut buf)?; name }; Some(solid_name) } else { None }; // In the binary case, we still need to skip the remaining header. let triangle_count = if is_binary { buf.skip(80 - buf.offset())?; Some(buf.read_u32::<LittleEndian>()?) 
} else { None }; Ok(Self { buf, solid_name, triangle_count, _dummy: PhantomData, }) } /// Configures the reader to not unify vertices with the exact same /// position into one. /// /// An STL file is a simple list of triangles. Each triangle specifies the /// position of its three vertices. This means that vertices of adjacent /// triangles are stored once per triangle. When reading the file, we only /// know the vertex positions and have no idea which vertices are actually /// the same one and which are two different vertices that have the same /// position. /// /// It's common to unify vertices when reading an STL file to get a real /// mesh and not just a collection of unconnected triangles. You only need /// to disable unification in very special cases, mainly because: /// - Your mesh has vertices that have the exact same position but should /// be treated as separate vertices (this is very rare) /// - Unifying the vertices is too slow for you (unifying makes the whole /// read process a couple of times slower) /// /// But if any of this is a problem for you, you should rather use a better /// file format instead of STL. /// /// When vertices are unified, `NaN` values in vertices are not allowed. So /// in that case, if your file contains `NaN` values, the reading method /// will panic. pub fn without_vertex_unification(self) -> Reader<R, VerbatimVertices> { Reader { buf: self.buf, solid_name: self.solid_name, triangle_count: self.triangle_count, _dummy: PhantomData, } } } impl<R: io::Read, U: UnifyingMarker> Reader<R, U> { /// Returns the name of the solid. If no solid name was stored in the file, /// `None` is returned. pub fn solid_name(&self) -> Option<&str> { self.solid_name.as_ref().map(|s| s.as_str()) } /// Returns whether or not the file is a binary STL file (as opposed to /// ASCII). pub fn is_binary(&self) -> bool { self.triangle_count.is_some() } /// Returns the encoding of this STL file. pub fn encoding(&self) -> Encoding { if self.is_binary() { Encoding::Binary } else { Encoding::Ascii } } /// Returns the triangle count stored in the file. That number is stored if /// and only if the file is binary. pub fn triangle_count(&self) -> Option<u32> { self.triangle_count } /// Reads the whole file into a [`RawStorage`]. /// /// Usually you either want to use a higher level interface (via /// [`StreamSource`]) or the method [`Reader::read_raw`]. The latter is the /// streaming version of this method which doesn't require a temporary /// storage ([`RawStorage`]). pub fn into_raw_storage(self) -> Result<RawStorage, Error> { // Prepare the raw result with metadata and memory allocations. let mut out = RawStorage::empty(); out.solid_name = self.solid_name.clone(); if let Some(tri_count) = self.triangle_count
// Read the all triangles into the raw result self.read_raw(|tri| { out.triangles.push(tri); Ok(()) })?; Ok(out) } /// Reads the whole file, passing each triangle to the `add_triangle` /// callback. /// /// This is a low level building block that you usually don't want to use /// directly. In particular, **this method itself never performs any vertex /// unification** (regardless of the type parameter `U`). You usually want /// to use the [`StreamSource`]) interface to actually read meshes from /// this reader. #[inline(never)] pub fn read_raw<F>(self, add_triangle: F) -> Result<(), Error> where F: FnMut(RawTriangle) -> Result<(), Error>, { if let Some(triangle_count) = self.triangle_count { self.read_raw_binary(triangle_count, add_triangle) } else { self.read_raw_ascii(add_triangle) } } /// The `read_raw` implementation for binary bodies. #[inline(never)] fn read_raw_binary
{ out.triangles.reserve(tri_count as usize); }
conditional_block
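The reader documentation above explains vertex unification: vertices with the exact same position are collapsed into one shared vertex, which is also why NaN coordinates are rejected (NaN never compares equal). A dictionary-based Python sketch of that idea, using made-up triangle data.

def unify_vertices(triangles):
    """Collapse exactly-equal vertex positions into shared indices.
    `triangles` is an iterable of three (x, y, z) tuples per triangle."""
    positions = []     # unique vertex positions, in first-seen order
    index_of = {}      # position tuple -> index into `positions`
    faces = []
    for tri in triangles:
        face = []
        for pos in tri:
            if pos not in index_of:            # NaN would never hit this cache,
                index_of[pos] = len(positions)  # hence unification forbids NaN
                positions.append(pos)
            face.append(index_of[pos])
        faces.append(tuple(face))
    return positions, faces

positions, faces = unify_vertices([
    ((0, 0, 0), (1, 0, 0), (0, 1, 0)),
    ((1, 0, 0), (0, 1, 0), (1, 1, 0)),
])
print(len(positions), faces)   # 4 unique vertices, two faces sharing an edge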
hostmon_server.go
("/host/"):] me := r.Method log.Printf("Got host %s (len=%d) with method %s\n", h, len(h), me) // We will key off r.Method = "GET" or "POST" // /host/ GET -> list all POST -> do nothing // /host/name GET -> list one POST -> update (or create) one switch me { case "GET": if (len(h) == 0) { // If we get no host parameter, we'll dump the whole list, so, first // execute (1) and for each result in (1) execute (2). rs, er := dbconn.Query("SELECT host from hosts ORDER BY host ASC") if (er != nil) { http.Error(w, "Fatal attempting to dump hosts", http.StatusInternalServerError) } var hh string for rs.Next() { er = rs.Scan(&hh) if (er != nil) { http.Error(w, "Fatal attempting to dump hosts", http.StatusInternalServerError) } dbCmd_2 := "SELECT * FROM reports WHERE hostname = '" + hh + "' ORDER BY timestamp DESC LIMIT 1;" qe := dbconn.QueryRow(dbCmd_2).Scan(&m.Timestamp, &m.Hostname, &m.KernelVer, &m.Release, &m.Uptime, &m.NumCPUs, &m.Memtotal, &m.LoadOne, &m.LoadFive, &m.LoadFifteen, &m.SwapUsed, &m.DiskReport) if (qe != nil) { http.Error(w, "Fatal attempting to dump hosts", http.StatusInternalServerError) } rp, erro := json.Marshal(m) if (erro != nil) { http.Error(w, "Fatal attempting to marshal JSON", http.StatusInternalServerError) } fmt.Fprintf(w, "%s", rp) } } else { // When we do have a host, just grab the most recent line for that host. dbCmd := "SELECT * from reports where hostname = '" + h + "' ORDER BY timestamp DESC LIMIT 1;" // // For each field, specify a parameter to QueryRow().Scan() i.e. // db.QueryRow(cmd).Scan(&f1, &f2, &f3, &f3) and so on // queryErr := dbconn.QueryRow(dbCmd).Scan(&m.Timestamp, &m.Hostname, &m.KernelVer, &m.Release, &m.Uptime, &m.NumCPUs, &m.Memtotal, &m.LoadOne, &m.LoadFive, &m.LoadFifteen, &m.SwapUsed, &m.DiskReport) switch { case queryErr == sql.ErrNoRows: http.Error(w, "No such host " + h, http.StatusNotFound) return case queryErr != nil: dbconn.Close() http.Error(w, "Fatal attempting to execute SELECT for host " + h, http.StatusInternalServerError) return default: } rpt, err := json.Marshal(m) if (err != nil) { http.Error(w, "Fatal attempting to marshal JSON", http.StatusInternalServerError) return } fmt.Fprintf(w, "%s", rpt) } case "POST": if (len(h) == 0) { http.Error(w, "Must specify a host for a POST request", http.StatusInternalServerError) } // Must call ParseForm() before accessing elements r.ParseForm() //bb := r.Form // Populate message Fields m.Timestamp, _ = strconv.ParseInt(r.FormValue("Timestamp"), 10, 64) m.Hostname = r.FormValue("Hostname") m.NumCPUs, _ = strconv.ParseInt(r.FormValue("NumCPUs"), 10, 64) m.Memtotal, _ = strconv.ParseInt(r.FormValue("Memtotal"), 10, 64) m.LoadOne, _ = strconv.ParseFloat(r.FormValue("LoadOne"), 64) m.LoadFive, _ = strconv.ParseFloat(r.FormValue("LoadFive"), 64) m.LoadFifteen, _ = strconv.ParseFloat(r.FormValue("LoadFifteen"), 64) m.SwapUsed, _ = strconv.ParseFloat(r.FormValue("SwapUsed"), 64) m.KernelVer = r.FormValue("KernelVer") m.Release = r.FormValue("Release") m.Uptime = r.FormValue("Uptime") m.DiskReport = r.FormValue("DiskReport") // // Check to see if the host exists in the host tracking table // dbCmd := "SELECT COUNT(*) FROM hosts where host = '" + m.Hostname + "';" _, dbExecErr := dbconn.Exec(dbCmd) if dbExecErr != nil { http.Error(w, "Fatal executing select for host " + m.Hostname, http.StatusInternalServerError) } var hostp string _ = dbconn.QueryRow(dbCmd).Scan(&hostp) hostpi, _ := strconv.Atoi(hostp) // // If not, add it to the hosts table. 
MySQL will generate an ID // if (hostpi == 0) { dbCmd = "INSERT INTO hosts (host) VALUES ('" + m.Hostname + "');" _, dbExecErr = dbconn.Exec(dbCmd) if dbExecErr != nil { http.Error(w, "Failed executing host table INSERT for host " + m.Hostname, http.StatusInternalServerError) } } // // Retrieve previous set of data points for this host from the reports // table // dbCmd = "SELECT * from reports where hostname = '" + m.Hostname + "' ORDER BY timestamp DESC LIMIT 1;" // // Note regarding db.QueryRow(): We should know how many fields we // have in the table. For each field, specify a parameter to the // QueryRow().Scan() method. i.e. // db.QueryRow(cmd).Scan(&f1, &f2, &f3, &f4) and so on // var dbTimeStamp, dbHostName, dbKernelVer, dbRelease, dbUptime, dbNumCPUs, dbPhysMem, dbLoadOne, dbLoadFive, dbLoadFifteen, dbSwapPctUsed, dbDiskReport string queryErr := dbconn.QueryRow(dbCmd).Scan(&dbTimeStamp, &dbHostName, &dbKernelVer, &dbRelease, &dbUptime, &dbNumCPUs, &dbPhysMem, &dbLoadOne, &dbLoadFive, &dbLoadFifteen, &dbSwapPctUsed, &dbDiskReport) switch { // If this happens, first database entry for the host in question case queryErr == sql.ErrNoRows: log.Printf("No rows returned executing SELECT for host %s\n", m.Hostname) case queryErr != nil: dbconn.Close() http.Error(w, "Fatal attempting to execute SELECT for host" + m.Hostname, http.StatusInternalServerError) default: } // // Insert the data points from the current report into the database. // dbCmd = "INSERT INTO reports VALUES (" + strconv.FormatInt(m.Timestamp, 10) + ",'" + m.Hostname + "','" + m.KernelVer + "','" + m.Release + "','" + m.Uptime + "','" + strconv.FormatInt(m.NumCPUs, 10) + "','" + strconv.FormatInt(m.Memtotal, 10) + "','" + strconv.FormatFloat(m.LoadOne, 'f', 6, 64) + "','" + strconv.FormatFloat(m.LoadFive, 'f', 6, 64) + "','" + strconv.FormatFloat(m.LoadFifteen, 'f', 6, 64) + "','" + strconv.FormatFloat(m.SwapUsed, 'f', 6, 64) + "','" + m.DiskReport + "');" log.Printf("Attempting to execute: %s\n", dbCmd) _, dbExecErr = dbconn.Exec(dbCmd) if dbExecErr != nil { dbconn.Close() http.Error(w, "Fatal executing reports table INSERT for host " + m.Hostname, http.StatusInternalServerError) } // r.Form is automatically a parsed map with appropriate keys and values //log.Printf("Got POST <%s>\n", bb) log.Printf("POST from: %s %s %s\n", m.Hostname, m.KernelVer, m.Release) } } // // Scan hosts database at configured intervals and send notifications if // thresholds have been exceeded. // func task_scan_and_notify() { t := time.NewTicker(time.Second*60) // Fixed for testing, configurable when done var htt []string for range t.C { // Dump the list of hosts rs, er := dbconn.Query("SELECT host from hosts ORDER BY host ASC") if (er != nil) { log.Fatalf("Fatal compiling list for scan and notify") } var hh string for rs.Next()
{
            er = rs.Scan(&hh)
            if (er != nil) {
                log.Fatalf("Fatal compiling list for scan and notify")
            }
            htt = append(htt, hh)
        }
conditional_block
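The scan-and-notify loop above compares the two most recent reports per host and alerts only when a value is rising, exceeds its absolute threshold, and has jumped by more than the first-derivative threshold. A compact Python restatement of that rule, with made-up example values and thresholds.

def should_notify(current, previous, threshold, delta_threshold):
    """Alert only on a rising value that exceeds its threshold AND whose
    increase since the previous report exceeds the delta threshold."""
    rising = current > previous
    return rising and current > threshold and (current - previous) > delta_threshold

print(should_notify(current=6.2, previous=1.1, threshold=4.0, delta_threshold=2.0))  # True
print(should_notify(current=6.2, previous=6.0, threshold=4.0, delta_threshold=2.0))  # False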
hostmon_server.go
() { //var bindaddr, conffile string var conffile string if (len(os.Args) != 5) { log.Fatalf("Usage: %s -b bindaddr -f configfile", os.Args[0]) } for i := 1; i < len(os.Args); i++ { switch os.Args[i] { //case "-b": //bindaddr = os.Args[i+1] case "-f": conffile = os.Args[i+1] } } log.Printf("Host monitor data server starting up\n") // // Read in the configuration file. // haveParam := make(map[string]bool) confFile, err := os.Open(conffile) if err != nil { log.Fatalf("Failed opening configuration file for reading\n") } inp := bufio.NewScanner(confFile) for inp.Scan() { line := inp.Text() if (len(line) > 0) { theFields := strings.Fields(line) key := strings.ToLower(theFields[0]) haveParam[theFields[0]] = true switch key { case "dbuser": g_dbUser = theFields[1] case "dbpass": g_dbPass = theFields[1] case "dbhost": g_dbHost = theFields[1] case "dbname": g_dbName = theFields[1] case "emailto": g_eMailTo = theFields[1] case "emailfrom": g_eMailFrom = theFields[1] case "loadthreshold": g_loadThreshold, _ = strconv.ParseFloat(theFields[1], 64) case "swapthreshold": g_swapThreshold, _ = strconv.ParseFloat(theFields[1], 64) case "loadfirstdthreshold": g_loadFirstDThreshold, _ = strconv.ParseFloat(theFields[1], 64) case "swapfirstdthreshold": g_swapFirstDThreshold, _ = strconv.ParseFloat(theFields[1], 64) case "diskthreshold": g_diskThreshold, _ = strconv.ParseInt(theFields[1], 10, 64) case "diskreportinterval": g_diskReportInterval, _ = strconv.ParseInt(theFields[1], 10, 64) default: log.Printf("Ignoring nonsense configuration parameter %s\n", theFields[1]) } } } confFile.Close() // // Make sure no configuration directives are missing // if ((haveParam["dbUser"] != true) || (haveParam["dbPass"] != true) || (haveParam["dbHost"] != true) || (haveParam["dbName"] != true) || (haveParam["eMailTo"] != true) || (haveParam["eMailFrom"] != true) || (haveParam["loadThreshold"] != true) || (haveParam["swapThreshold"] != true) || (haveParam["loadFirstDThreshold"] != true) || (haveParam["swapFirstDThreshold"] != true) || (haveParam["diskThreshold"] != true) || (haveParam["diskReportInterval"] != true)) { log.Fatalf("Fatal missing configuration directive\n") } log.Printf("Configuration report follows\n") log.Printf(" DB user: %s DB host: %s DB name: %s\n", g_dbUser, g_dbHost, g_dbName) log.Printf(" E-mail to: %s E-mail from: %s\n", g_eMailTo, g_eMailFrom) log.Printf(" Thresholds: %f %f %f %f %d\n", g_loadThreshold, g_swapThreshold, g_loadFirstDThreshold, g_swapFirstDThreshold, g_diskThreshold) log.Printf(" Disk report interval: %d sec\n", g_diskReportInterval) log.Printf("Configuration report ends\n") // // The DSN used to connect to the database should look like this: // hostmon:xyzzy123@tcp(192.168.1.253:3306)/hostmonitor // myDSN := g_dbUser + ":" + g_dbPass + "@tcp(" + g_dbHost + ":3306)/" + g_dbName // When dbconn is global, this needs to be =, not := !! dbconn, err = sql.Open("mysql", myDSN) if err != nil { log.Fatalf("Fatal connecting to database\n") } // // Test the database connection to make sure that we're in business. 
// err = dbconn.Ping() if err != nil { log.Fatalf("Fatal attempting to ping database") } // // Start notifier Goroutine // go task_scan_and_notify() // // Start listening for connections from the dashboard // http.HandleFunc("/host/", task_handle_host) http.ListenAndServe(":8962", nil) dbconn.Close() } // // Handle a connection // func task_handle_host(w http.ResponseWriter, r *http.Request) { var m Message // Extract hostname component of the path and the method h := r.URL.Path[len("/host/"):] me := r.Method log.Printf("Got host %s (len=%d) with method %s\n", h, len(h), me) // We will key off r.Method = "GET" or "POST" // /host/ GET -> list all POST -> do nothing // /host/name GET -> list one POST -> update (or create) one switch me { case "GET": if (len(h) == 0) { // If we get no host parameter, we'll dump the whole list, so, first // execute (1) and for each result in (1) execute (2). rs, er := dbconn.Query("SELECT host from hosts ORDER BY host ASC") if (er != nil) { http.Error(w, "Fatal attempting to dump hosts", http.StatusInternalServerError) } var hh string for rs.Next() { er = rs.Scan(&hh) if (er != nil) { http.Error(w, "Fatal attempting to dump hosts", http.StatusInternalServerError) } dbCmd_2 := "SELECT * FROM reports WHERE hostname = '" + hh + "' ORDER BY timestamp DESC LIMIT 1;" qe := dbconn.QueryRow(dbCmd_2).Scan(&m.Timestamp, &m.Hostname, &m.KernelVer, &m.Release, &m.Uptime, &m.NumCPUs, &m.Memtotal, &m.LoadOne, &m.LoadFive, &m.LoadFifteen, &m.SwapUsed, &m.DiskReport) if (qe != nil) { http.Error(w, "Fatal attempting to dump hosts", http.StatusInternalServerError) } rp, erro := json.Marshal(m) if (erro != nil) { http.Error(w, "Fatal attempting to marshal JSON", http.StatusInternalServerError) } fmt.Fprintf(w, "%s", rp) } } else { // When we do have a host, just grab the most recent line for that host. dbCmd := "SELECT * from reports where hostname = '" + h + "' ORDER BY timestamp DESC LIMIT 1;" // // For each field, specify a parameter to QueryRow().Scan() i.e. // db.QueryRow(cmd).Scan(&f1, &f2, &f3, &f3) and so on // queryErr := dbconn.QueryRow(dbCmd).Scan(&m.Timestamp, &m.Hostname, &m.KernelVer, &m.Release, &m.Uptime, &m.NumCPUs, &m.Memtotal, &m.LoadOne, &m.LoadFive, &m.LoadFifteen, &m.SwapUsed, &m.DiskReport) switch { case queryErr == sql.ErrNoRows: http.Error(w, "No such host " + h, http.StatusNotFound) return case queryErr != nil: dbconn.Close() http.Error(w, "Fatal attempting to execute SELECT for host " + h, http.StatusInternalServerError) return default: } rpt, err := json.Marshal(m) if (err != nil) { http.Error(w, "Fatal attempting to marshal JSON", http.StatusInternalServerError) return } fmt.Fprintf(w, "%s", rpt) } case "POST": if (len(h) == 0) { http.Error(w, "Must specify a host for a POST request", http.StatusInternalServerError) } // Must call ParseForm() before accessing elements r.ParseForm() //bb := r.Form // Populate message Fields m.Timestamp, _ = strconv.ParseInt(r.FormValue("Timestamp"), 10, 64) m.Hostname = r.FormValue("Hostname") m.NumCPUs, _ = strconv.ParseInt(r.FormValue("NumCPUs"), 10, 64) m.Memtotal, _ = strconv.ParseInt(r.FormValue("Memtotal"), 10, 64) m.LoadOne, _ = strconv.ParseFloat(r.FormValue("LoadOne"), 64) m.LoadFive, _ =
main
identifier_name
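The configuration loader above reads one whitespace-separated directive per line and lower-cases the key before matching it. A Python sketch of the same parsing approach; the file path in the usage comment is hypothetical.

def parse_config(path):
    """Whitespace-separated key/value configuration, one directive per line,
    keys compared case-insensitively -- mirroring the loop above."""
    params = {}
    with open(path) as fh:
        for line in fh:
            fields = line.split()
            if not fields:
                continue                # skip blank lines
            key = fields[0].lower()
            params[key] = fields[1] if len(fields) > 1 else ""
    return params

# e.g. params = parse_config("hostmon.conf"); float(params["loadthreshold"])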
hostmon_server.go
MySQL will generate an ID // if (hostpi == 0) { dbCmd = "INSERT INTO hosts (host) VALUES ('" + m.Hostname + "');" _, dbExecErr = dbconn.Exec(dbCmd) if dbExecErr != nil { http.Error(w, "Failed executing host table INSERT for host " + m.Hostname, http.StatusInternalServerError) } } // // Retrieve previous set of data points for this host from the reports // table // dbCmd = "SELECT * from reports where hostname = '" + m.Hostname + "' ORDER BY timestamp DESC LIMIT 1;" // // Note regarding db.QueryRow(): We should know how many fields we // have in the table. For each field, specify a parameter to the // QueryRow().Scan() method. i.e. // db.QueryRow(cmd).Scan(&f1, &f2, &f3, &f4) and so on // var dbTimeStamp, dbHostName, dbKernelVer, dbRelease, dbUptime, dbNumCPUs, dbPhysMem, dbLoadOne, dbLoadFive, dbLoadFifteen, dbSwapPctUsed, dbDiskReport string queryErr := dbconn.QueryRow(dbCmd).Scan(&dbTimeStamp, &dbHostName, &dbKernelVer, &dbRelease, &dbUptime, &dbNumCPUs, &dbPhysMem, &dbLoadOne, &dbLoadFive, &dbLoadFifteen, &dbSwapPctUsed, &dbDiskReport) switch { // If this happens, first database entry for the host in question case queryErr == sql.ErrNoRows: log.Printf("No rows returned executing SELECT for host %s\n", m.Hostname) case queryErr != nil: dbconn.Close() http.Error(w, "Fatal attempting to execute SELECT for host" + m.Hostname, http.StatusInternalServerError) default: } // // Insert the data points from the current report into the database. // dbCmd = "INSERT INTO reports VALUES (" + strconv.FormatInt(m.Timestamp, 10) + ",'" + m.Hostname + "','" + m.KernelVer + "','" + m.Release + "','" + m.Uptime + "','" + strconv.FormatInt(m.NumCPUs, 10) + "','" + strconv.FormatInt(m.Memtotal, 10) + "','" + strconv.FormatFloat(m.LoadOne, 'f', 6, 64) + "','" + strconv.FormatFloat(m.LoadFive, 'f', 6, 64) + "','" + strconv.FormatFloat(m.LoadFifteen, 'f', 6, 64) + "','" + strconv.FormatFloat(m.SwapUsed, 'f', 6, 64) + "','" + m.DiskReport + "');" log.Printf("Attempting to execute: %s\n", dbCmd) _, dbExecErr = dbconn.Exec(dbCmd) if dbExecErr != nil { dbconn.Close() http.Error(w, "Fatal executing reports table INSERT for host " + m.Hostname, http.StatusInternalServerError) } // r.Form is automatically a parsed map with appropriate keys and values //log.Printf("Got POST <%s>\n", bb) log.Printf("POST from: %s %s %s\n", m.Hostname, m.KernelVer, m.Release) } } // // Scan hosts database at configured intervals and send notifications if // thresholds have been exceeded. 
// func task_scan_and_notify() { t := time.NewTicker(time.Second*60) // Fixed for testing, configurable when done var htt []string for range t.C { // Dump the list of hosts rs, er := dbconn.Query("SELECT host from hosts ORDER BY host ASC") if (er != nil) { log.Fatalf("Fatal compiling list for scan and notify") } var hh string for rs.Next() { er = rs.Scan(&hh) if (er != nil) { log.Fatalf("Fatal compiling list for scan and notify") } htt = append(htt, hh) } // For each host, run checks and send notifications for c := range htt { rss, err := dbconn.Query("SELECT * FROM reports WHERE hostname = '" + htt[c] + "' ORDER BY timestamp DESC LIMIT 2") if (err != nil) { log.Fatalf("Fatal attempting to scan and notify 1") } var f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11 string var f0h, f1h, f2h, f3h, f4h, f5h, f6h, f7h, f8h, f9h, f10h, f11h string // Collect data point 1 for this host (most recent) rss.Next() err = rss.Scan(&f0, &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11) if (err != nil) { log.Printf("Skipping inconsistent host " + htt[c] + ", host in hosts table but no reports found") rss.Close() continue } log.Printf("#1: %s %s %s %s %s", f0, f1, f2, f3, f4) // Collect data point 2 for this host (historical) rss.Next() err = rss.Scan(&f0h, &f1h, &f2h, &f3h, &f4h, &f5h, &f6h, &f7h, &f8h, &f9h, &f10h, &f11h) if (err != nil) { log.Printf("Only one record for host " + htt[c]) rss.Close() continue } log.Printf("#2: %s %s %s %s %s", f0h, f1h, f2h, f3h, f4h) rss.Close() lo, _ := strconv.ParseFloat(f7, 64) loh, _ := strconv.ParseFloat(f7h, 64) sw, _ := strconv.ParseFloat(f10, 64) swh, _ := strconv.ParseFloat(f10h, 64) dl := math.Abs(lo-loh) ds := math.Abs(sw-swh) log.Printf("%f %f", dl, ds) // Look at system load and notify on positive differential exceeding thresholds if (lo > loh) { if ((lo > g_loadThreshold) && (dl > g_loadFirstDThreshold)) { send_email_notification("Subject: System load warning on " + htt[c], "System load has reached " + f7 + " from " + f7h) } } // Look at swap utilization and notify on positive differential exceeding thresholds if (sw > swh) { if ((sw > g_swapThreshold) && (ds > g_swapFirstDThreshold)) { send_email_notification("Subject: Swap utilization warning on " + htt[c], "Swap utilization has reached " + f10 + "% from " + f10h + "%") } } // Look at disk report and notify on threshold exceeded diskReptComponents := strings.Fields(f11) for i := 0; i < len(diskReptComponents)-1; i++ { valueToTest, _ := strconv.ParseInt(diskReptComponents[i+1], 10, 64) if ((valueToTest >= g_diskThreshold) && (math.Abs(float64(time.Now().Unix() - lastDNotify[htt[c]])) >= float64(g_diskReportInterval))) { send_email_notification("Subject: Disk utilization warning on " + htt[c], "Disk utilization on " + diskReptComponents[i] + " has reached " + diskReptComponents[i+1] + "%") lastDNotify[htt[c]] = time.Now().Unix() } } } //log.Printf("Host dump follows") //for c, _ := range htt { // log.Printf(" %s", htt[c]) //} htt = nil } } // // Send a notification e-mail // func send_email_notification(subj string, body string)
{ eMailConn, eMailErr := smtp.Dial("localhost:25") if eMailErr != nil { log.Printf("SMTP server connection failure sending notification\n") return } eMailConn.Mail(g_eMailFrom) eMailConn.Rcpt(g_eMailTo) wc, eMailErr := eMailConn.Data() if eMailErr != nil { log.Printf("Failure initiating DATA stage sending notification\n") return } defer wc.Close() buf := bytes.NewBufferString("From: " + g_eMailFrom + "\r\n" + "To: " + g_eMailTo + "\r\n" + subj + "\r\n\r\n" + body + "\r\n") _, eMailErr = buf.WriteTo(wc) if eMailErr != nil {
identifier_body
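The scan-and-notify loop in this row compares the two most recent samples for each host and alerts only when the newer value both exceeds an absolute threshold and has risen by more than a first-differential threshold since the previous sample. Below is a small, self-contained sketch of that decision rule; the numbers in main are illustrative only and are not the configured g_loadThreshold / g_loadFirstDThreshold values.

package main

import (
	"fmt"
	"math"
)

// shouldNotify reports whether a metric warrants an alert: the current value
// must exceed the absolute threshold AND the positive change since the
// previous sample must exceed the first-differential threshold.
func shouldNotify(current, previous, threshold, firstDThreshold float64) bool {
	if current <= previous {
		return false // only alert on a rising value
	}
	delta := math.Abs(current - previous)
	return current > threshold && delta > firstDThreshold
}

func main() {
	// Illustrative numbers: load rose from 1.2 to 6.5 against an absolute
	// threshold of 4.0 and a first-differential threshold of 1.0.
	fmt.Println(shouldNotify(6.5, 1.2, 4.0, 1.0)) // true
	fmt.Println(shouldNotify(6.5, 6.4, 4.0, 1.0)) // false: change too small
	fmt.Println(shouldNotify(0.9, 0.1, 4.0, 1.0)) // false: below threshold
}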
hostmon_server.go
// // Read in the configuration file. // haveParam := make(map[string]bool) confFile, err := os.Open(conffile) if err != nil { log.Fatalf("Failed opening configuration file for reading\n") } inp := bufio.NewScanner(confFile) for inp.Scan() { line := inp.Text() if (len(line) > 0) { theFields := strings.Fields(line) key := strings.ToLower(theFields[0]) haveParam[theFields[0]] = true switch key { case "dbuser": g_dbUser = theFields[1] case "dbpass": g_dbPass = theFields[1] case "dbhost": g_dbHost = theFields[1] case "dbname": g_dbName = theFields[1] case "emailto": g_eMailTo = theFields[1] case "emailfrom": g_eMailFrom = theFields[1] case "loadthreshold": g_loadThreshold, _ = strconv.ParseFloat(theFields[1], 64) case "swapthreshold": g_swapThreshold, _ = strconv.ParseFloat(theFields[1], 64) case "loadfirstdthreshold": g_loadFirstDThreshold, _ = strconv.ParseFloat(theFields[1], 64) case "swapfirstdthreshold": g_swapFirstDThreshold, _ = strconv.ParseFloat(theFields[1], 64) case "diskthreshold": g_diskThreshold, _ = strconv.ParseInt(theFields[1], 10, 64) case "diskreportinterval": g_diskReportInterval, _ = strconv.ParseInt(theFields[1], 10, 64) default: log.Printf("Ignoring nonsense configuration parameter %s\n", theFields[1]) } } } confFile.Close() // // Make sure no configuration directives are missing // if ((haveParam["dbUser"] != true) || (haveParam["dbPass"] != true) || (haveParam["dbHost"] != true) || (haveParam["dbName"] != true) || (haveParam["eMailTo"] != true) || (haveParam["eMailFrom"] != true) || (haveParam["loadThreshold"] != true) || (haveParam["swapThreshold"] != true) || (haveParam["loadFirstDThreshold"] != true) || (haveParam["swapFirstDThreshold"] != true) || (haveParam["diskThreshold"] != true) || (haveParam["diskReportInterval"] != true)) { log.Fatalf("Fatal missing configuration directive\n") } log.Printf("Configuration report follows\n") log.Printf(" DB user: %s DB host: %s DB name: %s\n", g_dbUser, g_dbHost, g_dbName) log.Printf(" E-mail to: %s E-mail from: %s\n", g_eMailTo, g_eMailFrom) log.Printf(" Thresholds: %f %f %f %f %d\n", g_loadThreshold, g_swapThreshold, g_loadFirstDThreshold, g_swapFirstDThreshold, g_diskThreshold) log.Printf(" Disk report interval: %d sec\n", g_diskReportInterval) log.Printf("Configuration report ends\n") // // The DSN used to connect to the database should look like this: // hostmon:xyzzy123@tcp(192.168.1.253:3306)/hostmonitor // myDSN := g_dbUser + ":" + g_dbPass + "@tcp(" + g_dbHost + ":3306)/" + g_dbName // When dbconn is global, this needs to be =, not := !! dbconn, err = sql.Open("mysql", myDSN) if err != nil { log.Fatalf("Fatal connecting to database\n") } // // Test the database connection to make sure that we're in business. 
// err = dbconn.Ping() if err != nil { log.Fatalf("Fatal attempting to ping database") } // // Start notifier Goroutine // go task_scan_and_notify() // // Start listening for connections from the dashboard // http.HandleFunc("/host/", task_handle_host) http.ListenAndServe(":8962", nil) dbconn.Close() } // // Handle a connection // func task_handle_host(w http.ResponseWriter, r *http.Request) { var m Message // Extract hostname component of the path and the method h := r.URL.Path[len("/host/"):] me := r.Method log.Printf("Got host %s (len=%d) with method %s\n", h, len(h), me) // We will key off r.Method = "GET" or "POST" // /host/ GET -> list all POST -> do nothing // /host/name GET -> list one POST -> update (or create) one switch me { case "GET": if (len(h) == 0) { // If we get no host parameter, we'll dump the whole list, so, first // execute (1) and for each result in (1) execute (2). rs, er := dbconn.Query("SELECT host from hosts ORDER BY host ASC") if (er != nil) { http.Error(w, "Fatal attempting to dump hosts", http.StatusInternalServerError) } var hh string for rs.Next() { er = rs.Scan(&hh) if (er != nil) { http.Error(w, "Fatal attempting to dump hosts", http.StatusInternalServerError) } dbCmd_2 := "SELECT * FROM reports WHERE hostname = '" + hh + "' ORDER BY timestamp DESC LIMIT 1;" qe := dbconn.QueryRow(dbCmd_2).Scan(&m.Timestamp, &m.Hostname, &m.KernelVer, &m.Release, &m.Uptime, &m.NumCPUs, &m.Memtotal, &m.LoadOne, &m.LoadFive, &m.LoadFifteen, &m.SwapUsed, &m.DiskReport) if (qe != nil) { http.Error(w, "Fatal attempting to dump hosts", http.StatusInternalServerError) } rp, erro := json.Marshal(m) if (erro != nil) { http.Error(w, "Fatal attempting to marshal JSON", http.StatusInternalServerError) } fmt.Fprintf(w, "%s", rp) } } else { // When we do have a host, just grab the most recent line for that host. dbCmd := "SELECT * from reports where hostname = '" + h + "' ORDER BY timestamp DESC LIMIT 1;" // // For each field, specify a parameter to QueryRow().Scan() i.e. 
// db.QueryRow(cmd).Scan(&f1, &f2, &f3, &f3) and so on // queryErr := dbconn.QueryRow(dbCmd).Scan(&m.Timestamp, &m.Hostname, &m.KernelVer, &m.Release, &m.Uptime, &m.NumCPUs, &m.Memtotal, &m.LoadOne, &m.LoadFive, &m.LoadFifteen, &m.SwapUsed, &m.DiskReport) switch { case queryErr == sql.ErrNoRows: http.Error(w, "No such host " + h, http.StatusNotFound) return case queryErr != nil: dbconn.Close() http.Error(w, "Fatal attempting to execute SELECT for host " + h, http.StatusInternalServerError) return default: } rpt, err := json.Marshal(m) if (err != nil) { http.Error(w, "Fatal attempting to marshal JSON", http.StatusInternalServerError) return } fmt.Fprintf(w, "%s", rpt) } case "POST": if (len(h) == 0) { http.Error(w, "Must specify a host for a POST request", http.StatusInternalServerError) } // Must call ParseForm() before accessing elements r.ParseForm() //bb := r.Form // Populate message Fields m.Timestamp, _ = strconv.ParseInt(r.FormValue("Timestamp"), 10, 64) m.Hostname = r.FormValue("Hostname") m.NumCPUs, _ = strconv.ParseInt(r.FormValue("NumCPUs"), 10, 64) m.Memtotal, _ = strconv.ParseInt(r.FormValue("Memtotal"), 10, 64) m.LoadOne, _ = strconv.ParseFloat(r.FormValue("LoadOne"), 64) m.LoadFive, _ = strconv.ParseFloat(r.FormValue("LoadFive"), 64) m.LoadFifteen, _ = strconv.ParseFloat(r.FormValue("LoadFifteen"), 64) m.SwapUsed, _ = strconv.ParseFloat(r.FormValue("SwapUsed"), 64) m.KernelVer = r.FormValue("KernelVer") m.Release = r.FormValue("Release") m.Uptime = r.FormValue("Uptime") m.DiskReport = r.FormValue("DiskReport") // //
} } log.Printf("Host monitor data server starting up\n")
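The configuration parser in this row reads one "key value" pair per line, records each key in haveParam exactly as written, and switches on the lowercased form, so the file has to spell the keys in the camelCase form that the completeness check looks for (dbUser, dbPass, and so on). A hypothetical example of such a file is sketched below, embedded as a Go string purely for illustration; every value is made up, not taken from a real deployment.

package main

import "fmt"

// sampleConf shows the key/value layout the parser accepts. Keys keep the
// camelCase spelling checked by haveParam; lowercasing them yields the names
// matched by the switch statement. All values are invented examples.
const sampleConf = `dbUser hostmon
dbPass examplepassword
dbHost 127.0.0.1
dbName hostmonitor
eMailTo ops@example.com
eMailFrom hostmon@example.com
loadThreshold 4.0
swapThreshold 20.0
loadFirstDThreshold 1.0
swapFirstDThreshold 5.0
diskThreshold 90
diskReportInterval 3600`

func main() {
	fmt.Println(sampleConf)
}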
random_line_split
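The same row assembles the MySQL DSN as user:pass@tcp(host:3306)/dbname and notes that the global dbconn must be assigned with = rather than := (a := inside main would declare a new local variable and leave the package-level connection nil for the handlers). The handlers build their SQL by string concatenation; as a minimal sketch under stated assumptions, the snippet below shows the same single-host lookup written with a ? placeholder, which the go-sql-driver/mysql driver supports. The credentials, host address, and the selected column names (timestamp, hostname, taken from the WHERE and ORDER BY clauses) are assumptions for the example, not values from the original configuration.

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

// dbconn is package-level so HTTP handlers could share it, mirroring the server above.
var dbconn *sql.DB

func main() {
	// Hypothetical credentials; the real values come from the configuration file.
	dsn := "hostmon" + ":" + "secret" + "@tcp(" + "127.0.0.1" + ":3306)/" + "hostmonitor"

	var err error
	// Note the plain "=": writing "dbconn, err := sql.Open(...)" here would
	// shadow the global dbconn with a new local variable.
	dbconn, err = sql.Open("mysql", dsn)
	if err != nil {
		log.Fatalf("Fatal connecting to database: %v", err)
	}
	defer dbconn.Close()

	// Same shape as the handler's single-host lookup, but with a placeholder
	// instead of string concatenation; the driver handles quoting/escaping.
	var ts int64
	var host string
	err = dbconn.QueryRow(
		"SELECT timestamp, hostname FROM reports WHERE hostname = ? ORDER BY timestamp DESC LIMIT 1",
		"web01", // hypothetical host name
	).Scan(&ts, &host)
	if err != nil {
		log.Fatalf("query failed: %v", err)
	}
	log.Printf("latest report for %s at %d", host, ts)
}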
index.ts
enterInitialLoadState() { overviewTable.addEventListener('click', (event) => this.handleTableEventClick(event)); overviewTable.addEventListener('save-edit', this); overviewTable.addEventListener('cancel-edit', this); itemDB.getAll().then(res => { if (res.length === 0) { console.log("No items added yet"); this.loadBlankState(); } else { for (const item of res) { this.addItem(item); } console.log(this.items); this.transitionFromSetupToNormal(); } }).catch(loadDataFailReason => { // Failure if console.error("failed to load data", loadDataFailReason); }); } addItem(item: Item) { this.items.set(item.name, item); } async addItemSubmit(event) { const addData: FormData = event.formData; const name = addData.get("item-name"); const sQty: string = addData.get("item-qty"); const qty = parseInt(sQty, 10); const sThreshold = addData.get("item-threshold"); const threshold = parseInt(sThreshold, 10); const url = addData.get("item-reorder-url") const item: Item = {name, qty, threshold, url}; await itemDB.update(item.name, item); this.items.set(item.name, item); } handleAddItemFormDataInNormal(e) { console.log("Normal item form submit"); console.log(this); this.addItemSubmit(e).then(res => { // Clear form fAddItem.reset(); // hide the form fAddItem.classList.add('hide'); // refresh Overview table contents this.refreshView(); }).catch(err => { console.error(`failed to add item to DB, ${e}`); }) } /** * Refresh the UI of the whole app based off number of items tracked */ refreshView() { // prepare new table body let newTBody = document.createElement('tbody'); overviewTable.tBodies[0].replaceWith(newTBody) const zeroItems = this.items.size === 0; if (zeroItems) { this.transitionFromNormalToSetup(); } else { this.populateInitialTable(); } } handleAddItemFormData(e) { this.addItemSubmit(e).then(res => { // Clear form fAddItem.reset(); // transition to Normal display of console.log('before transition setup -> normal'); console.log(this); this.transitionFromSetupToNormal(); } ).catch(e => { console.error(`failed to add 1st item to DB, ${e}`); }); } loadBlankState() { // Make Setup screen visible blankSetup.classList.remove('hide'); // Setup the load existing data button loadDBFromFileSetupBtn.addEventListener('change', this.loadDataHandler.bind(this)); // Make form visible fAddItem.classList.remove('hide'); // Setup form submission handling fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', (e) => this.handleAddItemFormData(e), {once: true}); blankSetup.appendChild(fAddItem); // focus the 1st field of the form fAddItem.querySelector('#add-name').focus(); console.log("show form"); } transitionFromSetupToNormal() { // hide add item form console.log('entered transition Setup -> Normal fn'); console.log(fAddItem); blankSetup.classList.add('hide'); this.enterNormalState(); } enterNormalState() { this.populateInitialTable(); addItem.addEventListener('click', e => { fAddItem.classList.toggle('hide'); overviewUI.insertBefore(fAddItem, overviewTable); // focus the name fAddItem.querySelector("#add-name").focus(); // read event listener fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', event => { this.handleAddItemFormDataInNormal(event); }, {once: true}); }) // show overview table overviewUI.classList.remove('hide'); loadDBFromFileBtn.addEventListener('change', this.loadDataHandler.bind(this)); // add event listener for export button 
saveDBToFileBtn.addEventListener('click', () => this.exportDataToFile()); } replaceAppData(newData: Map<string, string>) { // replace database contents this.items = newData; itemDB.clear().then(res => { for (const item of this.items.values()) { itemDB.addItem(item); } console.log("DB updated with replacement file"); }); } importDataFromFile(e) { console.log("file import button changed normal mode"); console.log(e) // input element has files selected if (e.target.files.length === 0) { console.log("File selection cancelled"); return; } let files: FileList = e.target.files; let file = files[0]; let reader = new FileReader(); reader.addEventListener('load', e => { // read the file data let parsedData = JSON.parse(e.target.result); // create map and replace items let newAppData = new Map([...parsedData]); this.replaceAppData(newAppData); this.refreshView(); }, {once: true}); reader.readAsText(file); } exportDataToFile() { console.log("export button pressed"); let exportedData = JSON.stringify([...this.items], null, 2); // Need data to be a sequence [] + a blob let blob = new Blob([exportedData], {type: "application/json"}); // Adapted from LogRocket blog post for downloading blobs // link: https://blog.logrocket.com/programmatic-file-downloads-in-the-browser-9a5186298d5c/ // Further adaptation - getting download to work without click handler // link: https://yon.fun/download-file-js/ // Create an object URL for the blob object let url = URL.createObjectURL(blob); // Create a new anchor element to later click and store the download link const link = document.createElement('a'); // Set the href and download attributes for the anchor element // You can optionally set other attributes like `title`, etc // Especially, if the anchor element will be attached to the DOM link.href = url; link.download = "items.json"; // Programmatically trigger a click on the anchor element // Useful if you want the download to happen automatically // Without attaching the anchor element to the DOM link.click(); // Free the url resource (and blob memory?) 
URL.revokeObjectURL(url); // -- end adaptation } loadDataHandler(e) { this.importDataFromFile(e); e.currentTarget.removeEventListener('change', this.loadDataHandler); // reset file handler for importing more files of same name e.target.value = ''; this.transitionFromSetupToNormal(); } transitionFromNormalToSetup() { console.log("Normal -> Setup transition"); // Hide table since 0 items overviewUI.classList.add('hide'); // Show setup screen again blankSetup.classList.remove('hide'); fAddItem.classList.remove('hide'); blankSetup.appendChild(fAddItem); // show load data button loadDBFromFileSetupBtn.addEventListener('change', this.loadDataHandler.bind(this)); // focus form name fAddItem.querySelector('#add-name').focus(); // Setup form submission handling fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', (e) => { this.handleAddItemFormDataInNormal(e); overviewUI.classList.remove('hide'); blankSetup.classList.add('hide'); fAddItem.classList.remove('hide'); }, {once: true}); } handleDelete(e) { console.log("Delete item request got"); const itemName = e.target.dataset.name; itemDB.remove(e.target.dataset.name).then(() => { console.debug(`Successfully removed item - ${itemName} from IndexedDB`); if (this.items.delete(itemName)) { console.debug(`Removed '${itemName}' from internal app data`); } else { console.error('Item removed from DB was not tracked in App internal items'); } this.refreshView(); }) .catch(err => { console.error(`failed to remove '${itemName} - ${err}`); throw err; }); } handleEdit(e) { console.log("Got edit request for row"); console.log(e); console.log(e.target); const itemName = e.target.dataset.name; let trToEdit = e.target.closest('tr'); console.log("Table row to edit"); console.log(trToEdit); this.switchToEditModeForRow(trToEdit, itemName); } /** * * @param elemTRow The table row to switch to edit mode * @param itemName Item name used to identify the row */ switchToEditModeForRow(elemTRow: HTMLTableRowElement, itemName: string) { let itemNameCell = elemTRow.cells[0]; let itemQtyCell = elemTRow.cells[1]; let item
{ this.items = new Map(); }
identifier_body
index.ts
); } async addItemSubmit(event) { const addData: FormData = event.formData; const name = addData.get("item-name"); const sQty: string = addData.get("item-qty"); const qty = parseInt(sQty, 10); const sThreshold = addData.get("item-threshold"); const threshold = parseInt(sThreshold, 10); const url = addData.get("item-reorder-url") const item: Item = {name, qty, threshold, url}; await itemDB.update(item.name, item); this.items.set(item.name, item); } handleAddItemFormDataInNormal(e) { console.log("Normal item form submit"); console.log(this); this.addItemSubmit(e).then(res => { // Clear form fAddItem.reset(); // hide the form fAddItem.classList.add('hide'); // refresh Overview table contents this.refreshView(); }).catch(err => { console.error(`failed to add item to DB, ${e}`); }) } /** * Refresh the UI of the whole app based off number of items tracked */ refreshView() { // prepare new table body let newTBody = document.createElement('tbody'); overviewTable.tBodies[0].replaceWith(newTBody) const zeroItems = this.items.size === 0; if (zeroItems) { this.transitionFromNormalToSetup(); } else { this.populateInitialTable(); } } handleAddItemFormData(e) { this.addItemSubmit(e).then(res => { // Clear form fAddItem.reset(); // transition to Normal display of console.log('before transition setup -> normal'); console.log(this); this.transitionFromSetupToNormal(); } ).catch(e => { console.error(`failed to add 1st item to DB, ${e}`); }); } loadBlankState() { // Make Setup screen visible blankSetup.classList.remove('hide'); // Setup the load existing data button loadDBFromFileSetupBtn.addEventListener('change', this.loadDataHandler.bind(this)); // Make form visible fAddItem.classList.remove('hide'); // Setup form submission handling fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', (e) => this.handleAddItemFormData(e), {once: true}); blankSetup.appendChild(fAddItem); // focus the 1st field of the form fAddItem.querySelector('#add-name').focus(); console.log("show form"); } transitionFromSetupToNormal() { // hide add item form console.log('entered transition Setup -> Normal fn'); console.log(fAddItem); blankSetup.classList.add('hide'); this.enterNormalState(); } enterNormalState() { this.populateInitialTable(); addItem.addEventListener('click', e => { fAddItem.classList.toggle('hide'); overviewUI.insertBefore(fAddItem, overviewTable); // focus the name fAddItem.querySelector("#add-name").focus(); // read event listener fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', event => { this.handleAddItemFormDataInNormal(event); }, {once: true}); }) // show overview table overviewUI.classList.remove('hide'); loadDBFromFileBtn.addEventListener('change', this.loadDataHandler.bind(this)); // add event listener for export button saveDBToFileBtn.addEventListener('click', () => this.exportDataToFile()); } replaceAppData(newData: Map<string, string>) { // replace database contents this.items = newData; itemDB.clear().then(res => { for (const item of this.items.values()) { itemDB.addItem(item); } console.log("DB updated with replacement file"); }); } importDataFromFile(e) { console.log("file import button changed normal mode"); console.log(e) // input element has files selected if (e.target.files.length === 0) { console.log("File selection cancelled"); return; } let files: FileList = e.target.files; let file = files[0]; let reader = new 
FileReader(); reader.addEventListener('load', e => { // read the file data let parsedData = JSON.parse(e.target.result); // create map and replace items let newAppData = new Map([...parsedData]); this.replaceAppData(newAppData); this.refreshView(); }, {once: true}); reader.readAsText(file); } exportDataToFile() { console.log("export button pressed"); let exportedData = JSON.stringify([...this.items], null, 2); // Need data to be a sequence [] + a blob let blob = new Blob([exportedData], {type: "application/json"}); // Adapted from LogRocket blog post for downloading blobs // link: https://blog.logrocket.com/programmatic-file-downloads-in-the-browser-9a5186298d5c/ // Further adaptation - getting download to work without click handler // link: https://yon.fun/download-file-js/ // Create an object URL for the blob object let url = URL.createObjectURL(blob); // Create a new anchor element to later click and store the download link const link = document.createElement('a'); // Set the href and download attributes for the anchor element // You can optionally set other attributes like `title`, etc // Especially, if the anchor element will be attached to the DOM link.href = url; link.download = "items.json"; // Programmatically trigger a click on the anchor element // Useful if you want the download to happen automatically // Without attaching the anchor element to the DOM link.click(); // Free the url resource (and blob memory?) URL.revokeObjectURL(url); // -- end adaptation } loadDataHandler(e) { this.importDataFromFile(e); e.currentTarget.removeEventListener('change', this.loadDataHandler); // reset file handler for importing more files of same name e.target.value = ''; this.transitionFromSetupToNormal(); } transitionFromNormalToSetup() { console.log("Normal -> Setup transition"); // Hide table since 0 items overviewUI.classList.add('hide'); // Show setup screen again blankSetup.classList.remove('hide'); fAddItem.classList.remove('hide'); blankSetup.appendChild(fAddItem); // show load data button loadDBFromFileSetupBtn.addEventListener('change', this.loadDataHandler.bind(this));
fAddItem.querySelector('#add-name').focus(); // Setup form submission handling fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', (e) => { this.handleAddItemFormDataInNormal(e); overviewUI.classList.remove('hide'); blankSetup.classList.add('hide'); fAddItem.classList.remove('hide'); }, {once: true}); } handleDelete(e) { console.log("Delete item request got"); const itemName = e.target.dataset.name; itemDB.remove(e.target.dataset.name).then(() => { console.debug(`Successfully removed item - ${itemName} from IndexedDB`); if (this.items.delete(itemName)) { console.debug(`Removed '${itemName}' from internal app data`); } else { console.error('Item removed from DB was not tracked in App internal items'); } this.refreshView(); }) .catch(err => { console.error(`failed to remove '${itemName} - ${err}`); throw err; }); } handleEdit(e) { console.log("Got edit request for row"); console.log(e); console.log(e.target); const itemName = e.target.dataset.name; let trToEdit = e.target.closest('tr'); console.log("Table row to edit"); console.log(trToEdit); this.switchToEditModeForRow(trToEdit, itemName); } /** * * @param elemTRow The table row to switch to edit mode * @param itemName Item name used to identify the row */ switchToEditModeForRow(elemTRow: HTMLTableRowElement, itemName: string) { let itemNameCell = elemTRow.cells[0]; let itemQtyCell = elemTRow.cells[1]; let itemThresholdCell = elemTRow.cells[2]; let item = this.items.get(itemName); this.switchCellToEditMode(itemNameCell, 'edit-cell-name-link-template', new Map(Object.entries({name: item.name, url: item.url}))); // qty cell edit - create edit cell this.switchCellToEditMode(itemQtyCell, 'edit-cell-qty-template', new Map(Object.entries({qty: item.qty}))); this.switchCellToEditMode(itemThresholdCell, 'edit-cell-threshold-template', new Map(Object.entries({threshold: item.threshold}))); } /** * @param tCellElem the table cell element ref to switch into editing mode * @param template HTML document id of the template for the edit cell * @param data Object Where key = field
// focus form name
random_line_split
index.ts
} async addItemSubmit(event) { const addData: FormData = event.formData; const name = addData.get("item-name"); const sQty: string = addData.get("item-qty"); const qty = parseInt(sQty, 10); const sThreshold = addData.get("item-threshold"); const threshold = parseInt(sThreshold, 10); const url = addData.get("item-reorder-url") const item: Item = {name, qty, threshold, url}; await itemDB.update(item.name, item); this.items.set(item.name, item); } handleAddItemFormDataInNormal(e) { console.log("Normal item form submit"); console.log(this); this.addItemSubmit(e).then(res => { // Clear form fAddItem.reset(); // hide the form fAddItem.classList.add('hide'); // refresh Overview table contents this.refreshView(); }).catch(err => { console.error(`failed to add item to DB, ${e}`); }) } /** * Refresh the UI of the whole app based off number of items tracked */ refreshView() { // prepare new table body let newTBody = document.createElement('tbody'); overviewTable.tBodies[0].replaceWith(newTBody) const zeroItems = this.items.size === 0; if (zeroItems) { this.transitionFromNormalToSetup(); } else { this.populateInitialTable(); } } handleAddItemFormData(e) { this.addItemSubmit(e).then(res => { // Clear form fAddItem.reset(); // transition to Normal display of console.log('before transition setup -> normal'); console.log(this); this.transitionFromSetupToNormal(); } ).catch(e => { console.error(`failed to add 1st item to DB, ${e}`); }); } loadBlankState() { // Make Setup screen visible blankSetup.classList.remove('hide'); // Setup the load existing data button loadDBFromFileSetupBtn.addEventListener('change', this.loadDataHandler.bind(this)); // Make form visible fAddItem.classList.remove('hide'); // Setup form submission handling fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', (e) => this.handleAddItemFormData(e), {once: true}); blankSetup.appendChild(fAddItem); // focus the 1st field of the form fAddItem.querySelector('#add-name').focus(); console.log("show form"); }
() { // hide add item form console.log('entered transition Setup -> Normal fn'); console.log(fAddItem); blankSetup.classList.add('hide'); this.enterNormalState(); } enterNormalState() { this.populateInitialTable(); addItem.addEventListener('click', e => { fAddItem.classList.toggle('hide'); overviewUI.insertBefore(fAddItem, overviewTable); // focus the name fAddItem.querySelector("#add-name").focus(); // read event listener fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', event => { this.handleAddItemFormDataInNormal(event); }, {once: true}); }) // show overview table overviewUI.classList.remove('hide'); loadDBFromFileBtn.addEventListener('change', this.loadDataHandler.bind(this)); // add event listener for export button saveDBToFileBtn.addEventListener('click', () => this.exportDataToFile()); } replaceAppData(newData: Map<string, string>) { // replace database contents this.items = newData; itemDB.clear().then(res => { for (const item of this.items.values()) { itemDB.addItem(item); } console.log("DB updated with replacement file"); }); } importDataFromFile(e) { console.log("file import button changed normal mode"); console.log(e) // input element has files selected if (e.target.files.length === 0) { console.log("File selection cancelled"); return; } let files: FileList = e.target.files; let file = files[0]; let reader = new FileReader(); reader.addEventListener('load', e => { // read the file data let parsedData = JSON.parse(e.target.result); // create map and replace items let newAppData = new Map([...parsedData]); this.replaceAppData(newAppData); this.refreshView(); }, {once: true}); reader.readAsText(file); } exportDataToFile() { console.log("export button pressed"); let exportedData = JSON.stringify([...this.items], null, 2); // Need data to be a sequence [] + a blob let blob = new Blob([exportedData], {type: "application/json"}); // Adapted from LogRocket blog post for downloading blobs // link: https://blog.logrocket.com/programmatic-file-downloads-in-the-browser-9a5186298d5c/ // Further adaptation - getting download to work without click handler // link: https://yon.fun/download-file-js/ // Create an object URL for the blob object let url = URL.createObjectURL(blob); // Create a new anchor element to later click and store the download link const link = document.createElement('a'); // Set the href and download attributes for the anchor element // You can optionally set other attributes like `title`, etc // Especially, if the anchor element will be attached to the DOM link.href = url; link.download = "items.json"; // Programmatically trigger a click on the anchor element // Useful if you want the download to happen automatically // Without attaching the anchor element to the DOM link.click(); // Free the url resource (and blob memory?) 
URL.revokeObjectURL(url); // -- end adaptation } loadDataHandler(e) { this.importDataFromFile(e); e.currentTarget.removeEventListener('change', this.loadDataHandler); // reset file handler for importing more files of same name e.target.value = ''; this.transitionFromSetupToNormal(); } transitionFromNormalToSetup() { console.log("Normal -> Setup transition"); // Hide table since 0 items overviewUI.classList.add('hide'); // Show setup screen again blankSetup.classList.remove('hide'); fAddItem.classList.remove('hide'); blankSetup.appendChild(fAddItem); // show load data button loadDBFromFileSetupBtn.addEventListener('change', this.loadDataHandler.bind(this)); // focus form name fAddItem.querySelector('#add-name').focus(); // Setup form submission handling fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', (e) => { this.handleAddItemFormDataInNormal(e); overviewUI.classList.remove('hide'); blankSetup.classList.add('hide'); fAddItem.classList.remove('hide'); }, {once: true}); } handleDelete(e) { console.log("Delete item request got"); const itemName = e.target.dataset.name; itemDB.remove(e.target.dataset.name).then(() => { console.debug(`Successfully removed item - ${itemName} from IndexedDB`); if (this.items.delete(itemName)) { console.debug(`Removed '${itemName}' from internal app data`); } else { console.error('Item removed from DB was not tracked in App internal items'); } this.refreshView(); }) .catch(err => { console.error(`failed to remove '${itemName} - ${err}`); throw err; }); } handleEdit(e) { console.log("Got edit request for row"); console.log(e); console.log(e.target); const itemName = e.target.dataset.name; let trToEdit = e.target.closest('tr'); console.log("Table row to edit"); console.log(trToEdit); this.switchToEditModeForRow(trToEdit, itemName); } /** * * @param elemTRow The table row to switch to edit mode * @param itemName Item name used to identify the row */ switchToEditModeForRow(elemTRow: HTMLTableRowElement, itemName: string) { let itemNameCell = elemTRow.cells[0]; let itemQtyCell = elemTRow.cells[1]; let itemThresholdCell = elemTRow.cells[2]; let item = this.items.get(itemName); this.switchCellToEditMode(itemNameCell, 'edit-cell-name-link-template', new Map(Object.entries({name: item.name, url: item.url}))); // qty cell edit - create edit cell this.switchCellToEditMode(itemQtyCell, 'edit-cell-qty-template', new Map(Object.entries({qty: item.qty}))); this.switchCellToEditMode(itemThresholdCell, 'edit-cell-threshold-template', new Map(Object.entries({threshold: item.threshold}))); } /** * @param tCellElem the table cell element ref to switch into editing mode * @param template HTML document id of the template for the edit cell * @param data Object Where key = field
transitionFromSetupToNormal
identifier_name
index.ts
.classList.remove('hide'); // Setup the load existing data button loadDBFromFileSetupBtn.addEventListener('change', this.loadDataHandler.bind(this)); // Make form visible fAddItem.classList.remove('hide'); // Setup form submission handling fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', (e) => this.handleAddItemFormData(e), {once: true}); blankSetup.appendChild(fAddItem); // focus the 1st field of the form fAddItem.querySelector('#add-name').focus(); console.log("show form"); } transitionFromSetupToNormal() { // hide add item form console.log('entered transition Setup -> Normal fn'); console.log(fAddItem); blankSetup.classList.add('hide'); this.enterNormalState(); } enterNormalState() { this.populateInitialTable(); addItem.addEventListener('click', e => { fAddItem.classList.toggle('hide'); overviewUI.insertBefore(fAddItem, overviewTable); // focus the name fAddItem.querySelector("#add-name").focus(); // read event listener fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', event => { this.handleAddItemFormDataInNormal(event); }, {once: true}); }) // show overview table overviewUI.classList.remove('hide'); loadDBFromFileBtn.addEventListener('change', this.loadDataHandler.bind(this)); // add event listener for export button saveDBToFileBtn.addEventListener('click', () => this.exportDataToFile()); } replaceAppData(newData: Map<string, string>) { // replace database contents this.items = newData; itemDB.clear().then(res => { for (const item of this.items.values()) { itemDB.addItem(item); } console.log("DB updated with replacement file"); }); } importDataFromFile(e) { console.log("file import button changed normal mode"); console.log(e) // input element has files selected if (e.target.files.length === 0) { console.log("File selection cancelled"); return; } let files: FileList = e.target.files; let file = files[0]; let reader = new FileReader(); reader.addEventListener('load', e => { // read the file data let parsedData = JSON.parse(e.target.result); // create map and replace items let newAppData = new Map([...parsedData]); this.replaceAppData(newAppData); this.refreshView(); }, {once: true}); reader.readAsText(file); } exportDataToFile() { console.log("export button pressed"); let exportedData = JSON.stringify([...this.items], null, 2); // Need data to be a sequence [] + a blob let blob = new Blob([exportedData], {type: "application/json"}); // Adapted from LogRocket blog post for downloading blobs // link: https://blog.logrocket.com/programmatic-file-downloads-in-the-browser-9a5186298d5c/ // Further adaptation - getting download to work without click handler // link: https://yon.fun/download-file-js/ // Create an object URL for the blob object let url = URL.createObjectURL(blob); // Create a new anchor element to later click and store the download link const link = document.createElement('a'); // Set the href and download attributes for the anchor element // You can optionally set other attributes like `title`, etc // Especially, if the anchor element will be attached to the DOM link.href = url; link.download = "items.json"; // Programmatically trigger a click on the anchor element // Useful if you want the download to happen automatically // Without attaching the anchor element to the DOM link.click(); // Free the url resource (and blob memory?) 
URL.revokeObjectURL(url); // -- end adaptation } loadDataHandler(e) { this.importDataFromFile(e); e.currentTarget.removeEventListener('change', this.loadDataHandler); // reset file handler for importing more files of same name e.target.value = ''; this.transitionFromSetupToNormal(); } transitionFromNormalToSetup() { console.log("Normal -> Setup transition"); // Hide table since 0 items overviewUI.classList.add('hide'); // Show setup screen again blankSetup.classList.remove('hide'); fAddItem.classList.remove('hide'); blankSetup.appendChild(fAddItem); // show load data button loadDBFromFileSetupBtn.addEventListener('change', this.loadDataHandler.bind(this)); // focus form name fAddItem.querySelector('#add-name').focus(); // Setup form submission handling fAddItem.addEventListener('submit', e => { e.preventDefault(); new FormData(fAddItem); }, {once: true}); fAddItem.addEventListener('formdata', (e) => { this.handleAddItemFormDataInNormal(e); overviewUI.classList.remove('hide'); blankSetup.classList.add('hide'); fAddItem.classList.remove('hide'); }, {once: true}); } handleDelete(e) { console.log("Delete item request got"); const itemName = e.target.dataset.name; itemDB.remove(e.target.dataset.name).then(() => { console.debug(`Successfully removed item - ${itemName} from IndexedDB`); if (this.items.delete(itemName)) { console.debug(`Removed '${itemName}' from internal app data`); } else { console.error('Item removed from DB was not tracked in App internal items'); } this.refreshView(); }) .catch(err => { console.error(`failed to remove '${itemName} - ${err}`); throw err; }); } handleEdit(e) { console.log("Got edit request for row"); console.log(e); console.log(e.target); const itemName = e.target.dataset.name; let trToEdit = e.target.closest('tr'); console.log("Table row to edit"); console.log(trToEdit); this.switchToEditModeForRow(trToEdit, itemName); } /** * * @param elemTRow The table row to switch to edit mode * @param itemName Item name used to identify the row */ switchToEditModeForRow(elemTRow: HTMLTableRowElement, itemName: string) { let itemNameCell = elemTRow.cells[0]; let itemQtyCell = elemTRow.cells[1]; let itemThresholdCell = elemTRow.cells[2]; let item = this.items.get(itemName); this.switchCellToEditMode(itemNameCell, 'edit-cell-name-link-template', new Map(Object.entries({name: item.name, url: item.url}))); // qty cell edit - create edit cell this.switchCellToEditMode(itemQtyCell, 'edit-cell-qty-template', new Map(Object.entries({qty: item.qty}))); this.switchCellToEditMode(itemThresholdCell, 'edit-cell-threshold-template', new Map(Object.entries({threshold: item.threshold}))); } /** * @param tCellElem the table cell element ref to switch into editing mode * @param template HTML document id of the template for the edit cell * @param data Object Where key = field editing, and value = value of current field **/ switchCellToEditMode(tCellElem: HTMLTableCellElement, template: string, data: Map<string, any>) { // Replace with edit cell let editCell = tCellElem.cloneNode(false); tCellElem.parentNode.replaceChild(editCell, tCellElem); let editCellTemplate = document.getElementById(template); let editCellTemplateContent = editCellTemplate.content.cloneNode(true); editCell.appendChild(editCellTemplateContent); // Once appended, the current reference to the template content is invalid. 
// Get a new reference let activeEditContentCell = editCell.querySelector('edit-cell'); for (const [fieldName, value] of data.entries()) { activeEditContentCell.setAttribute(fieldName, value); } // } handleSaveEdit(e) { console.log("got save edit event to handle"); console.log(e.detail); console.log(`closest tr: ${e.target.closest("tr")}`); console.log(`item orig name from tr: ${e.target.closest("tr").dataset.name}`); let origItemName = e.target.closest("tr").dataset.name; // Get original contents const origItem = this.items.get(origItemName); console.log(`Original Item: ${JSON.stringify(origItem)}`); // Merge changes console.log(e.detail.changes); let editedItem = {...origItem, ...e.detail.changes}; console.log(`edited update item: ${JSON.stringify(editedItem)}`); this.items.set(editedItem.name, editedItem); itemDB.update(origItem.name, editedItem); // Need to remove the old item name if it changed. Replace with edited name if (editedItem.name !== origItem.name) { this.items.delete(origItemName); } this.refreshView(); } handleEvent(e) { console.log("got event type"); console.log(e.type) if (e.type == 'save-edit')
{ this.handleSaveEdit(e); }
conditional_block
setup_groupsched.py
For example, you can specify extra directories (CMake Binary Dir, kernel path) and force those arguments to be defined. The wrapped version also takes care of distutils causing errors when extra arguments are passed straight to the setup_<module-name>.py scripts. """ ######################################################## # # All of the test modules will need the same options front # end, so this can be imported where one would normally # place the optparse template. # # def run_distutils(): """Executes the distutils code to build and install the pyccsm wrappers.""" from distutils.core import setup, Extension source_dir = os.getcwd() build_dir = Params.cmake_binary_dir gsched_module = Extension('_gsched', sources=[build_dir+'/gsched_wrap.c', build_dir+'/gsched.c'], include_dirs = [source_dir+'/include'], libraries = ['gsched'], extra_compile_args = ['-fPIC'], library_dirs = [build_dir, source_dir], ) # sdf_seq_module = Extension('_sdf_seq', # sources=[source_dir+'/libgsched/sdf_seq_wrap.c', # source_dir+'/libgsched/sdf_seq.c'], # include_dirs = [source_dir+'/include'], #) setup(name='pygsched', version='1.0', description="Group Scheduling Python API bindings.", url='http://www.ittc.ku.edu/kusp', package_dir={'': '.'}, include_dirs = [source_dir+'/include'], ext_modules = [gsched_module], # sdf_seq_module], py_modules = ['gschedapi'], scripts = ['tools/gschedctrl', 'tools/gschedexec', 'tools/gschedpprint', 'tools/gschedsnapshot', 'tools/gschedsh', ], packages=['pygsched','pygsched.gsconsole'] ) pass if __name__ == '__main__': # imports required if this module is called as a # command import optparse, sys, os from pprint import * # Define the set of permitted parameters, including the # command arguments. The initialization method creates # the parser and defines the defaults. The parse() # method actually parses the arguments one the command # line. This was done so that the instance of the class # could be global and thus available to all # routines. and then parse the arguments to this call # according to the specification class Params_Set: USAGE = "usage: %prog [options]" def __init__(self): # Create the argument parser and then tell it # about the set of legal arguments for this # command. The parse() method of this class # calls parse_args of the optparse module self.p = optparse.OptionParser(usage=self.USAGE) # Boring and totally standard verbose and # debugging options that should be common to # virtually any command # self.p.add_option("-d", action="store_const", const=1, dest="debug_level", help="Turn on diagnostic output at level 1") self.p.add_option("-D", action="store", type ="int", dest="debug_level", help="Turn on diagnostic output at level DEBUG_LEVEL") self.p.add_option("-v", action="store_const", const=1, dest="verbose_level", help="Turn on narrative output at level 1") self.p.add_option("-V", action="store", type ="int", dest="verbose_level", help="Turn on narrative output at level VERBOSE_LEVEL") # Command specific options. We can specify a # configuration file to parse, which defaults to # stdin, and an output file name, which defaults # to stdout. 
self.p.add_option("--build", action="store_const", const=True, dest="run_build", help="Turns on build mode.") self.p.add_option("-b", action="store", type ="string", dest="build_dir", help="The build directory path.") self.p.add_option("--install", action="store_const", const=True, dest="run_install", help="Turns on install mode.") self.p.add_option("--prefix", action="store", type ="string", dest="install_prefix", help="Install Prefix") self.p.add_option("--kernel", action="store", type="string", dest="kernel_path", help="Path to the kernel against which you wish to compile.") self.p.add_option("--cbd", action="store", type="string", dest="cmake_binary_dir", help="CMake binary directory in which to look for libraries against which" " to compile.") # Now tell the parser about the default values of all the options # we just told it about self.p.set_defaults( debug_level = 2, verbose_level = 0, build_dir = None, run_build = False, run_install = False, install_prefix = None, kernel_path = None, cmake_binary_dir = None ) def parse(self): self.options, self.args = self.p.parse_args() self.debug_level = self.options.debug_level self.verbose_level = self.options.verbose_level self.build_dir = self.options.build_dir self.run_build = self.options.run_build self.run_install = self.options.run_install self.install_prefix = self.options.install_prefix self.kernel_path = self.options.kernel_path self.cmake_binary_dir = self.options.cmake_binary_dir # Output option details if debugging level is high enough if self.debug_level >= 3 : print print "Options: ", self.options print "Args: ", self.args # Defining this method defines the string representation of the # object when given as an argument to str() or the "print" command #cd def __str__(self): param_print_str = \ """Parameters: debug_level : %d verbose_level : %d run_build : %s build_dir : %s run_install : %s install_prefix : %s kernel_path : %s cmake_binary_dir : %s """ str_output = param_print_str % \ (self.debug_level, self.verbose_level, self.run_build, self.build_dir, self.run_install, self.install_prefix, self.kernel_path, self.cmake_binary_dir) return str_output def
(): # Global level params class instance was # created before calling main(). We make it # global so that other code can access the set # of Parameters, simply by accessing the Params # instance. Here, however, we call the parse() # method to actually get the arguments, since # we have been called from the command line. Params.parse() debug_level = Params.debug_level if Params.debug_level >= 2: print Params if not Params.cmake_binary_dir: # Forcing a check for the CMake binary directory. # This needs to be defined in order to find libraries # and other files within the build directory. # print "Must define the CMake binary directory --cbd=<build-dir>" sys.exit(1) ## ## if not Params.kernel_path: ## # Forcing a check for the kernel path variable. ## # Obvisouly used when making something that needs to compile ## # specifically against the kernel. ## # ## print "Must define the kernel path --kernel=<kernel-path>" ## sys.exit(1) ## ################################################# if (Params.run_install and Params.run_build) or \ (not Params.run_install and not Params.run_build): # This should never be run when both a build and an install, # nor should it be run with nothing to do. So force a check for # conflicting install and build arguments True True and False False # that will cause inappropriate function, and force an exit. # print "Conflicting arguments run_build and run_install. XOR." sys.exit(1) elif Params.run_build: # The user has specified to run a build # check to make sure they have specified a build directory. # if not Params.build_dir: # Build directory check failed. Exit. # print "Must define the build directory when running a build -b <build-dir>" sys.exit(1) # Reset sys.argv with the correct parameters for distutils to parse # giving it the options verbose, tell it to build, and the path to the # build directory. # sys.argv = [sys.argv[0], '-v', 'build', '-b', Params.build_dir] elif Params.run_install: # The user has specified to run an install. # check to make sure that the installation root prefix # has been defined. # if not Params.install_prefix: # Installation prefix is not defined, therefore there cannot # be an install, exit
main
identifier_name
setup_groupsched.py
For example, you can specify extra directories (CMake Binary Dir, kernel path) and force those arguments to be defined. The wrapped version also takes care of distutils causing errors when extra arguments are passed straight to the setup_<module-name>.py scripts. """ ######################################################## # # All of the test modules will need the same options front # end, so this can be imported where one would normally # place the optparse template. # # def run_distutils(): """Executes the distutils code to build and install the pyccsm wrappers.""" from distutils.core import setup, Extension source_dir = os.getcwd() build_dir = Params.cmake_binary_dir gsched_module = Extension('_gsched', sources=[build_dir+'/gsched_wrap.c', build_dir+'/gsched.c'], include_dirs = [source_dir+'/include'],
) # sdf_seq_module = Extension('_sdf_seq', # sources=[source_dir+'/libgsched/sdf_seq_wrap.c', # source_dir+'/libgsched/sdf_seq.c'], # include_dirs = [source_dir+'/include'], #) setup(name='pygsched', version='1.0', description="Group Scheduling Python API bindings.", url='http://www.ittc.ku.edu/kusp', package_dir={'': '.'}, include_dirs = [source_dir+'/include'], ext_modules = [gsched_module], # sdf_seq_module], py_modules = ['gschedapi'], scripts = ['tools/gschedctrl', 'tools/gschedexec', 'tools/gschedpprint', 'tools/gschedsnapshot', 'tools/gschedsh', ], packages=['pygsched','pygsched.gsconsole'] ) pass if __name__ == '__main__': # imports required if this module is called as a # command import optparse, sys, os from pprint import * # Define the set of permitted parameters, including the # command arguments. The initialization method creates # the parser and defines the defaults. The parse() # method actually parses the arguments one the command # line. This was done so that the instance of the class # could be global and thus available to all # routines. and then parse the arguments to this call # according to the specification class Params_Set: USAGE = "usage: %prog [options]" def __init__(self): # Create the argument parser and then tell it # about the set of legal arguments for this # command. The parse() method of this class # calls parse_args of the optparse module self.p = optparse.OptionParser(usage=self.USAGE) # Boring and totally standard verbose and # debugging options that should be common to # virtually any command # self.p.add_option("-d", action="store_const", const=1, dest="debug_level", help="Turn on diagnostic output at level 1") self.p.add_option("-D", action="store", type ="int", dest="debug_level", help="Turn on diagnostic output at level DEBUG_LEVEL") self.p.add_option("-v", action="store_const", const=1, dest="verbose_level", help="Turn on narrative output at level 1") self.p.add_option("-V", action="store", type ="int", dest="verbose_level", help="Turn on narrative output at level VERBOSE_LEVEL") # Command specific options. We can specify a # configuration file to parse, which defaults to # stdin, and an output file name, which defaults # to stdout. 
self.p.add_option("--build", action="store_const", const=True, dest="run_build", help="Turns on build mode.") self.p.add_option("-b", action="store", type ="string", dest="build_dir", help="The build directory path.") self.p.add_option("--install", action="store_const", const=True, dest="run_install", help="Turns on install mode.") self.p.add_option("--prefix", action="store", type ="string", dest="install_prefix", help="Install Prefix") self.p.add_option("--kernel", action="store", type="string", dest="kernel_path", help="Path to the kernel against which you wish to compile.") self.p.add_option("--cbd", action="store", type="string", dest="cmake_binary_dir", help="CMake binary directory in which to look for libraries against which" " to compile.") # Now tell the parser about the default values of all the options # we just told it about self.p.set_defaults( debug_level = 2, verbose_level = 0, build_dir = None, run_build = False, run_install = False, install_prefix = None, kernel_path = None, cmake_binary_dir = None ) def parse(self): self.options, self.args = self.p.parse_args() self.debug_level = self.options.debug_level self.verbose_level = self.options.verbose_level self.build_dir = self.options.build_dir self.run_build = self.options.run_build self.run_install = self.options.run_install self.install_prefix = self.options.install_prefix self.kernel_path = self.options.kernel_path self.cmake_binary_dir = self.options.cmake_binary_dir # Output option details if debugging level is high enough if self.debug_level >= 3 : print print "Options: ", self.options print "Args: ", self.args # Defining this method defines the string representation of the # object when given as an argument to str() or the "print" command #cd def __str__(self): param_print_str = \ """Parameters: debug_level : %d verbose_level : %d run_build : %s build_dir : %s run_install : %s install_prefix : %s kernel_path : %s cmake_binary_dir : %s """ str_output = param_print_str % \ (self.debug_level, self.verbose_level, self.run_build, self.build_dir, self.run_install, self.install_prefix, self.kernel_path, self.cmake_binary_dir) return str_output def main(): # Global level params class instance was # created before calling main(). We make it # global so that other code can access the set # of Parameters, simply by accessing the Params # instance. Here, however, we call the parse() # method to actually get the arguments, since # we have been called from the command line. Params.parse() debug_level = Params.debug_level if Params.debug_level >= 2: print Params if not Params.cmake_binary_dir: # Forcing a check for the CMake binary directory. # This needs to be defined in order to find libraries # and other files within the build directory. # print "Must define the CMake binary directory --cbd=<build-dir>" sys.exit(1) ## ## if not Params.kernel_path: ## # Forcing a check for the kernel path variable. ## # Obvisouly used when making something that needs to compile ## # specifically against the kernel. ## # ## print "Must define the kernel path --kernel=<kernel-path>" ## sys.exit(1) ## ################################################# if (Params.run_install and Params.run_build) or \ (not Params.run_install and not Params.run_build): # This should never be run when both a build and an install, # nor should it be run with nothing to do. So force a check for # conflicting install and build arguments True True and False False # that will cause inappropriate function, and force an exit. 
# print "Conflicting arguments run_build and run_install. XOR." sys.exit(1) elif Params.run_build: # The user has specified to run a build # check to make sure they have specified a build directory. # if not Params.build_dir: # Build directory check failed. Exit. # print "Must define the build directory when running a build -b <build-dir>" sys.exit(1) # Reset sys.argv with the correct parameters for distutils to parse # giving it the options verbose, tell it to build, and the path to the # build directory. # sys.argv = [sys.argv[0], '-v', 'build', '-b', Params.build_dir] elif Params.run_install: # The user has specified to run an install. # check to make sure that the installation root prefix # has been defined. # if not Params.install_prefix: # Installation prefix is not defined, therefore there cannot # be an install, exit.
libraries = ['gsched'], extra_compile_args = ['-fPIC'], library_dirs = [build_dir, source_dir],
random_line_split
setup_groupsched.py
For example, you can specify extra directories (CMake Binary Dir, kernel path) and force those arguments to be defined. The wrapped version also takes care of distutils causing errors when extra arguments are passed straight to the setup_<module-name>.py scripts. """ ######################################################## # # All of the test modules will need the same options front # end, so this can be imported where one would normally # place the optparse template. # # def run_distutils(): """Executes the distutils code to build and install the pyccsm wrappers.""" from distutils.core import setup, Extension source_dir = os.getcwd() build_dir = Params.cmake_binary_dir gsched_module = Extension('_gsched', sources=[build_dir+'/gsched_wrap.c', build_dir+'/gsched.c'], include_dirs = [source_dir+'/include'], libraries = ['gsched'], extra_compile_args = ['-fPIC'], library_dirs = [build_dir, source_dir], ) # sdf_seq_module = Extension('_sdf_seq', # sources=[source_dir+'/libgsched/sdf_seq_wrap.c', # source_dir+'/libgsched/sdf_seq.c'], # include_dirs = [source_dir+'/include'], #) setup(name='pygsched', version='1.0', description="Group Scheduling Python API bindings.", url='http://www.ittc.ku.edu/kusp', package_dir={'': '.'}, include_dirs = [source_dir+'/include'], ext_modules = [gsched_module], # sdf_seq_module], py_modules = ['gschedapi'], scripts = ['tools/gschedctrl', 'tools/gschedexec', 'tools/gschedpprint', 'tools/gschedsnapshot', 'tools/gschedsh', ], packages=['pygsched','pygsched.gsconsole'] ) pass if __name__ == '__main__': # imports required if this module is called as a # command import optparse, sys, os from pprint import * # Define the set of permitted parameters, including the # command arguments. The initialization method creates # the parser and defines the defaults. The parse() # method actually parses the arguments one the command # line. This was done so that the instance of the class # could be global and thus available to all # routines. and then parse the arguments to this call # according to the specification class Params_Set: USAGE = "usage: %prog [options]" def __init__(self): # Create the argument parser and then tell it # about the set of legal arguments for this # command. The parse() method of this class # calls parse_args of the optparse module self.p = optparse.OptionParser(usage=self.USAGE) # Boring and totally standard verbose and # debugging options that should be common to # virtually any command # self.p.add_option("-d", action="store_const", const=1, dest="debug_level", help="Turn on diagnostic output at level 1") self.p.add_option("-D", action="store", type ="int", dest="debug_level", help="Turn on diagnostic output at level DEBUG_LEVEL") self.p.add_option("-v", action="store_const", const=1, dest="verbose_level", help="Turn on narrative output at level 1") self.p.add_option("-V", action="store", type ="int", dest="verbose_level", help="Turn on narrative output at level VERBOSE_LEVEL") # Command specific options. We can specify a # configuration file to parse, which defaults to # stdin, and an output file name, which defaults # to stdout. 
self.p.add_option("--build", action="store_const", const=True, dest="run_build", help="Turns on build mode.") self.p.add_option("-b", action="store", type ="string", dest="build_dir", help="The build directory path.") self.p.add_option("--install", action="store_const", const=True, dest="run_install", help="Turns on install mode.") self.p.add_option("--prefix", action="store", type ="string", dest="install_prefix", help="Install Prefix") self.p.add_option("--kernel", action="store", type="string", dest="kernel_path", help="Path to the kernel against which you wish to compile.") self.p.add_option("--cbd", action="store", type="string", dest="cmake_binary_dir", help="CMake binary directory in which to look for libraries against which" " to compile.") # Now tell the parser about the default values of all the options # we just told it about self.p.set_defaults( debug_level = 2, verbose_level = 0, build_dir = None, run_build = False, run_install = False, install_prefix = None, kernel_path = None, cmake_binary_dir = None ) def parse(self): self.options, self.args = self.p.parse_args() self.debug_level = self.options.debug_level self.verbose_level = self.options.verbose_level self.build_dir = self.options.build_dir self.run_build = self.options.run_build self.run_install = self.options.run_install self.install_prefix = self.options.install_prefix self.kernel_path = self.options.kernel_path self.cmake_binary_dir = self.options.cmake_binary_dir # Output option details if debugging level is high enough if self.debug_level >= 3 : print print "Options: ", self.options print "Args: ", self.args # Defining this method defines the string representation of the # object when given as an argument to str() or the "print" command #cd def __str__(self): param_print_str = \ """Parameters: debug_level : %d verbose_level : %d run_build : %s build_dir : %s run_install : %s install_prefix : %s kernel_path : %s cmake_binary_dir : %s """ str_output = param_print_str % \ (self.debug_level, self.verbose_level, self.run_build, self.build_dir, self.run_install, self.install_prefix, self.kernel_path, self.cmake_binary_dir) return str_output def main(): # Global level params class instance was # created before calling main(). We make it # global so that other code can access the set # of Parameters, simply by accessing the Params # instance. Here, however, we call the parse() # method to actually get the arguments, since # we have been called from the command line. Params.parse() debug_level = Params.debug_level if Params.debug_level >= 2: print Params if not Params.cmake_binary_dir: # Forcing a check for the CMake binary directory. # This needs to be defined in order to find libraries # and other files within the build directory. #
## ## if not Params.kernel_path: ## # Forcing a check for the kernel path variable. ## # Obviously used when making something that needs to compile ## # specifically against the kernel. ## # ## print "Must define the kernel path --kernel=<kernel-path>" ## sys.exit(1) ## ################################################# if (Params.run_install and Params.run_build) or \ (not Params.run_install and not Params.run_build): # This should never run with both a build and an install requested, # nor with nothing to do, so reject the conflicting flag combinations # (True/True and False/False) and force an exit. # print "Conflicting arguments run_build and run_install. XOR." sys.exit(1) elif Params.run_build: # The user has specified to run a build # check to make sure they have specified a build directory. # if not Params.build_dir: # Build directory check failed. Exit. # print "Must define the build directory when running a build -b <build-dir>" sys.exit(1) # Reset sys.argv with the correct parameters for distutils to parse # giving it the options verbose, tell it to build, and the path to the # build directory. # sys.argv = [sys.argv[0], '-v', 'build', '-b', Params.build_dir] elif Params.run_install: # The user has specified to run an install. # check to make sure that the installation root prefix # has been defined. # if not Params.install_prefix: # Installation prefix is not defined, therefore there cannot # be an install, exit.
print "Must define the CMake binary directory --cbd=<build-dir>" sys.exit(1)
conditional_block
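The row above rejects the build/install flag combination unless exactly one mode is selected. Here is a hedged, minimal optparse sketch of the same check, using only the option names visible in the source; everything else is illustrative.

```python
# Minimal optparse sketch of the mode handling above: store_const flags with
# set_defaults(), parsed options copied onto the object, then the
# "exactly one of --build / --install" check.
import optparse
import sys

class Params(object):
    def __init__(self):
        self.p = optparse.OptionParser(usage="usage: %prog [options]")
        self.p.add_option("--build", action="store_const", const=True,
                          dest="run_build", help="Turns on build mode.")
        self.p.add_option("--install", action="store_const", const=True,
                          dest="run_install", help="Turns on install mode.")
        self.p.set_defaults(run_build=False, run_install=False)

    def parse(self, argv=None):
        options, _args = self.p.parse_args(argv)
        self.run_build = options.run_build
        self.run_install = options.run_install

params = Params()
params.parse(["--build"])
if params.run_build == params.run_install:
    # Both True or both False: nothing sensible to do, mirror the XOR check.
    sys.exit("Conflicting arguments run_build and run_install. XOR.")
```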
setup_groupsched.py
For example, you can specify extra directories (CMake Binary Dir, kernel path) and force those arguments to be defined. The wrapped version also takes care of distutils causing errors when extra arguments are passed straight to the setup_<module-name>.py scripts. """ ######################################################## # # All of the test modules will need the same options front # end, so this can be imported where one would normally # place the optparse template. # # def run_distutils(): """Executes the distutils code to build and install the pyccsm wrappers.""" from distutils.core import setup, Extension source_dir = os.getcwd() build_dir = Params.cmake_binary_dir gsched_module = Extension('_gsched', sources=[build_dir+'/gsched_wrap.c', build_dir+'/gsched.c'], include_dirs = [source_dir+'/include'], libraries = ['gsched'], extra_compile_args = ['-fPIC'], library_dirs = [build_dir, source_dir], ) # sdf_seq_module = Extension('_sdf_seq', # sources=[source_dir+'/libgsched/sdf_seq_wrap.c', # source_dir+'/libgsched/sdf_seq.c'], # include_dirs = [source_dir+'/include'], #) setup(name='pygsched', version='1.0', description="Group Scheduling Python API bindings.", url='http://www.ittc.ku.edu/kusp', package_dir={'': '.'}, include_dirs = [source_dir+'/include'], ext_modules = [gsched_module], # sdf_seq_module], py_modules = ['gschedapi'], scripts = ['tools/gschedctrl', 'tools/gschedexec', 'tools/gschedpprint', 'tools/gschedsnapshot', 'tools/gschedsh', ], packages=['pygsched','pygsched.gsconsole'] ) pass if __name__ == '__main__': # imports required if this module is called as a # command import optparse, sys, os from pprint import * # Define the set of permitted parameters, including the # command arguments. The initialization method creates # the parser and defines the defaults. The parse() # method actually parses the arguments one the command # line. This was done so that the instance of the class # could be global and thus available to all # routines. and then parse the arguments to this call # according to the specification class Params_Set:
dest="verbose_level", help="Turn on narrative output at level 1") self.p.add_option("-V", action="store", type ="int", dest="verbose_level", help="Turn on narrative output at level VERBOSE_LEVEL") # Command specific options. We can specify a # configuration file to parse, which defaults to # stdin, and an output file name, which defaults # to stdout. self.p.add_option("--build", action="store_const", const=True, dest="run_build", help="Turns on build mode.") self.p.add_option("-b", action="store", type ="string", dest="build_dir", help="The build directory path.") self.p.add_option("--install", action="store_const", const=True, dest="run_install", help="Turns on install mode.") self.p.add_option("--prefix", action="store", type ="string", dest="install_prefix", help="Install Prefix") self.p.add_option("--kernel", action="store", type="string", dest="kernel_path", help="Path to the kernel against which you wish to compile.") self.p.add_option("--cbd", action="store", type="string", dest="cmake_binary_dir", help="CMake binary directory in which to look for libraries against which" " to compile.") # Now tell the parser about the default values of all the options # we just told it about self.p.set_defaults( debug_level = 2, verbose_level = 0, build_dir = None, run_build = False, run_install = False, install_prefix = None, kernel_path = None, cmake_binary_dir = None ) def parse(self): self.options, self.args = self.p.parse_args() self.debug_level = self.options.debug_level self.verbose_level = self.options.verbose_level self.build_dir = self.options.build_dir self.run_build = self.options.run_build self.run_install = self.options.run_install self.install_prefix = self.options.install_prefix self.kernel_path = self.options.kernel_path self.cmake_binary_dir = self.options.cmake_binary_dir # Output option details if debugging level is high enough if self.debug_level >= 3 : print print "Options: ", self.options print "Args: ", self.args # Defining this method defines the string representation of the # object when given as an argument to str() or the "print" command #cd def __str__(self): param_print_str = \ """Parameters: debug_level : %d verbose_level : %d run_build : %s build_dir : %s run_install : %s install_prefix : %s kernel_path : %s cmake_binary_dir : %s """ str_output = param_print_str % \ (self.debug_level, self.verbose_level, self.run_build, self.build_dir, self.run_install, self.install_prefix, self.kernel_path, self.cmake_binary_dir) return str_output def main(): # Global level params class instance was # created before calling main(). We make it # global so that other code can access the set # of Parameters, simply by accessing the Params # instance. Here, however, we call the parse() # method to actually get the arguments, since # we have been called from the command line. Params.parse() debug_level = Params.debug_level if Params.debug_level >= 2: print Params if not Params.cmake_binary_dir: # Forcing a check for the CMake binary directory. # This needs to be defined in order to find libraries # and other files within the build directory. # print "Must define the CMake binary directory --cbd=<build-dir>" sys.exit(1) ## ## if not Params.kernel_path: ## # Forcing a check for the kernel path variable. ## # Obvisouly used when making something that needs to compile ## # specifically against the kernel. 
## # ## print "Must define the kernel path --kernel=<kernel-path>" ## sys.exit(1) ## ################################################# if (Params.run_install and Params.run_build) or \ (not Params.run_install and not Params.run_build): # This should never be run when both a build and an install, # nor should it be run with nothing to do. So force a check for # conflicting install and build arguments True True and False False # that will cause inappropriate function, and force an exit. # print "Conflicting arguments run_build and run_install. XOR." sys.exit(1) elif Params.run_build: # The user has specified to run a build # check to make sure they have specified a build directory. # if not Params.build_dir: # Build directory check failed. Exit. # print "Must define the build directory when running a build -b <build-dir>" sys.exit(1) # Reset sys.argv with the correct parameters for distutils to parse # giving it the options verbose, tell it to build, and the path to the # build directory. # sys.argv = [sys.argv[0], '-v', 'build', '-b', Params.build_dir] elif Params.run_install: # The user has specified to run an install. # check to make sure that the installation root prefix # has been defined. # if not Params.install_prefix: # Installation prefix is not defined, therefore there cannot # be an install, exit
USAGE = "usage: %prog [options]" def __init__(self): # Create the argument parser and then tell it # about the set of legal arguments for this # command. The parse() method of this class # calls parse_args of the optparse module self.p = optparse.OptionParser(usage=self.USAGE) # Boring and totally standard verbose and # debugging options that should be common to # virtually any command # self.p.add_option("-d", action="store_const", const=1, dest="debug_level", help="Turn on diagnostic output at level 1") self.p.add_option("-D", action="store", type ="int", dest="debug_level", help="Turn on diagnostic output at level DEBUG_LEVEL") self.p.add_option("-v", action="store_const", const=1,
identifier_body
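The option definitions above pair a lowercase store_const flag with an uppercase store variant that writes to the same dest. A tiny sketch of that convention, assuming nothing beyond what optparse itself provides:

```python
# The -d / -D convention above in isolation: the bare flag is a store_const
# that switches the level on at 1, the capitalized variant stores an explicit
# integer level into the same destination.
import optparse

p = optparse.OptionParser()
p.add_option("-d", action="store_const", const=1, dest="debug_level",
             help="Turn on diagnostic output at level 1")
p.add_option("-D", action="store", type="int", dest="debug_level",
             help="Turn on diagnostic output at level DEBUG_LEVEL")
p.set_defaults(debug_level=0)

opts, _ = p.parse_args(["-D", "3"])
print(opts.debug_level)   # 3
opts, _ = p.parse_args(["-d"])
print(opts.debug_level)   # 1
```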
fs.go
fs.flushBalloc(op, blockA) ino.NBytes = newLen // TODO: leaves the inode dirty, caller must flush // // we should be able to flush it if we knew its inode number, potentially // relying on absorption within the transaction return true } func (fs Fs) shrinkInode(op *awol.Op, ino *inode, newLen uint64) { if !(newLen <= ino.NBytes) { panic("shrinkInode requires a smaller length") } oldBlks := divUp(ino.NBytes, disk.BlockSize) newBlks := divUp(newLen, disk.BlockSize) blockA := fs.readBalloc() // newBlks <= oldBlks for b := newBlks; b <= oldBlks; b++ { oldB := ino.btoa(b) blockA.Free(oldB) } // TODO: same problem as in growInode of flushing the allocator fs.flushBalloc(op, blockA) ino.NBytes = newLen } func (fs Fs) lookupDir(dir *inode, name string) Inum { if dir.Kind != INODE_KIND_DIR { panic("lookup on non-dir inode") } // invariant: directories always have length a multiple of BlockSize blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } if de.Name == name { return de.I } } return 0 } func (fs Fs) findFreeDirEnt(op *awol.Op, dir *inode) (uint64, bool) { // invariant: directories always have length a multiple of BlockSize blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { return b, true } } // nothing free, allocate a new one ok := fs.growInode(op, dir, dir.NBytes+disk.BlockSize) if !ok { return 0, false } // return the newly-allocated index return blocks, true } // createLink creates a pointer name to i in the directory dir // // returns false if this fails (eg, due to allocation failure) func (fs Fs) createLink(op *awol.Op, dir *inode, name string, i Inum) bool { if dir.Kind != INODE_KIND_DIR { panic("create on non-dir inode") } fs.checkInode(i) b, ok := fs.findFreeDirEnt(op, dir) if !ok { fmt.Fprintln(os.Stderr, "createLink: no more space") return false } fs.inodeWrite(op, dir, b, encodeDirEnt(&DirEnt{ Valid: true, Name: name, I: i, })) return true } // removeLink removes the link from name in dir // // returns true if a link was removed, false if name was not found func (fs Fs) removeLink(op *awol.Op, dir *inode, name string) bool { if dir.Kind != INODE_KIND_DIR { panic("remove on non-dir inode") } blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } if de.Name == name { fs.inodeWrite(op, dir, b, encodeDirEnt(&DirEnt{ Valid: false, Name: "", I: 0, })) return true } } return false } func (fs Fs) isDirEmpty(dir *inode) bool { if dir.Kind != INODE_KIND_DIR { panic("remove on non-dir inode") } blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if de.Valid { return false } } return true } // readDirEntries reads all of the entries in dir // // NFS's readdir operation has a more sophisticated cookie/cookie verifier // mechanism for paging and reporting iterator invalidation. 
func (fs Fs) readDirEntries(dir *inode) []string { names := make([]string, 0) blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } names = append(names, de.Name) } return names } // file-system API func (fs Fs) RootInode() Inum { return fs.sb.rootInode } func (fs Fs) Lookup(i Inum, name string) Inum { dir := fs.getInode(i) return fs.lookupDir(dir, name) } func (fs Fs) GetAttr(i Inum) (Attr, bool) { ino := fs.getInode(i) if ino.Kind == INODE_KIND_FREE { return Attr{}, false } return Attr{IsDir: ino.Kind == INODE_KIND_DIR}, true } func (fs Fs) Create(dirI Inum, name string, unchecked bool) (Inum, bool) { op := fs.log.Begin() dir := fs.getInode(dirI) if dir.Kind != INODE_KIND_DIR { fmt.Fprintf(os.Stderr, "Create: %d is not a dir\n", dirI) return 0, false } existingI := fs.lookupDir(dir, name) if existingI != 0 { if unchecked { ino := fs.getInode(existingI) if ino.Kind == INODE_KIND_DIR { fmt.Fprintln(os.Stderr, "dir not empty") return 0, false } fs.removeLink(op, dir, name) } else { // checked, fail early return 0, false } } i, ino := fs.findFreeInode() if i == 0 { fmt.Fprintln(os.Stderr, "no space left") return 0, false } ok := fs.createLink(op, dir, name, i) if !ok { fmt.Fprintln(os.Stderr, "could not create link") return 0, false } fs.flushInode(op, dirI, dir) ino.Kind = INODE_KIND_FILE fs.flushInode(op, i, ino) fs.log.Commit(op) return i, true } func (fs Fs) Mkdir(dirI Inum, name string) (Inum, bool) { op := fs.log.Begin() dir := fs.getInode(dirI) if dir.Kind != INODE_KIND_DIR { fmt.Fprintf(os.Stderr, "Mkdir: %d is not a dir\n", dirI) return 0, false } i, ino := fs.findFreeInode() if i == 0 { return 0, false } ino.Kind = INODE_KIND_DIR ok := fs.createLink(op, dir, name, i) if !ok { return 0, false } fs.flushInode(op, dirI, dir) fs.flushInode(op, i, ino) fs.log.Commit(op) return i, true } func (fs Fs) Read(i Inum, off uint64, length uint64) ([]byte, bool) { ino := fs.getInode(i) if ino.Kind != INODE_KIND_FILE { return nil, false } if off+length > ino.NBytes { return nil, false } bs := make([]byte, 0, length) for boff := off / disk.BlockSize; length > 0; boff++ { b := fs.inodeRead(ino, boff) if off%disk.BlockSize != 0 { byteOff := off % disk.BlockSize b = b[byteOff:] } if length < uint64(len(b)) { b = b[:length] } bs = append(bs, b...) length -= uint64(len(b)) } return bs, true } func (fs Fs) Write(i Inum, off uint64, bs []byte) bool
{ op := fs.log.Begin() ino := fs.getInode(i) if ino.Kind != INODE_KIND_FILE { return false } for boff := off / disk.BlockSize; len(bs) > 0; boff++ { if off%disk.BlockSize != 0 { b := fs.inodeRead(ino, boff) byteOff := off % disk.BlockSize nBytes := disk.BlockSize - byteOff if uint64(len(bs)) < nBytes { nBytes = uint64(len(bs)) } for i := byteOff; i < nBytes; i++ { b[byteOff+i] = bs[i] } fs.inodeWrite(op, ino, boff, b) bs = bs[nBytes:] off += nBytes
identifier_body
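fs.go's Read above walks block offsets, trimming the first block to the byte offset and the last block to the remaining length. The Python restatement below is purely illustrative (BLOCK_SIZE and read_block are stand-ins for disk.BlockSize and fs.inodeRead); note that it advances off after each block so only the first block gets the head trim.

```python
# Illustrative Python restatement of the block read loop: BLOCK_SIZE and
# read_block(boff) are stand-ins for disk.BlockSize and fs.inodeRead.
BLOCK_SIZE = 4096

def read_bytes(read_block, off, length):
    """Return `length` bytes starting at byte offset `off`."""
    out = bytearray()
    boff = off // BLOCK_SIZE
    while length > 0:
        b = read_block(boff)
        if off % BLOCK_SIZE != 0:
            b = b[off % BLOCK_SIZE:]   # skip into the first, unaligned block
        if length < len(b):
            b = b[:length]             # trim the final block
        out += b
        length -= len(b)
        off += len(b)                  # offset is now block-aligned
        boff += 1
    return bytes(out)

blocks = [bytes([i]) * BLOCK_SIZE for i in range(3)]
data = read_bytes(lambda boff: blocks[boff], off=BLOCK_SIZE - 2, length=4)
print(list(data))   # [0, 0, 1, 1]
```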
fs.go
, due to inode size or running out of blocks) func (fs Fs) growInode(op *awol.Op, ino *inode, newLen uint64) bool { if !(ino.NBytes <= newLen) { panic("growInode requires a larger length") } oldBlks := divUp(ino.NBytes, disk.BlockSize) newBlks := divUp(newLen, disk.BlockSize) if newBlks > NumDirect { return false } blockA := fs.readBalloc() for b := oldBlks; b < newBlks; b++ { newB, ok := blockA.Alloc() if !ok { return false } ino.Direct[b] = newB } // TODO: it's brittle (and incorrect for concurrent allocation) that we've // modified the allocator only in the transaction; reading your own writes // would make this easier to implement, but I'm not sure it makes the // abstraction and invariants easier. fs.flushBalloc(op, blockA) ino.NBytes = newLen // TODO: leaves the inode dirty, caller must flush // // we should be able to flush it if we knew its inode number, potentially // relying on absorption within the transaction return true } func (fs Fs) shrinkInode(op *awol.Op, ino *inode, newLen uint64) { if !(newLen <= ino.NBytes) { panic("shrinkInode requires a smaller length") } oldBlks := divUp(ino.NBytes, disk.BlockSize) newBlks := divUp(newLen, disk.BlockSize) blockA := fs.readBalloc() // newBlks <= oldBlks for b := newBlks; b <= oldBlks; b++ { oldB := ino.btoa(b) blockA.Free(oldB) } // TODO: same problem as in growInode of flushing the allocator fs.flushBalloc(op, blockA) ino.NBytes = newLen } func (fs Fs) lookupDir(dir *inode, name string) Inum { if dir.Kind != INODE_KIND_DIR { panic("lookup on non-dir inode") } // invariant: directories always have length a multiple of BlockSize blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } if de.Name == name { return de.I } } return 0 } func (fs Fs) findFreeDirEnt(op *awol.Op, dir *inode) (uint64, bool) { // invariant: directories always have length a multiple of BlockSize blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { return b, true } } // nothing free, allocate a new one ok := fs.growInode(op, dir, dir.NBytes+disk.BlockSize) if !ok { return 0, false } // return the newly-allocated index return blocks, true } // createLink creates a pointer name to i in the directory dir // // returns false if this fails (eg, due to allocation failure) func (fs Fs) createLink(op *awol.Op, dir *inode, name string, i Inum) bool { if dir.Kind != INODE_KIND_DIR { panic("create on non-dir inode") } fs.checkInode(i) b, ok := fs.findFreeDirEnt(op, dir) if !ok { fmt.Fprintln(os.Stderr, "createLink: no more space") return false } fs.inodeWrite(op, dir, b, encodeDirEnt(&DirEnt{ Valid: true, Name: name, I: i, })) return true } // removeLink removes the link from name in dir // // returns true if a link was removed, false if name was not found func (fs Fs) removeLink(op *awol.Op, dir *inode, name string) bool { if dir.Kind != INODE_KIND_DIR { panic("remove on non-dir inode") } blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } if de.Name == name { fs.inodeWrite(op, dir, b, encodeDirEnt(&DirEnt{ Valid: false, Name: "", I: 0, })) return true } } return false } func (fs Fs) isDirEmpty(dir *inode) bool { if dir.Kind != INODE_KIND_DIR { panic("remove on non-dir inode") } blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if de.Valid { return false } 
} return true } // readDirEntries reads all of the entries in dir // // NFS's readdir operation has a more sophisticated cookie/cookie verifier // mechanism for paging and reporting iterator invalidation. func (fs Fs) readDirEntries(dir *inode) []string { names := make([]string, 0) blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } names = append(names, de.Name) } return names } // file-system API func (fs Fs) RootInode() Inum { return fs.sb.rootInode } func (fs Fs) Lookup(i Inum, name string) Inum { dir := fs.getInode(i) return fs.lookupDir(dir, name) } func (fs Fs) GetAttr(i Inum) (Attr, bool) { ino := fs.getInode(i) if ino.Kind == INODE_KIND_FREE { return Attr{}, false } return Attr{IsDir: ino.Kind == INODE_KIND_DIR}, true } func (fs Fs) Create(dirI Inum, name string, unchecked bool) (Inum, bool) { op := fs.log.Begin() dir := fs.getInode(dirI) if dir.Kind != INODE_KIND_DIR { fmt.Fprintf(os.Stderr, "Create: %d is not a dir\n", dirI) return 0, false } existingI := fs.lookupDir(dir, name) if existingI != 0 { if unchecked { ino := fs.getInode(existingI) if ino.Kind == INODE_KIND_DIR { fmt.Fprintln(os.Stderr, "dir not empty") return 0, false } fs.removeLink(op, dir, name) } else { // checked, fail early return 0, false } } i, ino := fs.findFreeInode() if i == 0 { fmt.Fprintln(os.Stderr, "no space left") return 0, false } ok := fs.createLink(op, dir, name, i) if !ok { fmt.Fprintln(os.Stderr, "could not create link") return 0, false } fs.flushInode(op, dirI, dir) ino.Kind = INODE_KIND_FILE fs.flushInode(op, i, ino) fs.log.Commit(op) return i, true } func (fs Fs) Mkdir(dirI Inum, name string) (Inum, bool) { op := fs.log.Begin() dir := fs.getInode(dirI) if dir.Kind != INODE_KIND_DIR { fmt.Fprintf(os.Stderr, "Mkdir: %d is not a dir\n", dirI) return 0, false } i, ino := fs.findFreeInode() if i == 0 { return 0, false } ino.Kind = INODE_KIND_DIR ok := fs.createLink(op, dir, name, i) if !ok { return 0, false } fs.flushInode(op, dirI, dir) fs.flushInode(op, i, ino) fs.log.Commit(op) return i, true } func (fs Fs) Read(i Inum, off uint64, length uint64) ([]byte, bool) { ino := fs.getInode(i) if ino.Kind != INODE_KIND_FILE { return nil, false } if off+length > ino.NBytes { return nil, false } bs := make([]byte, 0, length) for boff := off / disk.BlockSize; length > 0; boff++ { b := fs.inodeRead(ino, boff) if off%disk.BlockSize != 0 { byteOff := off % disk.BlockSize b = b[byteOff:] } if length < uint64(len(b))
{ b = b[:length] }
conditional_block
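growInode above rounds the requested byte length up to whole blocks with divUp and refuses to grow past the direct-pointer limit. A small sketch of that arithmetic, with assumed values standing in for the real block size and NumDirect:

```python
# The size arithmetic behind growInode: divUp rounds a byte length up to whole
# blocks, and growth fails once the block count exceeds the direct pointers.
# BLOCK_SIZE and NUM_DIRECT are assumed values, not the real superblock layout.
BLOCK_SIZE = 4096
NUM_DIRECT = 500

def div_up(n, d):
    return (n + d - 1) // d

def can_grow(old_len, new_len):
    assert old_len <= new_len, "growInode requires a larger length"
    return div_up(new_len, BLOCK_SIZE) <= NUM_DIRECT

print(div_up(1, BLOCK_SIZE))                     # 1: a single byte still needs a block
print(can_grow(0, NUM_DIRECT * BLOCK_SIZE))      # True: exactly at the direct limit
print(can_grow(0, NUM_DIRECT * BLOCK_SIZE + 1))  # False: would need an indirect block
```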
fs.go
sb *SuperBlock } func NewFs(log Log) Fs { sb := NewSuperBlock(uint64(log.Size())) blockA := balloc.Init(int(sb.NumBlockBitmaps)) op := log.Begin() op.Write(0, encodeSuperBlock(sb)) blockA.Flush(op, sb.blockAllocBase) log.Commit(op) op = log.Begin() op.Write(sb.inodeBase+(sb.rootInode-1), encodeInode(newInode(INODE_KIND_DIR))) log.Commit(op) freeInode := encodeInode(newInode(INODE_KIND_FREE)) for i := Inum(2); i < sb.numInodes; i++ { op := log.Begin() op.Write(sb.inodeBase+(i-1), freeInode) log.Commit(op) } return Fs{log: log, sb: sb} } func OpenFs(log Log) Fs { sb := decodeSuperBlock(log.Read(0)) return Fs{log: log, sb: sb} } func (fs Fs)
() balloc.Bitmap { bs := make([]disk.Block, fs.sb.NumBlockBitmaps) for i := 0; i < len(bs); i++ { bs[i] = fs.log.Read(fs.sb.blockAllocBase + uint64(i)) } return balloc.Open(bs) } func (fs Fs) flushBalloc(op *awol.Op, bm balloc.Bitmap) { bm.Flush(op, fs.sb.blockAllocBase) } // btoa translates an offset in an inode to a block number // // does not depend on the rest of the disk because there are no indirect blocks func (ino inode) btoa(boff uint64) Bnum { if boff >= uint64(len(ino.Direct)) { panic("invalid block offset") } return ino.Direct[boff] } func (fs Fs) inodeRead(ino *inode, boff uint64) disk.Block { return fs.log.Read(fs.sb.dataBase + ino.btoa(boff) - 1) } func (fs Fs) inodeWrite(op *awol.Op, ino *inode, boff uint64, b disk.Block) { op.Write(fs.sb.dataBase+ino.btoa(boff)-1, b) } func (fs Fs) checkInode(i Inum) { if i == 0 { panic("0 is an invalid inode number") } if i > fs.sb.numInodes { panic("invalid inode number") } } func (fs Fs) getInode(i Inum) *inode { fs.checkInode(i) b := fs.log.Read(fs.sb.inodeBase + (i - 1)) ino := new(inode) *ino = decodeInode(b) return ino } func (fs Fs) findFreeInode() (Inum, *inode) { for i := uint64(1); i <= fs.sb.numInodes; i++ { ino := fs.getInode(i) if ino.Kind == INODE_KIND_FREE { return i, ino } } return 0, nil } func (fs Fs) flushInode(op *awol.Op, i Inum, ino *inode) { op.Write(fs.sb.inodeBase+(i-1), encodeInode(*ino)) } // returns false if grow failed (eg, due to inode size or running out of blocks) func (fs Fs) growInode(op *awol.Op, ino *inode, newLen uint64) bool { if !(ino.NBytes <= newLen) { panic("growInode requires a larger length") } oldBlks := divUp(ino.NBytes, disk.BlockSize) newBlks := divUp(newLen, disk.BlockSize) if newBlks > NumDirect { return false } blockA := fs.readBalloc() for b := oldBlks; b < newBlks; b++ { newB, ok := blockA.Alloc() if !ok { return false } ino.Direct[b] = newB } // TODO: it's brittle (and incorrect for concurrent allocation) that we've // modified the allocator only in the transaction; reading your own writes // would make this easier to implement, but I'm not sure it makes the // abstraction and invariants easier. 
fs.flushBalloc(op, blockA) ino.NBytes = newLen // TODO: leaves the inode dirty, caller must flush // // we should be able to flush it if we knew its inode number, potentially // relying on absorption within the transaction return true } func (fs Fs) shrinkInode(op *awol.Op, ino *inode, newLen uint64) { if !(newLen <= ino.NBytes) { panic("shrinkInode requires a smaller length") } oldBlks := divUp(ino.NBytes, disk.BlockSize) newBlks := divUp(newLen, disk.BlockSize) blockA := fs.readBalloc() // newBlks <= oldBlks for b := newBlks; b <= oldBlks; b++ { oldB := ino.btoa(b) blockA.Free(oldB) } // TODO: same problem as in growInode of flushing the allocator fs.flushBalloc(op, blockA) ino.NBytes = newLen } func (fs Fs) lookupDir(dir *inode, name string) Inum { if dir.Kind != INODE_KIND_DIR { panic("lookup on non-dir inode") } // invariant: directories always have length a multiple of BlockSize blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } if de.Name == name { return de.I } } return 0 } func (fs Fs) findFreeDirEnt(op *awol.Op, dir *inode) (uint64, bool) { // invariant: directories always have length a multiple of BlockSize blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { return b, true } } // nothing free, allocate a new one ok := fs.growInode(op, dir, dir.NBytes+disk.BlockSize) if !ok { return 0, false } // return the newly-allocated index return blocks, true } // createLink creates a pointer name to i in the directory dir // // returns false if this fails (eg, due to allocation failure) func (fs Fs) createLink(op *awol.Op, dir *inode, name string, i Inum) bool { if dir.Kind != INODE_KIND_DIR { panic("create on non-dir inode") } fs.checkInode(i) b, ok := fs.findFreeDirEnt(op, dir) if !ok { fmt.Fprintln(os.Stderr, "createLink: no more space") return false } fs.inodeWrite(op, dir, b, encodeDirEnt(&DirEnt{ Valid: true, Name: name, I: i, })) return true } // removeLink removes the link from name in dir // // returns true if a link was removed, false if name was not found func (fs Fs) removeLink(op *awol.Op, dir *inode, name string) bool { if dir.Kind != INODE_KIND_DIR { panic("remove on non-dir inode") } blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } if de.Name == name { fs.inodeWrite(op, dir, b, encodeDirEnt(&DirEnt{ Valid: false, Name: "", I: 0, })) return true } } return false } func (fs Fs) isDirEmpty(dir *inode) bool { if dir.Kind != INODE_KIND_DIR { panic("remove on non-dir inode") } blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if de.Valid { return false } } return true } // readDirEntries reads all of the entries in dir // // NFS's readdir operation has a more sophisticated cookie/cookie verifier // mechanism for paging and reporting iterator invalidation. func (fs Fs) readDirEntries(dir *inode) []string { names := make([]string, 0) blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } names = append(names, de.Name) } return names } // file-system API func (fs Fs) RootInode() Inum { return fs.sb.rootInode } func (fs Fs) Lookup(i
readBalloc
identifier_name
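lookupDir and findFreeDirEnt above both scan a directory one block-sized entry at a time, treating an invalid entry as a free slot and inode number 0 as "not found". An illustrative in-memory version of that scan (DirEnt here is a stand-in for the encoded on-disk entry):

```python
# In-memory version of the directory scan: an invalid entry is a free slot,
# and inode number 0 means "not found". DirEnt stands in for the encoded
# block-sized on-disk entry.
from collections import namedtuple

DirEnt = namedtuple("DirEnt", "valid name inum")

def lookup_dir(entries, name):
    for de in entries:
        if de.valid and de.name == name:
            return de.inum
    return 0

def find_free_slot(entries):
    for i, de in enumerate(entries):
        if not de.valid:
            return i
    return None   # caller would grow the directory by one block

d = [DirEnt(True, "a.txt", 3), DirEnt(False, "", 0)]
assert lookup_dir(d, "a.txt") == 3 and lookup_dir(d, "b.txt") == 0
assert find_free_slot(d) == 1
```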
fs.go
sb *SuperBlock } func NewFs(log Log) Fs { sb := NewSuperBlock(uint64(log.Size())) blockA := balloc.Init(int(sb.NumBlockBitmaps)) op := log.Begin() op.Write(0, encodeSuperBlock(sb)) blockA.Flush(op, sb.blockAllocBase) log.Commit(op) op = log.Begin() op.Write(sb.inodeBase+(sb.rootInode-1), encodeInode(newInode(INODE_KIND_DIR))) log.Commit(op) freeInode := encodeInode(newInode(INODE_KIND_FREE)) for i := Inum(2); i < sb.numInodes; i++ { op := log.Begin() op.Write(sb.inodeBase+(i-1), freeInode) log.Commit(op) } return Fs{log: log, sb: sb} } func OpenFs(log Log) Fs { sb := decodeSuperBlock(log.Read(0)) return Fs{log: log, sb: sb} } func (fs Fs) readBalloc() balloc.Bitmap { bs := make([]disk.Block, fs.sb.NumBlockBitmaps) for i := 0; i < len(bs); i++ { bs[i] = fs.log.Read(fs.sb.blockAllocBase + uint64(i)) } return balloc.Open(bs) } func (fs Fs) flushBalloc(op *awol.Op, bm balloc.Bitmap) { bm.Flush(op, fs.sb.blockAllocBase) } // btoa translates an offset in an inode to a block number // // does not depend on the rest of the disk because there are no indirect blocks func (ino inode) btoa(boff uint64) Bnum { if boff >= uint64(len(ino.Direct)) { panic("invalid block offset") } return ino.Direct[boff] } func (fs Fs) inodeRead(ino *inode, boff uint64) disk.Block { return fs.log.Read(fs.sb.dataBase + ino.btoa(boff) - 1) } func (fs Fs) inodeWrite(op *awol.Op, ino *inode, boff uint64, b disk.Block) { op.Write(fs.sb.dataBase+ino.btoa(boff)-1, b) } func (fs Fs) checkInode(i Inum) { if i == 0 { panic("0 is an invalid inode number") } if i > fs.sb.numInodes { panic("invalid inode number") } } func (fs Fs) getInode(i Inum) *inode { fs.checkInode(i) b := fs.log.Read(fs.sb.inodeBase + (i - 1)) ino := new(inode) *ino = decodeInode(b) return ino } func (fs Fs) findFreeInode() (Inum, *inode) { for i := uint64(1); i <= fs.sb.numInodes; i++ { ino := fs.getInode(i) if ino.Kind == INODE_KIND_FREE { return i, ino } } return 0, nil } func (fs Fs) flushInode(op *awol.Op, i Inum, ino *inode) { op.Write(fs.sb.inodeBase+(i-1), encodeInode(*ino)) } // returns false if grow failed (eg, due to inode size or running out of blocks) func (fs Fs) growInode(op *awol.Op, ino *inode, newLen uint64) bool { if !(ino.NBytes <= newLen) { panic("growInode requires a larger length") } oldBlks := divUp(ino.NBytes, disk.BlockSize) newBlks := divUp(newLen, disk.BlockSize) if newBlks > NumDirect { return false } blockA := fs.readBalloc() for b := oldBlks; b < newBlks; b++ { newB, ok := blockA.Alloc() if !ok { return false } ino.Direct[b] = newB } // TODO: it's brittle (and incorrect for concurrent allocation) that we've // modified the allocator only in the transaction; reading your own writes // would make this easier to implement, but I'm not sure it makes the // abstraction and invariants easier. 
fs.flushBalloc(op, blockA) ino.NBytes = newLen // TODO: leaves the inode dirty, caller must flush // // we should be able to flush it if we knew its inode number, potentially // relying on absorption within the transaction return true } func (fs Fs) shrinkInode(op *awol.Op, ino *inode, newLen uint64) { if !(newLen <= ino.NBytes) { panic("shrinkInode requires a smaller length") } oldBlks := divUp(ino.NBytes, disk.BlockSize) newBlks := divUp(newLen, disk.BlockSize) blockA := fs.readBalloc() // newBlks <= oldBlks for b := newBlks; b <= oldBlks; b++ { oldB := ino.btoa(b) blockA.Free(oldB) } // TODO: same problem as in growInode of flushing the allocator fs.flushBalloc(op, blockA) ino.NBytes = newLen } func (fs Fs) lookupDir(dir *inode, name string) Inum { if dir.Kind != INODE_KIND_DIR { panic("lookup on non-dir inode") } // invariant: directories always have length a multiple of BlockSize blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } if de.Name == name { return de.I } } return 0 } func (fs Fs) findFreeDirEnt(op *awol.Op, dir *inode) (uint64, bool) { // invariant: directories always have length a multiple of BlockSize blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { return b, true } } // nothing free, allocate a new one ok := fs.growInode(op, dir, dir.NBytes+disk.BlockSize) if !ok { return 0, false } // return the newly-allocated index return blocks, true } // createLink creates a pointer name to i in the directory dir // // returns false if this fails (eg, due to allocation failure) func (fs Fs) createLink(op *awol.Op, dir *inode, name string, i Inum) bool { if dir.Kind != INODE_KIND_DIR { panic("create on non-dir inode") } fs.checkInode(i) b, ok := fs.findFreeDirEnt(op, dir) if !ok { fmt.Fprintln(os.Stderr, "createLink: no more space") return false } fs.inodeWrite(op, dir, b, encodeDirEnt(&DirEnt{ Valid: true, Name: name, I: i, })) return true } // removeLink removes the link from name in dir // // returns true if a link was removed, false if name was not found func (fs Fs) removeLink(op *awol.Op, dir *inode, name string) bool { if dir.Kind != INODE_KIND_DIR {
de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } if de.Name == name { fs.inodeWrite(op, dir, b, encodeDirEnt(&DirEnt{ Valid: false, Name: "", I: 0, })) return true } } return false } func (fs Fs) isDirEmpty(dir *inode) bool { if dir.Kind != INODE_KIND_DIR { panic("remove on non-dir inode") } blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if de.Valid { return false } } return true } // readDirEntries reads all of the entries in dir // // NFS's readdir operation has a more sophisticated cookie/cookie verifier // mechanism for paging and reporting iterator invalidation. func (fs Fs) readDirEntries(dir *inode) []string { names := make([]string, 0) blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ { de := decodeDirEnt(fs.inodeRead(dir, b)) if !de.Valid { continue } names = append(names, de.Name) } return names } // file-system API func (fs Fs) RootInode() Inum { return fs.sb.rootInode } func (fs Fs) Lookup(i In
panic("remove on non-dir inode") } blocks := dir.NBytes / disk.BlockSize for b := uint64(0); b < blocks; b++ {
random_line_split
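getInode and inodeRead above compute disk addresses as inodeBase+(i-1) and dataBase+btoa(boff)-1, since both inode and block numbers start at 1. A toy sketch of that addressing with made-up layout constants:

```python
# Toy model of the 1-based addressing: inode i lives at inode_base + (i - 1),
# and block offset boff maps through the inode's direct array to
# data_base + block_number - 1. The layout constants are made up.
class Inode:
    def __init__(self, direct):
        self.direct = direct          # direct block numbers, starting at 1

    def btoa(self, boff):
        if boff >= len(self.direct):
            raise IndexError("invalid block offset")
        return self.direct[boff]

INODE_BASE, DATA_BASE = 10, 100

def inode_disk_addr(i):
    return INODE_BASE + (i - 1)

def data_disk_addr(ino, boff):
    return DATA_BASE + ino.btoa(boff) - 1

root = Inode(direct=[1, 7])
print(inode_disk_addr(1))       # 10: the root inode sits at inode_base
print(data_disk_addr(root, 1))  # 106: its second block
```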
pop-AP-var-v0.1.1-20190805.py
nd) ### # read into the amount of aboriginal peoples' population # with open(datadir+'\\'+'population-sum-ETL.csv', 'r', encoding='utf-8', newline='') as csvfile: df = pd.read_csv( csvfile, header = 0, usecols = ['日期', '身分', '區域別', '總計', '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族', '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族', '拉阿魯哇族', '卡那卡那富族', '尚未申報'], verbose = True, skip_blank_lines = True, ) df = trim_all_cells(df) # trim whitespace from each cell in dataframe ### # 各縣市不分平地山地身分的人口總數(依照原住民族) # selecting the accumulated records(rows) in order to the needs of analysis # add the 'selected' column as the mark: True and null # drop the rows that are marked as 'null' (nan) # df.loc[(df.身分 == '不分平地山地'), 'selected'] = np.nan # mark all of the rows as null df.loc[(df.身分 != '不分平地山地'), 'selected'] = np.nan # mark all of the rows as null df.loc[((df.日期 == ybeg) | (df.日期 == yend)) & ( # filtered by the period of time ybeg~yend (df.區域別 == '新北市') | (df.區域別 == '臺北市') | (df.區域別 == '臺中市') | (df.區域別 == '臺南市') | (df.區域別 == '高雄市') | (df.區域別 == '桃園市') | # 桃園縣 promote as 桃園市 (df.區域別 == '宜蘭縣') | (df.區域別 == '桃園縣') | (df.區域別 == '新竹縣') | (df.區域別 == '苗栗縣') | (df.區域別 == '彰化縣') | (df.區域別 == '南投縣') | (df.區域別 == '雲林縣') | (df.區域別 == '嘉義縣') | (df.區域別 == '屏東縣') | (df.區域別 == '臺東縣') | (df.區域別 == '花蓮縣') | (df.區域別 == '澎湖縣') | (df.區域別 == '基隆市') | (df.區域別 == '新竹市') | (df.區域別 == '嘉義市') | (df.區域別 == '金門縣') | (df.區域別 == '連江縣') ), 'selected'] = True # 只取各區域的合計資料(row) df.dropna(subset=['selected'], inplace=True) # conduct dropping of the row that are marked as null df.drop(columns=['selected'], inplace=True) # remove the "selected' column df.reset_index(inplace=True) # let index be the sequence order ### # transform to array format applied for presenting the figures with profile # df1 = pd.DataFrame(columns=['日期區間', # indicate the statistic period of the populaton variation, start year-finish year '區域別', '總計', '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族', '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族', '拉阿魯哇族', '卡那卡那富族', '尚未申報']) # Notes: # 1. 桃園縣-桃園市 資料動應時間點產生的落差(與其它縣市),且舊桃園市(省轄市)在升格前資料未納入,升格後直接併入桃園市(直轄市) # 2. 
拉阿魯哇族、卡那卡那富族 正式產生資料時間點問題(未成立前直接被補植為0) # # mapping table for area areadict = {'新北市':0, '臺北市':1, '桃園縣':2, '桃園市':2, # align 桃園縣 with 桃園市 (數字計算錯誤-誤差來自於舊的桃園市-省轄市 和桃園縣市分開計算) '臺中市':3, '臺南市':4, '高雄市':5, '宜蘭縣':6, '新竹縣':7, '苗栗縣':8, '彰化縣':9, '南投縣':10, '雲林縣':11, '嘉義縣':12, '屏東縣':13, '臺東縣':14, '花蓮縣':15, '澎湖縣':16, '基隆市':17, '新竹市':18, '嘉義市':19, '金門縣':20, '連江縣':21} # construct a temporary dataframe for keeping the data of the year ybeg data = np.zeros((22,18), dtype=np.int) basedf = pd.DataFrame({'阿美族':data[:,0], '泰雅族':data[:,1], '排灣族':data[:,2], '布農族':data[:,3], '魯凱族':data[:,4], '卑南族':data[:,5], '鄒族':data[:,6], '賽夏族':data[:,7], '雅美族':data[:,8], '邵族':data[:,9], '噶瑪蘭族':data[:,10], '太魯閣族':data[:,11], '撒奇萊雅族':data[:,12], '賽德克族':data[:,13], '拉阿魯哇族':data[:,14], '卡那卡那富族':data[:,15], '尚未申報':data[:,16], '總計':data[:,17]}) ### # construct dataframe(df1) for keeping the transformed variation numbers # offset = 0 # the base point for the dataframe of df1 used as the statistical population variation base = 0 # the base point for the dataframe of df1 used as the transformed data for i in range(0, len(df.index)): if df.at[i, '日期'] == ybeg: offset = areadict[df.at[i, '區域別']] # indicate the row number corresponding to the sequence of areas (i.e., areadict) basedf.at[offset, '阿美族'] = df.at[i, '阿美族'] basedf.at[offset, '泰雅族'] = df.at[i, '泰雅族'] basedf.at[offset, '排灣族'] = df.at[i, '排灣族'] basedf.at[offset, '布農族'] = df.at[i, '布農族'] basedf.at[offset, '魯凱族'] = df.at[i, '魯凱族'] basedf.at[offset, '卑南族'] = df.at[i, '卑南族'] basedf.at[offset, '鄒族'] = df.at[i, '鄒族'] basedf.at[offset, '賽夏族'] = df.at[i, '賽夏族'] basedf.at[offset, '雅美族'] = df.at[i, '雅美族'] basedf.at[offset, '邵族'] = df.at[i, '邵族'] basedf.at[offset, '噶瑪蘭族'] = df.at[i, '噶瑪蘭族'] basedf.at[offset, '太魯閣族'] = df.at[i, '太魯閣族'] basedf.at[offset, '撒奇萊雅族'] = df.at[i, '撒奇萊雅族'] basedf.at[offset, '賽德克族'] = df.at[i, '賽德克族'] basedf.at[offset, '拉阿魯哇族'] = df.at[i, '拉阿魯哇族'] basedf.at[offset, '卡那卡那富族'] = df.at[i, '卡那卡那富族'] basedf.at[offset, '尚未申報'] = df.at[i, '尚未申報'] basedf.at[offset, '總計'] = df.at[i, '總計'] else: offset = areadict[df.at[i, '區域別']] # indicate the row number corresponding to the sequence of areas (i.e., areadict)] df1.at[base, '日期區間'] = invyeardict[ybeg
wrong doing of input process print(ybeg, '~', ye
conditional_block
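The script above canonicalizes both ROC and Gregorian year strings to a "YYYY-12-31" date and orders the two inputs so ybeg precedes yend. A minimal sketch of that normalization, reproducing only part of the year table:

```python
# The year normalization in isolation: ROC and Gregorian year strings share
# one table of canonical "YYYY-12-31" dates, and the two inputs are swapped if
# needed so ybeg <= yend. Only part of the table is reproduced here.
yeardict = {'2011': '2011-12-31', '100': '2011-12-31',
            '2013': '2013-12-31', '102': '2013-12-31'}

def normalize_period(year1, year2):
    d1, d2 = yeardict[year1], yeardict[year2]
    return (d2, d1) if d1 > d2 else (d1, d2)

print(normalize_period('102', '2011'))   # ('2011-12-31', '2013-12-31')
```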
pop-AP-var-v0.1.1-20190805.py
(df): # trim whitespace from ends of each value across all series in dataframe trim_strings = lambda x: x.strip() if isinstance(x, str) else x return df.applymap(trim_strings) ### # set output figure and input data directories # pathdir = '.\\figure' # directory of output folder if not os.path.isdir(pathdir): os.mkdir(pathdir) datadir = '.\\data' # directory of input data folder if not os.path.isdir(datadir): os.mkdir(datadir) ### # given the comparison period (from the year ybeg to the year yend ) # status quo: # 1. 輸入日期(年份:中華民國、西元,月份,日歷天數)都一致轉為只有西元年-12-31 # 2. 限定兩個年度期間: 起始年(ybeg)、結束年(yend),yend > ybeg # print('觀察比較原住民族人口變化:年度1 vs 年度2 (e.g., 2011, 2013)') year1 = input('年度1:') year2 = input('年度2:') yeardict = {'2011':'2011-12-31', '2013':'2013-12-31', '2014':'2014-12-31', '2018':'2018-12-31', '100':'2011-12-31', '102':'2013-12-31', '103':'2014-12-31', '107':'2018-12-31'} invyeardict = {'2011-12-31':'100', '2013-12-31':'102', '2014-12-31':'103', '2018-12-31':'107'} if yeardict[year1] > yeardict[year2]: ybeg = yeardict[year2] yend = yeardict[year1] else: ybeg = yeardict[year1] yend = yeardict[year2] # still needs to deal with wrong doing of input process print(ybeg, '~', yend) ### # read into the amount of aboriginal peoples' population # with open(datadir+'\\'+'population-sum-ETL.csv', 'r', encoding='utf-8', newline='') as csvfile: df = pd.read_csv( csvfile, header = 0, usecols = ['日期', '身分', '區域別', '總計', '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族', '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族', '拉阿魯哇族', '卡那卡那富族', '尚未申報'], verbose = True, skip_blank_lines = True, ) df = trim_all_cells(df) # trim whitespace from each cell in dataframe ### # 各縣市不分平地山地身分的人口總數(依照原住民族) # selecting the accumulated records(rows) in order to the needs of analysis # add the 'selected' column as the mark: True and null # drop the rows that are marked as 'null' (nan) # df.loc[(df.身分 == '不分平地山地'), 'selected'] = np.nan # mark all of the rows as null df.loc[(df.身分 != '不分平地山地'), 'selected'] = np.nan # mark all of the rows as null df.loc[((df.日期 == ybeg) | (df.日期 == yend)) & ( # filtered by the period of time ybeg~yend (df.區域別 == '新北市') | (df.區域別 == '臺北市') | (df.區域別 == '臺中市') | (df.區域別 == '臺南市') | (df.區域別 == '高雄市') | (df.區域別 == '桃園市') | # 桃園縣 promote as 桃園市 (df.區域別 == '宜蘭縣') | (df.區域別 == '桃園縣') | (df.區域別 == '新竹縣') | (df.區域別 == '苗栗縣') | (df.區域別 == '彰化縣') | (df.區域別 == '南投縣') | (df.區域別 == '雲林縣') | (df.區域別 == '嘉義縣') | (df.區域別 == '屏東縣') | (df.區域別 == '臺東縣') | (df.區域別 == '花蓮縣') | (df.區域別 == '澎湖縣') | (df.區域別 == '基隆市') | (df.區域別 == '新竹市') | (df.區域別 == '嘉義市') | (df.區域別 == '金門縣') | (df.區域別 == '連江縣') ), 'selected'] = True # 只取各區域的合計資料(row) df.dropna(subset=['selected'], inplace=True) # conduct dropping of the row that are marked as null df.drop(columns=['selected'], inplace=True) # remove the "selected' column df.reset_index(inplace=True) # let index be the sequence order ### # transform to array format applied for presenting the figures with profile # df1 = pd.DataFrame(columns=['日期區間', # indicate the statistic period of the populaton variation, start year-finish year '區域別', '總計', '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族', '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族', '拉阿魯哇族', '卡那卡那富族', '尚未申報']) # Notes: # 1. 桃園縣-桃園市 資料動應時間點產生的落差(與其它縣市),且舊桃園市(省轄市)在升格前資料未納入,升格後直接併入桃園市(直轄市) # 2. 
拉阿魯哇族、卡那卡那富族 正式產生資料時間點問題(未成立前直接被補植為0) # # mapping table for area areadict = {'新北市':0, '臺北市':1, '桃園縣':2, '桃園市':2, # align 桃園縣 with 桃園市 (數字計算錯誤-誤差來自於舊的桃園市-省轄市 和桃園縣市分開計算) '臺中市':3, '臺南市':4, '高雄市':5, '宜蘭縣':6, '新竹縣':7, '苗栗縣':8, '彰化縣':9, '南投縣':10, '雲林縣':11, '嘉義縣':12, '屏東縣':13, '臺東縣':14, '花蓮縣':15, '澎湖縣':16, '基隆市':17, '新竹市':18, '嘉義市':19, '金門縣':20, '連江縣':21} # construct a temporary dataframe for keeping the data of the year ybeg data = np.zeros((22,18), dtype=np.int) basedf = pd.DataFrame({'阿美族':data[:,0], '泰雅族':data[:,1], '排灣族':data[:,2], '布農族':data[:,3], '魯凱族':data[:,4], '卑南族':data[:,5], '鄒族':data[:,6], '賽夏族':data[:,7], '雅美族':data[:,8], '邵族':data[:,9], '噶瑪蘭族':data[:,10], '太魯閣族':data[:,11], '撒奇萊雅族':data[:,12], '賽德克族':data[:,13], '拉阿魯哇族':data[:,14], '卡那卡那富族':data[:,15], '尚未申報':data[:,16], '總計':data[:,17]}) ### # construct dataframe(df1) for keeping the transformed variation numbers # offset = 0 # the base point for the dataframe of df1
trim_all_cells
identifier_name
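trim_all_cells, whose body appears above, is small enough to show whole: applymap strips whitespace from string cells and leaves other values alone. A self-contained version on a toy frame (column names reuse those from the source):

```python
# Self-contained trim_all_cells: applymap strips whitespace from string cells
# and leaves non-string values untouched. The toy frame reuses column names
# from the source file.
import pandas as pd

def trim_all_cells(df):
    trim_strings = lambda x: x.strip() if isinstance(x, str) else x
    return df.applymap(trim_strings)

df = pd.DataFrame({'區域別': [' 臺北市 ', '花蓮縣'], '總計': [100, 200]})
print(trim_all_cells(df)['區域別'].tolist())   # ['臺北市', '花蓮縣']
```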
pop-AP-var-v0.1.1-20190805.py
df = pd.read_csv( csvfile, header = 0, usecols = ['日期', '身分', '區域別', '總計', '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族', '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族', '拉阿魯哇族', '卡那卡那富族', '尚未申報'], verbose = True, skip_blank_lines = True, ) df = trim_all_cells(df) # trim whitespace from each cell in dataframe ### # 各縣市不分平地山地身分的人口總數(依照原住民族) # selecting the accumulated records(rows) in order to the needs of analysis # add the 'selected' column as the mark: True and null # drop the rows that are marked as 'null' (nan) # df.loc[(df.身分 == '不分平地山地'), 'selected'] = np.nan # mark all of the rows as null df.loc[(df.身分 != '不分平地山地'), 'selected'] = np.nan # mark all of the rows as null df.loc[((df.日期 == ybeg) | (df.日期 == yend)) & ( # filtered by the period of time ybeg~yend (df.區域別 == '新北市') | (df.區域別 == '臺北市') | (df.區域別 == '臺中市') | (df.區域別 == '臺南市') | (df.區域別 == '高雄市') | (df.區域別 == '桃園市') | # 桃園縣 promote as 桃園市 (df.區域別 == '宜蘭縣') | (df.區域別 == '桃園縣') | (df.區域別 == '新竹縣') | (df.區域別 == '苗栗縣') | (df.區域別 == '彰化縣') | (df.區域別 == '南投縣') | (df.區域別 == '雲林縣') | (df.區域別 == '嘉義縣') | (df.區域別 == '屏東縣') | (df.區域別 == '臺東縣') | (df.區域別 == '花蓮縣') | (df.區域別 == '澎湖縣') | (df.區域別 == '基隆市') | (df.區域別 == '新竹市') | (df.區域別 == '嘉義市') | (df.區域別 == '金門縣') | (df.區域別 == '連江縣') ), 'selected'] = True # 只取各區域的合計資料(row) df.dropna(subset=['selected'], inplace=True) # conduct dropping of the row that are marked as null df.drop(columns=['selected'], inplace=True) # remove the "selected' column df.reset_index(inplace=True) # let index be the sequence order ### # transform to array format applied for presenting the figures with profile # df1 = pd.DataFrame(columns=['日期區間', # indicate the statistic period of the populaton variation, start year-finish year '區域別', '總計', '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族', '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族', '拉阿魯哇族', '卡那卡那富族', '尚未申報']) # Notes: # 1. 桃園縣-桃園市 資料動應時間點產生的落差(與其它縣市),且舊桃園市(省轄市)在升格前資料未納入,升格後直接併入桃園市(直轄市) # 2. 
拉阿魯哇族、卡那卡那富族 正式產生資料時間點問題(未成立前直接被補植為0) # # mapping table for area areadict = {'新北市':0, '臺北市':1, '桃園縣':2, '桃園市':2, # align 桃園縣 with 桃園市 (數字計算錯誤-誤差來自於舊的桃園市-省轄市 和桃園縣市分開計算) '臺中市':3, '臺南市':4, '高雄市':5, '宜蘭縣':6, '新竹縣':7, '苗栗縣':8, '彰化縣':9, '南投縣':10, '雲林縣':11, '嘉義縣':12, '屏東縣':13, '臺東縣':14, '花蓮縣':15, '澎湖縣':16, '基隆市':17, '新竹市':18, '嘉義市':19, '金門縣':20, '連江縣':21} # construct a temporary dataframe for keeping the data of the year ybeg data = np.zeros((22,18), dtype=np.int) basedf = pd.DataFrame({'阿美族':data[:,0], '泰雅族':data[:,1], '排灣族':data[:,2], '布農族':data[:,3], '魯凱族':data[:,4], '卑南族':data[:,5], '鄒族':data[:,6], '賽夏族':data[:,7], '雅美族':data[:,8], '邵族':data[:,9], '噶瑪蘭族':data[:,10], '太魯閣族':data[:,11], '撒奇萊雅族':data[:,12], '賽德克族':data[:,13], '拉阿魯哇族':data[:,14], '卡那卡那富族':data[:,15], '尚未申報':data[:,16], '總計':data[:,17]}) ### # construct dataframe(df1) for keeping the transformed variation numbers # offset = 0 # the base point for the dataframe of df1 used as the statistical population variation base = 0 # the base point for the dataframe of df1 used as the transformed data for i in range(0, len(df.index)): if df.at[i, '日期'] == ybeg: offset = areadict[df.at[i, '區域別']] # indicate the row number corresponding to the sequence of areas (i.e., areadict) basedf.at[offset, '阿美族'] = df.at[i, '阿美族'] basedf.at[offset, '泰雅族'] = df.at[i, '泰雅族'] basedf.at[offset, '排灣族'] = df.at[i, '排灣族'] basedf.at[offset, '布農族'] = df.at[i, '布農族'] basedf.at[offset, '魯凱族'] = df.at[i, '魯凱族'] basedf.at[offset, '卑南族'] = df.at[i, '卑南族'] basedf.at[offset, '鄒族'] = df.at[i, '鄒族'] basedf.at[offset, '賽夏族'] = df.at[i, '賽夏族'] basedf.at[offset, '雅美族'] = df.at[i, '雅美族'] basedf.at[offset, '邵族'] = df.at[i, '邵族'] basedf.at[offset, '噶瑪蘭族'] = df.at[i, '噶瑪蘭族'] basedf.at[offset, '太魯閣族'] = df.at[i, '太魯閣族'] basedf.at[offset, '撒奇萊雅族'] = df.at[i, '撒奇萊雅族'] basedf.at[offset, '賽德克族'] = df.at[i, '賽德克族'] basedf.at[offset, '拉阿魯哇族'] = df.at[i, '拉阿魯哇族'] basedf.at[offset, '卡那卡那富族'] = df.at[i, '卡那卡那富族'] basedf.at[offset, '尚未申報'] = df.at[i, '尚未申報'] basedf.at[offset, '總計'] = df.at[i, '總計'] else: offset = areadict[df.at[i, '區域別']] # indicate the row number corresponding to the sequence of areas (i.e., areadict)] df1.at[base, '日期區間'] = invyeardict[ybeg]+'-'+invyeardict[yend] df1.at[base, '區域別'] = df.at[i, '區域別']
with open(datadir+'\\'+'population-sum-ETL.csv', 'r', encoding='utf-8', newline='') as csvfile:
random_line_split
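The selection logic above marks wanted rows in a temporary selected column, drops every row still left at NaN, and then removes the marker. A reduced sketch of that mark-and-drop pattern on a toy frame:

```python
# Reduced mark-and-drop sketch: matching rows get selected=True, rows left at
# NaN are dropped, then the marker column is removed. The toy frame keeps only
# three of the census columns.
import numpy as np
import pandas as pd

df = pd.DataFrame({'日期': ['2011-12-31', '2013-12-31', '2013-12-31'],
                   '身分': ['不分平地山地', '不分平地山地', '平地'],
                   '總計': [10, 12, 5]})

df['selected'] = np.nan
df.loc[(df.身分 == '不分平地山地') & (df.日期 == '2013-12-31'), 'selected'] = True
df.dropna(subset=['selected'], inplace=True)
df.drop(columns=['selected'], inplace=True)
df.reset_index(inplace=True)
print(df['總計'].tolist())   # [12]
```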
pop-AP-var-v0.1.1-20190805.py
### # set output figure and input data directories # pathdir = '.\\figure' # directory of output folder if not os.path.isdir(pathdir): os.mkdir(pathdir) datadir = '.\\data' # directory of input data folder if not os.path.isdir(datadir): os.mkdir(datadir) ### # given the comparison period (from the year ybeg to the year yend ) # status quo: # 1. 輸入日期(年份:中華民國、西元,月份,日歷天數)都一致轉為只有西元年-12-31 # 2. 限定兩個年度期間: 起始年(ybeg)、結束年(yend),yend > ybeg # print('觀察比較原住民族人口變化:年度1 vs 年度2 (e.g., 2011, 2013)') year1 = input('年度1:') year2 = input('年度2:') yeardict = {'2011':'2011-12-31', '2013':'2013-12-31', '2014':'2014-12-31', '2018':'2018-12-31', '100':'2011-12-31', '102':'2013-12-31', '103':'2014-12-31', '107':'2018-12-31'} invyeardict = {'2011-12-31':'100', '2013-12-31':'102', '2014-12-31':'103', '2018-12-31':'107'} if yeardict[year1] > yeardict[year2]: ybeg = yeardict[year2] yend = yeardict[year1] else: ybeg = yeardict[year1] yend = yeardict[year2] # still needs to deal with wrong doing of input process print(ybeg, '~', yend) ### # read into the amount of aboriginal peoples' population # with open(datadir+'\\'+'population-sum-ETL.csv', 'r', encoding='utf-8', newline='') as csvfile: df = pd.read_csv( csvfile, header = 0, usecols = ['日期', '身分', '區域別', '總計', '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族', '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族', '拉阿魯哇族', '卡那卡那富族', '尚未申報'], verbose = True, skip_blank_lines = True, ) df = trim_all_cells(df) # trim whitespace from each cell in dataframe ### # 各縣市不分平地山地身分的人口總數(依照原住民族) # selecting the accumulated records(rows) in order to the needs of analysis # add the 'selected' column as the mark: True and null # drop the rows that are marked as 'null' (nan) # df.loc[(df.身分 == '不分平地山地'), 'selected'] = np.nan # mark all of the rows as null df.loc[(df.身分 != '不分平地山地'), 'selected'] = np.nan # mark all of the rows as null df.loc[((df.日期 == ybeg) | (df.日期 == yend)) & ( # filtered by the period of time ybeg~yend (df.區域別 == '新北市') | (df.區域別 == '臺北市') | (df.區域別 == '臺中市') | (df.區域別 == '臺南市') | (df.區域別 == '高雄市') | (df.區域別 == '桃園市') | # 桃園縣 promote as 桃園市 (df.區域別 == '宜蘭縣') | (df.區域別 == '桃園縣') | (df.區域別 == '新竹縣') | (df.區域別 == '苗栗縣') | (df.區域別 == '彰化縣') | (df.區域別 == '南投縣') | (df.區域別 == '雲林縣') | (df.區域別 == '嘉義縣') | (df.區域別 == '屏東縣') | (df.區域別 == '臺東縣') | (df.區域別 == '花蓮縣') | (df.區域別 == '澎湖縣') | (df.區域別 == '基隆市') | (df.區域別 == '新竹市') | (df.區域別 == '嘉義市') | (df.區域別 == '金門縣') | (df.區域別 == '連江縣') ), 'selected'] = True # 只取各區域的合計資料(row) df.dropna(subset=['selected'], inplace=True) # conduct dropping of the row that are marked as null df.drop(columns=['selected'], inplace=True) # remove the "selected' column df.reset_index(inplace=True) # let index be the sequence order ### # transform to array format applied for presenting the figures with profile # df1 = pd.DataFrame(columns=['日期區間', # indicate the statistic period of the populaton variation, start year-finish year '區域別', '總計', '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族', '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族', '拉阿魯哇族', '卡那卡那富族', '尚未申報']) # Notes: # 1. 桃園縣-桃園市 資料動應時間點產生的落差(與其它縣市),且舊桃園市(省轄市)在升格前資料未納入,升格後直接併入桃園市(直轄市) # 2. 
拉阿魯哇族、卡那卡那富族 正式產生資料時間點問題(未成立前直接被補植為0) # # mapping table for area areadict = {'新北市':0, '臺北市':1, '桃園縣':2, '桃園市':2, # align 桃園縣 with 桃園市 (數字計算錯誤-誤差來自於舊的桃園市-省轄市 和桃園縣市分開計算) '臺中市':3, '臺南市':4, '高雄市':5, '宜蘭縣':6, '新竹縣':7, '苗栗縣':8, '彰化縣':9, '南投縣':10, '雲林縣':11, '嘉義縣':12, '屏東縣':13, '臺東縣':14, '花蓮縣':15, '澎湖縣':16, '基隆市':17, '新竹市':18, '嘉義市':19, '金門縣':20, '連江縣':21} # construct a temporary dataframe for keeping the data of the year ybeg data = np.zeros((22,18), dtype=np.int) basedf = pd.DataFrame({'阿美族':data[:,0], '泰雅族':data[:,1], '排灣族':data[:,2], '布農族':data[:,3], '魯凱族':data[:,4], '卑南族':data[:,5], '鄒族':data[:,6], '賽夏族':data[:,7], '雅美族':data[:,8], '邵族':data[:,9], '噶瑪蘭族':data[:,10], '太魯閣族':data[:,11], '撒奇萊雅族':data[:,12], '賽德克族':data[:,13], '拉阿魯哇族':data[:,14], '卡那卡那富族':data[:,15], '尚未申報':data[:,16], '總計':data[:,17]}) ### # construct dataframe(df1) for keeping the transformed variation numbers # offset = 0 # the base point for the dataframe of df1 used as the statistical population variation base = 0 # the base point for the dataframe of
trim_strings = lambda x: x.strip() if isinstance(x, str) else x return df.applymap(trim_strings)
identifier_body
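The baseline frame above is indexed through areadict, where 桃園縣 and 桃園市 deliberately share row 2 so the renamed county lands in the same slot; a later record simply overwrites the earlier one, which is part of the caveat the source notes about that merge. A compact illustration:

```python
# The areadict row mapping in isolation: 桃園縣 and 桃園市 share slot 2, so a
# record for the renamed county overwrites the earlier one in the baseline
# frame.
import numpy as np
import pandas as pd

areadict = {'新北市': 0, '臺北市': 1, '桃園縣': 2, '桃園市': 2}
basedf = pd.DataFrame({'總計': np.zeros(3, dtype=int)})

for area, total in [('新北市', 55000), ('桃園縣', 70000), ('桃園市', 72000)]:
    basedf.at[areadict[area], '總計'] = total

print(basedf['總計'].tolist())   # [55000, 0, 72000]
```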
viastitching_dialog.py
return False for edge in self.board_edges: if edge.ShowShape() == 'Line': the_distance, _ = pnt2line(p1, edge.GetStart(), edge.GetEnd()) if the_distance <= clearance + via.GetWidth() / 2: return False if edge.ShowShape() == 'Arc': # distance from center of Arc and with angle within Arc angle should be outside Arc radius +- clearance + via Width/2 center = edge.GetPosition() start = edge.GetStart() end = edge.GetEnd() radius = norm(center - end) dist = norm(p1 - center) if radius - (self.clearance + via.GetWidth() / 2) < dist < radius + ( self.clearance + via.GetWidth() / 2): # via is in range need to check the angle start_angle = math.atan2((start - center).y, (start - center).x) end_angle = math.atan2((end - center).y, (end - center).x) if end_angle < start_angle: end_angle += 2 * math.pi point_angle = math.atan2((p1 - center).y, (p1 - center).x) if start_angle <= point_angle <= end_angle: return False return True def CheckOverlap(self, via): """Check if via overlaps or interfere with other items on the board. Parameters: via (pcbnew.VIA): Via to be checked Returns: bool: True if via overlaps with an item, False otherwise. """ for item in self.overlappings: if type(item) is pcbnew.PAD: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) is pcbnew.PCB_VIA: # Overlapping with vias work best if checking is performed by intersection if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) in [pcbnew.ZONE, pcbnew.FP_ZONE]: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) is pcbnew.PCB_TRACK: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): width = item.GetWidth() dist, _ = pnt2line(via.GetPosition(), item.GetStart(), item.GetEnd()) if dist <= self.clearance + width // 2 + via.GetWidth() / 2: return True return False def FillupArea(self): """Fills selected area with vias.""" drillsize = self.FromUserUnit(float(self.m_txtViaDrillSize.GetValue())) viasize = self.FromUserUnit(float(self.m_txtViaSize.GetValue())) step_x = self.FromUserUnit(float(self.m_txtHSpacing.GetValue())) step_y = self.FromUserUnit(float(self.m_txtVSpacing.GetValue())) clearance = self.FromUserUnit(float(self.m_txtClearance.GetValue())) self.randomize = self.m_chkRandomize.GetValue() self.clearance = clearance bbox = self.area.GetBoundingBox() top = bbox.GetTop() bottom = bbox.GetBottom() right = bbox.GetRight() left = bbox.GetLeft() netname = self.m_cbNet.GetStringSelection() netcode = self.board.GetNetcodeFromNetname(netname) # commit = pcbnew.COMMIT() viacount = 0 x = left # Cycle trough area bounding box checking and implanting vias layer = self.area.GetLayer() while x <= right: y = top while y <= bottom: if self.randomize: xp = x + random.uniform(-1, 1) * step_x / 5 yp = y + random.uniform(-1, 1) * step_y / 5 else: xp = x yp = y if hasattr(pcbnew, 'VECTOR2I'): p = pcbnew.VECTOR2I(xp, yp) else: if(hasattr(pcbnew, 'wxPoint')): p = pcbnew.wxPoint(xp, yp) if self.area.HitTestFilledArea(layer, p, 0): via = pcbnew.PCB_VIA(self.board) via.SetPosition(p) via.SetLayer(layer) via.SetNetCode(netcode) # Set up via with clearance added to its size-> bounding box check will be OK in worst case, may be too conservative, but additional checks are possible if needed # TODO: possibly take the clearance from the PCB settings instead of the dialog # Clearance is all around -> *2 via.SetDrill(drillsize + 2 * clearance) via.SetWidth(viasize + 2 * clearance) # via.SetTimeStamp(__timecode__) if not self.CheckOverlap(via): # Check 
clearance only if clearance value differs from 0 (disabled) if (clearance == 0) or self.CheckClearance(via, self.area, clearance): via.SetWidth(viasize) via.SetDrill(drillsize) self.board.Add(via) # commit.Add(via) self.pcb_group.AddItem(via) viacount += 1 y += step_y x += step_x if viacount > 0: wx.MessageBox(_(u"Implanted: %d vias!") % viacount) # commit.Push() pcbnew.Refresh() else: wx.MessageBox(_(u"No vias implanted!")) def onProcessAction(self, event): """Manage main button (Ok) click event.""" zone_name = self.area.GetZoneName() if zone_name == "": for i in range(1000): candidate_name = f"stitch_zone_{i}" if candidate_name not in self.config.keys(): zone_name = candidate_name break else: wx.LogError("Tried 1000 different names and all were taken. Please give a name to the zone.") self.Destroy() self.area.SetZoneName(zone_name) config = { "HSpacing": self.m_txtHSpacing.GetValue(), "VSpacing": self.m_txtVSpacing.GetValue(), "Clearance": self.m_txtClearance.GetValue(), "Randomize": self.m_chkRandomize.GetValue()} if self.config_textbox == None: self.config = {__plugin_name__: __version__ } title_block = pcbnew.PCB_TEXT(self.board) title_block.SetLayer(self.config_layer) if hasattr(pcbnew, 'GR_TEXT_HJUSTIFY_LEFT'): title_block.SetHorizJustify(pcbnew.GR_TEXT_HJUSTIFY_LEFT) else: if hasattr(pcbnew, 'GR_TEXT_H_ALIGN_LEFT'): title_block.SetHorizJustify(pcbnew.GR_TEXT_H_ALIGN_LEFT) if hasattr(pcbnew, 'GR_TEXT_VJUSTIFY_TOP'): title_block.SetVertJustify(pcbnew.GR_TEXT_VJUSTIFY_TOP) else: if hasattr(pcbnew, 'GR_TEXT_V_ALIGN_TOP'): title_block.SetVertJustify(pcbnew.GR_TEXT_V_ALIGN_TOP) title_block.SetVisible(False) self.config_textbox = title_block self.board.Add(title_block) self.config[zone_name] = config self.config_textbox.SetText(json.dumps(self.config, indent=2)) # Get overlapping items self.GetOverlappingItems() # Search trough groups for group in self.board.Groups(): if group.GetName() == self.viagroupname: self.pcb_group = group if self.pcb_group is None: self.pcb_group = pcbnew.PCB_GROUP(None) self.pcb_group.SetName(self.viagroupname) self.board.Add(self.pcb_group) self.FillupArea() self.Destroy() def onClearAction(self, event): """Manage clear vias button (Clear) click event.""" self.ClearArea() self.Destroy() def onCloseWindow(self, event): """Manage Close button click event.""" self.Destroy() def GetStandardLayerName(self, layerid): if hasattr(pcbnew, 'BOARD_GetStandardLayerName'): layer_name = pcbnew.BOARD_GetStandardLayerName(layerid) else: layer_name = self.board.GetStandardLayerName(layerid) return layer_name def getConfigLayer(self): self.config_layer = 0 user_layer = 0 for i in range(pcbnew.PCBNEW_LAYER_ID_START, pcbnew.PCBNEW_LAYER_ID_START + pcbnew.PCB_LAYER_ID_COUNT): if __plugin_config_layer_name__ == self.GetStandardLayerName(i): self.config_layer = i break if "User.9" == self.GetStandardLayerName(i): user_layer = i else: self.config_layer = user_layer self.board.SetLayerName(self.config_layer, __plugin_config_layer_name__) def InitViaStitchingDialog(board): """Initalize dialog.""" dlg = ViaStitchingDialog(board) dlg.Show(True) return dlg class aVector(): def __init__(self, point: [pcbnew.wxPoint, list]): if isinstance(point, pcbnew.wxPoint):
self.x = float(point.x)
            self.y = float(point.y)
conditional_block
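The clearance and edge checks in the row above lean on a pnt2line helper for point-to-segment distance and norm for vector length. A minimal, self-contained sketch of that point-to-segment computation, assuming plain (x, y) tuples rather than pcbnew's wxPoint/VECTOR2I; the function name and return shape are illustrative, not the plugin's exact helper:

import math

def point_to_segment_distance(p, a, b):
    """Distance from point p to segment a-b, plus the nearest point on it."""
    ax, ay = a
    bx, by = b
    px, py = p
    dx, dy = bx - ax, by - ay
    seg_len_sq = dx * dx + dy * dy
    if seg_len_sq == 0:
        # Degenerate segment: fall back to point-to-point distance.
        return math.hypot(px - ax, py - ay), a
    # Project p onto the segment and clamp the parameter to [0, 1].
    t = max(0.0, min(1.0, ((px - ax) * dx + (py - ay) * dy) / seg_len_sq))
    nx, ny = ax + t * dx, ay + t * dy
    return math.hypot(px - nx, py - ny), (nx, ny)

# A via 1 unit away from a horizontal board edge:
dist, nearest = point_to_segment_distance((5.0, 1.0), (0.0, 0.0), (10.0, 0.0))
print(dist, nearest)  # 1.0 (5.0, 0.0)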
viastitching_dialog.py
Clearance", "0")) self.m_chkRandomize.SetValue(defaults.get("Randomize", False)) # Get default Vias dimensions via_dim_list = self.board.GetViasDimensionsList() if via_dim_list: via_dims = via_dim_list.pop() else: wx.MessageBox(_(u"Please set via drill/size in board")) self.Destroy() self.m_txtViaSize.SetValue("%.6f" % self.ToUserUnit(via_dims.m_Diameter)) self.m_txtViaDrillSize.SetValue("%.6f" % self.ToUserUnit(via_dims.m_Drill)) via_dim_list.push_back(via_dims) self.overlappings = None def
(self): """Collect overlapping items. Every bounding box of any item found is a candidate to be inspected for overlapping. """ area_bbox = self.area.GetBoundingBox() if hasattr(self.board, 'GetModules'): modules = self.board.GetModules() else: modules = self.board.GetFootprints() tracks = self.board.GetTracks() self.overlappings = [] for zone in self.board.Zones(): if zone.GetZoneName() != self.area.GetZoneName(): if zone.GetBoundingBox().Intersects(area_bbox): self.overlappings.append(zone) for item in tracks: if (type(item) is pcbnew.PCB_VIA) and (item.GetBoundingBox().Intersects(area_bbox)): self.overlappings.append(item) if type(item) is pcbnew.PCB_TRACK: self.overlappings.append(item) for item in modules: if item.GetBoundingBox().Intersects(area_bbox): for pad in item.Pads(): self.overlappings.append(pad) for zone in item.Zones(): self.overlappings.append(zone) # TODO: change algorithm to 'If one of the candidate area's edges overlaps with target area declare candidate as overlapping' for i in range(0, self.board.GetAreaCount()): item = self.board.GetArea(i) if item.GetBoundingBox().Intersects(area_bbox): if item.GetNetname() != self.net: self.overlappings.append(item) def GetAreaConfig(self): """Check selected area (if any) and verify if it is a valid container for vias. Returns: bool: Returns True if an area/zone is selected and matches the implant criteria, False otherwise. """ for i in range(0, self.board.GetAreaCount()): area = self.board.GetArea(i) if area.IsSelected(): if not area.IsOnCopperLayer(): return False elif area.GetDoNotAllowCopperPour(): return False self.area = area self.net = area.GetNetname() return True return False def PopulateNets(self): """Populate nets widget.""" nets = self.board.GetNetsByName() # Tricky loop, the iterator should return two values, unluckly I'm not able to use the # first value of the couple so I'm recycling it as netname. for netname, net in nets.items(): netname = net.GetNetname() if (netname != None) and (netname != ""): self.m_cbNet.Append(netname) # Select the net used by area (if any) if self.net != None: index = self.m_cbNet.FindString(self.net) self.m_cbNet.Select(index) def ClearArea(self): """Clear selected area.""" undo = self.m_chkClearOwn.IsChecked() drillsize = self.FromUserUnit(float(self.m_txtViaDrillSize.GetValue())) viasize = self.FromUserUnit(float(self.m_txtViaSize.GetValue())) netname = self.m_cbNet.GetStringSelection() netcode = self.board.GetNetcodeFromNetname(netname) #commit = pcbnew.COMMIT() viacount = 0 for item in self.board.GetTracks(): if type(item) is pcbnew.PCB_VIA: # If the user selected the Undo action only signed/grouped vias are removed, # otherwise are removed vias matching values set in the dialog. # if undo and (item.GetTimeStamp() == __timecode__): if undo and (self.pcb_group is not None): group = item.GetParentGroup() if (group is not None and group.GetName() == self.viagroupname): self.board.Remove(item) viacount += 1 # commit.Remove(item) elif (not undo) and self.area.HitTestFilledArea(self.area.GetLayer(), item.GetPosition(), 0) and ( item.GetDrillValue() == drillsize) and (item.GetWidth() == viasize) and ( item.GetNetname() == netname): self.board.Remove(item) self.pcb_group.RemoveItem(item) viacount += 1 # commit.Remove(item) if viacount > 0: wx.MessageBox(_(u"Removed: %d vias!") % viacount) #commit.Push() pcbnew.Refresh() def CheckClearance(self, via, area, clearance): """Check if position specified by p1 comply with given clearance in area. 
Parameters: p1 (wxPoint): Position to test area (pcbnew.ZONE_CONTAINER): Area clearance (int): Clearance value Returns: bool: True if p1 position comply with clearance value False otherwise. """ p1 = via.GetPosition() corners = area.GetNumCorners() # Calculate minimum distance from corners # TODO: remove? for i in range(corners): corner = area.GetCornerPosition(i) p2 = corner.getWxPoint() the_distance = norm(p2 - p1) # sqrt((p2.x - p1.x) ** 2 + (p2.y - p1.y) ** 2) if the_distance < clearance: return False for i in range(corners): corner1 = area.GetCornerPosition(i) corner2 = area.GetCornerPosition((i + 1) % corners) pc1 = corner1.getWxPoint() pc2 = corner2.getWxPoint() the_distance, _ = pnt2line(p1, pc1, pc2) if the_distance <= clearance: return False for edge in self.board_edges: if edge.ShowShape() == 'Line': the_distance, _ = pnt2line(p1, edge.GetStart(), edge.GetEnd()) if the_distance <= clearance + via.GetWidth() / 2: return False if edge.ShowShape() == 'Arc': # distance from center of Arc and with angle within Arc angle should be outside Arc radius +- clearance + via Width/2 center = edge.GetPosition() start = edge.GetStart() end = edge.GetEnd() radius = norm(center - end) dist = norm(p1 - center) if radius - (self.clearance + via.GetWidth() / 2) < dist < radius + ( self.clearance + via.GetWidth() / 2): # via is in range need to check the angle start_angle = math.atan2((start - center).y, (start - center).x) end_angle = math.atan2((end - center).y, (end - center).x) if end_angle < start_angle: end_angle += 2 * math.pi point_angle = math.atan2((p1 - center).y, (p1 - center).x) if start_angle <= point_angle <= end_angle: return False return True def CheckOverlap(self, via): """Check if via overlaps or interfere with other items on the board. Parameters: via (pcbnew.VIA): Via to be checked Returns: bool: True if via overlaps with an item, False otherwise. """ for item in self.overlappings: if type(item) is pcbnew.PAD: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) is pcbnew.PCB_VIA: # Overlapping with vias work best if checking is performed by intersection if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) in [pcbnew.ZONE, pcbnew.FP_ZONE]: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) is pcbnew.PCB_TRACK: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): width = item.GetWidth() dist, _ = pnt2line(via.GetPosition(), item.GetStart(), item.GetEnd()) if dist <= self.clearance + width // 2 + via.GetWidth() / 2: return True return False def FillupArea(self): """Fills selected area with vias.""" drillsize = self.FromUserUnit(float(self.m_txtViaDrillSize.GetValue())) viasize = self.FromUserUnit(float(self.m_txtViaSize.GetValue())) step_x = self.FromUserUnit(float(self.m_txtHSpacing.GetValue())) step_y = self.FromUserUnit(float(self.m_txtVSpacing.GetValue())) clearance = self.FromUserUnit(float(self.m_txtClearance.GetValue())) self.randomize = self.m_chkRandomize.GetValue() self.clearance = clearance
GetOverlappingItems
identifier_name
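GetOverlappingItems above pre-filters candidate items purely by bounding-box intersection before any finer distance checks. A standalone sketch of that axis-aligned intersection test (pcbnew's bounding-box objects provide Intersects natively; this dataclass version just makes the geometry explicit):

from dataclasses import dataclass

@dataclass
class BBox:
    left: int
    top: int
    right: int
    bottom: int

    def intersects(self, other: "BBox") -> bool:
        # Axis-aligned boxes overlap unless one lies entirely to one side
        # of the other (y grows downward, as on a PCB canvas).
        return not (
            self.right < other.left
            or other.right < self.left
            or self.bottom < other.top
            or other.bottom < self.top
        )

zone = BBox(0, 0, 100, 80)
pad = BBox(90, 70, 120, 110)
print(zone.intersects(pad))  # True: the pad becomes an overlap candidate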
viastitching_dialog.py
Clearance", "0")) self.m_chkRandomize.SetValue(defaults.get("Randomize", False)) # Get default Vias dimensions via_dim_list = self.board.GetViasDimensionsList() if via_dim_list: via_dims = via_dim_list.pop() else: wx.MessageBox(_(u"Please set via drill/size in board")) self.Destroy() self.m_txtViaSize.SetValue("%.6f" % self.ToUserUnit(via_dims.m_Diameter)) self.m_txtViaDrillSize.SetValue("%.6f" % self.ToUserUnit(via_dims.m_Drill)) via_dim_list.push_back(via_dims) self.overlappings = None def GetOverlappingItems(self): """Collect overlapping items. Every bounding box of any item found is a candidate to be inspected for overlapping. """ area_bbox = self.area.GetBoundingBox() if hasattr(self.board, 'GetModules'): modules = self.board.GetModules() else: modules = self.board.GetFootprints() tracks = self.board.GetTracks() self.overlappings = [] for zone in self.board.Zones(): if zone.GetZoneName() != self.area.GetZoneName(): if zone.GetBoundingBox().Intersects(area_bbox): self.overlappings.append(zone) for item in tracks: if (type(item) is pcbnew.PCB_VIA) and (item.GetBoundingBox().Intersects(area_bbox)): self.overlappings.append(item) if type(item) is pcbnew.PCB_TRACK: self.overlappings.append(item) for item in modules: if item.GetBoundingBox().Intersects(area_bbox): for pad in item.Pads(): self.overlappings.append(pad) for zone in item.Zones(): self.overlappings.append(zone) # TODO: change algorithm to 'If one of the candidate area's edges overlaps with target area declare candidate as overlapping' for i in range(0, self.board.GetAreaCount()): item = self.board.GetArea(i) if item.GetBoundingBox().Intersects(area_bbox): if item.GetNetname() != self.net: self.overlappings.append(item) def GetAreaConfig(self): """Check selected area (if any) and verify if it is a valid container for vias. Returns: bool: Returns True if an area/zone is selected and matches the implant criteria, False otherwise. """ for i in range(0, self.board.GetAreaCount()): area = self.board.GetArea(i) if area.IsSelected(): if not area.IsOnCopperLayer(): return False elif area.GetDoNotAllowCopperPour(): return False self.area = area self.net = area.GetNetname() return True return False def PopulateNets(self): """Populate nets widget.""" nets = self.board.GetNetsByName() # Tricky loop, the iterator should return two values, unluckly I'm not able to use the # first value of the couple so I'm recycling it as netname. for netname, net in nets.items(): netname = net.GetNetname() if (netname != None) and (netname != ""): self.m_cbNet.Append(netname) # Select the net used by area (if any) if self.net != None: index = self.m_cbNet.FindString(self.net) self.m_cbNet.Select(index) def ClearArea(self): """Clear selected area.""" undo = self.m_chkClearOwn.IsChecked() drillsize = self.FromUserUnit(float(self.m_txtViaDrillSize.GetValue())) viasize = self.FromUserUnit(float(self.m_txtViaSize.GetValue())) netname = self.m_cbNet.GetStringSelection() netcode = self.board.GetNetcodeFromNetname(netname) #commit = pcbnew.COMMIT() viacount = 0 for item in self.board.GetTracks(): if type(item) is pcbnew.PCB_VIA: # If the user selected the Undo action only signed/grouped vias are removed, # otherwise are removed vias matching values set in the dialog. 
# if undo and (item.GetTimeStamp() == __timecode__): if undo and (self.pcb_group is not None): group = item.GetParentGroup() if (group is not None and group.GetName() == self.viagroupname): self.board.Remove(item) viacount += 1 # commit.Remove(item) elif (not undo) and self.area.HitTestFilledArea(self.area.GetLayer(), item.GetPosition(), 0) and ( item.GetDrillValue() == drillsize) and (item.GetWidth() == viasize) and ( item.GetNetname() == netname): self.board.Remove(item) self.pcb_group.RemoveItem(item) viacount += 1 # commit.Remove(item) if viacount > 0: wx.MessageBox(_(u"Removed: %d vias!") % viacount) #commit.Push() pcbnew.Refresh() def CheckClearance(self, via, area, clearance): """Check if position specified by p1 comply with given clearance in area. Parameters: p1 (wxPoint): Position to test area (pcbnew.ZONE_CONTAINER): Area clearance (int): Clearance value Returns: bool: True if p1 position comply with clearance value False otherwise. """ p1 = via.GetPosition() corners = area.GetNumCorners() # Calculate minimum distance from corners # TODO: remove? for i in range(corners): corner = area.GetCornerPosition(i) p2 = corner.getWxPoint() the_distance = norm(p2 - p1) # sqrt((p2.x - p1.x) ** 2 + (p2.y - p1.y) ** 2) if the_distance < clearance: return False for i in range(corners): corner1 = area.GetCornerPosition(i) corner2 = area.GetCornerPosition((i + 1) % corners) pc1 = corner1.getWxPoint() pc2 = corner2.getWxPoint() the_distance, _ = pnt2line(p1, pc1, pc2) if the_distance <= clearance: return False for edge in self.board_edges: if edge.ShowShape() == 'Line': the_distance, _ = pnt2line(p1, edge.GetStart(), edge.GetEnd()) if the_distance <= clearance + via.GetWidth() / 2: return False if edge.ShowShape() == 'Arc': # distance from center of Arc and with angle within Arc angle should be outside Arc radius +- clearance + via Width/2 center = edge.GetPosition() start = edge.GetStart() end = edge.GetEnd() radius = norm(center - end) dist = norm(p1 - center) if radius - (self.clearance + via.GetWidth() / 2) < dist < radius + ( self.clearance + via.GetWidth() / 2): # via is in range need to check the angle start_angle = math.atan2((start - center).y, (start - center).x) end_angle = math.atan2((end - center).y, (end - center).x) if end_angle < start_angle: end_angle += 2 * math.pi point_angle = math.atan2((p1 - center).y, (p1 - center).x) if start_angle <= point_angle <= end_angle: return False return True def CheckOverlap(self, via): """Check if via overlaps or interfere with other items on the board. Parameters: via (pcbnew.VIA): Via to be checked Returns: bool: True if via overlaps with an item, False otherwise. """ for item in self.overlappings: if type(item) is pcbnew.PAD: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) is pcbnew.PCB_VIA: # Overlapping with vias work best if checking is performed by intersection if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) in [pcbnew.ZONE, pcbnew.FP_ZONE]:
if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) is pcbnew.PCB_TRACK: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): width = item.GetWidth() dist, _ = pnt2line(via.GetPosition(), item.GetStart(), item.GetEnd()) if dist <= self.clearance + width // 2 + via.GetWidth() / 2: return True return False def FillupArea(self): """Fills selected area with vias.""" drillsize = self.FromUserUnit(float(self.m_txtViaDrillSize.GetValue())) viasize = self.FromUserUnit(float(self.m_txtViaSize.GetValue())) step_x = self.FromUserUnit(float(self.m_txtHSpacing.GetValue())) step_y = self.FromUserUnit(float(self.m_txtVSpacing.GetValue())) clearance = self.FromUserUnit(float(self.m_txtClearance.GetValue())) self.randomize = self.m_chkRandomize.GetValue() self.clearance = clearance
random_line_split
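The board-edge clearance logic repeated in these rows treats an Arc edge as a radius band plus an angular window. A sketch of that membership test with tuple points, where band stands in for clearance + via width / 2; the extra normalization of the test angle is an addition for robustness, and all names are illustrative:

import math

def point_in_arc_band(p, center, start, end, band):
    # True when p lies within `band` of the arc's radius and its angle falls
    # between the arc's start and end angles (measured CCW from start).
    radius = math.dist(center, end)
    dist = math.dist(center, p)
    if not (radius - band < dist < radius + band):
        return False
    start_angle = math.atan2(start[1] - center[1], start[0] - center[0])
    end_angle = math.atan2(end[1] - center[1], end[0] - center[0])
    if end_angle < start_angle:
        end_angle += 2 * math.pi
    point_angle = math.atan2(p[1] - center[1], p[0] - center[0])
    if point_angle < start_angle:
        point_angle += 2 * math.pi  # normalize so the comparison below holds
    return start_angle <= point_angle <= end_angle

# Quarter arc from (1, 0) to (0, 1) around the origin, band of 0.2:
print(point_in_arc_band((0.74, 0.74), (0.0, 0.0), (1.0, 0.0), (0.0, 1.0), 0.2))  # True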
viastitching_dialog.py
Clearance", "0")) self.m_chkRandomize.SetValue(defaults.get("Randomize", False)) # Get default Vias dimensions via_dim_list = self.board.GetViasDimensionsList() if via_dim_list: via_dims = via_dim_list.pop() else: wx.MessageBox(_(u"Please set via drill/size in board")) self.Destroy() self.m_txtViaSize.SetValue("%.6f" % self.ToUserUnit(via_dims.m_Diameter)) self.m_txtViaDrillSize.SetValue("%.6f" % self.ToUserUnit(via_dims.m_Drill)) via_dim_list.push_back(via_dims) self.overlappings = None def GetOverlappingItems(self): """Collect overlapping items. Every bounding box of any item found is a candidate to be inspected for overlapping. """ area_bbox = self.area.GetBoundingBox() if hasattr(self.board, 'GetModules'): modules = self.board.GetModules() else: modules = self.board.GetFootprints() tracks = self.board.GetTracks() self.overlappings = [] for zone in self.board.Zones(): if zone.GetZoneName() != self.area.GetZoneName(): if zone.GetBoundingBox().Intersects(area_bbox): self.overlappings.append(zone) for item in tracks: if (type(item) is pcbnew.PCB_VIA) and (item.GetBoundingBox().Intersects(area_bbox)): self.overlappings.append(item) if type(item) is pcbnew.PCB_TRACK: self.overlappings.append(item) for item in modules: if item.GetBoundingBox().Intersects(area_bbox): for pad in item.Pads(): self.overlappings.append(pad) for zone in item.Zones(): self.overlappings.append(zone) # TODO: change algorithm to 'If one of the candidate area's edges overlaps with target area declare candidate as overlapping' for i in range(0, self.board.GetAreaCount()): item = self.board.GetArea(i) if item.GetBoundingBox().Intersects(area_bbox): if item.GetNetname() != self.net: self.overlappings.append(item) def GetAreaConfig(self): """Check selected area (if any) and verify if it is a valid container for vias. Returns: bool: Returns True if an area/zone is selected and matches the implant criteria, False otherwise. """ for i in range(0, self.board.GetAreaCount()): area = self.board.GetArea(i) if area.IsSelected(): if not area.IsOnCopperLayer(): return False elif area.GetDoNotAllowCopperPour(): return False self.area = area self.net = area.GetNetname() return True return False def PopulateNets(self): """Populate nets widget.""" nets = self.board.GetNetsByName() # Tricky loop, the iterator should return two values, unluckly I'm not able to use the # first value of the couple so I'm recycling it as netname. for netname, net in nets.items(): netname = net.GetNetname() if (netname != None) and (netname != ""): self.m_cbNet.Append(netname) # Select the net used by area (if any) if self.net != None: index = self.m_cbNet.FindString(self.net) self.m_cbNet.Select(index) def ClearArea(self): """Clear selected area.""" undo = self.m_chkClearOwn.IsChecked() drillsize = self.FromUserUnit(float(self.m_txtViaDrillSize.GetValue())) viasize = self.FromUserUnit(float(self.m_txtViaSize.GetValue())) netname = self.m_cbNet.GetStringSelection() netcode = self.board.GetNetcodeFromNetname(netname) #commit = pcbnew.COMMIT() viacount = 0 for item in self.board.GetTracks(): if type(item) is pcbnew.PCB_VIA: # If the user selected the Undo action only signed/grouped vias are removed, # otherwise are removed vias matching values set in the dialog. 
# if undo and (item.GetTimeStamp() == __timecode__): if undo and (self.pcb_group is not None): group = item.GetParentGroup() if (group is not None and group.GetName() == self.viagroupname): self.board.Remove(item) viacount += 1 # commit.Remove(item) elif (not undo) and self.area.HitTestFilledArea(self.area.GetLayer(), item.GetPosition(), 0) and ( item.GetDrillValue() == drillsize) and (item.GetWidth() == viasize) and ( item.GetNetname() == netname): self.board.Remove(item) self.pcb_group.RemoveItem(item) viacount += 1 # commit.Remove(item) if viacount > 0: wx.MessageBox(_(u"Removed: %d vias!") % viacount) #commit.Push() pcbnew.Refresh() def CheckClearance(self, via, area, clearance):
if the_distance < clearance: return False for i in range(corners): corner1 = area.GetCornerPosition(i) corner2 = area.GetCornerPosition((i + 1) % corners) pc1 = corner1.getWxPoint() pc2 = corner2.getWxPoint() the_distance, _ = pnt2line(p1, pc1, pc2) if the_distance <= clearance: return False for edge in self.board_edges: if edge.ShowShape() == 'Line': the_distance, _ = pnt2line(p1, edge.GetStart(), edge.GetEnd()) if the_distance <= clearance + via.GetWidth() / 2: return False if edge.ShowShape() == 'Arc': # distance from center of Arc and with angle within Arc angle should be outside Arc radius +- clearance + via Width/2 center = edge.GetPosition() start = edge.GetStart() end = edge.GetEnd() radius = norm(center - end) dist = norm(p1 - center) if radius - (self.clearance + via.GetWidth() / 2) < dist < radius + ( self.clearance + via.GetWidth() / 2): # via is in range need to check the angle start_angle = math.atan2((start - center).y, (start - center).x) end_angle = math.atan2((end - center).y, (end - center).x) if end_angle < start_angle: end_angle += 2 * math.pi point_angle = math.atan2((p1 - center).y, (p1 - center).x) if start_angle <= point_angle <= end_angle: return False return True def CheckOverlap(self, via): """Check if via overlaps or interfere with other items on the board. Parameters: via (pcbnew.VIA): Via to be checked Returns: bool: True if via overlaps with an item, False otherwise. """ for item in self.overlappings: if type(item) is pcbnew.PAD: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) is pcbnew.PCB_VIA: # Overlapping with vias work best if checking is performed by intersection if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) in [pcbnew.ZONE, pcbnew.FP_ZONE]: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): return True elif type(item) is pcbnew.PCB_TRACK: if item.GetBoundingBox().Intersects(via.GetBoundingBox()): width = item.GetWidth() dist, _ = pnt2line(via.GetPosition(), item.GetStart(), item.GetEnd()) if dist <= self.clearance + width // 2 + via.GetWidth() / 2: return True return False def FillupArea(self): """Fills selected area with vias.""" drillsize = self.FromUserUnit(float(self.m_txtViaDrillSize.GetValue())) viasize = self.FromUserUnit(float(self.m_txtViaSize.GetValue())) step_x = self.FromUserUnit(float(self.m_txtHSpacing.GetValue())) step_y = self.FromUserUnit(float(self.m_txtVSpacing.GetValue())) clearance = self.FromUserUnit(float(self.m_txtClearance.GetValue())) self.randomize = self.m_chkRandomize.GetValue() self.clearance =
"""Check if position specified by p1 comply with given clearance in area. Parameters: p1 (wxPoint): Position to test area (pcbnew.ZONE_CONTAINER): Area clearance (int): Clearance value Returns: bool: True if p1 position comply with clearance value False otherwise. """ p1 = via.GetPosition() corners = area.GetNumCorners() # Calculate minimum distance from corners # TODO: remove? for i in range(corners): corner = area.GetCornerPosition(i) p2 = corner.getWxPoint() the_distance = norm(p2 - p1) # sqrt((p2.x - p1.x) ** 2 + (p2.y - p1.y) ** 2)
identifier_body
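FillupArea, shown in the surrounding rows, walks the zone's bounding box on a step_x by step_y grid, optionally jittering each candidate by up to a fifth of the pitch before hit-testing and clearance checks. A sketch of just that candidate-generation loop (hypothetical helper name; the real code goes on to hit-test the zone and validate each via):

import random

def grid_candidates(left, top, right, bottom, step_x, step_y, jitter=False):
    """Yield candidate (x, y) via positions over a bounding box."""
    x = left
    while x <= right:
        y = top
        while y <= bottom:
            if jitter:
                # Same jitter amplitude as the fill loop: a fifth of the pitch.
                yield (x + random.uniform(-1, 1) * step_x / 5,
                       y + random.uniform(-1, 1) * step_y / 5)
            else:
                yield (x, y)
            y += step_y
        x += step_x

print(len(list(grid_candidates(0, 0, 10, 10, 5, 5))))  # 9 candidates on a 3x3 grid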
difev_adversarial.py
_totensor(difev_vars.adv_image) # out = difev_vars.model.run(difev_vars.trans_adv_image) out = difev_vars.model.run(difev_vars.adv_image) # difev_vars.prob_adv = softmax(out.data.cpu().numpy()[0]) difev_vars.prob_adv = softmax(out) difev_vars.pred_adv = np.argmax(difev_vars.prob_adv) p = difev_vars.prob_adv[difev_vars.pred_adv] difev_vars.stage += 1 # if pred_adv != pred_orig and prob_adv >= 0.9: if difev_vars.pred_adv != difev_vars.pred_orig and p > 0.9: return True if difev_vars.stage > max_stage: return True else: print('Prob [%s]: %f, %d iterations' % ( class_names[difev_vars.pred_orig], difev_vars.prob_adv[difev_vars.pred_orig], difev_vars.stage)) class PixelAttack: """ Use differential evolution to modify a small number of pixels (self.d pixels) """ def __init__(self): self.input_size = (0, 0) self.d = 3 self.bounds = [(0, self.input_size[0]), (0, self.input_size[1]), (0, 255), (0, 255), (0, 255)] * self.d self.name = 'pixel' # @staticmethod def perturb(self, x): global difev_vars self.input_size = difev_vars.image.size adv_image = np.array(difev_vars.image.copy()) # calculate pixel locations and values pixs = np.array(np.split(x, len(x) / 5)).astype(int) loc = (pixs[:, 0], pixs[:, 1]) val = pixs[:, 2:] adv_image[loc] = val adv_image = Image.fromarray(adv_image) return adv_image class ColorAttack: """ Change the color balance and try to defeat the classifier """ def __init__(self): # v = (0.47,0.53) v = (0.9, 1.1) self.bounds = [v, v, v] self.name = 'color' @staticmethod def perturb(x): global difev_vars adv_image = np.array(difev_vars.image.copy()) # calculate pixel locations and values adv_image = adv_image * x adv_image[adv_image > 255] = 255 adv_image = Image.fromarray(adv_image.astype('uint8')) return adv_image class RotationTranslationAttack: """ Translate / Rotate the image to defeat the classifier """ def __init__(self): self.bounds = [(0, 360), (0, 50), (0, 50)] # rotation, x translation, y translation self.name = 'rotation' @staticmethod def perturb(x): global difev_vars adv_image = difev_vars.image.copy() adv_image = adv_image.transform(adv_image.size, Image.AFFINE, (1, 0, x[1], 0, 1, x[2])) adv_image = adv_image.rotate(x[0]) return adv_image def run_attack(attack, img_path, filename, target, fig_path, save=True): global difev_vars assert difev_vars.model is not None assert target in class_names difev_vars.stage = 0 difev_vars.perturb_fn = attack.perturb difev_vars.image = difev_vars.model.load_image(img_path + filename) X = difev_vars.model.run(difev_vars.image) # difev_vars.prob_orig = softmax(X.data.cpu().numpy()[0]) difev_vars.prob_orig = softmax(X) difev_vars.pred_orig = np.argmax(difev_vars.prob_orig) print('Prediction before attack: %s' % (class_names[difev_vars.pred_orig])) print('Probability: %f' % (difev_vars.prob_orig[difev_vars.pred_orig])) if class_names[difev_vars.pred_orig] == target: print('Matches target before attack') return 'incorrect class' # Run the differential evolution attack import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=UserWarning) result = differential_evolution(optimize, attack.bounds, maxiter=iters, popsize=popsize, tol=1e-5, callback=callback, workers=1) # result = differential_evolution(optimize, attack.bounds, maxiter=iters, popsize=popsize, tol=1e-5, # callback=callback) adv_image = difev_vars.perturb_fn(result.x) # trans_adv_image = difev_vars.model.normalize_totensor(adv_image) # out = difev_vars.model.run(trans_adv_image) out = difev_vars.model.run(adv_image) # prob = 
softmax(out.data.numpy()[0]) prob = softmax(out) a = class_names[difev_vars.pred_orig] b = class_names[difev_vars.pred_adv] if a != b: print('Successful attack') print('Prob [%s]: %f --> Prob[%s]: %f' % (class_names[difev_vars.pred_orig], difev_vars.prob_orig[difev_vars.pred_orig], class_names[difev_vars.pred_adv], difev_vars.prob_adv[difev_vars.pred_adv])) base_name = filename.split('.')[0] name_image = fig_path + base_name + '_orig_%.3f' % (difev_vars.prob_orig[difev_vars.pred_orig]) + '.jpg' name_adv = fig_path + base_name + '_adv_%.3f' % (difev_vars.prob_adv[difev_vars.pred_adv]) + '.jpg' adv_image.save(name_adv, 'jpeg') difev_vars.image.save(name_image, 'jpeg') if attack.name == 'pixel': name_diff = fig_path + base_name + '_diff' + '.jpg' diff = PIL.ImageChops.difference(adv_image, difev_vars.image) diff.save(name_diff) # difev_vars.image.show() # adv_image.show() return 'success' else: print('Attack failed') return 'failed' # def test_paths(test_path_list): # test_img_paths = [] # for test_path in test_path_list: # for root, dirs, files in os.walk(test_path): # for fname in files: # ext = (os.path.splitext(fname)[-1]).lower() # if ext == ".jpg" or ext == ".jpeg" or ext == ".gif" or ext == ".png": test_img_paths += [ # os.path.join(root, fname)] # # if (len(test_img_paths) == 0): # print("No image (.jpg .gif .png) exist at " + test_path) # sys.exit(0) # return test_img_paths # def attack_all(attack, img_path, results_path, fig_path): # """ # Run attacks on all images in the validation set # """ # assert False # not yet edited # import os # from shutil import copyfile # # if attack == 'pixel': # attack = PixelAttack() # elif attack == 'color': # attack = ColorAttack() # elif attack == 'rotation': # attack = RotationTranslationAttack() # attack.d = 3 # target = 'nevus' # # load model to attack # # difev_vars.model, _ = classify.initialize_model('inception', num_classes=2, feature_extract=False, # use_pretrained=False, load=True) # if is_cuda: difev_vars.model.cuda() # difev_vars.model.eval() # results = {} # if os.path.exists(results_path + os.sep + 'results.pkl'): # results = pickle.load(open(results_path + 'results.pkl', 'rb')) # # for filename in os.listdir(img_path): # print(img_path + filename) # assert (os.path.exists(img_path + filename)) # if filename + os.sep + attack.name in results: # print('skipping') # continue # outcome = run_attack(attack, img_path, filename, target, fig_path=fig_path, save=False) # # p_best = difev_vars.prob_adv[class_names.index(target)] # results[filename + os.sep + attack.name] = {'outcome': outcome, # 'orig': difev_vars.prob_orig[difev_vars.pred_orig]} # # 'adv': p_best} # if os.path.exists(results_path + 'results.pkl'): # copyfile(results_path + 'results.pkl', results_path + 'results.old') # pickle.dump(results, open(results_path + 'results.pkl', 'wb')) def edit_results(): assert False results_path = '/data/figs/lesions-adversarial/difev/' results = pickle.load(open(results_path + 'results.pkl', 'rb')) new = {} for k, v in results.items():
if k.find('color') == -1:
            new[k] = v
conditional_block
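run_attack above hands the perturbation search to scipy's differential_evolution, minimizing the classifier's confidence in the original class and stopping early from the callback. A minimal sketch of that loop with a toy confidence function standing in for the real model (all names here are illustrative):

import numpy as np
from scipy.optimize import differential_evolution

def toy_confidence(x):
    # Pretend confidence in the original class decays as the perturbation
    # vector x moves away from zero; the real code queries the classifier.
    return float(np.exp(-np.linalg.norm(x)))

def objective(x):
    # differential_evolution minimizes, so returning the original-class
    # confidence pushes the search toward a misclassification.
    return toy_confidence(x)

def callback(xk, convergence):
    # Returning True halts the search early, mirroring the stage/probability
    # stopping conditions in the real callback.
    return toy_confidence(xk) < 0.1

bounds = [(-3.0, 3.0)] * 5
result = differential_evolution(objective, bounds, maxiter=20, popsize=10,
                                tol=1e-5, callback=callback, workers=1)
print(result.x, toy_confidence(result.x))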
difev_adversarial.py
lasertoning', 'lentigo', 'leukemiacutis', 'leukocytoclasticvasculitis', 'lichenamyloidosis', 'lichennitidus', 'lichenoiddrugeruption', 'lichenplanus', 'lichensimplexchronicus', 'lichenstriatus', 'lipoma', 'lipomatosis', 'livedoidvasculitis', 'livedoreticularis', 'lmdf', 'lupuserythematosus', 'lymphangioma', 'lymphoma', 'lymphomatoidpapulosis', 'malignantmelanoma', 'mastocytoma', 'mastocytosis', 'melanocyticnevus', 'melanonychia', 'melasma', 'metastasis', 'milia', 'milium', 'molluscumcontagiosum', 'morphea', 'mucocele', 'mucosalmelanoticmacule', 'mucouscyst', 'mycosisfungoides', 'naildystrophy', 'neurofibroma', 'neurofibromatosis', 'nevus_postop', 'nevusdepigmentosus', 'nevussebaceus', 'nevusspilus', 'nippleeczema', 'normalnail', 'ntminfection', 'nummulareczema', 'onycholysis', 'onychomycosis', 'organoidnevus', 'otanevus', 'otherdermatitis', 'pagetsdisease', 'palmoplantarpustulosis', 'panniculitis', 'papularurticaria', 'parapsoriasis', 'paronychia', 'pemphigusfoliaceus', 'pemphigusvulgaris', 'perioraldermatitis', 'photosensitivedermatitis', 'pigmentedcontactdermatitis', 'pigmentednevus', 'pigmentedprogressivepurpuricdermatosis', 'pilarcyst', 'pilomatricoma', 'pityriasisalba', 'pityriasislichenoideschronica', 'pityriasislichenoidesetvarioliformisacuta', 'pityriasisrosea', 'pityriasisrubrapilaris', 'poikiloderma', 'pompholyx', 'porokeratosis', 'poroma', 'portwinestain', 'postinflammatoryhyperpigmentation', 'prurigonodularis', 'prurigopigmentosa', 'pruritus', 'pseudolymphoma', 'psoriasis', 'puppp', 'purpura', 'pustularpsoriasis', 'pyodermagangrenosum', 'pyogenicgranuloma', 'rhielmelanosis', 'rosacea', 'rupturedcyst', 'sarcoidosis', 'scabies', 'scar', 'scar_postlaser', 'scar_postop', 'scc_postop', 'scleroderma', 'sebaceoushyperplasia', 'seborrheicdermatitis', 'seborrheickeratosis', 'skintag', 'softfibroma', 'squamouscellcarcinoma', 'staphylococcalscaldedskinsyndrome', 'stasisdermatitis', 'steatocystomamultiplex', 'steroidrosacea', 'striaedistensae', 'subcutaneousnodule', 'subungalhematoma', 'sweetsyndrome', 'syringoma', 'systemiccontactdermatitis', 'systemiclupuserythematosus', 'tattoo', 'telangiectasia', 'tineacorporis', 'tineafaciale', 'tineapedis', 'toxicepidermalnecrolysis', 'traumaticfatnecrosis', 'traumatictattoo', 'ulcer', 'urticaria', 'urticarialvasculitis', 'urticariapigmentosa', 'varicella', 'vascularmalformation', 'vasculitis', 'venouslake', 'venousmalformation', 'verrucaplana', 'viralexanthem', 'vitiligo', 'wart', 'wrinkle', 'xanthelasma', 'xanthogranuloma', 'xanthoma', 'xeroticeczema'] self.main_dx2 = ['Malignant melanoma', 'Basal cell carcinoma', 'Squamous cell carcinoma', 'Intraepithelial carcinoma', 'Pyogenic granuloma', 'Seborrheic keratosis', 'Melanocytic nevus', 'Actinic keratosis', 'Dermatofibroma', 'Hemangioma', 'Wart', 'Lentigo'] self.main_dx = ['malignantmelanoma', 'basalcellcarcinoma', 'squamouscellcarcinoma', 'bowendisease', 'pyogenicgranuloma', 'seborrheickeratosis', 'pigmentednevus', 'actinickeratosis', 'dermatofibroma', 'hemangioma', 'wart', 'lentigo'] def loadcaffemodel(self, modelbasepath, modelname, deployname): mean_blob = caffe_pb2.BlobProto() with open(os.path.join(modelbasepath, 'mean224x224.binaryproto'), 'rb') as f: mean_blob.ParseFromString(f.read()) mean_array = np.asarray(mean_blob.data, dtype=np.float32).reshape( (mean_blob.channels, mean_blob.height, mean_blob.width)) # Read model architecture and trained model's weights print(os.path.join(modelbasepath, deployname), os.path.join(modelbasepath, modelname + '.caffemodel')) net = 
caffe.Net(os.path.join(modelbasepath, deployname), os.path.join(modelbasepath, modelname + '.caffemodel'), caffe.TEST) # Define image transformers transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_mean('data', mean_array) transformer.set_transpose('data', (2, 0, 1)) return net def transform_img(self, img, img_width=224, img_height=224): # Histogram Equalization img[:, :, 0] = cv2.equalizeHist(img[:, :, 0]) img[:, :, 1] = cv2.equalizeHist(img[:, :, 1]) img[:, :, 2] = cv2.equalizeHist(img[:, :, 2]) # Image Resizing img = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_CUBIC) return img def getname(self, i):
def get_basenames(self, img_path): basenames = [] dirname = os.path.dirname(img_path) for alias_ in self.list_alias: dirname = dirname.replace(alias_[0], alias_[1]) olddir = '' while dirname != '' and dirname != '/' and olddir != dirname: if ('lesion_' not in os.path.basename(dirname)): basenames += [os.path.basename(dirname)] olddir = dirname dirname = os.path.dirname(dirname) return basenames def get_final_results(self, out): img_path = "" # temp threshold = [726, 39, 172, 429, 166, 9, 227, 18, 14, 30, 1107, 305] final_result = [] model_result = [] result = [] pred_probas = out['prob'] result += [(img_path, pred_probas[0].tolist())] for modelnail_ in result: if (modelnail_[0] == img_path): model_result = modelnail_[1] # print("result - ", model_result) # get right index from folder name right_dx_index = -1 right_dx_name = '' for i, dx_ in enumerate(self.main_dx2): if dx_ in self.get_basenames(img_path): right_dx_name = self.main_dx[i] for j, dx2_ in enumerate(self.list_dx): if dx2_ == right_dx_name: right_dx_index = j final_result += [(img_path, model_result, right_dx_index)] results = [] countall = 0.0 correct = 0.0 all_results = [] assert len(final_result) == 1 for final_ in final_result: countall += 1 diagnosis = [final_[0]] f_ = [] for i, p_ in enumerate(final_[1]): thres_ = 10000 for j, dx_ in enumerate(self.main_dx): if (dx_ == self.list_dx[i]): thres_ = threshold[j] if (
for j, dx_ in enumerate(self.main_dx):
            if dx_ == self.list_dx[i]:
                return self.main_dx2[j]
        return ""
identifier_body
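transform_img in the row above equalizes each colour channel's histogram and resizes to the network's 224x224 input. A compact sketch of that preprocessing, assuming OpenCV is available and a BGR uint8 image as produced by cv2.imread:

import cv2
import numpy as np

def preprocess(img: np.ndarray, size: int = 224) -> np.ndarray:
    out = img.copy()
    for c in range(3):
        # Per-channel histogram equalization, then cubic resize to size x size.
        out[:, :, c] = cv2.equalizeHist(out[:, :, c])
    return cv2.resize(out, (size, size), interpolation=cv2.INTER_CUBIC)

# img = cv2.imread("lesion.jpg", cv2.IMREAD_COLOR)  # BGR uint8, as in load_image
# ready = preprocess(img)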
difev_adversarial.py
# return self.loader1(image) def normalize_totensor(self, image): """ Input PIL image output cuda tensor """ x = self.loader2(image) x = x.repeat(1, 1, 1, 1) if is_cuda: x = x.cuda() return x def load_image(self, filename): # Load the image that we modify image = Image.open(filename) image = self.loader1(image) return image def run(self, image): """ Input: PIL image Output: 2 probs """ image = self.normalize_totensor(image) result = self.model(image) return result.data.cpu().numpy()[0] class CaffeModel: """ Class to abstract loading and running of the pytorch model All Pytorch-specific code in this class """ def __init__(self): # load the model model_path = "asan" name_caffemodel = "59024" deployname = 'deploy.prototxt' self.net = self.loadcaffemodel(model_path, name_caffemodel, deployname) self.list_alias = [] self.list_dx = ['abnom', 'abscess', 'acanthosisnigricans', 'acne', 'acneiformeruption', 'acnescar', 'acrallentiginousnevus', 'actiniccheilitis', 'actinickeratosis', 'acutegeneralizedexanthematouspustulosis', 'acutegvhd', 'adultonsetstillsdisease', 'allergiccontactdermatitis', 'allergicvasculitis', 'alopecia', 'alopeciaareata', 'amyloidosis', 'androgenicalopecia', 'angioedema', 'angiofibroma', 'angiokeratoma', 'angiolipoma', 'ashydermatitis', 'ashydermatosis', 'atopicdermatitis', 'atypicalmycobacterialinfection', 'basalcellcarcinoma', 'basalcellcarcinoma_postop', 'beckernevus', 'behcetdisease', 'bluenevus', 'bowendisease', 'bowenoidpapulosis', 'bullousdisease', 'bullousdrugeruption', 'bullouspemphigoid', 'burn', 'burnscar', 'cafeaulaitmacule', 'calcinosiscutis', 'callus', 'cellulitis', 'cetuximabinducedacneiformeruption', 'cheilitis', 'chickenpox', 'cholinergicurticaria', 'chroniceczema', 'chronicgvhd', 'chronicurticaria', 'coldinducedurticaria', 'condyloma', 'confluentreticulatedpapillomatosis', 'congenitalnevus', 'connectivetissuedisease', 'contactcheilitis', 'contactdermatitis', 'cutaneoushorn', 'cyst', 'darkcircle', 'depressedscar', 'dermatitisherpetiformis', 'dermatofibroma', 'dermatomyositis', 'dilatedpore', 'dirtyneck', 'dohimelanosis', 'drugeruption', 'dyshidroticeczema', 'dysplasticnevus', 'eczema', 'eczemaherpeticum', 'epidermalcyst', 'epidermalnevus', 'eruptivesyringoma', 'erythemaabigne', 'erythemaannularecentrifugum', 'erythemamultiforme', 'erythemanodosum', 'exfoliativedermatitis', 'extramammarypagetdisease', 'fibroma', 'fixeddrugeruption', 'folliculitis', 'fordycespot', 'foreignbodygranuloma', 'foreignbodyreaction', 'freckle', 'fungalinfection', 'furuncle', 'glomustumor', 'graftversushostdisease', 'granuloma', 'granulomaannulare', 'guttatepsoriasis', 'handeczema', 'hemangioma', 'hematoma', 'henochschonleinpurpura', 'herpessimplex', 'herpeszoster', 'hyperpigmentation', 'hypersensitivityvasculitis', 'hypertrophicscar', 'hypopigmentation', 'idiopathicguttatehypomelanosis', 'idreaction', 'impetigo', 'inflammedcyst', 'ingrowingnail', 'insectbite', 'intradermalnevus', 'irritantcontactdermatitis', 'irritatedlentigo', 'irritatedseborrheickeratosis', 'juvenilexanthogranuloma', 'kaposisarcoma', 'keloid', 'keratoacanthoma', 'keratoderma', 'keratosispilaris', 'langerhanscellhistiocytosis', 'lasertoning', 'lentigo', 'leukemiacutis', 'leukocytoclasticvasculitis', 'lichenamyloidosis', 'lichennitidus', 'lichenoiddrugeruption', 'lichenplanus', 'lichensimplexchronicus', 'lichenstriatus', 'lipoma', 'lipomatosis', 'livedoidvasculitis', 'livedoreticularis', 'lmdf', 'lupuserythematosus', 'lymphangioma', 'lymphoma', 'lymphomatoidpapulosis', 'malignantmelanoma', 'mastocytoma', 'mastocytosis', 
'melanocyticnevus', 'melanonychia', 'melasma', 'metastasis', 'milia', 'milium', 'molluscumcontagiosum', 'morphea', 'mucocele', 'mucosalmelanoticmacule', 'mucouscyst', 'mycosisfungoides', 'naildystrophy', 'neurofibroma', 'neurofibromatosis', 'nevus_postop', 'nevusdepigmentosus', 'nevussebaceus', 'nevusspilus', 'nippleeczema', 'normalnail', 'ntminfection', 'nummulareczema', 'onycholysis', 'onychomycosis', 'organoidnevus', 'otanevus', 'otherdermatitis', 'pagetsdisease', 'palmoplantarpustulosis', 'panniculitis', 'papularurticaria', 'parapsoriasis', 'paronychia', 'pemphigusfoliaceus', 'pemphigusvulgaris', 'perioraldermatitis', 'photosensitivedermatitis', 'pigmentedcontactdermatitis', 'pigmentednevus', 'pigmentedprogressivepurpuricdermatosis', 'pilarcyst', 'pilomatricoma', 'pityriasisalba', 'pityriasislichenoideschronica', 'pityriasislichenoidesetvarioliformisacuta', 'pityriasisrosea', 'pityriasisrubrapilaris', 'poikiloderma', 'pompholyx', 'porokeratosis', 'poroma', 'portwinestain', 'postinflammatoryhyperpigmentation', 'prurigonodularis', 'prurigopigmentosa', 'pruritus', 'pseudolymphoma', 'psoriasis', 'puppp', 'purpura', 'pustularpsoriasis', 'pyodermagangrenosum', 'pyogenicgranuloma', 'rhielmelanosis', 'rosacea', 'rupturedcyst', 'sarcoidosis', 'scabies', 'scar', 'scar_postlaser', 'scar_postop', 'scc_postop', 'scleroderma', 'sebaceoushyperplasia', 'seborrheicdermatitis', 'seborrheickeratosis', 'skintag', 'softfibroma', 'squamouscellcarcinoma', 'staphylococcalscaldedskinsyndrome', 'stasisdermatitis', 'steatocystomamultiplex', 'steroidrosacea', 'striaedistensae', 'subcutaneousnodule', 'subungalhematoma', 'sweetsyndrome', 'syringoma', 'systemiccontactdermatitis', 'systemiclupuserythematosus', 'tattoo', 'telangiectasia', 'tineacorporis', 'tineafaciale', 'tineapedis', 'toxicepidermalnecrolysis', 'traumaticfatnecrosis', 'traumatictattoo', 'ulcer', 'urticaria', 'urticarialvasculitis', 'urticariapig
# """
random_line_split
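The PyTorch path in the row above feeds a PIL image through a resize transform (loader1) and a normalize/to-tensor transform (loader2) before adding a batch dimension. A sketch of such a loader pair, assuming torchvision; the resize target and normalization constants are assumptions, not values taken from the repository:

import torch
from torchvision import transforms

loader1 = transforms.Resize((299, 299))           # keeps a PIL image
loader2 = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],   # assumed ImageNet statistics
                         [0.229, 0.224, 0.225]),
])

def normalize_totensor(image):
    x = loader2(image).unsqueeze(0)               # add the batch dimension
    return x.cuda() if torch.cuda.is_available() else x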
difev_adversarial.py
borrheic keratosis', 'Melanocytic nevus', 'Actinic keratosis', 'Dermatofibroma', 'Hemangioma', 'Wart', 'Lentigo'] self.main_dx = ['malignantmelanoma', 'basalcellcarcinoma', 'squamouscellcarcinoma', 'bowendisease', 'pyogenicgranuloma', 'seborrheickeratosis', 'pigmentednevus', 'actinickeratosis', 'dermatofibroma', 'hemangioma', 'wart', 'lentigo'] def loadcaffemodel(self, modelbasepath, modelname, deployname): mean_blob = caffe_pb2.BlobProto() with open(os.path.join(modelbasepath, 'mean224x224.binaryproto'), 'rb') as f: mean_blob.ParseFromString(f.read()) mean_array = np.asarray(mean_blob.data, dtype=np.float32).reshape( (mean_blob.channels, mean_blob.height, mean_blob.width)) # Read model architecture and trained model's weights print(os.path.join(modelbasepath, deployname), os.path.join(modelbasepath, modelname + '.caffemodel')) net = caffe.Net(os.path.join(modelbasepath, deployname), os.path.join(modelbasepath, modelname + '.caffemodel'), caffe.TEST) # Define image transformers transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_mean('data', mean_array) transformer.set_transpose('data', (2, 0, 1)) return net def transform_img(self, img, img_width=224, img_height=224): # Histogram Equalization img[:, :, 0] = cv2.equalizeHist(img[:, :, 0]) img[:, :, 1] = cv2.equalizeHist(img[:, :, 1]) img[:, :, 2] = cv2.equalizeHist(img[:, :, 2]) # Image Resizing img = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_CUBIC) return img def getname(self, i): for j, dx_ in enumerate(self.main_dx): if dx_ == self.list_dx[i]: return self.main_dx2[j] return "" def get_basenames(self, img_path): basenames = [] dirname = os.path.dirname(img_path) for alias_ in self.list_alias: dirname = dirname.replace(alias_[0], alias_[1]) olddir = '' while dirname != '' and dirname != '/' and olddir != dirname: if ('lesion_' not in os.path.basename(dirname)): basenames += [os.path.basename(dirname)] olddir = dirname dirname = os.path.dirname(dirname) return basenames def get_final_results(self, out): img_path = "" # temp threshold = [726, 39, 172, 429, 166, 9, 227, 18, 14, 30, 1107, 305] final_result = [] model_result = [] result = [] pred_probas = out['prob'] result += [(img_path, pred_probas[0].tolist())] for modelnail_ in result: if (modelnail_[0] == img_path): model_result = modelnail_[1] # print("result - ", model_result) # get right index from folder name right_dx_index = -1 right_dx_name = '' for i, dx_ in enumerate(self.main_dx2): if dx_ in self.get_basenames(img_path): right_dx_name = self.main_dx[i] for j, dx2_ in enumerate(self.list_dx): if dx2_ == right_dx_name: right_dx_index = j final_result += [(img_path, model_result, right_dx_index)] results = [] countall = 0.0 correct = 0.0 all_results = [] assert len(final_result) == 1 for final_ in final_result: countall += 1 diagnosis = [final_[0]] f_ = [] for i, p_ in enumerate(final_[1]): thres_ = 10000 for j, dx_ in enumerate(self.main_dx): if (dx_ == self.list_dx[i]): thres_ = threshold[j] if (p_ * 10000 > thres_): f_ += [(p_, self.getname(i))] if i == final_[2]: correct += 1 f_ = sorted(f_, reverse=True) for f in f_: if f[1] == "Melanocytic nevus": correct += 1 for i, p_ in enumerate(final_[1]): for j, dx_ in enumerate(self.main_dx): if (dx_ == self.list_dx[i]): diagnosis.append([self.getname(i), p_]) results.append((self.getname(i), p_, final_[0])) diagnosis = {x[0]: x[1] for x in diagnosis[1:]} p_melanoma = diagnosis['Malignant melanoma'] p_nevus = diagnosis['Melanocytic nevus'] p_melanoma, p_nevus = 
softmax([p_melanoma, p_nevus]) return (p_melanoma, p_nevus) def load_image(self, filename): """ Input: Filename Output: PIL image """ image = cv2.imread(filename, cv2.IMREAD_COLOR) image = self.transform_img(image) # convert cv2 to PIL image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = Image.fromarray(image) return image def run(self, image): """ Input: PIL image Output: probabilities """ # convert from PIL to CV2 image = np.array(image) image = image[:, :, ::-1].copy() transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape}) # transformer.set_mean('data', mean_array) transformer.set_transpose('data', (2, 0, 1)) self.net.blobs['data'].data[...] = transformer.preprocess('data', image) out = self.net.forward() return self.get_final_results(out) def softmax(x): e_x = np.exp(x - np.max(x)) return e_x / e_x.sum() def optimize(x): global difev_vars adv_image = difev_vars.perturb_fn(x) # trans_adv_image = difev_vars.model.normalize_totensor(adv_image) # out = difev_vars.model.run(trans_adv_image) out = difev_vars.model.run(adv_image) prob = softmax(out) # prob = softmax(out.data.cpu().numpy()[0]) return prob[difev_vars.pred_orig] def callback(x, convergence): global difev_vars difev_vars.adv_image = difev_vars.perturb_fn(x) # difev_vars.trans_adv_image = difev_vars.model.normalize_totensor(difev_vars.adv_image) # out = difev_vars.model.run(difev_vars.trans_adv_image) out = difev_vars.model.run(difev_vars.adv_image) # difev_vars.prob_adv = softmax(out.data.cpu().numpy()[0]) difev_vars.prob_adv = softmax(out) difev_vars.pred_adv = np.argmax(difev_vars.prob_adv) p = difev_vars.prob_adv[difev_vars.pred_adv] difev_vars.stage += 1 # if pred_adv != pred_orig and prob_adv >= 0.9: if difev_vars.pred_adv != difev_vars.pred_orig and p > 0.9: return True if difev_vars.stage > max_stage: return True else: print('Prob [%s]: %f, %d iterations' % ( class_names[difev_vars.pred_orig], difev_vars.prob_adv[difev_vars.pred_orig], difev_vars.stage)) class PixelAttack: """ Use differential evolution to modify a small number of pixels (self.d pixels) """ def __init__(self): self.input_size = (0, 0) self.d = 3 self.bounds = [(0, self.input_size[0]), (0, self.input_size[1]), (0, 255), (0, 255), (0, 255)] * self.d self.name = 'pixel' # @staticmethod def perturb(self, x): global difev_vars self.input_size = difev_vars.image.size adv_image = np.array(difev_vars.image.copy()) # calculate pixel locations and values pixs = np.array(np.split(x, len(x) / 5)).astype(int) loc = (pixs[:, 0], pixs[:, 1]) val = pixs[:, 2:] adv_image[loc] = val adv_image = Image.fromarray(adv_image) return adv_image class
ColorAttack
identifier_name
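PixelAttack.perturb, visible in the surrounding rows, decodes the search vector as groups of five numbers per pixel: two coordinates and three channel values. A standalone numpy sketch of that decoding and application (the real code round-trips through a PIL image; names here are illustrative):

import numpy as np

def apply_pixels(image: np.ndarray, x: np.ndarray) -> np.ndarray:
    adv = image.copy()
    # Each group of five values is (row, col, R, G, B).
    pixs = np.array(np.split(x, len(x) // 5)).astype(int)
    rows, cols = pixs[:, 0], pixs[:, 1]
    adv[rows, cols] = pixs[:, 2:]
    return adv

img = np.zeros((4, 4, 3), dtype=np.uint8)
x = np.array([1, 2, 255, 0, 0, 3, 0, 0, 255, 0])  # two perturbed pixels
print(apply_pixels(img, x)[1, 2])                 # [255   0   0]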
rsync.rs
( rrdp_state: &RrdpState, changed: bool, config: &Config, ) -> Result<()> { // Check that there is a current snapshot, if not, there is no work if rrdp_state.snapshot_path().is_none() { return Ok(()); } // We can assume now that there is a snapshot and unwrap things for it let snapshot_path = rrdp_state.snapshot_path().unwrap(); let snapshot = rrdp_state.snapshot().unwrap(); let session_id = snapshot.session_id(); let serial = snapshot.serial(); let mut rsync_state = RsyncDirState::recover(config)?; let new_revision = RsyncRevision { session_id, serial }; if changed { let mut writer = RsyncFromSnapshotWriter { out_path: new_revision.path(config), include_host_and_module: config.rsync_include_host, }; writer.create_out_path_if_missing()?; writer.for_snapshot_path(&snapshot_path)?; if config.rsync_dir_use_symlinks() { symlink_current_to_new_revision_dir(&new_revision, config)?; } else { rename_new_revision_dir_to_current(&new_revision, &rsync_state, config)?; } rsync_state.update_current(new_revision); } rsync_state.clean_old(config)?; rsync_state.persist(config)?; Ok(()) } /// Create a new symlink then rename it. We need to do this because the std library /// refuses to overwrite an existing symlink. And if we were to remove it first, then /// we would introduce a race condition for clients accessing. fn symlink_current_to_new_revision_dir( new_revision: &RsyncRevision, config: &Config, ) -> Result<()> { info!( "Updating symlink 'current' to '{}' under rsync dir '{}'", new_revision.dir_name(), config.rsync_dir.display() ); let current_path = config.rsync_dir_current(); let tmp_name = file_ops::path_with_extension(&current_path, config::TMP_FILE_EXT); if tmp_name.exists() { std::fs::remove_file(&tmp_name).with_context(|| { format!( "Could not remove lingering temporary symlink for current rsync dir at '{}'", tmp_name.display() ) })?; } std::os::unix::fs::symlink(new_revision.dir_name(), &tmp_name).with_context(|| { format!( "Could not create temporary symlink for new rsync content at '{}'", tmp_name.display() ) })?; std::fs::rename(&tmp_name, &current_path).with_context(|| { format!( "Could not rename symlink for current rsync dir from '{}' to '{}'", tmp_name.display(), current_path.display() ) })?; Ok(()) } /// Rename the path for the new revision to the current rsync path, *after* /// renaming any existing current path to the serial and session for that /// revision. 
fn rename_new_revision_dir_to_current( new_revision: &RsyncRevision, rsync_state: &RsyncDirState, config: &Config, ) -> Result<()> { info!("Renaming rsync folders for close to atomic update of the rsync module dir"); let current_path = config.rsync_dir_current(); if let Some(current) = &rsync_state.current { let current_preserve_path = current.path(config); if current_path.exists() { info!( "Renaming the rsync directory for previous revision to: {}", current_preserve_path.display() ); std::fs::rename(&current_path, &current_preserve_path).with_context(|| { format!( "Could not rename current rsync dir from '{}' to '{}'", current_path.display(), current_preserve_path.display() ) })?; } } info!( "Rename rsync dir for new revision to '{}'", current_path.display() ); std::fs::rename(&new_revision.path(config), &current_path).with_context(|| { format!( "Could not rename new rsync dir from '{}' to '{}'", new_revision.path(config).display(), current_path.display() ) })?; Ok(()) } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] struct RsyncDirState { current: Option<RsyncRevision>, old: Vec<DeprecatedRsyncRevision>, } impl RsyncDirState { /// Gets the current state from disk, if a state file exists. Otherwise returns /// a new blank state. fn recover(config: &Config) -> Result<Self> { let state_path = config.rsync_state_path(); if state_path.exists() { let json_bytes = file_ops::read_file(&state_path).with_context(|| { format!("Cannot read rsync state file at: {}", state_path.display()) })?; serde_json::from_slice(json_bytes.as_ref()).with_context(|| { format!( "Cannot deserialize json for current state from {}", state_path.display() ) }) } else { Ok(RsyncDirState { current: None, old: vec![], }) } } /// Persists the state to disk fn persist(&self, config: &Config) -> Result<()> { let state_path = config.rsync_state_path(); let json = serde_json::to_string_pretty(&self)?; file_ops::write_buf(&state_path, json.as_bytes()).with_context(|| "Could not save state.") } /// Updates the current revision for this state, moves a possible /// existing current state to old. fn update_current(&mut self, current: RsyncRevision) { let existing = std::mem::replace(&mut self.current, Some(current)); if let Some(existing) = existing { self.old.push(existing.deprecate()); } } /// Cleans old directories from disk when their time has come, and updates /// this state (forgets these old versions). Will throw an error if removing /// an old dir fails, but will simply skip removing old dirs if they had /// already been removed. 
fn clean_old(&mut self, config: &Config) -> Result<()> { let clean_before = Time::seconds_ago(config.cleanup_after); for old in self .old .iter() .filter(|deprecated| deprecated.since <= clean_before) { let path = old.revision.path(config); if path.exists() { info!( "Removing rsync directory: {}, deprecated since: {}", path.display(), old.since ); // Try to remove the old directory if it still exists std::fs::remove_dir_all(&path).with_context(|| { format!( "Could not remove rsync dir for old revision at: {}", path.display() ) })?; } } self.old .retain(|deprecated| deprecated.since > clean_before); Ok(()) } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] struct RsyncRevision { #[serde(deserialize_with = "util::de_uuid", serialize_with = "util::ser_uuid")] session_id: Uuid, serial: u64, } impl RsyncRevision { fn dir_name(&self) -> String { format!("session_{}_serial_{}", self.session_id, self.serial) } fn path(&self, config: &Config) -> PathBuf { config.rsync_dir.join(&self.dir_name()) } fn deprecate(self) -> DeprecatedRsyncRevision { DeprecatedRsyncRevision { since: Time::now(), revision: self, } } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] struct DeprecatedRsyncRevision { since: Time, revision: RsyncRevision, } struct RsyncFromSnapshotWriter { out_path: PathBuf, include_host_and_module: bool, } impl RsyncFromSnapshotWriter { /// Creates an empty directory for the rsync out_path. Particularly needed if the snapshot /// is empty since no files (and parent dirs) would be created in that case - and we want to /// see an empty directory. See issue #62. fn create_out_path_if_missing(&self) -> Result<()> { if !self.out_path.exists() { std::fs::create_dir_all(&self.out_path).with_context(|| { format!( "Cannot create output directory for rsync at {}", &self.out_path.display() ) }) } else { Ok(()) } } /// Processes the given snapshot and writes any published files under the /// rsync out_path directory fn for_snapshot_path(&mut self, snapshot: &Path) -> Result<()> { let source_file = File::open(snapshot)?; let buf_reader = BufReader::new(source_file); self.process(buf_reader)?; Ok(()) } } impl ProcessSnapshot for RsyncFromSnapshotWriter { type Err = anyhow::Error; fn meta(&mut self, _session_id: Uuid, _serial: u64) -> Result<()> { Ok(()) // nothing to do } fn publish( &mut self, uri: rpki::uri::Rsync, data: &mut rpki::rrdp::ObjectReader, ) -> Result<()> { let path = if self.include_host_and_module { self.out_path.join(format!( "{}/{}/{}", uri.authority(), uri.module_name(),
update_from_rrdp_state
identifier_name
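symlink_current_to_new_revision_dir in the row above swaps the 'current' link by creating a temporary symlink and renaming it over the old one, because a rename is atomic where a remove-then-create is not. A sketch of the same pattern, written in Python for consistency with the other examples and with illustrative path names:

import os

def repoint_current(rsync_dir: str, new_revision_dir: str) -> None:
    current = os.path.join(rsync_dir, "current")
    tmp = current + ".tmp"
    if os.path.lexists(tmp):
        os.remove(tmp)                  # clear a lingering temporary link
    os.symlink(new_revision_dir, tmp)   # build the new link under a temp name
    os.rename(tmp, current)             # atomic on POSIX: readers never see a gap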
rsync.rs
_use_symlinks() { symlink_current_to_new_revision_dir(&new_revision, config)?; } else { rename_new_revision_dir_to_current(&new_revision, &rsync_state, config)?; } rsync_state.update_current(new_revision); } rsync_state.clean_old(config)?; rsync_state.persist(config)?; Ok(()) } /// Create a new symlink then rename it. We need to do this because the std library /// refuses to overwrite an existing symlink. And if we were to remove it first, then /// we would introduce a race condition for clients accessing. fn symlink_current_to_new_revision_dir( new_revision: &RsyncRevision, config: &Config, ) -> Result<()> { info!( "Updating symlink 'current' to '{}' under rsync dir '{}'", new_revision.dir_name(), config.rsync_dir.display() ); let current_path = config.rsync_dir_current(); let tmp_name = file_ops::path_with_extension(&current_path, config::TMP_FILE_EXT); if tmp_name.exists() { std::fs::remove_file(&tmp_name).with_context(|| { format!( "Could not remove lingering temporary symlink for current rsync dir at '{}'", tmp_name.display() ) })?; } std::os::unix::fs::symlink(new_revision.dir_name(), &tmp_name).with_context(|| { format!( "Could not create temporary symlink for new rsync content at '{}'", tmp_name.display() ) })?; std::fs::rename(&tmp_name, &current_path).with_context(|| { format!( "Could not rename symlink for current rsync dir from '{}' to '{}'", tmp_name.display(), current_path.display() ) })?; Ok(()) } /// Rename the path for the new revision to the current rsync path, *after* /// renaming any existing current path to the serial and session for that /// revision. fn rename_new_revision_dir_to_current( new_revision: &RsyncRevision, rsync_state: &RsyncDirState, config: &Config, ) -> Result<()> { info!("Renaming rsync folders for close to atomic update of the rsync module dir"); let current_path = config.rsync_dir_current(); if let Some(current) = &rsync_state.current { let current_preserve_path = current.path(config); if current_path.exists() { info!(
"Renaming the rsync directory for previous revision to: {}", current_preserve_path.display() ); std::fs::rename(&current_path, &current_preserve_path).with_context(|| { format!( "Could not rename current rsync dir from '{}' to '{}'", current_path.display(), current_preserve_path.display() ) })?; } } info!( "Rename rsync dir for new revision to '{}'", current_path.display() ); std::fs::rename(&new_revision.path(config), &current_path).with_context(|| { format!( "Could not rename new rsync dir from '{}' to '{}'", new_revision.path(config).display(), current_path.display() ) })?; Ok(()) } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] struct RsyncDirState { current: Option<RsyncRevision>, old: Vec<DeprecatedRsyncRevision>, } impl RsyncDirState { /// Gets the current state from disk, if a state file exists. Otherwise returns /// a new blank state. fn recover(config: &Config) -> Result<Self> { let state_path = config.rsync_state_path(); if state_path.exists() { let json_bytes = file_ops::read_file(&state_path).with_context(|| { format!("Cannot read rsync state file at: {}", state_path.display()) })?; serde_json::from_slice(json_bytes.as_ref()).with_context(|| { format!( "Cannot deserialize json for current state from {}", state_path.display() ) }) } else { Ok(RsyncDirState { current: None, old: vec![], }) } } /// Persists the state to disk fn persist(&self, config: &Config) -> Result<()> { let state_path = config.rsync_state_path(); let json = serde_json::to_string_pretty(&self)?; file_ops::write_buf(&state_path, json.as_bytes()).with_context(|| "Could not save state.") } /// Updates the current revision for this state, moves a possible /// existing current state to old. fn update_current(&mut self, current: RsyncRevision) { let existing = std::mem::replace(&mut self.current, Some(current)); if let Some(existing) = existing { self.old.push(existing.deprecate()); } } /// Cleans old directories from disk when their time has come, and updates /// this state (forgets these old versions). Will throw an error if removing /// an old dir fails, but will simply skip removing old dirs if they had /// already been removed. 
fn clean_old(&mut self, config: &Config) -> Result<()> { let clean_before = Time::seconds_ago(config.cleanup_after); for old in self .old .iter() .filter(|deprecated| deprecated.since <= clean_before) { let path = old.revision.path(config); if path.exists() { info!( "Removing rsync directory: {}, deprecated since: {}", path.display(), old.since ); // Try to remove the old directory if it still exists std::fs::remove_dir_all(&path).with_context(|| { format!( "Could not remove rsync dir for old revision at: {}", path.display() ) })?; } } self.old .retain(|deprecated| deprecated.since > clean_before); Ok(()) } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] struct RsyncRevision { #[serde(deserialize_with = "util::de_uuid", serialize_with = "util::ser_uuid")] session_id: Uuid, serial: u64, } impl RsyncRevision { fn dir_name(&self) -> String { format!("session_{}_serial_{}", self.session_id, self.serial) } fn path(&self, config: &Config) -> PathBuf { config.rsync_dir.join(&self.dir_name()) } fn deprecate(self) -> DeprecatedRsyncRevision { DeprecatedRsyncRevision { since: Time::now(), revision: self, } } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] struct DeprecatedRsyncRevision { since: Time, revision: RsyncRevision, } struct RsyncFromSnapshotWriter { out_path: PathBuf, include_host_and_module: bool, } impl RsyncFromSnapshotWriter { /// Creates an empty directory for the rsync out_path. Particularly needed if the snapshot /// is empty since no files (and parent dirs) would be created in that case - and we want to /// see an empty directory. See issue #62. fn create_out_path_if_missing(&self) -> Result<()> { if !self.out_path.exists() { std::fs::create_dir_all(&self.out_path).with_context(|| { format!( "Cannot create output directory for rsync at {}", &self.out_path.display() ) }) } else { Ok(()) } } /// Processes the given snapshot and writes any published files under the /// rsync out_path directory fn for_snapshot_path(&mut self, snapshot: &Path) -> Result<()> { let source_file = File::open(snapshot)?; let buf_reader = BufReader::new(source_file); self.process(buf_reader)?; Ok(()) } } impl ProcessSnapshot for RsyncFromSnapshotWriter { type Err = anyhow::Error; fn meta(&mut self, _session_id: Uuid, _serial: u64) -> Result<()> { Ok(()) // nothing to do } fn publish( &mut self, uri: rpki::uri::Rsync, data: &mut rpki::rrdp::ObjectReader, ) -> Result<()> { let path = if self.include_host_and_module { self.out_path.join(format!( "{}/{}/{}", uri.authority(), uri.module_name(), uri.path() )) } else { self.out_path.join(uri.path()) }; // Read the bytes into memory, we will need to parse this in order // to fix the mtime of the file. In other words.. we _could_ copy // the bytes from the reader into a file on disk, but then we would // have to re-read them to parse them anyway. let mut bytes: Vec<u8> = vec![]; data.read_to_end(&mut bytes)?; file_ops::write_buf(&path, &bytes).with_context(|| { format!( "Could not copy element for uri: {}, to path: {}", uri, path.to_string_lossy() ) })?; if let Err(e) = fix_since(&path, &bytes) { warn!("{}", e); } Ok(()) } } // Try to fix the modification time for a repository object. // This is needed because otherwise some clients will always think // there is an update. fn fix_since(path
random_line_split
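The row above spells out why the 'current' symlink is updated by creating a temporary link and renaming it over the old one: the standard library refuses to overwrite an existing symlink, and removing it first would briefly leave readers without a 'current' path. Here is a stripped-down sketch of that swap, assuming a plain ".tmp" extension instead of the crate's config::TMP_FILE_EXT and omitting the logging and error context.

use std::path::Path;

// Point `current` at `target_dir_name` without ever leaving `current` missing.
// The link target is the bare directory name (e.g. "session_<uuid>_serial_<n>"),
// matching the relative symlink created above.
fn swap_symlink(current: &Path, target_dir_name: &str) -> std::io::Result<()> {
    let tmp = current.with_extension("tmp");
    if tmp.exists() {
        // A lingering link from an earlier interrupted run.
        std::fs::remove_file(&tmp)?;
    }
    std::os::unix::fs::symlink(target_dir_name, &tmp)?;
    // rename(2) replaces the destination atomically, so readers see either the
    // old link or the new one, never a missing path.
    std::fs::rename(&tmp, current)?;
    Ok(())
}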
rsync.rs
_name = file_ops::path_with_extension(&current_path, config::TMP_FILE_EXT); if tmp_name.exists() { std::fs::remove_file(&tmp_name).with_context(|| { format!( "Could not remove lingering temporary symlink for current rsync dir at '{}'", tmp_name.display() ) })?; } std::os::unix::fs::symlink(new_revision.dir_name(), &tmp_name).with_context(|| { format!( "Could not create temporary symlink for new rsync content at '{}'", tmp_name.display() ) })?; std::fs::rename(&tmp_name, &current_path).with_context(|| { format!( "Could not rename symlink for current rsync dir from '{}' to '{}'", tmp_name.display(), current_path.display() ) })?; Ok(()) } /// Rename the path for the new revision to the current rsync path, *after* /// renaming any existing current path to the serial and session for that /// revision. fn rename_new_revision_dir_to_current( new_revision: &RsyncRevision, rsync_state: &RsyncDirState, config: &Config, ) -> Result<()> { info!("Renaming rsync folders for close to atomic update of the rsync module dir"); let current_path = config.rsync_dir_current(); if let Some(current) = &rsync_state.current { let current_preserve_path = current.path(config); if current_path.exists() { info!( "Renaming the rsync directory for previous revision to: {}", current_preserve_path.display() ); std::fs::rename(&current_path, &current_preserve_path).with_context(|| { format!( "Could not rename current rsync dir from '{}' to '{}'", current_path.display(), current_preserve_path.display() ) })?; } } info!( "Rename rsync dir for new revision to '{}'", current_path.display() ); std::fs::rename(&new_revision.path(config), &current_path).with_context(|| { format!( "Could not rename new rsync dir from '{}' to '{}'", new_revision.path(config).display(), current_path.display() ) })?; Ok(()) } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] struct RsyncDirState { current: Option<RsyncRevision>, old: Vec<DeprecatedRsyncRevision>, } impl RsyncDirState { /// Gets the current state from disk, if a state file exists. Otherwise returns /// a new blank state. fn recover(config: &Config) -> Result<Self> { let state_path = config.rsync_state_path(); if state_path.exists() { let json_bytes = file_ops::read_file(&state_path).with_context(|| { format!("Cannot read rsync state file at: {}", state_path.display()) })?; serde_json::from_slice(json_bytes.as_ref()).with_context(|| { format!( "Cannot deserialize json for current state from {}", state_path.display() ) }) } else { Ok(RsyncDirState { current: None, old: vec![], }) } } /// Persists the state to disk fn persist(&self, config: &Config) -> Result<()> { let state_path = config.rsync_state_path(); let json = serde_json::to_string_pretty(&self)?; file_ops::write_buf(&state_path, json.as_bytes()).with_context(|| "Could not save state.") } /// Updates the current revision for this state, moves a possible /// existing current state to old. fn update_current(&mut self, current: RsyncRevision) { let existing = std::mem::replace(&mut self.current, Some(current)); if let Some(existing) = existing { self.old.push(existing.deprecate()); } } /// Cleans old directories from disk when their time has come, and updates /// this state (forgets these old versions). Will throw an error if removing /// an old dir fails, but will simply skip removing old dirs if they had /// already been removed. 
fn clean_old(&mut self, config: &Config) -> Result<()> { let clean_before = Time::seconds_ago(config.cleanup_after); for old in self .old .iter() .filter(|deprecated| deprecated.since <= clean_before) { let path = old.revision.path(config); if path.exists() { info!( "Removing rsync directory: {}, deprecated since: {}", path.display(), old.since ); // Try to remove the old directory if it still exists std::fs::remove_dir_all(&path).with_context(|| { format!( "Could not remove rsync dir for old revision at: {}", path.display() ) })?; } } self.old .retain(|deprecated| deprecated.since > clean_before); Ok(()) } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] struct RsyncRevision { #[serde(deserialize_with = "util::de_uuid", serialize_with = "util::ser_uuid")] session_id: Uuid, serial: u64, } impl RsyncRevision { fn dir_name(&self) -> String { format!("session_{}_serial_{}", self.session_id, self.serial) } fn path(&self, config: &Config) -> PathBuf { config.rsync_dir.join(&self.dir_name()) } fn deprecate(self) -> DeprecatedRsyncRevision { DeprecatedRsyncRevision { since: Time::now(), revision: self, } } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] struct DeprecatedRsyncRevision { since: Time, revision: RsyncRevision, } struct RsyncFromSnapshotWriter { out_path: PathBuf, include_host_and_module: bool, } impl RsyncFromSnapshotWriter { /// Creates an empty directory for the rsync out_path. Particularly needed if the snapshot /// is empty since no files (and parent dirs) would be created in that case - and we want to /// see an empty directory. See issue #62. fn create_out_path_if_missing(&self) -> Result<()> { if !self.out_path.exists() { std::fs::create_dir_all(&self.out_path).with_context(|| { format!( "Cannot create output directory for rsync at {}", &self.out_path.display() ) }) } else { Ok(()) } } /// Processes the given snapshot and writes any published files under the /// rsync out_path directory fn for_snapshot_path(&mut self, snapshot: &Path) -> Result<()> { let source_file = File::open(snapshot)?; let buf_reader = BufReader::new(source_file); self.process(buf_reader)?; Ok(()) } } impl ProcessSnapshot for RsyncFromSnapshotWriter { type Err = anyhow::Error; fn meta(&mut self, _session_id: Uuid, _serial: u64) -> Result<()> { Ok(()) // nothing to do } fn publish( &mut self, uri: rpki::uri::Rsync, data: &mut rpki::rrdp::ObjectReader, ) -> Result<()> { let path = if self.include_host_and_module { self.out_path.join(format!( "{}/{}/{}", uri.authority(), uri.module_name(), uri.path() )) } else { self.out_path.join(uri.path()) }; // Read the bytes into memory, we will need to parse this in order // to fix the mtime of the file. In other words.. we _could_ copy // the bytes from the reader into a file on disk, but then we would // have to re-read them to parse them anyway. let mut bytes: Vec<u8> = vec![]; data.read_to_end(&mut bytes)?; file_ops::write_buf(&path, &bytes).with_context(|| { format!( "Could not copy element for uri: {}, to path: {}", uri, path.to_string_lossy() ) })?; if let Err(e) = fix_since(&path, &bytes) { warn!("{}", e); } Ok(()) } } // Try to fix the modification time for a repository object. // This is needed because otherwise some clients will always think // there is an update. 
fn fix_since(path: &Path, data: &[u8]) -> Result<()> { let path_str = path.to_string_lossy(); let time = if path_str.ends_with(".cer") { Cert::decode(data).map(|cert| cert.validity().not_before()) } else if path_str.ends_with(".crl") { Crl::decode(data).map(|crl| crl.this_update()) } else if path_str.ends_with(".mft") { Manifest::decode(data, false).map(|mft| mft.this_update()) } else if path_str.ends_with(".roa") { Roa::decode(data, false).map(|roa| roa.cert().validity().not_before()) } else
{ // Try to parse this as a generic RPKI signed object SignedObject::decode(data, false).map(|signed| signed.cert().validity().not_before()) }
conditional_block
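The fix_since fragment above recovers a timestamp from the decoded RPKI object (certificate, CRL, manifest, ROA, or a generic signed object) so the file's mtime can be pinned and rsync clients stop seeing phantom updates. The part that actually writes the mtime is not visible in this excerpt; one plausible way to apply a recovered Unix timestamp is the filetime crate, sketched below as an assumption rather than the crate's actual choice.

use std::path::Path;

// `filetime` is an assumed helper crate here, not necessarily what rsync.rs uses.
use filetime::{set_file_mtime, FileTime};

// Pin the file's modification time to a timestamp recovered from the object
// (seconds since the Unix epoch), so rsync's quick check sees a stable mtime.
fn apply_mtime(path: &Path, unix_seconds: i64) -> std::io::Result<()> {
    set_file_mtime(path, FileTime::from_unix_time(unix_seconds, 0))
}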
tcp.rs
out your //! specific `ulimit -n` settings and raise the max number of open files.) //! //! #### Avoid cross-thread communication //! This library uses no cross-thread communication via `std::sync` or `crossbeam`. All futures //! are executed on a `LocalPool`, and the number of OS threads used is user configurable. This //! has a number of design impacts. For example, it becomes more difficult to aggregate what each //! connection is doing. This is simple if you just pass the results to a channel, but this has a //! non-trivial impact on performance. //! //! *Note: This is currently violated by the way we accomplish rate limiting, which relies on a //! global thread that manages timers. This ends up putting disproportionate load on that thread at //! some point. But if you're relying on rate limiting you're trying to slow it down, so we're //! putting this in the 'feature' column. (If anyone would like to contribute a thread-local //! futures timer it'd be a great contribution to the Rust community!) //! use std::net::SocketAddr; use std::time::Instant; use async_std::io::{self}; use async_std::net::{TcpStream}; use async_std::prelude::*; // I'd like to remove this dependency, but async-std doesn't currently have a LocalPool executor // todo: Revisit use futures::executor::LocalPool; use futures::task::SpawnExt; use futures_timer::Delay; use log::{debug, error, info, warn}; use crate::{Config}; use byte_mutator::ByteMutator; use byte_mutator::fuzz_config::FuzzConfig; /// The overall test runner /// /// This method contains the main core loop. /// /// `clobber` will create `connections` number of async futures, distribute them across `threads` /// threads (defaults to num_cpus), and each future will perform requests in a tight loop. If /// there is a `rate` specified, there will be an optional delay to stay under the requested rate. /// The futures are driven by a LocalPool executor, and there is no cross-thread synchronization /// or communication with the default config. Note: for maximum performance avoid use of the /// `rate`, `connect_timeout`, and `read_timeout` options. /// pub fn clobber(config: Config, message: Vec<u8>) -> std::io::Result<()> { info!("Starting: {:#?}", config); let mut threads = Vec::with_capacity(config.num_threads() as usize); // configure fuzzing if a file has been provided in the config let message = match &config.fuzz_path { None => ByteMutator::new(&message), Some(path) => { match FuzzConfig::from_file(&path) { Ok(fuzz_config) => ByteMutator::new_from_config(&message, fuzz_config), Err(e) => { return Err(e) }, } }, }; for _ in 0..config.num_threads() { // per-thread clones let message = message.clone(); let config = config.clone(); // start OS thread which will contain a chunk of connections let thread = std::thread::spawn(move || { let mut pool = LocalPool::new(); let mut spawner = pool.spawner(); // all connection futures are spawned up front for i in 0..config.connections_per_thread() { // per-connection clones let message = message.clone(); let config = config.clone(); spawner .spawn(async move { if config.rate.is_some() { Delay::new(i * config.connection_delay()); } connection(message, config) .await .expect("Failed to run connection"); }).unwrap(); } pool.run(); }); threads.push(thread); } for handle in threads { handle.join().unwrap(); } Ok(()) } /// Handles a single connection /// /// This method infinitely loops, performing a connect/write/read transaction against the /// configured target. 
If `repeat` is true in `config`, the loop will keep the connection alive. /// Otherwise, it will drop the connection after successfully completing a read, and then it will /// start over and reconnect. If it does not successfully read, it will block until the underlying /// TCP read fails unless `read-timeout` is configured. /// /// This is a long-running function that will continue making calls until it hits a time or total /// loop count limit. /// /// todo: This ignores both read-timeout and repeat async fn connection(mut message: ByteMutator, config: Config) -> io::Result<()> { let start = Instant::now(); let mut count = 0; let mut loop_complete = |config:&Config| { count += 1; if let Some(duration) = config.duration { if Instant::now() >= start + duration { return true; } } if let Some(limit) = config.limit_per_connection() { if count > limit { return true; } } false }; let should_delay = |elapsed, config: &Config| { match config.rate { Some(_) => { if elapsed < config.connection_delay() { true } else { warn!("running behind; consider adding more connections"); false } } None => false, } }; // This is the guts of the application; the tight loop that executes requests let mut read_buffer = [0u8; 1024]; // todo variable size? :( while !loop_complete(&config) { // todo: add optional timeouts back let request_start = Instant::now(); if let Ok(mut stream) = connect(&config.target).await { // one write/read transaction per repeat for _ in 0..config.repeat { if write(&mut stream, message.read()).await.is_ok() { read(&mut stream, &mut read_buffer).await.ok(); } } // todo: analysis // advance mutator state (no-op with no fuzzer config) message.next(); } if config.rate.is_some() { let elapsed = Instant::now() - request_start; if should_delay(elapsed, &config) { Delay::new(config.connection_delay() - elapsed) .await .unwrap(); } } } Ok(()) } /// Connects to the provided address, logs, returns Result<TcpStream, io::Error> async fn connect(addr: &SocketAddr) -> io::Result<TcpStream> { match TcpStream::connect(addr).await { Ok(stream) => { debug!("connected to {}", addr); Ok(stream) } Err(e) => { if e.kind() != io::ErrorKind::TimedOut { error!("unknown connect error: '{}'", e); } Err(e) } } } /// Writes provided buffer to the provided address, logs, returns Result<bytes_written, io::Error> async fn write(stream: &mut TcpStream, buf: &[u8]) -> io::Result<usize> { match stream.write_all(buf).await { Ok(_) => { let n = buf.len(); debug!("{} bytes written", n); Ok(n) } Err(e) => { error!("write error: '{}'", e); Err(e) } } } /// Reads from stream, logs, returns Result<num_bytes_read, io::Error> async fn read(stream: &mut TcpStream, mut read_buffer: &mut [u8]) -> io::Result<usize> { match stream.read(&mut read_buffer).await { Ok(n) => { debug!("{} bytes read ", n); Ok(n) } Err(e) => { error!("read error: '{}'", e); Err(e) } } // todo: Do something with the read_buffer? 
// todo: More verbose logging; dump to stdout, do post-run analysis on demand } #[cfg(test)] mod tests { use super::*; use crate::server::echo_server; #[test] fn test_connect() { let result = async_std::task::block_on(async { let addr = echo_server().unwrap(); let result = connect(&addr).await; result }); assert!(result.is_ok()); } #[test] fn test_write() { let addr = echo_server().unwrap(); let input = "test".as_bytes(); let want = input.len(); let result = async_std::task::block_on(async move { let mut stream = connect(&addr).await?; let bytes_written = write(&mut stream, &input).await?; Ok::<_, io::Error>(bytes_written) }); assert!(result.is_ok()); assert_eq!(result.unwrap(), want); } #[test] fn test_read()
{ let addr = echo_server().unwrap(); let input = "test\n\r\n".as_bytes(); let want = input.len(); let result = async_std::task::block_on(async move { let mut stream = connect(&addr).await?; let mut read_buffer = [0u8; 1024]; let _ = write(&mut stream, &input).await?; let bytes_read = read(&mut stream, &mut read_buffer).await?; Ok::<_, io::Error>(bytes_read) }); assert!(result.is_ok()); assert_eq!(want, result.unwrap()); }
identifier_body
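The clobber function above spawns one OS thread per configured thread count and drives all of that thread's connection futures on a futures LocalPool, so no work crosses threads. The sketch below shows that shape with a placeholder async body; the real connection loop, config plumbing, and per-connection start delay are elided.

use futures::executor::LocalPool;
use futures::task::SpawnExt;

fn run(threads: usize, connections_per_thread: usize) {
    let handles: Vec<_> = (0..threads)
        .map(|_| {
            std::thread::spawn(move || {
                // One single-threaded executor per OS thread; nothing is shared.
                let mut pool = LocalPool::new();
                let mut spawner = pool.spawner();
                for i in 0..connections_per_thread {
                    spawner
                        .spawn(async move {
                            // Placeholder for the real connect/write/read loop.
                            println!("connection {} running", i);
                        })
                        .expect("spawn failed");
                }
                // Drives every spawned future on this thread to completion.
                pool.run();
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}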
tcp.rs
//! //! The pool of connections is created up front, and then connections begin sending requests //! to match the defined rate. (Or in the case of no defined, they start immediately.) In general //! we try to to limit significant allocations to startup rather than doing them on the fly. //! More specifically, you shouldn't see any of these behaviors inside the tight `while` loop //! inside the `connection()` method. //! //! ### Limit open ports and files //! //! Two of the key limiting factors for high TCP client throughput are running out of ports, or //! opening more files than the underlying OS will allow. `clobber` tries to minimize issues here //! by giving users control over the max connections. (It's also a good idea to check out your //! specific `ulimit -n` settings and raise the max number of open files.) //! //! #### Avoid cross-thread communication //! This library uses no cross-thread communication via `std::sync` or `crossbeam`. All futures //! are executed on a `LocalPool`, and the number of OS threads used is user configurable. This //! has a number of design impacts. For example, it becomes more difficult to aggregate what each //! connection is doing. This is simple if you just pass the results to a channel, but this has a //! non-trivial impact on performance. //! //! *Note: This is currently violated by the way we accomplish rate limiting, which relies on a //! global thread that manages timers. This ends up putting disproportionate load on that thread at //! some point. But if you're relying on rate limiting you're trying to slow it down, so we're //! putting this in the 'feature' column. (If anyone would like to contribute a thread-local //! futures timer it'd be a great contribution to the Rust community!) //! use std::net::SocketAddr; use std::time::Instant; use async_std::io::{self}; use async_std::net::{TcpStream}; use async_std::prelude::*; // I'd like to remove this dependency, but async-std doesn't currently have a LocalPool executor // todo: Revisit use futures::executor::LocalPool; use futures::task::SpawnExt; use futures_timer::Delay; use log::{debug, error, info, warn}; use crate::{Config}; use byte_mutator::ByteMutator; use byte_mutator::fuzz_config::FuzzConfig; /// The overall test runner /// /// This method contains the main core loop. /// /// `clobber` will create `connections` number of async futures, distribute them across `threads` /// threads (defaults to num_cpus), and each future will perform requests in a tight loop. If /// there is a `rate` specified, there will be an optional delay to stay under the requested rate. /// The futures are driven by a LocalPool executor, and there is no cross-thread synchronization /// or communication with the default config. Note: for maximum performance avoid use of the /// `rate`, `connect_timeout`, and `read_timeout` options. 
/// pub fn clobber(config: Config, message: Vec<u8>) -> std::io::Result<()> { info!("Starting: {:#?}", config); let mut threads = Vec::with_capacity(config.num_threads() as usize); // configure fuzzing if a file has been provided in the config let message = match &config.fuzz_path { None => ByteMutator::new(&message), Some(path) => { match FuzzConfig::from_file(&path) { Ok(fuzz_config) => ByteMutator::new_from_config(&message, fuzz_config), Err(e) => { return Err(e) }, } }, }; for _ in 0..config.num_threads() { // per-thread clones let message = message.clone(); let config = config.clone(); // start OS thread which will contain a chunk of connections let thread = std::thread::spawn(move || { let mut pool = LocalPool::new(); let mut spawner = pool.spawner(); // all connection futures are spawned up front for i in 0..config.connections_per_thread() { // per-connection clones let message = message.clone(); let config = config.clone(); spawner .spawn(async move { if config.rate.is_some() { Delay::new(i * config.connection_delay()); } connection(message, config) .await .expect("Failed to run connection"); }).unwrap(); } pool.run(); }); threads.push(thread); } for handle in threads { handle.join().unwrap(); } Ok(()) } /// Handles a single connection /// /// This method infinitely loops, performing a connect/write/read transaction against the /// configured target. If `repeat` is true in `config`, the loop will keep the connection alive. /// Otherwise, it will drop the connection after successfully completing a read, and then it will /// start over and reconnect. If it does not successfully read, it will block until the underlying /// TCP read fails unless `read-timeout` is configured. /// /// This is a long-running function that will continue making calls until it hits a time or total /// loop count limit. /// /// todo: This ignores both read-timeout and repeat async fn connection(mut message: ByteMutator, config: Config) -> io::Result<()> { let start = Instant::now(); let mut count = 0; let mut loop_complete = |config:&Config| { count += 1; if let Some(duration) = config.duration { if Instant::now() >= start + duration { return true; } } if let Some(limit) = config.limit_per_connection() { if count > limit { return true; } } false }; let should_delay = |elapsed, config: &Config| { match config.rate { Some(_) => { if elapsed < config.connection_delay() { true } else { warn!("running behind; consider adding more connections"); false } } None => false, } }; // This is the guts of the application; the tight loop that executes requests let mut read_buffer = [0u8; 1024]; // todo variable size? 
:( while !loop_complete(&config) { // todo: add optional timeouts back let request_start = Instant::now(); if let Ok(mut stream) = connect(&config.target).await { // one write/read transaction per repeat for _ in 0..config.repeat { if write(&mut stream, message.read()).await.is_ok() { read(&mut stream, &mut read_buffer).await.ok(); } } // todo: analysis // advance mutator state (no-op with no fuzzer config) message.next(); } if config.rate.is_some() { let elapsed = Instant::now() - request_start; if should_delay(elapsed, &config) { Delay::new(config.connection_delay() - elapsed) .await .unwrap(); } } } Ok(()) } /// Connects to the provided address, logs, returns Result<TcpStream, io::Error> async fn connect(addr: &SocketAddr) -> io::Result<TcpStream> { match TcpStream::connect(addr).await { Ok(stream) => { debug!("connected to {}", addr); Ok(stream) } Err(e) => { if e.kind() != io::ErrorKind::TimedOut { error!("unknown connect error: '{}'", e); } Err(e) } } } /// Writes provided buffer to the provided address, logs, returns Result<bytes_written, io::Error> async fn write(stream: &mut TcpStream, buf: &[u8]) -> io::Result<usize> { match stream.write_all(buf).await { Ok(_) => { let n = buf.len(); debug!("{} bytes written", n); Ok(n) } Err(e) => { error!("write error: '{}'", e); Err(e) } } } /// Reads from stream, logs, returns Result<num_bytes_read, io::Error> async fn read(stream: &mut TcpStream, mut read_buffer: &mut [u8]) -> io::Result<usize> { match stream.read(&mut read_buffer).await { Ok(n) => { debug!("{} bytes read ", n); Ok(n) } Err(e) => { error!("read error: '{}'", e); Err(e) } } // todo: Do something with the read_buffer? // todo: More verbose logging; dump to stdout, do post-run analysis on demand } #[cfg(test)] mod tests { use super::*; use crate::server::echo_server; #[test] fn
() { let result = async_std::task::block_on(async { let addr = echo_server().unwrap(); let result = connect(&addr).await; result }); assert!(result.is_ok()); } #[test] fn test_write() { let addr = echo_server().unwrap(); let input = "test".as_bytes(); let want = input.len(); let result = async_std::task::block_on(async move { let mut stream = connect(&addr).await?; let bytes_written = write(&mut stream, &input).await?; Ok::<_, io::Error>(bytes_written) }); assert!(
test_connect
identifier_name
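The loop above paces itself with config.connection_delay(): after each transaction it sleeps whatever remains of the per-connection interval and warns when the transaction already overran it. connection_delay()'s definition is not shown in this excerpt; the sketch below gives one plausible way to derive it from a target rate spread across N connections, plus the remainder calculation.

use std::time::Duration;

// With `rate` requests per second spread evenly over `connections` connections,
// each connection should start one transaction every `connections / rate` seconds.
fn connection_delay(rate: u32, connections: u32) -> Duration {
    Duration::from_secs_f64(connections as f64 / rate as f64)
}

// Sleep only for what is left of the interval after the work already done;
// `None` means the transaction overran the interval and we are running behind.
fn remaining_delay(interval: Duration, elapsed: Duration) -> Option<Duration> {
    interval.checked_sub(elapsed)
}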
tcp.rs
//! //! The pool of connections is created up front, and then connections begin sending requests //! to match the defined rate. (Or in the case of no defined, they start immediately.) In general //! we try to to limit significant allocations to startup rather than doing them on the fly. //! More specifically, you shouldn't see any of these behaviors inside the tight `while` loop //! inside the `connection()` method. //! //! ### Limit open ports and files //! //! Two of the key limiting factors for high TCP client throughput are running out of ports, or //! opening more files than the underlying OS will allow. `clobber` tries to minimize issues here //! by giving users control over the max connections. (It's also a good idea to check out your //! specific `ulimit -n` settings and raise the max number of open files.) //! //! #### Avoid cross-thread communication //! This library uses no cross-thread communication via `std::sync` or `crossbeam`. All futures //! are executed on a `LocalPool`, and the number of OS threads used is user configurable. This //! has a number of design impacts. For example, it becomes more difficult to aggregate what each //! connection is doing. This is simple if you just pass the results to a channel, but this has a //! non-trivial impact on performance. //! //! *Note: This is currently violated by the way we accomplish rate limiting, which relies on a //! global thread that manages timers. This ends up putting disproportionate load on that thread at //! some point. But if you're relying on rate limiting you're trying to slow it down, so we're //! putting this in the 'feature' column. (If anyone would like to contribute a thread-local //! futures timer it'd be a great contribution to the Rust community!) //! use std::net::SocketAddr; use std::time::Instant; use async_std::io::{self}; use async_std::net::{TcpStream}; use async_std::prelude::*; // I'd like to remove this dependency, but async-std doesn't currently have a LocalPool executor // todo: Revisit use futures::executor::LocalPool; use futures::task::SpawnExt; use futures_timer::Delay; use log::{debug, error, info, warn}; use crate::{Config}; use byte_mutator::ByteMutator; use byte_mutator::fuzz_config::FuzzConfig; /// The overall test runner /// /// This method contains the main core loop. /// /// `clobber` will create `connections` number of async futures, distribute them across `threads` /// threads (defaults to num_cpus), and each future will perform requests in a tight loop. If /// there is a `rate` specified, there will be an optional delay to stay under the requested rate. /// The futures are driven by a LocalPool executor, and there is no cross-thread synchronization /// or communication with the default config. Note: for maximum performance avoid use of the /// `rate`, `connect_timeout`, and `read_timeout` options. 
/// pub fn clobber(config: Config, message: Vec<u8>) -> std::io::Result<()> { info!("Starting: {:#?}", config); let mut threads = Vec::with_capacity(config.num_threads() as usize); // configure fuzzing if a file has been provided in the config let message = match &config.fuzz_path { None => ByteMutator::new(&message), Some(path) => { match FuzzConfig::from_file(&path) { Ok(fuzz_config) => ByteMutator::new_from_config(&message, fuzz_config), Err(e) => { return Err(e) }, } }, }; for _ in 0..config.num_threads() { // per-thread clones let message = message.clone(); let config = config.clone(); // start OS thread which will contain a chunk of connections let thread = std::thread::spawn(move || { let mut pool = LocalPool::new(); let mut spawner = pool.spawner(); // all connection futures are spawned up front for i in 0..config.connections_per_thread() { // per-connection clones let message = message.clone(); let config = config.clone(); spawner .spawn(async move { if config.rate.is_some() { Delay::new(i * config.connection_delay()); } connection(message, config) .await .expect("Failed to run connection"); }).unwrap(); } pool.run(); }); threads.push(thread); } for handle in threads { handle.join().unwrap(); } Ok(()) } /// Handles a single connection /// /// This method infinitely loops, performing a connect/write/read transaction against the /// configured target. If `repeat` is true in `config`, the loop will keep the connection alive. /// Otherwise, it will drop the connection after successfully completing a read, and then it will /// start over and reconnect. If it does not successfully read, it will block until the underlying /// TCP read fails unless `read-timeout` is configured. /// /// This is a long-running function that will continue making calls until it hits a time or total /// loop count limit. /// /// todo: This ignores both read-timeout and repeat async fn connection(mut message: ByteMutator, config: Config) -> io::Result<()> { let start = Instant::now(); let mut count = 0; let mut loop_complete = |config:&Config| { count += 1; if let Some(duration) = config.duration { if Instant::now() >= start + duration { return true; } } if let Some(limit) = config.limit_per_connection() { if count > limit { return true; } } false }; let should_delay = |elapsed, config: &Config| { match config.rate { Some(_) => { if elapsed < config.connection_delay() { true } else { warn!("running behind; consider adding more connections"); false } } None => false, } }; // This is the guts of the application; the tight loop that executes requests let mut read_buffer = [0u8; 1024]; // todo variable size? 
:( while !loop_complete(&config) { // todo: add optional timeouts back let request_start = Instant::now(); if let Ok(mut stream) = connect(&config.target).await { // one write/read transaction per repeat for _ in 0..config.repeat { if write(&mut stream, message.read()).await.is_ok() { read(&mut stream, &mut read_buffer).await.ok(); } } // todo: analysis // advance mutator state (no-op with no fuzzer config) message.next(); } if config.rate.is_some() { let elapsed = Instant::now() - request_start; if should_delay(elapsed, &config) { Delay::new(config.connection_delay() - elapsed) .await .unwrap(); } } } Ok(()) } /// Connects to the provided address, logs, returns Result<TcpStream, io::Error> async fn connect(addr: &SocketAddr) -> io::Result<TcpStream> { match TcpStream::connect(addr).await { Ok(stream) => { debug!("connected to {}", addr); Ok(stream) } Err(e) => { if e.kind() != io::ErrorKind::TimedOut { error!("unknown connect error: '{}'", e); } Err(e) } } } /// Writes provided buffer to the provided address, logs, returns Result<bytes_written, io::Error> async fn write(stream: &mut TcpStream, buf: &[u8]) -> io::Result<usize> { match stream.write_all(buf).await { Ok(_) => { let n = buf.len(); debug!("{} bytes written", n); Ok(n) } Err(e) => { error!("write error: '{}'", e); Err(e) } } } /// Reads from stream, logs, returns Result<num_bytes_read, io::Error> async fn read(stream: &mut TcpStream, mut read_buffer: &mut [u8]) -> io::Result<usize> { match stream.read(&mut read_buffer).await { Ok(n) => { debug!("{} bytes read ", n); Ok(n) } Err(e) => { error!("read error: '{}'", e); Err(e) } }
} #[cfg(test)] mod tests { use super::*; use crate::server::echo_server; #[test] fn test_connect() { let result = async_std::task::block_on(async { let addr = echo_server().unwrap(); let result = connect(&addr).await; result }); assert!(result.is_ok()); } #[test] fn test_write() { let addr = echo_server().unwrap(); let input = "test".as_bytes(); let want = input.len(); let result = async_std::task::block_on(async move { let mut stream = connect(&addr).await?; let bytes_written = write(&mut stream, &input).await?; Ok::<_, io::Error>(bytes_written) }); assert!(result.is
// todo: Do something with the read_buffer? // todo: More verbose logging; dump to stdout, do post-run analysis on demand
random_line_split
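The read helper above still lists timeouts as a todo, so a read blocks until the underlying TCP read fails. With async-std, one way to bound it is io::timeout, which resolves to an ErrorKind::TimedOut error if the wrapped future does not finish in time. This is a sketch of how a read timeout could be layered on, not the crate's implementation.

use std::time::Duration;

use async_std::io;
use async_std::net::TcpStream;
use async_std::prelude::*;

// Bound a single read instead of blocking until the peer closes the connection
// or the OS gives up; on expiry the result is an ErrorKind::TimedOut error.
async fn read_with_timeout(
    stream: &mut TcpStream,
    buf: &mut [u8],
    dur: Duration,
) -> io::Result<usize> {
    io::timeout(dur, stream.read(buf)).await
}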
epoll_file.rs
a host // file is ready, then it will be pushed into the ready list. Note // that this is the only way through which a host file can appear in // the ready list. This ensures that only the host files whose // events are update-to-date will be returned, reducing the chances // of false positive results to the minimum. self.host_file_epoller.poll_events(max_count); // Prepare for the waiter.wait_mut() at the end of the loop self.waiters.reset_and_enqueue(waiter.as_ref()); // Pop from the ready list to find as many results as possible let mut count = 0; while count < max_count { // Pop some entries from the ready list let mut ready_entries = self.pop_ready(max_count - count); if ready_entries.len() == 0 { break; } // Note that while iterating the ready entries, we do not hold the lock // of the ready list. This reduces the chances of lock contention. for ep_entry in ready_entries.into_iter() { if ep_entry.is_deleted.load(Ordering::Acquire) { continue; } // Poll the file that corresponds to the entry let mut inner = ep_entry.inner.lock().unwrap(); let mask = inner.event.mask(); let file = &ep_entry.file; let events = file.poll_new() & mask; if events.is_empty() { continue; } // We find a ready file! let mut revent = inner.event; revent.mask = events; revents[count].write(revent); count += 1; // Behave differently according the epoll flags if inner.flags.contains(EpollFlags::ONE_SHOT) { inner.event.mask = IoEvents::empty(); } if !inner .flags .intersects(EpollFlags::EDGE_TRIGGER | EpollFlags::ONE_SHOT) { drop(inner); // Host files should not be reinserted into the ready list if ep_entry.file.host_fd().is_none() { reinsert.push_back(ep_entry); } } } } // If any results, we can return if count > 0 { // Push the entries that are still ready after polling back to the ready list if reinsert.len() > 0 { self.push_ready_iter(reinsert.into_iter()); } return Ok(count); } // Wait for a while to try again later. let ret = waiter.wait_mut(timeout.as_mut()); if let Err(e) = ret { if e.errno() == ETIMEDOUT { return Ok(0); } else { return Err(e); } } // This means we have been waken up successfully. Let's try again. } } fn add_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> { let file = current!().file(fd)?; let arc_self = self.weak_self.upgrade().unwrap(); if Arc::ptr_eq(&(arc_self as Arc<dyn File>), &file) { return_errno!(EINVAL, "a epoll file cannot epoll itself"); } self.check_flags(&flags); self.prepare_event(&mut event); let ep_entry = Arc::new(EpollEntry::new(fd, file, event, flags)); // A critical section protected by the lock of self.interest { let notifier = ep_entry .file .notifier() .ok_or_else(|| errno!(EINVAL, "a file must has an associated notifier"))?; let mut interest_entries = self.interest.lock().unwrap(); if interest_entries.get(&fd).is_some() { return_errno!(EEXIST, "fd is already registered"); } interest_entries.insert(fd, ep_entry.clone()); // Start observing events on the target file. 
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; let weak_ep_entry = Arc::downgrade(&ep_entry); notifier.register(weak_observer, Some(IoEvents::all()), Some(weak_ep_entry)); // Handle host file if ep_entry.file.host_fd().is_some() { self.host_file_epoller .add_file(ep_entry.file.clone(), event, flags); return Ok(()); } } self.push_ready(ep_entry); Ok(()) } fn del_interest(&self, fd: FileDesc) -> Result<()> { // A critical section protected by the lock of self.interest { let mut interest_entries = self.interest.lock().unwrap(); let ep_entry = interest_entries .remove(&fd) .ok_or_else(|| errno!(ENOENT, "fd is not added"))?; ep_entry.is_deleted.store(true, Ordering::Release); let notifier = ep_entry.file.notifier().unwrap(); let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; notifier.unregister(&weak_observer); if ep_entry.file.host_fd().is_some() { self.host_file_epoller.del_file(&ep_entry.file); } } Ok(()) } fn mod_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> { self.check_flags(&flags); self.prepare_event(&mut event); // A critical section protected by the lock of self.interest let ep_entry = { let mut interest_entries = self.interest.lock().unwrap(); let ep_entry = interest_entries .get(&fd) .ok_or_else(|| errno!(ENOENT, "fd is not added"))? .clone(); let new_ep_inner = EpollEntryInner { event, flags }; let mut old_ep_inner = ep_entry.inner.lock().unwrap(); *old_ep_inner = new_ep_inner; drop(old_ep_inner); if ep_entry.file.host_fd().is_some() { self.host_file_epoller .mod_file(&ep_entry.file, event, flags); return Ok(()); } ep_entry }; self.push_ready(ep_entry); Ok(()) } fn push_ready(&self, ep_entry: Arc<EpollEntry>) { // Fast path to avoid locking if ep_entry.is_ready.load(Ordering::Relaxed) { // Concurrency note: // What if right after returning a true value of `is_ready`, then the `EpollEntry` is // popped from the ready list? Does it mean than we miss an interesting event? // // The answer is NO. If the `is_ready` field of an `EpollEntry` turns from `true` to // `false`, then the `EpollEntry` must be popped out of the ready list and its // corresponding file must be polled in the `wait` method. This means that we have // taken into account any interesting events happened on the file so far. 
return; } self.push_ready_iter(std::iter::once(ep_entry)); } fn push_ready_iter<I: Iterator<Item = Arc<EpollEntry>>>(&self, ep_entries: I) { let mut has_pushed_any = false; // A critical section protected by self.ready.lock() { let mut ready_entries = self.ready.lock().unwrap(); for ep_entry in ep_entries { if ep_entry.is_ready.load(Ordering::Relaxed) { continue; } ep_entry.is_ready.store(true, Ordering::Relaxed); ready_entries.push_back(ep_entry); has_pushed_any = true; } } if has_pushed_any { self.mark_ready(); } } fn pop_ready(&self, max_count: usize) -> VecDeque<Arc<EpollEntry>> { // A critical section protected by self.ready.lock() { let mut ready_entries = self.ready.lock().unwrap(); let max_count = max_count.min(ready_entries.len()); ready_entries .drain(..max_count) .map(|ep_entry| { ep_entry.is_ready.store(false, Ordering::Relaxed); ep_entry }) .collect::<VecDeque<Arc<EpollEntry>>>() } } fn mark_ready(&self) { self.notifier.broadcast(&IoEvents::IN); self.waiters.dequeue_and_wake_all(); } fn check_flags(&self, flags: &EpollFlags) { if flags.intersects(EpollFlags::EXCLUSIVE | EpollFlags::WAKE_UP) { warn!("{:?} contains unsupported flags", flags); } } fn prepare_event(&self, event: &mut EpollEvent) { // Add two events that are reported by default event.mask |= (IoEvents::ERR | IoEvents::HUP); } } impl File for EpollFile { fn poll_new(&self) -> IoEvents { if self .host_events .load(Ordering::Acquire) .contains(IoEvents::IN) { return IoEvents::IN; } let ready_entries = self.ready.lock().unwrap(); if !ready_entries.is_empty() { return IoEvents::IN; } IoEvents::empty() } fn notifier(&self) -> Option<&IoNotifier> { Some(&self.notifier) } fn host_fd(&self) -> Option<&HostFd>
{ Some(self.host_file_epoller.host_fd()) }
identifier_body
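The wait loop above drains the ready list, polls each entry against its interest mask, clears the mask for ONE_SHOT entries, and pushes only level-triggered, non-host entries back for the next round. The toy below reproduces just that reinsertion decision with simplified stand-in types; it ignores host files, masks, and locking.

use std::collections::VecDeque;

#[derive(Clone, Copy, PartialEq)]
enum Trigger {
    Level,
    Edge,
    OneShot,
}

struct Entry {
    trigger: Trigger,
    // Stand-in for "polling the file against its interest mask found events".
    ready: bool,
}

// Drain the ready queue once: report entries that are still ready and requeue
// only the level-triggered ones so the next wait() sees them again.
fn drain_once(queue: &mut VecDeque<Entry>) -> usize {
    let mut reported = 0;
    let mut reinsert = VecDeque::new();
    while let Some(entry) = queue.pop_front() {
        if !entry.ready {
            // False positive: the event was already consumed.
            continue;
        }
        reported += 1;
        if entry.trigger == Trigger::Level {
            reinsert.push_back(entry);
        }
    }
    queue.append(&mut reinsert);
    reported
}

fn main() {
    let mut queue = VecDeque::from(vec![
        Entry { trigger: Trigger::Level, ready: true },
        Entry { trigger: Trigger::Edge, ready: true },
        Entry { trigger: Trigger::OneShot, ready: false },
    ]);
    assert_eq!(drain_once(&mut queue), 2);
    assert_eq!(queue.len(), 1); // only the level-triggered entry was requeued
}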
epoll_file.rs
()); } return Ok(count); } // Wait for a while to try again later. let ret = waiter.wait_mut(timeout.as_mut()); if let Err(e) = ret { if e.errno() == ETIMEDOUT { return Ok(0); } else { return Err(e); } } // This means we have been waken up successfully. Let's try again. } } fn add_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> { let file = current!().file(fd)?; let arc_self = self.weak_self.upgrade().unwrap(); if Arc::ptr_eq(&(arc_self as Arc<dyn File>), &file) { return_errno!(EINVAL, "a epoll file cannot epoll itself"); } self.check_flags(&flags); self.prepare_event(&mut event); let ep_entry = Arc::new(EpollEntry::new(fd, file, event, flags)); // A critical section protected by the lock of self.interest { let notifier = ep_entry .file .notifier() .ok_or_else(|| errno!(EINVAL, "a file must has an associated notifier"))?; let mut interest_entries = self.interest.lock().unwrap(); if interest_entries.get(&fd).is_some() { return_errno!(EEXIST, "fd is already registered"); } interest_entries.insert(fd, ep_entry.clone()); // Start observing events on the target file. let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; let weak_ep_entry = Arc::downgrade(&ep_entry); notifier.register(weak_observer, Some(IoEvents::all()), Some(weak_ep_entry)); // Handle host file if ep_entry.file.host_fd().is_some() { self.host_file_epoller .add_file(ep_entry.file.clone(), event, flags); return Ok(()); } } self.push_ready(ep_entry); Ok(()) } fn del_interest(&self, fd: FileDesc) -> Result<()> { // A critical section protected by the lock of self.interest { let mut interest_entries = self.interest.lock().unwrap(); let ep_entry = interest_entries .remove(&fd) .ok_or_else(|| errno!(ENOENT, "fd is not added"))?; ep_entry.is_deleted.store(true, Ordering::Release); let notifier = ep_entry.file.notifier().unwrap(); let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; notifier.unregister(&weak_observer); if ep_entry.file.host_fd().is_some() { self.host_file_epoller.del_file(&ep_entry.file); } } Ok(()) } fn mod_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> { self.check_flags(&flags); self.prepare_event(&mut event); // A critical section protected by the lock of self.interest let ep_entry = { let mut interest_entries = self.interest.lock().unwrap(); let ep_entry = interest_entries .get(&fd) .ok_or_else(|| errno!(ENOENT, "fd is not added"))? .clone(); let new_ep_inner = EpollEntryInner { event, flags }; let mut old_ep_inner = ep_entry.inner.lock().unwrap(); *old_ep_inner = new_ep_inner; drop(old_ep_inner); if ep_entry.file.host_fd().is_some() { self.host_file_epoller .mod_file(&ep_entry.file, event, flags); return Ok(()); } ep_entry }; self.push_ready(ep_entry); Ok(()) } fn push_ready(&self, ep_entry: Arc<EpollEntry>) { // Fast path to avoid locking if ep_entry.is_ready.load(Ordering::Relaxed) { // Concurrency note: // What if right after returning a true value of `is_ready`, then the `EpollEntry` is // popped from the ready list? Does it mean than we miss an interesting event? // // The answer is NO. If the `is_ready` field of an `EpollEntry` turns from `true` to // `false`, then the `EpollEntry` must be popped out of the ready list and its // corresponding file must be polled in the `wait` method. This means that we have // taken into account any interesting events happened on the file so far. 
return; } self.push_ready_iter(std::iter::once(ep_entry)); } fn push_ready_iter<I: Iterator<Item = Arc<EpollEntry>>>(&self, ep_entries: I) { let mut has_pushed_any = false; // A critical section protected by self.ready.lock() { let mut ready_entries = self.ready.lock().unwrap(); for ep_entry in ep_entries { if ep_entry.is_ready.load(Ordering::Relaxed) { continue; } ep_entry.is_ready.store(true, Ordering::Relaxed); ready_entries.push_back(ep_entry); has_pushed_any = true; } } if has_pushed_any { self.mark_ready(); } } fn pop_ready(&self, max_count: usize) -> VecDeque<Arc<EpollEntry>> { // A critical section protected by self.ready.lock() { let mut ready_entries = self.ready.lock().unwrap(); let max_count = max_count.min(ready_entries.len()); ready_entries .drain(..max_count) .map(|ep_entry| { ep_entry.is_ready.store(false, Ordering::Relaxed); ep_entry }) .collect::<VecDeque<Arc<EpollEntry>>>() } } fn mark_ready(&self) { self.notifier.broadcast(&IoEvents::IN); self.waiters.dequeue_and_wake_all(); } fn check_flags(&self, flags: &EpollFlags) { if flags.intersects(EpollFlags::EXCLUSIVE | EpollFlags::WAKE_UP) { warn!("{:?} contains unsupported flags", flags); } } fn prepare_event(&self, event: &mut EpollEvent) { // Add two events that are reported by default event.mask |= (IoEvents::ERR | IoEvents::HUP); } } impl File for EpollFile { fn poll_new(&self) -> IoEvents { if self .host_events .load(Ordering::Acquire) .contains(IoEvents::IN) { return IoEvents::IN; } let ready_entries = self.ready.lock().unwrap(); if !ready_entries.is_empty() { return IoEvents::IN; } IoEvents::empty() } fn notifier(&self) -> Option<&IoNotifier> { Some(&self.notifier) } fn host_fd(&self) -> Option<&HostFd> { Some(self.host_file_epoller.host_fd()) } fn update_host_events(&self, ready: &IoEvents, mask: &IoEvents, trigger_notifier: bool) { self.host_events.update(ready, mask, Ordering::Release); if trigger_notifier { self.notifier.broadcast(ready); } } fn as_any(&self) -> &dyn Any { self } } impl Drop for EpollFile { fn drop(&mut self) { // Do not try to `self.weak_self.upgrade()`! The Arc object must have been // dropped at this point. let self_observer = self.weak_self.clone() as Weak<dyn Observer<IoEvents>>; // Unregister ourself from all interesting files' notifiers let mut interest_entries = self.interest.lock().unwrap(); interest_entries.drain().for_each(|(_, ep_entry)| { if let Some(notifier) = ep_entry.file.notifier() { notifier.unregister(&self_observer); } }); self.unregister_from_file_table(); } } impl Observer<IoEvents> for EpollFile { fn on_event(&self, _events: &IoEvents, metadata: &Option<Weak<dyn Any + Send + Sync>>) { let ep_entry_opt = metadata .as_ref() .and_then(|weak_any| weak_any.upgrade()) .and_then(|strong_any| strong_any.downcast().ok()); let ep_entry: Arc<EpollEntry> = match ep_entry_opt { None => return, Some(ep_entry) => ep_entry, }; self.push_ready(ep_entry); } } impl Observer<FileTableEvent> for EpollFile { fn on_event(&self, event: &FileTableEvent, _metadata: &Option<Weak<dyn Any + Send + Sync>>) { let FileTableEvent::Del(fd) = event; let _ = self.del_interest(*fd); } } impl fmt::Debug for EpollFile { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("EpollFile") .field("interest", &self.interest.lock().unwrap()) .field("ready", &self.ready.lock().unwrap()) .finish() } } pub trait AsEpollFile { fn as_epoll_file(&self) -> Result<&EpollFile>; } impl AsEpollFile for FileRef { fn
as_epoll_file
identifier_name
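The excerpt above ends right where FileRef's as_epoll_file helper begins; the File trait exposes as_any(), the usual hook for recovering a concrete type from behind a trait object. The self-contained illustration below shows that downcast pattern with toy types; it does not reproduce EpollFile or FileRef.

use std::any::Any;
use std::sync::Arc;

// Mirror of the `as_any` accessor the File trait above exposes.
trait File {
    fn as_any(&self) -> &dyn Any;
}

struct Epoll {
    name: &'static str,
}

impl File for Epoll {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

// Recover the concrete type behind a shared trait object, or report a mismatch.
fn as_epoll(file: &Arc<dyn File>) -> Result<&Epoll, &'static str> {
    file.as_any()
        .downcast_ref::<Epoll>()
        .ok_or("not an epoll file")
}

fn main() {
    let file: Arc<dyn File> = Arc::new(Epoll { name: "ep" });
    assert_eq!(as_epoll(&file).unwrap().name, "ep");
}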
epoll_file.rs
Entries that are probably ready (having events happened). ready: SgxMutex<VecDeque<Arc<EpollEntry>>>, // All threads that are waiting on this epoll file. waiters: WaiterQueue, // A notifier to broadcast events on this epoll file. notifier: IoNotifier, // A helper to poll the events on the interesting host files. host_file_epoller: HostFileEpoller, // Any EpollFile is wrapped with Arc when created. weak_self: Weak<Self>, // Host events host_events: Atomic<IoEvents>, } impl EpollFile { pub fn new() -> Arc<Self> { let interest = Default::default(); let ready = Default::default(); let waiters = WaiterQueue::new(); let notifier = IoNotifier::new(); let host_file_epoller = HostFileEpoller::new(); let weak_self = Default::default(); let host_events = Atomic::new(IoEvents::empty()); let arc_self = Self { interest, ready, waiters, notifier, host_file_epoller, weak_self, host_events, } .wrap_self(); arc_self.register_to_file_table(); arc_self } fn wrap_self(self) -> Arc<Self> { let mut strong_self = Arc::new(self); let weak_self = Arc::downgrade(&strong_self); unsafe { let ptr_self = Arc::into_raw(strong_self) as *mut Self; (*ptr_self).weak_self = weak_self; strong_self = Arc::from_raw(ptr_self); } strong_self } fn register_to_file_table(&self) { let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; let thread = current!(); let file_table = thread.files().lock().unwrap(); file_table.notifier().register(weak_observer, None, None); } fn unregister_from_file_table(&self) { let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; let thread = current!(); let file_table = thread.files().lock().unwrap(); file_table.notifier().unregister(&weak_observer); } pub fn control(&self, cmd: &EpollCtl) -> Result<()> { debug!("epoll control: cmd = {:?}", cmd); match cmd { EpollCtl::Add(fd, event, flags) =>
EpollCtl::Del(fd) => { self.del_interest(*fd)?; } EpollCtl::Mod(fd, event, flags) => { self.mod_interest(*fd, *event, *flags)?; } } Ok(()) } pub fn wait( &self, revents: &mut [MaybeUninit<EpollEvent>], timeout: Option<&Duration>, ) -> Result<usize> { debug!("epoll wait: timeout = {:?}", timeout); let mut timeout = timeout.cloned(); let max_count = revents.len(); let mut reinsert = VecDeque::with_capacity(max_count); let waiter = EpollWaiter::new(&self.host_file_epoller); loop { // Poll the latest states of the interested host files. If a host // file is ready, then it will be pushed into the ready list. Note // that this is the only way through which a host file can appear in // the ready list. This ensures that only the host files whose // events are update-to-date will be returned, reducing the chances // of false positive results to the minimum. self.host_file_epoller.poll_events(max_count); // Prepare for the waiter.wait_mut() at the end of the loop self.waiters.reset_and_enqueue(waiter.as_ref()); // Pop from the ready list to find as many results as possible let mut count = 0; while count < max_count { // Pop some entries from the ready list let mut ready_entries = self.pop_ready(max_count - count); if ready_entries.len() == 0 { break; } // Note that while iterating the ready entries, we do not hold the lock // of the ready list. This reduces the chances of lock contention. for ep_entry in ready_entries.into_iter() { if ep_entry.is_deleted.load(Ordering::Acquire) { continue; } // Poll the file that corresponds to the entry let mut inner = ep_entry.inner.lock().unwrap(); let mask = inner.event.mask(); let file = &ep_entry.file; let events = file.poll_new() & mask; if events.is_empty() { continue; } // We find a ready file! let mut revent = inner.event; revent.mask = events; revents[count].write(revent); count += 1; // Behave differently according the epoll flags if inner.flags.contains(EpollFlags::ONE_SHOT) { inner.event.mask = IoEvents::empty(); } if !inner .flags .intersects(EpollFlags::EDGE_TRIGGER | EpollFlags::ONE_SHOT) { drop(inner); // Host files should not be reinserted into the ready list if ep_entry.file.host_fd().is_none() { reinsert.push_back(ep_entry); } } } } // If any results, we can return if count > 0 { // Push the entries that are still ready after polling back to the ready list if reinsert.len() > 0 { self.push_ready_iter(reinsert.into_iter()); } return Ok(count); } // Wait for a while to try again later. let ret = waiter.wait_mut(timeout.as_mut()); if let Err(e) = ret { if e.errno() == ETIMEDOUT { return Ok(0); } else { return Err(e); } } // This means we have been waken up successfully. Let's try again. } } fn add_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> { let file = current!().file(fd)?; let arc_self = self.weak_self.upgrade().unwrap(); if Arc::ptr_eq(&(arc_self as Arc<dyn File>), &file) { return_errno!(EINVAL, "a epoll file cannot epoll itself"); } self.check_flags(&flags); self.prepare_event(&mut event); let ep_entry = Arc::new(EpollEntry::new(fd, file, event, flags)); // A critical section protected by the lock of self.interest { let notifier = ep_entry .file .notifier() .ok_or_else(|| errno!(EINVAL, "a file must has an associated notifier"))?; let mut interest_entries = self.interest.lock().unwrap(); if interest_entries.get(&fd).is_some() { return_errno!(EEXIST, "fd is already registered"); } interest_entries.insert(fd, ep_entry.clone()); // Start observing events on the target file. 
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; let weak_ep_entry = Arc::downgrade(&ep_entry); notifier.register(weak_observer, Some(IoEvents::all()), Some(weak_ep_entry)); // Handle host file if ep_entry.file.host_fd().is_some() { self.host_file_epoller .add_file(ep_entry.file.clone(), event, flags); return Ok(()); } } self.push_ready(ep_entry); Ok(()) } fn del_interest(&self, fd: FileDesc) -> Result<()> { // A critical section protected by the lock of self.interest { let mut interest_entries = self.interest.lock().unwrap(); let ep_entry = interest_entries .remove(&fd) .ok_or_else(|| errno!(ENOENT, "fd is not added"))?; ep_entry.is_deleted.store(true, Ordering::Release); let notifier = ep_entry.file.notifier().unwrap(); let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; notifier.unregister(&weak_observer); if ep_entry.file.host_fd().is_some() { self.host_file_epoller.del_file(&ep_entry.file); } } Ok(()) } fn mod_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> { self.check_flags(&flags); self.prepare_event(&mut event); // A critical section protected by the lock of self.interest let ep_entry = { let mut interest_entries = self.interest.lock().unwrap(); let ep_entry = interest_entries .get(&fd) .ok_or_else(|| errno!(ENOENT, "fd is not added"))? .clone(); let new_ep_inner = EpollEntryInner { event, flags }; let mut old_ep_inner = ep_entry.inner.lock().unwrap(); *old_ep_inner = new_ep_inner; drop(old_ep_inner); if ep_entry.file.host_fd().is_some() { self.host_file_epoller .mod_file(&ep_entry.file, event, flags); return Ok(()); } ep_entry }; self.push_ready(ep_entry); Ok(()) } fn push_ready(&self,
{ self.add_interest(*fd, *event, *flags)?; }
conditional_block
epoll_file.rs
// process, then this EpollFile still has a connection with the parent process's
// file table, which is problematic.

/// A file that provides the epoll API.
///
/// Conceptually, we maintain two lists: one consists of all interesting files,
/// which can be managed by the epoll ctl commands; the other consists of ready files,
/// which are files that have some events. An epoll wait only needs to iterate the
/// ready list and poll each file to see if the file is ready for the interesting
/// I/O.
///
/// To maintain the ready list, we need to monitor interesting events that happen
/// on the files. To do so, the `EpollFile` registers itself as an `Observer` to
/// the `IoNotifier`s of the monitored files. Thus, we can add a file to the ready
/// list when an event happens on the file.
///
/// LibOS files are easy to monitor. LibOS files are implemented by us. We know
/// exactly when an event happens and thus can broadcast it using `IoNotifier`.
///
/// Unlike LibOS files, host files are implemented by the host OS. We have no way
/// to let the host OS _push_ events to us. Luckily, we can do the reverse: _poll_
/// host files to check events. And there is a good time for it; that is, at
/// every epoll wait call. We have made a helper called `HostFileEpoller`, which can
/// poll events on a set of host files and trigger their associated `Notifier`s to
/// broadcast their events, e.g., to `EpollFile`.
///
/// This way, both LibOS files and host files can notify the `EpollFile` about
/// their events.
pub struct EpollFile {
    // All interesting entries.
    interest: SgxMutex<HashMap<FileDesc, Arc<EpollEntry>>>,
    // Entries that are probably ready (having events happened).
    ready: SgxMutex<VecDeque<Arc<EpollEntry>>>,
    // All threads that are waiting on this epoll file.
    waiters: WaiterQueue,
    // A notifier to broadcast events on this epoll file.
    notifier: IoNotifier,
    // A helper to poll the events on the interesting host files.
    host_file_epoller: HostFileEpoller,
    // Any EpollFile is wrapped with Arc when created.
weak_self: Weak<Self>, // Host events host_events: Atomic<IoEvents>, } impl EpollFile { pub fn new() -> Arc<Self> { let interest = Default::default(); let ready = Default::default(); let waiters = WaiterQueue::new(); let notifier = IoNotifier::new(); let host_file_epoller = HostFileEpoller::new(); let weak_self = Default::default(); let host_events = Atomic::new(IoEvents::empty()); let arc_self = Self { interest, ready, waiters, notifier, host_file_epoller, weak_self, host_events, } .wrap_self(); arc_self.register_to_file_table(); arc_self } fn wrap_self(self) -> Arc<Self> { let mut strong_self = Arc::new(self); let weak_self = Arc::downgrade(&strong_self); unsafe { let ptr_self = Arc::into_raw(strong_self) as *mut Self; (*ptr_self).weak_self = weak_self; strong_self = Arc::from_raw(ptr_self); } strong_self } fn register_to_file_table(&self) { let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; let thread = current!(); let file_table = thread.files().lock().unwrap(); file_table.notifier().register(weak_observer, None, None); } fn unregister_from_file_table(&self) { let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; let thread = current!(); let file_table = thread.files().lock().unwrap(); file_table.notifier().unregister(&weak_observer); } pub fn control(&self, cmd: &EpollCtl) -> Result<()> { debug!("epoll control: cmd = {:?}", cmd); match cmd { EpollCtl::Add(fd, event, flags) => { self.add_interest(*fd, *event, *flags)?; } EpollCtl::Del(fd) => { self.del_interest(*fd)?; } EpollCtl::Mod(fd, event, flags) => { self.mod_interest(*fd, *event, *flags)?; } } Ok(()) } pub fn wait( &self, revents: &mut [MaybeUninit<EpollEvent>], timeout: Option<&Duration>, ) -> Result<usize> { debug!("epoll wait: timeout = {:?}", timeout); let mut timeout = timeout.cloned(); let max_count = revents.len(); let mut reinsert = VecDeque::with_capacity(max_count); let waiter = EpollWaiter::new(&self.host_file_epoller); loop { // Poll the latest states of the interested host files. If a host // file is ready, then it will be pushed into the ready list. Note // that this is the only way through which a host file can appear in // the ready list. This ensures that only the host files whose // events are update-to-date will be returned, reducing the chances // of false positive results to the minimum. self.host_file_epoller.poll_events(max_count); // Prepare for the waiter.wait_mut() at the end of the loop self.waiters.reset_and_enqueue(waiter.as_ref()); // Pop from the ready list to find as many results as possible let mut count = 0; while count < max_count { // Pop some entries from the ready list let mut ready_entries = self.pop_ready(max_count - count); if ready_entries.len() == 0 { break; } // Note that while iterating the ready entries, we do not hold the lock // of the ready list. This reduces the chances of lock contention. for ep_entry in ready_entries.into_iter() { if ep_entry.is_deleted.load(Ordering::Acquire) { continue; } // Poll the file that corresponds to the entry let mut inner = ep_entry.inner.lock().unwrap(); let mask = inner.event.mask(); let file = &ep_entry.file; let events = file.poll_new() & mask; if events.is_empty() { continue; } // We find a ready file! 
let mut revent = inner.event; revent.mask = events; revents[count].write(revent); count += 1; // Behave differently according the epoll flags if inner.flags.contains(EpollFlags::ONE_SHOT) { inner.event.mask = IoEvents::empty(); } if !inner .flags .intersects(EpollFlags::EDGE_TRIGGER | EpollFlags::ONE_SHOT) { drop(inner); // Host files should not be reinserted into the ready list if ep_entry.file.host_fd().is_none() { reinsert.push_back(ep_entry); } } } } // If any results, we can return if count > 0 { // Push the entries that are still ready after polling back to the ready list if reinsert.len() > 0 { self.push_ready_iter(reinsert.into_iter()); } return Ok(count); } // Wait for a while to try again later. let ret = waiter.wait_mut(timeout.as_mut()); if let Err(e) = ret { if e.errno() == ETIMEDOUT { return Ok(0); } else { return Err(e); } } // This means we have been waken up successfully. Let's try again. } } fn add_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> { let file = current!().file(fd)?; let arc_self = self.weak_self.upgrade().unwrap(); if Arc::ptr_eq(&(arc_self as Arc<dyn File>), &file) { return_errno!(EINVAL, "a epoll file cannot epoll itself"); } self.check_flags(&flags); self.prepare_event(&mut event); let ep_entry = Arc::new(EpollEntry::new(fd, file, event, flags)); // A critical section protected by the lock of self.interest { let notifier = ep_entry .file .notifier() .ok_or_else(|| errno!(EINVAL, "a file must has an associated notifier"))?; let mut interest_entries = self.interest.lock().unwrap(); if interest_entries.get(&fd).is_some() { return_errno!(EEXIST, "fd is already registered"); } interest_entries.insert(fd, ep_entry.clone()); // Start observing events on the target file. let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>; let weak_ep_entry = Arc::downgrade(&ep_entry); notifier.register(weak_observer, Some(IoEvents::all()), Some(weak_ep_entry)); // Handle host file if ep_entry.file
// with the current process's file table by registering itself as an observer
// to the file table. But if an EpollFile is cloned or inherited by a child
random_line_split
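// A minimal, hypothetical caller sketch (not part of the epoll_file.rs excerpt
// above). It uses only the public methods visible in that excerpt:
// EpollFile::new(), control() with EpollCtl::Add, and wait(). How an EpollEvent
// is constructed is not shown there, so the event is taken as a parameter;
// FileDesc, IoEvents, MaybeUninit, and Duration are the same types the excerpt
// itself uses.
fn watch_fd(fd: FileDesc, event: EpollEvent) -> Result<usize> {
    // Creating the epoll file also registers it with the current file table.
    let ep = EpollFile::new();

    // Express interest in `fd`; level-triggered, no special flags.
    ep.control(&EpollCtl::Add(fd, event, EpollFlags::empty()))?;

    // wait() writes ready events into the first `n` slots of this buffer.
    let mut revents: Vec<MaybeUninit<EpollEvent>> =
        (0..8).map(|_| MaybeUninit::uninit()).collect();
    let n = ep.wait(&mut revents, Some(&Duration::from_millis(100)))?;
    Ok(n)
}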
lib.rs
::f128::BaseElement, FieldElement}, //! ExecutionTrace, //! }; //! //! pub fn build_do_work_trace(start: BaseElement, n: usize) -> ExecutionTrace<BaseElement> { //! // Instantiate the trace with a given width and length; this will allocate all //! // required memory for the trace //! let trace_width = 1; //! let mut trace = ExecutionTrace::new(trace_width, n); //! //! // Fill the trace with data; the first closure initializes the first state of the //! // computation; the second closure computes the next state of the computation based //! // on its current state. //! trace.fill( //! |state| { //! state[0] = start; //! }, //! |_, state| { //! state[0] = state[0].exp(3u32.into()) + BaseElement::new(42); //! }, //! ); //! //! trace //! } //! ``` //! //! Next, we need to define *algebraic intermediate representation* (AIR) for our computation. //! This process is usually called *arithmetization*. We do this by implementing the [Air] trait. //! At the high level, the code below does three things: //! //! 1. Defines what the public inputs for our computation should look like. These inputs are //! called "public" because they must be known to both, the prover and the verifier. //! 2. Defines a transition function with a single transition constraint. This transition //! constraint must evaluate to zero for all valid state transitions, and to non-zero for any //! invalid state transition. The degree of this constraint is 3 (see more about constraint //! degrees "Constraint degrees" section of [Air] trait documentation). //! 3. Define two assertions against an execution trace of our computation. These assertions tie //! a specific set of public inputs to a specific execution trace (see more about assertions //! "Trace assertions" section of [Air] trait documentation). //! //! Here is the actual code: //! //! ```no_run //! use winterfell::{ //! math::{fields::f128::BaseElement, FieldElement}, //! Air, AirContext, Assertion, ByteWriter, EvaluationFrame, ProofOptions, Serializable, //! TraceInfo, TransitionConstraintDegree, //! }; //! //! // Public inputs for our computation will consist of the starting value and the end result. //! pub struct PublicInputs { //! start: BaseElement, //! result: BaseElement, //! } //! //! // We need to describe how public inputs can be converted to bytes. //! impl Serializable for PublicInputs { //! fn write_into<W: ByteWriter>(&self, target: &mut W) { //! target.write(self.start); //! target.write(self.result); //! } //! } //! //! // For a specific instance of our computation, we'll keep track of the public inputs and //! // the computation's context which we'll build in the constructor. The context is used //! // internally by the Winterfell prover/verifier when interpreting this AIR. //! pub struct WorkAir { //! context: AirContext<BaseElement>, //! start: BaseElement, //! result: BaseElement, //! } //! //! impl Air for WorkAir { //! // First, we'll specify which finite field to use for our computation, and also how //! // the public inputs must look like. //! type BaseElement = BaseElement; //! type PublicInputs = PublicInputs; //! //! // Here, we'll construct a new instance of our computation which is defined by 3 //! // parameters: starting value, number of steps, and the end result. Another way to //! // think about it is that an instance of our computation is a specific invocation of //! // the do_work() function. //! fn new(trace_info: TraceInfo, pub_inputs: PublicInputs, options: ProofOptions) -> Self { //! // our execution trace should have only one column. 
//! assert_eq!(1, trace_info.width()); //! //! // Our computation requires a single transition constraint. The constraint itself //! // is defined in the evaluate_transition() method below, but here we need to specify //! // the expected degree of the constraint. If the expected and actual degrees of the //! // constraints don't match, an error will be thrown in the debug mode, but in release //! // mode, an invalid proof will be generated which will not be accepted by any //! // verifier. //! let degrees = vec![TransitionConstraintDegree::new(3)]; //! WorkAir { //! context: AirContext::new(trace_info, degrees, options), //! start: pub_inputs.start, //! result: pub_inputs.result, //! } //! } //! //! // In this method we'll define our transition constraints; a computation is considered to //! // be valid, if for all valid state transitions, transition constraints evaluate to all //! // zeros, and for any invalid transition, at least one constraint evaluates to a non-zero //! // value. The `frame` parameter will contain current and next states of the computation. //! fn evaluate_transition<E: FieldElement + From<Self::BaseElement>>( //! &self, //! frame: &EvaluationFrame<E>, //! _periodic_values: &[E], //! result: &mut [E], //! ) { //! // First, we'll read the current state, and use it to compute the expected next state //! let current_state = &frame.current()[0]; //! let next_state = current_state.exp(3u32.into()) + E::from(42u32); //! //! // Then, we'll subtract the expected next state from the actual next state; this will //! // evaluate to zero if and only if the expected and actual states are the same. //! result[0] = frame.next()[0] - next_state; //! } //! //! // Here, we'll define a set of assertions about the execution trace which must be //! // satisfied for the computation to be valid. Essentially, this ties computation's //! // execution trace to the public inputs. //! fn get_assertions(&self) -> Vec<Assertion<Self::BaseElement>> { //! // for our computation to be valid, value in column 0 at step 0 must be equal to the //! // starting value, and at the last step it must be equal to the result. //! let last_step = self.trace_length() - 1; //! vec![ //! Assertion::single(0, 0, self.start), //! Assertion::single(0, last_step, self.result), //! ] //! } //! //! // This is just boilerplate which is used by the Winterfell prover/verifier to retrieve //! // the context of the computation. //! fn context(&self) -> &AirContext<Self::BaseElement> { //! &self.context //! } //! } //! ``` //! //! Now, we are finally ready to generate and verify STARK proofs. //! //! In the code below, we will execute our computation and get the result together with the proof //! that the computation was executed correctly. Then, we will use this proof (together with the //! public inputs) to verify that we did in fact execute the computation and got the claimed //! result. //! //! ``` //! # use winterfell::{ //! # math::{fields::f128::BaseElement, FieldElement}, //! # Air, AirContext, Assertion, ByteWriter, EvaluationFrame, Serializable, //! # TraceInfo, TransitionConstraintDegree, //! # ExecutionTrace, FieldExtension, HashFunction, ProofOptions, StarkProof, //! # }; //! # //! # pub fn build_do_work_trace(start: BaseElement, n: usize) -> ExecutionTrace<BaseElement> { //! # let trace_width = 1; //! # let mut trace = ExecutionTrace::new(trace_width, n); //! # trace.fill( //! # |state| { //! # state[0] = start; //! # }, //! # |_, state| { //! # state[0] = state[0].exp(3u32.into()) + BaseElement::new(42); //! 
# }, //! # ); //! # trace //! # } //! # //! # //! # pub struct PublicInputs { //! # start: BaseElement, //! # result: BaseElement, //! # } //! # //! # impl Serializable for PublicInputs { //! # fn write_into<W: ByteWriter>(&self, target: &mut W) { //! # target.write(self.start); //! # target.write(self.result); //! # } //! # } //! # //! # pub struct WorkAir { //! # context: AirContext<BaseElement>, //! # start: BaseElement, //! # result: BaseElement, //! # } //! # //! # impl Air for WorkAir { //! # type BaseElement = BaseElement; //! # type PublicInputs = PublicInputs; //! # //! # fn new(trace_info: TraceInfo, pub_inputs: PublicInputs, options: ProofOptions) -> Self { //! # assert_eq!(1, trace_info.width()); //! # let degrees = vec![TransitionConstraintDegree::new(3)]; //! # WorkAir { //! # context: AirContext::new(trace_info, degrees, options), //! # start: pub_inputs.start, //! # result: pub_inputs.result,
//! # } //! # }
random_line_split
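// A hedged sketch (not part of the crate docs quoted above): the final
// prove-and-verify step that the winterfell example builds toward. The exact
// parameter list of ProofOptions::new() and the ExecutionTrace::get() accessor
// are assumed to match the crate version these docs target, and the numeric
// arguments below are placeholders rather than recommended settings. WorkAir,
// PublicInputs, and build_do_work_trace are the items defined in the docs above.
fn prove_and_verify_work(start: BaseElement, n: usize) {
    // Execute the computation by building its trace, and read the claimed
    // result off the last step of column 0.
    let trace = build_do_work_trace(start, n);
    let result = trace.get(0, n - 1);

    // Proof options trade proof size against prover time and security margin.
    let options = ProofOptions::new(
        32,                       // number of queries
        8,                        // blowup factor
        0,                        // grinding factor
        HashFunction::Blake3_256, // hash function used by the protocol
        FieldExtension::None,     // no field extension
        8,                        // FRI folding factor
        128,                      // FRI max remainder size
    );

    // Generate a proof that the trace satisfies WorkAir, then verify it
    // against the same public inputs.
    let proof = winterfell::prove::<WorkAir>(trace, PublicInputs { start, result }, options)
        .expect("proof generation failed");
    assert!(winterfell::verify::<WorkAir>(proof, PublicInputs { start, result }).is_ok());
}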
secrets.pb.go
0x11, 0xec, 0x14, 0xe1, 0x5e, 0x11, 0x1e, 0x14, 0xc1, 0x51, 0x11, 0x9e, 0x14, 0xc1, 0x59, 0x11, 0x5e, 0x14, 0xc1, 0x42, 0x13, 0x2c, 0x35, 0xe1, 0x9d, 0x26, 0xb8, 0xd7, 0x84, 0x0f, 0x9a, 0x60, 0xa5, 0x09, 0xd6, 0x9a, 0x70, 0xa3, 0x09, 0xb7, 0x9a, 0xf0, 0x1f, 0x0b, 0x84, 0x2d, 0x27, 0x5c, 0x4e, 0xc2, 0x38, 0xc8, 0xec, 0x98, 0xcb, 0xb9, 0x48, 0x23, 0xf6, 0xbe, 0xfa, 0x7c, 0xc0, 0x92, 0x28, 0x60, 0x52, 0xc6, 0x89, 0xe7, 0x35, 0xaa, 0xd7, 0x07, 0x2f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x9a, 0x18, 0xc7, 0x9f, 0x01, 0x00, 0x00, } func (this *Secret) Equal(that interface{}) bool { if that == nil { return this == nil } that1, ok := that.(*Secret) if !ok { that2, ok := that.(Secret) if ok { that1 = &that2 } else { return false } } if that1 == nil { return this == nil } else if this == nil { return false } if this.KeyID != that1.KeyID { return false } if !bytes.Equal(this.Value, that1.Value) { return false } return true } func (m *Secret) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Secret) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Value) > 0 { i -= len(m.Value) copy(dAtA[i:], m.Value) i = encodeVarintSecrets(dAtA, i, uint64(len(m.Value))) i-- dAtA[i] = 0x12 } if len(m.KeyID) > 0 { i -= len(m.KeyID) copy(dAtA[i:], m.KeyID) i = encodeVarintSecrets(dAtA, i, uint64(len(m.KeyID))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func encodeVarintSecrets(dAtA []byte, offset int, v uint64) int { offset -= sovSecrets(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func NewPopulatedSecret(r randySecrets, easy bool) *Secret { this := &Secret{} this.KeyID = randStringSecrets(r) v1 := r.Intn(100) this.Value = make([]byte, v1) for i := 0; i < v1; i++ { this.Value[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { } return this } type randySecrets interface { Float32() float32 Float64() float64 Int63() int64 Int31() int32 Uint32() uint32 Intn(n int) int } func randUTF8RuneSecrets(r randySecrets) rune { ru := r.Intn(62) if ru < 10 { return rune(ru + 48) } else if ru < 36 { return rune(ru + 55) } return rune(ru + 61) } func randStringSecrets(r randySecrets) string { v2 := r.Intn(100) tmps := make([]rune, v2) for i := 0; i < v2; i++ { tmps[i] = randUTF8RuneSecrets(r) } return string(tmps) } func randUnrecognizedSecrets(r randySecrets, maxFieldNumber int) (dAtA []byte) { l := r.Intn(5) for i := 0; i < l; i++ { wire := r.Intn(4) if wire == 3 { wire = 5 } fieldNumber := maxFieldNumber + r.Intn(100) dAtA = randFieldSecrets(dAtA, r, fieldNumber, wire) } return dAtA } func randFieldSecrets(dAtA []byte, r randySecrets, fieldNumber int, wire int) []byte { key := uint32(fieldNumber)<<3 | uint32(wire) switch wire { case 0: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) v3 := r.Int63() if r.Intn(2) == 0 { v3 *= -1 } dAtA = encodeVarintPopulateSecrets(dAtA, uint64(v3)) case 1: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) case 2: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) ll := r.Intn(100) dAtA = encodeVarintPopulateSecrets(dAtA, uint64(ll)) 
for j := 0; j < ll; j++ { dAtA = append(dAtA, byte(r.Intn(256))) } default: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) } return dAtA } func encodeVarintPopulateSecrets(dAtA []byte, v uint64) []byte { for v >= 1<<7 { dAtA = append(dAtA, uint8(v&0x7f|0x80)) v >>= 7 } dAtA = append(dAtA, uint8(v)) return dAtA } func (m *Secret) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.KeyID) if l > 0 { n += 1 + l + sovSecrets(uint64(l)) } l = len(m.Value) if l > 0 { n += 1 + l + sovSecrets(uint64(l)) } return n } func sovSecrets(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozSecrets(x uint64) (n int) { return sovSecrets((x << 1) ^ uint64((int64(x) >> 63)))
} func (this *Secret) String() string { if this == nil {
random_line_split
secrets.pb.go
xc4, 0x5d, 0x49, 0xb8, 0x2f, 0x09, 0x0f, 0x25, 0xc1, 0xb1, 0x24, 0x38, 0x95, 0x04, 0xe7, 0x92, 0xe0, 0x52, 0x12, 0x2e, 0x14, 0xe1, 0x52, 0x11, 0xac, 0x14, 0xe1, 0x5a, 0x11, 0x6c, 0x14, 0xc1, 0x56, 0x11, 0xec, 0x14, 0xe1, 0x5e, 0x11, 0x1e, 0x14, 0xc1, 0x51, 0x11, 0x9e, 0x14, 0xc1, 0x59, 0x11, 0x5e, 0x14, 0xc1, 0x42, 0x13, 0x2c, 0x35, 0xe1, 0x9d, 0x26, 0xb8, 0xd7, 0x84, 0x0f, 0x9a, 0x60, 0xa5, 0x09, 0xd6, 0x9a, 0x70, 0xa3, 0x09, 0xb7, 0x9a, 0xf0, 0x1f, 0x0b, 0x84, 0x2d, 0x27, 0x5c, 0x4e, 0xc2, 0x38, 0xc8, 0xec, 0x98, 0xcb, 0xb9, 0x48, 0x23, 0xf6, 0xbe, 0xfa, 0x7c, 0xc0, 0x92, 0x28, 0x60, 0x52, 0xc6, 0x89, 0xe7, 0x35, 0xaa, 0xd7, 0x07, 0x2f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x9a, 0x18, 0xc7, 0x9f, 0x01, 0x00, 0x00, } func (this *Secret) Equal(that interface{}) bool { if that == nil { return this == nil } that1, ok := that.(*Secret) if !ok { that2, ok := that.(Secret) if ok { that1 = &that2 } else { return false } } if that1 == nil { return this == nil } else if this == nil { return false } if this.KeyID != that1.KeyID { return false } if !bytes.Equal(this.Value, that1.Value) { return false } return true } func (m *Secret) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Secret) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Value) > 0 { i -= len(m.Value) copy(dAtA[i:], m.Value) i = encodeVarintSecrets(dAtA, i, uint64(len(m.Value))) i-- dAtA[i] = 0x12 } if len(m.KeyID) > 0 { i -= len(m.KeyID) copy(dAtA[i:], m.KeyID) i = encodeVarintSecrets(dAtA, i, uint64(len(m.KeyID))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func encodeVarintSecrets(dAtA []byte, offset int, v uint64) int { offset -= sovSecrets(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func NewPopulatedSecret(r randySecrets, easy bool) *Secret { this := &Secret{} this.KeyID = randStringSecrets(r) v1 := r.Intn(100) this.Value = make([]byte, v1) for i := 0; i < v1; i++ { this.Value[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { } return this } type randySecrets interface { Float32() float32 Float64() float64 Int63() int64 Int31() int32 Uint32() uint32 Intn(n int) int } func randUTF8RuneSecrets(r randySecrets) rune { ru := r.Intn(62) if ru < 10 { return rune(ru + 48) } else if ru < 36 { return rune(ru + 55) } return rune(ru + 61) } func randStringSecrets(r randySecrets) string { v2 := r.Intn(100) tmps := make([]rune, v2) for i := 0; i < v2; i++ { tmps[i] = randUTF8RuneSecrets(r) } return string(tmps) } func randUnrecognizedSecrets(r randySecrets, maxFieldNumber int) (dAtA []byte) { l := r.Intn(5) for i := 0; i < l; i++ { wire := r.Intn(4) if wire == 3 { wire = 5 } fieldNumber := maxFieldNumber + r.Intn(100) dAtA = randFieldSecrets(dAtA, r, fieldNumber, wire) } return dAtA } func randFieldSecrets(dAtA []byte, r randySecrets, fieldNumber int, wire int) []byte { key := uint32(fieldNumber)<<3 | uint32(wire) switch wire { case 0: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) v3 := r.Int63() if r.Intn(2) == 0 { v3 *= -1 } dAtA = encodeVarintPopulateSecrets(dAtA, uint64(v3)) case 1: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), 
byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) case 2: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) ll := r.Intn(100) dAtA = encodeVarintPopulateSecrets(dAtA, uint64(ll)) for j := 0; j < ll; j++ { dAtA = append(dAtA, byte(r.Intn(256))) } default: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) } return dAtA } func encodeVarintPopulateSecrets(dAtA []byte, v uint64) []byte { for v >= 1<<7 { dAtA = append(dAtA, uint8(v&0x7f|0x80)) v >>= 7 } dAtA = append(dAtA, uint8(v)) return dAtA } func (m *Secret)
Size
identifier_name
secrets.pb.go
00, } func (this *Secret) Equal(that interface{}) bool { if that == nil { return this == nil } that1, ok := that.(*Secret) if !ok { that2, ok := that.(Secret) if ok { that1 = &that2 } else { return false } } if that1 == nil { return this == nil } else if this == nil { return false } if this.KeyID != that1.KeyID { return false } if !bytes.Equal(this.Value, that1.Value) { return false } return true } func (m *Secret) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Secret) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Value) > 0 { i -= len(m.Value) copy(dAtA[i:], m.Value) i = encodeVarintSecrets(dAtA, i, uint64(len(m.Value))) i-- dAtA[i] = 0x12 } if len(m.KeyID) > 0 { i -= len(m.KeyID) copy(dAtA[i:], m.KeyID) i = encodeVarintSecrets(dAtA, i, uint64(len(m.KeyID))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func encodeVarintSecrets(dAtA []byte, offset int, v uint64) int { offset -= sovSecrets(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func NewPopulatedSecret(r randySecrets, easy bool) *Secret { this := &Secret{} this.KeyID = randStringSecrets(r) v1 := r.Intn(100) this.Value = make([]byte, v1) for i := 0; i < v1; i++ { this.Value[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { } return this } type randySecrets interface { Float32() float32 Float64() float64 Int63() int64 Int31() int32 Uint32() uint32 Intn(n int) int } func randUTF8RuneSecrets(r randySecrets) rune { ru := r.Intn(62) if ru < 10 { return rune(ru + 48) } else if ru < 36 { return rune(ru + 55) } return rune(ru + 61) } func randStringSecrets(r randySecrets) string { v2 := r.Intn(100) tmps := make([]rune, v2) for i := 0; i < v2; i++ { tmps[i] = randUTF8RuneSecrets(r) } return string(tmps) } func randUnrecognizedSecrets(r randySecrets, maxFieldNumber int) (dAtA []byte) { l := r.Intn(5) for i := 0; i < l; i++ { wire := r.Intn(4) if wire == 3 { wire = 5 } fieldNumber := maxFieldNumber + r.Intn(100) dAtA = randFieldSecrets(dAtA, r, fieldNumber, wire) } return dAtA } func randFieldSecrets(dAtA []byte, r randySecrets, fieldNumber int, wire int) []byte { key := uint32(fieldNumber)<<3 | uint32(wire) switch wire { case 0: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) v3 := r.Int63() if r.Intn(2) == 0 { v3 *= -1 } dAtA = encodeVarintPopulateSecrets(dAtA, uint64(v3)) case 1: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) case 2: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) ll := r.Intn(100) dAtA = encodeVarintPopulateSecrets(dAtA, uint64(ll)) for j := 0; j < ll; j++ { dAtA = append(dAtA, byte(r.Intn(256))) } default: dAtA = encodeVarintPopulateSecrets(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) } return dAtA } func encodeVarintPopulateSecrets(dAtA []byte, v uint64) []byte { for v >= 1<<7 { dAtA = append(dAtA, uint8(v&0x7f|0x80)) v >>= 7 } dAtA = append(dAtA, uint8(v)) return dAtA } func (m *Secret) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.KeyID) if l > 0 { n 
+= 1 + l + sovSecrets(uint64(l)) } l = len(m.Value) if l > 0 { n += 1 + l + sovSecrets(uint64(l)) } return n } func sovSecrets(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozSecrets(x uint64) (n int) { return sovSecrets((x << 1) ^ uint64((int64(x) >> 63))) } func (this *Secret) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&Secret{`, `KeyID:` + fmt.Sprintf("%v", this.KeyID) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `}`, }, "") return s } func valueToStringSecrets(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { return "nil" } pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } func (m *Secret) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowSecrets } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Secret: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field KeyID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7
{ if shift >= 64 { return ErrIntOverflowSecrets } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } }
conditional_block
secrets.pb.go
func init() { proto.RegisterType((*Secret)(nil), "ttn.lorawan.v3.Secret") golang_proto.RegisterType((*Secret)(nil), "ttn.lorawan.v3.Secret") } func init() { proto.RegisterFile("lorawan-stack/api/secrets.proto", fileDescriptor_8c9d796b7f7ca235) } func init() { golang_proto.RegisterFile("lorawan-stack/api/secrets.proto", fileDescriptor_8c9d796b7f7ca235) } var fileDescriptor_8c9d796b7f7ca235 = []byte{ // 350 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0xb1, 0x6f, 0xda, 0x50, 0x10, 0xc6, 0xef, 0x5a, 0x19, 0x15, 0xab, 0xaa, 0x2a, 0x26, 0xc4, 0x70, 0xa0, 0x4e, 0x2c, 0xf6, 0x1b, 0xf8, 0x0b, 0x6a, 0x75, 0xa1, 0xdd, 0xe8, 0xd6, 0xa5, 0xb2, 0xcd, 0x8b, 0xb1, 0x4c, 0xfc, 0x2c, 0xfb, 0x61, 0xe2, 0x4c, 0x8c, 0x8c, 0x19, 0x33, 0x66, 0x89, 0xc4, 0xc8, 0xc8, 0xc8, 0xc8, 0xc8, 0xc8, 0x14, 0xe1, 0xf7, 0x16, 0x46, 0x46, 0x94, 0x29, 0x92, 0x9d, 0x21, 0x51, 0xb6, 0xef, 0x3b, 0xfd, 0xbe, 0xd3, 0xdd, 0x67, 0x76, 0xa7, 0x22, 0x75, 0xe7, 0x6e, 0x6c, 0x65, 0xd2, 0xf5, 0x23, 0xe6, 0x26, 0x21, 0xcb, 0xb8, 0x9f, 0x72, 0x99, 0xd9, 0x49, 0x2a, 0xa4, 0x68, 0x7d, 0x93, 0x32, 0xb6, 0x5f, 0x21, 0x3b, 0x1f, 0x74, 0xac, 0x20, 0x94, 0x93, 0x99, 0x67, 0xfb, 0xe2, 0x9a, 0x05, 0x22, 0x10, 0xac, 0xc2, 0xbc, 0xd9, 0x55, 0xe5, 0x2a, 0x53, 0xa9, 0x3a, 0xde, 0xf9, 0xf9, 0x06, 0xe7, 0x71, 0x2e, 0x8a, 0x24, 0x15, 0x37, 0x45, 0x1d, 0xf2, 0xad, 0x80, 0xc7, 0x56, 0xee, 0x4e, 0xc3, 0xb1, 0x2b, 0x39, 0xfb, 0x20, 0xea, 0x15, 0x3f, 0x7e, 0x9b, 0x8d, 0xbf, 0xd5, 0x49, 0xad, 0x9e, 0xd9, 0x88, 0x78, 0xf1, 0x3f, 0x1c, 0xb7, 0xb1, 0x87, 0xfd, 0xa6, 0xd3, 0x54, 0x4f, 0x5d, 0xe3, 0x0f, 0x2f, 0x86, 0xbf, 0x46, 0x46, 0xc4, 0x8b, 0xe1, 0xb8, 0x45, 0xa6, 0x91, 0xbb, 0xd3, 0x19, 0x6f, 0x7f, 0xea, 0x61, 0xff, 0xab, 0xf3, 0xe5, 0xd9, 0x31, 0x6e, 0x3f, 0xb7, 0x17, 0xdf, 0x47, 0xf5, 0xd8, 0x79, 0xc4, 0x5d, 0x49, 0xb8, 0x2f, 0x09, 0x0f, 0x25, 0xc1, 0xb1, 0x24, 0x38, 0x95, 0x04, 0xe7, 0x92, 0xe0, 0x52, 0x12, 0x2e, 0x14, 0xe1, 0x52, 0x11, 0xac, 0x14, 0xe1, 0x5a, 0x11, 0x6c, 0x14, 0xc1, 0x56, 0x11, 0xec, 0x14, 0xe1, 0x5e, 0x11, 0x1e, 0x14, 0xc1, 0x51, 0x11, 0x9e, 0x14, 0xc1, 0x59, 0x11, 0x5e, 0x14, 0xc1, 0x42, 0x13, 0x2c, 0x35, 0xe1, 0x9d, 0x26, 0xb8, 0xd7, 0x84, 0x0f, 0x9a, 0x60, 0xa5, 0x09, 0xd6, 0x9a, 0x70, 0xa3, 0x09, 0xb7, 0x9a, 0xf0, 0x1f, 0x0b, 0x84, 0x2d, 0x27, 0x5c, 0x4e, 0xc2, 0x38, 0xc8, 0xec, 0x98, 0xcb, 0xb9, 0x48, 0x23, 0xf6, 0xbe, 0xfa, 0x7c, 0xc0, 0x92, 0x28, 0x60, 0x52, 0xc6, 0x89, 0xe7, 0x35, 0xaa, 0xd7, 0x07, 0x2f, 0x01, 0x00, 0x00, 0xff, 0xff
{ if m != nil { return m.Value } return nil }
identifier_body
script.js
122.463701, 37.747683]) //-122.41, 37.79 .scale(width*200) .translate([width / 2, height / 2]); var path = d3.geo.path() .projection(projection); var ndx, yearDim, mhiDim, mgrDim, tractDim; var filterYear, filterMetric, sf_topojson; var parsed_data = []; var parsed_biz = []; var parsed_biz_tract = []; var divlegend = document.getElementById("legend"); var maptitle = document.getElementById("yolo"); var filterMetric_mapping = {"mhi": "Mean Household Income", "mgr": "Median Gross Rent"}; var format = d3.format("$.2s"); function
(evt) { var oldFilterMetric = filterMetric; filterMetric = evt.target.id; if (filterMetric !== "mhi" && filterMetric !== "mgr") { filterYear = +(filterMetric.split("_")[1]); filterMetric = oldFilterMetric; metricChange(true); var titleMetric = maptitle.innerHTML.split(" ").slice(0, 3); titleMetric.push(filterYear); maptitle.innerHTML = titleMetric.join(" "); } else { metricChange(false); var titleMetric = maptitle.innerHTML.split(" ").slice(3, 4); var new_metric = full_title[filterMetric].split(" ").reverse(); new_metric.forEach(function(d) { titleMetric.unshift(d); }); maptitle.innerHTML = titleMetric.join(" "); } } divlegend.addEventListener("click", clickedOn, false); function metricChange(yearChange) { var specColors = color(filterMetric); var specDomains = domains[filterMetric]; map.valueAccessor( function(d) { if (filterMetric === "mhi") { return d.value.mhi; } else if (filterMetric === "pov") { return d.value.pov; } else { //default on mgr return d.value.mgr; } }) .colors(d3.scale.threshold().domain(specDomains).range(specColors)); if (yearChange) { var new_year_group = metricYearChange_map(); map.group(new_year_group); plot_biz(yearChange); } // var new_metric_group = metricChange_timeline(); // timeline.group(new_metric_group) // .yAxisLabel(function(d) { // if (filterMetric === "mhi") { // return "Mean Household Income"; // } else if (filterMetric === "pov") { // return d.value.pov; // } else { //default on mgr // return "Median Gross Rent"; // }}); dc.redrawAll(); } queue() .defer(d3.json, "/data/sf_tracts.json") .defer(d3.csv, "/data/year_ordered_acs_inc_rent_biz.csv") .defer(d3.csv, "/data/groc_liq.csv") .await(ready); var data_real; function ready(error, tracts, data, biz) { if (error) throw error; sf_topojson = topojson.feature(tracts, tracts.objects.sf).features; filterMetric = "mgr"; filterYear = 2014; svg.style("stroke", "black").style("fill", "lightgrey").style("stroke-width", "1px"); var dateFormat; data.forEach(function(d) { dateFormat = d3.time.format('%Y'); d.year = dateFormat.parse(d.year); d.mhi = +d.mhi; d.mgr = +d.mgr; parsed_data.push({"id": d.id, "year": d.year, "mhi": d.mhi, "mgr": d.mgr, "liq": +d.liq, "high_end_groc": +d.high_end_groc, "low_end_groc": +d.low_end_groc, "groc": +d.groc}); }); biz.forEach(function(d) { d.end_date = dateFormat.parse(d.end_date); d.start_date = dateFormat.parse(d.start_date); d.storetype = d.storetype; d.lat = +d.lat; d.lon = +d.lon; parsed_biz.push({"end_date": d.end_date, "storetype": d.storetype, "lat": d.lat, "lon": d.lon, "start_date": d.start_date}); }); ndx = crossfilter(parsed_data); yearDim = ndx.dimension(function(d) { return [d.id, +d.year.getFullYear()]; }); tractDim = ndx.dimension(function(d) { return d.id; }); vector = svg.append("g") .attr("class", "vector") .call(renderTiles); d3.select(".loading").remove(); render(filterYear, filterMetric); plot_biz(false); } function sort_group(group, order) { return { all: function() { var g = group.all(), map = {}; g.forEach(function(kv) { map[kv.key] = kv.value; }); return order.map(function(k) { return {key: k, value: map[k]}; }); } }; }; function render(filterYear, filterMetric) { // dc.filterAll(); var metric_grouped = metricYearChange_map(); var year_grouped = metricChange_timeline(); var hg_grouped = yearDim.group(function(d) { return d;}).reduceSum( function(d) { return d.high_end_groc; }); var lg_grouped = yearDim.group(function(d) { return d;}).reduceSum( function(d) { return d.low_end_groc; }); var order = parsed_data.map(function(values) { return [values.id, 
+values.year.getFullYear()]; }); // // var hg_grouped = sort_group(hg_grouped, order); // var lg_grouped = sort_group(lg_grouped, order) var specColors = color(filterMetric); var specDomains = domains[filterMetric]; map.projection(projection) .dimension(tractDim) .group(metric_grouped) .valueAccessor(function(d) { console.log(filterMetric_mapping[filterMetric]); if (filterMetric === "mhi") { return d.value.mhi; } else if (filterMetric === "pov") { return d.value.pov; } else { //default on mgr return d.value.mgr; } }) .colorAccessor(function(d) { return d; }) .overlayGeoJson(sf_topojson, 'sf', function(d) { return d.id; }) .colors(d3.scale.threshold().domain(specDomains).range(specColors)) .title(function (d) { return "Census Tract " + d.key + "\n" + filterMetric_mapping[filterMetric] + " : " + format(d.value ? d.value : 0); }) .transitionDuration(500); // timeline.width(670) // .height(300) // .margins({top: 10, right: 50, bottom: 30, left: 50}) // .dimension(yearDim) // .group(year_grouped) // .keyAccessor(function(d) { return +d.key[1]; }) // .valueAccessor(function(d) { // return +d.value; // }) // .seriesAccessor(function(d) { return d.key[0]; }) // .x(d3.scale.linear().domain([2000, 2014])) // .renderHorizontalGridLines(true) // .xAxisLabel("Year") // .yAxisLabel(function(d) { // if (filterMetric === "mhi") { // return "Mean Household Income"; // } else { //default on mgr // return "Median Gross Rent"; // } // }) // .clipPadding(10) // .elasticY(true) // .brushOn(false) // .xAxis().ticks(8).tickFormat(d3.format("d")); // // biz_timeline.width(470) // .height(300) // .margins({top: 10, right: 50, bottom: 30, left: 30}) // .dimension(yearDim) // .group(hg_grouped) // .keyAccessor(function(d) { return +d.key[1]; }) // .valueAccessor(function(d) { // return +d.value; // }) // .seriesAccessor(function(d) { return d.key[0]; }) // .x(d3.scale.linear().domain([2000, 2014])) // .renderHorizontalGridLines(true) // .xAxisLabel("Year") // .yAxisLabel("Number of '$$'+ Grocery Stores") // .clipPadding(10) // .elasticY(true) // .brushOn(false) // .xAxis().ticks(8).tickFormat(d3.format("d")); // // biz2_timeline.width(470) // .height(300) // .margins({top: 10, right: 50, bottom: 30, left: 30}) // .dimension(yearDim
clickedOn
identifier_name