Dataset columns and value statistics:

column      type           stats
file_name   large_string   lengths 4 to 140
prefix      large_string   lengths 0 to 12.1k
suffix      large_string   lengths 0 to 12k
middle      large_string   lengths 0 to 7.51k
fim_type    large_string   4 classes (random_line_split, identifier_body, conditional_block, identifier_name)
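Each record below appears to hold one fill-in-the-middle (FIM) example: a source file is split so that prefix + middle + suffix reassembles the original text, and fim_type labels how the split was chosen (random_line_split, identifier_body, conditional_block, or identifier_name). A minimal sketch of reassembling and serializing such a record is shown here; the example record, the sentinel tokens, and the PSM ordering are illustrative assumptions, not something the dataset itself prescribes.

```python
# Minimal sketch: rebuild the source text and a PSM-ordered FIM training string
# from one record. Sentinel tokens and the example record are assumptions.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def reassemble_source(record: dict) -> str:
    """Original file contents: prefix + middle + suffix."""
    return record["prefix"] + record["middle"] + record["suffix"]

def to_psm_sample(record: dict) -> str:
    """Prefix-Suffix-Middle ordering commonly used for FIM training."""
    return (FIM_PREFIX + record["prefix"]
            + FIM_SUFFIX + record["suffix"]
            + FIM_MIDDLE + record["middle"])

example = {                      # hypothetical record shaped like the rows below
    "file_name": "instance_list.py",
    "prefix": "    def ",
    "suffix": "(ctx):\n        ...\n",
    "middle": "do_query",
    "fim_type": "identifier_name",
}
assert reassemble_source(example) == "    def do_query(ctx):\n        ...\n"
print(to_psm_sample(example))
```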
file_name: ACPF_ExportMuRaster.py
] + "*" currentWS = env.workspace env.workspace = logFolder logList = arcpy.ListFiles(logFile) for lg in logList: arcpy.Delete_management(lg) env.workspace = currentWS return True except: errorMsg() False ## =================================================================================== def CheckSpatialReference(muPolygon): # Make sure that the coordinate system is projected and units are meters try: desc = arcpy.Describe(muPolygon) inputSR = desc.spatialReference if inputSR.type.upper() == "PROJECTED": if inputSR.linearUnitName.upper() == "METER": env.outputCoordinateSystem = inputSR return True else: raise MyError, os.path.basename(theGDB) + ": Input soil polygon layer does not have a valid coordinate system for gSSURGO" else: raise MyError, os.path.basename(theGDB) + ": Input soil polygon layer must have a projected coordinate system" except MyError, e: # Example: raise MyError, "This is an error message" PrintMsg(str(e), 2) return False except: errorMsg() return False ## =================================================================================== def ConvertToRaster(muPolygon, rasterName): # main function used for raster conversion try: # # Set geoprocessing environment # env.overwriteOutput = True arcpy.env.compression = "LZ77" env.tileSize = "128 128" gdb = os.path.dirname(muPolygon) outputRaster = os.path.join(gdb, rasterName) iRaster = 10 # output resolution is 10 meters # Make sure that the env.scratchGDB is NOT Default.gdb. This causes problems for # some unknown reason. if (os.path.basename(env.scratchGDB).lower() == "default.gdb") or \ (os.path.basename(env.scratchWorkspace).lower() == "default.gdb") or \ (os.path.basename(env.scratchGDB).lower() == gdb): raise MyError, "Invalid scratch workspace setting (" + env.scratchWorkspace + ")" # Create an ArcInfo workspace under the scratchFolder. Trying to prevent # 99999 errors for PolygonToRaster on very large databases # aiWorkspace = env.scratchFolder if not arcpy.Exists(os.path.join(aiWorkspace, "info")): #PrintMsg(" \nCreating ArcInfo workspace (" + os.path.basename(aiWorkspace) + ") in: " + os.path.dirname(aiWorkspace), 1) arcpy.CreateArcInfoWorkspace_management(os.path.dirname(aiWorkspace), os.path.basename(aiWorkspace)) # turn off automatic Pyramid creation and Statistics calculation env.rasterStatistics = "NONE" env.pyramid = "PYRAMIDS 0" env.workspace = gdb # Need to check for dashes or spaces in folder names or leading numbers in database or raster names desc = arcpy.Describe(muPolygon) if not arcpy.Exists(muPolygon): raise MyError, "Could not find input featureclass: " + muPolygon # Check input layer's coordinate system to make sure horizontal units are meters # set the output coordinate system for the raster (neccessary for PolygonToRaster) if CheckSpatialReference(muPolygon) == False: return False # Sometimes it helps to compact large databases before raster conversion #arcpy.SetProgressorLabel("Compacting database prior to rasterization...") #arcpy.Compact_management(gdb) # For rasters named using an attribute value, some attribute characters can result in # 'illegal' names. 
outputRaster = outputRaster.replace("-", "") if arcpy.Exists(outputRaster): arcpy.Delete_management(outputRaster) time.sleep(1) if arcpy.Exists(outputRaster): err = "Output raster (" + os.path.basename(outputRaster) + ") already exists" raise MyError, err #start = time.time() # start clock to measure total processing time #begin = time.time() # start clock to measure set up time time.sleep(2) PrintMsg(" \nBeginning raster conversion process", 0) # Create Lookup table for storing MUKEY values and their integer counterparts # lu = os.path.join(env.scratchGDB, "Lookup") if arcpy.Exists(lu): arcpy.Delete_management(lu) # The Lookup table contains both MUKEY and its integer counterpart (CELLVALUE). # Using the joined lookup table creates a raster with CellValues that are the # same as MUKEY (but integer). This will maintain correct MUKEY values # during a moscaic or clip. # arcpy.CreateTable_management(os.path.dirname(lu), os.path.basename(lu)) arcpy.AddField_management(lu, "CELLVALUE", "LONG") arcpy.AddField_management(lu, "mukey", "TEXT", "#", "#", "30") # Create list of areasymbols present in the MUPOLYGON featureclass # Having problems processing CONUS list of MUKEYs. Python seems to be running out of memory, # but I don't see high usage in Windows Task Manager # # PrintMsg(" \nscratchFolder set to: " + env.scratchFolder, 1) # Create list of MUKEY values from the MUPOLYGON featureclass # # Create a list of map unit keys present in the MUPOLYGON featureclass # PrintMsg("\tGetting list of mukeys from input soil polygon layer...", 0) arcpy.SetProgressor("default", "Getting inventory of map units...") tmpPolys = "SoilPolygons" sqlClause = ("DISTINCT", None) with arcpy.da.SearchCursor(muPolygon, ["mukey"], "", "", "", sql_clause=sqlClause) as srcCursor: # Create a unique, sorted list of MUKEY values in the MUPOLYGON featureclass mukeyList = [row[0] for row in srcCursor] mukeyList.sort() if len(mukeyList) == 0: raise MyError, "Failed to get MUKEY values from " + muPolygon muCnt = len(mukeyList) # Load MUKEY values into Lookup table # #PrintMsg("\tSaving " + Number_Format(muCnt, 0, True) + " MUKEY values for " + Number_Format(polyCnt, 0, True) + " polygons" , 0) arcpy.SetProgressorLabel("Creating lookup table...") with arcpy.da.InsertCursor(lu, ("CELLVALUE", "mukey") ) as inCursor: for mukey in mukeyList: rec = mukey, mukey inCursor.insertRow(rec) # Add MUKEY attribute index to Lookup table arcpy.AddIndex_management(lu, ["mukey"], "Indx_LU") # # End of Lookup table code # Match NLCD raster (snapraster) cdlRasters = arcpy.ListRasters("wsCDL*") if len(cdlRasters) == 0: raise MyError, "Required Cropland Data Layer rasters missing from " + gdb else: cdlRaster = cdlRasters[-1] env.snapRaster = cdlRaster #env.extent = cdlRaster # Raster conversion process... 
# PrintMsg(" \nConverting featureclass " + os.path.basename(muPolygon) + " to raster (" + str(iRaster) + " meter)", 0) tmpPolys = "poly_tmp" arcpy.MakeFeatureLayer_management (muPolygon, tmpPolys) arcpy.AddJoin_management (tmpPolys, "mukey", lu, "mukey", "KEEP_ALL") arcpy.SetProgressor("default", "Running PolygonToRaster conversion...") # Need to make sure that the join was successful time.sleep(1) rasterFields = arcpy.ListFields(tmpPolys) rasterFieldNames = list() for rFld in rasterFields: rasterFieldNames.append(rFld.name.upper()) if not "LOOKUP.CELLVALUE" in rasterFieldNames: raise MyError, "Join failed for Lookup table (CELLVALUE)" if (os.path.basename(muPolygon).upper() + ".MUKEY") in rasterFieldNames: #raise MyError, "Join failed for Lookup table (SPATIALVERSION)" priorityFld = os.path.basename(muPolygon) + ".MUKEY" else: priorityFld = os.path.basename(muPolygon) + ".CELLVALUE" #ListEnv() arcpy.PolygonToRaster_conversion(tmpPolys, "Lookup.CELLVALUE", outputRaster, "MAXIMUM_COMBINED_AREA", "", iRaster) # No priority field for single raster # immediately delete temporary polygon layer to free up memory for the rest of the process time.sleep(1) arcpy.Delete_management(tmpPolys) # End of single raster process # Now finish up the single temporary raster # PrintMsg(" \nFinalizing raster conversion process:", 0) # Reset the stopwatch for the raster post-processing #begin = time.time() # Remove lookup table if arcpy.Exists(lu): arcpy.Delete_management(lu) # **************************************************** # Build pyramids and statistics # ****************************************************
if arcpy.Exists(outputRaster): time.sleep(1)
fim_type: random_line_split
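The ACPF_ExportMuRaster.py record above centers on one idea: build a Lookup table that pairs each text MUKEY with an integer CELLVALUE, join it to the soil polygons, and rasterize on the joined integer field so cell values stay aligned with MUKEY through later mosaics or clips. A condensed sketch of that flow follows; the paths, the output raster name, and the explicit int() conversion are assumptions for illustration, and it only runs where arcpy is available.

```python
import os
import arcpy
from arcpy import env

# Condensed sketch of the lookup-join rasterization flow described above.
muPolygon = r"C:\data\acpf.gdb\MUPOLYGON"         # hypothetical input featureclass
gdb = os.path.dirname(muPolygon)
outputRaster = os.path.join(gdb, "MuRaster_10m")  # hypothetical output name
iRaster = 10                                      # 10 m cell size, as in the original

# Lookup table pairing each text MUKEY with an integer CELLVALUE so the raster
# cell values remain traceable to MUKEY after mosaics or clips.
lu = os.path.join(env.scratchGDB, "Lookup")
arcpy.CreateTable_management(os.path.dirname(lu), os.path.basename(lu))
arcpy.AddField_management(lu, "CELLVALUE", "LONG")
arcpy.AddField_management(lu, "mukey", "TEXT", "#", "#", "30")

with arcpy.da.SearchCursor(muPolygon, ["mukey"], sql_clause=("DISTINCT", None)) as rows, \
     arcpy.da.InsertCursor(lu, ("CELLVALUE", "mukey")) as ins:
    for (mukey,) in rows:
        ins.insertRow((int(mukey), mukey))        # int() conversion is an assumption

# Join the lookup table and rasterize on the integer CELLVALUE field.
tmpPolys = "poly_tmp"
arcpy.MakeFeatureLayer_management(muPolygon, tmpPolys)
arcpy.AddJoin_management(tmpPolys, "mukey", lu, "mukey", "KEEP_ALL")
arcpy.PolygonToRaster_conversion(tmpPolys, "Lookup.CELLVALUE", outputRaster,
                                 "MAXIMUM_COMBINED_AREA", "", iRaster)
```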
file_name: get_sheet.py
3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])''' ) #一整个 #return d2_1, d2_2, d2_3, d3 #df, df_woe, use_lst, cal_iv, type_train,cal_psi ,lr def get_2_1_imp(self, df): d1 = DataFrame(index=self.use_lst) cover_dic = dict(df[use_lst].notnull().sum()) d1['auc'] = [round(0.5+abs(0.5-roc_auc_score(df[self.y], df[i])), 3) for i in self.use_lst] #d1['ks'] = [round(max(abs(roc_curve(df[self.y],df[name])[0]- roc_curve(df[self.y],df[name])[1])), 3) for name in self.use_lst] d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3) for name in self.use_lst] d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train]=='ins'], name, self.y), 3) for name in self.use_lst] d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train]=='oot'], name, self.y), 3) for name in self.use_lst] d1['coef'] = [round(i, 4) for i in self.model.coef_[0]] #d1['importance'] = self.model.feature_importances_ d1 = d1.reset_index() d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst] d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i),3) for i in range(len(self.use_lst))] #d1['fill_missing_data'] = [fill_na_dic[name] for name in self.use_lst] #d2_1 = d1 d1.index = range(1, d1.shape[0]+1) return d1 #df, use_lst, type_train def get_2_2_des(self): df = self.df[self.df[self.type_train].isin(['ins', 'oot'])] df_data_des = df[self.use_lst].describe().T cover_dic = dict(df[use_lst].notnull().sum())
.use_lst).reset_index() d2_3.index = range(1, d2_3.shape[0]+1) return d2_3 #df_bin, use_lst, #type_lst#, type_train, woe_dic def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']): res = [] for loc, i in enumerate(type_lst): lst = [] df_tmp = self.df_bin[(self.df_bin[self.type_train]==i)] for name in self.use_lst: #ks_lst = list(self.ks_calc_cross(df_tmp, name, self.y)[1]['gap']) #while len(ks_lst) > df_tmp.shape[0]: # ks_lst.pop() #while len(ks_lst) < df_tmp.shape[0]: # ks_lst.append(0) #print(ks_lst) dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']] dd_tmp['bad_rate'] = dd_tmp[self.y]/dd_tmp['count'] dd_tmp = dd_tmp.reset_index() dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[name][x]) dd_tmp.sort_values(by='bad_rate', inplace=True) dd_tmp['sort_key'] = [float(i.split(',')[0][1:]) if i[0]=='(' else float('inf') for i in dd_tmp[name]] #print(dd_tmp) dd_tmp.sort_values(by='sort_key', inplace=True) dd_tmp.drop(columns=['sort_key'], inplace=True) name1 = '-' d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio', 'woe'], data=[[str(name1), '-', '-', '-','-']]+dd_tmp.values.tolist()[:], index=[[name]]+['-']*dd_tmp.shape[0]) if loc < 1: split_name = '<-->'+str(i) else: split_name = str(type_lst[loc-1])+'<-->'+str(i) d[split_name] = [split_name for i in range(d.shape[0])] d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe' ]] lst.append(d) res.append(lst) return pd.concat((pd.concat(i for i in res[i]) for i in range(len(type_lst))),axis=1) #按照类别做DataFrame def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'): df_tmp = copy.deepcopy(df[[cate, self.y, y]]) df_tmp.rename(columns={cate:'category', self.y:'bad'}, inplace=True) cut_line = list(np.percentile(list(df_tmp[df_tmp['category']==base_cut][y]), range(1, 101,10))) #np.percentile出来的是np.array格式 cut_line[0] = -float('inf') cut_line.append(float('inf')) df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line) df_tmp['count'] = [1 for i in range(df_tmp.shape[0])] #print(df_tmp) ks_lst = [] for i in sorted(Counter(df_tmp['category']).keys()): #print(df_tmp[df_tmp['category']==i].shape) lst = list(ks_calc_cross(df_tmp[df_tmp['category']==i], 'bins', 'bad')[1]['gap']) #print(lst) while len(lst) < 10: lst = [0]+lst ks_lst.extend(lst) df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']] df = df.reset_index() df['bad_rate'] = df['bad']/df['count'] df['ks'] = ks_lst #print(df) for i in ['bad', 'count', 'bad_rate', 'ks']: df[i] = df[i].astype(float) #df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count', 'bad_rate', 'ks']].astype(float) #df = df.astype(str) df[['bad', 'count', 'bad_rate', 'ks'] ]= df[['bad', 'count', 'bad_rate', 'ks']].fillna(0) #添加几行用来画画 # #n = len(Counter(df_tmp[cate])) #length = df.shape[0]//n #for i in range(n): # #df[:length] #print(df) # df.index = range(1, df.shape[0]+1) return df def ks_calc_cross(self,data,pred,y_label): ''' 功能: 计算KS值,输出对应分割点和累计分布函数曲线图 输入值: data: 二维数组或dataframe,包括模型得分和真实的标签 pred: 一维数组或series,代表模型得分(一般为预测正类的概率) y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1}) 输出值: 'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap ''' crossfreq = pd.crosstab(data[pred],data[y_label]) crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum() crossdens['gap'] = abs(crossdens[0] - crossdens[1]) ks = crossdens[crossdens['gap'] == crossdens['gap'].max()] return ks,crossdens def cal_iv(self,df1, x, y='is_7_p'): df = copy.deepcopy(df1) if 'count' not in df.columns: df['count'] = [1 for i in range(df.shape[0])] df_tmp = df[[x,'count', y]].groupby(x).sum() df_tmp['good'] = 
df_tmp['count'] - df_tmp[y] df_tmp[y] = df_tmp[y
df_data_des = df_data_des.reset_index() df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(cover_dic[x]/df.shape[0], 4)) df_data_des.index = df_data_des['index'] df_data_des.drop(columns=['index', 'count'], inplace=True) d2_2 = df_data_des.reset_index() d2_2.index = range(1, d2_2.shape[0]+1) return d2_2 #df_woe, use_lst def get_2_3_corr(self): corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T) d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self
fim_type: identifier_body
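The get_sheet.py record above relies on a ks_calc_cross helper that cross-tabulates scores against the 0/1 label, turns the counts into cumulative distributions, and takes the largest gap between the good and bad curves as the KS statistic. A standalone version of that computation on made-up data might look like this (the toy scores and labels are assumptions):

```python
import pandas as pd

# Toy scored sample: 'score' is the model output, 'bad' the 0/1 label (illustrative data).
data = pd.DataFrame({
    'score': [0.1, 0.2, 0.2, 0.4, 0.5, 0.7, 0.8, 0.9],
    'bad':   [0,   0,   1,   0,   1,   1,   0,   1],
})

# Cumulative distribution of goods (label 0) and bads (label 1) per score value.
crossfreq = pd.crosstab(data['score'], data['bad'])
crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()
crossdens['gap'] = (crossdens[0] - crossdens[1]).abs()

ks = crossdens['gap'].max()       # KS statistic: maximum gap between the two CDFs
print(round(ks, 3))
```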
file_name: get_sheet.py
= self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])''' ) #一整个 #return d2_1, d2_2, d2_3, d3 #df, df_woe, use_lst, cal_iv, type_train,cal_psi ,lr def get_2_1_imp(self, df): d1 = DataFrame(index=self.use_lst) cover_dic = dict(df[use_lst].notnull().sum()) d1['auc'] = [round(0.5+abs(0.5-roc_auc_score(df[self.y], df[i])), 3) for i in self.use_lst] #d1['ks'] = [round(max(abs(roc_curve(df[self.y],df[name])[0]- roc_curve(df[self.y],df[name])[1])), 3) for name in self.use_lst] d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3) for name in self.use_lst] d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train]=='ins'], name, self.y), 3) for name in self.use_lst] d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train]=='oot'], name, self.y), 3) for name in self.use_lst] d1['coef'] = [round(i, 4) for i in self.model.coef_[0]] #d1['importance'] = self.model.feature_importances_ d1 = d1.reset_index() d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst] d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i),3) for i in range(len(self.use_lst))] #d1['fill_missing_data'] = [fill_na_dic[name] for name in self.use_lst] #d2_1 = d1 d1.index = range(1, d1.shape[0]+1) return d1 #df, use_lst, type_train def get_2_2_des(self): df = self.df[self.df[self.type_train].isin(['ins', 'oot'])] df_data_des = df[self.use_lst].describe().T cover_dic = dict(df[use_lst].notnull().sum()) df_data_des = df_data_des.reset_index() df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(cover_dic[x]/df.shape[0], 4)) df_data_des.index = df_data_des['index'] df_data_des.drop(columns=['index', 'count'], inplace=True) d2_2 = df_data_des.reset_index() d2_2.index = range(1, d2_2.shape[0]+1) return d2_2 #df_woe, use_lst def get_2_3_corr(self): corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T) d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self.use_lst).reset_index() d2_3.index = range(1, d2_3.shape[0]+1) return d2_3 #df_bin, use_lst, #type_lst#, type_train, woe_dic def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']): res = [] for loc, i in enumerate(type_lst): lst = [] df_tmp = self.df_bin[(self.df_bin[self.type_train]==i)] for name in self.use_lst: #ks_lst = list(self.ks_calc_cross(df_tmp, name, self.y)[1]['gap']) #while len(ks_lst) > df_tmp.shape[0]: # ks_lst.pop() #while len(ks_lst) < df_tmp.shape[0]: # ks_lst.append(0) #print(ks_lst) dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']] dd_tmp['bad_rate'] = dd_tmp[self.y]/dd_tmp['count'] dd_tmp = dd_tmp.reset_index() dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[name][x]) dd_tmp.sort_values(by='bad_rate', inplace=True) dd_tmp['sort_key'] = [float(i.split(',')[0][1:]) if i[0]=='(' else float('inf') for i in dd_tmp[name]] #print(dd_tmp) dd_tmp.sort_values(by='sort_key', inplace=True) dd_tmp.drop(columns=['sort_key'], inplace=True) name1 = '-' d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio', 'woe'], data=[[str(name1), '-', '-', '-','-']]+dd_tmp.values.tolist()[:], index=[[name]]+['-']*dd_tmp.shape[0]) if loc < 1: split_name = '<-->'+str(i) else: split_name = str(type_lst[loc-1])+'<-->'+str(i) d[split_name] = [split_name for i in range(d.shape[0])] d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe' ]] lst.append(d) res.append(lst) return pd.concat((pd.concat(i for i in res[i]) for i in range(len(type_lst))),axis=1) #按照类别做DataFrame def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'): df_tmp = copy.deepcopy(df[[cate, 
self.y, y]]) df_tmp.rename(columns={cate:'category', self.y:'bad'}, inplace=True) cut_line = list(np.percentile(list(df_tmp[df_tmp['category']==base_cut][y]), range(1, 101,10))) #np.percentile出来的是np.array格式 cut_line[0] = -float('inf') cut_line.append(float('inf')) df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line) df_tmp['count'] = [1 for i in range(df_tmp.shape[0])] #print(df_tmp) ks_lst = [] for i in sorted(Counter(df_tmp['category']).keys()): #print(df_tmp[df_tmp['category']==i].shape) lst = list(ks_calc_cross(df_tmp[df_tmp['category']==i], 'bins', 'bad')[1]['gap']) #print(lst) while len(lst) < 10: lst = [0]+lst ks_lst.extend(lst) df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']] df = df.reset_index() df['bad_rate'] = df['bad']/df['count'] df['ks'] = ks_lst #print(df) for i in ['bad', 'count', 'bad_rate', 'ks']: df[i] = df[i].astype(float) #df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count', 'bad_rate', 'ks']].astype(float) #df = df.astype(str) df[['bad', 'count', 'bad_rate', 'ks'] ]= df[['bad'
]].fillna(0) #添加几行用来画画 # #n = len(Counter(df_tmp[cate])) #length = df.shape[0]//n #for i in range(n): # #df[:length] #print(df) # df.index = range(1, df.shape[0]+1) return df def ks_calc_cross(self,data,pred,y_label): ''' 功能: 计算KS值,输出对应分割点和累计分布函数曲线图 输入值: data: 二维数组或dataframe,包括模型得分和真实的标签 pred: 一维数组或series,代表模型得分(一般为预测正类的概率) y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1}) 输出值: 'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap ''' crossfreq = pd.crosstab(data[pred],data[y_label]) crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum() crossdens['gap'] = abs(crossdens[0] - crossdens[1]) ks = crossdens[crossdens['gap'] == crossdens['gap'].max()] return ks,crossdens def cal_iv(self,df1, x, y='is_7_p'): df = copy.deepcopy(df1) if 'count' not in df.columns: df['count'] = [1 for i in range(df.shape[0])] df_tmp = df[[x,'count', y]].groupby(x).sum() df_tmp['good'] = df_tmp['count'] - df_tmp[y] df_tmp[y] = df_tmp[y
, 'count', 'bad_rate', 'ks'
fim_type: conditional_block
file_name: get_sheet.py
e_lst = use_lst self.woe_dic = woe_dic self.type_train = type_train self.model = lr self.y = y def main(self): print('d2_1 = self.get_2_1_imp()',#依次放好, 'd2_2 = self.get_2_2_des()', 'd2_3 = self.get_2_3_corr()', '''d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])''' ) #一整个 #return d2_1, d2_2, d2_3, d3 #df, df_woe, use_lst, cal_iv, type_train,cal_psi ,lr def get_2_1_imp(self, df): d1 = DataFrame(index=self.use_lst) cover_dic = dict(df[use_lst].notnull().sum()) d1['auc'] = [round(0.5+abs(0.5-roc_auc_score(df[self.y], df[i])), 3) for i in self.use_lst] #d1['ks'] = [round(max(abs(roc_curve(df[self.y],df[name])[0]- roc_curve(df[self.y],df[name])[1])), 3) for name in self.use_lst] d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3) for name in self.use_lst] d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train]=='ins'], name, self.y), 3) for name in self.use_lst] d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train]=='oot'], name, self.y), 3) for name in self.use_lst] d1['coef'] = [round(i, 4) for i in self.model.coef_[0]] #d1['importance'] = self.model.feature_importances_ d1 = d1.reset_index() d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst] d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i),3) for i in range(len(self.use_lst))] #d1['fill_missing_data'] = [fill_na_dic[name] for name in self.use_lst] #d2_1 = d1 d1.index = range(1, d1.shape[0]+1) return d1 #df, use_lst, type_train def get_2_2_des(self): df = self.df[self.df[self.type_train].isin(['ins', 'oot'])] df_data_des = df[self.use_lst].describe().T cover_dic = dict(df[use_lst].notnull().sum()) df_data_des = df_data_des.reset_index() df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(cover_dic[x]/df.shape[0], 4)) df_data_des.index = df_data_des['index'] df_data_des.drop(columns=['index', 'count'], inplace=True) d2_2 = df_data_des.reset_index() d2_2.index = range(1, d2_2.shape[0]+1) return d2_2 #df_woe, use_lst def get_2_3_corr(self): corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T) d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self.use_lst).reset_index() d2_3.index = range(1, d2_3.shape[0]+1) return d2_3 #df_bin, use_lst, #type_lst#, type_train, woe_dic def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']): res = [] for loc, i in enumerate(type_lst): lst = [] df_tmp = self.df_bin[(self.df_bin[self.type_train]==i)] for name in self.use_lst: #ks_lst = list(self.ks_calc_cross(df_tmp, name, self.y)[1]['gap']) #while len(ks_lst) > df_tmp.shape[0]: # ks_lst.pop() #while len(ks_lst) < df_tmp.shape[0]: # ks_lst.append(0) #print(ks_lst) dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']] dd_tmp['bad_rate'] = dd_tmp[self.y]/dd_tmp['count'] dd_tmp = dd_tmp.reset_index() dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[name][x]) dd_tmp.sort_values(by='bad_rate', inplace=True) dd_tmp['sort_key'] = [float(i.split(',')[0][1:]) if i[0]=='(' else float('inf') for i in dd_tmp[name]] #print(dd_tmp) dd_tmp.sort_values(by='sort_key', inplace=True) dd_tmp.drop(columns=['sort_key'], inplace=True) name1 = '-' d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio', 'woe'], data=[[str(name1), '-', '-', '-','-']]+dd_tmp.values.tolist()[:], index=[[name]]+['-']*dd_tmp.shape[0]) if loc < 1: split_name = '<-->'+str(i) else: split_name = str(type_lst[loc-1])+'<-->'+str(i) d[split_name] = [split_name for i in range(d.shape[0])] d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe' ]] lst.append(d) res.append(lst) 
return pd.concat((pd.concat(i for i in res[i]) for i in range(len(type_lst))),axis=1) #按照类别做DataFrame def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'): df_tmp = copy.deepcopy(df[[cate, self.y, y]]) df_tmp.rename(columns={cate:'category', self.y:'bad'}, inplace=True) cut_line = list(np.percentile(list(df_tmp[df_tmp['category']==base_cut][y]), range(1, 101,10))) #np.percentile出来的是np.array格式 cut_line[0] = -float('inf') cut_line.append(float('inf')) df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line) df_tmp['count'] = [1 for i in range(df_tmp.shape[0])] #print(df_tmp) ks_lst = [] for i in sorted(Counter(df_tmp['category']).keys()): #print(df_tmp[df_tmp['category']==i].shape) lst = list(ks_calc_cross(df_tmp[df_tmp['category']==i], 'bins', 'bad')[1]['gap']) #print(lst) while len(lst) < 10: lst = [0]+lst ks_lst.extend(lst) df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']] df = df.reset_index() df['bad_rate'] = df['bad']/df['count'] df['ks'] = ks_lst #print(df) for i in ['bad', 'count', 'bad_rate', 'ks']: df[i] = df[i].astype(float) #df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count', 'bad_rate', 'ks']].astype(float) #df = df.astype(str) df[['bad', 'count', 'bad_rate', 'ks'] ]= df[['bad', 'count', 'bad_rate', 'ks']].fillna(0) #添加几行用来画画 # #n = len(Counter(df_tmp[cate])) #length = df.shape[0]//n #for i in range(n): # #df[:length] #print(df) # df.index = range(1, df.shape[0]+1) return df def ks_calc_cross(self,data,pred,y_label): ''' 功能: 计算KS值,输出对应分割点和累计分布函数曲线图 输入值: data: 二维数组或dataframe,包括模型得分和真实的标签 pred: 一维数组或series,代表模型得分(一般为预测正类的概率) y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1}) 输出值: 'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap ''' crossfreq = pd.crosstab(data[pred],data[y_label]) crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum() crossdens['gap'] = abs(crossdens[0] - crossdens[1]) ks = crossdens[crossdens['gap'] == crossdens['gap'].max()] return ks,c
self.us
fim_type: identifier_name
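The get_2_1_imp method in the get_sheet.py records above calls a cal_psi helper that never appears in these excerpts. For context, a conventional population stability index, comparing an expected (for example 'ins') distribution of a binned feature against an actual (for example 'oot') one, can be sketched as below; the function body, the smoothing constant, and the example bins are assumptions rather than the project's actual implementation.

```python
import numpy as np
import pandas as pd

def cal_psi(expected: pd.Series, actual: pd.Series, eps: float = 1e-6) -> float:
    """PSI of two binned samples: sum((a% - e%) * ln(a% / e%)) over shared bins."""
    e_pct = expected.value_counts(normalize=True)
    a_pct = actual.value_counts(normalize=True)
    bins = e_pct.index.union(a_pct.index)
    e = e_pct.reindex(bins, fill_value=0) + eps   # smooth empty bins to avoid log(0)
    a = a_pct.reindex(bins, fill_value=0) + eps
    return float(((a - e) * np.log(a / e)).sum())

# Illustrative usage with two binned feature columns.
ins = pd.Series(['low', 'low', 'mid', 'high', 'mid'])
oot = pd.Series(['low', 'mid', 'mid', 'high', 'high'])
print(round(cal_psi(ins, oot), 4))
```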
file_name: get_sheet.py
3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])''' ) #一整个 #return d2_1, d2_2, d2_3, d3 #df, df_woe, use_lst, cal_iv, type_train,cal_psi ,lr def get_2_1_imp(self, df): d1 = DataFrame(index=self.use_lst) cover_dic = dict(df[use_lst].notnull().sum()) d1['auc'] = [round(0.5+abs(0.5-roc_auc_score(df[self.y], df[i])), 3) for i in self.use_lst] #d1['ks'] = [round(max(abs(roc_curve(df[self.y],df[name])[0]- roc_curve(df[self.y],df[name])[1])), 3) for name in self.use_lst] d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3) for name in self.use_lst] d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train]=='ins'], name, self.y), 3) for name in self.use_lst] d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train]=='oot'], name, self.y), 3) for name in self.use_lst] d1['coef'] = [round(i, 4) for i in self.model.coef_[0]] #d1['importance'] = self.model.feature_importances_ d1 = d1.reset_index() d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst] d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i),3) for i in range(len(self.use_lst))] #d1['fill_missing_data'] = [fill_na_dic[name] for name in self.use_lst] #d2_1 = d1 d1.index = range(1, d1.shape[0]+1) return d1 #df, use_lst, type_train def get_2_2_des(self): df = self.df[self.df[self.type_train].isin(['ins', 'oot'])] df_data_des = df[self.use_lst].describe().T cover_dic = dict(df[use_lst].notnull().sum()) df_data_des = df_data_des.reset_index() df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(cover_dic[x]/df.shape[0], 4)) df_data_des.index = df_data_des['index'] df_data_des.drop(columns=['index', 'count'], inplace=True) d2_2 = df_data_des.reset_index() d2_2.index = range(1, d2_2.shape[0]+1) return d2_2 #df_woe, use_lst def get_2_3_corr(self): corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T) d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self.use_lst).reset_index() d2_3.index = range(1, d2_3.shape[0]+1) return d2_3 #df_bin, use_lst, #type_lst#, type_train, woe_dic def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']): res = [] for loc, i in enumerate(type_lst): lst = [] df_tmp = self.df_bin[(self.df_bin[self.type_train]==i)] for name in self.use_lst: #ks_lst = list(self.ks_calc_cross(df_tmp, name, self.y)[1]['gap']) #while len(ks_lst) > df_tmp.shape[0]: # ks_lst.pop() #while len(ks_lst) < df_tmp.shape[0]: # ks_lst.append(0) #print(ks_lst) dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']] dd_tmp['bad_rate'] = dd_tmp[self.y]/dd_tmp['count'] dd_tmp = dd_tmp.reset_index() dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[name][x]) dd_tmp.sort_values(by='bad_rate', inplace=True) dd_tmp['sort_key'] = [float(i.split(',')[0][1:]) if i[0]=='(' else float('inf') for i in dd_tmp[name]] #print(dd_tmp) dd_tmp.sort_values(by='sort_key', inplace=True) dd_tmp.drop(columns=['sort_key'], inplace=True) name1 = '-' d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio', 'woe'], data=[[str(name1), '-', '-', '-','-']]+dd_tmp.values.tolist()[:], index=[[name]]+['-']*dd_tmp.shape[0]) if loc < 1:
split_name = str(type_lst[loc-1])+'<-->'+str(i) d[split_name] = [split_name for i in range(d.shape[0])] d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe' ]] lst.append(d) res.append(lst) return pd.concat((pd.concat(i for i in res[i]) for i in range(len(type_lst))),axis=1) #按照类别做DataFrame def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'): df_tmp = copy.deepcopy(df[[cate, self.y, y]]) df_tmp.rename(columns={cate:'category', self.y:'bad'}, inplace=True) cut_line = list(np.percentile(list(df_tmp[df_tmp['category']==base_cut][y]), range(1, 101,10))) #np.percentile出来的是np.array格式 cut_line[0] = -float('inf') cut_line.append(float('inf')) df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line) df_tmp['count'] = [1 for i in range(df_tmp.shape[0])] #print(df_tmp) ks_lst = [] for i in sorted(Counter(df_tmp['category']).keys()): #print(df_tmp[df_tmp['category']==i].shape) lst = list(ks_calc_cross(df_tmp[df_tmp['category']==i], 'bins', 'bad')[1]['gap']) #print(lst) while len(lst) < 10: lst = [0]+lst ks_lst.extend(lst) df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']] df = df.reset_index() df['bad_rate'] = df['bad']/df['count'] df['ks'] = ks_lst #print(df) for i in ['bad', 'count', 'bad_rate', 'ks']: df[i] = df[i].astype(float) #df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count', 'bad_rate', 'ks']].astype(float) #df = df.astype(str) df[['bad', 'count', 'bad_rate', 'ks'] ]= df[['bad', 'count', 'bad_rate', 'ks']].fillna(0) #添加几行用来画画 # #n = len(Counter(df_tmp[cate])) #length = df.shape[0]//n #for i in range(n): # #df[:length] #print(df) # df.index = range(1, df.shape[0]+1) return df def ks_calc_cross(self,data,pred,y_label): ''' 功能: 计算KS值,输出对应分割点和累计分布函数曲线图 输入值: data: 二维数组或dataframe,包括模型得分和真实的标签 pred: 一维数组或series,代表模型得分(一般为预测正类的概率) y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1}) 输出值: 'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap ''' crossfreq = pd.crosstab(data[pred],data[y_label]) crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum() crossdens['gap'] = abs(crossdens[0] - crossdens[1]) ks = crossdens[crossdens['gap'] == crossdens['gap'].max()] return ks,crossdens def cal_iv(self,df1, x, y='is_7_p'): df = copy.deepcopy(df1) if 'count' not in df.columns: df['count'] = [1 for i in range(df.shape[0])] df_tmp = df[[x,'count', y]].groupby(x).sum() df_tmp['good'] = df_tmp['count'] - df_tmp[y] df_tmp[y] = df_tmp[y].
split_name = '<-->'+str(i) else:
fim_type: random_line_split
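The cal_iv method is cut off at the end of the get_sheet.py records above, right after the good counts are derived as count minus bad. For reference, a conventional information value computation over a binned feature is sketched here; the smoothing term and the toy sample are assumptions, not the truncated original.

```python
import numpy as np
import pandas as pd

def cal_iv(df: pd.DataFrame, x: str, y: str) -> float:
    """Conventional information value of binned feature `x` against 0/1 label `y`."""
    grouped = df.groupby(x)[y].agg(bad='sum', count='count')
    grouped['good'] = grouped['count'] - grouped['bad']
    bad_pct = grouped['bad'] / grouped['bad'].sum()
    good_pct = grouped['good'] / grouped['good'].sum()
    woe = np.log((good_pct + 1e-6) / (bad_pct + 1e-6))   # smoothed to avoid log(0)
    return float(((good_pct - bad_pct) * woe).sum())

# Illustrative usage on a tiny binned sample.
sample = pd.DataFrame({'bin': ['a', 'a', 'b', 'b', 'b'], 'label': [0, 1, 0, 0, 1]})
print(round(cal_iv(sample, 'bin', 'label'), 4))
```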
file_name: instance_list.py
# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import heapq import itertools from nova import context from nova import db from nova import exception from nova import objects from nova.objects import instance as instance_obj class InstanceSortContext(object): def __init__(self, sort_keys, sort_dirs): self._sort_keys = sort_keys self._sort_dirs = sort_dirs def compare_instances(self, inst1, inst2): """Implements cmp(inst1, inst2) for the first key that is different Adjusts for the requested sort direction by inverting the result as needed. """ for skey, sdir in zip(self._sort_keys, self._sort_dirs): resultflag = 1 if sdir == 'desc' else -1 if inst1[skey] < inst2[skey]: return resultflag elif inst1[skey] > inst2[skey]: return resultflag * -1 return 0 class InstanceWrapper(object):
def _get_marker_instance(ctx, marker): """Get the marker instance from its cell. This returns the marker instance from the cell in which it lives """ try: im = objects.InstanceMapping.get_by_instance_uuid(ctx, marker) except exception.InstanceMappingNotFound: raise exception.MarkerNotFound(marker=marker) elevated = ctx.elevated(read_deleted='yes') with context.target_cell(elevated, im.cell_mapping) as cctx: try: db_inst = db.instance_get_by_uuid(cctx, marker, columns_to_join=[]) except exception.InstanceNotFound: db_inst = None if not db_inst: raise exception.MarkerNotFound(marker=marker) return db_inst def get_instances_sorted(ctx, filters, limit, marker, columns_to_join, sort_keys, sort_dirs): """Get a cross-cell list of instances matching filters. This iterates cells in parallel generating a unified and sorted list of instances as efficiently as possible. It takes care to iterate the list as infrequently as possible. We wrap the results in InstanceWrapper objects so that they are sortable by heapq.merge(), which requires that the '<' operator just works. We encapsulate our sorting requirements into an InstanceSortContext which we pass to all of the wrappers so they behave the way we want. This function is a generator of instances from the database like what you would get from instance_get_all_by_filters_sort() in the DB API. NOTE: Since we do these in parallel, a nonzero limit will be passed to each database query, although the limit will be enforced in the output of this function. Meaning, we will still query $limit from each database, but only return $limit total results. """ if not sort_keys: # This is the default from the process_sort_params() method in # the DB API. It doesn't really matter, as this only comes into # play if the user didn't ask for a specific ordering, but we # use the same scheme for consistency. sort_keys = ['created_at', 'id'] sort_dirs = ['asc', 'asc'] sort_ctx = InstanceSortContext(sort_keys, sort_dirs) if marker: # A marker UUID was provided from the API. Call this the 'global' # marker as it determines where we start the process across # all cells. Look up the instance in whatever cell it is in and # record the values for the sort keys so we can find the marker # instance in each cell (called the 'local' marker). global_marker_instance = _get_marker_instance(ctx, marker) global_marker_values = [global_marker_instance[key] for key in sort_keys] def do_query(ctx): """Generate InstanceWrapper(Instance) objects from a cell. We do this inside the thread (created by scatter_gather_all_cells()) so that we return wrappers and avoid having to iterate the combined result list in the caller again. This is run against each cell by the scatter_gather routine. """ # The local marker is a uuid of an instance in a cell that is found # by the special method instance_get_by_sort_filters(). It should # be the next instance in order according to the sort provided, # but after the marker instance which may have been in another cell. local_marker = None # Since the regular DB query routines take a marker and assume that # the marked instance was the last entry of the previous page, we # may need to prefix it to our result query if we're not the cell # that had the actual marker instance. local_marker_prefix = [] if marker: # FIXME(danms): If we knew which cell we were in here, we could # avoid looking up the marker again. But, we don't currently. 
local_marker = db.instance_get_by_sort_filters( ctx, sort_keys, sort_dirs, global_marker_values) if local_marker: if local_marker != marker: # We did find a marker in our cell, but it wasn't # the global marker. Thus, we will use it as our # marker in the main query below, but we also need # to prefix that result with this marker instance # since the result below will not return it and it # has not been returned to the user yet. Note that # we do _not_ prefix the marker instance if our # marker was the global one since that has already # been sent to the user. local_marker_filters = copy.copy(filters) if 'uuid' not in local_marker_filters: # If a uuid filter was provided, it will # have included our marker already if this instance # is desired in the output set. If it wasn't, we # specifically query for it. If the other filters would # have excluded it, then we'll get an empty set here # and not include it in the output as expected. local_marker_filters['uuid'] = [local_marker] local_marker_prefix = db.instance_get_all_by_filters_sort( ctx, local_marker_filters, limit=1, marker=None, columns_to_join=columns_to_join, sort_keys=sort_keys, sort_dirs=sort_dirs) else: # There was a global marker but everything in our cell is # _before_ that marker, so we return nothing. If we didn't # have this clause, we'd pass marker=None to the query below # and return a full unpaginated set for our cell. return [] main_query_result = db.instance_get_all_by_filters_sort( ctx, filters, limit=limit, marker=local_marker, columns_to_join=columns_to_join, sort_keys=sort_keys, sort_dirs=sort_dirs) return (InstanceWrapper(sort_ctx, inst) for inst in itertools.chain(local_marker_prefix, main_query_result)) # FIXME(danms): If we raise or timeout on a cell we need to handle # that here gracefully. The below routine will provide sentinels # to indicate that, which will crash the merge below, but we don't # handle this anywhere yet anyway. results = context.scatter_gather_all_cells(ctx, do_query) # If a limit was provided, and passed to the per-cell query routines. # That means we have NUM_CELLS * limit items across results. So, we # need to consume from that limit below and stop returning results. limit = limit or 0 # Generate results from heapq so we can return the inner # instance instead of the wrapper. This is basically free # as it works as our caller iterates the results. for i in heapq.merge(*results.values()): yield i._db_instance limit -= 1 if limit == 0: # We'll only hit this if limit was nonzero and we just generated # our last one return def get_instance_objects_sorted(ctx, filters, limit, marker, expected_attrs, sort_keys, sort_dirs): """Same as above, but return an InstanceList.""" columns_to_join =
"""Wrap an instance object from the database so it is sortable. We use heapq.merge() below to do the merge sort of things from the cell databases. That routine assumes it can use regular python operators (> and <) on the contents. Since that won't work with instances from the database (and depends on the sort keys/dirs), we need this wrapper class to provide that. Implementing __lt__ is enough for heapq.merge() to do its work. """ def __init__(self, sort_ctx, db_instance): self._sort_ctx = sort_ctx self._db_instance = db_instance def __lt__(self, other): r = self._sort_ctx.compare_instances(self._db_instance, other._db_instance) # cmp(x, y) returns -1 if x < y return r == -1
fim_type: identifier_body
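The instance_list.py records describe Nova's cross-cell listing: each cell returns an already-sorted stream, every row is wrapped in an object whose __lt__ defers to a shared sort context, and heapq.merge() interleaves the streams into one globally sorted generator. The same pattern, reduced to plain dicts and made-up data, looks like this:

```python
import heapq

class SortContext:
    """Compare dict rows on (key, direction) pairs, mirroring InstanceSortContext."""
    def __init__(self, sort_keys, sort_dirs):
        self.sort_keys = sort_keys
        self.sort_dirs = sort_dirs

    def compare(self, a, b):
        for key, direction in zip(self.sort_keys, self.sort_dirs):
            flag = 1 if direction == 'desc' else -1
            if a[key] < b[key]:
                return flag
            if a[key] > b[key]:
                return -flag
        return 0

class RowWrapper:
    """Make rows orderable for heapq.merge(); __lt__ is all merge() needs."""
    def __init__(self, ctx, row):
        self.ctx = ctx
        self.row = row

    def __lt__(self, other):
        return self.ctx.compare(self.row, other.row) == -1

# Two "cells", each already sorted ascending by created_at (illustrative data).
ctx = SortContext(['created_at'], ['asc'])
cell_a = [{'created_at': 1, 'id': 'a1'}, {'created_at': 4, 'id': 'a2'}]
cell_b = [{'created_at': 2, 'id': 'b1'}, {'created_at': 3, 'id': 'b2'}]
streams = [(RowWrapper(ctx, r) for r in cell) for cell in (cell_a, cell_b)]

merged = [w.row['id'] for w in heapq.merge(*streams)]
print(merged)  # ['a1', 'b1', 'b2', 'a2']
```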
file_name: instance_list.py
# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import heapq import itertools from nova import context from nova import db from nova import exception from nova import objects from nova.objects import instance as instance_obj class InstanceSortContext(object): def __init__(self, sort_keys, sort_dirs): self._sort_keys = sort_keys self._sort_dirs = sort_dirs def compare_instances(self, inst1, inst2): """Implements cmp(inst1, inst2) for the first key that is different Adjusts for the requested sort direction by inverting the result as needed. """ for skey, sdir in zip(self._sort_keys, self._sort_dirs): resultflag = 1 if sdir == 'desc' else -1 if inst1[skey] < inst2[skey]: return resultflag elif inst1[skey] > inst2[skey]: return resultflag * -1 return 0 class InstanceWrapper(object): """Wrap an instance object from the database so it is sortable. We use heapq.merge() below to do the merge sort of things from the cell databases. That routine assumes it can use regular python operators (> and <) on the contents. Since that won't work with instances from the database (and depends on the sort keys/dirs), we need this wrapper class to provide that. Implementing __lt__ is enough for heapq.merge() to do its work. """ def __init__(self, sort_ctx, db_instance): self._sort_ctx = sort_ctx self._db_instance = db_instance def __lt__(self, other): r = self._sort_ctx.compare_instances(self._db_instance, other._db_instance) # cmp(x, y) returns -1 if x < y return r == -1 def _get_marker_instance(ctx, marker): """Get the marker instance from its cell. This returns the marker instance from the cell in which it lives """ try: im = objects.InstanceMapping.get_by_instance_uuid(ctx, marker) except exception.InstanceMappingNotFound: raise exception.MarkerNotFound(marker=marker) elevated = ctx.elevated(read_deleted='yes') with context.target_cell(elevated, im.cell_mapping) as cctx: try: db_inst = db.instance_get_by_uuid(cctx, marker, columns_to_join=[]) except exception.InstanceNotFound: db_inst = None if not db_inst:
return db_inst def get_instances_sorted(ctx, filters, limit, marker, columns_to_join, sort_keys, sort_dirs): """Get a cross-cell list of instances matching filters. This iterates cells in parallel generating a unified and sorted list of instances as efficiently as possible. It takes care to iterate the list as infrequently as possible. We wrap the results in InstanceWrapper objects so that they are sortable by heapq.merge(), which requires that the '<' operator just works. We encapsulate our sorting requirements into an InstanceSortContext which we pass to all of the wrappers so they behave the way we want. This function is a generator of instances from the database like what you would get from instance_get_all_by_filters_sort() in the DB API. NOTE: Since we do these in parallel, a nonzero limit will be passed to each database query, although the limit will be enforced in the output of this function. Meaning, we will still query $limit from each database, but only return $limit total results. """ if not sort_keys: # This is the default from the process_sort_params() method in # the DB API. It doesn't really matter, as this only comes into # play if the user didn't ask for a specific ordering, but we # use the same scheme for consistency. sort_keys = ['created_at', 'id'] sort_dirs = ['asc', 'asc'] sort_ctx = InstanceSortContext(sort_keys, sort_dirs) if marker: # A marker UUID was provided from the API. Call this the 'global' # marker as it determines where we start the process across # all cells. Look up the instance in whatever cell it is in and # record the values for the sort keys so we can find the marker # instance in each cell (called the 'local' marker). global_marker_instance = _get_marker_instance(ctx, marker) global_marker_values = [global_marker_instance[key] for key in sort_keys] def do_query(ctx): """Generate InstanceWrapper(Instance) objects from a cell. We do this inside the thread (created by scatter_gather_all_cells()) so that we return wrappers and avoid having to iterate the combined result list in the caller again. This is run against each cell by the scatter_gather routine. """ # The local marker is a uuid of an instance in a cell that is found # by the special method instance_get_by_sort_filters(). It should # be the next instance in order according to the sort provided, # but after the marker instance which may have been in another cell. local_marker = None # Since the regular DB query routines take a marker and assume that # the marked instance was the last entry of the previous page, we # may need to prefix it to our result query if we're not the cell # that had the actual marker instance. local_marker_prefix = [] if marker: # FIXME(danms): If we knew which cell we were in here, we could # avoid looking up the marker again. But, we don't currently. local_marker = db.instance_get_by_sort_filters( ctx, sort_keys, sort_dirs, global_marker_values) if local_marker: if local_marker != marker: # We did find a marker in our cell, but it wasn't # the global marker. Thus, we will use it as our # marker in the main query below, but we also need # to prefix that result with this marker instance # since the result below will not return it and it # has not been returned to the user yet. Note that # we do _not_ prefix the marker instance if our # marker was the global one since that has already # been sent to the user. 
local_marker_filters = copy.copy(filters) if 'uuid' not in local_marker_filters: # If a uuid filter was provided, it will # have included our marker already if this instance # is desired in the output set. If it wasn't, we # specifically query for it. If the other filters would # have excluded it, then we'll get an empty set here # and not include it in the output as expected. local_marker_filters['uuid'] = [local_marker] local_marker_prefix = db.instance_get_all_by_filters_sort( ctx, local_marker_filters, limit=1, marker=None, columns_to_join=columns_to_join, sort_keys=sort_keys, sort_dirs=sort_dirs) else: # There was a global marker but everything in our cell is # _before_ that marker, so we return nothing. If we didn't # have this clause, we'd pass marker=None to the query below # and return a full unpaginated set for our cell. return [] main_query_result = db.instance_get_all_by_filters_sort( ctx, filters, limit=limit, marker=local_marker, columns_to_join=columns_to_join, sort_keys=sort_keys, sort_dirs=sort_dirs) return (InstanceWrapper(sort_ctx, inst) for inst in itertools.chain(local_marker_prefix, main_query_result)) # FIXME(danms): If we raise or timeout on a cell we need to handle # that here gracefully. The below routine will provide sentinels # to indicate that, which will crash the merge below, but we don't # handle this anywhere yet anyway. results = context.scatter_gather_all_cells(ctx, do_query) # If a limit was provided, and passed to the per-cell query routines. # That means we have NUM_CELLS * limit items across results. So, we # need to consume from that limit below and stop returning results. limit = limit or 0 # Generate results from heapq so we can return the inner # instance instead of the wrapper. This is basically free # as it works as our caller iterates the results. for i in heapq.merge(*results.values()): yield i._db_instance limit -= 1 if limit == 0: # We'll only hit this if limit was nonzero and we just generated # our last one return def get_instance_objects_sorted(ctx, filters, limit, marker, expected_attrs, sort_keys, sort_dirs): """Same as above, but return an InstanceList.""" columns_to_join
raise exception.MarkerNotFound(marker=marker)
fim_type: conditional_block
file_name: instance_list.py
# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import heapq import itertools from nova import context from nova import db from nova import exception from nova import objects from nova.objects import instance as instance_obj class InstanceSortContext(object): def __init__(self, sort_keys, sort_dirs): self._sort_keys = sort_keys self._sort_dirs = sort_dirs def compare_instances(self, inst1, inst2): """Implements cmp(inst1, inst2) for the first key that is different Adjusts for the requested sort direction by inverting the result as needed. """ for skey, sdir in zip(self._sort_keys, self._sort_dirs): resultflag = 1 if sdir == 'desc' else -1 if inst1[skey] < inst2[skey]: return resultflag elif inst1[skey] > inst2[skey]: return resultflag * -1 return 0 class InstanceWrapper(object): """Wrap an instance object from the database so it is sortable. We use heapq.merge() below to do the merge sort of things from the cell databases. That routine assumes it can use regular python operators (> and <) on the contents. Since that won't work with instances from the database (and depends on the sort keys/dirs), we need this wrapper class to provide that. Implementing __lt__ is enough for heapq.merge() to do its work. """ def __init__(self, sort_ctx, db_instance): self._sort_ctx = sort_ctx self._db_instance = db_instance def __lt__(self, other): r = self._sort_ctx.compare_instances(self._db_instance, other._db_instance) # cmp(x, y) returns -1 if x < y return r == -1 def _get_marker_instance(ctx, marker): """Get the marker instance from its cell. This returns the marker instance from the cell in which it lives """ try: im = objects.InstanceMapping.get_by_instance_uuid(ctx, marker) except exception.InstanceMappingNotFound: raise exception.MarkerNotFound(marker=marker) elevated = ctx.elevated(read_deleted='yes') with context.target_cell(elevated, im.cell_mapping) as cctx: try: db_inst = db.instance_get_by_uuid(cctx, marker, columns_to_join=[]) except exception.InstanceNotFound: db_inst = None if not db_inst: raise exception.MarkerNotFound(marker=marker) return db_inst def get_instances_sorted(ctx, filters, limit, marker, columns_to_join, sort_keys, sort_dirs): """Get a cross-cell list of instances matching filters. This iterates cells in parallel generating a unified and sorted list of instances as efficiently as possible. It takes care to iterate the list as infrequently as possible. We wrap the results in InstanceWrapper objects so that they are sortable by heapq.merge(), which requires that the '<' operator just works. We encapsulate our sorting requirements into an InstanceSortContext which we pass to all of the wrappers so they behave the way we want. This function is a generator of instances from the database like what you would get from instance_get_all_by_filters_sort() in the DB API. NOTE: Since we do these in parallel, a nonzero limit will be passed to each database query, although the limit will be enforced in the output of this function. Meaning, we will still query $limit from each database, but only return $limit total results. """ if not sort_keys: # This is the default from the process_sort_params() method in # the DB API. 
It doesn't really matter, as this only comes into # play if the user didn't ask for a specific ordering, but we # use the same scheme for consistency. sort_keys = ['created_at', 'id'] sort_dirs = ['asc', 'asc'] sort_ctx = InstanceSortContext(sort_keys, sort_dirs) if marker: # A marker UUID was provided from the API. Call this the 'global' # marker as it determines where we start the process across # all cells. Look up the instance in whatever cell it is in and # record the values for the sort keys so we can find the marker # instance in each cell (called the 'local' marker). global_marker_instance = _get_marker_instance(ctx, marker) global_marker_values = [global_marker_instance[key] for key in sort_keys] def
(ctx): """Generate InstanceWrapper(Instance) objects from a cell. We do this inside the thread (created by scatter_gather_all_cells()) so that we return wrappers and avoid having to iterate the combined result list in the caller again. This is run against each cell by the scatter_gather routine. """ # The local marker is a uuid of an instance in a cell that is found # by the special method instance_get_by_sort_filters(). It should # be the next instance in order according to the sort provided, # but after the marker instance which may have been in another cell. local_marker = None # Since the regular DB query routines take a marker and assume that # the marked instance was the last entry of the previous page, we # may need to prefix it to our result query if we're not the cell # that had the actual marker instance. local_marker_prefix = [] if marker: # FIXME(danms): If we knew which cell we were in here, we could # avoid looking up the marker again. But, we don't currently. local_marker = db.instance_get_by_sort_filters( ctx, sort_keys, sort_dirs, global_marker_values) if local_marker: if local_marker != marker: # We did find a marker in our cell, but it wasn't # the global marker. Thus, we will use it as our # marker in the main query below, but we also need # to prefix that result with this marker instance # since the result below will not return it and it # has not been returned to the user yet. Note that # we do _not_ prefix the marker instance if our # marker was the global one since that has already # been sent to the user. local_marker_filters = copy.copy(filters) if 'uuid' not in local_marker_filters: # If a uuid filter was provided, it will # have included our marker already if this instance # is desired in the output set. If it wasn't, we # specifically query for it. If the other filters would # have excluded it, then we'll get an empty set here # and not include it in the output as expected. local_marker_filters['uuid'] = [local_marker] local_marker_prefix = db.instance_get_all_by_filters_sort( ctx, local_marker_filters, limit=1, marker=None, columns_to_join=columns_to_join, sort_keys=sort_keys, sort_dirs=sort_dirs) else: # There was a global marker but everything in our cell is # _before_ that marker, so we return nothing. If we didn't # have this clause, we'd pass marker=None to the query below # and return a full unpaginated set for our cell. return [] main_query_result = db.instance_get_all_by_filters_sort( ctx, filters, limit=limit, marker=local_marker, columns_to_join=columns_to_join, sort_keys=sort_keys, sort_dirs=sort_dirs) return (InstanceWrapper(sort_ctx, inst) for inst in itertools.chain(local_marker_prefix, main_query_result)) # FIXME(danms): If we raise or timeout on a cell we need to handle # that here gracefully. The below routine will provide sentinels # to indicate that, which will crash the merge below, but we don't # handle this anywhere yet anyway. results = context.scatter_gather_all_cells(ctx, do_query) # If a limit was provided, and passed to the per-cell query routines. # That means we have NUM_CELLS * limit items across results. So, we # need to consume from that limit below and stop returning results. limit = limit or 0 # Generate results from heapq so we can return the inner # instance instead of the wrapper. This is basically free # as it works as our caller iterates the results. 
for i in heapq.merge(*results.values()): yield i._db_instance limit -= 1 if limit == 0: # We'll only hit this if limit was nonzero and we just generated # our last one return def get_instance_objects_sorted(ctx, filters, limit, marker, expected_attrs, sort_keys, sort_dirs): """Same as above, but return an InstanceList.""" columns_to_join
do_query
fim_type: identifier_name
file_name: instance_list.py
# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import heapq import itertools from nova import context from nova import db from nova import exception from nova import objects from nova.objects import instance as instance_obj class InstanceSortContext(object): def __init__(self, sort_keys, sort_dirs): self._sort_keys = sort_keys self._sort_dirs = sort_dirs
def compare_instances(self, inst1, inst2): """Implements cmp(inst1, inst2) for the first key that is different Adjusts for the requested sort direction by inverting the result as needed. """ for skey, sdir in zip(self._sort_keys, self._sort_dirs): resultflag = 1 if sdir == 'desc' else -1 if inst1[skey] < inst2[skey]: return resultflag elif inst1[skey] > inst2[skey]: return resultflag * -1 return 0 class InstanceWrapper(object): """Wrap an instance object from the database so it is sortable. We use heapq.merge() below to do the merge sort of things from the cell databases. That routine assumes it can use regular python operators (> and <) on the contents. Since that won't work with instances from the database (and depends on the sort keys/dirs), we need this wrapper class to provide that. Implementing __lt__ is enough for heapq.merge() to do its work. """ def __init__(self, sort_ctx, db_instance): self._sort_ctx = sort_ctx self._db_instance = db_instance def __lt__(self, other): r = self._sort_ctx.compare_instances(self._db_instance, other._db_instance) # cmp(x, y) returns -1 if x < y return r == -1 def _get_marker_instance(ctx, marker): """Get the marker instance from its cell. This returns the marker instance from the cell in which it lives """ try: im = objects.InstanceMapping.get_by_instance_uuid(ctx, marker) except exception.InstanceMappingNotFound: raise exception.MarkerNotFound(marker=marker) elevated = ctx.elevated(read_deleted='yes') with context.target_cell(elevated, im.cell_mapping) as cctx: try: db_inst = db.instance_get_by_uuid(cctx, marker, columns_to_join=[]) except exception.InstanceNotFound: db_inst = None if not db_inst: raise exception.MarkerNotFound(marker=marker) return db_inst def get_instances_sorted(ctx, filters, limit, marker, columns_to_join, sort_keys, sort_dirs): """Get a cross-cell list of instances matching filters. This iterates cells in parallel generating a unified and sorted list of instances as efficiently as possible. It takes care to iterate the list as infrequently as possible. We wrap the results in InstanceWrapper objects so that they are sortable by heapq.merge(), which requires that the '<' operator just works. We encapsulate our sorting requirements into an InstanceSortContext which we pass to all of the wrappers so they behave the way we want. This function is a generator of instances from the database like what you would get from instance_get_all_by_filters_sort() in the DB API. NOTE: Since we do these in parallel, a nonzero limit will be passed to each database query, although the limit will be enforced in the output of this function. Meaning, we will still query $limit from each database, but only return $limit total results. """ if not sort_keys: # This is the default from the process_sort_params() method in # the DB API. It doesn't really matter, as this only comes into # play if the user didn't ask for a specific ordering, but we # use the same scheme for consistency. sort_keys = ['created_at', 'id'] sort_dirs = ['asc', 'asc'] sort_ctx = InstanceSortContext(sort_keys, sort_dirs) if marker: # A marker UUID was provided from the API. Call this the 'global' # marker as it determines where we start the process across # all cells. Look up the instance in whatever cell it is in and # record the values for the sort keys so we can find the marker # instance in each cell (called the 'local' marker). 
global_marker_instance = _get_marker_instance(ctx, marker) global_marker_values = [global_marker_instance[key] for key in sort_keys] def do_query(ctx): """Generate InstanceWrapper(Instance) objects from a cell. We do this inside the thread (created by scatter_gather_all_cells()) so that we return wrappers and avoid having to iterate the combined result list in the caller again. This is run against each cell by the scatter_gather routine. """ # The local marker is a uuid of an instance in a cell that is found # by the special method instance_get_by_sort_filters(). It should # be the next instance in order according to the sort provided, # but after the marker instance which may have been in another cell. local_marker = None # Since the regular DB query routines take a marker and assume that # the marked instance was the last entry of the previous page, we # may need to prefix it to our result query if we're not the cell # that had the actual marker instance. local_marker_prefix = [] if marker: # FIXME(danms): If we knew which cell we were in here, we could # avoid looking up the marker again. But, we don't currently. local_marker = db.instance_get_by_sort_filters( ctx, sort_keys, sort_dirs, global_marker_values) if local_marker: if local_marker != marker: # We did find a marker in our cell, but it wasn't # the global marker. Thus, we will use it as our # marker in the main query below, but we also need # to prefix that result with this marker instance # since the result below will not return it and it # has not been returned to the user yet. Note that # we do _not_ prefix the marker instance if our # marker was the global one since that has already # been sent to the user. local_marker_filters = copy.copy(filters) if 'uuid' not in local_marker_filters: # If a uuid filter was provided, it will # have included our marker already if this instance # is desired in the output set. If it wasn't, we # specifically query for it. If the other filters would # have excluded it, then we'll get an empty set here # and not include it in the output as expected. local_marker_filters['uuid'] = [local_marker] local_marker_prefix = db.instance_get_all_by_filters_sort( ctx, local_marker_filters, limit=1, marker=None, columns_to_join=columns_to_join, sort_keys=sort_keys, sort_dirs=sort_dirs) else: # There was a global marker but everything in our cell is # _before_ that marker, so we return nothing. If we didn't # have this clause, we'd pass marker=None to the query below # and return a full unpaginated set for our cell. return [] main_query_result = db.instance_get_all_by_filters_sort( ctx, filters, limit=limit, marker=local_marker, columns_to_join=columns_to_join, sort_keys=sort_keys, sort_dirs=sort_dirs) return (InstanceWrapper(sort_ctx, inst) for inst in itertools.chain(local_marker_prefix, main_query_result)) # FIXME(danms): If we raise or timeout on a cell we need to handle # that here gracefully. The below routine will provide sentinels # to indicate that, which will crash the merge below, but we don't # handle this anywhere yet anyway. results = context.scatter_gather_all_cells(ctx, do_query) # If a limit was provided, and passed to the per-cell query routines. # That means we have NUM_CELLS * limit items across results. So, we # need to consume from that limit below and stop returning results. limit = limit or 0 # Generate results from heapq so we can return the inner # instance instead of the wrapper. This is basically free # as it works as our caller iterates the results. 
for i in heapq.merge(*results.values()): yield i._db_instance limit -= 1 if limit == 0: # We'll only hit this if limit was nonzero and we just generated # our last one return def get_instance_objects_sorted(ctx, filters, limit, marker, expected_attrs, sort_keys, sort_dirs): """Same as above, but return an InstanceList.""" columns_to_join =
random_line_split
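For contrast with the heapq.merge() call above, here is a small, purely illustrative Rust sketch (not Nova code) of the same idea: wrap each record so that the comparison operator encodes the requested sort, then drain a min-heap to merge the already-sorted per-cell result sets. The Row and Wrapped names are made up; for brevity this loads everything into the heap up front instead of streaming lazily the way heapq.merge does.

use std::cmp::{Ordering, Reverse};
use std::collections::BinaryHeap;

// Hypothetical record standing in for a DB instance row.
struct Row { created_at: i64, id: u64 }

// Wrapper whose ordering honours the requested sort direction,
// mirroring the InstanceWrapper/__lt__ trick above.
struct Wrapped { row: Row, descending: bool }

impl PartialEq for Wrapped {
    fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal }
}
impl Eq for Wrapped {}
impl PartialOrd for Wrapped {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
}
impl Ord for Wrapped {
    fn cmp(&self, other: &Self) -> Ordering {
        let natural = (self.row.created_at, self.row.id)
            .cmp(&(other.row.created_at, other.row.id));
        if self.descending { natural.reverse() } else { natural }
    }
}

fn main() {
    // Rows from two "cells", each list already sorted ascending.
    let cell_a = vec![Row { created_at: 1, id: 10 }, Row { created_at: 4, id: 11 }];
    let cell_b = vec![Row { created_at: 2, id: 20 }, Row { created_at: 3, id: 21 }];

    // Reverse turns the max-heap into a min-heap, so popping yields the
    // globally smallest row first.
    let mut heap: BinaryHeap<Reverse<Wrapped>> = cell_a
        .into_iter()
        .chain(cell_b)
        .map(|row| Reverse(Wrapped { row, descending: false }))
        .collect();

    // Enforce the overall limit while draining, as the generator above does.
    let limit = 3;
    for _ in 0..limit {
        if let Some(Reverse(w)) = heap.pop() {
            println!("created_at={} id={}", w.row.created_at, w.row.id);
        }
    }
}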
youtube.rs
/* [ { title: String, videoId: String, author: String, authorId: String, authorUrl: String, videoThumbnails: [ { quality: String, url: String, width: Int32, height: Int32 } ], description: String, descriptionHtml: String, viewCount: Int64, published: Int64, publishedText: String, lengthSeconds: Int32 paid: Bool, premium: Bool } ] */ #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTVideoInfo { title: String, video_id: String, video_thumbnails: Vec<YTThumbnailInfo>, description: String, length_seconds: i32, paid: bool, premium: bool, published: i64, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTThumbnailInfo { quality: Option<String>, url: String, width: i32, height: i32, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTChannelInfo { author: String, author_id: String, description: String, author_thumbnails: Vec<YTThumbnailInfo>, author_banners: Vec<YTThumbnailInfo>, } /// Important info about channel #[derive(Debug)] pub struct ChannelMetadata { pub title: String, pub thumbnail: String, pub description: String, } /// Important info about a video pub struct VideoInfo { pub id: String, pub url: String, pub title: String, pub description: String, pub thumbnail_url: String, pub published_at: chrono::DateTime<chrono::Utc>, } impl std::fmt::Debug for VideoInfo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "VideoInfo{{id: {:?}, title: {:?}, url: {:?}, published_at: {:?}}}", self.id, self.title, self.url, self.published_at, ) } } fn request_data<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> { fn subreq<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> { debug!("Retrieving URL {}", &url); let resp = attohttpc::get(&url).send()?; let text = resp.text()?; trace!("Raw response: {}", &text); let data: T = serde_json::from_str(&text) .with_context(|| format!("Failed to parse response from {}", &url))?; trace!("Raw deserialisation: {:?}", &data); Ok(data) } let mut tries = 0; let ret: Result<T> = loop { let resp = subreq(url); if let Ok(data) = resp { break Ok(data); } debug!("Retrying request to {} because {:?}", &url, &resp); if tries > 3 { break resp; } tries += 1; }; ret } /// Object to query data about given channel #[derive(Debug)] pub struct YoutubeQuery<'a> { chan_id: &'a YoutubeID, } impl<'a> YoutubeQuery<'a> { pub fn new(chan_id: &YoutubeID) -> YoutubeQuery { YoutubeQuery { chan_id } } pub fn get_metadata(&self) -> Result<ChannelMetadata> { let url = format!( "{prefix}/api/v1/channels/{chanid}", prefix = api_prefix(), chanid = self.chan_id.id ); let d: YTChannelInfo = request_data(&url)?; Ok(ChannelMetadata { title: d.author.clone(), thumbnail: d.author_thumbnails[0].url.clone(), description: d.description.clone(), }) } pub fn videos<'i>(&'i self) -> impl Iterator<Item = Result<VideoInfo>> + 'i { // GET /api/v1/channels/:ucid/videos?page=1 fn get_page(chanid: &str, page: i32) -> Result<Vec<VideoInfo>> { let url = format!( "{prefix}/api/v1/channels/videos/{chanid}?page={page}", prefix = api_prefix(), chanid = chanid, page = page, ); let data: Vec<YTVideoInfo> = request_data(&url)?; let ret: Vec<VideoInfo> = data .iter() .map(|d| VideoInfo { id: d.video_id.clone(), url: format!("http://youtube.com/watch?v={id}", id = d.video_id), title: d.title.clone(), description: d.description.clone(), thumbnail_url: d.video_thumbnails.first().unwrap().url.clone(), published_at: 
chrono::Utc.timestamp(d.published, 0), }) .collect(); Ok(ret) } let mut page_num = 1; use std::collections::VecDeque; let mut completed = false; let mut current_items: VecDeque<VideoInfo> = VecDeque::new(); let it = std::iter::from_fn(move || -> Option<Result<VideoInfo>> { if completed { return None; } if let Some(cur) = current_items.pop_front() { // Iterate through previously stored items Some(Ok(cur)) } else { // If nothing is stored, get next page of videos let data: Result<Vec<VideoInfo>> = get_page(&self.chan_id.id, page_num); page_num += 1; // Increment for future let nextup: Option<Result<VideoInfo>> = match data { // Something went wrong, return an error item Err(e) => { // Error state, prevent future iteration completed = true; // Return error Some(Err(e)) } Ok(new_items) => { if new_items.len() == 0 { // No more items, stop iterator None } else { current_items.extend(new_items); Some(Ok(current_items.pop_front().unwrap())) } } }; nextup } }); it } } /// Find channel ID either from a username or ID use crate::common::ChannelID; pub fn find_channel_id(name: &str, service: &Service) -> Result<ChannelID> { match service { Service::Youtube => { debug!("Looking up by username"); let url = format!( "{prefix}/api/v1/channels/{name}", prefix = api_prefix(), name = name ); debug!("Retrieving URL {}", &url); let resp = attohttpc::get(&url).send()?; let text = resp.text().unwrap(); trace!("Raw response: {}", &text); let data: YTChannelInfo = serde_json::from_str(&text) .with_context(|| format!("Failed to parse response from {}", &url))?; trace!("Raw deserialisation: {:?}", &data); Ok(ChannelID::Youtube(YoutubeID { id: data.author_id })) } Service::Vimeo => Err(anyhow::anyhow!("Not yet implemented!")), // FIXME: This method belongs outside of youtube.rs } } #[cfg(test)] mod test { use super::*; #[test] fn test_basic_find() -> Result<()> { let _m1 = mockito::mock("GET", "/api/v1/channels/thegreatsd") .with_body_from_file("testdata/channel_thegreatsd.json") .create(); let _m2 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ") .with_body_from_file("testdata/channel_thegreatsd.json") // Same content .create(); let c = find_channel_id("thegreatsd", &crate::common::Service::Youtube)?; assert_eq!(c.id_str(), "UCUBfKCp83QT19JCUekEdxOQ"); assert_eq!(c.service(), crate::common::Service::Youtube); // Check same `ChannelID` is found by ID as by username let by_id = find_channel_id("UCUBfKCp83QT19JCUekEdxOQ", &crate::common::Service::Youtube)?; assert_eq!(by_id, c); Ok(()) } #[test] fn test_video_list() -> Result<()> { let mock_p1 = mockito::mock( "GET", "/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1", ) .with_body_from_file("testdata/channel_climb_page1.json") .create(); let mock_p2 = mockito::mock( "GET", "/api/v1/channels/videos/UCO
{ #[cfg(test)] let prefix: &str = &mockito::server_url(); #[cfg(not(test))] let prefix: &str = "https://invidio.us"; prefix.into() }
identifier_body
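A hedged usage sketch of the API defined above, written as if it were called from elsewhere in the same crate; the print_latest helper and the take(5) cutoff are illustrative only, not part of the source.

use anyhow::Result;

// Hypothetical caller: fetch channel metadata, then walk the lazy video iterator.
fn print_latest(chan: &crate::common::YoutubeID) -> Result<()> {
    let yt = YoutubeQuery::new(chan);
    let meta = yt.get_metadata()?;
    println!("{}: {}", meta.title, meta.description);
    // `videos()` is lazy: pages are only requested as the iterator is advanced.
    for video in yt.videos().take(5) {
        let video = video?;
        println!("{} ({})", video.title, video.url);
    }
    Ok(())
}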
youtube.rs
lengthSeconds: Int32 paid: Bool, premium: Bool } ] */ #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTVideoInfo { title: String, video_id: String, video_thumbnails: Vec<YTThumbnailInfo>, description: String, length_seconds: i32, paid: bool, premium: bool, published: i64, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTThumbnailInfo { quality: Option<String>, url: String, width: i32, height: i32, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTChannelInfo { author: String, author_id: String, description: String, author_thumbnails: Vec<YTThumbnailInfo>, author_banners: Vec<YTThumbnailInfo>, }
/// Important info about channel #[derive(Debug)] pub struct ChannelMetadata { pub title: String, pub thumbnail: String, pub description: String, } /// Important info about a video pub struct VideoInfo { pub id: String, pub url: String, pub title: String, pub description: String, pub thumbnail_url: String, pub published_at: chrono::DateTime<chrono::Utc>, } impl std::fmt::Debug for VideoInfo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "VideoInfo{{id: {:?}, title: {:?}, url: {:?}, published_at: {:?}}}", self.id, self.title, self.url, self.published_at, ) } } fn request_data<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> { fn subreq<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> { debug!("Retrieving URL {}", &url); let resp = attohttpc::get(&url).send()?; let text = resp.text()?; trace!("Raw response: {}", &text); let data: T = serde_json::from_str(&text) .with_context(|| format!("Failed to parse response from {}", &url))?; trace!("Raw deserialisation: {:?}", &data); Ok(data) } let mut tries = 0; let ret: Result<T> = loop { let resp = subreq(url); if let Ok(data) = resp { break Ok(data); } debug!("Retrying request to {} because {:?}", &url, &resp); if tries > 3 { break resp; } tries += 1; }; ret } /// Object to query data about given channel #[derive(Debug)] pub struct YoutubeQuery<'a> { chan_id: &'a YoutubeID, } impl<'a> YoutubeQuery<'a> { pub fn new(chan_id: &YoutubeID) -> YoutubeQuery { YoutubeQuery { chan_id } } pub fn get_metadata(&self) -> Result<ChannelMetadata> { let url = format!( "{prefix}/api/v1/channels/{chanid}", prefix = api_prefix(), chanid = self.chan_id.id ); let d: YTChannelInfo = request_data(&url)?; Ok(ChannelMetadata { title: d.author.clone(), thumbnail: d.author_thumbnails[0].url.clone(), description: d.description.clone(), }) } pub fn videos<'i>(&'i self) -> impl Iterator<Item = Result<VideoInfo>> + 'i { // GET /api/v1/channels/:ucid/videos?page=1 fn get_page(chanid: &str, page: i32) -> Result<Vec<VideoInfo>> { let url = format!( "{prefix}/api/v1/channels/videos/{chanid}?page={page}", prefix = api_prefix(), chanid = chanid, page = page, ); let data: Vec<YTVideoInfo> = request_data(&url)?; let ret: Vec<VideoInfo> = data .iter() .map(|d| VideoInfo { id: d.video_id.clone(), url: format!("http://youtube.com/watch?v={id}", id = d.video_id), title: d.title.clone(), description: d.description.clone(), thumbnail_url: d.video_thumbnails.first().unwrap().url.clone(), published_at: chrono::Utc.timestamp(d.published, 0), }) .collect(); Ok(ret) } let mut page_num = 1; use std::collections::VecDeque; let mut completed = false; let mut current_items: VecDeque<VideoInfo> = VecDeque::new(); let it = std::iter::from_fn(move || -> Option<Result<VideoInfo>> { if completed { return None; } if let Some(cur) = current_items.pop_front() { // Iterate through previously stored items Some(Ok(cur)) } else { // If nothing is stored, get next page of videos let data: Result<Vec<VideoInfo>> = get_page(&self.chan_id.id, page_num); page_num += 1; // Increment for future let nextup: Option<Result<VideoInfo>> = match data { // Something went wrong, return an error item Err(e) => { // Error state, prevent future iteration completed = true; // Return error Some(Err(e)) } Ok(new_items) => { if new_items.len() == 0 { // No more items, stop iterator None } else { current_items.extend(new_items); Some(Ok(current_items.pop_front().unwrap())) } } }; nextup } }); it } } /// Find channel ID either from a username or ID use 
crate::common::ChannelID; pub fn find_channel_id(name: &str, service: &Service) -> Result<ChannelID> { match service { Service::Youtube => { debug!("Looking up by username"); let url = format!( "{prefix}/api/v1/channels/{name}", prefix = api_prefix(), name = name ); debug!("Retrieving URL {}", &url); let resp = attohttpc::get(&url).send()?; let text = resp.text().unwrap(); trace!("Raw response: {}", &text); let data: YTChannelInfo = serde_json::from_str(&text) .with_context(|| format!("Failed to parse response from {}", &url))?; trace!("Raw deserialisation: {:?}", &data); Ok(ChannelID::Youtube(YoutubeID { id: data.author_id })) } Service::Vimeo => Err(anyhow::anyhow!("Not yet implemented!")), // FIXME: This method belongs outside of youtube.rs } } #[cfg(test)] mod test { use super::*; #[test] fn test_basic_find() -> Result<()> { let _m1 = mockito::mock("GET", "/api/v1/channels/thegreatsd") .with_body_from_file("testdata/channel_thegreatsd.json") .create(); let _m2 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ") .with_body_from_file("testdata/channel_thegreatsd.json") // Same content .create(); let c = find_channel_id("thegreatsd", &crate::common::Service::Youtube)?; assert_eq!(c.id_str(), "UCUBfKCp83QT19JCUekEdxOQ"); assert_eq!(c.service(), crate::common::Service::Youtube); // Check same `ChannelID` is found by ID as by username let by_id = find_channel_id("UCUBfKCp83QT19JCUekEdxOQ", &crate::common::Service::Youtube)?; assert_eq!(by_id, c); Ok(()) } #[test] fn test_video_list() -> Result<()> { let mock_p1 = mockito::mock( "GET", "/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1", ) .with_body_from_file("testdata/channel_climb_page1.json") .create(); let mock_p2 = mockito::mock( "GET", "/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=2", ) .with_body_from_file("testdata/channel_climb_page2.json") .create(); let cid = crate::common::YoutubeID { id: "UCOYYX1Ucvx87A7CSy5M99yw".into(), }; let yt = YoutubeQuery::new(&cid); let vids = yt.videos(); let result: Vec<super::VideoInfo> = vids .into_iter() .skip(58) // 60 videos per page, want to breach boundry .take(3) .
random_line_split
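The request_data helper above retries a failed request a few times before giving up. A minimal, generic sketch of that bounded-retry loop; the retry name and max_attempts parameter are illustrative, not part of this crate.

// Run `op` until it succeeds or `max_attempts` attempts have failed.
fn retry<T, E: std::fmt::Debug>(
    max_attempts: u32,
    mut op: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    let mut attempt = 1;
    loop {
        match op() {
            Ok(value) => return Ok(value),
            Err(e) if attempt >= max_attempts => return Err(e),
            Err(e) => {
                // Log and try again, as request_data does for transient failures.
                eprintln!("attempt {} failed: {:?}, retrying", attempt, e);
                attempt += 1;
            }
        }
    }
}

fn main() {
    let mut calls = 0;
    // Fails twice, then succeeds on the third attempt.
    let result = retry(5, || {
        calls += 1;
        if calls < 3 { Err("transient") } else { Ok(calls) }
    });
    assert_eq!(result, Ok(3));
}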
youtube.rs
lengthSeconds: Int32 paid: Bool, premium: Bool } ] */ #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTVideoInfo { title: String, video_id: String, video_thumbnails: Vec<YTThumbnailInfo>, description: String, length_seconds: i32, paid: bool, premium: bool, published: i64, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTThumbnailInfo { quality: Option<String>, url: String, width: i32, height: i32, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTChannelInfo { author: String, author_id: String, description: String, author_thumbnails: Vec<YTThumbnailInfo>, author_banners: Vec<YTThumbnailInfo>, } /// Important info about channel #[derive(Debug)] pub struct ChannelMetadata { pub title: String, pub thumbnail: String, pub description: String, } /// Important info about a video pub struct VideoInfo { pub id: String, pub url: String, pub title: String, pub description: String, pub thumbnail_url: String, pub published_at: chrono::DateTime<chrono::Utc>, } impl std::fmt::Debug for VideoInfo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "VideoInfo{{id: {:?}, title: {:?}, url: {:?}, published_at: {:?}}}", self.id, self.title, self.url, self.published_at, ) } } fn request_data<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> { fn subreq<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> { debug!("Retrieving URL {}", &url); let resp = attohttpc::get(&url).send()?; let text = resp.text()?; trace!("Raw response: {}", &text); let data: T = serde_json::from_str(&text) .with_context(|| format!("Failed to parse response from {}", &url))?; trace!("Raw deserialisation: {:?}", &data); Ok(data) } let mut tries = 0; let ret: Result<T> = loop { let resp = subreq(url); if let Ok(data) = resp { break Ok(data); } debug!("Retrying request to {} because {:?}", &url, &resp); if tries > 3 { break resp; } tries += 1; }; ret } /// Object to query data about given channel #[derive(Debug)] pub struct YoutubeQuery<'a> { chan_id: &'a YoutubeID, } impl<'a> YoutubeQuery<'a> { pub fn new(chan_id: &YoutubeID) -> YoutubeQuery { YoutubeQuery { chan_id } } pub fn get_metadata(&self) -> Result<ChannelMetadata> { let url = format!( "{prefix}/api/v1/channels/{chanid}", prefix = api_prefix(), chanid = self.chan_id.id ); let d: YTChannelInfo = request_data(&url)?; Ok(ChannelMetadata { title: d.author.clone(), thumbnail: d.author_thumbnails[0].url.clone(), description: d.description.clone(), }) } pub fn videos<'i>(&'i self) -> impl Iterator<Item = Result<VideoInfo>> + 'i { // GET /api/v1/channels/:ucid/videos?page=1 fn get_page(chanid: &str, page: i32) -> Result<Vec<VideoInfo>> { let url = format!( "{prefix}/api/v1/channels/videos/{chanid}?page={page}", prefix = api_prefix(), chanid = chanid, page = page, ); let data: Vec<YTVideoInfo> = request_data(&url)?; let ret: Vec<VideoInfo> = data .iter() .map(|d| VideoInfo { id: d.video_id.clone(), url: format!("http://youtube.com/watch?v={id}", id = d.video_id), title: d.title.clone(), description: d.description.clone(), thumbnail_url: d.video_thumbnails.first().unwrap().url.clone(), published_at: chrono::Utc.timestamp(d.published, 0), }) .collect(); Ok(ret) } let mut page_num = 1; use std::collections::VecDeque; let mut completed = false; let mut current_items: VecDeque<VideoInfo> = VecDeque::new(); let it = std::iter::from_fn(move || -> Option<Result<VideoInfo>> { if completed { 
return None; } if let Some(cur) = current_items.pop_front() { // Iterate through previously stored items Some(Ok(cur)) } else { // If nothing is stored, get next page of videos let data: Result<Vec<VideoInfo>> = get_page(&self.chan_id.id, page_num); page_num += 1; // Increment for future let nextup: Option<Result<VideoInfo>> = match data { // Something went wrong, return an error item Err(e) =>
Ok(new_items) => { if new_items.len() == 0 { // No more items, stop iterator None } else { current_items.extend(new_items); Some(Ok(current_items.pop_front().unwrap())) } } }; nextup } }); it } } /// Find channel ID either from a username or ID use crate::common::ChannelID; pub fn find_channel_id(name: &str, service: &Service) -> Result<ChannelID> { match service { Service::Youtube => { debug!("Looking up by username"); let url = format!( "{prefix}/api/v1/channels/{name}", prefix = api_prefix(), name = name ); debug!("Retrieving URL {}", &url); let resp = attohttpc::get(&url).send()?; let text = resp.text().unwrap(); trace!("Raw response: {}", &text); let data: YTChannelInfo = serde_json::from_str(&text) .with_context(|| format!("Failed to parse response from {}", &url))?; trace!("Raw deserialisation: {:?}", &data); Ok(ChannelID::Youtube(YoutubeID { id: data.author_id })) } Service::Vimeo => Err(anyhow::anyhow!("Not yet implemented!")), // FIXME: This method belongs outside of youtube.rs } } #[cfg(test)] mod test { use super::*; #[test] fn test_basic_find() -> Result<()> { let _m1 = mockito::mock("GET", "/api/v1/channels/thegreatsd") .with_body_from_file("testdata/channel_thegreatsd.json") .create(); let _m2 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ") .with_body_from_file("testdata/channel_thegreatsd.json") // Same content .create(); let c = find_channel_id("thegreatsd", &crate::common::Service::Youtube)?; assert_eq!(c.id_str(), "UCUBfKCp83QT19JCUekEdxOQ"); assert_eq!(c.service(), crate::common::Service::Youtube); // Check same `ChannelID` is found by ID as by username let by_id = find_channel_id("UCUBfKCp83QT19JCUekEdxOQ", &crate::common::Service::Youtube)?; assert_eq!(by_id, c); Ok(()) } #[test] fn test_video_list() -> Result<()> { let mock_p1 = mockito::mock( "GET", "/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1", ) .with_body_from_file("testdata/channel_climb_page1.json") .create(); let mock_p2 = mockito::mock( "GET", "/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=2", ) .with_body_from_file("testdata/channel_climb_page2.json") .create(); let cid = crate::common::YoutubeID { id: "UCOYYX1Ucvx87A7CSy5M99yw".into(), }; let yt = YoutubeQuery::new(&cid); let vids = yt.videos(); let result: Vec<super::VideoInfo> = vids .into_iter() .skip(58) // 60 videos per page, want to breach boundry .take(3)
{ // Error state, prevent future iteration completed = true; // Return error Some(Err(e)) }
conditional_block
youtube.rs
lengthSeconds: Int32 paid: Bool, premium: Bool } ] */ #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTVideoInfo { title: String, video_id: String, video_thumbnails: Vec<YTThumbnailInfo>, description: String, length_seconds: i32, paid: bool, premium: bool, published: i64, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTThumbnailInfo { quality: Option<String>, url: String, width: i32, height: i32, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct YTChannelInfo { author: String, author_id: String, description: String, author_thumbnails: Vec<YTThumbnailInfo>, author_banners: Vec<YTThumbnailInfo>, } /// Important info about channel #[derive(Debug)] pub struct ChannelMetadata { pub title: String, pub thumbnail: String, pub description: String, } /// Important info about a video pub struct VideoInfo { pub id: String, pub url: String, pub title: String, pub description: String, pub thumbnail_url: String, pub published_at: chrono::DateTime<chrono::Utc>, } impl std::fmt::Debug for VideoInfo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "VideoInfo{{id: {:?}, title: {:?}, url: {:?}, published_at: {:?}}}", self.id, self.title, self.url, self.published_at, ) } } fn request_data<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> { fn subreq<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> { debug!("Retrieving URL {}", &url); let resp = attohttpc::get(&url).send()?; let text = resp.text()?; trace!("Raw response: {}", &text); let data: T = serde_json::from_str(&text) .with_context(|| format!("Failed to parse response from {}", &url))?; trace!("Raw deserialisation: {:?}", &data); Ok(data) } let mut tries = 0; let ret: Result<T> = loop { let resp = subreq(url); if let Ok(data) = resp { break Ok(data); } debug!("Retrying request to {} because {:?}", &url, &resp); if tries > 3 { break resp; } tries += 1; }; ret } /// Object to query data about given channel #[derive(Debug)] pub struct YoutubeQuery<'a> { chan_id: &'a YoutubeID, } impl<'a> YoutubeQuery<'a> { pub fn new(chan_id: &YoutubeID) -> YoutubeQuery { YoutubeQuery { chan_id } } pub fn
(&self) -> Result<ChannelMetadata> { let url = format!( "{prefix}/api/v1/channels/{chanid}", prefix = api_prefix(), chanid = self.chan_id.id ); let d: YTChannelInfo = request_data(&url)?; Ok(ChannelMetadata { title: d.author.clone(), thumbnail: d.author_thumbnails[0].url.clone(), description: d.description.clone(), }) } pub fn videos<'i>(&'i self) -> impl Iterator<Item = Result<VideoInfo>> + 'i { // GET /api/v1/channels/:ucid/videos?page=1 fn get_page(chanid: &str, page: i32) -> Result<Vec<VideoInfo>> { let url = format!( "{prefix}/api/v1/channels/videos/{chanid}?page={page}", prefix = api_prefix(), chanid = chanid, page = page, ); let data: Vec<YTVideoInfo> = request_data(&url)?; let ret: Vec<VideoInfo> = data .iter() .map(|d| VideoInfo { id: d.video_id.clone(), url: format!("http://youtube.com/watch?v={id}", id = d.video_id), title: d.title.clone(), description: d.description.clone(), thumbnail_url: d.video_thumbnails.first().unwrap().url.clone(), published_at: chrono::Utc.timestamp(d.published, 0), }) .collect(); Ok(ret) } let mut page_num = 1; use std::collections::VecDeque; let mut completed = false; let mut current_items: VecDeque<VideoInfo> = VecDeque::new(); let it = std::iter::from_fn(move || -> Option<Result<VideoInfo>> { if completed { return None; } if let Some(cur) = current_items.pop_front() { // Iterate through previously stored items Some(Ok(cur)) } else { // If nothing is stored, get next page of videos let data: Result<Vec<VideoInfo>> = get_page(&self.chan_id.id, page_num); page_num += 1; // Increment for future let nextup: Option<Result<VideoInfo>> = match data { // Something went wrong, return an error item Err(e) => { // Error state, prevent future iteration completed = true; // Return error Some(Err(e)) } Ok(new_items) => { if new_items.len() == 0 { // No more items, stop iterator None } else { current_items.extend(new_items); Some(Ok(current_items.pop_front().unwrap())) } } }; nextup } }); it } } /// Find channel ID either from a username or ID use crate::common::ChannelID; pub fn find_channel_id(name: &str, service: &Service) -> Result<ChannelID> { match service { Service::Youtube => { debug!("Looking up by username"); let url = format!( "{prefix}/api/v1/channels/{name}", prefix = api_prefix(), name = name ); debug!("Retrieving URL {}", &url); let resp = attohttpc::get(&url).send()?; let text = resp.text().unwrap(); trace!("Raw response: {}", &text); let data: YTChannelInfo = serde_json::from_str(&text) .with_context(|| format!("Failed to parse response from {}", &url))?; trace!("Raw deserialisation: {:?}", &data); Ok(ChannelID::Youtube(YoutubeID { id: data.author_id })) } Service::Vimeo => Err(anyhow::anyhow!("Not yet implemented!")), // FIXME: This method belongs outside of youtube.rs } } #[cfg(test)] mod test { use super::*; #[test] fn test_basic_find() -> Result<()> { let _m1 = mockito::mock("GET", "/api/v1/channels/thegreatsd") .with_body_from_file("testdata/channel_thegreatsd.json") .create(); let _m2 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ") .with_body_from_file("testdata/channel_thegreatsd.json") // Same content .create(); let c = find_channel_id("thegreatsd", &crate::common::Service::Youtube)?; assert_eq!(c.id_str(), "UCUBfKCp83QT19JCUekEdxOQ"); assert_eq!(c.service(), crate::common::Service::Youtube); // Check same `ChannelID` is found by ID as by username let by_id = find_channel_id("UCUBfKCp83QT19JCUekEdxOQ", &crate::common::Service::Youtube)?; assert_eq!(by_id, c); Ok(()) } #[test] fn test_video_list() -> Result<()> { let 
mock_p1 = mockito::mock( "GET", "/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1", ) .with_body_from_file("testdata/channel_climb_page1.json") .create(); let mock_p2 = mockito::mock( "GET", "/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=2", ) .with_body_from_file("testdata/channel_climb_page2.json") .create(); let cid = crate::common::YoutubeID { id: "UCOYYX1Ucvx87A7CSy5M99yw".into(), }; let yt = YoutubeQuery::new(&cid); let vids = yt.videos(); let result: Vec<super::VideoInfo> = vids .into_iter() .skip(58) // 60 videos per page, want to breach boundry .take(3)
get_metadata
identifier_name
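The videos() method above buffers one page of results in a VecDeque and hands items out one at a time through std::iter::from_fn. A stripped-down sketch of that pagination pattern with a hypothetical fetch_page closure standing in for the real per-page request:

use std::collections::VecDeque;

// Turn a page-oriented fetcher into a flat, lazy iterator of items.
fn paged<T, F: FnMut(i32) -> Vec<T>>(mut fetch_page: F) -> impl Iterator<Item = T> {
    let mut page = 1;
    let mut buf: VecDeque<T> = VecDeque::new();
    let mut done = false;
    std::iter::from_fn(move || {
        if done {
            return None;
        }
        if let Some(item) = buf.pop_front() {
            // Serve previously fetched items first.
            return Some(item);
        }
        // Buffer is empty: fetch the next page.
        let items = fetch_page(page);
        page += 1;
        if items.is_empty() {
            // An empty page ends iteration for good.
            done = true;
            return None;
        }
        buf.extend(items);
        buf.pop_front()
    })
}

fn main() {
    // Hypothetical fetcher: two pages of two items, then an empty page.
    let pages = paged(|page| if page <= 2 { vec![page * 10, page * 10 + 1] } else { vec![] });
    let all: Vec<i32> = pages.collect();
    assert_eq!(all, vec![10, 11, 20, 21]);
}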
x25519.rs
} /// A short-lived Diffie-Hellman secret key that can only be used to compute a single /// [`SharedSecret`]. /// /// This type is identical to the [`StaticSecret`] type, except that the /// [`EphemeralSecret::diffie_hellman`] method consumes and then wipes the secret key, and there /// are no serialization methods defined. This means that [`EphemeralSecret`]s can only be /// generated from fresh randomness by [`EphemeralSecret::new`] and the compiler statically checks /// that the resulting secret is used at most once. #[derive(Zeroize)] #[zeroize(drop)] pub struct EphemeralSecret(pub(crate) Scalar); impl EphemeralSecret { /// Perform a Diffie-Hellman key agreement between `self` and /// `their_public` key to produce a [`SharedSecret`]. pub fn diffie_hellman(self, their_public: &PublicKey) -> SharedSecret { SharedSecret(self.0 * their_public.0) } /// Generate an x25519 [`EphemeralSecret`] key. pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self { let mut bytes = [0u8; 32]; csprng.fill_bytes(&mut bytes); EphemeralSecret(clamp_scalar(bytes)) } } impl<'a> From<&'a EphemeralSecret> for PublicKey { /// Given an x25519 [`EphemeralSecret`] key, compute its corresponding [`PublicKey`]. fn from(secret: &'a EphemeralSecret) -> PublicKey { PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery()) } } /// A Diffie-Hellman secret key that can be used to compute multiple [`SharedSecret`]s. /// /// This type is identical to the [`EphemeralSecret`] type, except that the /// [`StaticSecret::diffie_hellman`] method does not consume the secret key, and the type provides /// serialization methods to save and load key material. This means that the secret may be used /// multiple times (but does not *have to be*). /// /// Some protocols, such as Noise, already handle the static/ephemeral distinction, so the /// additional guarantees provided by [`EphemeralSecret`] are not helpful or would cause duplicate /// code paths. In this case, it may be useful to /// ```rust,ignore /// use x25519_dalek::StaticSecret as SecretKey; /// ``` /// since the only difference between the two is that [`StaticSecret`] does not enforce at /// compile-time that the key is only used once. #[cfg_attr(feature = "serde", serde(crate = "our_serde"))] #[cfg_attr( feature = "serde", derive(our_serde::Serialize, our_serde::Deserialize) )] #[derive(Clone, Zeroize)] #[zeroize(drop)] pub struct StaticSecret( #[cfg_attr(feature = "serde", serde(with = "AllowUnreducedScalarBytes"))] pub(crate) Scalar, ); impl StaticSecret { /// Perform a Diffie-Hellman key agreement between `self` and /// `their_public` key to produce a `SharedSecret`. pub fn diffie_hellman(&self, their_public: &PublicKey) -> SharedSecret { SharedSecret(&self.0 * their_public.0) } /// Generate an x25519 key. pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self { let mut bytes = [0u8; 32]; csprng.fill_bytes(&mut bytes); StaticSecret(clamp_scalar(bytes)) } /// Extract this key's bytes for serialization. pub fn to_bytes(&self) -> [u8; 32] { self.0.to_bytes() } } impl From<[u8; 32]> for StaticSecret { /// Load a secret key from a byte array. fn from(bytes: [u8; 32]) -> StaticSecret { StaticSecret(clamp_scalar(bytes)) } } impl<'a> From<&'a StaticSecret> for PublicKey { /// Given an x25519 [`StaticSecret`] key, compute its corresponding [`PublicKey`]. fn from(secret: &'a StaticSecret) -> PublicKey { PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery()) } } /// The result of a Diffie-Hellman key exchange. 
/// /// Each party computes this using their [`EphemeralSecret`] or [`StaticSecret`] and their /// counterparty's [`PublicKey`]. #[derive(Zeroize)] #[zeroize(drop)] pub struct SharedSecret(pub(crate) MontgomeryPoint); impl SharedSecret { /// Convert this shared secret to a byte array. #[inline] pub fn to_bytes(&self) -> [u8; 32] { self.0.to_bytes() } /// View this shared secret key as a byte array. #[inline] pub fn as_bytes(&self) -> &[u8; 32] { self.0.as_bytes() } } /// "Decode" a scalar from a 32-byte array. /// /// By "decode" here, what is really meant is applying key clamping by twiddling /// some bits. /// /// # Returns /// /// A `Scalar`. fn clamp_scalar(mut scalar: [u8; 32]) -> Scalar { scalar[0] &= 248; scalar[31] &= 127; scalar[31] |= 64; Scalar::from_bits(scalar) } /// The bare, byte-oriented x25519 function, exactly as specified in RFC7748. /// /// This can be used with [`X25519_BASEPOINT_BYTES`] for people who /// cannot use the better, safer, and faster DH API. pub fn x25519(k: [u8; 32], u: [u8; 32]) -> [u8; 32] { (clamp_scalar(k) * MontgomeryPoint(u)).to_bytes() } /// The X25519 basepoint, for use with the bare, byte-oriented x25519 /// function. This is provided for people who cannot use the typed /// DH API for some reason. pub const X25519_BASEPOINT_BYTES: [u8; 32] = [ 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; /// Derived serialization methods will not work on a StaticSecret because x25519 requires /// non-canonical scalars which are rejected by curve25519-dalek. Thus we provide a way to convert /// the bytes directly to a scalar using Serde's remote derive functionality. #[cfg_attr(feature = "serde", serde(crate = "our_serde"))] #[cfg_attr( feature = "serde", derive(our_serde::Serialize, our_serde::Deserialize) )] #[cfg_attr(feature = "serde", serde(remote = "Scalar"))] struct AllowUnreducedScalarBytes( #[cfg_attr(feature = "serde", serde(getter = "Scalar::to_bytes"))] [u8; 32], ); impl From<AllowUnreducedScalarBytes> for Scalar { fn from(bytes: AllowUnreducedScalarBytes) -> Scalar { clamp_scalar(bytes.0) } } #[cfg(test)] mod test { use super::*; use rand_core::OsRng; #[test] fn byte_basepoint_matches_edwards_scalar_mul() { let mut scalar_bytes = [0x37; 32]; for i in 0..32 { scalar_bytes[i] += 2; let result = x25519(scalar_bytes, X25519_BASEPOINT_BYTES); let expected = (&ED25519_BASEPOINT_TABLE * &clamp_scalar(scalar_bytes)) .to_montgomery() .to_bytes(); assert_eq!(result, expected); } } #[test] #[cfg(feature = "serde")] fn serde_bincode_public_key_roundtrip() { use bincode; let public_key = PublicKey::from(X25519_BASEPOINT_BYTES); let encoded = bincode::serialize(&public_key).unwrap(); let decoded: PublicKey = bincode::deserialize(&encoded).unwrap(); assert_eq!(encoded.len(), 32); assert_eq!(decoded.as_bytes(), public_key.as_bytes()); } #[test] #[cfg(feature = "serde")] fn serde_bincode_public_key_matches_from_bytes() { use bincode; let expected = PublicKey::from(X25519_BASEPOINT_BYTES); let decoded: PublicKey = bincode::deserialize(&X25519_BASEPOINT_BYTES).unwrap();
{ self.0.as_bytes() }
identifier_body
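A minimal end-to-end sketch of the ephemeral Diffie-Hellman flow these types provide, along the lines of the crate's documented usage; it assumes x25519_dalek and rand_core are available as dependencies.

use rand_core::OsRng;
use x25519_dalek::{EphemeralSecret, PublicKey};

fn main() {
    // Each side generates a fresh ephemeral secret and derives its public key.
    let alice_secret = EphemeralSecret::new(OsRng);
    let alice_public = PublicKey::from(&alice_secret);
    let bob_secret = EphemeralSecret::new(OsRng);
    let bob_public = PublicKey::from(&bob_secret);

    // `diffie_hellman` consumes the ephemeral secret, so it can only be used once.
    let alice_shared = alice_secret.diffie_hellman(&bob_public);
    let bob_shared = bob_secret.diffie_hellman(&alice_public);

    // Both parties arrive at the same shared secret.
    assert_eq!(alice_shared.as_bytes(), bob_shared.as_bytes());
}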
x25519.rs
(bytes: [u8; 32]) -> PublicKey { PublicKey(MontgomeryPoint(bytes)) } } impl PublicKey { /// Convert this public key to a byte array. #[inline] pub fn to_bytes(&self) -> [u8; 32] { self.0.to_bytes() } /// View this public key as a byte array. #[inline] pub fn as_bytes(&self) -> &[u8; 32] { self.0.as_bytes() } } /// A short-lived Diffie-Hellman secret key that can only be used to compute a single /// [`SharedSecret`]. /// /// This type is identical to the [`StaticSecret`] type, except that the /// [`EphemeralSecret::diffie_hellman`] method consumes and then wipes the secret key, and there /// are no serialization methods defined. This means that [`EphemeralSecret`]s can only be /// generated from fresh randomness by [`EphemeralSecret::new`] and the compiler statically checks /// that the resulting secret is used at most once. #[derive(Zeroize)] #[zeroize(drop)] pub struct EphemeralSecret(pub(crate) Scalar); impl EphemeralSecret { /// Perform a Diffie-Hellman key agreement between `self` and /// `their_public` key to produce a [`SharedSecret`]. pub fn diffie_hellman(self, their_public: &PublicKey) -> SharedSecret { SharedSecret(self.0 * their_public.0) } /// Generate an x25519 [`EphemeralSecret`] key. pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self { let mut bytes = [0u8; 32]; csprng.fill_bytes(&mut bytes); EphemeralSecret(clamp_scalar(bytes)) } } impl<'a> From<&'a EphemeralSecret> for PublicKey { /// Given an x25519 [`EphemeralSecret`] key, compute its corresponding [`PublicKey`]. fn from(secret: &'a EphemeralSecret) -> PublicKey { PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery()) } } /// A Diffie-Hellman secret key that can be used to compute multiple [`SharedSecret`]s. /// /// This type is identical to the [`EphemeralSecret`] type, except that the /// [`StaticSecret::diffie_hellman`] method does not consume the secret key, and the type provides /// serialization methods to save and load key material. This means that the secret may be used /// multiple times (but does not *have to be*). /// /// Some protocols, such as Noise, already handle the static/ephemeral distinction, so the /// additional guarantees provided by [`EphemeralSecret`] are not helpful or would cause duplicate /// code paths. In this case, it may be useful to /// ```rust,ignore /// use x25519_dalek::StaticSecret as SecretKey; /// ``` /// since the only difference between the two is that [`StaticSecret`] does not enforce at /// compile-time that the key is only used once. #[cfg_attr(feature = "serde", serde(crate = "our_serde"))] #[cfg_attr( feature = "serde", derive(our_serde::Serialize, our_serde::Deserialize) )] #[derive(Clone, Zeroize)] #[zeroize(drop)] pub struct StaticSecret( #[cfg_attr(feature = "serde", serde(with = "AllowUnreducedScalarBytes"))] pub(crate) Scalar, ); impl StaticSecret { /// Perform a Diffie-Hellman key agreement between `self` and /// `their_public` key to produce a `SharedSecret`. pub fn diffie_hellman(&self, their_public: &PublicKey) -> SharedSecret { SharedSecret(&self.0 * their_public.0) } /// Generate an x25519 key. pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self { let mut bytes = [0u8; 32]; csprng.fill_bytes(&mut bytes); StaticSecret(clamp_scalar(bytes)) } /// Extract this key's bytes for serialization. pub fn to_bytes(&self) -> [u8; 32] { self.0.to_bytes() } } impl From<[u8; 32]> for StaticSecret { /// Load a secret key from a byte array. 
fn from(bytes: [u8; 32]) -> StaticSecret { StaticSecret(clamp_scalar(bytes)) } } impl<'a> From<&'a StaticSecret> for PublicKey { /// Given an x25519 [`StaticSecret`] key, compute its corresponding [`PublicKey`]. fn from(secret: &'a StaticSecret) -> PublicKey { PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery()) } } /// The result of a Diffie-Hellman key exchange. /// /// Each party computes this using their [`EphemeralSecret`] or [`StaticSecret`] and their /// counterparty's [`PublicKey`]. #[derive(Zeroize)] #[zeroize(drop)] pub struct SharedSecret(pub(crate) MontgomeryPoint); impl SharedSecret { /// Convert this shared secret to a byte array. #[inline] pub fn to_bytes(&self) -> [u8; 32] { self.0.to_bytes() } /// View this shared secret key as a byte array. #[inline] pub fn as_bytes(&self) -> &[u8; 32] { self.0.as_bytes() } } /// "Decode" a scalar from a 32-byte array. /// /// By "decode" here, what is really meant is applying key clamping by twiddling /// some bits. /// /// # Returns /// /// A `Scalar`. fn clamp_scalar(mut scalar: [u8; 32]) -> Scalar { scalar[0] &= 248; scalar[31] &= 127; scalar[31] |= 64; Scalar::from_bits(scalar) } /// The bare, byte-oriented x25519 function, exactly as specified in RFC7748. /// /// This can be used with [`X25519_BASEPOINT_BYTES`] for people who /// cannot use the better, safer, and faster DH API. pub fn x25519(k: [u8; 32], u: [u8; 32]) -> [u8; 32] { (clamp_scalar(k) * MontgomeryPoint(u)).to_bytes() } /// The X25519 basepoint, for use with the bare, byte-oriented x25519 /// function. This is provided for people who cannot use the typed /// DH API for some reason. pub const X25519_BASEPOINT_BYTES: [u8; 32] = [ 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; /// Derived serialization methods will not work on a StaticSecret because x25519 requires /// non-canonical scalars which are rejected by curve25519-dalek. Thus we provide a way to convert /// the bytes directly to a scalar using Serde's remote derive functionality. #[cfg_attr(feature = "serde", serde(crate = "our_serde"))] #[cfg_attr( feature = "serde", derive(our_serde::Serialize, our_serde::Deserialize) )] #[cfg_attr(feature = "serde", serde(remote = "Scalar"))] struct AllowUnreducedScalarBytes( #[cfg_attr(feature = "serde", serde(getter = "Scalar::to_bytes"))] [u8; 32], ); impl From<AllowUnreducedScalarBytes> for Scalar { fn from(bytes: AllowUnreducedScalarBytes) -> Scalar { clamp_scalar(bytes.0) } } #[cfg(test)] mod test { use super::*; use rand_core::OsRng; #[test] fn byte_basepoint_matches_edwards_scalar_mul() { let mut scalar_bytes = [0x37; 32]; for i in 0..32 { scalar_bytes[i] += 2; let result = x25519(scalar_bytes, X25519_BASEPOINT_BYTES); let expected = (&ED25519_BASEPOINT_TABLE * &clamp_scalar(scalar_bytes)) .to_montgomery() .to_bytes(); assert_eq!(result, expected); } } #[test] #[cfg(feature = "serde")] fn serde_bincode_public_key_roundtrip() { use bincode; let public_key = PublicKey::from(X25519_BASEPOINT_BYTES); let encoded = bincode::serialize(&public_key).unwrap(); let decoded: PublicKey = bincode::
from
identifier_name
x25519.rs
be*). /// /// Some protocols, such as Noise, already handle the static/ephemeral distinction, so the /// additional guarantees provided by [`EphemeralSecret`] are not helpful or would cause duplicate /// code paths. In this case, it may be useful to /// ```rust,ignore /// use x25519_dalek::StaticSecret as SecretKey; /// ``` /// since the only difference between the two is that [`StaticSecret`] does not enforce at /// compile-time that the key is only used once. #[cfg_attr(feature = "serde", serde(crate = "our_serde"))] #[cfg_attr( feature = "serde", derive(our_serde::Serialize, our_serde::Deserialize) )] #[derive(Clone, Zeroize)] #[zeroize(drop)] pub struct StaticSecret( #[cfg_attr(feature = "serde", serde(with = "AllowUnreducedScalarBytes"))] pub(crate) Scalar, ); impl StaticSecret { /// Perform a Diffie-Hellman key agreement between `self` and /// `their_public` key to produce a `SharedSecret`. pub fn diffie_hellman(&self, their_public: &PublicKey) -> SharedSecret { SharedSecret(&self.0 * their_public.0) } /// Generate an x25519 key. pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self { let mut bytes = [0u8; 32]; csprng.fill_bytes(&mut bytes); StaticSecret(clamp_scalar(bytes)) } /// Extract this key's bytes for serialization. pub fn to_bytes(&self) -> [u8; 32] { self.0.to_bytes() } } impl From<[u8; 32]> for StaticSecret { /// Load a secret key from a byte array. fn from(bytes: [u8; 32]) -> StaticSecret { StaticSecret(clamp_scalar(bytes)) } } impl<'a> From<&'a StaticSecret> for PublicKey { /// Given an x25519 [`StaticSecret`] key, compute its corresponding [`PublicKey`]. fn from(secret: &'a StaticSecret) -> PublicKey { PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery()) } } /// The result of a Diffie-Hellman key exchange. /// /// Each party computes this using their [`EphemeralSecret`] or [`StaticSecret`] and their /// counterparty's [`PublicKey`]. #[derive(Zeroize)] #[zeroize(drop)] pub struct SharedSecret(pub(crate) MontgomeryPoint); impl SharedSecret { /// Convert this shared secret to a byte array. #[inline] pub fn to_bytes(&self) -> [u8; 32] { self.0.to_bytes() } /// View this shared secret key as a byte array. #[inline] pub fn as_bytes(&self) -> &[u8; 32] { self.0.as_bytes() } } /// "Decode" a scalar from a 32-byte array. /// /// By "decode" here, what is really meant is applying key clamping by twiddling /// some bits. /// /// # Returns /// /// A `Scalar`. fn clamp_scalar(mut scalar: [u8; 32]) -> Scalar { scalar[0] &= 248; scalar[31] &= 127; scalar[31] |= 64; Scalar::from_bits(scalar) } /// The bare, byte-oriented x25519 function, exactly as specified in RFC7748. /// /// This can be used with [`X25519_BASEPOINT_BYTES`] for people who /// cannot use the better, safer, and faster DH API. pub fn x25519(k: [u8; 32], u: [u8; 32]) -> [u8; 32] { (clamp_scalar(k) * MontgomeryPoint(u)).to_bytes() } /// The X25519 basepoint, for use with the bare, byte-oriented x25519 /// function. This is provided for people who cannot use the typed /// DH API for some reason. pub const X25519_BASEPOINT_BYTES: [u8; 32] = [ 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; /// Derived serialization methods will not work on a StaticSecret because x25519 requires /// non-canonical scalars which are rejected by curve25519-dalek. Thus we provide a way to convert /// the bytes directly to a scalar using Serde's remote derive functionality. 
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))] #[cfg_attr( feature = "serde", derive(our_serde::Serialize, our_serde::Deserialize) )] #[cfg_attr(feature = "serde", serde(remote = "Scalar"))] struct AllowUnreducedScalarBytes( #[cfg_attr(feature = "serde", serde(getter = "Scalar::to_bytes"))] [u8; 32], ); impl From<AllowUnreducedScalarBytes> for Scalar { fn from(bytes: AllowUnreducedScalarBytes) -> Scalar { clamp_scalar(bytes.0) } } #[cfg(test)] mod test { use super::*; use rand_core::OsRng; #[test] fn byte_basepoint_matches_edwards_scalar_mul() { let mut scalar_bytes = [0x37; 32]; for i in 0..32 { scalar_bytes[i] += 2; let result = x25519(scalar_bytes, X25519_BASEPOINT_BYTES); let expected = (&ED25519_BASEPOINT_TABLE * &clamp_scalar(scalar_bytes)) .to_montgomery()
} #[test] #[cfg(feature = "serde")] fn serde_bincode_public_key_roundtrip() { use bincode; let public_key = PublicKey::from(X25519_BASEPOINT_BYTES); let encoded = bincode::serialize(&public_key).unwrap(); let decoded: PublicKey = bincode::deserialize(&encoded).unwrap(); assert_eq!(encoded.len(), 32); assert_eq!(decoded.as_bytes(), public_key.as_bytes()); } #[test] #[cfg(feature = "serde")] fn serde_bincode_public_key_matches_from_bytes() { use bincode; let expected = PublicKey::from(X25519_BASEPOINT_BYTES); let decoded: PublicKey = bincode::deserialize(&X25519_BASEPOINT_BYTES).unwrap(); assert_eq!(decoded.as_bytes(), expected.as_bytes()); } #[test] #[cfg(feature = "serde")] fn serde_bincode_static_secret_roundtrip() { use bincode; let static_secret = StaticSecret(clamp_scalar([0x24; 32])); let encoded = bincode::serialize(&static_secret).unwrap(); let decoded: StaticSecret = bincode::deserialize(&encoded).unwrap(); assert_eq!(encoded.len(), 32); assert_eq!(decoded.to_bytes(), static_secret.to_bytes()); } #[test] #[cfg(feature = "serde")] fn serde_bincode_static_secret_matches_from_bytes() { use bincode; let expected = StaticSecret(clamp_scalar([0x24; 32])); let clamped_bytes = clamp_scalar([0x24; 32]).to_bytes(); let decoded: StaticSecret = bincode::deserialize(&clamped_bytes).unwrap(); assert_eq!(decoded.to_bytes(), expected.to_bytes()); } fn do_rfc7748_ladder_test1(input_scalar: [u8; 32], input_point: [u8; 32], expected: [u8; 32]) { let result = x25519(input_scalar, input_point); assert_eq!(result, expected); } #[test] fn rfc7748_ladder_test1_vectorset1() { let input_scalar: [u8; 32] = [ 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, 0x50, 0x6a, 0x22, 0x
.to_bytes(); assert_eq!(result, expected); }
random_line_split
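AllowUnreducedScalarBytes above is serde's remote-derive pattern in its private-field flavor: a getter attribute for serialization plus a From impl for deserialization, because Scalar's bytes are not directly constructible. For reference, a sketch of the simpler public-field flavor of the same pattern, using made-up types and assuming serde (with the derive feature) and serde_json as dependencies:

use serde::{Deserialize, Serialize};

// Stand-in for a foreign crate that ships no serde impls of its own.
mod other_crate {
    pub struct Duration {
        pub secs: i64,
        pub nanos: i32,
    }
}
use other_crate::Duration;

// Local mirror of the remote type; serde generates code against `Duration`.
#[derive(Serialize, Deserialize)]
#[serde(remote = "Duration")]
struct DurationDef {
    secs: i64,
    nanos: i32,
}

#[derive(Serialize, Deserialize)]
struct Process {
    name: String,
    #[serde(with = "DurationDef")]
    wall_time: Duration,
}

fn main() {
    let p = Process {
        name: "demo".to_string(),
        wall_time: Duration { secs: 1, nanos: 0 },
    };
    println!("{}", serde_json::to_string(&p).unwrap());
}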
engine.rs
/// A sink. /// /// Note that dropping the handle doesn't delete the sink. You must call `stop` explicitely. pub struct Handle<'a> { engine: &'a Engine, source_id: usize, remaining_duration_ms: Arc<AtomicUsize>, samples_rate: u32, channels: u16, // Holds a pointer to the list of iterators to be played after the current one has // finished playing. next_sounds: Arc<Mutex<Vec<Box<Iterator<Item = f32> + Send>>>>, } impl<'a> Handle<'a> { /// Appends a new source of data after the current one. #[inline] pub fn append<S>(&self, source: S) where S: Source + Send + 'static, S::Item: Sample + Clone + Send { // adding the estimated duration of the sound to `remaining_duration_ms` if let Some(duration) = source.get_total_duration() { let duration = duration.as_secs() as usize * 1000 + duration.subsec_nanos() as usize / 1000000; self.remaining_duration_ms.fetch_add(duration, Ordering::Relaxed); } else { let duration = source.size_hint().0 * 1000 / (source.get_samples_rate() as usize * source.get_channels() as usize); self.remaining_duration_ms.fetch_add(duration, Ordering::Relaxed); } // pushing the source to `next_sounds` let source = UniformSourceIterator::new(source, self.channels, self.samples_rate); let source = Box::new(source); self.next_sounds.lock().unwrap().push(source); } /// Changes the volume of the sound played by this sink. #[inline] pub fn set_volume(&self, value: f32) { let commands = self.engine.commands.lock().unwrap(); commands.send(Command::SetVolume(self.source_id, value)).unwrap(); } /// Stops the sound. // note that this method could take `self` instead of `&self`, but it makes the `Sink` object's // life easier not to take `self` #[inline] pub fn stop(&self) { let commands = self.engine.commands.lock().unwrap(); commands.send(Command::Stop(self.source_id)).unwrap(); if let Some(ref thread) = self.engine.thread { thread.unpark(); } } /// Returns the minimum estimated duration of the sound being played by this sink. #[inline] pub fn get_min_remaining_duration(&self) -> Duration { Duration::from_millis(self.remaining_duration_ms.load(Ordering::Relaxed) as u64) } } /// A command sent by the regular threads to the background thread. pub enum Command { /// Adds a new voice to the list of voices to process. Play(Endpoint, Option<Voice>, QueueIterator, Arc<AtomicUsize>), /// Stops a voice. Stop(usize), /// Changes the volume of a voice. 
SetVolume(usize, f32), } fn background(rx: Receiver<Command>) { // for each endpoint name, stores the voice and the list of sounds with their volume let mut voices: HashMap<String, (Voice, Vec<(QueueIterator, Arc<AtomicUsize>, f32)>)> = HashMap::new(); // list of sounds to stop playing let mut sounds_to_remove: Vec<*const Mutex<Vec<Box<Iterator<Item = f32> + Send>>>> = Vec::new(); // stores the time when the next loop must start let mut next_loop_timer = time::precise_time_ns(); loop { // sleeping so that we get a loop every `FIXED_STEP_MS` millisecond { let now = time::precise_time_ns(); if next_loop_timer > now + 1000000 /* 1ms */ { let sleep = next_loop_timer - now; thread::park_timeout(Duration::from_millis(sleep / 1000000)); } next_loop_timer += FIXED_STEP_NS; } // polling for new commands if let Ok(command) = rx.try_recv() { match command { Command::Play(endpoint, new_voice, decoder, remaining_duration_ms) => { let mut entry = voices.entry(endpoint.get_name()).or_insert_with(|| { (new_voice.unwrap(), Vec::new()) }); entry.1.push((decoder, remaining_duration_ms, 1.0)); }, Command::Stop(decoder) => { for (_, &mut (_, ref mut sounds)) in voices.iter_mut() { sounds.retain(|dec| { &*dec.0.next as *const Mutex<_> as *const u8 as usize != decoder }) } }, Command::SetVolume(decoder, volume) => { for (_, &mut (_, ref mut sounds)) in voices.iter_mut() { if let Some(d) = sounds.iter_mut() .find(|dec| &*dec.0.next as *const Mutex<_> as *const u8 as usize == decoder) { d.2 = volume; } } }, } } // removing sounds that have finished playing for decoder in mem::replace(&mut sounds_to_remove, Vec::new()) { for (_, &mut (_, ref mut sounds)) in voices.iter_mut() { sounds.retain(|dec| &*dec.0.next as *const Mutex<_> != decoder); } } // updating the existing sounds for (_, &mut (ref mut voice, ref mut sounds)) in voices.iter_mut() { // we want the number of samples remaining to be processed by the sound to be around // twice the number of samples that are being processed in one loop, with a minimum of 2 periods let samples_read_per_loop = (voice.get_samples_rate().0 * voice.get_channels() as u32 * FIXED_STEP_MS / 1000) as usize; let pending_samples = voice.get_pending_samples(); let period = cmp::max(voice.get_period(), 1); let samples_required_in_buffer = cmp::max(samples_read_per_loop * 2, period * 2); // writing to the output if pending_samples < samples_required_in_buffer { // building an iterator that produces samples from `sounds` let samples_iter = (0..).map(|_| { sounds.iter_mut().map(|s| s.0.next().unwrap_or(0.0) * s.2) .fold(0.0, |a, b| { let v = a + b; if v > 1.0 { 1.0 } else if v < -1.0 { -1.0 } else { v } }) }); let mut buffer = voice.append_data(samples_required_in_buffer - pending_samples); match buffer { UnknownTypeBuffer::U16(ref mut buffer) => { for (o, i) in buffer.iter_mut().zip(samples_iter) { *o = i.to_u16(); } }, UnknownTypeBuffer::I16(ref mut buffer) => { for (o, i) in buffer.iter_mut().zip(samples_iter) { *o = i.to_i16(); } }, UnknownTypeBuffer::F32(ref mut buffer) => { for (o, i) in buffer.iter_mut().zip(samples_iter) { *o = i; } }, } } // updating the contents of `remaining_duration_ms` for &(ref decoder, ref remaining_duration_ms, _) in sounds.iter() { let (num_samples, _) = decoder.size_hint(); // TODO: differenciate sounds from this sink from sounds from other sinks let num_samples = num_samples + voice.get_pending_samples(); let value = (num_samples as u64 * 1000 / (voice.get_channels() as u64 * voice.get_samples_rate().0 as u64)) as u32; remaining_duration_ms.store(value 
as usize, Ordering::Relaxed); } // TODO: do better voice.play(); } } } /// Main source of samples for a voice. pub struct QueueIterator { /// The current iterator that produces samples. current: Box<Iterator<Item = f32> + Send>, /// A `Vec` containing the next iterators to play. Shared with other threads so they can add /// sounds to the list. next: Arc<Mutex<Vec<Box<Iterator<Item = f32> + Send>>>>, } impl Iterator for QueueIterator { type Item = f32; #[inline] fn next(&mut self) -> Option<f32> { loop { // basic situation that will happen most of the time if let Some(sample) = self.current.next() { return Some(sample); } let next = { let mut next = self.next.lock().unwrap(); if next.len() == 0 { // if there's no iter waiting, we create a dummy iter with 1000 null samples // this avoids a spinlock Box::new((0 .. 1000).map(|_| 0.0f32)) as Box<Iterator<Item = f32> + Send> } else { next.remove(0) } }; self.current = next; } } #[inline] fn
size_hint
identifier_name
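The background loop above mixes all active sounds by summing volume-scaled samples and clamping after each addition. A tiny standalone sketch of just that mixing step; the mix helper is illustrative, not part of the engine.

// Mix one sample from each playing sound: scale by its volume, sum,
// and clamp the running total to [-1.0, 1.0] after every addition.
fn mix(samples: &[(f32, f32)]) -> f32 {
    samples
        .iter()
        .map(|&(sample, volume)| sample * volume)
        .fold(0.0f32, |acc, s| (acc + s).max(-1.0).min(1.0))
}

fn main() {
    // Two sounds, the second at half volume; 0.8 + 0.3 is clamped to 1.0.
    assert_eq!(mix(&[(0.8, 1.0), (0.6, 0.5)]), 1.0);
    println!("{}", mix(&[(0.2, 1.0), (0.6, 0.5)])); // ~0.5, no clamping needed
}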
engine.rs
next_sounds, } } } /// A sink. /// /// Note that dropping the handle doesn't delete the sink. You must call `stop` explicitely. pub struct Handle<'a> { engine: &'a Engine, source_id: usize, remaining_duration_ms: Arc<AtomicUsize>, samples_rate: u32, channels: u16, // Holds a pointer to the list of iterators to be played after the current one has // finished playing. next_sounds: Arc<Mutex<Vec<Box<Iterator<Item = f32> + Send>>>>, } impl<'a> Handle<'a> { /// Appends a new source of data after the current one. #[inline] pub fn append<S>(&self, source: S) where S: Source + Send + 'static, S::Item: Sample + Clone + Send { // adding the estimated duration of the sound to `remaining_duration_ms` if let Some(duration) = source.get_total_duration() { let duration = duration.as_secs() as usize * 1000 + duration.subsec_nanos() as usize / 1000000; self.remaining_duration_ms.fetch_add(duration, Ordering::Relaxed); } else { let duration = source.size_hint().0 * 1000 / (source.get_samples_rate() as usize * source.get_channels() as usize); self.remaining_duration_ms.fetch_add(duration, Ordering::Relaxed); } // pushing the source to `next_sounds` let source = UniformSourceIterator::new(source, self.channels, self.samples_rate); let source = Box::new(source); self.next_sounds.lock().unwrap().push(source); } /// Changes the volume of the sound played by this sink. #[inline] pub fn set_volume(&self, value: f32) { let commands = self.engine.commands.lock().unwrap(); commands.send(Command::SetVolume(self.source_id, value)).unwrap(); } /// Stops the sound. // note that this method could take `self` instead of `&self`, but it makes the `Sink` object's // life easier not to take `self` #[inline] pub fn stop(&self) { let commands = self.engine.commands.lock().unwrap(); commands.send(Command::Stop(self.source_id)).unwrap(); if let Some(ref thread) = self.engine.thread { thread.unpark(); } } /// Returns the minimum estimated duration of the sound being played by this sink. #[inline] pub fn get_min_remaining_duration(&self) -> Duration { Duration::from_millis(self.remaining_duration_ms.load(Ordering::Relaxed) as u64) } } /// A command sent by the regular threads to the background thread. pub enum Command { /// Adds a new voice to the list of voices to process. Play(Endpoint, Option<Voice>, QueueIterator, Arc<AtomicUsize>), /// Stops a voice. Stop(usize), /// Changes the volume of a voice. 
SetVolume(usize, f32), } fn background(rx: Receiver<Command>) { // for each endpoint name, stores the voice and the list of sounds with their volume let mut voices: HashMap<String, (Voice, Vec<(QueueIterator, Arc<AtomicUsize>, f32)>)> = HashMap::new(); // list of sounds to stop playing let mut sounds_to_remove: Vec<*const Mutex<Vec<Box<Iterator<Item = f32> + Send>>>> = Vec::new(); // stores the time when the next loop must start let mut next_loop_timer = time::precise_time_ns(); loop { // sleeping so that we get a loop every `FIXED_STEP_MS` millisecond { let now = time::precise_time_ns(); if next_loop_timer > now + 1000000 /* 1ms */ { let sleep = next_loop_timer - now; thread::park_timeout(Duration::from_millis(sleep / 1000000)); } next_loop_timer += FIXED_STEP_NS; } // polling for new commands if let Ok(command) = rx.try_recv() { match command { Command::Play(endpoint, new_voice, decoder, remaining_duration_ms) => { let mut entry = voices.entry(endpoint.get_name()).or_insert_with(|| { (new_voice.unwrap(), Vec::new()) }); entry.1.push((decoder, remaining_duration_ms, 1.0)); }, Command::Stop(decoder) => { for (_, &mut (_, ref mut sounds)) in voices.iter_mut() { sounds.retain(|dec| { &*dec.0.next as *const Mutex<_> as *const u8 as usize != decoder }) } }, Command::SetVolume(decoder, volume) => { for (_, &mut (_, ref mut sounds)) in voices.iter_mut() { if let Some(d) = sounds.iter_mut() .find(|dec| &*dec.0.next as *const Mutex<_> as *const u8 as usize == decoder) { d.2 = volume; } } }, } } // removing sounds that have finished playing for decoder in mem::replace(&mut sounds_to_remove, Vec::new()) { for (_, &mut (_, ref mut sounds)) in voices.iter_mut() { sounds.retain(|dec| &*dec.0.next as *const Mutex<_> != decoder); } } // updating the existing sounds for (_, &mut (ref mut voice, ref mut sounds)) in voices.iter_mut() { // we want the number of samples remaining to be processed by the sound to be around // twice the number of samples that are being processed in one loop, with a minimum of 2 periods let samples_read_per_loop = (voice.get_samples_rate().0 * voice.get_channels() as u32 * FIXED_STEP_MS / 1000) as usize; let pending_samples = voice.get_pending_samples(); let period = cmp::max(voice.get_period(), 1); let samples_required_in_buffer = cmp::max(samples_read_per_loop * 2, period * 2); // writing to the output if pending_samples < samples_required_in_buffer { // building an iterator that produces samples from `sounds` let samples_iter = (0..).map(|_| { sounds.iter_mut().map(|s| s.0.next().unwrap_or(0.0) * s.2) .fold(0.0, |a, b| { let v = a + b; if v > 1.0 { 1.0 } else if v < -1.0 { -1.0 } else { v } }) }); let mut buffer = voice.append_data(samples_required_in_buffer - pending_samples); match buffer { UnknownTypeBuffer::U16(ref mut buffer) => { for (o, i) in buffer.iter_mut().zip(samples_iter) { *o = i.to_u16(); } }, UnknownTypeBuffer::I16(ref mut buffer) => { for (o, i) in buffer.iter_mut().zip(samples_iter) { *o = i.to_i16(); } }, UnknownTypeBuffer::F32(ref mut buffer) => { for (o, i) in buffer.iter_mut().zip(samples_iter) { *o = i; } }, } } // updating the contents of `remaining_duration_ms` for &(ref decoder, ref remaining_duration_ms, _) in sounds.iter() { let (num_samples, _) = decoder.size_hint(); // TODO: differenciate sounds from this sink from sounds from other sinks let num_samples = num_samples + voice.get_pending_samples(); let value = (num_samples as u64 * 1000 / (voice.get_channels() as u64 * voice.get_samples_rate().0 as u64)) as u32; remaining_duration_ms.store(value 
as usize, Ordering::Relaxed); } // TODO: do better voice.play(); } } } /// Main source of samples for a voice. pub struct QueueIterator { /// The current iterator that produces samples. current: Box<Iterator<Item = f32> + Send>, /// A `Vec` containing the next iterators to play. Shared with other threads so they can add /// sounds to the list. next: Arc<Mutex<Vec<Box<Iterator<Item = f32> + Send>>>>, } impl Iterator for QueueIterator { type Item = f32; #[inline] fn next(&mut self) -> Option<f32> { loop { // basic situation that will happen most of the time if let Some(sample) = self.current.next() { return Some(sample); } let next = { let mut next = self.next.lock().unwrap(); if next.len() == 0 { // if there's no iter waiting, we create a dummy iter with 1000 null samples // this avoids a spinlock Box::new((0 .. 1000).map(|_| 0.0f32)) as Box<Iterator<Item = f32> + Send> } else { next.remove(0) } }; self.current = next;
} }
random_line_split
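`Handle::append` in the record above estimates how much playback time is left from a sample count, the channel count, and the sample rate (samples * 1000 / (channels * rate)). A stand-alone version of that arithmetic; the free-function form and its name are mine, the record does this inline.

```rust
/// Rough remaining-duration estimate, in milliseconds: samples still to be
/// played, divided by the number of interleaved samples consumed per second,
/// scaled to milliseconds.
fn remaining_millis(pending_samples: u64, channels: u64, sample_rate: u64) -> u64 {
    pending_samples * 1000 / (channels * sample_rate)
}

fn main() {
    // 44_100 Hz stereo: 88_200 interleaved samples are one second of audio.
    assert_eq!(remaining_millis(88_200, 2, 44_100), 1000);
}
```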
geomodel.go
ArrayDoubleTuple struct { first []int second float64 } type ByDistanceIA []IntArrayDoubleTuple func (a ByDistanceIA) Len() int { return len(a) } func (a ByDistanceIA) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDistanceIA) Less(i, j int) bool { return a[i].second < a[j].second } type ByDistance []LocationComparableTuple func (a ByDistance) Len() int { return len(a) } func (a ByDistance) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDistance) Less(i, j int) bool { return a[i].second < a[j].second } type RepositorySearch func([]string) []LocationCapable func GeoHash(lat, lon float64, resolution int) string { return GeoCell(lat, lon, resolution) } func DecodeGeoHash(hash string) (float64, float64) { latMin := -90.0 latMax := 90.0 lonMin := -180.0 lonMax := 180.0 even := true for i := 0; i < len(hash); i++ { chr := hash[i] index := strings.Index(GEOCELL_ALPHABET, string(chr)) for n := 4; n >= 0; n-- { bitN := index >> uint(n) & 1; if even { lonMid := (lonMin + lonMax) / 2 if bitN == 1 { lonMin = lonMid } else { lonMax = lonMid } } else { latMid := (latMin + latMax) / 2 if bitN == 1 { latMin = latMid } else { latMax = latMid } } even = !even } } return (latMin + latMax) / 2.0, (lonMin + lonMax) / 2.0 } func GeoCell(lat, lon float64, resolution int) string { resolution = resolution + 1 north := 90.0 south := -90.0 east := 180.0 west := -180.0 isEven := true mid := 0.0 ch := 0 bit := 0 bits := []int{16, 8, 4, 2, 1} cell := make([]byte, resolution, resolution) i := 0 for i = 0; i < resolution; { if isEven { mid = (west + east) / 2 if lon > mid { ch |= bits[bit] west = mid } else { east = mid } } else { mid = (south + north) / 2 if lat > mid { ch |= bits[bit] south = mid } else { north = mid
isEven = !isEven if bit < 4 { bit = bit + 1 } else { cell[i] = GEOCELL_ALPHABET[ch] i = i + 1 bit = 0 ch = 0 } } cell = cell[:len(cell)-1] return string(cell) } func GeoCells(lat, lon float64, resolution int) []string { g := GeoCell(lat, lon, resolution) cells := make([]string, len(g), len(g)) for i := 0; i < resolution; i++ { cells[i] = g[0 : i+1] } return cells } func Distance(lat1, lon1, lat2, lon2 float64) float64 { var p1lat = DegToRad(lat1) var p1lon = DegToRad(lon1) var p2lat = DegToRad(lat2) var p2lon = DegToRad(lon2) return 6378135 * math.Acos(math.Sin(p1lat)*math.Sin(p2lat)+math.Cos(p1lat)*math.Cos(p2lat)*math.Cos(p2lon-p1lon)) } func DistanceSortedEdges(cells []string, lat, lon float64) []IntArrayDoubleTuple { var boxes []BoundingBox = make([]BoundingBox, 0, len(cells)) for _, cell := range cells { boxes = append(boxes, ComputeBox(cell)) } var maxNorth float64 = -math.MaxFloat64 var maxEast float64 = -math.MaxFloat64 var maxSouth float64 = -math.MaxFloat64 var maxWest float64 = -math.MaxFloat64 for _, box := range boxes { maxNorth = math.Max(maxNorth, box.latNE) maxEast = math.Max(maxEast, box.lonNE) maxSouth = math.Max(maxSouth, box.latSW) maxWest = math.Max(maxWest, box.lonSW) } result := make([]IntArrayDoubleTuple, 4) result[0] = IntArrayDoubleTuple{SOUTH, Distance(maxSouth, lon, lat, lon)} result[1] = IntArrayDoubleTuple{NORTH, Distance(maxNorth, lon, lat, lon)} result[2] = IntArrayDoubleTuple{WEST, Distance(lat, maxWest, lat, lon)} result[3] = IntArrayDoubleTuple{EAST, Distance(maxSouth, maxEast, lat, lon)} sort.Sort(ByDistanceIA(result)) return result } func ComputeBox(cell string) BoundingBox { var bbox BoundingBox if cell == "" { return bbox } bbox = NewBoundingBox(90.0, 180.0, -90.0, -180.0) for len(cell) > 0 { var subcellLonSpan float64 = (bbox.lonNE - bbox.lonSW) / GEOCELL_GRID_SIZE var subcellLatSpan float64 = (bbox.latNE - bbox.latSW) / GEOCELL_GRID_SIZE var l []int = SubdivXY(rune(cell[0])) var x int = l[0] var y int = l[1] bbox = NewBoundingBox(bbox.latSW+subcellLatSpan*(float64(y)+1), bbox.lonSW+subcellLonSpan*(float64(x)+1), bbox.latSW+subcellLatSpan*float64(y), bbox.lonSW+subcellLonSpan*float64(x)) cell = cell[1:] } return bbox } func ProximityFetch(lat, lon float64, maxResults int, maxDistance float64, search RepositorySearch, maxResolution int) []LocationCapable { var results []LocationComparableTuple // The current search geocell containing the lat,lon. var curContainingGeocell string = GeoCell(lat, lon, maxResolution) var searchedCells []string = make([]string, 0) /* * The currently-being-searched geocells. * NOTES: * Start with max possible. * Must always be of the same resolution. * Must always form a rectangular region. * One of these must be equal to the cur_containing_geocell. */ var curGeocells []string = make([]string, 0) curGeocells = append(curGeocells, curContainingGeocell) var closestPossibleNextResultDist float64 = 0 var noDirection = []int{0, 0} var sortedEdgeDistances []IntArrayDoubleTuple sortedEdgeDistances = append(sortedEdgeDistances, IntArrayDoubleTuple{noDirection, 0}) for len(curGeocells) != 0 { closestPossibleNextResultDist = sortedEdgeDistances[0].second if maxDistance > 0 && closestPossibleNextResultDist > maxDistance { break } var curTempUnique = deleteRecords(curGeocells, searchedCells) var curGeocellsUnique = curTempUnique var newResultEntities = search(curGeocellsUnique) searchedCells = append(searchedCells, curGeocells...) 
// Begin storing distance from the search result entity to the // search center along with the search result itself, in a tuple. var newResults []LocationComparableTuple = make([]LocationComparableTuple, 0, len(newResultEntities)) for _, entity := range newResultEntities { newResults = append(newResults, LocationComparableTuple{entity, Distance(lat, lon, entity.Latitude(), entity.Longitude())}) } sort.Sort(ByDistance(newResults)) newResults = newResults[0:int(math.Min(float64(maxResults), float64(len(newResults))))] // Merge new_results into results for _, tuple := range newResults { // contains method will check if entity in tuple have same key if !contains(results, tuple) { results = append(results, tuple) } } sort.Sort(ByDistance(results)) results = results[0:int
} }
random_line_split
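`DecodeGeoHash` in the Go record above recovers a point by interval bisection: each character contributes five bits, and each bit halves either the longitude or the latitude range, alternating, so the decoded point is the midpoint of the final intervals. Below is a Rust re-sketch of that loop (the record's implementation is Go); the 32-character alphabet is an assumed stand-in for GEOCELL_ALPHABET, whose definition is not visible in this excerpt.

```rust
/// Interval-bisection decode, mirroring the structure of the record's
/// `DecodeGeoHash`: five bits per character, alternating longitude/latitude.
const ALPHABET: &str = "0123456789bcdefghjkmnpqrstuvwxyz";

fn decode_cell(hash: &str) -> (f64, f64) {
    let (mut lat_min, mut lat_max) = (-90.0f64, 90.0f64);
    let (mut lon_min, mut lon_max) = (-180.0f64, 180.0f64);
    let mut even = true; // even bits refine longitude, odd bits refine latitude

    for ch in hash.chars() {
        let index = ALPHABET.find(ch).expect("character not in alphabet");
        for n in (0..5).rev() {
            let bit = (index >> n) & 1;
            if even {
                let mid = (lon_min + lon_max) / 2.0;
                if bit == 1 { lon_min = mid } else { lon_max = mid }
            } else {
                let mid = (lat_min + lat_max) / 2.0;
                if bit == 1 { lat_min = mid } else { lat_max = mid }
            }
            even = !even;
        }
    }
    // The decoded point is the midpoint of the final intervals.
    ((lat_min + lat_max) / 2.0, (lon_min + lon_max) / 2.0)
}

fn main() {
    let (lat, lon) = decode_cell("7zzzz");
    assert!((-90.0..=90.0).contains(&lat) && (-180.0..=180.0).contains(&lon));
}
```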
geomodel.go
ArrayDoubleTuple struct { first []int second float64 } type ByDistanceIA []IntArrayDoubleTuple func (a ByDistanceIA) Len() int { return len(a) } func (a ByDistanceIA) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDistanceIA) Less(i, j int) bool { return a[i].second < a[j].second } type ByDistance []LocationComparableTuple func (a ByDistance) Len() int { return len(a) } func (a ByDistance) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDistance) Less(i, j int) bool { return a[i].second < a[j].second } type RepositorySearch func([]string) []LocationCapable func GeoHash(lat, lon float64, resolution int) string { return GeoCell(lat, lon, resolution) } func DecodeGeoHash(hash string) (float64, float64) { latMin := -90.0 latMax := 90.0 lonMin := -180.0 lonMax := 180.0 even := true for i := 0; i < len(hash); i++ { chr := hash[i] index := strings.Index(GEOCELL_ALPHABET, string(chr)) for n := 4; n >= 0; n-- { bitN := index >> uint(n) & 1; if even { lonMid := (lonMin + lonMax) / 2 if bitN == 1 { lonMin = lonMid } else { lonMax = lonMid } } else { latMid := (latMin + latMax) / 2 if bitN == 1 { latMin = latMid } else { latMax = latMid } } even = !even } } return (latMin + latMax) / 2.0, (lonMin + lonMax) / 2.0 } func GeoCell(lat, lon float64, resolution int) string { resolution = resolution + 1 north := 90.0 south := -90.0 east := 180.0 west := -180.0 isEven := true mid := 0.0 ch := 0 bit := 0 bits := []int{16, 8, 4, 2, 1} cell := make([]byte, resolution, resolution) i := 0 for i = 0; i < resolution; { if isEven { mid = (west + east) / 2 if lon > mid { ch |= bits[bit] west = mid } else { east = mid } } else { mid = (south + north) / 2 if lat > mid { ch |= bits[bit] south = mid } else { north = mid } } isEven = !isEven if bit < 4 { bit = bit + 1 } else { cell[i] = GEOCELL_ALPHABET[ch] i = i + 1 bit = 0 ch = 0 } } cell = cell[:len(cell)-1] return string(cell) } func GeoCells(lat, lon float64, resolution int) []string { g := GeoCell(lat, lon, resolution) cells := make([]string, len(g), len(g)) for i := 0; i < resolution; i++ { cells[i] = g[0 : i+1] } return cells } func Distance(lat1, lon1, lat2, lon2 float64) float64 { var p1lat = DegToRad(lat1) var p1lon = DegToRad(lon1) var p2lat = DegToRad(lat2) var p2lon = DegToRad(lon2) return 6378135 * math.Acos(math.Sin(p1lat)*math.Sin(p2lat)+math.Cos(p1lat)*math.Cos(p2lat)*math.Cos(p2lon-p1lon)) } func DistanceSortedEdges(cells []string, lat, lon float64) []IntArrayDoubleTuple { var boxes []BoundingBox = make([]BoundingBox, 0, len(cells)) for _, cell := range cells { boxes = append(boxes, ComputeBox(cell)) } var maxNorth float64 = -math.MaxFloat64 var maxEast float64 = -math.MaxFloat64 var maxSouth float64 = -math.MaxFloat64 var maxWest float64 = -math.MaxFloat64 for _, box := range boxes { maxNorth = math.Max(maxNorth, box.latNE) maxEast = math.Max(maxEast, box.lonNE) maxSouth = math.Max(maxSouth, box.latSW) maxWest = math.Max(maxWest, box.lonSW) } result := make([]IntArrayDoubleTuple, 4) result[0] = IntArrayDoubleTuple{SOUTH, Distance(maxSouth, lon, lat, lon)} result[1] = IntArrayDoubleTuple{NORTH, Distance(maxNorth, lon, lat, lon)} result[2] = IntArrayDoubleTuple{WEST, Distance(lat, maxWest, lat, lon)} result[3] = IntArrayDoubleTuple{EAST, Distance(maxSouth, maxEast, lat, lon)} sort.Sort(ByDistanceIA(result)) return result } func ComputeBox(cell string) BoundingBox { var bbox BoundingBox if cell == ""
bbox = NewBoundingBox(90.0, 180.0, -90.0, -180.0) for len(cell) > 0 { var subcellLonSpan float64 = (bbox.lonNE - bbox.lonSW) / GEOCELL_GRID_SIZE var subcellLatSpan float64 = (bbox.latNE - bbox.latSW) / GEOCELL_GRID_SIZE var l []int = SubdivXY(rune(cell[0])) var x int = l[0] var y int = l[1] bbox = NewBoundingBox(bbox.latSW+subcellLatSpan*(float64(y)+1), bbox.lonSW+subcellLonSpan*(float64(x)+1), bbox.latSW+subcellLatSpan*float64(y), bbox.lonSW+subcellLonSpan*float64(x)) cell = cell[1:] } return bbox } func ProximityFetch(lat, lon float64, maxResults int, maxDistance float64, search RepositorySearch, maxResolution int) []LocationCapable { var results []LocationComparableTuple // The current search geocell containing the lat,lon. var curContainingGeocell string = GeoCell(lat, lon, maxResolution) var searchedCells []string = make([]string, 0) /* * The currently-being-searched geocells. * NOTES: * Start with max possible. * Must always be of the same resolution. * Must always form a rectangular region. * One of these must be equal to the cur_containing_geocell. */ var curGeocells []string = make([]string, 0) curGeocells = append(curGeocells, curContainingGeocell) var closestPossibleNextResultDist float64 = 0 var noDirection = []int{0, 0} var sortedEdgeDistances []IntArrayDoubleTuple sortedEdgeDistances = append(sortedEdgeDistances, IntArrayDoubleTuple{noDirection, 0}) for len(curGeocells) != 0 { closestPossibleNextResultDist = sortedEdgeDistances[0].second if maxDistance > 0 && closestPossibleNextResultDist > maxDistance { break } var curTempUnique = deleteRecords(curGeocells, searchedCells) var curGeocellsUnique = curTempUnique var newResultEntities = search(curGeocellsUnique) searchedCells = append(searchedCells, curGeocells...) // Begin storing distance from the search result entity to the // search center along with the search result itself, in a tuple. var newResults []LocationComparableTuple = make([]LocationComparableTuple, 0, len(newResultEntities)) for _, entity := range newResultEntities { newResults = append(newResults, LocationComparableTuple{entity, Distance(lat, lon, entity.Latitude(), entity.Longitude())}) } sort.Sort(ByDistance(newResults)) newResults = newResults[0:int(math.Min(float64(maxResults), float64(len(newResults))))] // Merge new_results into results for _, tuple := range newResults { // contains method will check if entity in tuple have same key if !contains(results, tuple) { results = append(results, tuple) } } sort.Sort(ByDistance(results)) results = results
{ return bbox }
conditional_block
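`ProximityFetch` in this record merges each batch of newly found entities into the running result list: pair each entity with its distance to the query point, skip candidates whose key is already present, re-sort by distance, and trim to `max_results`. A Rust re-sketch of just that merge step (the record is Go); the `(key, distance)` tuple stands in for LocationComparableTuple.

```rust
/// Merge a batch of (key, distance) candidates into the running results,
/// dropping duplicate keys, sorting by distance, and keeping the closest
/// `max_results` entries.
fn merge_results(results: &mut Vec<(String, f64)>, new: Vec<(String, f64)>, max_results: usize) {
    for candidate in new {
        if !results.iter().any(|(key, _)| *key == candidate.0) {
            results.push(candidate);
        }
    }
    results.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
    results.truncate(max_results);
}

fn main() {
    let mut results = vec![("a".to_string(), 120.0)];
    let batch = vec![("b".to_string(), 40.0), ("a".to_string(), 120.0)];
    merge_results(&mut results, batch, 2);
    assert_eq!(results[0].0, "b");
    assert_eq!(results.len(), 2);
}
```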
geomodel.go
ArrayDoubleTuple struct { first []int second float64 } type ByDistanceIA []IntArrayDoubleTuple func (a ByDistanceIA) Len() int { return len(a) } func (a ByDistanceIA) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDistanceIA) Less(i, j int) bool { return a[i].second < a[j].second } type ByDistance []LocationComparableTuple func (a ByDistance) Len() int { return len(a) } func (a ByDistance) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDistance) Less(i, j int) bool { return a[i].second < a[j].second } type RepositorySearch func([]string) []LocationCapable func GeoHash(lat, lon float64, resolution int) string { return GeoCell(lat, lon, resolution) } func DecodeGeoHash(hash string) (float64, float64) { latMin := -90.0 latMax := 90.0 lonMin := -180.0 lonMax := 180.0 even := true for i := 0; i < len(hash); i++ { chr := hash[i] index := strings.Index(GEOCELL_ALPHABET, string(chr)) for n := 4; n >= 0; n-- { bitN := index >> uint(n) & 1; if even { lonMid := (lonMin + lonMax) / 2 if bitN == 1 { lonMin = lonMid } else { lonMax = lonMid } } else { latMid := (latMin + latMax) / 2 if bitN == 1 { latMin = latMid } else { latMax = latMid } } even = !even } } return (latMin + latMax) / 2.0, (lonMin + lonMax) / 2.0 } func GeoCell(lat, lon float64, resolution int) string { resolution = resolution + 1 north := 90.0 south := -90.0 east := 180.0 west := -180.0 isEven := true mid := 0.0 ch := 0 bit := 0 bits := []int{16, 8, 4, 2, 1} cell := make([]byte, resolution, resolution) i := 0 for i = 0; i < resolution; { if isEven { mid = (west + east) / 2 if lon > mid { ch |= bits[bit] west = mid } else { east = mid } } else { mid = (south + north) / 2 if lat > mid { ch |= bits[bit] south = mid } else { north = mid } } isEven = !isEven if bit < 4 { bit = bit + 1 } else { cell[i] = GEOCELL_ALPHABET[ch] i = i + 1 bit = 0 ch = 0 } } cell = cell[:len(cell)-1] return string(cell) } func GeoCells(lat, lon float64, resolution int) []string { g := GeoCell(lat, lon, resolution) cells := make([]string, len(g), len(g)) for i := 0; i < resolution; i++ { cells[i] = g[0 : i+1] } return cells } func
(lat1, lon1, lat2, lon2 float64) float64 { var p1lat = DegToRad(lat1) var p1lon = DegToRad(lon1) var p2lat = DegToRad(lat2) var p2lon = DegToRad(lon2) return 6378135 * math.Acos(math.Sin(p1lat)*math.Sin(p2lat)+math.Cos(p1lat)*math.Cos(p2lat)*math.Cos(p2lon-p1lon)) } func DistanceSortedEdges(cells []string, lat, lon float64) []IntArrayDoubleTuple { var boxes []BoundingBox = make([]BoundingBox, 0, len(cells)) for _, cell := range cells { boxes = append(boxes, ComputeBox(cell)) } var maxNorth float64 = -math.MaxFloat64 var maxEast float64 = -math.MaxFloat64 var maxSouth float64 = -math.MaxFloat64 var maxWest float64 = -math.MaxFloat64 for _, box := range boxes { maxNorth = math.Max(maxNorth, box.latNE) maxEast = math.Max(maxEast, box.lonNE) maxSouth = math.Max(maxSouth, box.latSW) maxWest = math.Max(maxWest, box.lonSW) } result := make([]IntArrayDoubleTuple, 4) result[0] = IntArrayDoubleTuple{SOUTH, Distance(maxSouth, lon, lat, lon)} result[1] = IntArrayDoubleTuple{NORTH, Distance(maxNorth, lon, lat, lon)} result[2] = IntArrayDoubleTuple{WEST, Distance(lat, maxWest, lat, lon)} result[3] = IntArrayDoubleTuple{EAST, Distance(maxSouth, maxEast, lat, lon)} sort.Sort(ByDistanceIA(result)) return result } func ComputeBox(cell string) BoundingBox { var bbox BoundingBox if cell == "" { return bbox } bbox = NewBoundingBox(90.0, 180.0, -90.0, -180.0) for len(cell) > 0 { var subcellLonSpan float64 = (bbox.lonNE - bbox.lonSW) / GEOCELL_GRID_SIZE var subcellLatSpan float64 = (bbox.latNE - bbox.latSW) / GEOCELL_GRID_SIZE var l []int = SubdivXY(rune(cell[0])) var x int = l[0] var y int = l[1] bbox = NewBoundingBox(bbox.latSW+subcellLatSpan*(float64(y)+1), bbox.lonSW+subcellLonSpan*(float64(x)+1), bbox.latSW+subcellLatSpan*float64(y), bbox.lonSW+subcellLonSpan*float64(x)) cell = cell[1:] } return bbox } func ProximityFetch(lat, lon float64, maxResults int, maxDistance float64, search RepositorySearch, maxResolution int) []LocationCapable { var results []LocationComparableTuple // The current search geocell containing the lat,lon. var curContainingGeocell string = GeoCell(lat, lon, maxResolution) var searchedCells []string = make([]string, 0) /* * The currently-being-searched geocells. * NOTES: * Start with max possible. * Must always be of the same resolution. * Must always form a rectangular region. * One of these must be equal to the cur_containing_geocell. */ var curGeocells []string = make([]string, 0) curGeocells = append(curGeocells, curContainingGeocell) var closestPossibleNextResultDist float64 = 0 var noDirection = []int{0, 0} var sortedEdgeDistances []IntArrayDoubleTuple sortedEdgeDistances = append(sortedEdgeDistances, IntArrayDoubleTuple{noDirection, 0}) for len(curGeocells) != 0 { closestPossibleNextResultDist = sortedEdgeDistances[0].second if maxDistance > 0 && closestPossibleNextResultDist > maxDistance { break } var curTempUnique = deleteRecords(curGeocells, searchedCells) var curGeocellsUnique = curTempUnique var newResultEntities = search(curGeocellsUnique) searchedCells = append(searchedCells, curGeocells...) // Begin storing distance from the search result entity to the // search center along with the search result itself, in a tuple. 
var newResults []LocationComparableTuple = make([]LocationComparableTuple, 0, len(newResultEntities)) for _, entity := range newResultEntities { newResults = append(newResults, LocationComparableTuple{entity, Distance(lat, lon, entity.Latitude(), entity.Longitude())}) } sort.Sort(ByDistance(newResults)) newResults = newResults[0:int(math.Min(float64(maxResults), float64(len(newResults))))] // Merge new_results into results for _, tuple := range newResults { // contains method will check if entity in tuple have same key if !contains(results, tuple) { results = append(results, tuple) } } sort.Sort(ByDistance(results)) results = results[
Distance
identifier_name
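The identifier masked in this record is `Distance`, which computes a great-circle distance with the spherical law of cosines after converting degrees to radians via `DegToRad`, using 6378135 m as the Earth radius:

$$
d = 6378135 \cdot \arccos\!\left( \sin\varphi_1 \sin\varphi_2 + \cos\varphi_1 \cos\varphi_2 \cos(\lambda_2 - \lambda_1) \right)
$$

where $\varphi_1, \varphi_2$ are the two latitudes and $\lambda_1, \lambda_2$ the two longitudes, all in radians.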
geomodel.go
ArrayDoubleTuple struct { first []int second float64 } type ByDistanceIA []IntArrayDoubleTuple func (a ByDistanceIA) Len() int { return len(a) } func (a ByDistanceIA) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDistanceIA) Less(i, j int) bool { return a[i].second < a[j].second } type ByDistance []LocationComparableTuple func (a ByDistance) Len() int { return len(a) } func (a ByDistance) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDistance) Less(i, j int) bool { return a[i].second < a[j].second } type RepositorySearch func([]string) []LocationCapable func GeoHash(lat, lon float64, resolution int) string { return GeoCell(lat, lon, resolution) } func DecodeGeoHash(hash string) (float64, float64) { latMin := -90.0 latMax := 90.0 lonMin := -180.0 lonMax := 180.0 even := true for i := 0; i < len(hash); i++ { chr := hash[i] index := strings.Index(GEOCELL_ALPHABET, string(chr)) for n := 4; n >= 0; n-- { bitN := index >> uint(n) & 1; if even { lonMid := (lonMin + lonMax) / 2 if bitN == 1 { lonMin = lonMid } else { lonMax = lonMid } } else { latMid := (latMin + latMax) / 2 if bitN == 1 { latMin = latMid } else { latMax = latMid } } even = !even } } return (latMin + latMax) / 2.0, (lonMin + lonMax) / 2.0 } func GeoCell(lat, lon float64, resolution int) string
ch |= bits[bit] west = mid } else { east = mid } } else { mid = (south + north) / 2 if lat > mid { ch |= bits[bit] south = mid } else { north = mid } } isEven = !isEven if bit < 4 { bit = bit + 1 } else { cell[i] = GEOCELL_ALPHABET[ch] i = i + 1 bit = 0 ch = 0 } } cell = cell[:len(cell)-1] return string(cell) } func GeoCells(lat, lon float64, resolution int) []string { g := GeoCell(lat, lon, resolution) cells := make([]string, len(g), len(g)) for i := 0; i < resolution; i++ { cells[i] = g[0 : i+1] } return cells } func Distance(lat1, lon1, lat2, lon2 float64) float64 { var p1lat = DegToRad(lat1) var p1lon = DegToRad(lon1) var p2lat = DegToRad(lat2) var p2lon = DegToRad(lon2) return 6378135 * math.Acos(math.Sin(p1lat)*math.Sin(p2lat)+math.Cos(p1lat)*math.Cos(p2lat)*math.Cos(p2lon-p1lon)) } func DistanceSortedEdges(cells []string, lat, lon float64) []IntArrayDoubleTuple { var boxes []BoundingBox = make([]BoundingBox, 0, len(cells)) for _, cell := range cells { boxes = append(boxes, ComputeBox(cell)) } var maxNorth float64 = -math.MaxFloat64 var maxEast float64 = -math.MaxFloat64 var maxSouth float64 = -math.MaxFloat64 var maxWest float64 = -math.MaxFloat64 for _, box := range boxes { maxNorth = math.Max(maxNorth, box.latNE) maxEast = math.Max(maxEast, box.lonNE) maxSouth = math.Max(maxSouth, box.latSW) maxWest = math.Max(maxWest, box.lonSW) } result := make([]IntArrayDoubleTuple, 4) result[0] = IntArrayDoubleTuple{SOUTH, Distance(maxSouth, lon, lat, lon)} result[1] = IntArrayDoubleTuple{NORTH, Distance(maxNorth, lon, lat, lon)} result[2] = IntArrayDoubleTuple{WEST, Distance(lat, maxWest, lat, lon)} result[3] = IntArrayDoubleTuple{EAST, Distance(maxSouth, maxEast, lat, lon)} sort.Sort(ByDistanceIA(result)) return result } func ComputeBox(cell string) BoundingBox { var bbox BoundingBox if cell == "" { return bbox } bbox = NewBoundingBox(90.0, 180.0, -90.0, -180.0) for len(cell) > 0 { var subcellLonSpan float64 = (bbox.lonNE - bbox.lonSW) / GEOCELL_GRID_SIZE var subcellLatSpan float64 = (bbox.latNE - bbox.latSW) / GEOCELL_GRID_SIZE var l []int = SubdivXY(rune(cell[0])) var x int = l[0] var y int = l[1] bbox = NewBoundingBox(bbox.latSW+subcellLatSpan*(float64(y)+1), bbox.lonSW+subcellLonSpan*(float64(x)+1), bbox.latSW+subcellLatSpan*float64(y), bbox.lonSW+subcellLonSpan*float64(x)) cell = cell[1:] } return bbox } func ProximityFetch(lat, lon float64, maxResults int, maxDistance float64, search RepositorySearch, maxResolution int) []LocationCapable { var results []LocationComparableTuple // The current search geocell containing the lat,lon. var curContainingGeocell string = GeoCell(lat, lon, maxResolution) var searchedCells []string = make([]string, 0) /* * The currently-being-searched geocells. * NOTES: * Start with max possible. * Must always be of the same resolution. * Must always form a rectangular region. * One of these must be equal to the cur_containing_geocell. 
*/ var curGeocells []string = make([]string, 0) curGeocells = append(curGeocells, curContainingGeocell) var closestPossibleNextResultDist float64 = 0 var noDirection = []int{0, 0} var sortedEdgeDistances []IntArrayDoubleTuple sortedEdgeDistances = append(sortedEdgeDistances, IntArrayDoubleTuple{noDirection, 0}) for len(curGeocells) != 0 { closestPossibleNextResultDist = sortedEdgeDistances[0].second if maxDistance > 0 && closestPossibleNextResultDist > maxDistance { break } var curTempUnique = deleteRecords(curGeocells, searchedCells) var curGeocellsUnique = curTempUnique var newResultEntities = search(curGeocellsUnique) searchedCells = append(searchedCells, curGeocells...) // Begin storing distance from the search result entity to the // search center along with the search result itself, in a tuple. var newResults []LocationComparableTuple = make([]LocationComparableTuple, 0, len(newResultEntities)) for _, entity := range newResultEntities { newResults = append(newResults, LocationComparableTuple{entity, Distance(lat, lon, entity.Latitude(), entity.Longitude())}) } sort.Sort(ByDistance(newResults)) newResults = newResults[0:int(math.Min(float64(maxResults), float64(len(newResults))))] // Merge new_results into results for _, tuple := range newResults { // contains method will check if entity in tuple have same key if !contains(results, tuple) { results = append(results, tuple) } } sort.Sort(ByDistance(results)) results = results[
{ resolution = resolution + 1 north := 90.0 south := -90.0 east := 180.0 west := -180.0 isEven := true mid := 0.0 ch := 0 bit := 0 bits := []int{16, 8, 4, 2, 1} cell := make([]byte, resolution, resolution) i := 0 for i = 0; i < resolution; { if isEven { mid = (west + east) / 2 if lon > mid {
identifier_body
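The body filled in by this record is the encode loop of `GeoCell`: alternately bisect the longitude and latitude ranges, accumulate five bits per output character, and map each 5-bit value into an alphabet. Below is a simplified Rust re-sketch of that loop (the record is Go); the 32-character alphabet is an assumption standing in for GEOCELL_ALPHABET, and the record's `resolution + 1` / trailing-character trim is omitted.

```rust
/// Simplified encode: bisect ranges, pack five bits per character.
const ALPHABET: &[u8] = b"0123456789bcdefghjkmnpqrstuvwxyz";

fn encode_cell(lat: f64, lon: f64, resolution: usize) -> String {
    let (mut south, mut north) = (-90.0f64, 90.0f64);
    let (mut west, mut east) = (-180.0f64, 180.0f64);
    let (mut is_even, mut ch, mut bit) = (true, 0usize, 0usize);
    let bits: [usize; 5] = [16, 8, 4, 2, 1];
    let mut cell = String::new();

    while cell.len() < resolution {
        if is_even {
            let mid = (west + east) / 2.0;
            if lon > mid { ch |= bits[bit]; west = mid; } else { east = mid; }
        } else {
            let mid = (south + north) / 2.0;
            if lat > mid { ch |= bits[bit]; south = mid; } else { north = mid; }
        }
        is_even = !is_even;
        if bit < 4 {
            bit += 1;
        } else {
            // Five bits collected: emit one character and start the next one.
            cell.push(ALPHABET[ch] as char);
            bit = 0;
            ch = 0;
        }
    }
    cell
}

fn main() {
    // Five bits per character, so resolution 8 consumes 40 bisections.
    assert_eq!(encode_cell(48.8566, 2.3522, 8).len(), 8);
}
```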
gles2.rs
0..(BATCH_MAX / 4) as u16 { let index = index * 4; vertex_indices.push(index); vertex_indices.push(index + 1); vertex_indices.push(index + 3); vertex_indices.push(index + 1); vertex_indices.push(index + 2); vertex_indices.push(index + 3); } unsafe { gl::Enable(gl::BLEND); gl::DepthMask(gl::FALSE); gl::GenVertexArrays(1, &mut vao); gl::GenBuffers(1, &mut ebo); gl::GenBuffers(1, &mut vbo); gl::BindVertexArray(vao); // Elements buffer. gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo); gl::BufferData( gl::ELEMENT_ARRAY_BUFFER, (vertex_indices.capacity() * size_of::<u16>()) as isize, vertex_indices.as_ptr() as *const _, gl::STATIC_DRAW, ); // Vertex buffer. gl::BindBuffer(gl::ARRAY_BUFFER, vbo); gl::BufferData( gl::ARRAY_BUFFER, (BATCH_MAX * size_of::<TextVertex>()) as isize, ptr::null(), gl::STREAM_DRAW, ); let mut index = 0; let mut size = 0; macro_rules! add_attr { ($count:expr, $gl_type:expr, $type:ty) => { gl::VertexAttribPointer( index, $count, $gl_type, gl::FALSE, size_of::<TextVertex>() as i32, size as *const _, ); gl::EnableVertexAttribArray(index); #[allow(unused_assignments)] { size += $count * size_of::<$type>(); index += 1; } }; } // Cell coords. add_attr!(2, gl::SHORT, i16); // Glyph coords. add_attr!(2, gl::SHORT, i16); // UV. add_attr!(2, gl::FLOAT, u32); // Color and bitmap color. // // These are packed together because of an OpenGL driver issue on macOS, which caused a // `vec3(u8)` text color and a `u8` for glyph color to cause performance regressions. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Background color. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Cleanup. gl::BindVertexArray(0); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); } Ok(Self { program, vao, vbo, ebo, atlas: vec![Atlas::new(ATLAS_SIZE, is_gles_context)], batch: Batch::new(), current_atlas: 0, active_tex: 0, dual_source_blending, }) } } impl Drop for Gles2Renderer { fn drop(&mut self) { unsafe { gl::DeleteBuffers(1, &self.vbo); gl::DeleteBuffers(1, &self.ebo); gl::DeleteVertexArrays(1, &self.vao); } } } impl<'a> TextRenderer<'a> for Gles2Renderer { type RenderApi = RenderApi<'a>; type RenderBatch = Batch; type Shader = TextShaderProgram; fn program(&self) -> &Self::Shader { &self.program } fn with_api<'b: 'a, F, T>(&'b mut self, _: &'b SizeInfo, func: F) -> T where F: FnOnce(Self::RenderApi) -> T, { unsafe { gl::UseProgram(self.program.id()); gl::BindVertexArray(self.vao); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo); gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo); gl::ActiveTexture(gl::TEXTURE0); } let res = func(RenderApi { active_tex: &mut self.active_tex, batch: &mut self.batch, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, program: &mut self.program, dual_source_blending: self.dual_source_blending, }); unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindVertexArray(0); gl::UseProgram(0); } res } fn loader_api(&mut self) -> LoaderApi<'_> { LoaderApi { active_tex: &mut self.active_tex, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, } } } /// Maximum items to be drawn in a batch. /// /// We use the closest number to `u16::MAX` dividable by 4 (amount of vertices we push for a glyph), /// since it's the maximum possible index in `glDrawElements` in GLES2. const BATCH_MAX: usize = (u16::MAX - u16::MAX % 4) as usize; #[derive(Debug)] pub struct
{ tex: GLuint, vertices: Vec<TextVertex>, } impl Batch { fn new() -> Self { Self { tex: 0, vertices: Vec::with_capacity(BATCH_MAX) } } #[inline] fn len(&self) -> usize { self.vertices.len() } #[inline] fn capacity(&self) -> usize { BATCH_MAX } #[inline] fn size(&self) -> usize { self.len() * size_of::<TextVertex>() } #[inline] fn clear(&mut self) { self.vertices.clear(); } } impl TextRenderBatch for Batch { #[inline] fn tex(&self) -> GLuint { self.tex } #[inline] fn full(&self) -> bool { self.capacity() == self.len() } #[inline] fn is_empty(&self) -> bool { self.len() == 0 } fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, size_info: &SizeInfo) { if self.is_empty() { self.tex = glyph.tex_id; } // Calculate the cell position. let x = cell.point.column.0 as i16 * size_info.cell_width() as i16; let y = cell.point.line as i16 * size_info.cell_height() as i16; // Calculate the glyph position. let glyph_x = cell.point.column.0 as i16 * size_info.cell_width() as i16 + glyph.left; let glyph_y = (cell.point.line + 1) as i16 * size_info.cell_height() as i16 - glyph.top; let colored = if glyph.multicolor { RenderingGlyphFlags::COLORED } else { RenderingGlyphFlags::empty() }; let is_wide = if cell.flags.contains(Flags::WIDE_CHAR) { 2 } else { 1 }; let mut vertex = TextVertex { x, y: y + size_info.cell_height() as i16, glyph_x, glyph_y: glyph_y + glyph.height, u: glyph.uv_left, v: glyph.uv_bot + glyph.uv_height, r: cell.fg.r, g: cell.fg.g, b: cell.fg.b, colored, bg_r: cell.bg.r, bg_g: cell.bg.g, bg_b: cell.bg.b, bg_a: (cell.bg_alpha * 255.0) as u8, }; self.vertices.push(vertex); vertex.y = y; vertex.glyph_y = glyph_y; vertex.u = glyph.uv_left; vertex.v = glyph.uv_bot; self.vertices.push(vertex); vertex.x = x + is_wide * size_info.cell_width() as i16; vertex.glyph_x = glyph_x + glyph.width; vertex.u = glyph.uv_left + glyph.uv_width; vertex.v = glyph.uv_bot; self.vertices.push(vertex); vertex.x = x + is_wide * size_info.cell_width() as i16; vertex.y = y + size_info.cell_height() as i16; vertex.glyph_x = glyph_x + glyph.width; vertex.glyph_y = glyph_y + glyph.height; vertex.u = glyph.uv_left + glyph.uv_width; vertex.v = glyph.uv_bot + glyph.uv_height; self.vertices.push(vertex); } } #[derive(Debug)] pub struct RenderApi<'a> { active_tex: &'a mut GLuint, batch: &'a mut Batch, atlas: &'a mut Vec<Atlas>, current_atlas: &'a mut usize, program: &'a mut TextShaderProgram, dual_source_blending: bool, } impl<'a> Drop for RenderApi<'a> { fn drop(&mut self) { if !self.batch.is_empty() { self.render_batch(); } } } impl<'a> LoadGlyph for RenderApi<'a> { fn load_glyph(&mut self, rasterized: &RasterizedGlyph)
Batch
identifier_name
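The gles2.rs record above lays out one interleaved TextVertex per glyph corner and wires each field to a vertex attribute with its `add_attr!` macro, which accumulates a running byte offset and attribute index. The sketch below reproduces only that offset bookkeeping, away from any GL calls; the attribute list mirrors the record's order and the display names are mine.

```rust
/// Interleaved-attribute offset bookkeeping: each attribute's byte offset is
/// the running total of the sizes before it, and the final total is the
/// vertex stride handed to glVertexAttribPointer (in the record, that stride
/// is size_of::<TextVertex>()).
fn main() {
    let attrs = [
        ("cell xy", 2usize, std::mem::size_of::<i16>()),
        ("glyph xy", 2usize, std::mem::size_of::<i16>()),
        ("uv", 2usize, std::mem::size_of::<f32>()),
        ("fg color + flags", 4usize, std::mem::size_of::<u8>()),
        ("bg color", 4usize, std::mem::size_of::<u8>()),
    ];

    let mut offset = 0usize;
    for (name, count, elem_size) in attrs {
        println!("{name}: offset {offset} bytes");
        offset += count * elem_size;
    }
    println!("stride: {offset} bytes"); // 24 bytes for this layout
}
```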
gles2.rs
0..(BATCH_MAX / 4) as u16 { let index = index * 4; vertex_indices.push(index); vertex_indices.push(index + 1); vertex_indices.push(index + 3); vertex_indices.push(index + 1); vertex_indices.push(index + 2); vertex_indices.push(index + 3); } unsafe { gl::Enable(gl::BLEND); gl::DepthMask(gl::FALSE); gl::GenVertexArrays(1, &mut vao); gl::GenBuffers(1, &mut ebo); gl::GenBuffers(1, &mut vbo); gl::BindVertexArray(vao); // Elements buffer. gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo); gl::BufferData( gl::ELEMENT_ARRAY_BUFFER, (vertex_indices.capacity() * size_of::<u16>()) as isize, vertex_indices.as_ptr() as *const _, gl::STATIC_DRAW, ); // Vertex buffer. gl::BindBuffer(gl::ARRAY_BUFFER, vbo); gl::BufferData( gl::ARRAY_BUFFER, (BATCH_MAX * size_of::<TextVertex>()) as isize, ptr::null(), gl::STREAM_DRAW, ); let mut index = 0; let mut size = 0;
gl::VertexAttribPointer( index, $count, $gl_type, gl::FALSE, size_of::<TextVertex>() as i32, size as *const _, ); gl::EnableVertexAttribArray(index); #[allow(unused_assignments)] { size += $count * size_of::<$type>(); index += 1; } }; } // Cell coords. add_attr!(2, gl::SHORT, i16); // Glyph coords. add_attr!(2, gl::SHORT, i16); // UV. add_attr!(2, gl::FLOAT, u32); // Color and bitmap color. // // These are packed together because of an OpenGL driver issue on macOS, which caused a // `vec3(u8)` text color and a `u8` for glyph color to cause performance regressions. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Background color. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Cleanup. gl::BindVertexArray(0); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); } Ok(Self { program, vao, vbo, ebo, atlas: vec![Atlas::new(ATLAS_SIZE, is_gles_context)], batch: Batch::new(), current_atlas: 0, active_tex: 0, dual_source_blending, }) } } impl Drop for Gles2Renderer { fn drop(&mut self) { unsafe { gl::DeleteBuffers(1, &self.vbo); gl::DeleteBuffers(1, &self.ebo); gl::DeleteVertexArrays(1, &self.vao); } } } impl<'a> TextRenderer<'a> for Gles2Renderer { type RenderApi = RenderApi<'a>; type RenderBatch = Batch; type Shader = TextShaderProgram; fn program(&self) -> &Self::Shader { &self.program } fn with_api<'b: 'a, F, T>(&'b mut self, _: &'b SizeInfo, func: F) -> T where F: FnOnce(Self::RenderApi) -> T, { unsafe { gl::UseProgram(self.program.id()); gl::BindVertexArray(self.vao); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo); gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo); gl::ActiveTexture(gl::TEXTURE0); } let res = func(RenderApi { active_tex: &mut self.active_tex, batch: &mut self.batch, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, program: &mut self.program, dual_source_blending: self.dual_source_blending, }); unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindVertexArray(0); gl::UseProgram(0); } res } fn loader_api(&mut self) -> LoaderApi<'_> { LoaderApi { active_tex: &mut self.active_tex, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, } } } /// Maximum items to be drawn in a batch. /// /// We use the closest number to `u16::MAX` dividable by 4 (amount of vertices we push for a glyph), /// since it's the maximum possible index in `glDrawElements` in GLES2. const BATCH_MAX: usize = (u16::MAX - u16::MAX % 4) as usize; #[derive(Debug)] pub struct Batch { tex: GLuint, vertices: Vec<TextVertex>, } impl Batch { fn new() -> Self { Self { tex: 0, vertices: Vec::with_capacity(BATCH_MAX) } } #[inline] fn len(&self) -> usize { self.vertices.len() } #[inline] fn capacity(&self) -> usize { BATCH_MAX } #[inline] fn size(&self) -> usize { self.len() * size_of::<TextVertex>() } #[inline] fn clear(&mut self) { self.vertices.clear(); } } impl TextRenderBatch for Batch { #[inline] fn tex(&self) -> GLuint { self.tex } #[inline] fn full(&self) -> bool { self.capacity() == self.len() } #[inline] fn is_empty(&self) -> bool { self.len() == 0 } fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, size_info: &SizeInfo) { if self.is_empty() { self.tex = glyph.tex_id; } // Calculate the cell position. let x = cell.point.column.0 as i16 * size_info.cell_width() as i16; let y = cell.point.line as i16 * size_info.cell_height() as i16; // Calculate the glyph position. 
let glyph_x = cell.point.column.0 as i16 * size_info.cell_width() as i16 + glyph.left; let glyph_y = (cell.point.line + 1) as i16 * size_info.cell_height() as i16 - glyph.top; let colored = if glyph.multicolor { RenderingGlyphFlags::COLORED } else { RenderingGlyphFlags::empty() }; let is_wide = if cell.flags.contains(Flags::WIDE_CHAR) { 2 } else { 1 }; let mut vertex = TextVertex { x, y: y + size_info.cell_height() as i16, glyph_x, glyph_y: glyph_y + glyph.height, u: glyph.uv_left, v: glyph.uv_bot + glyph.uv_height, r: cell.fg.r, g: cell.fg.g, b: cell.fg.b, colored, bg_r: cell.bg.r, bg_g: cell.bg.g, bg_b: cell.bg.b, bg_a: (cell.bg_alpha * 255.0) as u8, }; self.vertices.push(vertex); vertex.y = y; vertex.glyph_y = glyph_y; vertex.u = glyph.uv_left; vertex.v = glyph.uv_bot; self.vertices.push(vertex); vertex.x = x + is_wide * size_info.cell_width() as i16; vertex.glyph_x = glyph_x + glyph.width; vertex.u = glyph.uv_left + glyph.uv_width; vertex.v = glyph.uv_bot; self.vertices.push(vertex); vertex.x = x + is_wide * size_info.cell_width() as i16; vertex.y = y + size_info.cell_height() as i16; vertex.glyph_x = glyph_x + glyph.width; vertex.glyph_y = glyph_y + glyph.height; vertex.u = glyph.uv_left + glyph.uv_width; vertex.v = glyph.uv_bot + glyph.uv_height; self.vertices.push(vertex); } } #[derive(Debug)] pub struct RenderApi<'a> { active_tex: &'a mut GLuint, batch: &'a mut Batch, atlas: &'a mut Vec<Atlas>, current_atlas: &'a mut usize, program: &'a mut TextShaderProgram, dual_source_blending: bool, } impl<'a> Drop for RenderApi<'a> { fn drop(&mut self) { if !self.batch.is_empty() { self.render_batch(); } } } impl<'a> LoadGlyph for RenderApi<'a> { fn load_glyph(&mut self, rasterized: &RasterizedGlyph)
macro_rules! add_attr { ($count:expr, $gl_type:expr, $type:ty) => {
random_line_split
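The element buffer in this record is filled with six indices per glyph quad: two triangles that share vertices 1 and 3 of each group of four. A small stand-alone generator for that index pattern; the function name is mine.

```rust
/// Every glyph is a quad of four vertices drawn as two triangles, so each
/// quad contributes the six indices (i, i+1, i+3) and (i+1, i+2, i+3).
fn quad_indices(quad_count: u16) -> Vec<u16> {
    let mut indices = Vec::with_capacity(quad_count as usize * 6);
    for quad in 0..quad_count {
        let i = quad * 4;
        indices.extend_from_slice(&[i, i + 1, i + 3, i + 1, i + 2, i + 3]);
    }
    indices
}

fn main() {
    assert_eq!(quad_indices(2), vec![0, 1, 3, 1, 2, 3, 4, 5, 7, 5, 6, 7]);
}
```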
gles2.rs
0..(BATCH_MAX / 4) as u16 { let index = index * 4; vertex_indices.push(index); vertex_indices.push(index + 1); vertex_indices.push(index + 3); vertex_indices.push(index + 1); vertex_indices.push(index + 2); vertex_indices.push(index + 3); } unsafe { gl::Enable(gl::BLEND); gl::DepthMask(gl::FALSE); gl::GenVertexArrays(1, &mut vao); gl::GenBuffers(1, &mut ebo); gl::GenBuffers(1, &mut vbo); gl::BindVertexArray(vao); // Elements buffer. gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo); gl::BufferData( gl::ELEMENT_ARRAY_BUFFER, (vertex_indices.capacity() * size_of::<u16>()) as isize, vertex_indices.as_ptr() as *const _, gl::STATIC_DRAW, ); // Vertex buffer. gl::BindBuffer(gl::ARRAY_BUFFER, vbo); gl::BufferData( gl::ARRAY_BUFFER, (BATCH_MAX * size_of::<TextVertex>()) as isize, ptr::null(), gl::STREAM_DRAW, ); let mut index = 0; let mut size = 0; macro_rules! add_attr { ($count:expr, $gl_type:expr, $type:ty) => { gl::VertexAttribPointer( index, $count, $gl_type, gl::FALSE, size_of::<TextVertex>() as i32, size as *const _, ); gl::EnableVertexAttribArray(index); #[allow(unused_assignments)] { size += $count * size_of::<$type>(); index += 1; } }; } // Cell coords. add_attr!(2, gl::SHORT, i16); // Glyph coords. add_attr!(2, gl::SHORT, i16); // UV. add_attr!(2, gl::FLOAT, u32); // Color and bitmap color. // // These are packed together because of an OpenGL driver issue on macOS, which caused a // `vec3(u8)` text color and a `u8` for glyph color to cause performance regressions. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Background color. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Cleanup. gl::BindVertexArray(0); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); } Ok(Self { program, vao, vbo, ebo, atlas: vec![Atlas::new(ATLAS_SIZE, is_gles_context)], batch: Batch::new(), current_atlas: 0, active_tex: 0, dual_source_blending, }) } } impl Drop for Gles2Renderer { fn drop(&mut self) { unsafe { gl::DeleteBuffers(1, &self.vbo); gl::DeleteBuffers(1, &self.ebo); gl::DeleteVertexArrays(1, &self.vao); } } } impl<'a> TextRenderer<'a> for Gles2Renderer { type RenderApi = RenderApi<'a>; type RenderBatch = Batch; type Shader = TextShaderProgram; fn program(&self) -> &Self::Shader { &self.program } fn with_api<'b: 'a, F, T>(&'b mut self, _: &'b SizeInfo, func: F) -> T where F: FnOnce(Self::RenderApi) -> T, { unsafe { gl::UseProgram(self.program.id()); gl::BindVertexArray(self.vao); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo); gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo); gl::ActiveTexture(gl::TEXTURE0); } let res = func(RenderApi { active_tex: &mut self.active_tex, batch: &mut self.batch, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, program: &mut self.program, dual_source_blending: self.dual_source_blending, }); unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindVertexArray(0); gl::UseProgram(0); } res } fn loader_api(&mut self) -> LoaderApi<'_> { LoaderApi { active_tex: &mut self.active_tex, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, } } } /// Maximum items to be drawn in a batch. /// /// We use the closest number to `u16::MAX` dividable by 4 (amount of vertices we push for a glyph), /// since it's the maximum possible index in `glDrawElements` in GLES2. 
const BATCH_MAX: usize = (u16::MAX - u16::MAX % 4) as usize; #[derive(Debug)] pub struct Batch { tex: GLuint, vertices: Vec<TextVertex>, } impl Batch { fn new() -> Self { Self { tex: 0, vertices: Vec::with_capacity(BATCH_MAX) } } #[inline] fn len(&self) -> usize { self.vertices.len() } #[inline] fn capacity(&self) -> usize { BATCH_MAX } #[inline] fn size(&self) -> usize { self.len() * size_of::<TextVertex>() } #[inline] fn clear(&mut self) { self.vertices.clear(); } } impl TextRenderBatch for Batch { #[inline] fn tex(&self) -> GLuint { self.tex } #[inline] fn full(&self) -> bool { self.capacity() == self.len() } #[inline] fn is_empty(&self) -> bool { self.len() == 0 } fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, size_info: &SizeInfo) { if self.is_empty() { self.tex = glyph.tex_id; } // Calculate the cell position. let x = cell.point.column.0 as i16 * size_info.cell_width() as i16; let y = cell.point.line as i16 * size_info.cell_height() as i16; // Calculate the glyph position. let glyph_x = cell.point.column.0 as i16 * size_info.cell_width() as i16 + glyph.left; let glyph_y = (cell.point.line + 1) as i16 * size_info.cell_height() as i16 - glyph.top; let colored = if glyph.multicolor { RenderingGlyphFlags::COLORED } else
; let is_wide = if cell.flags.contains(Flags::WIDE_CHAR) { 2 } else { 1 }; let mut vertex = TextVertex { x, y: y + size_info.cell_height() as i16, glyph_x, glyph_y: glyph_y + glyph.height, u: glyph.uv_left, v: glyph.uv_bot + glyph.uv_height, r: cell.fg.r, g: cell.fg.g, b: cell.fg.b, colored, bg_r: cell.bg.r, bg_g: cell.bg.g, bg_b: cell.bg.b, bg_a: (cell.bg_alpha * 255.0) as u8, }; self.vertices.push(vertex); vertex.y = y; vertex.glyph_y = glyph_y; vertex.u = glyph.uv_left; vertex.v = glyph.uv_bot; self.vertices.push(vertex); vertex.x = x + is_wide * size_info.cell_width() as i16; vertex.glyph_x = glyph_x + glyph.width; vertex.u = glyph.uv_left + glyph.uv_width; vertex.v = glyph.uv_bot; self.vertices.push(vertex); vertex.x = x + is_wide * size_info.cell_width() as i16; vertex.y = y + size_info.cell_height() as i16; vertex.glyph_x = glyph_x + glyph.width; vertex.glyph_y = glyph_y + glyph.height; vertex.u = glyph.uv_left + glyph.uv_width; vertex.v = glyph.uv_bot + glyph.uv_height; self.vertices.push(vertex); } } #[derive(Debug)] pub struct RenderApi<'a> { active_tex: &'a mut GLuint, batch: &'a mut Batch, atlas: &'a mut Vec<Atlas>, current_atlas: &'a mut usize, program: &'a mut TextShaderProgram, dual_source_blending: bool, } impl<'a> Drop for RenderApi<'a> { fn drop(&mut self) { if !self.batch.is_empty() { self.render_batch(); } } } impl<'a> LoadGlyph for RenderApi<'a> { fn load_glyph(&mut self, rasterized: &RasterizedGlyph
{ RenderingGlyphFlags::empty() }
conditional_block
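`add_item` in this record places each glyph by first scaling the cell's (column, line) by the cell size in pixels, then offsetting by the rasterized glyph's left and top bearings, with the vertical anchor taken from the next line's boundary. A hedged sketch of just that arithmetic; the function names and the example sizes are mine.

```rust
/// A cell's pixel origin is its (column, line) scaled by the cell size.
fn cell_origin(column: i16, line: i16, cell_w: i16, cell_h: i16) -> (i16, i16) {
    (column * cell_w, line * cell_h)
}

/// The glyph's top-left applies the left bearing, and anchors vertically at
/// the next line's boundary minus the top bearing, as in the record.
fn glyph_top_left(
    column: i16, line: i16, cell_w: i16, cell_h: i16,
    bearing_left: i16, bearing_top: i16,
) -> (i16, i16) {
    let x = column * cell_w + bearing_left;
    let y = (line + 1) * cell_h - bearing_top;
    (x, y)
}

fn main() {
    // Assumed 10x20 px cells: column 3, line 2, bearings (1, 15).
    assert_eq!(cell_origin(3, 2, 10, 20), (30, 40));
    assert_eq!(glyph_top_left(3, 2, 10, 20, 1, 15), (31, 45));
}
```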
gles2.rs
0..(BATCH_MAX / 4) as u16 { let index = index * 4; vertex_indices.push(index); vertex_indices.push(index + 1); vertex_indices.push(index + 3); vertex_indices.push(index + 1); vertex_indices.push(index + 2); vertex_indices.push(index + 3); } unsafe { gl::Enable(gl::BLEND); gl::DepthMask(gl::FALSE); gl::GenVertexArrays(1, &mut vao); gl::GenBuffers(1, &mut ebo); gl::GenBuffers(1, &mut vbo); gl::BindVertexArray(vao); // Elements buffer. gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo); gl::BufferData( gl::ELEMENT_ARRAY_BUFFER, (vertex_indices.capacity() * size_of::<u16>()) as isize, vertex_indices.as_ptr() as *const _, gl::STATIC_DRAW, ); // Vertex buffer. gl::BindBuffer(gl::ARRAY_BUFFER, vbo); gl::BufferData( gl::ARRAY_BUFFER, (BATCH_MAX * size_of::<TextVertex>()) as isize, ptr::null(), gl::STREAM_DRAW, ); let mut index = 0; let mut size = 0; macro_rules! add_attr { ($count:expr, $gl_type:expr, $type:ty) => { gl::VertexAttribPointer( index, $count, $gl_type, gl::FALSE, size_of::<TextVertex>() as i32, size as *const _, ); gl::EnableVertexAttribArray(index); #[allow(unused_assignments)] { size += $count * size_of::<$type>(); index += 1; } }; } // Cell coords. add_attr!(2, gl::SHORT, i16); // Glyph coords. add_attr!(2, gl::SHORT, i16); // UV. add_attr!(2, gl::FLOAT, u32); // Color and bitmap color. // // These are packed together because of an OpenGL driver issue on macOS, which caused a // `vec3(u8)` text color and a `u8` for glyph color to cause performance regressions. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Background color. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Cleanup. gl::BindVertexArray(0); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); } Ok(Self { program, vao, vbo, ebo, atlas: vec![Atlas::new(ATLAS_SIZE, is_gles_context)], batch: Batch::new(), current_atlas: 0, active_tex: 0, dual_source_blending, }) } } impl Drop for Gles2Renderer { fn drop(&mut self) { unsafe { gl::DeleteBuffers(1, &self.vbo); gl::DeleteBuffers(1, &self.ebo); gl::DeleteVertexArrays(1, &self.vao); } } } impl<'a> TextRenderer<'a> for Gles2Renderer { type RenderApi = RenderApi<'a>; type RenderBatch = Batch; type Shader = TextShaderProgram; fn program(&self) -> &Self::Shader { &self.program } fn with_api<'b: 'a, F, T>(&'b mut self, _: &'b SizeInfo, func: F) -> T where F: FnOnce(Self::RenderApi) -> T, { unsafe { gl::UseProgram(self.program.id()); gl::BindVertexArray(self.vao); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo); gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo); gl::ActiveTexture(gl::TEXTURE0); } let res = func(RenderApi { active_tex: &mut self.active_tex, batch: &mut self.batch, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, program: &mut self.program, dual_source_blending: self.dual_source_blending, }); unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindVertexArray(0); gl::UseProgram(0); } res } fn loader_api(&mut self) -> LoaderApi<'_> { LoaderApi { active_tex: &mut self.active_tex, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, } } } /// Maximum items to be drawn in a batch. /// /// We use the closest number to `u16::MAX` dividable by 4 (amount of vertices we push for a glyph), /// since it's the maximum possible index in `glDrawElements` in GLES2. 
const BATCH_MAX: usize = (u16::MAX - u16::MAX % 4) as usize; #[derive(Debug)] pub struct Batch { tex: GLuint, vertices: Vec<TextVertex>, } impl Batch { fn new() -> Self { Self { tex: 0, vertices: Vec::with_capacity(BATCH_MAX) } } #[inline] fn len(&self) -> usize { self.vertices.len() } #[inline] fn capacity(&self) -> usize { BATCH_MAX } #[inline] fn size(&self) -> usize { self.len() * size_of::<TextVertex>() } #[inline] fn clear(&mut self) { self.vertices.clear(); } } impl TextRenderBatch for Batch { #[inline] fn tex(&self) -> GLuint { self.tex } #[inline] fn full(&self) -> bool { self.capacity() == self.len() } #[inline] fn is_empty(&self) -> bool { self.len() == 0 } fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, size_info: &SizeInfo) { if self.is_empty() { self.tex = glyph.tex_id; } // Calculate the cell position. let x = cell.point.column.0 as i16 * size_info.cell_width() as i16; let y = cell.point.line as i16 * size_info.cell_height() as i16; // Calculate the glyph position. let glyph_x = cell.point.column.0 as i16 * size_info.cell_width() as i16 + glyph.left; let glyph_y = (cell.point.line + 1) as i16 * size_info.cell_height() as i16 - glyph.top; let colored = if glyph.multicolor { RenderingGlyphFlags::COLORED } else { RenderingGlyphFlags::empty() }; let is_wide = if cell.flags.contains(Flags::WIDE_CHAR) { 2 } else { 1 }; let mut vertex = TextVertex { x, y: y + size_info.cell_height() as i16, glyph_x, glyph_y: glyph_y + glyph.height, u: glyph.uv_left, v: glyph.uv_bot + glyph.uv_height, r: cell.fg.r, g: cell.fg.g, b: cell.fg.b, colored, bg_r: cell.bg.r, bg_g: cell.bg.g, bg_b: cell.bg.b, bg_a: (cell.bg_alpha * 255.0) as u8, }; self.vertices.push(vertex); vertex.y = y; vertex.glyph_y = glyph_y; vertex.u = glyph.uv_left; vertex.v = glyph.uv_bot; self.vertices.push(vertex); vertex.x = x + is_wide * size_info.cell_width() as i16; vertex.glyph_x = glyph_x + glyph.width; vertex.u = glyph.uv_left + glyph.uv_width; vertex.v = glyph.uv_bot; self.vertices.push(vertex); vertex.x = x + is_wide * size_info.cell_width() as i16; vertex.y = y + size_info.cell_height() as i16; vertex.glyph_x = glyph_x + glyph.width; vertex.glyph_y = glyph_y + glyph.height; vertex.u = glyph.uv_left + glyph.uv_width; vertex.v = glyph.uv_bot + glyph.uv_height; self.vertices.push(vertex); } } #[derive(Debug)] pub struct RenderApi<'a> { active_tex: &'a mut GLuint, batch: &'a mut Batch, atlas: &'a mut Vec<Atlas>, current_atlas: &'a mut usize, program: &'a mut TextShaderProgram, dual_source_blending: bool, } impl<'a> Drop for RenderApi<'a> { fn drop(&mut self)
} impl<'a> LoadGlyph for RenderApi<'a> { fn load_glyph(&mut self, rasterized: &RasterizedGlyph
{ if !self.batch.is_empty() { self.render_batch(); } }
identifier_body
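The `Drop` impl for RenderApi in this record flushes any partially filled batch when the API guard goes out of scope, so queued glyphs are never silently lost. A minimal illustration of that flush-on-drop pattern; `BatchGuard` and its `Vec<u32>` payload are stand-ins, not the record's types.

```rust
/// Guard that flushes whatever is still pending when it is dropped.
struct BatchGuard {
    pending: Vec<u32>,
}

impl BatchGuard {
    fn flush(&mut self) {
        // Stand-in for the real `render_batch()` GPU submission.
        println!("flushing {} item(s)", self.pending.len());
        self.pending.clear();
    }
}

impl Drop for BatchGuard {
    fn drop(&mut self) {
        if !self.pending.is_empty() {
            self.flush();
        }
    }
}

fn main() {
    let mut guard = BatchGuard { pending: vec![1, 2, 3] };
    guard.pending.push(4);
    // Dropping `guard` at the end of main flushes the four queued items.
}
```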
state.rs
Op, FrontFace}; use core::target::{ColorValue, Rect, Stencil}; use gl; pub fn bind_raster_method(gl: &gl::Gl, method: s::RasterMethod, offset: Option<s::Offset>) { let (gl_draw, gl_offset) = match method { RasterMethod::Point => (gl::POINT, gl::POLYGON_OFFSET_POINT), RasterMethod::Line(width) => { unsafe { gl.LineWidth(width as gl::types::GLfloat) }; (gl::LINE, gl::POLYGON_OFFSET_LINE) }, RasterMethod::Fill => (gl::FILL, gl::POLYGON_OFFSET_FILL), }; unsafe { gl.PolygonMode(gl::FRONT_AND_BACK, gl_draw) }; match offset { Some(Offset(factor, units)) => unsafe { gl.Enable(gl_offset); gl.PolygonOffset(factor as gl::types::GLfloat, units as gl::types::GLfloat); }, None => unsafe { gl.Disable(gl_offset) }, } } pub fn bind_rasterizer(gl: &gl::Gl, r: &s::Rasterizer, is_embedded: bool) { unsafe { gl.FrontFace(match r.front_face { FrontFace::Clockwise => gl::CW, FrontFace::CounterClockwise => gl::CCW, }) }; match r.cull_face { CullFace::Nothing => unsafe { gl.Disable(gl::CULL_FACE) }, CullFace::Front => { unsafe { gl.Enable(gl::CULL_FACE); gl.CullFace(gl::FRONT); }}, CullFace::Back => { unsafe { gl.Enable(gl::CULL_FACE); gl.CullFace(gl::BACK); }} } if !is_embedded { bind_raster_method(gl, r.method, r.offset); } match r.samples { Some(_) => unsafe { gl.Enable(gl::MULTISAMPLE) }, None => unsafe { gl.Disable(gl::MULTISAMPLE) }, } } pub fn bind_draw_color_buffers(gl: &gl::Gl, mask: usize) { let attachments = [ gl::COLOR_ATTACHMENT0, gl::COLOR_ATTACHMENT1, gl::COLOR_ATTACHMENT2, gl::COLOR_ATTACHMENT3, gl::COLOR_ATTACHMENT4, gl::COLOR_ATTACHMENT5, gl::COLOR_ATTACHMENT6, gl::COLOR_ATTACHMENT7, gl::COLOR_ATTACHMENT8, gl::COLOR_ATTACHMENT9, gl::COLOR_ATTACHMENT10, gl::COLOR_ATTACHMENT11, gl::COLOR_ATTACHMENT12, gl::COLOR_ATTACHMENT13, gl::COLOR_ATTACHMENT14, gl::COLOR_ATTACHMENT15]; let mut targets = [0; MAX_COLOR_TARGETS]; let mut count = 0; let mut i = 0; while mask >> i != 0 { if mask & (1<<i) != 0 { targets[count] = attachments[i]; count += 1; } i += 1; } unsafe { gl.DrawBuffers(count as gl::types::GLint, targets.as_ptr()) }; } pub fn bind_viewport(gl: &gl::Gl, rect: Rect) { unsafe { gl.Viewport( rect.x as gl::types::GLint, rect.y as gl::types::GLint, rect.w as gl::types::GLint, rect.h as gl::types::GLint )}; } pub fn bind_scissor(gl: &gl::Gl, rect: Option<Rect>) { match rect { Some(r) => { unsafe { gl.Enable(gl::SCISSOR_TEST); gl.Scissor( r.x as gl::types::GLint, r.y as gl::types::GLint, r.w as gl::types::GLint, r.h as gl::types::GLint ); }}, None => unsafe { gl.Disable(gl::SCISSOR_TEST) }, } } pub fn map_comparison(cmp: Comparison) -> gl::types::GLenum { match cmp { Comparison::Never => gl::NEVER, Comparison::Less => gl::LESS, Comparison::LessEqual => gl::LEQUAL, Comparison::Equal => gl::EQUAL, Comparison::GreaterEqual => gl::GEQUAL, Comparison::Greater => gl::GREATER, Comparison::NotEqual => gl::NOTEQUAL, Comparison::Always => gl::ALWAYS, } } pub fn bind_depth(gl: &gl::Gl, depth: &Option<s::Depth>) { match depth { &Some(ref d) => { unsafe { gl.Enable(gl::DEPTH_TEST); gl.DepthFunc(map_comparison(d.fun)); gl.DepthMask(if d.write {gl::TRUE} else {gl::FALSE}); }}, &None => unsafe { gl.Disable(gl::DEPTH_TEST) }, } } fn map_operation(op: StencilOp) -> gl::types::GLenum { match op { StencilOp::Keep => gl::KEEP, StencilOp::Zero => gl::ZERO, StencilOp::Replace => gl::REPLACE, StencilOp::IncrementClamp=> gl::INCR, StencilOp::IncrementWrap => gl::INCR_WRAP, StencilOp::DecrementClamp=> gl::DECR, StencilOp::DecrementWrap => gl::DECR_WRAP, StencilOp::Invert => gl::INVERT, } } pub fn bind_stencil(gl: &gl::Gl, 
stencil: &Option<s::Stencil>, refs: (Stencil, Stencil), cull: s::CullFace) { fn bind_side(gl: &gl::Gl, face: gl::types::GLenum, side: s::StencilSide, ref_value: Stencil) { unsafe { gl.StencilFuncSeparate(face, map_comparison(side.fun), ref_value as gl::types::GLint, side.mask_read as gl::types::GLuint); gl.StencilMaskSeparate(face, side.mask_write as gl::types::GLuint); gl.StencilOpSeparate(face, map_operation(side.op_fail), map_operation(side.op_depth_fail), map_operation(side.op_pass)); }} match stencil { &Some(ref s) => { unsafe { gl.Enable(gl::STENCIL_TEST) }; if cull != CullFace::Front { bind_side(gl, gl::FRONT, s.front, refs.0); } if cull != CullFace::Back { bind_side(gl, gl::BACK, s.back, refs.1); } } &None => unsafe { gl.Disable(gl::STENCIL_TEST) }, } } fn map_equation(eq: Equation) -> gl::types::GLenum { match eq { Equation::Add => gl::FUNC_ADD, Equation::Sub => gl::FUNC_SUBTRACT, Equation::RevSub => gl::FUNC_REVERSE_SUBTRACT, Equation::Min => gl::MIN, Equation::Max => gl::MAX, } } fn map_factor(factor: s::Factor) -> gl::types::GLenum { match factor { s::Factor::Zero => gl::ZERO, s::Factor::One => gl::ONE, s::Factor::ZeroPlus(BlendValue::SourceColor) => gl::SRC_COLOR, s::Factor::OneMinus(BlendValue::SourceColor) => gl::ONE_MINUS_SRC_COLOR, s::Factor::ZeroPlus(BlendValue::SourceAlpha) => gl::SRC_ALPHA, s::Factor::OneMinus(BlendValue::SourceAlpha) => gl::ONE_MINUS_SRC_ALPHA, s::Factor::ZeroPlus(BlendValue::DestColor) => gl::DST_COLOR, s::Factor::OneMinus(BlendValue::DestColor) => gl::ONE_MINUS_DST_COLOR, s::Factor::ZeroPlus(BlendValue::DestAlpha) => gl::DST_ALPHA, s::Factor::OneMinus(BlendValue::DestAlpha) => gl::ONE_MINUS_DST_ALPHA, s::Factor::ZeroPlus(BlendValue::ConstColor) => gl::CONSTANT_COLOR, s::Factor::OneMinus(BlendValue::ConstColor) => gl::ONE_MINUS_CONSTANT_COLOR, s::Factor::ZeroPlus(BlendValue::ConstAlpha) => gl::CONSTANT_ALPHA, s::Factor::OneMinus(BlendValue::ConstAlpha) => gl::ONE_MINUS_CONSTANT_ALPHA, s::Factor::SourceAlphaSaturated => gl::SRC_ALPHA_SATURATE, } } pub fn bind_blend(gl: &gl::Gl, color: s::Color) { match color.blend { Some(b) => unsafe { gl.Enable(gl::BLEND); gl.BlendEquationSeparate( map_equation(b.color.equation), map_equation(b.alpha.equation) ); gl.BlendFuncSeparate( map_factor(b.color.source), map_factor(b.color.destination), map_factor(b.alpha.source), map_factor(b.alpha.destination) ); }, None => unsafe { gl.Disable(gl::BLEND); }, }; unsafe { gl.ColorMask( if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE}, if (color.mask & s::GREEN).is_empty()
else {gl::TRUE}, if
{gl::FALSE}
conditional_block
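The state.rs and TestGAN.py records that follow each split one source file into a prefix, a middle, and a suffix, with fim_type describing how the middle span was chosen (conditional_block, identifier_name, identifier_body, random_line_split). As a reading aid, here is a minimal Python sketch of how such a record reassembles into the original file; it assumes the prefix/middle/suffix/fim_type column names from the schema at the top of this file and is illustrative only, not part of the dataset.

# Sketch: rebuild the original source text from one FIM record.
# Assumes a dict-like record exposing the prefix/middle/suffix columns;
# fim_type is informational only and not needed for reconstruction.
def reassemble_fim_record(record):
    return record["prefix"] + record["middle"] + record["suffix"]

# Hypothetical usage, mirroring the conditional_block record above:
# source = reassemble_fim_record({
#     "prefix": "if (color.mask & s::GREEN).is_empty() ",
#     "middle": "{gl::FALSE}",
#     "suffix": " else {gl::TRUE}",
#     "fim_type": "conditional_block",
# })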
state.rs
Op, FrontFace}; use core::target::{ColorValue, Rect, Stencil}; use gl; pub fn bind_raster_method(gl: &gl::Gl, method: s::RasterMethod, offset: Option<s::Offset>) { let (gl_draw, gl_offset) = match method { RasterMethod::Point => (gl::POINT, gl::POLYGON_OFFSET_POINT), RasterMethod::Line(width) => { unsafe { gl.LineWidth(width as gl::types::GLfloat) }; (gl::LINE, gl::POLYGON_OFFSET_LINE) }, RasterMethod::Fill => (gl::FILL, gl::POLYGON_OFFSET_FILL), }; unsafe { gl.PolygonMode(gl::FRONT_AND_BACK, gl_draw) }; match offset { Some(Offset(factor, units)) => unsafe { gl.Enable(gl_offset); gl.PolygonOffset(factor as gl::types::GLfloat, units as gl::types::GLfloat); }, None => unsafe { gl.Disable(gl_offset) }, } } pub fn bind_rasterizer(gl: &gl::Gl, r: &s::Rasterizer, is_embedded: bool) { unsafe { gl.FrontFace(match r.front_face { FrontFace::Clockwise => gl::CW, FrontFace::CounterClockwise => gl::CCW, }) }; match r.cull_face { CullFace::Nothing => unsafe { gl.Disable(gl::CULL_FACE) }, CullFace::Front => { unsafe { gl.Enable(gl::CULL_FACE); gl.CullFace(gl::FRONT); }}, CullFace::Back => { unsafe { gl.Enable(gl::CULL_FACE); gl.CullFace(gl::BACK); }} } if !is_embedded { bind_raster_method(gl, r.method, r.offset); } match r.samples { Some(_) => unsafe { gl.Enable(gl::MULTISAMPLE) }, None => unsafe { gl.Disable(gl::MULTISAMPLE) }, } } pub fn bind_draw_color_buffers(gl: &gl::Gl, mask: usize) { let attachments = [ gl::COLOR_ATTACHMENT0, gl::COLOR_ATTACHMENT1, gl::COLOR_ATTACHMENT2, gl::COLOR_ATTACHMENT3, gl::COLOR_ATTACHMENT4, gl::COLOR_ATTACHMENT5, gl::COLOR_ATTACHMENT6, gl::COLOR_ATTACHMENT7, gl::COLOR_ATTACHMENT8, gl::COLOR_ATTACHMENT9, gl::COLOR_ATTACHMENT10, gl::COLOR_ATTACHMENT11, gl::COLOR_ATTACHMENT12, gl::COLOR_ATTACHMENT13, gl::COLOR_ATTACHMENT14, gl::COLOR_ATTACHMENT15]; let mut targets = [0; MAX_COLOR_TARGETS]; let mut count = 0; let mut i = 0; while mask >> i != 0 { if mask & (1<<i) != 0 { targets[count] = attachments[i]; count += 1; } i += 1; } unsafe { gl.DrawBuffers(count as gl::types::GLint, targets.as_ptr()) }; } pub fn bind_viewport(gl: &gl::Gl, rect: Rect) { unsafe { gl.Viewport( rect.x as gl::types::GLint, rect.y as gl::types::GLint, rect.w as gl::types::GLint, rect.h as gl::types::GLint )}; } pub fn bind_scissor(gl: &gl::Gl, rect: Option<Rect>) { match rect { Some(r) => { unsafe { gl.Enable(gl::SCISSOR_TEST); gl.Scissor( r.x as gl::types::GLint, r.y as gl::types::GLint, r.w as gl::types::GLint, r.h as gl::types::GLint ); }}, None => unsafe { gl.Disable(gl::SCISSOR_TEST) }, } } pub fn map_comparison(cmp: Comparison) -> gl::types::GLenum { match cmp { Comparison::Never => gl::NEVER, Comparison::Less => gl::LESS, Comparison::LessEqual => gl::LEQUAL, Comparison::Equal => gl::EQUAL, Comparison::GreaterEqual => gl::GEQUAL, Comparison::Greater => gl::GREATER, Comparison::NotEqual => gl::NOTEQUAL, Comparison::Always => gl::ALWAYS, } } pub fn bind_depth(gl: &gl::Gl, depth: &Option<s::Depth>) { match depth { &Some(ref d) => { unsafe { gl.Enable(gl::DEPTH_TEST); gl.DepthFunc(map_comparison(d.fun)); gl.DepthMask(if d.write {gl::TRUE} else {gl::FALSE}); }}, &None => unsafe { gl.Disable(gl::DEPTH_TEST) }, } } fn map_operation(op: StencilOp) -> gl::types::GLenum { match op { StencilOp::Keep => gl::KEEP, StencilOp::Zero => gl::ZERO, StencilOp::Replace => gl::REPLACE, StencilOp::IncrementClamp=> gl::INCR, StencilOp::IncrementWrap => gl::INCR_WRAP, StencilOp::DecrementClamp=> gl::DECR, StencilOp::DecrementWrap => gl::DECR_WRAP, StencilOp::Invert => gl::INVERT, } } pub fn bind_stencil(gl: &gl::Gl, 
stencil: &Option<s::Stencil>, refs: (Stencil, Stencil), cull: s::CullFace) { fn bind_side(gl: &gl::Gl, face: gl::types::GLenum, side: s::StencilSide, ref_value: Stencil) { unsafe { gl.StencilFuncSeparate(face, map_comparison(side.fun), ref_value as gl::types::GLint, side.mask_read as gl::types::GLuint); gl.StencilMaskSeparate(face, side.mask_write as gl::types::GLuint); gl.StencilOpSeparate(face, map_operation(side.op_fail), map_operation(side.op_depth_fail), map_operation(side.op_pass)); }} match stencil { &Some(ref s) => { unsafe { gl.Enable(gl::STENCIL_TEST) }; if cull != CullFace::Front { bind_side(gl, gl::FRONT, s.front, refs.0); } if cull != CullFace::Back { bind_side(gl, gl::BACK, s.back, refs.1); } } &None => unsafe { gl.Disable(gl::STENCIL_TEST) }, } } fn map_equation(eq: Equation) -> gl::types::GLenum { match eq { Equation::Add => gl::FUNC_ADD, Equation::Sub => gl::FUNC_SUBTRACT, Equation::RevSub => gl::FUNC_REVERSE_SUBTRACT, Equation::Min => gl::MIN, Equation::Max => gl::MAX, } } fn map_factor(factor: s::Factor) -> gl::types::GLenum { match factor { s::Factor::Zero => gl::ZERO, s::Factor::One => gl::ONE, s::Factor::ZeroPlus(BlendValue::SourceColor) => gl::SRC_COLOR, s::Factor::OneMinus(BlendValue::SourceColor) => gl::ONE_MINUS_SRC_COLOR, s::Factor::ZeroPlus(BlendValue::SourceAlpha) => gl::SRC_ALPHA, s::Factor::OneMinus(BlendValue::SourceAlpha) => gl::ONE_MINUS_SRC_ALPHA, s::Factor::ZeroPlus(BlendValue::DestColor) => gl::DST_COLOR, s::Factor::OneMinus(BlendValue::DestColor) => gl::ONE_MINUS_DST_COLOR, s::Factor::ZeroPlus(BlendValue::DestAlpha) => gl::DST_ALPHA, s::Factor::OneMinus(BlendValue::DestAlpha) => gl::ONE_MINUS_DST_ALPHA, s::Factor::ZeroPlus(BlendValue::ConstColor) => gl::CONSTANT_COLOR, s::Factor::OneMinus(BlendValue::ConstColor) => gl::ONE_MINUS_CONSTANT_COLOR, s::Factor::ZeroPlus(BlendValue::ConstAlpha) => gl::CONSTANT_ALPHA, s::Factor::OneMinus(BlendValue::ConstAlpha) => gl::ONE_MINUS_CONSTANT_ALPHA, s::Factor::SourceAlphaSaturated => gl::SRC_ALPHA_SATURATE, } } pub fn
(gl: &gl::Gl, color: s::Color) { match color.blend { Some(b) => unsafe { gl.Enable(gl::BLEND); gl.BlendEquationSeparate( map_equation(b.color.equation), map_equation(b.alpha.equation) ); gl.BlendFuncSeparate( map_factor(b.color.source), map_factor(b.color.destination), map_factor(b.alpha.source), map_factor(b.alpha.destination) ); }, None => unsafe { gl.Disable(gl::BLEND); }, }; unsafe { gl.ColorMask( if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE}, if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE}, if
bind_blend
identifier_name
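The middle of the record above is the identifier bind_blend, whose body maps a blend description onto GL calls (BlendEquationSeparate, BlendFuncSeparate, ColorMask). Below is a rough cross-language sketch of that sequence using PyOpenGL; PyOpenGL is an assumption made only for illustration (the Rust crate uses its own generated gl bindings), the blend factors are fixed for brevity, and the code needs an active GL context to actually run.

# Sketch only: roughly what bind_blend does, expressed with PyOpenGL.
# The Rust version derives equations/factors from a state description via
# map_equation/map_factor; here they are hard-coded to standard alpha blending.
from OpenGL.GL import (
    glEnable, glDisable, glBlendEquationSeparate, glBlendFuncSeparate,
    glColorMask, GL_BLEND, GL_FUNC_ADD, GL_SRC_ALPHA,
    GL_ONE_MINUS_SRC_ALPHA, GL_TRUE, GL_FALSE,
)

def bind_blend(enabled, color_mask=(True, True, True, True)):
    if enabled:
        glEnable(GL_BLEND)
        glBlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD)
        glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA,
                            GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    else:
        glDisable(GL_BLEND)
    # Per-channel write mask, mirroring the ColorMask call at the end of the
    # Rust function (the alpha channel handling is elided in the record above).
    glColorMask(*(GL_TRUE if m else GL_FALSE for m in color_mask))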
state.rs
Op, FrontFace}; use core::target::{ColorValue, Rect, Stencil}; use gl; pub fn bind_raster_method(gl: &gl::Gl, method: s::RasterMethod, offset: Option<s::Offset>) { let (gl_draw, gl_offset) = match method { RasterMethod::Point => (gl::POINT, gl::POLYGON_OFFSET_POINT), RasterMethod::Line(width) => { unsafe { gl.LineWidth(width as gl::types::GLfloat) }; (gl::LINE, gl::POLYGON_OFFSET_LINE) }, RasterMethod::Fill => (gl::FILL, gl::POLYGON_OFFSET_FILL), }; unsafe { gl.PolygonMode(gl::FRONT_AND_BACK, gl_draw) }; match offset { Some(Offset(factor, units)) => unsafe { gl.Enable(gl_offset); gl.PolygonOffset(factor as gl::types::GLfloat, units as gl::types::GLfloat); }, None => unsafe { gl.Disable(gl_offset)
}, } } pub fn bind_rasterizer(gl: &gl::Gl, r: &s::Rasterizer, is_embedded: bool) { unsafe { gl.FrontFace(match r.front_face { FrontFace::Clockwise => gl::CW, FrontFace::CounterClockwise => gl::CCW, }) }; match r.cull_face { CullFace::Nothing => unsafe { gl.Disable(gl::CULL_FACE) }, CullFace::Front => { unsafe { gl.Enable(gl::CULL_FACE); gl.CullFace(gl::FRONT); }}, CullFace::Back => { unsafe { gl.Enable(gl::CULL_FACE); gl.CullFace(gl::BACK); }} } if !is_embedded { bind_raster_method(gl, r.method, r.offset); } match r.samples { Some(_) => unsafe { gl.Enable(gl::MULTISAMPLE) }, None => unsafe { gl.Disable(gl::MULTISAMPLE) }, } } pub fn bind_draw_color_buffers(gl: &gl::Gl, mask: usize) { let attachments = [ gl::COLOR_ATTACHMENT0, gl::COLOR_ATTACHMENT1, gl::COLOR_ATTACHMENT2, gl::COLOR_ATTACHMENT3, gl::COLOR_ATTACHMENT4, gl::COLOR_ATTACHMENT5, gl::COLOR_ATTACHMENT6, gl::COLOR_ATTACHMENT7, gl::COLOR_ATTACHMENT8, gl::COLOR_ATTACHMENT9, gl::COLOR_ATTACHMENT10, gl::COLOR_ATTACHMENT11, gl::COLOR_ATTACHMENT12, gl::COLOR_ATTACHMENT13, gl::COLOR_ATTACHMENT14, gl::COLOR_ATTACHMENT15]; let mut targets = [0; MAX_COLOR_TARGETS]; let mut count = 0; let mut i = 0; while mask >> i != 0 { if mask & (1<<i) != 0 { targets[count] = attachments[i]; count += 1; } i += 1; } unsafe { gl.DrawBuffers(count as gl::types::GLint, targets.as_ptr()) }; } pub fn bind_viewport(gl: &gl::Gl, rect: Rect) { unsafe { gl.Viewport( rect.x as gl::types::GLint, rect.y as gl::types::GLint, rect.w as gl::types::GLint, rect.h as gl::types::GLint )}; } pub fn bind_scissor(gl: &gl::Gl, rect: Option<Rect>) { match rect { Some(r) => { unsafe { gl.Enable(gl::SCISSOR_TEST); gl.Scissor( r.x as gl::types::GLint, r.y as gl::types::GLint, r.w as gl::types::GLint, r.h as gl::types::GLint ); }}, None => unsafe { gl.Disable(gl::SCISSOR_TEST) }, } } pub fn map_comparison(cmp: Comparison) -> gl::types::GLenum { match cmp { Comparison::Never => gl::NEVER, Comparison::Less => gl::LESS, Comparison::LessEqual => gl::LEQUAL, Comparison::Equal => gl::EQUAL, Comparison::GreaterEqual => gl::GEQUAL, Comparison::Greater => gl::GREATER, Comparison::NotEqual => gl::NOTEQUAL, Comparison::Always => gl::ALWAYS, } } pub fn bind_depth(gl: &gl::Gl, depth: &Option<s::Depth>) { match depth { &Some(ref d) => { unsafe { gl.Enable(gl::DEPTH_TEST); gl.DepthFunc(map_comparison(d.fun)); gl.DepthMask(if d.write {gl::TRUE} else {gl::FALSE}); }}, &None => unsafe { gl.Disable(gl::DEPTH_TEST) }, } } fn map_operation(op: StencilOp) -> gl::types::GLenum { match op { StencilOp::Keep => gl::KEEP, StencilOp::Zero => gl::ZERO, StencilOp::Replace => gl::REPLACE, StencilOp::IncrementClamp=> gl::INCR, StencilOp::IncrementWrap => gl::INCR_WRAP, StencilOp::DecrementClamp=> gl::DECR, StencilOp::DecrementWrap => gl::DECR_WRAP, StencilOp::Invert => gl::INVERT, } } pub fn bind_stencil(gl: &gl::Gl, stencil: &Option<s::Stencil>, refs: (Stencil, Stencil), cull: s::CullFace) { fn bind_side(gl: &gl::Gl, face: gl::types::GLenum, side: s::StencilSide, ref_value: Stencil) { unsafe { gl.StencilFuncSeparate(face, map_comparison(side.fun), ref_value as gl::types::GLint, side.mask_read as gl::types::GLuint); gl.StencilMaskSeparate(face, side.mask_write as gl::types::GLuint); gl.StencilOpSeparate(face, map_operation(side.op_fail), map_operation(side.op_depth_fail), map_operation(side.op_pass)); }} match stencil { &Some(ref s) => { unsafe { gl.Enable(gl::STENCIL_TEST) }; if cull != CullFace::Front { bind_side(gl, gl::FRONT, s.front, refs.0); } if cull != CullFace::Back { bind_side(gl, gl::BACK, s.back, refs.1); 
} } &None => unsafe { gl.Disable(gl::STENCIL_TEST) }, } } fn map_equation(eq: Equation) -> gl::types::GLenum { match eq { Equation::Add => gl::FUNC_ADD, Equation::Sub => gl::FUNC_SUBTRACT, Equation::RevSub => gl::FUNC_REVERSE_SUBTRACT, Equation::Min => gl::MIN, Equation::Max => gl::MAX, } } fn map_factor(factor: s::Factor) -> gl::types::GLenum { match factor { s::Factor::Zero => gl::ZERO, s::Factor::One => gl::ONE, s::Factor::ZeroPlus(BlendValue::SourceColor) => gl::SRC_COLOR, s::Factor::OneMinus(BlendValue::SourceColor) => gl::ONE_MINUS_SRC_COLOR, s::Factor::ZeroPlus(BlendValue::SourceAlpha) => gl::SRC_ALPHA, s::Factor::OneMinus(BlendValue::SourceAlpha) => gl::ONE_MINUS_SRC_ALPHA, s::Factor::ZeroPlus(BlendValue::DestColor) => gl::DST_COLOR, s::Factor::OneMinus(BlendValue::DestColor) => gl::ONE_MINUS_DST_COLOR, s::Factor::ZeroPlus(BlendValue::DestAlpha) => gl::DST_ALPHA, s::Factor::OneMinus(BlendValue::DestAlpha) => gl::ONE_MINUS_DST_ALPHA, s::Factor::ZeroPlus(BlendValue::ConstColor) => gl::CONSTANT_COLOR, s::Factor::OneMinus(BlendValue::ConstColor) => gl::ONE_MINUS_CONSTANT_COLOR, s::Factor::ZeroPlus(BlendValue::ConstAlpha) => gl::CONSTANT_ALPHA, s::Factor::OneMinus(BlendValue::ConstAlpha) => gl::ONE_MINUS_CONSTANT_ALPHA, s::Factor::SourceAlphaSaturated => gl::SRC_ALPHA_SATURATE, } } pub fn bind_blend(gl: &gl::Gl, color: s::Color) { match color.blend { Some(b) => unsafe { gl.Enable(gl::BLEND); gl.BlendEquationSeparate( map_equation(b.color.equation), map_equation(b.alpha.equation) ); gl.BlendFuncSeparate( map_factor(b.color.source), map_factor(b.color.destination), map_factor(b.alpha.source), map_factor(b.alpha.destination) ); }, None => unsafe { gl.Disable(gl::BLEND); }, }; unsafe { gl.ColorMask( if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE}, if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE}, if (
random_line_split
state.rs
Op, FrontFace}; use core::target::{ColorValue, Rect, Stencil}; use gl; pub fn bind_raster_method(gl: &gl::Gl, method: s::RasterMethod, offset: Option<s::Offset>) { let (gl_draw, gl_offset) = match method { RasterMethod::Point => (gl::POINT, gl::POLYGON_OFFSET_POINT), RasterMethod::Line(width) => { unsafe { gl.LineWidth(width as gl::types::GLfloat) }; (gl::LINE, gl::POLYGON_OFFSET_LINE) }, RasterMethod::Fill => (gl::FILL, gl::POLYGON_OFFSET_FILL), }; unsafe { gl.PolygonMode(gl::FRONT_AND_BACK, gl_draw) }; match offset { Some(Offset(factor, units)) => unsafe { gl.Enable(gl_offset); gl.PolygonOffset(factor as gl::types::GLfloat, units as gl::types::GLfloat); }, None => unsafe { gl.Disable(gl_offset) }, } } pub fn bind_rasterizer(gl: &gl::Gl, r: &s::Rasterizer, is_embedded: bool) { unsafe { gl.FrontFace(match r.front_face { FrontFace::Clockwise => gl::CW, FrontFace::CounterClockwise => gl::CCW, }) }; match r.cull_face { CullFace::Nothing => unsafe { gl.Disable(gl::CULL_FACE) }, CullFace::Front => { unsafe { gl.Enable(gl::CULL_FACE); gl.CullFace(gl::FRONT); }}, CullFace::Back => { unsafe { gl.Enable(gl::CULL_FACE); gl.CullFace(gl::BACK); }} } if !is_embedded { bind_raster_method(gl, r.method, r.offset); } match r.samples { Some(_) => unsafe { gl.Enable(gl::MULTISAMPLE) }, None => unsafe { gl.Disable(gl::MULTISAMPLE) }, } } pub fn bind_draw_color_buffers(gl: &gl::Gl, mask: usize) { let attachments = [ gl::COLOR_ATTACHMENT0, gl::COLOR_ATTACHMENT1, gl::COLOR_ATTACHMENT2, gl::COLOR_ATTACHMENT3, gl::COLOR_ATTACHMENT4, gl::COLOR_ATTACHMENT5, gl::COLOR_ATTACHMENT6, gl::COLOR_ATTACHMENT7, gl::COLOR_ATTACHMENT8, gl::COLOR_ATTACHMENT9, gl::COLOR_ATTACHMENT10, gl::COLOR_ATTACHMENT11, gl::COLOR_ATTACHMENT12, gl::COLOR_ATTACHMENT13, gl::COLOR_ATTACHMENT14, gl::COLOR_ATTACHMENT15]; let mut targets = [0; MAX_COLOR_TARGETS]; let mut count = 0; let mut i = 0; while mask >> i != 0 { if mask & (1<<i) != 0 { targets[count] = attachments[i]; count += 1; } i += 1; } unsafe { gl.DrawBuffers(count as gl::types::GLint, targets.as_ptr()) }; } pub fn bind_viewport(gl: &gl::Gl, rect: Rect) { unsafe { gl.Viewport( rect.x as gl::types::GLint, rect.y as gl::types::GLint, rect.w as gl::types::GLint, rect.h as gl::types::GLint )}; } pub fn bind_scissor(gl: &gl::Gl, rect: Option<Rect>) { match rect { Some(r) => { unsafe { gl.Enable(gl::SCISSOR_TEST); gl.Scissor( r.x as gl::types::GLint, r.y as gl::types::GLint, r.w as gl::types::GLint, r.h as gl::types::GLint ); }}, None => unsafe { gl.Disable(gl::SCISSOR_TEST) }, } } pub fn map_comparison(cmp: Comparison) -> gl::types::GLenum { match cmp { Comparison::Never => gl::NEVER, Comparison::Less => gl::LESS, Comparison::LessEqual => gl::LEQUAL, Comparison::Equal => gl::EQUAL, Comparison::GreaterEqual => gl::GEQUAL, Comparison::Greater => gl::GREATER, Comparison::NotEqual => gl::NOTEQUAL, Comparison::Always => gl::ALWAYS, } } pub fn bind_depth(gl: &gl::Gl, depth: &Option<s::Depth>) { match depth { &Some(ref d) => { unsafe { gl.Enable(gl::DEPTH_TEST); gl.DepthFunc(map_comparison(d.fun)); gl.DepthMask(if d.write {gl::TRUE} else {gl::FALSE}); }}, &None => unsafe { gl.Disable(gl::DEPTH_TEST) }, } } fn map_operation(op: StencilOp) -> gl::types::GLenum { match op { StencilOp::Keep => gl::KEEP, StencilOp::Zero => gl::ZERO, StencilOp::Replace => gl::REPLACE, StencilOp::IncrementClamp=> gl::INCR, StencilOp::IncrementWrap => gl::INCR_WRAP, StencilOp::DecrementClamp=> gl::DECR, StencilOp::DecrementWrap => gl::DECR_WRAP, StencilOp::Invert => gl::INVERT, } } pub fn bind_stencil(gl: &gl::Gl, 
stencil: &Option<s::Stencil>, refs: (Stencil, Stencil), cull: s::CullFace) { fn bind_side(gl: &gl::Gl, face: gl::types::GLenum, side: s::StencilSide, ref_value: Stencil) { unsafe { gl.StencilFuncSeparate(face, map_comparison(side.fun), ref_value as gl::types::GLint, side.mask_read as gl::types::GLuint); gl.StencilMaskSeparate(face, side.mask_write as gl::types::GLuint); gl.StencilOpSeparate(face, map_operation(side.op_fail), map_operation(side.op_depth_fail), map_operation(side.op_pass)); }} match stencil { &Some(ref s) => { unsafe { gl.Enable(gl::STENCIL_TEST) }; if cull != CullFace::Front { bind_side(gl, gl::FRONT, s.front, refs.0); } if cull != CullFace::Back { bind_side(gl, gl::BACK, s.back, refs.1); } } &None => unsafe { gl.Disable(gl::STENCIL_TEST) }, } } fn map_equation(eq: Equation) -> gl::types::GLenum { match eq { Equation::Add => gl::FUNC_ADD, Equation::Sub => gl::FUNC_SUBTRACT, Equation::RevSub => gl::FUNC_REVERSE_SUBTRACT, Equation::Min => gl::MIN, Equation::Max => gl::MAX, } } fn map_factor(factor: s::Factor) -> gl::types::GLenum
pub fn bind_blend(gl: &gl::Gl, color: s::Color) { match color.blend { Some(b) => unsafe { gl.Enable(gl::BLEND); gl.BlendEquationSeparate( map_equation(b.color.equation), map_equation(b.alpha.equation) ); gl.BlendFuncSeparate( map_factor(b.color.source), map_factor(b.color.destination), map_factor(b.alpha.source), map_factor(b.alpha.destination) ); }, None => unsafe { gl.Disable(gl::BLEND); }, }; unsafe { gl.ColorMask( if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE}, if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE},
{ match factor { s::Factor::Zero => gl::ZERO, s::Factor::One => gl::ONE, s::Factor::ZeroPlus(BlendValue::SourceColor) => gl::SRC_COLOR, s::Factor::OneMinus(BlendValue::SourceColor) => gl::ONE_MINUS_SRC_COLOR, s::Factor::ZeroPlus(BlendValue::SourceAlpha) => gl::SRC_ALPHA, s::Factor::OneMinus(BlendValue::SourceAlpha) => gl::ONE_MINUS_SRC_ALPHA, s::Factor::ZeroPlus(BlendValue::DestColor) => gl::DST_COLOR, s::Factor::OneMinus(BlendValue::DestColor) => gl::ONE_MINUS_DST_COLOR, s::Factor::ZeroPlus(BlendValue::DestAlpha) => gl::DST_ALPHA, s::Factor::OneMinus(BlendValue::DestAlpha) => gl::ONE_MINUS_DST_ALPHA, s::Factor::ZeroPlus(BlendValue::ConstColor) => gl::CONSTANT_COLOR, s::Factor::OneMinus(BlendValue::ConstColor) => gl::ONE_MINUS_CONSTANT_COLOR, s::Factor::ZeroPlus(BlendValue::ConstAlpha) => gl::CONSTANT_ALPHA, s::Factor::OneMinus(BlendValue::ConstAlpha) => gl::ONE_MINUS_CONSTANT_ALPHA, s::Factor::SourceAlphaSaturated => gl::SRC_ALPHA_SATURATE, } }
identifier_body
TestGAN.py
as sbs import pdb sbs.set() epsilon = 0.0000000001 def generate_sample(gen, batch_size, latent_dim): noise = np.random.normal(0, 1, (batch_size, latent_dim)) return gen.predict(noise) def show_images(imgs): cols = 8 rows = (len(imgs) // cols ) + (len(imgs) % cols) # Rescale images 0 - 1 imgs = 0.5 * imgs + 0.5 fig, axs = plt.subplots(rows, cols) cnt = 0 for i in range(rows): for j in range(cols): if cnt < len(imgs): axs[i,j].imshow(imgs[cnt, :,:,0], cmap='gray') axs[i,j].axis('off') cnt += 1 else: break #fig.savefig("images/%d.png" % epoch) plt.show() plt.close() def show_and_tell(imgs, pred): for i in range(len(imgs)): plt.figure() print("Prediction #", i, " is ", pred[i]) plt.imshow(imgs[i, :, :,0]) plt.show() plt.close() def get_class_distribution(dataset): return collections.Counter(dataset) # Other ideas for distribution: # - classifier accuracy on manually labeled examples # - classifier "correctness" def get_positive_distribution(imgs, tags, num_types): dist = {} for i in range(len(imgs)): if tags[i] in dist: dist[tags[i]] += sum(imgs[i] >= 0) else: dist[tags[i]] = sum(imgs[i] >= 0) # normalize size = len(imgs) for key in dist.keys(): dist[key] = dist[key] / size return dist def get_zero_distribution(imgs, tags, num_types): dist = {} for i in range(len(imgs)): if tags[i] in dist: dist[tags[i]] += sum(imgs[i] < -0.9999) else: dist[tags[i]] = sum(imgs[i] < -0.9999) # normalize size = len(imgs) for key in dist.keys(): dist[key] = dist[key] / size return dist def normalize_distribution(dataset, target_distribution): # now all classes might be present for i in target_distribution.keys(): dataset.setdefault(i, epsilon) return dataset def compare_distributions(dataset_a, dataset_b): da = np.zeros(len(dataset_a.keys())) da[list(dataset_a.keys())] = list(dataset_a.values()) db = np.zeros(len(dataset_b.keys())) db[list(dataset_b.keys())] = list(dataset_b.values()) da /= sum(da) db /= sum(db) ent = entropy(da, qk=db) if np.isinf(ent): pdb.set_trace() return ent def compare_matrix_distributions(dataset_a, dataset_b): ent_array = np.zeros(len(dataset_a.keys())) for i in range(len(dataset_a.keys())): norm_a = dataset_a[i] + epsilon norm_b = dataset_b[i] + epsilon ent = entropy(norm_a.flatten(), qk=norm_b.flatten()) ent_array[i] = ent return (np.mean(ent_array), np.std(ent_array)) class GraphParams: def __init__(self, xlabel, ylabel, control_legend, ddiff_legend): self.xlabel = xlabel self.ylabel = ylabel self.control_legend = control_legend self.ddiff_legend = ddiff_legend def plot_results(sizes, ddiff, dstd, control, graph_params, filename=None): plt.figure() carr = np.zeros(len(sizes)) carr.fill(control) plt.plot(sizes, carr, label=graph_params.control_legend) plt.errorbar(sizes, ddiff, yerr=dstd, label=graph_params.ddiff_legend) plt.grid(b=True) plt.xlabel(graph_params.xlabel) plt.ylabel(graph_params.ylabel) plt.legend(loc = 'upper right', numpoints=1, fancybox=True) if not filename: plt.show() else: plt.savefig(filename) plt.close() def plot_all_results(sizes, ddiff, dstd, pdiff, pstd, zdiff, zstd, control, graph_params, filename=None): sbs.set_style("whitegrid", {"axes.grid": True}) plt.figure() carr = np.zeros(len(sizes)) carr.fill(control) plt.plot(sizes, carr, label=graph_params.control_legend) plt.errorbar(sizes, ddiff, yerr=dstd, label=graph_params.ddiff_legend) plt.errorbar(sizes, pdiff, yerr=pstd, label=graph_params.pdiff_legend) plt.errorbar(sizes, zdiff, yerr=zstd, label=graph_params.zdiff_legend) plt.grid(b=True) plt.xlabel(graph_params.xlabel) plt.ylabel(graph_params.ylabel) 
plt.legend(numpoints=1, fancybox=True) if not filename:
plt.savefig(filename) plt.close() def show_images(gan_imgs, orig_imgs, filename=None): n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(gan_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(orig_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) if filename == None: plt.show() else: plt.savefig(filename) plt.close() if __name__ == '__main__': out_file = None if len(sys.argv) > 1: out_file = sys.argv[1] (X_train, y_train), (X_test, y_test) = mnist.load_data() X_train = X_train / 127.5 - 1. X_test = X_test / 127.5 - 1. y_train_distribution = get_class_distribution(y_train) y_test_distribution = get_class_distribution(y_test) num_types = len(y_train_distribution) control_distribution_difference = compare_distributions(y_train_distribution, y_test_distribution) print("Train/test distribution difference:", control_distribution_difference) train_positive_distribution = get_positive_distribution(X_train, y_train, num_types) test_positive_distribution = get_positive_distribution(X_test, y_test, num_types) control_positive_difference = compare_matrix_distributions(train_positive_distribution, test_positive_distribution) train_zero_distribution = get_zero_distribution(X_train, y_train, num_types) train_zero_distribution = get_zero_distribution(X_test, y_test, num_types) control_zero_difference = compare_matrix_distributions(train_zero_distribution, train_zero_distribution) ext = ".hdf5" filename = "gan_model" num_runs = 10 save_sample_images = True generator = load_model(filename + "_generator") classifier = load_model(filename + "_classifier" + ext) if save_sample_images: sample_imgs = generate_sample(generator, batch_size=10, latent_dim=100) rand_img_ind = np.random.randint(0, len(X_test)-1, 10) test_imgs = X_test[rand_img_ind] show_images(sample_imgs, test_imgs, 'generator_sample.png') sys.exit() multiplier = 1000 sample_sizes = [1* multiplier, 5 * multiplier, 10 * multiplier, 50 * multiplier] ddiff = np.zeros(len(sample_sizes)) dstd = np.zeros(len(sample_sizes)) pdiff = np.zeros(len(sample_sizes)) pstd = np.zeros(len(sample_sizes)) zdiff = np.zeros(len(sample_sizes)) zstd = np.zeros(len(sample_sizes)) for sample_size in range(len(sample_sizes)): digits_differences = np.zeros(num_runs) zero_differences = np.zeros(num_runs) positive_differences = np.zeros(num_runs) for i in range(len(digits_differences)): # Generate a batch of new images sample_imgs = generate_sample(generator, batch_size=sample_sizes[sample_size], latent_dim=100) pred_classes = classifier.predict_classes(sample_imgs) pred_classes_distribution = get_class_distribution(pred_classes) pred_classes_distribution = normalize_distribution(pred_classes_distribution, y_train_distribution) sample_pos_distribution = get_positive_distribution(sample_imgs, pred_classes, num_types) sample_zero_distribution = get_zero_distribution(sample_imgs, pred_classes, num_types) distribution_difference = compare_distributions(y_train_distribution, pred_classes_distribution) digits_differences[i] = distribution_difference positive_difference = compare_matrix_distributions(train_positive_distribution, sample_pos_distribution) positive_differences[i] = positive_difference[0] zero_difference = compare_matrix_distributions(train_zero_distribution, sample_zero_distribution) 
zero_differences[i] = zero_difference[0] print("Distribution
plt.show() else:
random_line_split
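compare_distributions in the TestGAN.py record above builds class histograms with collections.Counter and scores them with scipy.stats.entropy, which returns the KL divergence when a second distribution is supplied. A condensed sketch of that comparison follows; the epsilon smoothing value and the fixed ten MNIST classes are assumptions lifted from the surrounding code.

# Sketch of the label-distribution comparison used in TestGAN.py.
import collections
import numpy as np
from scipy.stats import entropy

def kl_between_label_sets(labels_a, labels_b, num_classes=10, eps=1e-10):
    counts_a = collections.Counter(labels_a)
    counts_b = collections.Counter(labels_b)
    # Fill missing classes with a tiny mass so entropy() stays finite,
    # as normalize_distribution does with its epsilon constant.
    pa = np.array([counts_a.get(c, eps) for c in range(num_classes)], dtype=float)
    pb = np.array([counts_b.get(c, eps) for c in range(num_classes)], dtype=float)
    pa /= pa.sum()
    pb /= pb.sum()
    return entropy(pa, qk=pb)  # KL(pa || pb)

# e.g. kl_between_label_sets(y_train, y_test) on the MNIST labels loaded above.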
TestGAN.py
as sbs import pdb sbs.set() epsilon = 0.0000000001 def generate_sample(gen, batch_size, latent_dim): noise = np.random.normal(0, 1, (batch_size, latent_dim)) return gen.predict(noise) def show_images(imgs): cols = 8 rows = (len(imgs) // cols ) + (len(imgs) % cols) # Rescale images 0 - 1 imgs = 0.5 * imgs + 0.5 fig, axs = plt.subplots(rows, cols) cnt = 0 for i in range(rows): for j in range(cols): if cnt < len(imgs): axs[i,j].imshow(imgs[cnt, :,:,0], cmap='gray') axs[i,j].axis('off') cnt += 1 else: break #fig.savefig("images/%d.png" % epoch) plt.show() plt.close() def show_and_tell(imgs, pred): for i in range(len(imgs)): plt.figure() print("Prediction #", i, " is ", pred[i]) plt.imshow(imgs[i, :, :,0]) plt.show() plt.close() def get_class_distribution(dataset): return collections.Counter(dataset) # Other ideas for distribution: # - classifier accuracy on manually labeled examples # - classifier "correctness" def get_positive_distribution(imgs, tags, num_types): dist = {} for i in range(len(imgs)): if tags[i] in dist: dist[tags[i]] += sum(imgs[i] >= 0) else: dist[tags[i]] = sum(imgs[i] >= 0) # normalize size = len(imgs) for key in dist.keys(): dist[key] = dist[key] / size return dist def get_zero_distribution(imgs, tags, num_types): dist = {} for i in range(len(imgs)): if tags[i] in dist: dist[tags[i]] += sum(imgs[i] < -0.9999) else: dist[tags[i]] = sum(imgs[i] < -0.9999) # normalize size = len(imgs) for key in dist.keys(): dist[key] = dist[key] / size return dist def normalize_distribution(dataset, target_distribution): # now all classes might be present for i in target_distribution.keys(): dataset.setdefault(i, epsilon) return dataset def compare_distributions(dataset_a, dataset_b): da = np.zeros(len(dataset_a.keys())) da[list(dataset_a.keys())] = list(dataset_a.values()) db = np.zeros(len(dataset_b.keys())) db[list(dataset_b.keys())] = list(dataset_b.values()) da /= sum(da) db /= sum(db) ent = entropy(da, qk=db) if np.isinf(ent): pdb.set_trace() return ent def compare_matrix_distributions(dataset_a, dataset_b): ent_array = np.zeros(len(dataset_a.keys())) for i in range(len(dataset_a.keys())): norm_a = dataset_a[i] + epsilon norm_b = dataset_b[i] + epsilon ent = entropy(norm_a.flatten(), qk=norm_b.flatten()) ent_array[i] = ent return (np.mean(ent_array), np.std(ent_array)) class GraphParams:
def plot_results(sizes, ddiff, dstd, control, graph_params, filename=None): plt.figure() carr = np.zeros(len(sizes)) carr.fill(control) plt.plot(sizes, carr, label=graph_params.control_legend) plt.errorbar(sizes, ddiff, yerr=dstd, label=graph_params.ddiff_legend) plt.grid(b=True) plt.xlabel(graph_params.xlabel) plt.ylabel(graph_params.ylabel) plt.legend(loc = 'upper right', numpoints=1, fancybox=True) if not filename: plt.show() else: plt.savefig(filename) plt.close() def plot_all_results(sizes, ddiff, dstd, pdiff, pstd, zdiff, zstd, control, graph_params, filename=None): sbs.set_style("whitegrid", {"axes.grid": True}) plt.figure() carr = np.zeros(len(sizes)) carr.fill(control) plt.plot(sizes, carr, label=graph_params.control_legend) plt.errorbar(sizes, ddiff, yerr=dstd, label=graph_params.ddiff_legend) plt.errorbar(sizes, pdiff, yerr=pstd, label=graph_params.pdiff_legend) plt.errorbar(sizes, zdiff, yerr=zstd, label=graph_params.zdiff_legend) plt.grid(b=True) plt.xlabel(graph_params.xlabel) plt.ylabel(graph_params.ylabel) plt.legend(numpoints=1, fancybox=True) if not filename: plt.show() else: plt.savefig(filename) plt.close() def show_images(gan_imgs, orig_imgs, filename=None): n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(gan_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(orig_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) if filename == None: plt.show() else: plt.savefig(filename) plt.close() if __name__ == '__main__': out_file = None if len(sys.argv) > 1: out_file = sys.argv[1] (X_train, y_train), (X_test, y_test) = mnist.load_data() X_train = X_train / 127.5 - 1. X_test = X_test / 127.5 - 1. 
y_train_distribution = get_class_distribution(y_train) y_test_distribution = get_class_distribution(y_test) num_types = len(y_train_distribution) control_distribution_difference = compare_distributions(y_train_distribution, y_test_distribution) print("Train/test distribution difference:", control_distribution_difference) train_positive_distribution = get_positive_distribution(X_train, y_train, num_types) test_positive_distribution = get_positive_distribution(X_test, y_test, num_types) control_positive_difference = compare_matrix_distributions(train_positive_distribution, test_positive_distribution) train_zero_distribution = get_zero_distribution(X_train, y_train, num_types) train_zero_distribution = get_zero_distribution(X_test, y_test, num_types) control_zero_difference = compare_matrix_distributions(train_zero_distribution, train_zero_distribution) ext = ".hdf5" filename = "gan_model" num_runs = 10 save_sample_images = True generator = load_model(filename + "_generator") classifier = load_model(filename + "_classifier" + ext) if save_sample_images: sample_imgs = generate_sample(generator, batch_size=10, latent_dim=100) rand_img_ind = np.random.randint(0, len(X_test)-1, 10) test_imgs = X_test[rand_img_ind] show_images(sample_imgs, test_imgs, 'generator_sample.png') sys.exit() multiplier = 1000 sample_sizes = [1* multiplier, 5 * multiplier, 10 * multiplier, 50 * multiplier] ddiff = np.zeros(len(sample_sizes)) dstd = np.zeros(len(sample_sizes)) pdiff = np.zeros(len(sample_sizes)) pstd = np.zeros(len(sample_sizes)) zdiff = np.zeros(len(sample_sizes)) zstd = np.zeros(len(sample_sizes)) for sample_size in range(len(sample_sizes)): digits_differences = np.zeros(num_runs) zero_differences = np.zeros(num_runs) positive_differences = np.zeros(num_runs) for i in range(len(digits_differences)): # Generate a batch of new images sample_imgs = generate_sample(generator, batch_size=sample_sizes[sample_size], latent_dim=100) pred_classes = classifier.predict_classes(sample_imgs) pred_classes_distribution = get_class_distribution(pred_classes) pred_classes_distribution = normalize_distribution(pred_classes_distribution, y_train_distribution) sample_pos_distribution = get_positive_distribution(sample_imgs, pred_classes, num_types) sample_zero_distribution = get_zero_distribution(sample_imgs, pred_classes, num_types) distribution_difference = compare_distributions(y_train_distribution, pred_classes_distribution) digits_differences[i] = distribution_difference positive_difference = compare_matrix_distributions(train_positive_distribution, sample_pos_distribution) positive_differences[i] = positive_difference[0] zero_difference = compare_matrix_distributions(train_zero_distribution, sample_zero_distribution) zero_differences[i] = zero_difference[0] print("Distribution
def __init__(self, xlabel, ylabel, control_legend, ddiff_legend): self.xlabel = xlabel self.ylabel = ylabel self.control_legend = control_legend self.ddiff_legend = ddiff_legend
identifier_body
TestGAN.py
as sbs import pdb sbs.set() epsilon = 0.0000000001 def generate_sample(gen, batch_size, latent_dim): noise = np.random.normal(0, 1, (batch_size, latent_dim)) return gen.predict(noise) def show_images(imgs): cols = 8 rows = (len(imgs) // cols ) + (len(imgs) % cols) # Rescale images 0 - 1 imgs = 0.5 * imgs + 0.5 fig, axs = plt.subplots(rows, cols) cnt = 0 for i in range(rows): for j in range(cols): if cnt < len(imgs): axs[i,j].imshow(imgs[cnt, :,:,0], cmap='gray') axs[i,j].axis('off') cnt += 1 else: break #fig.savefig("images/%d.png" % epoch) plt.show() plt.close() def show_and_tell(imgs, pred): for i in range(len(imgs)): plt.figure() print("Prediction #", i, " is ", pred[i]) plt.imshow(imgs[i, :, :,0]) plt.show() plt.close() def get_class_distribution(dataset): return collections.Counter(dataset) # Other ideas for distribution: # - classifier accuracy on manually labeled examples # - classifier "correctness" def get_positive_distribution(imgs, tags, num_types): dist = {} for i in range(len(imgs)): if tags[i] in dist: dist[tags[i]] += sum(imgs[i] >= 0) else: dist[tags[i]] = sum(imgs[i] >= 0) # normalize size = len(imgs) for key in dist.keys(): dist[key] = dist[key] / size return dist def get_zero_distribution(imgs, tags, num_types): dist = {} for i in range(len(imgs)): if tags[i] in dist: dist[tags[i]] += sum(imgs[i] < -0.9999) else: dist[tags[i]] = sum(imgs[i] < -0.9999) # normalize size = len(imgs) for key in dist.keys(): dist[key] = dist[key] / size return dist def normalize_distribution(dataset, target_distribution): # now all classes might be present for i in target_distribution.keys(): dataset.setdefault(i, epsilon) return dataset def compare_distributions(dataset_a, dataset_b): da = np.zeros(len(dataset_a.keys())) da[list(dataset_a.keys())] = list(dataset_a.values()) db = np.zeros(len(dataset_b.keys())) db[list(dataset_b.keys())] = list(dataset_b.values()) da /= sum(da) db /= sum(db) ent = entropy(da, qk=db) if np.isinf(ent): pdb.set_trace() return ent def compare_matrix_distributions(dataset_a, dataset_b): ent_array = np.zeros(len(dataset_a.keys())) for i in range(len(dataset_a.keys())): norm_a = dataset_a[i] + epsilon norm_b = dataset_b[i] + epsilon ent = entropy(norm_a.flatten(), qk=norm_b.flatten()) ent_array[i] = ent return (np.mean(ent_array), np.std(ent_array)) class GraphParams: def __init__(self, xlabel, ylabel, control_legend, ddiff_legend): self.xlabel = xlabel self.ylabel = ylabel self.control_legend = control_legend self.ddiff_legend = ddiff_legend def plot_results(sizes, ddiff, dstd, control, graph_params, filename=None): plt.figure() carr = np.zeros(len(sizes)) carr.fill(control) plt.plot(sizes, carr, label=graph_params.control_legend) plt.errorbar(sizes, ddiff, yerr=dstd, label=graph_params.ddiff_legend) plt.grid(b=True) plt.xlabel(graph_params.xlabel) plt.ylabel(graph_params.ylabel) plt.legend(loc = 'upper right', numpoints=1, fancybox=True) if not filename: plt.show() else: plt.savefig(filename) plt.close() def plot_all_results(sizes, ddiff, dstd, pdiff, pstd, zdiff, zstd, control, graph_params, filename=None): sbs.set_style("whitegrid", {"axes.grid": True}) plt.figure() carr = np.zeros(len(sizes)) carr.fill(control) plt.plot(sizes, carr, label=graph_params.control_legend) plt.errorbar(sizes, ddiff, yerr=dstd, label=graph_params.ddiff_legend) plt.errorbar(sizes, pdiff, yerr=pstd, label=graph_params.pdiff_legend) plt.errorbar(sizes, zdiff, yerr=zstd, label=graph_params.zdiff_legend) plt.grid(b=True) plt.xlabel(graph_params.xlabel) plt.ylabel(graph_params.ylabel) 
plt.legend(numpoints=1, fancybox=True) if not filename:
else: plt.savefig(filename) plt.close() def show_images(gan_imgs, orig_imgs, filename=None): n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(gan_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(orig_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) if filename == None: plt.show() else: plt.savefig(filename) plt.close() if __name__ == '__main__': out_file = None if len(sys.argv) > 1: out_file = sys.argv[1] (X_train, y_train), (X_test, y_test) = mnist.load_data() X_train = X_train / 127.5 - 1. X_test = X_test / 127.5 - 1. y_train_distribution = get_class_distribution(y_train) y_test_distribution = get_class_distribution(y_test) num_types = len(y_train_distribution) control_distribution_difference = compare_distributions(y_train_distribution, y_test_distribution) print("Train/test distribution difference:", control_distribution_difference) train_positive_distribution = get_positive_distribution(X_train, y_train, num_types) test_positive_distribution = get_positive_distribution(X_test, y_test, num_types) control_positive_difference = compare_matrix_distributions(train_positive_distribution, test_positive_distribution) train_zero_distribution = get_zero_distribution(X_train, y_train, num_types) train_zero_distribution = get_zero_distribution(X_test, y_test, num_types) control_zero_difference = compare_matrix_distributions(train_zero_distribution, train_zero_distribution) ext = ".hdf5" filename = "gan_model" num_runs = 10 save_sample_images = True generator = load_model(filename + "_generator") classifier = load_model(filename + "_classifier" + ext) if save_sample_images: sample_imgs = generate_sample(generator, batch_size=10, latent_dim=100) rand_img_ind = np.random.randint(0, len(X_test)-1, 10) test_imgs = X_test[rand_img_ind] show_images(sample_imgs, test_imgs, 'generator_sample.png') sys.exit() multiplier = 1000 sample_sizes = [1* multiplier, 5 * multiplier, 10 * multiplier, 50 * multiplier] ddiff = np.zeros(len(sample_sizes)) dstd = np.zeros(len(sample_sizes)) pdiff = np.zeros(len(sample_sizes)) pstd = np.zeros(len(sample_sizes)) zdiff = np.zeros(len(sample_sizes)) zstd = np.zeros(len(sample_sizes)) for sample_size in range(len(sample_sizes)): digits_differences = np.zeros(num_runs) zero_differences = np.zeros(num_runs) positive_differences = np.zeros(num_runs) for i in range(len(digits_differences)): # Generate a batch of new images sample_imgs = generate_sample(generator, batch_size=sample_sizes[sample_size], latent_dim=100) pred_classes = classifier.predict_classes(sample_imgs) pred_classes_distribution = get_class_distribution(pred_classes) pred_classes_distribution = normalize_distribution(pred_classes_distribution, y_train_distribution) sample_pos_distribution = get_positive_distribution(sample_imgs, pred_classes, num_types) sample_zero_distribution = get_zero_distribution(sample_imgs, pred_classes, num_types) distribution_difference = compare_distributions(y_train_distribution, pred_classes_distribution) digits_differences[i] = distribution_difference positive_difference = compare_matrix_distributions(train_positive_distribution, sample_pos_distribution) positive_differences[i] = positive_difference[0] zero_difference = compare_matrix_distributions(train_zero_distribution, sample_zero_distribution) 
zero_differences[i] = zero_difference[0] print("
plt.show()
conditional_block
TestGAN.py
as sbs import pdb sbs.set() epsilon = 0.0000000001 def generate_sample(gen, batch_size, latent_dim): noise = np.random.normal(0, 1, (batch_size, latent_dim)) return gen.predict(noise) def
(imgs): cols = 8 rows = (len(imgs) // cols ) + (len(imgs) % cols) # Rescale images 0 - 1 imgs = 0.5 * imgs + 0.5 fig, axs = plt.subplots(rows, cols) cnt = 0 for i in range(rows): for j in range(cols): if cnt < len(imgs): axs[i,j].imshow(imgs[cnt, :,:,0], cmap='gray') axs[i,j].axis('off') cnt += 1 else: break #fig.savefig("images/%d.png" % epoch) plt.show() plt.close() def show_and_tell(imgs, pred): for i in range(len(imgs)): plt.figure() print("Prediction #", i, " is ", pred[i]) plt.imshow(imgs[i, :, :,0]) plt.show() plt.close() def get_class_distribution(dataset): return collections.Counter(dataset) # Other ideas for distribution: # - classifier accuracy on manually labeled examples # - classifier "correctness" def get_positive_distribution(imgs, tags, num_types): dist = {} for i in range(len(imgs)): if tags[i] in dist: dist[tags[i]] += sum(imgs[i] >= 0) else: dist[tags[i]] = sum(imgs[i] >= 0) # normalize size = len(imgs) for key in dist.keys(): dist[key] = dist[key] / size return dist def get_zero_distribution(imgs, tags, num_types): dist = {} for i in range(len(imgs)): if tags[i] in dist: dist[tags[i]] += sum(imgs[i] < -0.9999) else: dist[tags[i]] = sum(imgs[i] < -0.9999) # normalize size = len(imgs) for key in dist.keys(): dist[key] = dist[key] / size return dist def normalize_distribution(dataset, target_distribution): # now all classes might be present for i in target_distribution.keys(): dataset.setdefault(i, epsilon) return dataset def compare_distributions(dataset_a, dataset_b): da = np.zeros(len(dataset_a.keys())) da[list(dataset_a.keys())] = list(dataset_a.values()) db = np.zeros(len(dataset_b.keys())) db[list(dataset_b.keys())] = list(dataset_b.values()) da /= sum(da) db /= sum(db) ent = entropy(da, qk=db) if np.isinf(ent): pdb.set_trace() return ent def compare_matrix_distributions(dataset_a, dataset_b): ent_array = np.zeros(len(dataset_a.keys())) for i in range(len(dataset_a.keys())): norm_a = dataset_a[i] + epsilon norm_b = dataset_b[i] + epsilon ent = entropy(norm_a.flatten(), qk=norm_b.flatten()) ent_array[i] = ent return (np.mean(ent_array), np.std(ent_array)) class GraphParams: def __init__(self, xlabel, ylabel, control_legend, ddiff_legend): self.xlabel = xlabel self.ylabel = ylabel self.control_legend = control_legend self.ddiff_legend = ddiff_legend def plot_results(sizes, ddiff, dstd, control, graph_params, filename=None): plt.figure() carr = np.zeros(len(sizes)) carr.fill(control) plt.plot(sizes, carr, label=graph_params.control_legend) plt.errorbar(sizes, ddiff, yerr=dstd, label=graph_params.ddiff_legend) plt.grid(b=True) plt.xlabel(graph_params.xlabel) plt.ylabel(graph_params.ylabel) plt.legend(loc = 'upper right', numpoints=1, fancybox=True) if not filename: plt.show() else: plt.savefig(filename) plt.close() def plot_all_results(sizes, ddiff, dstd, pdiff, pstd, zdiff, zstd, control, graph_params, filename=None): sbs.set_style("whitegrid", {"axes.grid": True}) plt.figure() carr = np.zeros(len(sizes)) carr.fill(control) plt.plot(sizes, carr, label=graph_params.control_legend) plt.errorbar(sizes, ddiff, yerr=dstd, label=graph_params.ddiff_legend) plt.errorbar(sizes, pdiff, yerr=pstd, label=graph_params.pdiff_legend) plt.errorbar(sizes, zdiff, yerr=zstd, label=graph_params.zdiff_legend) plt.grid(b=True) plt.xlabel(graph_params.xlabel) plt.ylabel(graph_params.ylabel) plt.legend(numpoints=1, fancybox=True) if not filename: plt.show() else: plt.savefig(filename) plt.close() def show_images(gan_imgs, orig_imgs, filename=None): n = 10 # how many digits we will display 
plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(gan_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(orig_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) if filename == None: plt.show() else: plt.savefig(filename) plt.close() if __name__ == '__main__': out_file = None if len(sys.argv) > 1: out_file = sys.argv[1] (X_train, y_train), (X_test, y_test) = mnist.load_data() X_train = X_train / 127.5 - 1. X_test = X_test / 127.5 - 1. y_train_distribution = get_class_distribution(y_train) y_test_distribution = get_class_distribution(y_test) num_types = len(y_train_distribution) control_distribution_difference = compare_distributions(y_train_distribution, y_test_distribution) print("Train/test distribution difference:", control_distribution_difference) train_positive_distribution = get_positive_distribution(X_train, y_train, num_types) test_positive_distribution = get_positive_distribution(X_test, y_test, num_types) control_positive_difference = compare_matrix_distributions(train_positive_distribution, test_positive_distribution) train_zero_distribution = get_zero_distribution(X_train, y_train, num_types) train_zero_distribution = get_zero_distribution(X_test, y_test, num_types) control_zero_difference = compare_matrix_distributions(train_zero_distribution, train_zero_distribution) ext = ".hdf5" filename = "gan_model" num_runs = 10 save_sample_images = True generator = load_model(filename + "_generator") classifier = load_model(filename + "_classifier" + ext) if save_sample_images: sample_imgs = generate_sample(generator, batch_size=10, latent_dim=100) rand_img_ind = np.random.randint(0, len(X_test)-1, 10) test_imgs = X_test[rand_img_ind] show_images(sample_imgs, test_imgs, 'generator_sample.png') sys.exit() multiplier = 1000 sample_sizes = [1* multiplier, 5 * multiplier, 10 * multiplier, 50 * multiplier] ddiff = np.zeros(len(sample_sizes)) dstd = np.zeros(len(sample_sizes)) pdiff = np.zeros(len(sample_sizes)) pstd = np.zeros(len(sample_sizes)) zdiff = np.zeros(len(sample_sizes)) zstd = np.zeros(len(sample_sizes)) for sample_size in range(len(sample_sizes)): digits_differences = np.zeros(num_runs) zero_differences = np.zeros(num_runs) positive_differences = np.zeros(num_runs) for i in range(len(digits_differences)): # Generate a batch of new images sample_imgs = generate_sample(generator, batch_size=sample_sizes[sample_size], latent_dim=100) pred_classes = classifier.predict_classes(sample_imgs) pred_classes_distribution = get_class_distribution(pred_classes) pred_classes_distribution = normalize_distribution(pred_classes_distribution, y_train_distribution) sample_pos_distribution = get_positive_distribution(sample_imgs, pred_classes, num_types) sample_zero_distribution = get_zero_distribution(sample_imgs, pred_classes, num_types) distribution_difference = compare_distributions(y_train_distribution, pred_classes_distribution) digits_differences[i] = distribution_difference positive_difference = compare_matrix_distributions(train_positive_distribution, sample_pos_distribution) positive_differences[i] = positive_difference[0] zero_difference = compare_matrix_distributions(train_zero_distribution, sample_zero_distribution) zero_differences[i] = zero_difference[0] print("
show_images
identifier_name
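get_positive_distribution and get_zero_distribution in the records above accumulate, per predicted class, how many pixels are non-negative or close to -1 across a batch, normalised by the total sample count. Here is a vectorised numpy sketch of the same statistic; the thresholds are copied from the source, everything else is illustrative.

# Sketch: per-class map of non-negative pixel counts, normalised by the total
# number of samples (matching the `/ size` normalisation in the source).
import numpy as np

def per_class_positive_pixels(imgs, labels, num_classes=10, threshold=0.0):
    imgs = np.asarray(imgs)      # (N, 28, 28, 1) scaled to [-1, 1]
    labels = np.asarray(labels)  # (N,) integer class labels
    dist = {}
    for c in range(num_classes):
        members = imgs[labels == c]
        if members.size:
            dist[c] = (members >= threshold).sum(axis=0) / float(len(imgs))
    return dist

# Counting pixels below -0.9999 instead of `>= threshold` would reproduce
# get_zero_distribution for images scaled to [-1, 1].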
service.go
SpecID) (*graphql.Campaign, error) { var result struct { Campaign *graphql.Campaign `json:"applyCampaign"` } if ok, err := svc.newRequest(applyCampaignMutation, map[string]interface{}{ "campaignSpec": spec, }).Do(ctx, &result); err != nil || !ok { return nil, err } return result.Campaign, nil } const createCampaignSpecMutation = ` mutation CreateCampaignSpec( $namespace: ID!, $spec: String!, $changesetSpecs: [ID!]! ) { createCampaignSpec( namespace: $namespace, campaignSpec: $spec, changesetSpecs: $changesetSpecs ) { id applyURL } } ` func (svc *Service) CreateCampaignSpec(ctx context.Context, namespace, spec string, ids []ChangesetSpecID) (CampaignSpecID, string, error) { var result struct { CreateCampaignSpec struct { ID string ApplyURL string } } if ok, err := svc.client.NewRequest(createCampaignSpecMutation, map[string]interface{}{ "namespace": namespace, "spec": spec, "changesetSpecs": ids, }).Do(ctx, &result); err != nil || !ok { return "", "", err } return CampaignSpecID(result.CreateCampaignSpec.ID), result.CreateCampaignSpec.ApplyURL, nil } const createChangesetSpecMutation = ` mutation CreateChangesetSpec($spec: String!) { createChangesetSpec(changesetSpec: $spec) { ... on HiddenChangesetSpec { id } ... on VisibleChangesetSpec { id } } } ` func (svc *Service) CreateChangesetSpec(ctx context.Context, spec *ChangesetSpec) (ChangesetSpecID, error) { raw, err := json.Marshal(spec) if err != nil { return "", errors.Wrap(err, "marshalling changeset spec JSON") } var result struct { CreateChangesetSpec struct { ID string } } if ok, err := svc.newRequest(createChangesetSpecMutation, map[string]interface{}{ "spec": string(raw), }).Do(ctx, &result); err != nil || !ok { return "", err } return ChangesetSpecID(result.CreateChangesetSpec.ID), nil } func (svc *Service) NewExecutionCache(dir string) ExecutionCache { if dir == "" { return &ExecutionNoOpCache{} } return &ExecutionDiskCache{dir} } type ExecutorOpts struct { Cache ExecutionCache Creator *WorkspaceCreator Parallelism int Timeout time.Duration ClearCache bool KeepLogs bool TempDir string CacheDir string } func (svc *Service) NewExecutor(opts ExecutorOpts) Executor { return newExecutor(opts, svc.client, svc.features) } func (svc *Service) NewWorkspaceCreator(dir string, cleanArchives bool) *WorkspaceCreator { return &WorkspaceCreator{dir: dir, client: svc.client, deleteZips: cleanArchives} } func (svc *Service) SetDockerImages(ctx context.Context, spec *CampaignSpec, progress func(i int)) error { for i, step := range spec.Steps { image, err := getDockerImageContentDigest(ctx, step.Container) if err != nil { return err } spec.Steps[i].image = image progress(i + 1) } return nil } func (svc *Service) ExecuteCampaignSpec(ctx context.Context, repos []*graphql.Repository, x Executor, spec *CampaignSpec, progress func([]*TaskStatus), skipErrors bool) ([]*ChangesetSpec, error) { for _, repo := range repos { x.AddTask(repo, spec.Steps, spec.TransformChanges, spec.ChangesetTemplate) } done := make(chan struct{}) if progress != nil { go func() { x.LockedTaskStatuses(progress) ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() for { select { case <-ticker.C: x.LockedTaskStatuses(progress) case <-done: return } } }() } var errs *multierror.Error x.Start(ctx) specs, err := x.Wait() if progress != nil { x.LockedTaskStatuses(progress) done <- struct{}{} } if err != nil { if skipErrors { errs = multierror.Append(errs, err) } else { return nil, err } } // Add external changeset specs. 
for _, ic := range spec.ImportChangesets { repo, err := svc.resolveRepositoryName(ctx, ic.Repository) if err != nil { wrapped := errors.Wrapf(err, "resolving repository name %q", ic.Repository) if skipErrors { errs = multierror.Append(errs, wrapped) continue } else { return nil, wrapped } } for _, id := range ic.ExternalIDs { var sid string switch tid := id.(type) { case string: sid = tid case int, int8, int16, int32, int64: sid = strconv.FormatInt(reflect.ValueOf(id).Int(), 10) case uint, uint8, uint16, uint32, uint64: sid = strconv.FormatUint(reflect.ValueOf(id).Uint(), 10) case float32: sid = strconv.FormatFloat(float64(tid), 'f', -1, 32) case float64: sid = strconv.FormatFloat(tid, 'f', -1, 64) default: return nil, errors.Errorf("cannot convert value of type %T into a valid external ID: expected string or int", id) } specs = append(specs, &ChangesetSpec{ BaseRepository: repo.ID, ExternalChangeset: &ExternalChangeset{sid}, }) } } return specs, errs.ErrorOrNil() } func (svc *Service) ParseCampaignSpec(in io.Reader) (*CampaignSpec, string, error) { data, err := ioutil.ReadAll(in) if err != nil { return nil, "", errors.Wrap(err, "reading campaign spec") } spec, err := ParseCampaignSpec(data, svc.features) if err != nil { return nil, "", errors.Wrap(err, "parsing campaign spec") } return spec, string(data), nil } const namespaceQuery = ` query NamespaceQuery($name: String!) { user(username: $name) { id } organization(name: $name) { id } } ` const usernameQuery = ` query GetCurrentUserID { currentUser { id } } `
Data struct { CurrentUser struct { ID string `json:"id"` } `json:"currentUser"` } `json:"data"` } if ok, err := svc.client.NewRequest(usernameQuery, nil).DoRaw(ctx, &resp); err != nil || !ok { return "", errors.WithMessage(err, "failed to resolve namespace: no user logged in") } if resp.Data.CurrentUser.ID == "" { return "", errors.New("cannot resolve current user") } return resp.Data.CurrentUser.ID, nil } var result struct { Data struct { User *struct{ ID string } Organization *struct{ ID string } } Errors []interface{} } if ok, err := svc.client.NewRequest(namespaceQuery, map[string]interface{}{ "name": namespace, }).DoRaw(ctx, &result); err != nil || !ok { return "", err } if result.Data.User != nil { return result.Data.User.ID, nil } if result.Data.Organization != nil { return result.Data.Organization.ID, nil } return "", fmt.Errorf("failed to resolve namespace %q: no user or organization found", namespace) } func (svc *Service) ResolveRepositories(ctx context.Context, spec *CampaignSpec) ([]*graphql.Repository, error) { seen := map[string]*graphql.Repository{} unsupported := UnsupportedRepoSet{} // TODO: this could be trivially parallelised in the future. for _, on := range spec.On { repos, err := svc.ResolveRepositoriesOn(ctx, &on) if err != nil { return nil, errors.Wrapf(err, "resolving %q", on.String()) } for _, repo := range repos { if !repo.HasBranch() { continue } if other, ok := seen[repo.ID]; !ok { seen[repo.ID] = repo switch st := strings.ToLower(repo.ExternalRepository.ServiceType); st { case "github", "gitlab", "bitbucketserver": default: if !svc.allowUnsupported { unsupported.appendRepo(repo) } } } else { // If we've already seen this repository, we overwrite the // Commit/Branch fields with the latest value we have
func (svc *Service) ResolveNamespace(ctx context.Context, namespace string) (string, error) { if namespace == "" { // if no namespace is provided, default to logged in user as namespace var resp struct {
random_line_split
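The Go ResolveNamespace in the record above falls back to the logged-in user when no namespace is given, then looks the name up as either a user or an organization via the namespaceQuery GraphQL document. A loose Python sketch of the same request shape using the requests library follows; the endpoint URL and token header format are placeholders for this sketch, not values taken from the source.

# Sketch of the namespace lookup performed by Service.ResolveNamespace.
import requests

NAMESPACE_QUERY = """
query NamespaceQuery($name: String!) {
  user(username: $name) { id }
  organization(name: $name) { id }
}
"""

def resolve_namespace(endpoint, token, name):
    # endpoint, e.g. "https://example.com/.api/graphql", and the
    # Authorization header format are assumptions for this sketch.
    resp = requests.post(
        endpoint,
        json={"query": NAMESPACE_QUERY, "variables": {"name": name}},
        headers={"Authorization": "token " + token},
    )
    resp.raise_for_status()
    data = resp.json().get("data") or {}
    if data.get("user"):
        return data["user"]["id"]
    if data.get("organization"):
        return data["organization"]["id"]
    raise ValueError("failed to resolve namespace %r: no user or organization found" % name)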
service.go
multierror.Error x.Start(ctx) specs, err := x.Wait() if progress != nil { x.LockedTaskStatuses(progress) done <- struct{}{} } if err != nil { if skipErrors { errs = multierror.Append(errs, err) } else { return nil, err } } // Add external changeset specs. for _, ic := range spec.ImportChangesets { repo, err := svc.resolveRepositoryName(ctx, ic.Repository) if err != nil { wrapped := errors.Wrapf(err, "resolving repository name %q", ic.Repository) if skipErrors { errs = multierror.Append(errs, wrapped) continue } else { return nil, wrapped } } for _, id := range ic.ExternalIDs { var sid string switch tid := id.(type) { case string: sid = tid case int, int8, int16, int32, int64: sid = strconv.FormatInt(reflect.ValueOf(id).Int(), 10) case uint, uint8, uint16, uint32, uint64: sid = strconv.FormatUint(reflect.ValueOf(id).Uint(), 10) case float32: sid = strconv.FormatFloat(float64(tid), 'f', -1, 32) case float64: sid = strconv.FormatFloat(tid, 'f', -1, 64) default: return nil, errors.Errorf("cannot convert value of type %T into a valid external ID: expected string or int", id) } specs = append(specs, &ChangesetSpec{ BaseRepository: repo.ID, ExternalChangeset: &ExternalChangeset{sid}, }) } } return specs, errs.ErrorOrNil() } func (svc *Service) ParseCampaignSpec(in io.Reader) (*CampaignSpec, string, error) { data, err := ioutil.ReadAll(in) if err != nil { return nil, "", errors.Wrap(err, "reading campaign spec") } spec, err := ParseCampaignSpec(data, svc.features) if err != nil { return nil, "", errors.Wrap(err, "parsing campaign spec") } return spec, string(data), nil } const namespaceQuery = ` query NamespaceQuery($name: String!) { user(username: $name) { id } organization(name: $name) { id } } ` const usernameQuery = ` query GetCurrentUserID { currentUser { id } } ` func (svc *Service) ResolveNamespace(ctx context.Context, namespace string) (string, error) { if namespace == "" { // if no namespace is provided, default to logged in user as namespace var resp struct { Data struct { CurrentUser struct { ID string `json:"id"` } `json:"currentUser"` } `json:"data"` } if ok, err := svc.client.NewRequest(usernameQuery, nil).DoRaw(ctx, &resp); err != nil || !ok { return "", errors.WithMessage(err, "failed to resolve namespace: no user logged in") } if resp.Data.CurrentUser.ID == "" { return "", errors.New("cannot resolve current user") } return resp.Data.CurrentUser.ID, nil } var result struct { Data struct { User *struct{ ID string } Organization *struct{ ID string } } Errors []interface{} } if ok, err := svc.client.NewRequest(namespaceQuery, map[string]interface{}{ "name": namespace, }).DoRaw(ctx, &result); err != nil || !ok { return "", err } if result.Data.User != nil { return result.Data.User.ID, nil } if result.Data.Organization != nil { return result.Data.Organization.ID, nil } return "", fmt.Errorf("failed to resolve namespace %q: no user or organization found", namespace) } func (svc *Service) ResolveRepositories(ctx context.Context, spec *CampaignSpec) ([]*graphql.Repository, error) { seen := map[string]*graphql.Repository{} unsupported := UnsupportedRepoSet{} // TODO: this could be trivially parallelised in the future. 
for _, on := range spec.On { repos, err := svc.ResolveRepositoriesOn(ctx, &on) if err != nil { return nil, errors.Wrapf(err, "resolving %q", on.String()) } for _, repo := range repos { if !repo.HasBranch() { continue } if other, ok := seen[repo.ID]; !ok { seen[repo.ID] = repo switch st := strings.ToLower(repo.ExternalRepository.ServiceType); st { case "github", "gitlab", "bitbucketserver": default: if !svc.allowUnsupported { unsupported.appendRepo(repo) } } } else { // If we've already seen this repository, we overwrite the // Commit/Branch fields with the latest value we have other.Commit = repo.Commit other.Branch = repo.Branch } } } final := make([]*graphql.Repository, 0, len(seen)) for _, repo := range seen { if !unsupported.includes(repo) { final = append(final, repo) } } if unsupported.hasUnsupported() { return final, unsupported } return final, nil } func (svc *Service) ResolveRepositoriesOn(ctx context.Context, on *OnQueryOrRepository) ([]*graphql.Repository, error) { if on.RepositoriesMatchingQuery != "" { return svc.resolveRepositorySearch(ctx, on.RepositoriesMatchingQuery) } else if on.Repository != "" && on.Branch != "" { repo, err := svc.resolveRepositoryNameAndBranch(ctx, on.Repository, on.Branch) if err != nil { return nil, err } return []*graphql.Repository{repo}, nil } else if on.Repository != "" { repo, err := svc.resolveRepositoryName(ctx, on.Repository) if err != nil { return nil, err } return []*graphql.Repository{repo}, nil } // This shouldn't happen on any campaign spec that has passed validation, // but, alas, software. return nil, ErrMalformedOnQueryOrRepository } const repositoryNameQuery = ` query Repository($name: String!, $queryCommit: Boolean!, $rev: String!) { repository(name: $name) { ...repositoryFields } } ` + graphql.RepositoryFieldsFragment func (svc *Service) resolveRepositoryName(ctx context.Context, name string) (*graphql.Repository, error) { var result struct{ Repository *graphql.Repository } if ok, err := svc.client.NewRequest(repositoryNameQuery, map[string]interface{}{ "name": name, "queryCommit": false, "rev": "", }).Do(ctx, &result); err != nil || !ok { return nil, err } if result.Repository == nil { return nil, errors.New("no repository found") } return result.Repository, nil } func (svc *Service) resolveRepositoryNameAndBranch(ctx context.Context, name, branch string) (*graphql.Repository, error) { var result struct{ Repository *graphql.Repository } if ok, err := svc.client.NewRequest(repositoryNameQuery, map[string]interface{}{ "name": name, "queryCommit": true, "rev": branch, }).Do(ctx, &result); err != nil || !ok { return nil, err } if result.Repository == nil { return nil, errors.New("no repository found") } if result.Repository.Commit.OID == "" { return nil, fmt.Errorf("no branch matching %q found for repository %s", branch, name) } result.Repository.Branch = graphql.Branch{ Name: branch, Target: result.Repository.Commit, } return result.Repository, nil } // TODO: search result alerts. const repositorySearchQuery = ` query ChangesetRepos( $query: String!, $queryCommit: Boolean!, $rev: String!, ) { search(query: $query, version: V2) { results { results { __typename ... on Repository { ...repositoryFields } ... 
on FileMatch { file { path } repository { ...repositoryFields } } } } } } ` + graphql.RepositoryFieldsFragment func (svc *Service) resolveRepositorySearch(ctx context.Context, query string) ([]*graphql.Repository, error) { var result struct { Search struct { Results struct { Results []searchResult } } } if ok, err := svc.client.NewRequest(repositorySearchQuery, map[string]interface{}{ "query": setDefaultQueryCount(query), "queryCommit": false, "rev": "", }).Do(ctx, &result); err != nil || !ok { return nil, err } ids := map[string]*graphql.Repository{} var repos []*graphql.Repository for _, r := range result.Search.Results.Results { existing, ok := ids[r.ID] if !ok
{ repo := r.Repository repos = append(repos, &repo) ids[r.ID] = &repo }
conditional_block
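The importChangesets loop in ExecuteCampaignSpec above normalises external IDs that may arrive as strings, integers, or floats before turning them into changeset specs. The Python sketch below mirrors that type switch; the function name and messages are illustrative only, and repr() is only an approximation of Go's strconv.FormatFloat with precision -1.

# Rough Python equivalent of the external-ID type switch above. Illustrative
# only: bools are rejected explicitly because Python treats them as ints,
# and repr() stands in for strconv.FormatFloat(..., 'f', -1, 64).
def normalize_external_id(value):
    if isinstance(value, str):
        return value
    if isinstance(value, bool):
        raise TypeError(f"cannot convert value of type {type(value).__name__} into a valid external ID")
    if isinstance(value, int):
        return str(value)
    if isinstance(value, float):
        return repr(value)
    raise TypeError(f"cannot convert value of type {type(value).__name__} into a valid external ID")

assert [normalize_external_id(v) for v in ["1234", 42]] == ["1234", "42"]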
service.go
SpecID) (*graphql.Campaign, error) { var result struct { Campaign *graphql.Campaign `json:"applyCampaign"` } if ok, err := svc.newRequest(applyCampaignMutation, map[string]interface{}{ "campaignSpec": spec, }).Do(ctx, &result); err != nil || !ok { return nil, err } return result.Campaign, nil } const createCampaignSpecMutation = ` mutation CreateCampaignSpec( $namespace: ID!, $spec: String!, $changesetSpecs: [ID!]! ) { createCampaignSpec( namespace: $namespace, campaignSpec: $spec, changesetSpecs: $changesetSpecs ) { id applyURL } } ` func (svc *Service)
(ctx context.Context, namespace, spec string, ids []ChangesetSpecID) (CampaignSpecID, string, error) { var result struct { CreateCampaignSpec struct { ID string ApplyURL string } } if ok, err := svc.client.NewRequest(createCampaignSpecMutation, map[string]interface{}{ "namespace": namespace, "spec": spec, "changesetSpecs": ids, }).Do(ctx, &result); err != nil || !ok { return "", "", err } return CampaignSpecID(result.CreateCampaignSpec.ID), result.CreateCampaignSpec.ApplyURL, nil } const createChangesetSpecMutation = ` mutation CreateChangesetSpec($spec: String!) { createChangesetSpec(changesetSpec: $spec) { ... on HiddenChangesetSpec { id } ... on VisibleChangesetSpec { id } } } ` func (svc *Service) CreateChangesetSpec(ctx context.Context, spec *ChangesetSpec) (ChangesetSpecID, error) { raw, err := json.Marshal(spec) if err != nil { return "", errors.Wrap(err, "marshalling changeset spec JSON") } var result struct { CreateChangesetSpec struct { ID string } } if ok, err := svc.newRequest(createChangesetSpecMutation, map[string]interface{}{ "spec": string(raw), }).Do(ctx, &result); err != nil || !ok { return "", err } return ChangesetSpecID(result.CreateChangesetSpec.ID), nil } func (svc *Service) NewExecutionCache(dir string) ExecutionCache { if dir == "" { return &ExecutionNoOpCache{} } return &ExecutionDiskCache{dir} } type ExecutorOpts struct { Cache ExecutionCache Creator *WorkspaceCreator Parallelism int Timeout time.Duration ClearCache bool KeepLogs bool TempDir string CacheDir string } func (svc *Service) NewExecutor(opts ExecutorOpts) Executor { return newExecutor(opts, svc.client, svc.features) } func (svc *Service) NewWorkspaceCreator(dir string, cleanArchives bool) *WorkspaceCreator { return &WorkspaceCreator{dir: dir, client: svc.client, deleteZips: cleanArchives} } func (svc *Service) SetDockerImages(ctx context.Context, spec *CampaignSpec, progress func(i int)) error { for i, step := range spec.Steps { image, err := getDockerImageContentDigest(ctx, step.Container) if err != nil { return err } spec.Steps[i].image = image progress(i + 1) } return nil } func (svc *Service) ExecuteCampaignSpec(ctx context.Context, repos []*graphql.Repository, x Executor, spec *CampaignSpec, progress func([]*TaskStatus), skipErrors bool) ([]*ChangesetSpec, error) { for _, repo := range repos { x.AddTask(repo, spec.Steps, spec.TransformChanges, spec.ChangesetTemplate) } done := make(chan struct{}) if progress != nil { go func() { x.LockedTaskStatuses(progress) ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() for { select { case <-ticker.C: x.LockedTaskStatuses(progress) case <-done: return } } }() } var errs *multierror.Error x.Start(ctx) specs, err := x.Wait() if progress != nil { x.LockedTaskStatuses(progress) done <- struct{}{} } if err != nil { if skipErrors { errs = multierror.Append(errs, err) } else { return nil, err } } // Add external changeset specs. 
for _, ic := range spec.ImportChangesets { repo, err := svc.resolveRepositoryName(ctx, ic.Repository) if err != nil { wrapped := errors.Wrapf(err, "resolving repository name %q", ic.Repository) if skipErrors { errs = multierror.Append(errs, wrapped) continue } else { return nil, wrapped } } for _, id := range ic.ExternalIDs { var sid string switch tid := id.(type) { case string: sid = tid case int, int8, int16, int32, int64: sid = strconv.FormatInt(reflect.ValueOf(id).Int(), 10) case uint, uint8, uint16, uint32, uint64: sid = strconv.FormatUint(reflect.ValueOf(id).Uint(), 10) case float32: sid = strconv.FormatFloat(float64(tid), 'f', -1, 32) case float64: sid = strconv.FormatFloat(tid, 'f', -1, 64) default: return nil, errors.Errorf("cannot convert value of type %T into a valid external ID: expected string or int", id) } specs = append(specs, &ChangesetSpec{ BaseRepository: repo.ID, ExternalChangeset: &ExternalChangeset{sid}, }) } } return specs, errs.ErrorOrNil() } func (svc *Service) ParseCampaignSpec(in io.Reader) (*CampaignSpec, string, error) { data, err := ioutil.ReadAll(in) if err != nil { return nil, "", errors.Wrap(err, "reading campaign spec") } spec, err := ParseCampaignSpec(data, svc.features) if err != nil { return nil, "", errors.Wrap(err, "parsing campaign spec") } return spec, string(data), nil } const namespaceQuery = ` query NamespaceQuery($name: String!) { user(username: $name) { id } organization(name: $name) { id } } ` const usernameQuery = ` query GetCurrentUserID { currentUser { id } } ` func (svc *Service) ResolveNamespace(ctx context.Context, namespace string) (string, error) { if namespace == "" { // if no namespace is provided, default to logged in user as namespace var resp struct { Data struct { CurrentUser struct { ID string `json:"id"` } `json:"currentUser"` } `json:"data"` } if ok, err := svc.client.NewRequest(usernameQuery, nil).DoRaw(ctx, &resp); err != nil || !ok { return "", errors.WithMessage(err, "failed to resolve namespace: no user logged in") } if resp.Data.CurrentUser.ID == "" { return "", errors.New("cannot resolve current user") } return resp.Data.CurrentUser.ID, nil } var result struct { Data struct { User *struct{ ID string } Organization *struct{ ID string } } Errors []interface{} } if ok, err := svc.client.NewRequest(namespaceQuery, map[string]interface{}{ "name": namespace, }).DoRaw(ctx, &result); err != nil || !ok { return "", err } if result.Data.User != nil { return result.Data.User.ID, nil } if result.Data.Organization != nil { return result.Data.Organization.ID, nil } return "", fmt.Errorf("failed to resolve namespace %q: no user or organization found", namespace) } func (svc *Service) ResolveRepositories(ctx context.Context, spec *CampaignSpec) ([]*graphql.Repository, error) { seen := map[string]*graphql.Repository{} unsupported := UnsupportedRepoSet{} // TODO: this could be trivially parallelised in the future. for _, on := range spec.On { repos, err := svc.ResolveRepositoriesOn(ctx, &on) if err != nil { return nil, errors.Wrapf(err, "resolving %q", on.String()) } for _, repo := range repos { if !repo.HasBranch() { continue } if other, ok := seen[repo.ID]; !ok { seen[repo.ID] = repo switch st := strings.ToLower(repo.ExternalRepository.ServiceType); st { case "github", "gitlab", "bitbucketserver": default: if !svc.allowUnsupported { unsupported.appendRepo(repo) } } } else { // If we've already seen this repository, we overwrite the // Commit/Branch fields with the latest value we have
CreateCampaignSpec
identifier_name
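CreateChangesetSpec above marshals the changeset spec to JSON and passes that string as the single mutation argument. The Python sketch below shows the same call shape; it reuses the endpoint and header assumptions from the earlier namespace sketch, so none of these names come from src-cli itself.

# Sketch of the CreateChangesetSpec mutation as a raw GraphQL call. Note that
# the spec is JSON-encoded and sent as a *string* variable, mirroring the
# json.Marshal step in the Go code. Endpoint/header details are assumptions.
import json
import os
import requests

CREATE_CHANGESET_SPEC = """
mutation CreateChangesetSpec($spec: String!) {
  createChangesetSpec(changesetSpec: $spec) {
    ... on HiddenChangesetSpec { id }
    ... on VisibleChangesetSpec { id }
  }
}
"""

def create_changeset_spec(spec_dict):
    resp = requests.post(
        os.environ["SRC_ENDPOINT"].rstrip("/") + "/.api/graphql",
        json={"query": CREATE_CHANGESET_SPEC,
              "variables": {"spec": json.dumps(spec_dict)}},
        headers={"Authorization": "token " + os.environ["SRC_ACCESS_TOKEN"]},
    )
    resp.raise_for_status()
    return resp.json()["data"]["createChangesetSpec"]["id"]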
service.go
sid = strconv.FormatInt(reflect.ValueOf(id).Int(), 10) case uint, uint8, uint16, uint32, uint64: sid = strconv.FormatUint(reflect.ValueOf(id).Uint(), 10) case float32: sid = strconv.FormatFloat(float64(tid), 'f', -1, 32) case float64: sid = strconv.FormatFloat(tid, 'f', -1, 64) default: return nil, errors.Errorf("cannot convert value of type %T into a valid external ID: expected string or int", id) } specs = append(specs, &ChangesetSpec{ BaseRepository: repo.ID, ExternalChangeset: &ExternalChangeset{sid}, }) } } return specs, errs.ErrorOrNil() } func (svc *Service) ParseCampaignSpec(in io.Reader) (*CampaignSpec, string, error) { data, err := ioutil.ReadAll(in) if err != nil { return nil, "", errors.Wrap(err, "reading campaign spec") } spec, err := ParseCampaignSpec(data, svc.features) if err != nil { return nil, "", errors.Wrap(err, "parsing campaign spec") } return spec, string(data), nil } const namespaceQuery = ` query NamespaceQuery($name: String!) { user(username: $name) { id } organization(name: $name) { id } } ` const usernameQuery = ` query GetCurrentUserID { currentUser { id } } ` func (svc *Service) ResolveNamespace(ctx context.Context, namespace string) (string, error) { if namespace == "" { // if no namespace is provided, default to logged in user as namespace var resp struct { Data struct { CurrentUser struct { ID string `json:"id"` } `json:"currentUser"` } `json:"data"` } if ok, err := svc.client.NewRequest(usernameQuery, nil).DoRaw(ctx, &resp); err != nil || !ok { return "", errors.WithMessage(err, "failed to resolve namespace: no user logged in") } if resp.Data.CurrentUser.ID == "" { return "", errors.New("cannot resolve current user") } return resp.Data.CurrentUser.ID, nil } var result struct { Data struct { User *struct{ ID string } Organization *struct{ ID string } } Errors []interface{} } if ok, err := svc.client.NewRequest(namespaceQuery, map[string]interface{}{ "name": namespace, }).DoRaw(ctx, &result); err != nil || !ok { return "", err } if result.Data.User != nil { return result.Data.User.ID, nil } if result.Data.Organization != nil { return result.Data.Organization.ID, nil } return "", fmt.Errorf("failed to resolve namespace %q: no user or organization found", namespace) } func (svc *Service) ResolveRepositories(ctx context.Context, spec *CampaignSpec) ([]*graphql.Repository, error) { seen := map[string]*graphql.Repository{} unsupported := UnsupportedRepoSet{} // TODO: this could be trivially parallelised in the future. 
for _, on := range spec.On { repos, err := svc.ResolveRepositoriesOn(ctx, &on) if err != nil { return nil, errors.Wrapf(err, "resolving %q", on.String()) } for _, repo := range repos { if !repo.HasBranch() { continue } if other, ok := seen[repo.ID]; !ok { seen[repo.ID] = repo switch st := strings.ToLower(repo.ExternalRepository.ServiceType); st { case "github", "gitlab", "bitbucketserver": default: if !svc.allowUnsupported { unsupported.appendRepo(repo) } } } else { // If we've already seen this repository, we overwrite the // Commit/Branch fields with the latest value we have other.Commit = repo.Commit other.Branch = repo.Branch } } } final := make([]*graphql.Repository, 0, len(seen)) for _, repo := range seen { if !unsupported.includes(repo) { final = append(final, repo) } } if unsupported.hasUnsupported() { return final, unsupported } return final, nil } func (svc *Service) ResolveRepositoriesOn(ctx context.Context, on *OnQueryOrRepository) ([]*graphql.Repository, error) { if on.RepositoriesMatchingQuery != "" { return svc.resolveRepositorySearch(ctx, on.RepositoriesMatchingQuery) } else if on.Repository != "" && on.Branch != "" { repo, err := svc.resolveRepositoryNameAndBranch(ctx, on.Repository, on.Branch) if err != nil { return nil, err } return []*graphql.Repository{repo}, nil } else if on.Repository != "" { repo, err := svc.resolveRepositoryName(ctx, on.Repository) if err != nil { return nil, err } return []*graphql.Repository{repo}, nil } // This shouldn't happen on any campaign spec that has passed validation, // but, alas, software. return nil, ErrMalformedOnQueryOrRepository } const repositoryNameQuery = ` query Repository($name: String!, $queryCommit: Boolean!, $rev: String!) { repository(name: $name) { ...repositoryFields } } ` + graphql.RepositoryFieldsFragment func (svc *Service) resolveRepositoryName(ctx context.Context, name string) (*graphql.Repository, error) { var result struct{ Repository *graphql.Repository } if ok, err := svc.client.NewRequest(repositoryNameQuery, map[string]interface{}{ "name": name, "queryCommit": false, "rev": "", }).Do(ctx, &result); err != nil || !ok { return nil, err } if result.Repository == nil { return nil, errors.New("no repository found") } return result.Repository, nil } func (svc *Service) resolveRepositoryNameAndBranch(ctx context.Context, name, branch string) (*graphql.Repository, error) { var result struct{ Repository *graphql.Repository } if ok, err := svc.client.NewRequest(repositoryNameQuery, map[string]interface{}{ "name": name, "queryCommit": true, "rev": branch, }).Do(ctx, &result); err != nil || !ok { return nil, err } if result.Repository == nil { return nil, errors.New("no repository found") } if result.Repository.Commit.OID == "" { return nil, fmt.Errorf("no branch matching %q found for repository %s", branch, name) } result.Repository.Branch = graphql.Branch{ Name: branch, Target: result.Repository.Commit, } return result.Repository, nil } // TODO: search result alerts. const repositorySearchQuery = ` query ChangesetRepos( $query: String!, $queryCommit: Boolean!, $rev: String!, ) { search(query: $query, version: V2) { results { results { __typename ... on Repository { ...repositoryFields } ... 
on FileMatch { file { path } repository { ...repositoryFields } } } } } } ` + graphql.RepositoryFieldsFragment func (svc *Service) resolveRepositorySearch(ctx context.Context, query string) ([]*graphql.Repository, error) { var result struct { Search struct { Results struct { Results []searchResult } } } if ok, err := svc.client.NewRequest(repositorySearchQuery, map[string]interface{}{ "query": setDefaultQueryCount(query), "queryCommit": false, "rev": "", }).Do(ctx, &result); err != nil || !ok { return nil, err } ids := map[string]*graphql.Repository{} var repos []*graphql.Repository for _, r := range result.Search.Results.Results { existing, ok := ids[r.ID] if !ok { repo := r.Repository repos = append(repos, &repo) ids[r.ID] = &repo } else { for file := range r.FileMatches { existing.FileMatches[file] = true } } } return repos, nil } var defaultQueryCountRegex = regexp.MustCompile(`\bcount:\d+\b`) const hardCodedCount = " count:999999" func setDefaultQueryCount(query string) string { if defaultQueryCountRegex.MatchString(query) { return query } return query + hardCodedCount } type searchResult struct { graphql.Repository } func (sr *searchResult) UnmarshalJSON(data []byte) error
{ var tn struct { Typename string `json:"__typename"` } if err := json.Unmarshal(data, &tn); err != nil { return err } switch tn.Typename { case "FileMatch": var result struct { Repository graphql.Repository File struct { Path string } } if err := json.Unmarshal(data, &result); err != nil { return err }
identifier_body
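setDefaultQueryCount above guards the repository search so that a query without an explicit count: filter gets an effectively unlimited one appended. The same guard, ported to Python as a small self-check; the regex and suffix are copied from the code above.

# Python port of setDefaultQueryCount and its regex from the code above:
# if the search query already pins a count, keep it; otherwise append the
# hard-coded " count:999999" suffix.
import re

DEFAULT_COUNT_RE = re.compile(r"\bcount:\d+\b")
HARD_CODED_COUNT = " count:999999"

def set_default_query_count(query: str) -> str:
    if DEFAULT_COUNT_RE.search(query):
        return query
    return query + HARD_CODED_COUNT

assert set_default_query_count("repo:^github lang:go") == "repo:^github lang:go count:999999"
assert set_default_query_count("lang:go count:50") == "lang:go count:50"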
debian.py
""" @attrs(eq=False, order=False, frozen=True, hash=False, slots=True, str=False) class Version(object): """ Rich comparison of Debian package versions as first-class Python objects. The :class:`Version` class is a subclass of the built in :class:`str` type that implements rich comparison according to the version sorting order defined in the Debian Policy Manual. Use it to sort Debian package versions from oldest to newest in ascending version order like this: >>> from univers.debian import Version >>> unsorted = ['0.1', '0.5', '1.0', '2.0', '3.0', '1:0.4', '2:0.3'] >>> print([str(v) for v in sorted(Version.from_string(s) for s in unsorted)]) ['0.1', '0.5', '1.0', '2.0', '3.0', '1:0.4', '2:0.3'] This example uses 'epoch' numbers (the numbers before the colons) to demonstrate that this version sorting order is different from regular sorting and 'natural order sorting'. """ epoch = attrib(default=0) upstream = attrib(default=None) revision = attrib(default="0") def __str__(self, *args, **kwargs): if self.epoch: version = f"{self.epoch}:{self.upstream}" else: version = f"{self.upstream}" if self.revision not in (None, "0"): version += f"-{self.revision}" return version def __repr__(self, *args, **kwargs): return str(self) def __hash__(self): return hash(self.tuple()) def __eq__(self, other): return type(self) is type(other) and self.tuple() == other.tuple() def __ne__(self, other): return not self.__eq__(other) def __lt__(self, other): if type(self) is type(other): return eval_constraint(self, "<<", other) return NotImplemented def __le__(self, other): if type(self) is type(other): return eval_constraint(self, "<=", other) return NotImplemented def __gt__(self, other): if type(self) is type(other): return eval_constraint(self, ">>", other) return NotImplemented def __ge__(self, other): if type(self) is type(other): return eval_constraint(self, ">=", other) return NotImplemented @classmethod def from_string(cls, version): if not version and not isinstance(version, str): raise ValueError('Invalid version string: "{}"'.format(version)) version = version.strip() if not version: raise ValueError('Invalid version string: "{}"'.format(version)) if not _is_valid_version(version): raise ValueError('Invalid version string: "{}"'.format(version)) if ":" in version: epoch, _, version = version.partition(":") epoch = int(epoch) else: epoch = 0 if "-" in version:
else: upstream = version revision = "0" return cls(epoch=epoch, upstream=upstream, revision=revision) def compare(self, other_version): return compare_versions(self, other_version) def to_dict(self): return asdict(self) def tuple(self): return self.epoch, self.upstream, self.revision _is_valid_version = re.compile( r"^" # epoch must start with a digit r"(\d+:)?" # upstream must start with a digit r"\d" r"(" # upstream can contain only alphanumerics and the characters . + - # ~ (full stop, plus, hyphen, tilde) # we are adding the extra check that it must end with alphanum r"[A-Za-z0-9\.\+\-\~]*[A-Za-z0-9]" r"|" # If there is no debian_revision then hyphens are not allowed. # we are adding the extra check that it must end with alphanum r"[A-Za-z0-9\.\+\~]*[A-Za-z0-9]-[A-Za-z0-9\+\.\~]*[A-Za-z0-9]" r")?" r"$" ).match def eval_constraint(version1, operator, version2): """ Evaluate a versions constraint where two Debian package versions are compared with an operator such as < or >. Return True if the constraint is satisfied and False otherwise. """ version1 = coerce_version(version1) version2 = coerce_version(version2) result = compare_versions(version1, version2) # See https://www.debian.org/doc/debian-policy/ch-relationships.html#syntax-of-relationship-fields operators = { "<=": operator_module.le, # legacy for compat "<": operator_module.le, ">=": operator_module.ge, # legacy for compat ">": operator_module.ge, "<<": operator_module.lt, ">>": operator_module.gt, "=": operator_module.eq, } try: operator = operators[operator] except KeyError: msg = f"Unsupported Debian version constraint comparison operator: {version1} {operator} {version2}" raise ValueError(msg) return operator(result, 0) def compare_versions_key(x): """ Return a key version function suitable for use in sorted(). """ return cmp_to_key(compare_versions)(x) def compare_strings_key(x): """ Return a key string function suitable for use in sorted(). """ return cmp_to_key(compare_strings)(x) def compare_strings(version1, version2): """ Compare two version strings (upstream or revision) using Debain semantics and return one of the following integer numbers: - -1 means version1 sorts before version2 - 0 means version1 and version2 are equal - 1 means version1 sorts after version2 """ logger.debug("Comparing Debian version number substrings %r and %r ..", version1, version2) mapping = characters_order v1 = list(version1) v2 = list(version2) while v1 or v2: # Quoting from the 'deb-version' manual page: First the initial part of each # string consisting entirely of non-digit characters is determined. These two # parts (one of which may be empty) are compared lexically. If a difference is # found it is returned. The lexical comparison is a comparison of ASCII values # modified so that all the letters sort earlier than all the non-letters and so # that a tilde sorts before anything, even the end of a part. For example, the # following parts are in sorted order: '~~', '~~a', '~', the empty part, 'a'. 
p1 = get_non_digit_prefix(v1) p2 = get_non_digit_prefix(v2) if p1 != p2: logger.debug("Comparing non-digit prefixes %r and %r ..", p1, p2) for c1, c2 in zip_longest(p1, p2, fillvalue=""): logger.debug( "Performing lexical comparison between characters %r and %r ..", c1, c2 ) o1 = mapping.get(c1) o2 = mapping.get(c2) if o1 < o2: logger.debug( "Determined that %r sorts before %r (based on lexical comparison).", version1, version2, ) return -1 elif o1 > o2: logger.debug( "Determined that %r sorts after %r (based on lexical comparison).", version1, version2, ) return 1 elif p1: logger.debug("Skipping matching non-digit prefix %r ..", p1) # Quoting from the 'deb-version' manual page: Then the initial part of the # remainder of each string which consists entirely of digit characters is # determined. The numerical values of these two parts are compared, and any # difference found is returned as the result of the comparison. For these purposes # an empty string (which can only occur at the end of one or both version strings # being compared) counts as zero. d1 = get_digit_prefix(v1) d2 = get_digit_prefix(v2) logger.debug("Comparing numeric prefixes %i and %i ..", d1, d2) if d1 < d2: logger.debug( "Determined that %r sorts before %r (based on numeric comparison).", version1, version2, ) return -1 elif d1 > d2: logger.debug( "Determined that %r sorts after %r (based on numeric comparison).", version1, version2, ) return 1 else: logger.debug("Determined that numeric prefixes match.") logger.debug("Determined that version numbers are equal.") return 0 def compare_versions(version1, version2): """ Compare two Version objects or strings and return one of the following integer numbers
upstream, _, revision = version.rpartition("-")
conditional_block
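Version.from_string above peels off an optional epoch with partition(":") and splits upstream from revision on the last hyphen with rpartition("-"). A short usage sketch, assuming the univers package this debian.py belongs to is installed and matches the code shown here:

# Usage sketch for Version.from_string as defined above; assumes the
# univers package is importable and matches the code shown here.
from univers.debian import Version

v = Version.from_string("1:1.2.3-4ubuntu1")
assert (v.epoch, v.upstream, v.revision) == (1, "1.2.3", "4ubuntu1")

# Without an epoch or revision, the defaults 0 and "0" apply.
assert Version.from_string("2.0").tuple() == (0, "2.0", "0")

# Hyphens may appear inside upstream; only the last one starts the revision.
assert Version.from_string("1.0-rc1-2").upstream == "1.0-rc1"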
debian.py
""" @attrs(eq=False, order=False, frozen=True, hash=False, slots=True, str=False) class Version(object): """ Rich comparison of Debian package versions as first-class Python objects. The :class:`Version` class is a subclass of the built in :class:`str` type that implements rich comparison according to the version sorting order defined in the Debian Policy Manual. Use it to sort Debian package versions from oldest to newest in ascending version order like this: >>> from univers.debian import Version >>> unsorted = ['0.1', '0.5', '1.0', '2.0', '3.0', '1:0.4', '2:0.3'] >>> print([str(v) for v in sorted(Version.from_string(s) for s in unsorted)]) ['0.1', '0.5', '1.0', '2.0', '3.0', '1:0.4', '2:0.3'] This example uses 'epoch' numbers (the numbers before the colons) to demonstrate that this version sorting order is different from regular sorting and 'natural order sorting'. """ epoch = attrib(default=0) upstream = attrib(default=None) revision = attrib(default="0") def __str__(self, *args, **kwargs): if self.epoch: version = f"{self.epoch}:{self.upstream}" else: version = f"{self.upstream}" if self.revision not in (None, "0"): version += f"-{self.revision}" return version def __repr__(self, *args, **kwargs): return str(self) def __hash__(self): return hash(self.tuple()) def __eq__(self, other): return type(self) is type(other) and self.tuple() == other.tuple() def __ne__(self, other): return not self.__eq__(other) def __lt__(self, other): if type(self) is type(other): return eval_constraint(self, "<<", other) return NotImplemented def __le__(self, other): if type(self) is type(other): return eval_constraint(self, "<=", other) return NotImplemented def __gt__(self, other): if type(self) is type(other): return eval_constraint(self, ">>", other) return NotImplemented def __ge__(self, other): if type(self) is type(other): return eval_constraint(self, ">=", other) return NotImplemented @classmethod def from_string(cls, version): if not version and not isinstance(version, str): raise ValueError('Invalid version string: "{}"'.format(version)) version = version.strip() if not version: raise ValueError('Invalid version string: "{}"'.format(version)) if not _is_valid_version(version): raise ValueError('Invalid version string: "{}"'.format(version)) if ":" in version: epoch, _, version = version.partition(":") epoch = int(epoch) else: epoch = 0 if "-" in version: upstream, _, revision = version.rpartition("-") else: upstream = version revision = "0" return cls(epoch=epoch, upstream=upstream, revision=revision) def compare(self, other_version): return compare_versions(self, other_version) def to_dict(self): return asdict(self) def tuple(self): return self.epoch, self.upstream, self.revision _is_valid_version = re.compile( r"^" # epoch must start with a digit r"(\d+:)?" # upstream must start with a digit r"\d" r"(" # upstream can contain only alphanumerics and the characters . + - # ~ (full stop, plus, hyphen, tilde) # we are adding the extra check that it must end with alphanum r"[A-Za-z0-9\.\+\-\~]*[A-Za-z0-9]" r"|" # If there is no debian_revision then hyphens are not allowed. # we are adding the extra check that it must end with alphanum r"[A-Za-z0-9\.\+\~]*[A-Za-z0-9]-[A-Za-z0-9\+\.\~]*[A-Za-z0-9]" r")?" r"$" ).match def eval_constraint(version1, operator, version2): """ Evaluate a versions constraint where two Debian package versions are compared with an operator such as < or >. Return True if the constraint is satisfied and False otherwise. 
""" version1 = coerce_version(version1) version2 = coerce_version(version2) result = compare_versions(version1, version2) # See https://www.debian.org/doc/debian-policy/ch-relationships.html#syntax-of-relationship-fields operators = { "<=": operator_module.le, # legacy for compat "<": operator_module.le, ">=": operator_module.ge, # legacy for compat ">": operator_module.ge, "<<": operator_module.lt, ">>": operator_module.gt, "=": operator_module.eq, } try: operator = operators[operator] except KeyError: msg = f"Unsupported Debian version constraint comparison operator: {version1} {operator} {version2}" raise ValueError(msg) return operator(result, 0) def compare_versions_key(x): """ Return a key version function suitable for use in sorted(). """ return cmp_to_key(compare_versions)(x) def compare_strings_key(x): """ Return a key string function suitable for use in sorted(). """ return cmp_to_key(compare_strings)(x) def compare_strings(version1, version2):
p2 = get_non_digit_prefix(v2) if p1 != p2: logger.debug("Comparing non-digit prefixes %r and %r ..", p1, p2) for c1, c2 in zip_longest(p1, p2, fillvalue=""): logger.debug( "Performing lexical comparison between characters %r and %r ..", c1, c2 ) o1 = mapping.get(c1) o2 = mapping.get(c2) if o1 < o2: logger.debug( "Determined that %r sorts before %r (based on lexical comparison).", version1, version2, ) return -1 elif o1 > o2: logger.debug( "Determined that %r sorts after %r (based on lexical comparison).", version1, version2, ) return 1 elif p1: logger.debug("Skipping matching non-digit prefix %r ..", p1) # Quoting from the 'deb-version' manual page: Then the initial part of the # remainder of each string which consists entirely of digit characters is # determined. The numerical values of these two parts are compared, and any # difference found is returned as the result of the comparison. For these purposes # an empty string (which can only occur at the end of one or both version strings # being compared) counts as zero. d1 = get_digit_prefix(v1) d2 = get_digit_prefix(v2) logger.debug("Comparing numeric prefixes %i and %i ..", d1, d2) if d1 < d2: logger.debug( "Determined that %r sorts before %r (based on numeric comparison).", version1, version2, ) return -1 elif d1 > d2: logger.debug( "Determined that %r sorts after %r (based on numeric comparison).", version1, version2, ) return 1 else: logger.debug("Determined that numeric prefixes match.") logger.debug("Determined that version numbers are equal.") return 0 def compare_versions(version1, version2): """ Compare two Version objects or strings and return one of the following integer numbers:
""" Compare two version strings (upstream or revision) using Debain semantics and return one of the following integer numbers: - -1 means version1 sorts before version2 - 0 means version1 and version2 are equal - 1 means version1 sorts after version2 """ logger.debug("Comparing Debian version number substrings %r and %r ..", version1, version2) mapping = characters_order v1 = list(version1) v2 = list(version2) while v1 or v2: # Quoting from the 'deb-version' manual page: First the initial part of each # string consisting entirely of non-digit characters is determined. These two # parts (one of which may be empty) are compared lexically. If a difference is # found it is returned. The lexical comparison is a comparison of ASCII values # modified so that all the letters sort earlier than all the non-letters and so # that a tilde sorts before anything, even the end of a part. For example, the # following parts are in sorted order: '~~', '~~a', '~', the empty part, 'a'. p1 = get_non_digit_prefix(v1)
identifier_body
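eval_constraint above maps Debian's relationship operators onto Python's operator module, with the legacy single-character forms "<" and ">" deliberately treated as "<=" and ">=" while "<<" and ">>" stay strict. A small usage sketch, again assuming univers is installed:

# Usage sketch for eval_constraint as defined above; assumes univers is
# installed. Note the legacy operators: "<" behaves like "<=", ">" like ">=".
from univers.debian import Version, eval_constraint

a = Version.from_string("1.0")
b = Version.from_string("1.0-1")

assert eval_constraint(a, "<<", b)       # a is strictly older than b
assert eval_constraint(a, "<", a)        # legacy "<" is really "<="
assert not eval_constraint(a, ">>", b)
assert eval_constraint(b, ">=", a)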
debian.py
""" @attrs(eq=False, order=False, frozen=True, hash=False, slots=True, str=False) class Version(object): """ Rich comparison of Debian package versions as first-class Python objects. The :class:`Version` class is a subclass of the built in :class:`str` type that implements rich comparison according to the version sorting order defined in the Debian Policy Manual. Use it to sort Debian package versions from oldest to newest in ascending version order like this: >>> from univers.debian import Version >>> unsorted = ['0.1', '0.5', '1.0', '2.0', '3.0', '1:0.4', '2:0.3'] >>> print([str(v) for v in sorted(Version.from_string(s) for s in unsorted)]) ['0.1', '0.5', '1.0', '2.0', '3.0', '1:0.4', '2:0.3'] This example uses 'epoch' numbers (the numbers before the colons) to demonstrate that this version sorting order is different from regular sorting and 'natural order sorting'. """ epoch = attrib(default=0) upstream = attrib(default=None) revision = attrib(default="0") def __str__(self, *args, **kwargs): if self.epoch: version = f"{self.epoch}:{self.upstream}" else: version = f"{self.upstream}" if self.revision not in (None, "0"): version += f"-{self.revision}" return version def __repr__(self, *args, **kwargs): return str(self) def __hash__(self): return hash(self.tuple()) def __eq__(self, other): return type(self) is type(other) and self.tuple() == other.tuple() def
(self, other): return not self.__eq__(other) def __lt__(self, other): if type(self) is type(other): return eval_constraint(self, "<<", other) return NotImplemented def __le__(self, other): if type(self) is type(other): return eval_constraint(self, "<=", other) return NotImplemented def __gt__(self, other): if type(self) is type(other): return eval_constraint(self, ">>", other) return NotImplemented def __ge__(self, other): if type(self) is type(other): return eval_constraint(self, ">=", other) return NotImplemented @classmethod def from_string(cls, version): if not version and not isinstance(version, str): raise ValueError('Invalid version string: "{}"'.format(version)) version = version.strip() if not version: raise ValueError('Invalid version string: "{}"'.format(version)) if not _is_valid_version(version): raise ValueError('Invalid version string: "{}"'.format(version)) if ":" in version: epoch, _, version = version.partition(":") epoch = int(epoch) else: epoch = 0 if "-" in version: upstream, _, revision = version.rpartition("-") else: upstream = version revision = "0" return cls(epoch=epoch, upstream=upstream, revision=revision) def compare(self, other_version): return compare_versions(self, other_version) def to_dict(self): return asdict(self) def tuple(self): return self.epoch, self.upstream, self.revision _is_valid_version = re.compile( r"^" # epoch must start with a digit r"(\d+:)?" # upstream must start with a digit r"\d" r"(" # upstream can contain only alphanumerics and the characters . + - # ~ (full stop, plus, hyphen, tilde) # we are adding the extra check that it must end with alphanum r"[A-Za-z0-9\.\+\-\~]*[A-Za-z0-9]" r"|" # If there is no debian_revision then hyphens are not allowed. # we are adding the extra check that it must end with alphanum r"[A-Za-z0-9\.\+\~]*[A-Za-z0-9]-[A-Za-z0-9\+\.\~]*[A-Za-z0-9]" r")?" r"$" ).match def eval_constraint(version1, operator, version2): """ Evaluate a versions constraint where two Debian package versions are compared with an operator such as < or >. Return True if the constraint is satisfied and False otherwise. """ version1 = coerce_version(version1) version2 = coerce_version(version2) result = compare_versions(version1, version2) # See https://www.debian.org/doc/debian-policy/ch-relationships.html#syntax-of-relationship-fields operators = { "<=": operator_module.le, # legacy for compat "<": operator_module.le, ">=": operator_module.ge, # legacy for compat ">": operator_module.ge, "<<": operator_module.lt, ">>": operator_module.gt, "=": operator_module.eq, } try: operator = operators[operator] except KeyError: msg = f"Unsupported Debian version constraint comparison operator: {version1} {operator} {version2}" raise ValueError(msg) return operator(result, 0) def compare_versions_key(x): """ Return a key version function suitable for use in sorted(). """ return cmp_to_key(compare_versions)(x) def compare_strings_key(x): """ Return a key string function suitable for use in sorted(). 
""" return cmp_to_key(compare_strings)(x) def compare_strings(version1, version2): """ Compare two version strings (upstream or revision) using Debain semantics and return one of the following integer numbers: - -1 means version1 sorts before version2 - 0 means version1 and version2 are equal - 1 means version1 sorts after version2 """ logger.debug("Comparing Debian version number substrings %r and %r ..", version1, version2) mapping = characters_order v1 = list(version1) v2 = list(version2) while v1 or v2: # Quoting from the 'deb-version' manual page: First the initial part of each # string consisting entirely of non-digit characters is determined. These two # parts (one of which may be empty) are compared lexically. If a difference is # found it is returned. The lexical comparison is a comparison of ASCII values # modified so that all the letters sort earlier than all the non-letters and so # that a tilde sorts before anything, even the end of a part. For example, the # following parts are in sorted order: '~~', '~~a', '~', the empty part, 'a'. p1 = get_non_digit_prefix(v1) p2 = get_non_digit_prefix(v2) if p1 != p2: logger.debug("Comparing non-digit prefixes %r and %r ..", p1, p2) for c1, c2 in zip_longest(p1, p2, fillvalue=""): logger.debug( "Performing lexical comparison between characters %r and %r ..", c1, c2 ) o1 = mapping.get(c1) o2 = mapping.get(c2) if o1 < o2: logger.debug( "Determined that %r sorts before %r (based on lexical comparison).", version1, version2, ) return -1 elif o1 > o2: logger.debug( "Determined that %r sorts after %r (based on lexical comparison).", version1, version2, ) return 1 elif p1: logger.debug("Skipping matching non-digit prefix %r ..", p1) # Quoting from the 'deb-version' manual page: Then the initial part of the # remainder of each string which consists entirely of digit characters is # determined. The numerical values of these two parts are compared, and any # difference found is returned as the result of the comparison. For these purposes # an empty string (which can only occur at the end of one or both version strings # being compared) counts as zero. d1 = get_digit_prefix(v1) d2 = get_digit_prefix(v2) logger.debug("Comparing numeric prefixes %i and %i ..", d1, d2) if d1 < d2: logger.debug( "Determined that %r sorts before %r (based on numeric comparison).", version1, version2, ) return -1 elif d1 > d2: logger.debug( "Determined that %r sorts after %r (based on numeric comparison).", version1, version2, ) return 1 else: logger.debug("Determined that numeric prefixes match.") logger.debug("Determined that version numbers are equal.") return 0 def compare_versions(version1, version2): """ Compare two Version objects or strings and return one of the following integer numbers:
__ne__
identifier_name
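The comparison dunders above all delegate to eval_constraint, and from_string rejects anything _is_valid_version does not match (the upstream part must start with a digit). A quick sketch of both behaviours, assuming univers is installed:

# Equality/validation sketch for the Version class above; assumes univers
# is installed. Purely alphabetic strings fail the _is_valid_version regex.
from univers.debian import Version

assert Version.from_string("1.0") == Version.from_string("1.0")
assert Version.from_string("1.0") != Version.from_string("1.0-1")

try:
    Version.from_string("abc")
except ValueError as e:
    print(e)   # Invalid version string: "abc"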
debian.py
""" @attrs(eq=False, order=False, frozen=True, hash=False, slots=True, str=False) class Version(object): """ Rich comparison of Debian package versions as first-class Python objects. The :class:`Version` class is a subclass of the built in :class:`str` type that implements rich comparison according to the version sorting order defined in the Debian Policy Manual. Use it to sort Debian package versions from oldest to newest in ascending version order like this: >>> from univers.debian import Version >>> unsorted = ['0.1', '0.5', '1.0', '2.0', '3.0', '1:0.4', '2:0.3'] >>> print([str(v) for v in sorted(Version.from_string(s) for s in unsorted)]) ['0.1', '0.5', '1.0', '2.0', '3.0', '1:0.4', '2:0.3'] This example uses 'epoch' numbers (the numbers before the colons) to demonstrate that this version sorting order is different from regular sorting and 'natural order sorting'. """ epoch = attrib(default=0) upstream = attrib(default=None) revision = attrib(default="0") def __str__(self, *args, **kwargs): if self.epoch: version = f"{self.epoch}:{self.upstream}" else: version = f"{self.upstream}" if self.revision not in (None, "0"): version += f"-{self.revision}" return version def __repr__(self, *args, **kwargs): return str(self) def __hash__(self): return hash(self.tuple()) def __eq__(self, other): return type(self) is type(other) and self.tuple() == other.tuple() def __ne__(self, other): return not self.__eq__(other) def __lt__(self, other): if type(self) is type(other): return eval_constraint(self, "<<", other) return NotImplemented def __le__(self, other): if type(self) is type(other): return eval_constraint(self, "<=", other) return NotImplemented def __gt__(self, other): if type(self) is type(other): return eval_constraint(self, ">>", other) return NotImplemented def __ge__(self, other): if type(self) is type(other): return eval_constraint(self, ">=", other) return NotImplemented @classmethod def from_string(cls, version): if not version and not isinstance(version, str): raise ValueError('Invalid version string: "{}"'.format(version)) version = version.strip() if not version: raise ValueError('Invalid version string: "{}"'.format(version)) if not _is_valid_version(version): raise ValueError('Invalid version string: "{}"'.format(version)) if ":" in version: epoch, _, version = version.partition(":") epoch = int(epoch) else: epoch = 0 if "-" in version:
upstream = version revision = "0" return cls(epoch=epoch, upstream=upstream, revision=revision) def compare(self, other_version): return compare_versions(self, other_version) def to_dict(self): return asdict(self) def tuple(self): return self.epoch, self.upstream, self.revision _is_valid_version = re.compile( r"^" # epoch must start with a digit r"(\d+:)?" # upstream must start with a digit r"\d" r"(" # upstream can contain only alphanumerics and the characters . + - # ~ (full stop, plus, hyphen, tilde) # we are adding the extra check that it must end with alphanum r"[A-Za-z0-9\.\+\-\~]*[A-Za-z0-9]" r"|" # If there is no debian_revision then hyphens are not allowed. # we are adding the extra check that it must end with alphanum r"[A-Za-z0-9\.\+\~]*[A-Za-z0-9]-[A-Za-z0-9\+\.\~]*[A-Za-z0-9]" r")?" r"$" ).match def eval_constraint(version1, operator, version2): """ Evaluate a versions constraint where two Debian package versions are compared with an operator such as < or >. Return True if the constraint is satisfied and False otherwise. """ version1 = coerce_version(version1) version2 = coerce_version(version2) result = compare_versions(version1, version2) # See https://www.debian.org/doc/debian-policy/ch-relationships.html#syntax-of-relationship-fields operators = { "<=": operator_module.le, # legacy for compat "<": operator_module.le, ">=": operator_module.ge, # legacy for compat ">": operator_module.ge, "<<": operator_module.lt, ">>": operator_module.gt, "=": operator_module.eq, } try: operator = operators[operator] except KeyError: msg = f"Unsupported Debian version constraint comparison operator: {version1} {operator} {version2}" raise ValueError(msg) return operator(result, 0) def compare_versions_key(x): """ Return a key version function suitable for use in sorted(). """ return cmp_to_key(compare_versions)(x) def compare_strings_key(x): """ Return a key string function suitable for use in sorted(). """ return cmp_to_key(compare_strings)(x) def compare_strings(version1, version2): """ Compare two version strings (upstream or revision) using Debain semantics and return one of the following integer numbers: - -1 means version1 sorts before version2 - 0 means version1 and version2 are equal - 1 means version1 sorts after version2 """ logger.debug("Comparing Debian version number substrings %r and %r ..", version1, version2) mapping = characters_order v1 = list(version1) v2 = list(version2) while v1 or v2: # Quoting from the 'deb-version' manual page: First the initial part of each # string consisting entirely of non-digit characters is determined. These two # parts (one of which may be empty) are compared lexically. If a difference is # found it is returned. The lexical comparison is a comparison of ASCII values # modified so that all the letters sort earlier than all the non-letters and so # that a tilde sorts before anything, even the end of a part. For example, the # following parts are in sorted order: '~~', '~~a', '~', the empty part, 'a'. 
p1 = get_non_digit_prefix(v1) p2 = get_non_digit_prefix(v2) if p1 != p2: logger.debug("Comparing non-digit prefixes %r and %r ..", p1, p2) for c1, c2 in zip_longest(p1, p2, fillvalue=""): logger.debug( "Performing lexical comparison between characters %r and %r ..", c1, c2 ) o1 = mapping.get(c1) o2 = mapping.get(c2) if o1 < o2: logger.debug( "Determined that %r sorts before %r (based on lexical comparison).", version1, version2, ) return -1 elif o1 > o2: logger.debug( "Determined that %r sorts after %r (based on lexical comparison).", version1, version2, ) return 1 elif p1: logger.debug("Skipping matching non-digit prefix %r ..", p1) # Quoting from the 'deb-version' manual page: Then the initial part of the # remainder of each string which consists entirely of digit characters is # determined. The numerical values of these two parts are compared, and any # difference found is returned as the result of the comparison. For these purposes # an empty string (which can only occur at the end of one or both version strings # being compared) counts as zero. d1 = get_digit_prefix(v1) d2 = get_digit_prefix(v2) logger.debug("Comparing numeric prefixes %i and %i ..", d1, d2) if d1 < d2: logger.debug( "Determined that %r sorts before %r (based on numeric comparison).", version1, version2, ) return -1 elif d1 > d2: logger.debug( "Determined that %r sorts after %r (based on numeric comparison).", version1, version2, ) return 1 else: logger.debug("Determined that numeric prefixes match.") logger.debug("Determined that version numbers are equal.") return 0 def compare_versions(version1, version2): """ Compare two Version objects or strings and return one of the following integer numbers:
upstream, _, revision = version.rpartition("-") else:
random_line_split
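compare_strings above alternates between a lexical pass over the non-digit runs (letters before non-letters, tilde before everything, including the empty part) and a numeric pass over the digit runs. The standalone sketch below illustrates just that tilde rule; its char_order function is an assumption standing in for the module-level characters_order mapping, which is not shown in this excerpt.

# Standalone sketch of the comparison loop above, illustrating the rule that
# a tilde sorts before anything, even the end of a part. char_order below is
# an assumed ordering, not the module's characters_order mapping.
from itertools import zip_longest

def char_order(c):
    if c == "":
        return (0, 1, "")   # end of part: after tilde, before letters
    if c == "~":
        return (0, 0, "")   # tilde sorts before everything
    if c.isalpha():
        return (1, 0, c)    # letters before non-letters
    return (2, 0, c)        # remaining punctuation, by character

def split_prefix(chars, digits):
    # Pop the leading run of digit (or non-digit) characters off the list.
    out = []
    while chars and chars[0].isdigit() == digits:
        out.append(chars.pop(0))
    return "".join(out)

def compare_upstream(a, b):
    v1, v2 = list(a), list(b)
    while v1 or v2:
        p1, p2 = split_prefix(v1, False), split_prefix(v2, False)
        for c1, c2 in zip_longest(p1, p2, fillvalue=""):
            if char_order(c1) != char_order(c2):
                return -1 if char_order(c1) < char_order(c2) else 1
        d1 = int(split_prefix(v1, True) or 0)
        d2 = int(split_prefix(v2, True) or 0)
        if d1 != d2:
            return -1 if d1 < d2 else 1
    return 0

# "1.0~rc1" is *older* than "1.0": the tilde run compares below the empty part.
assert compare_upstream("1.0~rc1", "1.0") == -1
assert compare_upstream("1.0", "1.0a") == -1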
proxy.rs
)] pub enum Error { ForkingJail(minijail::Error), Io(io::Error), } pub type Result<T> = std::result::Result<T, Error>; impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Error::*; match self { ForkingJail(e) => write!(f, "Failed to fork jail process: {}", e), Io(e) => write!(f, "IO error configuring proxy device {}.", e), } } } const SOCKET_TIMEOUT_MS: u64 = 2000; #[derive(Debug, MsgOnSocket)] enum Command { Read { len: u32, info: BusAccessInfo, }, Write { len: u32, info: BusAccessInfo, data: [u8; 8], }, ReadConfig(u32), WriteConfig { reg_idx: u32, offset: u32, len: u32, data: [u8; 4], }, Shutdown, } #[derive(MsgOnSocket)] enum CommandResult { Ok, ReadResult([u8; 8]), ReadConfigResult(u32), } fn child_proc<D: BusDevice>(sock: UnixSeqpacket, device: &mut D) { let mut running = true; let sock = MsgSocket::<CommandResult, Command>::new(sock); while running { let cmd = match sock.recv() { Ok(cmd) => cmd, Err(err) => { error!("child device process failed recv: {}", err); break; } }; let res = match cmd { Command::Read { len, info } => { let mut buffer = [0u8; 8]; device.read(info, &mut buffer[0..len as usize]); sock.send(&CommandResult::ReadResult(buffer)) } Command::Write { len, info, data } => { let len = len as usize; device.write(info, &data[0..len]); // Command::Write does not have a result. Ok(()) } Command::ReadConfig(idx) => { let val = device.config_register_read(idx as usize); sock.send(&CommandResult::ReadConfigResult(val)) } Command::WriteConfig { reg_idx, offset, len, data, } => { let len = len as usize; device.config_register_write(reg_idx as usize, offset as u64, &data[0..len]); // Command::WriteConfig does not have a result. Ok(()) } Command::Shutdown => { running = false; sock.send(&CommandResult::Ok) } }; if let Err(e) = res { error!("child device process failed send: {}", e); } } } /// Wraps an inner `BusDevice` that is run inside a child process via fork. /// /// Because forks are very unfriendly to destructors and all memory mappings and file descriptors /// are inherited, this should be used as early as possible in the main process. pub struct ProxyDevice { sock: MsgSocket<Command, CommandResult>, pid: pid_t, debug_label: String, } impl ProxyDevice { /// Takes the given device and isolates it into another process via fork before returning. /// /// The forked process will automatically be terminated when this is dropped, so be sure to keep /// a reference. /// /// # Arguments /// * `device` - The device to isolate to another process. /// * `jail` - The jail to use for isolating the given device. /// * `keep_rds` - File descriptors that will be kept open in the child. pub fn new<D: BusDevice>( mut device: D, jail: &Minijail, mut keep_rds: Vec<RawDescriptor>, ) -> Result<ProxyDevice> { let debug_label = device.debug_label(); let (child_sock, parent_sock) = UnixSeqpacket::pair().map_err(Error::Io)?; keep_rds.push(child_sock.as_raw_descriptor()); // Forking here is safe as long as the program is still single threaded. let pid = unsafe { match jail.fork(Some(&keep_rds)).map_err(Error::ForkingJail)? { 0 => { device.on_sandboxed(); child_proc(child_sock, &mut device); // We're explicitly not using std::process::exit here to avoid the cleanup of // stdout/stderr globals. This can cause cascading panics and SIGILL if a worker // thread attempts to log to stderr after at_exit handlers have been run. // TODO(crbug.com/992494): Remove this once device shutdown ordering is clearly // defined. // // exit() is trivially safe. // ! 
Never returns libc::exit(0); } p => p, } }; parent_sock .set_write_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS))) .map_err(Error::Io)?; parent_sock .set_read_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS))) .map_err(Error::Io)?; Ok(ProxyDevice { sock: MsgSocket::<Command, CommandResult>::new(parent_sock), pid, debug_label, }) } pub fn pid(&self) -> pid_t { self.pid } /// Send a command that does not expect a response from the child device process. fn send_no_result(&self, cmd: &Command) { let res = self.sock.send(cmd); if let Err(e) = res { error!( "failed write to child device process {}: {}", self.debug_label, e, ); } } /// Send a command and read its response from the child device process. fn sync_send(&self, cmd: &Command) -> Option<CommandResult> { self.send_no_result(cmd); match self.sock.recv() { Err(e) =>
Ok(r) => Some(r), } } } impl BusDevice for ProxyDevice { fn debug_label(&self) -> String { self.debug_label.clone() } fn config_register_write(&mut self, reg_idx: usize, offset: u64, data: &[u8]) { let len = data.len() as u32; let mut buffer = [0u8; 4]; buffer[0..data.len()].clone_from_slice(data); let reg_idx = reg_idx as u32; let offset = offset as u32; self.send_no_result(&Command::WriteConfig { reg_idx, offset, len, data: buffer, }); } fn config_register_read(&self, reg_idx: usize) -> u32 { let res = self.sync_send(&Command::ReadConfig(reg_idx as u32)); if let Some(CommandResult::ReadConfigResult(val)) = res { val } else { 0 } } fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) { let len = data.len() as u32; if let Some(CommandResult::ReadResult(buffer)) = self.sync_send(&Command::Read { len, info }) { let len = data.len(); data.clone_from_slice(&buffer[0..len]); } } fn write(&mut self, info: BusAccessInfo, data: &[u8]) { let mut buffer = [0u8; 8]; let len = data.len() as u32; buffer[0..data.len()].clone_from_slice(data); self.send_no_result(&Command::Write { len, info, data: buffer, }); } } impl Drop for ProxyDevice { fn drop(&mut self) { self.sync_send(&Command::Shutdown); } } /// Note: These tests must be run with --test-threads=1 to allow minijail to fork /// the process. #[cfg(test)] mod tests { use super::*; /// A simple test echo device that outputs the same u8 that was written to it. struct EchoDevice { data: u8, config: u8, } impl EchoDevice { fn new() -> EchoDevice { EchoDevice { data: 0, config: 0 } } } impl BusDevice for EchoDevice { fn debug_label(&self) -> String { "EchoDevice".to_owned() } fn write(&mut self, _info: BusAccessInfo, data: &[u8]) { assert!(data.len() == 1); self.data = data[0]; } fn read(&mut self, _info: BusAccessInfo, data: &mut [u8]) { assert!(data.len() == 1); data[0] = self.data; } fn config_register_write(&mut self, _reg_idx: usize, _offset: u64, data: &[u8]) { assert!(data.len() == 1); self.config = data[0]; }
{ error!( "failed to read result of {:?} from child device process {}: {}", cmd, self.debug_label, e, ); None }
conditional_block
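The ProxyDevice flow above is: fork the device into a jailed child, keep one end of a seqpacket socket pair on each side, and have the parent serialise bus reads and writes that the child replays against the real device. The Python sketch below reproduces only the parent/child command loop with os.fork and socket.socketpair on Linux (no minijail, JSON framing with a reply for every command instead of MsgOnSocket); every name in it is illustrative, not the crosvm API.

# Minimal sketch of the proxy pattern above: parent sends commands over a
# socketpair, the forked child applies them to the "device" and replies.
# Uses plain os.fork/socketpair and JSON instead of minijail and MsgOnSocket;
# everything here is illustrative, not the crosvm API. Linux/Unix only.
import json
import os
import socket

class EchoDevice:
    def __init__(self):
        self.data = 0
    def handle(self, cmd):
        if cmd["op"] == "write":
            self.data = cmd["value"]
            return {"ok": True}
        if cmd["op"] == "read":
            return {"ok": True, "value": self.data}
        if cmd["op"] == "shutdown":
            return {"ok": True, "stop": True}
        return {"ok": False}

def child_loop(sock, device):
    # Child side: decode one command per datagram, apply it, send the reply.
    while True:
        msg = sock.recv(4096)
        if not msg:
            break
        reply = device.handle(json.loads(msg))
        sock.send(json.dumps(reply).encode())
        if reply.get("stop"):
            break

parent_sock, child_sock = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
pid = os.fork()
if pid == 0:                      # child: serve the device, then exit
    parent_sock.close()
    child_loop(child_sock, EchoDevice())
    os._exit(0)

child_sock.close()                # parent: proxy calls over the socket
def call(cmd):
    parent_sock.send(json.dumps(cmd).encode())
    return json.loads(parent_sock.recv(4096))

call({"op": "write", "value": 42})
assert call({"op": "read"})["value"] == 42
call({"op": "shutdown"})
os.waitpid(pid, 0)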
proxy.rs
)] pub enum Error { ForkingJail(minijail::Error), Io(io::Error), } pub type Result<T> = std::result::Result<T, Error>; impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Error::*; match self { ForkingJail(e) => write!(f, "Failed to fork jail process: {}", e), Io(e) => write!(f, "IO error configuring proxy device {}.", e), } } } const SOCKET_TIMEOUT_MS: u64 = 2000; #[derive(Debug, MsgOnSocket)] enum Command { Read { len: u32, info: BusAccessInfo, }, Write { len: u32, info: BusAccessInfo, data: [u8; 8], }, ReadConfig(u32), WriteConfig { reg_idx: u32, offset: u32, len: u32, data: [u8; 4], }, Shutdown, } #[derive(MsgOnSocket)] enum CommandResult { Ok, ReadResult([u8; 8]), ReadConfigResult(u32), } fn child_proc<D: BusDevice>(sock: UnixSeqpacket, device: &mut D) { let mut running = true; let sock = MsgSocket::<CommandResult, Command>::new(sock); while running { let cmd = match sock.recv() { Ok(cmd) => cmd, Err(err) => { error!("child device process failed recv: {}", err); break; } }; let res = match cmd { Command::Read { len, info } => { let mut buffer = [0u8; 8]; device.read(info, &mut buffer[0..len as usize]); sock.send(&CommandResult::ReadResult(buffer)) } Command::Write { len, info, data } => { let len = len as usize; device.write(info, &data[0..len]); // Command::Write does not have a result. Ok(()) } Command::ReadConfig(idx) => { let val = device.config_register_read(idx as usize); sock.send(&CommandResult::ReadConfigResult(val)) } Command::WriteConfig { reg_idx, offset, len, data, } => { let len = len as usize; device.config_register_write(reg_idx as usize, offset as u64, &data[0..len]); // Command::WriteConfig does not have a result. Ok(()) } Command::Shutdown => { running = false; sock.send(&CommandResult::Ok) } }; if let Err(e) = res { error!("child device process failed send: {}", e); } } } /// Wraps an inner `BusDevice` that is run inside a child process via fork. /// /// Because forks are very unfriendly to destructors and all memory mappings and file descriptors /// are inherited, this should be used as early as possible in the main process. pub struct ProxyDevice { sock: MsgSocket<Command, CommandResult>, pid: pid_t, debug_label: String, } impl ProxyDevice { /// Takes the given device and isolates it into another process via fork before returning. /// /// The forked process will automatically be terminated when this is dropped, so be sure to keep /// a reference. /// /// # Arguments /// * `device` - The device to isolate to another process. /// * `jail` - The jail to use for isolating the given device. /// * `keep_rds` - File descriptors that will be kept open in the child. pub fn new<D: BusDevice>( mut device: D, jail: &Minijail, mut keep_rds: Vec<RawDescriptor>, ) -> Result<ProxyDevice> { let debug_label = device.debug_label(); let (child_sock, parent_sock) = UnixSeqpacket::pair().map_err(Error::Io)?; keep_rds.push(child_sock.as_raw_descriptor()); // Forking here is safe as long as the program is still single threaded. let pid = unsafe { match jail.fork(Some(&keep_rds)).map_err(Error::ForkingJail)? { 0 => { device.on_sandboxed(); child_proc(child_sock, &mut device); // We're explicitly not using std::process::exit here to avoid the cleanup of // stdout/stderr globals. This can cause cascading panics and SIGILL if a worker // thread attempts to log to stderr after at_exit handlers have been run. // TODO(crbug.com/992494): Remove this once device shutdown ordering is clearly // defined. // // exit() is trivially safe. // ! 
Never returns libc::exit(0); } p => p, } }; parent_sock .set_write_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS))) .map_err(Error::Io)?; parent_sock .set_read_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS))) .map_err(Error::Io)?; Ok(ProxyDevice { sock: MsgSocket::<Command, CommandResult>::new(parent_sock), pid, debug_label, }) } pub fn pid(&self) -> pid_t { self.pid } /// Send a command that does not expect a response from the child device process. fn send_no_result(&self, cmd: &Command) { let res = self.sock.send(cmd); if let Err(e) = res { error!( "failed write to child device process {}: {}", self.debug_label, e, ); } } /// Send a command and read its response from the child device process. fn sync_send(&self, cmd: &Command) -> Option<CommandResult> { self.send_no_result(cmd); match self.sock.recv() { Err(e) => { error!( "failed to read result of {:?} from child device process {}: {}", cmd, self.debug_label, e, ); None } Ok(r) => Some(r), } } } impl BusDevice for ProxyDevice { fn debug_label(&self) -> String { self.debug_label.clone() } fn config_register_write(&mut self, reg_idx: usize, offset: u64, data: &[u8]) { let len = data.len() as u32; let mut buffer = [0u8; 4]; buffer[0..data.len()].clone_from_slice(data); let reg_idx = reg_idx as u32; let offset = offset as u32; self.send_no_result(&Command::WriteConfig { reg_idx, offset, len, data: buffer, }); } fn config_register_read(&self, reg_idx: usize) -> u32 { let res = self.sync_send(&Command::ReadConfig(reg_idx as u32)); if let Some(CommandResult::ReadConfigResult(val)) = res { val } else { 0 } } fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) { let len = data.len() as u32; if let Some(CommandResult::ReadResult(buffer)) = self.sync_send(&Command::Read { len, info }) { let len = data.len(); data.clone_from_slice(&buffer[0..len]); } } fn
(&mut self, info: BusAccessInfo, data: &[u8]) { let mut buffer = [0u8; 8]; let len = data.len() as u32; buffer[0..data.len()].clone_from_slice(data); self.send_no_result(&Command::Write { len, info, data: buffer, }); } } impl Drop for ProxyDevice { fn drop(&mut self) { self.sync_send(&Command::Shutdown); } } /// Note: These tests must be run with --test-threads=1 to allow minijail to fork /// the process. #[cfg(test)] mod tests { use super::*; /// A simple test echo device that outputs the same u8 that was written to it. struct EchoDevice { data: u8, config: u8, } impl EchoDevice { fn new() -> EchoDevice { EchoDevice { data: 0, config: 0 } } } impl BusDevice for EchoDevice { fn debug_label(&self) -> String { "EchoDevice".to_owned() } fn write(&mut self, _info: BusAccessInfo, data: &[u8]) { assert!(data.len() == 1); self.data = data[0]; } fn read(&mut self, _info: BusAccessInfo, data: &mut [u8]) { assert!(data.len() == 1); data[0] = self.data; } fn config_register_write(&mut self, _reg_idx: usize, _offset: u64, data: &[u8]) { assert!(data.len() == 1); self.config = data[0]; }
write
identifier_name
proxy.rs
)] pub enum Error { ForkingJail(minijail::Error), Io(io::Error), } pub type Result<T> = std::result::Result<T, Error>; impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Error::*; match self { ForkingJail(e) => write!(f, "Failed to fork jail process: {}", e), Io(e) => write!(f, "IO error configuring proxy device {}.", e), } } } const SOCKET_TIMEOUT_MS: u64 = 2000; #[derive(Debug, MsgOnSocket)] enum Command { Read { len: u32, info: BusAccessInfo, }, Write { len: u32, info: BusAccessInfo, data: [u8; 8], }, ReadConfig(u32), WriteConfig { reg_idx: u32, offset: u32, len: u32, data: [u8; 4], }, Shutdown, } #[derive(MsgOnSocket)] enum CommandResult { Ok, ReadResult([u8; 8]), ReadConfigResult(u32), } fn child_proc<D: BusDevice>(sock: UnixSeqpacket, device: &mut D) { let mut running = true; let sock = MsgSocket::<CommandResult, Command>::new(sock); while running { let cmd = match sock.recv() { Ok(cmd) => cmd, Err(err) => { error!("child device process failed recv: {}", err); break; } }; let res = match cmd { Command::Read { len, info } => { let mut buffer = [0u8; 8]; device.read(info, &mut buffer[0..len as usize]); sock.send(&CommandResult::ReadResult(buffer)) } Command::Write { len, info, data } => { let len = len as usize; device.write(info, &data[0..len]); // Command::Write does not have a result. Ok(()) } Command::ReadConfig(idx) => { let val = device.config_register_read(idx as usize); sock.send(&CommandResult::ReadConfigResult(val)) } Command::WriteConfig { reg_idx, offset, len, data, } => { let len = len as usize; device.config_register_write(reg_idx as usize, offset as u64, &data[0..len]); // Command::WriteConfig does not have a result. Ok(()) } Command::Shutdown => { running = false; sock.send(&CommandResult::Ok) } }; if let Err(e) = res { error!("child device process failed send: {}", e); } } } /// Wraps an inner `BusDevice` that is run inside a child process via fork. /// /// Because forks are very unfriendly to destructors and all memory mappings and file descriptors /// are inherited, this should be used as early as possible in the main process. pub struct ProxyDevice { sock: MsgSocket<Command, CommandResult>, pid: pid_t, debug_label: String, } impl ProxyDevice { /// Takes the given device and isolates it into another process via fork before returning. /// /// The forked process will automatically be terminated when this is dropped, so be sure to keep /// a reference. /// /// # Arguments /// * `device` - The device to isolate to another process. /// * `jail` - The jail to use for isolating the given device. /// * `keep_rds` - File descriptors that will be kept open in the child. pub fn new<D: BusDevice>( mut device: D, jail: &Minijail, mut keep_rds: Vec<RawDescriptor>, ) -> Result<ProxyDevice> { let debug_label = device.debug_label(); let (child_sock, parent_sock) = UnixSeqpacket::pair().map_err(Error::Io)?; keep_rds.push(child_sock.as_raw_descriptor()); // Forking here is safe as long as the program is still single threaded. let pid = unsafe { match jail.fork(Some(&keep_rds)).map_err(Error::ForkingJail)? { 0 => { device.on_sandboxed(); child_proc(child_sock, &mut device); // We're explicitly not using std::process::exit here to avoid the cleanup of // stdout/stderr globals. This can cause cascading panics and SIGILL if a worker // thread attempts to log to stderr after at_exit handlers have been run. // TODO(crbug.com/992494): Remove this once device shutdown ordering is clearly // defined. // // exit() is trivially safe. // ! 
Never returns libc::exit(0); } p => p, } }; parent_sock .set_write_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS))) .map_err(Error::Io)?; parent_sock .set_read_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS))) .map_err(Error::Io)?; Ok(ProxyDevice { sock: MsgSocket::<Command, CommandResult>::new(parent_sock), pid, debug_label, }) } pub fn pid(&self) -> pid_t { self.pid } /// Send a command that does not expect a response from the child device process. fn send_no_result(&self, cmd: &Command) { let res = self.sock.send(cmd); if let Err(e) = res { error!( "failed write to child device process {}: {}", self.debug_label, e, ); } } /// Send a command and read its response from the child device process. fn sync_send(&self, cmd: &Command) -> Option<CommandResult> { self.send_no_result(cmd); match self.sock.recv() { Err(e) => { error!( "failed to read result of {:?} from child device process {}: {}", cmd, self.debug_label, e, ); None } Ok(r) => Some(r), } } } impl BusDevice for ProxyDevice { fn debug_label(&self) -> String { self.debug_label.clone() } fn config_register_write(&mut self, reg_idx: usize, offset: u64, data: &[u8]) { let len = data.len() as u32; let mut buffer = [0u8; 4]; buffer[0..data.len()].clone_from_slice(data); let reg_idx = reg_idx as u32; let offset = offset as u32; self.send_no_result(&Command::WriteConfig { reg_idx, offset, len, data: buffer, }); } fn config_register_read(&self, reg_idx: usize) -> u32 { let res = self.sync_send(&Command::ReadConfig(reg_idx as u32)); if let Some(CommandResult::ReadConfigResult(val)) = res { val } else { 0 } } fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) { let len = data.len() as u32; if let Some(CommandResult::ReadResult(buffer)) = self.sync_send(&Command::Read { len, info }) { let len = data.len(); data.clone_from_slice(&buffer[0..len]); } } fn write(&mut self, info: BusAccessInfo, data: &[u8]) { let mut buffer = [0u8; 8]; let len = data.len() as u32; buffer[0..data.len()].clone_from_slice(data); self.send_no_result(&Command::Write { len, info, data: buffer, }); } } impl Drop for ProxyDevice { fn drop(&mut self) { self.sync_send(&Command::Shutdown); } } /// Note: These tests must be run with --test-threads=1 to allow minijail to fork /// the process. #[cfg(test)] mod tests { use super::*; /// A simple test echo device that outputs the same u8 that was written to it. struct EchoDevice { data: u8, config: u8, } impl EchoDevice { fn new() -> EchoDevice { EchoDevice { data: 0, config: 0 } } } impl BusDevice for EchoDevice { fn debug_label(&self) -> String {
self.data = data[0]; } fn read(&mut self, _info: BusAccessInfo, data: &mut [u8]) { assert!(data.len() == 1); data[0] = self.data; } fn config_register_write(&mut self, _reg_idx: usize, _offset: u64, data: &[u8]) { assert!(data.len() == 1); self.config = data[0]; } fn
"EchoDevice".to_owned() } fn write(&mut self, _info: BusAccessInfo, data: &[u8]) { assert!(data.len() == 1);
random_line_split
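A minimal usage sketch (not part of the source rows) for the ProxyDevice API shown in the proxy.rs rows above. It only uses types that appear in the snippet itself (Minijail, RawDescriptor, the local Result alias, ProxyDevice::new); EchoDevice is the test device defined in the snippet's test module and is borrowed here purely for illustration.

fn sandbox_echo_device(jail: &Minijail) -> Result<ProxyDevice> {
    // No extra file descriptors need to stay open in the child for this toy device.
    let keep_rds: Vec<RawDescriptor> = Vec::new();
    // ProxyDevice::new forks, moves the device into the child process, and returns
    // the parent-side handle, which itself implements BusDevice.
    ProxyDevice::new(EchoDevice::new(), jail, keep_rds)
}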
client.rs
Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. use reqwest::StatusCode; use serde::Deserialize; use serde::Serialize; use std::cmp::Ordering; use std::sync::RwLock; #[derive(Debug, thiserror::Error)] pub enum ClientError { #[error("request: {0}")] ReqwestError(reqwest::Error), #[error("json: {0}")] JsonError(serde_json::error::Error), #[error("failed to parse version: {0}")] VersionParseError(String), #[error("{0}")] StringError(String), } impl From<reqwest::Error> for ClientError { fn from(err: reqwest::Error) -> Self { ClientError::ReqwestError(err) } } impl From<serde_json::error::Error> for ClientError { fn from(err: serde_json::error::Error) -> Self { ClientError::JsonError(err) } } #[derive(Debug, Default)] pub struct Client { url: String, disable_certificate_validation: bool, username: Option<String>, password: Option<String>, pub version: RwLock<Option<Version>>, } impl Clone for Client { fn clone(&self) -> Self { let version = self.version.read().unwrap(); Self { url: self.url.clone(), disable_certificate_validation: self.disable_certificate_validation, username: self.username.clone(), password: self.password.clone(), version: RwLock::new(version.clone()), } } } impl Client { pub fn new(url: &str) -> Self { Self { url: url.to_string(), ..Default::default() } } pub fn get_http_client(&self) -> Result<reqwest::Client, reqwest::Error> { let mut builder = reqwest::Client::builder(); if self.disable_certificate_validation { builder = builder.danger_accept_invalid_certs(true); } builder.build() } pub fn get(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? .get(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } pub fn post(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? .post(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } pub fn put(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? 
.put(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } #[inline(always)] pub async fn get_version(&self) -> Result<Version, ClientError> { if let Ok(version) = self.version.read() { if let Some(version) = &*version { return Ok(version.clone()); } } let r = self.get("")?.send().await?; let status_code = r.status(); if status_code != StatusCode::OK { let body = r.text().await?; let err = format!("{} -- {}", status_code.as_u16(), body.trim()); return Err(ClientError::StringError(err)); } let body = r.text().await?; let response: super::ElasticResponse = serde_json::from_str(&body)?; if let Some(error) = response.error { return Err(ClientError::StringError(error.reason)); } if response.version.is_none() { return Err(ClientError::StringError( "request for version did not return a version".to_string(), )); } let version = Version::parse(&response.version.unwrap().number)?; let mut locked = self.version.write().unwrap(); *locked = Some(version.clone()); Ok(version) } pub async fn put_template(&self, name: &str, template: String) -> Result<(), ClientError> { let path = format!("_template/{}", name); let response = self.put(&path)?.body(template).send().await?; if response.status().as_u16() == 200 { return Ok(()); } let body = response.text().await?; return Err(ClientError::StringError(body)); } pub async fn get_template( &self, name: &str, ) -> Result<Option<serde_json::Value>, Box<dyn std::error::Error>> { let path = format!("_template/{}", name); let response = self.get(&path)?.send().await?; if response.status() == reqwest::StatusCode::OK { let template: serde_json::Value = response.json().await?; return Ok(Some(template)); } else if response.status() == reqwest::StatusCode::NOT_FOUND { return Ok(None); } return Err(format!("Failed to get template: {}", response.status()).into()); } } #[derive(Debug, Clone, Eq)] pub struct Version { pub version: String, pub major: u64, pub minor: u64, pub patch: u64, } impl Version { pub fn parse(s: &str) -> Result<Version, ClientError> { let mut major = 0; let mut minor = 0; let mut patch = 0; for (i, part) in s.split('.').enumerate() { if i == 0 { major = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } else if i == 1 { minor = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } else if i == 2 {
.map_err(|_| ClientError::VersionParseError(s.to_string()))?; } } let version = Version { version: s.to_string(), major, minor, patch, }; Ok(version) } pub fn as_u64(&self) -> u64 { (self.major * 1_000_000_000) + (self.minor * 1_000_000) + (self.patch * 1_000) } } impl Ord for Version { fn cmp(&self, other: &Self) -> Ordering { self.as_u64().cmp(&other.as_u64()) } } impl PartialOrd for Version { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialEq for Version { fn eq(&self, other: &Self) -> bool { self.as_u64() == other.as_u64() } } #[derive(Default, Debug)] pub struct ClientBuilder { url: String, disable_certificate_validation: bool, username: Option<String>, password: Option<String>, } impl ClientBuilder { pub fn new(url: &str) -> ClientBuilder { ClientBuilder { url: url.to_string(), ..ClientBuilder::default() } } pub fn disable_certificate_validation(&mut self, yes: bool) -> &Self { self.disable_certificate_validation = yes; self } pub fn with_username(&mut self, username: &str) -> &Self { self.username = Some(username.to_string()); self } pub fn with_password(&mut self, password: &str) -> &Self { self.password = Some(password.to_string()); self } pub fn build(&self) -> Client { Client { url: self.url.clone(), disable_certificate_validation: self.disable_certificate_validation, username: self.username.clone(), password: self.password.clone(), version: RwLock::new(None), } } } #[derive(Deserialize, Serialize, Debug)] pub struct BulkResponse { pub errors: Option<bool>, pub items: Option<Vec<serde_json::Value>>, pub error: Option<serde_json::Value>, #[serde(flatten)] pub other: std::collections::HashMap<String, serde_json::Value>, } impl BulkResponse { pub fn
patch = part .parse::<u64>()
random_line_split
client.rs
Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. use reqwest::StatusCode; use serde::Deserialize; use serde::Serialize; use std::cmp::Ordering; use std::sync::RwLock; #[derive(Debug, thiserror::Error)] pub enum ClientError { #[error("request: {0}")] ReqwestError(reqwest::Error), #[error("json: {0}")] JsonError(serde_json::error::Error), #[error("failed to parse version: {0}")] VersionParseError(String), #[error("{0}")] StringError(String), } impl From<reqwest::Error> for ClientError { fn from(err: reqwest::Error) -> Self { ClientError::ReqwestError(err) } } impl From<serde_json::error::Error> for ClientError { fn from(err: serde_json::error::Error) -> Self { ClientError::JsonError(err) } } #[derive(Debug, Default)] pub struct Client { url: String, disable_certificate_validation: bool, username: Option<String>, password: Option<String>, pub version: RwLock<Option<Version>>, } impl Clone for Client { fn clone(&self) -> Self { let version = self.version.read().unwrap(); Self { url: self.url.clone(), disable_certificate_validation: self.disable_certificate_validation, username: self.username.clone(), password: self.password.clone(), version: RwLock::new(version.clone()), } } } impl Client { pub fn new(url: &str) -> Self { Self { url: url.to_string(), ..Default::default() } } pub fn get_http_client(&self) -> Result<reqwest::Client, reqwest::Error> { let mut builder = reqwest::Client::builder(); if self.disable_certificate_validation { builder = builder.danger_accept_invalid_certs(true); } builder.build() } pub fn get(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? .get(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } pub fn post(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? .post(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } pub fn put(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? 
.put(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } #[inline(always)] pub async fn get_version(&self) -> Result<Version, ClientError> { if let Ok(version) = self.version.read() { if let Some(version) = &*version { return Ok(version.clone()); } } let r = self.get("")?.send().await?; let status_code = r.status(); if status_code != StatusCode::OK { let body = r.text().await?; let err = format!("{} -- {}", status_code.as_u16(), body.trim()); return Err(ClientError::StringError(err)); } let body = r.text().await?; let response: super::ElasticResponse = serde_json::from_str(&body)?; if let Some(error) = response.error { return Err(ClientError::StringError(error.reason)); } if response.version.is_none() { return Err(ClientError::StringError( "request for version did not return a version".to_string(), )); } let version = Version::parse(&response.version.unwrap().number)?; let mut locked = self.version.write().unwrap(); *locked = Some(version.clone()); Ok(version) } pub async fn put_template(&self, name: &str, template: String) -> Result<(), ClientError> { let path = format!("_template/{}", name); let response = self.put(&path)?.body(template).send().await?; if response.status().as_u16() == 200 { return Ok(()); } let body = response.text().await?; return Err(ClientError::StringError(body)); } pub async fn get_template( &self, name: &str, ) -> Result<Option<serde_json::Value>, Box<dyn std::error::Error>> { let path = format!("_template/{}", name); let response = self.get(&path)?.send().await?; if response.status() == reqwest::StatusCode::OK { let template: serde_json::Value = response.json().await?; return Ok(Some(template)); } else if response.status() == reqwest::StatusCode::NOT_FOUND { return Ok(None); } return Err(format!("Failed to get template: {}", response.status()).into()); } } #[derive(Debug, Clone, Eq)] pub struct Version { pub version: String, pub major: u64, pub minor: u64, pub patch: u64, } impl Version { pub fn parse(s: &str) -> Result<Version, ClientError> { let mut major = 0; let mut minor = 0; let mut patch = 0; for (i, part) in s.split('.').enumerate() { if i == 0 { major = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } else if i == 1 { minor = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } else if i == 2 { patch = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } } let version = Version { version: s.to_string(), major, minor, patch, }; Ok(version) } pub fn as_u64(&self) -> u64 { (self.major * 1_000_000_000) + (self.minor * 1_000_000) + (self.patch * 1_000) } } impl Ord for Version { fn cmp(&self, other: &Self) -> Ordering { self.as_u64().cmp(&other.as_u64()) } } impl PartialOrd for Version { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialEq for Version { fn eq(&self, other: &Self) -> bool { self.as_u64() == other.as_u64() } } #[derive(Default, Debug)] pub struct ClientBuilder { url: String, disable_certificate_validation: bool, username: Option<String>, password: Option<String>, } impl ClientBuilder { pub fn new(url: &str) -> ClientBuilder { ClientBuilder { url: url.to_string(), ..ClientBuilder::default() } } pub fn
(&mut self, yes: bool) -> &Self { self.disable_certificate_validation = yes; self } pub fn with_username(&mut self, username: &str) -> &Self { self.username = Some(username.to_string()); self } pub fn with_password(&mut self, password: &str) -> &Self { self.password = Some(password.to_string()); self } pub fn build(&self) -> Client { Client { url: self.url.clone(), disable_certificate_validation: self.disable_certificate_validation, username: self.username.clone(), password: self.password.clone(), version: RwLock::new(None), } } } #[derive(Deserialize, Serialize, Debug)] pub struct BulkResponse { pub errors: Option<bool>, pub items: Option<Vec<serde_json::Value>>, pub error: Option<serde_json::Value>, #[serde(flatten)] pub other: std::collections::HashMap<String, serde_json::Value>, } impl BulkResponse { pub
disable_certificate_validation
identifier_name
client.rs
#[error("json: {0}")] JsonError(serde_json::error::Error), #[error("failed to parse version: {0}")] VersionParseError(String), #[error("{0}")] StringError(String), } impl From<reqwest::Error> for ClientError { fn from(err: reqwest::Error) -> Self { ClientError::ReqwestError(err) } } impl From<serde_json::error::Error> for ClientError { fn from(err: serde_json::error::Error) -> Self { ClientError::JsonError(err) } } #[derive(Debug, Default)] pub struct Client { url: String, disable_certificate_validation: bool, username: Option<String>, password: Option<String>, pub version: RwLock<Option<Version>>, } impl Clone for Client { fn clone(&self) -> Self { let version = self.version.read().unwrap(); Self { url: self.url.clone(), disable_certificate_validation: self.disable_certificate_validation, username: self.username.clone(), password: self.password.clone(), version: RwLock::new(version.clone()), } } } impl Client { pub fn new(url: &str) -> Self { Self { url: url.to_string(), ..Default::default() } } pub fn get_http_client(&self) -> Result<reqwest::Client, reqwest::Error> { let mut builder = reqwest::Client::builder(); if self.disable_certificate_validation { builder = builder.danger_accept_invalid_certs(true); } builder.build() } pub fn get(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? .get(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } pub fn post(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? .post(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } pub fn put(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? 
.put(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } #[inline(always)] pub async fn get_version(&self) -> Result<Version, ClientError> { if let Ok(version) = self.version.read() { if let Some(version) = &*version { return Ok(version.clone()); } } let r = self.get("")?.send().await?; let status_code = r.status(); if status_code != StatusCode::OK { let body = r.text().await?; let err = format!("{} -- {}", status_code.as_u16(), body.trim()); return Err(ClientError::StringError(err)); } let body = r.text().await?; let response: super::ElasticResponse = serde_json::from_str(&body)?; if let Some(error) = response.error { return Err(ClientError::StringError(error.reason)); } if response.version.is_none() { return Err(ClientError::StringError( "request for version did not return a version".to_string(), )); } let version = Version::parse(&response.version.unwrap().number)?; let mut locked = self.version.write().unwrap(); *locked = Some(version.clone()); Ok(version) } pub async fn put_template(&self, name: &str, template: String) -> Result<(), ClientError> { let path = format!("_template/{}", name); let response = self.put(&path)?.body(template).send().await?; if response.status().as_u16() == 200 { return Ok(()); } let body = response.text().await?; return Err(ClientError::StringError(body)); } pub async fn get_template( &self, name: &str, ) -> Result<Option<serde_json::Value>, Box<dyn std::error::Error>> { let path = format!("_template/{}", name); let response = self.get(&path)?.send().await?; if response.status() == reqwest::StatusCode::OK { let template: serde_json::Value = response.json().await?; return Ok(Some(template)); } else if response.status() == reqwest::StatusCode::NOT_FOUND { return Ok(None); } return Err(format!("Failed to get template: {}", response.status()).into()); } } #[derive(Debug, Clone, Eq)] pub struct Version { pub version: String, pub major: u64, pub minor: u64, pub patch: u64, } impl Version { pub fn parse(s: &str) -> Result<Version, ClientError> { let mut major = 0; let mut minor = 0; let mut patch = 0; for (i, part) in s.split('.').enumerate() { if i == 0 { major = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } else if i == 1 { minor = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } else if i == 2 { patch = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } } let version = Version { version: s.to_string(), major, minor, patch, }; Ok(version) } pub fn as_u64(&self) -> u64 { (self.major * 1_000_000_000) + (self.minor * 1_000_000) + (self.patch * 1_000) } } impl Ord for Version { fn cmp(&self, other: &Self) -> Ordering { self.as_u64().cmp(&other.as_u64()) } } impl PartialOrd for Version { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialEq for Version { fn eq(&self, other: &Self) -> bool { self.as_u64() == other.as_u64() } } #[derive(Default, Debug)] pub struct ClientBuilder { url: String, disable_certificate_validation: bool, username: Option<String>, password: Option<String>, } impl ClientBuilder { pub fn new(url: &str) -> ClientBuilder { ClientBuilder { url: url.to_string(), ..ClientBuilder::default() } } pub fn disable_certificate_validation(&mut self, yes: bool) -> &Self { self.disable_certificate_validation = yes; self } pub fn with_username(&mut self, username: &str) -> 
&Self { self.username = Some(username.to_string()); self } pub fn with_password(&mut self, password: &str) -> &Self { self.password = Some(password.to_string()); self } pub fn build(&self) -> Client { Client { url: self.url.clone(), disable_certificate_validation: self.disable_certificate_validation, username: self.username.clone(), password: self.password.clone(), version: RwLock::new(None), } } } #[derive(Deserialize, Serialize, Debug)] pub struct BulkResponse { pub errors: Option<bool>, pub items: Option<Vec<serde_json::Value>>, pub error: Option<serde_json::Value>, #[serde(flatten)] pub other: std::collections::HashMap<String, serde_json::Value>, } impl BulkResponse { pub fn is_error(&self) -> bool { self.error.is_some() } pub fn has_error(&self) -> bool { if let Some(errors) = self.errors { return errors; } if self.error.is_some() { return true; } return false; } pub fn first_error(&self) -> Option<String>
{ if !self.has_error() { return None; } if let Some(error) = &self.error { return Some(error.to_string()); } if let Some(items) = &self.items { for item in items { if let serde_json::Value::String(err) = &item["index"]["error"]["reason"] { return Some(err.to_string()); } } } None }
identifier_body
client.rs
. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. use reqwest::StatusCode; use serde::Deserialize; use serde::Serialize; use std::cmp::Ordering; use std::sync::RwLock; #[derive(Debug, thiserror::Error)] pub enum ClientError { #[error("request: {0}")] ReqwestError(reqwest::Error), #[error("json: {0}")] JsonError(serde_json::error::Error), #[error("failed to parse version: {0}")] VersionParseError(String), #[error("{0}")] StringError(String), } impl From<reqwest::Error> for ClientError { fn from(err: reqwest::Error) -> Self { ClientError::ReqwestError(err) } } impl From<serde_json::error::Error> for ClientError { fn from(err: serde_json::error::Error) -> Self { ClientError::JsonError(err) } } #[derive(Debug, Default)] pub struct Client { url: String, disable_certificate_validation: bool, username: Option<String>, password: Option<String>, pub version: RwLock<Option<Version>>, } impl Clone for Client { fn clone(&self) -> Self { let version = self.version.read().unwrap(); Self { url: self.url.clone(), disable_certificate_validation: self.disable_certificate_validation, username: self.username.clone(), password: self.password.clone(), version: RwLock::new(version.clone()), } } } impl Client { pub fn new(url: &str) -> Self { Self { url: url.to_string(), ..Default::default() } } pub fn get_http_client(&self) -> Result<reqwest::Client, reqwest::Error> { let mut builder = reqwest::Client::builder(); if self.disable_certificate_validation { builder = builder.danger_accept_invalid_certs(true); } builder.build() } pub fn get(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? .get(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else
; Ok(request) } pub fn post(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? .post(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } pub fn put(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> { let url = format!("{}/{}", self.url, path); let request = self .get_http_client()? .put(&url) .header("Content-Type", "application/json"); let request = if let Some(username) = &self.username { request.basic_auth(username, self.password.clone()) } else { request }; Ok(request) } #[inline(always)] pub async fn get_version(&self) -> Result<Version, ClientError> { if let Ok(version) = self.version.read() { if let Some(version) = &*version { return Ok(version.clone()); } } let r = self.get("")?.send().await?; let status_code = r.status(); if status_code != StatusCode::OK { let body = r.text().await?; let err = format!("{} -- {}", status_code.as_u16(), body.trim()); return Err(ClientError::StringError(err)); } let body = r.text().await?; let response: super::ElasticResponse = serde_json::from_str(&body)?; if let Some(error) = response.error { return Err(ClientError::StringError(error.reason)); } if response.version.is_none() { return Err(ClientError::StringError( "request for version did not return a version".to_string(), )); } let version = Version::parse(&response.version.unwrap().number)?; let mut locked = self.version.write().unwrap(); *locked = Some(version.clone()); Ok(version) } pub async fn put_template(&self, name: &str, template: String) -> Result<(), ClientError> { let path = format!("_template/{}", name); let response = self.put(&path)?.body(template).send().await?; if response.status().as_u16() == 200 { return Ok(()); } let body = response.text().await?; return Err(ClientError::StringError(body)); } pub async fn get_template( &self, name: &str, ) -> Result<Option<serde_json::Value>, Box<dyn std::error::Error>> { let path = format!("_template/{}", name); let response = self.get(&path)?.send().await?; if response.status() == reqwest::StatusCode::OK { let template: serde_json::Value = response.json().await?; return Ok(Some(template)); } else if response.status() == reqwest::StatusCode::NOT_FOUND { return Ok(None); } return Err(format!("Failed to get template: {}", response.status()).into()); } } #[derive(Debug, Clone, Eq)] pub struct Version { pub version: String, pub major: u64, pub minor: u64, pub patch: u64, } impl Version { pub fn parse(s: &str) -> Result<Version, ClientError> { let mut major = 0; let mut minor = 0; let mut patch = 0; for (i, part) in s.split('.').enumerate() { if i == 0 { major = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } else if i == 1 { minor = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } else if i == 2 { patch = part .parse::<u64>() .map_err(|_| ClientError::VersionParseError(s.to_string()))?; } } let version = Version { version: s.to_string(), major, minor, patch, }; Ok(version) } pub fn as_u64(&self) -> u64 { (self.major * 1_000_000_000) + (self.minor * 1_000_000) + (self.patch * 1_000) } } impl Ord for Version { fn cmp(&self, other: &Self) -> Ordering { self.as_u64().cmp(&other.as_u64()) } } impl PartialOrd for Version { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialEq for 
Version { fn eq(&self, other: &Self) -> bool { self.as_u64() == other.as_u64() } } #[derive(Default, Debug)] pub struct ClientBuilder { url: String, disable_certificate_validation: bool, username: Option<String>, password: Option<String>, } impl ClientBuilder { pub fn new(url: &str) -> ClientBuilder { ClientBuilder { url: url.to_string(), ..ClientBuilder::default() } } pub fn disable_certificate_validation(&mut self, yes: bool) -> &Self { self.disable_certificate_validation = yes; self } pub fn with_username(&mut self, username: &str) -> &Self { self.username = Some(username.to_string()); self } pub fn with_password(&mut self, password: &str) -> &Self { self.password = Some(password.to_string()); self } pub fn build(&self) -> Client { Client { url: self.url.clone(), disable_certificate_validation: self.disable_certificate_validation, username: self.username.clone(), password: self.password.clone(), version: RwLock::new(None), } } } #[derive(Deserialize, Serialize, Debug)] pub struct BulkResponse { pub errors: Option<bool>, pub items: Option<Vec<serde_json::Value>>, pub error: Option<serde_json::Value>, #[serde(flatten)] pub other: std::collections::HashMap<String, serde_json::Value>, } impl BulkResponse { pub
{ request }
conditional_block
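A minimal sketch (not part of the source rows) showing how the ClientBuilder and Version types from the client.rs rows above fit together; the URL and credentials are placeholders, and a Tokio (or similar) async runtime is assumed for the await.

async fn print_server_version() -> Result<(), ClientError> {
    let mut builder = ClientBuilder::new("http://localhost:9200");
    builder.with_username("elastic");
    builder.with_password("changeme");
    let client = builder.build();
    // get_version() parses the server response and caches it behind the RwLock.
    let version = client.get_version().await?;
    // Ordering comes from the packed as_u64() encoding of major/minor/patch.
    if version < Version::parse("7.10.0")? {
        println!("older Elasticsearch: {}", version.version);
    }
    Ok(())
}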
interactive.rs
the selection process struct State<'s, 't> where 't: 's, { /// Which suggestion is operated upon. pub suggestion: &'s Suggestion<'t>, /// The content the user provided for the suggestion, if any. pub custom_replacement: String, /// Which index to show as highlighted. pub pick_idx: usize, /// Total number of pickable slots. pub n_items: usize, } impl<'s, 't> From<&'s Suggestion<'t>> for State<'s, 't> { fn from(suggestion: &'s Suggestion<'t>) -> Self { Self { suggestion, custom_replacement: String::new(), pick_idx: 0usize, // all items provided by the checkers plus the user provided n_items: suggestion.replacements.len() + 1, } } } impl<'s, 't> State<'s, 't> where 't: 's, { pub fn select_next(&mut self) { self.pick_idx = (self.pick_idx + 1).rem_euclid(self.n_items); } pub fn select_previous(&mut self) { self.pick_idx = (self.pick_idx + self.n_items - 1).rem_euclid(self.n_items); } pub fn select_custom(&mut self) { self.pick_idx = self.n_items - 1; } /// the last one is user input pub fn is_custom_entry(&self) -> bool
pub fn to_bandaid(&self) -> BandAid { if self.is_custom_entry() { BandAid::from(( self.custom_replacement.clone(), self.suggestion.span.clone(), )) } else { BandAid::try_from((self.suggestion, self.pick_idx)) .expect("Was constructed around this suggestion.") } } } /// The selection of used suggestion replacements #[derive(Debug, Clone, Default)] pub struct UserPicked { pub bandaids: indexmap::IndexMap<PathBuf, Vec<BandAid>>, } impl UserPicked { /// Count the number of suggestions accress file in total pub fn count(&self) -> usize { self.bandaids.iter().map(|(_path, vec)| vec.len()).sum() } /// Apply a single bandaid. fn add_bandaid<'u>(&mut self, path: &Path, fix: BandAid) { self.bandaids .entry(path.to_owned()) .or_insert_with(|| Vec::with_capacity(10)) .push(fix); } /// Apply multiple bandaids. #[allow(unused)] fn add_bandaids<I>(&mut self, path: &Path, fixes: I) where I: IntoIterator<Item = BandAid>, { let iter = fixes.into_iter(); self.bandaids .entry(path.to_owned()) .or_insert_with(|| Vec::with_capacity(iter.size_hint().0)) .extend(iter); } /// Provide a replacement that was not provided by the backend fn custom_replacement(&self, state: &mut State, event: KeyEvent) -> Result<Pick> { let KeyEvent { code, modifiers } = event; match code { KeyCode::Up => state.select_next(), KeyCode::Down => state.select_previous(), KeyCode::Enter => { let bandaid = BandAid::new(&state.custom_replacement, &state.suggestion.span); return Ok(Pick::Replacement(bandaid)); } KeyCode::Esc => return Ok(Pick::Quit), KeyCode::Char('c') if modifiers == KeyModifiers::CONTROL => return Ok(Pick::Quit), KeyCode::Char(c) => state.custom_replacement.push(c), // @todo handle cursors and insert / delete mode _ => {} } Ok(Pick::Nop) } /// only print the list of replacements to the user // initial thougth was to show a horizontal list of replacements, navigate left/ right // by using the arrow keys // .. suggestion0 [suggestion1] suggestion2 suggestion3 .. // arrow left // .. suggestion1 [suggestion2] suggestion3 suggestion4 .. // but now it's only a very simple list for now fn print_replacements_list(&self, state: &State) -> Result<()> { let mut stdout = stdout(); let tick = ContentStyle::new() .foreground(Color::Green) .attribute(Attribute::Bold); let highlight = ContentStyle::new() .background(Color::Black) .foreground(Color::Green) .attribute(Attribute::Bold); let others = ContentStyle::new() .background(Color::Black) .foreground(Color::Blue); let custom = ContentStyle::new() .background(Color::Black) .foreground(Color::Yellow); // render all replacements in a vertical list stdout.queue(cursor::SavePosition).unwrap(); let _ = stdout.flush(); let active_idx = state.pick_idx; let custom_content = if state.custom_replacement.is_empty() { "..." 
} else { state.custom_replacement.as_str() }; if state.n_items != active_idx + 1 { stdout .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( custom, custom_content, ))) .unwrap(); } else { stdout .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(2)) .unwrap() .queue(PrintStyledContent(StyledContent::new(tick.clone(), '»'))) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( custom, custom_content, ))) .unwrap(); } let _ = stdout.flush(); state .suggestion .replacements .iter() .enumerate() .for_each(|(idx, replacement)| { let idx = idx as u16; if idx != active_idx as u16 { // @todo figure out a way to deal with those errors better stdout // .queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap() .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( others.clone(), replacement, ))) .unwrap(); } else { stdout // .queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap() .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(2)) .unwrap() .queue(PrintStyledContent(StyledContent::new(tick.clone(), '»'))) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( highlight.clone(), replacement, ))) .unwrap(); } }); stdout.queue(cursor::RestorePosition).unwrap(); let _ = stdout.flush(); Ok(()) } /// Wait for user input and process it into a `Pick` enum fn user_input(&self, state: &mut State, running_idx: (usize, usize)) -> Result<Pick> { { let _guard = ScopedRaw::new(); let boring = ContentStyle::new() .foreground(Color::Blue) .attribute(Attribute::Bold); let question = format!( "({nth}/{of_n}) Apply this suggestion [y,n,q,a,d,j,e,?]?", nth = running_idx.0 + 1, of_n = running_idx.1 ); // a new suggestion, so prepare for the number of items that are visible // and also overwrite the last lines of the regular print which would // already contain the suggestions stdout() .queue(cursor::Hide) .unwrap() .queue(cursor::MoveToColumn(0)) .unwrap() .queue(cursor::MoveUp(5)) // erase the 5 last lines of suggestion print .unwrap() .queue(cursor::MoveToColumn(0)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveDown(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(0)) .unwrap() .queue(PrintStyledContent(StyledContent::new(boring, question))) .unwrap() .queue(cursor::MoveToColumn(0)) .unwrap() .queue(cursor::MoveDown(1)) .unwrap()
{ self.pick_idx + 1 == self.n_items }
identifier_body
interactive.rs
{ Replacement(BandAid), /// Skip this suggestion and move on to the next suggestion. Skip, /// Jump to the previous suggestion. Previous, /// Print the help message and exit. Help, /// Skip the remaining fixes for the current file. SkipFile, /// Stop execution. Quit, /// continue as if whatever returned this was never called. Nop, } /// Statefulness for the selection process struct State<'s, 't> where 't: 's, { /// Which suggestion is operated upon. pub suggestion: &'s Suggestion<'t>, /// The content the user provided for the suggestion, if any. pub custom_replacement: String, /// Which index to show as highlighted. pub pick_idx: usize, /// Total number of pickable slots. pub n_items: usize, } impl<'s, 't> From<&'s Suggestion<'t>> for State<'s, 't> { fn from(suggestion: &'s Suggestion<'t>) -> Self { Self { suggestion, custom_replacement: String::new(), pick_idx: 0usize, // all items provided by the checkers plus the user provided n_items: suggestion.replacements.len() + 1, } } } impl<'s, 't> State<'s, 't> where 't: 's, { pub fn select_next(&mut self) { self.pick_idx = (self.pick_idx + 1).rem_euclid(self.n_items); } pub fn select_previous(&mut self) { self.pick_idx = (self.pick_idx + self.n_items - 1).rem_euclid(self.n_items); } pub fn select_custom(&mut self) { self.pick_idx = self.n_items - 1; } /// the last one is user input pub fn is_custom_entry(&self) -> bool { self.pick_idx + 1 == self.n_items } pub fn to_bandaid(&self) -> BandAid { if self.is_custom_entry() { BandAid::from(( self.custom_replacement.clone(), self.suggestion.span.clone(), )) } else { BandAid::try_from((self.suggestion, self.pick_idx)) .expect("Was constructed around this suggestion.") } } } /// The selection of used suggestion replacements #[derive(Debug, Clone, Default)] pub struct UserPicked { pub bandaids: indexmap::IndexMap<PathBuf, Vec<BandAid>>, } impl UserPicked { /// Count the number of suggestions accress file in total pub fn count(&self) -> usize { self.bandaids.iter().map(|(_path, vec)| vec.len()).sum() } /// Apply a single bandaid. fn add_bandaid<'u>(&mut self, path: &Path, fix: BandAid) { self.bandaids .entry(path.to_owned()) .or_insert_with(|| Vec::with_capacity(10)) .push(fix); } /// Apply multiple bandaids. #[allow(unused)] fn add_bandaids<I>(&mut self, path: &Path, fixes: I) where I: IntoIterator<Item = BandAid>, { let iter = fixes.into_iter(); self.bandaids .entry(path.to_owned()) .or_insert_with(|| Vec::with_capacity(iter.size_hint().0)) .extend(iter); } /// Provide a replacement that was not provided by the backend fn custom_replacement(&self, state: &mut State, event: KeyEvent) -> Result<Pick> { let KeyEvent { code, modifiers } = event; match code { KeyCode::Up => state.select_next(), KeyCode::Down => state.select_previous(), KeyCode::Enter => { let bandaid = BandAid::new(&state.custom_replacement, &state.suggestion.span); return Ok(Pick::Replacement(bandaid)); } KeyCode::Esc => return Ok(Pick::Quit), KeyCode::Char('c') if modifiers == KeyModifiers::CONTROL => return Ok(Pick::Quit), KeyCode::Char(c) => state.custom_replacement.push(c), // @todo handle cursors and insert / delete mode _ => {} } Ok(Pick::Nop) } /// only print the list of replacements to the user // initial thougth was to show a horizontal list of replacements, navigate left/ right // by using the arrow keys // .. suggestion0 [suggestion1] suggestion2 suggestion3 .. // arrow left // .. suggestion1 [suggestion2] suggestion3 suggestion4 .. 
// but now it's only a very simple list for now fn print_replacements_list(&self, state: &State) -> Result<()> { let mut stdout = stdout(); let tick = ContentStyle::new() .foreground(Color::Green) .attribute(Attribute::Bold); let highlight = ContentStyle::new() .background(Color::Black) .foreground(Color::Green) .attribute(Attribute::Bold); let others = ContentStyle::new() .background(Color::Black) .foreground(Color::Blue); let custom = ContentStyle::new() .background(Color::Black) .foreground(Color::Yellow); // render all replacements in a vertical list stdout.queue(cursor::SavePosition).unwrap(); let _ = stdout.flush(); let active_idx = state.pick_idx; let custom_content = if state.custom_replacement.is_empty() { "..." } else { state.custom_replacement.as_str() }; if state.n_items != active_idx + 1 { stdout .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( custom, custom_content, ))) .unwrap(); } else { stdout .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(2)) .unwrap() .queue(PrintStyledContent(StyledContent::new(tick.clone(), '»'))) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( custom, custom_content, ))) .unwrap(); } let _ = stdout.flush(); state .suggestion .replacements .iter() .enumerate() .for_each(|(idx, replacement)| { let idx = idx as u16; if idx != active_idx as u16 { // @todo figure out a way to deal with those errors better stdout // .queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap() .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( others.clone(), replacement, ))) .unwrap(); } else { stdout // .queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap() .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(2)) .unwrap() .queue(PrintStyledContent(StyledContent::new(tick.clone(), '»'))) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( highlight.clone(), replacement, ))) .unwrap(); } }); stdout.queue(cursor::RestorePosition).unwrap(); let _ = stdout.flush(); Ok(()) } /// Wait for user input and process it into a `Pick` enum fn user_input(&self, state: &mut State, running_idx: (usize, usize)) -> Result<Pick> { { let _guard = ScopedRaw::new(); let boring = ContentStyle::new() .foreground(Color::Blue) .attribute(Attribute::Bold); let question = format!( "({nth}/{of_n}) Apply this suggestion [y,n,q,a,d,j,e,?]?", nth = running_idx.0 + 1, of_n = running_idx.1 ); // a new suggestion, so prepare for the number of items that are visible // and also overwrite the last lines of the regular print which would // already contain the suggestions stdout() .queue(cursor::Hide) .unwrap() .queue(cursor::MoveToColumn(0)) .unwrap() .queue(cursor::MoveUp(5)) // erase the 5 last lines of suggestion print .unwrap() .queue(cursor::MoveToColumn(0)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveDown(1
Pick
identifier_name
interactive.rs
use crossterm; use crossterm::{ cursor, event::{Event, KeyCode, KeyEvent, KeyModifiers}, style::{style, Attribute, Color, ContentStyle, Print, PrintStyledContent, StyledContent}, terminal, QueueableCommand, }; use std::convert::TryFrom; use std::io::{stdin, stdout}; use std::path::Path; const HELP: &'static str = r##"y - apply this suggestion n - do not apply the suggested correction q - quit; do not stage this hunk or any of the remaining ones d - do not apply this suggestion and skip the rest of the file g - select a suggestion to go to j - leave this hunk undecided, see next undecided hunk J - leave this hunk undecided, see next hunk e - manually edit the current hunk ? - print help "##; /// Helper strict to assure we leave the terminals raw mode struct ScopedRaw; impl ScopedRaw { fn new() -> Result<Self> { crossterm::terminal::enable_raw_mode()?; Ok(Self) } } impl Drop for ScopedRaw { fn drop(&mut self) { let _ = crossterm::terminal::disable_raw_mode(); } } /// In which direction we should progress #[derive(Debug, Clone, Copy)] enum Direction { Forward, Backward, } /// The user picked something. This is the pick representation. #[derive(Debug, Clone, PartialEq, Eq)] pub(super) enum Pick { Replacement(BandAid), /// Skip this suggestion and move on to the next suggestion. Skip, /// Jump to the previous suggestion. Previous, /// Print the help message and exit. Help, /// Skip the remaining fixes for the current file. SkipFile, /// Stop execution. Quit, /// continue as if whatever returned this was never called. Nop, } /// Statefulness for the selection process struct State<'s, 't> where 't: 's, { /// Which suggestion is operated upon. pub suggestion: &'s Suggestion<'t>, /// The content the user provided for the suggestion, if any. pub custom_replacement: String, /// Which index to show as highlighted. pub pick_idx: usize, /// Total number of pickable slots. pub n_items: usize, } impl<'s, 't> From<&'s Suggestion<'t>> for State<'s, 't> { fn from(suggestion: &'s Suggestion<'t>) -> Self { Self { suggestion, custom_replacement: String::new(), pick_idx: 0usize, // all items provided by the checkers plus the user provided n_items: suggestion.replacements.len() + 1, } } } impl<'s, 't> State<'s, 't> where 't: 's, { pub fn select_next(&mut self) { self.pick_idx = (self.pick_idx + 1).rem_euclid(self.n_items); } pub fn select_previous(&mut self) { self.pick_idx = (self.pick_idx + self.n_items - 1).rem_euclid(self.n_items); } pub fn select_custom(&mut self) { self.pick_idx = self.n_items - 1; } /// the last one is user input pub fn is_custom_entry(&self) -> bool { self.pick_idx + 1 == self.n_items } pub fn to_bandaid(&self) -> BandAid { if self.is_custom_entry() { BandAid::from(( self.custom_replacement.clone(), self.suggestion.span.clone(), )) } else { BandAid::try_from((self.suggestion, self.pick_idx)) .expect("Was constructed around this suggestion.") } } } /// The selection of used suggestion replacements #[derive(Debug, Clone, Default)] pub struct UserPicked { pub bandaids: indexmap::IndexMap<PathBuf, Vec<BandAid>>, } impl UserPicked { /// Count the number of suggestions accress file in total pub fn count(&self) -> usize { self.bandaids.iter().map(|(_path, vec)| vec.len()).sum() } /// Apply a single bandaid. fn add_bandaid<'u>(&mut self, path: &Path, fix: BandAid) { self.bandaids .entry(path.to_owned()) .or_insert_with(|| Vec::with_capacity(10)) .push(fix); } /// Apply multiple bandaids. 
#[allow(unused)] fn add_bandaids<I>(&mut self, path: &Path, fixes: I) where I: IntoIterator<Item = BandAid>, { let iter = fixes.into_iter(); self.bandaids .entry(path.to_owned()) .or_insert_with(|| Vec::with_capacity(iter.size_hint().0)) .extend(iter); } /// Provide a replacement that was not provided by the backend fn custom_replacement(&self, state: &mut State, event: KeyEvent) -> Result<Pick> { let KeyEvent { code, modifiers } = event; match code { KeyCode::Up => state.select_next(), KeyCode::Down => state.select_previous(), KeyCode::Enter => { let bandaid = BandAid::new(&state.custom_replacement, &state.suggestion.span); return Ok(Pick::Replacement(bandaid)); } KeyCode::Esc => return Ok(Pick::Quit), KeyCode::Char('c') if modifiers == KeyModifiers::CONTROL => return Ok(Pick::Quit), KeyCode::Char(c) => state.custom_replacement.push(c), // @todo handle cursors and insert / delete mode _ => {} } Ok(Pick::Nop) } /// only print the list of replacements to the user // initial thougth was to show a horizontal list of replacements, navigate left/ right // by using the arrow keys // .. suggestion0 [suggestion1] suggestion2 suggestion3 .. // arrow left // .. suggestion1 [suggestion2] suggestion3 suggestion4 .. // but now it's only a very simple list for now fn print_replacements_list(&self, state: &State) -> Result<()> { let mut stdout = stdout(); let tick = ContentStyle::new() .foreground(Color::Green) .attribute(Attribute::Bold); let highlight = ContentStyle::new() .background(Color::Black) .foreground(Color::Green) .attribute(Attribute::Bold); let others = ContentStyle::new() .background(Color::Black) .foreground(Color::Blue); let custom = ContentStyle::new() .background(Color::Black) .foreground(Color::Yellow); // render all replacements in a vertical list stdout.queue(cursor::SavePosition).unwrap(); let _ = stdout.flush(); let active_idx = state.pick_idx; let custom_content = if state.custom_replacement.is_empty() { "..." } else { state.custom_replacement.as_str() }; if state.n_items != active_idx + 1 { stdout .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( custom, custom_content, ))) .unwrap(); } else { stdout .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(2)) .unwrap() .queue(PrintStyledContent(StyledContent::new(tick.clone(), '»'))) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( custom, custom_content, ))) .unwrap(); } let _ = stdout.flush(); state .suggestion .replacements .iter() .enumerate() .for_each(|(idx, replacement)| { let idx = idx as u16; if idx != active_idx as u16 { // @todo figure out a way to deal with those errors better stdout // .queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap() .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(4)) .unwrap() .queue(PrintStyledContent(StyledContent::new( others.clone(), replacement, ))) .unwrap(); } else { stdout // .queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap() .queue(cursor::MoveUp(1)) .unwrap() .queue(terminal::Clear(terminal::ClearType::CurrentLine)) .unwrap() .queue(cursor::MoveToColumn(2)) .unwrap() .queue(PrintStyledContent(StyledContent::new(tick.clone(), '
//!
//! The result of that pick is a bandaid.

use super::*;
random_line_split
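The Rust picker row above keeps its selection state in a small struct: the highlighted index wraps around `n_items` with `rem_euclid`, and the final slot is reserved for the user's custom replacement. Below is a minimal Python sketch of that same arithmetic, for readers who want to see the wrap-around behaviour in isolation; the name `PickerState` and the sample replacements are made up for illustration.

class PickerState:
    def __init__(self, replacements):
        self.replacements = list(replacements)
        self.custom_replacement = ""
        self.pick_idx = 0
        # all items provided by the checkers, plus one user-provided slot
        self.n_items = len(self.replacements) + 1

    def select_next(self):
        self.pick_idx = (self.pick_idx + 1) % self.n_items

    def select_previous(self):
        # adding n_items - 1 before taking the modulus avoids a negative index
        self.pick_idx = (self.pick_idx + self.n_items - 1) % self.n_items

    def is_custom_entry(self):
        # the last slot is the user-entered replacement
        return self.pick_idx + 1 == self.n_items

state = PickerState(["bandaid", "band-aid"])
state.select_previous()            # wraps from the first suggestion to the custom slot
assert state.is_custom_entry()
state.select_next()                # wraps back to the first suggestion
assert state.pick_idx == 0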
WebGlCanvas.ts
return tex; } private resizeTexture(texture: WebGLTexture, width: number, height: number) { if (this.checkContextLoss()) return; const gl = this.gl; const textureType = this.textureTypes.get(texture); gl.bindTexture(gl.TEXTURE_2D, texture); gl.texImage2D(gl.TEXTURE_2D, 0, textureType, width, height, 0, textureType, gl.UNSIGNED_BYTE, null); this.textureSizes.set(texture, { width, height }); } private createFramebuffer(texture: WebGLTexture) { if (this.checkContextLoss()) return; const gl = this.gl; const fb = gl.createFramebuffer(); gl.bindFramebuffer(gl.FRAMEBUFFER, fb); gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0); this.refs.frameBuffers.push(fb); this.frameBufferTextures.set(fb, texture); return fb; } private useFramebuffer(fb: WebGLFramebuffer, viewX?: number, viewY?: number, viewWidth?: number, viewHeight?: number) { if (this.checkContextLoss()) return; const gl = this.gl; if (fb === null) { gl.bindFramebuffer(gl.FRAMEBUFFER, null); gl.viewport(viewX ?? 0, viewY ?? 0, viewWidth ?? gl.drawingBufferWidth, viewHeight ?? gl.drawingBufferHeight); } else { const tex = this.frameBufferTextures.get(fb); const { width, height } = this.textureSizes.get(tex); gl.bindFramebuffer(gl.FRAMEBUFFER, fb); gl.viewport(viewX ?? 0, viewY ?? 0, viewWidth ?? width, viewHeight ?? height); } } private resizeFramebuffer(fb: WebGLFramebuffer, width: number, height: number) { if (this.checkContextLoss()) return; const gl = this.gl; const texture = this.frameBufferTextures.get(fb); this.resizeTexture(texture, width, height); } /** * Resize the canvas surface * @param width - New canvas width, in CSS pixels * @param height - New canvas height, in CSS pixels * * The ratio between `width` and `height` should be 3:4 for best results */ setCanvasSize(width: number, height: number) { const dpi = this.options.useDpi ? (window.devicePixelRatio || 1) : 1; const internalWidth = width * dpi; const internalHeight = height * dpi; this.width = width; this.height = height; this.canvas.width = internalWidth; this.canvas.height = internalHeight; this.dstWidth = internalWidth; this.dstHeight = internalHeight; this.canvas.style.width = `${ width }px`; this.canvas.style.height = `${ height }px`; this.checkContextLoss(); } /** * Sets the note to use for this player */ setNote(note: FlipnoteParserBase) { if (this.checkContextLoss()) return; const width = note.imageWidth; const height = note.imageHeight; this.note = note; this.srcWidth = width; this.srcHeight = height; this.resizeFramebuffer(this.frameBuffer, width, height); this.resizeTexture(this.layerTexture, width, height); this.layerTexturePixelBuffer = new Uint32Array(width * height); this.layerTexturePixels = new Uint8Array(this.layerTexturePixelBuffer.buffer); // same memory buffer as rgbaData this.frameIndex = undefined; // set canvas alt text this.canvas.title = note.getTitle(); } /** * Clear the canvas * @param color optional RGBA color to use as a background color */ clear(color?: [number, number, number, number]) { if (this.checkContextLoss()) return; const gl = this.gl; const paperColor = color ?? 
this.note.getFramePalette(this.frameIndex)[0]; const [r, g, b, a] = paperColor; gl.clearColor(r / 255, g / 255, b / 255, a /255); gl.clear(gl.COLOR_BUFFER_BIT); } /** * Draw a frame from the currently loaded Flipnote * @param frameIndex */ drawFrame(frameIndex: number) { if (this.checkContextLoss()) return; const gl = this.gl; const mode = this.stereoscopeMode; const strength = this.stereoscopeStrength; this.frameIndex = frameIndex; if (mode === CanvasStereoscopicMode.None) { this.drawLayers(frameIndex); this.useFramebuffer(null); this.upscale(gl.drawingBufferWidth, gl.drawingBufferHeight); } else if (mode === CanvasStereoscopicMode.Dual) { this.drawLayers(frameIndex, strength, FlipnoteStereoscopicEye.Left); this.useFramebuffer(null, 0, 0, gl.drawingBufferWidth / 2, gl.drawingBufferHeight); this.upscale(gl.drawingBufferWidth / 2, gl.drawingBufferHeight); this.drawLayers(frameIndex, strength, FlipnoteStereoscopicEye.Right); this.useFramebuffer(null, gl.drawingBufferWidth / 2, 0, gl.drawingBufferWidth / 2, gl.drawingBufferHeight); this.upscale(gl.drawingBufferWidth / 2, gl.drawingBufferHeight); } } private upscale(width: number, height: number) { if (this.checkContextLoss()) return; const gl = this.gl; gl.useProgram(this.upscaleProgram.program); setUniforms(this.upscaleProgram, { // u_flipY: true, u_tex: this.frameTexture, u_textureSize: [this.srcWidth, this.srcHeight], u_screenSize: [width, height], }); gl.drawElements(gl.TRIANGLES, this.quadBuffer.numElements, this.quadBuffer.elementType, 0); } requestStereoScopeMode(mode: CanvasStereoscopicMode) { if (this.supportedStereoscopeModes.includes(mode)) this.stereoscopeMode = mode; else this.stereoscopeMode = CanvasStereoscopicMode.None; this.forceUpdate(); } forceUpdate() { if (this.frameIndex !== undefined) this.drawFrame(this.frameIndex); } /** * Returns true if the webGL context has returned an error */ isErrorState() { const gl = this.gl; return gl === null || gl.getError() !== gl.NO_ERROR; } private drawLayers( frameIndex: number, depthStrength = 0, depthEye: FlipnoteStereoscopicEye = FlipnoteStereoscopicEye.Left, shouldClear = true, ) { const gl = this.gl; const note = this.note; const srcWidth = this.srcWidth; const srcHeight = this.srcHeight; const numLayers = note.numLayers; const layerOrder = note.getFrameLayerOrder(frameIndex); const layerDepths = note.getFrameLayerDepths(frameIndex); this.useFramebuffer(this.frameBuffer); if (shouldClear) this.clear(); gl.useProgram(this.layerProgram.program); for (let i = 0; i < numLayers; i++) { const layerIndex = layerOrder[i]; note.getLayerPixelsRgba(frameIndex, layerIndex, this.layerTexturePixelBuffer, this.paletteBuffer); setUniforms(this.layerProgram, { u_flipY: true, u_tex: this.layerTexture, u_textureSize: [srcWidth, srcHeight], u_3d_mode: this.stereoscopeMode, u_3d_eye: depthEye, u_3d_depth: layerDepths[layerIndex], u_3d_strength: depthStrength, }); gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, srcWidth, srcHeight, 0, gl.RGBA, gl.UNSIGNED_BYTE, this.layerTexturePixels); gl.drawElements(gl.TRIANGLES, this.quadBuffer.numElements, this.quadBuffer.elementType, 0); } } /** * Only a certain number of WebGL contexts can be added to a single page before the browser will start culling old contexts. 
* This method returns true if it has been culled, false if not */ private checkContextLoss() { const isLost = this.isCtxLost || this.isErrorState(); if (isLost) this.handleContextLoss(); return isLost; } private handleContextLoss = (e?: Event) => { this.destroy(); if (e) e.preventDefault(); if (!this.isCtxLost) this.options.onlost(); this.isCtxLost = true; } private handleContextRestored = (e?: Event) => { this.isCtxLost = false; this.init(); this.options.onrestored(); } /** * * @param type image mime type (`image/jpeg`, `image/png`, etc) * @param quality image quality where supported, between 0 and 1 */ getDataUrl(type?: string, quality?: any) { return this.canvas.toDataURL(type, quality); } async
getBlob
identifier_name
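The drawFrame method in the row above handles CanvasStereoscopicMode.Dual by rendering the left eye into the left half of the drawing buffer and the right eye into the right half, purely by offsetting the viewport. A hedged sketch of that viewport arithmetic, with WebGL left out entirely; the helper name dual_viewports is invented for illustration and integer halves are assumed.

def dual_viewports(buffer_width, buffer_height):
    # returns (x, y, width, height) for each half of the drawing buffer
    half = buffer_width // 2
    left = (0, 0, half, buffer_height)
    right = (half, 0, half, buffer_height)
    return left, right

print(dual_viewports(640, 480))    # ((0, 0, 320, 480), (320, 0, 320, 480))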
WebGlCanvas.ts
private frameBuffer: WebGLFramebuffer; private textureTypes = new Map<WebGLTexture, number>(); private textureSizes = new Map<WebGLTexture, { width: number, height: number }>(); private frameBufferTextures = new Map<WebGLFramebuffer, WebGLTexture>(); private refs: ResourceMap = { programs: [], shaders: [], textures: [], buffers: [], frameBuffers: [] }; private isCtxLost = false; /** * Creates a new WebGlCanvas instance * @param el - Canvas HTML element to use as a rendering surface * @param width - Canvas width in CSS pixels * @param height - Canvas height in CSS pixels * * The ratio between `width` and `height` should be 3:4 for best results */ constructor(parent: Element, width=640, height=480, options: Partial<WebglCanvasOptions> = {}) { assertBrowserEnv(); this.options = { ...WebglCanvas.defaultOptions, ...options }; this.width = width; this.height = height; this.canvas = document.createElement('canvas'); this.canvas.addEventListener('webglcontextlost', this.handleContextLoss, false); this.canvas.addEventListener('webglcontextrestored', this.handleContextRestored, false); this.canvas.className = 'FlipnoteCanvas FlipnoteCanvas--webgl'; this.gl = this.canvas.getContext('webgl', { antialias: false, alpha: true }); if (parent) parent.appendChild(this.canvas); this.init(); } private init() { this.setCanvasSize(this.width, this.height); const gl = this.gl; if (this.checkContextLoss()) return; this.layerProgram = this.createProgram(vertShaderLayer, fragShaderLayer); this.upscaleProgram = this.createProgram(vertShaderUpscale, fragShaderUpscale); this.quadBuffer = this.createScreenQuad(-1, -1, 2, 2, 1, 1); this.setBuffersAndAttribs(this.layerProgram, this.quadBuffer); this.layerTexture = this.createTexture(gl.RGBA, gl.LINEAR, gl.CLAMP_TO_EDGE); this.frameTexture = this.createTexture(gl.RGBA, gl.LINEAR, gl.CLAMP_TO_EDGE); this.frameBuffer = this.createFramebuffer(this.frameTexture); } private createProgram(vertexShaderSource: string, fragmentShaderSource: string) { if (this.checkContextLoss()) return; const gl = this.gl; const vert = this.createShader(gl.VERTEX_SHADER, vertexShaderSource); const frag = this.createShader(gl.FRAGMENT_SHADER, fragmentShaderSource); const program = gl.createProgram(); // set up shaders gl.attachShader(program, vert); gl.attachShader(program, frag); // link program gl.linkProgram(program); if (!gl.getProgramParameter(program, gl.LINK_STATUS)) { const log = gl.getProgramInfoLog(program); gl.deleteProgram(program); throw new Error(log); } const programInfo = createProgramInfoFromProgram(gl, program); this.refs.programs.push(program); return programInfo; } private createShader(type: number, source: string) { if (this.checkContextLoss()) return; const gl = this.gl; const shader = gl.createShader(type); gl.shaderSource(shader, source); gl.compileShader(shader); // test if shader compilation was successful if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) { const log = gl.getShaderInfoLog(shader); gl.deleteShader(shader); throw new Error(log); } this.refs.shaders.push(shader); return shader; } // creating a subdivided quad seems to produce slightly nicer texture filtering private createScreenQuad(x0: number, y0: number, width: number, height: number, xSubdivs: number, ySubdivs: number) { if (this.checkContextLoss()) return; const numVerts = (xSubdivs + 1) * (ySubdivs + 1); const numVertsAcross = xSubdivs + 1; const positions = new Float32Array(numVerts * 2); const texCoords = new Float32Array(numVerts * 2); let positionPtr = 0; let texCoordPtr = 0; for (let y = 0; y 
<= ySubdivs; y++) { for (let x = 0; x <= xSubdivs; x++) { const u = x / xSubdivs; const v = y / ySubdivs; positions[positionPtr++] = x0 + width * u; positions[positionPtr++] = y0 + height * v; texCoords[texCoordPtr++] = u; texCoords[texCoordPtr++] = v; } } const indices = new Uint16Array(xSubdivs * ySubdivs * 2 * 3); let indicesPtr = 0; for (let y = 0; y < ySubdivs; y++)
const bufferInfo = createBufferInfoFromArrays(this.gl, { position: { numComponents: 2, data: positions }, texcoord: { numComponents: 2, data: texCoords }, indices: indices }); // collect references to buffer objects for (let name in bufferInfo.attribs) this.refs.buffers.push(bufferInfo.attribs[name].buffer); return bufferInfo; } private setBuffersAndAttribs(program: ProgramInfo, buffer: BufferInfo) { if (this.checkContextLoss()) return; const gl = this.gl; setAttributes(program.attribSetters, buffer.attribs); gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, buffer.indices); } private createTexture(type: number, minMag: number, wrap: number, width = 1, height = 1) { if (this.checkContextLoss()) return; const gl = this.gl; const tex = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, tex); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, wrap); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, wrap); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, minMag); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, minMag); gl.texImage2D(gl.TEXTURE_2D, 0, type, width, height, 0, type, gl.UNSIGNED_BYTE, null); this.refs.textures.push(tex); this.textureTypes.set(tex, type); this.textureSizes.set(tex, { width, height }); return tex; } private resizeTexture(texture: WebGLTexture, width: number, height: number) { if (this.checkContextLoss()) return; const gl = this.gl; const textureType = this.textureTypes.get(texture); gl.bindTexture(gl.TEXTURE_2D, texture); gl.texImage2D(gl.TEXTURE_2D, 0, textureType, width, height, 0, textureType, gl.UNSIGNED_BYTE, null); this.textureSizes.set(texture, { width, height }); } private createFramebuffer(texture: WebGLTexture) { if (this.checkContextLoss()) return; const gl = this.gl; const fb = gl.createFramebuffer(); gl.bindFramebuffer(gl.FRAMEBUFFER, fb); gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0); this.refs.frameBuffers.push(fb); this.frameBufferTextures.set(fb, texture); return fb; } private useFramebuffer(fb: WebGLFramebuffer, viewX?: number, viewY?: number, viewWidth?: number, viewHeight?: number) { if (this.checkContextLoss()) return; const gl = this.gl; if (fb === null) { gl.bindFramebuffer(gl.FRAMEBUFFER, null); gl.viewport(viewX ?? 0, viewY ?? 0, viewWidth ?? gl.drawingBufferWidth, viewHeight ?? gl.drawingBufferHeight); } else { const tex = this.frameBufferTextures.get(fb); const { width, height } = this.textureSizes.get(tex); gl.bindFramebuffer(gl.FRAMEBUFFER, fb); gl.viewport(viewX ?? 0, viewY ?? 0, viewWidth ?? width, viewHeight ?? height); } } private resizeFramebuffer(fb: WebGLFramebuffer, width: number, height: number) {
{ for (let x = 0; x < xSubdivs; x++) { // triangle 1 indices[indicesPtr++] = (y + 0) * numVertsAcross + x; indices[indicesPtr++] = (y + 1) * numVertsAcross + x; indices[indicesPtr++] = (y + 0) * numVertsAcross + x + 1; // triangle 2 indices[indicesPtr++] = (y + 0) * numVertsAcross + x + 1; indices[indicesPtr++] = (y + 1) * numVertsAcross + x; indices[indicesPtr++] = (y + 1) * numVertsAcross + x + 1; } }
conditional_block
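The conditional_block middle in this row is the index-buffer loop of createScreenQuad: each cell of the subdivided quad is emitted as two triangles over a (xSubdivs + 1) * (ySubdivs + 1) vertex grid. A small Python sketch of the same index generation (the function name is illustrative):

def quad_grid_indices(x_subdivs, y_subdivs):
    verts_across = x_subdivs + 1
    indices = []
    for y in range(y_subdivs):
        for x in range(x_subdivs):
            a = (y + 0) * verts_across + x          # bottom-left of the cell
            b = (y + 1) * verts_across + x          # top-left
            c = (y + 0) * verts_across + x + 1      # bottom-right
            d = (y + 1) * verts_across + x + 1      # top-right
            indices += [a, b, c]                    # triangle 1
            indices += [c, b, d]                    # triangle 2
    return indices

# a single 1x1 subdivision yields the familiar two-triangle quad
assert quad_grid_indices(1, 1) == [0, 2, 1, 1, 2, 3]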
WebGlCanvas.ts
private frameBuffer: WebGLFramebuffer; private textureTypes = new Map<WebGLTexture, number>(); private textureSizes = new Map<WebGLTexture, { width: number, height: number }>(); private frameBufferTextures = new Map<WebGLFramebuffer, WebGLTexture>(); private refs: ResourceMap = { programs: [], shaders: [], textures: [], buffers: [], frameBuffers: [] }; private isCtxLost = false; /** * Creates a new WebGlCanvas instance * @param el - Canvas HTML element to use as a rendering surface * @param width - Canvas width in CSS pixels * @param height - Canvas height in CSS pixels * * The ratio between `width` and `height` should be 3:4 for best results */ constructor(parent: Element, width=640, height=480, options: Partial<WebglCanvasOptions> = {}) { assertBrowserEnv(); this.options = { ...WebglCanvas.defaultOptions, ...options }; this.width = width; this.height = height; this.canvas = document.createElement('canvas'); this.canvas.addEventListener('webglcontextlost', this.handleContextLoss, false); this.canvas.addEventListener('webglcontextrestored', this.handleContextRestored, false); this.canvas.className = 'FlipnoteCanvas FlipnoteCanvas--webgl'; this.gl = this.canvas.getContext('webgl', { antialias: false, alpha: true }); if (parent) parent.appendChild(this.canvas); this.init(); } private init()
private createProgram(vertexShaderSource: string, fragmentShaderSource: string) { if (this.checkContextLoss()) return; const gl = this.gl; const vert = this.createShader(gl.VERTEX_SHADER, vertexShaderSource); const frag = this.createShader(gl.FRAGMENT_SHADER, fragmentShaderSource); const program = gl.createProgram(); // set up shaders gl.attachShader(program, vert); gl.attachShader(program, frag); // link program gl.linkProgram(program); if (!gl.getProgramParameter(program, gl.LINK_STATUS)) { const log = gl.getProgramInfoLog(program); gl.deleteProgram(program); throw new Error(log); } const programInfo = createProgramInfoFromProgram(gl, program); this.refs.programs.push(program); return programInfo; } private createShader(type: number, source: string) { if (this.checkContextLoss()) return; const gl = this.gl; const shader = gl.createShader(type); gl.shaderSource(shader, source); gl.compileShader(shader); // test if shader compilation was successful if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) { const log = gl.getShaderInfoLog(shader); gl.deleteShader(shader); throw new Error(log); } this.refs.shaders.push(shader); return shader; } // creating a subdivided quad seems to produce slightly nicer texture filtering private createScreenQuad(x0: number, y0: number, width: number, height: number, xSubdivs: number, ySubdivs: number) { if (this.checkContextLoss()) return; const numVerts = (xSubdivs + 1) * (ySubdivs + 1); const numVertsAcross = xSubdivs + 1; const positions = new Float32Array(numVerts * 2); const texCoords = new Float32Array(numVerts * 2); let positionPtr = 0; let texCoordPtr = 0; for (let y = 0; y <= ySubdivs; y++) { for (let x = 0; x <= xSubdivs; x++) { const u = x / xSubdivs; const v = y / ySubdivs; positions[positionPtr++] = x0 + width * u; positions[positionPtr++] = y0 + height * v; texCoords[texCoordPtr++] = u; texCoords[texCoordPtr++] = v; } } const indices = new Uint16Array(xSubdivs * ySubdivs * 2 * 3); let indicesPtr = 0; for (let y = 0; y < ySubdivs; y++) { for (let x = 0; x < xSubdivs; x++) { // triangle 1 indices[indicesPtr++] = (y + 0) * numVertsAcross + x; indices[indicesPtr++] = (y + 1) * numVertsAcross + x; indices[indicesPtr++] = (y + 0) * numVertsAcross + x + 1; // triangle 2 indices[indicesPtr++] = (y + 0) * numVertsAcross + x + 1; indices[indicesPtr++] = (y + 1) * numVertsAcross + x; indices[indicesPtr++] = (y + 1) * numVertsAcross + x + 1; } } const bufferInfo = createBufferInfoFromArrays(this.gl, { position: { numComponents: 2, data: positions }, texcoord: { numComponents: 2, data: texCoords }, indices: indices }); // collect references to buffer objects for (let name in bufferInfo.attribs) this.refs.buffers.push(bufferInfo.attribs[name].buffer); return bufferInfo; } private setBuffersAndAttribs(program: ProgramInfo, buffer: BufferInfo) { if (this.checkContextLoss()) return; const gl = this.gl; setAttributes(program.attribSetters, buffer.attribs); gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, buffer.indices); } private createTexture(type: number, minMag: number, wrap: number, width = 1, height = 1) { if (this.checkContextLoss()) return; const gl = this.gl; const tex = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, tex); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, wrap); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, wrap); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, minMag); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, minMag); gl.texImage2D(gl.TEXTURE_2D, 0, type, width, height, 0, type, gl.UNSIGNED_BYTE, null); 
this.refs.textures.push(tex); this.textureTypes.set(tex, type); this.textureSizes.set(tex, { width, height }); return tex; } private resizeTexture(texture: WebGLTexture, width: number, height: number) { if (this.checkContextLoss()) return; const gl = this.gl; const textureType = this.textureTypes.get(texture); gl.bindTexture(gl.TEXTURE_2D, texture); gl.texImage2D(gl.TEXTURE_2D, 0, textureType, width, height, 0, textureType, gl.UNSIGNED_BYTE, null); this.textureSizes.set(texture, { width, height }); } private createFramebuffer(texture: WebGLTexture) { if (this.checkContextLoss()) return; const gl = this.gl; const fb = gl.createFramebuffer(); gl.bindFramebuffer(gl.FRAMEBUFFER, fb); gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0); this.refs.frameBuffers.push(fb); this.frameBufferTextures.set(fb, texture); return fb; } private useFramebuffer(fb: WebGLFramebuffer, viewX?: number, viewY?: number, viewWidth?: number, viewHeight?: number) { if (this.checkContextLoss()) return; const gl = this.gl; if (fb === null) { gl.bindFramebuffer(gl.FRAMEBUFFER, null); gl.viewport(viewX ?? 0, viewY ?? 0, viewWidth ?? gl.drawingBufferWidth, viewHeight ?? gl.drawingBufferHeight); } else { const tex = this.frameBufferTextures.get(fb); const { width, height } = this.textureSizes.get(tex); gl.bindFramebuffer(gl.FRAMEBUFFER, fb); gl.viewport(viewX ?? 0, viewY ?? 0, viewWidth ?? width, viewHeight ?? height); } } private resizeFramebuffer(fb: WebGLFramebuffer, width: number, height: number) {
{ this.setCanvasSize(this.width, this.height); const gl = this.gl; if (this.checkContextLoss()) return; this.layerProgram = this.createProgram(vertShaderLayer, fragShaderLayer); this.upscaleProgram = this.createProgram(vertShaderUpscale, fragShaderUpscale); this.quadBuffer = this.createScreenQuad(-1, -1, 2, 2, 1, 1); this.setBuffersAndAttribs(this.layerProgram, this.quadBuffer); this.layerTexture = this.createTexture(gl.RGBA, gl.LINEAR, gl.CLAMP_TO_EDGE); this.frameTexture = this.createTexture(gl.RGBA, gl.LINEAR, gl.CLAMP_TO_EDGE); this.frameBuffer = this.createFramebuffer(this.frameTexture); }
identifier_body
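The identifier_body in this row is init(), and like nearly every other method of the class it begins with `if (this.checkContextLoss()) return;` so that nothing touches a lost WebGL context. A hedged Python sketch of that guard expressed as a decorator; Renderer, guard_context_loss, and the method names are invented for the example and are not part of the original code.

import functools

def guard_context_loss(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.check_context_loss():      # bail out instead of using a dead context
            return None
        return method(self, *args, **kwargs)
    return wrapper

class Renderer:
    def __init__(self):
        self.is_ctx_lost = False

    def check_context_loss(self):
        return self.is_ctx_lost

    @guard_context_loss
    def draw_frame(self, frame_index):
        return "drew frame %d" % frame_index

r = Renderer()
assert r.draw_frame(0) == "drew frame 0"
r.is_ctx_lost = True
assert r.draw_frame(0) is None             # guarded call becomes a no-op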
WebGlCanvas.ts
CanvasStereoscopicMode.Dual, // CanvasStereoscopicMode.Anaglyph, // couldn't get this working, despite spending lots of time on it :/ ]; /** */ stereoscopeMode = CanvasStereoscopicMode.None; /** */ stereoscopeStrength = 0; private options: WebglCanvasOptions; private layerProgram: ProgramInfo; // for drawing renderbuffer w/ filtering private upscaleProgram: ProgramInfo; // for drawing renderbuffer w/ filtering private quadBuffer: BufferInfo; private paletteBuffer = new Uint32Array(16); private layerTexture: WebGLTexture; private layerTexturePixelBuffer: Uint32Array; private layerTexturePixels: Uint8Array; // will be same memory as layerTexturePixelBuffer, just uint8 for webgl texture private frameTexture: WebGLTexture; private frameBuffer: WebGLFramebuffer; private textureTypes = new Map<WebGLTexture, number>(); private textureSizes = new Map<WebGLTexture, { width: number, height: number }>(); private frameBufferTextures = new Map<WebGLFramebuffer, WebGLTexture>(); private refs: ResourceMap = { programs: [], shaders: [], textures: [], buffers: [], frameBuffers: [] }; private isCtxLost = false; /** * Creates a new WebGlCanvas instance * @param el - Canvas HTML element to use as a rendering surface * @param width - Canvas width in CSS pixels * @param height - Canvas height in CSS pixels * * The ratio between `width` and `height` should be 3:4 for best results */ constructor(parent: Element, width=640, height=480, options: Partial<WebglCanvasOptions> = {}) { assertBrowserEnv(); this.options = { ...WebglCanvas.defaultOptions, ...options }; this.width = width; this.height = height; this.canvas = document.createElement('canvas'); this.canvas.addEventListener('webglcontextlost', this.handleContextLoss, false); this.canvas.addEventListener('webglcontextrestored', this.handleContextRestored, false); this.canvas.className = 'FlipnoteCanvas FlipnoteCanvas--webgl'; this.gl = this.canvas.getContext('webgl', { antialias: false, alpha: true }); if (parent) parent.appendChild(this.canvas); this.init(); } private init() { this.setCanvasSize(this.width, this.height); const gl = this.gl; if (this.checkContextLoss()) return; this.layerProgram = this.createProgram(vertShaderLayer, fragShaderLayer); this.upscaleProgram = this.createProgram(vertShaderUpscale, fragShaderUpscale); this.quadBuffer = this.createScreenQuad(-1, -1, 2, 2, 1, 1); this.setBuffersAndAttribs(this.layerProgram, this.quadBuffer); this.layerTexture = this.createTexture(gl.RGBA, gl.LINEAR, gl.CLAMP_TO_EDGE); this.frameTexture = this.createTexture(gl.RGBA, gl.LINEAR, gl.CLAMP_TO_EDGE); this.frameBuffer = this.createFramebuffer(this.frameTexture); } private createProgram(vertexShaderSource: string, fragmentShaderSource: string) { if (this.checkContextLoss()) return; const gl = this.gl; const vert = this.createShader(gl.VERTEX_SHADER, vertexShaderSource); const frag = this.createShader(gl.FRAGMENT_SHADER, fragmentShaderSource); const program = gl.createProgram(); // set up shaders gl.attachShader(program, vert); gl.attachShader(program, frag); // link program gl.linkProgram(program); if (!gl.getProgramParameter(program, gl.LINK_STATUS)) { const log = gl.getProgramInfoLog(program); gl.deleteProgram(program); throw new Error(log); } const programInfo = createProgramInfoFromProgram(gl, program); this.refs.programs.push(program); return programInfo; } private createShader(type: number, source: string) { if (this.checkContextLoss()) return; const gl = this.gl; const shader = gl.createShader(type); gl.shaderSource(shader, source); 
gl.compileShader(shader); // test if shader compilation was successful if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) { const log = gl.getShaderInfoLog(shader); gl.deleteShader(shader); throw new Error(log); } this.refs.shaders.push(shader); return shader; } // creating a subdivided quad seems to produce slightly nicer texture filtering private createScreenQuad(x0: number, y0: number, width: number, height: number, xSubdivs: number, ySubdivs: number) { if (this.checkContextLoss()) return; const numVerts = (xSubdivs + 1) * (ySubdivs + 1); const numVertsAcross = xSubdivs + 1; const positions = new Float32Array(numVerts * 2); const texCoords = new Float32Array(numVerts * 2); let positionPtr = 0; let texCoordPtr = 0; for (let y = 0; y <= ySubdivs; y++) { for (let x = 0; x <= xSubdivs; x++) { const u = x / xSubdivs; const v = y / ySubdivs; positions[positionPtr++] = x0 + width * u; positions[positionPtr++] = y0 + height * v; texCoords[texCoordPtr++] = u; texCoords[texCoordPtr++] = v; } } const indices = new Uint16Array(xSubdivs * ySubdivs * 2 * 3); let indicesPtr = 0; for (let y = 0; y < ySubdivs; y++) { for (let x = 0; x < xSubdivs; x++) { // triangle 1 indices[indicesPtr++] = (y + 0) * numVertsAcross + x; indices[indicesPtr++] = (y + 1) * numVertsAcross + x; indices[indicesPtr++] = (y + 0) * numVertsAcross + x + 1; // triangle 2 indices[indicesPtr++] = (y + 0) * numVertsAcross + x + 1; indices[indicesPtr++] = (y + 1) * numVertsAcross + x; indices[indicesPtr++] = (y + 1) * numVertsAcross + x + 1; } } const bufferInfo = createBufferInfoFromArrays(this.gl, { position: { numComponents: 2, data: positions }, texcoord: { numComponents: 2, data: texCoords }, indices: indices }); // collect references to buffer objects for (let name in bufferInfo.attribs) this.refs.buffers.push(bufferInfo.attribs[name].buffer); return bufferInfo; } private setBuffersAndAttribs(program: ProgramInfo, buffer: BufferInfo) { if (this.checkContextLoss()) return; const gl = this.gl; setAttributes(program.attribSetters, buffer.attribs); gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, buffer.indices); } private createTexture(type: number, minMag: number, wrap: number, width = 1, height = 1) { if (this.checkContextLoss()) return; const gl = this.gl; const tex = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, tex); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, wrap); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, wrap); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, minMag); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, minMag); gl.texImage2D(gl.TEXTURE_2D, 0, type, width, height, 0, type, gl.UNSIGNED_BYTE, null); this.refs.textures.push(tex); this.textureTypes.set(tex, type); this.textureSizes.set(tex, { width, height }); return tex; } private resizeTexture(texture: WebGLTexture, width: number, height: number) { if (this.checkContextLoss()) return; const gl = this.gl; const textureType = this.textureTypes.get(texture); gl.bindTexture(gl.TEXTURE_2D, texture); gl.texImage2D(gl.TEXTURE_2D, 0, textureType, width, height, 0, textureType, gl.UNSIGNED_BYTE, null); this.textureSizes.set(texture, { width, height }); } private createFramebuffer(texture: WebGLTexture) { if (this.checkContextLoss()) return; const gl = this.gl; const fb = gl.createFramebuffer(); gl.bindFramebuffer(gl.FRAMEBUFFER, fb); gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0
/** */ frameIndex: number; /** */ supportedStereoscopeModes = [ CanvasStereoscopicMode.None,
random_line_split
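A detail worth noting in the row above: createTexture records each texture's pixel format and size in Map objects so that resizeTexture and resizeFramebuffer can later re-allocate storage without being told the format again. A minimal Python sketch of that bookkeeping; TextureRegistry and the string handles are stand-ins for WebGL objects, not the library's API.

class TextureRegistry:
    def __init__(self):
        self._types = {}   # handle -> pixel format
        self._sizes = {}   # handle -> (width, height)

    def create(self, handle, pixel_format, width=1, height=1):
        self._types[handle] = pixel_format
        self._sizes[handle] = (width, height)

    def resize(self, handle, width, height):
        # the format is looked up from the registry, never passed in again
        pixel_format = self._types[handle]
        self._sizes[handle] = (width, height)
        return pixel_format, width, height

registry = TextureRegistry()
registry.create("layerTexture", "RGBA")
assert registry.resize("layerTexture", 256, 192) == ("RGBA", 256, 192)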
tables.py
Parameters... none # CRITICAL for multiple input ports def FillInputPortInformation(self, port, info): """Used by pipeline. Necessary when dealing with multiple input ports """ # all are tables so no need to check port info.Set(self.INPUT_REQUIRED_DATA_TYPE(), "vtkTable") return 1 def RequestData(self, request, inInfo, outInfo): """Used by pipeline to generate output """ # Inputs from different ports: pdi0 = self.GetInputData(inInfo, 0, 0) pdi1 = self.GetInputData(inInfo, 1, 0) pdo = self.GetOutputData(outInfo, 0) pdo.DeepCopy(pdi0) # Get number of rows nrows = pdi0.GetNumberOfRows() nrows1 = pdi1.GetNumberOfRows() assert(nrows == nrows1) for i in range(pdi1.GetRowData().GetNumberOfArrays()): arr = pdi1.GetRowData().GetArray(i) pdo.GetRowData().AddArray(arr) return 1 def Apply(self, table0, table1): self.SetInputDataObject(0, table0) self.SetInputDataObject(1, table1) self.Update() return self.GetOutput() ############################################################################### #---- Reshape Table ----# class ReshapeTable(FilterBase): """This filter will take a ``vtkTable`` object and reshape it. This filter essentially treats ``vtkTable``s as 2D matrices and reshapes them using ``numpy.reshape`` in a C contiguous manner. Unfortunately, data fields will be renamed arbitrarily because VTK data arrays require a name. """ __displayname__ = 'Reshape Table' __category__ = 'filter' def __init__(self, **kwargs): FilterBase.__init__(self, nInputPorts=1, inputType='vtkTable', nOutputPorts=1, outputType='vtkTable') # Parameters self.__nrows = kwargs.get('nrows', 1) self.__ncols = kwargs.get('ncols', 1) self.__names = kwargs.get('names', []) self.__order = kwargs.get('order', 'F') def _Reshape(self, pdi, pdo): """Internal helper to perfrom the reshape """ # Get number of columns cols = pdi.GetNumberOfColumns() # Get number of rows rows = pdi.GetColumn(0).GetNumberOfTuples() if len(self.__names) is not 0: num = len(self.__names) if num < self.__ncols: for i in range(num, self.__ncols): self.__names.append('Field %d' % i) elif num > self.__ncols: raise _helpers.PVGeoError('Too many array names. `ncols` specified as %d and %d names given.' % (self.__ncols, num)) else: self.__names = ['Field %d' % i for i in range(self.__ncols)] # Make a 2D numpy array and fill with data from input table data = np.empty((rows,cols)) for i in range(cols): c = pdi.GetColumn(i) data[:,i] = interface.convertArray(c) if ((self.__ncols*self.__nrows) != (cols*rows)): raise _helpers.PVGeoError('Total number of elements must remain %d. Check reshape dimensions.' % (cols*rows)) # Use numpy.reshape() to reshape data NOTE: only 2D because its a table # NOTE: column access of this reshape is not contigous data = np.array(np.reshape(data.flatten(), (self.__nrows,self.__ncols), order=self.__order)) pdo.SetNumberOfRows(self.__nrows) # Add new array to output table and assign incremental names (e.g. Field0) for i in range(self.__ncols): # Make a contigous array from the column we want col = np.array(data[:,i]) # allow type to be determined by input # VTK arrays need a name. Set arbitrarily insert = interface.convertArray(col, name=self.__names[i]) # array_type=vtk.VTK_FLOAT #pdo.AddColumn(insert) # these are not getting added to the output table # ... 
work around: pdo.GetRowData().AddArray(insert) # NOTE: this is in the FieldData return pdo def RequestData(self, request, inInfo, outInfo): """Used by pipeline """ # Get input/output of Proxy pdi = self.GetInputData(inInfo, 0, 0) pdo = self.GetOutputData(outInfo, 0) # Perfrom task self._Reshape(pdi, pdo) return 1 #### Seters and Geters #### def SetNames(self, names): """Set names using a semicolon (;) seperated string or a list of strings Args: names (string): a string of data array names for the reshaped table using a semicolon (;) to spearate """ # parse the names (a semicolon seperated list of names) if isinstance(names, str): names = names.split(';') if self.__names != names: self.__names = names self.Modified() def AddName(self, name): """Use to append a name to the list of data array names for the output table. """ self.__names.append(name) self.Modified() def GetNames(self): return self.__names def SetNumberOfColumns(self, ncols): """Set the number of columns for the output ``vtkTable`` """ if isinstance(ncols, float): ncols = int(ncols) if self.__ncols != ncols: self.__ncols = ncols self.Modified() def SetNumberOfRows(self, nrows): """Set the number of rows for the output ``vtkTable`` """ if isinstance(nrows, float): nrows = int(nrows) if self.__nrows != nrows: self.__nrows = nrows self.Modified() def SetOrder(self, order): """Set the reshape order (``'C'`` of ``'F'``) """ if self.__order != order: self.__order = order self.Modified() ############################################################################### class ExtractArray(FilterBase): """Extract an array from a ``vtkDataSet`` and make a ``vtkTable`` of it. """ __displayname__ = 'Extract Array' __category__ = 'filter' def __init__(self): FilterBase.__init__(self, nInputPorts=1, inputType='vtkDataSet', nOutputPorts=1, outputType='vtkTable') self.__inputArray = [None, None] def RequestData(self, request, inInfo, outInfo): """Used by pipeline to generate output """ # Inputs from different ports: pdi = self.GetInputData(inInfo, 0, 0) table = self.GetOutputData(outInfo, 0) # Note user has to select a single array to save out field, name = self.__inputArray[0], self.__inputArray[1] vtkarr = _helpers.getVTKArray(pdi, field, name) table.GetRowData().AddArray(vtkarr) return 1 def SetInputArrayToProcess(self, idx, port, connection, field, name): """Used to set the input array(s) Args: idx (int): the index of the array to process port (int): input port (use 0 if unsure) connection (int): the connection on the port (use 0 if unsure) field (int): the array field (0 for points, 1 for cells, 2 for field, and 6 for row) name (int): the name of the array """ if self.__inputArray[0] != field: self.__inputArray[0] = field self.Modified() if self.__inputArray[1] != name: self.__inputArray[1] = name self.Modified()
def Apply(self, inputDataObject, arrayName): self.SetInputDataObject(inputDataObject) arr, field = _helpers.searchForArray(inputDataObject, arrayName) self.SetInputArrayToProcess(0, 0, 0, field, arrayName) self.Update() return self.GetOutput() ############################################################################### class SplitTableOnArray(FilterBase): """A filter to seperate table data based on the unique values of a given data array into a ``vtkMultiBlockDataSet``. """ __displayname__ = 'Split Table On Array' __category__ = 'filter' def __init__(self): FilterBase.__init__(self, nInputPorts=1, inputType='vtkTable', nOutputPorts=1, outputType='vtkMultiBlockDataSet') self.__inputArray = [None, None] def RequestData(self, request, inInfo, outInfo): # Get input/output of Proxy table = self.GetInputData(inInfo, 0, 0) # Get
return 1
random_line_split
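The ReshapeTable filter in the row above treats the input vtkTable as a 2D matrix, flattens it, and re-lays it out as (nrows, ncols) in the requested order ('F' by default). A numpy-only sketch of that core step with the VTK plumbing stripped away; reshape_table is an illustrative name.

import numpy as np

def reshape_table(data, nrows, ncols, order="F"):
    data = np.asarray(data)
    if nrows * ncols != data.size:
        raise ValueError("Total number of elements must remain %d." % data.size)
    # flatten() is C-contiguous; the requested order only applies to the new shape
    return np.reshape(data.flatten(), (nrows, ncols), order=order)

table = np.arange(6).reshape(3, 2)     # 3 rows, 2 columns
print(reshape_table(table, 2, 3, order="F"))
# [[0 2 4]
#  [1 3 5]]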
tables.py
Parameters... none # CRITICAL for multiple input ports def FillInputPortInformation(self, port, info): """Used by pipeline. Necessary when dealing with multiple input ports """ # all are tables so no need to check port info.Set(self.INPUT_REQUIRED_DATA_TYPE(), "vtkTable") return 1 def RequestData(self, request, inInfo, outInfo): """Used by pipeline to generate output """ # Inputs from different ports: pdi0 = self.GetInputData(inInfo, 0, 0) pdi1 = self.GetInputData(inInfo, 1, 0) pdo = self.GetOutputData(outInfo, 0) pdo.DeepCopy(pdi0) # Get number of rows nrows = pdi0.GetNumberOfRows() nrows1 = pdi1.GetNumberOfRows() assert(nrows == nrows1) for i in range(pdi1.GetRowData().GetNumberOfArrays()): arr = pdi1.GetRowData().GetArray(i) pdo.GetRowData().AddArray(arr) return 1 def Apply(self, table0, table1): self.SetInputDataObject(0, table0) self.SetInputDataObject(1, table1) self.Update() return self.GetOutput() ############################################################################### #---- Reshape Table ----# class ReshapeTable(FilterBase): """This filter will take a ``vtkTable`` object and reshape it. This filter essentially treats ``vtkTable``s as 2D matrices and reshapes them using ``numpy.reshape`` in a C contiguous manner. Unfortunately, data fields will be renamed arbitrarily because VTK data arrays require a name. """ __displayname__ = 'Reshape Table' __category__ = 'filter' def __init__(self, **kwargs): FilterBase.__init__(self, nInputPorts=1, inputType='vtkTable', nOutputPorts=1, outputType='vtkTable') # Parameters self.__nrows = kwargs.get('nrows', 1) self.__ncols = kwargs.get('ncols', 1) self.__names = kwargs.get('names', []) self.__order = kwargs.get('order', 'F') def _Reshape(self, pdi, pdo): """Internal helper to perfrom the reshape """ # Get number of columns cols = pdi.GetNumberOfColumns() # Get number of rows rows = pdi.GetColumn(0).GetNumberOfTuples() if len(self.__names) is not 0: num = len(self.__names) if num < self.__ncols: for i in range(num, self.__ncols): self.__names.append('Field %d' % i) elif num > self.__ncols: raise _helpers.PVGeoError('Too many array names. `ncols` specified as %d and %d names given.' % (self.__ncols, num)) else: self.__names = ['Field %d' % i for i in range(self.__ncols)] # Make a 2D numpy array and fill with data from input table data = np.empty((rows,cols)) for i in range(cols): c = pdi.GetColumn(i) data[:,i] = interface.convertArray(c) if ((self.__ncols*self.__nrows) != (cols*rows)): raise _helpers.PVGeoError('Total number of elements must remain %d. Check reshape dimensions.' % (cols*rows)) # Use numpy.reshape() to reshape data NOTE: only 2D because its a table # NOTE: column access of this reshape is not contigous data = np.array(np.reshape(data.flatten(), (self.__nrows,self.__ncols), order=self.__order)) pdo.SetNumberOfRows(self.__nrows) # Add new array to output table and assign incremental names (e.g. Field0) for i in range(self.__ncols): # Make a contigous array from the column we want col = np.array(data[:,i]) # allow type to be determined by input # VTK arrays need a name. Set arbitrarily insert = interface.convertArray(col, name=self.__names[i]) # array_type=vtk.VTK_FLOAT #pdo.AddColumn(insert) # these are not getting added to the output table # ... 
work around: pdo.GetRowData().AddArray(insert) # NOTE: this is in the FieldData return pdo def RequestData(self, request, inInfo, outInfo): """Used by pipeline """ # Get input/output of Proxy pdi = self.GetInputData(inInfo, 0, 0) pdo = self.GetOutputData(outInfo, 0) # Perfrom task self._Reshape(pdi, pdo) return 1 #### Seters and Geters #### def SetNames(self, names): """Set names using a semicolon (;) seperated string or a list of strings Args: names (string): a string of data array names for the reshaped table using a semicolon (;) to spearate """ # parse the names (a semicolon seperated list of names) if isinstance(names, str): names = names.split(';') if self.__names != names: self.__names = names self.Modified() def AddName(self, name): """Use to append a name to the list of data array names for the output table. """ self.__names.append(name) self.Modified() def GetNames(self): return self.__names def SetNumberOfColumns(self, ncols):
def SetNumberOfRows(self, nrows): """Set the number of rows for the output ``vtkTable`` """ if isinstance(nrows, float): nrows = int(nrows) if self.__nrows != nrows: self.__nrows = nrows self.Modified() def SetOrder(self, order): """Set the reshape order (``'C'`` of ``'F'``) """ if self.__order != order: self.__order = order self.Modified() ############################################################################### class ExtractArray(FilterBase): """Extract an array from a ``vtkDataSet`` and make a ``vtkTable`` of it. """ __displayname__ = 'Extract Array' __category__ = 'filter' def __init__(self): FilterBase.__init__(self, nInputPorts=1, inputType='vtkDataSet', nOutputPorts=1, outputType='vtkTable') self.__inputArray = [None, None] def RequestData(self, request, inInfo, outInfo): """Used by pipeline to generate output """ # Inputs from different ports: pdi = self.GetInputData(inInfo, 0, 0) table = self.GetOutputData(outInfo, 0) # Note user has to select a single array to save out field, name = self.__inputArray[0], self.__inputArray[1] vtkarr = _helpers.getVTKArray(pdi, field, name) table.GetRowData().AddArray(vtkarr) return 1 def SetInputArrayToProcess(self, idx, port, connection, field, name): """Used to set the input array(s) Args: idx (int): the index of the array to process port (int): input port (use 0 if unsure) connection (int): the connection on the port (use 0 if unsure) field (int): the array field (0 for points, 1 for cells, 2 for field, and 6 for row) name (int): the name of the array """ if self.__inputArray[0] != field: self.__inputArray[0] = field self.Modified() if self.__inputArray[1] != name: self.__inputArray[1] = name self.Modified() return 1 def Apply(self, inputDataObject, arrayName): self.SetInputDataObject(inputDataObject) arr, field = _helpers.searchForArray(inputDataObject, arrayName) self.SetInputArrayToProcess(0, 0, 0, field, arrayName) self.Update() return self.GetOutput() ############################################################################### class SplitTableOnArray(FilterBase): """A filter to seperate table data based on the unique values of a given data array into a ``vtkMultiBlockDataSet``. """ __displayname__ = 'Split Table On Array' __category__ = 'filter' def __init__(self): FilterBase.__init__(self, nInputPorts=1, inputType='vtkTable', nOutputPorts=1, outputType='vtkMultiBlockDataSet') self.__inputArray = [None, None] def RequestData(self, request, inInfo, outInfo): # Get input/output of Proxy table = self.GetInputData(inInfo, 0, 0) #
"""Set the number of columns for the output ``vtkTable`` """ if isinstance(ncols, float): ncols = int(ncols) if self.__ncols != ncols: self.__ncols = ncols self.Modified()
identifier_body
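The row above introduces SplitTableOnArray, which separates table rows into blocks keyed by the unique values of one column. A hedged numpy sketch of that grouping step, with a dict of arrays standing in for vtkTable and the returned dict standing in for the vtkMultiBlockDataSet:

import numpy as np

def split_on_array(table, key):
    split_column = np.asarray(table[key])
    blocks = {}
    for value in np.unique(split_column):
        mask = split_column == value
        # keep every column, but only the rows where the key matches
        blocks[value] = {name: np.asarray(col)[mask] for name, col in table.items()}
    return blocks

table = {"zone": np.array([1, 2, 1, 2]),
         "value": np.array([10.0, 20.0, 30.0, 40.0])}
blocks = split_on_array(table, "zone")
assert list(blocks[1]["value"]) == [10.0, 30.0]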
tables.py
Parameters... none # CRITICAL for multiple input ports def FillInputPortInformation(self, port, info): """Used by pipeline. Necessary when dealing with multiple input ports """ # all are tables so no need to check port info.Set(self.INPUT_REQUIRED_DATA_TYPE(), "vtkTable") return 1 def RequestData(self, request, inInfo, outInfo): """Used by pipeline to generate output """ # Inputs from different ports: pdi0 = self.GetInputData(inInfo, 0, 0) pdi1 = self.GetInputData(inInfo, 1, 0) pdo = self.GetOutputData(outInfo, 0) pdo.DeepCopy(pdi0) # Get number of rows nrows = pdi0.GetNumberOfRows() nrows1 = pdi1.GetNumberOfRows() assert(nrows == nrows1) for i in range(pdi1.GetRowData().GetNumberOfArrays()): arr = pdi1.GetRowData().GetArray(i) pdo.GetRowData().AddArray(arr) return 1 def Apply(self, table0, table1): self.SetInputDataObject(0, table0) self.SetInputDataObject(1, table1) self.Update() return self.GetOutput() ############################################################################### #---- Reshape Table ----# class ReshapeTable(FilterBase): """This filter will take a ``vtkTable`` object and reshape it. This filter essentially treats ``vtkTable``s as 2D matrices and reshapes them using ``numpy.reshape`` in a C contiguous manner. Unfortunately, data fields will be renamed arbitrarily because VTK data arrays require a name. """ __displayname__ = 'Reshape Table' __category__ = 'filter' def
(self, **kwargs): FilterBase.__init__(self, nInputPorts=1, inputType='vtkTable', nOutputPorts=1, outputType='vtkTable') # Parameters self.__nrows = kwargs.get('nrows', 1) self.__ncols = kwargs.get('ncols', 1) self.__names = kwargs.get('names', []) self.__order = kwargs.get('order', 'F') def _Reshape(self, pdi, pdo): """Internal helper to perfrom the reshape """ # Get number of columns cols = pdi.GetNumberOfColumns() # Get number of rows rows = pdi.GetColumn(0).GetNumberOfTuples() if len(self.__names) is not 0: num = len(self.__names) if num < self.__ncols: for i in range(num, self.__ncols): self.__names.append('Field %d' % i) elif num > self.__ncols: raise _helpers.PVGeoError('Too many array names. `ncols` specified as %d and %d names given.' % (self.__ncols, num)) else: self.__names = ['Field %d' % i for i in range(self.__ncols)] # Make a 2D numpy array and fill with data from input table data = np.empty((rows,cols)) for i in range(cols): c = pdi.GetColumn(i) data[:,i] = interface.convertArray(c) if ((self.__ncols*self.__nrows) != (cols*rows)): raise _helpers.PVGeoError('Total number of elements must remain %d. Check reshape dimensions.' % (cols*rows)) # Use numpy.reshape() to reshape data NOTE: only 2D because its a table # NOTE: column access of this reshape is not contigous data = np.array(np.reshape(data.flatten(), (self.__nrows,self.__ncols), order=self.__order)) pdo.SetNumberOfRows(self.__nrows) # Add new array to output table and assign incremental names (e.g. Field0) for i in range(self.__ncols): # Make a contigous array from the column we want col = np.array(data[:,i]) # allow type to be determined by input # VTK arrays need a name. Set arbitrarily insert = interface.convertArray(col, name=self.__names[i]) # array_type=vtk.VTK_FLOAT #pdo.AddColumn(insert) # these are not getting added to the output table # ... work around: pdo.GetRowData().AddArray(insert) # NOTE: this is in the FieldData return pdo def RequestData(self, request, inInfo, outInfo): """Used by pipeline """ # Get input/output of Proxy pdi = self.GetInputData(inInfo, 0, 0) pdo = self.GetOutputData(outInfo, 0) # Perfrom task self._Reshape(pdi, pdo) return 1 #### Seters and Geters #### def SetNames(self, names): """Set names using a semicolon (;) seperated string or a list of strings Args: names (string): a string of data array names for the reshaped table using a semicolon (;) to spearate """ # parse the names (a semicolon seperated list of names) if isinstance(names, str): names = names.split(';') if self.__names != names: self.__names = names self.Modified() def AddName(self, name): """Use to append a name to the list of data array names for the output table. """ self.__names.append(name) self.Modified() def GetNames(self): return self.__names def SetNumberOfColumns(self, ncols): """Set the number of columns for the output ``vtkTable`` """ if isinstance(ncols, float): ncols = int(ncols) if self.__ncols != ncols: self.__ncols = ncols self.Modified() def SetNumberOfRows(self, nrows): """Set the number of rows for the output ``vtkTable`` """ if isinstance(nrows, float): nrows = int(nrows) if self.__nrows != nrows: self.__nrows = nrows self.Modified() def SetOrder(self, order): """Set the reshape order (``'C'`` of ``'F'``) """ if self.__order != order: self.__order = order self.Modified() ############################################################################### class ExtractArray(FilterBase): """Extract an array from a ``vtkDataSet`` and make a ``vtkTable`` of it. 
""" __displayname__ = 'Extract Array' __category__ = 'filter' def __init__(self): FilterBase.__init__(self, nInputPorts=1, inputType='vtkDataSet', nOutputPorts=1, outputType='vtkTable') self.__inputArray = [None, None] def RequestData(self, request, inInfo, outInfo): """Used by pipeline to generate output """ # Inputs from different ports: pdi = self.GetInputData(inInfo, 0, 0) table = self.GetOutputData(outInfo, 0) # Note user has to select a single array to save out field, name = self.__inputArray[0], self.__inputArray[1] vtkarr = _helpers.getVTKArray(pdi, field, name) table.GetRowData().AddArray(vtkarr) return 1 def SetInputArrayToProcess(self, idx, port, connection, field, name): """Used to set the input array(s) Args: idx (int): the index of the array to process port (int): input port (use 0 if unsure) connection (int): the connection on the port (use 0 if unsure) field (int): the array field (0 for points, 1 for cells, 2 for field, and 6 for row) name (int): the name of the array """ if self.__inputArray[0] != field: self.__inputArray[0] = field self.Modified() if self.__inputArray[1] != name: self.__inputArray[1] = name self.Modified() return 1 def Apply(self, inputDataObject, arrayName): self.SetInputDataObject(inputDataObject) arr, field = _helpers.searchForArray(inputDataObject, arrayName) self.SetInputArrayToProcess(0, 0, 0, field, arrayName) self.Update() return self.GetOutput() ############################################################################### class SplitTableOnArray(FilterBase): """A filter to seperate table data based on the unique values of a given data array into a ``vtkMultiBlockDataSet``. """ __displayname__ = 'Split Table On Array' __category__ = 'filter' def __init__(self): FilterBase.__init__(self, nInputPorts=1, inputType='vtkTable', nOutputPorts=1, outputType='vtkMultiBlockDataSet') self.__inputArray = [None, None] def RequestData(self, request, inInfo, outInfo): # Get input/output of Proxy table = self.GetInputData(inInfo, 0, 0) #
__init__
identifier_name
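The filter at the top of this row (its class statement is cut off by the dump) deep-copies the first input table and appends every column of the second after asserting that the row counts match. A rough sketch of the same column-wise merge using plain dicts of numpy arrays; column names are assumed to be distinct, and combine_tables is an illustrative name.

import numpy as np

def combine_tables(table0, table1):
    nrows0 = len(next(iter(table0.values())))
    nrows1 = len(next(iter(table1.values())))
    assert nrows0 == nrows1, "both tables must have the same number of rows"
    combined = dict(table0)     # analogous to pdo.DeepCopy(pdi0)
    combined.update(table1)     # analogous to adding each array of the second table
    return combined

t0 = {"x": np.arange(3)}
t1 = {"y": np.arange(3) * 2.0}
assert set(combine_tables(t0, t1)) == {"x", "y"}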
tables.py
Parameters... none # CRITICAL for multiple input ports def FillInputPortInformation(self, port, info): """Used by pipeline. Necessary when dealing with multiple input ports """ # all are tables so no need to check port info.Set(self.INPUT_REQUIRED_DATA_TYPE(), "vtkTable") return 1 def RequestData(self, request, inInfo, outInfo): """Used by pipeline to generate output """ # Inputs from different ports: pdi0 = self.GetInputData(inInfo, 0, 0) pdi1 = self.GetInputData(inInfo, 1, 0) pdo = self.GetOutputData(outInfo, 0) pdo.DeepCopy(pdi0) # Get number of rows nrows = pdi0.GetNumberOfRows() nrows1 = pdi1.GetNumberOfRows() assert(nrows == nrows1) for i in range(pdi1.GetRowData().GetNumberOfArrays()): arr = pdi1.GetRowData().GetArray(i) pdo.GetRowData().AddArray(arr) return 1 def Apply(self, table0, table1): self.SetInputDataObject(0, table0) self.SetInputDataObject(1, table1) self.Update() return self.GetOutput() ############################################################################### #---- Reshape Table ----# class ReshapeTable(FilterBase): """This filter will take a ``vtkTable`` object and reshape it. This filter essentially treats ``vtkTable``s as 2D matrices and reshapes them using ``numpy.reshape`` in a C contiguous manner. Unfortunately, data fields will be renamed arbitrarily because VTK data arrays require a name. """ __displayname__ = 'Reshape Table' __category__ = 'filter' def __init__(self, **kwargs): FilterBase.__init__(self, nInputPorts=1, inputType='vtkTable', nOutputPorts=1, outputType='vtkTable') # Parameters self.__nrows = kwargs.get('nrows', 1) self.__ncols = kwargs.get('ncols', 1) self.__names = kwargs.get('names', []) self.__order = kwargs.get('order', 'F') def _Reshape(self, pdi, pdo): """Internal helper to perfrom the reshape """ # Get number of columns cols = pdi.GetNumberOfColumns() # Get number of rows rows = pdi.GetColumn(0).GetNumberOfTuples() if len(self.__names) is not 0: num = len(self.__names) if num < self.__ncols: for i in range(num, self.__ncols): self.__names.append('Field %d' % i) elif num > self.__ncols:
else: self.__names = ['Field %d' % i for i in range(self.__ncols)] # Make a 2D numpy array and fill with data from input table data = np.empty((rows,cols)) for i in range(cols): c = pdi.GetColumn(i) data[:,i] = interface.convertArray(c) if ((self.__ncols*self.__nrows) != (cols*rows)): raise _helpers.PVGeoError('Total number of elements must remain %d. Check reshape dimensions.' % (cols*rows)) # Use numpy.reshape() to reshape data NOTE: only 2D because its a table # NOTE: column access of this reshape is not contigous data = np.array(np.reshape(data.flatten(), (self.__nrows,self.__ncols), order=self.__order)) pdo.SetNumberOfRows(self.__nrows) # Add new array to output table and assign incremental names (e.g. Field0) for i in range(self.__ncols): # Make a contigous array from the column we want col = np.array(data[:,i]) # allow type to be determined by input # VTK arrays need a name. Set arbitrarily insert = interface.convertArray(col, name=self.__names[i]) # array_type=vtk.VTK_FLOAT #pdo.AddColumn(insert) # these are not getting added to the output table # ... work around: pdo.GetRowData().AddArray(insert) # NOTE: this is in the FieldData return pdo def RequestData(self, request, inInfo, outInfo): """Used by pipeline """ # Get input/output of Proxy pdi = self.GetInputData(inInfo, 0, 0) pdo = self.GetOutputData(outInfo, 0) # Perfrom task self._Reshape(pdi, pdo) return 1 #### Seters and Geters #### def SetNames(self, names): """Set names using a semicolon (;) seperated string or a list of strings Args: names (string): a string of data array names for the reshaped table using a semicolon (;) to spearate """ # parse the names (a semicolon seperated list of names) if isinstance(names, str): names = names.split(';') if self.__names != names: self.__names = names self.Modified() def AddName(self, name): """Use to append a name to the list of data array names for the output table. """ self.__names.append(name) self.Modified() def GetNames(self): return self.__names def SetNumberOfColumns(self, ncols): """Set the number of columns for the output ``vtkTable`` """ if isinstance(ncols, float): ncols = int(ncols) if self.__ncols != ncols: self.__ncols = ncols self.Modified() def SetNumberOfRows(self, nrows): """Set the number of rows for the output ``vtkTable`` """ if isinstance(nrows, float): nrows = int(nrows) if self.__nrows != nrows: self.__nrows = nrows self.Modified() def SetOrder(self, order): """Set the reshape order (``'C'`` of ``'F'``) """ if self.__order != order: self.__order = order self.Modified() ############################################################################### class ExtractArray(FilterBase): """Extract an array from a ``vtkDataSet`` and make a ``vtkTable`` of it. 
""" __displayname__ = 'Extract Array' __category__ = 'filter' def __init__(self): FilterBase.__init__(self, nInputPorts=1, inputType='vtkDataSet', nOutputPorts=1, outputType='vtkTable') self.__inputArray = [None, None] def RequestData(self, request, inInfo, outInfo): """Used by pipeline to generate output """ # Inputs from different ports: pdi = self.GetInputData(inInfo, 0, 0) table = self.GetOutputData(outInfo, 0) # Note user has to select a single array to save out field, name = self.__inputArray[0], self.__inputArray[1] vtkarr = _helpers.getVTKArray(pdi, field, name) table.GetRowData().AddArray(vtkarr) return 1 def SetInputArrayToProcess(self, idx, port, connection, field, name): """Used to set the input array(s) Args: idx (int): the index of the array to process port (int): input port (use 0 if unsure) connection (int): the connection on the port (use 0 if unsure) field (int): the array field (0 for points, 1 for cells, 2 for field, and 6 for row) name (int): the name of the array """ if self.__inputArray[0] != field: self.__inputArray[0] = field self.Modified() if self.__inputArray[1] != name: self.__inputArray[1] = name self.Modified() return 1 def Apply(self, inputDataObject, arrayName): self.SetInputDataObject(inputDataObject) arr, field = _helpers.searchForArray(inputDataObject, arrayName) self.SetInputArrayToProcess(0, 0, 0, field, arrayName) self.Update() return self.GetOutput() ############################################################################### class SplitTableOnArray(FilterBase): """A filter to seperate table data based on the unique values of a given data array into a ``vtkMultiBlockDataSet``. """ __displayname__ = 'Split Table On Array' __category__ = 'filter' def __init__(self): FilterBase.__init__(self, nInputPorts=1, inputType='vtkTable', nOutputPorts=1, outputType='vtkMultiBlockDataSet') self.__inputArray = [None, None] def RequestData(self, request, inInfo, outInfo): # Get input/output of Proxy table = self.GetInputData(inInfo, 0, 0) #
raise _helpers.PVGeoError('Too many array names. `ncols` specified as %d and %d names given.' % (self.__ncols, num))
conditional_block
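The conditional_block middle in this row is the branch that raises when more array names are supplied than ncols; its sibling branches pad the list with generated 'Field N' entries or fall back to all-generated names. A standalone sketch of that validation logic (resolve_names is an invented helper name):

def resolve_names(names, ncols):
    names = list(names)
    if len(names) == 0:
        # no names given: generate all of them
        return ["Field %d" % i for i in range(ncols)]
    if len(names) > ncols:
        raise ValueError(
            "Too many array names. `ncols` specified as %d and %d names given."
            % (ncols, len(names)))
    # pad the remainder with generated names
    names += ["Field %d" % i for i in range(len(names), ncols)]
    return names

assert resolve_names(["a"], 3) == ["a", "Field 1", "Field 2"]
assert resolve_names([], 2) == ["Field 0", "Field 1"]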
testdriver.py
664 /target/testdriver.log') def log(msg=""): """ Logs the given text message to stdout AND the logfile """ print(msg) sys.stdout.flush() f = open("/target/testdriver.log", "a") f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now())) f.write(f"{msg}\n") f.close() def is_dry_run(): """ Checks if the testdriver should be executed as a 'dry run', which means that the cluster is not created. """ return 'DRY_RUN' in os.environ and os.environ['DRY_RUN']=='true' def is_interactive_mode(): """ Checks if the testdriver should be run in 'interactive mode', which means that the cluster is created and after that, the script waits for the file '/cluster_lock' to be deleted. """ return 'INTERACTIVE_MODE' in os.environ and os.environ['INTERACTIVE_MODE']=='true' def run_test_script(): if os.path.isfile("/test.sh"): os.system('rm -rf /target/stackable-versions.txt || true') os.system('rm -rf /target/test_output.log || true') os.system('touch /target/test_output.log') os.system(f"chown {uid_gid_output} /target/test_output.log") os.system('chmod 664 /target/test_output.log') os.system('sh /test.sh 2>&1 | tee /target/test_output.log') else: log("No test script supplied.") def
(): """Launch a cluster. This function creates a folder .cluster/ where everything related to the cluster is stored. In the cluster definition, the 'publicKeys' section is extended with a generated public key. The according private key is used to access the cluster later. If the cluster launch fails, this script exits. T2 takes care of the termination of partly created clusters. """ os.mkdir(CLUSTER_FOLDER) os.system(f"ssh-keygen -f {PRIVATE_KEY_FILE} -q -N '' -C ''") with open (PUBLIC_KEY_FILE, "r") as f: public_key = f.read().strip() with open ("/cluster.yaml", "r") as f: cluster_definition_string = f.read() cluster_definition_yaml = yaml.load(cluster_definition_string, Loader=yaml.FullLoader) if(not "publicKeys" in cluster_definition_yaml or not isinstance(cluster_definition_yaml["publicKeys"], list)): log("Error: The cluster definition file does not contain a valid 'publicKeys' section.") exit(1) cluster_definition_yaml["publicKeys"].append(public_key) with open (f"{CLUSTER_FOLDER}/cluster.yaml", "w") as f: f.write(yaml.dump(cluster_definition_yaml, default_flow_style=False)) f.close() start_time = time.time() cluster = create_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], yaml.dump(cluster_definition_yaml, default_flow_style=False)) if(not cluster): log("Error: Failed to create cluster via API.") exit(1) log(f"Created cluster '{cluster['id']}'. Waiting for cluster to be up and running...") cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'RUNNING' and not cluster['status']['failed']): time.sleep(5) cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) if(cluster['status']['failed']): log("Cluster launch failed.") exit(1) if(TIMEOUT_SECONDS <= (time.time()-start_time)): log("Timeout while launching cluster.") exit(1) log(f"Cluster '{cluster['id']}' is up and running.") with open(f"{CLUSTER_FOLDER}/uuid", "w") as uuid_text_file: print(cluster['id'], file=uuid_text_file) log("Downloading Stackable client script for cluster") with open ("/stackable.sh", "w") as f: f.write(get_client_script(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id'])) f.close() os.chmod("/stackable.sh", 0o755) log("Downloading Stackable kubeconfig") with open ("/kubeconfig", "w") as f: f.write(get_kubeconfig(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id'])) f.close() log("Downloading Stackable version information sheet for cluster") stackable_versions = get_version_information_sheet(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) with open ("/target/stackable-versions.txt", "w") as f: f.write(stackable_versions) f.close() os.system(f"chown {uid_gid_output} /target/stackable-versions.txt") os.system('chmod 664 /target/stackable-versions.txt') def terminate(): """Terminates the cluster identified by the data in the .cluster/ folder. """ with open (f"{CLUSTER_FOLDER}/uuid", "r") as f: uuid = f.read().strip() start_time = time.time() cluster = delete_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], uuid) if(not cluster): log("Failed to terminate cluster via API.") exit(1) log(f"Started termination of cluster '{cluster['id']}'. 
Waiting for cluster to be terminated...") cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']): time.sleep(5) cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) if(cluster['status']['failed']): log("Cluster termination failed.") exit(1) if(TIMEOUT_SECONDS <= (time.time()-start_time)): log("Timeout while launching cluster.") exit(1) log(f"Cluster '{cluster['id']}' is terminated.") def create_cluster(t2_url, t2_token, cluster_definition): """Create a cluster using T2 REST API Returns: - JSON representing cluster (REST response) """ response = requests.post(f"{t2_url}/api/clusters", data=cluster_definition, headers={ "t2-token": t2_token, "Content-Type": "application/yaml" }) if(response.status_code != 200): log(f"API call to create cluster returned error code {response}") return None return response.json() def get_cluster(t2_url, t2_token, id): """Get the cluster information using T2 REST API Returns: - JSON representing cluster (REST response) """ response = requests.get(f"{t2_url}/api/clusters/{id}", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get cluster returned error code {response.status_code}") return None return response.json() def delete_cluster(t2_url, t2_token, id): """Delete the cluster using T2 REST API Returns: - JSON representing terminated cluster (REST response) """ response = requests.delete(f"{t2_url}/api/clusters/{id}", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to delete cluster returned error code {response.status_code}") return None return response.json() def get_client_script(t2_url, t2_token, id): """Downloads the Stackable client script using T2 REST API Returns: - content of the Stackable client script """ response = requests.get(f"{t2_url}/api/clusters/{id}/stackable-client-script", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable client script returned error code {response.status_code}") return None return response.text def get_version_information_sheet(t2_url, t2_token, id): """Downloads the Stackable version information sheet using T2 REST API Returns: - content of the Stackable version information sheet """ response = requests.get(f"{t2_url}/api/clusters/{id}/stackable-versions", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable version information sheet returned error code {response.status_code}") return "No Stackable version information available." return response.text def get_kubeconfig(t2_url, t2_token, id): """Downloads the kubeconfig using T2 REST API Returns: - content of the Stackable kubeconfig """ response = requests.get(f"{t2_url}/api/clusters/{id}/kubeconfig", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable kubeconfig returned error code {response.status_code}") return
launch
identifier_name
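launch() above follows a simple poll-until-ready pattern: request the cluster, then repeatedly re-fetch its status until it reaches RUNNING, reports failure, or a timeout elapses. A generic sketch of that loop, with a hypothetical fetch_status callable standing in for the T2 REST call and an assumed timeout value:

import time

TIMEOUT_SECONDS = 3600        # assumed value; the driver defines its own constant
POLL_INTERVAL_SECONDS = 5

def wait_for_state(fetch_status, wanted_state):
    """Poll fetch_status() until it reports wanted_state, a failure, or a timeout.

    fetch_status is a hypothetical callable returning a dict shaped like the
    T2 cluster status, e.g. {"state": "RUNNING", "failed": False}.
    """
    start_time = time.time()
    status = fetch_status()
    while (TIMEOUT_SECONDS > (time.time() - start_time)
           and status["state"] != wanted_state
           and not status["failed"]):
        time.sleep(POLL_INTERVAL_SECONDS)
        status = fetch_status()
    if status["failed"] or TIMEOUT_SECONDS <= (time.time() - start_time):
        return False
    return status["state"] == wanted_state

# Example: wait_for_state(lambda: {"state": "RUNNING", "failed": False}, "RUNNING")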
testdriver.py
not contain a valid 'publicKeys' section.") exit(1) cluster_definition_yaml["publicKeys"].append(public_key) with open (f"{CLUSTER_FOLDER}/cluster.yaml", "w") as f: f.write(yaml.dump(cluster_definition_yaml, default_flow_style=False)) f.close() start_time = time.time() cluster = create_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], yaml.dump(cluster_definition_yaml, default_flow_style=False)) if(not cluster): log("Error: Failed to create cluster via API.") exit(1) log(f"Created cluster '{cluster['id']}'. Waiting for cluster to be up and running...") cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'RUNNING' and not cluster['status']['failed']): time.sleep(5) cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) if(cluster['status']['failed']): log("Cluster launch failed.") exit(1) if(TIMEOUT_SECONDS <= (time.time()-start_time)): log("Timeout while launching cluster.") exit(1) log(f"Cluster '{cluster['id']}' is up and running.") with open(f"{CLUSTER_FOLDER}/uuid", "w") as uuid_text_file: print(cluster['id'], file=uuid_text_file) log("Downloading Stackable client script for cluster") with open ("/stackable.sh", "w") as f: f.write(get_client_script(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id'])) f.close() os.chmod("/stackable.sh", 0o755) log("Downloading Stackable kubeconfig") with open ("/kubeconfig", "w") as f: f.write(get_kubeconfig(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id'])) f.close() log("Downloading Stackable version information sheet for cluster") stackable_versions = get_version_information_sheet(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) with open ("/target/stackable-versions.txt", "w") as f: f.write(stackable_versions) f.close() os.system(f"chown {uid_gid_output} /target/stackable-versions.txt") os.system('chmod 664 /target/stackable-versions.txt') def terminate(): """Terminates the cluster identified by the data in the .cluster/ folder. """ with open (f"{CLUSTER_FOLDER}/uuid", "r") as f: uuid = f.read().strip() start_time = time.time() cluster = delete_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], uuid) if(not cluster): log("Failed to terminate cluster via API.") exit(1) log(f"Started termination of cluster '{cluster['id']}'. 
Waiting for cluster to be terminated...") cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']): time.sleep(5) cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) if(cluster['status']['failed']): log("Cluster termination failed.") exit(1) if(TIMEOUT_SECONDS <= (time.time()-start_time)): log("Timeout while launching cluster.") exit(1) log(f"Cluster '{cluster['id']}' is terminated.") def create_cluster(t2_url, t2_token, cluster_definition): """Create a cluster using T2 REST API Returns: - JSON representing cluster (REST response) """ response = requests.post(f"{t2_url}/api/clusters", data=cluster_definition, headers={ "t2-token": t2_token, "Content-Type": "application/yaml" }) if(response.status_code != 200): log(f"API call to create cluster returned error code {response}") return None return response.json() def get_cluster(t2_url, t2_token, id): """Get the cluster information using T2 REST API Returns: - JSON representing cluster (REST response) """ response = requests.get(f"{t2_url}/api/clusters/{id}", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get cluster returned error code {response.status_code}") return None return response.json() def delete_cluster(t2_url, t2_token, id): """Delete the cluster using T2 REST API Returns: - JSON representing terminated cluster (REST response) """ response = requests.delete(f"{t2_url}/api/clusters/{id}", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to delete cluster returned error code {response.status_code}") return None return response.json() def get_client_script(t2_url, t2_token, id): """Downloads the Stackable client script using T2 REST API Returns: - content of the Stackable client script """ response = requests.get(f"{t2_url}/api/clusters/{id}/stackable-client-script", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable client script returned error code {response.status_code}") return None return response.text def get_version_information_sheet(t2_url, t2_token, id): """Downloads the Stackable version information sheet using T2 REST API Returns: - content of the Stackable version information sheet """ response = requests.get(f"{t2_url}/api/clusters/{id}/stackable-versions", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable version information sheet returned error code {response.status_code}") return "No Stackable version information available." return response.text def get_kubeconfig(t2_url, t2_token, id): """Downloads the kubeconfig using T2 REST API Returns: - content of the Stackable kubeconfig """ response = requests.get(f"{t2_url}/api/clusters/{id}/kubeconfig", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable kubeconfig returned error code {response.status_code}") return None return response.text def create_kubeconfig_for_ssh_tunnel(kubeconfig_file, kubeconfig_target_file): """ Creates a kubeconfig in which the Server URL is modified to use a locally set up SSH tunnel. 
(using 127.0.0.1 as an address) Returns a tuple consisting of: - the original IP/Servername of the K8s API - the original Port of the K8s API """ with open (kubeconfig_file, "r") as f: kubeconfig = yaml.load(f.read(), Loader=yaml.FullLoader) original_server_address = kubeconfig["clusters"][0]["cluster"]["server"] address_pattern = re.compile('https://([^:]*):([0-9]+)') match = address_pattern.match(original_server_address) if not match: print('Error: No API address found in kubeconfig') exit(1) original_api_hostname = match.group(1) original_api_port = match.group(2) kubeconfig["clusters"][0]["cluster"]["server"] = f"https://127.0.0.1:{original_api_port}" with open (kubeconfig_target_file, "w") as f: f.write(yaml.dump(kubeconfig, default_flow_style=False)) f.close() return (original_api_hostname, original_api_port) def establish_ssh_tunnel_to_api(api_port): os.system(f"/stackable.sh -i {PRIVATE_KEY_FILE} api-tunnel {api_port}") if __name__ == "__main__": prerequisites() uid_gid_output = "0:0" if 'UID_GID' in os.environ: uid_gid_output = os.environ['UID_GID'] init_log() log("Starting T2 test driver...") dry_run = is_dry_run() interactive_mode = is_interactive_mode() if not dry_run: log(f"Creating a cluster using T2 at {os.environ['T2_URL']}...") launch() (_, api_port) = create_kubeconfig_for_ssh_tunnel("/kubeconfig", "/root/.kube/config") establish_ssh_tunnel_to_api(api_port) else: log(f"DRY RUN: Not creating a cluster!") if not interactive_mode: log("Running test script...") run_test_script() log("Test script finished.") else:
log("Interactive mode. The testdriver will be open for business until you stop it by creating a file /cluster_lock") while not os.path.exists('/cluster_lock'): time.sleep(5)
conditional_block
testdriver.py
664 /target/testdriver.log') def log(msg=""): """ Logs the given text message to stdout AND the logfile """ print(msg) sys.stdout.flush() f = open("/target/testdriver.log", "a") f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now())) f.write(f"{msg}\n") f.close() def is_dry_run(): """ Checks if the testdriver should be executed as a 'dry run', which means that the cluster is not created. """ return 'DRY_RUN' in os.environ and os.environ['DRY_RUN']=='true' def is_interactive_mode(): """ Checks if the testdriver should be run in 'interactive mode', which means that the cluster is created and after that, the script waits for the file '/cluster_lock' to be deleted. """ return 'INTERACTIVE_MODE' in os.environ and os.environ['INTERACTIVE_MODE']=='true' def run_test_script(): if os.path.isfile("/test.sh"): os.system('rm -rf /target/stackable-versions.txt || true') os.system('rm -rf /target/test_output.log || true') os.system('touch /target/test_output.log') os.system(f"chown {uid_gid_output} /target/test_output.log") os.system('chmod 664 /target/test_output.log') os.system('sh /test.sh 2>&1 | tee /target/test_output.log') else: log("No test script supplied.") def launch(): """Launch a cluster. This function creates a folder .cluster/ where everything related to the cluster is stored. In the cluster definition, the 'publicKeys' section is extended with a generated public key. The according private key is used to access the cluster later. If the cluster launch fails, this script exits. T2 takes care of the termination of partly created clusters. """ os.mkdir(CLUSTER_FOLDER) os.system(f"ssh-keygen -f {PRIVATE_KEY_FILE} -q -N '' -C ''") with open (PUBLIC_KEY_FILE, "r") as f: public_key = f.read().strip() with open ("/cluster.yaml", "r") as f: cluster_definition_string = f.read() cluster_definition_yaml = yaml.load(cluster_definition_string, Loader=yaml.FullLoader) if(not "publicKeys" in cluster_definition_yaml or not isinstance(cluster_definition_yaml["publicKeys"], list)): log("Error: The cluster definition file does not contain a valid 'publicKeys' section.") exit(1) cluster_definition_yaml["publicKeys"].append(public_key) with open (f"{CLUSTER_FOLDER}/cluster.yaml", "w") as f: f.write(yaml.dump(cluster_definition_yaml, default_flow_style=False)) f.close() start_time = time.time() cluster = create_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], yaml.dump(cluster_definition_yaml, default_flow_style=False)) if(not cluster): log("Error: Failed to create cluster via API.") exit(1) log(f"Created cluster '{cluster['id']}'. 
Waiting for cluster to be up and running...") cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'RUNNING' and not cluster['status']['failed']): time.sleep(5) cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) if(cluster['status']['failed']): log("Cluster launch failed.") exit(1) if(TIMEOUT_SECONDS <= (time.time()-start_time)): log("Timeout while launching cluster.") exit(1) log(f"Cluster '{cluster['id']}' is up and running.") with open(f"{CLUSTER_FOLDER}/uuid", "w") as uuid_text_file: print(cluster['id'], file=uuid_text_file) log("Downloading Stackable client script for cluster") with open ("/stackable.sh", "w") as f: f.write(get_client_script(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id'])) f.close() os.chmod("/stackable.sh", 0o755) log("Downloading Stackable kubeconfig") with open ("/kubeconfig", "w") as f: f.write(get_kubeconfig(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id'])) f.close() log("Downloading Stackable version information sheet for cluster") stackable_versions = get_version_information_sheet(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) with open ("/target/stackable-versions.txt", "w") as f: f.write(stackable_versions) f.close() os.system(f"chown {uid_gid_output} /target/stackable-versions.txt") os.system('chmod 664 /target/stackable-versions.txt') def terminate(): """Terminates the cluster identified by the data in the .cluster/ folder. """ with open (f"{CLUSTER_FOLDER}/uuid", "r") as f: uuid = f.read().strip() start_time = time.time() cluster = delete_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], uuid) if(not cluster): log("Failed to terminate cluster via API.") exit(1) log(f"Started termination of cluster '{cluster['id']}'. Waiting for cluster to be terminated...") cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']): time.sleep(5) cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) if(cluster['status']['failed']): log("Cluster termination failed.") exit(1) if(TIMEOUT_SECONDS <= (time.time()-start_time)): log("Timeout while launching cluster.") exit(1) log(f"Cluster '{cluster['id']}' is terminated.") def create_cluster(t2_url, t2_token, cluster_definition):
def get_cluster(t2_url, t2_token, id): """Get the cluster information using T2 REST API Returns: - JSON representing cluster (REST response) """ response = requests.get(f"{t2_url}/api/clusters/{id}", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get cluster returned error code {response.status_code}") return None return response.json() def delete_cluster(t2_url, t2_token, id): """Delete the cluster using T2 REST API Returns: - JSON representing terminated cluster (REST response) """ response = requests.delete(f"{t2_url}/api/clusters/{id}", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to delete cluster returned error code {response.status_code}") return None return response.json() def get_client_script(t2_url, t2_token, id): """Downloads the Stackable client script using T2 REST API Returns: - content of the Stackable client script """ response = requests.get(f"{t2_url}/api/clusters/{id}/stackable-client-script", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable client script returned error code {response.status_code}") return None return response.text def get_version_information_sheet(t2_url, t2_token, id): """Downloads the Stackable version information sheet using T2 REST API Returns: - content of the Stackable version information sheet """ response = requests.get(f"{t2_url}/api/clusters/{id}/stackable-versions", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable version information sheet returned error code {response.status_code}") return "No Stackable version information available." return response.text def get_kubeconfig(t2_url, t2_token, id): """Downloads the kubeconfig using T2 REST API Returns: - content of the Stackable kubeconfig """ response = requests.get(f"{t2_url}/api/clusters/{id}/kubeconfig", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable kubeconfig returned error code {response.status_code}") return
"""Create a cluster using T2 REST API Returns: - JSON representing cluster (REST response) """ response = requests.post(f"{t2_url}/api/clusters", data=cluster_definition, headers={ "t2-token": t2_token, "Content-Type": "application/yaml" }) if(response.status_code != 200): log(f"API call to create cluster returned error code {response}") return None return response.json()
identifier_body
testdriver.py
664 /target/testdriver.log') def log(msg=""): """ Logs the given text message to stdout AND the logfile """ print(msg) sys.stdout.flush() f = open("/target/testdriver.log", "a") f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now())) f.write(f"{msg}\n") f.close() def is_dry_run(): """ Checks if the testdriver should be executed as a 'dry run', which means that the cluster is not created. """ return 'DRY_RUN' in os.environ and os.environ['DRY_RUN']=='true' def is_interactive_mode(): """ Checks if the testdriver should be run in 'interactive mode', which means that the cluster is created and after that, the script waits for the file '/cluster_lock' to be deleted. """
def run_test_script(): if os.path.isfile("/test.sh"): os.system('rm -rf /target/stackable-versions.txt || true') os.system('rm -rf /target/test_output.log || true') os.system('touch /target/test_output.log') os.system(f"chown {uid_gid_output} /target/test_output.log") os.system('chmod 664 /target/test_output.log') os.system('sh /test.sh 2>&1 | tee /target/test_output.log') else: log("No test script supplied.") def launch(): """Launch a cluster. This function creates a folder .cluster/ where everything related to the cluster is stored. In the cluster definition, the 'publicKeys' section is extended with a generated public key. The according private key is used to access the cluster later. If the cluster launch fails, this script exits. T2 takes care of the termination of partly created clusters. """ os.mkdir(CLUSTER_FOLDER) os.system(f"ssh-keygen -f {PRIVATE_KEY_FILE} -q -N '' -C ''") with open (PUBLIC_KEY_FILE, "r") as f: public_key = f.read().strip() with open ("/cluster.yaml", "r") as f: cluster_definition_string = f.read() cluster_definition_yaml = yaml.load(cluster_definition_string, Loader=yaml.FullLoader) if(not "publicKeys" in cluster_definition_yaml or not isinstance(cluster_definition_yaml["publicKeys"], list)): log("Error: The cluster definition file does not contain a valid 'publicKeys' section.") exit(1) cluster_definition_yaml["publicKeys"].append(public_key) with open (f"{CLUSTER_FOLDER}/cluster.yaml", "w") as f: f.write(yaml.dump(cluster_definition_yaml, default_flow_style=False)) f.close() start_time = time.time() cluster = create_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], yaml.dump(cluster_definition_yaml, default_flow_style=False)) if(not cluster): log("Error: Failed to create cluster via API.") exit(1) log(f"Created cluster '{cluster['id']}'. Waiting for cluster to be up and running...") cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'RUNNING' and not cluster['status']['failed']): time.sleep(5) cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) if(cluster['status']['failed']): log("Cluster launch failed.") exit(1) if(TIMEOUT_SECONDS <= (time.time()-start_time)): log("Timeout while launching cluster.") exit(1) log(f"Cluster '{cluster['id']}' is up and running.") with open(f"{CLUSTER_FOLDER}/uuid", "w") as uuid_text_file: print(cluster['id'], file=uuid_text_file) log("Downloading Stackable client script for cluster") with open ("/stackable.sh", "w") as f: f.write(get_client_script(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id'])) f.close() os.chmod("/stackable.sh", 0o755) log("Downloading Stackable kubeconfig") with open ("/kubeconfig", "w") as f: f.write(get_kubeconfig(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id'])) f.close() log("Downloading Stackable version information sheet for cluster") stackable_versions = get_version_information_sheet(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) with open ("/target/stackable-versions.txt", "w") as f: f.write(stackable_versions) f.close() os.system(f"chown {uid_gid_output} /target/stackable-versions.txt") os.system('chmod 664 /target/stackable-versions.txt') def terminate(): """Terminates the cluster identified by the data in the .cluster/ folder. 
""" with open (f"{CLUSTER_FOLDER}/uuid", "r") as f: uuid = f.read().strip() start_time = time.time() cluster = delete_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], uuid) if(not cluster): log("Failed to terminate cluster via API.") exit(1) log(f"Started termination of cluster '{cluster['id']}'. Waiting for cluster to be terminated...") cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']): time.sleep(5) cluster = get_cluster(os.environ["T2_URL"], os.environ["T2_TOKEN"], cluster['id']) if(cluster['status']['failed']): log("Cluster termination failed.") exit(1) if(TIMEOUT_SECONDS <= (time.time()-start_time)): log("Timeout while launching cluster.") exit(1) log(f"Cluster '{cluster['id']}' is terminated.") def create_cluster(t2_url, t2_token, cluster_definition): """Create a cluster using T2 REST API Returns: - JSON representing cluster (REST response) """ response = requests.post(f"{t2_url}/api/clusters", data=cluster_definition, headers={ "t2-token": t2_token, "Content-Type": "application/yaml" }) if(response.status_code != 200): log(f"API call to create cluster returned error code {response}") return None return response.json() def get_cluster(t2_url, t2_token, id): """Get the cluster information using T2 REST API Returns: - JSON representing cluster (REST response) """ response = requests.get(f"{t2_url}/api/clusters/{id}", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get cluster returned error code {response.status_code}") return None return response.json() def delete_cluster(t2_url, t2_token, id): """Delete the cluster using T2 REST API Returns: - JSON representing terminated cluster (REST response) """ response = requests.delete(f"{t2_url}/api/clusters/{id}", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to delete cluster returned error code {response.status_code}") return None return response.json() def get_client_script(t2_url, t2_token, id): """Downloads the Stackable client script using T2 REST API Returns: - content of the Stackable client script """ response = requests.get(f"{t2_url}/api/clusters/{id}/stackable-client-script", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable client script returned error code {response.status_code}") return None return response.text def get_version_information_sheet(t2_url, t2_token, id): """Downloads the Stackable version information sheet using T2 REST API Returns: - content of the Stackable version information sheet """ response = requests.get(f"{t2_url}/api/clusters/{id}/stackable-versions", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable version information sheet returned error code {response.status_code}") return "No Stackable version information available." return response.text def get_kubeconfig(t2_url, t2_token, id): """Downloads the kubeconfig using T2 REST API Returns: - content of the Stackable kubeconfig """ response = requests.get(f"{t2_url}/api/clusters/{id}/kubeconfig", headers={ "t2-token": t2_token }) if(response.status_code != 200): log(f"API call to get Stackable kubeconfig returned error code {response.status_code}") return None
return 'INTERACTIVE_MODE' in os.environ and os.environ['INTERACTIVE_MODE']=='true'
random_line_split
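A side note on the log() helper in this file: it builds the timestamp with the '%s' directive, which is not a portable strftime specifier for sub-second precision (datetime understands '%f' for microseconds, while '%s' is a platform-dependent epoch-seconds extension). A small sketch of an equivalent helper using '%f' and a context-managed file handle; the log path here is illustrative rather than the driver's /target location:

import datetime
import sys

LOG_FILE = "testdriver.log"   # illustrative path; the real driver writes under /target

def log(msg=""):
    """Log msg to stdout and append a timestamped copy to the logfile."""
    print(msg)
    sys.stdout.flush()
    stamp = "{:%Y-%m-%d %H:%M:%S.%f}".format(datetime.datetime.now())
    with open(LOG_FILE, "a") as f:
        f.write(f"{stamp} :: {msg}\n")

log("hello from the sketch")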
hackc.rs
32 MiB"))] stack_size: Byte, /// Instead of printing the unit, print a list of the decls requested during compilation. /// (only used by --test-compile-with-decls) #[clap(long)] pub(crate) log_decls_requested: bool, /// Use serialized decl instead of decl pointer as the decl provider API /// (only used by --test-compile-with-decls) #[clap(long)] pub(crate) use_serialized_decls: bool, /// Controls systemlib specific logic #[clap(long)] is_systemlib: bool, } /// Hack Compiler #[derive(Parser, Debug, Default)] struct FileOpts { /// Input file(s) filenames: Vec<PathBuf>, /// Read a list of files (one-per-line) from this file #[clap(long)] input_file_list: Option<PathBuf>, } #[derive(Parser, Debug)] enum Command { /// Assemble HHAS file(s) into HackCUnit. Prints those HCUs' HHAS representation. Assemble(assemble::Opts), /// Compile one Hack source file or a list of files to HHAS Compile(compile::Opts), /// Compile Hack source files or directories and produce a single CRC per /// input file. Crc(crc::Opts), /// Print the source code with expression tree literals desugared. /// Best effort debugging tool. DesugarExprTrees(expr_trees::Opts), /// Compute facts for a set of files. Facts(facts::Opts), /// Render the source text parse tree for each given file. Parse(parse::Opts), /// Parse many files whose filenames are read from stdin, discard parser output. ParseBench(parse::BenchOpts), /// Compile Hack source files or directories and check for compilation errors. Verify(verify::Opts), } /// Which command are we running? Using bool opts for compatibility with test harnesses. /// New commands should be defined as subcommands using the Command enum. #[derive(Parser, Debug, Default)] struct FlagCommands { /// Parse decls from source text, transform them into facts, and print the facts /// in JSON format. #[clap(long)] extract_facts_from_decls: bool, /// Compile file with decls from the same file available during compilation. 
#[clap(long)] test_compile_with_decls: bool, } impl FileOpts { pub fn gather_input_files(&mut self) -> Result<Vec<PathBuf>> { use std::io::BufReader; let mut files: Vec<PathBuf> = Default::default(); if let Some(list_path) = self.input_file_list.take() { for line in BufReader::new(std::fs::File::open(list_path)?).lines() { files.push(Path::new(&line?).to_path_buf()); } } files.append(&mut self.filenames); Ok(files) } pub fn is_batch_mode(&self) -> bool { self.input_file_list.is_some() || self.filenames.len() > 1 } } impl Opts { pub fn env_flags(&self) -> EnvFlags { let mut flags = EnvFlags::empty(); if self.for_debugger_eval { flags |= EnvFlags::FOR_DEBUGGER_EVAL; } if self.disable_toplevel_elaboration { flags |= EnvFlags::DISABLE_TOPLEVEL_ELABORATION; } if self.is_systemlib { flags |= EnvFlags::IS_SYSTEMLIB; } flags } pub fn decl_opts(&self) -> DeclParserOptions { // TODO: share this logic with hackc_create_decl_parse_options() let config_opts = options::Options::from_configs(&[Self::AUTO_NAMESPACE_MAP]).unwrap(); let auto_namespace_map = match config_opts.hhvm.aliased_namespaces.get().as_map() { Some(m) => m.iter().map(|(k, v)| (k.clone(), v.clone())).collect(), None => Vec::new(), }; DeclParserOptions { auto_namespace_map, disable_xhp_element_mangling: false, interpret_soft_types_as_like_types: true, allow_new_attribute_syntax: true, enable_xhp_class_modifier: false, php5_compat_mode: true, hhvm_compat_mode: true, ..Default::default() } } pub fn native_env(&self, path: PathBuf) -> Result<NativeEnv<'_>> { let hhvm_options = &self.hhvm_options; let hhvm_config = hhvm_options.to_config()?; let parser_flags = ParserFlags::from_hhvm_config(&hhvm_config)?; let hhbc_flags = HHBCFlags::from_hhvm_config(&hhvm_config)?; Ok(NativeEnv { filepath: RelativePath::make(relative_path::Prefix::Dummy, path), aliased_namespaces: crate::Opts::AUTO_NAMESPACE_MAP, include_roots: crate::Opts::INCLUDE_ROOTS, hhbc_flags, parser_flags, flags: self.env_flags(), emit_class_pointers: self.emit_class_pointers, check_int_overflow: self.check_int_overflow, }) } // TODO (T118266805): get these from nearest .hhconfig enclosing each file. pub(crate) const AUTO_NAMESPACE_MAP: &'static str = r#"{ "hhvm.aliased_namespaces": { "global_value": { "Async": "HH\\Lib\\Async", "C": "FlibSL\\C", "Dict": "FlibSL\\Dict", "File": "HH\\Lib\\File", "IO": "HH\\Lib\\IO", "Keyset": "FlibSL\\Keyset", "Locale": "FlibSL\\Locale", "Math": "FlibSL\\Math", "OS": "HH\\Lib\\OS", "PHP": "FlibSL\\PHP", "PseudoRandom": "FlibSL\\PseudoRandom", "Regex": "FlibSL\\Regex", "SecureRandom": "FlibSL\\SecureRandom", "Str": "FlibSL\\Str", "Vec": "FlibSL\\Vec" } } }"#; pub(crate) const INCLUDE_ROOTS: &'static str = ""; } fn main() -> Result<()> { env_logger::init(); let mut opts = Opts::parse(); // Some subcommands need worker threads with larger than default stacks, // even when using Stacker. In particular, various derived traits (e.g. Drop) // on AAST nodes are inherently recursive. rayon::ThreadPoolBuilder::new() .num_threads(opts.num_threads) .stack_size(opts.stack_size.get_bytes().try_into()?) 
.build_global() .unwrap(); match opts.command.take() { Some(Command::Assemble(opts)) => assemble::run(opts), Some(Command::Crc(opts)) => crc::run(opts), Some(Command::Parse(parse_opts)) => parse::run(parse_opts), Some(Command::ParseBench(bench_opts)) => parse::run_bench_command(bench_opts), Some(Command::Verify(opts)) => verify::run(opts), // Expr trees Some(Command::DesugarExprTrees(et_opts)) => expr_trees::desugar_expr_trees(&opts, et_opts), // Facts Some(Command::Facts(facts_opts)) => { facts::extract_facts(&opts, facts_opts, &mut std::io::stdout()) } None if opts.daemon && opts.flag_commands.extract_facts_from_decls => { facts::daemon(&mut opts) } None if opts.flag_commands.extract_facts_from_decls => { facts::run_flag(&mut opts, &mut std::io::stdout()) } // Test Decls-in-Compilation None if opts.daemon && opts.flag_commands.test_compile_with_decls => { compile::test_decl_compile_daemon(&mut opts) } None if opts.flag_commands.test_compile_with_decls => { compile::test_decl_compile(&mut opts, &mut std::io::stdout()) } // Compile to hhas Some(Command::Compile(mut opts)) => compile::run(&mut opts), None if opts.daemon => compile::daemon(&mut opts), None => compile::compile_from_text(&mut opts, &mut std::io::stdout()), } } /// In daemon mode, hackc blocks waiting for a filename on stdin. /// Then, using the originally invoked options, dispatches that file to be compiled. fn daemon_loop(mut f: impl FnMut(PathBuf, &mut Vec<u8>) -> Result<()>) -> Result<()>
{ use std::io::Write; for line in std::io::stdin().lock().lines() { let mut buf = Vec::new(); f(Path::new(&line?).to_path_buf(), &mut buf)?; // Account for utf-8 encoding and text streams with the python test runner: // https://stackoverflow.com/questions/3586923/counting-unicode-characters-in-c let mut w = std::io::stdout(); let num_chars = buf.iter().filter(|&b| (b & 0xc0) != 0x80).count() + 1; writeln!(w, "{num_chars}")?; w.write_all(&buf)?; w.write_all(b"\n")?; w.flush()?; } Ok(()) }
identifier_body
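daemon_loop above defines a tiny line-oriented protocol: the caller writes one filename per line to stdin, and hackc replies with a character count on its own line followed by that many characters of payload (the compiled output plus a trailing newline). A rough sketch of a harness speaking that protocol from Python, assuming a hackc binary on PATH and the plain --daemon compile mode dispatched in main(); the input path is illustrative:

import subprocess

# Plain `hackc --daemon` falls through to compile::daemon in main() above.
proc = subprocess.Popen(
    ["hackc", "--daemon"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True,            # the protocol counts characters, so use text mode
    encoding="utf-8",
)

def compile_one(path):
    # One filename per line on stdin.
    proc.stdin.write(path + "\n")
    proc.stdin.flush()
    # First reply line: the number of characters that follow (payload plus
    # its trailing newline), matching the count computed in daemon_loop.
    num_chars = int(proc.stdout.readline())
    return proc.stdout.read(num_chars)

print(compile_one("example.hack"))   # illustrative input file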
hackc.rs
; use std::path::Path; use std::path::PathBuf; /// Hack Compiler #[derive(Parser, Debug, Default)] struct Opts { #[clap(subcommand)] command: Option<Command>, /// Runs in daemon mode for testing purposes. Do not rely on for production #[clap(long)] daemon: bool, #[clap(flatten)] flag_commands: FlagCommands, #[clap(flatten)] hhvm_options: HhvmOptions, #[clap(flatten)] files: FileOpts, /// Disable toplevel definition elaboration #[clap(long)] disable_toplevel_elaboration: bool, /// Mutate the program as if we're in the debugger repl #[clap(long)] for_debugger_eval: bool, #[clap(long, default_value("0"))] emit_class_pointers: i32, #[clap(long, default_value("0"))] check_int_overflow: i32, /// Number of parallel worker threads for subcommands that support parallelism, /// otherwise ignored. If 0, use available parallelism, typically num-cpus. #[clap(long, default_value("0"))] num_threads: usize, /// Stack size to use for parallel worker threads. Supports unit suffixes like KB, MiB, etc. #[clap(long, default_value("32 MiB"))] stack_size: Byte, /// Instead of printing the unit, print a list of the decls requested during compilation. /// (only used by --test-compile-with-decls) #[clap(long)] pub(crate) log_decls_requested: bool, /// Use serialized decl instead of decl pointer as the decl provider API /// (only used by --test-compile-with-decls) #[clap(long)] pub(crate) use_serialized_decls: bool, /// Controls systemlib specific logic #[clap(long)] is_systemlib: bool, } /// Hack Compiler #[derive(Parser, Debug, Default)] struct
{ /// Input file(s) filenames: Vec<PathBuf>, /// Read a list of files (one-per-line) from this file #[clap(long)] input_file_list: Option<PathBuf>, } #[derive(Parser, Debug)] enum Command { /// Assemble HHAS file(s) into HackCUnit. Prints those HCUs' HHAS representation. Assemble(assemble::Opts), /// Compile one Hack source file or a list of files to HHAS Compile(compile::Opts), /// Compile Hack source files or directories and produce a single CRC per /// input file. Crc(crc::Opts), /// Print the source code with expression tree literals desugared. /// Best effort debugging tool. DesugarExprTrees(expr_trees::Opts), /// Compute facts for a set of files. Facts(facts::Opts), /// Render the source text parse tree for each given file. Parse(parse::Opts), /// Parse many files whose filenames are read from stdin, discard parser output. ParseBench(parse::BenchOpts), /// Compile Hack source files or directories and check for compilation errors. Verify(verify::Opts), } /// Which command are we running? Using bool opts for compatibility with test harnesses. /// New commands should be defined as subcommands using the Command enum. #[derive(Parser, Debug, Default)] struct FlagCommands { /// Parse decls from source text, transform them into facts, and print the facts /// in JSON format. #[clap(long)] extract_facts_from_decls: bool, /// Compile file with decls from the same file available during compilation. #[clap(long)] test_compile_with_decls: bool, } impl FileOpts { pub fn gather_input_files(&mut self) -> Result<Vec<PathBuf>> { use std::io::BufReader; let mut files: Vec<PathBuf> = Default::default(); if let Some(list_path) = self.input_file_list.take() { for line in BufReader::new(std::fs::File::open(list_path)?).lines() { files.push(Path::new(&line?).to_path_buf()); } } files.append(&mut self.filenames); Ok(files) } pub fn is_batch_mode(&self) -> bool { self.input_file_list.is_some() || self.filenames.len() > 1 } } impl Opts { pub fn env_flags(&self) -> EnvFlags { let mut flags = EnvFlags::empty(); if self.for_debugger_eval { flags |= EnvFlags::FOR_DEBUGGER_EVAL; } if self.disable_toplevel_elaboration { flags |= EnvFlags::DISABLE_TOPLEVEL_ELABORATION; } if self.is_systemlib { flags |= EnvFlags::IS_SYSTEMLIB; } flags } pub fn decl_opts(&self) -> DeclParserOptions { // TODO: share this logic with hackc_create_decl_parse_options() let config_opts = options::Options::from_configs(&[Self::AUTO_NAMESPACE_MAP]).unwrap(); let auto_namespace_map = match config_opts.hhvm.aliased_namespaces.get().as_map() { Some(m) => m.iter().map(|(k, v)| (k.clone(), v.clone())).collect(), None => Vec::new(), }; DeclParserOptions { auto_namespace_map, disable_xhp_element_mangling: false, interpret_soft_types_as_like_types: true, allow_new_attribute_syntax: true, enable_xhp_class_modifier: false, php5_compat_mode: true, hhvm_compat_mode: true, ..Default::default() } } pub fn native_env(&self, path: PathBuf) -> Result<NativeEnv<'_>> { let hhvm_options = &self.hhvm_options; let hhvm_config = hhvm_options.to_config()?; let parser_flags = ParserFlags::from_hhvm_config(&hhvm_config)?; let hhbc_flags = HHBCFlags::from_hhvm_config(&hhvm_config)?; Ok(NativeEnv { filepath: RelativePath::make(relative_path::Prefix::Dummy, path), aliased_namespaces: crate::Opts::AUTO_NAMESPACE_MAP, include_roots: crate::Opts::INCLUDE_ROOTS, hhbc_flags, parser_flags, flags: self.env_flags(), emit_class_pointers: self.emit_class_pointers, check_int_overflow: self.check_int_overflow, }) } // TODO (T118266805): get these from nearest .hhconfig enclosing each 
file. pub(crate) const AUTO_NAMESPACE_MAP: &'static str = r#"{ "hhvm.aliased_namespaces": { "global_value": { "Async": "HH\\Lib\\Async", "C": "FlibSL\\C", "Dict": "FlibSL\\Dict", "File": "HH\\Lib\\File", "IO": "HH\\Lib\\IO", "Keyset": "FlibSL\\Keyset", "Locale": "FlibSL\\Locale", "Math": "FlibSL\\Math", "OS": "HH\\Lib\\OS", "PHP": "FlibSL\\PHP", "PseudoRandom": "FlibSL\\PseudoRandom", "Regex": "FlibSL\\Regex", "SecureRandom": "FlibSL\\SecureRandom", "Str": "FlibSL\\Str", "Vec": "FlibSL\\Vec" } } }"#; pub(crate) const INCLUDE_ROOTS: &'static str = ""; } fn main() -> Result<()> { env_logger::init(); let mut opts = Opts::parse(); // Some subcommands need worker threads with larger than default stacks, // even when using Stacker. In particular, various derived traits (e.g. Drop) // on AAST nodes are inherently recursive. rayon::ThreadPoolBuilder::new() .num_threads(opts.num_threads) .stack_size(opts.stack_size.get_bytes().try_into()?) .build_global() .unwrap(); match opts.command.take() { Some(Command::Assemble(opts)) => assemble::run(opts), Some(Command::Crc(opts)) => crc::run(opts), Some(Command::Parse(parse_opts)) => parse::run(parse_opts), Some(Command::ParseBench(bench_opts)) => parse::run_bench_command(bench_opts), Some(Command::Verify(opts)) => verify::run(opts), // Expr trees Some(Command::DesugarExprTrees(et_opts)) => expr_trees::desugar_expr_trees(&opts, et_opts), // Facts Some(Command::Facts(facts_opts)) => { facts::extract_facts(&opts, facts_opts, &mut std::io::stdout()) } None if opts.daemon && opts.flag_commands.extract_facts_from_decls => { facts::daemon(&mut opts) } None if opts.flag_commands.extract_facts_from_decls => { facts::run_flag(&mut opts, &mut std::io::stdout()) } // Test Decls-in-Compilation None if opts.daemon && opts.flag_commands.test_compile_with_decls => { compile::test_decl_compile_daemon(&mut opts) } None if opts.flag_commands.test_compile_with_decls => { compile::test_decl_compile(&mut opts
FileOpts
identifier_name
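FileOpts above accepts input files either as positional arguments or via --input-file-list, a file listing one path per line, and gather_input_files merges the two sources. For illustration, a rough Python equivalent of that option layout (argument names mirror the Rust struct; nothing here is part of hackc itself):

import argparse
from pathlib import Path

parser = argparse.ArgumentParser(description="Sketch of hackc's FileOpts")
parser.add_argument("filenames", nargs="*", type=Path, help="Input file(s)")
parser.add_argument("--input-file-list", type=Path,
                    help="Read a list of files (one per line) from this file")

def gather_input_files(args):
    files = []
    if args.input_file_list is not None:
        with open(args.input_file_list) as f:
            files.extend(Path(line.strip()) for line in f if line.strip())
    files.extend(args.filenames)
    return files

args = parser.parse_args([])          # empty argv keeps the demo self-contained
print(gather_input_files(args))       # -> []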
hackc.rs
; use std::path::Path; use std::path::PathBuf; /// Hack Compiler #[derive(Parser, Debug, Default)] struct Opts { #[clap(subcommand)] command: Option<Command>, /// Runs in daemon mode for testing purposes. Do not rely on for production #[clap(long)] daemon: bool, #[clap(flatten)] flag_commands: FlagCommands, #[clap(flatten)] hhvm_options: HhvmOptions, #[clap(flatten)] files: FileOpts, /// Disable toplevel definition elaboration #[clap(long)] disable_toplevel_elaboration: bool, /// Mutate the program as if we're in the debugger repl #[clap(long)] for_debugger_eval: bool, #[clap(long, default_value("0"))] emit_class_pointers: i32, #[clap(long, default_value("0"))] check_int_overflow: i32, /// Number of parallel worker threads for subcommands that support parallelism, /// otherwise ignored. If 0, use available parallelism, typically num-cpus. #[clap(long, default_value("0"))] num_threads: usize, /// Stack size to use for parallel worker threads. Supports unit suffixes like KB, MiB, etc. #[clap(long, default_value("32 MiB"))] stack_size: Byte, /// Instead of printing the unit, print a list of the decls requested during compilation. /// (only used by --test-compile-with-decls) #[clap(long)] pub(crate) log_decls_requested: bool, /// Use serialized decl instead of decl pointer as the decl provider API /// (only used by --test-compile-with-decls) #[clap(long)] pub(crate) use_serialized_decls: bool, /// Controls systemlib specific logic #[clap(long)] is_systemlib: bool, } /// Hack Compiler #[derive(Parser, Debug, Default)] struct FileOpts { /// Input file(s) filenames: Vec<PathBuf>, /// Read a list of files (one-per-line) from this file #[clap(long)] input_file_list: Option<PathBuf>, } #[derive(Parser, Debug)] enum Command { /// Assemble HHAS file(s) into HackCUnit. Prints those HCUs' HHAS representation. Assemble(assemble::Opts), /// Compile one Hack source file or a list of files to HHAS Compile(compile::Opts), /// Compile Hack source files or directories and produce a single CRC per /// input file. Crc(crc::Opts), /// Print the source code with expression tree literals desugared. /// Best effort debugging tool. DesugarExprTrees(expr_trees::Opts), /// Compute facts for a set of files. Facts(facts::Opts), /// Render the source text parse tree for each given file. Parse(parse::Opts), /// Parse many files whose filenames are read from stdin, discard parser output. ParseBench(parse::BenchOpts), /// Compile Hack source files or directories and check for compilation errors. Verify(verify::Opts), } /// Which command are we running? Using bool opts for compatibility with test harnesses. /// New commands should be defined as subcommands using the Command enum. #[derive(Parser, Debug, Default)] struct FlagCommands { /// Parse decls from source text, transform them into facts, and print the facts /// in JSON format. #[clap(long)] extract_facts_from_decls: bool, /// Compile file with decls from the same file available during compilation. 
#[clap(long)] test_compile_with_decls: bool, } impl FileOpts { pub fn gather_input_files(&mut self) -> Result<Vec<PathBuf>> { use std::io::BufReader; let mut files: Vec<PathBuf> = Default::default(); if let Some(list_path) = self.input_file_list.take() { for line in BufReader::new(std::fs::File::open(list_path)?).lines() { files.push(Path::new(&line?).to_path_buf()); } } files.append(&mut self.filenames); Ok(files) } pub fn is_batch_mode(&self) -> bool { self.input_file_list.is_some() || self.filenames.len() > 1 } } impl Opts { pub fn env_flags(&self) -> EnvFlags { let mut flags = EnvFlags::empty(); if self.for_debugger_eval { flags |= EnvFlags::FOR_DEBUGGER_EVAL; } if self.disable_toplevel_elaboration { flags |= EnvFlags::DISABLE_TOPLEVEL_ELABORATION; } if self.is_systemlib { flags |= EnvFlags::IS_SYSTEMLIB; } flags } pub fn decl_opts(&self) -> DeclParserOptions { // TODO: share this logic with hackc_create_decl_parse_options() let config_opts = options::Options::from_configs(&[Self::AUTO_NAMESPACE_MAP]).unwrap(); let auto_namespace_map = match config_opts.hhvm.aliased_namespaces.get().as_map() { Some(m) => m.iter().map(|(k, v)| (k.clone(), v.clone())).collect(), None => Vec::new(), }; DeclParserOptions { auto_namespace_map, disable_xhp_element_mangling: false, interpret_soft_types_as_like_types: true, allow_new_attribute_syntax: true, enable_xhp_class_modifier: false, php5_compat_mode: true, hhvm_compat_mode: true, ..Default::default() } } pub fn native_env(&self, path: PathBuf) -> Result<NativeEnv<'_>> { let hhvm_options = &self.hhvm_options; let hhvm_config = hhvm_options.to_config()?; let parser_flags = ParserFlags::from_hhvm_config(&hhvm_config)?; let hhbc_flags = HHBCFlags::from_hhvm_config(&hhvm_config)?; Ok(NativeEnv { filepath: RelativePath::make(relative_path::Prefix::Dummy, path), aliased_namespaces: crate::Opts::AUTO_NAMESPACE_MAP, include_roots: crate::Opts::INCLUDE_ROOTS, hhbc_flags, parser_flags, flags: self.env_flags(), emit_class_pointers: self.emit_class_pointers, check_int_overflow: self.check_int_overflow, }) } // TODO (T118266805): get these from nearest .hhconfig enclosing each file. pub(crate) const AUTO_NAMESPACE_MAP: &'static str = r#"{ "hhvm.aliased_namespaces": { "global_value": { "Async": "HH\\Lib\\Async", "C": "FlibSL\\C", "Dict": "FlibSL\\Dict", "File": "HH\\Lib\\File", "IO": "HH\\Lib\\IO", "Keyset": "FlibSL\\Keyset", "Locale": "FlibSL\\Locale", "Math": "FlibSL\\Math", "OS": "HH\\Lib\\OS", "PHP": "FlibSL\\PHP", "PseudoRandom": "FlibSL\\PseudoRandom", "Regex": "FlibSL\\Regex", "SecureRandom": "FlibSL\\SecureRandom", "Str": "FlibSL\\Str", "Vec": "FlibSL\\Vec" } } }"#; pub(crate) const INCLUDE_ROOTS: &'static str = ""; } fn main() -> Result<()> { env_logger::init(); let mut opts = Opts::parse(); // Some subcommands need worker threads with larger than default stacks, // even when using Stacker. In particular, various derived traits (e.g. Drop) // on AAST nodes are inherently recursive. rayon::ThreadPoolBuilder::new() .num_threads(opts.num_threads) .stack_size(opts.stack_size.get_bytes().try_into()?) 
.build_global() .unwrap(); match opts.command.take() { Some(Command::Assemble(opts)) => assemble::run(opts), Some(Command::Crc(opts)) => crc::run(opts), Some(Command::Parse(parse_opts)) => parse::run(parse_opts), Some(Command::ParseBench(bench_opts)) => parse::run_bench_command(bench_opts), Some(Command::Verify(opts)) => verify::run(opts), // Expr trees Some(Command::DesugarExprTrees(et_opts)) => expr_trees::desugar_expr_trees(&opts, et_opts), // Facts Some(Command::Facts(facts_opts)) => { facts::extract_facts(&opts, facts_opts, &mut std::io::stdout()) } None if opts.daemon && opts.flag_commands.extract_facts_from_decls => { facts::daemon(&mut opts) } None if opts.flag_commands.extract_facts_from_decls =>
// Test Decls-in-Compilation None if opts.daemon && opts.flag_commands.test_compile_with_decls => { compile::test_decl_compile_daemon(&mut opts) } None if opts.flag_commands.test_compile_with_decls => { compile::test_decl_compile(&mut
{ facts::run_flag(&mut opts, &mut std::io::stdout()) }
conditional_block
hackc.rs
Read; use std::path::Path; use std::path::PathBuf; /// Hack Compiler #[derive(Parser, Debug, Default)] struct Opts { #[clap(subcommand)] command: Option<Command>, /// Runs in daemon mode for testing purposes. Do not rely on for production #[clap(long)] daemon: bool, #[clap(flatten)] flag_commands: FlagCommands, #[clap(flatten)] hhvm_options: HhvmOptions, #[clap(flatten)] files: FileOpts, /// Disable toplevel definition elaboration
for_debugger_eval: bool, #[clap(long, default_value("0"))] emit_class_pointers: i32, #[clap(long, default_value("0"))] check_int_overflow: i32, /// Number of parallel worker threads for subcommands that support parallelism, /// otherwise ignored. If 0, use available parallelism, typically num-cpus. #[clap(long, default_value("0"))] num_threads: usize, /// Stack size to use for parallel worker threads. Supports unit suffixes like KB, MiB, etc. #[clap(long, default_value("32 MiB"))] stack_size: Byte, /// Instead of printing the unit, print a list of the decls requested during compilation. /// (only used by --test-compile-with-decls) #[clap(long)] pub(crate) log_decls_requested: bool, /// Use serialized decl instead of decl pointer as the decl provider API /// (only used by --test-compile-with-decls) #[clap(long)] pub(crate) use_serialized_decls: bool, /// Controls systemlib specific logic #[clap(long)] is_systemlib: bool, } /// Hack Compiler #[derive(Parser, Debug, Default)] struct FileOpts { /// Input file(s) filenames: Vec<PathBuf>, /// Read a list of files (one-per-line) from this file #[clap(long)] input_file_list: Option<PathBuf>, } #[derive(Parser, Debug)] enum Command { /// Assemble HHAS file(s) into HackCUnit. Prints those HCUs' HHAS representation. Assemble(assemble::Opts), /// Compile one Hack source file or a list of files to HHAS Compile(compile::Opts), /// Compile Hack source files or directories and produce a single CRC per /// input file. Crc(crc::Opts), /// Print the source code with expression tree literals desugared. /// Best effort debugging tool. DesugarExprTrees(expr_trees::Opts), /// Compute facts for a set of files. Facts(facts::Opts), /// Render the source text parse tree for each given file. Parse(parse::Opts), /// Parse many files whose filenames are read from stdin, discard parser output. ParseBench(parse::BenchOpts), /// Compile Hack source files or directories and check for compilation errors. Verify(verify::Opts), } /// Which command are we running? Using bool opts for compatibility with test harnesses. /// New commands should be defined as subcommands using the Command enum. #[derive(Parser, Debug, Default)] struct FlagCommands { /// Parse decls from source text, transform them into facts, and print the facts /// in JSON format. #[clap(long)] extract_facts_from_decls: bool, /// Compile file with decls from the same file available during compilation. 
#[clap(long)] test_compile_with_decls: bool, } impl FileOpts { pub fn gather_input_files(&mut self) -> Result<Vec<PathBuf>> { use std::io::BufReader; let mut files: Vec<PathBuf> = Default::default(); if let Some(list_path) = self.input_file_list.take() { for line in BufReader::new(std::fs::File::open(list_path)?).lines() { files.push(Path::new(&line?).to_path_buf()); } } files.append(&mut self.filenames); Ok(files) } pub fn is_batch_mode(&self) -> bool { self.input_file_list.is_some() || self.filenames.len() > 1 } } impl Opts { pub fn env_flags(&self) -> EnvFlags { let mut flags = EnvFlags::empty(); if self.for_debugger_eval { flags |= EnvFlags::FOR_DEBUGGER_EVAL; } if self.disable_toplevel_elaboration { flags |= EnvFlags::DISABLE_TOPLEVEL_ELABORATION; } if self.is_systemlib { flags |= EnvFlags::IS_SYSTEMLIB; } flags } pub fn decl_opts(&self) -> DeclParserOptions { // TODO: share this logic with hackc_create_decl_parse_options() let config_opts = options::Options::from_configs(&[Self::AUTO_NAMESPACE_MAP]).unwrap(); let auto_namespace_map = match config_opts.hhvm.aliased_namespaces.get().as_map() { Some(m) => m.iter().map(|(k, v)| (k.clone(), v.clone())).collect(), None => Vec::new(), }; DeclParserOptions { auto_namespace_map, disable_xhp_element_mangling: false, interpret_soft_types_as_like_types: true, allow_new_attribute_syntax: true, enable_xhp_class_modifier: false, php5_compat_mode: true, hhvm_compat_mode: true, ..Default::default() } } pub fn native_env(&self, path: PathBuf) -> Result<NativeEnv<'_>> { let hhvm_options = &self.hhvm_options; let hhvm_config = hhvm_options.to_config()?; let parser_flags = ParserFlags::from_hhvm_config(&hhvm_config)?; let hhbc_flags = HHBCFlags::from_hhvm_config(&hhvm_config)?; Ok(NativeEnv { filepath: RelativePath::make(relative_path::Prefix::Dummy, path), aliased_namespaces: crate::Opts::AUTO_NAMESPACE_MAP, include_roots: crate::Opts::INCLUDE_ROOTS, hhbc_flags, parser_flags, flags: self.env_flags(), emit_class_pointers: self.emit_class_pointers, check_int_overflow: self.check_int_overflow, }) } // TODO (T118266805): get these from nearest .hhconfig enclosing each file. pub(crate) const AUTO_NAMESPACE_MAP: &'static str = r#"{ "hhvm.aliased_namespaces": { "global_value": { "Async": "HH\\Lib\\Async", "C": "FlibSL\\C", "Dict": "FlibSL\\Dict", "File": "HH\\Lib\\File", "IO": "HH\\Lib\\IO", "Keyset": "FlibSL\\Keyset", "Locale": "FlibSL\\Locale", "Math": "FlibSL\\Math", "OS": "HH\\Lib\\OS", "PHP": "FlibSL\\PHP", "PseudoRandom": "FlibSL\\PseudoRandom", "Regex": "FlibSL\\Regex", "SecureRandom": "FlibSL\\SecureRandom", "Str": "FlibSL\\Str", "Vec": "FlibSL\\Vec" } } }"#; pub(crate) const INCLUDE_ROOTS: &'static str = ""; } fn main() -> Result<()> { env_logger::init(); let mut opts = Opts::parse(); // Some subcommands need worker threads with larger than default stacks, // even when using Stacker. In particular, various derived traits (e.g. Drop) // on AAST nodes are inherently recursive. rayon::ThreadPoolBuilder::new() .num_threads(opts.num_threads) .stack_size(opts.stack_size.get_bytes().try_into()?) 
.build_global() .unwrap(); match opts.command.take() { Some(Command::Assemble(opts)) => assemble::run(opts), Some(Command::Crc(opts)) => crc::run(opts), Some(Command::Parse(parse_opts)) => parse::run(parse_opts), Some(Command::ParseBench(bench_opts)) => parse::run_bench_command(bench_opts), Some(Command::Verify(opts)) => verify::run(opts), // Expr trees Some(Command::DesugarExprTrees(et_opts)) => expr_trees::desugar_expr_trees(&opts, et_opts), // Facts Some(Command::Facts(facts_opts)) => { facts::extract_facts(&opts, facts_opts, &mut std::io::stdout()) } None if opts.daemon && opts.flag_commands.extract_facts_from_decls => { facts::daemon(&mut opts) } None if opts.flag_commands.extract_facts_from_decls => { facts::run_flag(&mut opts, &mut std::io::stdout()) } // Test Decls-in-Compilation None if opts.daemon && opts.flag_commands.test_compile_with_decls => { compile::test_decl_compile_daemon(&mut opts) } None if opts.flag_commands.test_compile_with_decls => { compile::test_decl_compile(&mut opts,
#[clap(long)] disable_toplevel_elaboration: bool, /// Mutate the program as if we're in the debugger repl #[clap(long)]
random_line_split
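The hackc options in the row above feed the `hhvm.aliased_namespaces` entry of `AUTO_NAMESPACE_MAP` into the declaration parser as a list of (alias, namespace) pairs. As a rough illustration of what that JSON shape encodes, here is a small standalone Python sketch; the helper name `aliased_namespaces` and the trimmed-down entry list are illustrative, not part of hackc.

import json

# Same shape as the AUTO_NAMESPACE_MAP blob above: a config key whose
# "global_value" object maps short aliases to fully qualified namespaces.
CONFIG = r'''{
    "hhvm.aliased_namespaces": {
        "global_value": {
            "Dict": "FlibSL\\Dict",
            "Str": "FlibSL\\Str",
            "Vec": "FlibSL\\Vec"
        }
    }
}'''

def aliased_namespaces(config_json):
    """Flatten hhvm.aliased_namespaces.global_value into sorted (alias, namespace) pairs."""
    cfg = json.loads(config_json)
    mapping = cfg.get("hhvm.aliased_namespaces", {}).get("global_value", {})
    return sorted(mapping.items())

if __name__ == "__main__":
    for alias, namespace in aliased_namespaces(CONFIG):
        print(alias, "->", namespace)   # e.g. Dict -> FlibSL\Dict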
conductor.go
chan error au audition prErrCh <-chan error pr prompter colErrCh <-chan error col collector spotErrCh <-chan error spm spotMgr } func (ap *app) makeTheater(ctx context.Context) (th theater) { prompterAndAuditiontoCollectorCh := make(chan collectorEvent, 10) prompterAndSpotlightsToAuditionCh := make(chan auditableEvent, len(ap.cfg.actors)) prompterToConductorErrCh := make(chan error, 1) prompterToSpotlightsTermCh := make(chan struct{}) th.prErrCh = prompterToConductorErrCh th.pr = prompter{ r: ap, cfg: ap.cfg, stopper: ap.stopper, numRepeats: &ap.auRes.numRepeats, collCh: prompterAndAuditiontoCollectorCh, auditCh: prompterAndSpotlightsToAuditionCh, termCh: prompterToSpotlightsTermCh, errCh: prompterToConductorErrCh, } spotlightsToConductorErrCh := make(chan error, 1) th.spotErrCh = spotlightsToConductorErrCh th.spm = spotMgr{ r: ap, cfg: ap.cfg, stopper: ap.stopper, logger: log.NewSecondaryLogger(ctx, nil, "spotlight", true /*enableGc*/, false /*forceSyncWrite*/), auditCh: prompterAndSpotlightsToAuditionCh, termCh: prompterToSpotlightsTermCh, errCh: spotlightsToConductorErrCh, } auditionToConductorErrCh := make(chan error, 1) th.auErrCh = auditionToConductorErrCh th.au = audition{ r: ap, cfg: ap.cfg, stopper: ap.stopper, logger: log.NewSecondaryLogger(ctx, nil, "audit", true /*enableGc*/, false /*forceSyncWrite*/), res: &ap.auRes, st: makeAuditionState(ap.cfg), eventCh: prompterAndSpotlightsToAuditionCh, collCh: prompterAndAuditiontoCollectorCh, errCh: auditionToConductorErrCh, } collectorToConductorErrCh := make(chan error, 1) th.colErrCh = collectorToConductorErrCh th.col = collector{ r: ap, cfg: ap.cfg, stopper: ap.stopper, st: makeCollectorState(ap.cfg), logger: log.NewSecondaryLogger(ctx, nil, "collector", true /*enableGc*/, false /*forceSyncWrite*/), eventCh: prompterAndAuditiontoCollectorCh, errCh: collectorToConductorErrCh, } return th } // startPrompter runs the prompter until completion. func (pr *prompter) startPrompter(ctx context.Context, wg *sync.WaitGroup) func() { promptCtx, promptDone := context.WithCancel(ctx) promptCtx = logtags.AddTag(promptCtx, "prompter", nil) wg.Add(1) runWorker(promptCtx, pr.stopper, func(ctx context.Context) { defer func() { // Inform the spotlights to terminate. close(pr.termCh) // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(pr.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") pr.errCh <- errors.WithContextTags(pr.prompt(ctx), ctx) }) return promptDone } // startAudition starts the audition in the background. func (au *audition) startAudition(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { auCtx, auDone := context.WithCancel(ctx) auCtx = logtags.AddTag(auCtx, "audition", nil) wg.Add(1) runWorker(auCtx, au.stopper, func(ctx context.Context) { defer func() { // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(au.errCh) log.Info(ctx, "<ends>") }() log.Info(ctx, "<begins>") au.errCh <- errors.WithContextTags(au.audit(ctx), ctx) }) return auDone } // startCollector starts the collector in the background. func (col *collector) startCollector(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { colCtx, colDone := context.WithCancel(ctx) colCtx = logtags.AddTag(colCtx, "collector", nil) wg.Add(1) runWorker(colCtx, col.stopper, func(ctx context.Context) { defer func() { // Indicate to the conductor that we are terminating. 
wg.Done() // Also indicate to the conductor there will be no further error // reported. close(col.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") col.errCh <- errors.WithContextTags(col.collect(ctx), ctx) }) return colDone } // startSpotlights starts all the spotlights in the background. func (spm *spotMgr) startSpotlights(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { spotCtx, spotDone := context.WithCancel(ctx) spotCtx = logtags.AddTag(spotCtx, "spotlight-supervisor", nil) wg.Add(1) runWorker(spotCtx, spm.stopper, func(ctx context.Context) { defer func() { // Inform the audience to terminate. close(spm.auditCh) // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(spm.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") spm.errCh <- errors.WithContextTags(spm.manageSpotlights(ctx), ctx) }) return spotDone } func (ap *app) runCleanup(ctx context.Context) error { return ap.runForAllActors(ctx, "cleanup", func(a *actor) string { return a.cleanupScript }) } func (ap *app) runForAllActors( ctx context.Context, prefix string, getScript func(a *actor) string, ) (err error) { // errCh collects the errors from the concurrent actors. errCh := make(chan error, len(ap.cfg.actors)+1) defer func() { if r := recover(); r != nil { panic(r) } // At the end of the scene, make runScene() return the collected // errors. err = collectErrors(ctx, nil, errCh, prefix) }() var wg sync.WaitGroup defer func() { wg.Wait() }() actNums := 0 for actName, thisActor := range ap.cfg.actors { pScript := getScript(thisActor) if pScript == "" { // No command to run. Nothing to do. continue } actCtx := logtags.AddTag(ctx, prefix, nil) actCtx = logtags.AddTag(actCtx, "actor", actName) actCtx = logtags.AddTag(actCtx, "role", thisActor.role.name) a := thisActor wg.Add(1) runWorker(actCtx, ap.stopper, func(ctx context.Context) { defer func() { wg.Done() log.Info(ctx, "<done>") }() // Start one actor. log.Info(ctx, "<start>") outdata, ps, err, _ := a.runActorCommand(ctx, ap.stopper, 10*time.Second, false /*interruptible*/, pScript) if err == nil && ps != nil && !ps.Success() { err = errors.WithDetail( errors.WithDetail( errors.Newf("command failed: %s", errors.Safe(ps.String())), string(pScript)), outdata) } errCh <- errors.WithContextTags(err, ctx) }) } if actNums == 0 { // Nothing was launched, ensure that collectErrors terminates in any case. errCh <- nil } // errors are collected by the defer above. return nil } func collectErrors( ctx context.Context, closers []func(), errCh chan error, prefix string, ) (finalErr error) { // Wait on the first error return. select { case err := <-errCh: if err != nil { log.Errorf(ctx, "complaint during %s: %+v", prefix, err) finalErr = combineErrors(finalErr, err) } } // Signal all to terminate and wait for each of them. for _, closer := range closers { closer() } // At this point all have terminate. Ensure the loop below // terminates in all cases. close(errCh) for stErr := range errCh { if stErr == nil
{ continue }
conditional_block
conductor.go
} }() // Start the spotlights. var wgspot sync.WaitGroup allSpotsDone := th.spm.startSpotlights(ctx, &wgspot) // Start the prompter. var wgPrompt sync.WaitGroup promptDone := th.pr.startPrompter(ctx, &wgPrompt) // The shutdown sequence without cancellation/stopper is: // - prompter exits, this closes spotTermCh // - spotlights detect closed spotTermCh, terminate, then close auChan. // - auditors detect closed auChan, exit, this closes collectorChan. // - collectors detects closed collectorChan and exits. // However it's possible for things to terminate out of order: // - spotlights can detect a command error. // - auditors can encounter an audit failure. // - collector can encounter a file failure. // So at each of the shutdown stages below, we detect if a stage // later has completed and cancel the stages before. // TODO: this code can probably factored into a loop, not yet found // out how. var finalErr error var interrupt bool // First stage of shutdown: wait for the prompter to finish. select { case err := <-th.prErrCh: finalErr = combineErrors(err, finalErr) // ok case err := <-th.spotErrCh: finalErr = combineErrors(err, finalErr) interrupt = true case err := <-th.auErrCh: finalErr = combineErrors(err, finalErr) interrupt = true case err := <-th.colErrCh: finalErr = combineErrors(err, finalErr) interrupt = true } if interrupt { log.Info(ctx, "something went wrong other than prompter, cancelling everything") promptDone() finalErr = combineErrors(ignCancel(<-th.prErrCh), finalErr) allSpotsDone() finalErr = combineErrors(ignCancel(<-th.spotErrCh), finalErr) auDone() finalErr = combineErrors(ignCancel(<-th.auErrCh), finalErr) colDone() finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) interrupt = false } wgPrompt.Wait() promptDone() // in case not called before. // Second stage: wait for the spotlights to finish. select { case err := <-th.spotErrCh: finalErr = combineErrors(err, finalErr) // ok case err := <-th.auErrCh: finalErr = combineErrors(err, finalErr) interrupt = true case err := <-th.colErrCh: finalErr = combineErrors(err, finalErr) interrupt = true } if interrupt { log.Info(ctx, "something went wrong after prompter terminated: cancelling spotlights, audience and collector") allSpotsDone() finalErr = combineErrors(ignCancel(<-th.spotErrCh), finalErr) auDone() finalErr = combineErrors(ignCancel(<-th.auErrCh), finalErr) colDone() finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) interrupt = false } wgspot.Wait() allSpotsDone() // in case not called before. // Third stage: wait for the auditors to finish. select { case err := <-th.auErrCh: finalErr = combineErrors(err, finalErr) // ok case err := <-th.colErrCh: finalErr = combineErrors(err, finalErr) interrupt = true } if interrupt {
interrupt = false } wgau.Wait() auDone() // in case not called before. // Fourth stage: wait for the collector to finish. finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) wgcol.Wait() colDone() // in case not called before. return finalErr } type theater struct { auErrCh <-chan error au audition prErrCh <-chan error pr prompter colErrCh <-chan error col collector spotErrCh <-chan error spm spotMgr } func (ap *app) makeTheater(ctx context.Context) (th theater) { prompterAndAuditiontoCollectorCh := make(chan collectorEvent, 10) prompterAndSpotlightsToAuditionCh := make(chan auditableEvent, len(ap.cfg.actors)) prompterToConductorErrCh := make(chan error, 1) prompterToSpotlightsTermCh := make(chan struct{}) th.prErrCh = prompterToConductorErrCh th.pr = prompter{ r: ap, cfg: ap.cfg, stopper: ap.stopper, numRepeats: &ap.auRes.numRepeats, collCh: prompterAndAuditiontoCollectorCh, auditCh: prompterAndSpotlightsToAuditionCh, termCh: prompterToSpotlightsTermCh, errCh: prompterToConductorErrCh, } spotlightsToConductorErrCh := make(chan error, 1) th.spotErrCh = spotlightsToConductorErrCh th.spm = spotMgr{ r: ap, cfg: ap.cfg, stopper: ap.stopper, logger: log.NewSecondaryLogger(ctx, nil, "spotlight", true /*enableGc*/, false /*forceSyncWrite*/), auditCh: prompterAndSpotlightsToAuditionCh, termCh: prompterToSpotlightsTermCh, errCh: spotlightsToConductorErrCh, } auditionToConductorErrCh := make(chan error, 1) th.auErrCh = auditionToConductorErrCh th.au = audition{ r: ap, cfg: ap.cfg, stopper: ap.stopper, logger: log.NewSecondaryLogger(ctx, nil, "audit", true /*enableGc*/, false /*forceSyncWrite*/), res: &ap.auRes, st: makeAuditionState(ap.cfg), eventCh: prompterAndSpotlightsToAuditionCh, collCh: prompterAndAuditiontoCollectorCh, errCh: auditionToConductorErrCh, } collectorToConductorErrCh := make(chan error, 1) th.colErrCh = collectorToConductorErrCh th.col = collector{ r: ap, cfg: ap.cfg, stopper: ap.stopper, st: makeCollectorState(ap.cfg), logger: log.NewSecondaryLogger(ctx, nil, "collector", true /*enableGc*/, false /*forceSyncWrite*/), eventCh: prompterAndAuditiontoCollectorCh, errCh: collectorToConductorErrCh, } return th } // startPrompter runs the prompter until completion. func (pr *prompter) startPrompter(ctx context.Context, wg *sync.WaitGroup) func() { promptCtx, promptDone := context.WithCancel(ctx) promptCtx = logtags.AddTag(promptCtx, "prompter", nil) wg.Add(1) runWorker(promptCtx, pr.stopper, func(ctx context.Context) { defer func() { // Inform the spotlights to terminate. close(pr.termCh) // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(pr.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") pr.errCh <- errors.WithContextTags(pr.prompt(ctx), ctx) }) return promptDone } // startAudition starts the audition in the background. func (au *audition) startAudition(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { auCtx, auDone := context.WithCancel(ctx) auCtx = logtags.AddTag(auCtx, "audition", nil) wg.Add(1) runWorker(auCtx, au.stopper, func(ctx context.Context) { defer func() { // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(au.errCh) log.Info(ctx, "<ends>") }() log.Info(ctx, "<begins>") au.errCh <- errors.WithContextTags(au.audit(ctx), ctx) }) return auDone } // startCollector starts the collector in the background. 
func (col *collector) startCollector(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { colCtx, colDone := context.WithCancel(ctx) colCtx = logtags.AddTag(colCtx, "collector", nil) wg.Add(1) runWorker(colCtx, col.stopper, func(ctx context.Context) { defer func() { // Indicate
log.Info(ctx, "something went wrong after spotlights terminated, cancelling audience and collector") auDone() finalErr = combineErrors(ignCancel(<-th.auErrCh), finalErr) colDone() finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr)
random_line_split
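The conductor rows above give each stage (prompter, spotlights, audition, collector) its own buffered error channel and shut the stages down in order; if a later stage reports a failure first, everything upstream is cancelled and the remaining error channels are drained. The Go wiring does not translate literally, but the underlying "first error wins, then cancel and drain" idea can be sketched in Python; the thread/queue machinery and all names below are illustrative, not taken from conductor.go.

import queue
import threading
import time

def run_stage(name, fail_at, err_q, stop_evt):
    # Simulate one stage: do some steps, stop early if cancelled or failing,
    # and always report exactly one (name, error-or-None) message.
    err = None
    for step in range(10):
        if stop_evt.is_set():
            break                                    # cancelled by the conductor
        if step == fail_at:
            err = RuntimeError("%s failed at step %d" % (name, step))
            break
        time.sleep(0.01)
    err_q.put((name, err))

def conduct(stages):
    # Start every stage, wait for one report per stage, and on the first
    # failure cancel the others so they drain quickly.
    err_q = queue.Queue()
    stop_evt = threading.Event()
    threads = [threading.Thread(target=run_stage, args=(name, fail_at, err_q, stop_evt))
               for name, fail_at in stages]
    for t in threads:
        t.start()
    failures = []
    for _ in stages:
        name, err = err_q.get()
        if err is not None:
            failures.append((name, err))
            stop_evt.set()
    for t in threads:
        t.join()
    return failures

if __name__ == "__main__":
    print(conduct([("prompter", -1), ("spotlights", 3),
                   ("audition", -1), ("collector", -1)]))

The Go code likewise reads the leftover error channels after cancelling, so no stage's result is lost and no worker stays blocked on an unread send.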
conductor.go
finalErr = combineErrors(err, finalErr) interrupt = true case err := <-th.auErrCh: finalErr = combineErrors(err, finalErr) interrupt = true case err := <-th.colErrCh: finalErr = combineErrors(err, finalErr) interrupt = true } if interrupt { log.Info(ctx, "something went wrong other than prompter, cancelling everything") promptDone() finalErr = combineErrors(ignCancel(<-th.prErrCh), finalErr) allSpotsDone() finalErr = combineErrors(ignCancel(<-th.spotErrCh), finalErr) auDone() finalErr = combineErrors(ignCancel(<-th.auErrCh), finalErr) colDone() finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) interrupt = false } wgPrompt.Wait() promptDone() // in case not called before. // Second stage: wait for the spotlights to finish. select { case err := <-th.spotErrCh: finalErr = combineErrors(err, finalErr) // ok case err := <-th.auErrCh: finalErr = combineErrors(err, finalErr) interrupt = true case err := <-th.colErrCh: finalErr = combineErrors(err, finalErr) interrupt = true } if interrupt { log.Info(ctx, "something went wrong after prompter terminated: cancelling spotlights, audience and collector") allSpotsDone() finalErr = combineErrors(ignCancel(<-th.spotErrCh), finalErr) auDone() finalErr = combineErrors(ignCancel(<-th.auErrCh), finalErr) colDone() finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) interrupt = false } wgspot.Wait() allSpotsDone() // in case not called before. // Third stage: wait for the auditors to finish. select { case err := <-th.auErrCh: finalErr = combineErrors(err, finalErr) // ok case err := <-th.colErrCh: finalErr = combineErrors(err, finalErr) interrupt = true } if interrupt { log.Info(ctx, "something went wrong after spotlights terminated, cancelling audience and collector") auDone() finalErr = combineErrors(ignCancel(<-th.auErrCh), finalErr) colDone() finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) interrupt = false } wgau.Wait() auDone() // in case not called before. // Fourth stage: wait for the collector to finish. finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) wgcol.Wait() colDone() // in case not called before. 
return finalErr } type theater struct { auErrCh <-chan error au audition prErrCh <-chan error pr prompter colErrCh <-chan error col collector spotErrCh <-chan error spm spotMgr } func (ap *app) makeTheater(ctx context.Context) (th theater) { prompterAndAuditiontoCollectorCh := make(chan collectorEvent, 10) prompterAndSpotlightsToAuditionCh := make(chan auditableEvent, len(ap.cfg.actors)) prompterToConductorErrCh := make(chan error, 1) prompterToSpotlightsTermCh := make(chan struct{}) th.prErrCh = prompterToConductorErrCh th.pr = prompter{ r: ap, cfg: ap.cfg, stopper: ap.stopper, numRepeats: &ap.auRes.numRepeats, collCh: prompterAndAuditiontoCollectorCh, auditCh: prompterAndSpotlightsToAuditionCh, termCh: prompterToSpotlightsTermCh, errCh: prompterToConductorErrCh, } spotlightsToConductorErrCh := make(chan error, 1) th.spotErrCh = spotlightsToConductorErrCh th.spm = spotMgr{ r: ap, cfg: ap.cfg, stopper: ap.stopper, logger: log.NewSecondaryLogger(ctx, nil, "spotlight", true /*enableGc*/, false /*forceSyncWrite*/), auditCh: prompterAndSpotlightsToAuditionCh, termCh: prompterToSpotlightsTermCh, errCh: spotlightsToConductorErrCh, } auditionToConductorErrCh := make(chan error, 1) th.auErrCh = auditionToConductorErrCh th.au = audition{ r: ap, cfg: ap.cfg, stopper: ap.stopper, logger: log.NewSecondaryLogger(ctx, nil, "audit", true /*enableGc*/, false /*forceSyncWrite*/), res: &ap.auRes, st: makeAuditionState(ap.cfg), eventCh: prompterAndSpotlightsToAuditionCh, collCh: prompterAndAuditiontoCollectorCh, errCh: auditionToConductorErrCh, } collectorToConductorErrCh := make(chan error, 1) th.colErrCh = collectorToConductorErrCh th.col = collector{ r: ap, cfg: ap.cfg, stopper: ap.stopper, st: makeCollectorState(ap.cfg), logger: log.NewSecondaryLogger(ctx, nil, "collector", true /*enableGc*/, false /*forceSyncWrite*/), eventCh: prompterAndAuditiontoCollectorCh, errCh: collectorToConductorErrCh, } return th } // startPrompter runs the prompter until completion. func (pr *prompter) startPrompter(ctx context.Context, wg *sync.WaitGroup) func() { promptCtx, promptDone := context.WithCancel(ctx) promptCtx = logtags.AddTag(promptCtx, "prompter", nil) wg.Add(1) runWorker(promptCtx, pr.stopper, func(ctx context.Context) { defer func() { // Inform the spotlights to terminate. close(pr.termCh) // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(pr.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") pr.errCh <- errors.WithContextTags(pr.prompt(ctx), ctx) }) return promptDone } // startAudition starts the audition in the background. func (au *audition) startAudition(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { auCtx, auDone := context.WithCancel(ctx) auCtx = logtags.AddTag(auCtx, "audition", nil) wg.Add(1) runWorker(auCtx, au.stopper, func(ctx context.Context) { defer func() { // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(au.errCh) log.Info(ctx, "<ends>") }() log.Info(ctx, "<begins>") au.errCh <- errors.WithContextTags(au.audit(ctx), ctx) }) return auDone } // startCollector starts the collector in the background. 
func (col *collector) startCollector(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { colCtx, colDone := context.WithCancel(ctx) colCtx = logtags.AddTag(colCtx, "collector", nil) wg.Add(1) runWorker(colCtx, col.stopper, func(ctx context.Context) { defer func() { // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(col.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") col.errCh <- errors.WithContextTags(col.collect(ctx), ctx) }) return colDone } // startSpotlights starts all the spotlights in the background. func (spm *spotMgr) startSpotlights(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { spotCtx, spotDone := context.WithCancel(ctx) spotCtx = logtags.AddTag(spotCtx, "spotlight-supervisor", nil) wg.Add(1) runWorker(spotCtx, spm.stopper, func(ctx context.Context) { defer func() { // Inform the audience to terminate. close(spm.auditCh) // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(spm.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") spm.errCh <- errors.WithContextTags(spm.manageSpotlights(ctx), ctx) }) return spotDone } func (ap *app)
runCleanup
identifier_name
conductor.go
() // in case not called before. // Second stage: wait for the spotlights to finish. select { case err := <-th.spotErrCh: finalErr = combineErrors(err, finalErr) // ok case err := <-th.auErrCh: finalErr = combineErrors(err, finalErr) interrupt = true case err := <-th.colErrCh: finalErr = combineErrors(err, finalErr) interrupt = true } if interrupt { log.Info(ctx, "something went wrong after prompter terminated: cancelling spotlights, audience and collector") allSpotsDone() finalErr = combineErrors(ignCancel(<-th.spotErrCh), finalErr) auDone() finalErr = combineErrors(ignCancel(<-th.auErrCh), finalErr) colDone() finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) interrupt = false } wgspot.Wait() allSpotsDone() // in case not called before. // Third stage: wait for the auditors to finish. select { case err := <-th.auErrCh: finalErr = combineErrors(err, finalErr) // ok case err := <-th.colErrCh: finalErr = combineErrors(err, finalErr) interrupt = true } if interrupt { log.Info(ctx, "something went wrong after spotlights terminated, cancelling audience and collector") auDone() finalErr = combineErrors(ignCancel(<-th.auErrCh), finalErr) colDone() finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) interrupt = false } wgau.Wait() auDone() // in case not called before. // Fourth stage: wait for the collector to finish. finalErr = combineErrors(ignCancel(<-th.colErrCh), finalErr) wgcol.Wait() colDone() // in case not called before. return finalErr } type theater struct { auErrCh <-chan error au audition prErrCh <-chan error pr prompter colErrCh <-chan error col collector spotErrCh <-chan error spm spotMgr } func (ap *app) makeTheater(ctx context.Context) (th theater) { prompterAndAuditiontoCollectorCh := make(chan collectorEvent, 10) prompterAndSpotlightsToAuditionCh := make(chan auditableEvent, len(ap.cfg.actors)) prompterToConductorErrCh := make(chan error, 1) prompterToSpotlightsTermCh := make(chan struct{}) th.prErrCh = prompterToConductorErrCh th.pr = prompter{ r: ap, cfg: ap.cfg, stopper: ap.stopper, numRepeats: &ap.auRes.numRepeats, collCh: prompterAndAuditiontoCollectorCh, auditCh: prompterAndSpotlightsToAuditionCh, termCh: prompterToSpotlightsTermCh, errCh: prompterToConductorErrCh, } spotlightsToConductorErrCh := make(chan error, 1) th.spotErrCh = spotlightsToConductorErrCh th.spm = spotMgr{ r: ap, cfg: ap.cfg, stopper: ap.stopper, logger: log.NewSecondaryLogger(ctx, nil, "spotlight", true /*enableGc*/, false /*forceSyncWrite*/), auditCh: prompterAndSpotlightsToAuditionCh, termCh: prompterToSpotlightsTermCh, errCh: spotlightsToConductorErrCh, } auditionToConductorErrCh := make(chan error, 1) th.auErrCh = auditionToConductorErrCh th.au = audition{ r: ap, cfg: ap.cfg, stopper: ap.stopper, logger: log.NewSecondaryLogger(ctx, nil, "audit", true /*enableGc*/, false /*forceSyncWrite*/), res: &ap.auRes, st: makeAuditionState(ap.cfg), eventCh: prompterAndSpotlightsToAuditionCh, collCh: prompterAndAuditiontoCollectorCh, errCh: auditionToConductorErrCh, } collectorToConductorErrCh := make(chan error, 1) th.colErrCh = collectorToConductorErrCh th.col = collector{ r: ap, cfg: ap.cfg, stopper: ap.stopper, st: makeCollectorState(ap.cfg), logger: log.NewSecondaryLogger(ctx, nil, "collector", true /*enableGc*/, false /*forceSyncWrite*/), eventCh: prompterAndAuditiontoCollectorCh, errCh: collectorToConductorErrCh, } return th } // startPrompter runs the prompter until completion. 
func (pr *prompter) startPrompter(ctx context.Context, wg *sync.WaitGroup) func() { promptCtx, promptDone := context.WithCancel(ctx) promptCtx = logtags.AddTag(promptCtx, "prompter", nil) wg.Add(1) runWorker(promptCtx, pr.stopper, func(ctx context.Context) { defer func() { // Inform the spotlights to terminate. close(pr.termCh) // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(pr.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") pr.errCh <- errors.WithContextTags(pr.prompt(ctx), ctx) }) return promptDone } // startAudition starts the audition in the background. func (au *audition) startAudition(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { auCtx, auDone := context.WithCancel(ctx) auCtx = logtags.AddTag(auCtx, "audition", nil) wg.Add(1) runWorker(auCtx, au.stopper, func(ctx context.Context) { defer func() { // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(au.errCh) log.Info(ctx, "<ends>") }() log.Info(ctx, "<begins>") au.errCh <- errors.WithContextTags(au.audit(ctx), ctx) }) return auDone } // startCollector starts the collector in the background. func (col *collector) startCollector(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { colCtx, colDone := context.WithCancel(ctx) colCtx = logtags.AddTag(colCtx, "collector", nil) wg.Add(1) runWorker(colCtx, col.stopper, func(ctx context.Context) { defer func() { // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(col.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") col.errCh <- errors.WithContextTags(col.collect(ctx), ctx) }) return colDone } // startSpotlights starts all the spotlights in the background. func (spm *spotMgr) startSpotlights(ctx context.Context, wg *sync.WaitGroup) (cancelFunc func()) { spotCtx, spotDone := context.WithCancel(ctx) spotCtx = logtags.AddTag(spotCtx, "spotlight-supervisor", nil) wg.Add(1) runWorker(spotCtx, spm.stopper, func(ctx context.Context) { defer func() { // Inform the audience to terminate. close(spm.auditCh) // Indicate to the conductor that we are terminating. wg.Done() // Also indicate to the conductor there will be no further error // reported. close(spm.errCh) log.Info(ctx, "<exit>") }() log.Info(ctx, "<intrat>") spm.errCh <- errors.WithContextTags(spm.manageSpotlights(ctx), ctx) }) return spotDone } func (ap *app) runCleanup(ctx context.Context) error { return ap.runForAllActors(ctx, "cleanup", func(a *actor) string { return a.cleanupScript }) } func (ap *app) runForAllActors( ctx context.Context, prefix string, getScript func(a *actor) string, ) (err error)
{ // errCh collects the errors from the concurrent actors. errCh := make(chan error, len(ap.cfg.actors)+1) defer func() { if r := recover(); r != nil { panic(r) } // At the end of the scene, make runScene() return the collected // errors. err = collectErrors(ctx, nil, errCh, prefix) }() var wg sync.WaitGroup defer func() { wg.Wait() }() actNums := 0 for actName, thisActor := range ap.cfg.actors { pScript := getScript(thisActor) if pScript == "" {
identifier_body
creat_model_save.py
..] doc = {} # bestN = 4 # result = np.array([0, 0, 0, 0]) # maxDepth = 5 avgDepth = 0 # If the values are supplied as command line arguments if len(sys.argv) == 3: branches = int(sys.argv[1]) maxDepth = int(sys.argv[2]) model = MiniBatchKMeans(n_clusters=branches) # The KMeans Clustering Model sift = cv2.xfeatures2d.SIFT_create(nfeatures=500) # SIFT Feature extractor model leafClusterSize = 2 * branches fileList = sorted(os.listdir('data/full')) dirName = 'data/full' fileList1 = sorted(os.listdir('data/full1')) dirName1 = 'data/full1' # ------------------------------------------------------------------------------------------------------------ # Function to dump all the SIFT descriptors from training data in the feature space def dumpFeatures(rootDir): features = [] n = 0 for fname in fileList: # print("Reading Image: " + dirName + "/" + fname) kp, des = feat(dirName + "/" + fname) for d in des: features.append(d) del kp, des n = n + 1 if n >= N: break features = np.array(features) return features # Function to construct the vocabulary tree def constructTree(node, featuresIDs, depth): global nodeIndex, nodes, tree, imagesInLeaves, avgDepth tree[node] = [] if len(featuresIDs) >= leafClusterSize and depth < maxDepth: # Here we will fetch the cluster from the indices and then use it to fit the kmeans # And then just after that we will delete the cluster model.fit([features[i] for i in featuresIDs]) childFeatureIDs = [[] for i in range(branches)] for i in range(len(featuresIDs)): childFeatureIDs[model.labels_[i]].append(featuresIDs[i]) for i in range(branches): nodeIndex = nodeIndex + 1 nodes[nodeIndex] = model.cluster_centers_[i] tree[node].append(nodeIndex) constructTree(nodeIndex, childFeatureIDs[i], depth + 1) else: imagesInLeaves[node] = {} avgDepth = avgDepth + depth # Function to lookup a SIFT descriptor in the vocabulary tree, returns a leaf cluster def lookup(descriptor, node): D = float("inf") goto = None for child in tree[node]: # Difference between them and magnitude of the vector dist = np.linalg.norm([nodes[child] - descriptor]) if D > dist: D = dist goto = child if tree[goto] == []: return goto return lookup(descriptor, goto) # Constructs the inverted file frequency index def tfidf(filename): global imagesInLeaves kp, des = feat(dirName + "/" + fname) for d in des: leafID = lookup(d, 0) if filename in imagesInLeaves[leafID]: imagesInLeaves[leafID][filename] += 1 else: imagesInLeaves[leafID][filename] = 1 del kp, des # This function returns the weight of a leaf node def weight(leafID): return math.log1p(N / 1.0 * len(imagesInLeaves[leafID])) # Returns the scores of the images in the dataset def getScores(q): scores = {} n = 0 curr = [float("inf"), float("inf"), float("inf"), float("inf")] currimg = ["", "", "", ""] for fname in fileList: img = dirName + "/" + fname scores[img] = 0 for leafID in imagesInLeaves: if leafID in doc[img] and leafID in q: scores[img] += math.fabs(q[leafID] - doc[img][leafID]) elif leafID in q and leafID not in doc[img]: scores[img] += math.fabs(q[leafID]) elif leafID not in q and leafID in doc[img]: scores[img] += math.fabs(doc[img][leafID]) if scores[img] > curr[-1]: break if scores[img] <= curr[0]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = currimg[1], curr[1] currimg[1], curr[1] = currimg[0], curr[0] currimg[0], curr[0] = img, scores[img] elif scores[img] > curr[0] and scores[img] <= curr[1]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = currimg[1], curr[1] currimg[1], curr[1] = img, scores[img] elif 
scores[img] > curr[1] and scores[img] <= curr[2]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = img, scores[img] elif scores[img] > curr[2] and scores[img] <= curr[3]: currimg[3], curr[3] = img, scores[img] n = n + 1 if n >= N: break return currimg # Return the bestN best matches def findBest(scores, bestN): sorted_scores = sorted(scores.items(), key=operator.itemgetter(1)) return sorted_scores[:bestN] def
(F, M1, M2, M3, M4): a = [0, 0, 0, 0] group = int(F / 4) if int(M1 / 4) == group: a[0] = 1 if int(M2 / 4) == group: a[1] = 1 if int(M3 / 4) == group: a[2] = 1 if int(M4 / 4) == group: a[3] = 1 return np.array(a) # Finds 4 best matches for the query def match(filename): # dirName + "/" + fname # q is the frequency of this image appearing in each of the leaf nodes q = {} kp, des = feat(filename, 480) if des is not None: for d in des: leafID = lookup(d, 0) if leafID in q: q[leafID] += 1 else: q[leafID] = 1 else: print "error at {}".format(filename) s = 0.0 for key in q: q[key] = q[key] * weight(key) s += q[key] for key in q: q[key] = q[key] / s return getScores(q) # return findBest(scores, bestN) def getImgID(s): return int((re.findall("\d+", s))[0]) # ------------------------------------------------------------------------------------------------------------ start = t.time() print("Extracting Features: " + rootDir + " ...") # dump all features as array features = dumpFeatures(rootDir) end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) start = t.time() print("Constructing Vocabulary Tree ... ") # average of all values in row root = features.mean(axis=0) nodes[0] = root # Array of indices into the construct tree function featuresIDs = [x for x in range(len(features))] constructTree(0, featuresIDs, 0) end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) # memorytes() del features avgDepth = int(avgDepth / len(imagesInLeaves)) start = t.time() print("Mapping images to leaf nodes of the tree ...") n = 0 for fname in fileList: filename = dirName + "/" + fname tfidf(filename) n = n + 1 if n >= N: break # Creating weights for the leaf images tress for leafID in imagesInLeaves: for img in imagesInLeaves[leafID]: if img not in doc: doc[img] = {} # weight of leafId * frequency of occurance doc[img][leafID] = weight(leafID) * (imagesInLeaves[leafID][img]) # scale the weights in range(0,1) for img in doc: s = 0.0 for leafID in doc[img]: s += doc[img][leafID] for leafID in doc[img]: doc[img][leafID] /= s end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) # memorytes() # saving stuff # ------------------------------------------------------------------------------------------------------------ with open('model.pkl', 'wb') as fid:
accuracy
identifier_name
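The vocabulary-tree code above clusters SIFT descriptors recursively (constructTree) and resolves a descriptor to a leaf by repeatedly picking the child whose cluster center is nearest (lookup). A self-contained toy version of that greedy descent follows, with a hand-built two-level tree; the centers and layout are invented for illustration.

import numpy as np

# Toy vocabulary tree: node 0 is the root, 1-2 are its children, 3-6 are leaves.
# `nodes` maps node id -> cluster center, `tree` maps node id -> child ids
# (an empty child list marks a leaf).
nodes = {
    0: np.array([0.0, 0.0]),
    1: np.array([-1.0, 0.0]), 2: np.array([1.0, 0.0]),
    3: np.array([-1.0, -1.0]), 4: np.array([-1.0, 1.0]),
    5: np.array([1.0, -1.0]), 6: np.array([1.0, 1.0]),
}
tree = {0: [1, 2], 1: [3, 4], 2: [5, 6], 3: [], 4: [], 5: [], 6: []}

def lookup(descriptor, node=0):
    # Greedy descent: at every level follow the child with the nearest center.
    best, best_dist = None, float("inf")
    for child in tree[node]:
        dist = np.linalg.norm(nodes[child] - descriptor)
        if dist < best_dist:
            best, best_dist = child, dist
    if not tree[best]:                      # reached a leaf cluster
        return best
    return lookup(descriptor, best)

print(lookup(np.array([0.9, -0.8])))        # descends root -> node 2 -> leaf 5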
creat_model_save.py
..] doc = {} # bestN = 4 # result = np.array([0, 0, 0, 0]) # maxDepth = 5 avgDepth = 0 # If the values are supplied as command line arguments if len(sys.argv) == 3: branches = int(sys.argv[1]) maxDepth = int(sys.argv[2]) model = MiniBatchKMeans(n_clusters=branches) # The KMeans Clustering Model sift = cv2.xfeatures2d.SIFT_create(nfeatures=500) # SIFT Feature extractor model leafClusterSize = 2 * branches fileList = sorted(os.listdir('data/full')) dirName = 'data/full' fileList1 = sorted(os.listdir('data/full1')) dirName1 = 'data/full1' # ------------------------------------------------------------------------------------------------------------ # Function to dump all the SIFT descriptors from training data in the feature space def dumpFeatures(rootDir): features = [] n = 0 for fname in fileList: # print("Reading Image: " + dirName + "/" + fname) kp, des = feat(dirName + "/" + fname) for d in des: features.append(d) del kp, des n = n + 1 if n >= N: break features = np.array(features) return features # Function to construct the vocabulary tree def constructTree(node, featuresIDs, depth): global nodeIndex, nodes, tree, imagesInLeaves, avgDepth tree[node] = [] if len(featuresIDs) >= leafClusterSize and depth < maxDepth: # Here we will fetch the cluster from the indices and then use it to fit the kmeans # And then just after that we will delete the cluster model.fit([features[i] for i in featuresIDs]) childFeatureIDs = [[] for i in range(branches)] for i in range(len(featuresIDs)): childFeatureIDs[model.labels_[i]].append(featuresIDs[i]) for i in range(branches): nodeIndex = nodeIndex + 1 nodes[nodeIndex] = model.cluster_centers_[i] tree[node].append(nodeIndex) constructTree(nodeIndex, childFeatureIDs[i], depth + 1) else: imagesInLeaves[node] = {} avgDepth = avgDepth + depth # Function to lookup a SIFT descriptor in the vocabulary tree, returns a leaf cluster def lookup(descriptor, node): D = float("inf") goto = None for child in tree[node]: # Difference between them and magnitude of the vector dist = np.linalg.norm([nodes[child] - descriptor]) if D > dist: D = dist goto = child if tree[goto] == []: return goto return lookup(descriptor, goto) # Constructs the inverted file frequency index def tfidf(filename): global imagesInLeaves kp, des = feat(dirName + "/" + fname) for d in des: leafID = lookup(d, 0) if filename in imagesInLeaves[leafID]: imagesInLeaves[leafID][filename] += 1 else: imagesInLeaves[leafID][filename] = 1 del kp, des # This function returns the weight of a leaf node def weight(leafID): return math.log1p(N / 1.0 * len(imagesInLeaves[leafID])) # Returns the scores of the images in the dataset def getScores(q): scores = {} n = 0 curr = [float("inf"), float("inf"), float("inf"), float("inf")] currimg = ["", "", "", ""] for fname in fileList: img = dirName + "/" + fname scores[img] = 0 for leafID in imagesInLeaves: if leafID in doc[img] and leafID in q: scores[img] += math.fabs(q[leafID] - doc[img][leafID]) elif leafID in q and leafID not in doc[img]: scores[img] += math.fabs(q[leafID]) elif leafID not in q and leafID in doc[img]: scores[img] += math.fabs(doc[img][leafID]) if scores[img] > curr[-1]: break if scores[img] <= curr[0]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = currimg[1], curr[1] currimg[1], curr[1] = currimg[0], curr[0] currimg[0], curr[0] = img, scores[img] elif scores[img] > curr[0] and scores[img] <= curr[1]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = currimg[1], curr[1] currimg[1], curr[1] = img, scores[img] elif 
scores[img] > curr[1] and scores[img] <= curr[2]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = img, scores[img] elif scores[img] > curr[2] and scores[img] <= curr[3]: currimg[3], curr[3] = img, scores[img] n = n + 1 if n >= N: break return currimg # Return the bestN best matches def findBest(scores, bestN): sorted_scores = sorted(scores.items(), key=operator.itemgetter(1)) return sorted_scores[:bestN] def accuracy(F, M1, M2, M3, M4): a = [0, 0, 0, 0] group = int(F / 4) if int(M1 / 4) == group: a[0] = 1 if int(M2 / 4) == group: a[1] = 1 if int(M3 / 4) == group: a[2] = 1 if int(M4 / 4) == group: a[3] = 1 return np.array(a) # Finds 4 best matches for the query def match(filename): # dirName + "/" + fname # q is the frequency of this image appearing in each of the leaf nodes q = {} kp, des = feat(filename, 480) if des is not None: for d in des: leafID = lookup(d, 0) if leafID in q: q[leafID] += 1 else: q[leafID] = 1 else: print "error at {}".format(filename) s = 0.0 for key in q: q[key] = q[key] * weight(key) s += q[key] for key in q: q[key] = q[key] / s return getScores(q) # return findBest(scores, bestN) def getImgID(s):
# ------------------------------------------------------------------------------------------------------------ start = t.time() print("Extracting Features: " + rootDir + " ...") # dump all features as array features = dumpFeatures(rootDir) end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) start = t.time() print("Constructing Vocabulary Tree ... ") # average of all values in row root = features.mean(axis=0) nodes[0] = root # Array of indices into the construct tree function featuresIDs = [x for x in range(len(features))] constructTree(0, featuresIDs, 0) end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) # memorytes() del features avgDepth = int(avgDepth / len(imagesInLeaves)) start = t.time() print("Mapping images to leaf nodes of the tree ...") n = 0 for fname in fileList: filename = dirName + "/" + fname tfidf(filename) n = n + 1 if n >= N: break # Creating weights for the leaf images tress for leafID in imagesInLeaves: for img in imagesInLeaves[leafID]: if img not in doc: doc[img] = {} # weight of leafId * frequency of occurance doc[img][leafID] = weight(leafID) * (imagesInLeaves[leafID][img]) # scale the weights in range(0,1) for img in doc: s = 0.0 for leafID in doc[img]: s += doc[img][leafID] for leafID in doc[img]: doc[img][leafID] /= s end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) # memorytes() # saving stuff # ------------------------------------------------------------------------------------------------------------ with open('model.pkl', 'wb') as fid:
return int((re.findall("\d+", s))[0])
identifier_body
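In the rows above, match() turns a query image into a histogram of leaf hits, multiplies each bin by the leaf weight, normalizes the result, and getScores() then ranks database images by the summed absolute difference against their stored, normalized leaf-weight vectors. Here is a compact sketch of that comparison on invented sparse vectors; the leaf ids, weights, and file names are made up.

def l1_normalize(vec):
    # Scale a sparse {leaf_id: weight} vector so its entries sum to 1.
    total = float(sum(vec.values()))
    return {k: v / total for k, v in vec.items()} if total else dict(vec)

def l1_distance(q, d):
    # Sum of absolute differences over the union of leaves; missing entries count as 0.
    return sum(abs(q.get(leaf, 0.0) - d.get(leaf, 0.0)) for leaf in set(q) | set(d))

# Query histogram already multiplied by per-leaf weights, then normalized.
query = l1_normalize({101: 3 * 1.2, 205: 1 * 0.4, 310: 2 * 2.0})

# Pre-computed, normalized database vectors (the role of doc[img][leafID] above).
docs = {
    "data/full/img_07.jpg": l1_normalize({101: 2.4, 310: 4.0}),
    "data/full/img_31.jpg": l1_normalize({205: 0.8, 999: 1.0}),
}

best = min(docs, key=lambda img: l1_distance(query, docs[img]))
print(best)   # data/full/img_07.jpg -- the closest leaf-weight profile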
creat_model_save.py
..] doc = {} # bestN = 4 # result = np.array([0, 0, 0, 0]) # maxDepth = 5 avgDepth = 0 # If the values are supplied as command line arguments if len(sys.argv) == 3: branches = int(sys.argv[1]) maxDepth = int(sys.argv[2]) model = MiniBatchKMeans(n_clusters=branches) # The KMeans Clustering Model sift = cv2.xfeatures2d.SIFT_create(nfeatures=500) # SIFT Feature extractor model leafClusterSize = 2 * branches fileList = sorted(os.listdir('data/full')) dirName = 'data/full' fileList1 = sorted(os.listdir('data/full1')) dirName1 = 'data/full1' # ------------------------------------------------------------------------------------------------------------ # Function to dump all the SIFT descriptors from training data in the feature space def dumpFeatures(rootDir): features = [] n = 0 for fname in fileList: # print("Reading Image: " + dirName + "/" + fname) kp, des = feat(dirName + "/" + fname) for d in des: features.append(d) del kp, des n = n + 1 if n >= N: break features = np.array(features) return features # Function to construct the vocabulary tree def constructTree(node, featuresIDs, depth): global nodeIndex, nodes, tree, imagesInLeaves, avgDepth tree[node] = [] if len(featuresIDs) >= leafClusterSize and depth < maxDepth: # Here we will fetch the cluster from the indices and then use it to fit the kmeans # And then just after that we will delete the cluster model.fit([features[i] for i in featuresIDs]) childFeatureIDs = [[] for i in range(branches)] for i in range(len(featuresIDs)): childFeatureIDs[model.labels_[i]].append(featuresIDs[i]) for i in range(branches): nodeIndex = nodeIndex + 1 nodes[nodeIndex] = model.cluster_centers_[i] tree[node].append(nodeIndex) constructTree(nodeIndex, childFeatureIDs[i], depth + 1) else: imagesInLeaves[node] = {} avgDepth = avgDepth + depth # Function to lookup a SIFT descriptor in the vocabulary tree, returns a leaf cluster def lookup(descriptor, node): D = float("inf") goto = None for child in tree[node]: # Difference between them and magnitude of the vector dist = np.linalg.norm([nodes[child] - descriptor]) if D > dist: D = dist goto = child if tree[goto] == []: return goto return lookup(descriptor, goto) # Constructs the inverted file frequency index def tfidf(filename): global imagesInLeaves kp, des = feat(dirName + "/" + fname) for d in des: leafID = lookup(d, 0) if filename in imagesInLeaves[leafID]: imagesInLeaves[leafID][filename] += 1 else: imagesInLeaves[leafID][filename] = 1 del kp, des
# Returns the scores of the images in the dataset def getScores(q): scores = {} n = 0 curr = [float("inf"), float("inf"), float("inf"), float("inf")] currimg = ["", "", "", ""] for fname in fileList: img = dirName + "/" + fname scores[img] = 0 for leafID in imagesInLeaves: if leafID in doc[img] and leafID in q: scores[img] += math.fabs(q[leafID] - doc[img][leafID]) elif leafID in q and leafID not in doc[img]: scores[img] += math.fabs(q[leafID]) elif leafID not in q and leafID in doc[img]: scores[img] += math.fabs(doc[img][leafID]) if scores[img] > curr[-1]: break if scores[img] <= curr[0]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = currimg[1], curr[1] currimg[1], curr[1] = currimg[0], curr[0] currimg[0], curr[0] = img, scores[img] elif scores[img] > curr[0] and scores[img] <= curr[1]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = currimg[1], curr[1] currimg[1], curr[1] = img, scores[img] elif scores[img] > curr[1] and scores[img] <= curr[2]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = img, scores[img] elif scores[img] > curr[2] and scores[img] <= curr[3]: currimg[3], curr[3] = img, scores[img] n = n + 1 if n >= N: break return currimg # Return the bestN best matches def findBest(scores, bestN): sorted_scores = sorted(scores.items(), key=operator.itemgetter(1)) return sorted_scores[:bestN] def accuracy(F, M1, M2, M3, M4): a = [0, 0, 0, 0] group = int(F / 4) if int(M1 / 4) == group: a[0] = 1 if int(M2 / 4) == group: a[1] = 1 if int(M3 / 4) == group: a[2] = 1 if int(M4 / 4) == group: a[3] = 1 return np.array(a) # Finds 4 best matches for the query def match(filename): # dirName + "/" + fname # q is the frequency of this image appearing in each of the leaf nodes q = {} kp, des = feat(filename, 480) if des is not None: for d in des: leafID = lookup(d, 0) if leafID in q: q[leafID] += 1 else: q[leafID] = 1 else: print "error at {}".format(filename) s = 0.0 for key in q: q[key] = q[key] * weight(key) s += q[key] for key in q: q[key] = q[key] / s return getScores(q) # return findBest(scores, bestN) def getImgID(s): return int((re.findall("\d+", s))[0]) # ------------------------------------------------------------------------------------------------------------ start = t.time() print("Extracting Features: " + rootDir + " ...") # dump all features as array features = dumpFeatures(rootDir) end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) start = t.time() print("Constructing Vocabulary Tree ... 
") # average of all values in row root = features.mean(axis=0) nodes[0] = root # Array of indices into the construct tree function featuresIDs = [x for x in range(len(features))] constructTree(0, featuresIDs, 0) end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) # memorytes() del features avgDepth = int(avgDepth / len(imagesInLeaves)) start = t.time() print("Mapping images to leaf nodes of the tree ...") n = 0 for fname in fileList: filename = dirName + "/" + fname tfidf(filename) n = n + 1 if n >= N: break # Creating weights for the leaf images tress for leafID in imagesInLeaves: for img in imagesInLeaves[leafID]: if img not in doc: doc[img] = {} # weight of leafId * frequency of occurance doc[img][leafID] = weight(leafID) * (imagesInLeaves[leafID][img]) # scale the weights in range(0,1) for img in doc: s = 0.0 for leafID in doc[img]: s += doc[img][leafID] for leafID in doc[img]: doc[img][leafID] /= s end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) # memorytes() # saving stuff # ------------------------------------------------------------------------------------------------------------ with open('model.pkl', 'wb') as fid:
# This function returns the weight of a leaf node def weight(leafID): return math.log1p(N / (1.0 * len(imagesInLeaves[leafID])))
random_line_split
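The middle of the row above is the per-leaf weight. Under the usual vocabulary-tree convention this is an IDF-style term, roughly w(leaf) = ln(1 + N / n_leaf), where N is the number of database images and n_leaf the number of images that reach the leaf, so rare leaves carry more weight. A tiny worked example with invented counts:

import math

N = 1000                        # images in the database
n_common, n_rare = 800, 5       # images reaching a very common vs. a very rare leaf

w_common = math.log1p(N / float(n_common))   # ~0.81 -> frequent leaf, low weight
w_rare = math.log1p(N / float(n_rare))       # ~5.30 -> rare leaf, high weight
print(w_common, w_rare)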
creat_model_save.py
..] doc = {} # bestN = 4 # result = np.array([0, 0, 0, 0]) # maxDepth = 5 avgDepth = 0 # If the values are supplied as command line arguments if len(sys.argv) == 3: branches = int(sys.argv[1]) maxDepth = int(sys.argv[2]) model = MiniBatchKMeans(n_clusters=branches) # The KMeans Clustering Model sift = cv2.xfeatures2d.SIFT_create(nfeatures=500) # SIFT Feature extractor model leafClusterSize = 2 * branches fileList = sorted(os.listdir('data/full')) dirName = 'data/full' fileList1 = sorted(os.listdir('data/full1')) dirName1 = 'data/full1' # ------------------------------------------------------------------------------------------------------------ # Function to dump all the SIFT descriptors from training data in the feature space def dumpFeatures(rootDir): features = [] n = 0 for fname in fileList: # print("Reading Image: " + dirName + "/" + fname) kp, des = feat(dirName + "/" + fname) for d in des: features.append(d) del kp, des n = n + 1 if n >= N: break features = np.array(features) return features # Function to construct the vocabulary tree def constructTree(node, featuresIDs, depth): global nodeIndex, nodes, tree, imagesInLeaves, avgDepth tree[node] = [] if len(featuresIDs) >= leafClusterSize and depth < maxDepth: # Here we will fetch the cluster from the indices and then use it to fit the kmeans # And then just after that we will delete the cluster model.fit([features[i] for i in featuresIDs]) childFeatureIDs = [[] for i in range(branches)] for i in range(len(featuresIDs)): childFeatureIDs[model.labels_[i]].append(featuresIDs[i]) for i in range(branches): nodeIndex = nodeIndex + 1 nodes[nodeIndex] = model.cluster_centers_[i] tree[node].append(nodeIndex) constructTree(nodeIndex, childFeatureIDs[i], depth + 1) else: imagesInLeaves[node] = {} avgDepth = avgDepth + depth # Function to lookup a SIFT descriptor in the vocabulary tree, returns a leaf cluster def lookup(descriptor, node): D = float("inf") goto = None for child in tree[node]: # Difference between them and magnitude of the vector dist = np.linalg.norm([nodes[child] - descriptor]) if D > dist: D = dist goto = child if tree[goto] == []: return goto return lookup(descriptor, goto) # Constructs the inverted file frequency index def tfidf(filename): global imagesInLeaves kp, des = feat(dirName + "/" + fname) for d in des: leafID = lookup(d, 0) if filename in imagesInLeaves[leafID]: imagesInLeaves[leafID][filename] += 1 else: imagesInLeaves[leafID][filename] = 1 del kp, des # This function returns the weight of a leaf node def weight(leafID): return math.log1p(N / 1.0 * len(imagesInLeaves[leafID])) # Returns the scores of the images in the dataset def getScores(q): scores = {} n = 0 curr = [float("inf"), float("inf"), float("inf"), float("inf")] currimg = ["", "", "", ""] for fname in fileList: img = dirName + "/" + fname scores[img] = 0 for leafID in imagesInLeaves: if leafID in doc[img] and leafID in q: scores[img] += math.fabs(q[leafID] - doc[img][leafID]) elif leafID in q and leafID not in doc[img]: scores[img] += math.fabs(q[leafID]) elif leafID not in q and leafID in doc[img]: scores[img] += math.fabs(doc[img][leafID]) if scores[img] > curr[-1]: break if scores[img] <= curr[0]:
elif scores[img] > curr[0] and scores[img] <= curr[1]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = currimg[1], curr[1] currimg[1], curr[1] = img, scores[img] elif scores[img] > curr[1] and scores[img] <= curr[2]: currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = img, scores[img] elif scores[img] > curr[2] and scores[img] <= curr[3]: currimg[3], curr[3] = img, scores[img] n = n + 1 if n >= N: break return currimg # Return the bestN best matches def findBest(scores, bestN): sorted_scores = sorted(scores.items(), key=operator.itemgetter(1)) return sorted_scores[:bestN] def accuracy(F, M1, M2, M3, M4): a = [0, 0, 0, 0] group = int(F / 4) if int(M1 / 4) == group: a[0] = 1 if int(M2 / 4) == group: a[1] = 1 if int(M3 / 4) == group: a[2] = 1 if int(M4 / 4) == group: a[3] = 1 return np.array(a) # Finds 4 best matches for the query def match(filename): # dirName + "/" + fname # q is the frequency of this image appearing in each of the leaf nodes q = {} kp, des = feat(filename, 480) if des is not None: for d in des: leafID = lookup(d, 0) if leafID in q: q[leafID] += 1 else: q[leafID] = 1 else: print "error at {}".format(filename) s = 0.0 for key in q: q[key] = q[key] * weight(key) s += q[key] for key in q: q[key] = q[key] / s return getScores(q) # return findBest(scores, bestN) def getImgID(s): return int((re.findall("\d+", s))[0]) # ------------------------------------------------------------------------------------------------------------ start = t.time() print("Extracting Features: " + rootDir + " ...") # dump all features as array features = dumpFeatures(rootDir) end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) start = t.time() print("Constructing Vocabulary Tree ... ") # average of all values in row root = features.mean(axis=0) nodes[0] = root # Array of indices into the construct tree function featuresIDs = [x for x in range(len(features))] constructTree(0, featuresIDs, 0) end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) # memorytes() del features avgDepth = int(avgDepth / len(imagesInLeaves)) start = t.time() print("Mapping images to leaf nodes of the tree ...") n = 0 for fname in fileList: filename = dirName + "/" + fname tfidf(filename) n = n + 1 if n >= N: break # Creating weights for the leaf images tress for leafID in imagesInLeaves: for img in imagesInLeaves[leafID]: if img not in doc: doc[img] = {} # weight of leafId * frequency of occurance doc[img][leafID] = weight(leafID) * (imagesInLeaves[leafID][img]) # scale the weights in range(0,1) for img in doc: s = 0.0 for leafID in doc[img]: s += doc[img][leafID] for leafID in doc[img]: doc[img][leafID] /= s end = t.time() print("Time Taken: ", str(round((end - start) / 60, 2))) # memorytes() # saving stuff # ------------------------------------------------------------------------------------------------------------ with open('model.pkl', 'wb') as fid:
currimg[3], curr[3] = currimg[2], curr[2] currimg[2], curr[2] = currimg[1], curr[1] currimg[1], curr[1] = currimg[0], curr[0] currimg[0], curr[0] = img, scores[img]
conditional_block
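The conditional block above keeps the four best (lowest-score) matches by hand-shuffling two parallel four-slot lists while streaming over the database. Once all scores are available, the same result can be read off with heapq.nsmallest, which is essentially what the unused findBest helper does with sorted(); the score dictionary below is invented for illustration.

import heapq

scores = {
    "data/full/img_03.jpg": 0.41,
    "data/full/img_07.jpg": 0.25,
    "data/full/img_12.jpg": 0.88,
    "data/full/img_19.jpg": 0.33,
    "data/full/img_31.jpg": 0.57,
}

# The four images with the smallest accumulated distance, best first.
best_four = heapq.nsmallest(4, scores, key=scores.get)
print(best_four)   # img_07, img_19, img_03, img_31

The manual four-slot version avoids holding every score at once, which matters when the database is large; the heapq form is simply easier to read after the fact.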
pipeline_v2.py
## Part 2: explore data def take_sample(df, fraction): return df.sample(frac = fraction) def show_columns(df): return df.columns def descrip_stats(df): return df.describe() def counts_per_variable(df, x): return df.groupby(x).size() def group_and_describe(df, x): return df.groupby(x).describe() def ctab_percent(df, x, y): return pd.crosstab(df.loc[:, x], df.loc[:,y], normalize='index') def ctab_raw(df, x, y): return pd.crosstab(df.loc[:, x], df.loc[:,y]) def basic_hist(df, x, title_text): sns.distplot(df[x]).set_title(title_text) plt.show() return def basic_scatter(df, x, y, title_text): g = sns.lmplot(x, y, data= df) g = (g.set_axis_labels(x, y).set_title(title_text)) plt.show() return def correlation_heatmap(df, title_text): corrmat = df.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=.8, square=True).set_title(title_text) plt.show() return def basic_boxplot(df, colname, title_text): sns.boxplot(y=df[colname]).set_title(title_text) plt.show() return ## Part III: Pre-processing data def show_nulls(df): return df.isna().sum().sort_values(ascending=False) def fill_whole_df_with_mean(df): num_cols = len(df.columns) for i in range(0, num_cols): df.iloc[:,i] = fill_col_with_mean(df.iloc[:,i]) return def fill_allNA_mode(df): num_col = len(df.columns.tolist()) for i in range(0,num_col): df_feats.iloc[:,i] = df_feats.iloc[:,i].fillna(df_feats.iloc[:,i].mode()[0]) return df def fill_col_with_mean(df): return df.fillna(df.mean()) def left_merge(df_left, df_right, merge_column): return pd.merge(df_left, df_right, how = 'left', on = merge_column) # generating features def generate_dummy(df, colname, attach = False): # generate dummy variables from a categorical variable # if attach == True, then attach the dummy variables to the original dataframe if (attach == False): return pd.get_dummies(df[colname]) else: return pd.concat([df, pd.get_dummies(df[colname])], axis = 1) def discret_eqlbins(df, colname, bin_num): # cut continuous variable into bin_num bins return pd.cut(df[colname], bin_num) def discret_quantiles(df, colname, quantile_num): # cut cont. 
variable into quantiles return pd.qcut(df[colname], quantile_num) # feature-scaling from sklearn import preprocessing #min_max_scaler = preprocessing.MinMaxScaler() #df_scaled = min_max_scaler.fit_transform(df) # standardize data # scaled_column = scale(df[['x','y']]) from sklearn.preprocessing import scale def scale_df(df, features_list): temp_scaled = scale(df[features_list]) #return a DF return pd.DataFrame(temp_scaled, columns= df.columns) # split data into training and test sets from sklearn.model_selection import train_test_split def split_traintest(df_features, df_target, test_size = 0.2): X_train, X_test, Y_train, Y_test = train_test_split(df_features, df_target, test_size = test_size) return X_train, X_test, Y_train, Y_test # methods for training classifiers from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.naive_bayes import MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import GradientBoostingClassifier def fit_randomforest(x_train, y_train, feature_number, num_trees, depth_num, criterion_choice): rf_clf = RandomForestClassifier(max_features = feature_number, n_estimators = num_trees, max_depth = depth_num, criterion = criterion_choice) rf_clf.fit(x_train,y_train) return rf_clf def fit_svm(x_train, y_train, c_value, kern, rbf_gam): svm_clf = SVC(C = c_value, kernel = kern, gamma = rbf_gam, probability = True) svm_clf.fit(x_train, y_train) return svm_clf def fit_naivebayes(x_train, y_train, alpha_value): nb_clf = MultinomialNB(alpha = alpha_value) nb_clf.fit(x_train,y_train) return nb_clf def fit_knn(x_train, y_train, neighbor_num, distance_type, weight_type): knn_clf = KNeighborsClassifier(n_neighbors= neighbor_num, metric= distance_type, weights = weight_type) knn_clf.fit(x_train, y_train) return knn_clf def fit_dtree(x_train, y_train, crit_par, split_par, maxdepth_par, minsplit_par,maxfeat_par, minleaf_par, maxleaf_par): dt_clf = DecisionTreeClassifier(criterion = crit_par, splitter = split_par, max_depth = maxdepth_par, min_samples_split = minsplit_par, max_features = maxfeat_par, min_samples_leaf = minleaf_par, max_leaf_nodes = maxleaf_par) dt_clf.fit(x_train, y_train) return dt_clf def fit_logit(x_train, y_train, penalty_para, c_para): logit_clf = LogisticRegression(penalty = penalty_para, C = c_para) logit_clf.fit(x_train,y_train) return logit_clf # grid methods from sklearn.model_selection import GridSearchCV def grid_cv(clf, param_grid, scoring, cv, x_train, y_train): # initialize the grid, scoring = a scoring metric or a dictionary of metrics, # refit is necessary when u have a list of scoring metrics, it determines how the gridsearch algorithm decides the best estimator. grid = GridSearchCV(clf(), param_grid, scoring = scoring, cv= cv) grid.fit(x_train, y_train) # call the best classifier: grid.best_estimator_ # see all performances: return grid def grid_cv_mtp(clf, param_grid, scoring, cv = 5, refit_metric = 'roc'): # initialize the grid, scoring = a scoring metric or a dictionary of metrics, # refit is necessary when u have a list of scoring metrics, it determines how the gridsearch algorithm decides the best estimator. 
grid = GridSearchCV(clf(), param_grid, scoring = scoring, cv= cv_num, refit = refit_metric) grid.fit(x_train, y_train) # call the best classifier: grid.best_estimator_ # see all performances: return grid model_params ={ RandomForestClassifier: { 'max_features': ["auto", "sqrt", "log2", 0.2], 'n_estimators' : [5, 10, 20, 50, 100, 300, 500], "max_depth": [3,5,8], "criterion": ["gini", "entropy"] }, SVC:{ "C": [10**i for i in range(-5, 5)], "kernel":["linear", "rbf"], "gamma": [10**i for i in np.arange(0, 1, 0.05)], "probability": [True] }, MultinomialNB:{ "alpha": [1, 5, 10, 25, 100] }, KNeighborsClassifier:{ "n_neighbors":[3,5,8,10, 13,15,20,25,30,50], "metric": ["euclidean", "manhattan", "chebyshev" ], "weights":["uniform", "distance"] }, DecisionTreeClassifier:{ "criterion": ["gini", "entropy"], "splitter": ["best", "random"], "max_depth": [None, "auto", "sqrt", "log2", 5, 0.3 ], "min_samples_split": [1, 3, 5, 7, 9 ,15 ,20], "max_features": [2, 3, 4, 5], "min_samples_leaf": [1,2,3,4,5], "max_leaf_nodes": [None, 2, 3 ,4, 5] }, LogisticRegression:{ "penalty": ['l1', 'l2'], "C": [10**-5, 10**-2, 10**-1, 1, 10, 10**2, 10**5] }, GradientBoostingClassifier
if filetype == "csv": return pd.read_csv(fn) if filetype == "excel": return pd.read_excel(fn) if filetype == "sql": return pd.read_sql(fn, con=conn) else: return print("I only have CSVs at the moment!")
identifier_body
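The completed body above dispatches on a file type when loading data. A minimal, self-contained sketch of such a loader follows; the names read_data, fn, filetype and conn mirror the record, but the default arguments and the None fallback are assumptions, not the original pipeline_v2.py code.

import pandas as pd

def read_data(fn, filetype="csv", conn=None):
    # Dispatch on the requested format; the "sql" branch assumes an open
    # DB-API/SQLAlchemy connection is passed in as conn.
    if filetype == "csv":
        return pd.read_csv(fn)
    elif filetype == "excel":
        return pd.read_excel(fn)
    elif filetype == "sql":
        return pd.read_sql(fn, con=conn)
    print("I only have CSVs at the moment!")
    return None

# e.g. df = read_data("data.csv")  # path is illustrative only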
pipeline_v2.py
): return pd.crosstab(df.loc[:, x], df.loc[:,y]) def basic_hist(df, x, title_text): sns.distplot(df[x]).set_title(title_text) plt.show() return def basic_scatter(df, x, y, title_text): g = sns.lmplot(x, y, data= df) g = (g.set_axis_labels(x, y).set_title(title_text)) plt.show() return def correlation_heatmap(df, title_text): corrmat = df.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=.8, square=True).set_title(title_text) plt.show() return def basic_boxplot(df, colname, title_text): sns.boxplot(y=df[colname]).set_title(title_text) plt.show() return ## Part III: Pre-processing data def show_nulls(df): return df.isna().sum().sort_values(ascending=False) def
(df): num_cols = len(df.columns) for i in range(0, num_cols): df.iloc[:,i] = fill_col_with_mean(df.iloc[:,i]) return def fill_allNA_mode(df): num_col = len(df.columns.tolist()) for i in range(0,num_col): df_feats.iloc[:,i] = df_feats.iloc[:,i].fillna(df_feats.iloc[:,i].mode()[0]) return df def fill_col_with_mean(df): return df.fillna(df.mean()) def left_merge(df_left, df_right, merge_column): return pd.merge(df_left, df_right, how = 'left', on = merge_column) # generating features def generate_dummy(df, colname, attach = False): # generate dummy variables from a categorical variable # if attach == True, then attach the dummy variables to the original dataframe if (attach == False): return pd.get_dummies(df[colname]) else: return pd.concat([df, pd.get_dummies(df[colname])], axis = 1) def discret_eqlbins(df, colname, bin_num): # cut continuous variable into bin_num bins return pd.cut(df[colname], bin_num) def discret_quantiles(df, colname, quantile_num): # cut cont. variable into quantiles return pd.qcut(df[colname], quantile_num) # feature-scaling from sklearn import preprocessing #min_max_scaler = preprocessing.MinMaxScaler() #df_scaled = min_max_scaler.fit_transform(df) # standardize data # scaled_column = scale(df[['x','y']]) from sklearn.preprocessing import scale def scale_df(df, features_list): temp_scaled = scale(df[features_list]) #return a DF return pd.DataFrame(temp_scaled, columns= df.columns) # split data into training and test sets from sklearn.model_selection import train_test_split def split_traintest(df_features, df_target, test_size = 0.2): X_train, X_test, Y_train, Y_test = train_test_split(df_features, df_target, test_size = test_size) return X_train, X_test, Y_train, Y_test # methods for training classifiers from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.naive_bayes import MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import GradientBoostingClassifier def fit_randomforest(x_train, y_train, feature_number, num_trees, depth_num, criterion_choice): rf_clf = RandomForestClassifier(max_features = feature_number, n_estimators = num_trees, max_depth = depth_num, criterion = criterion_choice) rf_clf.fit(x_train,y_train) return rf_clf def fit_svm(x_train, y_train, c_value, kern, rbf_gam): svm_clf = SVC(C = c_value, kernel = kern, gamma = rbf_gam, probability = True) svm_clf.fit(x_train, y_train) return svm_clf def fit_naivebayes(x_train, y_train, alpha_value): nb_clf = MultinomialNB(alpha = alpha_value) nb_clf.fit(x_train,y_train) return nb_clf def fit_knn(x_train, y_train, neighbor_num, distance_type, weight_type): knn_clf = KNeighborsClassifier(n_neighbors= neighbor_num, metric= distance_type, weights = weight_type) knn_clf.fit(x_train, y_train) return knn_clf def fit_dtree(x_train, y_train, crit_par, split_par, maxdepth_par, minsplit_par,maxfeat_par, minleaf_par, maxleaf_par): dt_clf = DecisionTreeClassifier(criterion = crit_par, splitter = split_par, max_depth = maxdepth_par, min_samples_split = minsplit_par, max_features = maxfeat_par, min_samples_leaf = minleaf_par, max_leaf_nodes = maxleaf_par) dt_clf.fit(x_train, y_train) return dt_clf def fit_logit(x_train, y_train, penalty_para, c_para): logit_clf = LogisticRegression(penalty = penalty_para, C = c_para) logit_clf.fit(x_train,y_train) return logit_clf # grid methods from sklearn.model_selection import GridSearchCV def grid_cv(clf, 
param_grid, scoring, cv, x_train, y_train): # initialize the grid, scoring = a scoring metric or a dictionary of metrics, # refit is necessary when u have a list of scoring metrics, it determines how the gridsearch algorithm decides the best estimator. grid = GridSearchCV(clf(), param_grid, scoring = scoring, cv= cv) grid.fit(x_train, y_train) # call the best classifier: grid.best_estimator_ # see all performances: return grid def grid_cv_mtp(clf, param_grid, scoring, cv = 5, refit_metric = 'roc'): # initialize the grid, scoring = a scoring metric or a dictionary of metrics, # refit is necessary when u have a list of scoring metrics, it determines how the gridsearch algorithm decides the best estimator. grid = GridSearchCV(clf(), param_grid, scoring = scoring, cv= cv_num, refit = refit_metric) grid.fit(x_train, y_train) # call the best classifier: grid.best_estimator_ # see all performances: return grid model_params ={ RandomForestClassifier: { 'max_features': ["auto", "sqrt", "log2", 0.2], 'n_estimators' : [5, 10, 20, 50, 100, 300, 500], "max_depth": [3,5,8], "criterion": ["gini", "entropy"] }, SVC:{ "C": [10**i for i in range(-5, 5)], "kernel":["linear", "rbf"], "gamma": [10**i for i in np.arange(0, 1, 0.05)], "probability": [True] }, MultinomialNB:{ "alpha": [1, 5, 10, 25, 100] }, KNeighborsClassifier:{ "n_neighbors":[3,5,8,10, 13,15,20,25,30,50], "metric": ["euclidean", "manhattan", "chebyshev" ], "weights":["uniform", "distance"] }, DecisionTreeClassifier:{ "criterion": ["gini", "entropy"], "splitter": ["best", "random"], "max_depth": [None, "auto", "sqrt", "log2", 5, 0.3 ], "min_samples_split": [1, 3, 5, 7, 9 ,15 ,20], "max_features": [2, 3, 4, 5], "min_samples_leaf": [1,2,3,4,5], "max_leaf_nodes": [None, 2, 3 ,4, 5] }, LogisticRegression:{ "penalty": ['l1', 'l2'], "C": [10**-5, 10**-2, 10**-1, 1, 10, 10**2, 10**5] }, GradientBoostingClassifier:{ 'loss': ["deviance", "exponential"], 'learning_rate': [0.01, 0.1, 0.2, 0.3], 'n_estimators': [3, 6, 10, 20, 100, 200, 500] } } def classifier_comparison(model_params, x_train, y_train, eva_metric, cv_num): comparison_results = {} for model, param_grid in model_params.items(): # initialize gridsearch object grid = GridSearchCV(clf(), param_grid, scoring = eva_metric, cv= cv_num) grid.fit(x_train, y_train) comparison_results[model] ={} comparison_results[model]['cv_results'] = grid.cv_results_ comparison_results
fill_whole_df_with_mean
identifier_name
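The gap filled above is the name of a mean-imputation helper. As written in the record, that helper mutates column slices and returns None, and fill_allNA_mode refers to an outer df_feats variable. A hedged sketch that returns the imputed frame instead is shown here; it is a possible cleanup, not the author's code.

import pandas as pd

def fill_whole_df_with_mean(df):
    # Mean-impute every numeric column and return a new DataFrame.
    return df.fillna(df.mean(numeric_only=True))

def fill_allNA_mode(df):
    # Mode-impute each column; columns whose mode is undefined are left untouched.
    return df.apply(lambda col: col.fillna(col.mode()[0]) if not col.mode().empty else col)

frame = pd.DataFrame({"a": [1.0, None, 3.0], "b": ["x", None, "x"]})
print(fill_allNA_mode(fill_whole_df_with_mean(frame)))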
pipeline_v2.py
left_merge(df_left, df_right, merge_column): return pd.merge(df_left, df_right, how = 'left', on = merge_column) # generating features def generate_dummy(df, colname, attach = False): # generate dummy variables from a categorical variable # if attach == True, then attach the dummy variables to the original dataframe if (attach == False): return pd.get_dummies(df[colname]) else: return pd.concat([df, pd.get_dummies(df[colname])], axis = 1) def discret_eqlbins(df, colname, bin_num): # cut continuous variable into bin_num bins return pd.cut(df[colname], bin_num) def discret_quantiles(df, colname, quantile_num): # cut cont. variable into quantiles return pd.qcut(df[colname], quantile_num) # feature-scaling from sklearn import preprocessing #min_max_scaler = preprocessing.MinMaxScaler() #df_scaled = min_max_scaler.fit_transform(df) # standardize data # scaled_column = scale(df[['x','y']]) from sklearn.preprocessing import scale def scale_df(df, features_list): temp_scaled = scale(df[features_list]) #return a DF return pd.DataFrame(temp_scaled, columns= df.columns) # split data into training and test sets from sklearn.model_selection import train_test_split def split_traintest(df_features, df_target, test_size = 0.2): X_train, X_test, Y_train, Y_test = train_test_split(df_features, df_target, test_size = test_size) return X_train, X_test, Y_train, Y_test # methods for training classifiers from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.naive_bayes import MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import GradientBoostingClassifier def fit_randomforest(x_train, y_train, feature_number, num_trees, depth_num, criterion_choice): rf_clf = RandomForestClassifier(max_features = feature_number, n_estimators = num_trees, max_depth = depth_num, criterion = criterion_choice) rf_clf.fit(x_train,y_train) return rf_clf def fit_svm(x_train, y_train, c_value, kern, rbf_gam): svm_clf = SVC(C = c_value, kernel = kern, gamma = rbf_gam, probability = True) svm_clf.fit(x_train, y_train) return svm_clf def fit_naivebayes(x_train, y_train, alpha_value): nb_clf = MultinomialNB(alpha = alpha_value) nb_clf.fit(x_train,y_train) return nb_clf def fit_knn(x_train, y_train, neighbor_num, distance_type, weight_type): knn_clf = KNeighborsClassifier(n_neighbors= neighbor_num, metric= distance_type, weights = weight_type) knn_clf.fit(x_train, y_train) return knn_clf def fit_dtree(x_train, y_train, crit_par, split_par, maxdepth_par, minsplit_par,maxfeat_par, minleaf_par, maxleaf_par): dt_clf = DecisionTreeClassifier(criterion = crit_par, splitter = split_par, max_depth = maxdepth_par, min_samples_split = minsplit_par, max_features = maxfeat_par, min_samples_leaf = minleaf_par, max_leaf_nodes = maxleaf_par) dt_clf.fit(x_train, y_train) return dt_clf def fit_logit(x_train, y_train, penalty_para, c_para): logit_clf = LogisticRegression(penalty = penalty_para, C = c_para) logit_clf.fit(x_train,y_train) return logit_clf # grid methods from sklearn.model_selection import GridSearchCV def grid_cv(clf, param_grid, scoring, cv, x_train, y_train): # initialize the grid, scoring = a scoring metric or a dictionary of metrics, # refit is necessary when u have a list of scoring metrics, it determines how the gridsearch algorithm decides the best estimator. 
grid = GridSearchCV(clf(), param_grid, scoring = scoring, cv= cv) grid.fit(x_train, y_train) # call the best classifier: grid.best_estimator_ # see all performances: return grid def grid_cv_mtp(clf, param_grid, scoring, cv = 5, refit_metric = 'roc'): # initialize the grid, scoring = a scoring metric or a dictionary of metrics, # refit is necessary when u have a list of scoring metrics, it determines how the gridsearch algorithm decides the best estimator. grid = GridSearchCV(clf(), param_grid, scoring = scoring, cv= cv_num, refit = refit_metric) grid.fit(x_train, y_train) # call the best classifier: grid.best_estimator_ # see all performances: return grid model_params ={ RandomForestClassifier: { 'max_features': ["auto", "sqrt", "log2", 0.2], 'n_estimators' : [5, 10, 20, 50, 100, 300, 500], "max_depth": [3,5,8], "criterion": ["gini", "entropy"] }, SVC:{ "C": [10**i for i in range(-5, 5)], "kernel":["linear", "rbf"], "gamma": [10**i for i in np.arange(0, 1, 0.05)], "probability": [True] }, MultinomialNB:{ "alpha": [1, 5, 10, 25, 100] }, KNeighborsClassifier:{ "n_neighbors":[3,5,8,10, 13,15,20,25,30,50], "metric": ["euclidean", "manhattan", "chebyshev" ], "weights":["uniform", "distance"] }, DecisionTreeClassifier:{ "criterion": ["gini", "entropy"], "splitter": ["best", "random"], "max_depth": [None, "auto", "sqrt", "log2", 5, 0.3 ], "min_samples_split": [1, 3, 5, 7, 9 ,15 ,20], "max_features": [2, 3, 4, 5], "min_samples_leaf": [1,2,3,4,5], "max_leaf_nodes": [None, 2, 3 ,4, 5] }, LogisticRegression:{ "penalty": ['l1', 'l2'], "C": [10**-5, 10**-2, 10**-1, 1, 10, 10**2, 10**5] }, GradientBoostingClassifier:{ 'loss': ["deviance", "exponential"], 'learning_rate': [0.01, 0.1, 0.2, 0.3], 'n_estimators': [3, 6, 10, 20, 100, 200, 500] } } def classifier_comparison(model_params, x_train, y_train, eva_metric, cv_num): comparison_results = {} for model, param_grid in model_params.items(): # initialize gridsearch object grid = GridSearchCV(clf(), param_grid, scoring = eva_metric, cv= cv_num) grid.fit(x_train, y_train) comparison_results[model] ={} comparison_results[model]['cv_results'] = grid.cv_results_ comparison_results[model]['best_estimator'] = grid.best_estimator_ comparison_results[model]['best_score'] = grid.best_score_ comparison_results[model]['best_params'] = grid.best_params_ return comparison_results ## Part VI: Evaluating the classifier #generate predictions according to a custom threshold def make_predictions(clf, x_test, threshold = 0.7): # threshold = the probability threshold for something to be a 0. # generate array with predicted probabilities pred_array = clf.predict_proba(x_test) # initialize an empty array for the predictions pred_generated = np.array([]) # predict the first entry if pred_array[0][0] >= threshold: pred_generated = np.hstack([pred_generated, 0]) else: pred_generated = np.hstack([pred_generated, 1]) # loops over the rest of the array for i in range(1,len(x_test)): if pred_array[i][0] >= threshold: pred_generated = np.vstack([pred_generated, 0]) else: pred_generated = np.vstack([pred_generated, 1]) # return an np.array return pred_generated from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, precision_score, recall_score def evaluateAccuracy(clf,predictDF, truthDF): correct_pred = 0 pred_x = clf.predict(predictDF) for i in range(0,len(predictDF)): if pred_x[i] == truthDF.iloc[i]:
correct_pred +=1
conditional_block
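The conditional block restored above increments the correct-prediction counter inside evaluateAccuracy, and make_predictions builds thresholded labels row by row with hstack/vstack. A vectorised sketch of the same two ideas follows, assuming a fitted binary classifier whose classes_ are [0, 1] and 1-D ground-truth labels; it is an alternative formulation, not the pipeline's own implementation.

import numpy as np

def make_predictions(clf, x_test, threshold=0.7):
    # Predict class 0 whenever P(class 0) >= threshold, otherwise class 1.
    proba_class0 = clf.predict_proba(x_test)[:, 0]
    return np.where(proba_class0 >= threshold, 0, 1)

def evaluate_accuracy(clf, x_test, y_true):
    # Share of plain predict() outputs that match the ground truth.
    return float(np.mean(clf.predict(x_test) == np.asarray(y_true)))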
pipeline_v2.py
y): return pd.crosstab(df.loc[:, x], df.loc[:,y]) def basic_hist(df, x, title_text): sns.distplot(df[x]).set_title(title_text) plt.show() return def basic_scatter(df, x, y, title_text): g = sns.lmplot(x, y, data= df) g = (g.set_axis_labels(x, y).set_title(title_text)) plt.show() return def correlation_heatmap(df, title_text): corrmat = df.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=.8, square=True).set_title(title_text) plt.show() return def basic_boxplot(df, colname, title_text): sns.boxplot(y=df[colname]).set_title(title_text) plt.show() return ## Part III: Pre-processing data def show_nulls(df): return df.isna().sum().sort_values(ascending=False) def fill_whole_df_with_mean(df): num_cols = len(df.columns) for i in range(0, num_cols): df.iloc[:,i] = fill_col_with_mean(df.iloc[:,i]) return def fill_allNA_mode(df): num_col = len(df.columns.tolist()) for i in range(0,num_col): df_feats.iloc[:,i] = df_feats.iloc[:,i].fillna(df_feats.iloc[:,i].mode()[0]) return df def fill_col_with_mean(df): return df.fillna(df.mean()) def left_merge(df_left, df_right, merge_column): return pd.merge(df_left, df_right, how = 'left', on = merge_column) # generating features def generate_dummy(df, colname, attach = False): # generate dummy variables from a categorical variable # if attach == True, then attach the dummy variables to the original dataframe if (attach == False): return pd.get_dummies(df[colname]) else: return pd.concat([df, pd.get_dummies(df[colname])], axis = 1) def discret_eqlbins(df, colname, bin_num): # cut continuous variable into bin_num bins return pd.cut(df[colname], bin_num) def discret_quantiles(df, colname, quantile_num): # cut cont. variable into quantiles return pd.qcut(df[colname], quantile_num) # feature-scaling from sklearn import preprocessing #min_max_scaler = preprocessing.MinMaxScaler() #df_scaled = min_max_scaler.fit_transform(df) # standardize data # scaled_column = scale(df[['x','y']]) from sklearn.preprocessing import scale def scale_df(df, features_list): temp_scaled = scale(df[features_list]) #return a DF return pd.DataFrame(temp_scaled, columns= df.columns) # split data into training and test sets from sklearn.model_selection import train_test_split def split_traintest(df_features, df_target, test_size = 0.2): X_train, X_test, Y_train, Y_test = train_test_split(df_features, df_target, test_size = test_size) return X_train, X_test, Y_train, Y_test # methods for training classifiers from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.naive_bayes import MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import GradientBoostingClassifier def fit_randomforest(x_train, y_train, feature_number, num_trees, depth_num, criterion_choice): rf_clf = RandomForestClassifier(max_features = feature_number, n_estimators = num_trees, max_depth = depth_num, criterion = criterion_choice) rf_clf.fit(x_train,y_train) return rf_clf def fit_svm(x_train, y_train, c_value, kern, rbf_gam): svm_clf = SVC(C = c_value, kernel = kern, gamma = rbf_gam, probability = True) svm_clf.fit(x_train, y_train) return svm_clf def fit_naivebayes(x_train, y_train, alpha_value): nb_clf = MultinomialNB(alpha = alpha_value) nb_clf.fit(x_train,y_train) return nb_clf def fit_knn(x_train, y_train, neighbor_num, distance_type, weight_type): knn_clf = KNeighborsClassifier(n_neighbors= neighbor_num, metric= distance_type, 
weights = weight_type) knn_clf.fit(x_train, y_train) return knn_clf def fit_dtree(x_train, y_train, crit_par, split_par, maxdepth_par, minsplit_par,maxfeat_par, minleaf_par, maxleaf_par): dt_clf = DecisionTreeClassifier(criterion = crit_par, splitter = split_par, max_depth = maxdepth_par, min_samples_split = minsplit_par, max_features = maxfeat_par, min_samples_leaf = minleaf_par, max_leaf_nodes = maxleaf_par) dt_clf.fit(x_train, y_train) return dt_clf def fit_logit(x_train, y_train, penalty_para, c_para): logit_clf = LogisticRegression(penalty = penalty_para, C = c_para) logit_clf.fit(x_train,y_train) return logit_clf # grid methods from sklearn.model_selection import GridSearchCV def grid_cv(clf, param_grid, scoring, cv, x_train, y_train): # initialize the grid, scoring = a scoring metric or a dictionary of metrics, # refit is necessary when u have a list of scoring metrics, it determines how the gridsearch algorithm decides the best estimator. grid = GridSearchCV(clf(), param_grid, scoring = scoring, cv= cv) grid.fit(x_train, y_train) # call the best classifier: grid.best_estimator_ # see all performances: return grid def grid_cv_mtp(clf, param_grid, scoring, cv = 5, refit_metric = 'roc'): # initialize the grid, scoring = a scoring metric or a dictionary of metrics, # refit is necessary when u have a list of scoring metrics, it determines how the gridsearch algorithm decides the best estimator. grid = GridSearchCV(clf(), param_grid, scoring = scoring, cv= cv_num, refit = refit_metric) grid.fit(x_train, y_train) # call the best classifier: grid.best_estimator_ # see all performances: return grid model_params ={ RandomForestClassifier: { 'max_features': ["auto", "sqrt", "log2", 0.2], 'n_estimators' : [5, 10, 20, 50, 100, 300, 500], "max_depth": [3,5,8], "criterion": ["gini", "entropy"] }, SVC:{ "C": [10**i for i in range(-5, 5)], "kernel":["linear", "rbf"], "gamma": [10**i for i in np.arange(0, 1, 0.05)], "probability": [True] }, MultinomialNB:{ "alpha": [1, 5, 10, 25, 100] }, KNeighborsClassifier:{ "n_neighbors":[3,5,8,10, 13,15,20,25,30,50], "metric": ["euclidean", "manhattan", "chebyshev" ], "weights":["uniform", "distance"] }, DecisionTreeClassifier:{ "criterion": ["gini", "entropy"], "splitter": ["best", "random"], "max_depth": [None, "auto", "sqrt", "log2", 5, 0.3 ], "min_samples_split": [1, 3, 5, 7, 9 ,15 ,20], "max_features": [2, 3, 4, 5], "min_samples_leaf": [1,2,3,4,5], "max_leaf_nodes": [None, 2, 3 ,4, 5] }, LogisticRegression:{ "penalty": ['l1', 'l2'],
'loss': ["deviance", "exponential"], 'learning_rate': [0.01, 0.1, 0.2, 0.3], 'n_estimators': [3, 6, 10, 20, 100, 200, 500] } } def classifier_comparison(model_params, x_train, y_train, eva_metric, cv_num): comparison_results = {} for model, param_grid in model_params.items(): # initialize gridsearch object grid = GridSearchCV(clf(), param_grid, scoring = eva_metric, cv= cv_num) grid.fit(x_train, y_train) comparison_results[model] ={} comparison_results[model]['cv_results'] = grid.cv_results_ comparison_results
"C": [10**-5, 10**-2, 10**-1, 1, 10, 10**2, 10**5] }, GradientBoostingClassifier:{
random_line_split
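The split above falls inside the model_params grid dictionary that drives classifier_comparison. In the source, the comparison loop instantiates GridSearchCV(clf(), ...) even though the loop variable is model, and the decision-tree grid mixes in max_depth values such as "auto" that belong to max_features. The runnable sketch below corrects both points on a deliberately trimmed grid; the parameter choices are illustrative assumptions.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier

model_params = {
    LogisticRegression: {"C": [0.1, 1.0, 10.0]},
    DecisionTreeClassifier: {"max_depth": [3, 5, None]},
}

def classifier_comparison(model_params, x_train, y_train, eva_metric="roc_auc", cv_num=5):
    # One grid search per estimator class; keep only the summary fields.
    results = {}
    for model, param_grid in model_params.items():
        grid = GridSearchCV(model(), param_grid, scoring=eva_metric, cv=cv_num)
        grid.fit(x_train, y_train)
        results[model.__name__] = {
            "best_score": grid.best_score_,
            "best_params": grid.best_params_,
            "best_estimator": grid.best_estimator_,
        }
    return results

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
print(classifier_comparison(model_params, X, y))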
xfdemo.py
len: {str(len(self.__keywords))}") return 0 # turn language argument into reuqest parameters def loadLanguage(self, language="zh"): # input arguments -> request parameters if language == "zh": self.__language = "cn" else: self.__language = language return 0 # check the file and calculate slice amount def preCheck(self): import os self.__file_size = os.path.getsize(self.__file_path) self.__slice_num = int(self.__file_size/(size10m)) + 1 stg_log(f"preCheck done file_name: {self.__file_path}, file_size: {str(self.__file_size)}, slice_num: {str(self.__slice_num)}") return 0 # Generate timestamp and sign def getTimeAndSign(self): from datetime import datetime import hashlib, hmac, base64 now_time = datetime.now() now_stamp = int(now_time.timestamp()) base_string = f"{self.__appid}{now_stamp}" hash_obj = hashlib.md5(base_string.encode('utf8')) hash_str = hash_obj.hexdigest() b_key = str.encode(self.__secret_key) # to bytes hmac_obj = hmac.new(b_key, hash_str.encode('utf8'), 'sha1') hmac_str = hmac_obj.digest() final_str = base64.b64encode(hmac_str).decode('utf8') return str(now_stamp), final_str # step 1: pre treat def reqPreTreat(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "language": self.__language, "file_len": str(self.__file_size), "file_name": self.__file_path, "slice_num": self.__slice_num} # set keywords if avilable if len(self.__keywords) != 0: req_data["has_sensitive"] = 'true' req_data["sensitive_type"] = '1' req_data["keywords"] = self.__keywords try: req = requests.post(base_url+prepare_url, data=req_data, headers=headers, timeout=10) res = req.json() # to be checked self.__task_id = res["data"] except TimeoutError as e: stg_log(f"step 1: reqPreTreat timeout error occured") stg_log(f"{str(e)}") finally: pass stg_log(f"step 1: pre treat done") stg_log(f"taskid: {str(self.__task_id)}") return 0 # step 2: upload file in slices def reqFileSlice(self): with open(self.__file_path, 'rb') as fi: # get next slice id slice_id_getter = SliceIdGenerator() for slice_index in range(0, self.__slice_num): current_slice_id = slice_id_getter.getNextSliceId() stamp, sign = self.getTimeAndSign() # read file in 10m current_slice = fi.read(size10m) if not current_slice or (len(current_slice) == 0): stg_log(f"reqFileSlice file ends") break # headers not required # headers = {"Content-Type": "multipart/form-data"} headers = None req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id, "slice_id": current_slice_id } # be caution of the format! 
req_file = { "filename": None, "content": current_slice } try: req = requests.post(base_url+upload_url, data=req_data, files=req_file, headers=headers, timeout=100) res = req.json() stg_log(f"step 2: upload file done: {str(slice_index)}/{str(self.__slice_num-1)}") except TimeoutError as e: stg_log(f"reqFileSlice timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 # step 3: finish the upload process def reqMergeFile(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+merge_url, data=req_data, headers=headers, timeout=10) res = req.json() stg_log(f"step 3: merge file done") except TimeoutError as e: stg_log(f"reqMergeFile timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 # step 4: query for convert status def reqStatus(self): import json stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+getprogress_url, data=req_data, headers=headers, timeout=10) res = req.json() # res.data is in string format.. res_status = json.loads(res["data"]) if res_status["status"] == 9: stg_log(f"step 4: reqStatus convert done") return 0 elif res_status["status"] == 3: stg_log(f"reqStatus still converting") return 2 # tbd... else: stg_log(f"reqStatus failed") return 3 except TimeoutError as e: stg_log(f"reqStatus timeout error occured") stg_log(f"{str(e)}") except TypeError as e2: stg_log(f"reqStatus type error occured") stg_log(f"{str(e2)}") finally: pass return 1 # step 5: query for convert result def reqResult(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+getresult_url, data=req_data, headers=headers, timeout=10) res = req.json() stg_log(f"step 5: getResult res done") self.__result = res["data"] except TimeoutError as e: stg_log(f"reqResult timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 def getFinalResult(self): return self.__result # export content to json def writeFinalResultTemp(self): with open(f"./export/{self.__file_name}.json", 'w') as fo: fo.write(self.__result) return 0 # export content to txt def writeFinalResultText(self): import json with open(f"./export/{self.__file_name}.json", 'r') as fi: text_json = json.load(fi) with open(f"./export/{self.__file_name}.txt", "w") as fo: # audio_result and keyword matchs is listed individually in keyword-porvided mode # same as below if "audio_result" in text_json: sentence_list = json.loads(text_json["audio_result"]) else: sentence_list = text_json for every_sentence in sentence_list: es_gbk = every_sentence["onebest"] fo.write(f"{es_gbk}\n") stg_log(f"write to text file done") return 0 # export content to lrc file with timetags def writeFinalResultLrc(self): import json with open(f"./export/{self.__file_name}.json", 'r') as fi: text_json = json.load(fi) with open(f"./export/{self.__file_name}.lrc", "w") as fo: if "audio_result" in text_json: sentence_list = json.loads(text_json["audio_result"]) else: sentence_list = text_json for every_sentence in sentence_list: es_gbk = every_sentence["onebest"] correct_time = 
int(every_sentence["bg"]) + self.__time_offset es_timetag = lrc_time_conveter(correct_time) fo.write(f"[{es_timetag}]{es_gbk}\n") stg_log(f"write to lrc file done") return 0 # create dir if not exist def checkTempdir(self, dirname): import os if not os.path.exists(dirname): os.makedirs(dirname)
return 0
random_line_split
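Every request in this xfdemo workflow carries the same ts/signa pair produced by getTimeAndSign: an MD5 hex digest of appid plus timestamp, signed with HMAC-SHA1 under the secret key and Base64-encoded. A standalone sketch of just that computation is below; the appid and secret values are placeholders.

import base64
import hashlib
import hmac
import time

def build_signature(appid, secret_key):
    # signa = Base64( HMAC-SHA1( secret_key, MD5_hex(appid + ts) ) ), with ts = Unix time as a string.
    ts = str(int(time.time()))
    md5_hex = hashlib.md5((appid + ts).encode("utf-8")).hexdigest()
    digest = hmac.new(secret_key.encode("utf-8"), md5_hex.encode("utf-8"), "sha1").digest()
    return ts, base64.b64encode(digest).decode("utf-8")

ts, signa = build_signature("demo_appid", "demo_secret_key")
print(ts, signa)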
xfdemo.py
1:] break else: ch = ch[:j] + 'a' + ch[j+1:] j = j -1 self.__ch = ch return self.__ch class xfdemo(object): def __
return 0 def loadKeywords(self, keywordfile = "keywords.txt"): # load keywords from text file and convert into string with open(keywordfile, encoding='utf8') as fi: keyword_str = fi.read() self.__keywords = keyword_str.replace('\n', ',') stg_log(f"keywords loaded: {str(self.__keywords)}, len: {str(len(self.__keywords))}") return 0 # turn language argument into reuqest parameters def loadLanguage(self, language="zh"): # input arguments -> request parameters if language == "zh": self.__language = "cn" else: self.__language = language return 0 # check the file and calculate slice amount def preCheck(self): import os self.__file_size = os.path.getsize(self.__file_path) self.__slice_num = int(self.__file_size/(size10m)) + 1 stg_log(f"preCheck done file_name: {self.__file_path}, file_size: {str(self.__file_size)}, slice_num: {str(self.__slice_num)}") return 0 # Generate timestamp and sign def getTimeAndSign(self): from datetime import datetime import hashlib, hmac, base64 now_time = datetime.now() now_stamp = int(now_time.timestamp()) base_string = f"{self.__appid}{now_stamp}" hash_obj = hashlib.md5(base_string.encode('utf8')) hash_str = hash_obj.hexdigest() b_key = str.encode(self.__secret_key) # to bytes hmac_obj = hmac.new(b_key, hash_str.encode('utf8'), 'sha1') hmac_str = hmac_obj.digest() final_str = base64.b64encode(hmac_str).decode('utf8') return str(now_stamp), final_str # step 1: pre treat def reqPreTreat(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "language": self.__language, "file_len": str(self.__file_size), "file_name": self.__file_path, "slice_num": self.__slice_num} # set keywords if avilable if len(self.__keywords) != 0: req_data["has_sensitive"] = 'true' req_data["sensitive_type"] = '1' req_data["keywords"] = self.__keywords try: req = requests.post(base_url+prepare_url, data=req_data, headers=headers, timeout=10) res = req.json() # to be checked self.__task_id = res["data"] except TimeoutError as e: stg_log(f"step 1: reqPreTreat timeout error occured") stg_log(f"{str(e)}") finally: pass stg_log(f"step 1: pre treat done") stg_log(f"taskid: {str(self.__task_id)}") return 0 # step 2: upload file in slices def reqFileSlice(self): with open(self.__file_path, 'rb') as fi: # get next slice id slice_id_getter = SliceIdGenerator() for slice_index in range(0, self.__slice_num): current_slice_id = slice_id_getter.getNextSliceId() stamp, sign = self.getTimeAndSign() # read file in 10m current_slice = fi.read(size10m) if not current_slice or (len(current_slice) == 0): stg_log(f"reqFileSlice file ends") break # headers not required # headers = {"Content-Type": "multipart/form-data"} headers = None req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id, "slice_id": current_slice_id } # be caution of the format! 
req_file = { "filename": None, "content": current_slice } try: req = requests.post(base_url+upload_url, data=req_data, files=req_file, headers=headers, timeout=100) res = req.json() stg_log(f"step 2: upload file done: {str(slice_index)}/{str(self.__slice_num-1)}") except TimeoutError as e: stg_log(f"reqFileSlice timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 # step 3: finish the upload process def reqMergeFile(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+merge_url, data=req_data, headers=headers, timeout=10) res = req.json() stg_log(f"step 3: merge file done") except TimeoutError as e: stg_log(f"reqMergeFile timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 # step 4: query for convert status def reqStatus(self): import json stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+getprogress_url, data=req_data, headers=headers, timeout=10) res = req.json() # res.data is in string format.. res_status = json.loads(res["data"]) if res_status["status"] == 9: stg_log(f"step 4: reqStatus convert done") return 0 elif res_status["status"] == 3: stg_log(f"reqStatus still converting") return 2 # tbd... else: stg_log(f"reqStatus failed") return 3 except TimeoutError as e: stg_log(f"reqStatus timeout error occured") stg_log(f"{str(e)}") except TypeError as e2: stg_log(f"reqStatus type error occured") stg_log(f"{str(e2)}") finally: pass return 1 # step 5: query for convert result def reqResult(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+getresult_url, data=req_data, headers=headers, timeout=10) res = req.json() stg_log(f"step 5: getResult res done") self.__result = res["data"] except TimeoutError as e: stg_log(f"reqResult timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 def getFinalResult(self): return self.__result # export content to json def writeFinalResultTemp(self): with open(f"./export/{self.__file_name}.json", 'w') as fo: fo.write(self.__result) return 0 # export content to txt def writeFinalResultText(self): import json with open(f"./export/{self.__file_name}.json", 'r') as fi: text_json = json.load(fi) with open(f"./export/{self.__file_name}.txt", "w") as fo: # audio_result and keyword matchs is listed individually in keyword-p
init__(self, audio_file_name, time_offset=0): from pathlib import PurePath self.__file_path = audio_file_name pathobj = PurePath(self.__file_path) self.__file_name = pathobj.parts[-1] self.__file_size = 0 self.__slice_num = 1 self.__time_offset = time_offset self.__keywords = "" self.__language = "" stg_log(f"xfdemo loaded with filename: {self.__file_name}") # load addid & secret key def loadConfig(self, configfile = "config.json"): import json with open(configfile) as fi: configobj = json.load(fi) self.__appid = configobj["appid"] self.__secret_key = configobj["secret_key"] stg_log(f"loadConfig: loaded")
identifier_body
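The prefix of this record ends inside SliceIdGenerator.getNextSliceId, the letter counter that labels each uploaded slice. A reconstruction is sketched below; the seed string with the trailing backtick is an assumption borrowed from the public lfasr sample so that the first id comes out as ten letters 'a'.

class SliceIdGenerator:
    # Yields slice ids 'aaaaaaaaaa', 'aaaaaaaaab', ... by incrementing the last
    # letter and carrying past 'z', mirroring the loop visible in the prefix above.
    def __init__(self):
        self.__ch = 'aaaaaaaaa`'  # assumed seed: chr(ord('`') + 1) == 'a', so the first id is 'aaaaaaaaaa'

    def getNextSliceId(self):
        ch = self.__ch
        j = len(ch) - 1
        while j >= 0:
            cj = ch[j]
            if cj != 'z':
                ch = ch[:j] + chr(ord(cj) + 1) + ch[j + 1:]
                break
            else:
                ch = ch[:j] + 'a' + ch[j + 1:]
                j = j - 1
        self.__ch = ch
        return self.__ch

gen = SliceIdGenerator()
print(gen.getNextSliceId(), gen.getNextSliceId())  # aaaaaaaaaa aaaaaaaaab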
xfdemo.py
keywords))}") return 0 # turn language argument into reuqest parameters def loadLanguage(self, language="zh"): # input arguments -> request parameters if language == "zh": self.__language = "cn" else: self.__language = language return 0 # check the file and calculate slice amount def preCheck(self): import os self.__file_size = os.path.getsize(self.__file_path) self.__slice_num = int(self.__file_size/(size10m)) + 1 stg_log(f"preCheck done file_name: {self.__file_path}, file_size: {str(self.__file_size)}, slice_num: {str(self.__slice_num)}") return 0 # Generate timestamp and sign def getTimeAndSign(self): from datetime import datetime import hashlib, hmac, base64 now_time = datetime.now() now_stamp = int(now_time.timestamp()) base_string = f"{self.__appid}{now_stamp}" hash_obj = hashlib.md5(base_string.encode('utf8')) hash_str = hash_obj.hexdigest() b_key = str.encode(self.__secret_key) # to bytes hmac_obj = hmac.new(b_key, hash_str.encode('utf8'), 'sha1') hmac_str = hmac_obj.digest() final_str = base64.b64encode(hmac_str).decode('utf8') return str(now_stamp), final_str # step 1: pre treat def reqPreTreat(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "language": self.__language, "file_len": str(self.__file_size), "file_name": self.__file_path, "slice_num": self.__slice_num} # set keywords if avilable if len(self.__keywords) != 0: req_data["has_sensitive"] = 'true' req_data["sensitive_type"] = '1' req_data["keywords"] = self.__keywords try: req = requests.post(base_url+prepare_url, data=req_data, headers=headers, timeout=10) res = req.json() # to be checked self.__task_id = res["data"] except TimeoutError as e: stg_log(f"step 1: reqPreTreat timeout error occured") stg_log(f"{str(e)}") finally: pass stg_log(f"step 1: pre treat done") stg_log(f"taskid: {str(self.__task_id)}") return 0 # step 2: upload file in slices def reqFileSlice(self): with open(self.__file_path, 'rb') as fi: # get next slice id slice_id_getter = SliceIdGenerator() for slice_index in range(0, self.__slice_num): current_slice_id = slice_id_getter.getNextSliceId() stamp, sign = self.getTimeAndSign() # read file in 10m current_slice = fi.read(size10m) if not current_slice or (len(current_slice) == 0): stg_log(f"reqFileSlice file ends") break # headers not required # headers = {"Content-Type": "multipart/form-data"} headers = None req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id, "slice_id": current_slice_id } # be caution of the format! 
req_file = { "filename": None, "content": current_slice } try: req = requests.post(base_url+upload_url, data=req_data, files=req_file, headers=headers, timeout=100) res = req.json() stg_log(f"step 2: upload file done: {str(slice_index)}/{str(self.__slice_num-1)}") except TimeoutError as e: stg_log(f"reqFileSlice timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 # step 3: finish the upload process def reqMergeFile(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+merge_url, data=req_data, headers=headers, timeout=10) res = req.json() stg_log(f"step 3: merge file done") except TimeoutError as e: stg_log(f"reqMergeFile timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 # step 4: query for convert status def reqStatus(self): import json stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+getprogress_url, data=req_data, headers=headers, timeout=10) res = req.json() # res.data is in string format.. res_status = json.loads(res["data"]) if res_status["status"] == 9: stg_log(f"step 4: reqStatus convert done") return 0 elif res_status["status"] == 3: stg_log(f"reqStatus still converting") return 2 # tbd... else: stg_log(f"reqStatus failed") return 3 except TimeoutError as e: stg_log(f"reqStatus timeout error occured") stg_log(f"{str(e)}") except TypeError as e2: stg_log(f"reqStatus type error occured") stg_log(f"{str(e2)}") finally: pass return 1 # step 5: query for convert result def reqResult(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+getresult_url, data=req_data, headers=headers, timeout=10) res = req.json() stg_log(f"step 5: getResult res done") self.__result = res["data"] except TimeoutError as e: stg_log(f"reqResult timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 def getFinalResult(self): return self.__result # export content to json def writeFinalResultTemp(self): with open(f"./export/{self.__file_name}.json", 'w') as fo: fo.write(self.__result) return 0 # export content to txt def writeFinalResultText(self): import json with open(f"./export/{self.__file_name}.json", 'r') as fi: text_json = json.load(fi) with open(f"./export/{self.__file_name}.txt", "w") as fo: # audio_result and keyword matchs is listed individually in keyword-porvided mode # same as below if "audio_result" in text_json: sentence_list = json.loads(text_json["audio_result"]) else: sentence_list = text_json for every_sentence in sentence_list: es_gbk = every_sentence["onebest"] fo.write(f"{es_gbk}\n") stg_log(f"write to text file done") return 0 # export content to lrc file with timetags def writeFinalResultLrc(self): import json with open(f"./export/{self.__file_name}.json", 'r') as fi: text_json = json.load(fi) with open(f"./export/{self.__file_name}.lrc", "w") as fo: if "audio_result" in text_json: sentence_list = json.loads(text_json["audio_result"]) else: sentence_list = text_json for every_sentence in sentence_list: es_gbk = every_sentence["onebest"] correct_time = 
int(every_sentence["bg"]) + self.__time_offset es_timetag = lrc_time_conveter(correct_time) fo.write(f"[{es_timetag}]{es_gbk}\n") stg_log(f"write to lrc file done") return 0 # create dir if not exist def checkTempdir(self, dirname): import os if not os.path.exists(dirname): os.makedirs(dirname) return 0 def loadAr
gs():
identifier_name
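This record completes the name of loadArgs; the surrounding suffix also shows writeFinalResultLrc calling lrc_time_conveter on each sentence's 'bg' offset. The converter itself never appears in the excerpt, so the sketch below is an assumption: it takes the offset in milliseconds and emits the usual mm:ss.xx body of an LRC timetag, keeping the original (misspelled) function name.

def lrc_time_conveter(ms):
    # Convert a millisecond offset into the 'mm:ss.xx' body of an LRC [timetag].
    ms = int(ms)
    minutes, rest = divmod(ms, 60_000)
    seconds, millis = divmod(rest, 1_000)
    return f"{minutes:02d}:{seconds:02d}.{millis // 10:02d}"

print(lrc_time_conveter(83_450))  # -> 01:23.45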
xfdemo.py
:] break else: ch = ch[:j] + 'a' + ch[j+1:] j = j -1 self.__ch = ch return self.__ch class xfdemo(object): def __init__(self, audio_file_name, time_offset=0): from pathlib import PurePath self.__file_path = audio_file_name pathobj = PurePath(self.__file_path) self.__file_name = pathobj.parts[-1] self.__file_size = 0 self.__slice_num = 1 self.__time_offset = time_offset self.__keywords = "" self.__language = "" stg_log(f"xfdemo loaded with filename: {self.__file_name}") # load addid & secret key def loadConfig(self, configfile = "config.json"): import json with open(configfile) as fi: configobj = json.load(fi) self.__appid = configobj["appid"] self.__secret_key = configobj["secret_key"] stg_log(f"loadConfig: loaded") return 0 def loadKeywords(self, keywordfile = "keywords.txt"): # load keywords from text file and convert into string with open(keywordfile, encoding='utf8') as fi: keyword_str = fi.read() self.__keywords = keyword_str.replace('\n', ',') stg_log(f"keywords loaded: {str(self.__keywords)}, len: {str(len(self.__keywords))}") return 0 # turn language argument into reuqest parameters def loadLanguage(self, language="zh"): # input arguments -> request parameters if language == "zh": self.__language = "cn" else: self.__language = language return 0 # check the file and calculate slice amount def preCheck(self): import os self.__file_size = os.path.getsize(self.__file_path) self.__slice_num = int(self.__file_size/(size10m)) + 1 stg_log(f"preCheck done file_name: {self.__file_path}, file_size: {str(self.__file_size)}, slice_num: {str(self.__slice_num)}") return 0 # Generate timestamp and sign def getTimeAndSign(self): from datetime import datetime import hashlib, hmac, base64 now_time = datetime.now() now_stamp = int(now_time.timestamp()) base_string = f"{self.__appid}{now_stamp}" hash_obj = hashlib.md5(base_string.encode('utf8')) hash_str = hash_obj.hexdigest() b_key = str.encode(self.__secret_key) # to bytes hmac_obj = hmac.new(b_key, hash_str.encode('utf8'), 'sha1') hmac_str = hmac_obj.digest() final_str = base64.b64encode(hmac_str).decode('utf8') return str(now_stamp), final_str # step 1: pre treat def reqPreTreat(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "language": self.__language, "file_len": str(self.__file_size), "file_name": self.__file_path, "slice_num": self.__slice_num} # set keywords if avilable if len(self.__keywords) != 0: req_data["has_sensitive"] = 'true' req_data["sensitive_type"] = '1' req_data["keywords"] = self.__keywords try: req = requests.post(base_url+prepare_url, data=req_data, headers=headers, timeout=10) res = req.json() # to be checked self.__task_id = res["data"] except TimeoutError as e: stg_log(f"step 1: reqPreTreat timeout error occured") stg_log(f"{str(e)}") finally: pass stg_log(f"step 1: pre treat done") stg_log(f"taskid: {str(self.__task_id)}") return 0 # step 2: upload file in slices def reqFileSlice(self): with open(self.__file_path, 'rb') as fi: # get next slice id slice_id_getter = SliceIdGenerator() for slice_index in range(0, self.__slice_num): current_slice_id = slice_id_getter.getNextSliceId() stamp, sign = self.getTimeAndSign() # read file in 10m current_slice = fi.read(size10m) if not current_slice or (len(current_slice) == 0): stg_lo
# headers not required # headers = {"Content-Type": "multipart/form-data"} headers = None req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id, "slice_id": current_slice_id } # be caution of the format! req_file = { "filename": None, "content": current_slice } try: req = requests.post(base_url+upload_url, data=req_data, files=req_file, headers=headers, timeout=100) res = req.json() stg_log(f"step 2: upload file done: {str(slice_index)}/{str(self.__slice_num-1)}") except TimeoutError as e: stg_log(f"reqFileSlice timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 # step 3: finish the upload process def reqMergeFile(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+merge_url, data=req_data, headers=headers, timeout=10) res = req.json() stg_log(f"step 3: merge file done") except TimeoutError as e: stg_log(f"reqMergeFile timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 # step 4: query for convert status def reqStatus(self): import json stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+getprogress_url, data=req_data, headers=headers, timeout=10) res = req.json() # res.data is in string format.. res_status = json.loads(res["data"]) if res_status["status"] == 9: stg_log(f"step 4: reqStatus convert done") return 0 elif res_status["status"] == 3: stg_log(f"reqStatus still converting") return 2 # tbd... else: stg_log(f"reqStatus failed") return 3 except TimeoutError as e: stg_log(f"reqStatus timeout error occured") stg_log(f"{str(e)}") except TypeError as e2: stg_log(f"reqStatus type error occured") stg_log(f"{str(e2)}") finally: pass return 1 # step 5: query for convert result def reqResult(self): stamp, sign = self.getTimeAndSign() headers = {"Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8"} req_data = {"app_id": self.__appid, "signa": sign, "ts": stamp, "task_id": self.__task_id} try: req = requests.post(base_url+getresult_url, data=req_data, headers=headers, timeout=10) res = req.json() stg_log(f"step 5: getResult res done") self.__result = res["data"] except TimeoutError as e: stg_log(f"reqResult timeout error occured") stg_log(f"{str(e)}") finally: pass return 0 def getFinalResult(self): return self.__result # export content to json def writeFinalResultTemp(self): with open(f"./export/{self.__file_name}.json", 'w') as fo: fo.write(self.__result) return 0 # export content to txt def writeFinalResultText(self): import json with open(f"./export/{self.__file_name}.json", 'r') as fi: text_json = json.load(fi) with open(f"./export/{self.__file_name}.txt", "w") as fo: # audio_result and keyword matchs is listed individually in keyword
g(f"reqFileSlice file ends") break
conditional_block
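The conditional block above breaks the slice-upload loop once the file is exhausted. Upstream of that, reqStatus reports 0 when conversion is finished, 2 while it is still running and 3 on failure, which suggests a simple polling driver like the hedged sketch below; the interval and retry budget are arbitrary choices, and wait_until_done is not a method of the original class.

import time

def wait_until_done(demo, interval_s=20, max_checks=90):
    # Poll demo.reqStatus() until it reports done (0) or failed (3);
    # 2 (still converting) and other transient codes just trigger another wait.
    for _ in range(max_checks):
        status = demo.reqStatus()
        if status == 0:
            return True
        if status == 3:
            return False
        time.sleep(interval_s)
    return False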
crea_cig_esiti_senza_duplicati.py
n_OS2B= n_OS2B+1 elif categoria =='OS3': n_OS3= n_OS3+1 elif categoria =='OS4': n_OS4= n_OS4+1 elif categoria =='OS5': n_OS5= n_OS5+1 elif categoria =='OS6': n_OS6= n_OS6+1 elif categoria =='OS7': n_OS7= n_OS7+1 elif categoria =='OS8': n_OS8= n_OS8+1 elif categoria =='OS9': n_OS9= n_OS9+1 elif categoria =='OS10': n_OS10= n_OS10+1 elif categoria =='OS11': n_OS11= n_OS11+1 elif categoria =='OS12-A': n_OS12A= n_OS12A+1 elif categoria =='OS12-B': n_OS12B= n_OS12B+1 elif categoria =='OS13': n_OS13= n_OS13+1 elif categoria =='OS14': n_OS14= n_OS14+1 elif categoria =='OS15': n_OS15= n_OS15+1 elif categoria =='OS16': n_OS16= n_OS16+1 elif categoria =='OS17': n_OS17= n_OS17+1 elif categoria =='OS18-A': n_OS18A= n_OS18A+1 elif categoria =='OS18-B': n_OS18B= n_OS18B+1 elif categoria =='OS19': n_OS19= n_OS19+1 elif categoria =='OS20-A': n_OS20A= n_OS20A+1 elif categoria =='OS20-B': n_OS20B= n_OS20B+1 elif categoria =='OS21': n_OS21= n_OS21+1 elif categoria =='OS22': n_OS22= n_OS22+1 elif categoria =='OS23': n_OS23= n_OS23+1 elif categoria =='OS24': n_OS24= n_OS24+1 elif categoria =='OS25': n_OS25= n_OS25+1 elif categoria =='OS26': n_OS26= n_OS26+1 elif categoria =='OS27': n_OS27= n_OS27+1 elif categoria =='OS28': n_OS28= n_OS28+1 elif categoria =='OS29': n_OS29= n_OS29+1 elif categoria =='OS30': n_OS30= n_OS30+1 elif categoria =='OS31': n_OS31= n_OS31+1 elif categoria =='OS32': n_OS32= n_OS32+1 elif categoria =='OS33': n_OS33= n_OS33+1 elif categoria =='OS34': n_OS34= n_OS34+1 elif categoria =='OS35': n_OS35= n_OS35+1 lista = [] lista.append(('OG1',n_OG1)) lista.append(('OG2',n_OG2)) lista.append(('OG3',n_OG3)) lista.append(('OG4',n_OG4)) lista.append(('OG5',n_OG5)) lista.append(('OG6',n_OG6)) lista.append(('OG7',n_OG7)) lista.append(('OG8',n_OG8)) lista.append(('OG9',n_OG9)) lista.append(('OG10',n_OG10)) lista.append(('OG11',n_OG11)) lista.append(('OG12',n_OG12)) lista.append(('OG13',n_OG13)) lista.append(('OS1',n_OS1)) lista.append(('OS2A',n_OS2A)) lista.append(('OS2B',n_OS2B)) lista.append(('OS3',n_OS3)) lista.append(('OS4',n_OS4)) lista.append(('OS5',n_OS5)) lista.append(('OS6',n_OS6)) lista.append(('OS7',n_OS7)) lista.append(('OS8',n_OS8)) lista.append(('OS9',n_OS9)) lista.append(('OS10',n_OS10)) lista.append(('OS11',n_OS11)) lista.append(('OS12A',n_OS12A)) lista.append(('OS12B',n_OS12B)) lista.append(('OS13',n_OS13)) lista.append(('OS14',n_OS14)) lista.append(('OS15',n_OS15)) lista.append(('OS16',n_OS16)) lista.append(('OS17',n_OS17)) lista.append(('OS18A',n_OS18A)) lista.append(('OS18B',n_OS18B)) lista.append(('OS19',n_OS19)) lista.append(('OS20A',n_OS20A)) lista.append(('OS20B',n_OS20B)) lista.append(('OS21',n_OS21)) lista.append(('OS22',n_OS22)) lista.append(('OS23',n_OS23)) lista.append(('OS24',n_OS24)) lista.append(('OS25',n_OS25)) lista.append(('OS26',n_OS26)) lista.append(('OS27',n_OS27)) lista.append(('OS28',n_OS28)) lista.append(('OS29',n_OS29)) lista.append(('OS30',n_OS30)) lista.append(('OS31',n_OS31)) lista.append(('OS32',n_OS32)) lista.append(('OS33',n_OS33)) lista.append(('OS34',n_OS34)) lista.append(('OS35',n_OS35)) ordinata = sorted (lista , key=lambda lista : lista[1],reverse=True) return ordinata def converti_data (data_py): data_ita = data_py.strftime('%d-%m-%Y') sitrng = str(data_ita) return data_ita def get_session_info(): uid = "web"; access_type= "no"; info = "uid="+uid+"&pwd="+access_type; return info; account = get_session_info() def function_config(): name_server = "10*.119*.128*.95"; name_project = "SISk*_Extranet"; directory = "Extranet"; url_din = 
""+name_server+"."+name_project+".0_&shared=*-1.*-1.0.0.0&ftb=0.422541B24E28B69DC5DF858B20E67091.*0.8.0.0-8.18_268453447.*-1.1.*0&fb=0.422541B24E28B69DC5DF858B20E67091."+directory+".8.0.0-8.768.769.774.770.773.772.775.55.256.10.257.776.777_268453447.*-1.1.*0"; return url_din; part_url = function_config() def get_server(): server_ip = "portaletrasparenza.avcp.it"; return server_ip; ip = get_server() def
(data): data=data.replace(u'\xa0', '') if data: d = datetime.strptime(data, '%d/%m/%Y') day_string = d.strftime('%Y-%m-%d') else: day_string = "1900-01-01" return day_string def prendi_provincia_regione2(comun): try: sql_com = 'SELECT * FROM gare.comuni where nome = "'+comun+'"' cursore1 = conn.cursor() cursore1.execute(sql_com) id_prov = cursore1.fetchall()[0][2] sql_prov = 'SELECT * FROM gare.province where id = '+ str(id_prov) cursore1.execute(sql_prov) prov = cursore1.fetchall() provincia = prov[0][1] sql_sigle = 'SELECT * FROM gare.province_sigle where nomeprovincia = "'+provincia+'"' cursore1.execute(sql_sigle) try: sigla_prov = cursore1.fetchall()[0][3].
data_per_db
identifier_name
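The identifier completed here is data_per_db, the helper that strips non-breaking spaces from the scraped 'dd/mm/YYYY' dates and rewrites them as 'YYYY-mm-dd' for the database, falling back to 1900-01-01 when the field is empty. A self-contained sketch of that logic, as read from the suffix above, follows; the extra None/whitespace handling is my addition.

from datetime import datetime

def data_per_db(data):
    # Normalise a scraped Italian date for the database; blank input falls back to 1900-01-01.
    data = (data or "").replace(u"\xa0", "").strip()
    if not data:
        return "1900-01-01"
    return datetime.strptime(data, "%d/%m/%Y").strftime("%Y-%m-%d")

print(data_per_db("05/03/2014"))  # -> 2014-03-05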
crea_cig_esiti_senza_duplicati.py
n_OS2B= n_OS2B+1 elif categoria =='OS3': n_OS3= n_OS3+1 elif categoria =='OS4': n_OS4= n_OS4+1 elif categoria =='OS5': n_OS5= n_OS5+1 elif categoria =='OS6': n_OS6= n_OS6+1 elif categoria =='OS7': n_OS7= n_OS7+1 elif categoria =='OS8': n_OS8= n_OS8+1 elif categoria =='OS9': n_OS9= n_OS9+1 elif categoria =='OS10':
elif categoria =='OS11': n_OS11= n_OS11+1 elif categoria =='OS12-A': n_OS12A= n_OS12A+1 elif categoria =='OS12-B': n_OS12B= n_OS12B+1 elif categoria =='OS13': n_OS13= n_OS13+1 elif categoria =='OS14': n_OS14= n_OS14+1 elif categoria =='OS15': n_OS15= n_OS15+1 elif categoria =='OS16': n_OS16= n_OS16+1 elif categoria =='OS17': n_OS17= n_OS17+1 elif categoria =='OS18-A': n_OS18A= n_OS18A+1 elif categoria =='OS18-B': n_OS18B= n_OS18B+1 elif categoria =='OS19': n_OS19= n_OS19+1 elif categoria =='OS20-A': n_OS20A= n_OS20A+1 elif categoria =='OS20-B': n_OS20B= n_OS20B+1 elif categoria =='OS21': n_OS21= n_OS21+1 elif categoria =='OS22': n_OS22= n_OS22+1 elif categoria =='OS23': n_OS23= n_OS23+1 elif categoria =='OS24': n_OS24= n_OS24+1 elif categoria =='OS25': n_OS25= n_OS25+1 elif categoria =='OS26': n_OS26= n_OS26+1 elif categoria =='OS27': n_OS27= n_OS27+1 elif categoria =='OS28': n_OS28= n_OS28+1 elif categoria =='OS29': n_OS29= n_OS29+1 elif categoria =='OS30': n_OS30= n_OS30+1 elif categoria =='OS31': n_OS31= n_OS31+1 elif categoria =='OS32': n_OS32= n_OS32+1 elif categoria =='OS33': n_OS33= n_OS33+1 elif categoria =='OS34': n_OS34= n_OS34+1 elif categoria =='OS35': n_OS35= n_OS35+1 lista = [] lista.append(('OG1',n_OG1)) lista.append(('OG2',n_OG2)) lista.append(('OG3',n_OG3)) lista.append(('OG4',n_OG4)) lista.append(('OG5',n_OG5)) lista.append(('OG6',n_OG6)) lista.append(('OG7',n_OG7)) lista.append(('OG8',n_OG8)) lista.append(('OG9',n_OG9)) lista.append(('OG10',n_OG10)) lista.append(('OG11',n_OG11)) lista.append(('OG12',n_OG12)) lista.append(('OG13',n_OG13)) lista.append(('OS1',n_OS1)) lista.append(('OS2A',n_OS2A)) lista.append(('OS2B',n_OS2B)) lista.append(('OS3',n_OS3)) lista.append(('OS4',n_OS4)) lista.append(('OS5',n_OS5)) lista.append(('OS6',n_OS6)) lista.append(('OS7',n_OS7)) lista.append(('OS8',n_OS8)) lista.append(('OS9',n_OS9)) lista.append(('OS10',n_OS10)) lista.append(('OS11',n_OS11)) lista.append(('OS12A',n_OS12A)) lista.append(('OS12B',n_OS12B)) lista.append(('OS13',n_OS13)) lista.append(('OS14',n_OS14)) lista.append(('OS15',n_OS15)) lista.append(('OS16',n_OS16)) lista.append(('OS17',n_OS17)) lista.append(('OS18A',n_OS18A)) lista.append(('OS18B',n_OS18B)) lista.append(('OS19',n_OS19)) lista.append(('OS20A',n_OS20A)) lista.append(('OS20B',n_OS20B)) lista.append(('OS21',n_OS21)) lista.append(('OS22',n_OS22)) lista.append(('OS23',n_OS23)) lista.append(('OS24',n_OS24)) lista.append(('OS25',n_OS25)) lista.append(('OS26',n_OS26)) lista.append(('OS27',n_OS27)) lista.append(('OS28',n_OS28)) lista.append(('OS29',n_OS29)) lista.append(('OS30',n_OS30)) lista.append(('OS31',n_OS31)) lista.append(('OS32',n_OS32)) lista.append(('OS33',n_OS33)) lista.append(('OS34',n_OS34)) lista.append(('OS35',n_OS35)) ordinata = sorted (lista , key=lambda lista : lista[1],reverse=True) return ordinata def converti_data (data_py): data_ita = data_py.strftime('%d-%m-%Y') sitrng = str(data_ita) return data_ita def get_session_info(): uid = "web"; access_type= "no"; info = "uid="+uid+"&pwd="+access_type; return info; account = get_session_info() def function_config(): name_server = "10*.119*.128*.95"; name_project = "SISk*_Extranet"; directory = "Extranet"; url_din = ""+name_server+"."+name_project+".0_&shared=*-1.*-1.0.0.0&ftb=0.422541B24E28B69DC5DF858B20E67091.*0.8.0.0-8.18_268453447.*-1.1.*0&fb=0.422541B24E28B69DC5DF858B20E67091."+directory+".8.0.0-8.768.769.774.770.773.772.775.55.256.10.257.776.777_268453447.*-1.1.*0"; return url_din; part_url = function_config() def get_server(): server_ip = 
"portaletrasparenza.avcp.it"; return server_ip; ip = get_server() def data_per_db(data): data=data.replace(u'\xa0', '') if data: d = datetime.strptime(data, '%d/%m/%Y') day_string = d.strftime('%Y-%m-%d') else: day_string = "1900-01-01" return day_string def prendi_provincia_regione2(comun): try: sql_com = 'SELECT * FROM gare.comuni where nome = "'+comun+'"' cursore1 = conn.cursor() cursore1.execute(sql_com) id_prov = cursore1.fetchall()[0][2] sql_prov = 'SELECT * FROM gare.province where id = '+ str(id_prov) cursore1.execute(sql_prov) prov = cursore1.fetchall() provincia = prov[0][1] sql_sigle = 'SELECT * FROM gare.province_sigle where nomeprovincia = "'+provincia+'"' cursore1.execute(sql_sigle) try: sigla_prov = cursore1.fetchall()[0][3].
n_OS10= n_OS10+1
conditional_block
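The record above tallies one counter per SOA category (OG1-OG13, OS1-OS35) through a long if/elif chain and then sorts the (category, count) pairs by count. A minimal sketch of the same tally with collections.Counter; the normalisation of 'OS2-A' to 'OS2A' mirrors the original list names, but the function name is hypothetical and this is an illustration, not the script's own code:

```python
from collections import Counter

def conta_categorie(cpv_trovati):
    # cpv_trovati: rows where row[1] is the SOA category, e.g. 'OG3' or 'OS18-A'
    conteggi = Counter(riga[1].replace('-', '') for riga in cpv_trovati)
    # sort (categoria, conteggio) pairs by count, highest first, like `ordinata`
    return sorted(conteggi.items(), key=lambda coppia: coppia[1], reverse=True)
```

Unlike the original, categories that never occur are simply absent from the result instead of being reported with a count of zero.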
crea_cig_esiti_senza_duplicati.py
n_OS3=0 n_OS4=0 n_OS5=0 n_OS6=0 n_OS7=0 n_OS8=0 n_OS9=0 n_OS10=0 n_OS11=0 n_OS12A=0 n_OS12B=0 n_OS13=0 n_OS14=0 n_OS15=0 n_OS16=0 n_OS17=0 n_OS18A=0 n_OS18B=0 n_OS19=0 n_OS20A=0 n_OS20B=0 n_OS21=0 n_OS22=0 n_OS23=0 n_OS24=0 n_OS25=0 n_OS26=0 n_OS27=0 n_OS28=0 n_OS29=0 n_OS30=0 n_OS31=0 n_OS32=0 n_OS33=0 n_OS34=0 n_OS35=0 for y in CPV_trovati: categoria = y[1] if categoria=='OG1': n_OG1 = n_OG1+1 elif categoria =='OG1': n_OG1= n_OG1+1 elif categoria =='OG2': n_OG2= n_OG2+1 elif categoria =='OG3': n_OG3= n_OG3+1 elif categoria =='OG4': n_OG4= n_OG4+1 elif categoria =='OG5': n_OG5= n_OG5+1 elif categoria =='OG6': n_OG6= n_OG6+1 elif categoria =='OG7': n_OG7= n_OG7+1 elif categoria =='OG8': n_OG8= n_OG8+1 elif categoria =='OG9': n_OG9= n_OG9+1 elif categoria =='OG10': n_OG10= n_OG10+1 elif categoria =='OG11': n_OG11= n_OG11+1 elif categoria =='OG12': n_OG12= n_OG12+1 elif categoria =='OG13': n_OG13= n_OG13+1 elif categoria =='OS1': n_OS1= n_OS1+1 elif categoria =='OS2-A': n_OS2A= n_OS2A+1 elif categoria =='OS2-B': n_OS2B= n_OS2B+1 elif categoria =='OS3': n_OS3= n_OS3+1 elif categoria =='OS4': n_OS4= n_OS4+1 elif categoria =='OS5': n_OS5= n_OS5+1 elif categoria =='OS6': n_OS6= n_OS6+1 elif categoria =='OS7': n_OS7= n_OS7+1 elif categoria =='OS8': n_OS8= n_OS8+1 elif categoria =='OS9': n_OS9= n_OS9+1 elif categoria =='OS10': n_OS10= n_OS10+1 elif categoria =='OS11': n_OS11= n_OS11+1 elif categoria =='OS12-A': n_OS12A= n_OS12A+1 elif categoria =='OS12-B': n_OS12B= n_OS12B+1 elif categoria =='OS13': n_OS13= n_OS13+1 elif categoria =='OS14': n_OS14= n_OS14+1 elif categoria =='OS15': n_OS15= n_OS15+1 elif categoria =='OS16': n_OS16= n_OS16+1 elif categoria =='OS17': n_OS17= n_OS17+1 elif categoria =='OS18-A': n_OS18A= n_OS18A+1 elif categoria =='OS18-B': n_OS18B= n_OS18B+1 elif categoria =='OS19': n_OS19= n_OS19+1 elif categoria =='OS20-A': n_OS20A= n_OS20A+1 elif categoria =='OS20-B': n_OS20B= n_OS20B+1 elif categoria =='OS21': n_OS21= n_OS21+1 elif categoria =='OS22': n_OS22= n_OS22+1 elif categoria =='OS23': n_OS23= n_OS23+1 elif categoria =='OS24': n_OS24= n_OS24+1 elif categoria =='OS25': n_OS25= n_OS25+1 elif categoria =='OS26': n_OS26= n_OS26+1 elif categoria =='OS27': n_OS27= n_OS27+1 elif categoria =='OS28': n_OS28= n_OS28+1 elif categoria =='OS29': n_OS29= n_OS29+1 elif categoria =='OS30': n_OS30= n_OS30+1 elif categoria =='OS31': n_OS31= n_OS31+1 elif categoria =='OS32': n_OS32= n_OS32+1 elif categoria =='OS33': n_OS33= n_OS33+1 elif categoria =='OS34': n_OS34= n_OS34+1 elif categoria =='OS35': n_OS35= n_OS35+1 lista = [] lista.append(('OG1',n_OG1)) lista.append(('OG2',n_OG2)) lista.append(('OG3',n_OG3)) lista.append(('OG4',n_OG4)) lista.append(('OG5',n_OG5)) lista.append(('OG6',n_OG6)) lista.append(('OG7',n_OG7)) lista.append(('OG8',n_OG8)) lista.append(('OG9',n_OG9)) lista.append(('OG10',n_OG10)) lista.append(('OG11',n_OG11)) lista.append(('OG12',n_OG12)) lista.append(('OG13',n_OG13)) lista.append(('OS1',n_OS1)) lista.append(('OS2A',n_OS2A)) lista.append(('OS2B',n_OS2B)) lista.append(('OS3',n_OS3)) lista.append(('OS4',n_OS4)) lista.append(('OS5',n_OS5)) lista.append(('OS6',n_OS6)) lista.append(('OS7',n_OS7)) lista.append(('OS8',n_OS8)) lista.append(('OS9',n_OS9)) lista.append(('OS10',n_OS10)) lista.append(('OS11',n_OS11)) lista.append(('OS12A',n_OS12A)) lista.append(('OS12B',n_OS12B)) lista.append(('OS13',n_OS13)) lista.append(('OS14',n_OS14)) lista.append(('OS15',n_OS15)) lista.append(('OS16',n_OS16)) lista.append(('OS17',n_OS17)) lista.append(('OS18A',n_OS18A)) 
lista.append(('OS18B',n_OS18B)) lista.append(('OS19',n_OS19)) lista.append(('OS20A',n_OS20A)) lista.append(('OS20B',n_OS20B)) lista.append(('OS21',n_OS21)) lista.append(('OS22',n_OS22)) lista.append(('OS23',n_OS23)) lista
cursore = conn.cursor() cursore.execute("SELECT * FROM gare.cpv_to_cat where CPV = '"+CPV+"'") CPV_trovati = cursore.fetchall() n_OG1= 0 n_OG2 = 0 n_OG3 = 0 n_OG4 = 0 n_OG5=0 n_OG6=0 n_OG7=0 n_OG8=0 n_OG9=0 n_OG10=0 n_OG11=0 n_OG12=0 n_OG13=0 n_OS1=0 n_OS2A=0 n_OS2B=0
identifier_body
crea_cig_esiti_senza_duplicati.py
n_OS12B=0 n_OS13=0 n_OS14=0 n_OS15=0 n_OS16=0 n_OS17=0 n_OS18A=0 n_OS18B=0 n_OS19=0 n_OS20A=0 n_OS20B=0 n_OS21=0 n_OS22=0 n_OS23=0 n_OS24=0 n_OS25=0 n_OS26=0 n_OS27=0 n_OS28=0 n_OS29=0 n_OS30=0 n_OS31=0 n_OS32=0 n_OS33=0 n_OS34=0 n_OS35=0 for y in CPV_trovati: categoria = y[1] if categoria=='OG1': n_OG1 = n_OG1+1 elif categoria =='OG1': n_OG1= n_OG1+1 elif categoria =='OG2': n_OG2= n_OG2+1 elif categoria =='OG3': n_OG3= n_OG3+1 elif categoria =='OG4': n_OG4= n_OG4+1 elif categoria =='OG5': n_OG5= n_OG5+1 elif categoria =='OG6': n_OG6= n_OG6+1 elif categoria =='OG7': n_OG7= n_OG7+1 elif categoria =='OG8': n_OG8= n_OG8+1 elif categoria =='OG9': n_OG9= n_OG9+1 elif categoria =='OG10': n_OG10= n_OG10+1 elif categoria =='OG11': n_OG11= n_OG11+1 elif categoria =='OG12': n_OG12= n_OG12+1 elif categoria =='OG13': n_OG13= n_OG13+1 elif categoria =='OS1': n_OS1= n_OS1+1 elif categoria =='OS2-A': n_OS2A= n_OS2A+1 elif categoria =='OS2-B': n_OS2B= n_OS2B+1 elif categoria =='OS3': n_OS3= n_OS3+1 elif categoria =='OS4': n_OS4= n_OS4+1 elif categoria =='OS5': n_OS5= n_OS5+1 elif categoria =='OS6': n_OS6= n_OS6+1 elif categoria =='OS7': n_OS7= n_OS7+1 elif categoria =='OS8': n_OS8= n_OS8+1 elif categoria =='OS9': n_OS9= n_OS9+1 elif categoria =='OS10': n_OS10= n_OS10+1 elif categoria =='OS11': n_OS11= n_OS11+1 elif categoria =='OS12-A': n_OS12A= n_OS12A+1 elif categoria =='OS12-B': n_OS12B= n_OS12B+1 elif categoria =='OS13': n_OS13= n_OS13+1 elif categoria =='OS14': n_OS14= n_OS14+1 elif categoria =='OS15': n_OS15= n_OS15+1 elif categoria =='OS16': n_OS16= n_OS16+1 elif categoria =='OS17': n_OS17= n_OS17+1 elif categoria =='OS18-A': n_OS18A= n_OS18A+1 elif categoria =='OS18-B': n_OS18B= n_OS18B+1 elif categoria =='OS19': n_OS19= n_OS19+1 elif categoria =='OS20-A': n_OS20A= n_OS20A+1 elif categoria =='OS20-B': n_OS20B= n_OS20B+1 elif categoria =='OS21': n_OS21= n_OS21+1 elif categoria =='OS22': n_OS22= n_OS22+1 elif categoria =='OS23': n_OS23= n_OS23+1 elif categoria =='OS24': n_OS24= n_OS24+1 elif categoria =='OS25': n_OS25= n_OS25+1 elif categoria =='OS26': n_OS26= n_OS26+1 elif categoria =='OS27': n_OS27= n_OS27+1 elif categoria =='OS28': n_OS28= n_OS28+1 elif categoria =='OS29': n_OS29= n_OS29+1 elif categoria =='OS30': n_OS30= n_OS30+1 elif categoria =='OS31': n_OS31= n_OS31+1 elif categoria =='OS32': n_OS32= n_OS32+1 elif categoria =='OS33': n_OS33= n_OS33+1 elif categoria =='OS34': n_OS34= n_OS34+1 elif categoria =='OS35': n_OS35= n_OS35+1 lista = [] lista.append(('OG1',n_OG1)) lista.append(('OG2',n_OG2)) lista.append(('OG3',n_OG3)) lista.append(('OG4',n_OG4)) lista.append(('OG5',n_OG5)) lista.append(('OG6',n_OG6)) lista.append(('OG7',n_OG7)) lista.append(('OG8',n_OG8)) lista.append(('OG9',n_OG9)) lista.append(('OG10',n_OG10)) lista.append(('OG11',n_OG11)) lista.append(('OG12',n_OG12)) lista.append(('OG13',n_OG13)) lista.append(('OS1',n_OS1)) lista.append(('OS2A',n_OS2A)) lista.append(('OS2B',n_OS2B)) lista.append(('OS3',n_OS3)) lista.append(('OS4',n_OS4)) lista.append(('OS5',n_OS5)) lista.append(('OS6',n_OS6)) lista.append(('OS7',n_OS7)) lista.append(('OS8',n_OS8)) lista.append(('OS9',n_OS9)) lista.append(('OS10',n_OS10)) lista.append(('OS11',n_OS11)) lista.append(('OS12A',n_OS12A)) lista.append(('OS12B',n_OS12B)) lista.append(('OS13',n_OS13)) lista.append(('OS14',n_OS14)) lista.append(('OS15',n_OS15)) lista.append(('OS16',n_OS16)) lista.append(('OS17',n_OS17)) lista.append(('OS18A',n_OS18A)) lista.append(('OS18B',n_OS18B)) lista.append(('OS19',n_OS19)) lista.append(('OS20A',n_OS20A)) 
lista.append(('OS20B',n_OS20B)) lista.append(('OS21',n_OS21)) lista.append(('OS22',n_OS22)) lista.append(('OS23',n_OS23)) lista.append(('OS24',n_OS24)) lista.append(('OS25',n_OS25)) lista.append(('OS26',n_OS26)) lista.append(('OS27',n_OS27)) lista.append(('OS28',n_OS28)) lista.append(('OS29',n_OS29)) lista.append(('OS30',n_OS30)) lista.append(('OS31',n_OS31)) lista.append(('OS32',n_OS32)) lista.append(('OS33',n_OS33)) lista.append(('OS34',n_OS34)) lista.append(('OS35',n_OS35)) ordinata = sorted (lista , key=lambda lista : lista[1],reverse=True) return ordinata def converti_data (data_py): data_ita = data_py.strftime('%d-%m-%Y') sitrng = str(data_ita) return data_ita
n_OS9=0 n_OS10=0 n_OS11=0 n_OS12A=0
random_line_split
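Earlier in this group of records, the helper data_per_db normalises scraped Italian dates: it strips non-breaking spaces, converts 'dd/mm/YYYY' to the 'YYYY-mm-dd' form expected by the database, and falls back to '1900-01-01' for empty values. A standalone restatement with a small usage example (same logic, hypothetical call site):

```python
from datetime import datetime

def data_per_db(data):
    data = data.replace(u'\xa0', '')    # drop non-breaking spaces from scraped values
    if data:
        return datetime.strptime(data, '%d/%m/%Y').strftime('%Y-%m-%d')
    return '1900-01-01'

print(data_per_db('05/03/2014'))   # -> 2014-03-05
print(data_per_db(''))             # -> 1900-01-01
```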
neuralnet.py
umsamples (N) x Outputs (No CLasses) # an array containing the size of the output of all of the laye of the neural net """ # Hidden layer DxHLS weights_L1,bias_L1,weights_L2,bias_L2 = self._extract_weights(W) # Output layer HLSxOUT # A_2 = N x HLS A_2 = transfer_func(np.dot(X,weights_L1) + bias_L1 ) # A_3 = N x Outputs - softmax A_3 = self.softmax(weights_L2,A_2,bias_L2) # output layer return [A_2,A_3] def __init__(self,training_set,testing_set,validation_set,no_neurons=300,transfer_func=None,optimization_func=None): """ yup """ self._transfer_func = transfer_func self._training_data = training_set[0] self._training_labels = training_set[1] self._testing_data = testing_set[0] self._testing_labels = testing_set[1] self._validation_data = validation_set[0] self._validation_labels = validation_set[1] self._hidden_layer_size = no_neurons self._weights_filename = "nn_receptive_fields.dump" #how mauch data do we have for training? self._N = self._training_data.shape[0] #Dimension of the data (basically, the number of inputs in the input layer) self._D = self._training_data.shape[1] # We assume that the labels are encoded in an array of Nx1 wich each entry an integer # defining the class starting at 0 #number of classes or basically, how many outputs are we going to have. self._output_size = max(self._training_labels) + 1 #initialize the weights for the layers: - we are going to work with one 1 hidden layer #layer 1: input * number neuros in the hidden layer + hidde_layer_size for the biases # first layer network_weight_no = self._D * self._hidden_layer_size + self._hidden_layer_size # second layer network_weight_no += self._hidden_layer_size * self._output_size + self._output_size self._betas = np.random.normal(0, 0.1, network_weight_no) #layer 2: hidden layer * no_classes + no_classes for the biases def _traing_mini_sgd(self,learning_rate=0.2,batch_number=10,epochs=1,rms_incr=0.9,rms_dcr=0.1): """ Training miniSGD """ self._batch_number = batch_number self._batch_size = self._N / self._batch_number self._epochs = epochs self._learning_rate = learning_rate print "Training using MiniBatch SGD with Hidden layer size = {3} :: learning_rate = {0} :: Batches = {1} :: Epochs = {2}".format(learning_rate,batch_number, epochs,self._hidden_layer_size) # Erro reporting: self.error_testing = np.zeros(self._epochs*self._batch_number) self.error_validation = np.zeros(self._epochs*self._batch_number) self.error_training = np.zeros(self._epochs*self._batch_number) self.cost_output= np.zeros(self._epochs*self._batch_number) patience = 5000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(self._batch_number, patience/2) # go through this many # minibatches before checking the network # on the validation set; in this case we # check every epoch #Mean Square weight for rmsprop means_sqrt_w = np.zeros(self._betas.size) early_stopping=0 done_looping = False for epoch in range(self._epochs): #early stopping if(done_looping): break for batch in range(self._batch_number): print "Trainining: Epoch {0}/{1} - Batch {2}/{3}".format(epoch+1,self._epochs,batch+1,self._batch_number) start = self._batch_size*batch end = self._batch_size*batch + self._batch_size Xs = self._training_data[start:end,:] Ls = self._training_labels[start:end] delta_ws = self._back_prop(self._betas,Xs,Ls) means_sqrt_w = rms_incr*means_sqrt_w + rms_dcr*(delta_ws**2) 
rmsdelta = np.divide(delta_ws,np.sqrt(means_sqrt_w)) self._betas = self._betas - self._learning_rate*rmsdelta iter = (epoch - 1) * self._batch_number + batch if(early_stopping > 0): if (iter + 1) % validation_frequency == 0: this_validation_loss = self.calculate_zero_one_loss("validation") if this_validation_loss < best_validation_loss: # improve patience if loss improvement is good enough
if patience <= iter: done_looping = True break # for the graph self.error_testing[epoch*self._batch_number + batch] = self.calculate_zero_one_loss() print "Error in the testing datatest: {0}%".format(self.error_testing[epoch*self._batch_number + batch]*100) self.error_validation[epoch*self._batch_number + batch] = self.calculate_zero_one_loss("validation") self.error_training[epoch*self._batch_number + batch] = self.calculate_zero_one_loss("training") self.cost_output[epoch*self._batch_number + batch] = self.cost_function(self._betas, self._training_data, self._training_labels) # after traing dump the weights self.dump_weights() def dump_weights(self): #dumping only the hidden layer weights f = open(self._weights_filename, 'w') cPickle.dump(self._betas[0:self._D*self._hidden_layer_size],f) f.close() def visualize_receptive_fields(self): import sys beta_set = cPickle.load(open(self._weights_filename,"r")) beta_set = beta_set.reshape(self._D,self._hidden_layer_size).T #print beta_set.shape plt.figure(9) sys.stdout.write('Writing visualizations of receptive field to disk...') sys.stdout.flush() for i, beta in enumerate(beta_set): d = beta.reshape(28, 28) gray_map = plt.get_cmap('gray') plt.imshow(d, gray_map) plt.savefig('nn_receptive_fields/receptive_field'+str(i)+'.png', dpi=150) sys.stdout.write('.') sys.stdout.flush() plt.close() sys.stdout.write('.DONE - check directory nn_receptive_fields/ \n') sys.stdout.flush() def train(self,learning_rate=0.1,algorithm="msgd",**kwargs): """ Do some nifty training here :) """ e = self.cost_function(self._betas,self._training_data,self._training_labels) print "Inital total cost: ", e if(algorithm=="msgd"): #There should be a more elegant way to do this self._traing_mini_sgd(learning_rate,**kwargs) else: raise Exception("Algorithm not yet implemented, check later ;)") loss = self.calculate_zero_one_loss() print "After training error: ", loss e = self.cost_function(self._betas,self._training_data,self._training_labels) print "Final total cost: ", e def make_1_of_c_encoding(self,labels): # get one of C encoding matrix for each entry (=row) and class (=column) Y = np.zeros(shape=(labels.shape[0],self._output_size)) #labels = self._training_labels[start:end] for row, label in enumerate(labels): Y[row, label] = 1 return Y def cost_function(self,W,X,labels,reg=0.00001): """ reg: regularization term No weight decay term - lets leave it for later """ outputs = self._forward_prop(W,X,sigmoid)[-1] #take the last layer out sample_size = X.shape[0] y = self.make_1_of_c_encoding(labels) e1 = -np.sum(np.log(outputs)*y, axis=1) #error = e1.sum(axis=1) error = e1.sum()/sample_size + 0.5*reg*(np
if this_validation_loss < best_validation_loss * improvement_threshold: patience = max(patience, iter * patience_increase) best_params = np.copy(self._betas) best_validation_loss = this_validation_loss
conditional_block
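The _traing_mini_sgd loop in the record above scales every gradient by a running root mean square of its history before applying it, an RMSprop-style step built from means_sqrt_w, rms_incr and rms_dcr. A minimal sketch of that update rule with hypothetical names; the eps term is an addition here to guard against division by zero and is not in the original:

```python
import numpy as np

def rmsprop_step(betas, delta_ws, means_sqrt_w,
                 learning_rate=0.2, rms_incr=0.9, rms_dcr=0.1, eps=1e-8):
    # keep a decaying average of squared gradients, as in _traing_mini_sgd
    means_sqrt_w = rms_incr * means_sqrt_w + rms_dcr * delta_ws ** 2
    # scale the raw gradient by its running RMS before taking the step
    betas = betas - learning_rate * delta_ws / np.sqrt(means_sqrt_w + eps)
    return betas, means_sqrt_w
```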
neuralnet.py
_weight_no += self._hidden_layer_size * self._output_size + self._output_size self._betas = np.random.normal(0, 0.1, network_weight_no) #layer 2: hidden layer * no_classes + no_classes for the biases def _traing_mini_sgd(self,learning_rate=0.2,batch_number=10,epochs=1,rms_incr=0.9,rms_dcr=0.1): """ Training miniSGD """ self._batch_number = batch_number self._batch_size = self._N / self._batch_number self._epochs = epochs self._learning_rate = learning_rate print "Training using MiniBatch SGD with Hidden layer size = {3} :: learning_rate = {0} :: Batches = {1} :: Epochs = {2}".format(learning_rate,batch_number, epochs,self._hidden_layer_size) # Erro reporting: self.error_testing = np.zeros(self._epochs*self._batch_number) self.error_validation = np.zeros(self._epochs*self._batch_number) self.error_training = np.zeros(self._epochs*self._batch_number) self.cost_output= np.zeros(self._epochs*self._batch_number) patience = 5000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(self._batch_number, patience/2) # go through this many # minibatches before checking the network # on the validation set; in this case we # check every epoch #Mean Square weight for rmsprop means_sqrt_w = np.zeros(self._betas.size) early_stopping=0 done_looping = False for epoch in range(self._epochs): #early stopping if(done_looping): break for batch in range(self._batch_number): print "Trainining: Epoch {0}/{1} - Batch {2}/{3}".format(epoch+1,self._epochs,batch+1,self._batch_number) start = self._batch_size*batch end = self._batch_size*batch + self._batch_size Xs = self._training_data[start:end,:] Ls = self._training_labels[start:end] delta_ws = self._back_prop(self._betas,Xs,Ls) means_sqrt_w = rms_incr*means_sqrt_w + rms_dcr*(delta_ws**2) rmsdelta = np.divide(delta_ws,np.sqrt(means_sqrt_w)) self._betas = self._betas - self._learning_rate*rmsdelta iter = (epoch - 1) * self._batch_number + batch if(early_stopping > 0): if (iter + 1) % validation_frequency == 0: this_validation_loss = self.calculate_zero_one_loss("validation") if this_validation_loss < best_validation_loss: # improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * improvement_threshold: patience = max(patience, iter * patience_increase) best_params = np.copy(self._betas) best_validation_loss = this_validation_loss if patience <= iter: done_looping = True break # for the graph self.error_testing[epoch*self._batch_number + batch] = self.calculate_zero_one_loss() print "Error in the testing datatest: {0}%".format(self.error_testing[epoch*self._batch_number + batch]*100) self.error_validation[epoch*self._batch_number + batch] = self.calculate_zero_one_loss("validation") self.error_training[epoch*self._batch_number + batch] = self.calculate_zero_one_loss("training") self.cost_output[epoch*self._batch_number + batch] = self.cost_function(self._betas, self._training_data, self._training_labels) # after traing dump the weights self.dump_weights() def dump_weights(self): #dumping only the hidden layer weights f = open(self._weights_filename, 'w') cPickle.dump(self._betas[0:self._D*self._hidden_layer_size],f) f.close() def visualize_receptive_fields(self): import sys beta_set = cPickle.load(open(self._weights_filename,"r")) beta_set = beta_set.reshape(self._D,self._hidden_layer_size).T #print beta_set.shape plt.figure(9) 
sys.stdout.write('Writing visualizations of receptive field to disk...') sys.stdout.flush() for i, beta in enumerate(beta_set): d = beta.reshape(28, 28) gray_map = plt.get_cmap('gray') plt.imshow(d, gray_map) plt.savefig('nn_receptive_fields/receptive_field'+str(i)+'.png', dpi=150) sys.stdout.write('.') sys.stdout.flush() plt.close() sys.stdout.write('.DONE - check directory nn_receptive_fields/ \n') sys.stdout.flush() def train(self,learning_rate=0.1,algorithm="msgd",**kwargs): """ Do some nifty training here :) """ e = self.cost_function(self._betas,self._training_data,self._training_labels) print "Inital total cost: ", e if(algorithm=="msgd"): #There should be a more elegant way to do this self._traing_mini_sgd(learning_rate,**kwargs) else: raise Exception("Algorithm not yet implemented, check later ;)") loss = self.calculate_zero_one_loss() print "After training error: ", loss e = self.cost_function(self._betas,self._training_data,self._training_labels) print "Final total cost: ", e def make_1_of_c_encoding(self,labels): # get one of C encoding matrix for each entry (=row) and class (=column) Y = np.zeros(shape=(labels.shape[0],self._output_size)) #labels = self._training_labels[start:end] for row, label in enumerate(labels): Y[row, label] = 1 return Y def cost_function(self,W,X,labels,reg=0.00001): """ reg: regularization term No weight decay term - lets leave it for later """ outputs = self._forward_prop(W,X,sigmoid)[-1] #take the last layer out sample_size = X.shape[0] y = self.make_1_of_c_encoding(labels) e1 = -np.sum(np.log(outputs)*y, axis=1) #error = e1.sum(axis=1) error = e1.sum()/sample_size + 0.5*reg*(np.square(W)).sum() return error def _back_prop(self,W,X,labels,f=sigmoid,fprime=sigmoid_prime,lam=0.000001): """ Calculate the partial derivates of the cost function using backpropagation. Using a closure,can be used with more advanced methods of optimization lam: regularization term / weight decay """ Wl1,bl1,Wl2,bl2 = self._extract_weights(W) layers_outputs = self._forward_prop(W,X,f) y = self.make_1_of_c_encoding(labels) num_samples = X.shape[0] # layers_outputs[-1].shape[0] # Dot product return Numsamples (N) x Outputs (No CLasses) # Y is NxNo Clases # Layers output to # small_delta_nl = NxNo_Outputs big_delta = np.zeros(Wl2.size + bl2.size + Wl1.size + bl1.size) big_delta_wl1, big_delta_bl1, big_delta_wl2, big_delta_bl2 = self._extract_weights(big_delta) dE_dy = layers_outputs[-1] - y big_delta_bl2 = dE_dy.sum(axis=0) dE_dhl = dE_dy.dot(Wl2.T) small_delta_hl = dE_dhl*fprime(layers_outputs[-2]) big_delta_bl1 = small_delta_hl.sum(axis=0) big_delta_wl2 = np.dot(layers_outputs[-2].T,dE_dy) big_delta_wl1 = np.dot(X.T,small_delta_hl) big_delta_wl2 = np.true_divide(big_delta_wl2,num_samples) + lam*Wl2*2 big_delta_bl2 = np.true_divide(big_delta_bl2,num_samples) big_delta_wl1 = np.true_divide(big_delta_wl1,num_samples) + lam*Wl1*2 big_delta_bl1 = np.true_divide(big_delta_bl1,num_samples) #return big_delta return np.concatenate([big_delta_wl1.ravel(), big_delta_bl1, big_delta_wl2.ravel(), big_delta_bl2])
random_line_split
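The cost_function in the record above is a mean cross-entropy between the softmax outputs and a 1-of-C encoding of the labels, plus an L2 penalty of 0.5*reg*sum(W**2). A compact restatement of those two pieces, with hypothetical names:

```python
import numpy as np

def one_hot(labels, n_classes):
    # one row per sample, one column per class, as in make_1_of_c_encoding
    Y = np.zeros((labels.shape[0], n_classes))
    Y[np.arange(labels.shape[0]), labels] = 1
    return Y

def cross_entropy_cost(outputs, labels, W, reg=0.00001):
    # outputs: N x n_classes softmax probabilities from the forward pass
    Y = one_hot(labels, outputs.shape[1])
    data_term = -np.sum(np.log(outputs) * Y, axis=1).sum() / outputs.shape[0]
    return data_term + 0.5 * reg * np.square(W).sum()
```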
neuralnet.py
umsamples (N) x Outputs (No CLasses) # an array containing the size of the output of all of the laye of the neural net """ # Hidden layer DxHLS weights_L1,bias_L1,weights_L2,bias_L2 = self._extract_weights(W) # Output layer HLSxOUT # A_2 = N x HLS A_2 = transfer_func(np.dot(X,weights_L1) + bias_L1 ) # A_3 = N x Outputs - softmax A_3 = self.softmax(weights_L2,A_2,bias_L2) # output layer return [A_2,A_3] def __init__(self,training_set,testing_set,validation_set,no_neurons=300,transfer_func=None,optimization_func=None): """ yup """ self._transfer_func = transfer_func self._training_data = training_set[0] self._training_labels = training_set[1] self._testing_data = testing_set[0] self._testing_labels = testing_set[1] self._validation_data = validation_set[0] self._validation_labels = validation_set[1] self._hidden_layer_size = no_neurons self._weights_filename = "nn_receptive_fields.dump" #how mauch data do we have for training? self._N = self._training_data.shape[0] #Dimension of the data (basically, the number of inputs in the input layer) self._D = self._training_data.shape[1] # We assume that the labels are encoded in an array of Nx1 wich each entry an integer # defining the class starting at 0 #number of classes or basically, how many outputs are we going to have. self._output_size = max(self._training_labels) + 1 #initialize the weights for the layers: - we are going to work with one 1 hidden layer #layer 1: input * number neuros in the hidden layer + hidde_layer_size for the biases # first layer network_weight_no = self._D * self._hidden_layer_size + self._hidden_layer_size # second layer network_weight_no += self._hidden_layer_size * self._output_size + self._output_size self._betas = np.random.normal(0, 0.1, network_weight_no) #layer 2: hidden layer * no_classes + no_classes for the biases def _traing_mini_sgd(self,learning_rate=0.2,batch_number=10,epochs=1,rms_incr=0.9,rms_dcr=0.1): """ Training miniSGD """ self._batch_number = batch_number self._batch_size = self._N / self._batch_number self._epochs = epochs self._learning_rate = learning_rate print "Training using MiniBatch SGD with Hidden layer size = {3} :: learning_rate = {0} :: Batches = {1} :: Epochs = {2}".format(learning_rate,batch_number, epochs,self._hidden_layer_size) # Erro reporting: self.error_testing = np.zeros(self._epochs*self._batch_number) self.error_validation = np.zeros(self._epochs*self._batch_number) self.error_training = np.zeros(self._epochs*self._batch_number) self.cost_output= np.zeros(self._epochs*self._batch_number) patience = 5000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(self._batch_number, patience/2) # go through this many # minibatches before checking the network # on the validation set; in this case we # check every epoch #Mean Square weight for rmsprop means_sqrt_w = np.zeros(self._betas.size) early_stopping=0 done_looping = False for epoch in range(self._epochs): #early stopping if(done_looping): break for batch in range(self._batch_number): print "Trainining: Epoch {0}/{1} - Batch {2}/{3}".format(epoch+1,self._epochs,batch+1,self._batch_number) start = self._batch_size*batch end = self._batch_size*batch + self._batch_size Xs = self._training_data[start:end,:] Ls = self._training_labels[start:end] delta_ws = self._back_prop(self._betas,Xs,Ls) means_sqrt_w = rms_incr*means_sqrt_w + rms_dcr*(delta_ws**2) 
rmsdelta = np.divide(delta_ws,np.sqrt(means_sqrt_w)) self._betas = self._betas - self._learning_rate*rmsdelta iter = (epoch - 1) * self._batch_number + batch if(early_stopping > 0): if (iter + 1) % validation_frequency == 0: this_validation_loss = self.calculate_zero_one_loss("validation") if this_validation_loss < best_validation_loss: # improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * improvement_threshold: patience = max(patience, iter * patience_increase) best_params = np.copy(self._betas) best_validation_loss = this_validation_loss if patience <= iter: done_looping = True break # for the graph self.error_testing[epoch*self._batch_number + batch] = self.calculate_zero_one_loss() print "Error in the testing datatest: {0}%".format(self.error_testing[epoch*self._batch_number + batch]*100) self.error_validation[epoch*self._batch_number + batch] = self.calculate_zero_one_loss("validation") self.error_training[epoch*self._batch_number + batch] = self.calculate_zero_one_loss("training") self.cost_output[epoch*self._batch_number + batch] = self.cost_function(self._betas, self._training_data, self._training_labels) # after traing dump the weights self.dump_weights() def dump_weights(self): #dumping only the hidden layer weights
def visualize_receptive_fields(self): import sys beta_set = cPickle.load(open(self._weights_filename,"r")) beta_set = beta_set.reshape(self._D,self._hidden_layer_size).T #print beta_set.shape plt.figure(9) sys.stdout.write('Writing visualizations of receptive field to disk...') sys.stdout.flush() for i, beta in enumerate(beta_set): d = beta.reshape(28, 28) gray_map = plt.get_cmap('gray') plt.imshow(d, gray_map) plt.savefig('nn_receptive_fields/receptive_field'+str(i)+'.png', dpi=150) sys.stdout.write('.') sys.stdout.flush() plt.close() sys.stdout.write('.DONE - check directory nn_receptive_fields/ \n') sys.stdout.flush() def train(self,learning_rate=0.1,algorithm="msgd",**kwargs): """ Do some nifty training here :) """ e = self.cost_function(self._betas,self._training_data,self._training_labels) print "Inital total cost: ", e if(algorithm=="msgd"): #There should be a more elegant way to do this self._traing_mini_sgd(learning_rate,**kwargs) else: raise Exception("Algorithm not yet implemented, check later ;)") loss = self.calculate_zero_one_loss() print "After training error: ", loss e = self.cost_function(self._betas,self._training_data,self._training_labels) print "Final total cost: ", e def make_1_of_c_encoding(self,labels): # get one of C encoding matrix for each entry (=row) and class (=column) Y = np.zeros(shape=(labels.shape[0],self._output_size)) #labels = self._training_labels[start:end] for row, label in enumerate(labels): Y[row, label] = 1 return Y def cost_function(self,W,X,labels,reg=0.00001): """ reg: regularization term No weight decay term - lets leave it for later """ outputs = self._forward_prop(W,X,sigmoid)[-1] #take the last layer out sample_size = X.shape[0] y = self.make_1_of_c_encoding(labels) e1 = -np.sum(np.log(outputs)*y, axis=1) #error = e1.sum(axis=1) error = e1.sum()/sample_size + 0.5*reg*(
f = open(self._weights_filename, 'w') cPickle.dump(self._betas[0:self._D*self._hidden_layer_size],f) f.close()
identifier_body
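The training loop in the record above also sketches patience-based early stopping: a best validation loss is tracked, patience is extended only when the loss improves by more than improvement_threshold, and looping stops once the iteration count exceeds the patience. A small sketch of just that bookkeeping, with hypothetical names:

```python
def update_patience(iter_no, this_loss, best_loss, patience,
                    patience_increase=2, improvement_threshold=0.995):
    """Return (stop, best_loss, patience) after one validation check."""
    if this_loss < best_loss:
        # only a sufficiently large improvement buys more patience
        if this_loss < best_loss * improvement_threshold:
            patience = max(patience, iter_no * patience_increase)
        best_loss = this_loss
    return patience <= iter_no, best_loss, patience
```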
neuralnet.py
(self,W): """ This will extract the weights from we big W array. in a 1-hidden layer network. this can be easily generalized. """ wl1_size = self._D*self._hidden_layer_size bl1_size = self._hidden_layer_size wl2_size = self._hidden_layer_size*self._output_size bl2_size = self._output_size weights_L1 = W[0:wl1_size].reshape((self._D,self._hidden_layer_size)) bias_L1 = W[wl1_size:wl1_size+bl1_size] start_l2 = wl1_size+bl1_size weights_L2 = W[start_l2: start_l2 + wl2_size].reshape((self._hidden_layer_size,self._output_size)) bias_L2 = W[start_l2 + wl2_size : start_l2 + wl2_size + bl2_size] return weights_L1,bias_L1,weights_L2,bias_L2 def _forward_prop(self,W,X,transfer_func=sigmoid): """ Return the output of the net a Numsamples (N) x Outputs (No CLasses) # an array containing the size of the output of all of the laye of the neural net """ # Hidden layer DxHLS weights_L1,bias_L1,weights_L2,bias_L2 = self._extract_weights(W) # Output layer HLSxOUT # A_2 = N x HLS A_2 = transfer_func(np.dot(X,weights_L1) + bias_L1 ) # A_3 = N x Outputs - softmax A_3 = self.softmax(weights_L2,A_2,bias_L2) # output layer return [A_2,A_3] def __init__(self,training_set,testing_set,validation_set,no_neurons=300,transfer_func=None,optimization_func=None): """ yup """ self._transfer_func = transfer_func self._training_data = training_set[0] self._training_labels = training_set[1] self._testing_data = testing_set[0] self._testing_labels = testing_set[1] self._validation_data = validation_set[0] self._validation_labels = validation_set[1] self._hidden_layer_size = no_neurons self._weights_filename = "nn_receptive_fields.dump" #how mauch data do we have for training? self._N = self._training_data.shape[0] #Dimension of the data (basically, the number of inputs in the input layer) self._D = self._training_data.shape[1] # We assume that the labels are encoded in an array of Nx1 wich each entry an integer # defining the class starting at 0 #number of classes or basically, how many outputs are we going to have. 
self._output_size = max(self._training_labels) + 1 #initialize the weights for the layers: - we are going to work with one 1 hidden layer #layer 1: input * number neuros in the hidden layer + hidde_layer_size for the biases # first layer network_weight_no = self._D * self._hidden_layer_size + self._hidden_layer_size # second layer network_weight_no += self._hidden_layer_size * self._output_size + self._output_size self._betas = np.random.normal(0, 0.1, network_weight_no) #layer 2: hidden layer * no_classes + no_classes for the biases def _traing_mini_sgd(self,learning_rate=0.2,batch_number=10,epochs=1,rms_incr=0.9,rms_dcr=0.1): """ Training miniSGD """ self._batch_number = batch_number self._batch_size = self._N / self._batch_number self._epochs = epochs self._learning_rate = learning_rate print "Training using MiniBatch SGD with Hidden layer size = {3} :: learning_rate = {0} :: Batches = {1} :: Epochs = {2}".format(learning_rate,batch_number, epochs,self._hidden_layer_size) # Erro reporting: self.error_testing = np.zeros(self._epochs*self._batch_number) self.error_validation = np.zeros(self._epochs*self._batch_number) self.error_training = np.zeros(self._epochs*self._batch_number) self.cost_output= np.zeros(self._epochs*self._batch_number) patience = 5000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(self._batch_number, patience/2) # go through this many # minibatches before checking the network # on the validation set; in this case we # check every epoch #Mean Square weight for rmsprop means_sqrt_w = np.zeros(self._betas.size) early_stopping=0 done_looping = False for epoch in range(self._epochs): #early stopping if(done_looping): break for batch in range(self._batch_number): print "Trainining: Epoch {0}/{1} - Batch {2}/{3}".format(epoch+1,self._epochs,batch+1,self._batch_number) start = self._batch_size*batch end = self._batch_size*batch + self._batch_size Xs = self._training_data[start:end,:] Ls = self._training_labels[start:end] delta_ws = self._back_prop(self._betas,Xs,Ls) means_sqrt_w = rms_incr*means_sqrt_w + rms_dcr*(delta_ws**2) rmsdelta = np.divide(delta_ws,np.sqrt(means_sqrt_w)) self._betas = self._betas - self._learning_rate*rmsdelta iter = (epoch - 1) * self._batch_number + batch if(early_stopping > 0): if (iter + 1) % validation_frequency == 0: this_validation_loss = self.calculate_zero_one_loss("validation") if this_validation_loss < best_validation_loss: # improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * improvement_threshold: patience = max(patience, iter * patience_increase) best_params = np.copy(self._betas) best_validation_loss = this_validation_loss if patience <= iter: done_looping = True break # for the graph self.error_testing[epoch*self._batch_number + batch] = self.calculate_zero_one_loss() print "Error in the testing datatest: {0}%".format(self.error_testing[epoch*self._batch_number + batch]*100) self.error_validation[epoch*self._batch_number + batch] = self.calculate_zero_one_loss("validation") self.error_training[epoch*self._batch_number + batch] = self.calculate_zero_one_loss("training") self.cost_output[epoch*self._batch_number + batch] = self.cost_function(self._betas, self._training_data, self._training_labels) # after traing dump the weights self.dump_weights() def dump_weights(self): #dumping only the hidden layer 
weights f = open(self._weights_filename, 'w') cPickle.dump(self._betas[0:self._D*self._hidden_layer_size],f) f.close() def visualize_receptive_fields(self): import sys beta_set = cPickle.load(open(self._weights_filename,"r")) beta_set = beta_set.reshape(self._D,self._hidden_layer_size).T #print beta_set.shape plt.figure(9) sys.stdout.write('Writing visualizations of receptive field to disk...') sys.stdout.flush() for i, beta in enumerate(beta_set): d = beta.reshape(28, 28) gray_map = plt.get_cmap('gray') plt.imshow(d, gray_map) plt.savefig('nn_receptive_fields/receptive_field'+str(i)+'.png', dpi=150) sys.stdout.write('.') sys.stdout.flush() plt.close() sys.stdout.write('.DONE - check directory nn_receptive_fields/ \n') sys.stdout.flush() def train(self,learning_rate=0.1,algorithm="msgd",**kwargs): """ Do some nifty training here :) """ e = self.cost_function(self._betas,self._training_data,self._training_labels) print "Inital total cost: ", e if(algorithm=="msgd"): #There should be a more elegant way to do this self._traing_mini_sgd(learning_rate,**kwargs) else: raise Exception("Algorithm not yet implemented, check later ;)") loss = self.calculate_zero_one_loss() print "After training error
_extract_weights
identifier_name
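All layer weights and biases in this network live in one flat vector (self._betas); the _extract_weights method in the record above slices it back into the two weight matrices and two bias vectors. A minimal sketch of that layout, with hypothetical dimension names:

```python
import numpy as np

def unpack_weights(W, n_inputs, n_hidden, n_outputs):
    # flat layout: [W1 (D*H) | b1 (H) | W2 (H*C) | b2 (C)], as in _extract_weights
    w1_size, b1_size = n_inputs * n_hidden, n_hidden
    w2_size, b2_size = n_hidden * n_outputs, n_outputs
    W1 = W[:w1_size].reshape(n_inputs, n_hidden)
    b1 = W[w1_size:w1_size + b1_size]
    start = w1_size + b1_size
    W2 = W[start:start + w2_size].reshape(n_hidden, n_outputs)
    b2 = W[start + w2_size:start + w2_size + b2_size]
    return W1, b1, W2, b2
```

The total length D*H + H + H*C + C is exactly the network_weight_no used to initialise self._betas.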
fiber.rs
. /// /// This kind of waiting is more convenient than going into a loop and periodically checking the status; /// however, it works only if the fiber was created with [fiber.new()](#method.new) and was made joinable with /// [fiber.set_joinable()](#method.set_joinable). /// /// The fiber must not be detached (See also: [fiber.set_joinable()](#method.set_joinable)). /// /// Return: fiber function return code pub fn join(&self) -> i32 { unsafe { ffi::fiber_join(self.inner) } } /// Set fiber to be joinable (false by default). /// /// - `is_joinable` - status to set pub fn set_joinable(&mut self, is_joinable: bool) { unsafe { ffi::fiber_set_joinable(self.inner, is_joinable) } } /// Cancel a fiber. (set `FIBER_IS_CANCELLED` flag) /// /// Running and suspended fibers can be cancelled. After a fiber has been cancelled, attempts to operate on it will /// cause error: the fiber is dead. But a dead fiber can still report its id and status. /// Possible errors: cancel is not permitted for the specified fiber object. /// /// If target fiber's flag `FIBER_IS_CANCELLABLE` set, then it would be woken up (maybe prematurely). /// Then current fiber yields until the target fiber is dead (or is woken up by /// [fiber.wakeup()](#method.wakeup)). pub fn cancel(&mut self) { unsafe { ffi::fiber_cancel(self.inner) } } } /// Make it possible or not possible to wakeup the current /// fiber immediately when it's cancelled. /// /// - `is_cancellable` - status to set /// /// Returns previous state. pub fn set_cancellable(is_cancellable: bool) -> bool { unsafe { ffi::fiber_set_cancellable(is_cancellable) } } /// Check current fiber for cancellation (it must be checked manually). pub fn is_cancelled() -> bool { unsafe { ffi::fiber_is_cancelled() } } /// Put the current fiber to sleep for at least `time` seconds. /// /// Yield control to the scheduler and sleep for the specified number of seconds. /// Only the current fiber can be made to sleep. /// /// - `time` - time to sleep /// /// > **Note:** this is a cancellation point (See also: [is_cancelled()](fn.is_cancelled.html)) pub fn sleep(time: f64) { unsafe { ffi::fiber_sleep(time) } } /// Report loop begin time as double (cheap). pub fn time() -> f64 { unsafe { ffi::fiber_time() } } /// Report loop begin time as 64-bit int. pub fn time64() -> u64 { unsafe { ffi::fiber_time64() } } /// Report loop begin time as double (cheap). Uses monotonic clock. pub fn clock() -> f64 { unsafe { ffi::fiber_clock() } } /// Report loop begin time as 64-bit int. Uses monotonic clock. pub fn clock64() -> u64 { unsafe { ffi::fiber_clock64() } } /// Yield control to the scheduler. /// /// Return control to another fiber and wait until it'll be woken. Equivalent to `fiber.sleep(0)`. /// /// See also: [Fiber::wakeup()](struct.Fiber.html#method.wakeup) pub fn fiber_yield() { unsafe { ffi::fiber_yield() } } /// Reschedule fiber to end of event loop cycle. pub fn reschedule() { unsafe { ffi::fiber_reschedule() } } /// Fiber attributes container pub struct FiberAttr { inner: *mut ffi::FiberAttr, } impl FiberAttr { /// Create a new fiber attribute container and initialize it with default parameters. /// Can be used for many fibers creation, corresponding fibers will not take ownership. /// /// This is safe to drop `FiberAttr` value when fibers created with this attribute still exist. pub fn new() -> Self { FiberAttr { inner: unsafe { ffi::fiber_attr_new() }, } } /// Get stack size from the fiber attribute. 
/// /// Returns: stack size pub fn stack_size(&self) -> usize { unsafe { ffi::fiber_attr_getstacksize(self.inner) } } ///Set stack size for the fiber attribute. /// /// - `stack_size` - stack size for new fibers pub fn set_stack_size(&mut self, stack_size: usize) -> Result<(), Error> { if unsafe { ffi::fiber_attr_setstacksize(self.inner, stack_size) } < 0 { Err(TarantoolError::last().into()) } else { Ok(()) } } } impl Drop for FiberAttr { fn drop(&mut self) { unsafe { ffi::fiber_attr_delete(self.inner) } } } /// Conditional variable for cooperative multitasking (fibers). /// /// A cond (short for "condition variable") is a synchronization primitive /// that allow fibers to yield until some predicate is satisfied. Fiber /// conditions have two basic operations - `wait()` and `signal()`. [cond.wait()](#method.wait) /// suspends execution of fiber (i.e. yields) until [cond.signal()](#method.signal) is called. /// /// Example: /// /// ```rust /// use tarantool::fiber::Cond; /// let cond = fiber.cond(); /// cond.wait(); /// ``` /// /// The job will hang because [cond.wait()](#method.wait) – will go to sleep until the condition variable changes. /// /// ```rust /// // Call from another fiber: /// cond.signal(); /// ``` /// /// The waiting stopped, and the [cond.wait()](#method.wait) function returned true. /// /// This example depended on the use of a global conditional variable with the arbitrary name cond. /// In real life, programmers would make sure to use different conditional variable names for different applications. /// /// Unlike `pthread_cond`, [Cond]() doesn't require mutex/latch wrapping. pub struct Cond { inner: *mut ffi::FiberCond, } /// - call [Cond::new()](#method.new) to create a named condition variable, which will be called `cond` for examples in this section. /// - call [cond.wait()](#method.wait) to make a fiber wait for a signal via a condition variable. /// - call [cond.signal()](#method.signal) to send a signal to wake up a single fiber that has executed [cond.wait()](#method.wait). /// - call [cond.broadcast()](#method.broadcast) to send a signal to all fibers that have executed [cond.wait()](#method.wait). impl Cond { /// Instantiate a new fiber cond object. pub fn new() -> Self { Cond { inner: unsafe { ffi::fiber_cond_new() }, } } /// Wake one fiber waiting for the cond. /// Does nothing if no one is waiting. Does not yield. pub fn signal(&self) { unsafe { ffi::fiber_cond_signal(self.inner) } } /// Wake up all fibers waiting for the cond. /// Does not yield. pub fn broadcast(&self) { unsafe { ffi::fiber_cond_broadcast(self.inner) } } /// Suspend the execution of the current fiber (i.e. yield) until [signal()](#method.signal) is called. /// /// Like pthread_cond, FiberCond can issue spurious wake ups caused by explicit /// [Fiber::wakeup()](struct.Fiber.html#method.wakeup) or [Fiber::cancel()](struct.Fiber.html#method.cancel) /// calls. It is highly recommended to wrap calls to this function into a loop /// and check an actual predicate and `fiber_testcancel()` on every iteration. /// /// - `timeout` - timeout in seconds /// /// Returns: /// - `true` on [signal()](#method.signal) call or a spurious wake up. /// - `false` on timeout, diag is set to `TimedOut` pub fn wait_timeout(&self, timeout: Duration) -> bool { !(unsafe { ffi::fiber_cond_wait_timeout(self.inner, timeout.as_secs_f64()) } < 0) } /// Shortcut for [wait_timeout()](#method.wait_timeout). 
pub fn wait(&self) -> bool { !(unsafe { ffi::fiber_cond_wait(self.inner) } < 0) } } impl Drop for Cond { fn drop(&mut self) { unsafe { ffi::fiber_cond_delete(self.inner) } } } /// A lock for cooperative multitasking environment pub struct Latch { inner: *mut ffi::Latch, } impl Latch { /// Allocate and initialize the new latch. pub fn new() -> Self { Latch { inner: unsafe { ffi::box_latch_new() }, } } /// Lock a latch. Waits indefinitely until the current fiber can gain access to the latch. pub fn lock(&self) -> LatchGuard { unsafe { ffi::box_latch_lock(self.inner) }; LatchGuard { latch_inner: self.inner, } } /// Try to lock a latch. Return immediately if the latch is locked. /// /// Returns: /// - `Some` - success
/// - `None` - the latch is locked.
random_line_split
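The Cond documented in the record above is a fiber-level condition variable: wait() yields the current fiber until another fiber calls signal() or broadcast(), and the docs recommend re-checking the predicate because wake-ups can be spurious. The sketch below shows the same wait/signal protocol using Python's threading.Condition purely as an analogy, not the Tarantool API; note that, unlike the fiber Cond, Python's Condition does require holding its lock around wait() and notify():

```python
import threading

cond = threading.Condition()
ready = False                      # the predicate guarded by the condition

def waiter():
    with cond:
        while not ready:           # re-check the predicate: wake-ups may be spurious
            cond.wait()            # roughly Cond::wait(): block until notified
        print("woken up")

def signaller():
    global ready
    with cond:
        ready = True
        cond.notify()              # roughly Cond::signal(); notify_all() ~ broadcast()

t = threading.Thread(target=waiter)
t.start()
signaller()
t.join()
```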
fiber.rs
fiber.cancel()](struct.Fiber.html#method.cancel) sends an asynchronous wakeup event to the fiber, /// and [is_cancelled()](fn.is_cancelled.html) is checked whenever such a wakeup event occurs. /// /// Example: /// ```rust /// use tarantool::fiber::Fiber; /// let mut fiber = Fiber::new("test_fiber", &mut |_| { /// println!("I'm a fiber"); /// 0 /// }); /// fiber.start(()); /// println!("Fiber started") /// ``` /// /// ```text /// I'm a fiber /// Fiber started /// ``` pub struct Fiber<'a, T: 'a> { inner: *mut ffi::Fiber, callback: *mut c_void, phantom: PhantomData<&'a T>, } impl<'a, T> Fiber<'a, T> { /// Create a new fiber. /// /// Takes a fiber from fiber cache, if it's not empty. Can fail only if there is not enough memory for /// the fiber structure or fiber stack. /// /// The created fiber automatically returns itself to the fiber cache when its `main` function /// completes. The initial fiber state is **suspended**. /// /// Ordinarily [Fiber::new()](#method.new) is used in conjunction with [fiber.set_joinable()](#method.set_joinable) /// and [fiber.join()](#method.join) /// /// - `name` - string with fiber name /// - `callback` - function for run inside fiber /// /// See also: [fiber.start()](#method.start) pub fn new<F>(name: &str, callback: &mut F) -> Self where F: FnMut(Box<T>) -> i32, { let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) }; Self { inner: unsafe { ffi::fiber_new(CString::new(name).unwrap().into_raw(), trampoline) }, callback: callback_ptr, phantom: PhantomData, } } /// Create a new fiber with defined attributes. /// /// Can fail only if there is not enough memory for the fiber structure or fiber stack. /// /// The created fiber automatically returns itself to the fiber cache if has default stack size /// when its `main` function completes. The initial fiber state is **suspended**. /// /// - `name` - string with fiber name /// - `fiber_attr` - fiber attributes /// - `callback` - function for run inside fiber /// /// See also: [fiber.start()](#method.start) pub fn new_with_attr<F>(name: &str, attr: &FiberAttr, callback: &mut F) -> Self where F: FnMut(Box<T>) -> i32, { let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) }; Self { inner: unsafe { ffi::fiber_new_ex( CString::new(name).unwrap().into_raw(), attr.inner, trampoline, ) }, callback: callback_ptr, phantom: PhantomData, } } /// Start execution of created fiber. /// /// - `arg` - argument to start the fiber with /// /// See also: [fiber.new()](#method.new) pub fn start(&mut self, arg: T) { unsafe { ffi::fiber_start(self.inner, self.callback, Box::into_raw(Box::<T>::new(arg))); } } /// Interrupt a synchronous wait of a fiber. pub fn wakeup(
{ unsafe { ffi::fiber_wakeup(self.inner) } } /// Wait until the fiber is dead and then move its execution status to the caller. /// /// “Join” a joinable fiber. That is, let the fiber’s function run and wait until the fiber’s status is **dead** /// (normally a status becomes **dead** when the function execution finishes). Joining will cause a yield, /// therefore, if the fiber is currently in a **suspended** state, execution of its fiber function will resume. /// /// This kind of waiting is more convenient than going into a loop and periodically checking the status; /// however, it works only if the fiber was created with [fiber.new()](#method.new) and was made joinable with /// [fiber.set_joinable()](#method.set_joinable). /// /// The fiber must not be detached (See also: [fiber.set_joinable()](#method.set_joinable)). /// /// Return: fiber function return code pub fn join(&self) -> i32 { unsafe { ffi::fiber_join(self.inner) } } /// Set fiber to be joinable (false by default). /// /// - `is_joinable` - status to set pub fn set_joinable(&mut self, is_joinable: bool) { unsafe { ffi::fiber_set_joinable(self.inner, is_joinable) } } /// Cancel a fiber. (set `FIBER_IS_CANCELLED` flag) /// /// Running and suspended fibers can be cancelled. After a fiber has been cancelled, attempts to operate on it will /// cause error: the fiber is dead. But a dead fiber can still report its id and status. /// Possible errors: cancel is not permitted for the specified fiber object. /// /// If target fiber's flag `FIBER_IS_CANCELLABLE` set, then it would be woken up (maybe prematurely). /// Then current fiber yields until the target fiber is dead (or is woken up by /// [fiber.wakeup()](#method.wakeup)). pub fn cancel(&mut self) { unsafe { ffi::fiber_cancel(self.inner) } } } /// Make it possible or not possible to wakeup the current /// fiber immediately when it's cancelled. /// /// - `is_cancellable` - status to set /// /// Returns previous state. pub fn set_cancellable(is_cancellable: bool) -> bool { unsafe { ffi::fiber_set_cancellable(is_cancellable) } } /// Check current fiber for cancellation (it must be checked manually). pub fn is_cancelled() -> bool { unsafe { ffi::fiber_is_cancelled() } } /// Put the current fiber to sleep for at least `time` seconds. /// /// Yield control to the scheduler and sleep for the specified number of seconds. /// Only the current fiber can be made to sleep. /// /// - `time` - time to sleep /// /// > **Note:** this is a cancellation point (See also: [is_cancelled()](fn.is_cancelled.html)) pub fn sleep(time: f64) { unsafe { ffi::fiber_sleep(time) } } /// Report loop begin time as double (cheap). pub fn time() -> f64 { unsafe { ffi::fiber_time() } } /// Report loop begin time as 64-bit int. pub fn time64() -> u64 { unsafe { ffi::fiber_time64() } } /// Report loop begin time as double (cheap). Uses monotonic clock. pub fn clock() -> f64 { unsafe { ffi::fiber_clock() } } /// Report loop begin time as 64-bit int. Uses monotonic clock. pub fn clock64() -> u64 { unsafe { ffi::fiber_clock64() } } /// Yield control to the scheduler. /// /// Return control to another fiber and wait until it'll be woken. Equivalent to `fiber.sleep(0)`. /// /// See also: [Fiber::wakeup()](struct.Fiber.html#method.wakeup) pub fn fiber_yield() { unsafe { ffi::fiber_yield() } } /// Reschedule fiber to end of event loop cycle. 
pub fn reschedule() { unsafe { ffi::fiber_reschedule() } } /// Fiber attributes container pub struct FiberAttr { inner: *mut ffi::FiberAttr, } impl FiberAttr { /// Create a new fiber attribute container and initialize it with default parameters. /// Can be used for many fibers creation, corresponding fibers will not take ownership. /// /// This is safe to drop `FiberAttr` value when fibers created with this attribute still exist. pub fn new() -> Self { FiberAttr { inner: unsafe { ffi::fiber_attr_new() }, } } /// Get stack size from the fiber attribute. /// /// Returns: stack size pub fn stack_size(&self) -> usize { unsafe { ffi::fiber_attr_getstacksize(self.inner) } } ///Set stack size for the fiber attribute. /// /// - `stack_size` - stack size for new fibers pub fn set_stack_size(&mut self, stack_size: usize) -> Result<(), Error> { if unsafe { ffi::fiber_attr_setstacksize(self.inner, stack_size) } < 0 { Err(TarantoolError::last().into()) } else { Ok(()) } } } impl Drop for FiberAttr { fn drop(&mut self) { unsafe { ffi::fiber_attr_delete(self.inner) } } } /// Conditional variable for cooperative multitasking (fibers). /// /// A cond (short for "condition variable") is a synchronization primitive /// that allow fibers to yield until some predicate is satisfied. Fiber /// conditions have two basic operations - `wait()` and `signal()`. [cond.wait()](#method.wait
&self)
identifier_name
fiber.rs
able). /// /// The fiber must not be detached (See also: [fiber.set_joinable()](#method.set_joinable)). /// /// Return: fiber function return code pub fn join(&self) -> i32 { unsafe { ffi::fiber_join(self.inner) } } /// Set fiber to be joinable (false by default). /// /// - `is_joinable` - status to set pub fn set_joinable(&mut self, is_joinable: bool) { unsafe { ffi::fiber_set_joinable(self.inner, is_joinable) } } /// Cancel a fiber. (set `FIBER_IS_CANCELLED` flag) /// /// Running and suspended fibers can be cancelled. After a fiber has been cancelled, attempts to operate on it will /// cause error: the fiber is dead. But a dead fiber can still report its id and status. /// Possible errors: cancel is not permitted for the specified fiber object. /// /// If target fiber's flag `FIBER_IS_CANCELLABLE` set, then it would be woken up (maybe prematurely). /// Then current fiber yields until the target fiber is dead (or is woken up by /// [fiber.wakeup()](#method.wakeup)). pub fn cancel(&mut self) { unsafe { ffi::fiber_cancel(self.inner) } } } /// Make it possible or not possible to wakeup the current /// fiber immediately when it's cancelled. /// /// - `is_cancellable` - status to set /// /// Returns previous state. pub fn set_cancellable(is_cancellable: bool) -> bool { unsafe { ffi::fiber_set_cancellable(is_cancellable) } } /// Check current fiber for cancellation (it must be checked manually). pub fn is_cancelled() -> bool { unsafe { ffi::fiber_is_cancelled() } } /// Put the current fiber to sleep for at least `time` seconds. /// /// Yield control to the scheduler and sleep for the specified number of seconds. /// Only the current fiber can be made to sleep. /// /// - `time` - time to sleep /// /// > **Note:** this is a cancellation point (See also: [is_cancelled()](fn.is_cancelled.html)) pub fn sleep(time: f64) { unsafe { ffi::fiber_sleep(time) } } /// Report loop begin time as double (cheap). pub fn time() -> f64 { unsafe { ffi::fiber_time() } } /// Report loop begin time as 64-bit int. pub fn time64() -> u64 { unsafe { ffi::fiber_time64() } } /// Report loop begin time as double (cheap). Uses monotonic clock. pub fn clock() -> f64 { unsafe { ffi::fiber_clock() } } /// Report loop begin time as 64-bit int. Uses monotonic clock. pub fn clock64() -> u64 { unsafe { ffi::fiber_clock64() } } /// Yield control to the scheduler. /// /// Return control to another fiber and wait until it'll be woken. Equivalent to `fiber.sleep(0)`. /// /// See also: [Fiber::wakeup()](struct.Fiber.html#method.wakeup) pub fn fiber_yield() { unsafe { ffi::fiber_yield() } } /// Reschedule fiber to end of event loop cycle. pub fn reschedule() { unsafe { ffi::fiber_reschedule() } } /// Fiber attributes container pub struct FiberAttr { inner: *mut ffi::FiberAttr, } impl FiberAttr { /// Create a new fiber attribute container and initialize it with default parameters. /// Can be used for many fibers creation, corresponding fibers will not take ownership. /// /// This is safe to drop `FiberAttr` value when fibers created with this attribute still exist. pub fn new() -> Self { FiberAttr { inner: unsafe { ffi::fiber_attr_new() }, } } /// Get stack size from the fiber attribute. /// /// Returns: stack size pub fn stack_size(&self) -> usize { unsafe { ffi::fiber_attr_getstacksize(self.inner) } } ///Set stack size for the fiber attribute. 
/// /// - `stack_size` - stack size for new fibers pub fn set_stack_size(&mut self, stack_size: usize) -> Result<(), Error> { if unsafe { ffi::fiber_attr_setstacksize(self.inner, stack_size) } < 0 { Err(TarantoolError::last().into()) } else { Ok(()) } } } impl Drop for FiberAttr { fn drop(&mut self) { unsafe { ffi::fiber_attr_delete(self.inner) } } } /// Conditional variable for cooperative multitasking (fibers). /// /// A cond (short for "condition variable") is a synchronization primitive /// that allow fibers to yield until some predicate is satisfied. Fiber /// conditions have two basic operations - `wait()` and `signal()`. [cond.wait()](#method.wait) /// suspends execution of fiber (i.e. yields) until [cond.signal()](#method.signal) is called. /// /// Example: /// /// ```rust /// use tarantool::fiber::Cond; /// let cond = fiber.cond(); /// cond.wait(); /// ``` /// /// The job will hang because [cond.wait()](#method.wait) – will go to sleep until the condition variable changes. /// /// ```rust /// // Call from another fiber: /// cond.signal(); /// ``` /// /// The waiting stopped, and the [cond.wait()](#method.wait) function returned true. /// /// This example depended on the use of a global conditional variable with the arbitrary name cond. /// In real life, programmers would make sure to use different conditional variable names for different applications. /// /// Unlike `pthread_cond`, [Cond]() doesn't require mutex/latch wrapping. pub struct Cond { inner: *mut ffi::FiberCond, } /// - call [Cond::new()](#method.new) to create a named condition variable, which will be called `cond` for examples in this section. /// - call [cond.wait()](#method.wait) to make a fiber wait for a signal via a condition variable. /// - call [cond.signal()](#method.signal) to send a signal to wake up a single fiber that has executed [cond.wait()](#method.wait). /// - call [cond.broadcast()](#method.broadcast) to send a signal to all fibers that have executed [cond.wait()](#method.wait). impl Cond { /// Instantiate a new fiber cond object. pub fn new() -> Self { Cond { inner: unsafe { ffi::fiber_cond_new() }, } } /// Wake one fiber waiting for the cond. /// Does nothing if no one is waiting. Does not yield. pub fn signal(&self) { unsafe { ffi::fiber_cond_signal(self.inner) } } /// Wake up all fibers waiting for the cond. /// Does not yield. pub fn broadcast(&self) { unsafe { ffi::fiber_cond_broadcast(self.inner) } } /// Suspend the execution of the current fiber (i.e. yield) until [signal()](#method.signal) is called. /// /// Like pthread_cond, FiberCond can issue spurious wake ups caused by explicit /// [Fiber::wakeup()](struct.Fiber.html#method.wakeup) or [Fiber::cancel()](struct.Fiber.html#method.cancel) /// calls. It is highly recommended to wrap calls to this function into a loop /// and check an actual predicate and `fiber_testcancel()` on every iteration. /// /// - `timeout` - timeout in seconds /// /// Returns: /// - `true` on [signal()](#method.signal) call or a spurious wake up. /// - `false` on timeout, diag is set to `TimedOut` pub fn wait_timeout(&self, timeout: Duration) -> bool { !(unsafe { ffi::fiber_cond_wait_timeout(self.inner, timeout.as_secs_f64()) } < 0) } /// Shortcut for [wait_timeout()](#method.wait_timeout). 
pub fn wait(&self) -> bool { !(unsafe { ffi::fiber_cond_wait(self.inner) } < 0) } } impl Drop for Cond { fn drop(&mut self) { unsafe { ffi::fiber_cond_delete(self.inner) } } } /// A lock for a cooperative multitasking environment. pub struct Latch { inner: *mut ffi::Latch, } impl Latch { /// Allocate and initialize a new latch. pub fn new() -> Self { Latch { inner: unsafe { ffi::box_latch_new() }, } } /// Lock a latch. Waits indefinitely until the current fiber can gain access to the latch. pub fn lock(&self) -> LatchGuard { unsafe { ffi::box_latch_lock(self.inner) }; LatchGuard { latch_inner: self.inner, } } /// Try to lock a latch. Returns immediately instead of waiting if the latch is already locked. /// /// Returns: /// - `Some` - the latch was acquired /// - `None` - the latch is already locked. pub fn try_lock(&self) -> Option<LatchGuard> { if unsafe { ffi::box_latch_trylock(self.inner) } == 0 { Some(LatchGuard { latch_inner: self.inner, }) } else { Non
e } } } impl Dr
conditional_block
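The `Cond` and `Latch` APIs listed above are easier to follow with a short usage sketch. The sketch below is illustrative rather than taken from the crate: the function names, the `ready` predicate, the one-second timeout, and the assumption that the code runs inside a Tarantool fiber context (and that `LatchGuard` releases the latch when dropped) are all assumptions.

```rust
use std::time::Duration;
use tarantool::fiber::{is_cancelled, Cond, Latch};

/// Waiting side: intended to run in one fiber. `ready` stands in for
/// whatever shared predicate the application actually checks.
fn wait_until_ready(cond: &Cond, ready: impl Fn() -> bool) {
    // Re-check the predicate after every wake-up, since wake-ups may be
    // spurious (explicit wakeup/cancel), as the wait_timeout docs advise.
    while !ready() && !is_cancelled() {
        // Returns false on timeout; either way, loop and re-check.
        cond.wait_timeout(Duration::from_secs(1));
    }
}

/// Signalling side: intended to run in another fiber once the shared
/// state behind `ready` has been updated.
fn notify(cond: &Cond) {
    cond.signal(); // wake a single waiter; use broadcast() to wake all
}

/// Latch usage: lock() yields until the latch is free and returns a
/// guard; the latch is assumed to be released when the guard is dropped.
fn critical_section(latch: &Latch) {
    let _guard = latch.lock();
    // ... work that must not interleave with other fibers ...
} // _guard dropped here

/// Non-blocking variant: try_lock() returns None if already locked.
fn try_critical_section(latch: &Latch) -> bool {
    match latch.try_lock() {
        Some(_guard) => {
            // ... do the work while holding the latch ...
            true
        }
        None => false, // another fiber holds the latch; do not wait
    }
}
```

Looping around `wait_timeout()` and re-checking the predicate follows the recommendation in the `wait_timeout()` documentation, since wake-ups may be spurious.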
fiber.rs
} /// Create a new fiber with defined attributes. /// /// Can fail only if there is not enough memory for the fiber structure or fiber stack. /// /// The created fiber automatically returns itself to the fiber cache if has default stack size /// when its `main` function completes. The initial fiber state is **suspended**. /// /// - `name` - string with fiber name /// - `fiber_attr` - fiber attributes /// - `callback` - function for run inside fiber /// /// See also: [fiber.start()](#method.start) pub fn new_with_attr<F>(name: &str, attr: &FiberAttr, callback: &mut F) -> Self where F: FnMut(Box<T>) -> i32, { let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) }; Self { inner: unsafe { ffi::fiber_new_ex( CString::new(name).unwrap().into_raw(), attr.inner, trampoline, ) }, callback: callback_ptr, phantom: PhantomData, } } /// Start execution of created fiber. /// /// - `arg` - argument to start the fiber with /// /// See also: [fiber.new()](#method.new) pub fn start(&mut self, arg: T) { unsafe { ffi::fiber_start(self.inner, self.callback, Box::into_raw(Box::<T>::new(arg))); } } /// Interrupt a synchronous wait of a fiber. pub fn wakeup(&self) { unsafe { ffi::fiber_wakeup(self.inner) } } /// Wait until the fiber is dead and then move its execution status to the caller. /// /// “Join” a joinable fiber. That is, let the fiber’s function run and wait until the fiber’s status is **dead** /// (normally a status becomes **dead** when the function execution finishes). Joining will cause a yield, /// therefore, if the fiber is currently in a **suspended** state, execution of its fiber function will resume. /// /// This kind of waiting is more convenient than going into a loop and periodically checking the status; /// however, it works only if the fiber was created with [fiber.new()](#method.new) and was made joinable with /// [fiber.set_joinable()](#method.set_joinable). /// /// The fiber must not be detached (See also: [fiber.set_joinable()](#method.set_joinable)). /// /// Return: fiber function return code pub fn join(&self) -> i32 { unsafe { ffi::fiber_join(self.inner) } } /// Set fiber to be joinable (false by default). /// /// - `is_joinable` - status to set pub fn set_joinable(&mut self, is_joinable: bool) { unsafe { ffi::fiber_set_joinable(self.inner, is_joinable) } } /// Cancel a fiber. (set `FIBER_IS_CANCELLED` flag) /// /// Running and suspended fibers can be cancelled. After a fiber has been cancelled, attempts to operate on it will /// cause error: the fiber is dead. But a dead fiber can still report its id and status. /// Possible errors: cancel is not permitted for the specified fiber object. /// /// If target fiber's flag `FIBER_IS_CANCELLABLE` set, then it would be woken up (maybe prematurely). /// Then current fiber yields until the target fiber is dead (or is woken up by /// [fiber.wakeup()](#method.wakeup)). pub fn cancel(&mut self) { unsafe { ffi::fiber_cancel(self.inner) } } } /// Make it possible or not possible to wakeup the current /// fiber immediately when it's cancelled. /// /// - `is_cancellable` - status to set /// /// Returns previous state. pub fn set_cancellable(is_cancellable: bool) -> bool { unsafe { ffi::fiber_set_cancellable(is_cancellable) } } /// Check current fiber for cancellation (it must be checked manually). pub fn is_cancelled() -> bool { unsafe { ffi::fiber_is_cancelled() } } /// Put the current fiber to sleep for at least `time` seconds. /// /// Yield control to the scheduler and sleep for the specified number of seconds. 
/// Only the current fiber can be made to sleep. /// /// - `time` - time to sleep /// /// > **Note:** this is a cancellation point (See also: [is_cancelled()](fn.is_cancelled.html)) pub fn sleep(time: f64) { unsafe { ffi::fiber_sleep(time) } } /// Report loop begin time as double (cheap). pub fn time() -> f64 { unsafe { ffi::fiber_time() } } /// Report loop begin time as 64-bit int. pub fn time64() -> u64 { unsafe { ffi::fiber_time64() } } /// Report loop begin time as double (cheap). Uses monotonic clock. pub fn clock() -> f64 { unsafe { ffi::fiber_clock() } } /// Report loop begin time as 64-bit int. Uses monotonic clock. pub fn clock64() -> u64 { unsafe { ffi::fiber_clock64() } } /// Yield control to the scheduler. /// /// Return control to another fiber and wait until it'll be woken. Equivalent to `fiber.sleep(0)`. /// /// See also: [Fiber::wakeup()](struct.Fiber.html#method.wakeup) pub fn fiber_yield() { unsafe { ffi::fiber_yield() } } /// Reschedule fiber to end of event loop cycle. pub fn reschedule() { unsafe { ffi::fiber_reschedule() } } /// Fiber attributes container pub struct FiberAttr { inner: *mut ffi::FiberAttr, } impl FiberAttr { /// Create a new fiber attribute container and initialize it with default parameters. /// Can be used for many fibers creation, corresponding fibers will not take ownership. /// /// This is safe to drop `FiberAttr` value when fibers created with this attribute still exist. pub fn new() -> Self { FiberAttr { inner: unsafe { ffi::fiber_attr_new() }, } } /// Get stack size from the fiber attribute. /// /// Returns: stack size pub fn stack_size(&self) -> usize { unsafe { ffi::fiber_attr_getstacksize(self.inner) } } ///Set stack size for the fiber attribute. /// /// - `stack_size` - stack size for new fibers pub fn set_stack_size(&mut self, stack_size: usize) -> Result<(), Error> { if unsafe { ffi::fiber_attr_setstacksize(self.inner, stack_size) } < 0 { Err(TarantoolError::last().into()) } else { Ok(()) } } } impl Drop for FiberAttr { fn drop(&mut self) { unsafe { ffi::fiber_attr_delete(self.inner) } } } /// Conditional variable for cooperative multitasking (fibers). /// /// A cond (short for "condition variable") is a synchronization primitive /// that allow fibers to yield until some predicate is satisfied. Fiber /// conditions have two basic operations - `wait()` and `signal()`. [cond.wait()](#method.wait) /// suspends execution of fiber (i.e. yields) until [cond.signal()](#method.signal) is called. /// /// Example: /// /// ```rust /// use tarantool::fiber::Cond; /// let cond = fiber.cond(); /// cond.wait(); /// ``` /// /// The job will hang because [cond.wait()](#method.wait) – will go to sleep until the condition variable changes. /// /// ```rust /// // Call from another fiber: /// cond.signal(); /// ``` /// /// The waiting stopped, and the [cond.wait()](#method.wait) function returned true. /// /// This example depended on the use of a global conditional variable with the arbitrary name cond. /// In real life, programmers would make sure to use different conditional variable names for different applications. /// /// Unlike `pthread_cond`, [Cond]() doesn't require mutex/latch wrapping. pub struct Cond { inner: *mut ffi::FiberCond, } /// - call [Cond::new()](#method.new) to create a named condition variable, which will be called `cond` for examples in this section. /// - call [cond.wait()](#method.wait) to make a fiber wait for a signal via a condition variable. 
/// - call [cond.signal()](#method.signal) to send a signal to wake up a single fiber that has executed [cond.wait()](#method.wait). /// - call [cond.broadcast()](#method.broadcast) to send a signal to all fibers that have executed [cond.wait()](#method.wait). impl Cond { /// Instantiate a new fiber cond object. pub fn new() -> Self { Cond { inner: unsafe { ffi::fiber_cond_new() }, } } /// Wake one fiber waiting for the cond. /// Does nothing if no one is waiting. Does not yield. pub fn signal(&self) { unsafe { ffi::fiber_cond_signal(self.inner) } } /// Wake up all fibers waiting for the cond. /// Does not yield. pub fn broadcast(&self) { unsafe
{ ffi::fiber_cond_broadcast(self.inner) } } /// Suspend
identifier_body
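The fiber-creation methods above (`FiberAttr`, `new_with_attr`, `set_joinable`, `start`, `join`) combine as in the following sketch. It is illustrative only: the fiber name, the 256 KiB stack size, and the doubling callback are made up, and it assumes execution inside a running Tarantool instance where fibers can be created.

```rust
use tarantool::fiber::{Fiber, FiberAttr};

/// Spawn a joinable worker fiber with a custom stack size and wait for it.
fn spawn_and_join_worker() -> i32 {
    // Request a larger stack than the default; the size is illustrative.
    let mut attr = FiberAttr::new();
    if attr.set_stack_size(256 * 1024).is_err() {
        // fall back to the default stack size if the request is rejected
    }

    // The callback receives the argument passed to start() as a Box<T>,
    // and its return value becomes the fiber's exit code.
    let mut callback = |input: Box<i32>| -> i32 { *input * 2 };

    let mut fiber = Fiber::new_with_attr("worker", &attr, &mut callback);
    // Fibers are not joinable by default; join() requires this flag.
    fiber.set_joinable(true);
    fiber.start(21);

    // Yields until the worker is dead and returns its exit code (42 here).
    fiber.join()
}
```

`join()` is only meaningful here because `set_joinable(true)` was called; by default a fiber is detached and, per the `new_with_attr` documentation, returns itself to the fiber cache when its function completes.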
renderer.js
mongodPath; }; const TESS_HOME = path.join(os.homedir(), "tesserae"); // application home const MONGOD_PATH = getMongodPath(); const MONGORESTORE_PATH = getMongorestorePath(); const MONGODB_DBPATH = path.join(TESS_HOME, "tessdb"); /** * Initialize the loading screen * @returns {Promise<null>} * * The loading screen displays messages indicating what stages of application * initialization have occurred. The messages are displayed in order such that * the newest message comes beneath all of the others. * * In order to allow the content of the loading screen to be changed at * runtime, the webPreferences option is set to find loading screen changing * code in preloadStartup.js. */ const loadStartupWindow = () => { return new Promise((resolve) => { startupWindow = new BrowserWindow({ width: 800, height: 600, resizable: true, webPreferences: { preload: path.join(__dirname, "preloadStartup.js") } }); startupWindow.loadFile(path.join(__dirname, "startup.html")); // startupWindow.webContents.openDevTools(); startupWindow.on("closed", () => { startupWindow = null }); startupWindow.webContents.once("dom-ready", (event, msg) => { resolve(); }); }); }; /** * @param {string} msg a message to display on the loading screen */ const writeStartupMessage = (msg) => { console.log(msg); if (startupWindow !== null)
}; /** * Write an error message to the loading screen * @param {string} msg an error message to display on the loading screen * @param {*} err the error object that was thrown * * If an error occurs during application initialization, all resources the * application has taken so far should be freed, the error should be displayed * on the loading screen, and the loading screen should remain however long the * user wishes in order to read the error message. When the user closes the * loading screen, the application should be completely shut down. */ const writeStartupError = (msg, err) => { console.error(msg); console.error(err); if (startupWindow !== null) { if (err !== null) { startupWindow.webContents.send('error', msg, err.toString()); } else { startupWindow.webContents.send('error', msg, ''); } } if (mainWindow !== null) { mainWindow.close(); } }; /** * Unzip a .zip file * @param {string} zipPath path to .zip file * @param {string} unzipPath path to where contents of .zip file should be placed * @returns {Promise<null>} */ const getPromiseUnzip = (zipPath, unzipPath) => { return new Promise((resolve) => { yauzl.open( zipPath, {"lazyEntries": true, "autoclose": true}, (err, zipfile) => { if (err) { writeStartupError(`Error occurred while opening ${zipPath}`, err); } zipfile.on("close", () => { resolve(); }); zipfile.on("error", (inner_err) => { writeStartupError( `Error occurred in unzipping ${zipPath}`, inner_err ); }); zipfile.readEntry(); zipfile.on("entry", (entry) => { if (/\/$/.test(entry.fileName)) { // ignore directory entries, since they may or may not be there zipfile.readEntry(); } else { // make sure that output directory exists const neededDir = path.join( unzipPath, path.dirname(entry.fileName) ); if (!fs.existsSync(neededDir)) { mkdirp.sync(neededDir); } zipfile.openReadStream(entry, (err, readStream) => { if (err) { writeStartupError( `Error occurred while reading ${entry.fileName}`, err ); } readStream.on("end", () => { zipfile.readEntry(); }); readStream.on("error", (err) => { writeStartupError( `Error occurred while decompressing ${entry.fileName}`, err ); }); const outpath = path.join( unzipPath, entry.fileName ); const outfile = fs.createWriteStream(outpath); readStream.pipe(outfile); }); } }); } ); }); }; /** * Decompress and untar a .tgz file * @param {string} downloadDest path to .tgz file * @returns {Promise<null>} * * The contents of the .tgz file will be placed in the same directory as where * the .tgz file is located. 
*/ const getPromiseUntgz = (downloadDest) => { return new Promise((resolve) => { const downloadedFileStream = fs.createReadStream(downloadDest); downloadedFileStream.on("error", (err) => { downloadedFileStream.end(); writeStartupError( `Error reading downloaded file (${downloadDest})`, err ); }); const untarred = require("tar-fs").extract(TESS_HOME); untarred.on("finish", () => { resolve(); }); downloadedFileStream .pipe(require("gunzip-maybe")()) .pipe(untarred); }); }; /** * Unpack the downloaded MongoDB * @param {str} downloadDest path to download location of MongoDB */ const unpackMongoInstall = async (downloadDest) => { writeStartupMessage(`\tMongoDB downloaded; now installing`); if (path.extname(downloadDest) === ".zip") { await getPromiseUnzip(downloadDest, TESS_HOME); } else { // assume .tgz await getPromiseUntgz(downloadDest); } }; /** * Download a file via https * @param {string} downloadUrl URL of file to download * @param {string} downloadDest path naming the location of the downloaded file * @returns {Promise<null>} */ const getPromiseViaHttps = (downloadUrl, downloadDest) => { writeStartupMessage(`\tDownloading ${downloadUrl}`); var file = fs.createWriteStream(downloadDest); return new Promise((resolve) => { require("https").get(downloadUrl, response => { if (response.statusCode >= 300 && response.statusCode < 400) { const newUrl = response.headers.location; writeStartupMessage(`\tRedirected: ${downloadUrl} => ${newUrl}`); return getPromiseViaHttps(newUrl, downloadDest).then(resolve); } else { response.on("error", (err) => { file.end(); writeStartupError(`Error during download (${downloadUrl})`, err); }); response.on("end", () => { resolve(); }); response.pipe(file); } }).on("error", (err) => { fs.unlinkSync(downloadDest); writeStartupError(`Could not use download URL (${downloadUrl})`, err); }); }); }; /** * Launch mongod in the background * @param {Object} config MongoDB configuration */ const launchMongod = async (config) => { mkdirp.sync(MONGODB_DBPATH); const mongoPort = config["port"]; const mongodSpawn = child_process.spawn( MONGOD_PATH, [ '--port', mongoPort, '--dbpath', MONGODB_DBPATH ] ); mongodSpawn.on("close", (code) => { if (code != 0) { writeStartupError( `mongod (${MONGOD_PATH}) failed with non-zero error code`, code ); } }); mongodSpawn.on("error", (err) => { if (err !== null) { writeStartupError(`mongod refused to start (${MONGOD_PATH})`, err); } }); }; /** * Get MongoDB configuration * @returns {Object} MongoDB configuration */ const getMongoConfig = () => { let mongoOptions = { "port": "40404", }; const configpath = path.join(TESS_HOME, "tesserae.cfg"); if (fs.existsSync(configpath)) { const ini = require("ini"); const config = ini.parse(fs.readFileSync(configpath, "utf-8")); if ("MONGO" in config) { const dbconfig = config["MONGO"]; for (const property in dbconfig) { mongoOptions[property] = dbconfig[property]; } } } return mongoOptions }; /** * Get a connection to MongoDB * @param {Object} config MongoDB configuration * @returns {MongoClient} */ const getMongoClient = config => { const mongoUrl = `mongodb://localhost:${config["port"]}`; const MongoClient = require('mongodb').MongoClient; return new MongoClient(mongoUrl, {"useUnifiedTopology": true}); }; /** * Ping MongoDB and make sure it is populated * @param {Object} config MongoDB configuration * @returns {Promise<null>} * * If pinging MongoDB fails, application initialization fails. * * If MongoDB is not populated, it will be populated with a base corpus. 
Any * errors in downloading the base corpus or in populating it will cause * application initialization to fail
{ startupWindow.webContents.send('update', msg); }
conditional_block
renderer.js
return mongodPath; }; const TESS_HOME = path.join(os.homedir(), "tesserae"); // application home const MONGOD_PATH = getMongodPath(); const MONGORESTORE_PATH = getMongorestorePath(); const MONGODB_DBPATH = path.join(TESS_HOME, "tessdb"); /** * Initialize the loading screen * @returns {Promise<null>} * * The loading screen displays messages indicating what stages of application * initialization have occurred. The messages are displayed in order such that * the newest message comes beneath all of the others. * * In order to allow the content of the loading screen to be changed at * runtime, the webPreferences option is set to find loading screen changing * code in preloadStartup.js. */ const loadStartupWindow = () => { return new Promise((resolve) => { startupWindow = new BrowserWindow({ width: 800, height: 600, resizable: true, webPreferences: {
// startupWindow.webContents.openDevTools(); startupWindow.on("closed", () => { startupWindow = null }); startupWindow.webContents.once("dom-ready", (event, msg) => { resolve(); }); }); }; /** * @param {string} msg a message to display on the loading screen */ const writeStartupMessage = (msg) => { console.log(msg); if (startupWindow !== null) { startupWindow.webContents.send('update', msg); } }; /** * Write an error message to the loading screen * @param {string} msg an error message to display on the loading screen * @param {*} err the error object that was thrown * * If an error occurs during application initialization, all resources the * application has taken so far should be freed, the error should be displayed * on the loading screen, and the loading screen should remain however long the * user wishes in order to read the error message. When the user closes the * loading screen, the application should be completely shut down. */ const writeStartupError = (msg, err) => { console.error(msg); console.error(err); if (startupWindow !== null) { if (err !== null) { startupWindow.webContents.send('error', msg, err.toString()); } else { startupWindow.webContents.send('error', msg, ''); } } if (mainWindow !== null) { mainWindow.close(); } }; /** * Unzip a .zip file * @param {string} zipPath path to .zip file * @param {string} unzipPath path to where contents of .zip file should be placed * @returns {Promise<null>} */ const getPromiseUnzip = (zipPath, unzipPath) => { return new Promise((resolve) => { yauzl.open( zipPath, {"lazyEntries": true, "autoclose": true}, (err, zipfile) => { if (err) { writeStartupError(`Error occurred while opening ${zipPath}`, err); } zipfile.on("close", () => { resolve(); }); zipfile.on("error", (inner_err) => { writeStartupError( `Error occurred in unzipping ${zipPath}`, inner_err ); }); zipfile.readEntry(); zipfile.on("entry", (entry) => { if (/\/$/.test(entry.fileName)) { // ignore directory entries, since they may or may not be there zipfile.readEntry(); } else { // make sure that output directory exists const neededDir = path.join( unzipPath, path.dirname(entry.fileName) ); if (!fs.existsSync(neededDir)) { mkdirp.sync(neededDir); } zipfile.openReadStream(entry, (err, readStream) => { if (err) { writeStartupError( `Error occurred while reading ${entry.fileName}`, err ); } readStream.on("end", () => { zipfile.readEntry(); }); readStream.on("error", (err) => { writeStartupError( `Error occurred while decompressing ${entry.fileName}`, err ); }); const outpath = path.join( unzipPath, entry.fileName ); const outfile = fs.createWriteStream(outpath); readStream.pipe(outfile); }); } }); } ); }); }; /** * Decompress and untar a .tgz file * @param {string} downloadDest path to .tgz file * @returns {Promise<null>} * * The contents of the .tgz file will be placed in the same directory as where * the .tgz file is located. 
*/ const getPromiseUntgz = (downloadDest) => { return new Promise((resolve) => { const downloadedFileStream = fs.createReadStream(downloadDest); downloadedFileStream.on("error", (err) => { downloadedFileStream.end(); writeStartupError( `Error reading downloaded file (${downloadDest})`, err ); }); const untarred = require("tar-fs").extract(TESS_HOME); untarred.on("finish", () => { resolve(); }); downloadedFileStream .pipe(require("gunzip-maybe")()) .pipe(untarred); }); }; /** * Unpack the downloaded MongoDB * @param {str} downloadDest path to download location of MongoDB */ const unpackMongoInstall = async (downloadDest) => { writeStartupMessage(`\tMongoDB downloaded; now installing`); if (path.extname(downloadDest) === ".zip") { await getPromiseUnzip(downloadDest, TESS_HOME); } else { // assume .tgz await getPromiseUntgz(downloadDest); } }; /** * Download a file via https * @param {string} downloadUrl URL of file to download * @param {string} downloadDest path naming the location of the downloaded file * @returns {Promise<null>} */ const getPromiseViaHttps = (downloadUrl, downloadDest) => { writeStartupMessage(`\tDownloading ${downloadUrl}`); var file = fs.createWriteStream(downloadDest); return new Promise((resolve) => { require("https").get(downloadUrl, response => { if (response.statusCode >= 300 && response.statusCode < 400) { const newUrl = response.headers.location; writeStartupMessage(`\tRedirected: ${downloadUrl} => ${newUrl}`); return getPromiseViaHttps(newUrl, downloadDest).then(resolve); } else { response.on("error", (err) => { file.end(); writeStartupError(`Error during download (${downloadUrl})`, err); }); response.on("end", () => { resolve(); }); response.pipe(file); } }).on("error", (err) => { fs.unlinkSync(downloadDest); writeStartupError(`Could not use download URL (${downloadUrl})`, err); }); }); }; /** * Launch mongod in the background * @param {Object} config MongoDB configuration */ const launchMongod = async (config) => { mkdirp.sync(MONGODB_DBPATH); const mongoPort = config["port"]; const mongodSpawn = child_process.spawn( MONGOD_PATH, [ '--port', mongoPort, '--dbpath', MONGODB_DBPATH ] ); mongodSpawn.on("close", (code) => { if (code != 0) { writeStartupError( `mongod (${MONGOD_PATH}) failed with non-zero error code`, code ); } }); mongodSpawn.on("error", (err) => { if (err !== null) { writeStartupError(`mongod refused to start (${MONGOD_PATH})`, err); } }); }; /** * Get MongoDB configuration * @returns {Object} MongoDB configuration */ const getMongoConfig = () => { let mongoOptions = { "port": "40404", }; const configpath = path.join(TESS_HOME, "tesserae.cfg"); if (fs.existsSync(configpath)) { const ini = require("ini"); const config = ini.parse(fs.readFileSync(configpath, "utf-8")); if ("MONGO" in config) { const dbconfig = config["MONGO"]; for (const property in dbconfig) { mongoOptions[property] = dbconfig[property]; } } } return mongoOptions }; /** * Get a connection to MongoDB * @param {Object} config MongoDB configuration * @returns {MongoClient} */ const getMongoClient = config => { const mongoUrl = `mongodb://localhost:${config["port"]}`; const MongoClient = require('mongodb').MongoClient; return new MongoClient(mongoUrl, {"useUnifiedTopology": true}); }; /** * Ping MongoDB and make sure it is populated * @param {Object} config MongoDB configuration * @returns {Promise<null>} * * If pinging MongoDB fails, application initialization fails. * * If MongoDB is not populated, it will be populated with a base corpus. 
Any * errors in downloading the base corpus or in populating it will cause * application initialization to fail.
preload: path.join(__dirname, "preloadStartup.js") } }); startupWindow.loadFile(path.join(__dirname, "startup.html"));
random_line_split
sampleMultiMCTSAgentTrajectory.py
Buffer import SampleBatchFromBuffer, SaveToBuffer from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, \ ActionToOneHot, ProcessTrajectoryForPolicyValueNet from src.algorithms.mcts import ScoreChild, SelectChild, InitializeChildren, Expand, MCTS, backup, establishPlainActionDist from exec.trainMCTSNNIteratively.valueFromNode import EstimateValueFromNode from src.constrainedChasingEscapingEnv.policies import stationaryAgentPolicy, HeatSeekingContinuesDeterministicPolicy from src.episode import SampleTrajectory, sampleAction from exec.parallelComputing import GenerateTrajectoriesParallel def composeMultiAgentTransitInSingleAgentMCTS(agentId, state, selfAction, othersPolicy, transit): multiAgentActions = [sampleAction(policy(state)) for policy in othersPolicy] multiAgentActions.insert(agentId, selfAction) transitInSelfMCTS = transit(state, multiAgentActions) return transitInSelfMCTS class ComposeSingleAgentGuidedMCTS(): def __init__(self, numSimulations, actionSpace, terminalRewardList, selectChild, isTerminal, transit, getStateFromNode, getApproximatePolicy, getApproximateValue): self.numSimulations = numSimulations self.actionSpace = actionSpace self.terminalRewardList = terminalRewardList self.selectChild = selectChild self.isTerminal = isTerminal self.transit = transit self.getStateFromNode = getStateFromNode self.getApproximatePolicy = getApproximatePolicy self.getApproximateValue = getApproximateValue def __call__(self, agentId, selfNNModel, othersPolicy): approximateActionPrior = self.getApproximatePolicy(selfNNModel) transitInMCTS = lambda state, selfAction: composeMultiAgentTransitInSingleAgentMCTS(agentId, state, selfAction, othersPolicy, self.transit) initializeChildren = InitializeChildren(self.actionSpace, transitInMCTS, approximateActionPrior) expand = Expand(self.isTerminal, initializeChildren) terminalReward = self.terminalRewardList[agentId] approximateValue = self.getApproximateValue(selfNNModel) estimateValue = EstimateValueFromNode(terminalReward, self.isTerminal, self.getStateFromNode, approximateValue) guidedMCTSPolicy = MCTS(self.numSimulations, self.selectChild, expand, estimateValue, backup, establishPlainActionDist) return guidedMCTSPolicy class PrepareMultiAgentPolicy:
def main(): #check file exists or not dirName = os.path.dirname(__file__) trajectoriesSaveDirectory = os.path.join(dirName, '..', '..', 'data', 'multiAgentTrain', 'multiMCTSAgent', 'trajectories') if not os.path.exists(trajectoriesSaveDirectory): os.makedirs(trajectoriesSaveDirectory) trajectorySaveExtension = '.pickle' maxRunningSteps = 20 numSimulations = 200 killzoneRadius = 2 fixedParameters = {'maxRunningSteps': maxRunningSteps, 'numSimulations': numSimulations, 'killzoneRadius': killzoneRadius} generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory, trajectorySaveExtension, fixedParameters) parametersForTrajectoryPath = json.loads(sys.argv[1]) startSampleIndex = int(sys.argv[2]) endSampleIndex = int(sys.argv[3]) parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex, endSampleIndex) trajectorySavePath = generateTrajectorySavePath(parametersForTrajectoryPath) if not os.path.isfile(trajectorySavePath): # Mujoco environment physicsDynamicsPath = os.path.join(dirName, '..', '..', 'env', 'xmls', 'twoAgents.xml') physicsModel = mujoco.load_model_from_path(physicsDynamicsPath) physicsSimulation = mujoco.MjSim(physicsModel) # MDP function qPosInit = (0, 0, 0, 0) qVelInit = [0, 0, 0, 0] numAgents = 2 qVelInitNoise = 8 qPosInitNoise = 9.7 reset = ResetUniform(physicsSimulation, qPosInit, qVelInit, numAgents, qPosInitNoise, qVelInitNoise) agentIds = list(range(numAgents)) sheepId = 0 wolfId = 1 xPosIndex = [2, 3] getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex) getWolfXPos = GetAgentPosFromState(wolfId, xPosIndex) sheepAliveBonus = 1 / maxRunningSteps wolfAlivePenalty = -sheepAliveBonus sheepTerminalPenalty = -1 wolfTerminalReward = 1 terminalRewardList = [sheepTerminalPenalty, wolfTerminalReward] isTerminal = IsTerminal(killzoneRadius, getSheepXPos, getWolfXPos) numSimulationFrames = 20 transit = TransitionFunction(physicsSimulation, isTerminal, numSimulationFrames) rewardSheep = RewardFunctionCompete(sheepAliveBonus, sheepTerminalPenalty, isTerminal) rewardWolf = RewardFunctionCompete(wolfAlivePenalty, wolfTerminalReward, isTerminal) rewardMultiAgents = [rewardSheep, rewardWolf] decay = 1 accumulateMultiAgentRewards = AccumulateMultiAgentRewards(decay, rewardMultiAgents) # NNGuidedMCTS init cInit = 1 cBase = 100 calculateScore = ScoreChild(cInit, cBase) selectChild = SelectChild(calculateScore) actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7)] getApproximatePolicy = lambda NNmodel: ApproximatePolicy(NNmodel, actionSpace) getApproximateValue = lambda NNmodel: ApproximateValue(NNmodel) getStateFromNode = lambda node: list(node.id.values())[0] # sample trajectory sampleTrajectory = SampleTrajectory(maxRunningSteps, transit, isTerminal, reset, sampleAction) # neural network init numStateSpace = 12 numActionSpace = len(actionSpace) regularizationFactor = 1e-4 sharedWidths = [128] actionLayerWidths = [128] valueLayerWidths = [128] generateModel = GenerateModel(numStateSpace, numActionSpace, regularizationFactor) # load save dir NNModelSaveExtension = '' NNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data', 'multiAgentTrain', 'multiMCTSAgent', 'NNModel') if not os.path.exists(NNModelSaveDirectory): os.makedirs(NNModelSaveDirectory) generateNNModelSavePath = GetSavePath(NNModelSaveDirectory, NNModelSaveExtension, fixedParameters) # load wolf baseline for init iteration # wolfBaselineNNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data','SheepWolfBaselinePolicy', 'wolfBaselineNNPolicy') # baselineSaveParameters = 
{'numSimulations': 10, 'killzoneRadius': 2, # 'qPosInitNoise': 9.7, 'qVelInitNoise': 8, # 'rolloutHeuristicWeight': 0.1, 'maxRunningSteps': 25} # getWolfBaselineModelSavePath = GetSavePath(wolfBaselineNNModelSaveDirectory, NNModelSaveExtension, baselineSaveParameters) # baselineModelTrainSteps = 1000 # wolfBaselineNNModelSavePath = getWolfBaselineModelSavePath({'trainSteps': baselineModelTrainSteps})
def __init__(self, composeSingleAgentGuidedMCTS, approximatePolicy, MCTSAgentIds): self.composeSingleAgentGuidedMCTS = composeSingleAgentGuidedMCTS self.approximatePolicy = approximatePolicy self.MCTSAgentIds = MCTSAgentIds def __call__(self, multiAgentNNModel): multiAgentApproximatePolicy = np.array([self.approximatePolicy(NNModel) for NNModel in multiAgentNNModel]) otherAgentPolicyForMCTSAgents = np.array([np.concatenate([multiAgentApproximatePolicy[:agentId], multiAgentApproximatePolicy[agentId + 1:]]) for agentId in self.MCTSAgentIds]) MCTSAgentIdWithCorrespondingOtherPolicyPair = zip(self.MCTSAgentIds, otherAgentPolicyForMCTSAgents) MCTSAgentsPolicy = np.array([self.composeSingleAgentGuidedMCTS(agentId, multiAgentNNModel[agentId], correspondingOtherAgentPolicy) for agentId, correspondingOtherAgentPolicy in MCTSAgentIdWithCorrespondingOtherPolicyPair]) multiAgentPolicy = np.copy(multiAgentApproximatePolicy) multiAgentPolicy[self.MCTSAgentIds] = MCTSAgentsPolicy policy = lambda state: [agentPolicy(state) for agentPolicy in multiAgentPolicy] return policy
identifier_body
sampleMultiMCTSAgentTrajectory.py
Buffer import SampleBatchFromBuffer, SaveToBuffer from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, \ ActionToOneHot, ProcessTrajectoryForPolicyValueNet from src.algorithms.mcts import ScoreChild, SelectChild, InitializeChildren, Expand, MCTS, backup, establishPlainActionDist from exec.trainMCTSNNIteratively.valueFromNode import EstimateValueFromNode from src.constrainedChasingEscapingEnv.policies import stationaryAgentPolicy, HeatSeekingContinuesDeterministicPolicy from src.episode import SampleTrajectory, sampleAction from exec.parallelComputing import GenerateTrajectoriesParallel def composeMultiAgentTransitInSingleAgentMCTS(agentId, state, selfAction, othersPolicy, transit): multiAgentActions = [sampleAction(policy(state)) for policy in othersPolicy] multiAgentActions.insert(agentId, selfAction) transitInSelfMCTS = transit(state, multiAgentActions) return transitInSelfMCTS class ComposeSingleAgentGuidedMCTS(): def __init__(self, numSimulations, actionSpace, terminalRewardList, selectChild, isTerminal, transit, getStateFromNode, getApproximatePolicy, getApproximateValue): self.numSimulations = numSimulations self.actionSpace = actionSpace self.terminalRewardList = terminalRewardList self.selectChild = selectChild self.isTerminal = isTerminal self.transit = transit self.getStateFromNode = getStateFromNode self.getApproximatePolicy = getApproximatePolicy self.getApproximateValue = getApproximateValue def __call__(self, agentId, selfNNModel, othersPolicy): approximateActionPrior = self.getApproximatePolicy(selfNNModel) transitInMCTS = lambda state, selfAction: composeMultiAgentTransitInSingleAgentMCTS(agentId, state, selfAction, othersPolicy, self.transit) initializeChildren = InitializeChildren(self.actionSpace, transitInMCTS, approximateActionPrior) expand = Expand(self.isTerminal, initializeChildren) terminalReward = self.terminalRewardList[agentId] approximateValue = self.getApproximateValue(selfNNModel) estimateValue = EstimateValueFromNode(terminalReward, self.isTerminal, self.getStateFromNode, approximateValue) guidedMCTSPolicy = MCTS(self.numSimulations, self.selectChild, expand, estimateValue, backup, establishPlainActionDist) return guidedMCTSPolicy class PrepareMultiAgentPolicy: def __init__(self, composeSingleAgentGuidedMCTS, approximatePolicy, MCTSAgentIds): self.composeSingleAgentGuidedMCTS = composeSingleAgentGuidedMCTS self.approximatePolicy = approximatePolicy self.MCTSAgentIds = MCTSAgentIds def
(self, multiAgentNNModel): multiAgentApproximatePolicy = np.array([self.approximatePolicy(NNModel) for NNModel in multiAgentNNModel]) otherAgentPolicyForMCTSAgents = np.array([np.concatenate([multiAgentApproximatePolicy[:agentId], multiAgentApproximatePolicy[agentId + 1:]]) for agentId in self.MCTSAgentIds]) MCTSAgentIdWithCorrespondingOtherPolicyPair = zip(self.MCTSAgentIds, otherAgentPolicyForMCTSAgents) MCTSAgentsPolicy = np.array([self.composeSingleAgentGuidedMCTS(agentId, multiAgentNNModel[agentId], correspondingOtherAgentPolicy) for agentId, correspondingOtherAgentPolicy in MCTSAgentIdWithCorrespondingOtherPolicyPair]) multiAgentPolicy = np.copy(multiAgentApproximatePolicy) multiAgentPolicy[self.MCTSAgentIds] = MCTSAgentsPolicy policy = lambda state: [agentPolicy(state) for agentPolicy in multiAgentPolicy] return policy def main(): #check file exists or not dirName = os.path.dirname(__file__) trajectoriesSaveDirectory = os.path.join(dirName, '..', '..', 'data', 'multiAgentTrain', 'multiMCTSAgent', 'trajectories') if not os.path.exists(trajectoriesSaveDirectory): os.makedirs(trajectoriesSaveDirectory) trajectorySaveExtension = '.pickle' maxRunningSteps = 20 numSimulations = 200 killzoneRadius = 2 fixedParameters = {'maxRunningSteps': maxRunningSteps, 'numSimulations': numSimulations, 'killzoneRadius': killzoneRadius} generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory, trajectorySaveExtension, fixedParameters) parametersForTrajectoryPath = json.loads(sys.argv[1]) startSampleIndex = int(sys.argv[2]) endSampleIndex = int(sys.argv[3]) parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex, endSampleIndex) trajectorySavePath = generateTrajectorySavePath(parametersForTrajectoryPath) if not os.path.isfile(trajectorySavePath): # Mujoco environment physicsDynamicsPath = os.path.join(dirName, '..', '..', 'env', 'xmls', 'twoAgents.xml') physicsModel = mujoco.load_model_from_path(physicsDynamicsPath) physicsSimulation = mujoco.MjSim(physicsModel) # MDP function qPosInit = (0, 0, 0, 0) qVelInit = [0, 0, 0, 0] numAgents = 2 qVelInitNoise = 8 qPosInitNoise = 9.7 reset = ResetUniform(physicsSimulation, qPosInit, qVelInit, numAgents, qPosInitNoise, qVelInitNoise) agentIds = list(range(numAgents)) sheepId = 0 wolfId = 1 xPosIndex = [2, 3] getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex) getWolfXPos = GetAgentPosFromState(wolfId, xPosIndex) sheepAliveBonus = 1 / maxRunningSteps wolfAlivePenalty = -sheepAliveBonus sheepTerminalPenalty = -1 wolfTerminalReward = 1 terminalRewardList = [sheepTerminalPenalty, wolfTerminalReward] isTerminal = IsTerminal(killzoneRadius, getSheepXPos, getWolfXPos) numSimulationFrames = 20 transit = TransitionFunction(physicsSimulation, isTerminal, numSimulationFrames) rewardSheep = RewardFunctionCompete(sheepAliveBonus, sheepTerminalPenalty, isTerminal) rewardWolf = RewardFunctionCompete(wolfAlivePenalty, wolfTerminalReward, isTerminal) rewardMultiAgents = [rewardSheep, rewardWolf] decay = 1 accumulateMultiAgentRewards = AccumulateMultiAgentRewards(decay, rewardMultiAgents) # NNGuidedMCTS init cInit = 1 cBase = 100 calculateScore = ScoreChild(cInit, cBase) selectChild = SelectChild(calculateScore) actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7)] getApproximatePolicy = lambda NNmodel: ApproximatePolicy(NNmodel, actionSpace) getApproximateValue = lambda NNmodel: ApproximateValue(NNmodel) getStateFromNode = lambda node: list(node.id.values())[0] # sample trajectory sampleTrajectory = 
SampleTrajectory(maxRunningSteps, transit, isTerminal, reset, sampleAction) # neural network init numStateSpace = 12 numActionSpace = len(actionSpace) regularizationFactor = 1e-4 sharedWidths = [128] actionLayerWidths = [128] valueLayerWidths = [128] generateModel = GenerateModel(numStateSpace, numActionSpace, regularizationFactor) # load save dir NNModelSaveExtension = '' NNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data', 'multiAgentTrain', 'multiMCTSAgent', 'NNModel') if not os.path.exists(NNModelSaveDirectory): os.makedirs(NNModelSaveDirectory) generateNNModelSavePath = GetSavePath(NNModelSaveDirectory, NNModelSaveExtension, fixedParameters) # load wolf baseline for init iteration # wolfBaselineNNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data','SheepWolfBaselinePolicy', 'wolfBaselineNNPolicy') # baselineSaveParameters = {'numSimulations': 10, 'killzoneRadius': 2, # 'qPosInitNoise': 9.7, 'qVelInitNoise': 8, # 'rolloutHeuristicWeight': 0.1, 'maxRunningSteps': 25} # getWolfBaselineModelSavePath = GetSavePath(wolfBaselineNNModelSaveDirectory, NNModelSaveExtension, baselineSaveParameters) # baselineModelTrainSteps = 1000 # wolfBaselineNNModelSavePath = getWolfBaselineModelSavePath({'trainSteps': baselineModelTrainSteps})
__call__
identifier_name