file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
12.1k
suffix
large_stringlengths
0
12k
middle
large_stringlengths
0
7.51k
fim_type
large_stringclasses
4 values
xmp_dashboard.py
md5_calc', 'ok_nok', 'date') VALUES (?,?,?,?,?,?)", results) if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute INSERT INTO md5_resutls = " + str(cur.rowcount)) except sqlite3.Error as error: if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to INSERT INTO md5_results sqlite table", error) try: cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ?, missing_xmp = ? WHERE main_path = '" + main_folder + "'", (todays_date, md5_OK+md5_NOK, md5_OK, md5_NOK,md5_missing_raw,md5_not_found)) if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount)) except sqlite3.Error as error: if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to UPDATE dashboard from sqlite table", error) if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Run results for path: " + main_folder + " md5_OK=" + str(md5_OK) + " md5_NOK=" + str(md5_NOK) + " md5__not_found=" + str(md5_not_found) + " md5_missing_raw=" + str(md5_missing_raw) + ", in " + str(round(time.time()-time1)) + " seconds, finished at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ".") def runrunrun(): global combo global combo_var global master for key, value in combo.items(): combo_var[key] = combo[key].get() #combo values gets destroyed when closing GUI, therefore copy values. master.destroy() # Close GUI and continue after mainloop def quitquit(): conn.close() exit() def xmp_count(): # Check how many xmp files there are in each main folder. global main_folders global xmp_tracker global verbose global xmp_tracker xmp_file_count = 0 for main_folder in main_folders: for subdir, dirs, files in os.walk(main_folder): for file in files: if file[-3:] == 'xmp': xmp_file_count += 1 xmp_tracker.append([main_folder,xmp_file_count]) if vital_stats: print(main_folder + " has " + str(xmp_file_count) + " xmp files") xmp_file_count = 0 def build_dashboard(): # Build dashboard # Creating main tkinter window/toplevel global dashboard global main_folders global combo global combo_var global xmp_tracker global verbose global master date_format = "%Y-%m-%d" todays_date = datetime.date.today().strftime(date_format) max_width = max(len(x) for x in main_folders) # needed to size the cell with path # this will create a label widget col_1 = Label(master, relief=RIDGE, text = "Folder path", width = max_width) col_2 = Label(master, relief=RIDGE, text = "Last run", width = 12) col_3 = Label(master, relief=RIDGE, text = "Days since last run", width = 18) col_4 = Label(master, relief=RIDGE, text = "Tot last count xmp") col_5 = Label(master, relief=RIDGE, text = "Tot db xmp") col_6 = Label(master, relief=RIDGE, text = "OK xmp") col_7 = Label(master, relief=RIDGE, text = "NOK xmp") col_8 = Label(master, relief=RIDGE, text = "Missing RAW") col_9 = Label(master, relief=RIDGE, text = "Missing xmp") col_10 = Label(master, relief=RIDGE, text = "Start/Restart") # grid method to arrange labels in respective # rows and columns as specified col_1.grid(row = 0, column = 0, sticky = W, pady = 2) col_2.grid(row = 0, column = 1, sticky = W, pady = 2) col_3.grid(row = 0, column = 2, sticky = W, pady = 2) col_4.grid(row = 0, column = 3, sticky = W, pady = 2) col_5.grid(row = 0, column = 4, sticky = W, pady = 2) col_6.grid(row = 0, column = 5, sticky = W, pady = 2) col_7.grid(row = 0, column = 6, sticky = W, pady = 2) col_8.grid(row = 0, column = 7, sticky = W, pady = 2) col_9.grid(row = 0, column = 8, sticky = W, pady = 2) col_10.grid(row = 0, column = 9, sticky = W, pady = 2) for ii, each_row in enumerate(dashboard): # print(each_row) # print(ii) col_1 = Label(master, text = each_row[1]) col_2 = Label(master, text = each_row[2]) col_3 = Label(master, text = (datetime.datetime.strptime(todays_date, date_format)- datetime.datetime.strptime(each_row[2], date_format)).days) col_4 = Label(master, text = xmp_tracker[ii][1]) col_5 = Label(master, text = each_row[3]) col_6 = Label(master, text = each_row[4]) col_7 = Label(master, text = each_row[5]) col_8 = Label(master, text = each_row[6]) col_9 = Label(master, text = each_row[7]) col_1.grid(row = ii+1, column = 0, sticky = W, pady = 5, padx = 5) col_2.grid(row = ii+1, column = 1, sticky = W, pady = 5, padx = 5) col_3.grid(row = ii+1, column = 2, sticky = W, pady = 5, padx = 5) col_4.grid(row = ii+1, column = 3, sticky = W, pady = 5, padx = 5) col_5.grid(row = ii+1, column = 4, sticky = W, pady = 5, padx = 5) col_6.grid(row = ii+1, column = 5, sticky = W, pady = 5, padx = 5) col_7.grid(row = ii+1, column = 6, sticky = W, pady = 5, padx = 5) col_8.grid(row = ii+1, column = 7, sticky = W, pady = 5, padx = 5) col_9.grid(row = ii+1, column = 8, sticky = W, pady = 5, padx = 5) valores=("Do nothing", "Restart", "New run") # key_name[1] innehåller path som blir key i dict, och värdet blir det man valt i drop down. for index, key_name in enumerate(dashboard): combo[key_name[1]] = ttk.Combobox(master, values=valores) combo[key_name[1]].set("Do nothing ") combo[key_name[1]].grid(row = 1+index, column = 9, sticky = W, pady = 6, padx = 5) # button widget b1 = Button(master, text = "Cancel", width = 9, command=quitquit) b2 = Button(master, text = "Go", width = 9, command=runrunrun) # arranging button widgets b1.grid(row = ii+2, column = 9, sticky = W) b2.grid(row = ii+2, column = 9, sticky = E) # infinite loop which can be terminated # by keyboard or mouse interrupt mainloop() def get_list_of_folders(main_folders_2): tmp = [] tmp.append(main_folders_2[0]) for ii in range(len(main_folders_2)): if main_folders_2[ii] not in tmp and main_folders_2[ii][0] not in [item[0] for item in tmp]: tmp.append(main_folders_2[ii]) return tmp def run_thru_folders(): global dashboard global main_folders global combo global combo_var global verbose global vital_stats global cur
new_run
identifier_name
xmp_dashboard.py
den ut loopen. Tidigare fortsatte den att stega igenom hela mappen även om den hiottat rätt fil. # - Ta time() vid start av vartje mapp, och vi slkutet och spara. Skriv sedan ut en liten summering. # - Vid varje start skriver jag ut mappnamnet. Kan man skriva ut antalet filer också? JAg räknar ju dem innan. Samma via Approxxx fuiler av yy? # - Lade till en ny parameter vital_stats som om satt skriver ut det viktigaste. # - Lite andra småfix. # v0.11 191118 # - Optimerade lite för läsbarhet i loopen med filjämförelser. # - Lade till lite mer text i starten och förlupen tid i sekunder för varje limited_printouts intervall. # - Nu funkar det ganska bra, så stegar. # v0.12 191118 # - Vilken röra. Tog bort tkinter och några av klasserna jag gjort, nog bättre att gå tillbaka till mer procedural kod. # - Jag räknar antalet fall där jag har en xmp-fil utan tillhörande RAW,men jag visar det inte ngnstans. # - En liten räknare som visar hur många dagar sen det var man körde mappen? # - PyQt # - Ange sekunder mellantider funkar bra, man kunde ge delta tid också. # - Threading, man måste ha någon intelligens så man lägger ut dem på olika hårddiskar, och begränsar antalet trådar till antalet hårddiskar. # v0.12 branch THREAD 191118 # - Threading verkar faktiskt funka. Men jisses vad rörig koden är nu! # v0.13 branch THREAD 191119 # - Initiala tester visar att den inte klarar av att separera diskarna, så den startar två mappar på samma disk. # Måste alltså starta threadsen manuellt, och vänta på att de blir klara. # v0.14 191120 # - Det funkar nu! Den tittar på listan över mappar som skall köras, och fördelar sedan threadsen över # hårddiskarna så att det aldrig körs två threads samtidigt på samma disk. # - Detta blir nu huvudbranchen, tar bort THREAD. # - Skulle behöva snygga till det, nu är det riktigt grötigt. # - Den där Cancel-knappen är nog bra att ge sig på tillsammans med PtQt. # - Delta sekunder för utskriften efter x antal filer. Men den måste vara thread-specifik, dvs varje thread har sin egen räknare. # - Jag fyller inte i missing raw kolumnen i dashboard. # - Test av GitHub, denna skall vara för W12. # v0.15 191127 # - Gjorde om så att istället för att ha en version för varje dator så gjorde jag en config-fil, connect_sqlite_db.py, som jag # anropar. I den finns rätt sträng för att connecta till rätt databas. # v1.0 191127 # Andra försöket i git, tar bort ver-hantering i namnet, och passar på att stega till 1.0 # 191127 # - I Gityran så slarvade jag bort filerna med följande fix: # -- Lade till att den visar antalet filer med missing RAW. # --- Uppdaterade db på LM och W12, behövs ju på ACTUAL och W10 också, # dvs ta fram sql:en och testa. # --- GUIt behöver fixas, själva Dashboarden saknar klumnen missing RAW. # - Allt ovan nu fixat, behöver lägga till SQL för att uppdatera db på W10&ACTUAL # 191129 # - Lade till antal dagar sedan senaste körningen. # - Lade till delta seconds sedan varje limited_printouts. import os from os.path import join from operator import itemgetter, attrgetter from tkinter import * import fnmatch import sys import math import threading import subprocess import fileinput import datetime import time import hashlib import sqlite3 from tkinter import ttk # Denna innehåller comboboxen - drop down. # generate_md5_Checksum_def är en funktion som ligger i en separat fil, from generate_md5_Checksum_def import md5Checksum from connect_sqlite_db import connect_sqlite_db def index_containing_substring(the_list, substring): # returns the line number of the md5 sum, zero if no md5. for i, s in enumerate(the_list): if substring in s: if substring[0] == '<': md5 = s[s.find(substring)+28:s.find(substring)+60] else: md5 = s[s.find(substring)+28:s.find(substring)+60] return md5 return 0 def folderThread(main_folder): global dashboard global main_folders global combo global combo_var global verbose global cur print("Thread started at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) print('Printout levels: ' + str(vital_stats) + ' ' + str(verbose) + ' ', flush=True) print('Thread identity: ' + str(threading.get_ident())) if vital_stats: print('Thread identity: ' + str(threading.get_ident()) + " Starting with " + main_folder + " and " + combo_var[main_folder] + " containing " + str(xmp_tracker[main_folders.index(main_folder)][1]) + " xmp files.") md5_OK = 0 md5_NOK = 0 md5_not_found = 0 md5_missing_raw = 0 xmp_file_counter = 0 results = [] time1 = time.time() try: # Clear db here from all rows with path sql = "DELETE FROM md5_results WHERE file_path LIKE '" + main_folder + "%'" cur.execute(sql) if verbose: print("Thread identity: " + str(threading.get_ident()) + " Executed sql: " + sql) if verbose: print("Thread identity: " + str(threading.get_ident()) + " First sql segment: Rows returned from execute = " + str(cur.rowcount), flush=True) conn.commit() except sqlite3.Error as error: if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to delete record from sqlite table", error, flush=True) try: # Update dashboard since I've removed all files for folder main_folder. cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ? WHERE main_path = '" + main_folder + "'", (todays_date, 0, 0, 0, 0)) if verbose: print(" Thread identity: " + str(threading.get_ident()) + "First sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount), flush=True) conn.commit() except sqlite3.Error as error: if print_errors: print(" Thread identity: " + str(threading.get_ident()) + "Failed to UPDATE dashboard from sqlite table", error, flush=True) time2 = time.time() for subdir, dirs, files in os.walk(main_folder): for file in files: found_raw = 0 try: if file.endswith('xmp'): xmp_file_counter +=1 if verbose: print ('Found file: ' + file, flush=True) f = open(subdir+'\\'+ file,"r") list_file = list(f) md5_in
le, '<PelleTags:PelleTag1_md5sum>'), index_containing_substring(list_file, 'PelleTags:PelleTag1_md5sum=')] if verbose: print(md5_index) if any(md5_index): # xmp-filen innehåller en md5-summa. res = [idx for idx, val in enumerate(md5_index) if val != 0] # Ger vilken typ av xmp encoding det är. md5_xmp = md5_index[res[0]] for raw_file in os.listdir(subdir): # Find the corresponding RAW-file to generate md5 sum. if file[:-
dex = [index_containing_substring(list_fi
conditional_block
xmp_dashboard.py
xmp_file_counter = 0 results = [] time1 = time.time() try: # Clear db here from all rows with path sql = "DELETE FROM md5_results WHERE file_path LIKE '" + main_folder + "%'" cur.execute(sql) if verbose: print("Thread identity: " + str(threading.get_ident()) + " Executed sql: " + sql) if verbose: print("Thread identity: " + str(threading.get_ident()) + " First sql segment: Rows returned from execute = " + str(cur.rowcount), flush=True) conn.commit() except sqlite3.Error as error: if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to delete record from sqlite table", error, flush=True) try: # Update dashboard since I've removed all files for folder main_folder. cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ? WHERE main_path = '" + main_folder + "'", (todays_date, 0, 0, 0, 0)) if verbose: print(" Thread identity: " + str(threading.get_ident()) + "First sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount), flush=True) conn.commit() except sqlite3.Error as error: if print_errors: print(" Thread identity: " + str(threading.get_ident()) + "Failed to UPDATE dashboard from sqlite table", error, flush=True) time2 = time.time() for subdir, dirs, files in os.walk(main_folder): for file in files: found_raw = 0 try: if file.endswith('xmp'): xmp_file_counter +=1 if verbose: print ('Found file: ' + file, flush=True) f = open(subdir+'\\'+ file,"r") list_file = list(f) md5_index = [index_containing_substring(list_file, '<PelleTags:PelleTag1_md5sum>'), index_containing_substring(list_file, 'PelleTags:PelleTag1_md5sum=')] if verbose: print(md5_index) if any(md5_index): # xmp-filen innehåller en md5-summa. res = [idx for idx, val in enumerate(md5_index) if val != 0] # Ger vilken typ av xmp encoding det är. md5_xmp = md5_index[res[0]] for raw_file in os.listdir(subdir): # Find the corresponding RAW-file to generate md5 sum. if file[:-3] in raw_file[:-3]: # Här slicar jag bort ändelserna för att se om de har samma namn. if raw_file[-3:].upper() in raw_extensions: found_raw = 1 md5_calculated = md5Checksum(subdir + '\\' + raw_file) if verbose: print ('Calculated md5 for file ' + raw_file) if verbose: print (md5_calculated) if md5_calculated == md5_xmp: results.append((subdir, file, md5_xmp, md5_calculated, 'OK', todays_date)) md5_OK +=1 if verbose: print("md5 stämmer " + " subdir " + subdir + " file " + file) else: results.append((subdir, file, md5_xmp, md5_calculated, 'NOK', todays_date)) md5_NOK +=1 if print_errors: print("md5 fail: " + subdir + "\\" + str(raw_file)) break if not found_raw: # Efter break exekveras denna. Tror jag... if print_errors: print("xmp without matching raw: " + subdir + "\\" + str(raw_file)) results.append((subdir, file, 'No valid raw file found', '-', 'NOK', todays_date)) md5_missing_raw +=1 else: # index_containing_substring returns zero, PelleTags not present in xmp-file if print_errors: print("Error, no md5 sum in file " + subdir + "\\" + file) md5_not_found += 1 results.append((subdir, file, 'No md5 in xmp', md5_calculated, 'NOK', todays_date)) f.close() if xmp_file_counter % limited_printouts == 0: if vital_stats: print("Thread identity: " + str(threading.get_ident()) + ", " + str(xmp_file_counter) + " xmp-files processed in " + str(round(time.time()-time1)) + " seconds, delta time " + str(round(time.time()-time2)) + " seconds, local time " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) time2 = time.time() except: if print_errors: print ('Unexpected fail for file: ' + file) print (sys.exc_info()) f.close() try: cur.executemany("INSERT INTO md5_results ('file_path', 'file_name', 'md5_file', 'md5_calc', 'ok_nok', 'date') VALUES (?,?,?,?,?,?)", results) if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute INSERT INTO md5_resutls = " + str(cur.rowcount)) except sqlite3.Error as error: if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to INSERT INTO md5_results sqlite table", error) try: cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ?, missing_xmp = ? WHERE main_path = '" + main_folder + "'", (todays_date, md5_OK+md5_NOK, md5_OK, md5_NOK,md5_missing_raw,md5_not_found)) if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount)) except sqlite3.Error as error: if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to UPDATE dashboard from sqlite table", error) if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Run results for path: " + main_folder + " md5_OK=" + str(md5_OK) + " md5_NOK=" + str(md5_NOK) + " md5__not_found=" + str(md5_not_found) + " md5_missing_raw=" + str(md5_missing_raw) + ", in " + str(round(time.time()-time1)) + " seconds, finished at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ".") def runrunrun(): global combo global combo_var global master for key, value in combo.items(): combo_var[key] = combo[key].get() #combo values gets destroyed when closing GUI, therefore copy values. master.destroy() # Close GUI and continue after mainloop def quitquit(): conn.close() exit() def xmp_count(): # Check how many xmp files there are in each main folder. global main_folders global xmp_tracker global verbose global xmp_tracker xmp_file_count = 0 for main_folder in main_folders: for subdir, dirs, files in os.walk(main_folder): for file in files: if file[-3:] == 'xmp': xmp_file_count += 1 xmp_tracker.append([main_folder,xmp_file_count]) if vital_stats: print(main_folder + " has " + str(xmp_file_count) + " xmp files") xmp_file_count = 0 def build_dashboard(): # Build dashboard # Creating main tkinter window/toplevel global dashboard global main_folders global combo global combo_var global xmp_tracker global verbose global master date_format = "%Y-%m-%d" todays_date = datetime.date.today().strftime(date_format) max_width = max(len(x) for x in main_folders) # needed to size the cell with path # this will create a label widget col_1 = Label(master, relief=RIDGE, text = "Folder path", width = max_width) col_2 = Label(master, relief=RIDGE, text = "Last run", width = 12) col_3 = Label(master, relief=RIDGE, text = "Days since last run", width = 18) col_4 = Label(master, relief=RIDGE, text = "Tot last count xmp") col_5 = Label(master, relief=RIDGE, text = "Tot db xmp") col_6 = Label(master, relief=RIDGE, text = "OK xmp") col_7 = Label(master, relief=RIDGE, text = "NOK xmp")
random_line_split
xmp_dashboard.py
import subprocess import fileinput import datetime import time import hashlib import sqlite3 from tkinter import ttk # Denna innehåller comboboxen - drop down. # generate_md5_Checksum_def är en funktion som ligger i en separat fil, from generate_md5_Checksum_def import md5Checksum from connect_sqlite_db import connect_sqlite_db def index_containing_substring(the_list, substring): # returns the line number of the md5 sum, zero if no md5. for i, s in enumerate(the_list): if substring in s: if substring[0] == '<': md5 = s[s.find(substring)+28:s.find(substring)+60] else: md5 = s[s.find(substring)+28:s.find(substring)+60] return md5 return 0 def folderThread(main_folder): global dashboard global main_folders global combo global combo_var global verbose global cur print("Thread started at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) print('Printout levels: ' + str(vital_stats) + ' ' + str(verbose) + ' ', flush=True) print('Thread identity: ' + str(threading.get_ident())) if vital_stats: print('Thread identity: ' + str(threading.get_ident()) + " Starting with " + main_folder + " and " + combo_var[main_folder] + " containing " + str(xmp_tracker[main_folders.index(main_folder)][1]) + " xmp files.") md5_OK = 0 md5_NOK = 0 md5_not_found = 0 md5_missing_raw = 0 xmp_file_counter = 0 results = [] time1 = time.time() try: # Clear db here from all rows with path sql = "DELETE FROM md5_results WHERE file_path LIKE '" + main_folder + "%'" cur.execute(sql) if verbose: print("Thread identity: " + str(threading.get_ident()) + " Executed sql: " + sql) if verbose: print("Thread identity: " + str(threading.get_ident()) + " First sql segment: Rows returned from execute = " + str(cur.rowcount), flush=True) conn.commit() except sqlite3.Error as error: if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to delete record from sqlite table", error, flush=True) try: # Update dashboard since I've removed all files for folder main_folder. cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ? WHERE main_path = '" + main_folder + "'", (todays_date, 0, 0, 0, 0)) if verbose: print(" Thread identity: " + str(threading.get_ident()) + "First sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount), flush=True) conn.commit() except sqlite3.Error as error: if print_errors: print(" Thread identity: " + str(threading.get_ident()) + "Failed to UPDATE dashboard from sqlite table", error, flush=True) time2 = time.time() for subdir, dirs, files in os.walk(main_folder): for file in files: found_raw = 0 try: if file.endswith('xmp'): xmp_file_counter +=1 if verbose: print ('Found file: ' + file, flush=True) f = open(subdir+'\\'+ file,"r") list_file = list(f) md5_index = [index_containing_substring(list_file, '<PelleTags:PelleTag1_md5sum>'), index_containing_substring(list_file, 'PelleTags:PelleTag1_md5sum=')] if verbose: print(md5_index) if any(md5_index): # xmp-filen innehåller en md5-summa. res = [idx for idx, val in enumerate(md5_index) if val != 0] # Ger vilken typ av xmp encoding det är. md5_xmp = md5_index[res[0]] for raw_file in os.listdir(subdir): # Find the corresponding RAW-file to generate md5 sum. if file[:-3] in raw_file[:-3]: # Här slicar jag bort ändelserna för att se om de har samma namn. if raw_file[-3:].upper() in raw_extensions: found_raw = 1 md5_calculated = md5Checksum(subdir + '\\' + raw_file) if verbose: print ('Calculated md5 for file ' + raw_file) if verbose: print (md5_calculated) if md5_calculated == md5_xmp: results.append((subdir, file, md5_xmp, md5_calculated, 'OK', todays_date)) md5_OK +=1 if verbose: print("md5 stämmer " + " subdir " + subdir + " file " + file) else: results.append((subdir, file, md5_xmp, md5_calculated, 'NOK', todays_date)) md5_NOK +=1 if print_errors: print("md5 fail: " + subdir + "\\" + str(raw_file)) break if not found_raw: # Efter break exekveras denna. Tror jag... if print_errors: print("xmp without matching raw: " + subdir + "\\" + str(raw_file)) results.append((subdir, file, 'No valid raw file found', '-', 'NOK', todays_date)) md5_missing_raw +=1 else: # index_containing_substring returns zero, PelleTags not present in xmp-file if print_errors: print("Error, no md5 sum in file " + subdir + "\\" + file) md5_not_found += 1 results.append((subdir, file, 'No md5 in xmp', md5_calculated, 'NOK', todays_date)) f.close() if xmp_file_counter % limited_printouts == 0: if vital_stats: print("Thread identity: " + str(threading.get_ident()) + ", " + str(xmp_file_counter) + " xmp-files processed in " + str(round(time.time()-time1)) + " seconds, delta time " + str(round(time.time()-time2)) + " seconds, local time " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) time2 = time.time() except: if print_errors: print ('Unexpected fail for file: ' + file) print (sys.exc_info()) f.close() try: cur.executemany("INSERT INTO md5_results ('file_path', 'file_name', 'md5_file', 'md5_calc', 'ok_nok', 'date') VALUES (?,?,?,?,?,?)", results) if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute INSERT INTO md5_resutls = " + str(cur.rowcount)) except sqlite3.Error as error: if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to INSERT INTO md5_results sqlite table", error) try: cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ?, missing_xmp = ? WHERE main_path = '" + main_folder + "'", (todays_date, md5_OK+md5_NOK, md5_OK, md5_NOK,md5_missing_raw,md5_not_found)) if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount)) except sqlite3.Error as error: if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to UPDATE dashboard from sqlite table", error) if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Run results for path: " + main_folder + " md5_OK=" + str(md5_OK) + " md5_NOK=" + str(md5_NOK) + " md5__not_found=" + str(md5_not_found) + " md5_missing_raw=" + str(md5_missing_raw) + ", in " + str(round(time.time()-time1)) + " seconds, finished at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ".") def runrunrun(): global combo global combo_var global master for key, value in combo.items(): combo_var[key] = combo[key].get() #combo values gets destroyed when closing GUI, therefore copy values. master.destroy() # Close GUI and continue after mainloop def quitquit(): conn.close() exit() def xmp_count(): # Check how many xmp files there are in each main folder. global main_folders global xmp_tracker global
verbose global xmp
identifier_body
sensor_update.py
())): rows.append(data[ew]) # spoof the API response return { 'result': 1, 'message': None, 'epidata': rows, } return fetch, fields @staticmethod def get_cdc(location, epiweek, valid): fields = ['num2', 'num4', 'num5', 'num6', 'num7', 'num8'] def fetch(weeks): # It appears that log-transformed counts provide a much better fit. res = Epidata.cdc(secrets.api.cdc, weeks, location) if 'epidata' in res: for row in res['epidata']: for col in fields: row[col] = np.log(1. + row[col]) return res return fetch, fields @staticmethod def get_quid(location, epiweek, valid): fields = ['value'] def fetch(weeks): res = Epidata.quidel(secrets.api.quidel, weeks, location) return res return fetch, fields class SensorFitting: def __init__(self): pass @staticmethod def fit_loch_ness(location, epiweek, name, fields, fetch, valid): # Helper functions def get_weeks(epiweek): ew1 = 200330 ew2 = epiweek ew3 = flu.add_epiweeks(epiweek, 1) weeks0 = Epidata.range(ew1, ew2) weeks1 = Epidata.range(ew1, ew3) return (ew1, ew2, ew3, weeks0, weeks1) def extract(rows, fields): data = {} for row in rows: data[row['epiweek']] = [float(row[f]) for f in fields] return data def get_training_set_data(data): epiweeks = sorted(list(data.keys())) X = [data[ew]['x'] for ew in epiweeks] Y = [data[ew]['y'] for ew in epiweeks] return (epiweeks, X, Y) def get_training_set(location, epiweek, signal, valid): ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek) auth = secrets.api.fluview try: result = Epidata.fluview(location, weeks0, issues=ew2, auth=auth) rows = Epidata.check(result) unstable = extract(rows, ['wili']) except Exception: unstable = {} rows = Epidata.check(Epidata.fluview(location, weeks0, auth=auth)) stable = extract(rows, ['wili']) data = {} num_dropped = 0 for ew in signal.keys(): if ew == ew3: continue sig = signal[ew] if ew not in unstable: if valid and flu.delta_epiweeks(ew, ew3) <= 5: raise Exception('unstable wILI is not available on %d' % ew) if ew not in stable: num_dropped += 1 continue wili = stable[ew] else: wili = unstable[ew] data[ew] = {'x': sig, 'y': wili} if num_dropped: msg = 'warning: dropped %d/%d signal weeks because (w)ILI was unavailable' print(msg % (num_dropped, len(signal))) return get_training_set_data(data) def dot(*Ms): """ Simple function to compute the dot product for any number of arguments. """ N = Ms[0] for M in Ms[1:]: N = np.dot(N, M) return N def get_weight(ew1, ew2): """ This function gives the weight between two given epiweeks based on a function that: - drops sharply over the most recent ~3 weeks - falls off exponentially with time - puts extra emphasis on the past weeks at the same time of year (seasonality) - gives no week a weight of zero """ dw = flu.delta_epiweeks(ew1, ew2) yr = 52.2 hl1, hl2, bw = yr, 1, 4 a = 0.05 #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2 b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2)) c = 2 ** -(dw / hl1) d = 1 - 2 ** -(dw / hl2) return (a + (1 - a) * b) * c * d def get_periodic_bias(epiweek): weeks_per_year = 52.2 offset = flu.delta_epiweeks(200001, epiweek) % weeks_per_year angle = np.pi * 2 * offset / weeks_per_year return [np.sin(angle), np.cos(angle)] def apply_model(epiweek, beta, values): bias0 = [1.] if beta.shape[0] > len(values) + 1: # constant and periodic bias bias1 = get_periodic_bias(epiweek) obs = np.array([values + bias0 + bias1]) else: # constant bias only obs = np.array([values + bias0]) return float(dot(obs, beta)) def get_model(ew2, epiweeks, X, Y): ne, nx1, nx2, ny = len(epiweeks), len(X), len(X[0]), len(Y) if ne != nx1 or nx1 != ny: raise Exception('length mismatch e=%d X=%d Y=%d' % (ne, nx1, ny)) weights = np.diag([get_weight(ew1, ew2) for ew1 in epiweeks]) X = np.array(X).reshape((nx1, nx2)) Y = np.array(Y).reshape((ny, 1)) bias0 = np.ones(Y.shape) if ne >= 26 and flu.delta_epiweeks(epiweeks[0], epiweeks[-1]) >= 52: # constant and periodic bias bias1 = np.array([get_periodic_bias(ew) for ew in epiweeks]) X = np.hstack((X, bias0, bias1)) else: # constant bias only X = np.hstack((X, bias0)) XtXi = np.linalg.inv(dot(X.T, weights, X)) XtY = dot(X.T, weights, Y) return np.dot(XtXi, XtY) if type(fields) == str: fields = [fields] ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek) rows = Epidata.check(fetch(weeks1)) signal = extract(rows, fields) min_rows = 3 + len(fields) if ew3 not in signal: raise Exception('%s unavailable on %d' % (name, ew3)) if len(signal) < min_rows: raise Exception('%s available less than %d weeks' % (name, min_rows)) epiweeks, X, Y = get_training_set(location, epiweek, signal, valid) min_rows = min_rows - 1 if len(Y) < min_rows: raise Exception('(w)ILI available less than %d weeks' % (min_rows)) model = get_model(ew3, epiweeks, X, Y) value = apply_model(ew3, model, signal[ew3]) return value class SensorGetter: """Class that implements different sensors. Some sensors may take in a signal to do the fitting on, others do not. """ def __init__(self): pass @staticmethod def get_sensor_implementations(): """Return a map from sensor names to sensor implementations.""" return { 'cdc': SensorGetter.get_cdc, 'gft': SensorGetter.get_gft, 'ght': SensorGetter.get_ght, 'ghtj': SensorGetter.get_ghtj, 'twtr': SensorGetter.get_twtr, 'wiki': SensorGetter.get_wiki, 'epic': SensorGetter.get_epic, 'sar3': SensorGetter.get_sar3, 'arch': SensorGetter.get_arch, 'ar3': SensorGetter.get_ar3, 'quid': SensorGetter.get_quid, } @staticmethod def get_epic(location, epiweek, valid): fc = Epidata.check(Epidata.delphi('ec', epiweek))[0] return fc['forecast']['data'][location]['x1']['point'] @staticmethod def get_sar3(location, epiweek, valid): return SAR3(location).predict(epiweek, valid=valid) @staticmethod def get_arch(location, epiweek, valid):
return ARCH(location).predict(epiweek, valid=valid) @staticmethod def get_ar3(location, epiweek, valid):
random_line_split
sensor_update.py
_name = fields[idx] idx += 1 # loop over rows of the response, ordered by epiweek for row in epidata: ew = row['epiweek'] if ew not in data: # make a new entry for this epiweek data[ew] = {'epiweek': ew} # save the value of this field data[ew][field_name] = row['value'] # convert the map to a list matching the API epidata list rows = [] for ew in sorted(list(data.keys())): rows.append(data[ew]) # spoof the API response return { 'result': 1, 'message': None, 'epidata': rows, } return fetch, fields @staticmethod def get_cdc(location, epiweek, valid): fields = ['num2', 'num4', 'num5', 'num6', 'num7', 'num8'] def fetch(weeks): # It appears that log-transformed counts provide a much better fit. res = Epidata.cdc(secrets.api.cdc, weeks, location) if 'epidata' in res: for row in res['epidata']: for col in fields: row[col] = np.log(1. + row[col]) return res return fetch, fields @staticmethod def get_quid(location, epiweek, valid): fields = ['value'] def fetch(weeks): res = Epidata.quidel(secrets.api.quidel, weeks, location) return res return fetch, fields class SensorFitting: def __init__(self): pass @staticmethod def fit_loch_ness(location, epiweek, name, fields, fetch, valid): # Helper functions def get_weeks(epiweek): ew1 = 200330 ew2 = epiweek ew3 = flu.add_epiweeks(epiweek, 1) weeks0 = Epidata.range(ew1, ew2) weeks1 = Epidata.range(ew1, ew3) return (ew1, ew2, ew3, weeks0, weeks1) def extract(rows, fields): data = {} for row in rows: data[row['epiweek']] = [float(row[f]) for f in fields] return data def get_training_set_data(data): epiweeks = sorted(list(data.keys())) X = [data[ew]['x'] for ew in epiweeks] Y = [data[ew]['y'] for ew in epiweeks] return (epiweeks, X, Y) def get_training_set(location, epiweek, signal, valid): ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek) auth = secrets.api.fluview try: result = Epidata.fluview(location, weeks0, issues=ew2, auth=auth) rows = Epidata.check(result) unstable = extract(rows, ['wili']) except Exception: unstable = {} rows = Epidata.check(Epidata.fluview(location, weeks0, auth=auth)) stable = extract(rows, ['wili']) data = {} num_dropped = 0 for ew in signal.keys(): if ew == ew3: continue sig = signal[ew] if ew not in unstable: if valid and flu.delta_epiweeks(ew, ew3) <= 5: raise Exception('unstable wILI is not available on %d' % ew) if ew not in stable: num_dropped += 1 continue wili = stable[ew] else: wili = unstable[ew] data[ew] = {'x': sig, 'y': wili} if num_dropped: msg = 'warning: dropped %d/%d signal weeks because (w)ILI was unavailable' print(msg % (num_dropped, len(signal))) return get_training_set_data(data) def dot(*Ms): """ Simple function to compute the dot product for any number of arguments. """ N = Ms[0] for M in Ms[1:]: N = np.dot(N, M) return N def get_weight(ew1, ew2): """ This function gives the weight between two given epiweeks based on a function that: - drops sharply over the most recent ~3 weeks - falls off exponentially with time - puts extra emphasis on the past weeks at the same time of year (seasonality) - gives no week a weight of zero """ dw = flu.delta_epiweeks(ew1, ew2) yr = 52.2 hl1, hl2, bw = yr, 1, 4 a = 0.05 #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2 b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2)) c = 2 ** -(dw / hl1) d = 1 - 2 ** -(dw / hl2) return (a + (1 - a) * b) * c * d def get_periodic_bias(epiweek): weeks_per_year = 52.2 offset = flu.delta_epiweeks(200001, epiweek) % weeks_per_year angle = np.pi * 2 * offset / weeks_per_year return [np.sin(angle), np.cos(angle)] def apply_model(epiweek, beta, values): bias0 = [1.] if beta.shape[0] > len(values) + 1: # constant and periodic bias bias1 = get_periodic_bias(epiweek) obs = np.array([values + bias0 + bias1]) else: # constant bias only obs = np.array([values + bias0]) return float(dot(obs, beta)) def get_model(ew2, epiweeks, X, Y): ne, nx1, nx2, ny = len(epiweeks), len(X), len(X[0]), len(Y) if ne != nx1 or nx1 != ny: raise Exception('length mismatch e=%d X=%d Y=%d' % (ne, nx1, ny)) weights = np.diag([get_weight(ew1, ew2) for ew1 in epiweeks]) X = np.array(X).reshape((nx1, nx2)) Y = np.array(Y).reshape((ny, 1)) bias0 = np.ones(Y.shape) if ne >= 26 and flu.delta_epiweeks(epiweeks[0], epiweeks[-1]) >= 52: # constant and periodic bias bias1 = np.array([get_periodic_bias(ew) for ew in epiweeks]) X = np.hstack((X, bias0, bias1)) else: # constant bias only X = np.hstack((X, bias0)) XtXi = np.linalg.inv(dot(X.T, weights, X)) XtY = dot(X.T, weights, Y) return np.dot(XtXi, XtY) if type(fields) == str: fields = [fields] ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek) rows = Epidata.check(fetch(weeks1)) signal = extract(rows, fields) min_rows = 3 + len(fields) if ew3 not in signal: raise Exception('%s unavailable on %d' % (name, ew3)) if len(signal) < min_rows: raise Exception('%s available less than %d weeks' % (name, min_rows)) epiweeks, X, Y = get_training_set(location, epiweek, signal, valid) min_rows = min_rows - 1 if len(Y) < min_rows: raise Exception('(w)ILI available less than %d weeks' % (min_rows)) model = get_model(ew3, epiweeks, X, Y) value = apply_model(ew3, model, signal[ew3]) return value class SensorGetter: """Class that implements different sensors. Some sensors may take in a signal to do the fitting on, others do not. """ def __init__(self): pass @staticmethod def get_sensor_implementations(): """Return a map from sensor names to sensor implementations.""" return { 'cdc': SensorGetter.get_cdc, 'gft': SensorGetter.get_gft, 'ght': SensorGetter.get_ght, 'ghtj': SensorGetter.get_ghtj, 'twtr': SensorGetter.get_twtr, 'wiki': SensorGetter.get_wiki, 'epic': SensorGetter.get_epic, 'sar3': SensorGetter.get_sar3, 'arch': SensorGetter.get_arch, 'ar3': SensorGetter.get_ar3, 'quid': SensorGetter.get_quid, } @staticmethod def
get_epic
identifier_name
sensor_update.py
np.sin(angle), np.cos(angle)] def apply_model(epiweek, beta, values): bias0 = [1.] if beta.shape[0] > len(values) + 1: # constant and periodic bias bias1 = get_periodic_bias(epiweek) obs = np.array([values + bias0 + bias1]) else: # constant bias only obs = np.array([values + bias0]) return float(dot(obs, beta)) def get_model(ew2, epiweeks, X, Y): ne, nx1, nx2, ny = len(epiweeks), len(X), len(X[0]), len(Y) if ne != nx1 or nx1 != ny: raise Exception('length mismatch e=%d X=%d Y=%d' % (ne, nx1, ny)) weights = np.diag([get_weight(ew1, ew2) for ew1 in epiweeks]) X = np.array(X).reshape((nx1, nx2)) Y = np.array(Y).reshape((ny, 1)) bias0 = np.ones(Y.shape) if ne >= 26 and flu.delta_epiweeks(epiweeks[0], epiweeks[-1]) >= 52: # constant and periodic bias bias1 = np.array([get_periodic_bias(ew) for ew in epiweeks]) X = np.hstack((X, bias0, bias1)) else: # constant bias only X = np.hstack((X, bias0)) XtXi = np.linalg.inv(dot(X.T, weights, X)) XtY = dot(X.T, weights, Y) return np.dot(XtXi, XtY) if type(fields) == str: fields = [fields] ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek) rows = Epidata.check(fetch(weeks1)) signal = extract(rows, fields) min_rows = 3 + len(fields) if ew3 not in signal: raise Exception('%s unavailable on %d' % (name, ew3)) if len(signal) < min_rows: raise Exception('%s available less than %d weeks' % (name, min_rows)) epiweeks, X, Y = get_training_set(location, epiweek, signal, valid) min_rows = min_rows - 1 if len(Y) < min_rows: raise Exception('(w)ILI available less than %d weeks' % (min_rows)) model = get_model(ew3, epiweeks, X, Y) value = apply_model(ew3, model, signal[ew3]) return value class SensorGetter: """Class that implements different sensors. Some sensors may take in a signal to do the fitting on, others do not. """ def __init__(self): pass @staticmethod def get_sensor_implementations(): """Return a map from sensor names to sensor implementations.""" return { 'cdc': SensorGetter.get_cdc, 'gft': SensorGetter.get_gft, 'ght': SensorGetter.get_ght, 'ghtj': SensorGetter.get_ghtj, 'twtr': SensorGetter.get_twtr, 'wiki': SensorGetter.get_wiki, 'epic': SensorGetter.get_epic, 'sar3': SensorGetter.get_sar3, 'arch': SensorGetter.get_arch, 'ar3': SensorGetter.get_ar3, 'quid': SensorGetter.get_quid, } @staticmethod def get_epic(location, epiweek, valid): fc = Epidata.check(Epidata.delphi('ec', epiweek))[0] return fc['forecast']['data'][location]['x1']['point'] @staticmethod def get_sar3(location, epiweek, valid): return SAR3(location).predict(epiweek, valid=valid) @staticmethod def get_arch(location, epiweek, valid): return ARCH(location).predict(epiweek, valid=valid) @staticmethod def get_ar3(location, epiweek, valid): return AR3(location).predict(epiweek, valid=valid) @staticmethod def get_ghtj(location, epiweek, valid): loc = 'US' if location == 'nat' else location def justinfun(location, epiweek): # Need to set an absolute path main_driver = '/home/automation/ghtj/ghtj.R' args = ['Rscript', main_driver, location, str(epiweek)] subprocess.check_call(args, shell=False) # Need to set an absolute path outputdir = '/home/automation/ghtj/output' prefix = 'ghtpred' predfilename = '%s/%s-%s-%d.txt' % (outputdir, prefix, loc, epiweek) with open(predfilename, 'r') as f: mypred = float(f.read()) print(mypred) return mypred # Making the single prediction now: mypred = justinfun(location, epiweek) return mypred # sensors using the loch ness fitting @staticmethod def get_gft(location, epiweek, valid): fetch = SignalGetter.get_gft(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'gft', 'num', fetch, valid) @staticmethod def get_ght(location, epiweek, valid): fetch = SignalGetter.get_ght(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'ght', 'value', fetch, valid) @staticmethod def get_twtr(location, epiweek, valid): fetch = SignalGetter.get_twtr(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'twtr', 'percent', fetch, valid) @staticmethod def get_wiki(location, epiweek, valid): fetch, fields = SignalGetter.get_wiki(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'wiki', fields, fetch, valid) @staticmethod def get_cdc(location, epiweek, valid): fetch, fields = SignalGetter.get_cdc(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'cdc', fields, fetch, valid) @staticmethod def get_quid(location, epiweek, valid): fetch, fields = SignalGetter.get_quid(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'quid', fields, fetch, valid) class SensorUpdate: """ Produces both real-time and retrospective sensor readings for ILI in the US. Readings (predictions of ILI made using raw inputs) are stored in the Delphi database and are accessible via the Epidata API. """ @staticmethod def new_instance(valid, test_mode): """ Return a new instance under the default configuration. If `test_mode` is True, database changes will not be committed. If `valid` is True, be punctilious about hiding values that were not known at the time (e.g. run the model with preliminary ILI only). Otherwise, be more lenient (e.g. fall back to final ILI when preliminary ILI isn't available). """ database = SensorsTable(test_mode=test_mode) implementations = SensorGetter.get_sensor_implementations() return SensorUpdate(valid, database, implementations, Epidata) def __init__(self, valid, database, implementations, epidata): self.valid = valid self.database = database self.implementations = implementations self.epidata = epidata def update(self, sensors, first_week, last_week): """ Compute sensor readings and store them in the database. """ # most recent issue if last_week is None: last_issue = get_most_recent_issue(self.epidata) last_week = flu.add_epiweeks(last_issue, +1) # connect with self.database as database: # update each sensor for (name, loc) in sensors: # update each location for location in get_location_list(loc): # timing ew1 = first_week if ew1 is None: ew1 = database.get_most_recent_epiweek(name, location) if ew1 is None: # If an existing sensor reading wasn't found in the database and # no start week was given, just assume that readings should start # at 2010w40. ew1 = 201040 print('%s-%s not found, starting at %d' % (name, location, ew1)) args = (name, location, ew1, last_week) print('Updating %s-%s from %d to %d.' % args) for test_week in flu.range_epiweeks(ew1, last_week, inclusive=True):
self.update_single(database, test_week, name, location)
conditional_block
sensor_update.py
dot product for any number of arguments. """ N = Ms[0] for M in Ms[1:]: N = np.dot(N, M) return N def get_weight(ew1, ew2): """ This function gives the weight between two given epiweeks based on a function that: - drops sharply over the most recent ~3 weeks - falls off exponentially with time - puts extra emphasis on the past weeks at the same time of year (seasonality) - gives no week a weight of zero """ dw = flu.delta_epiweeks(ew1, ew2) yr = 52.2 hl1, hl2, bw = yr, 1, 4 a = 0.05 #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2 b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2)) c = 2 ** -(dw / hl1) d = 1 - 2 ** -(dw / hl2) return (a + (1 - a) * b) * c * d def get_periodic_bias(epiweek): weeks_per_year = 52.2 offset = flu.delta_epiweeks(200001, epiweek) % weeks_per_year angle = np.pi * 2 * offset / weeks_per_year return [np.sin(angle), np.cos(angle)] def apply_model(epiweek, beta, values): bias0 = [1.] if beta.shape[0] > len(values) + 1: # constant and periodic bias bias1 = get_periodic_bias(epiweek) obs = np.array([values + bias0 + bias1]) else: # constant bias only obs = np.array([values + bias0]) return float(dot(obs, beta)) def get_model(ew2, epiweeks, X, Y): ne, nx1, nx2, ny = len(epiweeks), len(X), len(X[0]), len(Y) if ne != nx1 or nx1 != ny: raise Exception('length mismatch e=%d X=%d Y=%d' % (ne, nx1, ny)) weights = np.diag([get_weight(ew1, ew2) for ew1 in epiweeks]) X = np.array(X).reshape((nx1, nx2)) Y = np.array(Y).reshape((ny, 1)) bias0 = np.ones(Y.shape) if ne >= 26 and flu.delta_epiweeks(epiweeks[0], epiweeks[-1]) >= 52: # constant and periodic bias bias1 = np.array([get_periodic_bias(ew) for ew in epiweeks]) X = np.hstack((X, bias0, bias1)) else: # constant bias only X = np.hstack((X, bias0)) XtXi = np.linalg.inv(dot(X.T, weights, X)) XtY = dot(X.T, weights, Y) return np.dot(XtXi, XtY) if type(fields) == str: fields = [fields] ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek) rows = Epidata.check(fetch(weeks1)) signal = extract(rows, fields) min_rows = 3 + len(fields) if ew3 not in signal: raise Exception('%s unavailable on %d' % (name, ew3)) if len(signal) < min_rows: raise Exception('%s available less than %d weeks' % (name, min_rows)) epiweeks, X, Y = get_training_set(location, epiweek, signal, valid) min_rows = min_rows - 1 if len(Y) < min_rows: raise Exception('(w)ILI available less than %d weeks' % (min_rows)) model = get_model(ew3, epiweeks, X, Y) value = apply_model(ew3, model, signal[ew3]) return value class SensorGetter: """Class that implements different sensors. Some sensors may take in a signal to do the fitting on, others do not. """ def __init__(self): pass @staticmethod def get_sensor_implementations(): """Return a map from sensor names to sensor implementations.""" return { 'cdc': SensorGetter.get_cdc, 'gft': SensorGetter.get_gft, 'ght': SensorGetter.get_ght, 'ghtj': SensorGetter.get_ghtj, 'twtr': SensorGetter.get_twtr, 'wiki': SensorGetter.get_wiki, 'epic': SensorGetter.get_epic, 'sar3': SensorGetter.get_sar3, 'arch': SensorGetter.get_arch, 'ar3': SensorGetter.get_ar3, 'quid': SensorGetter.get_quid, } @staticmethod def get_epic(location, epiweek, valid): fc = Epidata.check(Epidata.delphi('ec', epiweek))[0] return fc['forecast']['data'][location]['x1']['point'] @staticmethod def get_sar3(location, epiweek, valid): return SAR3(location).predict(epiweek, valid=valid) @staticmethod def get_arch(location, epiweek, valid): return ARCH(location).predict(epiweek, valid=valid) @staticmethod def get_ar3(location, epiweek, valid): return AR3(location).predict(epiweek, valid=valid) @staticmethod def get_ghtj(location, epiweek, valid): loc = 'US' if location == 'nat' else location def justinfun(location, epiweek): # Need to set an absolute path main_driver = '/home/automation/ghtj/ghtj.R' args = ['Rscript', main_driver, location, str(epiweek)] subprocess.check_call(args, shell=False) # Need to set an absolute path outputdir = '/home/automation/ghtj/output' prefix = 'ghtpred' predfilename = '%s/%s-%s-%d.txt' % (outputdir, prefix, loc, epiweek) with open(predfilename, 'r') as f: mypred = float(f.read()) print(mypred) return mypred # Making the single prediction now: mypred = justinfun(location, epiweek) return mypred # sensors using the loch ness fitting @staticmethod def get_gft(location, epiweek, valid): fetch = SignalGetter.get_gft(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'gft', 'num', fetch, valid) @staticmethod def get_ght(location, epiweek, valid): fetch = SignalGetter.get_ght(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'ght', 'value', fetch, valid) @staticmethod def get_twtr(location, epiweek, valid): fetch = SignalGetter.get_twtr(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'twtr', 'percent', fetch, valid) @staticmethod def get_wiki(location, epiweek, valid): fetch, fields = SignalGetter.get_wiki(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'wiki', fields, fetch, valid) @staticmethod def get_cdc(location, epiweek, valid): fetch, fields = SignalGetter.get_cdc(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'cdc', fields, fetch, valid) @staticmethod def get_quid(location, epiweek, valid): fetch, fields = SignalGetter.get_quid(location, epiweek, valid) return SensorFitting.fit_loch_ness(location, epiweek, 'quid', fields, fetch, valid) class SensorUpdate: """ Produces both real-time and retrospective sensor readings for ILI in the US. Readings (predictions of ILI made using raw inputs) are stored in the Delphi database and are accessible via the Epidata API. """ @staticmethod def new_instance(valid, test_mode):
""" Return a new instance under the default configuration. If `test_mode` is True, database changes will not be committed. If `valid` is True, be punctilious about hiding values that were not known at the time (e.g. run the model with preliminary ILI only). Otherwise, be more lenient (e.g. fall back to final ILI when preliminary ILI isn't available). """ database = SensorsTable(test_mode=test_mode) implementations = SensorGetter.get_sensor_implementations() return SensorUpdate(valid, database, implementations, Epidata)
identifier_body
图像数据处理.py
ducer(['/path/to/output.tfrecords']) #从队列中读取一个样例 _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example(#解析单个样例函数 serialized_example, features={ 'images_raw':tf.FixedLenFeature([],tf.string),#解析为一个tensor 'pixels':tf.FixedLenFeature([],tf.int64), 'label':tf.FixedLenFeature([],tf.int64), }) image= tf.decode_raw(features[’image_raw’], tf.uint8}#将字符串tensor解析成数组 label = tf.cast(features[’label’], tf.int32} pixels = tf.cast(features[’pixels’], tf.int32} sess = tf.Session() #启动多线程处理输入数据 coord = tf.train.Coordinator(} threads = tf.train.start_queue_runners(sess=sess, coord=coord} #每次运行可以读取TFRecord 文件中的一个样例。当所有样例读完之后,在此样例中程序 #会再从头读取。 for i in range(10} : print(sess.run([image, label, pixels])) 图像编码处理 import matplotlib.pyplot as plt import tensorflow as tf image_raw_data = tf.gfile.FastGFile('/path','rb').read()#读取原始图像为字符串 with tf.Session() as sess:#对图像进行解码,使用的是jpeg,还有png等 img_data = tf.image.decode_jpeg(image_raw_data)#结果是一个tensor print(img_data.eval()) plt.imshow(img_data.eval()) plt.show() encode_image = tf.image.encode_jpeg(img_data)#将tensor编码成jpeg并存入文件 with tf.gfile.FastGFlie('/path/to/output','wb') as f: f.write(encode_image.eval()) 调整图像大小 image_raw_data = tf.gfile.FastGFile('/path','rb').read() image_data = tf.decode_jpeg(image_raw_data)#解码图像 image_data = tf.image.convert_image_dtype(image_raw_data,dtype=tf.float32)#转换格式为浮点 resized = tf.image.resize_images(image_data,[300,300],method=0) croped = tf.image.resize_images_with_crop_or_pad(img_data,400,400)#截取指定大小图像,图像够大就截取,不够就在周围填充0 central = tf.image.central_crop(image_data,0.5)#根据比例截取 #图像各种翻转 fliped = tf.image.flip_up_down(image_data) fliped = tf.image.random_flip_up_down(image_data) fliped = tf.image.flip_left_right(image_data) fliped = tf.image.random_flip_left_right(image_data) transposed = tf.image.transpose_image(image_data) adjusted = tf.image.adjust_brightness(img_data,-0.5)#调整亮度 adjust_brightness = tf.clip_by_value(adjusted, 0.0, 1.0)#把亮度限定在正确范围内 adjusted = tf.image.random_brightness(image, random_range) adjust_brightness = tf.image.adjust_contrast(image_data, 5)#调整对比度 adjusted = tf.image.adjust_hue(img_data, 0.3)#调整色彩 adjusted = tf.image.adjust_saturation(img_data, 5)#调整饱和度 adjusted = tf.image.per_image_standardization(img_data)#调整数值为0,方差为1 图像加标注框 batched = tf.expand_dims( tf.image.convert_image_dtype(img_data,tf.float32),0) boxes = tf.constant([0.05, 0.05, 0.9, 0.7],[0.35, 0.47, 0.5, 0.56])#同时添加两个标注框 #参数是相对位置,[y_min,x_min,y_max,x_max] boxed = tf.image.draw_bounding_boxes(batched,boxed) 完整图像预处理 import tensorflow as tf import matplotlib.pyplot as plt import numpy as np #随机调整一张图像的色彩,定义不同顺序调整亮度、对比度、饱和度和色相,具体使用的顺序会影响学习 def distort_color(image,color_ordering=0): if color_ordering == 0: image = tf.image.random_saturation(image,lower=0.5,upper=0.5) image = tf.image.random_brightness(image,max_delta=32. / 255. ) image = tf.random_hue(image,max_delta=0.2) elif color_ordering == 1: image = tf.image.random_brightness(image,max_delta=32. / 255. ) image = tf.image.random_saturation(image,lower=0.5,upper=0.5) image = tf.random_hue(image,max_delta=0.2) elif color_ordering == 2: #其他转换顺序 return tf.clip_by_value(image, 0.0, 1.0)#把图片每个元素值规定在范围内 #预处理图片 def preprocess_for_train(image,height,width,bbox): if bbox is None:#标注框没定义的话就取全图片 bbox = tf.constant([0.0,0.0,1.0,1.0],dtype=tf.float32,shape=[1,1,4]) if image.dtype != tf.float32#转换图像像素值类型 image = tf.convert_image_dtype(image, dtype=tf.float32) bbox_begin,bbox_size,_ = tf.image.sample_distorted_bounding_box(tf.shape(image),bounding_boxes=bbox)#随机截取图像 distort_image = tf.slice(image, bbox_begin, bbox_size) distorted_image = tf.image.resize_images(distort_image,[height,width],method=np.randint(4))#调整图像大小为神经网络的输入大小 distort_image = tf.image.random_flip_left_right(distort_image)#随机左右翻转图像 distort_image = distort_color(distort_image,np.random.randint(2))#随机调整图像颜色 return distort_image image_raw_data = tf.gfile.FastGFile(path,'rb').read() with tf.Session() as sess: image_data = tf.image.decode(image_raw_data) boxes = tf.constant([[[0.05,0.05,0.9,0.7],[0.35,0.47,0.5,0.56]]]) for i in range(6) result = preprocess_for_train(image_data,299,299,boxes) plt.imgshow(result.eval()) plt.show() 多线程处理数据输入 队列,处理输入数据的框架 import tensorflow as tf q = tf.FIFOQueue(2,'int32')#指定一个先进先出队列,可以保存两个元素 #RandomShufffleQueue是随机进出队列 init = q.enqueue_many(([0,10],))#使用函数初始化队列中的元素,元素的值为0和10 x = q.dequeue()#出队列 y = x + 1 q_inc = q.enqueue([y])#加入队列 with tf.Session() as tf: init.run()#初始化队列 for i in range(5): v,_ = sess.run([x,q_inc]) print v 多线程操作 coord = tf.train.Coordinator()#创建一个实例来协同多线程 threads = [ threading.Thread(target=MyLoop, args=(cord, i , )) for i in range(5)] for t in threads: t.start() coord.join(threads) def MyLoop(coord, worker_id): #使用tf.Coordinator 类提供的协同工具判断当前线程是否市要停止。 while not coord. should_stop (): #随机停止所有的线程。 if np.random.rand() < 0.1 print ” Stoping from id: %d\n” worker_id, #coord.request_stop()函数来通知其他线程停止。 coord.request_stop() else: #打印当前线程的Id print ” Working on id : %d\n ” % worker_id, #暂停l秒 time.sleep(l) 队列管理 queue = tf.FIFOQueue(100,"float") enqueue_op = queue.enqueue([tf.random_normal([1])])#入队操作 qr = tf.train.QueueRunner(queue, [enqueue_op] * 5)#启动几个线程,每个线程运行enqueue_op操作 tf.train.add_queue_runner(qr)#加入tf计算图上指定集合 out_tensor = queue.dequeue() with tf.Session() as sess: coord = tf.train.Coordinator()#协同启动进程 threads = tf.train.start_queue_runners(sess=sess, coord=coord) #必须明确使用此函数启动所有线程,进行入队操作以供后期调用 for _ in range(3): print(sess.run(out_tensor)[0]) coord.request_stop() coord.join(
= tf.train.Example(features=tf.train.Feature(feature={ 'pixels':_int_64_feature(pixels), 'label':_int_64_feature(np.argmax(labels[index])), 'images_raw':_bytes_features(images_raw)} )) writer.write(example.SerializerToString())#写入TFRecord文件 writer.close() 读取TFRecord import tensorflow as tf #创建一个reader来读取tfr文件 reader = tf.TFRecordReader() #创建一个队列来维护输入文件列表 filename_queue = tf.train.string_input_pro
conditional_block
图像数据处理.py
sess = tf.Session() #启动多线程处理输入数据 coord = tf.train.Coordinator(} threads = tf.train.start_queue_runners(sess=sess, coord=coord} #每次运行可以读取TFRecord 文件中的一个样例。当所有样例读完之后,在此样例中程序 #会再从头读取。 for i in range(10} : print(sess.run([image, label, pixels])) 图像编码处理 import matplotlib.pyplot as plt import tensorflow as tf image_raw_data = tf.gfile.FastGFile('/path','rb').read()#读取原始图像为字符串 with tf.Session() as sess:#对图像进行解码,使用的是jpeg,还有png等 img_data = tf.image.decode_jpeg(image_raw_data)#结果是一个tensor print(img_data.eval()) plt.imshow(img_data.eval()) plt.show() encode_image = tf.image.encode_jpeg(img_data)#将tensor编码成jpeg并存入文件 with tf.gfile.FastGFlie('/path/to/output','wb') as f: f.write(encode_image.eval()) 调整图像大小 image_raw_data = tf.gfile.FastGFile('/path','rb').read() image_data = tf.decode_jpeg(image_raw_data)#解码图像 image_data = tf.image.convert_image_dtype(image_raw_data,dtype=tf.float32)#转换格式为浮点 resized = tf.image.resize_images(image_data,[300,300],method=0) croped = tf.image.resize_images_with_crop_or_pad(img_data,400,400)#截取指定大小图像,图像够大就截取,不够就在周围填充0 central = tf.image.central_crop(image_data,0.5)#根据比例截取 #图像各种翻转 fliped = tf.image.flip_up_down(image_data) fliped = tf.image.random_flip_up_down(image_data) fliped = tf.image.flip_left_right(image_data) fliped = tf.image.random_flip_left_right(image_data) transposed = tf.image.transpose_image(image_data) adjusted = tf.image.adjust_brightness(img_data,-0.5)#调整亮度 adjust_brightness = tf.clip_by_value(adjusted, 0.0, 1.0)#把亮度限定在正确范围内 adjusted = tf.image.random_brightness(image, random_range) adjust_brightness = tf.image.adjust_contrast(image_data, 5)#调整对比度 adjusted = tf.image.adjust_hue(img_data, 0.3)#调整色彩 adjusted = tf.image.adjust_saturation(img_data, 5)#调整饱和度 adjusted = tf.image.per_image_standardization(img_data)#调整数值为0,方差为1 图像加标注框 batched = tf.expand_dims( tf.image.convert_image_dtype(img_data,tf.float32),0) boxes = tf.constant([0.05, 0.05, 0.9, 0.7],[0.35, 0.47, 0.5, 0.56])#同时添加两个标注框 #参数是相对位置,[y_min,x_min,y_max,x_max] boxed = tf.image.draw_bounding_boxes(batched,boxed) 完整图像预处理 import tensorflow as tf import matplotlib.pyplot as plt import numpy as np #随机调整一张图像的色彩,定义不同顺序调整亮度、对比度、饱和度和色相,具体使用的顺序会影响学习 def distort_color(image,color_ordering=0): if color_ordering == 0: image = tf.image.random_saturation(image,lower=0.5,upper=0.5) image = tf.image.random_brightness(image,max_delta=32. / 255. ) image = tf.random_hue(image,max_delta=0.2) elif color_ordering == 1: image = tf.image.random_brightness(image,max_delta=32. / 255. ) image = tf.image.random_saturation(image,lower=0.5,upper=0.5) image = tf.random_hue(image,max_delta=0.2) elif color_ordering == 2: #其他转换顺序 return tf.clip_by_value(image, 0.0, 1.0)#把图片每个元素值规定在范围内 #预处理图片 def preprocess_for_train(image,height,width,bbox): if bbox is None:#标注框没定义的话就取全图片 bbox = tf.constant([0.0,0.0,1.0,1.0],dtype=tf.float32,shape=[1,1,4]) if image.dtype != tf.float32#转换图像像素值类型 image = tf.convert_image_dtype(image, dtype=tf.float32) bbox_begin,bbox_size,_ = tf.image.sample_distorted_bounding_box(tf.shape(image),bounding_boxes=bbox)#随机截取图像 distort_image = tf.slice(image, bbox_begin, bbox_size) distorted_image = tf.image.resize_images(distort_image,[height,width],method=np.randint(4))#调整图像大小为神经网络的输入大小 distort_image = tf.image.random_flip_left_right(distort_image)#随机左右翻转图像 distort_image = distort_color(distort_image,np.random.randint(2))#随机调整图像颜色 return distort_image image_raw_data = tf.gfile.FastGFile(path,'rb').read() with tf.Session() as sess: image_data = tf.image.decode(image_raw_data) boxes = tf.constant([[[0.05,0.05,0.9,0.7],[0.35,0.47,0.5,0.56]]]) for i in range(6) result = preprocess_for_train(image_data,299,299,boxes) plt.imgshow(result.eval()) plt.show() 多线程处理数据输入 队列,处理输入数据的框架 import tensorflow as tf q = tf.FIFOQueue(2,'int32')#指定一个先进先出队列,可以保存两个元素 #RandomShufffleQueue是随机进出队列 init = q.enqueue_many(([0,10],))#使用函数初始化队列中的元素,元素的值为0和10 x = q.dequeue()#出队列 y = x + 1 q_inc = q.enqueue([y])#加入队列 with tf.Session() as tf: init.run()#初始化队列 for i in range(5): v,_ = sess.run([x,q_inc]) print v 多线程操作 coord = tf.train.Coordinator()#创建一个实例来协同多线程 threads = [ threading.Thread(target=MyLoop, args=(cord, i , )) for i in range(5)] for t in threads: t.start() coord.join(threads) def MyLoop(coord, worker_id): #使用tf.Coordinator 类提供的协同工具判断当前线程是否市要停止。 while not coord. should_stop (): #随机停止所有的线程。 if np.random.rand() < 0.1 print ” Stoping from id: %d\n” worker_id, #coord.request_stop()函数来通知其他线程停止。 coord.request_stop() else: #打印当前线程的Id print ” Working on id : %d\n ” % worker_id, #暂停l秒 time.sleep(l) 队列管理 queue = tf.FIFOQueue(100,"float") enqueue_op = queue.enqueue([tf.random_normal([1])])#入队操作 qr = tf.train.QueueRunner(queue, [enqueue_op] * 5)#启动几个线程,每个线程运行enqueue_op操作 tf.train.add_queue_runner(qr)#加入tf计算图上指定集合 out_tensor = queue.dequeue() with tf.Session() as sess: coord = tf.train.Coordinator()#协同启动进程 threads = tf.train.start_queue_runners(sess=sess, coord=coord) #必须明确使用此函数启动所有线程,进行入队操作以供后期调用 for _ in range(3): print(sess.run(out_tensor)[0]) coord.request_stop() coord.join(threads) 输入文件队列 num_shards = 2#总文件数 instances_per_shard = 2#每个文件多少数据 #把输入转换成TFRecord for i in range(num_shards): filename = ('/path/data.tfrecords-%.5d-of-%.5d' % (i, num_shards)) writer = tf.python_io.TFRecordWriter(filename) for j in range(instances_per_shard):#将数据封装成Example结构并写入TFRcecord文件 example = tf.train.Example(features=tf.train.Features(feature={ 'i': _int64_feature(i), 'j': _int64_feature(j) } )) writer.write(example.SerializerToString()) writer.close() 读取执行 files = tf.train.match_filenames_once('/path/data.tfrecords-*') filename_queue = tf.train.string_input_producer(files, shuffle=False)#创建输入队列 reader = tf.TFRecordReader() _, serialized_example= reader.read
image= tf.decode_raw(features[’image_raw’], tf.uint8}#将字符串tensor解析成数组 label = tf.cast(features[’label’], tf.int32} pixels = tf.cast(features[’pixels’], tf.int32}
random_line_split
图像数据处理.py
)) #读取mnist数据 mnist = input_data.read_data_sets('/path', dtype=tf.uint8, one_hot=True) images = mnist.train.images labels = mnist.train.labels pixels = images.shape[1] num_examples = mnist.train.num_examples filename = '/path/to/output.tfrecords' #创建一个writer来写TFRecord文件 writer = tf.python_io.TFRecordWriter(filename) for index in range(num_examples): images_raw = images[index].tostring()#将每个图像转换成一个字符串 #将一个样例转化为Example Protocol Buffer,并写入 example = tf.train.Example(features=tf.train.Feature(feature={ 'pixels':_int_64_feature(pixels), 'label':_int_64_feature(np.argmax(labels[index])), 'images_raw':_bytes_features(images_raw)} )) writer.write(example.SerializerToString())#写入TFRecord文件 writer.close() 读取TFRecord import tensorflow as tf #创建一个reader来读取tfr文件 reader = tf.TFRecordReader() #创建一个队列来维护输入文件列表 filename_queue = tf.train.string_input_producer(['/path/to/output.tfrecords']) #从队列中读取一个样例 _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example(#解析单个样例函数 serialized_example, features={ 'images_raw':tf.FixedLenFeature([],tf.string),#解析为一个tensor 'pixels':tf.FixedLenFeature([],tf.int64), 'label':tf.FixedLenFeature([],tf.int64), }) image= tf.decode_raw(features[’image_raw’], tf.uint8}#将字符串tensor解析成数组 label = tf.cast(features[’label’], tf.int32} pixels = tf.cast(features[’pixels’], tf.int32} sess = tf.Session() #启动多线程处理输入数据 coord = tf.train.Coordinator(} threads = tf.train.start_queue_runners(sess=sess, coord=coord} #每次运行可以读取TFRecord 文件中的一个样例。当所有样例读完之后,在此样例中程序 #会再从头读取。 for i in range(10} : print(sess.run([image, label, pixels])) 图像编码处理 import matplotlib.pyplot as plt import tensorflow as tf image_raw_data = tf.gfile.FastGFile('/path','rb').read()#读取原始图像为字符串 with tf.Session() as sess:#对图像进行解码,使用的是jpeg,还有png等 img_data = tf.image.decode_jpeg(image_raw_data)#结果是一个tensor print(img_data.eval()) plt.imshow(img_data.eval()) plt.show() encode_image = tf.image.encode_jpeg(img_data)#将tensor编码成jpeg并存入文件 with tf.gfile.FastGFlie('/path/to/output','wb') as f: f.write(encode_image.eval()) 调整图像大小 image_raw_data = tf.gfile.FastGFile('/path','rb').read() image_data = tf.decode_jpeg(image_raw_data)#解码图像 image_data = tf.image.convert_image_dtype(image_raw_data,dtype=tf.float32)#转换格式为浮点 resized = tf.image.resize_images(image_data,[300,300],method=0) croped = tf.image.resize_images_with_crop_or_pad(img_data,400,400)#截取指定大小图像,图像够大就截取,不够就在周围填充0 central = tf.image.central_crop(image_data,0.5)#根据比例截取 #图像各种翻转 fliped = tf.image.flip_up_down(image_data) fliped = tf.image.random_flip_up_down(image_data) fliped = tf.image.flip_left_right(image_data) fliped = tf.image.random_flip_left_right(image_data) transposed = tf.image.transpose_image(image_data) adjusted = tf.image.adjust_brightness(img_data,-0.5)#调整亮度 adjust_brightness = tf.clip_by_value(adjusted, 0.0, 1.0)#把亮度限定在正确范围内 adjusted = tf.image.random_brightness(image, random_range) adjust_brightness = tf.image.adjust_contrast(image_data, 5)#调整对比度 adjusted = tf.image.adjust_hue(img_data, 0.3)#调整色彩 adjusted = tf.image.adjust_saturation(img_data, 5)#调整饱和度 adjusted = tf.image.per_image_standardization(img_data)#调整数值为0,方差为1 图像加标注框 batched = tf.expand_dims( tf.image.convert_image_dtype(img_data,tf.float32),0) boxes = tf.constant([0.05, 0.05, 0.9, 0.7],[0.35, 0.47, 0.5, 0.56])#同时添加两个标注框 #参数是相对位置,[y_min,x_min,y_max,x_max] boxed = tf.image.draw_bounding_boxes(batched,boxed) 完整图像预处理 import tensorflow as tf import matplotlib.pyplot as plt import numpy as np #随机调整一张图像的色彩,定义不同顺序调整亮度、对比度、饱和度和色相,具体使用的顺序会影响学习 def distort_color(image,color_ordering=0): if color_ordering == 0: image = tf.image.random_saturation(image,lower=0.5,upper=0.5) image = tf.image.random_brightness(image,max_delta=32. / 255. ) image = tf.random_hue(image,max_delta=0.2) elif color_ordering == 1: image = tf.image.random_brightness(image,max_delta=32. / 255. ) image = tf.image.random_saturation(image,lower=0.5,upper=0.5) image = tf.random_hue(image,max_delta=0.2) elif color_ordering == 2: #其他转换顺序 return tf.clip_by_value(image, 0.0, 1.0)#把图片每个元素值规定在范围内 #预处理图片 def preprocess_for_train(image,height,width,bbox): if bbox is None:#标注框没定义的话就取全图片 bbox = tf.constant([0.0,0.0,1.0,1.0],dtype=tf.float32,shape=[1,1,4]) if image.dtype != tf.float32#转换图像像素值类型 image = tf.convert_image_dtype(image, dtype=tf.float32) bbox_begin,bbox_size,_ = tf.image.sample_distorted_bounding_box(tf.shape(image),bounding_boxes=bbox)#随机截取图像 distort_image = tf.slice(image, bbox_begin, bbox_size) distorted_image = tf.image.resize_images(distort_image,[height,width],method=np.randint(4))#调整图像大小为神经网络的输入大小 distort_image = tf.image.random_flip_left_right(distort_image)#随机左右翻转图像 distort_image = distort_color(distort_image,np.random.randint(2))#随机调整图像颜色 return distort_image image_raw_data = tf.gfile.FastGFile(path,'rb').read() with tf.Session() as sess: image_data = tf.image.decode(image_raw_data) boxes = tf.constant([[[0.05,0.05,0.9,0.7],[0.35,0.47,0.5,0.56]]]) for i in range(6) result = preprocess_for_train(image_data,299,299,boxes) plt.imgshow(result.eval()) plt.show() 多线程处理数据输入 队列,处理输入数据的框架 import tensorflow as tf q = tf.FIFOQueue(2,'int32')#指定一个先进先出队列,可以保存两个元素 #RandomShufffleQueue是随机进出队列 init = q.enqueue_many(([0,10],))#使用函数初始化队列中的元素,元素的值为0和10 x = q.dequeue()#出队列 y = x + 1 q_inc = q.enqueue([y])#加入队列 with tf.Session() as tf: init.run()#初始化队列 for i in range(5): v,_ = sess.run([x,q_inc]) print v 多线程操作 coord = tf.train.Coordinator()#创建一个实例来协同多线程 threads = [ threading.Thread(target=MyLoop, args=(cord, i , )) for i in range(5)] for t in threads: t.start() coord.join(threads) def MyLoop(coord, worker_id): #使用tf.Coordinator 类提供的协同工具判断当前线程是否市要停止。 while not coord. should_stop (): #随机停止所有的线程。 if np.random.rand() < 0.1 print ” Stoping from id: %d\n” worker_id, #coord.request_stop()函数来通知其他线程停止。 coord.request_stop() else: #打印当前线程的Id print ” Working on id : %d\n ” % worker_id, #暂停l秒 time.sleep(l) 队列管理 queue = tf.FIFOQueue(100,"float") enqueue_op = queue.enqueue([tf.random_normal([1])])#入队操作 qr =
t(value=[value]
identifier_name
图像数据处理.py
True) images = mnist.train.images labels = mnist.train.labels pixels = images.shape[1] num_examples = mnist.train.num_examples filename = '/path/to/output.tfrecords' #创建一个writer来写TFRecord文件 writer = tf.python_io.TFRecordWriter(filename) for index in range(num_examples): images_raw = images[index].tostring()#将每个图像转换成一个字符串 #将一个样例转化为Example Protocol Buffer,并写入 example = tf.train.Example(features=tf.train.Feature(feature={ 'pixels':_int_64_feature(pixels), 'label':_int_64_feature(np.argmax(labels[index])), 'images_raw':_bytes_features(images_raw)} )) writer.write(example.SerializerToString())#写入TFRecord文件 writer.close() 读取TFRecord import tensorflow as tf #创建一个reader来读取tfr文件 reader = tf.TFRecordReader() #创建一个队列来维护输入文件列表 filename_queue = tf.train.string_input_producer(['/path/to/output.tfrecords']) #从队列中读取一个样例 _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example(#解析单个样例函数 serialized_example, features={ 'images_raw':tf.FixedLenFeature([],tf.string),#解析为一个tensor 'pixels':tf.FixedLenFeature([],tf.int64), 'label':tf.FixedLenFeature([],tf.int64), }) image= tf.decode_raw(features[’image_raw’], tf.uint8}#将字符串tensor解析成数组 label = tf.cast(features[’label’], tf.int32} pixels = tf.cast(features[’pixels’], tf.int32} sess = tf.Session() #启动多线程处理输入数据 coord = tf.train.Coordinator(} threads = tf.train.start_queue_runners(sess=sess, coord=coord} #每次运行可以读取TFRecord 文件中的一个样例。当所有样例读完之后,在此样例中程序 #会再从头读取。 for i in range(10} : print(sess.run([image, label, pixels])) 图像编码处理 import matplotlib.pyplot as plt import tensorflow as tf image_raw_data = tf.gfile.FastGFile('/path','rb').read()#读取原始图像为字符串 with tf.Session() as sess:#对图像进行解码,使用的是jpeg,还有png等 img_data = tf.image.decode_jpeg(image_raw_data)#结果是一个tensor print(img_data.eval()) plt.imshow(img_data.eval()) plt.show() encode_image = tf.image.encode_jpeg(img_data)#将tensor编码成jpeg并存入文件 with tf.gfile.FastGFlie('/path/to/output','wb') as f: f.write(encode_image.eval()) 调整图像大小 image_raw_data = tf.gfile.FastGFile('/path','rb').read() image_data = tf.decode_jpeg(image_raw_data)#解码图像 image_data = tf.image.convert_image_dtype(image_raw_data,dtype=tf.float32)#转换格式为浮点 resized = tf.image.resize_images(image_data,[300,300],method=0) croped = tf.image.resize_images_with_crop_or_pad(img_data,400,400)#截取指定大小图像,图像够大就截取,不够就在周围填充0 central = tf.image.central_crop(image_data,0.5)#根据比例截取 #图像各种翻转 fliped = tf.image.flip_up_down(image_data) fliped = tf.image.random_flip_up_down(image_data) fliped = tf.image.flip_left_right(image_data) fliped = tf.image.random_flip_left_right(image_data) transposed = tf.image.transpose_image(image_data) adjusted = tf.image.adjust_brightness(img_data,-0.5)#调整亮度 adjust_brightness = tf.clip_by_value(adjusted, 0.0, 1.0)#把亮度限定在正确范围内 adjusted = tf.image.random_brightness(image, random_range) adjust_brightness = tf.image.adjust_contrast(image_data, 5)#调整对比度 adjusted = tf.image.adjust_hue(img_data, 0.3)#调整色彩 adjusted = tf.image.adjust_saturation(img_data, 5)#调整饱和度 adjusted = tf.image.per_image_standardization(img_data)#调整数值为0,方差为1 图像加标注框 batched = tf.expand_dims( tf.image.convert_image_dtype(img_data,tf.float32),0) boxes = tf.constant([0.05, 0.05, 0.9, 0.7],[0.35, 0.47, 0.5, 0.56])#同时添加两个标注框 #参数是相对位置,[y_min,x_min,y_max,x_max] boxed = tf.image.draw_bounding_boxes(batched,boxed) 完整图像预处理 import tensorflow as tf import matplotlib.pyplot as plt import numpy as np #随机调整一张图像的色彩,定义不同顺序调整亮度、对比度、饱和度和色相,具体使用的顺序会影响学习 def distort_color(image,color_ordering=0): if color_ordering == 0: image = tf.image.random_saturation(image,lower=0.5,upper=0.5) image = tf.image.random_brightness(image,max_delta=32. / 255. ) image = tf.random_hue(image,max_delta=0.2) elif color_ordering == 1: image = tf.image.random_brightness(image,max_delta=32. / 255. ) image = tf.image.random_saturation(image,lower=0.5,upper=0.5) image = tf.random_hue(image,max_delta=0.2) elif color_ordering == 2: #其他转换顺序 return tf.clip_by_value(image, 0.0, 1.0)#把图片每个元素值规定在范围内 #预处理图片 def preprocess_for_train(image,height,width,bbox): if bbox is None:#标注框没定义的话就取全图片 bbox = tf.constant([0.0,0.0,1.0,1.0],dtype=tf.float32,shape=[1,1,4]) if image.dtype != tf.float32#转换图像像素值类型 image = tf.convert_image_dtype(image, dtype=tf.float32) bbox_begin,bbox_size,_ = tf.image.sample_distorted_bounding_box(tf.shape(image),bounding_boxes=bbox)#随机截取图像 distort_image = tf.slice(image, bbox_begin, bbox_size) distorted_image = tf.image.resize_images(distort_image,[height,width],method=np.randint(4))#调整图像大小为神经网络的输入大小 distort_image = tf.image.random_flip_left_right(distort_image)#随机左右翻转图像 distort_image = distort_color(distort_image,np.random.randint(2))#随机调整图像颜色 return distort_image image_raw_data = tf.gfile.FastGFile(path,'rb').read() with tf.Session() as sess: image_data = tf.image.decode(image_raw_data) boxes = tf.constant([[[0.05,0.05,0.9,0.7],[0.35,0.47,0.5,0.56]]]) for i in range(6) result = preprocess_for_train(image_data,299,299,boxes) plt.imgshow(result.eval()) plt.show() 多线程处理数据输入 队列,处理输入数据的框架 import tensorflow as tf q = tf.FIFOQueue(2,'int32')#指定一个先进先出队列,可以保存两个元素 #RandomShufffleQueue是随机进出队列 init = q.enqueue_many(([0,10],))#使用函数初始化队列中的元素,元素的值为0和10 x = q.dequeue()#出队列 y = x + 1 q_inc = q.enqueue([y])#加入队列 with tf.Session() as tf: init.run()#初始化队列 for i in range(5): v,_ = sess.run([x,q_inc]) print v 多线程操作 coord = tf.train.Coordinator()#创建一个实例来协同多线程 threads = [ threading.Thread(target=MyLoop, args=(cord, i , )) for i in range(5)] for t in threads: t.start() coord.join(threads) def MyLoop(coord, worker_id): #使用tf.Coordinator 类提供的协同工具判断当前线程是否市要停止。 while not coord. should_stop (): #随机停止所有的线程。 if np.random.rand() < 0.1 print ” Stoping from id: %d\n” worker_id, #coord.request_stop()函数来通知其他线程停止。 coord.request_stop() else: #打印当前线程的Id print ” Working on id : %d\n ” % worker_id, #暂停l秒 time.sleep(l) 队列管理 queue = tf.FIFOQueue(100,"float") enqueue_op = queue.enqueue([tf.random_normal([1])])#入队操作 qr = tf.train.QueueRunner(queue, [enqueue_op]
据 mnist = input_data.read_data_sets('/path', dtype=tf.uint8, one_hot=
identifier_body
functions_and_their_processes.rs
n kinds of coins equals - the number of ways to change amount a using all but the the first kind of coin, plus - the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin */ fn count_change(amount: i128) -> i128 { cc(amount, 6) } fn cc(amount: i128, coin_kind: i8) -> i128 { if amount == 0 { 1 } else { if amount < 0 || coin_kind == 0 { 0 } else { cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind) } } } fn get_value(coin_kind: i8) -> i128 { match coin_kind { 6 => 100, 5 => 50, 4 => 25, 3 => 10, 2 => 5, 1 => 1, _ => 0, } } /* Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3. Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f by means of an iterative process. */ fn fn3(n: i128) -> i128 { if n < 3 { n } else { fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3) } } fn fn3_iter(n: i128) -> i128 { fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 { if k == n { p1 } else { helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n) } } return helper(0, 1, 2, 2, n); } // m >= n pub fn pascal(m: i128, n: i128) -> i128 { if n == 0 || m == n { 1 } else { pascal(m - 1, n - 1) + pascal(m - 1, n) } } // pascal triangle with interative process pub fn pascal_iter(m: usize, n: usize) -> i128 { fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 { if m == 0 || m == n { 1 } else { if l == m { pre_vec[n - 1] + pre_vec[n] } else { let mut new_vec = vec![]; for (i, _) in pre_vec.iter().enumerate() { if i == 0 { new_vec.push(1); } else { new_vec.push(pre_vec[i - 1] + pre_vec[i]) } } new_vec.push(1); helper(m, n, l + 1, new_vec.to_vec()) } } } helper(m, n, 2, vec![1, 1]) } pub fn cube(x: f32) -> f32 { x * x * x } fn p(x: f32) -> f32 { 3.0 * x - 4.0 * cube(x) } pub fn sine(angle: f32) -> f32 { if f32::abs(angle) <= 0.1 { angle } else { p(sine(angle / 3.0)) } } pub fn expt(b: i128, n: i128) -> i128 { if n == 0 { 1 } else { b * expt(b, n - 1) } } pub fn expt_iter(b: i128, n: i128) -> i128 { fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 { if c == n { p } else { helper(c + 1, b * p, b, n) } } helper(0, 1, b, n) } pub fn is_even(n: i128) -> bool { n % 2 == 0 } pub fn square(i: i128) -> i128 { i * i } pub fn half(i: i128) -> i128 { i / 2 } pub fn fast_expt(b: i128, n: i128) -> i128 { if n == 1 { b } else { if is_even(n) { square(fast_expt(b, half(n))) } else { b * fast_expt(b, n - 1) } } } pub fn fast_expt_iter(b: i128, n: i128) -> i128 { fn helper(p: i128, b: i128, n: i128) -> i128 { if n == 0 { p } else { if is_even(n) { helper(p, square(b), half(n)) } else { helper(b * p, b, n - 1) } } } helper(1, b, n) } pub fn double(x: i128) -> i128 { x * 2 } pub fn times(a: i128, b: i128) -> i128 { if b == 0 { 0 } else { a + times(a, b - 1) } } pub fn times_iter(a: i128, b: i128) -> i128 { fn helper(s: i128, a: i128, b: i128) -> i128 { if b == 0 { s } else { if is_even(b) { helper(s, double(a), half(b)) } else { helper(s + a, a, b - 1) } } } helper(0, a, b) } pub fn fast_fib(n: i128) -> i128 { fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 { if count == 0 { b } else { if is_even(count) { helper( a, b, square(p) + square(q), 2 * p * q + square(q), half(count), ) } else { helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1) } } } helper(1, 0, 0, 1, n) } pub fn gcd(a: i128, b: i128) -> i128 { if b == 0 { a } else { gcd(b, a % b) } } pub fn devides(test_divisor: i128, n: i128) -> bool { n %
d_divisor(n: i128, test_divisor: i128) -> i128 { if square(test_divisor) > n { n } else { if devides(test_divisor, n) { test_divisor } else { find_divisor(n, test_divisor + 1) } } } pub fn smallest_divisor(n: i128) -> i128 { find_divisor(n, 2) } pub fn is_prime(n: i128) -> bool { smallest_divisor(n) == n } pub fn expmod(base: i128, exp: i128, m: i128) -> i128 { if exp == 0 { 1 } else { if is_even(exp) { // square after expmod, otherwise it will overflow easily square(expmod(base, half(exp), m)) % m } else { base * expmod(base, exp - 1, m) % m } } } // Fermat test pub fn fermat_test(n: i128) -> bool { fn try_it(a: i1
test_divisor == 0 } fn fin
identifier_body
functions_and_their_processes.rs
== 0 || m == n { 1 } else { pascal(m - 1, n - 1) + pascal(m - 1, n) } } // pascal triangle with interative process pub fn pascal_iter(m: usize, n: usize) -> i128 { fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 { if m == 0 || m == n { 1 } else { if l == m { pre_vec[n - 1] + pre_vec[n] } else { let mut new_vec = vec![]; for (i, _) in pre_vec.iter().enumerate() { if i == 0 { new_vec.push(1); } else { new_vec.push(pre_vec[i - 1] + pre_vec[i]) } } new_vec.push(1); helper(m, n, l + 1, new_vec.to_vec()) } } } helper(m, n, 2, vec![1, 1]) } pub fn cube(x: f32) -> f32 { x * x * x } fn p(x: f32) -> f32 { 3.0 * x - 4.0 * cube(x) } pub fn sine(angle: f32) -> f32 { if f32::abs(angle) <= 0.1 { angle } else { p(sine(angle / 3.0)) } } pub fn expt(b: i128, n: i128) -> i128 { if n == 0 { 1 } else { b * expt(b, n - 1) } } pub fn expt_iter(b: i128, n: i128) -> i128 { fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 { if c == n { p } else { helper(c + 1, b * p, b, n) } } helper(0, 1, b, n) } pub fn is_even(n: i128) -> bool { n % 2 == 0 } pub fn square(i: i128) -> i128 { i * i } pub fn half(i: i128) -> i128 { i / 2 } pub fn fast_expt(b: i128, n: i128) -> i128 { if n == 1 { b } else { if is_even(n) { square(fast_expt(b, half(n))) } else { b * fast_expt(b, n - 1) } } } pub fn fast_expt_iter(b: i128, n: i128) -> i128 { fn helper(p: i128, b: i128, n: i128) -> i128 { if n == 0 { p } else { if is_even(n) { helper(p, square(b), half(n)) } else { helper(b * p, b, n - 1) } } } helper(1, b, n) } pub fn double(x: i128) -> i128 { x * 2 } pub fn times(a: i128, b: i128) -> i128 { if b == 0 { 0 } else { a + times(a, b - 1) } } pub fn times_iter(a: i128, b: i128) -> i128 { fn helper(s: i128, a: i128, b: i128) -> i128 { if b == 0 { s } else { if is_even(b) { helper(s, double(a), half(b)) } else { helper(s + a, a, b - 1) } } } helper(0, a, b) } pub fn fast_fib(n: i128) -> i128 { fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 { if count == 0 { b } else { if is_even(count) { helper( a, b, square(p) + square(q), 2 * p * q + square(q), half(count), ) } else { helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1) } } } helper(1, 0, 0, 1, n) } pub fn gcd(a: i128, b: i128) -> i128 { if b == 0 { a } else { gcd(b, a % b) } } pub fn devides(test_divisor: i128, n: i128) -> bool { n % test_divisor == 0 } fn find_divisor(n: i128, test_divisor: i128) -> i128 { if square(test_divisor) > n { n } else { if devides(test_divisor, n) { test_divisor } else { find_divisor(n, test_divisor + 1) } } } pub fn smallest_divisor(n: i128) -> i128 { find_divisor(n, 2) } pub fn is_prime(n: i128) -> bool { smallest_divisor(n) == n } pub fn expmod(base: i128, exp: i128, m: i128) -> i128 { if exp == 0 { 1 } else { if is_even(exp) { // square after expmod, otherwise it will overflow easily square(expmod(base, half(exp), m)) % m } else { base * expmod(base, exp - 1, m) % m } } } // Fermat test pub fn fermat_test(n: i128) -> bool { fn try_it(a: i128, n: i128) -> bool { expmod(a, n, n) == a } let mut rng = rand::thread_rng(); let a = rng.gen_range(1, n); println!("fermat_test testing {}", a); try_it(a, n) } pub fn fast_is_prime(n: i128, times: i128) -> bool { if times == 0 { true } else { if fermat_test(n) { fast_is_prime(n, times - 1) } else { false } } } // Exercise 1.22 fn timed_prime_test(n: i128) -> bool { println!(" start testing: {}", n); let now = SystemTime::now(); start_prime_test(n, now) } fn start_prime_test(n: i128, now: SystemTime) -> bool { if is_prime(n) { report_prime(now, n) } else { true } } fn report_prime(now: SystemTime, n: i128) -> bool { println!(" *** "); println!(" prime number is: {}", n); println!("Time used: {}", get_lapsed_time_millis(now)); /* match now.elapsed() { Ok(elapsed) => println!("Time used: {}", elapsed.as_millis()), Err(e) => println!("Error: {:?}", e), } */ false } fn get_lapsed_time_millis(then: SystemTime) -> u128 { let new_now = SystemTime::now(); new_now .duration_since(UNIX_EPOCH) .expect("Time") .as_millis() - then.duration_since(UNIX_EPOCH).expect("Time").as_millis() } // start is odd number fn search_for_prime(start: i128, count: i128) { fn helper(start: i128, count: i128) { if count == 0 { return; } else { if timed_prime_test(start) { helper(start + 2, count) } else { helper(start + 2, count - 1) } } } helper(start, count) } // Exercise 1.27 fn test_carmichael_number(n: i128) {
for i in 2..n { if expmod(i, n, n) == i { println!(" testing {}", i); } }
random_line_split
functions_and_their_processes.rs
else { ackermann(a - 1, ackermann(a, b - 1)) } } } } fn f(n: i128) -> i128 { ackermann(0, n) } fn g(n: i128) -> i128 { ackermann(1, n) } fn h(n: i128) -> i128 { ackermann(2, n) } pub fn fac(n: i128) -> i128 { if n == 1 { 1 } else { n * fac(n - 1) } } pub fn fib(n: i128) -> i128 { if n < 2 { n } else { fib(n - 2) + fib(n - 1) } } // rust function can not access local variable pub fn fib_iter(n: i128) -> i128 { fn helper(a: i128, b: i128, i: i128, n: i128) -> i128 { if i == n { b } else { helper(b, a + b, i + 1, n) } } helper(0, 1, 1, n) } /* The number of ways to change amount a using n kinds of coins equals - the number of ways to change amount a using all but the the first kind of coin, plus - the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin */ fn count_change(amount: i128) -> i128 { cc(amount, 6) } fn cc(amount: i128, coin_kind: i8) -> i128 { if amount == 0 { 1 } else { if amount < 0 || coin_kind == 0 { 0 } else { cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind) } } } fn get_value(coin_kind: i8) -> i128 { match coin_kind { 6 => 100, 5 => 50, 4 => 25, 3 => 10, 2 => 5, 1 => 1, _ => 0, } } /* Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3. Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f by means of an iterative process. */ fn fn3(n: i128) -> i128 { if n < 3 { n } else { fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3) } } fn fn3_iter(n: i128) -> i128 { fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 { if k == n { p1 } else { helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n) } } return helper(0, 1, 2, 2, n); } // m >= n pub fn pascal(m: i128, n: i128) -> i128 { if n == 0 || m == n { 1 } else { pascal(m - 1, n - 1) + pascal(m - 1, n) } } // pascal triangle with interative process pub fn pascal_iter(m: usize, n: usize) -> i128 { fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 { if m == 0 || m == n { 1 } else { if l == m { pre_vec[n - 1] + pre_vec[n] } else { let mut new_vec = vec![]; for (i, _) in pre_vec.iter().enumerate() { if i == 0 { new_vec.push(1); } else { new_vec.push(pre_vec[i - 1] + pre_vec[i]) } } new_vec.push(1); helper(m, n, l + 1, new_vec.to_vec()) } } } helper(m, n, 2, vec![1, 1]) } pub fn cube(x: f32) -> f32 { x * x * x } fn p(x: f32) -> f32 { 3.0 * x - 4.0 * cube(x) } pub fn sine(angle: f32) -> f32 { if f32::abs(angle) <= 0.1 { angle } else { p(sine(angle / 3.0)) } } pub fn expt(b: i128, n: i128) -> i128 { if n == 0 { 1 } else { b * expt(b, n - 1) } } pub fn expt_iter(b: i128, n: i128) -> i128 { fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 { if c == n { p } else { helper(c + 1, b * p, b, n) } } helper(0, 1, b, n) } pub fn is_even(n: i128) -> bool { n % 2 == 0 } pub fn square(i: i128) -> i128 { i * i } pub fn half(i: i128) -> i128 { i / 2 } pub fn fast_expt(b: i128, n: i128) -> i128 { if n == 1 { b } else { if is_even(n) { square(fast_expt(b, half(n))) } else { b * fast_expt(b, n - 1) } } } pub fn fast_expt_iter(b: i128, n: i128) -> i128 { fn helper(p: i128, b: i128, n: i128) -> i128 { if n == 0 { p } else { if is_even(n) { helper(p, square(b), half(n)) } else { helper(b * p, b, n - 1) } } } helper(1, b, n) } pub fn double(x: i128) -> i128 { x * 2 } pub fn times(a: i128, b: i128) -> i128 { if b == 0 { 0 } else { a + times(a, b - 1) } } pub fn times_iter(a: i128, b: i128) -> i128 { fn helper(s: i128, a: i128, b: i128) -> i128 { if b == 0 { s } else { if is_even(b) { helper(s, double(a), half(b)) } else { helper(s + a, a, b - 1) } } } helper(0, a, b) } pub fn fast_fib(n: i128) -> i128 { fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 { if count == 0 { b } else { if is_even(count) { helper( a, b, square(p) + square(q), 2 * p * q + square(q), half(count), ) } else { helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1) } } } helper(1, 0, 0, 1, n) } pub fn gcd(a: i128, b: i128) -> i128 { if b == 0 { a } else {
{ 2 }
conditional_block
functions_and_their_processes.rs
n kinds of coins equals - the number of ways to change amount a using all but the the first kind of coin, plus - the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin */ fn count_change(amount: i128) -> i128 { cc(amount, 6) } fn
(amount: i128, coin_kind: i8) -> i128 { if amount == 0 { 1 } else { if amount < 0 || coin_kind == 0 { 0 } else { cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind) } } } fn get_value(coin_kind: i8) -> i128 { match coin_kind { 6 => 100, 5 => 50, 4 => 25, 3 => 10, 2 => 5, 1 => 1, _ => 0, } } /* Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3. Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f by means of an iterative process. */ fn fn3(n: i128) -> i128 { if n < 3 { n } else { fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3) } } fn fn3_iter(n: i128) -> i128 { fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 { if k == n { p1 } else { helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n) } } return helper(0, 1, 2, 2, n); } // m >= n pub fn pascal(m: i128, n: i128) -> i128 { if n == 0 || m == n { 1 } else { pascal(m - 1, n - 1) + pascal(m - 1, n) } } // pascal triangle with interative process pub fn pascal_iter(m: usize, n: usize) -> i128 { fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 { if m == 0 || m == n { 1 } else { if l == m { pre_vec[n - 1] + pre_vec[n] } else { let mut new_vec = vec![]; for (i, _) in pre_vec.iter().enumerate() { if i == 0 { new_vec.push(1); } else { new_vec.push(pre_vec[i - 1] + pre_vec[i]) } } new_vec.push(1); helper(m, n, l + 1, new_vec.to_vec()) } } } helper(m, n, 2, vec![1, 1]) } pub fn cube(x: f32) -> f32 { x * x * x } fn p(x: f32) -> f32 { 3.0 * x - 4.0 * cube(x) } pub fn sine(angle: f32) -> f32 { if f32::abs(angle) <= 0.1 { angle } else { p(sine(angle / 3.0)) } } pub fn expt(b: i128, n: i128) -> i128 { if n == 0 { 1 } else { b * expt(b, n - 1) } } pub fn expt_iter(b: i128, n: i128) -> i128 { fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 { if c == n { p } else { helper(c + 1, b * p, b, n) } } helper(0, 1, b, n) } pub fn is_even(n: i128) -> bool { n % 2 == 0 } pub fn square(i: i128) -> i128 { i * i } pub fn half(i: i128) -> i128 { i / 2 } pub fn fast_expt(b: i128, n: i128) -> i128 { if n == 1 { b } else { if is_even(n) { square(fast_expt(b, half(n))) } else { b * fast_expt(b, n - 1) } } } pub fn fast_expt_iter(b: i128, n: i128) -> i128 { fn helper(p: i128, b: i128, n: i128) -> i128 { if n == 0 { p } else { if is_even(n) { helper(p, square(b), half(n)) } else { helper(b * p, b, n - 1) } } } helper(1, b, n) } pub fn double(x: i128) -> i128 { x * 2 } pub fn times(a: i128, b: i128) -> i128 { if b == 0 { 0 } else { a + times(a, b - 1) } } pub fn times_iter(a: i128, b: i128) -> i128 { fn helper(s: i128, a: i128, b: i128) -> i128 { if b == 0 { s } else { if is_even(b) { helper(s, double(a), half(b)) } else { helper(s + a, a, b - 1) } } } helper(0, a, b) } pub fn fast_fib(n: i128) -> i128 { fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 { if count == 0 { b } else { if is_even(count) { helper( a, b, square(p) + square(q), 2 * p * q + square(q), half(count), ) } else { helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1) } } } helper(1, 0, 0, 1, n) } pub fn gcd(a: i128, b: i128) -> i128 { if b == 0 { a } else { gcd(b, a % b) } } pub fn devides(test_divisor: i128, n: i128) -> bool { n % test_divisor == 0 } fn find_divisor(n: i128, test_divisor: i128) -> i128 { if square(test_divisor) > n { n } else { if devides(test_divisor, n) { test_divisor } else { find_divisor(n, test_divisor + 1) } } } pub fn smallest_divisor(n: i128) -> i128 { find_divisor(n, 2) } pub fn is_prime(n: i128) -> bool { smallest_divisor(n) == n } pub fn expmod(base: i128, exp: i128, m: i128) -> i128 { if exp == 0 { 1 } else { if is_even(exp) { // square after expmod, otherwise it will overflow easily square(expmod(base, half(exp), m)) % m } else { base * expmod(base, exp - 1, m) % m } } } // Fermat test pub fn fermat_test(n: i128) -> bool { fn try_it(a: i12
cc
identifier_name
lib.rs
/// method to check for cancellation). /// /// # Panics /// /// It is not permitted to create a snapshot from inside of a /// query. Attepting to do so will panic. /// /// # Deadlock warning /// /// The intended pattern for snapshots is that, once created, they /// are sent to another thread and used from there. As such, the /// `snapshot` acquires a "read lock" on the database -- /// therefore, so long as the `snapshot` is not dropped, any /// attempt to `set` a value in the database will block. If the /// `snapshot` is owned by the same thread that is attempting to /// `set`, this will cause a problem. /// /// # How to implement this /// /// Typically, this method will create a second copy of your /// database type (`MyDatabaseType`, in the example below), /// cloning over each of the fields from `self` into this new /// copy. For the field that stores the salsa runtime, you should /// use [the `Runtime::snapshot` method][rfm] to create a snapshot of the /// runtime. Finally, package up the result using `Snapshot::new`, /// which is a simple wrapper type that only gives `&self` access /// to the database within (thus preventing the use of methods /// that may mutate the inputs): /// /// [rfm]: struct.Runtime.html#method.snapshot /// /// ```rust,ignore /// impl ParallelDatabase for MyDatabaseType { /// fn snapshot(&self) -> Snapshot<Self> { /// Snapshot::new( /// MyDatabaseType { /// runtime: self.runtime.snapshot(self), /// other_field: self.other_field.clone(), /// } /// ) /// } /// } /// ``` fn snapshot(&self) -> Snapshot<Self>; } /// Simple wrapper struct that takes ownership of a database `DB` and /// only gives `&self` access to it. See [the `snapshot` method][fm] /// for more details. /// /// [fm]: trait.ParallelDatabase.html#method.snapshot #[derive(Debug)] pub struct Snapshot<DB: ?Sized> where DB: ParallelDatabase, { db: DB, } impl<DB> Snapshot<DB> where DB: ParallelDatabase, { /// Creates a `Snapshot` that wraps the given database handle /// `db`. From this point forward, only shared references to `db` /// will be possible. pub fn new(db: DB) -> Self { Snapshot { db } } } impl<DB> std::ops::Deref for Snapshot<DB> where DB: ParallelDatabase, { type Target = DB; fn deref(&self) -> &DB { &self.db } } /// An integer that uniquely identifies a particular query instance within the /// database. Used to track dependencies between queries. Fully ordered and /// equatable but those orderings are arbitrary, and meant to be used only for /// inserting into maps and the like. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub struct DatabaseKeyIndex { group_index: u16, query_index: u16, key_index: u32, } impl DatabaseKeyIndex { /// Returns the index of the query group containing this key. #[inline] pub fn group_index(self) -> u16 { self.group_index } /// Returns the index of the query within its query group. #[inline] pub fn query_index(self) -> u16 { self.query_index } /// Returns the index of this particular query key within the query. #[inline] pub fn key_index(self) -> u32 { self.key_index } /// Returns a type that gives a user-readable debug output. /// Use like `println!("{:?}", index.debug(db))`. pub fn debug<D: ?Sized>(self, db: &D) -> impl std::fmt::Debug + '_ where D: plumbing::DatabaseOps, { DatabaseKeyIndexDebug { index: self, db } } } /// Helper type for `DatabaseKeyIndex::debug` struct DatabaseKeyIndexDebug<'me, D: ?Sized> where D: plumbing::DatabaseOps, { index: DatabaseKeyIndex, db: &'me D, } impl<D: ?Sized> std::fmt::Debug for DatabaseKeyIndexDebug<'_, D> where D: plumbing::DatabaseOps, { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.db.fmt_index(self.index, fmt) } } /// Trait implements by all of the "special types" associated with /// each of your queries. /// /// Base trait of `Query` that has a lifetime parameter to allow the `DynDb` to be non-'static. pub trait QueryDb<'d>: Sized { /// Dyn version of the associated trait for this query group. type DynDb: ?Sized + Database + HasQueryGroup<Self::Group> + 'd; /// Associate query group struct. type Group: plumbing::QueryGroup<GroupStorage = Self::GroupStorage>; /// Generated struct that contains storage for all queries in a group. type GroupStorage; } /// Trait implements by all of the "special types" associated with /// each of your queries. pub trait Query: Debug + Default + Sized + for<'d> QueryDb<'d> { /// Type that you you give as a parameter -- for queries with zero /// or more than one input, this will be a tuple. type Key: Clone + Debug + Hash + Eq; /// What value does the query return? type Value: Clone + Debug; /// Internal struct storing the values for the query. // type Storage: plumbing::QueryStorageOps<Self>; type Storage; /// A unique index identifying this query within the group. const QUERY_INDEX: u16; /// Name of the query method (e.g., `foo`) const QUERY_NAME: &'static str; /// Exact storage for this query from the storage for its group. fn query_storage<'a>( group_storage: &'a <Self as QueryDb<'_>>::GroupStorage, ) -> &'a Arc<Self::Storage>; /// Exact storage for this query from the storage for its group. fn query_storage_mut<'a>( group_storage: &'a <Self as QueryDb<'_>>::GroupStorage, ) -> &'a Arc<Self::Storage>; } /// Return value from [the `query` method] on `Database`. /// Gives access to various less common operations on queries. /// /// [the `query` method]: trait.Database.html#method.query pub struct QueryTable<'me, Q> where Q: Query, { db: &'me <Q as QueryDb<'me>>::DynDb, storage: &'me Q::Storage, } impl<'me, Q> QueryTable<'me, Q> where Q: Query, Q::Storage: QueryStorageOps<Q>, { /// Constructs a new `QueryTable`. pub fn new(db: &'me <Q as QueryDb<'me>>::DynDb, storage: &'me Q::Storage) -> Self { Self { db, storage } } /// Execute the query on a given input. Usually it's easier to /// invoke the trait method directly. Note that for variadic /// queries (those with no inputs, or those with more than one /// input) the key will be a tuple. pub fn get(&self, key: Q::Key) -> Q::Value { self.storage.fetch(self.db, &key) } /// Completely clears the storage for this query. /// /// This method breaks internal invariants of salsa, so any further queries /// might return nonsense results. It is useful only in very specific /// circumstances -- for example, when one wants to observe which values /// dropped together with the table pub fn purge(&self) where Q::Storage: plumbing::QueryStorageMassOps, { self.storage.purge(); } } /// Return value from [the `query_mut` method] on `Database`. /// Gives access to the `set` method, notably, that is used to /// set the value of an input query. /// /// [the `query_mut` method]: trait.Database.html#method.query_mut pub struct QueryTableMut<'me, Q> where Q: Query + 'me, { runtime: &'me mut Runtime, storage: &'me Q::Storage, } impl<'me, Q> QueryTableMut<'me, Q> where Q: Query, { /// Constructs a new `QueryTableMut`. pub fn new(runtime: &'me mut Runtime, storage: &'me Q::Storage) -> Self { Self { runtime, storage } } /// Assign a value to an "input query". Must be used outside of /// an active query computation. /// /// If you are using `snapshot`, see the notes on blocking /// and cancellation on [the `query_mut` method]. /// /// [the `query_mut` method]: trait.Database.html#method.query_mut pub fn
set
identifier_name
lib.rs
/// Gives access to the underlying salsa runtime. /// /// This method should not be overridden by `Database` implementors. fn salsa_runtime(&self) -> &Runtime { self.ops_salsa_runtime() } /// Gives access to the underlying salsa runtime. /// /// This method should not be overridden by `Database` implementors. fn salsa_runtime_mut(&mut self) -> &mut Runtime { self.ops_salsa_runtime_mut() } } /// The `Event` struct identifies various notable things that can /// occur during salsa execution. Instances of this struct are given /// to `salsa_event`. pub struct Event { /// The id of the snapshot that triggered the event. Usually /// 1-to-1 with a thread, as well. pub runtime_id: RuntimeId, /// What sort of event was it. pub kind: EventKind, } impl Event { /// Returns a type that gives a user-readable debug output. /// Use like `println!("{:?}", index.debug(db))`. pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me where D: plumbing::DatabaseOps, { EventDebug { event: self, db } } } impl fmt::Debug for Event { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Event") .field("runtime_id", &self.runtime_id) .field("kind", &self.kind) .finish() } } struct EventDebug<'me, D: ?Sized> where D: plumbing::DatabaseOps, { event: &'me Event, db: &'me D, } impl<'me, D: ?Sized> fmt::Debug for EventDebug<'me, D> where D: plumbing::DatabaseOps, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Event") .field("runtime_id", &self.event.runtime_id) .field("kind", &self.event.kind.debug(self.db)) .finish() } } /// An enum identifying the various kinds of events that can occur. pub enum EventKind { /// Occurs when we found that all inputs to a memoized value are /// up-to-date and hence the value can be re-used without /// executing the closure. /// /// Executes before the "re-used" value is returned. DidValidateMemoizedValue { /// The database-key for the affected value. Implements `Debug`. database_key: DatabaseKeyIndex, }, /// Indicates that another thread (with id `other_runtime_id`) is processing the /// given query (`database_key`), so we will block until they /// finish. /// /// Executes after we have registered with the other thread but /// before they have answered us. /// /// (NB: you can find the `id` of the current thread via the /// `salsa_runtime`) WillBlockOn { /// The id of the runtime we will block on. other_runtime_id: RuntimeId, /// The database-key for the affected value. Implements `Debug`. database_key: DatabaseKeyIndex, }, /// Indicates that the function for this query will be executed. /// This is either because it has never executed before or because /// its inputs may be out of date. WillExecute { /// The database-key for the affected value. Implements `Debug`. database_key: DatabaseKeyIndex, }, /// Indicates that `unwind_if_cancelled` was called and salsa will check if /// the current revision has been cancelled. WillCheckCancellation, } impl EventKind { /// Returns a type that gives a user-readable debug output. /// Use like `println!("{:?}", index.debug(db))`. pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me where D: plumbing::DatabaseOps, { EventKindDebug { kind: self, db } } } impl fmt::Debug for EventKind { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self { EventKind::DidValidateMemoizedValue { database_key } => fmt .debug_struct("DidValidateMemoizedValue") .field("database_key", database_key) .finish(), EventKind::WillBlockOn { other_runtime_id, database_key, } => fmt .debug_struct("WillBlockOn") .field("other_runtime_id", other_runtime_id) .field("database_key", database_key) .finish(), EventKind::WillExecute { database_key } => fmt .debug_struct("WillExecute") .field("database_key", database_key) .finish(), EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(), } } } struct EventKindDebug<'me, D: ?Sized> where D: plumbing::DatabaseOps, { kind: &'me EventKind, db: &'me D, } impl<'me, D: ?Sized> fmt::Debug for EventKindDebug<'me, D> where D: plumbing::DatabaseOps, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self.kind { EventKind::DidValidateMemoizedValue { database_key } => fmt .debug_struct("DidValidateMemoizedValue") .field("database_key", &database_key.debug(self.db)) .finish(), EventKind::WillBlockOn { other_runtime_id, database_key, } => fmt .debug_struct("WillBlockOn") .field("other_runtime_id", &other_runtime_id) .field("database_key", &database_key.debug(self.db)) .finish(), EventKind::WillExecute { database_key } => fmt .debug_struct("WillExecute") .field("database_key", &database_key.debug(self.db)) .finish(), EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(), } } } /// Indicates a database that also supports parallel query /// evaluation. All of Salsa's base query support is capable of /// parallel execution, but for it to work, your query key/value types /// must also be `Send`, as must any additional data in your database. pub trait ParallelDatabase: Database + Send { /// Creates a second handle to the database that holds the /// database fixed at a particular revision. So long as this /// "frozen" handle exists, any attempt to [`set`] an input will /// block. /// /// [`set`]: struct.QueryTable.html#method.set /// /// This is the method you are meant to use most of the time in a /// parallel setting where modifications may arise asynchronously /// (e.g., a language server). In this context, it is common to /// wish to "fork off" a snapshot of the database performing some /// series of queries in parallel and arranging the results. Using /// this method for that purpose ensures that those queries will /// see a consistent view of the database (it is also advisable /// for those queries to use the [`Runtime::unwind_if_cancelled`] /// method to check for cancellation). /// /// # Panics /// /// It is not permitted to create a snapshot from inside of a /// query. Attepting to do so will panic. /// /// # Deadlock warning /// /// The intended pattern for snapshots is that, once created, they /// are sent to another thread and used from there. As such, the /// `snapshot` acquires a "read lock" on the database -- /// therefore, so long as the `snapshot` is not dropped, any /// attempt to `set` a value in the database will block. If the /// `snapshot` is owned by the same thread that is attempting to /// `set`, this will cause a problem. /// /// # How to implement this /// /// Typically, this method will create a second copy of your /// database type (`MyDatabaseType`, in the example below), /// cloning over each of the fields from `self` into this new /// copy. For the field that stores the salsa runtime, you should /// use [the `Runtime::snapshot` method][rfm] to create a snapshot of the /// runtime. Finally, package up the result using `Snapshot::new`, /// which is a simple wrapper type that only gives `&self` access /// to the database within (thus preventing the use of methods /// that may mutate the inputs): /// /// [rfm]: struct.Runtime.html#method.snapshot /// /// ```rust,ignore /// impl ParallelDatabase for MyDatabaseType { /// fn snapshot(&self) -> Snapshot<Self> { /// Snapshot::new( /// MyDatabaseType { /// runtime: self.runtime.snapshot(self), /// other_field: self.other_field.clone(), /// } /// )
if pending_revision > current_revision { runtime.unwind_cancelled(); } }
random_line_split
pwm.rs
IllegalChangeWhileEnabled(&'static str), #[error("expected boolean value, got {0:?}")] NotBoolean(String), #[error("expected a duration in nanoseconds, got {0:?}: {1}")] NotADuration(String, #[source] std::num::ParseIntError), } /// Used in PwmError to format sysfs related errors. #[derive(Debug)] pub enum Access { Read(PathBuf), Write(PathBuf), } /// Exposes PWM functionality. /// /// Since the Linux kernel exposes PWM controllers and their settings through /// sysfs, PWM operations are just file reads and writes. To allow testing with /// a real file system but outside of sysfs, the `sysfs_root` property may be /// used to "offset" those operations to an alternative directory. /// /// Documentation on Linux PWM sysfs: /// <https://www.kernel.org/doc/html/latest/driver-api/pwm.html> #[derive(Debug)] pub struct Pwm { sysfs_root: PathBuf, } /// A PWM controller (a.k.a. PWM chip) is identified by a non-negative number. #[derive(Debug, Clone)] pub struct Controller(pub u32); /// PWM controllers expose channels, which are also identified by non-negative numbers. #[derive(Debug, Clone)] pub struct Channel(pub u32); type Result<T> = std::result::Result<T, PwmError>; impl Pwm { /// Initialize PWM. pub fn new() -> Self { Self::with_sysfs_root(PathBuf::from("/sys/class/pwm")) } /// Initialize PWM with an alternative sysfs directory, for testing. pub fn with_sysfs_root(sysfs_root: PathBuf) -> Self { if !sysfs_root.exists() { panic!("sysfs root does not exist: {:?}", sysfs_root); } Self { sysfs_root } } /// Returns the number of channels for the given controller. #[instrument] pub fn npwm(&self, controller: &Controller) -> Result<u32> { self.controller_file(controller, "npwm") .and_then(|path| read(&path)) .map(|s| { s.trim() .parse::<u32>() .expect("npwm expected to contain the number of channels") }) } /// Returns whether a controller's channels are ready to be used. #[instrument] pub fn is_exported(&self, controller: &Controller) -> Result<bool> { // A controller is exported if the channel subdirectories are there. // Since a controller without any channel doesn't make sense, it's // enough to check for the existance of the first channel's enable file. match self.channel_dir(controller, &Channel(0)) { Ok(_) => Ok(true), Err(PwmError::NotExported(_)) => Ok(false), Err(e) => Err(e), } } /// Export a PWM controller, which enables access to its channels. #[instrument] pub fn export(&mut self, controller: Controller) -> Result<()> { self.controller_file(&controller, "export") .and_then(|path| write(&path, "1")) } /// Unexport a PWM controller, which disables access to its channels. #[instrument] pub fn unexport(&mut self, controller: Controller) -> Result<()> { self.controller_file(&controller, "unexport") .and_then(|path| write(&path, "1")) } /// Returns whether a controller's channel is enabled. #[instrument] pub fn is_enabled(&self, controller: &Controller, channel: &Channel) -> Result<bool> { self.channel_file(controller, channel, "enable") .and_then(|path| read(&path)) .and_then(parse_bool) } /// Enable a channel. #[instrument] pub fn enable(&mut self, controller: Controller, channel: Channel) -> Result<()> { self.channel_file(&controller, &channel, "enable") .and_then(|path| write(&path, "1")) } /// Disable a channel. #[instrument] pub fn disable(&mut self, controller: Controller, channel: Channel) -> Result<()> { self.channel_file(&controller, &channel, "enable") .and_then(|path| write(&path, "0")) } /// The total period of the PWM signal (read/write). Value is in nanoseconds /// and is the sum of the active and inactive time of the PWM. #[instrument] pub fn set_period( &mut self, controller: Controller, channel: Channel, period: Duration, ) -> Result<()> { let duty_cycle = self .channel_file(&controller, &channel, "duty_cycle") .and_then(|path| read(&path)) .and_then(parse_duration)?; if duty_cycle > period { return Err(PwmError::DutyCycleGreaterThanPeriod); } self.channel_file(&controller, &channel, "period") .and_then(|path| write(&path, &period.as_nanos().to_string())) } /// The active time of the PWM signal (read/write). Value is in nanoseconds /// and must be less than the period. #[instrument] pub fn set_duty_cycle( &mut self, controller: Controller, channel: Channel, duty_cycle: Duration, ) -> Result<()> { let period = self .channel_file(&controller, &channel, "period") .and_then(|path| read(&path)) .and_then(parse_duration)?; if duty_cycle > period { return Err(PwmError::DutyCycleGreaterThanPeriod); } self.channel_file(&controller, &channel, "duty_cycle") .and_then(|path| write(&path, &duty_cycle.as_nanos().to_string())) } /// Changes the polarity of the PWM signal (read/write). Writes to this /// property only work if the PWM chip supports changing the polarity. The /// polarity can only be changed if the PWM is not enabled. Value is the /// string “normal” or “inversed”. #[instrument] pub fn set_polarity( &mut self, controller: Controller, channel: Channel, polarity: Polarity, ) -> Result<()> { // setting polarity is only allowed if channel is disabled: if self.is_enabled(&controller, &channel)? { return Err(PwmError::IllegalChangeWhileEnabled("polarity")); } self.channel_file(&controller, &channel, "polarity") .and_then(|path| write(&path, &polarity.to_string())) } fn controller_dir(&self, controller: &Controller) -> Result<PathBuf> { let path = self.sysfs_root.join(format!("pwmchip{}", controller.0)); if path.is_dir() { Ok(path) } else { Err(PwmError::ControllerNotFound(controller.clone())) } } fn controller_file(&self, controller: &Controller, fname: &str) -> Result<PathBuf> {
if path.is_file() { Ok(path) } else { Err(PwmError::ControllerNotFound(controller.clone())) } } fn channel_dir(&self, controller: &Controller, channel: &Channel) -> Result<PathBuf> { let n_pwm = self.npwm(controller)?; if channel.0 >= n_pwm { return Err(PwmError::ChannelNotFound( controller.clone(), channel.clone(), )); } let path = self .controller_dir(controller) .map(|controller| controller.join(format!("pwm{}", channel.0)))?; if path.is_dir() { Ok(path) } else { Err(PwmError::NotExported(controller.clone())) } } fn channel_file( &self, controller: &Controller, channel: &Channel, fname: &str, ) -> Result<PathBuf> { let path = self .channel_dir(controller, channel) .map(|channel| channel.join(fname))?; if path.is_file() { Ok(path) } else { Err(PwmError::NotExported(controller.clone())) } } } fn read(path: &Path) -> Result<String> { fs::read_to_string(path).map_err(|e| PwmError::Sysfs(Access::Read(path.to_owned()), e)) } fn write(path: &Path, contents: &str) -> Result<()> { debug!("writing to {:?}", path); fs::write(path, contents).map_err(|e| PwmError::Sysfs(Access::Write(path.to_owned()), e)) } fn parse_bool(s: String) -> Result<bool> { // sysfs compatible according to http://lkml.iu.edu/hypermail/linux/kernel/1103.2/02488.html match s.trim_end().to_lowercase().as_ref() { "1" | "y" | "yes" | "true" => Ok(true), "0" | "n" | "no" | "false" | "" => Ok(false), _ => Err(PwmError::NotBoolean(s)), } } fn parse_duration(s: String) -> Result<Duration> { s.trim_end() .parse::<u64>() .map_err(|e
let path = self .sysfs_root .join(format!("pwmchip{}/{}", controller.0, fname));
random_line_split
pwm.rs
duration in nanoseconds, got {0:?}: {1}")] NotADuration(String, #[source] std::num::ParseIntError), } /// Used in PwmError to format sysfs related errors. #[derive(Debug)] pub enum Access { Read(PathBuf), Write(PathBuf), } /// Exposes PWM functionality. /// /// Since the Linux kernel exposes PWM controllers and their settings through /// sysfs, PWM operations are just file reads and writes. To allow testing with /// a real file system but outside of sysfs, the `sysfs_root` property may be /// used to "offset" those operations to an alternative directory. /// /// Documentation on Linux PWM sysfs: /// <https://www.kernel.org/doc/html/latest/driver-api/pwm.html> #[derive(Debug)] pub struct Pwm { sysfs_root: PathBuf, } /// A PWM controller (a.k.a. PWM chip) is identified by a non-negative number. #[derive(Debug, Clone)] pub struct Controller(pub u32); /// PWM controllers expose channels, which are also identified by non-negative numbers. #[derive(Debug, Clone)] pub struct Channel(pub u32); type Result<T> = std::result::Result<T, PwmError>; impl Pwm { /// Initialize PWM. pub fn new() -> Self { Self::with_sysfs_root(PathBuf::from("/sys/class/pwm")) } /// Initialize PWM with an alternative sysfs directory, for testing. pub fn with_sysfs_root(sysfs_root: PathBuf) -> Self { if !sysfs_root.exists() { panic!("sysfs root does not exist: {:?}", sysfs_root); } Self { sysfs_root } } /// Returns the number of channels for the given controller. #[instrument] pub fn npwm(&self, controller: &Controller) -> Result<u32> { self.controller_file(controller, "npwm") .and_then(|path| read(&path)) .map(|s| { s.trim() .parse::<u32>() .expect("npwm expected to contain the number of channels") }) } /// Returns whether a controller's channels are ready to be used. #[instrument] pub fn is_exported(&self, controller: &Controller) -> Result<bool> { // A controller is exported if the channel subdirectories are there. // Since a controller without any channel doesn't make sense, it's // enough to check for the existance of the first channel's enable file. match self.channel_dir(controller, &Channel(0)) { Ok(_) => Ok(true), Err(PwmError::NotExported(_)) => Ok(false), Err(e) => Err(e), } } /// Export a PWM controller, which enables access to its channels. #[instrument] pub fn export(&mut self, controller: Controller) -> Result<()> { self.controller_file(&controller, "export") .and_then(|path| write(&path, "1")) } /// Unexport a PWM controller, which disables access to its channels. #[instrument] pub fn unexport(&mut self, controller: Controller) -> Result<()> { self.controller_file(&controller, "unexport") .and_then(|path| write(&path, "1")) } /// Returns whether a controller's channel is enabled. #[instrument] pub fn is_enabled(&self, controller: &Controller, channel: &Channel) -> Result<bool> { self.channel_file(controller, channel, "enable") .and_then(|path| read(&path)) .and_then(parse_bool) } /// Enable a channel. #[instrument] pub fn enable(&mut self, controller: Controller, channel: Channel) -> Result<()> { self.channel_file(&controller, &channel, "enable") .and_then(|path| write(&path, "1")) } /// Disable a channel. #[instrument] pub fn disable(&mut self, controller: Controller, channel: Channel) -> Result<()> { self.channel_file(&controller, &channel, "enable") .and_then(|path| write(&path, "0")) } /// The total period of the PWM signal (read/write). Value is in nanoseconds /// and is the sum of the active and inactive time of the PWM. #[instrument] pub fn set_period( &mut self, controller: Controller, channel: Channel, period: Duration, ) -> Result<()> { let duty_cycle = self .channel_file(&controller, &channel, "duty_cycle") .and_then(|path| read(&path)) .and_then(parse_duration)?; if duty_cycle > period { return Err(PwmError::DutyCycleGreaterThanPeriod); } self.channel_file(&controller, &channel, "period") .and_then(|path| write(&path, &period.as_nanos().to_string())) } /// The active time of the PWM signal (read/write). Value is in nanoseconds /// and must be less than the period. #[instrument] pub fn set_duty_cycle( &mut self, controller: Controller, channel: Channel, duty_cycle: Duration, ) -> Result<()> { let period = self .channel_file(&controller, &channel, "period") .and_then(|path| read(&path)) .and_then(parse_duration)?; if duty_cycle > period { return Err(PwmError::DutyCycleGreaterThanPeriod); } self.channel_file(&controller, &channel, "duty_cycle") .and_then(|path| write(&path, &duty_cycle.as_nanos().to_string())) } /// Changes the polarity of the PWM signal (read/write). Writes to this /// property only work if the PWM chip supports changing the polarity. The /// polarity can only be changed if the PWM is not enabled. Value is the /// string “normal” or “inversed”. #[instrument] pub fn set_polarity( &mut self, controller: Controller, channel: Channel, polarity: Polarity, ) -> Result<()> { // setting polarity is only allowed if channel is disabled: if self.is_enabled(&controller, &channel)? { return Err(PwmError::IllegalChangeWhileEnabled("polarity")); } self.channel_file(&controller, &channel, "polarity") .and_then(|path| write(&path, &polarity.to_string())) } fn controller_dir(&self, controller: &Controller) -> Result<PathBuf> { let path = self.sysfs_root.join(format!("pwmchip{}", controller.0)); if path.is_dir() { Ok(path) } else { Err(PwmError::ControllerNotFound(controller.clone())) } } fn controller_file(&self, controller: &Controller, fname: &str) -> Result<PathBuf> { let path = self .sysfs_root .join(format!("pwmchip{}/{}", controller.0, fname)); if path.is_file() { Ok(path) } else { Err(PwmError::ControllerNotFound(controller.clone())) } } fn channel_dir(&self, controller: &Controller, channel: &Channel) -> Result<PathBuf> { let n_pwm = self.npwm(controller)?; if channel.0 >= n_pwm { return Err(PwmError::ChannelNotFound( controller.clone(), channel.clone(), )); } let path = self .controller_dir(controller) .map(|controller| controller.join(format!("pwm{}", channel.0)))?; if path.is_dir() { Ok(path) } else { Err(PwmError::NotExported(controller.clone())) } } fn channel_file( &self, controller: &Controller, channel: &Channel, fname: &str, ) -> Result<PathBuf> { let path = self .channel_dir(controller, channel) .map(|channel| channel.join(fname))?; if path.is_file() { Ok(path) } else { Err(PwmError::NotExported(controller.clone())) } } } fn read(path: &Path) -> Result<String> { fs::read_to_string(path).map_err(|e| PwmError::Sysfs(Access::Read(path.to_owned()), e)) } fn write(path: &Path, contents: &str) -> Result<()> { debug!("writing to {:?}", path); fs::write(path, contents).map_err(|e| PwmError::Sysfs(Access::Write(path.to_owned()), e)) } fn parse_bool(s: String) -> Result<bool> { // sysfs compatible according to http://lkml.iu.edu/hypermail/linux/kernel/1103.2/02488.html match s.trim_end().to_lowercase().as_ref() { "1" | "y" | "yes" | "true" => Ok(true), "0" | "n" | "no" | "false" | "" => Ok(false), _ => Err(PwmError::NotBoolean(s)), } } fn parse_duration(s: String) -> Result<Duration> { s.trim_end() .parse::<u64>() .map_err(|e| PwmError::NotADuration(s, e)) .map(Duration::from_nanos) } #[derive(Debug)] pub enum Polarity
{ N
identifier_name
pwm.rs
), #[error("expected a duration in nanoseconds, got {0:?}: {1}")] NotADuration(String, #[source] std::num::ParseIntError), } /// Used in PwmError to format sysfs related errors. #[derive(Debug)] pub enum Access { Read(PathBuf), Write(PathBuf), } /// Exposes PWM functionality. /// /// Since the Linux kernel exposes PWM controllers and their settings through /// sysfs, PWM operations are just file reads and writes. To allow testing with /// a real file system but outside of sysfs, the `sysfs_root` property may be /// used to "offset" those operations to an alternative directory. /// /// Documentation on Linux PWM sysfs: /// <https://www.kernel.org/doc/html/latest/driver-api/pwm.html> #[derive(Debug)] pub struct Pwm { sysfs_root: PathBuf, } /// A PWM controller (a.k.a. PWM chip) is identified by a non-negative number. #[derive(Debug, Clone)] pub struct Controller(pub u32); /// PWM controllers expose channels, which are also identified by non-negative numbers. #[derive(Debug, Clone)] pub struct Channel(pub u32); type Result<T> = std::result::Result<T, PwmError>; impl Pwm { /// Initialize PWM. pub fn new() -> Self { Self::with_sysfs_root(PathBuf::from("/sys/class/pwm")) } /// Initialize PWM with an alternative sysfs directory, for testing. pub fn with_sysfs_root(sysfs_root: PathBuf) -> Self { if !sysfs_root.exists() { panic!("sysfs root does not exist: {:?}", sysfs_root); } Self { sysfs_root } } /// Returns the number of channels for the given controller. #[instrument] pub fn npwm(&self, controller: &Controller) -> Result<u32> { self.controller_file(controller, "npwm") .and_then(|path| read(&path)) .map(|s| { s.trim() .parse::<u32>() .expect("npwm expected to contain the number of channels") }) } /// Returns whether a controller's channels are ready to be used. #[instrument] pub fn is_exported(&self, controller: &Controller) -> Result<bool> { // A controller is exported if the channel subdirectories are there. // Since a controller without any channel doesn't make sense, it's // enough to check for the existance of the first channel's enable file. match self.channel_dir(controller, &Channel(0)) { Ok(_) => Ok(true), Err(PwmError::NotExported(_)) => Ok(false), Err(e) => Err(e), } } /// Export a PWM controller, which enables access to its channels. #[instrument] pub fn export(&mut self, controller: Controller) -> Result<()> { self.controller_file(&controller, "export") .and_then(|path| write(&path, "1")) } /// Unexport a PWM controller, which disables access to its channels. #[instrument] pub fn unexport(&mut self, controller: Controller) -> Result<()> { self.controller_file(&controller, "unexport") .and_then(|path| write(&path, "1")) } /// Returns whether a controller's channel is enabled. #[instrument] pub fn is_enabled(&self, controller: &Controller, channel: &Channel) -> Result<bool> { self.channel_file(controller, channel, "enable") .and_then(|path| read(&path)) .and_then(parse_bool) } /// Enable a channel. #[instrument] pub fn enable(&mut self, controller: Controller, channel: Channel) -> Result<()> { self.channel_file(&controller, &channel, "enable") .and_then(|path| write(&path, "1")) } /// Disable a channel. #[instrument] pub fn disable(&mut self, controller: Controller, channel: Channel) -> Result<()> { self.channel_file(&controller, &channel, "enable") .and_then(|path| write(&path, "0")) } /// The total period of the PWM signal (read/write). Value is in nanoseconds /// and is the sum of the active and inactive time of the PWM. #[instrument] pub fn set_period( &mut self, controller: Controller, channel: Channel, period: Duration, ) -> Result<()> { let duty_cycle = self .channel_file(&controller, &channel, "duty_cycle") .and_then(|path| read(&path)) .and_then(parse_duration)?; if duty_cycle > period { return Err(PwmError::DutyCycleGreaterThanPeriod); } self.channel_file(&controller, &channel, "period") .and_then(|path| write(&path, &period.as_nanos().to_string())) } /// The active time of the PWM signal (read/write). Value is in nanoseconds /// and must be less than the period. #[instrument] pub fn set_duty_cycle( &mut self, controller: Controller, channel: Channel, duty_cycle: Duration, ) -> Result<()> { let period = self .channel_file(&controller, &channel, "period") .and_then(|path| read(&path)) .and_then(parse_duration)?; if duty_cycle > period { return Err(PwmError::DutyCycleGreaterThanPeriod); } self.channel_file(&controller, &channel, "duty_cycle") .and_then(|path| write(&path, &duty_cycle.as_nanos().to_string())) } /// Changes the polarity of the PWM signal (read/write). Writes to this /// property only work if the PWM chip supports changing the polarity. The /// polarity can only be changed if the PWM is not enabled. Value is the /// string “normal” or “inversed”. #[instrument] pub fn set_polarity( &mut self, controller: Controller, channel: Channel, polarity: Polarity, ) -> Result<()> { // setting polarity is only allowed if channel is disabled: if self.is_enabled(&controller, &channel)? { return Err(PwmError::IllegalChangeWhileEnabled("polarity")); } self.channel_file(&controller, &channel, "polarity") .and_then(|path| write(&path, &polarity.to_string())) } fn controller_dir(&self, controller: &Controller) -> Result<PathBuf> { let path = self.sysfs_root.join(format!("pwmchip{}", controller.0)); if path.is_dir() { Ok(path) } else { Err(PwmError::ControllerNotFound(controller.clone())) } } fn controller_file(&self, controller: &Controller, fname: &str) -> Result<PathBuf> { let path = self .sysfs_root .join(format!("pwmchip{}/{}", controller.0, fname)); if path.is_file() { Ok(path) } else { Err(PwmError::ControllerNotFound(controller.clone())) } } fn channel_dir(&self, controller: &Controller, channel: &Channel) -> Result<PathBuf> { let n_pwm = self.npwm(controller)?; if channel.0 >= n_pwm { return Err(PwmError::ChannelNotFound( controller.clone(), channel.clone(), )); } let path = self .controller_dir(controller) .map(|controller| controller.join(format!("pwm{}", channel.0)))?; if path.is_dir() { Ok(path) } else { Err(PwmError::NotExported(controller.clone())) } } fn channel_file( &self, controller: &Controller, channel: &Channel, fname: &str, ) -> Result<PathBuf> { let path = self .channel_dir(controller, channel) .map(|channel| channel.join(fname))?; if path.is_file() { Ok(path) } else { Err(PwmError::NotExported(controller.clone())) } } } fn read(path: &Path) -> Result<String> { fs::read_to_string(path).map_err(|e| PwmError::Sysfs(Access::Read(path.to_owned()), e)) } fn write(path: &Path, contents: &str) -> Result<()> { debug!("writing to {:?}", path); fs::write(path, contents).map_err(|e| PwmError::Sysfs(Access::Write(path.to_owned()), e)) } fn parse_bool(s: String) -> Result<bool> { // sysfs compatible according to http://lkml.iu.edu/hypermail/linux/kernel/1103.2/02488.html match s.trim_end().to_lowercase().as_ref() { "1" | "y" | "yes" | "true" => Ok(true), "0" | "n" | "no" | "false" | "" => Ok(false), _ => Err(PwmError::NotBoolean(s)), } } fn parse_duration(s: String) -> Result<Duration> { s.
trim_end() .parse::<u64>() .map_err(|e| PwmError::NotADuration(s, e)) .map(Duration::from_nanos) } #[deri
identifier_body
deckbuilder.py
build(): global FILE_EXT global SHEETS global EXCEL # parse args parser = argparse.ArgumentParser(description='Building decks') parser.add_argument('-s', "--source", type=str, action='store', dest='source', help='Excel source') parser.add_argument('-o', "--output", type=str, action='store', dest='output', help='Output folder') parser.add_argument('-f', "--format", type=str, action='store', dest='format', help='Only PDF for now') parser.add_argument('-t', "--tabletop", type=bool, action='store', dest='tabletop', help='Export for Tabletop Simulator') parser.add_argument('-p', "--print", type=bool, action='store', dest='print', help='Print generated files') args = parser.parse_args() # redefine global parameters parms.FILE_SOURCE = args.source parms.DIR_OUTPUT = nvl(args.output, parms.DIR_OUTPUT) parms.FORMAT = nvl(args.format, parms.FORMAT) parms.FLAG_TABLETOP = nvl(args.tabletop, parms.FLAG_TABLETOP) parms.PRINT = nvl(args.print, parms.PRINT) print("[Validating parameters]") if not valid_parameters(): return print("[Validating masks]") if not valid_masks(): return print("[Processing sheets]") process_sheets() def valid_parameters(): if parms.FILE_SOURCE is None: print("ERROR: Source file path is invalid") return False if not Path(parms.FILE_SOURCE).is_file(): print("ERROR: Source file path is invalid") return False filename, ext = parms.FILE_SOURCE.split(".") if ext.lower() not in (parms.EXT_XLS(), parms.EXT_XLSX(), parms.EXT_CSV()): print("ERROR: Source file type is not supported") return False else: global FILE_EXT FILE_EXT = ext if parms.FORMAT not in [parms.FORMAT_PDF()]: print(parms.FORMAT, parms.FORMAT_PDF()) print("ERROR: Export format not supported") return False return True def valid_masks(): global MASK_DICT for m in parms.MASKS().split(parms.MASK_SEPARATOR()): if m.count(".", 1, len(m) - 1) != 1: print(m.count(".", 1, len(m) - 1)) print("ERROR: Mask", '"' + m + '"', "is invalid") return False else: sheet_title, value = m.split(parms.MASK_DOT()) if sheet_title not in MASK_DICT.keys(): MASK_DICT[sheet_title] = [] MASK_DICT[sheet_title].append(value) print("Masks:", MASK_DICT) return True def process_sheets(): global SHEETS global EXCEL # excel if FILE_EXT in (parms.EXT_XLS(), parms.EXT_XLSX()): EXCEL = pd.ExcelFile(parms.FILE_SOURCE) for sn in EXCEL.sheet_names: sheet = EXCEL.parse(sn) SHEETS.append(sheet) process_sheet(sheet, sn) def process_sheet(sheet, sheet_title): print("Processing", '"' + sheet_title + '"', "...") deck = [] if parms.COLUMN_TITLE() not in sheet.keys() or parms.COLUMN_DESCRIPTION() not in sheet.keys(): print("WARNING:", parms.COLUMN_TITLE(), "and", parms.COLUMN_DESCRIPTION(), "columns must be defined on the sheet. Skipping.") return if parms.COLUMN_COUNT() not in sheet.keys(): print("WARNING:", parms.COLUMN_COUNT(), "column not defined on sheet", sheet_title + ".", "Generating one copy for each card") sheet[parms.COLUMN_COUNT()] = pd.Series(1, index=sheet.index) if parms.COLUMN_IDT() not in sheet.keys(): print("WARNING:", parms.COLUMN_IDT(), "column not defined on sheet", sheet_title + ".", "Ensure that you don't have cards with the same names or define unique identifier") sheet[parms.COLUMN_IDT()] = pd.Series(None, index=sheet.index) for index, row in sheet.iterrows(): card_title = cust_title.do(row, sheet_title, row[parms.COLUMN_TITLE()]) if card_included(sheet_title, card_title): if sheet_title == "Находки": print(row["Next Location"]) card_description = cust_description.do(row, sheet_title, row[parms.COLUMN_DESCRIPTION()]) card_image = generate_card_image(card_title, card_description) card_count = row[parms.COLUMN_COUNT()] card_idt = row[parms.COLUMN_IDT()] if sheet_title == "Находки": print(card_description) card = Card(card_title, card_description, card_image, card_count, card_idt) deck.append(card) print(card_count, '"' + card_title + '" cards have been generated.') save_sheet(sheet_title, deck) def generate_card_image(title, description): # scheme, size, background color img = Image.new('RGB', (parms.DIM_CARD_WIDTH(), parms.DIM_CARD_HEIGHT()), (255, 255, 255))
unicode_font = ImageFont.truetype("Arial.ttf") y_text = draw_lines(draw, unicode_font, title, parms.DIM_TEXT_TOP_MARGIN()) # space between title and description y_text += parms.DIM_TEXT_TOP_MARGIN() # draw description for p in str.split(description, "\p"): for n in str.split(p, "\n"): y_text = draw_lines(draw, unicode_font, n, y_text) y_text += parms.DIM_TEXT_TOP_MARGIN() # border img = apply_card_border(img) return img def draw_lines(draw, font, text, y_text): lines = textwrap.wrap(text, width=(parms.DIM_CARD_WIDTH() // parms.DIM_CHAR_WIDTH())) for line in lines: draw.text((parms.DIM_TEXT_LEFT_MARGIN(), y_text), line, fill=(0, 0, 0), font=font) y_text += parms.DIM_TEXT_HEIGHT() return y_text def apply_card_border(img): new_size = (img.size[0] + parms.DIM_CARD_BORDER() * 2, img.size[1] + parms.DIM_CARD_BORDER() * 2) bordered_img = Image.new("RGB", new_size) bordered_img.paste(img, (parms.DIM_CARD_BORDER(), parms.DIM_CARD_BORDER())) return bordered_img def save_sheet(sheet_title, deck): main_directory = generate_sheet_directories(sheet_title) pdf = None if parms.FORMAT == parms.FORMAT_PDF(): pdf = FPDF() card_paths = [] card_total_count = 0 for c in deck: card_total_count += c.count card_counter = 0 for i, card in enumerate(deck): for j in range(card.count): # separate images if card.idt is None: idt_suffix = "" else: idt_suffix = "_" + str(card.idt) card_path = main_directory + "/" + card.title.replace(" ", "_") + idt_suffix + "_" + str(j) + "." + parms.EXT_PNG() card_paths.append(card_path) card.image.save(card_path, parms.EXT_PNG()) card_counter += 1 # combine in one page if (card_total_count - card_counter) % (parms.CARDS_IN_ROW() * parms.CARDS_IN_COLUMN()) == 0: print("Page added", card_total_count - card_counter) sheet_page_image = Image.new('RGB', (parms.CARDS_IN_ROW() * (parms.DIM_CARD_WIDTH() + parms.DIM_CARD_BORDER() * 2), parms.CARDS_IN_COLUMN() * (parms.DIM_CARD_HEIGHT() + parms.DIM_CARD_BORDER() * 2 )), (255,255,255,0)) x_offset = 0 for k, img in enumerate(map(Image.open, card_paths)): sheet_page_image.paste(img, ((k % parms.CARDS_IN_ROW()) * img.size[0], (k // parms.CARDS_IN_COLUMN()) * img.size[1])) x_offset += img.size[0] sheet_page_image_path = main_directory + "/" + parms.DIR_PAGES() + "/"\ + parms.FILE_PAGE() + str(card_total_count - card_counter)\ + "." + parms.EXT_PNG() sheet_page_image.save(sheet_page_image_path) # pdf if parms.FORMAT == parms.FORMAT_PDF(): pdf.add_page() pdf.image(sheet_page_image_path, x=parms.DIM_PDF_LEFT_MARGIN(), y=parms.DIM_PDF_TOP_MARGIN()) card_paths = [] printing_file = None if parms.FORMAT == parms.FORMAT_PDF(): printing_file = main_directory + "/" + parms.DIR_PRINT() + "/" + sheet_title.replace(" ", "_")\ + "." + parms.FORMAT_PDF() pdf.output(printing_file, "F") if parms.PRINT is True: print_sheet(printing_file) print('"' + sheet_title + '"', "finished.") def generate_sheet_directories(sheet_title): main_directory = parms.DIR_OUTPUT + "/" + sheet_title if not os.path.exists(main_directory): os.makedirs(main_directory) for d
draw = ImageDraw.Draw(img) # draw title
random_line_split
deckbuilder.py
build(): global FILE_EXT global SHEETS global EXCEL # parse args parser = argparse.ArgumentParser(description='Building decks') parser.add_argument('-s', "--source", type=str, action='store', dest='source', help='Excel source') parser.add_argument('-o', "--output", type=str, action='store', dest='output', help='Output folder') parser.add_argument('-f', "--format", type=str, action='store', dest='format', help='Only PDF for now') parser.add_argument('-t', "--tabletop", type=bool, action='store', dest='tabletop', help='Export for Tabletop Simulator') parser.add_argument('-p', "--print", type=bool, action='store', dest='print', help='Print generated files') args = parser.parse_args() # redefine global parameters parms.FILE_SOURCE = args.source parms.DIR_OUTPUT = nvl(args.output, parms.DIR_OUTPUT) parms.FORMAT = nvl(args.format, parms.FORMAT) parms.FLAG_TABLETOP = nvl(args.tabletop, parms.FLAG_TABLETOP) parms.PRINT = nvl(args.print, parms.PRINT) print("[Validating parameters]") if not valid_parameters(): return print("[Validating masks]") if not valid_masks(): return print("[Processing sheets]") process_sheets() def valid_parameters(): if parms.FILE_SOURCE is None: print("ERROR: Source file path is invalid") return False if not Path(parms.FILE_SOURCE).is_file():
filename, ext = parms.FILE_SOURCE.split(".") if ext.lower() not in (parms.EXT_XLS(), parms.EXT_XLSX(), parms.EXT_CSV()): print("ERROR: Source file type is not supported") return False else: global FILE_EXT FILE_EXT = ext if parms.FORMAT not in [parms.FORMAT_PDF()]: print(parms.FORMAT, parms.FORMAT_PDF()) print("ERROR: Export format not supported") return False return True def valid_masks(): global MASK_DICT for m in parms.MASKS().split(parms.MASK_SEPARATOR()): if m.count(".", 1, len(m) - 1) != 1: print(m.count(".", 1, len(m) - 1)) print("ERROR: Mask", '"' + m + '"', "is invalid") return False else: sheet_title, value = m.split(parms.MASK_DOT()) if sheet_title not in MASK_DICT.keys(): MASK_DICT[sheet_title] = [] MASK_DICT[sheet_title].append(value) print("Masks:", MASK_DICT) return True def process_sheets(): global SHEETS global EXCEL # excel if FILE_EXT in (parms.EXT_XLS(), parms.EXT_XLSX()): EXCEL = pd.ExcelFile(parms.FILE_SOURCE) for sn in EXCEL.sheet_names: sheet = EXCEL.parse(sn) SHEETS.append(sheet) process_sheet(sheet, sn) def process_sheet(sheet, sheet_title): print("Processing", '"' + sheet_title + '"', "...") deck = [] if parms.COLUMN_TITLE() not in sheet.keys() or parms.COLUMN_DESCRIPTION() not in sheet.keys(): print("WARNING:", parms.COLUMN_TITLE(), "and", parms.COLUMN_DESCRIPTION(), "columns must be defined on the sheet. Skipping.") return if parms.COLUMN_COUNT() not in sheet.keys(): print("WARNING:", parms.COLUMN_COUNT(), "column not defined on sheet", sheet_title + ".", "Generating one copy for each card") sheet[parms.COLUMN_COUNT()] = pd.Series(1, index=sheet.index) if parms.COLUMN_IDT() not in sheet.keys(): print("WARNING:", parms.COLUMN_IDT(), "column not defined on sheet", sheet_title + ".", "Ensure that you don't have cards with the same names or define unique identifier") sheet[parms.COLUMN_IDT()] = pd.Series(None, index=sheet.index) for index, row in sheet.iterrows(): card_title = cust_title.do(row, sheet_title, row[parms.COLUMN_TITLE()]) if card_included(sheet_title, card_title): if sheet_title == "Находки": print(row["Next Location"]) card_description = cust_description.do(row, sheet_title, row[parms.COLUMN_DESCRIPTION()]) card_image = generate_card_image(card_title, card_description) card_count = row[parms.COLUMN_COUNT()] card_idt = row[parms.COLUMN_IDT()] if sheet_title == "Находки": print(card_description) card = Card(card_title, card_description, card_image, card_count, card_idt) deck.append(card) print(card_count, '"' + card_title + '" cards have been generated.') save_sheet(sheet_title, deck) def generate_card_image(title, description): # scheme, size, background color img = Image.new('RGB', (parms.DIM_CARD_WIDTH(), parms.DIM_CARD_HEIGHT()), (255, 255, 255)) draw = ImageDraw.Draw(img) # draw title unicode_font = ImageFont.truetype("Arial.ttf") y_text = draw_lines(draw, unicode_font, title, parms.DIM_TEXT_TOP_MARGIN()) # space between title and description y_text += parms.DIM_TEXT_TOP_MARGIN() # draw description for p in str.split(description, "\p"): for n in str.split(p, "\n"): y_text = draw_lines(draw, unicode_font, n, y_text) y_text += parms.DIM_TEXT_TOP_MARGIN() # border img = apply_card_border(img) return img def draw_lines(draw, font, text, y_text): lines = textwrap.wrap(text, width=(parms.DIM_CARD_WIDTH() // parms.DIM_CHAR_WIDTH())) for line in lines: draw.text((parms.DIM_TEXT_LEFT_MARGIN(), y_text), line, fill=(0, 0, 0), font=font) y_text += parms.DIM_TEXT_HEIGHT() return y_text def apply_card_border(img): new_size = (img.size[0] + parms.DIM_CARD_BORDER() * 2, img.size[1] + parms.DIM_CARD_BORDER() * 2) bordered_img = Image.new("RGB", new_size) bordered_img.paste(img, (parms.DIM_CARD_BORDER(), parms.DIM_CARD_BORDER())) return bordered_img def save_sheet(sheet_title, deck): main_directory = generate_sheet_directories(sheet_title) pdf = None if parms.FORMAT == parms.FORMAT_PDF(): pdf = FPDF() card_paths = [] card_total_count = 0 for c in deck: card_total_count += c.count card_counter = 0 for i, card in enumerate(deck): for j in range(card.count): # separate images if card.idt is None: idt_suffix = "" else: idt_suffix = "_" + str(card.idt) card_path = main_directory + "/" + card.title.replace(" ", "_") + idt_suffix + "_" + str(j) + "." + parms.EXT_PNG() card_paths.append(card_path) card.image.save(card_path, parms.EXT_PNG()) card_counter += 1 # combine in one page if (card_total_count - card_counter) % (parms.CARDS_IN_ROW() * parms.CARDS_IN_COLUMN()) == 0: print("Page added", card_total_count - card_counter) sheet_page_image = Image.new('RGB', (parms.CARDS_IN_ROW() * (parms.DIM_CARD_WIDTH() + parms.DIM_CARD_BORDER() * 2), parms.CARDS_IN_COLUMN() * (parms.DIM_CARD_HEIGHT() + parms.DIM_CARD_BORDER() * 2 )), (255,255,255,0)) x_offset = 0 for k, img in enumerate(map(Image.open, card_paths)): sheet_page_image.paste(img, ((k % parms.CARDS_IN_ROW()) * img.size[0], (k // parms.CARDS_IN_COLUMN()) * img.size[1])) x_offset += img.size[0] sheet_page_image_path = main_directory + "/" + parms.DIR_PAGES() + "/"\ + parms.FILE_PAGE() + str(card_total_count - card_counter)\ + "." + parms.EXT_PNG() sheet_page_image.save(sheet_page_image_path) # pdf if parms.FORMAT == parms.FORMAT_PDF(): pdf.add_page() pdf.image(sheet_page_image_path, x=parms.DIM_PDF_LEFT_MARGIN(), y=parms.DIM_PDF_TOP_MARGIN()) card_paths = [] printing_file = None if parms.FORMAT == parms.FORMAT_PDF(): printing_file = main_directory + "/" + parms.DIR_PRINT() + "/" + sheet_title.replace(" ", "_")\ + "." + parms.FORMAT_PDF() pdf.output(printing_file, "F") if parms.PRINT is True: print_sheet(printing_file) print('"' + sheet_title + '"', "finished.") def generate_sheet_directories(sheet_title): main_directory = parms.DIR_OUTPUT + "/" + sheet_title if not os.path.exists(main_directory): os.makedirs(main_directory) for
print("ERROR: Source file path is invalid") return False
conditional_block
deckbuilder.py
tabletop", type=bool, action='store', dest='tabletop', help='Export for Tabletop Simulator') parser.add_argument('-p', "--print", type=bool, action='store', dest='print', help='Print generated files') args = parser.parse_args() # redefine global parameters parms.FILE_SOURCE = args.source parms.DIR_OUTPUT = nvl(args.output, parms.DIR_OUTPUT) parms.FORMAT = nvl(args.format, parms.FORMAT) parms.FLAG_TABLETOP = nvl(args.tabletop, parms.FLAG_TABLETOP) parms.PRINT = nvl(args.print, parms.PRINT) print("[Validating parameters]") if not valid_parameters(): return print("[Validating masks]") if not valid_masks(): return print("[Processing sheets]") process_sheets() def valid_parameters(): if parms.FILE_SOURCE is None: print("ERROR: Source file path is invalid") return False if not Path(parms.FILE_SOURCE).is_file(): print("ERROR: Source file path is invalid") return False filename, ext = parms.FILE_SOURCE.split(".") if ext.lower() not in (parms.EXT_XLS(), parms.EXT_XLSX(), parms.EXT_CSV()): print("ERROR: Source file type is not supported") return False else: global FILE_EXT FILE_EXT = ext if parms.FORMAT not in [parms.FORMAT_PDF()]: print(parms.FORMAT, parms.FORMAT_PDF()) print("ERROR: Export format not supported") return False return True def valid_masks(): global MASK_DICT for m in parms.MASKS().split(parms.MASK_SEPARATOR()): if m.count(".", 1, len(m) - 1) != 1: print(m.count(".", 1, len(m) - 1)) print("ERROR: Mask", '"' + m + '"', "is invalid") return False else: sheet_title, value = m.split(parms.MASK_DOT()) if sheet_title not in MASK_DICT.keys(): MASK_DICT[sheet_title] = [] MASK_DICT[sheet_title].append(value) print("Masks:", MASK_DICT) return True def process_sheets(): global SHEETS global EXCEL # excel if FILE_EXT in (parms.EXT_XLS(), parms.EXT_XLSX()): EXCEL = pd.ExcelFile(parms.FILE_SOURCE) for sn in EXCEL.sheet_names: sheet = EXCEL.parse(sn) SHEETS.append(sheet) process_sheet(sheet, sn) def process_sheet(sheet, sheet_title): print("Processing", '"' + sheet_title + '"', "...") deck = [] if parms.COLUMN_TITLE() not in sheet.keys() or parms.COLUMN_DESCRIPTION() not in sheet.keys(): print("WARNING:", parms.COLUMN_TITLE(), "and", parms.COLUMN_DESCRIPTION(), "columns must be defined on the sheet. Skipping.") return if parms.COLUMN_COUNT() not in sheet.keys(): print("WARNING:", parms.COLUMN_COUNT(), "column not defined on sheet", sheet_title + ".", "Generating one copy for each card") sheet[parms.COLUMN_COUNT()] = pd.Series(1, index=sheet.index) if parms.COLUMN_IDT() not in sheet.keys(): print("WARNING:", parms.COLUMN_IDT(), "column not defined on sheet", sheet_title + ".", "Ensure that you don't have cards with the same names or define unique identifier") sheet[parms.COLUMN_IDT()] = pd.Series(None, index=sheet.index) for index, row in sheet.iterrows(): card_title = cust_title.do(row, sheet_title, row[parms.COLUMN_TITLE()]) if card_included(sheet_title, card_title): if sheet_title == "Находки": print(row["Next Location"]) card_description = cust_description.do(row, sheet_title, row[parms.COLUMN_DESCRIPTION()]) card_image = generate_card_image(card_title, card_description) card_count = row[parms.COLUMN_COUNT()] card_idt = row[parms.COLUMN_IDT()] if sheet_title == "Находки": print(card_description) card = Card(card_title, card_description, card_image, card_count, card_idt) deck.append(card) print(card_count, '"' + card_title + '" cards have been generated.') save_sheet(sheet_title, deck) def generate_card_image(title, description): # scheme, size, background color img = Image.new('RGB', (parms.DIM_CARD_WIDTH(), parms.DIM_CARD_HEIGHT()), (255, 255, 255)) draw = ImageDraw.Draw(img) # draw title unicode_font = ImageFont.truetype("Arial.ttf") y_text = draw_lines(draw, unicode_font, title, parms.DIM_TEXT_TOP_MARGIN()) # space between title and description y_text += parms.DIM_TEXT_TOP_MARGIN() # draw description for p in str.split(description, "\p"): for n in str.split(p, "\n"): y_text = draw_lines(draw, unicode_font, n, y_text) y_text += parms.DIM_TEXT_TOP_MARGIN() # border img = apply_card_border(img) return img def draw_lines(draw, font, text, y_text): lines = textwrap.wrap(text, width=(parms.DIM_CARD_WIDTH() // parms.DIM_CHAR_WIDTH())) for line in lines: draw.text((parms.DIM_TEXT_LEFT_MARGIN(), y_text), line, fill=(0, 0, 0), font=font) y_text += parms.DIM_TEXT_HEIGHT() return y_text def apply_card_border(img): new_size = (img.size[0] + parms.DIM_CARD_BORDER() * 2, img.size[1] + parms.DIM_CARD_BORDER() * 2) bordered_img = Image.new("RGB", new_size) bordered_img.paste(img, (parms.DIM_CARD_BORDER(), parms.DIM_CARD_BORDER())) return bordered_img def save_sheet(sheet_title, deck): main_directory = generate_sheet_directories(sheet_title) pdf = None if parms.FORMAT == parms.FORMAT_PDF(): pdf = FPDF() card_paths = [] card_total_count = 0 for c in deck: card_total_count += c.count card_counter = 0 for i, card in enumerate(deck): for j in range(card.count): # separate images if card.idt is None: idt_suffix = "" else: idt_suffix = "_" + str(card.idt) card_path = main_directory + "/" + card.title.replace(" ", "_") + idt_suffix + "_" + str(j) + "." + parms.EXT_PNG() card_paths.append(card_path) card.image.save(card_path, parms.EXT_PNG()) card_counter += 1 # combine in one page if (card_total_count - card_counter) % (parms.CARDS_IN_ROW() * parms.CARDS_IN_COLUMN()) == 0: print("Page added", card_total_count - card_counter) sheet_page_image = Image.new('RGB', (parms.CARDS_IN_ROW() * (parms.DIM_CARD_WIDTH() + parms.DIM_CARD_BORDER() * 2), parms.CARDS_IN_COLUMN() * (parms.DIM_CARD_HEIGHT() + parms.DIM_CARD_BORDER() * 2 )), (255,255,255,0)) x_offset = 0 for k, img in enumerate(map(Image.open, card_paths)): sheet_page_image.paste(img, ((k % parms.CARDS_IN_ROW()) * img.size[0], (k // parms.CARDS_IN_COLUMN()) * img.size[1])) x_offset += img.size[0] sheet_page_image_path = main_directory + "/" + parms.DIR_PAGES() + "/"\ + parms.FILE_PAGE() + str(card_total_count - card_counter)\ + "." + parms.EXT_PNG() sheet_page_image.save(sheet_page_image_path) # pdf if parms.FORMAT == parms.FORMAT_PDF(): pdf.add_page() pdf.image(sheet_page_image_path, x=parms.DIM_PDF_LEFT_MARGIN(), y=parms.DIM_PDF_TOP_MARGIN()) card_paths = [] printing_file = None if parms.FORMAT == parms.FORMAT_PDF(): printing_file = main_directory + "/" + parms.DIR_PRINT() + "/" + sheet_title.replace(" ", "_")\ + "." + parms.FORMAT_PDF() pdf.output(printing_file, "F") if parms.PRINT is True: print_sheet(printing_file) print('"' + sheet_title + '"', "finished.") def generate_sheet_directories(sheet_title): main_directory = parms.DIR_OUTPUT + "/" + sheet_title if not os.path.exists(main_directory): os.makedirs(main_directory) for d in [parms.DIR_PAGES(), parms.DIR_PRINT(), parms.DIR_TABLETOP()]: directory = main_directory + "/" + d if not os.path.exists(directory): os.makedirs(directory) return main_directory def card_included(sheet_title, card_title): global MASK_DICT if sheet_title not in MASK_DICT.keys(): return False elif parms.MASK_ALL() in MASK_DICT[sheet_title] or card_title in MASK_DICT[sheet_title]: return True else: return False def print_sheet(sh
eet_path):
identifier_name
deckbuilder.py
(): global FILE_EXT global SHEETS global EXCEL # parse args parser = argparse.ArgumentParser(description='Building decks') parser.add_argument('-s', "--source", type=str, action='store', dest='source', help='Excel source') parser.add_argument('-o', "--output", type=str, action='store', dest='output', help='Output folder') parser.add_argument('-f', "--format", type=str, action='store', dest='format', help='Only PDF for now') parser.add_argument('-t', "--tabletop", type=bool, action='store', dest='tabletop', help='Export for Tabletop Simulator') parser.add_argument('-p', "--print", type=bool, action='store', dest='print', help='Print generated files') args = parser.parse_args() # redefine global parameters parms.FILE_SOURCE = args.source parms.DIR_OUTPUT = nvl(args.output, parms.DIR_OUTPUT) parms.FORMAT = nvl(args.format, parms.FORMAT) parms.FLAG_TABLETOP = nvl(args.tabletop, parms.FLAG_TABLETOP) parms.PRINT = nvl(args.print, parms.PRINT) print("[Validating parameters]") if not valid_parameters(): return print("[Validating masks]") if not valid_masks(): return print("[Processing sheets]") process_sheets() def valid_parameters(): if parms.FILE_SOURCE is None: print("ERROR: Source file path is invalid") return False if not Path(parms.FILE_SOURCE).is_file(): print("ERROR: Source file path is invalid") return False filename, ext = parms.FILE_SOURCE.split(".") if ext.lower() not in (parms.EXT_XLS(), parms.EXT_XLSX(), parms.EXT_CSV()): print("ERROR: Source file type is not supported") return False else: global FILE_EXT FILE_EXT = ext if parms.FORMAT not in [parms.FORMAT_PDF()]: print(parms.FORMAT, parms.FORMAT_PDF()) print("ERROR: Export format not supported") return False return True def valid_masks(): global MASK_DICT for m in parms.MASKS().split(parms.MASK_SEPARATOR()): if m.count(".", 1, len(m) - 1) != 1: print(m.count(".", 1, len(m) - 1)) print("ERROR: Mask", '"' + m + '"', "is invalid") return False else: sheet_title, value = m.split(parms.MASK_DOT()) if sheet_title not in MASK_DICT.keys(): MASK_DICT[sheet_title] = [] MASK_DICT[sheet_title].append(value) print("Masks:", MASK_DICT) return True def process_sheets(): global SHEETS global EXCEL # excel if FILE_EXT in (parms.EXT_XLS(), parms.EXT_XLSX()): EXCEL = pd.ExcelFile(parms.FILE_SOURCE) for sn in EXCEL.sheet_names: sheet = EXCEL.parse(sn) SHEETS.append(sheet) process_sheet(sheet, sn) def process_sheet(sheet, sheet_title): print("Processing", '"' + sheet_title + '"', "...") deck = [] if parms.COLUMN_TITLE() not in sheet.keys() or parms.COLUMN_DESCRIPTION() not in sheet.keys(): print("WARNING:", parms.COLUMN_TITLE(), "and", parms.COLUMN_DESCRIPTION(), "columns must be defined on the sheet. Skipping.") return if parms.COLUMN_COUNT() not in sheet.keys(): print("WARNING:", parms.COLUMN_COUNT(), "column not defined on sheet", sheet_title + ".", "Generating one copy for each card") sheet[parms.COLUMN_COUNT()] = pd.Series(1, index=sheet.index) if parms.COLUMN_IDT() not in sheet.keys(): print("WARNING:", parms.COLUMN_IDT(), "column not defined on sheet", sheet_title + ".", "Ensure that you don't have cards with the same names or define unique identifier") sheet[parms.COLUMN_IDT()] = pd.Series(None, index=sheet.index) for index, row in sheet.iterrows(): card_title = cust_title.do(row, sheet_title, row[parms.COLUMN_TITLE()]) if card_included(sheet_title, card_title): if sheet_title == "Находки": print(row["Next Location"]) card_description = cust_description.do(row, sheet_title, row[parms.COLUMN_DESCRIPTION()]) card_image = generate_card_image(card_title, card_description) card_count = row[parms.COLUMN_COUNT()] card_idt = row[parms.COLUMN_IDT()] if sheet_title == "Находки": print(card_description) card = Card(card_title, card_description, card_image, card_count, card_idt) deck.append(card) print(card_count, '"' + card_title + '" cards have been generated.') save_sheet(sheet_title, deck) def generate_card_image(title, description): # scheme, size, background color img = Image.new('RGB', (parms.DIM_CARD_WIDTH(), parms.DIM_CARD_HEIGHT()), (255, 255, 255)) draw = ImageDraw.Draw(img) # draw title unicode_font = ImageFont.truetype("Arial.ttf") y_text = draw_lines(draw, unicode_font, title, parms.DIM_TEXT_TOP_MARGIN()) # space between title and description y_text += parms.DIM_TEXT_TOP_MARGIN() # draw description for p in str.split(description, "\p"): for n in str.split(p, "\n"): y_text = draw_lines(draw, unicode_font, n, y_text) y_text += parms.DIM_TEXT_TOP_MARGIN() # border img = apply_card_border(img) return img def draw_lines(draw, font, text, y_text): lines = textwrap.wrap(text, width=(parms.DIM_CARD_WIDTH() // parms.DIM_CHAR_WIDTH())) for line in lines: draw.text((parms.DIM_TEXT_LEFT_MARGIN(), y_text), line, fill=(0, 0, 0), font=font) y_text += parms.DIM_TEXT_HEIGHT() return y_text def apply_card_border(img): new_size = (im
eet(sheet_title, deck): main_directory = generate_sheet_directories(sheet_title) pdf = None if parms.FORMAT == parms.FORMAT_PDF(): pdf = FPDF() card_paths = [] card_total_count = 0 for c in deck: card_total_count += c.count card_counter = 0 for i, card in enumerate(deck): for j in range(card.count): # separate images if card.idt is None: idt_suffix = "" else: idt_suffix = "_" + str(card.idt) card_path = main_directory + "/" + card.title.replace(" ", "_") + idt_suffix + "_" + str(j) + "." + parms.EXT_PNG() card_paths.append(card_path) card.image.save(card_path, parms.EXT_PNG()) card_counter += 1 # combine in one page if (card_total_count - card_counter) % (parms.CARDS_IN_ROW() * parms.CARDS_IN_COLUMN()) == 0: print("Page added", card_total_count - card_counter) sheet_page_image = Image.new('RGB', (parms.CARDS_IN_ROW() * (parms.DIM_CARD_WIDTH() + parms.DIM_CARD_BORDER() * 2), parms.CARDS_IN_COLUMN() * (parms.DIM_CARD_HEIGHT() + parms.DIM_CARD_BORDER() * 2 )), (255,255,255,0)) x_offset = 0 for k, img in enumerate(map(Image.open, card_paths)): sheet_page_image.paste(img, ((k % parms.CARDS_IN_ROW()) * img.size[0], (k // parms.CARDS_IN_COLUMN()) * img.size[1])) x_offset += img.size[0] sheet_page_image_path = main_directory + "/" + parms.DIR_PAGES() + "/"\ + parms.FILE_PAGE() + str(card_total_count - card_counter)\ + "." + parms.EXT_PNG() sheet_page_image.save(sheet_page_image_path) # pdf if parms.FORMAT == parms.FORMAT_PDF(): pdf.add_page() pdf.image(sheet_page_image_path, x=parms.DIM_PDF_LEFT_MARGIN(), y=parms.DIM_PDF_TOP_MARGIN()) card_paths = [] printing_file = None if parms.FORMAT == parms.FORMAT_PDF(): printing_file = main_directory + "/" + parms.DIR_PRINT() + "/" + sheet_title.replace(" ", "_")\ + "." + parms.FORMAT_PDF() pdf.output(printing_file, "F") if parms.PRINT is True: print_sheet(printing_file) print('"' + sheet_title + '"', "finished.") def generate_sheet_directories(sheet_title): main_directory = parms.DIR_OUTPUT + "/" + sheet_title if not os.path.exists(main_directory): os.makedirs(main_directory)
g.size[0] + parms.DIM_CARD_BORDER() * 2, img.size[1] + parms.DIM_CARD_BORDER() * 2) bordered_img = Image.new("RGB", new_size) bordered_img.paste(img, (parms.DIM_CARD_BORDER(), parms.DIM_CARD_BORDER())) return bordered_img def save_sh
identifier_body
bot.py
imformation persistently def setup_db(): """ initialize a new database """ db = TinyDB('db.json') chats = db.table('chats') members = db.table('members') chats.insert({'id': -231128423}) # Kolab chat group members.insert({'id': 235493361}) def get_member_ids(db): table = db.table('members') return [e['id'] for e in table.all()] def get_chat_ids(db): table = db.table('chats') return [e['id'] for e in table.all()] def add_member_id(db, id): members = db.table('members') Member = Query() if members.get(Member.id == id) is None: members.insert({'id': id}) return True else: return False def restricted(func): """ This decorator allows to restrict the access of a handler to only KOLAB users and chat groups """ @wraps(func) def wrapped(update, context, *args, **kwargs): user_id = update.effective_user.id chat_id = update.effective_chat.id members = get_member_ids(DB) chats = get_chat_ids(DB) first_name = update.effective_user.first_name last_name = update.effective_user.last_name print("Request from {} {} ({}) in chat {}." .format(first_name, last_name, user_id, chat_id)) if user_id not in members and chat_id not in chats: # Log unauthorized attempt to console and return first_name = update.effective_user.first_name last_name = update.effective_user.last_name print("Unauthorized request from {} {} ({}) in chat {}." .format(first_name, last_name, user_id, chat_id)) return return func(update, context, *args, **kwargs) return wrapped @restricted def inlinequery(update: 'Update', context: 'Context'): """Handle inline queries.""" query = update.inline_query.query results = [ InlineQueryResultArticle( id=uuid4(), title="Caps", input_message_content=InputTextMessageContent( query.upper())), InlineQueryResultArticle( id=uuid4(), title="Bold", input_message_content=InputTextMessageContent( "*{}*".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN)), InlineQueryResultArticle( id=uuid4(), title="Italic", input_message_content=InputTextMessageContent( "_{}_".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN))] def get_cat_url():
def get_cat_image(): allowed_extension = ['jpg','jpeg','png'] file_extension = '' while file_extension not in allowed_extension: url = get_cat_url() file_extension = re.search("([^.]*)$",url).group(1).lower() return url @restricted def meow(update: 'Update', context: 'CallbackContext'): bot = context.bot chat_id = update.message.chat_id url = get_cat_url() bot.send_photo(chat_id=chat_id, photo=url) @restricted def energy_use(update: 'Update', context: 'CallbackContext'): """ Send picture of current energy use """ bot = context.bot chat_id = update.message.chat_id url = "https://vloer.ko-lab.space/verbruikdag.png?random=" + str(randint(1,9999)) try: bot.send_photo(chat_id=chat_id, photo=url) except Exception as err: msg = "Oops...something went wrong: {}".format(err) print(msg) update.message.reply_text(msg) @restricted def pixelpaint(update: 'Update', context: 'CallbackContext'): """ start pixelpaint app """ args = context.args message = " ".join(args) # send "/paint start" to start the mqtt client on the floor-pi # do this if another program is running on the led floor. if message == "start": print("Trying to start LED floor...") try: publish.single("vloer/startscript", "paint", hostname="10.94.176.100", auth={'username': 'vloer', 'password': 'ko-lab'}, port=1883, client_id="kolabbot") print("LED floor...") except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not start Pixel Paint: {}".format(err) print(msg) update.message.reply_text(msg) # send a link to the pixel paint app try: # TODO: try to open pixel paint url url = "http://10.90.154.80/" #response = requests.get(url) update.message.reply_text("To paint the floor, go to {}".format(url)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not start Pixel Paint: ".format(err) print(msg) update.message.reply_text(msg) @restricted def change_led_floor_color(update: 'Update', context: 'CallbackContext'): """ Check if sender is member of Ko-Lab group chat. If yes, change the color of the LED floor. If not, tell them to go away """ args = context.args message = " ".join(args) try: publish.single("ledfloorupdates", message, hostname="10.90.154.80", port=1883, client_id="kolabbot") update.message.reply_text('Changing LED floor color to "{}".'.format(message)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-floor: {}".format(err) print(msg) update.message.reply_text(msg) @restricted def write_to_led_krant(update: 'Update', context: 'CallbackContext'): """ show message on LED-krant """ args = context.args message = " ".join(args) try: publish.single("ledkrant/write", message, hostname="10.94.176.100", port=1883, client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'}) update.message.reply_text('Writing "{}" to LED-krant.'.format(message)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-krant: {}".format(err) print(msg) update.message.reply_text(msg) def show_time_on_krant(context: 'CallbackContext'): """ show time on LED-krant """ print("Showing time on LED-Krant") message = strftime("%H:%M", localtime()) try: publish.single("ledkrant/time", message, hostname="10.94.176.100", port=1883, client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'}) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-krant: {}".format(err) print(msg) def addme(update: 'Update', context: 'CallbackContext'): """ Add user to the whitelist. """ user_id = update.effective_user.id chat_id = update.effective_chat.id chats = get_chat_ids(DB) if chat_id not in chats: update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.') else: if add_member_id(DB, user_id): update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.') else: update.message.reply_text('You are already on the whitelist.') def start(update: 'Update', context: 'CallbackContext'): """ Send a message when the command /start is issued. """ update.message.reply_text('I am Kolabbot. I pass butter.') def help(update: 'Update', context: 'CallbackContext'): """ Send a message when the command /help is issued. """ update.message.reply_text('Beep. Boop.') def no_command(update: 'Update', context: 'CallbackContext'): """ What happens when you send a message to the bot with no command. """ update.message.reply_text('Sorry, I am not very chatty. Type / to see a list of commands I understand.') def error(update: 'Update', context: 'CallbackContext'): """ Log Errors caused by Updates. """ logger.warning('Update "%s" caused error "%s"', update, context.error) def main(): # Updater checks for new events, then passes them on to the dispatcher. # Dispatcher sorts them and calls the handling functions. updater = Updater(API_KEY, use_context=True) dispatcher = updater.dispatcher jobs = updater.job_queue logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
contents = requests.get('https://aws.random.cat/meow').json() url = contents['file'] return url
identifier_body
bot.py
formation persistently def setup_db(): """ initialize a new database """ db = TinyDB('db.json') chats = db.table('chats') members = db.table('members') chats.insert({'id': -231128423}) # Kolab chat group members.insert({'id': 235493361}) def get_member_ids(db): table = db.table('members') return [e['id'] for e in table.all()] def get_chat_ids(db): table = db.table('chats') return [e['id'] for e in table.all()] def add_member_id(db, id): members = db.table('members') Member = Query() if members.get(Member.id == id) is None: members.insert({'id': id}) return True else: return False def restricted(func): """ This decorator allows to restrict the access of a handler to only KOLAB users and chat groups """ @wraps(func) def wrapped(update, context, *args, **kwargs): user_id = update.effective_user.id chat_id = update.effective_chat.id members = get_member_ids(DB) chats = get_chat_ids(DB) first_name = update.effective_user.first_name last_name = update.effective_user.last_name print("Request from {} {} ({}) in chat {}." .format(first_name, last_name, user_id, chat_id)) if user_id not in members and chat_id not in chats: # Log unauthorized attempt to console and return first_name = update.effective_user.first_name last_name = update.effective_user.last_name print("Unauthorized request from {} {} ({}) in chat {}." .format(first_name, last_name, user_id, chat_id)) return return func(update, context, *args, **kwargs) return wrapped @restricted def inlinequery(update: 'Update', context: 'Context'): """Handle inline queries.""" query = update.inline_query.query results = [ InlineQueryResultArticle( id=uuid4(), title="Caps", input_message_content=InputTextMessageContent( query.upper())), InlineQueryResultArticle( id=uuid4(), title="Bold", input_message_content=InputTextMessageContent( "*{}*".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN)), InlineQueryResultArticle( id=uuid4(), title="Italic", input_message_content=InputTextMessageContent( "_{}_".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN))] def get_cat_url(): contents = requests.get('https://aws.random.cat/meow').json() url = contents['file'] return url def get_cat_image(): allowed_extension = ['jpg','jpeg','png'] file_extension = '' while file_extension not in allowed_extension: url = get_cat_url() file_extension = re.search("([^.]*)$",url).group(1).lower() return url @restricted def meow(update: 'Update', context: 'CallbackContext'): bot = context.bot chat_id = update.message.chat_id url = get_cat_url() bot.send_photo(chat_id=chat_id, photo=url) @restricted def energy_use(update: 'Update', context: 'CallbackContext'): """ Send picture of current energy use """ bot = context.bot chat_id = update.message.chat_id url = "https://vloer.ko-lab.space/verbruikdag.png?random=" + str(randint(1,9999)) try: bot.send_photo(chat_id=chat_id, photo=url) except Exception as err: msg = "Oops...something went wrong: {}".format(err) print(msg) update.message.reply_text(msg) @restricted def pixelpaint(update: 'Update', context: 'CallbackContext'): """ start pixelpaint app """ args = context.args message = " ".join(args) # send "/paint start" to start the mqtt client on the floor-pi # do this if another program is running on the led floor. if message == "start": print("Trying to start LED floor...") try: publish.single("vloer/startscript", "paint", hostname="10.94.176.100", auth={'username': 'vloer', 'password': 'ko-lab'}, port=1883, client_id="kolabbot") print("LED floor...") except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not start Pixel Paint: {}".format(err) print(msg) update.message.reply_text(msg) # send a link to the pixel paint app try: # TODO: try to open pixel paint url url = "http://10.90.154.80/" #response = requests.get(url) update.message.reply_text("To paint the floor, go to {}".format(url)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not start Pixel Paint: ".format(err) print(msg) update.message.reply_text(msg) @restricted def change_led_floor_color(update: 'Update', context: 'CallbackContext'): """ Check if sender is member of Ko-Lab group chat. If yes, change the color of the LED floor. If not, tell them to go away """ args = context.args message = " ".join(args) try: publish.single("ledfloorupdates", message, hostname="10.90.154.80", port=1883, client_id="kolabbot") update.message.reply_text('Changing LED floor color to "{}".'.format(message)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-floor: {}".format(err) print(msg) update.message.reply_text(msg) @restricted def write_to_led_krant(update: 'Update', context: 'CallbackContext'): """ show message on LED-krant """ args = context.args message = " ".join(args) try: publish.single("ledkrant/write", message, hostname="10.94.176.100", port=1883, client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'}) update.message.reply_text('Writing "{}" to LED-krant.'.format(message)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-krant: {}".format(err) print(msg) update.message.reply_text(msg) def show_time_on_krant(context: 'CallbackContext'): """ show time on LED-krant """ print("Showing time on LED-Krant") message = strftime("%H:%M", localtime()) try: publish.single("ledkrant/time", message, hostname="10.94.176.100", port=1883, client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'}) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-krant: {}".format(err) print(msg) def
(update: 'Update', context: 'CallbackContext'): """ Add user to the whitelist. """ user_id = update.effective_user.id chat_id = update.effective_chat.id chats = get_chat_ids(DB) if chat_id not in chats: update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.') else: if add_member_id(DB, user_id): update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.') else: update.message.reply_text('You are already on the whitelist.') def start(update: 'Update', context: 'CallbackContext'): """ Send a message when the command /start is issued. """ update.message.reply_text('I am Kolabbot. I pass butter.') def help(update: 'Update', context: 'CallbackContext'): """ Send a message when the command /help is issued. """ update.message.reply_text('Beep. Boop.') def no_command(update: 'Update', context: 'CallbackContext'): """ What happens when you send a message to the bot with no command. """ update.message.reply_text('Sorry, I am not very chatty. Type / to see a list of commands I understand.') def error(update: 'Update', context: 'CallbackContext'): """ Log Errors caused by Updates. """ logger.warning('Update "%s" caused error "%s"', update, context.error) def main(): # Updater checks for new events, then passes them on to the dispatcher. # Dispatcher sorts them and calls the handling functions. updater = Updater(API_KEY, use_context=True) dispatcher = updater.dispatcher jobs = updater.job_queue logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
addme
identifier_name
bot.py
imformation persistently def setup_db(): """ initialize a new database """ db = TinyDB('db.json') chats = db.table('chats') members = db.table('members') chats.insert({'id': -231128423}) # Kolab chat group members.insert({'id': 235493361}) def get_member_ids(db): table = db.table('members') return [e['id'] for e in table.all()] def get_chat_ids(db): table = db.table('chats') return [e['id'] for e in table.all()] def add_member_id(db, id): members = db.table('members') Member = Query() if members.get(Member.id == id) is None: members.insert({'id': id}) return True else: return False def restricted(func): """ This decorator allows to restrict the access of a handler to only KOLAB users and chat groups """ @wraps(func) def wrapped(update, context, *args, **kwargs): user_id = update.effective_user.id chat_id = update.effective_chat.id members = get_member_ids(DB) chats = get_chat_ids(DB) first_name = update.effective_user.first_name last_name = update.effective_user.last_name print("Request from {} {} ({}) in chat {}." .format(first_name, last_name, user_id, chat_id)) if user_id not in members and chat_id not in chats: # Log unauthorized attempt to console and return first_name = update.effective_user.first_name last_name = update.effective_user.last_name print("Unauthorized request from {} {} ({}) in chat {}." .format(first_name, last_name, user_id, chat_id)) return return func(update, context, *args, **kwargs)
@restricted def inlinequery(update: 'Update', context: 'Context'): """Handle inline queries.""" query = update.inline_query.query results = [ InlineQueryResultArticle( id=uuid4(), title="Caps", input_message_content=InputTextMessageContent( query.upper())), InlineQueryResultArticle( id=uuid4(), title="Bold", input_message_content=InputTextMessageContent( "*{}*".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN)), InlineQueryResultArticle( id=uuid4(), title="Italic", input_message_content=InputTextMessageContent( "_{}_".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN))] def get_cat_url(): contents = requests.get('https://aws.random.cat/meow').json() url = contents['file'] return url def get_cat_image(): allowed_extension = ['jpg','jpeg','png'] file_extension = '' while file_extension not in allowed_extension: url = get_cat_url() file_extension = re.search("([^.]*)$",url).group(1).lower() return url @restricted def meow(update: 'Update', context: 'CallbackContext'): bot = context.bot chat_id = update.message.chat_id url = get_cat_url() bot.send_photo(chat_id=chat_id, photo=url) @restricted def energy_use(update: 'Update', context: 'CallbackContext'): """ Send picture of current energy use """ bot = context.bot chat_id = update.message.chat_id url = "https://vloer.ko-lab.space/verbruikdag.png?random=" + str(randint(1,9999)) try: bot.send_photo(chat_id=chat_id, photo=url) except Exception as err: msg = "Oops...something went wrong: {}".format(err) print(msg) update.message.reply_text(msg) @restricted def pixelpaint(update: 'Update', context: 'CallbackContext'): """ start pixelpaint app """ args = context.args message = " ".join(args) # send "/paint start" to start the mqtt client on the floor-pi # do this if another program is running on the led floor. if message == "start": print("Trying to start LED floor...") try: publish.single("vloer/startscript", "paint", hostname="10.94.176.100", auth={'username': 'vloer', 'password': 'ko-lab'}, port=1883, client_id="kolabbot") print("LED floor...") except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not start Pixel Paint: {}".format(err) print(msg) update.message.reply_text(msg) # send a link to the pixel paint app try: # TODO: try to open pixel paint url url = "http://10.90.154.80/" #response = requests.get(url) update.message.reply_text("To paint the floor, go to {}".format(url)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not start Pixel Paint: ".format(err) print(msg) update.message.reply_text(msg) @restricted def change_led_floor_color(update: 'Update', context: 'CallbackContext'): """ Check if sender is member of Ko-Lab group chat. If yes, change the color of the LED floor. If not, tell them to go away """ args = context.args message = " ".join(args) try: publish.single("ledfloorupdates", message, hostname="10.90.154.80", port=1883, client_id="kolabbot") update.message.reply_text('Changing LED floor color to "{}".'.format(message)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-floor: {}".format(err) print(msg) update.message.reply_text(msg) @restricted def write_to_led_krant(update: 'Update', context: 'CallbackContext'): """ show message on LED-krant """ args = context.args message = " ".join(args) try: publish.single("ledkrant/write", message, hostname="10.94.176.100", port=1883, client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'}) update.message.reply_text('Writing "{}" to LED-krant.'.format(message)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-krant: {}".format(err) print(msg) update.message.reply_text(msg) def show_time_on_krant(context: 'CallbackContext'): """ show time on LED-krant """ print("Showing time on LED-Krant") message = strftime("%H:%M", localtime()) try: publish.single("ledkrant/time", message, hostname="10.94.176.100", port=1883, client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'}) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-krant: {}".format(err) print(msg) def addme(update: 'Update', context: 'CallbackContext'): """ Add user to the whitelist. """ user_id = update.effective_user.id chat_id = update.effective_chat.id chats = get_chat_ids(DB) if chat_id not in chats: update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.') else: if add_member_id(DB, user_id): update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.') else: update.message.reply_text('You are already on the whitelist.') def start(update: 'Update', context: 'CallbackContext'): """ Send a message when the command /start is issued. """ update.message.reply_text('I am Kolabbot. I pass butter.') def help(update: 'Update', context: 'CallbackContext'): """ Send a message when the command /help is issued. """ update.message.reply_text('Beep. Boop.') def no_command(update: 'Update', context: 'CallbackContext'): """ What happens when you send a message to the bot with no command. """ update.message.reply_text('Sorry, I am not very chatty. Type / to see a list of commands I understand.') def error(update: 'Update', context: 'CallbackContext'): """ Log Errors caused by Updates. """ logger.warning('Update "%s" caused error "%s"', update, context.error) def main(): # Updater checks for new events, then passes them on to the dispatcher. # Dispatcher sorts them and calls the handling functions. updater = Updater(API_KEY, use_context=True) dispatcher = updater.dispatcher jobs = updater.job_queue logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
return wrapped
random_line_split
bot.py
formation persistently def setup_db(): """ initialize a new database """ db = TinyDB('db.json') chats = db.table('chats') members = db.table('members') chats.insert({'id': -231128423}) # Kolab chat group members.insert({'id': 235493361}) def get_member_ids(db): table = db.table('members') return [e['id'] for e in table.all()] def get_chat_ids(db): table = db.table('chats') return [e['id'] for e in table.all()] def add_member_id(db, id): members = db.table('members') Member = Query() if members.get(Member.id == id) is None: members.insert({'id': id}) return True else: return False def restricted(func): """ This decorator allows to restrict the access of a handler to only KOLAB users and chat groups """ @wraps(func) def wrapped(update, context, *args, **kwargs): user_id = update.effective_user.id chat_id = update.effective_chat.id members = get_member_ids(DB) chats = get_chat_ids(DB) first_name = update.effective_user.first_name last_name = update.effective_user.last_name print("Request from {} {} ({}) in chat {}." .format(first_name, last_name, user_id, chat_id)) if user_id not in members and chat_id not in chats: # Log unauthorized attempt to console and return first_name = update.effective_user.first_name last_name = update.effective_user.last_name print("Unauthorized request from {} {} ({}) in chat {}." .format(first_name, last_name, user_id, chat_id)) return return func(update, context, *args, **kwargs) return wrapped @restricted def inlinequery(update: 'Update', context: 'Context'): """Handle inline queries.""" query = update.inline_query.query results = [ InlineQueryResultArticle( id=uuid4(), title="Caps", input_message_content=InputTextMessageContent( query.upper())), InlineQueryResultArticle( id=uuid4(), title="Bold", input_message_content=InputTextMessageContent( "*{}*".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN)), InlineQueryResultArticle( id=uuid4(), title="Italic", input_message_content=InputTextMessageContent( "_{}_".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN))] def get_cat_url(): contents = requests.get('https://aws.random.cat/meow').json() url = contents['file'] return url def get_cat_image(): allowed_extension = ['jpg','jpeg','png'] file_extension = '' while file_extension not in allowed_extension: url = get_cat_url() file_extension = re.search("([^.]*)$",url).group(1).lower() return url @restricted def meow(update: 'Update', context: 'CallbackContext'): bot = context.bot chat_id = update.message.chat_id url = get_cat_url() bot.send_photo(chat_id=chat_id, photo=url) @restricted def energy_use(update: 'Update', context: 'CallbackContext'): """ Send picture of current energy use """ bot = context.bot chat_id = update.message.chat_id url = "https://vloer.ko-lab.space/verbruikdag.png?random=" + str(randint(1,9999)) try: bot.send_photo(chat_id=chat_id, photo=url) except Exception as err: msg = "Oops...something went wrong: {}".format(err) print(msg) update.message.reply_text(msg) @restricted def pixelpaint(update: 'Update', context: 'CallbackContext'): """ start pixelpaint app """ args = context.args message = " ".join(args) # send "/paint start" to start the mqtt client on the floor-pi # do this if another program is running on the led floor. if message == "start":
# send a link to the pixel paint app try: # TODO: try to open pixel paint url url = "http://10.90.154.80/" #response = requests.get(url) update.message.reply_text("To paint the floor, go to {}".format(url)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not start Pixel Paint: ".format(err) print(msg) update.message.reply_text(msg) @restricted def change_led_floor_color(update: 'Update', context: 'CallbackContext'): """ Check if sender is member of Ko-Lab group chat. If yes, change the color of the LED floor. If not, tell them to go away """ args = context.args message = " ".join(args) try: publish.single("ledfloorupdates", message, hostname="10.90.154.80", port=1883, client_id="kolabbot") update.message.reply_text('Changing LED floor color to "{}".'.format(message)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-floor: {}".format(err) print(msg) update.message.reply_text(msg) @restricted def write_to_led_krant(update: 'Update', context: 'CallbackContext'): """ show message on LED-krant """ args = context.args message = " ".join(args) try: publish.single("ledkrant/write", message, hostname="10.94.176.100", port=1883, client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'}) update.message.reply_text('Writing "{}" to LED-krant.'.format(message)) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-krant: {}".format(err) print(msg) update.message.reply_text(msg) def show_time_on_krant(context: 'CallbackContext'): """ show time on LED-krant """ print("Showing time on LED-Krant") message = strftime("%H:%M", localtime()) try: publish.single("ledkrant/time", message, hostname="10.94.176.100", port=1883, client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'}) except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not connect to LED-krant: {}".format(err) print(msg) def addme(update: 'Update', context: 'CallbackContext'): """ Add user to the whitelist. """ user_id = update.effective_user.id chat_id = update.effective_chat.id chats = get_chat_ids(DB) if chat_id not in chats: update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.') else: if add_member_id(DB, user_id): update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.') else: update.message.reply_text('You are already on the whitelist.') def start(update: 'Update', context: 'CallbackContext'): """ Send a message when the command /start is issued. """ update.message.reply_text('I am Kolabbot. I pass butter.') def help(update: 'Update', context: 'CallbackContext'): """ Send a message when the command /help is issued. """ update.message.reply_text('Beep. Boop.') def no_command(update: 'Update', context: 'CallbackContext'): """ What happens when you send a message to the bot with no command. """ update.message.reply_text('Sorry, I am not very chatty. Type / to see a list of commands I understand.') def error(update: 'Update', context: 'CallbackContext'): """ Log Errors caused by Updates. """ logger.warning('Update "%s" caused error "%s"', update, context.error) def main(): # Updater checks for new events, then passes them on to the dispatcher. # Dispatcher sorts them and calls the handling functions. updater = Updater(API_KEY, use_context=True) dispatcher = updater.dispatcher jobs = updater.job_queue logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
print("Trying to start LED floor...") try: publish.single("vloer/startscript", "paint", hostname="10.94.176.100", auth={'username': 'vloer', 'password': 'ko-lab'}, port=1883, client_id="kolabbot") print("LED floor...") except (ConnectionRefusedError, TimeoutError) as err: msg = "Could not start Pixel Paint: {}".format(err) print(msg) update.message.reply_text(msg)
conditional_block
scrapedin.py
Only matches if the name contains uppercase, lowercase or spaces. return [name] else: return None def login(self, username, password): ''' Login to the linked in web page. The only args required to use this function are the username and password to log into linkedin. Example usage: parser = argparse.ArgumentParser() required = parser.add_argument_group('required arguments') required.add_argument('-u', dest='username', required=True, help='LinkedIn Login Email') args = parser.parse_args() password = getpass.getpass(prompt='LinkedIn Password: ') web = WebPage() web.login(username, password) :param argparse username: argparse object with the attribute "username" :param argparse password: :rtype: None ''' self.page.get("https://www.linkedin.com/") WebDriverWait(self.page, 10).until(EC.presence_of_element_located((By.NAME, 'session_key'))) self.enter_data('session_key', username) self.enter_data('session_password', password) # Find and click submit button by type submit_btn = self.page.find_elements_by_xpath("//button[@type='submit']")[0] submit_btn.click() def apply_filters(self, company, url=None, georegion=None, industry=None, job_title=None): ''' Utilize the method within the cycle_users function to build different search parameters such as location, geotag, company, job-title, etc. This function will return the full URL. :param str company: target company name :param str url: default (or custom) linkedin url for faceted linkedin search :param str georegion: geographic region (-g) to filter :param str industry: industry (-i_ to filter :param str job_title: job title (-j) to filter :rtype: string (url if successful) or int (Unix-style error integer if error is encountered) ''' filters = [] if not url: url = 'https://www.linkedin.com/search/results/people/?' # Filter by Company # Allows the user to scrape linkedin without specifying a target company, but must do so with intent if company != "NONE": filters.append('company={0}'.format(company)) # Filter by Geographic Region if georegion: # region object is created for future-proof purposes in the event new filters become available or formating changes region = {} try: region['full_line'] = list_search('georegion', term=georegion, return_results=True) region['name'] = region['full_line'].split('\t')[-1] region['code'] = region['full_line'].split('\t')[0].split( '.') # Should be continent.country.province/state.city_id region.update({key: value.replace(' ', '') for (key, value) in zip(('continent', 'country', 'state', 'id'), region['code'])}) filters.append('facetGeoRegion=%5B"{0}%3A{1}"%5D'.format(region['country'], region['id'])) except (IndexError, KeyError, ValueError): self.log.error("[-] The region you chose is too broad to search. Search by City only") return os.EX_NOINPUT # Filter by Industry if industry: ind = list_search('industry', term=industry, return_results=True) if ind: i_code = ind.split('\t')[0].replace(' ', '') filters.append('facetIndustry=%5B"{0}"%5D'.format(i_code)) filters.append("origin=FACETED_SEARCH") if job_title: filters.append('title={0}'.format(job_title)) else: filters.append('title=') # Join additional parameters to the URL by ampersand (&). Order doesn't matter. filters.append('origin=FACETED_SEARCH') url += "&".join(filters).lstrip("&") if len(filters) > 1 else filters[0] self.log.debug("Filtered URL: " + url) return url def cycle_users(self, company, url, max_users=None): ''' You must run the login method before cycle_users will run. Once the login method has run, cycle_users can collect the names and titles of employees at the company you specify. This method requires the company name and optional value max_users from argparse. See the login method for a code example. :param argparse company: :param argpase max_users: :rtype: None (self.employee_data will be populated with names, titles and profile URLs) ''' # Wait for home screen after login WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.XPATH, "//*[@id='global-nav']/div"))) self.log.debug("URL: " + str(url)) try: self.page.get(url) except exceptions.WebDriverException as err: self.log.error("An error occurred while getting the company page: \n{0}".format(err)) self.log.critical("[!] Check the company name or URL used") return os.EX_USAGE count = 1 # WebElements cannot be used for iteration.. current_page = 1 if not max_users:
while max_users > len(self.employee_data) and current_page < 100: self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);") try: WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.CLASS_NAME, 'active'))) # Check if the page contains the "no search results" class. This means we are out of users # This will raise a NoSuchElementException if the element is not found self.page.find_element_by_class_name("search-no-results__container") break except exceptions.NoSuchElementException: pass except exceptions.TimeoutException: # Page didn't load correctly after 20 seconds, cannot reliably recover. Bailing. return try: WebDriverWait(self.page, 5).until(EC.visibility_of_element_located((By.CLASS_NAME, 'name'))) except exceptions.TimeoutException: try: if self.page.find_elements_by_class_name('actor-name'): # If this is true, the page is filled with "LinkedIn Member". It doesn't mean there's no users # available on the page. If this is the case, click next. current_page += 1 if 'disabled=""' in self.page.find_element_by_class_name( "artdeco-pagination__button--next").parent.page_source: # If this is true then the Next button is "disabled". This happens when there's no more pages break self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name( "artdeco-pagination__button--next")) continue except exceptions.NoSuchElementException: # Reached when there's no more users available on the page. break try: # Get the current page number (at the bottom of a company search) # The value returned from the HTML looks like this: '1\nCurrent page' new_page = int(self.page.find_elements_by_class_name('active')[-1].text.split()[0]) except ValueError: # If there's only one page, linkedin doesn't show page numbers at the bottom. The only result # will be the text string "people", therefore when we try to convert the value to int we raise # an exception new_page = 1 except IndexError: # Page likely came back with "No more users" even though there appeared to be pages left return except exceptions.StaleElementReferenceException: # Handles a race condition where elements are found but are not populated yet. continue for pagnation in self.page.find_elements_by_class_name("artdeco-pagination__button"): if pagnation.text != "Next": continue if not pagnation.is_enabled(): # Next button is disabled.. This is linkedins way of saying "We are done here" return if current_page != new_page: # The script is too fast. This verifies a new page has loaded before proceeding. continue # Scroll to the bottom of the page loads all elements (employee_names) self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);") # Give the elements a second to populate fully time.sleep(1) # finds each employee element by a globally set class name employee_elements = self.page.find_elements_by_xpath("//div[@class='entity-result__item']") self.log.debug(employee_elements) for employee in employee_elements: if count > len(employee_elements): count = 1 current_page += 1 # click next page try: self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name( "artdeco-pagination__button--next")) break except exceptions.NoSuchElementException: # No more pages return os.EX_OK try: # The elements of LinkedIn change frequently, but the text data of the elements is more-or-less reliable. # It's better to split via newline than parse xpaths. data = employee.text.split('\n') if "LinkedIn Member" not in data[0] and len(data) >=
max_users = float('inf')
conditional_block
scrapedin.py
Only matches if the name contains uppercase, lowercase or spaces. return [name] else: return None def login(self, username, password): ''' Login to the linked in web page. The only args required to use this function are the username and password to log into linkedin. Example usage: parser = argparse.ArgumentParser() required = parser.add_argument_group('required arguments') required.add_argument('-u', dest='username', required=True, help='LinkedIn Login Email') args = parser.parse_args() password = getpass.getpass(prompt='LinkedIn Password: ') web = WebPage() web.login(username, password) :param argparse username: argparse object with the attribute "username" :param argparse password: :rtype: None ''' self.page.get("https://www.linkedin.com/") WebDriverWait(self.page, 10).until(EC.presence_of_element_located((By.NAME, 'session_key'))) self.enter_data('session_key', username) self.enter_data('session_password', password) # Find and click submit button by type submit_btn = self.page.find_elements_by_xpath("//button[@type='submit']")[0] submit_btn.click() def apply_filters(self, company, url=None, georegion=None, industry=None, job_title=None):
# Filter by Geographic Region if georegion: # region object is created for future-proof purposes in the event new filters become available or formating changes region = {} try: region['full_line'] = list_search('georegion', term=georegion, return_results=True) region['name'] = region['full_line'].split('\t')[-1] region['code'] = region['full_line'].split('\t')[0].split( '.') # Should be continent.country.province/state.city_id region.update({key: value.replace(' ', '') for (key, value) in zip(('continent', 'country', 'state', 'id'), region['code'])}) filters.append('facetGeoRegion=%5B"{0}%3A{1}"%5D'.format(region['country'], region['id'])) except (IndexError, KeyError, ValueError): self.log.error("[-] The region you chose is too broad to search. Search by City only") return os.EX_NOINPUT # Filter by Industry if industry: ind = list_search('industry', term=industry, return_results=True) if ind: i_code = ind.split('\t')[0].replace(' ', '') filters.append('facetIndustry=%5B"{0}"%5D'.format(i_code)) filters.append("origin=FACETED_SEARCH") if job_title: filters.append('title={0}'.format(job_title)) else: filters.append('title=') # Join additional parameters to the URL by ampersand (&). Order doesn't matter. filters.append('origin=FACETED_SEARCH') url += "&".join(filters).lstrip("&") if len(filters) > 1 else filters[0] self.log.debug("Filtered URL: " + url) return url def cycle_users(self, company, url, max_users=None): ''' You must run the login method before cycle_users will run. Once the login method has run, cycle_users can collect the names and titles of employees at the company you specify. This method requires the company name and optional value max_users from argparse. See the login method for a code example. :param argparse company: :param argpase max_users: :rtype: None (self.employee_data will be populated with names, titles and profile URLs) ''' # Wait for home screen after login WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.XPATH, "//*[@id='global-nav']/div"))) self.log.debug("URL: " + str(url)) try: self.page.get(url) except exceptions.WebDriverException as err: self.log.error("An error occurred while getting the company page: \n{0}".format(err)) self.log.critical("[!] Check the company name or URL used") return os.EX_USAGE count = 1 # WebElements cannot be used for iteration.. current_page = 1 if not max_users: max_users = float('inf') while max_users > len(self.employee_data) and current_page < 100: self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);") try: WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.CLASS_NAME, 'active'))) # Check if the page contains the "no search results" class. This means we are out of users # This will raise a NoSuchElementException if the element is not found self.page.find_element_by_class_name("search-no-results__container") break except exceptions.NoSuchElementException: pass except exceptions.TimeoutException: # Page didn't load correctly after 20 seconds, cannot reliably recover. Bailing. return try: WebDriverWait(self.page, 5).until(EC.visibility_of_element_located((By.CLASS_NAME, 'name'))) except exceptions.TimeoutException: try: if self.page.find_elements_by_class_name('actor-name'): # If this is true, the page is filled with "LinkedIn Member". It doesn't mean there's no users # available on the page. If this is the case, click next. current_page += 1 if 'disabled=""' in self.page.find_element_by_class_name( "artdeco-pagination__button--next").parent.page_source: # If this is true then the Next button is "disabled". This happens when there's no more pages break self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name( "artdeco-pagination__button--next")) continue except exceptions.NoSuchElementException: # Reached when there's no more users available on the page. break try: # Get the current page number (at the bottom of a company search) # The value returned from the HTML looks like this: '1\nCurrent page' new_page = int(self.page.find_elements_by_class_name('active')[-1].text.split()[0]) except ValueError: # If there's only one page, linkedin doesn't show page numbers at the bottom. The only result # will be the text string "people", therefore when we try to convert the value to int we raise # an exception new_page = 1 except IndexError: # Page likely came back with "No more users" even though there appeared to be pages left return except exceptions.StaleElementReferenceException: # Handles a race condition where elements are found but are not populated yet. continue for pagnation in self.page.find_elements_by_class_name("artdeco-pagination__button"): if pagnation.text != "Next": continue if not pagnation.is_enabled(): # Next button is disabled.. This is linkedins way of saying "We are done here" return if current_page != new_page: # The script is too fast. This verifies a new page has loaded before proceeding. continue # Scroll to the bottom of the page loads all elements (employee_names) self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);") # Give the elements a second to populate fully time.sleep(1) # finds each employee element by a globally set class name employee_elements = self.page.find_elements_by_xpath("//div[@class='entity-result__item']") self.log.debug(employee_elements) for employee in employee_elements: if count > len(employee_elements): count = 1 current_page += 1 # click next page try: self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name( "artdeco-pagination__button--next")) break except exceptions.NoSuchElementException: # No more pages return os.EX_OK try: # The elements of LinkedIn change frequently, but the text data of the elements is more-or-less reliable. # It's better to split via newline than parse xpaths. data = employee.text.split('\n') if "LinkedIn Member" not in data[0] and len(data) >=
''' Utilize the method within the cycle_users function to build different search parameters such as location, geotag, company, job-title, etc. This function will return the full URL. :param str company: target company name :param str url: default (or custom) linkedin url for faceted linkedin search :param str georegion: geographic region (-g) to filter :param str industry: industry (-i_ to filter :param str job_title: job title (-j) to filter :rtype: string (url if successful) or int (Unix-style error integer if error is encountered) ''' filters = [] if not url: url = 'https://www.linkedin.com/search/results/people/?' # Filter by Company # Allows the user to scrape linkedin without specifying a target company, but must do so with intent if company != "NONE": filters.append('company={0}'.format(company))
identifier_body
scrapedin.py
except exceptions.StaleElementReferenceException: # Handles a race condition where elements are found but are not populated yet. continue for pagnation in self.page.find_elements_by_class_name("artdeco-pagination__button"): if pagnation.text != "Next": continue if not pagnation.is_enabled(): # Next button is disabled.. This is linkedins way of saying "We are done here" return if current_page != new_page: # The script is too fast. This verifies a new page has loaded before proceeding. continue # Scroll to the bottom of the page loads all elements (employee_names) self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);") # Give the elements a second to populate fully time.sleep(1) # finds each employee element by a globally set class name employee_elements = self.page.find_elements_by_xpath("//div[@class='entity-result__item']") self.log.debug(employee_elements) for employee in employee_elements: if count > len(employee_elements): count = 1 current_page += 1 # click next page try: self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name( "artdeco-pagination__button--next")) break except exceptions.NoSuchElementException: # No more pages return os.EX_OK try: # The elements of LinkedIn change frequently, but the text data of the elements is more-or-less reliable. # It's better to split via newline than parse xpaths. data = employee.text.split('\n') if "LinkedIn Member" not in data[0] and len(data) >= 5: name = Webpage.sanitize_name(data[0]) title_text = data[3] region = data[4] else: count += 1 continue try: # This line/element does not always exist, so an exception will always be raised in this case and must be handled. alt_text = employee.find_element_by_class_name('search-result__snippets').text except exceptions.NoSuchElementException: alt_text = False title, _, company = title_text.partition(' at ') if alt_text and not company: alt_text = alt_text.lstrip('Current: ') t, _, company = alt_text.partition(' at ') if not title: title = t dept = self.dept_wizard(title) # If company is still empty at this point, bail out to unemployment company = company or 'UNEMPLOYED' url = employee.find_element_by_xpath("//a[@class='app-aware-link']").get_attribute('href') except (IndexError, exceptions.NoSuchElementException): count += 1 continue if not name: continue for person in name: self.log.info(person) self.employee_data.update({person: [dept, title, company, region, url]}) count += 1 @staticmethod def dept_wizard(linkedin_title): ''' Attempt to determine which department a given user belongs to based off of their title. If a title cannot be reliably determined then it will return a blank string. It is advised to compare their raw untouched titles to the output of dept_wizard(). Blindly trusting the dept_wizard() could lead to some awkward situations. If a title matches any of the values in the tuples below, the first value in the tuple will populate the title column in the CSV. :param str linkedin_title: a string reflecting an employees title :rtype: str ''' sales = ('Sales', 'Account Manager', 'Account Executive', 'New Business', 'Relationship Manager') hr = ('HR', 'Human Resources', 'Benefits Admin', 'Payroll', 'Talent', 'Recruiter') accounting = ('Accounting', 'Accountant', 'Financial', 'Finance', 'Billing') marketing = ('Marketing', 'Content', 'Brand', 'seo', 'Social Media') it = ( 'IT', 'Information Technology', 'Network Engineer', 'Network Admin', 'System Admin', 'sysadmin', 'sys admin', 'Help Desk', 'ITHD', 'Developer', 'Dev') infosec = ( 'Infosec', 'Red Team', 'Blue Team', 'Offensive', 'Defensive', 'Pentest', 'Penetration', 'Information Security') executive = ('Executive', 'Exec', 'cfo', 'ceo', 'coo', 'cio', 'cmo', 'cbo', 'cto', 'cso', 'Chief') audit = ('Audit', 'Compliance') all_depts = [sales, hr, accounting, marketing, it, infosec, executive, audit] for dept in all_depts: for common_title in dept: if re.match('(^|\s)' + re.escape(common_title.lower()) + '($|\s)', linkedin_title.lower()): return dept[0] def out_csv(self, filename, company, schema): ''' Write data from self.employee_data to a CSV. This data is populated from the cycle_users method. :param argparse filename: argparse object with attribute "file" :rtype: None ''' if not self.employee_data: return None csv_file = csv.writer(open(filename, "w")) for name, emp_data in self.employee_data.items(): emails = self.email_formatter(name, company, schema) for email in emails: first_name = name.split()[0] last_name = name.split()[-1] data = [first_name, last_name, email] + emp_data csv_file.writerow(data) def email_formatter(self, name, company, schema): ''' This method is called by out_csv to determine what format emails should be outputted into. The char_map determines which indexes to use when generating a username in the __prepare_username__ method. :param str name: :param argparse format: argparse object with attribute "format" :param argparse company: argparse object with attribute "company" :rtype: list emails ''' emails = [] for selected in schema: names = name.split() email = selected.format(first=names[0], last=names[-1], domain=company.replace(' ', '')) emails.append(email) return emails @staticmethod def verify_schema(schema): ''' Verify the chosen email schema is valid. :param str schema: A comma separated string containing one or more email schema formats :rtype: list of all valid schemas ''' schema = schema.split(',') for email_format in schema: try: email_format.format(first='test', last='test', domain='test') except KeyError: raise SyntaxWarning('Invalid schema: ' + email_format) return schema def list_search(target, term, return_results=False): ''' Prints list of possible geographic regions & industries per LinkedIn Documentation Specify -l by itself to print all files, or specify -g <term> / -i <term> to search for matching geographic regions and industries simultaneously. Exact matches are required for faceted searches of georegions or industries. :param str target: Search for a matching georegion or industry by specifying -g or -i parameters :param str term: Search for specific matching term in -g or -i by adding a term argument to search for :return: List of matches ''' print("========================= {0} =========================".format(target.capitalize())) try: refs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'refs/') if target == 'georegion': search_file = open(os.path.join(refs, 'georegions.txt'), 'r') elif target == 'industry': search_file = open(os.path.join(refs, 'industries.txt'), 'r') else: return os.EX_NOINPUT except IOError as unfound_file: print( "[-] You are missing the {0}.txt file from your ./refs installation directory. Please re-install scrapedin.".format( target)) print(unfound_file) return os.EX_IOERR results = [] for i, line in enumerate(search_file.readlines()): if term.lower() in line.lower(): results.append([str(line.split()[0]), str(' '.join(line.split()[1:])).strip('\n')]) # print('[{0}] {1}'.format(i, line.strip('\n'))) search_file.close() if return_results: print("Matches found: ", results) return results[0][0] print(tabulate(results, headers=['CODE', 'NAME'], tablefmt="orgtbl")) return os.EX_OK def main(): parser = argparse.ArgumentParser(epilog=HELP_EPILOG, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-m', dest='max_users', type=int, default=float('inf'), help='The maximum amount of employees to scrape (default: all)') parser.add_argument('-l', dest='list_search', action='store_true', default=False, help='List search for geographic regions and industries. (requires -g or -l)') parser.add_argument('-L', dest='loglvl', action='store', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='INFO', help='set the logging level')
random_line_split
scrapedin.py
within the cycle_users function to build different search parameters such as location, geotag, company, job-title, etc. This function will return the full URL. :param str company: target company name :param str url: default (or custom) linkedin url for faceted linkedin search :param str georegion: geographic region (-g) to filter :param str industry: industry (-i_ to filter :param str job_title: job title (-j) to filter :rtype: string (url if successful) or int (Unix-style error integer if error is encountered) ''' filters = [] if not url: url = 'https://www.linkedin.com/search/results/people/?' # Filter by Company # Allows the user to scrape linkedin without specifying a target company, but must do so with intent if company != "NONE": filters.append('company={0}'.format(company)) # Filter by Geographic Region if georegion: # region object is created for future-proof purposes in the event new filters become available or formating changes region = {} try: region['full_line'] = list_search('georegion', term=georegion, return_results=True) region['name'] = region['full_line'].split('\t')[-1] region['code'] = region['full_line'].split('\t')[0].split( '.') # Should be continent.country.province/state.city_id region.update({key: value.replace(' ', '') for (key, value) in zip(('continent', 'country', 'state', 'id'), region['code'])}) filters.append('facetGeoRegion=%5B"{0}%3A{1}"%5D'.format(region['country'], region['id'])) except (IndexError, KeyError, ValueError): self.log.error("[-] The region you chose is too broad to search. Search by City only") return os.EX_NOINPUT # Filter by Industry if industry: ind = list_search('industry', term=industry, return_results=True) if ind: i_code = ind.split('\t')[0].replace(' ', '') filters.append('facetIndustry=%5B"{0}"%5D'.format(i_code)) filters.append("origin=FACETED_SEARCH") if job_title: filters.append('title={0}'.format(job_title)) else: filters.append('title=') # Join additional parameters to the URL by ampersand (&). Order doesn't matter. filters.append('origin=FACETED_SEARCH') url += "&".join(filters).lstrip("&") if len(filters) > 1 else filters[0] self.log.debug("Filtered URL: " + url) return url def cycle_users(self, company, url, max_users=None): ''' You must run the login method before cycle_users will run. Once the login method has run, cycle_users can collect the names and titles of employees at the company you specify. This method requires the company name and optional value max_users from argparse. See the login method for a code example. :param argparse company: :param argpase max_users: :rtype: None (self.employee_data will be populated with names, titles and profile URLs) ''' # Wait for home screen after login WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.XPATH, "//*[@id='global-nav']/div"))) self.log.debug("URL: " + str(url)) try: self.page.get(url) except exceptions.WebDriverException as err: self.log.error("An error occurred while getting the company page: \n{0}".format(err)) self.log.critical("[!] Check the company name or URL used") return os.EX_USAGE count = 1 # WebElements cannot be used for iteration.. current_page = 1 if not max_users: max_users = float('inf') while max_users > len(self.employee_data) and current_page < 100: self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);") try: WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.CLASS_NAME, 'active'))) # Check if the page contains the "no search results" class. This means we are out of users # This will raise a NoSuchElementException if the element is not found self.page.find_element_by_class_name("search-no-results__container") break except exceptions.NoSuchElementException: pass except exceptions.TimeoutException: # Page didn't load correctly after 20 seconds, cannot reliably recover. Bailing. return try: WebDriverWait(self.page, 5).until(EC.visibility_of_element_located((By.CLASS_NAME, 'name'))) except exceptions.TimeoutException: try: if self.page.find_elements_by_class_name('actor-name'): # If this is true, the page is filled with "LinkedIn Member". It doesn't mean there's no users # available on the page. If this is the case, click next. current_page += 1 if 'disabled=""' in self.page.find_element_by_class_name( "artdeco-pagination__button--next").parent.page_source: # If this is true then the Next button is "disabled". This happens when there's no more pages break self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name( "artdeco-pagination__button--next")) continue except exceptions.NoSuchElementException: # Reached when there's no more users available on the page. break try: # Get the current page number (at the bottom of a company search) # The value returned from the HTML looks like this: '1\nCurrent page' new_page = int(self.page.find_elements_by_class_name('active')[-1].text.split()[0]) except ValueError: # If there's only one page, linkedin doesn't show page numbers at the bottom. The only result # will be the text string "people", therefore when we try to convert the value to int we raise # an exception new_page = 1 except IndexError: # Page likely came back with "No more users" even though there appeared to be pages left return except exceptions.StaleElementReferenceException: # Handles a race condition where elements are found but are not populated yet. continue for pagnation in self.page.find_elements_by_class_name("artdeco-pagination__button"): if pagnation.text != "Next": continue if not pagnation.is_enabled(): # Next button is disabled.. This is linkedins way of saying "We are done here" return if current_page != new_page: # The script is too fast. This verifies a new page has loaded before proceeding. continue # Scroll to the bottom of the page loads all elements (employee_names) self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);") # Give the elements a second to populate fully time.sleep(1) # finds each employee element by a globally set class name employee_elements = self.page.find_elements_by_xpath("//div[@class='entity-result__item']") self.log.debug(employee_elements) for employee in employee_elements: if count > len(employee_elements): count = 1 current_page += 1 # click next page try: self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name( "artdeco-pagination__button--next")) break except exceptions.NoSuchElementException: # No more pages return os.EX_OK try: # The elements of LinkedIn change frequently, but the text data of the elements is more-or-less reliable. # It's better to split via newline than parse xpaths. data = employee.text.split('\n') if "LinkedIn Member" not in data[0] and len(data) >= 5: name = Webpage.sanitize_name(data[0]) title_text = data[3] region = data[4] else: count += 1 continue try: # This line/element does not always exist, so an exception will always be raised in this case and must be handled. alt_text = employee.find_element_by_class_name('search-result__snippets').text except exceptions.NoSuchElementException: alt_text = False title, _, company = title_text.partition(' at ') if alt_text and not company: alt_text = alt_text.lstrip('Current: ') t, _, company = alt_text.partition(' at ') if not title: title = t dept = self.dept_wizard(title) # If company is still empty at this point, bail out to unemployment company = company or 'UNEMPLOYED' url = employee.find_element_by_xpath("//a[@class='app-aware-link']").get_attribute('href') except (IndexError, exceptions.NoSuchElementException): count += 1 continue if not name: continue for person in name: self.log.info(person) self.employee_data.update({person: [dept, title, company, region, url]}) count += 1 @staticmethod def
dept_wizard
identifier_name
snapshot.go
canInternalOpts := &scanInternalOptions{ visitPointKey: visitPointKey, visitRangeDel: visitRangeDel, visitRangeKey: visitRangeKey, visitSharedFile: visitSharedFile, skipSharedLevels: visitSharedFile != nil, IterOptions: IterOptions{ KeyTypes: IterKeyTypePointsAndRanges, LowerBound: lower, UpperBound: upper, }, } iter := s.db.newInternalIter(snapshotIterOpts{seqNum: s.seqNum}, scanInternalOpts) defer iter.close() return scanInternalImpl(ctx, lower, upper, iter, scanInternalOpts) } // closeLocked is similar to Close(), except it requires that db.mu be held // by the caller. func (s *Snapshot) closeLocked() error { s.db.mu.snapshots.remove(s) // If s was the previous earliest snapshot, we might be able to reclaim // disk space by dropping obsolete records that were pinned by s. if e := s.db.mu.snapshots.earliest(); e > s.seqNum { s.db.maybeScheduleCompactionPicker(pickElisionOnly) } s.db = nil return nil } // Close closes the snapshot, releasing its resources. Close must be called. // Failure to do so will result in a tiny memory leak and a large leak of // resources on disk due to the entries the snapshot is preventing from being // deleted. // // d.mu must NOT be held by the caller. func (s *Snapshot) Close() error { db := s.db if db == nil { panic(ErrClosed) } db.mu.Lock() defer db.mu.Unlock() return s.closeLocked() } type snapshotList struct { root Snapshot } func (l *snapshotList) init() { l.root.next = &l.root l.root.prev = &l.root } func (l *snapshotList) empty() bool { return l.root.next == &l.root } func (l *snapshotList) count() int { if l.empty() { return 0 } var count int for i := l.root.next; i != &l.root; i = i.next { count++ } return count } func (l *snapshotList) earliest() uint64
func (l *snapshotList) toSlice() []uint64 { if l.empty() { return nil } var results []uint64 for i := l.root.next; i != &l.root; i = i.next { results = append(results, i.seqNum) } return results } func (l *snapshotList) pushBack(s *Snapshot) { if s.list != nil || s.prev != nil || s.next != nil { panic("pebble: snapshot list is inconsistent") } s.prev = l.root.prev s.prev.next = s s.next = &l.root s.next.prev = s s.list = l } func (l *snapshotList) remove(s *Snapshot) { if s == &l.root { panic("pebble: cannot remove snapshot list root node") } if s.list != l { panic("pebble: snapshot list is inconsistent") } s.prev.next = s.next s.next.prev = s.prev s.next = nil // avoid memory leaks s.prev = nil // avoid memory leaks s.list = nil // avoid memory leaks } // EventuallyFileOnlySnapshot (aka EFOS) provides a read-only point-in-time view // of the database state, similar to Snapshot. A EventuallyFileOnlySnapshot // induces less write amplification than Snapshot, at the cost of increased space // amplification. While a Snapshot may increase write amplification across all // flushes and compactions for the duration of its lifetime, an // EventuallyFileOnlySnapshot only incurs that cost for flushes/compactions if // memtables at the time of EFOS instantiation contained keys that the EFOS is // interested in (i.e. its protectedRanges). In that case, the EFOS prevents // elision of keys visible to it, similar to a Snapshot, until those memtables // are flushed, and once that happens, the "EventuallyFileOnlySnapshot" // transitions to a file-only snapshot state in which it pins zombies sstables // like an open Iterator would, without pinning any memtables. Callers that can // tolerate the increased space amplification of pinning zombie sstables until // the snapshot is closed may prefer EventuallyFileOnlySnapshots for their // reduced write amplification. Callers that desire the benefits of the file-only // state that requires no pinning of memtables should call // `WaitForFileOnlySnapshot()` (and possibly re-mint an EFOS if it returns // ErrSnapshotExcised) before relying on the EFOS to keep producing iterators // with zero write-amp and zero pinning of memtables in memory. // // EventuallyFileOnlySnapshots interact with the IngestAndExcise operation in // subtle ways. Unlike Snapshots, EFOS guarantees that their read-only // point-in-time view is unaltered by the excision. However, if a concurrent // excise were to happen on one of the protectedRanges, WaitForFileOnlySnapshot() // would return ErrSnapshotExcised and the EFOS would maintain a reference to the // underlying readState (and by extension, zombie memtables) for its lifetime. // This could lead to increased memory utilization, which is why callers should // call WaitForFileOnlySnapshot() if they expect an EFOS to be long-lived. type EventuallyFileOnlySnapshot struct { mu struct { // NB: If both this mutex and db.mu are being grabbed, db.mu should be // grabbed _before_ grabbing this one. sync.Mutex // Either the {snap,readState} fields are set below, or the version is set at // any given point of time. If a snapshot is referenced, this is not a // file-only snapshot yet, and if a version is set (and ref'd) this is a // file-only snapshot. // The wrapped regular snapshot, if not a file-only snapshot yet. The // readState has already been ref()d once if it's set. snap *Snapshot readState *readState // The wrapped version reference, if a file-only snapshot. vers *version } // Key ranges to watch for an excise on. protectedRanges []KeyRange // excised, if true, signals that the above ranges were excised during the // lifetime of this snapshot. excised atomic.Bool // The db the snapshot was created from. db *DB seqNum uint64 closed chan struct{} } func (d *DB) makeEventuallyFileOnlySnapshot( keyRanges []KeyRange, internalKeyRanges []internalKeyRange, ) *EventuallyFileOnlySnapshot { isFileOnly := true d.mu.Lock() defer d.mu.Unlock() seqNum := d.mu.versions.visibleSeqNum.Load() // Check if any of the keyRanges overlap with a memtable. for i := range d.mu.mem.queue { mem := d.mu.mem.queue[i] if ingestMemtableOverlaps(d.cmp, mem, internalKeyRanges) { isFileOnly = false break } } es := &EventuallyFileOnlySnapshot{ db: d, seqNum: seqNum, protectedRanges: keyRanges, closed: make(chan struct{}), } if isFileOnly { es.mu.vers = d.mu.versions.currentVersion() es.mu.vers.Ref() } else { s := &Snapshot{ db: d, seqNum: seqNum, } s.efos = es es.mu.snap = s es.mu.readState = d.loadReadState() d.mu.snapshots.pushBack(s) } return es } // Transitions this EventuallyFileOnlySnapshot to a file-only snapshot. Requires // earliestUnflushedSeqNum and vers to correspond to the same Version from the // current or a past acquisition of db.mu. vers must have been Ref()'d before // that mutex was released, if it was released. // // NB: The caller is expected to check for es.excised before making this // call. // // d.mu must be held when calling this method. func (es *EventuallyFileOnlySnapshot) transitionToFileOnlySnapshot(vers *version) error { es.mu.Lock() select { case <-es.closed: vers.UnrefLocked() es.mu.Unlock() return ErrClosed default: } if es.mu.snap == nil { es.mu.Unlock() panic("pebble: tried to transition an eventually-file-only-snapshot twice") } // The caller has already called Ref() on vers. es.mu.vers = vers // NB: The callers should have already done a check of es.excised. oldSnap := es.mu.snap oldReadState := es.mu.readState es.mu.snap = nil es.mu.readState = nil es.mu.Unlock() // It's okay to close a snapshot even if iterators are already open on it. oldReadState.unrefLocked() return oldSnap.closeLocked() } // releaseReadState is called to release reference to a readState
{ v := uint64(math.MaxUint64) if !l.empty() { v = l.root.next.seqNum } return v }
identifier_body
snapshot.go
.remove(s) // If s was the previous earliest snapshot, we might be able to reclaim // disk space by dropping obsolete records that were pinned by s. if e := s.db.mu.snapshots.earliest(); e > s.seqNum { s.db.maybeScheduleCompactionPicker(pickElisionOnly) } s.db = nil return nil } // Close closes the snapshot, releasing its resources. Close must be called. // Failure to do so will result in a tiny memory leak and a large leak of // resources on disk due to the entries the snapshot is preventing from being // deleted. // // d.mu must NOT be held by the caller. func (s *Snapshot) Close() error { db := s.db if db == nil { panic(ErrClosed) } db.mu.Lock() defer db.mu.Unlock() return s.closeLocked() } type snapshotList struct { root Snapshot } func (l *snapshotList) init() { l.root.next = &l.root l.root.prev = &l.root } func (l *snapshotList) empty() bool { return l.root.next == &l.root } func (l *snapshotList) count() int { if l.empty() { return 0 } var count int for i := l.root.next; i != &l.root; i = i.next { count++ } return count } func (l *snapshotList) earliest() uint64 { v := uint64(math.MaxUint64) if !l.empty() { v = l.root.next.seqNum } return v } func (l *snapshotList) toSlice() []uint64 { if l.empty() { return nil } var results []uint64 for i := l.root.next; i != &l.root; i = i.next { results = append(results, i.seqNum) } return results } func (l *snapshotList) pushBack(s *Snapshot) { if s.list != nil || s.prev != nil || s.next != nil { panic("pebble: snapshot list is inconsistent") } s.prev = l.root.prev s.prev.next = s s.next = &l.root s.next.prev = s s.list = l } func (l *snapshotList) remove(s *Snapshot) { if s == &l.root { panic("pebble: cannot remove snapshot list root node") } if s.list != l { panic("pebble: snapshot list is inconsistent") } s.prev.next = s.next s.next.prev = s.prev s.next = nil // avoid memory leaks s.prev = nil // avoid memory leaks s.list = nil // avoid memory leaks } // EventuallyFileOnlySnapshot (aka EFOS) provides a read-only point-in-time view // of the database state, similar to Snapshot. A EventuallyFileOnlySnapshot // induces less write amplification than Snapshot, at the cost of increased space // amplification. While a Snapshot may increase write amplification across all // flushes and compactions for the duration of its lifetime, an // EventuallyFileOnlySnapshot only incurs that cost for flushes/compactions if // memtables at the time of EFOS instantiation contained keys that the EFOS is // interested in (i.e. its protectedRanges). In that case, the EFOS prevents // elision of keys visible to it, similar to a Snapshot, until those memtables // are flushed, and once that happens, the "EventuallyFileOnlySnapshot" // transitions to a file-only snapshot state in which it pins zombies sstables // like an open Iterator would, without pinning any memtables. Callers that can // tolerate the increased space amplification of pinning zombie sstables until // the snapshot is closed may prefer EventuallyFileOnlySnapshots for their // reduced write amplification. Callers that desire the benefits of the file-only // state that requires no pinning of memtables should call // `WaitForFileOnlySnapshot()` (and possibly re-mint an EFOS if it returns // ErrSnapshotExcised) before relying on the EFOS to keep producing iterators // with zero write-amp and zero pinning of memtables in memory. // // EventuallyFileOnlySnapshots interact with the IngestAndExcise operation in // subtle ways. Unlike Snapshots, EFOS guarantees that their read-only // point-in-time view is unaltered by the excision. However, if a concurrent // excise were to happen on one of the protectedRanges, WaitForFileOnlySnapshot() // would return ErrSnapshotExcised and the EFOS would maintain a reference to the // underlying readState (and by extension, zombie memtables) for its lifetime. // This could lead to increased memory utilization, which is why callers should // call WaitForFileOnlySnapshot() if they expect an EFOS to be long-lived. type EventuallyFileOnlySnapshot struct { mu struct { // NB: If both this mutex and db.mu are being grabbed, db.mu should be // grabbed _before_ grabbing this one. sync.Mutex // Either the {snap,readState} fields are set below, or the version is set at // any given point of time. If a snapshot is referenced, this is not a // file-only snapshot yet, and if a version is set (and ref'd) this is a // file-only snapshot. // The wrapped regular snapshot, if not a file-only snapshot yet. The // readState has already been ref()d once if it's set. snap *Snapshot readState *readState // The wrapped version reference, if a file-only snapshot. vers *version } // Key ranges to watch for an excise on. protectedRanges []KeyRange // excised, if true, signals that the above ranges were excised during the // lifetime of this snapshot. excised atomic.Bool // The db the snapshot was created from. db *DB seqNum uint64 closed chan struct{} } func (d *DB) makeEventuallyFileOnlySnapshot( keyRanges []KeyRange, internalKeyRanges []internalKeyRange, ) *EventuallyFileOnlySnapshot { isFileOnly := true d.mu.Lock() defer d.mu.Unlock() seqNum := d.mu.versions.visibleSeqNum.Load() // Check if any of the keyRanges overlap with a memtable. for i := range d.mu.mem.queue { mem := d.mu.mem.queue[i] if ingestMemtableOverlaps(d.cmp, mem, internalKeyRanges) { isFileOnly = false break } } es := &EventuallyFileOnlySnapshot{ db: d, seqNum: seqNum, protectedRanges: keyRanges, closed: make(chan struct{}), } if isFileOnly { es.mu.vers = d.mu.versions.currentVersion() es.mu.vers.Ref() } else { s := &Snapshot{ db: d, seqNum: seqNum, } s.efos = es es.mu.snap = s es.mu.readState = d.loadReadState() d.mu.snapshots.pushBack(s) } return es } // Transitions this EventuallyFileOnlySnapshot to a file-only snapshot. Requires // earliestUnflushedSeqNum and vers to correspond to the same Version from the // current or a past acquisition of db.mu. vers must have been Ref()'d before // that mutex was released, if it was released. // // NB: The caller is expected to check for es.excised before making this // call. // // d.mu must be held when calling this method. func (es *EventuallyFileOnlySnapshot) transitionToFileOnlySnapshot(vers *version) error { es.mu.Lock() select { case <-es.closed: vers.UnrefLocked() es.mu.Unlock() return ErrClosed default: } if es.mu.snap == nil { es.mu.Unlock() panic("pebble: tried to transition an eventually-file-only-snapshot twice") } // The caller has already called Ref() on vers. es.mu.vers = vers // NB: The callers should have already done a check of es.excised. oldSnap := es.mu.snap oldReadState := es.mu.readState es.mu.snap = nil es.mu.readState = nil es.mu.Unlock() // It's okay to close a snapshot even if iterators are already open on it. oldReadState.unrefLocked() return oldSnap.closeLocked() } // releaseReadState is called to release reference to a readState when // es.excised == true. This is to free up memory as quickly as possible; all // other snapshot resources are kept around until Close() is called. Safe for // idempotent calls. // // d.mu must be held when calling this method. func (es *EventuallyFileOnlySnapshot) releaseReadState() { if !es.excised.Load() { panic("pebble: releasing read state of eventually-file-only-snapshot but was not excised") } es.mu.Lock() defer es.mu.Unlock() if es.mu.readState != nil { es.mu.readState.unrefLocked() es.db.maybeScheduleObsoleteTableDeletionLocked() } } // hasTransitioned returns true if this EFOS has transitioned to a file-only
// snapshot. func (es *EventuallyFileOnlySnapshot) hasTransitioned() bool { es.mu.Lock() defer es.mu.Unlock()
random_line_split
snapshot.go
closeLocked() error { s.db.mu.snapshots.remove(s) // If s was the previous earliest snapshot, we might be able to reclaim // disk space by dropping obsolete records that were pinned by s. if e := s.db.mu.snapshots.earliest(); e > s.seqNum { s.db.maybeScheduleCompactionPicker(pickElisionOnly) } s.db = nil return nil } // Close closes the snapshot, releasing its resources. Close must be called. // Failure to do so will result in a tiny memory leak and a large leak of // resources on disk due to the entries the snapshot is preventing from being // deleted. // // d.mu must NOT be held by the caller. func (s *Snapshot) Close() error { db := s.db if db == nil { panic(ErrClosed) } db.mu.Lock() defer db.mu.Unlock() return s.closeLocked() } type snapshotList struct { root Snapshot } func (l *snapshotList) init() { l.root.next = &l.root l.root.prev = &l.root } func (l *snapshotList) empty() bool { return l.root.next == &l.root } func (l *snapshotList) count() int { if l.empty() { return 0 } var count int for i := l.root.next; i != &l.root; i = i.next { count++ } return count } func (l *snapshotList) earliest() uint64 { v := uint64(math.MaxUint64) if !l.empty() { v = l.root.next.seqNum } return v } func (l *snapshotList) toSlice() []uint64 { if l.empty() { return nil } var results []uint64 for i := l.root.next; i != &l.root; i = i.next { results = append(results, i.seqNum) } return results } func (l *snapshotList) pushBack(s *Snapshot) { if s.list != nil || s.prev != nil || s.next != nil { panic("pebble: snapshot list is inconsistent") } s.prev = l.root.prev s.prev.next = s s.next = &l.root s.next.prev = s s.list = l } func (l *snapshotList) remove(s *Snapshot) { if s == &l.root { panic("pebble: cannot remove snapshot list root node") } if s.list != l { panic("pebble: snapshot list is inconsistent") } s.prev.next = s.next s.next.prev = s.prev s.next = nil // avoid memory leaks s.prev = nil // avoid memory leaks s.list = nil // avoid memory leaks } // EventuallyFileOnlySnapshot (aka EFOS) provides a read-only point-in-time view // of the database state, similar to Snapshot. A EventuallyFileOnlySnapshot // induces less write amplification than Snapshot, at the cost of increased space // amplification. While a Snapshot may increase write amplification across all // flushes and compactions for the duration of its lifetime, an // EventuallyFileOnlySnapshot only incurs that cost for flushes/compactions if // memtables at the time of EFOS instantiation contained keys that the EFOS is // interested in (i.e. its protectedRanges). In that case, the EFOS prevents // elision of keys visible to it, similar to a Snapshot, until those memtables // are flushed, and once that happens, the "EventuallyFileOnlySnapshot" // transitions to a file-only snapshot state in which it pins zombies sstables // like an open Iterator would, without pinning any memtables. Callers that can // tolerate the increased space amplification of pinning zombie sstables until // the snapshot is closed may prefer EventuallyFileOnlySnapshots for their // reduced write amplification. Callers that desire the benefits of the file-only // state that requires no pinning of memtables should call // `WaitForFileOnlySnapshot()` (and possibly re-mint an EFOS if it returns // ErrSnapshotExcised) before relying on the EFOS to keep producing iterators // with zero write-amp and zero pinning of memtables in memory. // // EventuallyFileOnlySnapshots interact with the IngestAndExcise operation in // subtle ways. Unlike Snapshots, EFOS guarantees that their read-only // point-in-time view is unaltered by the excision. However, if a concurrent // excise were to happen on one of the protectedRanges, WaitForFileOnlySnapshot() // would return ErrSnapshotExcised and the EFOS would maintain a reference to the // underlying readState (and by extension, zombie memtables) for its lifetime. // This could lead to increased memory utilization, which is why callers should // call WaitForFileOnlySnapshot() if they expect an EFOS to be long-lived. type EventuallyFileOnlySnapshot struct { mu struct { // NB: If both this mutex and db.mu are being grabbed, db.mu should be // grabbed _before_ grabbing this one. sync.Mutex // Either the {snap,readState} fields are set below, or the version is set at // any given point of time. If a snapshot is referenced, this is not a // file-only snapshot yet, and if a version is set (and ref'd) this is a // file-only snapshot. // The wrapped regular snapshot, if not a file-only snapshot yet. The // readState has already been ref()d once if it's set. snap *Snapshot readState *readState // The wrapped version reference, if a file-only snapshot. vers *version } // Key ranges to watch for an excise on. protectedRanges []KeyRange // excised, if true, signals that the above ranges were excised during the // lifetime of this snapshot. excised atomic.Bool // The db the snapshot was created from. db *DB seqNum uint64 closed chan struct{} } func (d *DB) makeEventuallyFileOnlySnapshot( keyRanges []KeyRange, internalKeyRanges []internalKeyRange, ) *EventuallyFileOnlySnapshot { isFileOnly := true d.mu.Lock() defer d.mu.Unlock() seqNum := d.mu.versions.visibleSeqNum.Load() // Check if any of the keyRanges overlap with a memtable. for i := range d.mu.mem.queue { mem := d.mu.mem.queue[i] if ingestMemtableOverlaps(d.cmp, mem, internalKeyRanges) { isFileOnly = false break } } es := &EventuallyFileOnlySnapshot{ db: d, seqNum: seqNum, protectedRanges: keyRanges, closed: make(chan struct{}), } if isFileOnly { es.mu.vers = d.mu.versions.currentVersion() es.mu.vers.Ref() } else { s := &Snapshot{ db: d, seqNum: seqNum, } s.efos = es es.mu.snap = s es.mu.readState = d.loadReadState() d.mu.snapshots.pushBack(s) } return es } // Transitions this EventuallyFileOnlySnapshot to a file-only snapshot. Requires // earliestUnflushedSeqNum and vers to correspond to the same Version from the // current or a past acquisition of db.mu. vers must have been Ref()'d before // that mutex was released, if it was released. // // NB: The caller is expected to check for es.excised before making this // call. // // d.mu must be held when calling this method. func (es *EventuallyFileOnlySnapshot) transitionToFileOnlySnapshot(vers *version) error { es.mu.Lock() select { case <-es.closed: vers.UnrefLocked() es.mu.Unlock() return ErrClosed default: } if es.mu.snap == nil { es.mu.Unlock() panic("pebble: tried to transition an eventually-file-only-snapshot twice") } // The caller has already called Ref() on vers. es.mu.vers = vers // NB: The callers should have already done a check of es.excised. oldSnap := es.mu.snap oldReadState := es.mu.readState es.mu.snap = nil es.mu.readState = nil es.mu.Unlock() // It's okay to close a snapshot even if iterators are already open on it. oldReadState.unrefLocked() return oldSnap.closeLocked() } // releaseReadState is called to release reference to a readState when // es.excised == true. This is to free up memory as quickly as possible; all // other snapshot resources are kept around until Close() is called. Safe for // idempotent calls. // // d.mu must be held when calling this method. func (es *EventuallyFileOnlySnapshot) releaseReadState() { if !es.excised.Load() { panic("pebble: releasing read state of eventually-file-only-snapshot but was not excised") } es.mu.Lock() defer es.mu.Unlock() if es.mu.readState != nil { es.mu.readState.unrefLocked() es.db.maybeScheduleObsoleteTableDeletionLocked() } } // hasTransitioned returns true if this EFOS has transitioned to a file-only // snapshot. func (es *EventuallyFileOnlySnapshot)
hasTransitioned
identifier_name
snapshot.go
their read-only // point-in-time view is unaltered by the excision. However, if a concurrent // excise were to happen on one of the protectedRanges, WaitForFileOnlySnapshot() // would return ErrSnapshotExcised and the EFOS would maintain a reference to the // underlying readState (and by extension, zombie memtables) for its lifetime. // This could lead to increased memory utilization, which is why callers should // call WaitForFileOnlySnapshot() if they expect an EFOS to be long-lived. type EventuallyFileOnlySnapshot struct { mu struct { // NB: If both this mutex and db.mu are being grabbed, db.mu should be // grabbed _before_ grabbing this one. sync.Mutex // Either the {snap,readState} fields are set below, or the version is set at // any given point of time. If a snapshot is referenced, this is not a // file-only snapshot yet, and if a version is set (and ref'd) this is a // file-only snapshot. // The wrapped regular snapshot, if not a file-only snapshot yet. The // readState has already been ref()d once if it's set. snap *Snapshot readState *readState // The wrapped version reference, if a file-only snapshot. vers *version } // Key ranges to watch for an excise on. protectedRanges []KeyRange // excised, if true, signals that the above ranges were excised during the // lifetime of this snapshot. excised atomic.Bool // The db the snapshot was created from. db *DB seqNum uint64 closed chan struct{} } func (d *DB) makeEventuallyFileOnlySnapshot( keyRanges []KeyRange, internalKeyRanges []internalKeyRange, ) *EventuallyFileOnlySnapshot { isFileOnly := true d.mu.Lock() defer d.mu.Unlock() seqNum := d.mu.versions.visibleSeqNum.Load() // Check if any of the keyRanges overlap with a memtable. for i := range d.mu.mem.queue { mem := d.mu.mem.queue[i] if ingestMemtableOverlaps(d.cmp, mem, internalKeyRanges) { isFileOnly = false break } } es := &EventuallyFileOnlySnapshot{ db: d, seqNum: seqNum, protectedRanges: keyRanges, closed: make(chan struct{}), } if isFileOnly { es.mu.vers = d.mu.versions.currentVersion() es.mu.vers.Ref() } else { s := &Snapshot{ db: d, seqNum: seqNum, } s.efos = es es.mu.snap = s es.mu.readState = d.loadReadState() d.mu.snapshots.pushBack(s) } return es } // Transitions this EventuallyFileOnlySnapshot to a file-only snapshot. Requires // earliestUnflushedSeqNum and vers to correspond to the same Version from the // current or a past acquisition of db.mu. vers must have been Ref()'d before // that mutex was released, if it was released. // // NB: The caller is expected to check for es.excised before making this // call. // // d.mu must be held when calling this method. func (es *EventuallyFileOnlySnapshot) transitionToFileOnlySnapshot(vers *version) error { es.mu.Lock() select { case <-es.closed: vers.UnrefLocked() es.mu.Unlock() return ErrClosed default: } if es.mu.snap == nil { es.mu.Unlock() panic("pebble: tried to transition an eventually-file-only-snapshot twice") } // The caller has already called Ref() on vers. es.mu.vers = vers // NB: The callers should have already done a check of es.excised. oldSnap := es.mu.snap oldReadState := es.mu.readState es.mu.snap = nil es.mu.readState = nil es.mu.Unlock() // It's okay to close a snapshot even if iterators are already open on it. oldReadState.unrefLocked() return oldSnap.closeLocked() } // releaseReadState is called to release reference to a readState when // es.excised == true. This is to free up memory as quickly as possible; all // other snapshot resources are kept around until Close() is called. Safe for // idempotent calls. // // d.mu must be held when calling this method. func (es *EventuallyFileOnlySnapshot) releaseReadState() { if !es.excised.Load() { panic("pebble: releasing read state of eventually-file-only-snapshot but was not excised") } es.mu.Lock() defer es.mu.Unlock() if es.mu.readState != nil { es.mu.readState.unrefLocked() es.db.maybeScheduleObsoleteTableDeletionLocked() } } // hasTransitioned returns true if this EFOS has transitioned to a file-only // snapshot. func (es *EventuallyFileOnlySnapshot) hasTransitioned() bool { es.mu.Lock() defer es.mu.Unlock() return es.mu.vers != nil } // waitForFlush waits for a flush on any memtables that need to be flushed // before this EFOS can transition to a file-only snapshot. If this EFOS is // waiting on a flush of the mutable memtable, it forces a rotation within // `dur` duration. For immutable memtables, it schedules a flush and waits for // it to finish. func (es *EventuallyFileOnlySnapshot) waitForFlush(ctx context.Context, dur time.Duration) error { es.db.mu.Lock() defer es.db.mu.Unlock() earliestUnflushedSeqNum := es.db.getEarliestUnflushedSeqNumLocked() for earliestUnflushedSeqNum < es.seqNum { select { case <-es.closed: return ErrClosed case <-ctx.Done(): return ctx.Err() default: } // Check if the current mutable memtable contains keys less than seqNum. // If so, rotate it. if es.db.mu.mem.mutable.logSeqNum < es.seqNum && dur.Nanoseconds() > 0 { es.db.maybeScheduleDelayedFlush(es.db.mu.mem.mutable, dur) } else { // Find the last memtable that contains seqNums less than es.seqNum, // and force a flush on it. var mem *flushableEntry for i := range es.db.mu.mem.queue { if es.db.mu.mem.queue[i].logSeqNum < es.seqNum { mem = es.db.mu.mem.queue[i] } } mem.flushForced = true es.db.maybeScheduleFlush() } es.db.mu.compact.cond.Wait() earliestUnflushedSeqNum = es.db.getEarliestUnflushedSeqNumLocked() } if es.excised.Load() { return ErrSnapshotExcised } return nil } // WaitForFileOnlySnapshot blocks the calling goroutine until this snapshot // has been converted into a file-only snapshot (i.e. all memtables containing // keys < seqNum are flushed). A duration can be passed in, and if nonzero, // a delayed flush will be scheduled at that duration if necessary. // // Idempotent; can be called multiple times with no side effects. func (es *EventuallyFileOnlySnapshot) WaitForFileOnlySnapshot( ctx context.Context, dur time.Duration, ) error { if es.hasTransitioned() { return nil } if err := es.waitForFlush(ctx, dur); err != nil { return err } if invariants.Enabled { // Since we aren't returning an error, we _must_ have transitioned to a // file-only snapshot by now. if !es.hasTransitioned() { panic("expected EFOS to have transitioned to file-only snapshot after flush") } } return nil } // Close closes the file-only snapshot and releases all referenced resources. // Not idempotent. func (es *EventuallyFileOnlySnapshot) Close() error { close(es.closed) es.db.mu.Lock() defer es.db.mu.Unlock() es.mu.Lock() defer es.mu.Unlock() if es.mu.snap != nil { if err := es.mu.snap.closeLocked(); err != nil { return err } } if es.mu.readState != nil { es.mu.readState.unrefLocked() es.db.maybeScheduleObsoleteTableDeletionLocked() } if es.mu.vers != nil { es.mu.vers.UnrefLocked() } return nil } // Get implements the Reader interface. func (es *EventuallyFileOnlySnapshot) Get(key []byte) (value []byte, closer io.Closer, err error) { // TODO(jackson): Use getInternal. iter, err := es.NewIter(nil) if err != nil { return nil, nil, err } var valid bool if es.db.opts.Comparer.Split != nil { valid = iter.SeekPrefixGE(key) } else { valid = iter.SeekGE(key) } if !valid { if err = firstError(iter.Error(), iter.Close()); err != nil { return nil, nil, err } return nil, nil, ErrNotFound } if !es.db.equal(iter.Key(), key)
{ return nil, nil, firstError(iter.Close(), ErrNotFound) }
conditional_block
conteng_docker.go
"github.com/pkg/errors" "github.com/syhpoon/xenvman/pkg/lib" "github.com/syhpoon/xenvman/pkg/logger" ) var dockerLog = logger.GetLogger("xenvman.pkg.conteng.conteng_docker") type DockerEngineParams struct { } type DockerEngine struct { cl *client.Client params DockerEngineParams subNetOct1 int subNetOct2 int subNetMu sync.Mutex } func NewDockerEngine(params DockerEngineParams) (*DockerEngine, error) { var opts []func(client2 *client.Client) error cli, err := client.NewClientWithOpts(opts...) if err != nil { return nil, errors.Wrapf(err, "Error creating docker client") } cli.NegotiateAPIVersion(context.Background()) dockerLog.Debugf("Docker engine client created") return &DockerEngine{ cl: cli, params: params, subNetOct1: 0, subNetOct2: 0, }, nil } func (de *DockerEngine) CreateNetwork(ctx context.Context, name string) (NetworkId, string, error) { sub, err := de.getSubNet() if err != nil { return "", "", err } netParams := types.NetworkCreate{ CheckDuplicate: true, Driver: "bridge", IPAM: &network.IPAM{ Config: []network.IPAMConfig{ { Subnet: sub, IPRange: sub, }, }, }, } r, err := de.cl.NetworkCreate(ctx, name, netParams) if err != nil { return "", "", errors.Wrapf(err, "Error creating docker network: %s", sub) } dockerLog.Debugf("Network created: %s - %s :: %s", name, r.ID, sub) return r.ID, sub, nil } // Run Docker container func (de *DockerEngine) RunContainer(ctx context.Context, name, tag string, params RunContainerParams) (string, error) { // Hosts var hosts []string for host, ip := range params.Hosts { hosts = append(hosts, fmt.Sprintf("%s:%s", host, ip)) }
for contPort, hostPort := range params.Ports { rawPorts = append(rawPorts, fmt.Sprintf("%d:%d", hostPort, contPort)) } ports, bindings, err := nat.ParsePortSpecs(rawPorts) if err != nil { return "", errors.Wrapf(err, "Error parsing ports for %s", name) } // Environ var environ []string for k, v := range params.Environ { environ = append(environ, fmt.Sprintf("%s=%s", k, v)) } // Mounts var mounts []mount.Mount for _, fileMount := range params.FileMounts { mounts = append(mounts, mount.Mount{ Type: "bind", Source: fileMount.HostFile, Target: fileMount.ContainerFile, ReadOnly: fileMount.Readonly, }) } var dns []string if params.DiscoverDNS != "" { dns = append(dns, params.DiscoverDNS) } hostCont := &container.HostConfig{ NetworkMode: container.NetworkMode(params.NetworkId), ExtraHosts: hosts, AutoRemove: false, DNS: dns, DNSSearch: []string{"xenv"}, RestartPolicy: container.RestartPolicy{Name: "on-failure"}, PortBindings: bindings, Mounts: mounts, } netConf := &network.NetworkingConfig{ EndpointsConfig: map[string]*network.EndpointSettings{ params.NetworkId: { IPAMConfig: &network.EndpointIPAMConfig{ IPv4Address: params.IP, }, }, }, } r, err := de.cl.ContainerCreate(ctx, &container.Config{ Hostname: name, AttachStdout: true, AttachStderr: true, Image: tag, ExposedPorts: ports, Env: environ, Cmd: params.Cmd, Entrypoint: params.Entrypoint, }, hostCont, netConf, lib.NewIdShort()) if err != nil { return "", errors.Wrapf(err, "Error creating container %s", tag) } err = de.cl.ContainerStart(ctx, r.ID, types.ContainerStartOptions{}) if err != nil { return "", errors.Wrapf(err, "Error starting container: %s", tag) } dockerLog.Debugf("Container started: %s, network=%s", tag, params.NetworkId) return r.ID, nil } func (de *DockerEngine) RemoveContainer(ctx context.Context, id string) error { return de.cl.ContainerRemove(ctx, id, types.ContainerRemoveOptions{ RemoveVolumes: true, Force: true, }) } func (de *DockerEngine) StopContainer(ctx context.Context, id string) error { return de.cl.ContainerKill(ctx, id, "INT") } func (de *DockerEngine) RestartContainer(ctx context.Context, id string) error { return de.cl.ContainerStart(ctx, id, types.ContainerStartOptions{}) } func (de *DockerEngine) RemoveNetwork(ctx context.Context, id string) error { return de.cl.NetworkRemove(ctx, id) } func (de *DockerEngine) BuildImage(ctx context.Context, imgName string, buildContext io.Reader) error { opts := types.ImageBuildOptions{ NetworkMode: "bridge", Tags: []string{imgName}, Remove: true, ForceRemove: true, SuppressOutput: true, NoCache: true, PullParent: true, } r, err := de.cl.ImageBuild(ctx, buildContext, opts) if r.Body != nil { defer r.Body.Close() // Check server response if rerr := de.isErrorResponse(r.Body); rerr != nil { return errors.Errorf("Error from Docker server: %s", rerr) } } if err == nil { dockerLog.Debugf("Image built: %s", imgName) } return err } func (de *DockerEngine) RemoveImage(ctx context.Context, imgName string) error { opts := types.ImageRemoveOptions{ Force: true, PruneChildren: true, } _, err := de.cl.ImageRemove(ctx, imgName, opts) if err == nil { dockerLog.Debugf("Image removed: %s", imgName) } return err } func (de *DockerEngine) FetchImage(ctx context.Context, imgName string) error { out, err := de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{}) var auth string if err != nil { // Retry with auth auth, err = de.getAuthForImage(imgName) if err != nil { return err } out, err = de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{ RegistryAuth: auth, }) } if err == nil { dockerLog.Debugf("Image fetched: %s", imgName) } if out != nil { _, _ = io.Copy(ioutil.Discard, out) } return err } func (de *DockerEngine) GetImagePorts(ctx context.Context, tag string) ([]uint16, error) { r, _, err := de.cl.ImageInspectWithRaw(ctx, tag) if err != nil { return nil, errors.Wrapf(err, "Error inspecting image %s", tag) } var ports []uint16 for p := range r.Config.ExposedPorts { ports = append(ports, uint16(p.Int())) } return ports, nil } func (de *DockerEngine) Terminate() { de.cl.Close() } func (de *DockerEngine) isErrorResponse(r io.Reader) error { data, err := ioutil.ReadAll(r) if err != nil { return err } split := bytes.Split(data, []byte("\n")) type errResp struct { Error string } for i := range split { e := errResp{} if err := json.Unmarshal(split[i], &e); err == nil && e.Error != "" { return errors.New(e.Error) } } return nil } // TODO: This should probably be made more robust at some point func (de *DockerEngine) getSubNet() (string, error) { de.subNetMu.Lock() defer de.subNetMu.Unlock() addrs, err := net.InterfaceAddrs() if err != nil { return "", errors.Wrap(err, "Error getting network addresses") } var nets []*net.IPNet for _, addr := range addrs { dockerLog.Debugf("Inspecting interface %s", addr.String()) _, n, err := net.ParseCIDR(addr.String()) if err != nil { dockerLog.Warningf("Error parsing address: %s", addr.String()) continue } nets = append(nets, n) } netaddr := func() string { tpl := "10.%d.%d.0/24"
// Ports var rawPorts []string
random_line_split
conteng_docker.go
"github.com/pkg/errors" "github.com/syhpoon/xenvman/pkg/lib" "github.com/syhpoon/xenvman/pkg/logger" ) var dockerLog = logger.GetLogger("xenvman.pkg.conteng.conteng_docker") type DockerEngineParams struct { } type DockerEngine struct { cl *client.Client params DockerEngineParams subNetOct1 int subNetOct2 int subNetMu sync.Mutex } func NewDockerEngine(params DockerEngineParams) (*DockerEngine, error) { var opts []func(client2 *client.Client) error cli, err := client.NewClientWithOpts(opts...) if err != nil { return nil, errors.Wrapf(err, "Error creating docker client") } cli.NegotiateAPIVersion(context.Background()) dockerLog.Debugf("Docker engine client created") return &DockerEngine{ cl: cli, params: params, subNetOct1: 0, subNetOct2: 0, }, nil } func (de *DockerEngine) CreateNetwork(ctx context.Context, name string) (NetworkId, string, error) { sub, err := de.getSubNet() if err != nil { return "", "", err } netParams := types.NetworkCreate{ CheckDuplicate: true, Driver: "bridge", IPAM: &network.IPAM{ Config: []network.IPAMConfig{ { Subnet: sub, IPRange: sub, }, }, }, } r, err := de.cl.NetworkCreate(ctx, name, netParams) if err != nil { return "", "", errors.Wrapf(err, "Error creating docker network: %s", sub) } dockerLog.Debugf("Network created: %s - %s :: %s", name, r.ID, sub) return r.ID, sub, nil } // Run Docker container func (de *DockerEngine) RunContainer(ctx context.Context, name, tag string, params RunContainerParams) (string, error) { // Hosts var hosts []string for host, ip := range params.Hosts { hosts = append(hosts, fmt.Sprintf("%s:%s", host, ip)) } // Ports var rawPorts []string for contPort, hostPort := range params.Ports { rawPorts = append(rawPorts, fmt.Sprintf("%d:%d", hostPort, contPort)) } ports, bindings, err := nat.ParsePortSpecs(rawPorts) if err != nil { return "", errors.Wrapf(err, "Error parsing ports for %s", name) } // Environ var environ []string for k, v := range params.Environ { environ = append(environ, fmt.Sprintf("%s=%s", k, v)) } // Mounts var mounts []mount.Mount for _, fileMount := range params.FileMounts { mounts = append(mounts, mount.Mount{ Type: "bind", Source: fileMount.HostFile, Target: fileMount.ContainerFile, ReadOnly: fileMount.Readonly, }) } var dns []string if params.DiscoverDNS != "" { dns = append(dns, params.DiscoverDNS) } hostCont := &container.HostConfig{ NetworkMode: container.NetworkMode(params.NetworkId), ExtraHosts: hosts, AutoRemove: false, DNS: dns, DNSSearch: []string{"xenv"}, RestartPolicy: container.RestartPolicy{Name: "on-failure"}, PortBindings: bindings, Mounts: mounts, } netConf := &network.NetworkingConfig{ EndpointsConfig: map[string]*network.EndpointSettings{ params.NetworkId: { IPAMConfig: &network.EndpointIPAMConfig{ IPv4Address: params.IP, }, }, }, } r, err := de.cl.ContainerCreate(ctx, &container.Config{ Hostname: name, AttachStdout: true, AttachStderr: true, Image: tag, ExposedPorts: ports, Env: environ, Cmd: params.Cmd, Entrypoint: params.Entrypoint, }, hostCont, netConf, lib.NewIdShort()) if err != nil { return "", errors.Wrapf(err, "Error creating container %s", tag) } err = de.cl.ContainerStart(ctx, r.ID, types.ContainerStartOptions{}) if err != nil { return "", errors.Wrapf(err, "Error starting container: %s", tag) } dockerLog.Debugf("Container started: %s, network=%s", tag, params.NetworkId) return r.ID, nil } func (de *DockerEngine) RemoveContainer(ctx context.Context, id string) error { return de.cl.ContainerRemove(ctx, id, types.ContainerRemoveOptions{ RemoveVolumes: true, Force: true, }) } func (de *DockerEngine) StopContainer(ctx context.Context, id string) error { return de.cl.ContainerKill(ctx, id, "INT") } func (de *DockerEngine) RestartContainer(ctx context.Context, id string) error { return de.cl.ContainerStart(ctx, id, types.ContainerStartOptions{}) } func (de *DockerEngine)
(ctx context.Context, id string) error { return de.cl.NetworkRemove(ctx, id) } func (de *DockerEngine) BuildImage(ctx context.Context, imgName string, buildContext io.Reader) error { opts := types.ImageBuildOptions{ NetworkMode: "bridge", Tags: []string{imgName}, Remove: true, ForceRemove: true, SuppressOutput: true, NoCache: true, PullParent: true, } r, err := de.cl.ImageBuild(ctx, buildContext, opts) if r.Body != nil { defer r.Body.Close() // Check server response if rerr := de.isErrorResponse(r.Body); rerr != nil { return errors.Errorf("Error from Docker server: %s", rerr) } } if err == nil { dockerLog.Debugf("Image built: %s", imgName) } return err } func (de *DockerEngine) RemoveImage(ctx context.Context, imgName string) error { opts := types.ImageRemoveOptions{ Force: true, PruneChildren: true, } _, err := de.cl.ImageRemove(ctx, imgName, opts) if err == nil { dockerLog.Debugf("Image removed: %s", imgName) } return err } func (de *DockerEngine) FetchImage(ctx context.Context, imgName string) error { out, err := de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{}) var auth string if err != nil { // Retry with auth auth, err = de.getAuthForImage(imgName) if err != nil { return err } out, err = de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{ RegistryAuth: auth, }) } if err == nil { dockerLog.Debugf("Image fetched: %s", imgName) } if out != nil { _, _ = io.Copy(ioutil.Discard, out) } return err } func (de *DockerEngine) GetImagePorts(ctx context.Context, tag string) ([]uint16, error) { r, _, err := de.cl.ImageInspectWithRaw(ctx, tag) if err != nil { return nil, errors.Wrapf(err, "Error inspecting image %s", tag) } var ports []uint16 for p := range r.Config.ExposedPorts { ports = append(ports, uint16(p.Int())) } return ports, nil } func (de *DockerEngine) Terminate() { de.cl.Close() } func (de *DockerEngine) isErrorResponse(r io.Reader) error { data, err := ioutil.ReadAll(r) if err != nil { return err } split := bytes.Split(data, []byte("\n")) type errResp struct { Error string } for i := range split { e := errResp{} if err := json.Unmarshal(split[i], &e); err == nil && e.Error != "" { return errors.New(e.Error) } } return nil } // TODO: This should probably be made more robust at some point func (de *DockerEngine) getSubNet() (string, error) { de.subNetMu.Lock() defer de.subNetMu.Unlock() addrs, err := net.InterfaceAddrs() if err != nil { return "", errors.Wrap(err, "Error getting network addresses") } var nets []*net.IPNet for _, addr := range addrs { dockerLog.Debugf("Inspecting interface %s", addr.String()) _, n, err := net.ParseCIDR(addr.String()) if err != nil { dockerLog.Warningf("Error parsing address: %s", addr.String()) continue } nets = append(nets, n) } netaddr := func() string { tpl := "10.%d.%d.0/24"
RemoveNetwork
identifier_name
conteng_docker.go
"github.com/pkg/errors" "github.com/syhpoon/xenvman/pkg/lib" "github.com/syhpoon/xenvman/pkg/logger" ) var dockerLog = logger.GetLogger("xenvman.pkg.conteng.conteng_docker") type DockerEngineParams struct { } type DockerEngine struct { cl *client.Client params DockerEngineParams subNetOct1 int subNetOct2 int subNetMu sync.Mutex } func NewDockerEngine(params DockerEngineParams) (*DockerEngine, error) { var opts []func(client2 *client.Client) error cli, err := client.NewClientWithOpts(opts...) if err != nil { return nil, errors.Wrapf(err, "Error creating docker client") } cli.NegotiateAPIVersion(context.Background()) dockerLog.Debugf("Docker engine client created") return &DockerEngine{ cl: cli, params: params, subNetOct1: 0, subNetOct2: 0, }, nil } func (de *DockerEngine) CreateNetwork(ctx context.Context, name string) (NetworkId, string, error) { sub, err := de.getSubNet() if err != nil { return "", "", err } netParams := types.NetworkCreate{ CheckDuplicate: true, Driver: "bridge", IPAM: &network.IPAM{ Config: []network.IPAMConfig{ { Subnet: sub, IPRange: sub, }, }, }, } r, err := de.cl.NetworkCreate(ctx, name, netParams) if err != nil { return "", "", errors.Wrapf(err, "Error creating docker network: %s", sub) } dockerLog.Debugf("Network created: %s - %s :: %s", name, r.ID, sub) return r.ID, sub, nil } // Run Docker container func (de *DockerEngine) RunContainer(ctx context.Context, name, tag string, params RunContainerParams) (string, error) { // Hosts var hosts []string for host, ip := range params.Hosts { hosts = append(hosts, fmt.Sprintf("%s:%s", host, ip)) } // Ports var rawPorts []string for contPort, hostPort := range params.Ports { rawPorts = append(rawPorts, fmt.Sprintf("%d:%d", hostPort, contPort)) } ports, bindings, err := nat.ParsePortSpecs(rawPorts) if err != nil { return "", errors.Wrapf(err, "Error parsing ports for %s", name) } // Environ var environ []string for k, v := range params.Environ { environ = append(environ, fmt.Sprintf("%s=%s", k, v)) } // Mounts var mounts []mount.Mount for _, fileMount := range params.FileMounts { mounts = append(mounts, mount.Mount{ Type: "bind", Source: fileMount.HostFile, Target: fileMount.ContainerFile, ReadOnly: fileMount.Readonly, }) } var dns []string if params.DiscoverDNS != "" { dns = append(dns, params.DiscoverDNS) } hostCont := &container.HostConfig{ NetworkMode: container.NetworkMode(params.NetworkId), ExtraHosts: hosts, AutoRemove: false, DNS: dns, DNSSearch: []string{"xenv"}, RestartPolicy: container.RestartPolicy{Name: "on-failure"}, PortBindings: bindings, Mounts: mounts, } netConf := &network.NetworkingConfig{ EndpointsConfig: map[string]*network.EndpointSettings{ params.NetworkId: { IPAMConfig: &network.EndpointIPAMConfig{ IPv4Address: params.IP, }, }, }, } r, err := de.cl.ContainerCreate(ctx, &container.Config{ Hostname: name, AttachStdout: true, AttachStderr: true, Image: tag, ExposedPorts: ports, Env: environ, Cmd: params.Cmd, Entrypoint: params.Entrypoint, }, hostCont, netConf, lib.NewIdShort()) if err != nil { return "", errors.Wrapf(err, "Error creating container %s", tag) } err = de.cl.ContainerStart(ctx, r.ID, types.ContainerStartOptions{}) if err != nil { return "", errors.Wrapf(err, "Error starting container: %s", tag) } dockerLog.Debugf("Container started: %s, network=%s", tag, params.NetworkId) return r.ID, nil } func (de *DockerEngine) RemoveContainer(ctx context.Context, id string) error { return de.cl.ContainerRemove(ctx, id, types.ContainerRemoveOptions{ RemoveVolumes: true, Force: true, }) } func (de *DockerEngine) StopContainer(ctx context.Context, id string) error { return de.cl.ContainerKill(ctx, id, "INT") } func (de *DockerEngine) RestartContainer(ctx context.Context, id string) error { return de.cl.ContainerStart(ctx, id, types.ContainerStartOptions{}) } func (de *DockerEngine) RemoveNetwork(ctx context.Context, id string) error { return de.cl.NetworkRemove(ctx, id) } func (de *DockerEngine) BuildImage(ctx context.Context, imgName string, buildContext io.Reader) error { opts := types.ImageBuildOptions{ NetworkMode: "bridge", Tags: []string{imgName}, Remove: true, ForceRemove: true, SuppressOutput: true, NoCache: true, PullParent: true, } r, err := de.cl.ImageBuild(ctx, buildContext, opts) if r.Body != nil { defer r.Body.Close() // Check server response if rerr := de.isErrorResponse(r.Body); rerr != nil { return errors.Errorf("Error from Docker server: %s", rerr) } } if err == nil { dockerLog.Debugf("Image built: %s", imgName) } return err } func (de *DockerEngine) RemoveImage(ctx context.Context, imgName string) error { opts := types.ImageRemoveOptions{ Force: true, PruneChildren: true, } _, err := de.cl.ImageRemove(ctx, imgName, opts) if err == nil { dockerLog.Debugf("Image removed: %s", imgName) } return err } func (de *DockerEngine) FetchImage(ctx context.Context, imgName string) error { out, err := de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{}) var auth string if err != nil { // Retry with auth auth, err = de.getAuthForImage(imgName) if err != nil { return err } out, err = de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{ RegistryAuth: auth, }) } if err == nil { dockerLog.Debugf("Image fetched: %s", imgName) } if out != nil { _, _ = io.Copy(ioutil.Discard, out) } return err } func (de *DockerEngine) GetImagePorts(ctx context.Context, tag string) ([]uint16, error) { r, _, err := de.cl.ImageInspectWithRaw(ctx, tag) if err != nil { return nil, errors.Wrapf(err, "Error inspecting image %s", tag) } var ports []uint16 for p := range r.Config.ExposedPorts { ports = append(ports, uint16(p.Int())) } return ports, nil } func (de *DockerEngine) Terminate()
func (de *DockerEngine) isErrorResponse(r io.Reader) error { data, err := ioutil.ReadAll(r) if err != nil { return err } split := bytes.Split(data, []byte("\n")) type errResp struct { Error string } for i := range split { e := errResp{} if err := json.Unmarshal(split[i], &e); err == nil && e.Error != "" { return errors.New(e.Error) } } return nil } // TODO: This should probably be made more robust at some point func (de *DockerEngine) getSubNet() (string, error) { de.subNetMu.Lock() defer de.subNetMu.Unlock() addrs, err := net.InterfaceAddrs() if err != nil { return "", errors.Wrap(err, "Error getting network addresses") } var nets []*net.IPNet for _, addr := range addrs { dockerLog.Debugf("Inspecting interface %s", addr.String()) _, n, err := net.ParseCIDR(addr.String()) if err != nil { dockerLog.Warningf("Error parsing address: %s", addr.String()) continue } nets = append(nets, n) } netaddr := func() string { tpl := "10.%d.%d.0/24"
{ de.cl.Close() }
identifier_body
conteng_docker.go
"github.com/pkg/errors" "github.com/syhpoon/xenvman/pkg/lib" "github.com/syhpoon/xenvman/pkg/logger" ) var dockerLog = logger.GetLogger("xenvman.pkg.conteng.conteng_docker") type DockerEngineParams struct { } type DockerEngine struct { cl *client.Client params DockerEngineParams subNetOct1 int subNetOct2 int subNetMu sync.Mutex } func NewDockerEngine(params DockerEngineParams) (*DockerEngine, error) { var opts []func(client2 *client.Client) error cli, err := client.NewClientWithOpts(opts...) if err != nil { return nil, errors.Wrapf(err, "Error creating docker client") } cli.NegotiateAPIVersion(context.Background()) dockerLog.Debugf("Docker engine client created") return &DockerEngine{ cl: cli, params: params, subNetOct1: 0, subNetOct2: 0, }, nil } func (de *DockerEngine) CreateNetwork(ctx context.Context, name string) (NetworkId, string, error) { sub, err := de.getSubNet() if err != nil
netParams := types.NetworkCreate{ CheckDuplicate: true, Driver: "bridge", IPAM: &network.IPAM{ Config: []network.IPAMConfig{ { Subnet: sub, IPRange: sub, }, }, }, } r, err := de.cl.NetworkCreate(ctx, name, netParams) if err != nil { return "", "", errors.Wrapf(err, "Error creating docker network: %s", sub) } dockerLog.Debugf("Network created: %s - %s :: %s", name, r.ID, sub) return r.ID, sub, nil } // Run Docker container func (de *DockerEngine) RunContainer(ctx context.Context, name, tag string, params RunContainerParams) (string, error) { // Hosts var hosts []string for host, ip := range params.Hosts { hosts = append(hosts, fmt.Sprintf("%s:%s", host, ip)) } // Ports var rawPorts []string for contPort, hostPort := range params.Ports { rawPorts = append(rawPorts, fmt.Sprintf("%d:%d", hostPort, contPort)) } ports, bindings, err := nat.ParsePortSpecs(rawPorts) if err != nil { return "", errors.Wrapf(err, "Error parsing ports for %s", name) } // Environ var environ []string for k, v := range params.Environ { environ = append(environ, fmt.Sprintf("%s=%s", k, v)) } // Mounts var mounts []mount.Mount for _, fileMount := range params.FileMounts { mounts = append(mounts, mount.Mount{ Type: "bind", Source: fileMount.HostFile, Target: fileMount.ContainerFile, ReadOnly: fileMount.Readonly, }) } var dns []string if params.DiscoverDNS != "" { dns = append(dns, params.DiscoverDNS) } hostCont := &container.HostConfig{ NetworkMode: container.NetworkMode(params.NetworkId), ExtraHosts: hosts, AutoRemove: false, DNS: dns, DNSSearch: []string{"xenv"}, RestartPolicy: container.RestartPolicy{Name: "on-failure"}, PortBindings: bindings, Mounts: mounts, } netConf := &network.NetworkingConfig{ EndpointsConfig: map[string]*network.EndpointSettings{ params.NetworkId: { IPAMConfig: &network.EndpointIPAMConfig{ IPv4Address: params.IP, }, }, }, } r, err := de.cl.ContainerCreate(ctx, &container.Config{ Hostname: name, AttachStdout: true, AttachStderr: true, Image: tag, ExposedPorts: ports, Env: environ, Cmd: params.Cmd, Entrypoint: params.Entrypoint, }, hostCont, netConf, lib.NewIdShort()) if err != nil { return "", errors.Wrapf(err, "Error creating container %s", tag) } err = de.cl.ContainerStart(ctx, r.ID, types.ContainerStartOptions{}) if err != nil { return "", errors.Wrapf(err, "Error starting container: %s", tag) } dockerLog.Debugf("Container started: %s, network=%s", tag, params.NetworkId) return r.ID, nil } func (de *DockerEngine) RemoveContainer(ctx context.Context, id string) error { return de.cl.ContainerRemove(ctx, id, types.ContainerRemoveOptions{ RemoveVolumes: true, Force: true, }) } func (de *DockerEngine) StopContainer(ctx context.Context, id string) error { return de.cl.ContainerKill(ctx, id, "INT") } func (de *DockerEngine) RestartContainer(ctx context.Context, id string) error { return de.cl.ContainerStart(ctx, id, types.ContainerStartOptions{}) } func (de *DockerEngine) RemoveNetwork(ctx context.Context, id string) error { return de.cl.NetworkRemove(ctx, id) } func (de *DockerEngine) BuildImage(ctx context.Context, imgName string, buildContext io.Reader) error { opts := types.ImageBuildOptions{ NetworkMode: "bridge", Tags: []string{imgName}, Remove: true, ForceRemove: true, SuppressOutput: true, NoCache: true, PullParent: true, } r, err := de.cl.ImageBuild(ctx, buildContext, opts) if r.Body != nil { defer r.Body.Close() // Check server response if rerr := de.isErrorResponse(r.Body); rerr != nil { return errors.Errorf("Error from Docker server: %s", rerr) } } if err == nil { dockerLog.Debugf("Image built: %s", imgName) } return err } func (de *DockerEngine) RemoveImage(ctx context.Context, imgName string) error { opts := types.ImageRemoveOptions{ Force: true, PruneChildren: true, } _, err := de.cl.ImageRemove(ctx, imgName, opts) if err == nil { dockerLog.Debugf("Image removed: %s", imgName) } return err } func (de *DockerEngine) FetchImage(ctx context.Context, imgName string) error { out, err := de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{}) var auth string if err != nil { // Retry with auth auth, err = de.getAuthForImage(imgName) if err != nil { return err } out, err = de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{ RegistryAuth: auth, }) } if err == nil { dockerLog.Debugf("Image fetched: %s", imgName) } if out != nil { _, _ = io.Copy(ioutil.Discard, out) } return err } func (de *DockerEngine) GetImagePorts(ctx context.Context, tag string) ([]uint16, error) { r, _, err := de.cl.ImageInspectWithRaw(ctx, tag) if err != nil { return nil, errors.Wrapf(err, "Error inspecting image %s", tag) } var ports []uint16 for p := range r.Config.ExposedPorts { ports = append(ports, uint16(p.Int())) } return ports, nil } func (de *DockerEngine) Terminate() { de.cl.Close() } func (de *DockerEngine) isErrorResponse(r io.Reader) error { data, err := ioutil.ReadAll(r) if err != nil { return err } split := bytes.Split(data, []byte("\n")) type errResp struct { Error string } for i := range split { e := errResp{} if err := json.Unmarshal(split[i], &e); err == nil && e.Error != "" { return errors.New(e.Error) } } return nil } // TODO: This should probably be made more robust at some point func (de *DockerEngine) getSubNet() (string, error) { de.subNetMu.Lock() defer de.subNetMu.Unlock() addrs, err := net.InterfaceAddrs() if err != nil { return "", errors.Wrap(err, "Error getting network addresses") } var nets []*net.IPNet for _, addr := range addrs { dockerLog.Debugf("Inspecting interface %s", addr.String()) _, n, err := net.ParseCIDR(addr.String()) if err != nil { dockerLog.Warningf("Error parsing address: %s", addr.String()) continue } nets = append(nets, n) } netaddr := func() string { tpl := "10.%d.%d.0/24"
{ return "", "", err }
conditional_block
marktree.js
119,97,115,100); var press = new Array(47,45,42,43); // keydown codes // var keys2=new Array(87,65,83,68); var keys= new Array(38,37,40,39); // keyset 1 = keydown, otherwise press function checkup(keyset,n) { if (keyset==1) return (n==keys[0]); return ((n==press[0]) /*|| (n==press2[0])*/) } function checkdn(keyset,n) { if (keyset==1) return (n==keys[2]); return ((n==press[2]) /*|| (n==press2[2])*/) } function checkl(keyset,n) { if (keyset==1) return (n==keys[1]); return ((n==press[1]) /*|| (n==press2[1])*/) } function checkr(keyset,n) { if (keyset==1) return (n==keys[3]); return ((n==press[3]) /*|| (n==press2[3])*/) } function is_exp(n) { if (n==null) return false; return ((n.className=='exp') || (n.className=='exp_active')); } function is_col(n) { if (n==null) return false; return ((n.className=='col') || (n.className=='col_active')); } function is_basic(n) { if (n==null) return false; return ((n.className=='basic') || (n.className=='basic_active')); } /* returns i>=0 if true */ function is_active(node) { if (node.className==null) return false return node.className.indexOf('_active'); } function toggle_class(node) { if ((node==null) || (node.className==null)) return; str=node.className; result=""; i = str.indexOf('_active'); if (i>0) result= str.substr(0,i); else result= str+"_active"; node.className=result; return node; } function activate(node) { node.style.backgroundColor='#eeeeff'; } function deactivate(node) { node.style.backgroundColor='#ffffff'; } function is_list_node(n) { if (n==null) return false; if (n.className==null) return false; if ( (is_exp(n)) || (is_col(n)) || (is_basic(n)) ) return true; else return false; } function get_href(n) { alist=n.attributes; if (alist!=null) { hr = alist.getNamedItem('href'); if (hr!=null) return hr.nodeValue; } if (n.childNodes.length==0) return ''; for (var i=0; i<n.childNodes.length; i++) { s = get_href(n.childNodes[i]); if (s!='') return s; } return ''; } function get_link(n) { if (n==null) return null; if (n.style==null) return null; // disabling uncontrolled recursion to prevent error messages on IE // when trying to focus to invisible links (readonly mode) // alert(n.nodeName+' '+n.className); if ((n.nodeName=='UL') && (n.className=='sub')) return null; if (n.nodeName=='A') return n; if (n.childNodes.length==0) return null; for (var i=0; i<n.childNodes.length; i++) { s = get_link(n.childNodes[i]); if (s!=null) return s; } return null; } function set_lastnode(n) { /*var d = new Date(); var t_mil = d.getMilliseconds();*/ // testattu nopeuksia explorerilla, ei merkittäviä eroja if (lastnode==n) return; /* deactivate(lastnode) lastnode=n; activate(lastnode);*/ if (is_active(lastnode)>=0) toggle_class(lastnode); lastnode=n; if (!(is_active(lastnode)>=0)) toggle_class(lastnode); /*var d2 = new Date(); var t_mil2 = d2.getMilliseconds(); window.alert(t_mil2-t_mil);*/ } function next_list_node() { tempIndex = list_index; while (tempIndex<listnodes.length-1) { tempIndex++; var x = listnodes[tempIndex]; if (is_list_node(x)) { list_index=tempIndex; return; } } } function prev_list_node() { tempIndex = list_index; while (tempIndex>0) { tempIndex--; var x = listnodes[tempIndex]; if (is_list_node(x)) { list_index=tempIndex; return; } } } function getsub (li) { if (li.childNodes.length==0) return null; for (var c = 0; c < li.childNodes.length; c++) if ( (li.childNodes[c].className == 'sub') || (li.childNodes[c].className == 'subexp') ) return li.childNodes[c]; } function find_listnode_recursive (li) { if (is_list_node(li)) return li; if (li.childNodes.length==0) return null; result=null; for (var c = 0; c < li.childNodes.length; c++) { result=find_listnode_recursive(li.childNodes[c]); if (result!=null) return result; } return null; } function next_child_listnode(li) { var result=null; for (var i=0; i<li.childNodes.length; i++) { result=find_listnode_recursive(li.childNodes[i]); if (result!=null) return result; } return null; } function next_actual_sibling_listnode(li) { if (li==null) return null; var temp=li; while (1) { var n = temp.nextSibling; if (n==null) { n=parent_listnode(temp); return next_actual_sibling_listnode(n); } if (is_list_node(n)) return n; temp=n; } } function next_sibling_listnode(li) { if (li==null) return null; var result=null; var temp=li; if (is_col(temp)) return next_child_listnode(temp); while (1) { var n = temp.nextSibling; if (n==null) { n=parent_listnode(temp); return next_actual_sibling_listnode(n); } if (is_list_node(n)) return n; temp=n; } } function last_sibling_listnode(li) { if (li==null) return null; var temp=li; var last=null; while(1) { var n = temp.nextSibling; if (is_list_node(temp)) last = temp; if (n==null) { if (is_col(last)) return last_sibling_listnode(next_child_listnode(last)); else return last; } temp = n; } } function prev_sibling_listnode(li) { if (li==null) return null; var temp=li; var n = null; while (1) { n = temp.previousSibling; if (n==null) { return parent_listnode(li); } if (is_list_node(n)) { if (is_col(n)) { return last_sibling_listnode(next_child_listnode(n)); } else { return n; } } temp=n; } } function pa
i) { // added 12.7.2004 to prevent IE error when readonly mode==true if (li==null) return null; n=li; while (1) { n=n.parentNode; if (n==null) return null; if (is_list_node(n)) return n; } } function getVisibleParents(id) { var n = document.getElementById(id); while(1) { expand(n); n = parent_listnode(n); if (n==null) return; } } function onClickHandler (evt) { if (lastnode==null) { listnodes = document.getElementsByTagName('li'); lastnode=listnodes[1]; temp=listnodes[1]; } var target = evt ? evt.target : event.srcElement; if (!is_list_node(target)) return; toggle(target); set_lastnode(target); } function expand(node) { if (!is_exp(node)) return; if (node.className=='exp_active') node.className='col_active'; else node.className='col'; setSubClass(node,'subexp'); // getsub(node).className='subexp'; } function collapse(node) { if (!is_col(node)) return; if (node.className=='col_active') node.className='exp_active' else node.className='exp'; setSubClass(node,'sub'); // getsub(node).className='sub'; } function setSubClass(node,name) { sub = getsub(node); if (sub==null) return; sub.className=name; } function toggle(target) { if (!is_list_node(target)) return; if (is_col(target)) { target.className='exp';
rent_listnode(l
identifier_name
marktree.js
(119,97,115,100); var press = new Array(47,45,42,43); // keydown codes // var keys2=new Array(87,65,83,68); var keys= new Array(38,37,40,39); // keyset 1 = keydown, otherwise press function checkup(keyset,n) { if (keyset==1) return (n==keys[0]); return ((n==press[0]) /*|| (n==press2[0])*/) } function checkdn(keyset,n) { if (keyset==1) return (n==keys[2]); return ((n==press[2]) /*|| (n==press2[2])*/) } function checkl(keyset,n) { if (keyset==1) return (n==keys[1]); return ((n==press[1]) /*|| (n==press2[1])*/) } function checkr(keyset,n) { if (keyset==1) return (n==keys[3]); return ((n==press[3]) /*|| (n==press2[3])*/) } function is_exp(n) { if (n==null) return false; return ((n.className=='exp') || (n.className=='exp_active')); } function is_col(n) { if (n==null) return false; return ((n.className=='col') || (n.className=='col_active')); } function is_basic(n) { if (n==null) return false; return ((n.className=='basic') || (n.className=='basic_active')); } /* returns i>=0 if true */ function is_active(node) { if (node.className==null) return false return node.className.indexOf('_active'); } function toggle_class(node) { if ((node==null) || (node.className==null)) return; str=node.className; result=""; i = str.indexOf('_active'); if (i>0) result= str.substr(0,i); else result= str+"_active"; node.className=result; return node; } function activate(node) { node.style.backgroundColor='#eeeeff'; } function deactivate(node) { node.style.backgroundColor='#ffffff'; } function is_list_node(n) { if (n==null) return false; if (n.className==null) return false; if ( (is_exp(n)) || (is_col(n)) || (is_basic(n)) ) return true; else return false; } function get_href(n) { alist=n.attributes; if (alist!=null) { hr = alist.getNamedItem('href'); if (hr!=null) return hr.nodeValue; } if (n.childNodes.length==0) return ''; for (var i=0; i<n.childNodes.length; i++) { s = get_href(n.childNodes[i]); if (s!='') return s; } return ''; } function get_link(n) { if (n==null) return null; if (n.style==null) return null; // disabling uncontrolled recursion to prevent error messages on IE // when trying to focus to invisible links (readonly mode) // alert(n.nodeName+' '+n.className); if ((n.nodeName=='UL') && (n.className=='sub')) return null; if (n.nodeName=='A') return n; if (n.childNodes.length==0) return null; for (var i=0; i<n.childNodes.length; i++) { s = get_link(n.childNodes[i]); if (s!=null) return s; } return null; } function set_lastnode(n) { /*var d = new Date(); var t_mil = d.getMilliseconds();*/ // testattu nopeuksia explorerilla, ei merkittäviä eroja if (lastnode==n) return; /* deactivate(lastnode) lastnode=n; activate(lastnode);*/ if (is_active(lastnode)>=0) toggle_class(lastnode); lastnode=n; if (!(is_active(lastnode)>=0)) toggle_class(lastnode); /*var d2 = new Date(); var t_mil2 = d2.getMilliseconds(); window.alert(t_mil2-t_mil);*/ } function next_list_node() { tempIndex = list_index; while (tempIndex<listnodes.length-1) { tempIndex++; var x = listnodes[tempIndex]; if (is_list_node(x)) { list_index=tempIndex; return; } } } function prev_list_node() { tempIndex = list_index; while (tempIndex>0) { tempIndex--; var x = listnodes[tempIndex]; if (is_list_node(x)) { list_index=tempIndex; return; } } } function getsub (li) { if (li.childNodes.length==0) return null; for (var c = 0; c < li.childNodes.length; c++) if ( (li.childNodes[c].className == 'sub') || (li.childNodes[c].className == 'subexp') ) return li.childNodes[c]; } function find_listnode_recursive (li) { if (is_list_node(li)) return li; if (li.childNodes.length==0) return null; result=null; for (var c = 0; c < li.childNodes.length; c++) { result=find_listnode_recursive(li.childNodes[c]); if (result!=null) return result; } return null; } function next_child_listnode(li) { var result=null; for (var i=0; i<li.childNodes.length; i++) { result=find_listnode_recursive(li.childNodes[i]); if (result!=null) return result; } return null; } function next_actual_sibling_listnode(li) { if (li==null) return null; var temp=li; while (1) {
return next_actual_sibling_listnode(n); } if (is_list_node(n)) return n; temp=n; } } function next_sibling_listnode(li) { if (li==null) return null; var result=null; var temp=li; if (is_col(temp)) return next_child_listnode(temp); while (1) { var n = temp.nextSibling; if (n==null) { n=parent_listnode(temp); return next_actual_sibling_listnode(n); } if (is_list_node(n)) return n; temp=n; } } function last_sibling_listnode(li) { if (li==null) return null; var temp=li; var last=null; while(1) { var n = temp.nextSibling; if (is_list_node(temp)) last = temp; if (n==null) { if (is_col(last)) return last_sibling_listnode(next_child_listnode(last)); else return last; } temp = n; } } function prev_sibling_listnode(li) { if (li==null) return null; var temp=li; var n = null; while (1) { n = temp.previousSibling; if (n==null) { return parent_listnode(li); } if (is_list_node(n)) { if (is_col(n)) { return last_sibling_listnode(next_child_listnode(n)); } else { return n; } } temp=n; } } function parent_listnode(li) { // added 12.7.2004 to prevent IE error when readonly mode==true if (li==null) return null; n=li; while (1) { n=n.parentNode; if (n==null) return null; if (is_list_node(n)) return n; } } function getVisibleParents(id) { var n = document.getElementById(id); while(1) { expand(n); n = parent_listnode(n); if (n==null) return; } } function onClickHandler (evt) { if (lastnode==null) { listnodes = document.getElementsByTagName('li'); lastnode=listnodes[1]; temp=listnodes[1]; } var target = evt ? evt.target : event.srcElement; if (!is_list_node(target)) return; toggle(target); set_lastnode(target); } function expand(node) { if (!is_exp(node)) return; if (node.className=='exp_active') node.className='col_active'; else node.className='col'; setSubClass(node,'subexp'); // getsub(node).className='subexp'; } function collapse(node) { if (!is_col(node)) return; if (node.className=='col_active') node.className='exp_active' else node.className='exp'; setSubClass(node,'sub'); // getsub(node).className='sub'; } function setSubClass(node,name) { sub = getsub(node); if (sub==null) return; sub.className=name; } function toggle(target) { if (!is_list_node(target)) return; if (is_col(target)) { target.className='exp';
var n = temp.nextSibling; if (n==null) { n=parent_listnode(temp);
random_line_split
marktree.js
) return (n==keys[3]); return ((n==press[3]) /*|| (n==press2[3])*/) } function is_exp(n) { if (n==null) return false; return ((n.className=='exp') || (n.className=='exp_active')); } function is_col(n) { if (n==null) return false; return ((n.className=='col') || (n.className=='col_active')); } function is_basic(n) { if (n==null) return false; return ((n.className=='basic') || (n.className=='basic_active')); } /* returns i>=0 if true */ function is_active(node) { if (node.className==null) return false return node.className.indexOf('_active'); } function toggle_class(node) { if ((node==null) || (node.className==null)) return; str=node.className; result=""; i = str.indexOf('_active'); if (i>0) result= str.substr(0,i); else result= str+"_active"; node.className=result; return node; } function activate(node) { node.style.backgroundColor='#eeeeff'; } function deactivate(node) { node.style.backgroundColor='#ffffff'; } function is_list_node(n) { if (n==null) return false; if (n.className==null) return false; if ( (is_exp(n)) || (is_col(n)) || (is_basic(n)) ) return true; else return false; } function get_href(n) { alist=n.attributes; if (alist!=null) { hr = alist.getNamedItem('href'); if (hr!=null) return hr.nodeValue; } if (n.childNodes.length==0) return ''; for (var i=0; i<n.childNodes.length; i++) { s = get_href(n.childNodes[i]); if (s!='') return s; } return ''; } function get_link(n) { if (n==null) return null; if (n.style==null) return null; // disabling uncontrolled recursion to prevent error messages on IE // when trying to focus to invisible links (readonly mode) // alert(n.nodeName+' '+n.className); if ((n.nodeName=='UL') && (n.className=='sub')) return null; if (n.nodeName=='A') return n; if (n.childNodes.length==0) return null; for (var i=0; i<n.childNodes.length; i++) { s = get_link(n.childNodes[i]); if (s!=null) return s; } return null; } function set_lastnode(n) { /*var d = new Date(); var t_mil = d.getMilliseconds();*/ // testattu nopeuksia explorerilla, ei merkittäviä eroja if (lastnode==n) return; /* deactivate(lastnode) lastnode=n; activate(lastnode);*/ if (is_active(lastnode)>=0) toggle_class(lastnode); lastnode=n; if (!(is_active(lastnode)>=0)) toggle_class(lastnode); /*var d2 = new Date(); var t_mil2 = d2.getMilliseconds(); window.alert(t_mil2-t_mil);*/ } function next_list_node() { tempIndex = list_index; while (tempIndex<listnodes.length-1) { tempIndex++; var x = listnodes[tempIndex]; if (is_list_node(x)) { list_index=tempIndex; return; } } } function prev_list_node() { tempIndex = list_index; while (tempIndex>0) { tempIndex--; var x = listnodes[tempIndex]; if (is_list_node(x)) { list_index=tempIndex; return; } } } function getsub (li) { if (li.childNodes.length==0) return null; for (var c = 0; c < li.childNodes.length; c++) if ( (li.childNodes[c].className == 'sub') || (li.childNodes[c].className == 'subexp') ) return li.childNodes[c]; } function find_listnode_recursive (li) { if (is_list_node(li)) return li; if (li.childNodes.length==0) return null; result=null; for (var c = 0; c < li.childNodes.length; c++) { result=find_listnode_recursive(li.childNodes[c]); if (result!=null) return result; } return null; } function next_child_listnode(li) { var result=null; for (var i=0; i<li.childNodes.length; i++) { result=find_listnode_recursive(li.childNodes[i]); if (result!=null) return result; } return null; } function next_actual_sibling_listnode(li) { if (li==null) return null; var temp=li; while (1) { var n = temp.nextSibling; if (n==null) { n=parent_listnode(temp); return next_actual_sibling_listnode(n); } if (is_list_node(n)) return n; temp=n; } } function next_sibling_listnode(li) { if (li==null) return null; var result=null; var temp=li; if (is_col(temp)) return next_child_listnode(temp); while (1) { var n = temp.nextSibling; if (n==null) { n=parent_listnode(temp); return next_actual_sibling_listnode(n); } if (is_list_node(n)) return n; temp=n; } } function last_sibling_listnode(li) { if (li==null) return null; var temp=li; var last=null; while(1) { var n = temp.nextSibling; if (is_list_node(temp)) last = temp; if (n==null) { if (is_col(last)) return last_sibling_listnode(next_child_listnode(last)); else return last; } temp = n; } } function prev_sibling_listnode(li) { if (li==null) return null; var temp=li; var n = null; while (1) { n = temp.previousSibling; if (n==null) { return parent_listnode(li); } if (is_list_node(n)) { if (is_col(n)) { return last_sibling_listnode(next_child_listnode(n)); } else { return n; } } temp=n; } } function parent_listnode(li) { // added 12.7.2004 to prevent IE error when readonly mode==true if (li==null) return null; n=li; while (1) { n=n.parentNode; if (n==null) return null; if (is_list_node(n)) return n; } } function getVisibleParents(id) { var n = document.getElementById(id); while(1) { expand(n); n = parent_listnode(n); if (n==null) return; } } function onClickHandler (evt) { if (lastnode==null) { listnodes = document.getElementsByTagName('li'); lastnode=listnodes[1]; temp=listnodes[1]; } var target = evt ? evt.target : event.srcElement; if (!is_list_node(target)) return; toggle(target); set_lastnode(target); } function expand(node) { if (!is_exp(node)) return; if (node.className=='exp_active') node.className='col_active'; else node.className='col'; setSubClass(node,'subexp'); // getsub(node).className='subexp'; } function collapse(node) { if (!is_col(node)) return; if (node.className=='col_active') node.className='exp_active' else node.className='exp'; setSubClass(node,'sub'); // getsub(node).className='sub'; } function setSubClass(node,name) { sub = getsub(node); if (sub==null) return; sub.className=name; } function toggle(target) { if (!is_list_node(target)) return; if (is_col(target)) { target.className='exp'; setSubClass(target,'sub'); // getsub(target).className='sub'; } else if (is_exp(target)) { target.className='col'; setSubClass(target,'subexp'); // getsub(target).className='subexp'; } } function expandAll(node) { if (node.className=='exp') { node.className='col'; setSubClass(node,'subexp'); // getsub(node).className='subexp'; } var i; if (node.childNodes!=null) // if (node.hasChildNodes()) for ( i = 0; i<node.childNodes.length; i++) expandAll(node.childNodes[i]); } function collapseAll(node) {
if (node.className=='col') { node.className='exp'; setSubClass(node,'sub'); // getsub(node).className='sub'; } var i; if (node.childNodes!=null) // for opera if (node.hasChildNodes()) for ( i = 0; i<node.childNodes.length; i++) collapseAll(node.childNodes[i]); }
identifier_body
xcb.rs
fn poll_events(&self) -> Option<Event> { let event = match self.conn.poll_for_event() { Some(event) => event, None => return None, }; match event.response_type() & !0x80 { xcb::EXPOSE => return Some(Event::ExposedEvent), xcb::KEY_PRESS => return Some(Event::KeyEvent(KeyEvent::KeyPress)), xcb::KEY_RELEASE => return Some(Event::KeyEvent(KeyEvent::KeyReleased)), event => { println!("UNKOWN EVENT {:?}", event); return None; } }; } pub fn send_message(&self, destination: &Window, event: Event) { match event { Event::ClientMessageEvent {window, event_type, data , ..} => { let message_data = xcb::ffi::xproto::xcb_client_message_data_t::from_data32(data); let event = xcb::Event::<xcb::ffi::xproto::xcb_client_message_event_t>::new( 32, window, event_type, message_data ); xcb::send_event_checked( &self.conn, false, destination.id, xcb::EVENT_MASK_SUBSTRUCTURE_REDIRECT, &event ).request_check().unwrap(); } _ => { //TODO(fpalacios): Ver que hacer acá } }; self.flush().unwrap(); } pub fn flush(&self) -> Result<(), ()> { return if self.conn.flush() { Ok(()) } else { Err(()) }; } fn generate_id(&self) -> u32 { return self.conn.generate_id(); } } pub struct Screen<'client, 'conn> { pub id : ScreenID, pub client : &'client Client<'conn>, pub xcb_screen: xcb::Screen<'client>, } impl<'client, 'conn> Screen<'client, 'conn> { pub fn from_id(client: &'client Client<'conn>, id: ScreenID) -> Option<Screen<'client, 'conn>> { let xcb_screen = client.conn.get_setup().roots().nth(std::convert::TryInto::try_into(id).unwrap())?; return Some( Screen { id, client, xcb_screen, } ); } pub fn root_window(&self) -> Window { return Window { screen: self, id: self.xcb_screen.root() }; } pub fn get_black_pixel(&self) -> Color { return self.xcb_screen.black_pixel(); } pub fn get_white_pixel(&self) -> Color { return self.xcb_screen.white_pixel(); } } pub struct Window<'screen, 'client, 'conn> { pub screen: &'screen Screen<'client, 'conn>, pub id: WindowID, } impl<'screen, 'client, 'conn> Window<'screen, 'client, 'conn> { pub fn children(&self) -> Vec<Window<'screen, 'client, 'conn>> { let tree = xcb::query_tree(&self.screen.client.conn, self.id).get_reply().unwrap(); let children = tree.children(); let mut result = Vec::with_capacity(children.len()); for child in children { result.push(Window { screen: self.screen, id: child.clone()}); } return result; } pub fn get_property(&self, atom: AtomID) -> Result<Property, Error> { let property = match xcb::get_property( &self.screen.client.conn, false, self.id, atom, xcb::ATOM_ANY, 0, 1024 ).get_reply() { Ok(property) => property, Err(err) => { return Err( Error { error_code: err.error_code() } ); } }; let value = match property.type_() { xcb::ATOM_STRING => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()), xcb::ATOM_INTEGER => PropertyValue::I32(property.value::<i32>()[0]), xcb::ATOM_NONE => PropertyValue::None, xcb::ATOM_ATOM => PropertyValue::Atom(property.value::<u32>()[0]), xcb::ATOM_CARDINAL => PropertyValue::U32(property.value::<u32>()[0]), unknown_atom => { match self.screen.client.find_atom_name(unknown_atom).as_ref() { "UTF8_STRING" => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()), _ => PropertyValue::UnknownAtom(unknown_atom) } } }; return Ok(Property{ key: atom, value }); } pub fn set_property(&self, property: &Property) { let atom_type = property.value.get_type_atom_id(); match &property.value { PropertyValue::String(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 8, val.as_bytes() ); }, PropertyValue::Atom(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::I32(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::U32(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::None => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[xcb::ATOM_NONE] ); }, PropertyValue::UnknownAtom(_) => { //TODO(fpalacios): Que hacemo acá? panic!("Que hacemo acá?"); }, }; } pub fn geometry(&self) -> (i16, i16, u16, u16) { let geometry = match xcb::get_geometry(&self.screen.client.conn, self.id).get_reply() { Ok(geomerty) => geomerty, Err(error) => { println!("Error al obtener la geometria. Error code [{}]", error.error_code()); panic!(); } }; return (geometry.x(), geometry.y(), geometry.width(), geometry.height()); } pub fn map(&self) { xcb::map_window(&self.screen.client.conn, self.id); self.screen.client.flush().unwrap(); } pub fn create_child_window( &self, (x, y, width, height): (i16, i16, u16, u16), depth : u8, colormap : Option<&ColorMap>, background_pixel : Option<u32>, border_pixel : Option<u32>, visual_id : Option<VisualID> ) -> Result<Window<'screen, 'client, 'conn>, Error> { let child_id = self.screen.client.generate_id(); let mut window_attributes = vec![ ( xcb::CW_EVENT_MASK, xcb::GC_GRAPHICS_EXPOSURES | xcb::EVENT_MASK_KEY_PRESS ) ]; if let Some(colormap) = colormap { window_attributes.push((xcb::CW_COLORMAP, colormap.id)); } if let Some(background_pixel) = background_pixel { window_attributes.push((xcb::CW_BACK_PIXEL, background_pixel)) } if let Some(border_pixel) = border_pixel { window_attributes.push((xcb::CW_BORDER_PIXEL, border_pixel)); } let visual_id = match visual_id { Some(visual_id) => visual_id, None => self.screen.xcb_screen.root_visual() }; if let Err(e) = xcb::create_window_checked( &self.screen.client.conn, depth, child_id, self.id, x, y, width, height, 1, xcb::WINDOW_CLASS_INPUT_OUTPUT as u16, visual_id, &window_attributes ).request_check() {
return Err(Error{error_code: e.error_code()}) };
conditional_block
xcb.rs
impl PropertyValue { pub fn get_type_atom_id(&self) -> AtomID { return match self { PropertyValue::String(_) => xcb::ATOM_STRING, PropertyValue::I32(_) => xcb::ATOM_INTEGER, PropertyValue::U32(_) => xcb::ATOM_CARDINAL, PropertyValue::Atom(_) => xcb::ATOM_ATOM, PropertyValue::UnknownAtom(atom_id) => atom_id.clone(), PropertyValue::None => xcb::ATOM_NONE }; } } pub struct Client<'conn> { pub conn : &'conn xcb::Connection, } impl<'conn> Client<'conn> { pub fn new(conn: &'conn xcb::Connection) -> Client { return Client { conn, }; } pub fn find_atom_id_by_name(&self, name: &str) -> Option<AtomID> { let atom_id = xcb::intern_atom(&self.conn, false, name).get_reply().unwrap().atom(); return if atom_id == xcb::ATOM_NONE { None } else { Some(atom_id) }; } pub fn find_atom_name(&self, atom_id: AtomID) -> String { return xcb::get_atom_name(&self.conn, atom_id).get_reply().unwrap().name().to_owned(); } pub fn poll_events(&self) -> Option<Event> { let event = match self.conn.poll_for_event() { Some(event) => event, None => return None, }; match event.response_type() & !0x80 { xcb::EXPOSE => return Some(Event::ExposedEvent), xcb::KEY_PRESS => return Some(Event::KeyEvent(KeyEvent::KeyPress)), xcb::KEY_RELEASE => return Some(Event::KeyEvent(KeyEvent::KeyReleased)), event => { println!("UNKOWN EVENT {:?}", event); return None; } }; } pub fn send_message(&self, destination: &Window, event: Event) { match event { Event::ClientMessageEvent {window, event_type, data , ..} => { let message_data = xcb::ffi::xproto::xcb_client_message_data_t::from_data32(data); let event = xcb::Event::<xcb::ffi::xproto::xcb_client_message_event_t>::new( 32, window, event_type, message_data ); xcb::send_event_checked( &self.conn, false, destination.id, xcb::EVENT_MASK_SUBSTRUCTURE_REDIRECT, &event ).request_check().unwrap(); } _ => { //TODO(fpalacios): Ver que hacer acá } }; self.flush().unwrap(); } pub fn flush(&self) -> Result<(), ()> { return if self.conn.flush() { Ok(()) } else { Err(()) }; } fn generate_id(&self) -> u32 { return self.conn.generate_id(); } } pub struct Screen<'client, 'conn> { pub id : ScreenID, pub client : &'client Client<'conn>, pub xcb_screen: xcb::Screen<'client>, } impl<'client, 'conn> Screen<'client, 'conn> { pub fn from_id(client: &'client Client<'conn>, id: ScreenID) -> Option<Screen<'client, 'conn>> { let xcb_screen = client.conn.get_setup().roots().nth(std::convert::TryInto::try_into(id).unwrap())?; return Some( Screen { id, client, xcb_screen, } ); } pub fn root_window(&self) -> Window { return Window { screen: self, id: self.xcb_screen.root() }; } pub fn get_black_pixel(&self) -> Color { return self.xcb_screen.black_pixel(); } pub fn get_white_pixel(&self) -> Color { return self.xcb_screen.white_pixel(); } } pub struct Window<'screen, 'client, 'conn> { pub screen: &'screen Screen<'client, 'conn>, pub id: WindowID, } impl<'screen, 'client, 'conn> Window<'screen, 'client, 'conn> { pub fn children(&self) -> Vec<Window<'screen, 'client, 'conn>> { let tree = xcb::query_tree(&self.screen.client.conn, self.id).get_reply().unwrap(); let children = tree.children(); let mut result = Vec::with_capacity(children.len()); for child in children { result.push(Window { screen: self.screen, id: child.clone()}); } return result; } pub fn get_property(&self, atom: AtomID) -> Result<Property, Error> { let property = match xcb::get_property( &self.screen.client.conn, false, self.id, atom, xcb::ATOM_ANY, 0, 1024 ).get_reply() { Ok(property) => property, Err(err) => { return Err( Error { error_code: err.error_code() } ); } }; let value = match property.type_() { xcb::ATOM_STRING => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()), xcb::ATOM_INTEGER => PropertyValue::I32(property.value::<i32>()[0]), xcb::ATOM_NONE => PropertyValue::None, xcb::ATOM_ATOM => PropertyValue::Atom(property.value::<u32>()[0]), xcb::ATOM_CARDINAL => PropertyValue::U32(property.value::<u32>()[0]), unknown_atom => { match self.screen.client.find_atom_name(unknown_atom).as_ref() { "UTF8_STRING" => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()), _ => PropertyValue::UnknownAtom(unknown_atom) } } }; return Ok(Property{ key: atom, value }); } pub fn set_property(&self, property: &Property) { let atom_type = property.value.get_type_atom_id(); match &property.value { PropertyValue::String(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 8, val.as_bytes() ); }, PropertyValue::Atom(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::I32(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::U32(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::None => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[xcb::ATOM_NONE] ); }, PropertyValue::UnknownAtom(_) => { //TODO(fpalacios): Que hacemo acá? panic!("Que hacemo acá?"); }, }; } pub fn geometry(&self) -> (i16, i16, u16, u16) { let geometry = match xcb::get_geometry(&self.screen.client.conn, self.id).get_reply() { Ok(geomerty) => geomerty, Err(error) => { println!("Error al obtener la geometria. Error code [{}]", error.error_code()); panic!(); } }; return (geometry.x(), geometry.y(), geometry.width(), geometry.height()); } pub fn map(&self) { xcb::map_window(&self.screen.client.conn, self.id); self.screen.client.flush().unwrap(); } pub fn create_child_window( &self, (x, y, width, height): (i16, i16, u16, u16), depth : u8, colormap : Option<&ColorMap>, background_pixel : Option<u32>, border_pixel : Option<u32>, visual_id : Option<VisualID> )
None, Atom(AtomID), UnknownAtom(AtomID), }
random_line_split
xcb.rs
KEY_RELEASE => return Some(Event::KeyEvent(KeyEvent::KeyReleased)), event => { println!("UNKOWN EVENT {:?}", event); return None; } }; } pub fn send_message(&self, destination: &Window, event: Event) { match event { Event::ClientMessageEvent {window, event_type, data , ..} => { let message_data = xcb::ffi::xproto::xcb_client_message_data_t::from_data32(data); let event = xcb::Event::<xcb::ffi::xproto::xcb_client_message_event_t>::new( 32, window, event_type, message_data ); xcb::send_event_checked( &self.conn, false, destination.id, xcb::EVENT_MASK_SUBSTRUCTURE_REDIRECT, &event ).request_check().unwrap(); } _ => { //TODO(fpalacios): Ver que hacer acá } }; self.flush().unwrap(); } pub fn flush(&self) -> Result<(), ()> { return if self.conn.flush() { Ok(()) } else { Err(()) }; } fn generate_id(&self) -> u32 { return self.conn.generate_id(); } } pub struct Screen<'client, 'conn> { pub id : ScreenID, pub client : &'client Client<'conn>, pub xcb_screen: xcb::Screen<'client>, } impl<'client, 'conn> Screen<'client, 'conn> { pub fn from_id(client: &'client Client<'conn>, id: ScreenID) -> Option<Screen<'client, 'conn>> { let xcb_screen = client.conn.get_setup().roots().nth(std::convert::TryInto::try_into(id).unwrap())?; return Some( Screen { id, client, xcb_screen, } ); } pub fn root_window(&self) -> Window { return Window { screen: self, id: self.xcb_screen.root() }; } pub fn get_black_pixel(&self) -> Color { return self.xcb_screen.black_pixel(); } pub fn get_white_pixel(&self) -> Color { return self.xcb_screen.white_pixel(); } } pub struct Window<'screen, 'client, 'conn> { pub screen: &'screen Screen<'client, 'conn>, pub id: WindowID, } impl<'screen, 'client, 'conn> Window<'screen, 'client, 'conn> { pub fn children(&self) -> Vec<Window<'screen, 'client, 'conn>> { let tree = xcb::query_tree(&self.screen.client.conn, self.id).get_reply().unwrap(); let children = tree.children(); let mut result = Vec::with_capacity(children.len()); for child in children { result.push(Window { screen: self.screen, id: child.clone()}); } return result; } pub fn get_property(&self, atom: AtomID) -> Result<Property, Error> { let property = match xcb::get_property( &self.screen.client.conn, false, self.id, atom, xcb::ATOM_ANY, 0, 1024 ).get_reply() { Ok(property) => property, Err(err) => { return Err( Error { error_code: err.error_code() } ); } }; let value = match property.type_() { xcb::ATOM_STRING => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()), xcb::ATOM_INTEGER => PropertyValue::I32(property.value::<i32>()[0]), xcb::ATOM_NONE => PropertyValue::None, xcb::ATOM_ATOM => PropertyValue::Atom(property.value::<u32>()[0]), xcb::ATOM_CARDINAL => PropertyValue::U32(property.value::<u32>()[0]), unknown_atom => { match self.screen.client.find_atom_name(unknown_atom).as_ref() { "UTF8_STRING" => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()), _ => PropertyValue::UnknownAtom(unknown_atom) } } }; return Ok(Property{ key: atom, value }); } pub fn set_property(&self, property: &Property) { let atom_type = property.value.get_type_atom_id(); match &property.value { PropertyValue::String(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 8, val.as_bytes() ); }, PropertyValue::Atom(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::I32(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::U32(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::None => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[xcb::ATOM_NONE] ); }, PropertyValue::UnknownAtom(_) => { //TODO(fpalacios): Que hacemo acá? panic!("Que hacemo acá?"); }, }; } pub fn geometry(&self) -> (i16, i16, u16, u16) { let geometry = match xcb::get_geometry(&self.screen.client.conn, self.id).get_reply() { Ok(geomerty) => geomerty, Err(error) => { println!("Error al obtener la geometria. Error code [{}]", error.error_code()); panic!(); } }; return (geometry.x(), geometry.y(), geometry.width(), geometry.height()); } pub fn map(&self) { xcb::map_window(&self.screen.client.conn, self.id); self.screen.client.flush().unwrap(); } pub fn create_child_window( &self, (x, y, width, height): (i16, i16, u16, u16), depth : u8, colormap : Option<&ColorMap>, background_pixel : Option<u32>, border_pixel : Option<u32>, visual_id : Option<VisualID> ) -> Result<Window<'screen, 'client, 'conn>, Error> { let child_id = self.screen.client.generate_id(); let mut window_attributes = vec![ ( xcb::CW_EVENT_MASK, xcb::GC_GRAPHICS_EXPOSURES | xcb::EVENT_MASK_KEY_PRESS ) ]; if let Some(colormap) = colormap { window_attributes.push((xcb::CW_COLORMAP, colormap.id)); } if let Some(background_pixel) = background_pixel { window_attributes.push((xcb::CW_BACK_PIXEL, background_pixel)) } if let Some(border_pixel) = border_pixel { window_attributes.push((xcb::CW_BORDER_PIXEL, border_pixel)); } let visual_id = match visual_id { Some(visual_id) => visual_id, None => self.screen.xcb_screen.root_visual() }; if let Err(e) = xcb::create_window_checked( &self.screen.client.conn, depth, child_id, self.id, x, y, width, height, 1, xcb::WINDOW_CLASS_INPUT_OUTPUT as u16, visual_id, &window_attributes ).request_check() { return Err(Error{error_code: e.error_code()}) }; self.screen.client.flush().unwrap(); let window = Window { screen: self.screen, id : child_id, }; window.map(); return Ok(window); } } pub struct GraphicsContext<'client, 'conn> { id : GraphicsContextID, client: &'client Client<'conn> } impl<'client, 'conn> GraphicsContext<'client, 'conn> { pub fn gen
erate(wi
identifier_name
xcb.rs
, name).get_reply().unwrap().atom(); return if atom_id == xcb::ATOM_NONE { None } else { Some(atom_id) }; } pub fn find_atom_name(&self, atom_id: AtomID) -> String { return xcb::get_atom_name(&self.conn, atom_id).get_reply().unwrap().name().to_owned(); } pub fn poll_events(&self) -> Option<Event>
pub fn send_message(&self, destination: &Window, event: Event) { match event { Event::ClientMessageEvent {window, event_type, data , ..} => { let message_data = xcb::ffi::xproto::xcb_client_message_data_t::from_data32(data); let event = xcb::Event::<xcb::ffi::xproto::xcb_client_message_event_t>::new( 32, window, event_type, message_data ); xcb::send_event_checked( &self.conn, false, destination.id, xcb::EVENT_MASK_SUBSTRUCTURE_REDIRECT, &event ).request_check().unwrap(); } _ => { //TODO(fpalacios): Ver que hacer acá } }; self.flush().unwrap(); } pub fn flush(&self) -> Result<(), ()> { return if self.conn.flush() { Ok(()) } else { Err(()) }; } fn generate_id(&self) -> u32 { return self.conn.generate_id(); } } pub struct Screen<'client, 'conn> { pub id : ScreenID, pub client : &'client Client<'conn>, pub xcb_screen: xcb::Screen<'client>, } impl<'client, 'conn> Screen<'client, 'conn> { pub fn from_id(client: &'client Client<'conn>, id: ScreenID) -> Option<Screen<'client, 'conn>> { let xcb_screen = client.conn.get_setup().roots().nth(std::convert::TryInto::try_into(id).unwrap())?; return Some( Screen { id, client, xcb_screen, } ); } pub fn root_window(&self) -> Window { return Window { screen: self, id: self.xcb_screen.root() }; } pub fn get_black_pixel(&self) -> Color { return self.xcb_screen.black_pixel(); } pub fn get_white_pixel(&self) -> Color { return self.xcb_screen.white_pixel(); } } pub struct Window<'screen, 'client, 'conn> { pub screen: &'screen Screen<'client, 'conn>, pub id: WindowID, } impl<'screen, 'client, 'conn> Window<'screen, 'client, 'conn> { pub fn children(&self) -> Vec<Window<'screen, 'client, 'conn>> { let tree = xcb::query_tree(&self.screen.client.conn, self.id).get_reply().unwrap(); let children = tree.children(); let mut result = Vec::with_capacity(children.len()); for child in children { result.push(Window { screen: self.screen, id: child.clone()}); } return result; } pub fn get_property(&self, atom: AtomID) -> Result<Property, Error> { let property = match xcb::get_property( &self.screen.client.conn, false, self.id, atom, xcb::ATOM_ANY, 0, 1024 ).get_reply() { Ok(property) => property, Err(err) => { return Err( Error { error_code: err.error_code() } ); } }; let value = match property.type_() { xcb::ATOM_STRING => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()), xcb::ATOM_INTEGER => PropertyValue::I32(property.value::<i32>()[0]), xcb::ATOM_NONE => PropertyValue::None, xcb::ATOM_ATOM => PropertyValue::Atom(property.value::<u32>()[0]), xcb::ATOM_CARDINAL => PropertyValue::U32(property.value::<u32>()[0]), unknown_atom => { match self.screen.client.find_atom_name(unknown_atom).as_ref() { "UTF8_STRING" => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()), _ => PropertyValue::UnknownAtom(unknown_atom) } } }; return Ok(Property{ key: atom, value }); } pub fn set_property(&self, property: &Property) { let atom_type = property.value.get_type_atom_id(); match &property.value { PropertyValue::String(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 8, val.as_bytes() ); }, PropertyValue::Atom(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::I32(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::U32(val) => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[val.clone()] ); }, PropertyValue::None => { xcb::change_property( &self.screen.client.conn, xcb::PROP_MODE_REPLACE as u8, self.id, property.key, atom_type, 32, &[xcb::ATOM_NONE] ); }, PropertyValue::UnknownAtom(_) => { //TODO(fpalacios): Que hacemo acá? panic!("Que hacemo acá?"); }, }; } pub fn geometry(&self) -> (i16, i16, u16, u16) { let geometry = match xcb::get_geometry(&self.screen.client.conn, self.id).get_reply() { Ok(geomerty) => geomerty, Err(error) => { println!("Error al obtener la geometria. Error code [{}]", error.error_code()); panic!(); } }; return (geometry.x(), geometry.y(), geometry.width(), geometry.height()); } pub fn map(&self) { xcb::map_window(&self.screen.client.conn, self.id); self.screen.client.flush().unwrap(); } pub fn create_child_window( &self, (x, y, width, height): (i16, i16, u16, u16), depth : u8, colormap : Option<&ColorMap>, background_pixel : Option<u32>, border_pixel : Option<u32>, visual_id : Option<VisualID> ) -> Result<Window<'screen, 'client, 'conn>, Error> { let child_id = self.screen.client.generate_id(); let mut window_attributes = vec![ ( xcb::CW_EVENT_MASK, xcb::GC_GRAPHICS_EXPOSURES | xcb::EVENT_MASK_KEY_PRESS ) ]; if let Some(colormap) = colormap { window_attributes.push((xcb::CW_COLORMAP, colormap.id)); } if let Some(background_pixel) = background_pixel { window_attributes.push((xcb::CW_BACK_PIXEL, background_pixel)) } if let Some(border_pixel) = border_pixel { window_attributes.push((xcb::CW_BORDER_PIXEL, border_pixel)); } let visual_id = match visual_id { Some(visual_id) => visual_id, None => self.screen.xcb_screen.root_visual() }; if let Err(e) = xcb::create_window_checked( &self.screen.client
{ let event = match self.conn.poll_for_event() { Some(event) => event, None => return None, }; match event.response_type() & !0x80 { xcb::EXPOSE => return Some(Event::ExposedEvent), xcb::KEY_PRESS => return Some(Event::KeyEvent(KeyEvent::KeyPress)), xcb::KEY_RELEASE => return Some(Event::KeyEvent(KeyEvent::KeyReleased)), event => { println!("UNKOWN EVENT {:?}", event); return None; } }; }
identifier_body
models.py
in CustomOrder.get_my_grms(self, profile): grmcfv = bpoi.custom_field_values.get(field__name=f'{grm.role.name}_approver_id') if grmcfv and not grmcfv.int_value: grmcfv.int_value = grm.profile.user.id grmcfv.save() history_msg = _("The '{order}' order has been partially approved by {role_label}.").format(order=escape(self), role_label=grm.role.label) self.add_event('APPROVED', history_msg, profile=profile) def get_my_grms(self, profile=None):
def is_multilevel_approval(self): """ multilevel approvals need to display the roles that have order.approve permissions based on a BPOI custom_field_value where the field name has an "_approver_id" at the end, and a valid role exists on the Group for that cfv field name returns a dictionary of the roles or an empty dict """ if not self.orderitem_set.first(): return {} oi = self.orderitem_set.first().cast() if not oi or not hasattr(oi, 'blueprintitemarguments_set'): return {} bpoi = oi.blueprintitemarguments_set.first() approval_levels = {} if not bpoi: return {} for cfv in bpoi.custom_field_values.filter(field__name__endswith='_approver_id'): role_name = cfv.field.name.replace('_approver_id', '') ml_approver_role = Role.objects.get(name=role_name, permissions__name='order.approve') if ml_approver_role: approval_levels[ml_approver_role] = cfv.value return approval_levels def should_auto_approve(self): """ Return True if this order should be automatically approved. An order should be auto approved if either it's group has auto approve enabled, if the submitter is also an approver on this group, or if all of its order items have environments with auto approve enabled. and now if the multi_level auto approval roles are granted to this user profile """ if self.group and self.group.allow_auto_approval: return True # some orders (like those duplicated by CIT) will not have owners if self.is_multilevel_approval(): if self.has_all_approver_roles(self.owner, self.group): return True return False else: if self.owner and self.owner.has_permission('order.approve', self.group): return True return False def has_all_approver_roles(self, profile, group): ''' for multi_level approvals we want to know if we can approve the order as part of should_auto_approve() ''' #Roles r_needed = Role.objects.filter(grouprolemembership__group=group, permissions__name='order.approve') if len(r_needed) > 1: r_needed = r_needed.exclude(name='approver').distinct() #GroupRoleMemberships r_owned = CustomOrder.get_my_grms(self, profile) if len(r_needed) == len(r_owned): #if the number of GRMs == the number of Roles for that group return True return False def start_approval_process(self, request=None): """ This method determines what order process should be taken, and takes it. By default, the process is to email the approvers, but this can be overriden by customers to instead call out to a hook, and that can be overridden by auto-approval (set on the group or env, or by the owner being an approver or a super admin). This method returns a message summarizing what action was taken. `request` is needed to determine the current portal URL; if not passed, default portal URL is used. """ # done here to avoid circular import from cbhooks.models import HookPoint hook_point = HookPoint.objects.filter(name="order_approval").first() orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point) if orch_actions: #the orchestration action NEEDs to be first in order to allow a hook # to model the approval process correctly and not have something # auto-approve before the hook is run logger.debug("Order Approval orchestration actions exist, so bypassing built-in approver emails.") try: cbhooks.run_hooks("order_approval", order=self) except cbhooks.exceptions.HookFailureException as e: msg = _("Failed to run hook for order approval. Status: {status}," " Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors) raise CloudBoltException(msg) return "" #now that the hooks have run, check if it should be auto-approved profile = request.get_user_profile() if self.is_multilevel_approval(): self.approve_my_grms(profile) if self.should_auto_approve(): logger.debug("Order can be automatically approved, attempting approval by {}".format(self.owner)) jobs, msg = self.approve(self.owner) if jobs: msg = render_to_string( 'orders/approved_msg.html', { 'order': self, 'autoapproved': True, 'num_jobs': len(jobs), 'extramsg': msg, }) return msg else: # No auto approval and no approval hooks, so go with # the default process of emailing a set of approvers, unless the # owner is an approver. msg = _("Order #{order_id} has been submitted for approval. ").format(order_id=self.id) msg += orders.mail.email_approvers(self, request) logger.debug(msg) return msg def approve(self, approver=None, parent_job=None): """ Sets this order to the "Active" status and kicks off the jobs needed to complete this order. One job of the appropriate type ('provision' or 'decom') is kicked off per OrderItem for this order. An exception to this statement is if the "quantity" field on the OrderItem is set, then a set of identical jobs will be kicked off (however many are specified by quantity). Returns list of jobs and error messages from any cleanup of order items. """ if self.status != 'PENDING': msg = _( "Only orders that are in 'PENDING' state can be approved. " "Current state of order is '{status}'." ).format(status=self.status) raise CloudBoltException(msg) approve_this_order = False if self.is_multilevel_approval(): logger.info('models.approve is multilevel!') self.approve_my_grms(approver) logger.info(f'models.approve after approve_my_grms ({approver})!') if self.is_multilevel_approval(): logger.info('models.approve ml approval complete!') approve_this_order = True else: logger.info('models.approve is NOT multilevel!') #single-level approval approve_this_order = True if not approve_this_order: #should only kick off if multilevel approvals msg = _( "Cannot fully approve this order. Multilevel approvals not complete. " "Current state of order is '{status}'." ).format(status=self.status) return [], msg try: # Raise an error to bubble up specific reason as part of the exception self.group.quota_set.can_use(raise_error=True, **self.net_usage()) except QuotaSetError as quota_set_error: raise QuotaError(_( "Cannot approve order #{order_id} because doing so would exceed the " "quota for group '{group}'. {error}" ).format(order_id=self.id, group=self.group, error=quota_set_error)) # Before we create job records, order the order items to make # sure decom jobs are queued before prov jobs. the job engine # may still parallelize them, that's something we can revisit # later. In the meantime, customers can set the concurrency # level to 1 to prevent this. # we're taking advantage of the fact that "decom" comes before # "prov" in the alphabet here. order_items = [oi.cast() for oi in self.top_level_items.order_by( "real_type", "add_date")] order_items, msg = self.__filter_illegal_order_items(order_items) if not order_items: msg = _("{message} There are no valid order items left. This order is " "being marked as complete.").format(message
''' in a multilevel approval, we need a get the GroupRoleMembership mappings and exclude the default approvers role as well, if there's only one role.name == approvers ''' if not profile: profile = self.owner owned_grms = profile.grouprolemembership_set.filter(group=self.group, role__permissions__name='order.approve') if len(owned_grms) > 1: #multilevel approvals ignore the "approver" GRM owned_grms = owned_grms.exclude(role__name='approver') return owned_grms
identifier_body
models.py
cfv and not grmcfv.int_value: grmcfv.int_value = grm.profile.user.id grmcfv.save() history_msg = _("The '{order}' order has been partially approved by {role_label}.").format(order=escape(self), role_label=grm.role.label) self.add_event('APPROVED', history_msg, profile=profile) def get_my_grms(self, profile=None): ''' in a multilevel approval, we need a get the GroupRoleMembership mappings and exclude the default approvers role as well, if there's only one role.name == approvers ''' if not profile: profile = self.owner owned_grms = profile.grouprolemembership_set.filter(group=self.group, role__permissions__name='order.approve') if len(owned_grms) > 1: #multilevel approvals ignore the "approver" GRM owned_grms = owned_grms.exclude(role__name='approver') return owned_grms def is_multilevel_approval(self): """ multilevel approvals need to display the roles that have order.approve permissions based on a BPOI custom_field_value where the field name has an "_approver_id" at the end, and a valid role exists on the Group for that cfv field name returns a dictionary of the roles or an empty dict """ if not self.orderitem_set.first(): return {} oi = self.orderitem_set.first().cast() if not oi or not hasattr(oi, 'blueprintitemarguments_set'): return {} bpoi = oi.blueprintitemarguments_set.first() approval_levels = {} if not bpoi: return {} for cfv in bpoi.custom_field_values.filter(field__name__endswith='_approver_id'): role_name = cfv.field.name.replace('_approver_id', '') ml_approver_role = Role.objects.get(name=role_name, permissions__name='order.approve') if ml_approver_role: approval_levels[ml_approver_role] = cfv.value return approval_levels def should_auto_approve(self): """ Return True if this order should be automatically approved. An order should be auto approved if either it's group has auto approve enabled, if the submitter is also an approver on this group, or if all of its order items have environments with auto approve enabled. and now if the multi_level auto approval roles are granted to this user profile """ if self.group and self.group.allow_auto_approval: return True # some orders (like those duplicated by CIT) will not have owners if self.is_multilevel_approval(): if self.has_all_approver_roles(self.owner, self.group): return True return False else: if self.owner and self.owner.has_permission('order.approve', self.group): return True return False def has_all_approver_roles(self, profile, group): ''' for multi_level approvals we want to know if we can approve the order as part of should_auto_approve() ''' #Roles r_needed = Role.objects.filter(grouprolemembership__group=group, permissions__name='order.approve') if len(r_needed) > 1: r_needed = r_needed.exclude(name='approver').distinct() #GroupRoleMemberships r_owned = CustomOrder.get_my_grms(self, profile) if len(r_needed) == len(r_owned): #if the number of GRMs == the number of Roles for that group return True return False def start_approval_process(self, request=None): """ This method determines what order process should be taken, and takes it. By default, the process is to email the approvers, but this can be overriden by customers to instead call out to a hook, and that can be overridden by auto-approval (set on the group or env, or by the owner being an approver or a super admin). This method returns a message summarizing what action was taken. `request` is needed to determine the current portal URL; if not passed, default portal URL is used. """ # done here to avoid circular import from cbhooks.models import HookPoint hook_point = HookPoint.objects.filter(name="order_approval").first() orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point) if orch_actions: #the orchestration action NEEDs to be first in order to allow a hook # to model the approval process correctly and not have something # auto-approve before the hook is run logger.debug("Order Approval orchestration actions exist, so bypassing built-in approver emails.") try: cbhooks.run_hooks("order_approval", order=self) except cbhooks.exceptions.HookFailureException as e: msg = _("Failed to run hook for order approval. Status: {status}," " Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors) raise CloudBoltException(msg) return "" #now that the hooks have run, check if it should be auto-approved profile = request.get_user_profile() if self.is_multilevel_approval(): self.approve_my_grms(profile) if self.should_auto_approve(): logger.debug("Order can be automatically approved, attempting approval by {}".format(self.owner)) jobs, msg = self.approve(self.owner) if jobs: msg = render_to_string( 'orders/approved_msg.html', { 'order': self, 'autoapproved': True, 'num_jobs': len(jobs), 'extramsg': msg, }) return msg else: # No auto approval and no approval hooks, so go with # the default process of emailing a set of approvers, unless the # owner is an approver. msg = _("Order #{order_id} has been submitted for approval. ").format(order_id=self.id) msg += orders.mail.email_approvers(self, request) logger.debug(msg) return msg def approve(self, approver=None, parent_job=None): """ Sets this order to the "Active" status and kicks off the jobs needed to complete this order. One job of the appropriate type ('provision' or 'decom') is kicked off per OrderItem for this order. An exception to this statement is if the "quantity" field on the OrderItem is set, then a set of identical jobs will be kicked off (however many are specified by quantity). Returns list of jobs and error messages from any cleanup of order items. """ if self.status != 'PENDING': msg = _( "Only orders that are in 'PENDING' state can be approved. " "Current state of order is '{status}'." ).format(status=self.status) raise CloudBoltException(msg) approve_this_order = False if self.is_multilevel_approval(): logger.info('models.approve is multilevel!') self.approve_my_grms(approver) logger.info(f'models.approve after approve_my_grms ({approver})!') if self.is_multilevel_approval(): logger.info('models.approve ml approval complete!') approve_this_order = True else: logger.info('models.approve is NOT multilevel!') #single-level approval approve_this_order = True if not approve_this_order: #should only kick off if multilevel approvals msg = _( "Cannot fully approve this order. Multilevel approvals not complete. " "Current state of order is '{status}'." ).format(status=self.status) return [], msg try: # Raise an error to bubble up specific reason as part of the exception self.group.quota_set.can_use(raise_error=True, **self.net_usage()) except QuotaSetError as quota_set_error: raise QuotaError(_( "Cannot approve order #{order_id} because doing so would exceed the " "quota for group '{group}'. {error}" ).format(order_id=self.id, group=self.group, error=quota_set_error)) # Before we create job records, order the order items to make # sure decom jobs are queued before prov jobs. the job engine # may still parallelize them, that's something we can revisit # later. In the meantime, customers can set the concurrency # level to 1 to prevent this. # we're taking advantage of the fact that "decom" comes before # "prov" in the alphabet here. order_items = [oi.cast() for oi in self.top_level_items.order_by( "real_type", "add_date")] order_items, msg = self.__filter_illegal_order_items(order_items) if not order_items: msg = _("{message} There are no valid order items left. This order is " "being marked as complete.").format(message=msg) self.complete("SUCCESS") return [], msg self.status = "ACTIVE" self.approved_by = approver
self.approve_date = get_current_time()
random_line_split
models.py
CustomOrder.get_my_grms(self, profile): grmcfv = bpoi.custom_field_values.get(field__name=f'{grm.role.name}_approver_id') if grmcfv and not grmcfv.int_value: grmcfv.int_value = grm.profile.user.id grmcfv.save() history_msg = _("The '{order}' order has been partially approved by {role_label}.").format(order=escape(self), role_label=grm.role.label) self.add_event('APPROVED', history_msg, profile=profile) def get_my_grms(self, profile=None): ''' in a multilevel approval, we need a get the GroupRoleMembership mappings and exclude the default approvers role as well, if there's only one role.name == approvers ''' if not profile: profile = self.owner owned_grms = profile.grouprolemembership_set.filter(group=self.group, role__permissions__name='order.approve') if len(owned_grms) > 1: #multilevel approvals ignore the "approver" GRM owned_grms = owned_grms.exclude(role__name='approver') return owned_grms def is_multilevel_approval(self): """ multilevel approvals need to display the roles that have order.approve permissions based on a BPOI custom_field_value where the field name has an "_approver_id" at the end, and a valid role exists on the Group for that cfv field name returns a dictionary of the roles or an empty dict """ if not self.orderitem_set.first(): return {} oi = self.orderitem_set.first().cast() if not oi or not hasattr(oi, 'blueprintitemarguments_set'): return {} bpoi = oi.blueprintitemarguments_set.first() approval_levels = {} if not bpoi: return {} for cfv in bpoi.custom_field_values.filter(field__name__endswith='_approver_id'): role_name = cfv.field.name.replace('_approver_id', '') ml_approver_role = Role.objects.get(name=role_name, permissions__name='order.approve') if ml_approver_role: approval_levels[ml_approver_role] = cfv.value return approval_levels def should_auto_approve(self): """ Return True if this order should be automatically approved. An order should be auto approved if either it's group has auto approve enabled, if the submitter is also an approver on this group, or if all of its order items have environments with auto approve enabled. and now if the multi_level auto approval roles are granted to this user profile """ if self.group and self.group.allow_auto_approval: return True # some orders (like those duplicated by CIT) will not have owners if self.is_multilevel_approval(): if self.has_all_approver_roles(self.owner, self.group): return True return False else: if self.owner and self.owner.has_permission('order.approve', self.group): return True return False def has_all_approver_roles(self, profile, group): ''' for multi_level approvals we want to know if we can approve the order as part of should_auto_approve() ''' #Roles r_needed = Role.objects.filter(grouprolemembership__group=group, permissions__name='order.approve') if len(r_needed) > 1: r_needed = r_needed.exclude(name='approver').distinct() #GroupRoleMemberships r_owned = CustomOrder.get_my_grms(self, profile) if len(r_needed) == len(r_owned): #if the number of GRMs == the number of Roles for that group return True return False def
(self, request=None): """ This method determines what order process should be taken, and takes it. By default, the process is to email the approvers, but this can be overriden by customers to instead call out to a hook, and that can be overridden by auto-approval (set on the group or env, or by the owner being an approver or a super admin). This method returns a message summarizing what action was taken. `request` is needed to determine the current portal URL; if not passed, default portal URL is used. """ # done here to avoid circular import from cbhooks.models import HookPoint hook_point = HookPoint.objects.filter(name="order_approval").first() orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point) if orch_actions: #the orchestration action NEEDs to be first in order to allow a hook # to model the approval process correctly and not have something # auto-approve before the hook is run logger.debug("Order Approval orchestration actions exist, so bypassing built-in approver emails.") try: cbhooks.run_hooks("order_approval", order=self) except cbhooks.exceptions.HookFailureException as e: msg = _("Failed to run hook for order approval. Status: {status}," " Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors) raise CloudBoltException(msg) return "" #now that the hooks have run, check if it should be auto-approved profile = request.get_user_profile() if self.is_multilevel_approval(): self.approve_my_grms(profile) if self.should_auto_approve(): logger.debug("Order can be automatically approved, attempting approval by {}".format(self.owner)) jobs, msg = self.approve(self.owner) if jobs: msg = render_to_string( 'orders/approved_msg.html', { 'order': self, 'autoapproved': True, 'num_jobs': len(jobs), 'extramsg': msg, }) return msg else: # No auto approval and no approval hooks, so go with # the default process of emailing a set of approvers, unless the # owner is an approver. msg = _("Order #{order_id} has been submitted for approval. ").format(order_id=self.id) msg += orders.mail.email_approvers(self, request) logger.debug(msg) return msg def approve(self, approver=None, parent_job=None): """ Sets this order to the "Active" status and kicks off the jobs needed to complete this order. One job of the appropriate type ('provision' or 'decom') is kicked off per OrderItem for this order. An exception to this statement is if the "quantity" field on the OrderItem is set, then a set of identical jobs will be kicked off (however many are specified by quantity). Returns list of jobs and error messages from any cleanup of order items. """ if self.status != 'PENDING': msg = _( "Only orders that are in 'PENDING' state can be approved. " "Current state of order is '{status}'." ).format(status=self.status) raise CloudBoltException(msg) approve_this_order = False if self.is_multilevel_approval(): logger.info('models.approve is multilevel!') self.approve_my_grms(approver) logger.info(f'models.approve after approve_my_grms ({approver})!') if self.is_multilevel_approval(): logger.info('models.approve ml approval complete!') approve_this_order = True else: logger.info('models.approve is NOT multilevel!') #single-level approval approve_this_order = True if not approve_this_order: #should only kick off if multilevel approvals msg = _( "Cannot fully approve this order. Multilevel approvals not complete. " "Current state of order is '{status}'." ).format(status=self.status) return [], msg try: # Raise an error to bubble up specific reason as part of the exception self.group.quota_set.can_use(raise_error=True, **self.net_usage()) except QuotaSetError as quota_set_error: raise QuotaError(_( "Cannot approve order #{order_id} because doing so would exceed the " "quota for group '{group}'. {error}" ).format(order_id=self.id, group=self.group, error=quota_set_error)) # Before we create job records, order the order items to make # sure decom jobs are queued before prov jobs. the job engine # may still parallelize them, that's something we can revisit # later. In the meantime, customers can set the concurrency # level to 1 to prevent this. # we're taking advantage of the fact that "decom" comes before # "prov" in the alphabet here. order_items = [oi.cast() for oi in self.top_level_items.order_by( "real_type", "add_date")] order_items, msg = self.__filter_illegal_order_items(order_items) if not order_items: msg = _("{message} There are no valid order items left. This order is " "being marked as complete.").format(message
start_approval_process
identifier_name
models.py
in CustomOrder.get_my_grms(self, profile): grmcfv = bpoi.custom_field_values.get(field__name=f'{grm.role.name}_approver_id') if grmcfv and not grmcfv.int_value: grmcfv.int_value = grm.profile.user.id grmcfv.save() history_msg = _("The '{order}' order has been partially approved by {role_label}.").format(order=escape(self), role_label=grm.role.label) self.add_event('APPROVED', history_msg, profile=profile) def get_my_grms(self, profile=None): ''' in a multilevel approval, we need a get the GroupRoleMembership mappings and exclude the default approvers role as well, if there's only one role.name == approvers ''' if not profile: profile = self.owner owned_grms = profile.grouprolemembership_set.filter(group=self.group, role__permissions__name='order.approve') if len(owned_grms) > 1: #multilevel approvals ignore the "approver" GRM owned_grms = owned_grms.exclude(role__name='approver') return owned_grms def is_multilevel_approval(self): """ multilevel approvals need to display the roles that have order.approve permissions based on a BPOI custom_field_value where the field name has an "_approver_id" at the end, and a valid role exists on the Group for that cfv field name returns a dictionary of the roles or an empty dict """ if not self.orderitem_set.first(): return {} oi = self.orderitem_set.first().cast() if not oi or not hasattr(oi, 'blueprintitemarguments_set'): return {} bpoi = oi.blueprintitemarguments_set.first() approval_levels = {} if not bpoi: return {} for cfv in bpoi.custom_field_values.filter(field__name__endswith='_approver_id'): role_name = cfv.field.name.replace('_approver_id', '') ml_approver_role = Role.objects.get(name=role_name, permissions__name='order.approve') if ml_approver_role: approval_levels[ml_approver_role] = cfv.value return approval_levels def should_auto_approve(self): """ Return True if this order should be automatically approved. An order should be auto approved if either it's group has auto approve enabled, if the submitter is also an approver on this group, or if all of its order items have environments with auto approve enabled. and now if the multi_level auto approval roles are granted to this user profile """ if self.group and self.group.allow_auto_approval:
# some orders (like those duplicated by CIT) will not have owners if self.is_multilevel_approval(): if self.has_all_approver_roles(self.owner, self.group): return True return False else: if self.owner and self.owner.has_permission('order.approve', self.group): return True return False def has_all_approver_roles(self, profile, group): ''' for multi_level approvals we want to know if we can approve the order as part of should_auto_approve() ''' #Roles r_needed = Role.objects.filter(grouprolemembership__group=group, permissions__name='order.approve') if len(r_needed) > 1: r_needed = r_needed.exclude(name='approver').distinct() #GroupRoleMemberships r_owned = CustomOrder.get_my_grms(self, profile) if len(r_needed) == len(r_owned): #if the number of GRMs == the number of Roles for that group return True return False def start_approval_process(self, request=None): """ This method determines what order process should be taken, and takes it. By default, the process is to email the approvers, but this can be overriden by customers to instead call out to a hook, and that can be overridden by auto-approval (set on the group or env, or by the owner being an approver or a super admin). This method returns a message summarizing what action was taken. `request` is needed to determine the current portal URL; if not passed, default portal URL is used. """ # done here to avoid circular import from cbhooks.models import HookPoint hook_point = HookPoint.objects.filter(name="order_approval").first() orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point) if orch_actions: #the orchestration action NEEDs to be first in order to allow a hook # to model the approval process correctly and not have something # auto-approve before the hook is run logger.debug("Order Approval orchestration actions exist, so bypassing built-in approver emails.") try: cbhooks.run_hooks("order_approval", order=self) except cbhooks.exceptions.HookFailureException as e: msg = _("Failed to run hook for order approval. Status: {status}," " Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors) raise CloudBoltException(msg) return "" #now that the hooks have run, check if it should be auto-approved profile = request.get_user_profile() if self.is_multilevel_approval(): self.approve_my_grms(profile) if self.should_auto_approve(): logger.debug("Order can be automatically approved, attempting approval by {}".format(self.owner)) jobs, msg = self.approve(self.owner) if jobs: msg = render_to_string( 'orders/approved_msg.html', { 'order': self, 'autoapproved': True, 'num_jobs': len(jobs), 'extramsg': msg, }) return msg else: # No auto approval and no approval hooks, so go with # the default process of emailing a set of approvers, unless the # owner is an approver. msg = _("Order #{order_id} has been submitted for approval. ").format(order_id=self.id) msg += orders.mail.email_approvers(self, request) logger.debug(msg) return msg def approve(self, approver=None, parent_job=None): """ Sets this order to the "Active" status and kicks off the jobs needed to complete this order. One job of the appropriate type ('provision' or 'decom') is kicked off per OrderItem for this order. An exception to this statement is if the "quantity" field on the OrderItem is set, then a set of identical jobs will be kicked off (however many are specified by quantity). Returns list of jobs and error messages from any cleanup of order items. """ if self.status != 'PENDING': msg = _( "Only orders that are in 'PENDING' state can be approved. " "Current state of order is '{status}'." ).format(status=self.status) raise CloudBoltException(msg) approve_this_order = False if self.is_multilevel_approval(): logger.info('models.approve is multilevel!') self.approve_my_grms(approver) logger.info(f'models.approve after approve_my_grms ({approver})!') if self.is_multilevel_approval(): logger.info('models.approve ml approval complete!') approve_this_order = True else: logger.info('models.approve is NOT multilevel!') #single-level approval approve_this_order = True if not approve_this_order: #should only kick off if multilevel approvals msg = _( "Cannot fully approve this order. Multilevel approvals not complete. " "Current state of order is '{status}'." ).format(status=self.status) return [], msg try: # Raise an error to bubble up specific reason as part of the exception self.group.quota_set.can_use(raise_error=True, **self.net_usage()) except QuotaSetError as quota_set_error: raise QuotaError(_( "Cannot approve order #{order_id} because doing so would exceed the " "quota for group '{group}'. {error}" ).format(order_id=self.id, group=self.group, error=quota_set_error)) # Before we create job records, order the order items to make # sure decom jobs are queued before prov jobs. the job engine # may still parallelize them, that's something we can revisit # later. In the meantime, customers can set the concurrency # level to 1 to prevent this. # we're taking advantage of the fact that "decom" comes before # "prov" in the alphabet here. order_items = [oi.cast() for oi in self.top_level_items.order_by( "real_type", "add_date")] order_items, msg = self.__filter_illegal_order_items(order_items) if not order_items: msg = _("{message} There are no valid order items left. This order is " "being marked as complete.").format(message
return True
conditional_block
usage.py
def getInstances(region): creds = credentials() try: conn = ec2.connect_to_region(region, **creds) instances = [] reservations = conn.get_all_reservations() for reservation in reservations: for instance in reservation.instances: instances.append(instance) except boto.exception.EC2ResponseError: return [] return instances def getVolumes(region): creds = credentials() try: conn = ec2.connect_to_region(region, **creds) volumes = conn.get_all_volumes() except boto.exception.EC2ResponseError: return [] return volumes # snapshots got this thing where there are public, private, and owned by me: defaults to all or public? # we're interested in the ones owned by us, so select 'owner_id' = 794321122735 # can use owner='self' as a parameter to get_all_snapshots() too def getSnapshots(region): creds = credentials() try: conn = ec2.connect_to_region(region, **creds) snapshots = conn.get_all_snapshots(owner='self') except boto.exception.EC2ResponseError: return [] return snapshots def getImages(region): """Return images for one given region, owned by self""" creds = credentials() try: conn = ec2.connect_to_region(region, **creds) images = conn.get_all_images(owners=['self']) except boto.exception.EC2ResponseError: return [] return images def getSnapshotsOf(image): """Return list of snapshot_ids associated with the given image""" snapshotIds = [] deviceMapping = image.block_device_mapping # dict of devices devices = deviceMapping.keys() for d in devices: snapshotId = deviceMapping[d].snapshot_id if snapshotId is not None: snapshotIds.append(snapshotId.encode()) return snapshotIds def getImagesD(region): """Use dictionaries 'cos we'll have to cross-reference to get snapshots that go with the AMIs returns list of dictionaries representing images from one region """ images = getImages(region) imageDicts = [] for im in images: imageDict = {"name": im.name, "id": im.id, "region": im.region.name, "state": im.state, "created": im.creationDate, "type": im.type, "KEEP": getKeepTag(im), "name_tag": get_name_tag(im), "snapshots": getSnapshotsOf(im), "description": im.description, "PROD": isProduction(im) } imageDicts.append(imageDict) return imageDicts def getSnapshotsD(region): """ return a list of dictionaries representing snapshots from one region """ # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it) snapshots = getSnapshots(region) snapshotsDicts = [] ims = getImages(region) for s in snapshots: amis = getAmisOf(s, ims) amiIds = [] amiKeeps = [] if len(amis) == 1: amiIds = amis[0].id.encode() amiKeeps = getKeepTag(amis[0]) elif len(amis) == 0: amiIds = "-------no-AMI-found" amiKeeps = "-------no-AMI-found" else: for a in amis: amiIds.append(a.id.encode()) amiKeeps.append(getKeepTag(a)) snapshotsDict = {"id": s.id, "status": s.status, "region": s.region.name, "progress": s.progress, "start_time": s.start_time, "volume_id": s.volume_id, "volume_size": s.volume_size, "KEEP-tag": getKeepTag(s), "Name": get_name_tag(s), "AMI(s)": amiIds, "AMI_KEEP-tags": amiKeeps, "PROD": isProduction(s), "Description": s.description } snapshotsDicts.append(snapshotsDict) return snapshotsDicts def getVolumesD(region): """ return a list of dictionaries representing volumes from one region """ volumes = getVolumes(region) instances = getInstancesD(region) volumesDicts = [] for v in volumesDicts: volumesDict = {"id": v.id, "KEEP-tag": getKeepTag(v), "instance_KEEP-tag": getKeepTag(getInstanceOf(v)), "instance": v.attach_data.instance_id, "status": v.status, "size": v.size, "create-time": v.create_time, "region": v.region.name, "zone": v.zone, "snapshot_id": v.snapshot_id, "PROD": isProduction(v) } def getInstancesD(region): """ return a list of dictionaries representing instances for one region, will help with volume-instance-KEEP-tag look-up. Maybe. """ instances = getInstances(region) instancesDicts = {"id": i.id, "KEEP-tag": getKeepTag(i), "instance_type": i.instance_type, "state": i.state, "launch_time": i.launch_time, "security_groups": getGroups(i), "region": i.region.name, "PROD": isProduction(i) } ########## Seems to work ################### def getAmisOf(snapshot, images): """retrieve list of AMIs that refer to a given snapshot""" amis = [] for im in images: snapshotsOfThisIm = getSnapshotsOf(im) for soti in snapshotsOfThisIm: if soti == snapshot.id: amis.append(im) return amis def getKeepTag(obj): """If tag with key='KEEP' exists, return its value (can be an empty string), else it's '-------no-tag'""" if 'KEEP' in obj.tags: return obj.tags['KEEP'] else: return "-------no-tag" # try: # tag = obj.tags['KEEP'] # except: # # Note: some with empty KEEP-tags, through web console they look the same as those untagged # return "-----" # return tag def isProduction(obj): """Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key""" return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD') def get_name_tag(obj): """Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources""" if 'Name' in obj.tags: return obj.tags['Name'] else: return "" def getGroups(instance): if len(instance.groups) == 1: # if there's only one group, then unpack it return instance.groups[0].name else: # in the not-expected case where there is more than one groups, deal with it groupList = [] for g in instance.groups: groupList.append(g.name) return groupList def getInstanceOf(volume): """ Returns the actual instance (if only instance_id is needed, can access directly from volume) (if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances) """ # ughhhhhhhh refactor later (shouldn't do this for every single volume, takes forever) creds = credentials() conn = ec2.connect_to_region(volume.region.name, **creds) ins_id = volume.attach_data.instance_id reservation = conn.get_all_instances(instance_ids=ins_id)[0] return reservation.instances[0] ############################################################################################################################### def generateInfoVolumes(regions): """ Write volumes to file """ print "\nWriting volumes info to output file %s" % volumes_data_output_file with open(volumes_data_output_file, 'w') as f1: f1.write("VOLUMES\n") f1.write( "Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n") for r in regions: volumes = getVolumes(r) print "." # give some feedback to the user for v in volumes: f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size, v.create_time, v.region.name, v.zone, v.snapshot_id)) def generateInfoSnapshots(regions): """ Write snapshots to file """ print "Writing snapshots info to output file %s" % snapshots_data_output_file snapshots = [] for r in regions: snapshots += getSnapshotsD(r) print "." # feedback for the user with open(snapshots_data_output_file, 'w') as f2: f2.write("SNAPSHOTS\n") f2.write( "Name\tsnapshot_id\tKEEP-tag_of
return {"aws_access_key_id": os.environ['AWS_ACCESS_KEY'], "aws_secret_access_key": os.environ['AWS_SECRET_KEY']}
identifier_body
usage.py
credentials() try: conn = ec2.connect_to_region(region, **creds) volumes = conn.get_all_volumes() except boto.exception.EC2ResponseError: return [] return volumes # snapshots got this thing where there are public, private, and owned by me: defaults to all or public? # we're interested in the ones owned by us, so select 'owner_id' = 794321122735 # can use owner='self' as a parameter to get_all_snapshots() too def getSnapshots(region): creds = credentials() try: conn = ec2.connect_to_region(region, **creds) snapshots = conn.get_all_snapshots(owner='self') except boto.exception.EC2ResponseError: return [] return snapshots def getImages(region): """Return images for one given region, owned by self""" creds = credentials() try: conn = ec2.connect_to_region(region, **creds) images = conn.get_all_images(owners=['self']) except boto.exception.EC2ResponseError: return [] return images def getSnapshotsOf(image): """Return list of snapshot_ids associated with the given image""" snapshotIds = [] deviceMapping = image.block_device_mapping # dict of devices devices = deviceMapping.keys() for d in devices: snapshotId = deviceMapping[d].snapshot_id if snapshotId is not None: snapshotIds.append(snapshotId.encode()) return snapshotIds def getImagesD(region): """Use dictionaries 'cos we'll have to cross-reference to get snapshots that go with the AMIs returns list of dictionaries representing images from one region """ images = getImages(region) imageDicts = [] for im in images: imageDict = {"name": im.name, "id": im.id, "region": im.region.name, "state": im.state, "created": im.creationDate, "type": im.type, "KEEP": getKeepTag(im), "name_tag": get_name_tag(im), "snapshots": getSnapshotsOf(im), "description": im.description, "PROD": isProduction(im) } imageDicts.append(imageDict) return imageDicts def getSnapshotsD(region): """ return a list of dictionaries representing snapshots from one region """ # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it) snapshots = getSnapshots(region) snapshotsDicts = [] ims = getImages(region) for s in snapshots: amis = getAmisOf(s, ims) amiIds = [] amiKeeps = [] if len(amis) == 1: amiIds = amis[0].id.encode() amiKeeps = getKeepTag(amis[0]) elif len(amis) == 0: amiIds = "-------no-AMI-found" amiKeeps = "-------no-AMI-found" else:
snapshotsDict = {"id": s.id, "status": s.status, "region": s.region.name, "progress": s.progress, "start_time": s.start_time, "volume_id": s.volume_id, "volume_size": s.volume_size, "KEEP-tag": getKeepTag(s), "Name": get_name_tag(s), "AMI(s)": amiIds, "AMI_KEEP-tags": amiKeeps, "PROD": isProduction(s), "Description": s.description } snapshotsDicts.append(snapshotsDict) return snapshotsDicts def getVolumesD(region): """ return a list of dictionaries representing volumes from one region """ volumes = getVolumes(region) instances = getInstancesD(region) volumesDicts = [] for v in volumesDicts: volumesDict = {"id": v.id, "KEEP-tag": getKeepTag(v), "instance_KEEP-tag": getKeepTag(getInstanceOf(v)), "instance": v.attach_data.instance_id, "status": v.status, "size": v.size, "create-time": v.create_time, "region": v.region.name, "zone": v.zone, "snapshot_id": v.snapshot_id, "PROD": isProduction(v) } def getInstancesD(region): """ return a list of dictionaries representing instances for one region, will help with volume-instance-KEEP-tag look-up. Maybe. """ instances = getInstances(region) instancesDicts = {"id": i.id, "KEEP-tag": getKeepTag(i), "instance_type": i.instance_type, "state": i.state, "launch_time": i.launch_time, "security_groups": getGroups(i), "region": i.region.name, "PROD": isProduction(i) } ########## Seems to work ################### def getAmisOf(snapshot, images): """retrieve list of AMIs that refer to a given snapshot""" amis = [] for im in images: snapshotsOfThisIm = getSnapshotsOf(im) for soti in snapshotsOfThisIm: if soti == snapshot.id: amis.append(im) return amis def getKeepTag(obj): """If tag with key='KEEP' exists, return its value (can be an empty string), else it's '-------no-tag'""" if 'KEEP' in obj.tags: return obj.tags['KEEP'] else: return "-------no-tag" # try: # tag = obj.tags['KEEP'] # except: # # Note: some with empty KEEP-tags, through web console they look the same as those untagged # return "-----" # return tag def isProduction(obj): """Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key""" return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD') def get_name_tag(obj): """Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources""" if 'Name' in obj.tags: return obj.tags['Name'] else: return "" def getGroups(instance): if len(instance.groups) == 1: # if there's only one group, then unpack it return instance.groups[0].name else: # in the not-expected case where there is more than one groups, deal with it groupList = [] for g in instance.groups: groupList.append(g.name) return groupList def getInstanceOf(volume): """ Returns the actual instance (if only instance_id is needed, can access directly from volume) (if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances) """ # ughhhhhhhh refactor later (shouldn't do this for every single volume, takes forever) creds = credentials() conn = ec2.connect_to_region(volume.region.name, **creds) ins_id = volume.attach_data.instance_id reservation = conn.get_all_instances(instance_ids=ins_id)[0] return reservation.instances[0] ############################################################################################################################### def generateInfoVolumes(regions): """ Write volumes to file """ print "\nWriting volumes info to output file %s" % volumes_data_output_file with open(volumes_data_output_file, 'w') as f1: f1.write("VOLUMES\n") f1.write( "Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n") for r in regions: volumes = getVolumes(r) print "." # give some feedback to the user for v in volumes: f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size, v.create_time, v.region.name, v.zone, v.snapshot_id)) def generateInfoSnapshots(regions): """ Write snapshots to file """ print "Writing snapshots info to output file %s" % snapshots_data_output_file snapshots = [] for r in regions: snapshots += getSnapshotsD(r) print "." # feedback for the user with open(snapshots_data_output_file, 'w') as f2: f2.write("SNAPSHOTS\n") f2.write( "Name\tsnapshot_id\tKEEP-tag_of_snapshot\tKEEP-tag_of_AMI\tproduction?\tassociated_AMI\tstart_time\tstatus" "\tregion\tprogress\tassociated_volume\tvolume_size\tdescription\n\n") for s in snapshots: f2.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s
for a in amis: amiIds.append(a.id.encode()) amiKeeps.append(getKeepTag(a))
conditional_block
usage.py
credentials() try: conn = ec2.connect_to_region(region, **creds) volumes = conn.get_all_volumes() except boto.exception.EC2ResponseError: return [] return volumes # snapshots got this thing where there are public, private, and owned by me: defaults to all or public? # we're interested in the ones owned by us, so select 'owner_id' = 794321122735 # can use owner='self' as a parameter to get_all_snapshots() too def getSnapshots(region): creds = credentials() try: conn = ec2.connect_to_region(region, **creds) snapshots = conn.get_all_snapshots(owner='self') except boto.exception.EC2ResponseError: return [] return snapshots def getImages(region): """Return images for one given region, owned by self""" creds = credentials() try: conn = ec2.connect_to_region(region, **creds) images = conn.get_all_images(owners=['self']) except boto.exception.EC2ResponseError: return [] return images def getSnapshotsOf(image): """Return list of snapshot_ids associated with the given image""" snapshotIds = [] deviceMapping = image.block_device_mapping # dict of devices devices = deviceMapping.keys() for d in devices: snapshotId = deviceMapping[d].snapshot_id if snapshotId is not None: snapshotIds.append(snapshotId.encode()) return snapshotIds def getImagesD(region): """Use dictionaries 'cos we'll have to cross-reference to get snapshots that go with the AMIs returns list of dictionaries representing images from one region """ images = getImages(region) imageDicts = [] for im in images: imageDict = {"name": im.name, "id": im.id, "region": im.region.name, "state": im.state, "created": im.creationDate, "type": im.type, "KEEP": getKeepTag(im), "name_tag": get_name_tag(im), "snapshots": getSnapshotsOf(im), "description": im.description, "PROD": isProduction(im) } imageDicts.append(imageDict) return imageDicts def getSnapshotsD(region): """ return a list of dictionaries representing snapshots from one region """ # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it) snapshots = getSnapshots(region) snapshotsDicts = [] ims = getImages(region) for s in snapshots: amis = getAmisOf(s, ims) amiIds = [] amiKeeps = [] if len(amis) == 1: amiIds = amis[0].id.encode() amiKeeps = getKeepTag(amis[0]) elif len(amis) == 0: amiIds = "-------no-AMI-found" amiKeeps = "-------no-AMI-found" else: for a in amis: amiIds.append(a.id.encode()) amiKeeps.append(getKeepTag(a)) snapshotsDict = {"id": s.id, "status": s.status, "region": s.region.name, "progress": s.progress, "start_time": s.start_time, "volume_id": s.volume_id, "volume_size": s.volume_size, "KEEP-tag": getKeepTag(s), "Name": get_name_tag(s), "AMI(s)": amiIds, "AMI_KEEP-tags": amiKeeps,
"Description": s.description } snapshotsDicts.append(snapshotsDict) return snapshotsDicts def getVolumesD(region): """ return a list of dictionaries representing volumes from one region """ volumes = getVolumes(region) instances = getInstancesD(region) volumesDicts = [] for v in volumesDicts: volumesDict = {"id": v.id, "KEEP-tag": getKeepTag(v), "instance_KEEP-tag": getKeepTag(getInstanceOf(v)), "instance": v.attach_data.instance_id, "status": v.status, "size": v.size, "create-time": v.create_time, "region": v.region.name, "zone": v.zone, "snapshot_id": v.snapshot_id, "PROD": isProduction(v) } def getInstancesD(region): """ return a list of dictionaries representing instances for one region, will help with volume-instance-KEEP-tag look-up. Maybe. """ instances = getInstances(region) instancesDicts = {"id": i.id, "KEEP-tag": getKeepTag(i), "instance_type": i.instance_type, "state": i.state, "launch_time": i.launch_time, "security_groups": getGroups(i), "region": i.region.name, "PROD": isProduction(i) } ########## Seems to work ################### def getAmisOf(snapshot, images): """retrieve list of AMIs that refer to a given snapshot""" amis = [] for im in images: snapshotsOfThisIm = getSnapshotsOf(im) for soti in snapshotsOfThisIm: if soti == snapshot.id: amis.append(im) return amis def getKeepTag(obj): """If tag with key='KEEP' exists, return its value (can be an empty string), else it's '-------no-tag'""" if 'KEEP' in obj.tags: return obj.tags['KEEP'] else: return "-------no-tag" # try: # tag = obj.tags['KEEP'] # except: # # Note: some with empty KEEP-tags, through web console they look the same as those untagged # return "-----" # return tag def isProduction(obj): """Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key""" return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD') def get_name_tag(obj): """Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources""" if 'Name' in obj.tags: return obj.tags['Name'] else: return "" def getGroups(instance): if len(instance.groups) == 1: # if there's only one group, then unpack it return instance.groups[0].name else: # in the not-expected case where there is more than one groups, deal with it groupList = [] for g in instance.groups: groupList.append(g.name) return groupList def getInstanceOf(volume): """ Returns the actual instance (if only instance_id is needed, can access directly from volume) (if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances) """ # ughhhhhhhh refactor later (shouldn't do this for every single volume, takes forever) creds = credentials() conn = ec2.connect_to_region(volume.region.name, **creds) ins_id = volume.attach_data.instance_id reservation = conn.get_all_instances(instance_ids=ins_id)[0] return reservation.instances[0] ############################################################################################################################### def generateInfoVolumes(regions): """ Write volumes to file """ print "\nWriting volumes info to output file %s" % volumes_data_output_file with open(volumes_data_output_file, 'w') as f1: f1.write("VOLUMES\n") f1.write( "Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n") for r in regions: volumes = getVolumes(r) print "." # give some feedback to the user for v in volumes: f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size, v.create_time, v.region.name, v.zone, v.snapshot_id)) def generateInfoSnapshots(regions): """ Write snapshots to file """ print "Writing snapshots info to output file %s" % snapshots_data_output_file snapshots = [] for r in regions: snapshots += getSnapshotsD(r) print "." # feedback for the user with open(snapshots_data_output_file, 'w') as f2: f2.write("SNAPSHOTS\n") f2.write( "Name\tsnapshot_id\tKEEP-tag_of_snapshot\tKEEP-tag_of_AMI\tproduction?\tassociated_AMI\tstart_time\tstatus" "\tregion\tprogress\tassociated_volume\tvolume_size\tdescription\n\n") for s in snapshots: f2.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s['
"PROD": isProduction(s),
random_line_split
usage.py
credentials() try: conn = ec2.connect_to_region(region, **creds) volumes = conn.get_all_volumes() except boto.exception.EC2ResponseError: return [] return volumes # snapshots got this thing where there are public, private, and owned by me: defaults to all or public? # we're interested in the ones owned by us, so select 'owner_id' = 794321122735 # can use owner='self' as a parameter to get_all_snapshots() too def getSnapshots(region): creds = credentials() try: conn = ec2.connect_to_region(region, **creds) snapshots = conn.get_all_snapshots(owner='self') except boto.exception.EC2ResponseError: return [] return snapshots def getImages(region): """Return images for one given region, owned by self""" creds = credentials() try: conn = ec2.connect_to_region(region, **creds) images = conn.get_all_images(owners=['self']) except boto.exception.EC2ResponseError: return [] return images def getSnapshotsOf(image): """Return list of snapshot_ids associated with the given image""" snapshotIds = [] deviceMapping = image.block_device_mapping # dict of devices devices = deviceMapping.keys() for d in devices: snapshotId = deviceMapping[d].snapshot_id if snapshotId is not None: snapshotIds.append(snapshotId.encode()) return snapshotIds def getImagesD(region): """Use dictionaries 'cos we'll have to cross-reference to get snapshots that go with the AMIs returns list of dictionaries representing images from one region """ images = getImages(region) imageDicts = [] for im in images: imageDict = {"name": im.name, "id": im.id, "region": im.region.name, "state": im.state, "created": im.creationDate, "type": im.type, "KEEP": getKeepTag(im), "name_tag": get_name_tag(im), "snapshots": getSnapshotsOf(im), "description": im.description, "PROD": isProduction(im) } imageDicts.append(imageDict) return imageDicts def getSnapshotsD(region): """ return a list of dictionaries representing snapshots from one region """ # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it) snapshots = getSnapshots(region) snapshotsDicts = [] ims = getImages(region) for s in snapshots: amis = getAmisOf(s, ims) amiIds = [] amiKeeps = [] if len(amis) == 1: amiIds = amis[0].id.encode() amiKeeps = getKeepTag(amis[0]) elif len(amis) == 0: amiIds = "-------no-AMI-found" amiKeeps = "-------no-AMI-found" else: for a in amis: amiIds.append(a.id.encode()) amiKeeps.append(getKeepTag(a)) snapshotsDict = {"id": s.id, "status": s.status, "region": s.region.name, "progress": s.progress, "start_time": s.start_time, "volume_id": s.volume_id, "volume_size": s.volume_size, "KEEP-tag": getKeepTag(s), "Name": get_name_tag(s), "AMI(s)": amiIds, "AMI_KEEP-tags": amiKeeps, "PROD": isProduction(s), "Description": s.description } snapshotsDicts.append(snapshotsDict) return snapshotsDicts def getVolumesD(region): """ return a list of dictionaries representing volumes from one region """ volumes = getVolumes(region) instances = getInstancesD(region) volumesDicts = [] for v in volumesDicts: volumesDict = {"id": v.id, "KEEP-tag": getKeepTag(v), "instance_KEEP-tag": getKeepTag(getInstanceOf(v)), "instance": v.attach_data.instance_id, "status": v.status, "size": v.size, "create-time": v.create_time, "region": v.region.name, "zone": v.zone, "snapshot_id": v.snapshot_id, "PROD": isProduction(v) } def getInstancesD(region): """ return a list of dictionaries representing instances for one region, will help with volume-instance-KEEP-tag look-up. Maybe. """ instances = getInstances(region) instancesDicts = {"id": i.id, "KEEP-tag": getKeepTag(i), "instance_type": i.instance_type, "state": i.state, "launch_time": i.launch_time, "security_groups": getGroups(i), "region": i.region.name, "PROD": isProduction(i) } ########## Seems to work ################### def getAmisOf(snapshot, images): """retrieve list of AMIs that refer to a given snapshot""" amis = [] for im in images: snapshotsOfThisIm = getSnapshotsOf(im) for soti in snapshotsOfThisIm: if soti == snapshot.id: amis.append(im) return amis def getKeepTag(obj): """If tag with key='KEEP' exists, return its value (can be an empty string), else it's '-------no-tag'""" if 'KEEP' in obj.tags: return obj.tags['KEEP'] else: return "-------no-tag" # try: # tag = obj.tags['KEEP'] # except: # # Note: some with empty KEEP-tags, through web console they look the same as those untagged # return "-----" # return tag def isProduction(obj): """Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key""" return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD') def get_name_tag(obj): """Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources""" if 'Name' in obj.tags: return obj.tags['Name'] else: return "" def getGroups(instance): if len(instance.groups) == 1: # if there's only one group, then unpack it return instance.groups[0].name else: # in the not-expected case where there is more than one groups, deal with it groupList = [] for g in instance.groups: groupList.append(g.name) return groupList def getInstanceOf(volume): """ Returns the actual instance (if only instance_id is needed, can access directly from volume) (if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances) """ # ughhhhhhhh refactor later (shouldn't do this for every single volume, takes forever) creds = credentials() conn = ec2.connect_to_region(volume.region.name, **creds) ins_id = volume.attach_data.instance_id reservation = conn.get_all_instances(instance_ids=ins_id)[0] return reservation.instances[0] ############################################################################################################################### def
(regions): """ Write volumes to file """ print "\nWriting volumes info to output file %s" % volumes_data_output_file with open(volumes_data_output_file, 'w') as f1: f1.write("VOLUMES\n") f1.write( "Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n") for r in regions: volumes = getVolumes(r) print "." # give some feedback to the user for v in volumes: f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size, v.create_time, v.region.name, v.zone, v.snapshot_id)) def generateInfoSnapshots(regions): """ Write snapshots to file """ print "Writing snapshots info to output file %s" % snapshots_data_output_file snapshots = [] for r in regions: snapshots += getSnapshotsD(r) print "." # feedback for the user with open(snapshots_data_output_file, 'w') as f2: f2.write("SNAPSHOTS\n") f2.write( "Name\tsnapshot_id\tKEEP-tag_of_snapshot\tKEEP-tag_of_AMI\tproduction?\tassociated_AMI\tstart_time\tstatus" "\tregion\tprogress\tassociated_volume\tvolume_size\tdescription\n\n") for s in snapshots: f2.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s
generateInfoVolumes
identifier_name
enum.go
RatingCode = 2 // 2 = Clean RatingCodeExplicitOld RatingCode = 4 // 4 = Explicit (old) ) type PlayGapMode int const ( PlayGapInsertGap PlayGapMode = 0 // Insert Gap PlayGapNoGap PlayGapMode = 1 // No Gap ) type AppleStoreAccountType int const ( AppleStoreAccountTypeITunes AppleStoreAccountType = 0 AppleStoreAccountTypeAOL AppleStoreAccountType = 1 ) type LocationRole int const ( LocationRoleShooting LocationRole = 0 LocationRoleReal LocationRole = 1 LocationRoleFictional LocationRole = 2 ) type AppleStoreCountry int const ( AppleStoreUSA AppleStoreCountry = 143441 // United States AppleStoreFRA AppleStoreCountry = 143442 // France AppleStoreDEU AppleStoreCountry = 143443 // Germany AppleStoreGBR AppleStoreCountry = 143444 // United Kingdom AppleStoreAUT AppleStoreCountry = 143445 // Austria AppleStoreBEL AppleStoreCountry = 143446 // Belgium AppleStoreFIN AppleStoreCountry = 143447 // Finland AppleStoreGRC AppleStoreCountry = 143448 // Greece AppleStoreIRL AppleStoreCountry = 143449 // Ireland AppleStoreITA AppleStoreCountry = 143450 // Italy AppleStoreLUX AppleStoreCountry = 143451 // Luxembourg AppleStoreNLD AppleStoreCountry = 143452 // Netherlands AppleStorePRT AppleStoreCountry = 143453 // Portugal AppleStoreESP AppleStoreCountry = 143454 // Spain AppleStoreCAN AppleStoreCountry = 143455 // Canada AppleStoreSWE AppleStoreCountry = 143456 // Sweden AppleStoreNOR AppleStoreCountry = 143457 // Norway AppleStoreDNK AppleStoreCountry = 143458 // Denmark AppleStoreCHE AppleStoreCountry = 143459 // Switzerland AppleStoreAUS AppleStoreCountry = 143460 // Australia AppleStoreNZL AppleStoreCountry = 143461 // New Zealand AppleStoreJPN AppleStoreCountry = 143462 // Japan AppleStoreHKG AppleStoreCountry = 143463 // Hong Kong AppleStoreSGP AppleStoreCountry = 143464 // Singapore AppleStoreCHN AppleStoreCountry = 143465 // China AppleStoreKOR AppleStoreCountry = 143466 // Republic of Korea AppleStoreIND AppleStoreCountry = 143467 // India AppleStoreMEX AppleStoreCountry = 143468 // Mexico AppleStoreRUS AppleStoreCountry = 143469 // Russia AppleStoreTWN AppleStoreCountry = 143470 // Taiwan AppleStoreVNM AppleStoreCountry = 143471 // Vietnam AppleStoreZAF AppleStoreCountry = 143472 // South Africa AppleStoreMYS AppleStoreCountry = 143473 // Malaysia AppleStorePHL AppleStoreCountry = 143474 // Philippines AppleStoreTHA AppleStoreCountry = 143475 // Thailand AppleStoreIDN AppleStoreCountry = 143476 // Indonesia AppleStorePAK AppleStoreCountry = 143477 // Pakistan AppleStorePOL AppleStoreCountry = 143478 // Poland AppleStoreSAU AppleStoreCountry = 143479 // Saudi Arabia AppleStoreTUR AppleStoreCountry = 143480 // Turkey AppleStoreARE AppleStoreCountry = 143481 // United Arab Emirates AppleStoreHUN AppleStoreCountry = 143482 // Hungary AppleStoreCHL AppleStoreCountry = 143483 // Chile AppleStoreNPL AppleStoreCountry = 143484 // Nepal AppleStorePAN AppleStoreCountry = 143485 // Panama AppleStoreLKA AppleStoreCountry = 143486 // Sri Lanka AppleStoreROU AppleStoreCountry = 143487 // Romania AppleStoreCZE AppleStoreCountry = 143489 // Czech Republic AppleStoreISR AppleStoreCountry = 143491 // Israel AppleStoreUKR AppleStoreCountry = 143492 // Ukraine AppleStoreKWT AppleStoreCountry = 143493 // Kuwait AppleStoreHRV AppleStoreCountry = 143494 // Croatia AppleStoreCRI AppleStoreCountry = 143495 // Costa Rica AppleStoreSVK AppleStoreCountry = 143496 // Slovakia AppleStoreLBN AppleStoreCountry = 143497 // Lebanon AppleStoreQAT AppleStoreCountry = 143498 // Qatar AppleStoreSVN AppleStoreCountry = 143499 // Slovenia AppleStoreCOL AppleStoreCountry = 143501 // Colombia AppleStoreVEN AppleStoreCountry = 143502 // Venezuela AppleStoreBRA AppleStoreCountry = 143503 // Brazil AppleStoreGTM AppleStoreCountry = 143504 // Guatemala AppleStoreARG AppleStoreCountry = 143505 // Argentina AppleStoreSLV AppleStoreCountry = 143506 // El Salvador AppleStorePER AppleStoreCountry = 143507 // Peru AppleStoreDOM AppleStoreCountry = 143508 // Dominican Republic AppleStoreECU AppleStoreCountry = 143509 // Ecuador AppleStoreHND AppleStoreCountry = 143510 // Honduras AppleStoreJAM AppleStoreCountry = 143511 // Jamaica AppleStoreNIC AppleStoreCountry = 143512 // Nicaragua AppleStorePRY AppleStoreCountry = 143513 // Paraguay AppleStoreURY AppleStoreCountry = 143514 // Uruguay AppleStoreMAC AppleStoreCountry = 143515 // Macau AppleStoreEGY AppleStoreCountry = 143516 // Egypt AppleStoreKAZ AppleStoreCountry = 143517 // Kazakhstan AppleStoreEST AppleStoreCountry = 143518 // Estonia AppleStoreLVA AppleStoreCountry = 143519 // Latvia AppleStoreLTU AppleStoreCountry = 143520 // Lithuania AppleStoreMLT AppleStoreCountry = 143521 // Malta AppleStoreMDA AppleStoreCountry = 143523 // Moldova AppleStoreARM AppleStoreCountry = 143524 // Armenia AppleStoreBWA AppleStoreCountry = 143525 // Botswana AppleStoreBGR AppleStoreCountry = 143526 // Bulgaria AppleStoreJOR AppleStoreCountry = 143528 // Jordan AppleStoreKEN AppleStoreCountry = 143529 // Kenya AppleStoreMKD AppleStoreCountry = 143530 // Macedonia AppleStoreMDG AppleStoreCountry = 143531 // Madagascar AppleStoreMLI AppleStoreCountry = 143532 // Mali AppleStoreMUS AppleStoreCountry = 143533 // Mauritius AppleStoreNER AppleStoreCountry = 143534 // Niger AppleStoreSEN AppleStoreCountry = 143535 // Senegal AppleStoreTUN AppleStoreCountry = 143536 // Tunisia AppleStoreUGA AppleStoreCountry = 143537 // Uganda AppleStoreAIA AppleStoreCountry = 143538 // Anguilla AppleStoreBHS AppleStoreCountry = 143539 // Bahamas AppleStoreATG AppleStoreCountry = 143540 // Antigua and Barbuda AppleStoreBRB AppleStoreCountry = 143541 // Barbados AppleStoreBMU AppleStoreCountry = 143542 // Bermuda AppleStoreVGB AppleStoreCountry = 143543 // British Virgin Islands AppleStoreCYM AppleStoreCountry = 143544 // Cayman Islands
random_line_split
enum.go
case MediaTypePodcast: return "Podcast" default: buf := bytes.Buffer{} buf.WriteByte('(') buf.WriteString(strconv.FormatInt(int64(x), 10)) buf.WriteByte(')') return buf.String() } } type RatingCode int const ( RatingCodeNone RatingCode = 0 // 0 = None RatingCodeExplicit RatingCode = 1 // 1 = Explicit RatingCodeClean RatingCode = 2 // 2 = Clean RatingCodeExplicitOld RatingCode = 4 // 4 = Explicit (old) ) type PlayGapMode int const ( PlayGapInsertGap PlayGapMode = 0 // Insert Gap PlayGapNoGap PlayGapMode = 1 // No Gap ) type AppleStoreAccountType int const ( AppleStoreAccountTypeITunes AppleStoreAccountType = 0 AppleStoreAccountTypeAOL AppleStoreAccountType = 1 ) type LocationRole int const ( LocationRoleShooting LocationRole = 0 LocationRoleReal LocationRole = 1 LocationRoleFictional LocationRole = 2 ) type AppleStoreCountry int const ( AppleStoreUSA AppleStoreCountry = 143441 // United States AppleStoreFRA AppleStoreCountry = 143442 // France AppleStoreDEU AppleStoreCountry = 143443 // Germany AppleStoreGBR AppleStoreCountry = 143444 // United Kingdom AppleStoreAUT AppleStoreCountry = 143445 // Austria AppleStoreBEL AppleStoreCountry = 143446 // Belgium AppleStoreFIN AppleStoreCountry = 143447 // Finland AppleStoreGRC AppleStoreCountry = 143448 // Greece AppleStoreIRL AppleStoreCountry = 143449 // Ireland AppleStoreITA AppleStoreCountry = 143450 // Italy AppleStoreLUX AppleStoreCountry = 143451 // Luxembourg AppleStoreNLD AppleStoreCountry = 143452 // Netherlands AppleStorePRT AppleStoreCountry = 143453 // Portugal AppleStoreESP AppleStoreCountry = 143454 // Spain AppleStoreCAN AppleStoreCountry = 143455 // Canada AppleStoreSWE AppleStoreCountry = 143456 // Sweden AppleStoreNOR AppleStoreCountry = 143457 // Norway AppleStoreDNK AppleStoreCountry = 143458 // Denmark AppleStoreCHE AppleStoreCountry = 143459 // Switzerland AppleStoreAUS AppleStoreCountry = 143460 // Australia AppleStoreNZL AppleStoreCountry = 143461 // New Zealand AppleStoreJPN AppleStoreCountry = 143462 // Japan AppleStoreHKG AppleStoreCountry = 143463 // Hong Kong AppleStoreSGP AppleStoreCountry = 143464 // Singapore AppleStoreCHN AppleStoreCountry = 143465 // China AppleStoreKOR AppleStoreCountry = 143466 // Republic of Korea AppleStoreIND AppleStoreCountry = 143467 // India AppleStoreMEX AppleStoreCountry = 143468 // Mexico AppleStoreRUS AppleStoreCountry = 143469 // Russia AppleStoreTWN AppleStoreCountry = 143470 // Taiwan AppleStoreVNM AppleStoreCountry = 143471 // Vietnam AppleStoreZAF AppleStoreCountry = 143472 // South Africa AppleStoreMYS AppleStoreCountry = 143473 // Malaysia AppleStorePHL AppleStoreCountry = 143474 // Philippines AppleStoreTHA AppleStoreCountry = 143475 // Thailand AppleStoreIDN AppleStoreCountry = 143476 // Indonesia AppleStorePAK AppleStoreCountry = 143477 // Pakistan AppleStorePOL AppleStoreCountry = 143478 // Poland AppleStoreSAU AppleStoreCountry = 143479 // Saudi Arabia AppleStoreTUR AppleStoreCountry = 143480 // Turkey AppleStoreARE AppleStoreCountry = 143481 // United Arab Emirates AppleStoreHUN AppleStoreCountry = 143482 // Hungary AppleStoreCHL AppleStoreCountry = 143483 // Chile AppleStoreNPL AppleStoreCountry = 143484 // Nepal AppleStorePAN AppleStoreCountry = 143485 // Panama AppleStoreLKA AppleStoreCountry = 143486 // Sri Lanka AppleStoreROU AppleStoreCountry = 143487 // Romania AppleStoreCZE AppleStoreCountry = 143489 // Czech Republic AppleStoreISR AppleStoreCountry = 143491 // Israel AppleStoreUKR AppleStoreCountry = 143492 // Ukraine AppleStoreKWT AppleStoreCountry = 143493 // Kuwait AppleStoreHRV AppleStoreCountry = 143494 // Croatia AppleStoreCRI AppleStoreCountry = 143495 // Costa Rica AppleStoreSVK AppleStoreCountry = 143496 // Slovakia AppleStoreLBN AppleStoreCountry = 143497 // Lebanon AppleStoreQAT AppleStoreCountry = 143498 // Qatar AppleStoreSVN AppleStoreCountry = 143499 // Slovenia AppleStoreCOL AppleStoreCountry = 143501 // Colombia AppleStoreVEN AppleStoreCountry = 143502 // Venezuela AppleStoreBRA AppleStoreCountry = 143503 // Brazil AppleStoreGTM AppleStoreCountry = 143504 // Guatemala AppleStoreARG AppleStoreCountry = 143505 // Argentina AppleStoreSLV AppleStoreCountry = 143506 // El Salvador AppleStorePER AppleStoreCountry = 143507 // Peru AppleStoreDOM AppleStoreCountry = 143508 // Dominican Republic AppleStoreECU AppleStoreCountry = 143509 // Ecuador AppleStoreHND AppleStoreCountry = 143510 // Honduras AppleStoreJAM AppleStoreCountry = 143511 // Jamaica AppleStoreNIC AppleStoreCountry = 143512 // Nicaragua AppleStorePRY AppleStoreCountry = 143513 // Paraguay AppleStoreURY AppleStoreCountry = 143514 // Uruguay AppleStoreMAC AppleStoreCountry = 143515 // Macau AppleStoreEGY AppleStoreCountry = 143516 // Egypt AppleStoreKAZ AppleStoreCountry = 143517 // Kazakhstan AppleStoreEST AppleStoreCountry = 143518 // Estonia AppleStoreLVA AppleStoreCountry = 143519 // Latvia AppleStoreLTU AppleStoreCountry = 143520 // Lithuania AppleStoreMLT AppleStoreCountry = 143521 // Malta AppleStoreMDA AppleStoreCountry = 143523 // Moldova AppleStoreARM AppleStoreCountry = 143524 // Armenia AppleStoreBWA AppleStoreCountry = 143525 // Botswana AppleStoreBGR AppleStoreCountry = 143526 // Bulgaria AppleStoreJOR AppleStoreCountry = 143528 // Jordan AppleStoreKEN AppleStoreCountry = 143529 // Kenya AppleStoreMKD AppleStoreCountry = 143530 // Macedonia AppleStoreMDG AppleStoreCountry = 143531 // Madagascar AppleStoreMLI AppleStoreCountry = 143532 // Mali AppleStoreMUS AppleStoreCountry = 143533 // Mauritius AppleStoreNER AppleStoreCountry = 143534 // Niger Apple
{ switch x { case MediaTypeHomeVideo: return "Home Video" case MediaTypeMusic: return "Music" case MediaTypeAudiobook: return "Audiobook" case MediaTypeBookmark: return "Whacked Bookmark" case MediaTypeMusicVideo: return "Music Video" case MediaTypeMovie: return "Movie" case MediaTypeTVShow: return "TV Show" case MediaTypeBooklet: return "Booklet" case MediaTypeRingtone: return "Ringtone"
identifier_body
enum.go
() string { switch x { case MediaTypeHomeVideo: return "Home Video" case MediaTypeMusic: return "Music" case MediaTypeAudiobook: return "Audiobook" case MediaTypeBookmark: return "Whacked Bookmark" case MediaTypeMusicVideo: return "Music Video" case MediaTypeMovie: return "Movie" case MediaTypeTVShow: return "TV Show" case MediaTypeBooklet: return "Booklet" case MediaTypeRingtone: return "Ringtone" case MediaTypePodcast: return "Podcast" default: buf := bytes.Buffer{} buf.WriteByte('(') buf.WriteString(strconv.FormatInt(int64(x), 10)) buf.WriteByte(')') return buf.String() } } type RatingCode int const ( RatingCodeNone RatingCode = 0 // 0 = None RatingCodeExplicit RatingCode = 1 // 1 = Explicit RatingCodeClean RatingCode = 2 // 2 = Clean RatingCodeExplicitOld RatingCode = 4 // 4 = Explicit (old) ) type PlayGapMode int const ( PlayGapInsertGap PlayGapMode = 0 // Insert Gap PlayGapNoGap PlayGapMode = 1 // No Gap ) type AppleStoreAccountType int const ( AppleStoreAccountTypeITunes AppleStoreAccountType = 0 AppleStoreAccountTypeAOL AppleStoreAccountType = 1 ) type LocationRole int const ( LocationRoleShooting LocationRole = 0 LocationRoleReal LocationRole = 1 LocationRoleFictional LocationRole = 2 ) type AppleStoreCountry int const ( AppleStoreUSA AppleStoreCountry = 143441 // United States AppleStoreFRA AppleStoreCountry = 143442 // France AppleStoreDEU AppleStoreCountry = 143443 // Germany AppleStoreGBR AppleStoreCountry = 143444 // United Kingdom AppleStoreAUT AppleStoreCountry = 143445 // Austria AppleStoreBEL AppleStoreCountry = 143446 // Belgium AppleStoreFIN AppleStoreCountry = 143447 // Finland AppleStoreGRC AppleStoreCountry = 143448 // Greece AppleStoreIRL AppleStoreCountry = 143449 // Ireland AppleStoreITA AppleStoreCountry = 143450 // Italy AppleStoreLUX AppleStoreCountry = 143451 // Luxembourg AppleStoreNLD AppleStoreCountry = 143452 // Netherlands AppleStorePRT AppleStoreCountry = 143453 // Portugal AppleStoreESP AppleStoreCountry = 143454 // Spain AppleStoreCAN AppleStoreCountry = 143455 // Canada AppleStoreSWE AppleStoreCountry = 143456 // Sweden AppleStoreNOR AppleStoreCountry = 143457 // Norway AppleStoreDNK AppleStoreCountry = 143458 // Denmark AppleStoreCHE AppleStoreCountry = 143459 // Switzerland AppleStoreAUS AppleStoreCountry = 143460 // Australia AppleStoreNZL AppleStoreCountry = 143461 // New Zealand AppleStoreJPN AppleStoreCountry = 143462 // Japan AppleStoreHKG AppleStoreCountry = 143463 // Hong Kong AppleStoreSGP AppleStoreCountry = 143464 // Singapore AppleStoreCHN AppleStoreCountry = 143465 // China AppleStoreKOR AppleStoreCountry = 143466 // Republic of Korea AppleStoreIND AppleStoreCountry = 143467 // India AppleStoreMEX AppleStoreCountry = 143468 // Mexico AppleStoreRUS AppleStoreCountry = 143469 // Russia AppleStoreTWN AppleStoreCountry = 143470 // Taiwan AppleStoreVNM AppleStoreCountry = 143471 // Vietnam AppleStoreZAF AppleStoreCountry = 143472 // South Africa AppleStoreMYS AppleStoreCountry = 143473 // Malaysia AppleStorePHL AppleStoreCountry = 143474 // Philippines AppleStoreTHA AppleStoreCountry = 143475 // Thailand AppleStoreIDN AppleStoreCountry = 143476 // Indonesia AppleStorePAK AppleStoreCountry = 143477 // Pakistan AppleStorePOL AppleStoreCountry = 143478 // Poland AppleStoreSAU AppleStoreCountry = 143479 // Saudi Arabia AppleStoreTUR AppleStoreCountry = 143480 // Turkey AppleStoreARE AppleStoreCountry = 143481 // United Arab Emirates AppleStoreHUN AppleStoreCountry = 143482 // Hungary AppleStoreCHL AppleStoreCountry = 143483 // Chile AppleStoreNPL AppleStoreCountry = 143484 // Nepal AppleStorePAN AppleStoreCountry = 143485 // Panama AppleStoreLKA AppleStoreCountry = 143486 // Sri Lanka AppleStoreROU AppleStoreCountry = 143487 // Romania AppleStoreCZE AppleStoreCountry = 143489 // Czech Republic AppleStoreISR AppleStoreCountry = 143491 // Israel AppleStoreUKR AppleStoreCountry = 143492 // Ukraine AppleStoreKWT AppleStoreCountry = 143493 // Kuwait AppleStoreHRV AppleStoreCountry = 143494 // Croatia AppleStoreCRI AppleStoreCountry = 143495 // Costa Rica AppleStoreSVK AppleStoreCountry = 143496 // Slovakia AppleStoreLBN AppleStoreCountry = 143497 // Lebanon AppleStoreQAT AppleStoreCountry = 143498 // Qatar AppleStoreSVN AppleStoreCountry = 143499 // Slovenia AppleStoreCOL AppleStoreCountry = 143501 // Colombia AppleStoreVEN AppleStoreCountry = 143502 // Venezuela AppleStoreBRA AppleStoreCountry = 143503 // Brazil AppleStoreGTM AppleStoreCountry = 143504 // Guatemala AppleStoreARG AppleStoreCountry = 143505 // Argentina AppleStoreSLV AppleStoreCountry = 143506 // El Salvador AppleStorePER AppleStoreCountry = 143507 // Peru AppleStoreDOM AppleStoreCountry = 143508 // Dominican Republic AppleStoreECU AppleStoreCountry = 143509 // Ecuador AppleStoreHND AppleStoreCountry = 143510 // Honduras AppleStoreJAM AppleStoreCountry = 143511 // Jamaica AppleStoreNIC AppleStoreCountry = 143512 // Nicaragua AppleStorePRY AppleStoreCountry = 143513 // Paraguay AppleStoreURY AppleStoreCountry = 143514 // Uruguay AppleStoreMAC AppleStoreCountry = 143515 // Macau AppleStoreEGY AppleStoreCountry = 143516 // Egypt AppleStoreKAZ AppleStoreCountry = 143517 // Kazakhstan AppleStoreEST AppleStoreCountry = 143518 // Estonia AppleStoreLVA AppleStoreCountry = 143519 // Latvia AppleStoreLTU AppleStoreCountry = 143520 // Lithuania AppleStoreMLT AppleStoreCountry = 143521 // Malta AppleStoreMDA AppleStoreCountry = 143523 // Moldova AppleStoreARM AppleStoreCountry = 143524 // Armenia AppleStoreBWA AppleStoreCountry = 143525 // Botswana AppleStoreBGR AppleStoreCountry = 143526 // Bulgaria AppleStoreJOR AppleStoreCountry = 143528 // Jordan AppleStoreKEN AppleStoreCountry = 143529 // Kenya AppleStoreMKD AppleStoreCountry = 143530 // Macedonia AppleStoreMDG AppleStoreCountry = 143531 // Madagascar AppleStoreMLI AppleStoreCountry = 143532 // Mali AppleStoreMUS AppleStoreCountry = 143533 // Mauritius AppleStoreNER AppleStoreCountry = 143534 // Niger
String
identifier_name
insert_organisations.py
def select_from_list(matches): for m, (name, alias) in enumerate(matches): print( " %4d %s %s" % (m, name, (alias and ("[%s]" % alias) or "")) ) print() print("Choose name or non-numeric to exit: ", end=' ') choice = input() try: choice = int(choice) except ValueError: LOG.warning("Could not convert %s to integer.", choice) return None if choice >= len(matches) or choice < 0: LOG.error("%d is out of range.", choice) return None return matches[choice][0] def closest_names(name, names, orm): matches = set() lower = orm.query(Org.name) \ .filter(Org.name > name) \ .order_by(Org.name.asc()) \ .limit(3) \ .all() higher = orm.query(Org.name) \ .filter(Org.name < name) \ .order_by(Org.name.desc()) \ .limit(3) \ .all() for (name2, ) in lower + higher: matches.add((name2, None)) for name2, alias in names: ratio = Levenshtein.ratio(name.lower(), name2.lower()) if ratio > 0.8: matches.add((name2, alias)) if not matches: return None matches = sorted(list(matches)) print() print("\n%s\n" % name) existing_name = select_from_list(matches) return existing_name def get_org(orm, name): name = name.lower() query = orm.query(Org) \ .filter(func.lower(Org.name) == name) try: return query.one() except NoResultFound: pass except MultipleResultsFound: LOG.warning("Multiple results found for name '%s'.", name) return query.first() query = orm.query(Orgalias) \ .filter(func.lower(Orgalias.name) == name) try: return query.one().org except NoResultFound: pass except MultipleResultsFound: LOG.warning("Multiple results found for alias '%s'.", name) return query.first().org return None def get_candidates(es, text): data = { "query": { "multi_match": { "fields": [ "alias_all.straight^3", "alias_all.fuzzy", ], "query": text } } } LOG.debug("Search query: %s", repr(data)) results = es.search(data, index="mango", doc_type="org") LOG.debug("Results: %s", repr(results)) org_list = [] for hit in results["hits"]["hits"]: source = hit["_source"] source["score"] = hit["_score"] org_list.append(source) return org_list def
(es, text_orig, context=None, just_search=False): """Returns False to skip""" # pylint: disable=redefined-variable-type # `org_id` may be `None`, `False` or string. org_id = None text_search = text_orig while True: if context and context.get("refresh", None): # Necessarily imprecise way of allowing recently # inserted alias to appear in results time.sleep(1) context["refresh"] = False candidates = get_candidates(es, text_search) if not candidates: break sys.stderr.write( ("\nFind: '\033[92m%s\033[0m'\n\n" % (text_orig)) ) for i, org in enumerate(candidates, 1): sys.stderr.write( " %4d: \033[37m%-5d %s\033[0m\n" % ( i, org["org_id"], org["score"]) ) for name in org["alias_all"]: sys.stderr.write( (" \033[94m%s\033[0m\n" % name) ) sys.stderr.write("\n") sys.stderr.write(" Empty: None of the above\n") sys.stderr.write(" Text: Alternative search\n: ") sys.stderr.write(" '-': Skip\n\n: ") if just_search: return choice = input() choice = choice.strip() if not len(choice): org_id = None break sys.stderr.write("\n") if choice == "-": org_id = False break sys.stderr.write("\n") try: choice = int(choice) except ValueError: text_search = choice continue if choice == 0: org_id = " " break if choice > len(candidates): continue org_id = candidates[choice - 1]["org_id"] break return org_id def select_org(orm, name, context, search=True): """Returns False to skip""" name = sanitise_name(name) org = get_org(orm, name) if org: return org if not search: return es = orm.get_bind().search if es is None: LOG.error("Cannot connect to Elasticsearch.") sys.exit(1) org_id = search_org(es, name, context=context) if not org_id: return org_id try: org = orm.query(Org).filter_by(org_id=org_id).one() except NoResultFound as e: LOG.warning("No result found for '%s', org_id '%d'.", name, org_id) raise e # Adds new `Orgalias` to `Org`. Orgalias(name, org, moderation_user=context["user"], public=None) context["refresh"] = True es.refresh() # Calling `refresh` here appears not to make any difference, but in # theory should be a good idea. # Waiting for inserted org to be searchable here doesn't seem to work. return org def insert_fast( data, orm, public=None, tag_names=None, dry_run=None, address_exclusive=None, search=True, org_id_whitelist=None ): user = orm.query(User).filter_by(user_id=-1).one() tag_names = tag_names or [] tags = [] for tag_name in tag_names: tag = Orgtag.get( orm, tag_name, moderation_user=user, public=public, ) tags.append(tag) context = { "refresh": False, "user": user } for chunk in data: # pylint: disable=maybe-no-member has_address = None LOG.info("\n%s\n", chunk["name"]) org = select_org(orm, chunk["name"], context, search) if ( org is False or (org_id_whitelist and ((not org) or (org.org_id not in org_id_whitelist))) ): LOG.info("Skipping org: %s", org and org.org_id) orm.rollback() continue if not org: LOG.warning("\nCreating org %s\n", chunk["name"]) org = Org(chunk["name"], moderation_user=user, public=public,) orm.add(org) # Querying org address list on a new org would trigger a commit has_address = False else: has_address = bool(org.address_list) if tags: org.orgtag_list = list(set(tags + org.orgtag_list)) if "tag" in chunk: for tag_name in chunk["tag"]: tag = Orgtag.get( orm, tag_name, moderation_user=user, public=public, ) if tag not in org.orgtag_list: org.orgtag_list.append(tag) if "address" in chunk and not (address_exclusive and has_address): for address_data in chunk["address"]: if address_data["postal"] in \ [address.postal for address in org.address_list]: continue address = Address( address_data["postal"], address_data["source"], moderation_user=user, public=None, ) address.geocode() LOG.debug(address) orm.add(address) org.address_list.append(address) if "contact" in chunk: for contact_data in chunk["contact"]: text = sanitise_name(contact_data["text"]) match = False for contact in org.contact_list: if ( contact.text == text and contact.medium.name == contact_data["medium"] ): match = True break if match: continue try: medium = orm.query(Medium) \ .filter_by(name=contact_data["medium"]) \ .one() except NoResultFound: LOG.warning("%s: No such medium", contact_data["medium"]) continue contact = Contact( medium, text, source=contact_data["source"], moderation_user=user, public=None, ) LOG.debug(contact) orm.add(contact) org.contact_list.append(contact) if "note" in chunk: for note_data in chunk["note"]: if note_data["text"] in [note.text for note in org.note_list]: continue note = Note( note_data
search_org
identifier_name
insert_organisations.py
def select_from_list(matches): for m, (name, alias) in enumerate(matches): print( " %4d %s %s" % (m, name, (alias and ("[%s]" % alias) or "")) ) print() print("Choose name or non-numeric to exit: ", end=' ') choice = input() try: choice = int(choice) except ValueError: LOG.warning("Could not convert %s to integer.", choice) return None if choice >= len(matches) or choice < 0: LOG.error("%d is out of range.", choice) return None return matches[choice][0] def closest_names(name, names, orm): matches = set() lower = orm.query(Org.name) \ .filter(Org.name > name) \ .order_by(Org.name.asc()) \ .limit(3) \ .all() higher = orm.query(Org.name) \ .filter(Org.name < name) \ .order_by(Org.name.desc()) \ .limit(3) \ .all() for (name2, ) in lower + higher: matches.add((name2, None)) for name2, alias in names: ratio = Levenshtein.ratio(name.lower(), name2.lower()) if ratio > 0.8: matches.add((name2, alias)) if not matches: return None matches = sorted(list(matches)) print() print("\n%s\n" % name) existing_name = select_from_list(matches) return existing_name def get_org(orm, name): name = name.lower() query = orm.query(Org) \ .filter(func.lower(Org.name) == name) try: return query.one() except NoResultFound: pass except MultipleResultsFound: LOG.warning("Multiple results found for name '%s'.", name) return query.first() query = orm.query(Orgalias) \ .filter(func.lower(Orgalias.name) == name) try: return query.one().org except NoResultFound: pass except MultipleResultsFound: LOG.warning("Multiple results found for alias '%s'.", name) return query.first().org return None def get_candidates(es, text): data = { "query": { "multi_match": { "fields": [ "alias_all.straight^3", "alias_all.fuzzy", ], "query": text } } } LOG.debug("Search query: %s", repr(data)) results = es.search(data, index="mango", doc_type="org") LOG.debug("Results: %s", repr(results)) org_list = [] for hit in results["hits"]["hits"]: source = hit["_source"] source["score"] = hit["_score"] org_list.append(source) return org_list def search_org(es, text_orig, context=None, just_search=False): """Returns False to skip""" # pylint: disable=redefined-variable-type # `org_id` may be `None`, `False` or string. org_id = None text_search = text_orig while True: if context and context.get("refresh", None): # Necessarily imprecise way of allowing recently # inserted alias to appear in results time.sleep(1) context["refresh"] = False candidates = get_candidates(es, text_search) if not candidates: break sys.stderr.write( ("\nFind: '\033[92m%s\033[0m'\n\n" % (text_orig)) ) for i, org in enumerate(candidates, 1): sys.stderr.write( " %4d: \033[37m%-5d %s\033[0m\n" % ( i, org["org_id"], org["score"]) ) for name in org["alias_all"]: sys.stderr.write( (" \033[94m%s\033[0m\n" % name) ) sys.stderr.write("\n") sys.stderr.write(" Empty: None of the above\n") sys.stderr.write(" Text: Alternative search\n: ") sys.stderr.write(" '-': Skip\n\n: ") if just_search: return choice = input() choice = choice.strip() if not len(choice): org_id = None break sys.stderr.write("\n") if choice == "-": org_id = False break sys.stderr.write("\n") try: choice = int(choice) except ValueError: text_search = choice continue if choice == 0: org_id = " " break if choice > len(candidates): continue org_id = candidates[choice - 1]["org_id"] break return org_id def select_org(orm, name, context, search=True): """Returns False to skip""" name = sanitise_name(name) org = get_org(orm, name) if org: return org if not search: return es = orm.get_bind().search if es is None: LOG.error("Cannot connect to Elasticsearch.") sys.exit(1) org_id = search_org(es, name, context=context) if not org_id: return org_id try: org = orm.query(Org).filter_by(org_id=org_id).one() except NoResultFound as e: LOG.warning("No result found for '%s', org_id '%d'.", name, org_id) raise e # Adds new `Orgalias` to `Org`. Orgalias(name, org, moderation_user=context["user"], public=None) context["refresh"] = True es.refresh() # Calling `refresh` here appears not to make any difference, but in # theory should be a good idea. # Waiting for inserted org to be searchable here doesn't seem to work. return org def insert_fast( data, orm, public=None, tag_names=None, dry_run=None, address_exclusive=None, search=True, org_id_whitelist=None ): user = orm.query(User).filter_by(user_id=-1).one() tag_names = tag_names or [] tags = [] for tag_name in tag_names: tag = Orgtag.get( orm, tag_name, moderation_user=user, public=public, ) tags.append(tag) context = { "refresh": False, "user": user } for chunk in data: # pylint: disable=maybe-no-member has_address = None LOG.info("\n%s\n", chunk["name"]) org = select_org(orm, chunk["name"], context, search) if ( org is False or (org_id_whitelist and ((not org) or (org.org_id not in org_id_whitelist))) ): LOG.info("Skipping org: %s", org and org.org_id) orm.rollback() continue if not org: LOG.warning("\nCreating org %s\n", chunk["name"]) org = Org(chunk["name"], moderation_user=user, public=public,) orm.add(org) # Querying org address list on a new org would trigger a commit has_address = False else: has_address = bool(org.address_list) if tags: org.orgtag_list = list(set(tags + org.orgtag_list)) if "tag" in chunk: for tag_name in chunk["tag"]: tag = Orgtag.get( orm, tag_name, moderation_user=user, public=public, ) if tag not in org.orgtag_list: org.orgtag_list.append(tag) if "address" in chunk and not (address_exclusive and has_address): for address_data in chunk["address"]: if address_data["postal"] in \ [address.postal for address in org.address_list]: continue address = Address( address_data["postal"], address_data["source"], moderation_user=user, public=None, ) address.geocode() LOG.debug(address) orm.add(address) org.address_list.append(address) if "contact" in chunk: for contact_data in chunk["contact"]: text = sanitise_name(contact_data["text"]) match = False for contact in org.contact_list: if ( contact.text == text and contact.medium.name == contact_data["medium"] ): match = True break if match: continue try: medium = orm.query(Medium) \ .filter_by(name=contact_data["medium"]) \ .one() except NoResultFound: LOG.warning("%s: No such medium", contact_data["medium"]) continue contact = Contact( medium, text, source=contact_data["source"], moderation_user=user, public=None, ) LOG.debug(contact) orm.add(contact) org.contact_list.append(contact)
for note_data in chunk["note"]: if note_data["text"] in [note.text for note in org.note_list]: continue note = Note( note_data["
if "note" in chunk:
random_line_split
insert_organisations.py
def select_from_list(matches): for m, (name, alias) in enumerate(matches): print( " %4d %s %s" % (m, name, (alias and ("[%s]" % alias) or "")) ) print() print("Choose name or non-numeric to exit: ", end=' ') choice = input() try: choice = int(choice) except ValueError: LOG.warning("Could not convert %s to integer.", choice) return None if choice >= len(matches) or choice < 0: LOG.error("%d is out of range.", choice) return None return matches[choice][0] def closest_names(name, names, orm):
if not matches: return None matches = sorted(list(matches)) print() print("\n%s\n" % name) existing_name = select_from_list(matches) return existing_name def get_org(orm, name): name = name.lower() query = orm.query(Org) \ .filter(func.lower(Org.name) == name) try: return query.one() except NoResultFound: pass except MultipleResultsFound: LOG.warning("Multiple results found for name '%s'.", name) return query.first() query = orm.query(Orgalias) \ .filter(func.lower(Orgalias.name) == name) try: return query.one().org except NoResultFound: pass except MultipleResultsFound: LOG.warning("Multiple results found for alias '%s'.", name) return query.first().org return None def get_candidates(es, text): data = { "query": { "multi_match": { "fields": [ "alias_all.straight^3", "alias_all.fuzzy", ], "query": text } } } LOG.debug("Search query: %s", repr(data)) results = es.search(data, index="mango", doc_type="org") LOG.debug("Results: %s", repr(results)) org_list = [] for hit in results["hits"]["hits"]: source = hit["_source"] source["score"] = hit["_score"] org_list.append(source) return org_list def search_org(es, text_orig, context=None, just_search=False): """Returns False to skip""" # pylint: disable=redefined-variable-type # `org_id` may be `None`, `False` or string. org_id = None text_search = text_orig while True: if context and context.get("refresh", None): # Necessarily imprecise way of allowing recently # inserted alias to appear in results time.sleep(1) context["refresh"] = False candidates = get_candidates(es, text_search) if not candidates: break sys.stderr.write( ("\nFind: '\033[92m%s\033[0m'\n\n" % (text_orig)) ) for i, org in enumerate(candidates, 1): sys.stderr.write( " %4d: \033[37m%-5d %s\033[0m\n" % ( i, org["org_id"], org["score"]) ) for name in org["alias_all"]: sys.stderr.write( (" \033[94m%s\033[0m\n" % name) ) sys.stderr.write("\n") sys.stderr.write(" Empty: None of the above\n") sys.stderr.write(" Text: Alternative search\n: ") sys.stderr.write(" '-': Skip\n\n: ") if just_search: return choice = input() choice = choice.strip() if not len(choice): org_id = None break sys.stderr.write("\n") if choice == "-": org_id = False break sys.stderr.write("\n") try: choice = int(choice) except ValueError: text_search = choice continue if choice == 0: org_id = " " break if choice > len(candidates): continue org_id = candidates[choice - 1]["org_id"] break return org_id def select_org(orm, name, context, search=True): """Returns False to skip""" name = sanitise_name(name) org = get_org(orm, name) if org: return org if not search: return es = orm.get_bind().search if es is None: LOG.error("Cannot connect to Elasticsearch.") sys.exit(1) org_id = search_org(es, name, context=context) if not org_id: return org_id try: org = orm.query(Org).filter_by(org_id=org_id).one() except NoResultFound as e: LOG.warning("No result found for '%s', org_id '%d'.", name, org_id) raise e # Adds new `Orgalias` to `Org`. Orgalias(name, org, moderation_user=context["user"], public=None) context["refresh"] = True es.refresh() # Calling `refresh` here appears not to make any difference, but in # theory should be a good idea. # Waiting for inserted org to be searchable here doesn't seem to work. return org def insert_fast( data, orm, public=None, tag_names=None, dry_run=None, address_exclusive=None, search=True, org_id_whitelist=None ): user = orm.query(User).filter_by(user_id=-1).one() tag_names = tag_names or [] tags = [] for tag_name in tag_names: tag = Orgtag.get( orm, tag_name, moderation_user=user, public=public, ) tags.append(tag) context = { "refresh": False, "user": user } for chunk in data: # pylint: disable=maybe-no-member has_address = None LOG.info("\n%s\n", chunk["name"]) org = select_org(orm, chunk["name"], context, search) if ( org is False or (org_id_whitelist and ((not org) or (org.org_id not in org_id_whitelist))) ): LOG.info("Skipping org: %s", org and org.org_id) orm.rollback() continue if not org: LOG.warning("\nCreating org %s\n", chunk["name"]) org = Org(chunk["name"], moderation_user=user, public=public,) orm.add(org) # Querying org address list on a new org would trigger a commit has_address = False else: has_address = bool(org.address_list) if tags: org.orgtag_list = list(set(tags + org.orgtag_list)) if "tag" in chunk: for tag_name in chunk["tag"]: tag = Orgtag.get( orm, tag_name, moderation_user=user, public=public, ) if tag not in org.orgtag_list: org.orgtag_list.append(tag) if "address" in chunk and not (address_exclusive and has_address): for address_data in chunk["address"]: if address_data["postal"] in \ [address.postal for address in org.address_list]: continue address = Address( address_data["postal"], address_data["source"], moderation_user=user, public=None, ) address.geocode() LOG.debug(address) orm.add(address) org.address_list.append(address) if "contact" in chunk: for contact_data in chunk["contact"]: text = sanitise_name(contact_data["text"]) match = False for contact in org.contact_list: if ( contact.text == text and contact.medium.name == contact_data["medium"] ): match = True break if match: continue try: medium = orm.query(Medium) \ .filter_by(name=contact_data["medium"]) \ .one() except NoResultFound: LOG.warning("%s: No such medium", contact_data["medium"]) continue contact = Contact( medium, text, source=contact_data["source"], moderation_user=user, public=None, ) LOG.debug(contact) orm.add(contact) org.contact_list.append(contact) if "note" in chunk: for note_data in chunk["note"]: if note_data["text"] in [note.text for note in org.note_list]: continue note = Note( note_data
matches = set() lower = orm.query(Org.name) \ .filter(Org.name > name) \ .order_by(Org.name.asc()) \ .limit(3) \ .all() higher = orm.query(Org.name) \ .filter(Org.name < name) \ .order_by(Org.name.desc()) \ .limit(3) \ .all() for (name2, ) in lower + higher: matches.add((name2, None)) for name2, alias in names: ratio = Levenshtein.ratio(name.lower(), name2.lower()) if ratio > 0.8: matches.add((name2, alias))
identifier_body
insert_organisations.py
def select_from_list(matches): for m, (name, alias) in enumerate(matches): print( " %4d %s %s" % (m, name, (alias and ("[%s]" % alias) or "")) ) print() print("Choose name or non-numeric to exit: ", end=' ') choice = input() try: choice = int(choice) except ValueError: LOG.warning("Could not convert %s to integer.", choice) return None if choice >= len(matches) or choice < 0: LOG.error("%d is out of range.", choice) return None return matches[choice][0] def closest_names(name, names, orm): matches = set() lower = orm.query(Org.name) \ .filter(Org.name > name) \ .order_by(Org.name.asc()) \ .limit(3) \ .all() higher = orm.query(Org.name) \ .filter(Org.name < name) \ .order_by(Org.name.desc()) \ .limit(3) \ .all() for (name2, ) in lower + higher: matches.add((name2, None)) for name2, alias in names: ratio = Levenshtein.ratio(name.lower(), name2.lower()) if ratio > 0.8: matches.add((name2, alias)) if not matches: return None matches = sorted(list(matches)) print() print("\n%s\n" % name) existing_name = select_from_list(matches) return existing_name def get_org(orm, name): name = name.lower() query = orm.query(Org) \ .filter(func.lower(Org.name) == name) try: return query.one() except NoResultFound: pass except MultipleResultsFound: LOG.warning("Multiple results found for name '%s'.", name) return query.first() query = orm.query(Orgalias) \ .filter(func.lower(Orgalias.name) == name) try: return query.one().org except NoResultFound: pass except MultipleResultsFound: LOG.warning("Multiple results found for alias '%s'.", name) return query.first().org return None def get_candidates(es, text): data = { "query": { "multi_match": { "fields": [ "alias_all.straight^3", "alias_all.fuzzy", ], "query": text } } } LOG.debug("Search query: %s", repr(data)) results = es.search(data, index="mango", doc_type="org") LOG.debug("Results: %s", repr(results)) org_list = [] for hit in results["hits"]["hits"]: source = hit["_source"] source["score"] = hit["_score"] org_list.append(source) return org_list def search_org(es, text_orig, context=None, just_search=False): """Returns False to skip""" # pylint: disable=redefined-variable-type # `org_id` may be `None`, `False` or string. org_id = None text_search = text_orig while True: if context and context.get("refresh", None): # Necessarily imprecise way of allowing recently # inserted alias to appear in results time.sleep(1) context["refresh"] = False candidates = get_candidates(es, text_search) if not candidates: break sys.stderr.write( ("\nFind: '\033[92m%s\033[0m'\n\n" % (text_orig)) ) for i, org in enumerate(candidates, 1): sys.stderr.write( " %4d: \033[37m%-5d %s\033[0m\n" % ( i, org["org_id"], org["score"]) ) for name in org["alias_all"]: sys.stderr.write( (" \033[94m%s\033[0m\n" % name) ) sys.stderr.write("\n") sys.stderr.write(" Empty: None of the above\n") sys.stderr.write(" Text: Alternative search\n: ") sys.stderr.write(" '-': Skip\n\n: ") if just_search: return choice = input() choice = choice.strip() if not len(choice): org_id = None break sys.stderr.write("\n") if choice == "-": org_id = False break sys.stderr.write("\n") try: choice = int(choice) except ValueError: text_search = choice continue if choice == 0: org_id = " " break if choice > len(candidates): continue org_id = candidates[choice - 1]["org_id"] break return org_id def select_org(orm, name, context, search=True): """Returns False to skip""" name = sanitise_name(name) org = get_org(orm, name) if org: return org if not search:
es = orm.get_bind().search if es is None: LOG.error("Cannot connect to Elasticsearch.") sys.exit(1) org_id = search_org(es, name, context=context) if not org_id: return org_id try: org = orm.query(Org).filter_by(org_id=org_id).one() except NoResultFound as e: LOG.warning("No result found for '%s', org_id '%d'.", name, org_id) raise e # Adds new `Orgalias` to `Org`. Orgalias(name, org, moderation_user=context["user"], public=None) context["refresh"] = True es.refresh() # Calling `refresh` here appears not to make any difference, but in # theory should be a good idea. # Waiting for inserted org to be searchable here doesn't seem to work. return org def insert_fast( data, orm, public=None, tag_names=None, dry_run=None, address_exclusive=None, search=True, org_id_whitelist=None ): user = orm.query(User).filter_by(user_id=-1).one() tag_names = tag_names or [] tags = [] for tag_name in tag_names: tag = Orgtag.get( orm, tag_name, moderation_user=user, public=public, ) tags.append(tag) context = { "refresh": False, "user": user } for chunk in data: # pylint: disable=maybe-no-member has_address = None LOG.info("\n%s\n", chunk["name"]) org = select_org(orm, chunk["name"], context, search) if ( org is False or (org_id_whitelist and ((not org) or (org.org_id not in org_id_whitelist))) ): LOG.info("Skipping org: %s", org and org.org_id) orm.rollback() continue if not org: LOG.warning("\nCreating org %s\n", chunk["name"]) org = Org(chunk["name"], moderation_user=user, public=public,) orm.add(org) # Querying org address list on a new org would trigger a commit has_address = False else: has_address = bool(org.address_list) if tags: org.orgtag_list = list(set(tags + org.orgtag_list)) if "tag" in chunk: for tag_name in chunk["tag"]: tag = Orgtag.get( orm, tag_name, moderation_user=user, public=public, ) if tag not in org.orgtag_list: org.orgtag_list.append(tag) if "address" in chunk and not (address_exclusive and has_address): for address_data in chunk["address"]: if address_data["postal"] in \ [address.postal for address in org.address_list]: continue address = Address( address_data["postal"], address_data["source"], moderation_user=user, public=None, ) address.geocode() LOG.debug(address) orm.add(address) org.address_list.append(address) if "contact" in chunk: for contact_data in chunk["contact"]: text = sanitise_name(contact_data["text"]) match = False for contact in org.contact_list: if ( contact.text == text and contact.medium.name == contact_data["medium"] ): match = True break if match: continue try: medium = orm.query(Medium) \ .filter_by(name=contact_data["medium"]) \ .one() except NoResultFound: LOG.warning("%s: No such medium", contact_data["medium"]) continue contact = Contact( medium, text, source=contact_data["source"], moderation_user=user, public=None, ) LOG.debug(contact) orm.add(contact) org.contact_list.append(contact) if "note" in chunk: for note_data in chunk["note"]: if note_data["text"] in [note.text for note in org.note_list]: continue note = Note( note_data["
return
conditional_block
29.js
"1": "<sup>1</sup> Fjala e Zotit që iu drejtua Joelit, birit të Pethuelit.", "2": "<sup>2</sup> Dëgjoni këtë, o pleq, dëgjoni, ju të gjithë banorë të vendit. A ka ndodhur vallë një gjë e tillë në ditët tuaja apo në ditët e etërve tuaj?", "3": "<sup>3</sup> Tregojani bijve tuaj, dhe bijtë tuaj bijve të tyre, dhe bijtë e tyre brezit tjetër.", "4": "<sup>4</sup> Atë që la krimbi e hëngri karkaleci, atë që la karkaleci e hëngri larva e karkalecit, atë që la larva e hëngri bulkthi.", "5": "<sup>5</sup> Zgjohuni, o të dehur, dhe qani; vajtoni ju të gjithë, që pini verë, për mushtin që ju hoqën nga goja.", "6": "<sup>6</sup> Sepse një komb i fortë dhe i panumërt ka dalë kundër vendit tim. Dhëmbët e tij janë dhëmbë luani, dhe ka stërdhëmbë luaneshe.", "7": "<sup>7</sup> Ka shkatërruar hardhinë time, e ka bërë copë-copë fikun tim, ia ka hequr lëvoren krejt dhe e ka hedhur tutje; degët e tij kanë mbetur të bardha.", "8": "<sup>8</sup> Vajto si një virgjëreshë e veshur me thes për dhëndrin e rinisë së saj.", "9": "<sup>9</sup> Nga shtëpia e Zotit janë zhdukur ofertat e ushqimit dhe libacionet; priftërinjtë, ministrat e Zotit, pikëllohen.", "10": "<sup>10</sup> Fusha është shkretuar, vendi është në zi, sepse gruri u prish, mushti u tha dhe vaji humbi.", "11": "<sup>11</sup> Pikëllohuni, o bujq, vajtoni, o vreshtarë, për grurin dhe për elbin, sepse të korrat e arave humbën.", "12": "<sup>12</sup> Hardhia u tha, fiku u tha, shega, hurma, molla dhe tërë drurët e fushës u thanë; nuk ka gëzim midis bijve të njerëzve.", "13": "<sup>13</sup> Ngjeshuni me thes dhe mbani zi, o priftërinj, vajtoni, ministra të altarit. Ejani, rrini tërë natën të veshur me thasë, o ministra të Perëndisë tim, sepse oferta e ushqimit dhe libacioni u zhduk nga shtëpia e Perëndisë tuaj.", "14": "<sup>14</sup> Shpallni agjërim, thërrisni një kuvend solemn. Mblidhni pleqtë dhe tërë banorët e vendit në shtëpinë e Zotit, Perëndisë tuaj, dhe i klithni Zotit.", "15": "<sup>15</sup> Mjerë ajo ditë! Sepse dita e Zotit është e afërt; po, do të vijë si një shkatërrim nga i Plotfuqishmi.", "16": "<sup>16</sup> A nuk u hoq vallë ushqimi para syve tona, dhe gëzimi dhe hareja nga shtëpia e Perëndisë tonë?", "17": "<sup>17</sup> Farërat po thahen nën plisa, depot janë katandisur të shkreta, hambarët e grurit po rrënohen, sepse gruri u tha.", "18": "<sup>18</sup> Sa vuajnë kafshët! Kopetë e gjedhëve sillen më kot, sepse nuk ka kullotë për ta; lëngojnë edhe kopetë e deleve.", "19": "<sup>19</sup> Te ty, o Zot, unë këlthas, sepse një zjarr ka gllabëruar të gjitha tokat për kullotë dhe një flakë ka djegur të gjithë drurët e fushës.", "20": "<sup>20</sup> Edhe kafshët e fushave i ngrenë sytë drejt teje, sepse rrjedhat e ujit janë tharë dhe zjarri ka gllabëruar tokat për kullotë." }, "2": { "1": "<sup>1</sup> I bini borisë në Sion dhe jepni kushtrimin në malin tim të shenjtë! Le të dridhen të gjithë banorët e vendit, sepse dita e Zotit po vjen, është e afërt,", "2": "<sup>2</sup> po vjen dita e territ dhe e errësirës së dendur, ditë resh dhe mjegulle. Ashtu si përhapet agimi mbi malet, po vjen një popull i shumtë dhe i fuqishëm, të cilit kurrkush nuk i ka ngjarë më parë dhe as nuk do të ketë më kurrë për shumë breza që do të vijnë.", "3": "<sup>3</sup> Para tij një zjarr po gllabëron dhe pas tij një flakë po djeg. Përpara tij vendi është si kopshti i Edenit; dhe pas tij është si një shkretëtirë e mjeruar; po, asgjë nuk i shpëton atij.", "4": "<sup>4</sup> Pamja e tyre është si pamja e kuajve, dhe rendin si kuaj të shpejtë.", "5": "<sup>5</sup> Ata hidhen mbi majat e maleve me zhurmë qerresh, si brambullima e flakës së zjarrit që djeg kallamishtet, si një popull i fortë që është rreshtuar për betejë.", "6": "<sup>6</sup> Përpara tyre popujt përpëliten nga dhembja, çdo fytyrë zbehet.", "7": "<sup>7</sup> Rendin si njerëz trima, ngjiten mbi muret si luftëtarë; secili ndjek rrugën e vet pa devijuar prej saj.", "8": "<sup>8</sup> Askush nuk e shtyn fqinjin e tij, secili ndjek shtegun e vet; sulen në mes të shigjetave, por nuk plagosen.", "9": "<sup>9</sup> I bien qytetit kryq e tërthor, rendin mbi muret, ngjiten në shtëpi, hyjnë në to nga dritaret si vjedhës.", "10": "<sup>10</sup> Para tyre dridhet toka, dridhen qiejt, dielli dhe hëna erren dhe yjet humbin shkëlqimin e tyre.", "11": "<sup>11</sup> Zoti bën që t'i dëgjohet zëri para ushtrisë së tij, sepse fusha e tij
var book = { "name": "Joeli", "numChapters": 3, "chapters": { "1": {
random_line_split
simulationlf.py
_phases)[0] # We won't work on the slack node because it is pointless, so we're only taking nodes starting with the second brackets = brackets[1:] for i in range(nb_brackets): current_bracket = brackets[i] # p = self.power_definition() z[i] = (current_bracket.get_branch().calculate_impedance()) for j in range(nb_brackets): # If there is a path, we change K[i,j,k,k] to -1 if i + 2 in network.find_path(0, j): for k in range(3): K[i][j][k][k] = -1 K = np.vstack([np.hstack(c) for c in K]) K = K[vec_phases_index][vec_phases_index] # z = np.reshape(z, (19,1)) Zbr = block_diag(z).toarray() Zbr = Zbr[vec_phases_index][vec_phases_index] # Transforming all of our matrixes into real matrixes. At this point, they were just arrays. K = np.mat(K) Zbr = np.mat(Zbr) # Zbr = Zbr[vec_phases_index[:], vec_phases_index[:]] # np.resize(Zbr, (len(Zbr)*3, len(Zbr)*3)) # End of Grid_definition return { 'K': K, 'Zbr': Zbr, 'vec_phases_index': vec_phases_index } # LOAD FLOW METHOD def load_flow(self, network): """ This method will implement the Load Flow algorithm. :param: network: the network on which we want to do the load flow. :return: dic: A dictionnary containing every matrix/array involved in the load flow resolution. """ # main.m alpha = 1 nb_brackets = network.get_nb_brackets()-1 # Battery settings bat_node = 2 bat_phase = 2 bat = (bat_node-2)*3 + bat_phase Ebat = 0 Ebat_max = 120000 Pbat = 60000 # End # Grid_definition.m grid = self.grid_definition(network) K = grid['K'] Zbr = grid['Zbr'] vec_phases_index = grid['vec_phases_index'] # End of Grid_Definition brackets = network.get_brackets()[1:] network_nodes = [brackets[i].get_node() for i in range(nb_brackets)] # load_flow.m Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128) Ibus = Ibus[:, np.newaxis] Vnl = network.get_slack_voltage() Vnl = Vnl[vec_phases_index] Vbus = Vnl Vbr_prev = Vnl # If we don't define Tmp as a N-Dim Array, the Tile function will broadcast it to a N-Dim Array of shape # (1, 1, 57) instead of letting it be (57, 1, 1). This will result by producing a new matrix of shape # (1, 570, 96). I guess that the tile function will perform some multiplication on the dimensions # and then will join'em. If Vnl(57,1) & Newmat(10,96): # Result = (1, 57*10, 96)... Which is not really what we want. Tmp = (Vnl * 0) Tmp = Tmp[:, np.newaxis] V = np.tile(Tmp, (1,1,1)) I = np.tile(Tmp, (1,1,1)) # We don't use the Tmp matrix here because Vnl won't be broadcasted to a 3D matrix but to a 1D. So the bug # that has been resolved earlier won't happen here # Imean = np.tile(Vnl*0, (96)) # Vmean = np.tile(Vnl*0, (96)) powers = [] for node in network_nodes: n_pow = [] for user in node.get_users(): n_pow.append(user.get_P()) powers.extend(n_pow) """ Here, we are assigning the NumPy functions we are going to use into the load flow loop to gain a little bit more efficiency. """ # NumPy Functions conj = np.conj divide = np.divide absolute = np.abs less = np.less zeros = np.zeros # Here is the wrapping of the load flow: # h = 0, nb iterations # q = 0, 96 P = np.asarray(powers) P = divide(P, 2) Q = np.dot(P, np.array([0])) # Initializing arrays to optimize Ibr = zeros((nb_brackets, 1)) Vbr = zeros((nb_brackets, 1)) # Before we enter the loop, we make sure we are going to work with matrices instead of arrays. Ibr = np.matrix(Ibr) Vbr = np.matrix(Vbr) # LOAD FLOW LOOP k = 0 t = process_time() while True: k += 1 bal = 0 for i in range(len(P)): if k == 1: Ibus[i] = -(np.matrix(np.complex(P[i], Q[i])/Vbus[i]).conj()) else: Ibus[i] = -(np.matrix(np.complex(P[i], Q[i]) / Vbus[i]).conj()) if i % 3 == bat: bal = bal + P[i] if bat != 0: if bal < 0: if Ebat < Ebat_max: Ibus[bat] = min([conj(-Pbat/Vbus[bat]), conj(bal/Vbus[bat]), conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))]) Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25 elif Ebat > 0: Ibus[bat] = min([conj(Pbat/Vbus[bat]), conj(bal/Vbus[bat]), conj(Ebat/(Vbus[bat]*0.25))]) Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0. Ibr = K * Ibus Vbr = Zbr * Ibr if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 0.0000000000000001)), self.__tolerance)).all(): break Vbr = Vbr_prev + (alpha * (Vbr - Vbr_prev)) Vbr_prev = Vbr Vbus = Vnl + np.dot(K.conj().T, Vbr) Vbus = Vnl + np.dot(K.conj().T, Vbr) V[:] = Vbus[:, :, np.newaxis] I[:] = Ibr[:, :, np.newaxis] Pbr = Qbr = np.array([[[0 for k in range(2)]for j in range(len(vec_phases_index))] for i in range(nb_brackets)]) for i in range(nb_brackets): for j in range(len(vec_phases_index)): i_to_j = self.powerflow(Vbus[i], Ibr[i]) j_to_i = self.powerflow(Vbus[i+1], Ibr[i]) Pbr[i][j][0] = i_to_j['active'] Pbr[i][j][1] = j_to_i['active'] Qbr[i][j][0] = i_to_j['reactive'] Qbr[i][j][1] = j_to_i['reactive'] print(np.shape(Pbr), Qbr.shape) # END OF LOAD FLOW # End of load_flow.m print("Process executed in", process_time() - t, "s") dic = { 'Ibus_bat': Ibus[bat], 'Ebat': Ebat, 'V': V, 'Vbr': Vbr, 'Vbus': Vbus, 'I': I, 'Ibus': Ibus, 'Ibr': Ibr, 'Zbr': Zbr, 'P': P, 'K': K, 'Vnl': Vnl, 'Pbr': Pbr, 'Qbr': Qbr } return dic def powerflow(self, voltage, intensity, conj=np.conj, real=np.real, imag=np.imag):
'reactive': imag(flow) } def printMenu(self, network): np.set_printoptions(threshold=np.nan, suppress=True, precision=10) # import re while True: # This block is relevant if we use a timestamp. # It will check the user's input. # If you uncomment the block
flow = voltage * conj(intensity) return { 'active': real(flow),
random_line_split
simulationlf.py
def get_delta_time(self): return self.__delta_time # SETTERS/MUTATORS def set_nb_iterations(self, nb): self.__nb_iterations = nb def set_tolerance(self, t): self.__tolerance = t def set_delta_time(self, d): self.__delta_time = d def grid_definition(self, network): zeros = np.zeros # As we are working with brackets that are containing a node and a branch, the number of brackets corresponds # to the number of nodes nb_brackets = network.get_nb_brackets()-1 # Boolean vector of nodes phases [ [1, 1, 1], [1, 1, 1], ...] vec_phases = np.ones((1, 3 * nb_brackets)) vec_phases = vec_phases[0] # Number of phases for each node/bracket num_phases = zeros((1, nb_brackets+1)) num_phases = num_phases[0] # Parent line impedances (intermediate step for Zbr construction z = [0 for i in range(nb_brackets)] # Power matrix (3 possible phases, number of nodes) # p = zeros(3, nb_brackets) # K = cell(nb_brackets, nb_brackets): This kind of Matlab variable can be translated into a nested list. K = [[np.zeros((3, 3), int) for j in range(nb_brackets)] for i in range(nb_brackets)] # As we are going to use the list a certain number of times, we are assigning it to a variable for more # efficiency brackets = network.get_brackets() vec_phases[0:3] = brackets[0].get_branch().get_phases() num_phases[0] = np.sum(brackets[0].get_branch().get_phases()) for i in range(1, nb_brackets+1): current_bracket = brackets[i-1] vec_phases[3*(i-1):3*(i-1)+3] = current_bracket.get_branch().get_phases() num_phases[i] = np.sum(current_bracket.get_branch().get_phases()) vec_phases_index = np.nonzero(vec_phases)[0] # We won't work on the slack node because it is pointless, so we're only taking nodes starting with the second brackets = brackets[1:] for i in range(nb_brackets): current_bracket = brackets[i] # p = self.power_definition() z[i] = (current_bracket.get_branch().calculate_impedance()) for j in range(nb_brackets): # If there is a path, we change K[i,j,k,k] to -1 if i + 2 in network.find_path(0, j): for k in range(3): K[i][j][k][k] = -1 K = np.vstack([np.hstack(c) for c in K]) K = K[vec_phases_index][vec_phases_index] # z = np.reshape(z, (19,1)) Zbr = block_diag(z).toarray() Zbr = Zbr[vec_phases_index][vec_phases_index] # Transforming all of our matrixes into real matrixes. At this point, they were just arrays. K = np.mat(K) Zbr = np.mat(Zbr) # Zbr = Zbr[vec_phases_index[:], vec_phases_index[:]] # np.resize(Zbr, (len(Zbr)*3, len(Zbr)*3)) # End of Grid_definition return { 'K': K, 'Zbr': Zbr, 'vec_phases_index': vec_phases_index } # LOAD FLOW METHOD def load_flow(self, network): """ This method will implement the Load Flow algorithm. :param: network: the network on which we want to do the load flow. :return: dic: A dictionnary containing every matrix/array involved in the load flow resolution. """ # main.m alpha = 1 nb_brackets = network.get_nb_brackets()-1 # Battery settings bat_node = 2 bat_phase = 2 bat = (bat_node-2)*3 + bat_phase Ebat = 0 Ebat_max = 120000 Pbat = 60000 # End # Grid_definition.m grid = self.grid_definition(network) K = grid['K'] Zbr = grid['Zbr'] vec_phases_index = grid['vec_phases_index'] # End of Grid_Definition brackets = network.get_brackets()[1:] network_nodes = [brackets[i].get_node() for i in range(nb_brackets)] # load_flow.m Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128) Ibus = Ibus[:, np.newaxis] Vnl = network.get_slack_voltage() Vnl = Vnl[vec_phases_index] Vbus = Vnl Vbr_prev = Vnl # If we don't define Tmp as a N-Dim Array, the Tile function will broadcast it to a N-Dim Array of shape # (1, 1, 57) instead of letting it be (57, 1, 1). This will result by producing a new matrix of shape # (1, 570, 96). I guess that the tile function will perform some multiplication on the dimensions # and then will join'em. If Vnl(57,1) & Newmat(10,96): # Result = (1, 57*10, 96)... Which is not really what we want. Tmp = (Vnl * 0) Tmp = Tmp[:, np.newaxis] V = np.tile(Tmp, (1,1,1)) I = np.tile(Tmp, (1,1,1)) # We don't use the Tmp matrix here because Vnl won't be broadcasted to a 3D matrix but to a 1D. So the bug # that has been resolved earlier won't happen here # Imean = np.tile(Vnl*0, (96)) # Vmean = np.tile(Vnl*0, (96)) powers = [] for node in network_nodes: n_pow = [] for user in node.get_users(): n_pow.append(user.get_P()) powers.extend(n_pow) """ Here, we are assigning the NumPy functions we are going to use into the load flow loop to gain a little bit more efficiency. """ # NumPy Functions conj = np.conj divide = np.divide absolute = np.abs less = np.less zeros = np.zeros # Here is the wrapping of the load flow: # h = 0, nb iterations # q = 0, 96 P = np.asarray(powers) P = divide(P, 2) Q = np.dot(P, np.array([0])) # Initializing arrays to optimize Ibr = zeros((nb_brackets, 1)) Vbr = zeros((nb_brackets, 1)) # Before we enter the loop, we make sure we are going to work with matrices instead of arrays. Ibr = np.matrix(Ibr) Vbr = np.matrix(Vbr) # LOAD FLOW LOOP k = 0 t = process_time() while True: k += 1 bal = 0 for i in range(len(P)): if k == 1: Ibus[i] = -(np.matrix(np.complex(P[i], Q[i])/Vbus[i]).conj()) else: Ibus[i] = -(np.matrix(np.complex(P[i], Q[i]) / Vbus[i]).conj()) if i % 3 == bat: bal = bal + P[i] if bat != 0: if bal < 0: if Ebat < Ebat_max: Ibus[bat] = min([conj(-Pbat/Vbus[bat]), conj(bal/Vbus[bat]), conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))]) Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25 elif Ebat > 0: Ibus[bat] = min([conj(Pbat/Vbus[bat]), conj(bal/Vbus[bat]), conj(Ebat/(Vbus[bat]*0.25))]) Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0. Ibr = K * Ibus Vbr = Zbr * Ibr if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 0.0000000000000001)), self.__tolerance)).all(): break Vbr = Vbr_prev + (alpha * (Vbr - Vbr
return self.__tolerance
identifier_body
simulationlf.py
(self): return self.__nb_iterations def get_tolerance(self): return self.__tolerance def get_delta_time(self): return self.__delta_time # SETTERS/MUTATORS def set_nb_iterations(self, nb): self.__nb_iterations = nb def set_tolerance(self, t): self.__tolerance = t def set_delta_time(self, d): self.__delta_time = d def grid_definition(self, network): zeros = np.zeros # As we are working with brackets that are containing a node and a branch, the number of brackets corresponds # to the number of nodes nb_brackets = network.get_nb_brackets()-1 # Boolean vector of nodes phases [ [1, 1, 1], [1, 1, 1], ...] vec_phases = np.ones((1, 3 * nb_brackets)) vec_phases = vec_phases[0] # Number of phases for each node/bracket num_phases = zeros((1, nb_brackets+1)) num_phases = num_phases[0] # Parent line impedances (intermediate step for Zbr construction z = [0 for i in range(nb_brackets)] # Power matrix (3 possible phases, number of nodes) # p = zeros(3, nb_brackets) # K = cell(nb_brackets, nb_brackets): This kind of Matlab variable can be translated into a nested list. K = [[np.zeros((3, 3), int) for j in range(nb_brackets)] for i in range(nb_brackets)] # As we are going to use the list a certain number of times, we are assigning it to a variable for more # efficiency brackets = network.get_brackets() vec_phases[0:3] = brackets[0].get_branch().get_phases() num_phases[0] = np.sum(brackets[0].get_branch().get_phases()) for i in range(1, nb_brackets+1): current_bracket = brackets[i-1] vec_phases[3*(i-1):3*(i-1)+3] = current_bracket.get_branch().get_phases() num_phases[i] = np.sum(current_bracket.get_branch().get_phases()) vec_phases_index = np.nonzero(vec_phases)[0] # We won't work on the slack node because it is pointless, so we're only taking nodes starting with the second brackets = brackets[1:] for i in range(nb_brackets): current_bracket = brackets[i] # p = self.power_definition() z[i] = (current_bracket.get_branch().calculate_impedance()) for j in range(nb_brackets): # If there is a path, we change K[i,j,k,k] to -1 if i + 2 in network.find_path(0, j): for k in range(3): K[i][j][k][k] = -1 K = np.vstack([np.hstack(c) for c in K]) K = K[vec_phases_index][vec_phases_index] # z = np.reshape(z, (19,1)) Zbr = block_diag(z).toarray() Zbr = Zbr[vec_phases_index][vec_phases_index] # Transforming all of our matrixes into real matrixes. At this point, they were just arrays. K = np.mat(K) Zbr = np.mat(Zbr) # Zbr = Zbr[vec_phases_index[:], vec_phases_index[:]] # np.resize(Zbr, (len(Zbr)*3, len(Zbr)*3)) # End of Grid_definition return { 'K': K, 'Zbr': Zbr, 'vec_phases_index': vec_phases_index } # LOAD FLOW METHOD def load_flow(self, network): """ This method will implement the Load Flow algorithm. :param: network: the network on which we want to do the load flow. :return: dic: A dictionnary containing every matrix/array involved in the load flow resolution. """ # main.m alpha = 1 nb_brackets = network.get_nb_brackets()-1 # Battery settings bat_node = 2 bat_phase = 2 bat = (bat_node-2)*3 + bat_phase Ebat = 0 Ebat_max = 120000 Pbat = 60000 # End # Grid_definition.m grid = self.grid_definition(network) K = grid['K'] Zbr = grid['Zbr'] vec_phases_index = grid['vec_phases_index'] # End of Grid_Definition brackets = network.get_brackets()[1:] network_nodes = [brackets[i].get_node() for i in range(nb_brackets)] # load_flow.m Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128) Ibus = Ibus[:, np.newaxis] Vnl = network.get_slack_voltage() Vnl = Vnl[vec_phases_index] Vbus = Vnl Vbr_prev = Vnl # If we don't define Tmp as a N-Dim Array, the Tile function will broadcast it to a N-Dim Array of shape # (1, 1, 57) instead of letting it be (57, 1, 1). This will result by producing a new matrix of shape # (1, 570, 96). I guess that the tile function will perform some multiplication on the dimensions # and then will join'em. If Vnl(57,1) & Newmat(10,96): # Result = (1, 57*10, 96)... Which is not really what we want. Tmp = (Vnl * 0) Tmp = Tmp[:, np.newaxis] V = np.tile(Tmp, (1,1,1)) I = np.tile(Tmp, (1,1,1)) # We don't use the Tmp matrix here because Vnl won't be broadcasted to a 3D matrix but to a 1D. So the bug # that has been resolved earlier won't happen here # Imean = np.tile(Vnl*0, (96)) # Vmean = np.tile(Vnl*0, (96)) powers = [] for node in network_nodes: n_pow = [] for user in node.get_users(): n_pow.append(user.get_P()) powers.extend(n_pow) """ Here, we are assigning the NumPy functions we are going to use into the load flow loop to gain a little bit more efficiency. """ # NumPy Functions conj = np.conj divide = np.divide absolute = np.abs less = np.less zeros = np.zeros # Here is the wrapping of the load flow: # h = 0, nb iterations # q = 0, 96 P = np.asarray(powers) P = divide(P, 2) Q = np.dot(P, np.array([0])) # Initializing arrays to optimize Ibr = zeros((nb_brackets, 1)) Vbr = zeros((nb_brackets, 1)) # Before we enter the loop, we make sure we are going to work with matrices instead of arrays. Ibr = np.matrix(Ibr) Vbr = np.matrix(Vbr) # LOAD FLOW LOOP k = 0 t = process_time() while True: k += 1 bal = 0 for i in range(len(P)): if k == 1: Ibus[i] = -(np.matrix(np.complex(P[i], Q[i])/Vbus[i]).conj()) else: Ibus[i] = -(np.matrix(np.complex(P[i], Q[i]) / Vbus[i]).conj()) if i % 3 == bat: bal = bal + P[i] if bat != 0: if bal < 0: if Ebat < Ebat_max: Ibus[bat] = min([conj(-Pbat/Vbus[bat]), conj(bal/Vbus[bat]), conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))]) Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25 elif Ebat > 0: Ibus[bat] = min([conj(Pbat/Vbus[bat]), conj(bal/Vbus[bat]), conj(Ebat/(Vbus[bat]*0.25))]) Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0. Ibr = K * Ibus Vbr = Zbr * Ibr if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 0.0000000000000001)), self.__tolerance)).all(): break
get_nb_iterations
identifier_name
simulationlf.py
_phases)[0] # We won't work on the slack node because it is pointless, so we're only taking nodes starting with the second brackets = brackets[1:] for i in range(nb_brackets): current_bracket = brackets[i] # p = self.power_definition() z[i] = (current_bracket.get_branch().calculate_impedance()) for j in range(nb_brackets): # If there is a path, we change K[i,j,k,k] to -1 if i + 2 in network.find_path(0, j): for k in range(3): K[i][j][k][k] = -1 K = np.vstack([np.hstack(c) for c in K]) K = K[vec_phases_index][vec_phases_index] # z = np.reshape(z, (19,1)) Zbr = block_diag(z).toarray() Zbr = Zbr[vec_phases_index][vec_phases_index] # Transforming all of our matrixes into real matrixes. At this point, they were just arrays. K = np.mat(K) Zbr = np.mat(Zbr) # Zbr = Zbr[vec_phases_index[:], vec_phases_index[:]] # np.resize(Zbr, (len(Zbr)*3, len(Zbr)*3)) # End of Grid_definition return { 'K': K, 'Zbr': Zbr, 'vec_phases_index': vec_phases_index } # LOAD FLOW METHOD def load_flow(self, network): """ This method will implement the Load Flow algorithm. :param: network: the network on which we want to do the load flow. :return: dic: A dictionnary containing every matrix/array involved in the load flow resolution. """ # main.m alpha = 1 nb_brackets = network.get_nb_brackets()-1 # Battery settings bat_node = 2 bat_phase = 2 bat = (bat_node-2)*3 + bat_phase Ebat = 0 Ebat_max = 120000 Pbat = 60000 # End # Grid_definition.m grid = self.grid_definition(network) K = grid['K'] Zbr = grid['Zbr'] vec_phases_index = grid['vec_phases_index'] # End of Grid_Definition brackets = network.get_brackets()[1:] network_nodes = [brackets[i].get_node() for i in range(nb_brackets)] # load_flow.m Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128) Ibus = Ibus[:, np.newaxis] Vnl = network.get_slack_voltage() Vnl = Vnl[vec_phases_index] Vbus = Vnl Vbr_prev = Vnl # If we don't define Tmp as a N-Dim Array, the Tile function will broadcast it to a N-Dim Array of shape # (1, 1, 57) instead of letting it be (57, 1, 1). This will result by producing a new matrix of shape # (1, 570, 96). I guess that the tile function will perform some multiplication on the dimensions # and then will join'em. If Vnl(57,1) & Newmat(10,96): # Result = (1, 57*10, 96)... Which is not really what we want. Tmp = (Vnl * 0) Tmp = Tmp[:, np.newaxis] V = np.tile(Tmp, (1,1,1)) I = np.tile(Tmp, (1,1,1)) # We don't use the Tmp matrix here because Vnl won't be broadcasted to a 3D matrix but to a 1D. So the bug # that has been resolved earlier won't happen here # Imean = np.tile(Vnl*0, (96)) # Vmean = np.tile(Vnl*0, (96)) powers = [] for node in network_nodes: n_pow = [] for user in node.get_users(): n_pow.append(user.get_P()) powers.extend(n_pow) """ Here, we are assigning the NumPy functions we are going to use into the load flow loop to gain a little bit more efficiency. """ # NumPy Functions conj = np.conj divide = np.divide absolute = np.abs less = np.less zeros = np.zeros # Here is the wrapping of the load flow: # h = 0, nb iterations # q = 0, 96 P = np.asarray(powers) P = divide(P, 2) Q = np.dot(P, np.array([0])) # Initializing arrays to optimize Ibr = zeros((nb_brackets, 1)) Vbr = zeros((nb_brackets, 1)) # Before we enter the loop, we make sure we are going to work with matrices instead of arrays. Ibr = np.matrix(Ibr) Vbr = np.matrix(Vbr) # LOAD FLOW LOOP k = 0 t = process_time() while True:
Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0. Ibr = K * Ibus Vbr = Zbr * Ibr if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 0.0000000000000001)), self.__tolerance)).all(): break Vbr = Vbr_prev + (alpha * (Vbr - Vbr_prev)) Vbr_prev = Vbr Vbus = Vnl + np.dot(K.conj().T, Vbr) Vbus = Vnl + np.dot(K.conj().T, Vbr) V[:] = Vbus[:, :, np.newaxis] I[:] = Ibr[:, :, np.newaxis] Pbr = Qbr = np.array([[[0 for k in range(2)]for j in range(len(vec_phases_index))] for i in range(nb_brackets)]) for i in range(nb_brackets): for j in range(len(vec_phases_index)): i_to_j = self.powerflow(Vbus[i], Ibr[i]) j_to_i = self.powerflow(Vbus[i+1], Ibr[i]) Pbr[i][j][0] = i_to_j['active'] Pbr[i][j][1] = j_to_i['active'] Qbr[i][j][0] = i_to_j['reactive'] Qbr[i][j][1] = j_to_i['reactive'] print(np.shape(Pbr), Qbr.shape) # END OF LOAD FLOW # End of load_flow.m print("Process executed in", process_time() - t, "s") dic = { 'Ibus_bat': Ibus[bat], 'Ebat': Ebat, 'V': V, 'Vbr': Vbr, 'Vbus': Vbus, 'I': I, 'Ibus': Ibus, 'Ibr': Ibr, 'Zbr': Zbr, 'P': P, 'K': K, 'Vnl': Vnl, 'Pbr': Pbr, 'Qbr': Qbr } return dic def powerflow(self, voltage, intensity, conj=np.conj, real=np.real, imag=np.imag): flow = voltage * conj(intensity) return { 'active': real(flow), 'reactive': imag(flow) } def printMenu(self, network): np.set_printoptions(threshold=np.nan, suppress=True, precision=10) # import re while True: # This block is relevant if we use a timestamp. # It will check the user's input. # If you uncomment the block
k += 1 bal = 0 for i in range(len(P)): if k == 1: Ibus[i] = -(np.matrix(np.complex(P[i], Q[i])/Vbus[i]).conj()) else: Ibus[i] = -(np.matrix(np.complex(P[i], Q[i]) / Vbus[i]).conj()) if i % 3 == bat: bal = bal + P[i] if bat != 0: if bal < 0: if Ebat < Ebat_max: Ibus[bat] = min([conj(-Pbat/Vbus[bat]), conj(bal/Vbus[bat]), conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))]) Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25 elif Ebat > 0: Ibus[bat] = min([conj(Pbat/Vbus[bat]), conj(bal/Vbus[bat]), conj(Ebat/(Vbus[bat]*0.25))])
conditional_block
mortgage_pandas.py
test['upb_12'] = test['current_actual_upb'] test.drop(columns=['current_actual_upb'], inplace=True) test['upb_12'] = test['upb_12'].fillna(999999999) test['delinquency_12'] = test['delinquency_12'].fillna(-1) joined_df = test.merge(everdf, how='left', on=['loan_id']) del(everdf) del(test) joined_df['ever_30'] = joined_df['ever_30'].fillna(-1) joined_df['ever_90'] = joined_df['ever_90'].fillna(-1) joined_df['ever_180'] = joined_df['ever_180'].fillna(-1) joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1) joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1) joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1) joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int32') joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int32') return joined_df def create_12_mon_features(joined_df, **kwargs): testdfs = [] n_months = 12 for y in range(1, n_months + 1): tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']] tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month'] tmpdf['josh_mody_n'] = np.floor((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12) tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], as_index=False).agg({'delinquency_12': 'max','upb_12': 'min'}) tmpdf['delinquency_12'] = (tmpdf['delinquency_12']>3).astype('int32') tmpdf['delinquency_12'] +=(tmpdf['upb_12']==0).astype('int32') tmpdf['timestamp_year'] = np.floor(((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).astype('int16') tmpdf['timestamp_month'] = np.int8(y) tmpdf.drop(columns=['josh_mody_n'], inplace=True) testdfs.append(tmpdf) del(tmpdf) del(joined_df) return pd.concat(testdfs) def combine_joined_12_mon(joined_df, testdf, **kwargs): joined_df.drop(columns=['delinquency_12', 'upb_12'], inplace=True) joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int16') joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int8') return joined_df.merge(testdf, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month']) def final_performance_delinquency(merged, joined_df, **kwargs): merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month merged['timestamp_month'] = merged['timestamp_month'].astype('int8') merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year merged['timestamp_year'] = merged['timestamp_year'].astype('int16') merged = merged.merge(joined_df, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month']) merged.drop(columns=['timestamp_year'], inplace=True) merged.drop(columns=['timestamp_month'], inplace=True) return merged def join_perf_acq_pdfs(perf, acq, **kwargs): return perf.merge(acq, how='left', on=['loan_id']) def last_mile_cleaning(df, **kwargs): #for col, dtype in df.dtypes.iteritems(): # if str(dtype)=='category': # df[col] = df[col].cat.codes df['delinquency_12'] = df['delinquency_12'] > 0 df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32') return df #.to_arrow(index=False) # Load database reporting functions pathToReportDir = os.path.join(pathlib.Path(__file__).parent, "..", "report") print(pathToReportDir) sys.path.insert(1, pathToReportDir) import report parser = argparse.ArgumentParser(description='Run Mortgage benchmark using pandas') parser.add_argument('-r', default="report_pandas.csv", help="Report file name.") parser.add_argument('-df', default=1, type=int, help="Number of datafiles (quarters) to input into database for processing.") parser.add_argument('-dp', required=True, help="Path to root of mortgage datafiles directory (contains names.csv).") parser.add_argument('-i', dest="iterations", default=5, type=int, help="Number of iterations to run every benchmark. Best result is selected.") parser.add_argument("-db-server", default="localhost", help="Host name of MySQL server") parser.add_argument("-db-port", default=3306, type=int, help="Port number of MySQL server") parser.add_argument("-db-user", default="", help="Username to use to connect to MySQL database. If user name is specified, script attempts to store results in MySQL database using other -db-* parameters.") parser.add_argument("-db-pass", default="omniscidb", help="Password to use to connect to MySQL database") parser.add_argument("-db-name", default="omniscidb", help="MySQL database to use to store benchmark results") parser.add_argument("-db-table", help="Table to use to store results for this benchmark.") parser.add_argument("-commit", default="1234567890123456789012345678901234567890", help="Commit hash to use to record this benchmark results") args = parser.parse_args() if args.df <= 0: print("Bad number of data files specified", args.df) sys.exit(1) if args.iterations < 1: print("Bad number of iterations specified", args.t) db_reporter = None if args.db_user is not "": print("Connecting to database") db = mysql.connector.connect(host=args.db_server, port=args.db_port, user=args.db_user, passwd=args.db_pass, db=args.db_name); db_reporter = report.DbReport(db, args.db_table, { 'FilesNumber': 'INT UNSIGNED NOT NULL', 'FragmentSize': 'BIGINT UNSIGNED NOT NULL', 'BenchName': 'VARCHAR(500) NOT NULL', 'BestExecTimeMS': 'BIGINT UNSIGNED', 'BestTotalTimeMS': 'BIGINT UNSIGNED', 'WorstExecTimeMS': 'BIGINT UNSIGNED', 'WorstTotalTimeMS': 'BIGINT UNSIGNED', 'AverageExecTimeMS': 'BIGINT UNSIGNED', 'AverageTotalTimeMS': 'BIGINT UNSIGNED' }, { 'ScriptName': 'mortgage_pandas.py', 'CommitHash': args.commit }) data_directory = args.dp benchName = "mortgage_pandas" perf_data_path = os.path.join(data_directory, "perf") perf_format_path = os.path.join(perf_data_path, "Performance_%sQ%s.txt") bestExecTime = float("inf") bestTotalTime = float("inf") worstExecTime = 0 worstTotalTime = 0 avgExecTime = 0 avgTotalTime = 0 for iii in range(1, args.iterations + 1):
dataFilesNumber = 0 time_ETL = time.time() exec_time_total = 0 print("RUNNING BENCHMARK NUMBER", benchName, "ITERATION NUMBER", iii) for quarter in range(0, args.df): year = 2000 + quarter // 4 perf_file = perf_format_path % (str(year), str(quarter % 4 + 1)) files = [f for f in pathlib.Path(perf_data_path).iterdir() if f.match('Performance_%sQ%s.txt*' % (str(year), str(quarter % 4 + 1)))] for f in files: dataframe, exec_time = run_pd_workflow(year = year, quarter = (quarter % 4 + 1), perf_file = str(f)) exec_time_total += exec_time dataFilesNumber += 1 time_ETL_end = time.time() ttt = (time_ETL_end - time_ETL) * 1000 print("ITERATION", iii, "EXEC TIME: ", exec_time_total, "TOTAL TIME: ", ttt) if bestExecTime > exec_time_total: bestExecTime = exec_time_total if worstExecTime < exec_time_total:
conditional_block
mortgage_pandas.py
relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']), 'year_quarter': np.int64 } a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True) return a def pd_load_names(**kwargs): """ Loads names used for renaming the banks Returns ------- PD DataFrame """ cols = [ 'seller_name', 'new' ] dtypes = {'seller_name':str, 'new':str} return pd.read_csv(os.path.join(data_directory, "names.csv"), names=cols, delimiter='|', dtype=dtypes) def create_ever_features(pdf, **kwargs): everdf = pdf[['loan_id', 'current_loan_delinquency_status']] everdf = everdf.groupby('loan_id').max() del(pdf) everdf['ever_30'] = (everdf['current_loan_delinquency_status'] >= 1).astype('int8') everdf['ever_90'] = (everdf['current_loan_delinquency_status'] >= 3).astype('int8') everdf['ever_180'] = (everdf['current_loan_delinquency_status'] >= 6).astype('int8') everdf.drop(columns=['current_loan_delinquency_status'], inplace=True) return everdf def create_delinq_features(pdf, **kwargs): delinq_pdf = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']] del(pdf) delinq_30 = delinq_pdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min() delinq_30['delinquency_30'] = delinq_30['monthly_reporting_period'] delinq_30.drop(columns=['monthly_reporting_period'], inplace=True) delinq_90 = delinq_pdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min() delinq_90['delinquency_90'] = delinq_90['monthly_reporting_period'] delinq_90.drop(columns=['monthly_reporting_period'], inplace=True) delinq_180 = delinq_pdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min() delinq_180['delinquency_180'] = delinq_180['monthly_reporting_period'] delinq_180.drop(columns=['monthly_reporting_period'], inplace=True) del(delinq_pdf) delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id']) delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) delinq_merge = delinq_merge.merge(delinq_180, how='left', on=['loan_id']) delinq_merge['delinquency_180'] = delinq_merge['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) del(delinq_30) del(delinq_90) del(delinq_180) return delinq_merge def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs): everdf = everdf_tmp.merge(delinq_merge, on=['loan_id'], how='left') del(everdf_tmp) del(delinq_merge) everdf['delinquency_30'] = everdf['delinquency_30'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) everdf['delinquency_90'] = everdf['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) everdf['delinquency_180'] = everdf['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) return everdf def create_joined_df(pdf, everdf, **kwargs): test = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status', 'current_actual_upb']] del(pdf) test['timestamp'] = test['monthly_reporting_period'] test.drop(columns=['monthly_reporting_period'], inplace=True) test['timestamp_month'] = test['timestamp'].dt.month test['timestamp_year'] = test['timestamp'].dt.year test['delinquency_12'] = test['current_loan_delinquency_status'] test.drop(columns=['current_loan_delinquency_status'], inplace=True) test['upb_12'] = test['current_actual_upb'] test.drop(columns=['current_actual_upb'], inplace=True) test['upb_12'] = test['upb_12'].fillna(999999999) test['delinquency_12'] = test['delinquency_12'].fillna(-1) joined_df = test.merge(everdf, how='left', on=['loan_id']) del(everdf) del(test) joined_df['ever_30'] = joined_df['ever_30'].fillna(-1) joined_df['ever_90'] = joined_df['ever_90'].fillna(-1) joined_df['ever_180'] = joined_df['ever_180'].fillna(-1) joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1) joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1) joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1) joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int32') joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int32') return joined_df def create_12_mon_features(joined_df, **kwargs): testdfs = [] n_months = 12 for y in range(1, n_months + 1): tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']] tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month'] tmpdf['josh_mody_n'] = np.floor((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12) tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], as_index=False).agg({'delinquency_12': 'max','upb_12': 'min'}) tmpdf['delinquency_12'] = (tmpdf['delinquency_12']>3).astype('int32') tmpdf['delinquency_12'] +=(tmpdf['upb_12']==0).astype('int32') tmpdf['timestamp_year'] = np.floor(((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).astype('int16') tmpdf['timestamp_month'] = np.int8(y) tmpdf.drop(columns=['josh_mody_n'], inplace=True) testdfs.append(tmpdf) del(tmpdf) del(joined_df) return pd.concat(testdfs) def combine_joined_12_mon(joined_df, testdf, **kwargs): joined_df.drop(columns=['delinquency_12', 'upb_12'], inplace=True) joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int16') joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int8') return joined_df.merge(testdf, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month']) def final_performance_delinquency(merged, joined_df, **kwargs):
merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month merged['timestamp_month'] = merged['timestamp_month'].astype('int8') merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year merged['timestamp_year'] = merged['timestamp_year'].astype('int16') merged = merged.merge(joined_df, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month']) merged.drop(columns=['timestamp_year'], inplace=True) merged.drop(columns=['timestamp_month'], inplace=True) return merged
identifier_body
mortgage_pandas.py
import pathlib import sys import argparse def run_pd_workflow(quarter=1, year=2000, perf_file="", **kwargs): t1 = time.time() names = pd_load_names() year_string = str(year) + "Q" + str(quarter) + ".txt" acq_file = os.path.join(data_directory, "acq", "Acquisition_" + year_string) print("READING DATAFILE", acq_file) acq_pdf = pd_load_acquisition_csv(acq_file) print("READING DATAFILE", perf_file) perf_df_tmp = pd_load_performance_csv(perf_file) print("read time", (time.time() - t1) * 1000) t1 = time.time() acq_pdf = acq_pdf.merge(names, how='left', on=['seller_name']) acq_pdf.drop(columns=['seller_name'], inplace=True) acq_pdf['seller_name'] = acq_pdf['new'] acq_pdf.drop(columns=['new'], inplace=True) pdf = perf_df_tmp everdf = create_ever_features(pdf) delinq_merge = create_delinq_features(pdf) everdf = join_ever_delinq_features(everdf, delinq_merge) del(delinq_merge) joined_df = create_joined_df(pdf, everdf) testdf = create_12_mon_features(joined_df) joined_df = combine_joined_12_mon(joined_df, testdf) del(testdf) perf_df = final_performance_delinquency(pdf, joined_df) del(pdf, joined_df) final_pdf = join_perf_acq_pdfs(perf_df, acq_pdf) del(perf_df) del(acq_pdf) print("compute time", (time.time() - t1) * 1000) final_pdf = last_mile_cleaning(final_pdf) exec_time = (time.time() - t1) * 1000 print("compute time with copy to host", exec_time) return final_pdf, exec_time def pd_load_performance_csv(performance_path, **kwargs): """ Loads performance data Returns ------- PD DataFrame """ cols = [ "loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb", "loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity", "maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code", "zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after", "disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs", "asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds", "credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds", "non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag", "foreclosure_principal_write_off_amount", "servicing_activity_indicator" ] dtypes = { "loan_id": np.int64, "monthly_reporting_period": str, "servicer": str, "interest_rate": np.float64, "current_actual_upb": np.float64, "loan_age": np.float64, "remaining_months_to_legal_maturity": np.float64, "adj_remaining_months_to_maturity": np.float64, "maturity_date": str, "msa": np.float64, "current_loan_delinquency_status": np.int32, "mod_flag": CategoricalDtype(['N', 'Y']), "zero_balance_code": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']), "zero_balance_effective_date": str, "last_paid_installment_date": str, "foreclosed_after": str, "disposition_date": str, "foreclosure_costs": np.float64, "prop_preservation_and_repair_costs": np.float64, "asset_recovery_costs": np.float64, "misc_holding_expenses": np.float64, "holding_taxes": np.float64, "net_sale_proceeds": np.float64, "credit_enhancement_proceeds": np.float64, "repurchase_make_whole_proceeds": np.float64, "other_foreclosure_proceeds": np.float64, "non_interest_bearing_upb": np.float64, "principal_forgiveness_upb": np.float64, "repurchase_make_whole_proceeds_flag": CategoricalDtype(['N', 'Y']), "foreclosure_principal_write_off_amount": np.float64, "servicing_activity_indicator": CategoricalDtype(['N', 'Y']), } return pd.read_csv(performance_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[1,8,13,14,15,16]) def pd_load_acquisition_csv(acquisition_path, **kwargs): """ Loads acquisition data Returns ------- PD DataFrame """ columns = [ 'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term', 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score', 'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state', 'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type', 'relocation_mortgage_indicator', 'year_quarter' ] dtypes = { 'loan_id': np.int64, 'orig_channel': CategoricalDtype(['B', 'C', 'R']), 'seller_name': str, 'orig_interest_rate': np.float64, 'orig_upb': np.int64, 'orig_loan_term': np.int64, 'orig_date': str, 'first_pay_date': str, 'orig_ltv': np.float64, 'orig_cltv': np.float64, 'num_borrowers': np.float64, 'dti': np.float64, 'borrower_credit_score': np.float64, 'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']), 'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']), 'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']), 'num_units': np.int64, 'occupancy_status': CategoricalDtype(['I', 'P', 'S']), 'property_state': CategoricalDtype( ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI', 'VT', 'WA', 'WI', 'WV', 'WY']), 'zip': np.int64, 'mortgage_insurance_percent': np.float64, 'product_type': CategoricalDtype(['FRM']), 'coborrow_credit_score': np.float64, 'mortgage_insurance_type': np.float64, 'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']), 'year_quarter': np.int64 } a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True) return a def pd_load_names(**kwargs): """ Loads names used for renaming the banks Returns ------- PD DataFrame """ cols = [ 'seller_name', 'new' ] dtypes = {'seller_name':str, 'new':str} return pd.read_csv(os.path.join(data_directory, "names.csv"), names=cols, delimiter='|', dtype=dtypes) def create_ever_features(pdf, **kwargs): everdf = pdf[['loan_id', 'current_loan_delinquency_status']] everdf = everdf.groupby('loan_id').max() del(pdf) everdf['ever_30'] = (everdf['current_loan_delinquency_status']
from pandas.api.types import CategoricalDtype from io import StringIO from glob import glob import os import time
random_line_split
mortgage_pandas.py
, "holding_taxes": np.float64, "net_sale_proceeds": np.float64, "credit_enhancement_proceeds": np.float64, "repurchase_make_whole_proceeds": np.float64, "other_foreclosure_proceeds": np.float64, "non_interest_bearing_upb": np.float64, "principal_forgiveness_upb": np.float64, "repurchase_make_whole_proceeds_flag": CategoricalDtype(['N', 'Y']), "foreclosure_principal_write_off_amount": np.float64, "servicing_activity_indicator": CategoricalDtype(['N', 'Y']), } return pd.read_csv(performance_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[1,8,13,14,15,16]) def
(acquisition_path, **kwargs): """ Loads acquisition data Returns ------- PD DataFrame """ columns = [ 'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term', 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score', 'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state', 'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type', 'relocation_mortgage_indicator', 'year_quarter' ] dtypes = { 'loan_id': np.int64, 'orig_channel': CategoricalDtype(['B', 'C', 'R']), 'seller_name': str, 'orig_interest_rate': np.float64, 'orig_upb': np.int64, 'orig_loan_term': np.int64, 'orig_date': str, 'first_pay_date': str, 'orig_ltv': np.float64, 'orig_cltv': np.float64, 'num_borrowers': np.float64, 'dti': np.float64, 'borrower_credit_score': np.float64, 'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']), 'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']), 'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']), 'num_units': np.int64, 'occupancy_status': CategoricalDtype(['I', 'P', 'S']), 'property_state': CategoricalDtype( ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI', 'VT', 'WA', 'WI', 'WV', 'WY']), 'zip': np.int64, 'mortgage_insurance_percent': np.float64, 'product_type': CategoricalDtype(['FRM']), 'coborrow_credit_score': np.float64, 'mortgage_insurance_type': np.float64, 'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']), 'year_quarter': np.int64 } a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True) return a def pd_load_names(**kwargs): """ Loads names used for renaming the banks Returns ------- PD DataFrame """ cols = [ 'seller_name', 'new' ] dtypes = {'seller_name':str, 'new':str} return pd.read_csv(os.path.join(data_directory, "names.csv"), names=cols, delimiter='|', dtype=dtypes) def create_ever_features(pdf, **kwargs): everdf = pdf[['loan_id', 'current_loan_delinquency_status']] everdf = everdf.groupby('loan_id').max() del(pdf) everdf['ever_30'] = (everdf['current_loan_delinquency_status'] >= 1).astype('int8') everdf['ever_90'] = (everdf['current_loan_delinquency_status'] >= 3).astype('int8') everdf['ever_180'] = (everdf['current_loan_delinquency_status'] >= 6).astype('int8') everdf.drop(columns=['current_loan_delinquency_status'], inplace=True) return everdf def create_delinq_features(pdf, **kwargs): delinq_pdf = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']] del(pdf) delinq_30 = delinq_pdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min() delinq_30['delinquency_30'] = delinq_30['monthly_reporting_period'] delinq_30.drop(columns=['monthly_reporting_period'], inplace=True) delinq_90 = delinq_pdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min() delinq_90['delinquency_90'] = delinq_90['monthly_reporting_period'] delinq_90.drop(columns=['monthly_reporting_period'], inplace=True) delinq_180 = delinq_pdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min() delinq_180['delinquency_180'] = delinq_180['monthly_reporting_period'] delinq_180.drop(columns=['monthly_reporting_period'], inplace=True) del(delinq_pdf) delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id']) delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) delinq_merge = delinq_merge.merge(delinq_180, how='left', on=['loan_id']) delinq_merge['delinquency_180'] = delinq_merge['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) del(delinq_30) del(delinq_90) del(delinq_180) return delinq_merge def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs): everdf = everdf_tmp.merge(delinq_merge, on=['loan_id'], how='left') del(everdf_tmp) del(delinq_merge) everdf['delinquency_30'] = everdf['delinquency_30'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) everdf['delinquency_90'] = everdf['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) everdf['delinquency_180'] = everdf['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')) return everdf def create_joined_df(pdf, everdf, **kwargs): test = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status', 'current_actual_upb']] del(pdf) test['timestamp'] = test['monthly_reporting_period'] test.drop(columns=['monthly_reporting_period'], inplace=True) test['timestamp_month'] = test['timestamp'].dt.month test['timestamp_year'] = test['timestamp'].dt.year test['delinquency_12'] = test['current_loan_delinquency_status'] test.drop(columns=['current_loan_delinquency_status'], inplace=True) test['upb_12'] = test['current_actual_upb'] test.drop(columns=['current_actual_upb'], inplace=True) test['upb_12'] = test['upb_12'].fillna
pd_load_acquisition_csv
identifier_name
api.rs
Some(val) => val, None => "http".to_string() }; let host = match api_confs.host.clone() { Some(val) => val, None => HOST_URL.to_string() }; let host_url = match api_confs.port.clone() { Some(port) => format!("{}://{}:{}", scheme, host, port), None => format!("{}://{}", scheme, host ) }; format!("{}/{}/{}/{}", host_url, lang, prod_key, version) } //it's used to build API url fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str) -> Result<hyper::Url, hyper::error::ParseError> { let url_str = match api_confs.port { None => { format!( "{}://{}/{}/{}", api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(), api_confs.path.clone().unwrap(), resource_path, ) }, Some(port) => format!( "{}://{}:{}/{}/{}", api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(), port, api_confs.path.clone().unwrap(), resource_path ) }; Url::parse(url_str.as_str()) } fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> { let ssl = NativeTlsClient::new().unwrap(); let connector = HttpsConnector::new(ssl); //use proxy only iff user has defined proxy host and port let mut client = if proxy_confs.is_complete() { let host = Cow::from(proxy_confs.host.clone().unwrap()); let port = proxy_confs.port.clone().unwrap(); let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string()); let ssl_proxy = NativeTlsClient::new().unwrap(); let proxy = client::ProxyConfig::new ( scheme.as_str(), host, port, connector, ssl_proxy ); Client::with_proxy_config(proxy) } else { Client::with_connector(connector) }; client.set_read_timeout(Some(Duration::new(5,0))); let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url"); let mut body = String::new(); res.read_to_string(&mut body).expect("Failed to read response body"); Some(body) } pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str) -> Result<product::ProductMatch, Error> { let sha_res = fetch_product_by_sha(&confs, file_sha); match sha_res { Ok(m) => { let sha = m.sha.expect("No product sha from SHA result"); let product = m.product.expect("No product info from SHA result"); match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) { Ok(mut m) => { m.sha = Some(sha); Ok(m) }, Err(e) => { println!("Failed to fetch product details for sha: {}", file_sha); Err(e) } } }, Err(e) => Err(e) } } pub fn fetch_product_by_sha(confs: &Configs, sha: &str) -> Result<product::ProductMatch, io::Error> { let api_confs = confs.api.clone(); let resource_path = format!("products/sha/{}", encode_sha(sha) ); let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) { Ok(the_url) => the_url, Err(_) => { return Err( Error::new( ErrorKind::InvalidData, "The values of API configs make up non-valid URL" ) ) } }; //attach query params resource_url .query_pairs_mut() .clear() .append_pair("api_key", api_confs.key.clone().unwrap().as_str()); let json_txt = request_json( &resource_url, &confs.proxy ); process_sha_response(json_txt) } //replaces base64 special characters with HTML safe percentage encoding //source: https://en.wikipedia.org/wiki/Base64#URL_applications pub fn encode_sha<'a>(sha: &'a str) -> String { let encoded_sha = sha.to_string(); encoded_sha.replace("+", "%2B") .replace("/", "%2F") .replace("=", "%3D") .trim().to_string() } pub fn encode_prod_key<'b>(prod_key: &'b str) -> String { let encoded_prod_key = prod_key.to_string(); encoded_prod_key .replace(".", "~") .replace("/", ":") .trim().to_string() } pub fn encode_language<'b>(lang: &'b str) -> String { let encoded_lang = lang.to_string(); encoded_lang.replace(".", "").trim().to_lowercase().to_string() } pub fn fetch_product<'a>( confs: &Configs, lang: &str, prod_key: &str, version: &str ) -> Result<product::ProductMatch, io::Error> { let api_confs = confs.api.clone(); let encoded_prod_key = encode_prod_key(&prod_key); let encoded_lang = encode_language(lang); let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone()); let prod_url = to_product_url( &confs.api, encoded_lang.clone().as_str(), prod_key, version ); let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) { Ok(the_url) => the_url, Err(_) => { return Err( Error::new( ErrorKind::InvalidData, "The values of API configs make up non-valid URL" ) ) } }; //attach query params resource_url .query_pairs_mut() .clear() .append_pair("prod_version", version) .append_pair("api_key", api_confs.key.clone().unwrap().as_str()); let json_txt = request_json( &resource_url, &confs.proxy ); process_product_response(json_txt, Some(prod_url)) } #[derive(Serialize, Deserialize, Debug)] struct
{ error: String } #[derive(Serialize, Deserialize, Debug)] struct ShaItem { language: String, prod_key: String, version: String, sha_value: String, sha_method: String, prod_type: Option<String>, group_id: Option<String>, artifact_id: Option<String>, classifier: Option<String>, packaging: Option<String> } //-- helper functions pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> { if json_text.is_none() { return Err( Error::new(ErrorKind::Other, "No response from API") ) } let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?; if res.is_object() && res.get("error").is_some() { let e = Error::new( ErrorKind::Other, r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your subscription to a higher plan."# ); return Err(e); } if !res.is_array() { let e = Error::new( ErrorKind::Other, "Unsupported SHA response - expected array"); return Err(e); } let shas = res.as_array().unwrap(); if shas.len() == 0 { let e = Error::new( ErrorKind::Other, "No match for the SHA"); return Err(e); } let doc:ShaItem = serde_json::from_value(shas[0].clone()).unwrap(); let the_prod = product::Product { name: "".to_string(), language: doc.language, prod_key: doc.prod_key, version: doc.version, prod_type: doc.prod_type }; let the_sha = product::ProductSHA { packaging: doc.packaging.unwrap_or("unknown".to_string()), method: doc.sha_method, value: doc.sha_value, filepath: None }; Ok(product::ProductMatch::new(the_prod, the_sha)) } // converts the response of product endpoint into ProductMatch struct #[derive(Serialize, Deserialize, Debug)] struct ProductItem { name: String, language: String, prod_key: String, version: String, prod_type: String, } #[derive(Serialize, Deserialize, Debug)] struct LicenseItem { name: String, url: Option<String> } pub fn process_product_response( json_text: Option<String>, prod_url: Option<String> ) -> Result<product::ProductMatch, io::Error> { if json_text.is_none() { return Err( Error::new( ErrorKind::Other, "No response from API") ) } let res: serde_json::Value = serde_json::from_str( &json_text.unwrap().as_str() )?; if !res.is_object() { return Err(Error::new(ErrorKind::Other, "No product details")); } //if response includes error field in HTTP200 response // NB! it may include other errors than limit, but @Rob asked to see custom
ApiError
identifier_name
api.rs
Some(val) => val, None => "http".to_string() }; let host = match api_confs.host.clone() { Some(val) => val, None => HOST_URL.to_string() }; let host_url = match api_confs.port.clone() { Some(port) => format!("{}://{}:{}", scheme, host, port), None => format!("{}://{}", scheme, host ) }; format!("{}/{}/{}/{}", host_url, lang, prod_key, version) } //it's used to build API url fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str) -> Result<hyper::Url, hyper::error::ParseError> { let url_str = match api_confs.port { None => { format!( "{}://{}/{}/{}", api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(), api_confs.path.clone().unwrap(), resource_path, ) }, Some(port) => format!( "{}://{}:{}/{}/{}", api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(), port, api_confs.path.clone().unwrap(), resource_path ) }; Url::parse(url_str.as_str()) } fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> { let ssl = NativeTlsClient::new().unwrap(); let connector = HttpsConnector::new(ssl); //use proxy only iff user has defined proxy host and port let mut client = if proxy_confs.is_complete() { let host = Cow::from(proxy_confs.host.clone().unwrap()); let port = proxy_confs.port.clone().unwrap(); let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string()); let ssl_proxy = NativeTlsClient::new().unwrap(); let proxy = client::ProxyConfig::new ( scheme.as_str(), host, port, connector, ssl_proxy ); Client::with_proxy_config(proxy) } else { Client::with_connector(connector) }; client.set_read_timeout(Some(Duration::new(5,0))); let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url"); let mut body = String::new(); res.read_to_string(&mut body).expect("Failed to read response body"); Some(body) } pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str) -> Result<product::ProductMatch, Error> { let sha_res = fetch_product_by_sha(&confs, file_sha); match sha_res { Ok(m) => { let sha = m.sha.expect("No product sha from SHA result"); let product = m.product.expect("No product info from SHA result"); match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) { Ok(mut m) => { m.sha = Some(sha); Ok(m) }, Err(e) => { println!("Failed to fetch product details for sha: {}", file_sha); Err(e) } } }, Err(e) => Err(e) } } pub fn fetch_product_by_sha(confs: &Configs, sha: &str) -> Result<product::ProductMatch, io::Error> { let api_confs = confs.api.clone(); let resource_path = format!("products/sha/{}", encode_sha(sha) ); let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) { Ok(the_url) => the_url, Err(_) => { return Err( Error::new( ErrorKind::InvalidData, "The values of API configs make up non-valid URL" ) ) } }; //attach query params resource_url .query_pairs_mut() .clear() .append_pair("api_key", api_confs.key.clone().unwrap().as_str()); let json_txt = request_json( &resource_url, &confs.proxy ); process_sha_response(json_txt) } //replaces base64 special characters with HTML safe percentage encoding //source: https://en.wikipedia.org/wiki/Base64#URL_applications pub fn encode_sha<'a>(sha: &'a str) -> String { let encoded_sha = sha.to_string(); encoded_sha.replace("+", "%2B") .replace("/", "%2F") .replace("=", "%3D") .trim().to_string() } pub fn encode_prod_key<'b>(prod_key: &'b str) -> String { let encoded_prod_key = prod_key.to_string(); encoded_prod_key .replace(".", "~") .replace("/", ":") .trim().to_string() } pub fn encode_language<'b>(lang: &'b str) -> String { let encoded_lang = lang.to_string(); encoded_lang.replace(".", "").trim().to_lowercase().to_string() } pub fn fetch_product<'a>( confs: &Configs, lang: &str, prod_key: &str, version: &str ) -> Result<product::ProductMatch, io::Error> { let api_confs = confs.api.clone(); let encoded_prod_key = encode_prod_key(&prod_key); let encoded_lang = encode_language(lang); let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone()); let prod_url = to_product_url( &confs.api, encoded_lang.clone().as_str(), prod_key, version ); let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) { Ok(the_url) => the_url, Err(_) => { return Err( Error::new( ErrorKind::InvalidData, "The values of API configs make up non-valid URL" ) ) } }; //attach query params resource_url .query_pairs_mut() .clear() .append_pair("prod_version", version) .append_pair("api_key", api_confs.key.clone().unwrap().as_str()); let json_txt = request_json( &resource_url, &confs.proxy ); process_product_response(json_txt, Some(prod_url)) } #[derive(Serialize, Deserialize, Debug)] struct ApiError { error: String } #[derive(Serialize, Deserialize, Debug)] struct ShaItem { language: String, prod_key: String, version: String, sha_value: String, sha_method: String, prod_type: Option<String>, group_id: Option<String>, artifact_id: Option<String>, classifier: Option<String>, packaging: Option<String> } //-- helper functions pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error>
let e = Error::new( ErrorKind::Other, "Unsupported SHA response - expected array"); return Err(e); } let shas = res.as_array().unwrap(); if shas.len() == 0 { let e = Error::new( ErrorKind::Other, "No match for the SHA"); return Err(e); } let doc:ShaItem = serde_json::from_value(shas[0].clone()).unwrap(); let the_prod = product::Product { name: "".to_string(), language: doc.language, prod_key: doc.prod_key, version: doc.version, prod_type: doc.prod_type }; let the_sha = product::ProductSHA { packaging: doc.packaging.unwrap_or("unknown".to_string()), method: doc.sha_method, value: doc.sha_value, filepath: None }; Ok(product::ProductMatch::new(the_prod, the_sha)) } // converts the response of product endpoint into ProductMatch struct #[derive(Serialize, Deserialize, Debug)] struct ProductItem { name: String, language: String, prod_key: String, version: String, prod_type: String, } #[derive(Serialize, Deserialize, Debug)] struct LicenseItem { name: String, url: Option<String> } pub fn process_product_response( json_text: Option<String>, prod_url: Option<String> ) -> Result<product::ProductMatch, io::Error> { if json_text.is_none() { return Err( Error::new( ErrorKind::Other, "No response from API") ) } let res: serde_json::Value = serde_json::from_str( &json_text.unwrap().as_str() )?; if !res.is_object() { return Err(Error::new(ErrorKind::Other, "No product details")); } //if response includes error field in HTTP200 response // NB! it may include other errors than limit, but @Rob asked to see custom
{ if json_text.is_none() { return Err( Error::new(ErrorKind::Other, "No response from API") ) } let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?; if res.is_object() && res.get("error").is_some() { let e = Error::new( ErrorKind::Other, r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your subscription to a higher plan."# ); return Err(e); } if !res.is_array() {
identifier_body
api.rs
Some(val) => val, None => "http".to_string() }; let host = match api_confs.host.clone() { Some(val) => val, None => HOST_URL.to_string() }; let host_url = match api_confs.port.clone() { Some(port) => format!("{}://{}:{}", scheme, host, port), None => format!("{}://{}", scheme, host ) }; format!("{}/{}/{}/{}", host_url, lang, prod_key, version) } //it's used to build API url fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str) -> Result<hyper::Url, hyper::error::ParseError> { let url_str = match api_confs.port { None => { format!( "{}://{}/{}/{}", api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(), api_confs.path.clone().unwrap(), resource_path, ) }, Some(port) => format!( "{}://{}:{}/{}/{}", api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(), port, api_confs.path.clone().unwrap(), resource_path ) }; Url::parse(url_str.as_str()) } fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> { let ssl = NativeTlsClient::new().unwrap(); let connector = HttpsConnector::new(ssl); //use proxy only iff user has defined proxy host and port let mut client = if proxy_confs.is_complete() { let host = Cow::from(proxy_confs.host.clone().unwrap()); let port = proxy_confs.port.clone().unwrap(); let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string()); let ssl_proxy = NativeTlsClient::new().unwrap(); let proxy = client::ProxyConfig::new ( scheme.as_str(), host, port, connector, ssl_proxy ); Client::with_proxy_config(proxy) } else { Client::with_connector(connector) }; client.set_read_timeout(Some(Duration::new(5,0))); let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url"); let mut body = String::new(); res.read_to_string(&mut body).expect("Failed to read response body"); Some(body) } pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str) -> Result<product::ProductMatch, Error> { let sha_res = fetch_product_by_sha(&confs, file_sha); match sha_res { Ok(m) => { let sha = m.sha.expect("No product sha from SHA result"); let product = m.product.expect("No product info from SHA result"); match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) { Ok(mut m) => { m.sha = Some(sha); Ok(m) }, Err(e) => { println!("Failed to fetch product details for sha: {}", file_sha); Err(e) } } }, Err(e) => Err(e) } } pub fn fetch_product_by_sha(confs: &Configs, sha: &str) -> Result<product::ProductMatch, io::Error> { let api_confs = confs.api.clone(); let resource_path = format!("products/sha/{}", encode_sha(sha) ); let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) { Ok(the_url) => the_url, Err(_) => { return Err( Error::new( ErrorKind::InvalidData, "The values of API configs make up non-valid URL" ) ) } };
.clear() .append_pair("api_key", api_confs.key.clone().unwrap().as_str()); let json_txt = request_json( &resource_url, &confs.proxy ); process_sha_response(json_txt) } //replaces base64 special characters with HTML safe percentage encoding //source: https://en.wikipedia.org/wiki/Base64#URL_applications pub fn encode_sha<'a>(sha: &'a str) -> String { let encoded_sha = sha.to_string(); encoded_sha.replace("+", "%2B") .replace("/", "%2F") .replace("=", "%3D") .trim().to_string() } pub fn encode_prod_key<'b>(prod_key: &'b str) -> String { let encoded_prod_key = prod_key.to_string(); encoded_prod_key .replace(".", "~") .replace("/", ":") .trim().to_string() } pub fn encode_language<'b>(lang: &'b str) -> String { let encoded_lang = lang.to_string(); encoded_lang.replace(".", "").trim().to_lowercase().to_string() } pub fn fetch_product<'a>( confs: &Configs, lang: &str, prod_key: &str, version: &str ) -> Result<product::ProductMatch, io::Error> { let api_confs = confs.api.clone(); let encoded_prod_key = encode_prod_key(&prod_key); let encoded_lang = encode_language(lang); let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone()); let prod_url = to_product_url( &confs.api, encoded_lang.clone().as_str(), prod_key, version ); let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) { Ok(the_url) => the_url, Err(_) => { return Err( Error::new( ErrorKind::InvalidData, "The values of API configs make up non-valid URL" ) ) } }; //attach query params resource_url .query_pairs_mut() .clear() .append_pair("prod_version", version) .append_pair("api_key", api_confs.key.clone().unwrap().as_str()); let json_txt = request_json( &resource_url, &confs.proxy ); process_product_response(json_txt, Some(prod_url)) } #[derive(Serialize, Deserialize, Debug)] struct ApiError { error: String } #[derive(Serialize, Deserialize, Debug)] struct ShaItem { language: String, prod_key: String, version: String, sha_value: String, sha_method: String, prod_type: Option<String>, group_id: Option<String>, artifact_id: Option<String>, classifier: Option<String>, packaging: Option<String> } //-- helper functions pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> { if json_text.is_none() { return Err( Error::new(ErrorKind::Other, "No response from API") ) } let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?; if res.is_object() && res.get("error").is_some() { let e = Error::new( ErrorKind::Other, r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your subscription to a higher plan."# ); return Err(e); } if !res.is_array() { let e = Error::new( ErrorKind::Other, "Unsupported SHA response - expected array"); return Err(e); } let shas = res.as_array().unwrap(); if shas.len() == 0 { let e = Error::new( ErrorKind::Other, "No match for the SHA"); return Err(e); } let doc:ShaItem = serde_json::from_value(shas[0].clone()).unwrap(); let the_prod = product::Product { name: "".to_string(), language: doc.language, prod_key: doc.prod_key, version: doc.version, prod_type: doc.prod_type }; let the_sha = product::ProductSHA { packaging: doc.packaging.unwrap_or("unknown".to_string()), method: doc.sha_method, value: doc.sha_value, filepath: None }; Ok(product::ProductMatch::new(the_prod, the_sha)) } // converts the response of product endpoint into ProductMatch struct #[derive(Serialize, Deserialize, Debug)] struct ProductItem { name: String, language: String, prod_key: String, version: String, prod_type: String, } #[derive(Serialize, Deserialize, Debug)] struct LicenseItem { name: String, url: Option<String> } pub fn process_product_response( json_text: Option<String>, prod_url: Option<String> ) -> Result<product::ProductMatch, io::Error> { if json_text.is_none() { return Err( Error::new( ErrorKind::Other, "No response from API") ) } let res: serde_json::Value = serde_json::from_str( &json_text.unwrap().as_str() )?; if !res.is_object() { return Err(Error::new(ErrorKind::Other, "No product details")); } //if response includes error field in HTTP200 response // NB! it may include other errors than limit, but @Rob asked to see custom
//attach query params resource_url .query_pairs_mut()
random_line_split
api.rs
Some(val) => val, None => "http".to_string() }; let host = match api_confs.host.clone() { Some(val) => val, None => HOST_URL.to_string() }; let host_url = match api_confs.port.clone() { Some(port) => format!("{}://{}:{}", scheme, host, port), None => format!("{}://{}", scheme, host ) }; format!("{}/{}/{}/{}", host_url, lang, prod_key, version) } //it's used to build API url fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str) -> Result<hyper::Url, hyper::error::ParseError> { let url_str = match api_confs.port { None => { format!( "{}://{}/{}/{}", api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(), api_confs.path.clone().unwrap(), resource_path, ) }, Some(port) => format!( "{}://{}:{}/{}/{}", api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(), port, api_confs.path.clone().unwrap(), resource_path ) }; Url::parse(url_str.as_str()) } fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> { let ssl = NativeTlsClient::new().unwrap(); let connector = HttpsConnector::new(ssl); //use proxy only iff user has defined proxy host and port let mut client = if proxy_confs.is_complete() { let host = Cow::from(proxy_confs.host.clone().unwrap()); let port = proxy_confs.port.clone().unwrap(); let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string()); let ssl_proxy = NativeTlsClient::new().unwrap(); let proxy = client::ProxyConfig::new ( scheme.as_str(), host, port, connector, ssl_proxy ); Client::with_proxy_config(proxy) } else { Client::with_connector(connector) }; client.set_read_timeout(Some(Duration::new(5,0))); let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url"); let mut body = String::new(); res.read_to_string(&mut body).expect("Failed to read response body"); Some(body) } pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str) -> Result<product::ProductMatch, Error> { let sha_res = fetch_product_by_sha(&confs, file_sha); match sha_res { Ok(m) =>
, Err(e) => Err(e) } } pub fn fetch_product_by_sha(confs: &Configs, sha: &str) -> Result<product::ProductMatch, io::Error> { let api_confs = confs.api.clone(); let resource_path = format!("products/sha/{}", encode_sha(sha) ); let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) { Ok(the_url) => the_url, Err(_) => { return Err( Error::new( ErrorKind::InvalidData, "The values of API configs make up non-valid URL" ) ) } }; //attach query params resource_url .query_pairs_mut() .clear() .append_pair("api_key", api_confs.key.clone().unwrap().as_str()); let json_txt = request_json( &resource_url, &confs.proxy ); process_sha_response(json_txt) } //replaces base64 special characters with HTML safe percentage encoding //source: https://en.wikipedia.org/wiki/Base64#URL_applications pub fn encode_sha<'a>(sha: &'a str) -> String { let encoded_sha = sha.to_string(); encoded_sha.replace("+", "%2B") .replace("/", "%2F") .replace("=", "%3D") .trim().to_string() } pub fn encode_prod_key<'b>(prod_key: &'b str) -> String { let encoded_prod_key = prod_key.to_string(); encoded_prod_key .replace(".", "~") .replace("/", ":") .trim().to_string() } pub fn encode_language<'b>(lang: &'b str) -> String { let encoded_lang = lang.to_string(); encoded_lang.replace(".", "").trim().to_lowercase().to_string() } pub fn fetch_product<'a>( confs: &Configs, lang: &str, prod_key: &str, version: &str ) -> Result<product::ProductMatch, io::Error> { let api_confs = confs.api.clone(); let encoded_prod_key = encode_prod_key(&prod_key); let encoded_lang = encode_language(lang); let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone()); let prod_url = to_product_url( &confs.api, encoded_lang.clone().as_str(), prod_key, version ); let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) { Ok(the_url) => the_url, Err(_) => { return Err( Error::new( ErrorKind::InvalidData, "The values of API configs make up non-valid URL" ) ) } }; //attach query params resource_url .query_pairs_mut() .clear() .append_pair("prod_version", version) .append_pair("api_key", api_confs.key.clone().unwrap().as_str()); let json_txt = request_json( &resource_url, &confs.proxy ); process_product_response(json_txt, Some(prod_url)) } #[derive(Serialize, Deserialize, Debug)] struct ApiError { error: String } #[derive(Serialize, Deserialize, Debug)] struct ShaItem { language: String, prod_key: String, version: String, sha_value: String, sha_method: String, prod_type: Option<String>, group_id: Option<String>, artifact_id: Option<String>, classifier: Option<String>, packaging: Option<String> } //-- helper functions pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> { if json_text.is_none() { return Err( Error::new(ErrorKind::Other, "No response from API") ) } let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?; if res.is_object() && res.get("error").is_some() { let e = Error::new( ErrorKind::Other, r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your subscription to a higher plan."# ); return Err(e); } if !res.is_array() { let e = Error::new( ErrorKind::Other, "Unsupported SHA response - expected array"); return Err(e); } let shas = res.as_array().unwrap(); if shas.len() == 0 { let e = Error::new( ErrorKind::Other, "No match for the SHA"); return Err(e); } let doc:ShaItem = serde_json::from_value(shas[0].clone()).unwrap(); let the_prod = product::Product { name: "".to_string(), language: doc.language, prod_key: doc.prod_key, version: doc.version, prod_type: doc.prod_type }; let the_sha = product::ProductSHA { packaging: doc.packaging.unwrap_or("unknown".to_string()), method: doc.sha_method, value: doc.sha_value, filepath: None }; Ok(product::ProductMatch::new(the_prod, the_sha)) } // converts the response of product endpoint into ProductMatch struct #[derive(Serialize, Deserialize, Debug)] struct ProductItem { name: String, language: String, prod_key: String, version: String, prod_type: String, } #[derive(Serialize, Deserialize, Debug)] struct LicenseItem { name: String, url: Option<String> } pub fn process_product_response( json_text: Option<String>, prod_url: Option<String> ) -> Result<product::ProductMatch, io::Error> { if json_text.is_none() { return Err( Error::new( ErrorKind::Other, "No response from API") ) } let res: serde_json::Value = serde_json::from_str( &json_text.unwrap().as_str() )?; if !res.is_object() { return Err(Error::new(ErrorKind::Other, "No product details")); } //if response includes error field in HTTP200 response // NB! it may include other errors than limit, but @Rob asked to see
{ let sha = m.sha.expect("No product sha from SHA result"); let product = m.product.expect("No product info from SHA result"); match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) { Ok(mut m) => { m.sha = Some(sha); Ok(m) }, Err(e) => { println!("Failed to fetch product details for sha: {}", file_sha); Err(e) } } }
conditional_block
txn_ext.rs
isticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX, }; use slog::{error, info, Logger}; use crate::{ batch::StoreContext, raft::Peer, router::{PeerMsg, PeerTick}, worker::pd, SimpleWriteEncoder, }; pub struct TxnContext { ext: Arc<TxnExt>, extra_op: Arc<AtomicCell<ExtraOp>>, reactivate_memory_lock_ticks: usize, } impl Default for TxnContext { #[inline] fn default() -> Self { Self { ext: Arc::default(), extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)), reactivate_memory_lock_ticks: 0, } } } impl TxnContext { #[inline] pub fn on_region_changed(&self, term: u64, region: &Region) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>( &self, ctx: &mut StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) { // A more recent read may happen on the old leader. So max ts should // be updated after a peer becomes leader. self.require_updating_max_ts(ctx, term, region, logger); // Init the in-memory pessimistic lock table when the peer becomes leader. let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.status = LocksStatus::Normal; pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>( &self, ctx: &StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) { // If a follower merges into a leader, a more recent read may happen // on the leader of the follower. So max ts should be updated after // a region merge. self.require_updating_max_ts(ctx, term, region, logger); } #[inline] pub fn on_became_follower(&self, term: u64, region: &Region) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.status = LocksStatus::NotLeader; pessimistic_locks.clear(); pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn ext(&self) -> &Arc<TxnExt> { &self.ext } #[inline] pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> { &self.extra_op } fn require_updating_max_ts<EK, ER, T>( &self, ctx: &StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) where EK: KvEngine, ER: RaftEngine, { let epoch = region.get_region_epoch(); let term_low_bits = term & ((1 << 32) - 1); // 32 bits let version_lot_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits let initial_status = (term_low_bits << 32) | (version_lot_bits << 1); self.ext .max_ts_sync_status .store(initial_status, Ordering::SeqCst); info!( logger, "require updating max ts"; "initial_status" => initial_status, ); let task = pd::Task::UpdateMaxTimestamp { region_id: region.get_id(), initial_status, txn_ext: self.ext.clone(), }; if let Err(e) = ctx.schedulers.pd.schedule(task) { error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" => ?e); } } pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> { // Group in-memory pessimistic locks in the original region into new regions. // The locks of new regions will be put into the corresponding new regions // later. And the locks belonging to the old region will stay in the original // map. let mut pessimistic_locks = self.ext.pessimistic_locks.write(); // Update the version so the concurrent reader will fail due to EpochNotMatch // instead of PessimisticLockNotFound. pessimistic_locks.version = derived.get_region_epoch().get_version(); pessimistic_locks.group_by_regions(regions, derived) } pub fn init_with_lock(&self, locks: PeerPessimisticLocks) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); *pessimistic_locks = locks; } } impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> { /// Returns True means the tick is consumed, otherwise the tick should be /// rescheduled. pub fn on_reactivate_memory_lock_tick<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>)
&& txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick { pessimistic_locks.status = LocksStatus::Normal; txn_context.reactivate_memory_lock_ticks = 0; } else { drop(pessimistic_locks); self.add_pending_tick(PeerTick::ReactivateMemoryLock); } } // Returns whether we should propose another TransferLeader command. This is // for: // - Considering the amount of pessimistic locks can be big, it can reduce // unavailable time caused by waiting for the transferee catching up logs. // - Make transferring leader strictly after write commands that executes before // proposing the locks, preventing unexpected lock loss. pub fn propose_locks_before_transfer_leader<T>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: &eraftpb::Message, ) -> bool { // 1. Disable in-memory pessimistic locks. // Clone to make borrow checker happy when registering ticks. let txn_ext = self.txn_context().ext.clone(); let mut pessimistic_locks = txn_ext.pessimistic_locks.write(); // If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message // is a reply to a transfer leader command before. If the locks status remain // in the TransferringLeader status, we can safely initiate transferring leader // now. // If it's not in TransferringLeader status now, it is probably because several // ticks have passed after proposing the locks in the last time and we // reactivate the memory locks. Then, we should propose the locks again. if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX && pessimistic_locks.status == LocksStatus::TransferringLeader { return false; } // If it is not writable, it's probably because it's a retried TransferLeader // and the locks have been proposed. But we still need to return true to // propose another TransferLeader command. Otherwise, some write requests that // have marked some locks as deleted will fail because raft rejects more // proposals. // It is OK to return true here if it's in other states like MergingRegion or // NotLeader. In those cases, the locks will fail to propose and nothing will // happen. if !pessimistic_locks.is_writable() { return true; } pessimistic_locks.status = LocksStatus::TransferringLeader; self.txn_context_mut().reactivate_memory_lock_ticks = 0; self.add_pending_tick(PeerTick::ReactivateMemoryLock); // 2. Propose pessimistic locks if pessimistic_locks.is_empty() { return false; } // FIXME: Raft command has size limit. Either limit the
{ // If it is not leader, we needn't reactivate by tick. In-memory pessimistic // lock will be enabled when this region becomes leader again. if !self.is_leader() { return; } let transferring_leader = self.raft_group().raft.lead_transferee.is_some(); let txn_context = self.txn_context_mut(); let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write(); // And this tick is currently only used for the leader transfer failure case. if pessimistic_locks.status != LocksStatus::TransferringLeader { return; } txn_context.reactivate_memory_lock_ticks += 1; // `lead_transferee` is not set immediately after the lock status changes. So, // we need the tick count condition to avoid reactivating too early. if !transferring_leader
identifier_body
txn_ext.rs
isticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX, }; use slog::{error, info, Logger}; use crate::{ batch::StoreContext, raft::Peer, router::{PeerMsg, PeerTick}, worker::pd, SimpleWriteEncoder, }; pub struct TxnContext { ext: Arc<TxnExt>, extra_op: Arc<AtomicCell<ExtraOp>>, reactivate_memory_lock_ticks: usize, } impl Default for TxnContext { #[inline] fn default() -> Self { Self { ext: Arc::default(), extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)), reactivate_memory_lock_ticks: 0, } } } impl TxnContext { #[inline] pub fn on_region_changed(&self, term: u64, region: &Region) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>( &self, ctx: &mut StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) { // A more recent read may happen on the old leader. So max ts should // be updated after a peer becomes leader. self.require_updating_max_ts(ctx, term, region, logger); // Init the in-memory pessimistic lock table when the peer becomes leader. let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.status = LocksStatus::Normal; pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>( &self, ctx: &StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) { // If a follower merges into a leader, a more recent read may happen // on the leader of the follower. So max ts should be updated after // a region merge. self.require_updating_max_ts(ctx, term, region, logger); } #[inline] pub fn on_became_follower(&self, term: u64, region: &Region) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.status = LocksStatus::NotLeader; pessimistic_locks.clear(); pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn ext(&self) -> &Arc<TxnExt> { &self.ext } #[inline] pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> { &self.extra_op } fn require_updating_max_ts<EK, ER, T>( &self, ctx: &StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) where EK: KvEngine, ER: RaftEngine, { let epoch = region.get_region_epoch(); let term_low_bits = term & ((1 << 32) - 1); // 32 bits let version_lot_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits let initial_status = (term_low_bits << 32) | (version_lot_bits << 1); self.ext .max_ts_sync_status .store(initial_status, Ordering::SeqCst); info!( logger, "require updating max ts"; "initial_status" => initial_status, ); let task = pd::Task::UpdateMaxTimestamp { region_id: region.get_id(), initial_status, txn_ext: self.ext.clone(), }; if let Err(e) = ctx.schedulers.pd.schedule(task) { error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" => ?e); } } pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> { // Group in-memory pessimistic locks in the original region into new regions. // The locks of new regions will be put into the corresponding new regions // later. And the locks belonging to the old region will stay in the original // map. let mut pessimistic_locks = self.ext.pessimistic_locks.write(); // Update the version so the concurrent reader will fail due to EpochNotMatch // instead of PessimisticLockNotFound. pessimistic_locks.version = derived.get_region_epoch().get_version(); pessimistic_locks.group_by_regions(regions, derived) } pub fn init_with_lock(&self, locks: PeerPessimisticLocks) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); *pessimistic_locks = locks; } } impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> { /// Returns True means the tick is consumed, otherwise the tick should be /// rescheduled. pub fn
<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) { // If it is not leader, we needn't reactivate by tick. In-memory pessimistic // lock will be enabled when this region becomes leader again. if !self.is_leader() { return; } let transferring_leader = self.raft_group().raft.lead_transferee.is_some(); let txn_context = self.txn_context_mut(); let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write(); // And this tick is currently only used for the leader transfer failure case. if pessimistic_locks.status != LocksStatus::TransferringLeader { return; } txn_context.reactivate_memory_lock_ticks += 1; // `lead_transferee` is not set immediately after the lock status changes. So, // we need the tick count condition to avoid reactivating too early. if !transferring_leader && txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick { pessimistic_locks.status = LocksStatus::Normal; txn_context.reactivate_memory_lock_ticks = 0; } else { drop(pessimistic_locks); self.add_pending_tick(PeerTick::ReactivateMemoryLock); } } // Returns whether we should propose another TransferLeader command. This is // for: // - Considering the amount of pessimistic locks can be big, it can reduce // unavailable time caused by waiting for the transferee catching up logs. // - Make transferring leader strictly after write commands that executes before // proposing the locks, preventing unexpected lock loss. pub fn propose_locks_before_transfer_leader<T>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: &eraftpb::Message, ) -> bool { // 1. Disable in-memory pessimistic locks. // Clone to make borrow checker happy when registering ticks. let txn_ext = self.txn_context().ext.clone(); let mut pessimistic_locks = txn_ext.pessimistic_locks.write(); // If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message // is a reply to a transfer leader command before. If the locks status remain // in the TransferringLeader status, we can safely initiate transferring leader // now. // If it's not in TransferringLeader status now, it is probably because several // ticks have passed after proposing the locks in the last time and we // reactivate the memory locks. Then, we should propose the locks again. if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX && pessimistic_locks.status == LocksStatus::TransferringLeader { return false; } // If it is not writable, it's probably because it's a retried TransferLeader // and the locks have been proposed. But we still need to return true to // propose another TransferLeader command. Otherwise, some write requests that // have marked some locks as deleted will fail because raft rejects more // proposals. // It is OK to return true here if it's in other states like MergingRegion or // NotLeader. In those cases, the locks will fail to propose and nothing will // happen. if !pessimistic_locks.is_writable() { return true; } pessimistic_locks.status = LocksStatus::TransferringLeader; self.txn_context_mut().reactivate_memory_lock_ticks = 0; self.add_pending_tick(PeerTick::ReactivateMemoryLock); // 2. Propose pessimistic locks if pessimistic_locks.is_empty() { return false; } // FIXME: Raft command has size limit. Either limit
on_reactivate_memory_lock_tick
identifier_name
txn_ext.rs
_op: Arc::new(AtomicCell::new(ExtraOp::Noop)), reactivate_memory_lock_ticks: 0, } } } impl TxnContext { #[inline] pub fn on_region_changed(&self, term: u64, region: &Region) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>( &self, ctx: &mut StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) { // A more recent read may happen on the old leader. So max ts should // be updated after a peer becomes leader. self.require_updating_max_ts(ctx, term, region, logger); // Init the in-memory pessimistic lock table when the peer becomes leader. let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.status = LocksStatus::Normal; pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>( &self, ctx: &StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) { // If a follower merges into a leader, a more recent read may happen // on the leader of the follower. So max ts should be updated after // a region merge. self.require_updating_max_ts(ctx, term, region, logger); } #[inline] pub fn on_became_follower(&self, term: u64, region: &Region) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.status = LocksStatus::NotLeader; pessimistic_locks.clear(); pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn ext(&self) -> &Arc<TxnExt> { &self.ext } #[inline] pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> { &self.extra_op } fn require_updating_max_ts<EK, ER, T>( &self, ctx: &StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) where EK: KvEngine, ER: RaftEngine, { let epoch = region.get_region_epoch(); let term_low_bits = term & ((1 << 32) - 1); // 32 bits let version_lot_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits let initial_status = (term_low_bits << 32) | (version_lot_bits << 1); self.ext .max_ts_sync_status .store(initial_status, Ordering::SeqCst); info!( logger, "require updating max ts"; "initial_status" => initial_status, ); let task = pd::Task::UpdateMaxTimestamp { region_id: region.get_id(), initial_status, txn_ext: self.ext.clone(), }; if let Err(e) = ctx.schedulers.pd.schedule(task) { error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" => ?e); } } pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> { // Group in-memory pessimistic locks in the original region into new regions. // The locks of new regions will be put into the corresponding new regions // later. And the locks belonging to the old region will stay in the original // map. let mut pessimistic_locks = self.ext.pessimistic_locks.write(); // Update the version so the concurrent reader will fail due to EpochNotMatch // instead of PessimisticLockNotFound. pessimistic_locks.version = derived.get_region_epoch().get_version(); pessimistic_locks.group_by_regions(regions, derived) } pub fn init_with_lock(&self, locks: PeerPessimisticLocks) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); *pessimistic_locks = locks; } } impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> { /// Returns True means the tick is consumed, otherwise the tick should be /// rescheduled. pub fn on_reactivate_memory_lock_tick<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) { // If it is not leader, we needn't reactivate by tick. In-memory pessimistic // lock will be enabled when this region becomes leader again. if !self.is_leader() { return; } let transferring_leader = self.raft_group().raft.lead_transferee.is_some(); let txn_context = self.txn_context_mut(); let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write(); // And this tick is currently only used for the leader transfer failure case. if pessimistic_locks.status != LocksStatus::TransferringLeader { return; } txn_context.reactivate_memory_lock_ticks += 1; // `lead_transferee` is not set immediately after the lock status changes. So, // we need the tick count condition to avoid reactivating too early. if !transferring_leader && txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick { pessimistic_locks.status = LocksStatus::Normal; txn_context.reactivate_memory_lock_ticks = 0; } else { drop(pessimistic_locks); self.add_pending_tick(PeerTick::ReactivateMemoryLock); } } // Returns whether we should propose another TransferLeader command. This is // for: // - Considering the amount of pessimistic locks can be big, it can reduce // unavailable time caused by waiting for the transferee catching up logs. // - Make transferring leader strictly after write commands that executes before // proposing the locks, preventing unexpected lock loss. pub fn propose_locks_before_transfer_leader<T>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: &eraftpb::Message, ) -> bool { // 1. Disable in-memory pessimistic locks. // Clone to make borrow checker happy when registering ticks. let txn_ext = self.txn_context().ext.clone(); let mut pessimistic_locks = txn_ext.pessimistic_locks.write(); // If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message // is a reply to a transfer leader command before. If the locks status remain // in the TransferringLeader status, we can safely initiate transferring leader // now. // If it's not in TransferringLeader status now, it is probably because several // ticks have passed after proposing the locks in the last time and we // reactivate the memory locks. Then, we should propose the locks again. if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX && pessimistic_locks.status == LocksStatus::TransferringLeader { return false; } // If it is not writable, it's probably because it's a retried TransferLeader // and the locks have been proposed. But we still need to return true to // propose another TransferLeader command. Otherwise, some write requests that // have marked some locks as deleted will fail because raft rejects more // proposals. // It is OK to return true here if it's in other states like MergingRegion or // NotLeader. In those cases, the locks will fail to propose and nothing will // happen. if !pessimistic_locks.is_writable() { return true; } pessimistic_locks.status = LocksStatus::TransferringLeader; self.txn_context_mut().reactivate_memory_lock_ticks = 0; self.add_pending_tick(PeerTick::ReactivateMemoryLock); // 2. Propose pessimistic locks if pessimistic_locks.is_empty() { return false; } // FIXME: Raft command has size limit. Either limit the total size of // pessimistic locks in a region, or split commands here. let mut encoder = SimpleWriteEncoder::with_capacity(512); let mut lock_count = 0; { // Downgrade to a read guard, do not block readers in the scheduler as far as // possible.
let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks); fail::fail_point!("invalidate_locks_before_transfer_leader"); for (key, (lock, deleted)) in &*pessimistic_locks { if *deleted {
random_line_split
txn_ext.rs
Locks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX, }; use slog::{error, info, Logger}; use crate::{ batch::StoreContext, raft::Peer, router::{PeerMsg, PeerTick}, worker::pd, SimpleWriteEncoder, }; pub struct TxnContext { ext: Arc<TxnExt>, extra_op: Arc<AtomicCell<ExtraOp>>, reactivate_memory_lock_ticks: usize, } impl Default for TxnContext { #[inline] fn default() -> Self { Self { ext: Arc::default(), extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)), reactivate_memory_lock_ticks: 0, } } } impl TxnContext { #[inline] pub fn on_region_changed(&self, term: u64, region: &Region) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>( &self, ctx: &mut StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) { // A more recent read may happen on the old leader. So max ts should // be updated after a peer becomes leader. self.require_updating_max_ts(ctx, term, region, logger); // Init the in-memory pessimistic lock table when the peer becomes leader. let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.status = LocksStatus::Normal; pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>( &self, ctx: &StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) { // If a follower merges into a leader, a more recent read may happen // on the leader of the follower. So max ts should be updated after // a region merge. self.require_updating_max_ts(ctx, term, region, logger); } #[inline] pub fn on_became_follower(&self, term: u64, region: &Region) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); pessimistic_locks.status = LocksStatus::NotLeader; pessimistic_locks.clear(); pessimistic_locks.term = term; pessimistic_locks.version = region.get_region_epoch().get_version(); } #[inline] pub fn ext(&self) -> &Arc<TxnExt> { &self.ext } #[inline] pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> { &self.extra_op } fn require_updating_max_ts<EK, ER, T>( &self, ctx: &StoreContext<EK, ER, T>, term: u64, region: &Region, logger: &Logger, ) where EK: KvEngine, ER: RaftEngine, { let epoch = region.get_region_epoch(); let term_low_bits = term & ((1 << 32) - 1); // 32 bits let version_lot_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits let initial_status = (term_low_bits << 32) | (version_lot_bits << 1); self.ext .max_ts_sync_status .store(initial_status, Ordering::SeqCst); info!( logger, "require updating max ts"; "initial_status" => initial_status, ); let task = pd::Task::UpdateMaxTimestamp { region_id: region.get_id(), initial_status, txn_ext: self.ext.clone(), }; if let Err(e) = ctx.schedulers.pd.schedule(task) { error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" => ?e); } } pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> { // Group in-memory pessimistic locks in the original region into new regions. // The locks of new regions will be put into the corresponding new regions // later. And the locks belonging to the old region will stay in the original // map. let mut pessimistic_locks = self.ext.pessimistic_locks.write(); // Update the version so the concurrent reader will fail due to EpochNotMatch // instead of PessimisticLockNotFound. pessimistic_locks.version = derived.get_region_epoch().get_version(); pessimistic_locks.group_by_regions(regions, derived) } pub fn init_with_lock(&self, locks: PeerPessimisticLocks) { let mut pessimistic_locks = self.ext.pessimistic_locks.write(); *pessimistic_locks = locks; } } impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> { /// Returns True means the tick is consumed, otherwise the tick should be /// rescheduled. pub fn on_reactivate_memory_lock_tick<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) { // If it is not leader, we needn't reactivate by tick. In-memory pessimistic // lock will be enabled when this region becomes leader again. if !self.is_leader() { return; } let transferring_leader = self.raft_group().raft.lead_transferee.is_some(); let txn_context = self.txn_context_mut(); let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write(); // And this tick is currently only used for the leader transfer failure case. if pessimistic_locks.status != LocksStatus::TransferringLeader { return; } txn_context.reactivate_memory_lock_ticks += 1; // `lead_transferee` is not set immediately after the lock status changes. So, // we need the tick count condition to avoid reactivating too early. if !transferring_leader && txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick { pessimistic_locks.status = LocksStatus::Normal; txn_context.reactivate_memory_lock_ticks = 0; } else
} // Returns whether we should propose another TransferLeader command. This is // for: // - Considering the amount of pessimistic locks can be big, it can reduce // unavailable time caused by waiting for the transferee catching up logs. // - Make transferring leader strictly after write commands that executes before // proposing the locks, preventing unexpected lock loss. pub fn propose_locks_before_transfer_leader<T>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: &eraftpb::Message, ) -> bool { // 1. Disable in-memory pessimistic locks. // Clone to make borrow checker happy when registering ticks. let txn_ext = self.txn_context().ext.clone(); let mut pessimistic_locks = txn_ext.pessimistic_locks.write(); // If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message // is a reply to a transfer leader command before. If the locks status remain // in the TransferringLeader status, we can safely initiate transferring leader // now. // If it's not in TransferringLeader status now, it is probably because several // ticks have passed after proposing the locks in the last time and we // reactivate the memory locks. Then, we should propose the locks again. if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX && pessimistic_locks.status == LocksStatus::TransferringLeader { return false; } // If it is not writable, it's probably because it's a retried TransferLeader // and the locks have been proposed. But we still need to return true to // propose another TransferLeader command. Otherwise, some write requests that // have marked some locks as deleted will fail because raft rejects more // proposals. // It is OK to return true here if it's in other states like MergingRegion or // NotLeader. In those cases, the locks will fail to propose and nothing will // happen. if !pessimistic_locks.is_writable() { return true; } pessimistic_locks.status = LocksStatus::TransferringLeader; self.txn_context_mut().reactivate_memory_lock_ticks = 0; self.add_pending_tick(PeerTick::ReactivateMemoryLock); // 2. Propose pessimistic locks if pessimistic_locks.is_empty() { return false; } // FIXME: Raft command has size limit. Either limit
{ drop(pessimistic_locks); self.add_pending_tick(PeerTick::ReactivateMemoryLock); }
conditional_block
trx_mgr.go
return nil } // CheckSignerKey checks if the transaction is signed by correct public key. func (e *TrxEntry) CheckSignerKey(fetcher *AuthFetcher) error { if err := fetcher.CheckPublicKey(e.signer, e.signerKey); err != nil { return e.SetError(fmt.Errorf("signature failed: %s", err.Error())) } return nil } // CheckInBlockTrxs checks if the transaction is a duplicate of any old transaction. func (e *TrxEntry) CheckInBlockTrxs(checker *InBlockTrxChecker) error { if checker.Has(e.result.SigTrx) { return e.SetError(errors.New("found duplicate in-block trx")) } return nil } func (e *TrxEntry) GetTrxResult() *prototype.TransactionWrapperWithInfo { return e.result } func (e *TrxEntry) GetTrxSize() int { return e.size } func (e *TrxEntry) GetTrxSigner() string { return e.signer } func (e *TrxEntry) GetTrxSigningKey() *prototype.PublicKeyType { return e.signerKey } const ( // maximum count of transactions that are waiting to be packed to blocks. // if this limit is reached, any incoming transaction will be refused directly. sMaxWaitingCount = constants.TrxMaxExpirationTime * 2000 // threshold over which cleanings are necessary sWaitingCountWaterMark = sMaxWaitingCount / 10 // minimal interval between cleanings sMinCleanupInterval = 10 * time.Second // shrink the waiting/fetched pools every 100K transactions sShrinkCountWaterMark = 100000 ) // ITrxMgrPlugin is an interface of manager plugins. type ITrxMgrPlugin interface { BlockApplied(b *prototype.SignedBlock) // called once after a block is successfully applied. BlockReverted(blockNum uint64) // called once after a block is successfully reverted. BlockCommitted(blockNum uint64) // called once after a block is successfully committed. } // The transaction manager. type TrxMgr struct { chainId prototype.ChainId // the chain db iservices.IDatabaseRW // the database log *logrus.Logger // the logger headTime uint32 // timestamp of head block, in seconds waiting map[string]*TrxEntry // transactions waiting to be packed to blocks, trxId -> entry waitingLock sync.RWMutex // lock of waiting transactions fetched map[string]*TrxEntry // transactions being packed to a block, trxId -> entry fetchedLock sync.RWMutex // lock of fetched transactions auth *AuthFetcher // checker of transaction signatures tapos *TaposChecker // checker of transaction tapos history *InBlockTrxChecker // checker of transaction duplication plugins []ITrxMgrPlugin // manager plugins, consisting of above checkers lastCleanTime time.Time // last time we clean up expired waiting transactions shrinkCounter uint64 // a counter to determine when to shrink pools } // NewTrxMgr creates an instance of TrxMgr. func NewTrxMgr(chainId prototype.ChainId, db iservices.IDatabaseRW, logger *logrus.Logger, lastBlock, commitBlock uint64) *TrxMgr { auth := NewAuthFetcher(db, logger, lastBlock, commitBlock) tapos := NewTaposChecker(db, logger, lastBlock) history := NewInBlockTrxChecker(db, logger, lastBlock) return &TrxMgr{ chainId: chainId, db: db, log: logger, headTime: (&DynamicGlobalPropsRW{db:db}).GetProps().GetTime().GetUtcSeconds(), waiting: make(map[string]*TrxEntry), fetched: make(map[string]*TrxEntry), auth: auth, tapos: tapos, history: history, plugins: []ITrxMgrPlugin{ auth, tapos, history }, lastCleanTime: time.Now(), } } // AddTrx processes an incoming transaction. // AddTrx returns nil if the incoming transaction is accepted, otherwise an error is returned. // If a non-nil callback is given, it will be called once asynchronously with the final process result. func (m *TrxMgr) AddTrx(trx *prototype.SignedTransaction, callback TrxCallback) error { entry := NewTrxMgrEntry(m.chainId, trx, callback) // very basic nil pointer check if trx == nil || trx.Signature == nil { err := entry.SetError(errors.New("invalid trx")) m.deliverEntry(entry) return err } // very basic duplication check if m.isProcessingTrx(trx) != nil { err := entry.SetError(errors.New("trx already in process")) m.deliverEntry(entry) return err } c := make(chan error) go func() { ok := false // check the transaction if entry.InitCheck() != nil || m.checkTrx(entry, atomic.LoadUint32(&m.headTime), false) != nil { // deliver if failed m.deliverEntry(entry) } else { // if passed, try adding it to the waiting pool m.waitingLock.Lock() m.fetchedLock.RLock() ok = m.addToWaiting(entry) > 0 m.fetchedLock.RUnlock() m.waitingLock.Unlock() } if !ok { c <- errors.New(entry.result.Receipt.ErrorInfo) } else { c <- nil } }() return <-c } // WaitingCount returns number of transactions that are waiting to be packed to blocks. func (m *TrxMgr) WaitingCount() int { m.waitingLock.RLock() defer m.waitingLock.RUnlock() return len(m.waiting) } // FetchTrx fetches a batch of transactions from waiting pool. // Block producer should call FetchTrx to collect transactions of new blocks. func (m *TrxMgr) FetchTrx(blockTime uint32, maxCount, maxSize int) (entries []*TrxEntry) { m.waitingLock.Lock() defer m.waitingLock.Unlock() m.fetchedLock.Lock() defer m.fetchedLock.Unlock() counter, size := 0, 0 // traverse the waiting pool for s, e := range m.waiting { // check count limit if maxCount > 0 && counter >= maxCount { break } // check size limit if maxSize > 0 && size >= maxSize { break } // check the transaction again // although transactions in the waiting pool had passed checks when they entered, // but chain state is keep changing, we have to redo state-dependent checks. if err := m.checkTrx(e, blockTime, true); err != nil { // if failed, deliver the transaction. m.log.Debugf("TRXMGR: FetchTrx check failed: %v, trxId=%x", err, []byte(e.trxId)) m.deliverEntry(e) } else { // if passed, pick it entries = append(entries, e) // add it to the fetched pool m.fetched[s] = e counter++ size += e.size } // remove from waiting pool delete(m.waiting, s) } return } // ReturnTrx notifies that some previously fetched transactions can't be packed into a block due to errors. // Block producer should call ReturnTrx for transactions that failed being applied. func (m *TrxMgr) ReturnTrx(entries ...*TrxEntry) { m.log.Debug("TRXMGR: ReturnTrx begin") timing := common.NewTiming() timing.Begin() m.fetchedLock.Lock() defer m.fetchedLock.Unlock() timing.Mark() for _, e := range entries { // any returning transaction should be previously fetched f := m.fetched[e.trxId] if f != nil { m.deliverEntry(f) delete(m.fetched, e.trxId) } } timing.End() m.log.Debugf("TRXMGR: ReturnTrx end: #tx=%d, %s", len(entries), timing.String()) } // CheckBlockTrxs checks if transactions of a block are valid. // If everything is ok, CheckBlockTrxs returns a TrxEntry slice for transactions and nil error, otherwise, a nil slice // and an error is returned. func (m *TrxMgr) CheckBlockTrxs(b *prototype.SignedBlock) (entries []*TrxEntry, err error) { m.log.Debugf("TRXMGR: CheckBlockTrxs begin %d", b.SignedHeader.Number()) t0 := common.EasyTimer() if count := len(b.Transactions); count > 0 { blockTime := b.SignedHeader.Header.Timestamp.UtcSeconds errs := make([]error, count) entries = make([]*TrxEntry, count)
{ return e.SetError(fmt.Errorf("tapos failed: %s", err.Error())) }
conditional_block
trx_mgr.go
x) { return e.SetError(errors.New("found duplicate in-block trx")) } return nil } func (e *TrxEntry) GetTrxResult() *prototype.TransactionWrapperWithInfo { return e.result } func (e *TrxEntry) GetTrxSize() int { return e.size } func (e *TrxEntry) GetTrxSigner() string { return e.signer } func (e *TrxEntry) GetTrxSigningKey() *prototype.PublicKeyType { return e.signerKey } const ( // maximum count of transactions that are waiting to be packed to blocks. // if this limit is reached, any incoming transaction will be refused directly. sMaxWaitingCount = constants.TrxMaxExpirationTime * 2000 // threshold over which cleanings are necessary sWaitingCountWaterMark = sMaxWaitingCount / 10 // minimal interval between cleanings sMinCleanupInterval = 10 * time.Second // shrink the waiting/fetched pools every 100K transactions sShrinkCountWaterMark = 100000 ) // ITrxMgrPlugin is an interface of manager plugins. type ITrxMgrPlugin interface { BlockApplied(b *prototype.SignedBlock) // called once after a block is successfully applied. BlockReverted(blockNum uint64) // called once after a block is successfully reverted. BlockCommitted(blockNum uint64) // called once after a block is successfully committed. } // The transaction manager. type TrxMgr struct { chainId prototype.ChainId // the chain db iservices.IDatabaseRW // the database log *logrus.Logger // the logger headTime uint32 // timestamp of head block, in seconds waiting map[string]*TrxEntry // transactions waiting to be packed to blocks, trxId -> entry waitingLock sync.RWMutex // lock of waiting transactions fetched map[string]*TrxEntry // transactions being packed to a block, trxId -> entry fetchedLock sync.RWMutex // lock of fetched transactions auth *AuthFetcher // checker of transaction signatures tapos *TaposChecker // checker of transaction tapos history *InBlockTrxChecker // checker of transaction duplication plugins []ITrxMgrPlugin // manager plugins, consisting of above checkers lastCleanTime time.Time // last time we clean up expired waiting transactions shrinkCounter uint64 // a counter to determine when to shrink pools } // NewTrxMgr creates an instance of TrxMgr. func NewTrxMgr(chainId prototype.ChainId, db iservices.IDatabaseRW, logger *logrus.Logger, lastBlock, commitBlock uint64) *TrxMgr { auth := NewAuthFetcher(db, logger, lastBlock, commitBlock) tapos := NewTaposChecker(db, logger, lastBlock) history := NewInBlockTrxChecker(db, logger, lastBlock) return &TrxMgr{ chainId: chainId, db: db, log: logger, headTime: (&DynamicGlobalPropsRW{db:db}).GetProps().GetTime().GetUtcSeconds(), waiting: make(map[string]*TrxEntry), fetched: make(map[string]*TrxEntry), auth: auth, tapos: tapos, history: history, plugins: []ITrxMgrPlugin{ auth, tapos, history }, lastCleanTime: time.Now(), } } // AddTrx processes an incoming transaction. // AddTrx returns nil if the incoming transaction is accepted, otherwise an error is returned. // If a non-nil callback is given, it will be called once asynchronously with the final process result. func (m *TrxMgr) AddTrx(trx *prototype.SignedTransaction, callback TrxCallback) error { entry := NewTrxMgrEntry(m.chainId, trx, callback) // very basic nil pointer check if trx == nil || trx.Signature == nil { err := entry.SetError(errors.New("invalid trx")) m.deliverEntry(entry) return err } // very basic duplication check if m.isProcessingTrx(trx) != nil { err := entry.SetError(errors.New("trx already in process")) m.deliverEntry(entry) return err } c := make(chan error) go func() { ok := false // check the transaction if entry.InitCheck() != nil || m.checkTrx(entry, atomic.LoadUint32(&m.headTime), false) != nil { // deliver if failed m.deliverEntry(entry) } else { // if passed, try adding it to the waiting pool m.waitingLock.Lock() m.fetchedLock.RLock() ok = m.addToWaiting(entry) > 0 m.fetchedLock.RUnlock() m.waitingLock.Unlock() } if !ok { c <- errors.New(entry.result.Receipt.ErrorInfo) } else { c <- nil } }() return <-c } // WaitingCount returns number of transactions that are waiting to be packed to blocks. func (m *TrxMgr) WaitingCount() int { m.waitingLock.RLock() defer m.waitingLock.RUnlock() return len(m.waiting) } // FetchTrx fetches a batch of transactions from waiting pool. // Block producer should call FetchTrx to collect transactions of new blocks. func (m *TrxMgr) FetchTrx(blockTime uint32, maxCount, maxSize int) (entries []*TrxEntry) { m.waitingLock.Lock() defer m.waitingLock.Unlock() m.fetchedLock.Lock() defer m.fetchedLock.Unlock() counter, size := 0, 0 // traverse the waiting pool for s, e := range m.waiting { // check count limit if maxCount > 0 && counter >= maxCount { break } // check size limit if maxSize > 0 && size >= maxSize { break } // check the transaction again // although transactions in the waiting pool had passed checks when they entered, // but chain state is keep changing, we have to redo state-dependent checks. if err := m.checkTrx(e, blockTime, true); err != nil { // if failed, deliver the transaction. m.log.Debugf("TRXMGR: FetchTrx check failed: %v, trxId=%x", err, []byte(e.trxId)) m.deliverEntry(e) } else { // if passed, pick it entries = append(entries, e) // add it to the fetched pool m.fetched[s] = e counter++ size += e.size } // remove from waiting pool delete(m.waiting, s) } return } // ReturnTrx notifies that some previously fetched transactions can't be packed into a block due to errors. // Block producer should call ReturnTrx for transactions that failed being applied. func (m *TrxMgr) ReturnTrx(entries ...*TrxEntry) { m.log.Debug("TRXMGR: ReturnTrx begin") timing := common.NewTiming() timing.Begin() m.fetchedLock.Lock() defer m.fetchedLock.Unlock() timing.Mark() for _, e := range entries { // any returning transaction should be previously fetched f := m.fetched[e.trxId] if f != nil { m.deliverEntry(f) delete(m.fetched, e.trxId) } } timing.End() m.log.Debugf("TRXMGR: ReturnTrx end: #tx=%d, %s", len(entries), timing.String()) } // CheckBlockTrxs checks if transactions of a block are valid. // If everything is ok, CheckBlockTrxs returns a TrxEntry slice for transactions and nil error, otherwise, a nil slice // and an error is returned. func (m *TrxMgr) CheckBlockTrxs(b *prototype.SignedBlock) (entries []*TrxEntry, err error) { m.log.Debugf("TRXMGR: CheckBlockTrxs begin %d", b.SignedHeader.Number()) t0 := common.EasyTimer() if count := len(b.Transactions); count > 0 { blockTime := b.SignedHeader.Header.Timestamp.UtcSeconds errs := make([]error, count) entries = make([]*TrxEntry, count) errIdx := int32(-1) var wg sync.WaitGroup wg.Add(count) // check transactions asynchronously for i := 0; i < count; i++ { go func(idx int) { defer wg.Done() var err error trx := b.Transactions[idx].SigTrx e := NewTrxMgrEntry(m.chainId, trx, nil) // do we need the initial check? // yes for transactions that we never met, otherwise no. needInitCheck := true
// if we have met this transaction before, skip initial check and fill up extra information. // this voids doing the expensive public key recovery again.
random_line_split
trx_mgr.go
correct public key. func (e *TrxEntry) CheckSignerKey(fetcher *AuthFetcher) error { if err := fetcher.CheckPublicKey(e.signer, e.signerKey); err != nil { return e.SetError(fmt.Errorf("signature failed: %s", err.Error())) } return nil } // CheckInBlockTrxs checks if the transaction is a duplicate of any old transaction. func (e *TrxEntry) CheckInBlockTrxs(checker *InBlockTrxChecker) error { if checker.Has(e.result.SigTrx) { return e.SetError(errors.New("found duplicate in-block trx")) } return nil } func (e *TrxEntry) GetTrxResult() *prototype.TransactionWrapperWithInfo { return e.result } func (e *TrxEntry) GetTrxSize() int { return e.size } func (e *TrxEntry) GetTrxSigner() string { return e.signer } func (e *TrxEntry) GetTrxSigningKey() *prototype.PublicKeyType { return e.signerKey } const ( // maximum count of transactions that are waiting to be packed to blocks. // if this limit is reached, any incoming transaction will be refused directly. sMaxWaitingCount = constants.TrxMaxExpirationTime * 2000 // threshold over which cleanings are necessary sWaitingCountWaterMark = sMaxWaitingCount / 10 // minimal interval between cleanings sMinCleanupInterval = 10 * time.Second // shrink the waiting/fetched pools every 100K transactions sShrinkCountWaterMark = 100000 ) // ITrxMgrPlugin is an interface of manager plugins. type ITrxMgrPlugin interface { BlockApplied(b *prototype.SignedBlock) // called once after a block is successfully applied. BlockReverted(blockNum uint64) // called once after a block is successfully reverted. BlockCommitted(blockNum uint64) // called once after a block is successfully committed. } // The transaction manager. type TrxMgr struct { chainId prototype.ChainId // the chain db iservices.IDatabaseRW // the database log *logrus.Logger // the logger headTime uint32 // timestamp of head block, in seconds waiting map[string]*TrxEntry // transactions waiting to be packed to blocks, trxId -> entry waitingLock sync.RWMutex // lock of waiting transactions fetched map[string]*TrxEntry // transactions being packed to a block, trxId -> entry fetchedLock sync.RWMutex // lock of fetched transactions auth *AuthFetcher // checker of transaction signatures tapos *TaposChecker // checker of transaction tapos history *InBlockTrxChecker // checker of transaction duplication plugins []ITrxMgrPlugin // manager plugins, consisting of above checkers lastCleanTime time.Time // last time we clean up expired waiting transactions shrinkCounter uint64 // a counter to determine when to shrink pools } // NewTrxMgr creates an instance of TrxMgr. func NewTrxMgr(chainId prototype.ChainId, db iservices.IDatabaseRW, logger *logrus.Logger, lastBlock, commitBlock uint64) *TrxMgr { auth := NewAuthFetcher(db, logger, lastBlock, commitBlock) tapos := NewTaposChecker(db, logger, lastBlock) history := NewInBlockTrxChecker(db, logger, lastBlock) return &TrxMgr{ chainId: chainId, db: db, log: logger, headTime: (&DynamicGlobalPropsRW{db:db}).GetProps().GetTime().GetUtcSeconds(), waiting: make(map[string]*TrxEntry), fetched: make(map[string]*TrxEntry), auth: auth, tapos: tapos, history: history, plugins: []ITrxMgrPlugin{ auth, tapos, history }, lastCleanTime: time.Now(), } } // AddTrx processes an incoming transaction. // AddTrx returns nil if the incoming transaction is accepted, otherwise an error is returned. // If a non-nil callback is given, it will be called once asynchronously with the final process result. func (m *TrxMgr) AddTrx(trx *prototype.SignedTransaction, callback TrxCallback) error { entry := NewTrxMgrEntry(m.chainId, trx, callback) // very basic nil pointer check if trx == nil || trx.Signature == nil { err := entry.SetError(errors.New("invalid trx")) m.deliverEntry(entry) return err } // very basic duplication check if m.isProcessingTrx(trx) != nil { err := entry.SetError(errors.New("trx already in process")) m.deliverEntry(entry) return err } c := make(chan error) go func() { ok := false // check the transaction if entry.InitCheck() != nil || m.checkTrx(entry, atomic.LoadUint32(&m.headTime), false) != nil { // deliver if failed m.deliverEntry(entry) } else { // if passed, try adding it to the waiting pool m.waitingLock.Lock() m.fetchedLock.RLock() ok = m.addToWaiting(entry) > 0 m.fetchedLock.RUnlock() m.waitingLock.Unlock() } if !ok { c <- errors.New(entry.result.Receipt.ErrorInfo) } else { c <- nil } }() return <-c } // WaitingCount returns number of transactions that are waiting to be packed to blocks. func (m *TrxMgr)
() int { m.waitingLock.RLock() defer m.waitingLock.RUnlock() return len(m.waiting) } // FetchTrx fetches a batch of transactions from waiting pool. // Block producer should call FetchTrx to collect transactions of new blocks. func (m *TrxMgr) FetchTrx(blockTime uint32, maxCount, maxSize int) (entries []*TrxEntry) { m.waitingLock.Lock() defer m.waitingLock.Unlock() m.fetchedLock.Lock() defer m.fetchedLock.Unlock() counter, size := 0, 0 // traverse the waiting pool for s, e := range m.waiting { // check count limit if maxCount > 0 && counter >= maxCount { break } // check size limit if maxSize > 0 && size >= maxSize { break } // check the transaction again // although transactions in the waiting pool had passed checks when they entered, // but chain state is keep changing, we have to redo state-dependent checks. if err := m.checkTrx(e, blockTime, true); err != nil { // if failed, deliver the transaction. m.log.Debugf("TRXMGR: FetchTrx check failed: %v, trxId=%x", err, []byte(e.trxId)) m.deliverEntry(e) } else { // if passed, pick it entries = append(entries, e) // add it to the fetched pool m.fetched[s] = e counter++ size += e.size } // remove from waiting pool delete(m.waiting, s) } return } // ReturnTrx notifies that some previously fetched transactions can't be packed into a block due to errors. // Block producer should call ReturnTrx for transactions that failed being applied. func (m *TrxMgr) ReturnTrx(entries ...*TrxEntry) { m.log.Debug("TRXMGR: ReturnTrx begin") timing := common.NewTiming() timing.Begin() m.fetchedLock.Lock() defer m.fetchedLock.Unlock() timing.Mark() for _, e := range entries { // any returning transaction should be previously fetched f := m.fetched[e.trxId] if f != nil { m.deliverEntry(f) delete(m.fetched, e.trxId) } } timing.End() m.log.Debugf("TRXMGR: ReturnTrx end: #tx=%d, %s", len(entries), timing.String()) } // CheckBlockTrxs checks if transactions of a block are valid. // If everything is ok, CheckBlockTrxs returns a TrxEntry slice for transactions and nil error, otherwise, a nil slice // and an error is returned. func (m *TrxMgr) CheckBlockTrxs(b *prototype.SignedBlock) (entries []*TrxEntry, err error) { m.log.Debugf("TRXMGR: CheckBlockTrxs begin %d", b.SignedHeader.Number()) t0 := common.EasyTimer() if count := len(b.Transactions); count > 0 { blockTime := b.SignedHeader.Header.Timestamp.UtcSeconds errs := make([]error, count) entries = make([]*TrxEntry, count) errIdx := int32(-1) var wg sync.WaitGroup wg.Add(count) // check transactions asynchronously for i := 0; i
WaitingCount
identifier_name
trx_mgr.go
// Deliver calls entry's callback function. func (e *TrxEntry) Deliver() { if e.callback != nil { e.callback(e.result) } } // InitCheck fills extra information of the entry, and do a basic validation check. // Note that InitCheck is independent from chain state. We should do it only once for each transaction. func (e *TrxEntry) InitCheck() error { trx := e.result.SigTrx // basic check if err := trx.Validate(); err != nil { return e.SetError(err) } if trxId, err := trx.Id(); err != nil { return e.SetError(err) } else { e.trxId = string(trxId.Hash) } // transaction size limit check e.size = proto.Size(trx) if e.size > constants.MaxTransactionSize { return e.SetError(fmt.Errorf("trx too large, size = %d > %d", e.size, constants.MaxTransactionSize)) } // get the signer account name creator := "" if creators := trx.GetOpCreatorsMap(); len(creators) != 1 { return e.SetError(fmt.Errorf("non-unique trx creators, found %d", len(creators))) } else { for creator = range creators { break } } e.signer = creator // recover the signing public key from signature if signKey, err := trx.ExportPubKeys(e.chainId); err != nil { return e.SetError(fmt.Errorf("cannot export signing key: %s", err.Error())) } else { e.signerKey = signKey } return nil } // CheckExpiration checks if the transaction is valid based on its expiration. func (e *TrxEntry) CheckExpiration(blockTime uint32) error { expiration := e.result.SigTrx.GetTrx().GetExpiration().GetUtcSeconds() if expiration < blockTime { return e.SetError(fmt.Errorf("trx expired, %d < %d", expiration, blockTime)) } if expiration > blockTime + constants.TrxMaxExpirationTime { return e.SetError(fmt.Errorf("trx expiration too long, %d > %d + %d", expiration, blockTime, constants.TrxMaxExpirationTime)) } return nil } // CheckTapos checks if the transaction is valid based on its tapos information. func (e *TrxEntry) CheckTapos(checker *TaposChecker) error { if err := checker.Check(e.result.SigTrx.Trx); err != nil { return e.SetError(fmt.Errorf("tapos failed: %s", err.Error())) } return nil } // CheckSignerKey checks if the transaction is signed by correct public key. func (e *TrxEntry) CheckSignerKey(fetcher *AuthFetcher) error { if err := fetcher.CheckPublicKey(e.signer, e.signerKey); err != nil { return e.SetError(fmt.Errorf("signature failed: %s", err.Error())) } return nil } // CheckInBlockTrxs checks if the transaction is a duplicate of any old transaction. func (e *TrxEntry) CheckInBlockTrxs(checker *InBlockTrxChecker) error { if checker.Has(e.result.SigTrx) { return e.SetError(errors.New("found duplicate in-block trx")) } return nil } func (e *TrxEntry) GetTrxResult() *prototype.TransactionWrapperWithInfo { return e.result } func (e *TrxEntry) GetTrxSize() int { return e.size } func (e *TrxEntry) GetTrxSigner() string { return e.signer } func (e *TrxEntry) GetTrxSigningKey() *prototype.PublicKeyType { return e.signerKey } const ( // maximum count of transactions that are waiting to be packed to blocks. // if this limit is reached, any incoming transaction will be refused directly. sMaxWaitingCount = constants.TrxMaxExpirationTime * 2000 // threshold over which cleanings are necessary sWaitingCountWaterMark = sMaxWaitingCount / 10 // minimal interval between cleanings sMinCleanupInterval = 10 * time.Second // shrink the waiting/fetched pools every 100K transactions sShrinkCountWaterMark = 100000 ) // ITrxMgrPlugin is an interface of manager plugins. type ITrxMgrPlugin interface { BlockApplied(b *prototype.SignedBlock) // called once after a block is successfully applied. BlockReverted(blockNum uint64) // called once after a block is successfully reverted. BlockCommitted(blockNum uint64) // called once after a block is successfully committed. } // The transaction manager. type TrxMgr struct { chainId prototype.ChainId // the chain db iservices.IDatabaseRW // the database log *logrus.Logger // the logger headTime uint32 // timestamp of head block, in seconds waiting map[string]*TrxEntry // transactions waiting to be packed to blocks, trxId -> entry waitingLock sync.RWMutex // lock of waiting transactions fetched map[string]*TrxEntry // transactions being packed to a block, trxId -> entry fetchedLock sync.RWMutex // lock of fetched transactions auth *AuthFetcher // checker of transaction signatures tapos *TaposChecker // checker of transaction tapos history *InBlockTrxChecker // checker of transaction duplication plugins []ITrxMgrPlugin // manager plugins, consisting of above checkers lastCleanTime time.Time // last time we clean up expired waiting transactions shrinkCounter uint64 // a counter to determine when to shrink pools } // NewTrxMgr creates an instance of TrxMgr. func NewTrxMgr(chainId prototype.ChainId, db iservices.IDatabaseRW, logger *logrus.Logger, lastBlock, commitBlock uint64) *TrxMgr { auth := NewAuthFetcher(db, logger, lastBlock, commitBlock) tapos := NewTaposChecker(db, logger, lastBlock) history := NewInBlockTrxChecker(db, logger, lastBlock) return &TrxMgr{ chainId: chainId, db: db, log: logger, headTime: (&DynamicGlobalPropsRW{db:db}).GetProps().GetTime().GetUtcSeconds(), waiting: make(map[string]*TrxEntry), fetched: make(map[string]*TrxEntry), auth: auth, tapos: tapos, history: history, plugins: []ITrxMgrPlugin{ auth, tapos, history }, lastCleanTime: time.Now(), } } // AddTrx processes an incoming transaction. // AddTrx returns nil if the incoming transaction is accepted, otherwise an error is returned. // If a non-nil callback is given, it will be called once asynchronously with the final process result. func (m *TrxMgr) AddTrx(trx *prototype.SignedTransaction, callback TrxCallback) error { entry := NewTrxMgrEntry(m.chainId, trx, callback) // very basic nil pointer check if trx == nil || trx.Signature == nil { err := entry.SetError(errors.New("invalid trx")) m.deliverEntry(entry) return err } // very basic duplication check if m.isProcessingTrx(trx) != nil { err := entry.SetError(errors.New("trx already in process")) m.deliverEntry(entry) return err } c := make(chan error) go func() { ok := false // check the transaction if entry.InitCheck() != nil || m.checkTrx(entry, atomic.LoadUint32(&m.headTime), false) != nil { // deliver if failed m.deliverEntry(entry) } else { // if passed, try adding it to the waiting pool m.waitingLock.Lock() m.fetchedLock.RLock() ok = m.addToWaiting(entry) > 0 m.fetchedLock.RUnlock() m.waitingLock.Unlock() } if !ok { c <- errors.New(entry.result.Receipt.ErrorInfo) } else { c <- nil } }() return <-c } // WaitingCount returns number of transactions that are waiting to be packed to blocks. func (m *TrxMgr) WaitingCount() int { m.waitingLock.RLock() defer m.waitingLock.RUnlock() return len(m.waiting) } // FetchTrx fetches a batch of transactions from waiting pool. // Block producer should call FetchTrx to collect transactions of new blocks. func (m *TrxMgr) FetchTrx(blockTime uint32, maxCount, maxSize int) (entries []*TrxEntry) { m.waitingLock.Lock() defer m.waitingLock.Unlock() m.fetchedLock.Lock() defer m.fetchedLock.Unlock() counter, size := 0, 0 // traverse the waiting pool for s, e := range m.waiting { // check count limit if
{ e.result.Receipt.Status = prototype.StatusError e.result.Receipt.ErrorInfo = err.Error() return err }
identifier_body
mixhop_trainer.py
('adj_pows', '1', 'Comma-separated list of Adjacency powers. Setting to "1" ' 'recovers valinna GCN. Setting to "0,1,2" uses ' '[A^0, A^1, A^2]. Further, you can feed as ' '"0:20:10,1:10:10", where the syntax is ' '<pow>:<capacity in layer1>:<capacity in layer2>. The ' 'number of layers equals number of entries in ' '--hidden_dims_csv, plus one (for the output layer). The ' 'capacities do *NOT* have to add-up to the corresponding ' 'entry in hidden_dims_csv. They will be re-scaled if ' 'necessary.') # Training Flags. flags.DEFINE_integer('num_train_steps', 400, 'Number of training steps.') flags.DEFINE_integer('early_stop_steps', 50, 'If the validation accuracy does ' 'not increase for this many steps, training is halted.') flags.DEFINE_float('l2reg', 5e-4, 'L2 Regularization on Kernels.') flags.DEFINE_float('input_dropout', 0.7, 'Dropout applied at input layer') flags.DEFINE_float('layer_dropout', 0.9, 'Dropout applied at hidden layers') flags.DEFINE_string('optimizer', 'GradientDescentOptimizer', 'Name of optimizer to use. Must be member of tf.train.') flags.DEFINE_float('learn_rate', 0.5, 'Learning Rate for the optimizer.') flags.DEFINE_float('lr_decrement_ratio_of_initial', 0.01, 'Learning rate will be decremented by ' 'this value * --learn_rate.') flags.DEFINE_float('lr_decrement_every', 40, 'Learning rate will be decremented every this many steps.') flags.DEFINE_bool('use_signac', False, 'Use signac and put all args into signac workspace.') flags.DEFINE_string('signac_root', None, 'Root path for signac project.') flags.DEFINE_bool('debug', False, 'Debug code in VS Code') flags.DEFINE_bool("_l2_normalization", True, "") flags.DEFINE_bool("_batch_normalization", True, "") flags.DEFINE_bool("_psum_output", True, "") flags.DEFINE_bool("identity_feature", False, "") FLAGS = flags.FLAGS def GetEncodedParams(): """Summarizes all flag values in a string, to be used in output filenames.""" if FLAGS.debug: import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True) ptvsd.wait_for_attach() breakpoint() if FLAGS.use_signac: import signac project = signac.get_project(root=FLAGS.signac_root) job = project.open_job(dict( dataset_name=FLAGS.dataset_name, run_id=FLAGS.run_id, optimizer=FLAGS.optimizer, learn_rate=FLAGS.learn_rate, l2reg=FLAGS.l2reg, output_layer=FLAGS.output_layer, nonlinearity=FLAGS.nonlinearity, adj_powers=FLAGS.adj_pows.replace(',', 'x').replace(':', '.'), architecture=FLAGS.architecture, identity_feature=FLAGS.identity_feature )).init() FLAGS.results_dir = job.fn(FLAGS.results_dir) FLAGS.train_dir = job.fn(FLAGS.train_dir) FLAGS.run_id = "" params = '_'.join([ 'ds-%s' % FLAGS.dataset_name, 'r-%s' % FLAGS.run_id, 'opt-%s' % FLAGS.optimizer, 'lr-%g' % FLAGS.learn_rate, 'l2-%g' % FLAGS.l2reg, 'o-%s' % FLAGS.output_layer, 'act-%s' % FLAGS.nonlinearity, 'pows-%s' % FLAGS.adj_pows.replace(',', 'x').replace(':', '.'), ]) gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: for gpu in gpus:
logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") return params class AccuracyMonitor(object): """Monitors and remembers model parameters @ best validation accuracy.""" def __init__(self, sess, early_stop_steps): """Initializes AccuracyMonitor. Args: sess: (singleton) instance of tf.Session that is used for training. early_stop_steps: int with number of steps to allow without any improvement on the validation accuracy. """ self._early_stop_steps = early_stop_steps self._sess = sess # (validate accuracy, test accuracy, step #), recorded at best validate # accuracy. self.best = (0, 0, 0) # Will be populated to dict of all tensorflow variable names to their values # as numpy arrays. self.params_at_best = None def mark_accuracy(self, validate_accuracy, test_accuracy, i): curr_accuracy = (float(validate_accuracy), float(test_accuracy), i) self.curr_accuracy = curr_accuracy if curr_accuracy > self.best: self.best = curr_accuracy all_variables = tf.global_variables() all_variable_values = self._sess.run(all_variables) params_at_best_validate = ( {var.name: val for var, val in zip(all_variables, all_variable_values)}) self.params_at_best = params_at_best_validate if i > self.best[-1] + self._early_stop_steps: return False return True # TODO(haija): move to utils. class AdjacencyPowersParser(object): def __init__(self): powers = FLAGS.adj_pows.split(',') has_colon = None self._powers = [] self._ratios = [] for i, p in enumerate(powers): if i == 0: has_colon = (':' in p) else: if has_colon != (':' in p): raise ValueError( 'Error in flag --adj_pows. Either all powers or non should ' 'include ":"') # components = p.split(':') self._powers.append(int(components[0])) if has_colon: self._ratios.append(list(map(float, components[1:]))) else: self._ratios.append([1]) def powers(self): return self._powers def output_capacity(self, num_classes): if all([len(s) == 1 and s[0] == 1 for s in self._ratios]): return num_classes * len(self._powers) else: return sum([s[-1] for s in self._ratios]) def divide_capacity(self, layer_index, total_dim): sizes = [l[min(layer_index, len(l)-1)] for l in self._ratios] sum_units = numpy.sum(sizes) size_per_unit = total_dim / float(sum_units) dims = [] for s in sizes[:-1]: dim = int(numpy.round(s * size_per_unit)) dims.append(dim) dims.append(total_dim - sum(dims)) return dims def main(unused_argv): encoded_params = GetEncodedParams() output_results_file = os.path.join( FLAGS.results_dir, encoded_params + '.json') output_model_file = os.path.join( FLAGS.train_dir, encoded_params + '.pkl') if os.path.exists(output_results_file) and not FLAGS.retrain: print('Exiting early. Results are already computed: %s. Pass flag ' '--retrain to override' % output_results_file) return 0 ### LOAD DATASET # The adjacency matrix is also normalized in this step dataset = mixhop_dataset.ReadDataset(FLAGS.dataset_dir, FLAGS.dataset_name, use_identity=FLAGS.identity_feature) ### MODEL REQUIREMENTS (Placeholders, adjacency tensor, regularizers) x = dataset.sparse_allx_tensor() y = tf.placeholder(tf.float32, [None, dataset.ally.shape[1]], name='y') ph_indices = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder_with_default(True, [], name='is_training') pows_parser = AdjacencyPowersParser() # Parses flag --adj_pows num_x_entries = dataset.x_indices.shape[0] sparse_adj = dataset.sparse_adj_tensor() kernel_regularizer = keras_regularizers.l2(FLAGS.l2reg) ### BUILD MODEL model = mixhop_model.MixHopModel( sparse_adj, x, is_training, kernel_regularizer) if FLAGS.architecture: model.load_architecture_from_file(FLAGS.architecture) else: model.add_layer('mixhop_model', 'sparse_dropout', FLAGS.input_dropout, num_x_entries, pass_is_training=True) model.add_layer('tf', 'sparse_tensor_to_dense') if FLAGS._l2_normalization: model.add_layer('tf.nn', 'l2_normalize', axis=1) power_parser = AdjacencyPowersParser() layer_dims = list(map(int, FLAGS.hidden_dims_csv.split(','))) layer_dims.append(power_parser.output_capacity(dataset.ally.shape[1])) for j, dim in enumerate(layer_dims): if j != 0: model.add_layer('tf.layers', 'dropout', FLAGS.layer_dropout, pass_training=True) capacities = power_parser.divide_capacity(j, dim)
tf.config.experimental.set_memory_growth(gpu, True)
conditional_block
mixhop_trainer.py
('adj_pows', '1', 'Comma-separated list of Adjacency powers. Setting to "1" ' 'recovers valinna GCN. Setting to "0,1,2" uses ' '[A^0, A^1, A^2]. Further, you can feed as ' '"0:20:10,1:10:10", where the syntax is ' '<pow>:<capacity in layer1>:<capacity in layer2>. The ' 'number of layers equals number of entries in ' '--hidden_dims_csv, plus one (for the output layer). The ' 'capacities do *NOT* have to add-up to the corresponding ' 'entry in hidden_dims_csv. They will be re-scaled if ' 'necessary.') # Training Flags. flags.DEFINE_integer('num_train_steps', 400, 'Number of training steps.') flags.DEFINE_integer('early_stop_steps', 50, 'If the validation accuracy does ' 'not increase for this many steps, training is halted.') flags.DEFINE_float('l2reg', 5e-4, 'L2 Regularization on Kernels.') flags.DEFINE_float('input_dropout', 0.7, 'Dropout applied at input layer') flags.DEFINE_float('layer_dropout', 0.9, 'Dropout applied at hidden layers') flags.DEFINE_string('optimizer', 'GradientDescentOptimizer', 'Name of optimizer to use. Must be member of tf.train.') flags.DEFINE_float('learn_rate', 0.5, 'Learning Rate for the optimizer.') flags.DEFINE_float('lr_decrement_ratio_of_initial', 0.01, 'Learning rate will be decremented by ' 'this value * --learn_rate.') flags.DEFINE_float('lr_decrement_every', 40, 'Learning rate will be decremented every this many steps.') flags.DEFINE_bool('use_signac', False, 'Use signac and put all args into signac workspace.') flags.DEFINE_string('signac_root', None, 'Root path for signac project.') flags.DEFINE_bool('debug', False, 'Debug code in VS Code') flags.DEFINE_bool("_l2_normalization", True, "") flags.DEFINE_bool("_batch_normalization", True, "") flags.DEFINE_bool("_psum_output", True, "") flags.DEFINE_bool("identity_feature", False, "") FLAGS = flags.FLAGS def GetEncodedParams(): """Summarizes all flag values in a string, to be used in output filenames.""" if FLAGS.debug: import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True) ptvsd.wait_for_attach() breakpoint() if FLAGS.use_signac: import signac project = signac.get_project(root=FLAGS.signac_root) job = project.open_job(dict( dataset_name=FLAGS.dataset_name, run_id=FLAGS.run_id, optimizer=FLAGS.optimizer, learn_rate=FLAGS.learn_rate, l2reg=FLAGS.l2reg, output_layer=FLAGS.output_layer, nonlinearity=FLAGS.nonlinearity, adj_powers=FLAGS.adj_pows.replace(',', 'x').replace(':', '.'), architecture=FLAGS.architecture, identity_feature=FLAGS.identity_feature )).init() FLAGS.results_dir = job.fn(FLAGS.results_dir) FLAGS.train_dir = job.fn(FLAGS.train_dir) FLAGS.run_id = "" params = '_'.join([ 'ds-%s' % FLAGS.dataset_name, 'r-%s' % FLAGS.run_id, 'opt-%s' % FLAGS.optimizer, 'lr-%g' % FLAGS.learn_rate, 'l2-%g' % FLAGS.l2reg, 'o-%s' % FLAGS.output_layer, 'act-%s' % FLAGS.nonlinearity, 'pows-%s' % FLAGS.adj_pows.replace(',', 'x').replace(':', '.'), ]) gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") return params class AccuracyMonitor(object): """Monitors and remembers model parameters @ best validation accuracy.""" def __init__(self, sess, early_stop_steps): """Initializes AccuracyMonitor. Args: sess: (singleton) instance of tf.Session that is used for training. early_stop_steps: int with number of steps to allow without any improvement on the validation accuracy. """ self._early_stop_steps = early_stop_steps self._sess = sess # (validate accuracy, test accuracy, step #), recorded at best validate # accuracy. self.best = (0, 0, 0) # Will be populated to dict of all tensorflow variable names to their values # as numpy arrays. self.params_at_best = None def mark_accuracy(self, validate_accuracy, test_accuracy, i): curr_accuracy = (float(validate_accuracy), float(test_accuracy), i) self.curr_accuracy = curr_accuracy if curr_accuracy > self.best: self.best = curr_accuracy all_variables = tf.global_variables() all_variable_values = self._sess.run(all_variables) params_at_best_validate = ( {var.name: val for var, val in zip(all_variables, all_variable_values)}) self.params_at_best = params_at_best_validate if i > self.best[-1] + self._early_stop_steps: return False return True # TODO(haija): move to utils. class AdjacencyPowersParser(object): def __init__(self): powers = FLAGS.adj_pows.split(',') has_colon = None self._powers = [] self._ratios = [] for i, p in enumerate(powers): if i == 0: has_colon = (':' in p) else: if has_colon != (':' in p): raise ValueError( 'Error in flag --adj_pows. Either all powers or non should ' 'include ":"') # components = p.split(':') self._powers.append(int(components[0])) if has_colon: self._ratios.append(list(map(float, components[1:]))) else: self._ratios.append([1]) def powers(self): return self._powers def output_capacity(self, num_classes): if all([len(s) == 1 and s[0] == 1 for s in self._ratios]): return num_classes * len(self._powers) else: return sum([s[-1] for s in self._ratios]) def divide_capacity(self, layer_index, total_dim):
def main(unused_argv): encoded_params = GetEncodedParams() output_results_file = os.path.join( FLAGS.results_dir, encoded_params + '.json') output_model_file = os.path.join( FLAGS.train_dir, encoded_params + '.pkl') if os.path.exists(output_results_file) and not FLAGS.retrain: print('Exiting early. Results are already computed: %s. Pass flag ' '--retrain to override' % output_results_file) return 0 ### LOAD DATASET # The adjacency matrix is also normalized in this step dataset = mixhop_dataset.ReadDataset(FLAGS.dataset_dir, FLAGS.dataset_name, use_identity=FLAGS.identity_feature) ### MODEL REQUIREMENTS (Placeholders, adjacency tensor, regularizers) x = dataset.sparse_allx_tensor() y = tf.placeholder(tf.float32, [None, dataset.ally.shape[1]], name='y') ph_indices = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder_with_default(True, [], name='is_training') pows_parser = AdjacencyPowersParser() # Parses flag --adj_pows num_x_entries = dataset.x_indices.shape[0] sparse_adj = dataset.sparse_adj_tensor() kernel_regularizer = keras_regularizers.l2(FLAGS.l2reg) ### BUILD MODEL model = mixhop_model.MixHopModel( sparse_adj, x, is_training, kernel_regularizer) if FLAGS.architecture: model.load_architecture_from_file(FLAGS.architecture) else: model.add_layer('mixhop_model', 'sparse_dropout', FLAGS.input_dropout, num_x_entries, pass_is_training=True) model.add_layer('tf', 'sparse_tensor_to_dense') if FLAGS._l2_normalization: model.add_layer('tf.nn', 'l2_normalize', axis=1) power_parser = AdjacencyPowersParser() layer_dims = list(map(int, FLAGS.hidden_dims_csv.split(','))) layer_dims.append(power_parser.output_capacity(dataset.ally.shape[1])) for j, dim in enumerate(layer_dims): if j != 0: model.add_layer('tf.layers', 'dropout', FLAGS.layer_dropout, pass_training=True) capacities = power_parser.divide_capacity(j, dim) model
sizes = [l[min(layer_index, len(l)-1)] for l in self._ratios] sum_units = numpy.sum(sizes) size_per_unit = total_dim / float(sum_units) dims = [] for s in sizes[:-1]: dim = int(numpy.round(s * size_per_unit)) dims.append(dim) dims.append(total_dim - sum(dims)) return dims
identifier_body
mixhop_trainer.py
('adj_pows', '1', 'Comma-separated list of Adjacency powers. Setting to "1" ' 'recovers valinna GCN. Setting to "0,1,2" uses ' '[A^0, A^1, A^2]. Further, you can feed as ' '"0:20:10,1:10:10", where the syntax is ' '<pow>:<capacity in layer1>:<capacity in layer2>. The ' 'number of layers equals number of entries in ' '--hidden_dims_csv, plus one (for the output layer). The ' 'capacities do *NOT* have to add-up to the corresponding ' 'entry in hidden_dims_csv. They will be re-scaled if ' 'necessary.') # Training Flags. flags.DEFINE_integer('num_train_steps', 400, 'Number of training steps.') flags.DEFINE_integer('early_stop_steps', 50, 'If the validation accuracy does ' 'not increase for this many steps, training is halted.') flags.DEFINE_float('l2reg', 5e-4, 'L2 Regularization on Kernels.') flags.DEFINE_float('input_dropout', 0.7, 'Dropout applied at input layer') flags.DEFINE_float('layer_dropout', 0.9, 'Dropout applied at hidden layers') flags.DEFINE_string('optimizer', 'GradientDescentOptimizer', 'Name of optimizer to use. Must be member of tf.train.') flags.DEFINE_float('learn_rate', 0.5, 'Learning Rate for the optimizer.') flags.DEFINE_float('lr_decrement_ratio_of_initial', 0.01, 'Learning rate will be decremented by ' 'this value * --learn_rate.') flags.DEFINE_float('lr_decrement_every', 40, 'Learning rate will be decremented every this many steps.') flags.DEFINE_bool('use_signac', False, 'Use signac and put all args into signac workspace.') flags.DEFINE_string('signac_root', None, 'Root path for signac project.') flags.DEFINE_bool('debug', False, 'Debug code in VS Code') flags.DEFINE_bool("_l2_normalization", True, "") flags.DEFINE_bool("_batch_normalization", True, "") flags.DEFINE_bool("_psum_output", True, "") flags.DEFINE_bool("identity_feature", False, "") FLAGS = flags.FLAGS def GetEncodedParams(): """Summarizes all flag values in a string, to be used in output filenames.""" if FLAGS.debug: import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True) ptvsd.wait_for_attach() breakpoint() if FLAGS.use_signac: import signac project = signac.get_project(root=FLAGS.signac_root) job = project.open_job(dict( dataset_name=FLAGS.dataset_name, run_id=FLAGS.run_id, optimizer=FLAGS.optimizer, learn_rate=FLAGS.learn_rate, l2reg=FLAGS.l2reg, output_layer=FLAGS.output_layer, nonlinearity=FLAGS.nonlinearity, adj_powers=FLAGS.adj_pows.replace(',', 'x').replace(':', '.'), architecture=FLAGS.architecture, identity_feature=FLAGS.identity_feature )).init() FLAGS.results_dir = job.fn(FLAGS.results_dir) FLAGS.train_dir = job.fn(FLAGS.train_dir) FLAGS.run_id = "" params = '_'.join([ 'ds-%s' % FLAGS.dataset_name, 'r-%s' % FLAGS.run_id, 'opt-%s' % FLAGS.optimizer, 'lr-%g' % FLAGS.learn_rate, 'l2-%g' % FLAGS.l2reg, 'o-%s' % FLAGS.output_layer, 'act-%s' % FLAGS.nonlinearity, 'pows-%s' % FLAGS.adj_pows.replace(',', 'x').replace(':', '.'), ]) gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") return params class AccuracyMonitor(object): """Monitors and remembers model parameters @ best validation accuracy.""" def __init__(self, sess, early_stop_steps): """Initializes AccuracyMonitor. Args: sess: (singleton) instance of tf.Session that is used for training. early_stop_steps: int with number of steps to allow without any improvement on the validation accuracy. """ self._early_stop_steps = early_stop_steps self._sess = sess # (validate accuracy, test accuracy, step #), recorded at best validate # accuracy. self.best = (0, 0, 0) # Will be populated to dict of all tensorflow variable names to their values # as numpy arrays. self.params_at_best = None def mark_accuracy(self, validate_accuracy, test_accuracy, i): curr_accuracy = (float(validate_accuracy), float(test_accuracy), i) self.curr_accuracy = curr_accuracy if curr_accuracy > self.best: self.best = curr_accuracy all_variables = tf.global_variables() all_variable_values = self._sess.run(all_variables) params_at_best_validate = ( {var.name: val for var, val in zip(all_variables, all_variable_values)}) self.params_at_best = params_at_best_validate if i > self.best[-1] + self._early_stop_steps: return False return True # TODO(haija): move to utils. class AdjacencyPowersParser(object): def
(self): powers = FLAGS.adj_pows.split(',') has_colon = None self._powers = [] self._ratios = [] for i, p in enumerate(powers): if i == 0: has_colon = (':' in p) else: if has_colon != (':' in p): raise ValueError( 'Error in flag --adj_pows. Either all powers or non should ' 'include ":"') # components = p.split(':') self._powers.append(int(components[0])) if has_colon: self._ratios.append(list(map(float, components[1:]))) else: self._ratios.append([1]) def powers(self): return self._powers def output_capacity(self, num_classes): if all([len(s) == 1 and s[0] == 1 for s in self._ratios]): return num_classes * len(self._powers) else: return sum([s[-1] for s in self._ratios]) def divide_capacity(self, layer_index, total_dim): sizes = [l[min(layer_index, len(l)-1)] for l in self._ratios] sum_units = numpy.sum(sizes) size_per_unit = total_dim / float(sum_units) dims = [] for s in sizes[:-1]: dim = int(numpy.round(s * size_per_unit)) dims.append(dim) dims.append(total_dim - sum(dims)) return dims def main(unused_argv): encoded_params = GetEncodedParams() output_results_file = os.path.join( FLAGS.results_dir, encoded_params + '.json') output_model_file = os.path.join( FLAGS.train_dir, encoded_params + '.pkl') if os.path.exists(output_results_file) and not FLAGS.retrain: print('Exiting early. Results are already computed: %s. Pass flag ' '--retrain to override' % output_results_file) return 0 ### LOAD DATASET # The adjacency matrix is also normalized in this step dataset = mixhop_dataset.ReadDataset(FLAGS.dataset_dir, FLAGS.dataset_name, use_identity=FLAGS.identity_feature) ### MODEL REQUIREMENTS (Placeholders, adjacency tensor, regularizers) x = dataset.sparse_allx_tensor() y = tf.placeholder(tf.float32, [None, dataset.ally.shape[1]], name='y') ph_indices = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder_with_default(True, [], name='is_training') pows_parser = AdjacencyPowersParser() # Parses flag --adj_pows num_x_entries = dataset.x_indices.shape[0] sparse_adj = dataset.sparse_adj_tensor() kernel_regularizer = keras_regularizers.l2(FLAGS.l2reg) ### BUILD MODEL model = mixhop_model.MixHopModel( sparse_adj, x, is_training, kernel_regularizer) if FLAGS.architecture: model.load_architecture_from_file(FLAGS.architecture) else: model.add_layer('mixhop_model', 'sparse_dropout', FLAGS.input_dropout, num_x_entries, pass_is_training=True) model.add_layer('tf', 'sparse_tensor_to_dense') if FLAGS._l2_normalization: model.add_layer('tf.nn', 'l2_normalize', axis=1) power_parser = AdjacencyPowersParser() layer_dims = list(map(int, FLAGS.hidden_dims_csv.split(','))) layer_dims.append(power_parser.output_capacity(dataset.ally.shape[1])) for j, dim in enumerate(layer_dims): if j != 0: model.add_layer('tf.layers', 'dropout', FLAGS.layer_dropout, pass_training=True) capacities = power_parser.divide_capacity(j, dim)
__init__
identifier_name
mixhop_trainer.py
adj_pows', '1', 'Comma-separated list of Adjacency powers. Setting to "1" ' 'recovers valinna GCN. Setting to "0,1,2" uses ' '[A^0, A^1, A^2]. Further, you can feed as ' '"0:20:10,1:10:10", where the syntax is ' '<pow>:<capacity in layer1>:<capacity in layer2>. The ' 'number of layers equals number of entries in ' '--hidden_dims_csv, plus one (for the output layer). The ' 'capacities do *NOT* have to add-up to the corresponding ' 'entry in hidden_dims_csv. They will be re-scaled if ' 'necessary.') # Training Flags. flags.DEFINE_integer('num_train_steps', 400, 'Number of training steps.') flags.DEFINE_integer('early_stop_steps', 50, 'If the validation accuracy does ' 'not increase for this many steps, training is halted.') flags.DEFINE_float('l2reg', 5e-4, 'L2 Regularization on Kernels.') flags.DEFINE_float('input_dropout', 0.7, 'Dropout applied at input layer') flags.DEFINE_float('layer_dropout', 0.9, 'Dropout applied at hidden layers') flags.DEFINE_string('optimizer', 'GradientDescentOptimizer', 'Name of optimizer to use. Must be member of tf.train.') flags.DEFINE_float('learn_rate', 0.5, 'Learning Rate for the optimizer.') flags.DEFINE_float('lr_decrement_ratio_of_initial', 0.01, 'Learning rate will be decremented by ' 'this value * --learn_rate.') flags.DEFINE_float('lr_decrement_every', 40, 'Learning rate will be decremented every this many steps.') flags.DEFINE_bool('use_signac', False, 'Use signac and put all args into signac workspace.') flags.DEFINE_string('signac_root', None, 'Root path for signac project.') flags.DEFINE_bool('debug', False, 'Debug code in VS Code') flags.DEFINE_bool("_l2_normalization", True, "") flags.DEFINE_bool("_batch_normalization", True, "") flags.DEFINE_bool("_psum_output", True, "") flags.DEFINE_bool("identity_feature", False, "") FLAGS = flags.FLAGS def GetEncodedParams(): """Summarizes all flag values in a string, to be used in output filenames.""" if FLAGS.debug: import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True) ptvsd.wait_for_attach() breakpoint() if FLAGS.use_signac: import signac project = signac.get_project(root=FLAGS.signac_root) job = project.open_job(dict( dataset_name=FLAGS.dataset_name, run_id=FLAGS.run_id, optimizer=FLAGS.optimizer, learn_rate=FLAGS.learn_rate, l2reg=FLAGS.l2reg, output_layer=FLAGS.output_layer, nonlinearity=FLAGS.nonlinearity, adj_powers=FLAGS.adj_pows.replace(',', 'x').replace(':', '.'), architecture=FLAGS.architecture, identity_feature=FLAGS.identity_feature )).init() FLAGS.results_dir = job.fn(FLAGS.results_dir) FLAGS.train_dir = job.fn(FLAGS.train_dir) FLAGS.run_id = "" params = '_'.join([ 'ds-%s' % FLAGS.dataset_name, 'r-%s' % FLAGS.run_id, 'opt-%s' % FLAGS.optimizer, 'lr-%g' % FLAGS.learn_rate, 'l2-%g' % FLAGS.l2reg, 'o-%s' % FLAGS.output_layer, 'act-%s' % FLAGS.nonlinearity, 'pows-%s' % FLAGS.adj_pows.replace(',', 'x').replace(':', '.'), ]) gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") return params class AccuracyMonitor(object): """Monitors and remembers model parameters @ best validation accuracy.""" def __init__(self, sess, early_stop_steps): """Initializes AccuracyMonitor. Args: sess: (singleton) instance of tf.Session that is used for training. early_stop_steps: int with number of steps to allow without any improvement on the validation accuracy. """ self._early_stop_steps = early_stop_steps self._sess = sess # (validate accuracy, test accuracy, step #), recorded at best validate # accuracy. self.best = (0, 0, 0) # Will be populated to dict of all tensorflow variable names to their values # as numpy arrays. self.params_at_best = None def mark_accuracy(self, validate_accuracy, test_accuracy, i): curr_accuracy = (float(validate_accuracy), float(test_accuracy), i) self.curr_accuracy = curr_accuracy if curr_accuracy > self.best: self.best = curr_accuracy all_variables = tf.global_variables() all_variable_values = self._sess.run(all_variables) params_at_best_validate = ( {var.name: val for var, val in zip(all_variables, all_variable_values)}) self.params_at_best = params_at_best_validate if i > self.best[-1] + self._early_stop_steps: return False return True # TODO(haija): move to utils. class AdjacencyPowersParser(object): def __init__(self): powers = FLAGS.adj_pows.split(',') has_colon = None self._powers = [] self._ratios = [] for i, p in enumerate(powers): if i == 0: has_colon = (':' in p) else: if has_colon != (':' in p): raise ValueError( 'Error in flag --adj_pows. Either all powers or non should ' 'include ":"') # components = p.split(':') self._powers.append(int(components[0])) if has_colon: self._ratios.append(list(map(float, components[1:]))) else: self._ratios.append([1]) def powers(self): return self._powers def output_capacity(self, num_classes): if all([len(s) == 1 and s[0] == 1 for s in self._ratios]): return num_classes * len(self._powers) else: return sum([s[-1] for s in self._ratios]) def divide_capacity(self, layer_index, total_dim): sizes = [l[min(layer_index, len(l)-1)] for l in self._ratios] sum_units = numpy.sum(sizes) size_per_unit = total_dim / float(sum_units) dims = [] for s in sizes[:-1]: dim = int(numpy.round(s * size_per_unit)) dims.append(dim) dims.append(total_dim - sum(dims)) return dims def main(unused_argv): encoded_params = GetEncodedParams() output_results_file = os.path.join( FLAGS.results_dir, encoded_params + '.json') output_model_file = os.path.join( FLAGS.train_dir, encoded_params + '.pkl') if os.path.exists(output_results_file) and not FLAGS.retrain: print('Exiting early. Results are already computed: %s. Pass flag ' '--retrain to override' % output_results_file) return 0 ### LOAD DATASET # The adjacency matrix is also normalized in this step dataset = mixhop_dataset.ReadDataset(FLAGS.dataset_dir, FLAGS.dataset_name, use_identity=FLAGS.identity_feature) ### MODEL REQUIREMENTS (Placeholders, adjacency tensor, regularizers) x = dataset.sparse_allx_tensor() y = tf.placeholder(tf.float32, [None, dataset.ally.shape[1]], name='y') ph_indices = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder_with_default(True, [], name='is_training') pows_parser = AdjacencyPowersParser() # Parses flag --adj_pows num_x_entries = dataset.x_indices.shape[0] sparse_adj = dataset.sparse_adj_tensor() kernel_regularizer = keras_regularizers.l2(FLAGS.l2reg) ### BUILD MODEL model = mixhop_model.MixHopModel( sparse_adj, x, is_training, kernel_regularizer) if FLAGS.architecture: model.load_architecture_from_file(FLAGS.architecture) else: model.add_layer('mixhop_model', 'sparse_dropout', FLAGS.input_dropout, num_x_entries, pass_is_training=True) model.add_layer('tf', 'sparse_tensor_to_dense') if FLAGS._l2_normalization:
layer_dims = list(map(int, FLAGS.hidden_dims_csv.split(','))) layer_dims.append(power_parser.output_capacity(dataset.ally.shape[1])) for j, dim in enumerate(layer_dims): if j != 0: model.add_layer('tf.layers', 'dropout', FLAGS.layer_dropout, pass_training=True) capacities = power_parser.divide_capacity(j, dim) model.add
model.add_layer('tf.nn', 'l2_normalize', axis=1) power_parser = AdjacencyPowersParser()
random_line_split
lib.rs
Day { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", day_string(*self)) } } /// Enum with the months of the year. #[derive(Debug, Clone, Copy)] pub enum Month { January, February, March, April, May, June, July, August, September, October, November, December, } /// Maps the `Month` enum to a string representation, e.g. "January". pub fn month_string(month: Month) -> &'static str { match month { Month::January => "January", Month::February => "February", Month::March => "March", Month::April => "April", Month::May => "May", Month::June => "June", Month::July => "July", Month::August => "August", Month::September => "September", Month::October => "October", Month::November => "November", Month::December => "December", } } /// Maps the `Month` enum to a shortened string representation, e.g. "Jan". pub fn month_abbrev_string(month: Month) -> &'static str { &month_string(month)[0..3] } impl fmt::Display for Month { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", month_string(*self)) } }
if year % 400 == 0 { 366 } else if year % 100 == 0 { 365 } else if year % 4 == 0 { 366 } else { 365 } } /// Takes in a year and month (e.g. 2020, February) and returns the number of days in that month. pub fn days_in_month(year: u64, month: Month) -> u64 { match month { Month::January => 31, Month::February if days_in_year(year) == 366 => 29, Month::February => 28, Month::March => 31, Month::April => 30, Month::May => 31, Month::June => 30, Month::July => 31, Month::August => 31, Month::September => 30, Month::October => 31, Month::November => 30, Month::December => 31, } } /// Converts a `Month` enum to an integer in the range 1-12. pub fn index_from_month(month: Month) -> u64 { match month { Month::January => 1, Month::February => 2, Month::March => 3, Month::April => 4, Month::May => 5, Month::June => 6, Month::July => 7, Month::August => 8, Month::September => 9, Month::October => 10, Month::November => 11, Month::December => 12, } } /// Converts an integer in the range 1-12 into the corresponding `Month` enum. /// Values outside the 1-12 range are converted to `None`. pub fn month_from_index(index: u64) -> Option<Month> { match index { 1 => Some(Month::January), 2 => Some(Month::February), 3 => Some(Month::March), 4 => Some(Month::April), 5 => Some(Month::May), 6 => Some(Month::June), 7 => Some(Month::July), 8 => Some(Month::August), 9 => Some(Month::September), 10 => Some(Month::October), 11 => Some(Month::November), 12 => Some(Month::December), _ => None, } } /// Returns the number of seconds in a day. pub fn seconds_in_day() -> u64 { 24 * 60 * 60 } /// Returns the number of seconds in an hour. pub fn seconds_in_hour() -> u64 { 60 * 60 } /// Returns the number of seconds in a minute. pub fn seconds_in_minute() -> u64 { 60 } /// Conceptually this is a thin wrapper for `std::time::SystemTime`, but provides /// more useful functions. The impl of this struct has functions that allow easily /// extracting the year/month/date/etc. for the given point in time. In actual fact /// the internal representation of this struct is a `Duration` since the unix epoch, /// so that error-handling is only required once upon creating the instance, and /// not for each attempt at extracting date/time fields. pub struct PostEpochTime { delta: Duration, } impl PostEpochTime { /// Create a `PostEpochTime` from a `SystemTime`. The `SystemTime` must be temporally /// in the future relative to the unix epoch, or an error will be returned. pub fn from(st: &SystemTime) -> Result<Self, SystemTimeError> { Ok(PostEpochTime { delta: st.duration_since(SystemTime::UNIX_EPOCH)?, }) } /// Create a `PostEpochTime` for the current instant. The current instant must be /// in the future relative to the unix epoch, or an error will be returned. pub fn now() -> Result<Self, SystemTimeError> { Self::from(&SystemTime::now()) } /// Returns the number of milliseconds passed since the unix epoch. pub fn milliseconds_since_epoch(&self) -> u128 { self.delta.as_millis() } /// Returns the number of microseconds passed since the unix epoch. pub fn microseconds_since_epoch(&self) -> u128 { self.delta.as_micros() } /// Returns the number of nanoseconds passed since the unix epoch. pub fn nanoseconds_since_epoch(&self) -> u128 { self.delta.as_nanos() } /// Returns the number of complete seconds passed since the unix epoch. pub fn seconds_since_epoch(&self) -> u64 { self.delta.as_secs() } /// Returns the number of complete days passed since the unix epoch. pub fn days_since_epoch(&self) -> u64 { self.delta.as_secs() / seconds_in_day() } /// Returns the day of the week that this point in time falls on. pub fn day_of_week(&self) -> Day { match self.days_since_epoch() % 7 { 0 => Day::Thursday, 1 => Day::Friday, 2 => Day::Saturday, 3 => Day::Sunday, 4 => Day::Monday, 5 => Day::Tuesday, 6 => Day::Wednesday, _ => panic!("Modulo operator is broken"), } } fn year_split(&self) -> (u64, u64) { let mut days = self.days_since_epoch(); let mut year = 1970; loop { let in_year = days_in_year(year); if days < in_year { break; } days -= in_year; year += 1; } (year, days) } /// Returns the year (e.g. 2020) this point in time falls on. pub fn year(&self) -> u64 { self.year_split().0 } /// Returns the day of the year for this point in time (1-indexed). /// A return value of 1 indicates January 1, a value of 2 indicates January 2, /// and so on. If the year is a leap year the largest returned value /// would be 366, and for non-leap years it would be 365. pub fn day_of_year(&self) -> u64 { self.year_split().1 + 1 } fn month_split(&self) -> (Month, u64) { let (year, mut days) = self.year_split(); let mut month = Month::January; loop { let in_month = days_in_month(year, month); if days < in_month { break; } days -= in_month; month = month_from_index(index_from_month(month) + 1).expect("Month should never overflow"); } (month, days) } /// Returns the month this point in time falls on. pub fn month(&self) -> Month { self.month_split().0 } /// Returns the day of the month for this point in time (1-indexed). /// A return value of 1 means it falls on the first of the month. The maximum /// returned value will be 31. pub fn day_of_month(&self) -> u64 { self.month_split().1 + 1 } ///
/// Takes in a year (e.g. 2019) and returns the number of days in that year. pub fn days_in_year(year: u64) -> u64 {
random_line_split
lib.rs
Day { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", day_string(*self)) } } /// Enum with the months of the year. #[derive(Debug, Clone, Copy)] pub enum Month { January, February, March, April, May, June, July, August, September, October, November, December, } /// Maps the `Month` enum to a string representation, e.g. "January". pub fn month_string(month: Month) -> &'static str { match month { Month::January => "January", Month::February => "February", Month::March => "March", Month::April => "April", Month::May => "May", Month::June => "June", Month::July => "July", Month::August => "August", Month::September => "September", Month::October => "October", Month::November => "November", Month::December => "December", } } /// Maps the `Month` enum to a shortened string representation, e.g. "Jan". pub fn month_abbrev_string(month: Month) -> &'static str { &month_string(month)[0..3] } impl fmt::Display for Month { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", month_string(*self)) } } /// Takes in a year (e.g. 2019) and returns the number of days in that year. pub fn days_in_year(year: u64) -> u64 { if year % 400 == 0 { 366 } else if year % 100 == 0 { 365 } else if year % 4 == 0 { 366 } else { 365 } } /// Takes in a year and month (e.g. 2020, February) and returns the number of days in that month. pub fn days_in_month(year: u64, month: Month) -> u64 { match month { Month::January => 31, Month::February if days_in_year(year) == 366 => 29, Month::February => 28, Month::March => 31, Month::April => 30, Month::May => 31, Month::June => 30, Month::July => 31, Month::August => 31, Month::September => 30, Month::October => 31, Month::November => 30, Month::December => 31, } } /// Converts a `Month` enum to an integer in the range 1-12. pub fn index_from_month(month: Month) -> u64 { match month { Month::January => 1, Month::February => 2, Month::March => 3, Month::April => 4, Month::May => 5, Month::June => 6, Month::July => 7, Month::August => 8, Month::September => 9, Month::October => 10, Month::November => 11, Month::December => 12, } } /// Converts an integer in the range 1-12 into the corresponding `Month` enum. /// Values outside the 1-12 range are converted to `None`. pub fn month_from_index(index: u64) -> Option<Month> { match index { 1 => Some(Month::January), 2 => Some(Month::February), 3 => Some(Month::March), 4 => Some(Month::April), 5 => Some(Month::May), 6 => Some(Month::June), 7 => Some(Month::July), 8 => Some(Month::August), 9 => Some(Month::September), 10 => Some(Month::October), 11 => Some(Month::November), 12 => Some(Month::December), _ => None, } } /// Returns the number of seconds in a day. pub fn seconds_in_day() -> u64 { 24 * 60 * 60 } /// Returns the number of seconds in an hour. pub fn seconds_in_hour() -> u64 { 60 * 60 } /// Returns the number of seconds in a minute. pub fn seconds_in_minute() -> u64 { 60 } /// Conceptually this is a thin wrapper for `std::time::SystemTime`, but provides /// more useful functions. The impl of this struct has functions that allow easily /// extracting the year/month/date/etc. for the given point in time. In actual fact /// the internal representation of this struct is a `Duration` since the unix epoch, /// so that error-handling is only required once upon creating the instance, and /// not for each attempt at extracting date/time fields. pub struct PostEpochTime { delta: Duration, } impl PostEpochTime { /// Create a `PostEpochTime` from a `SystemTime`. The `SystemTime` must be temporally /// in the future relative to the unix epoch, or an error will be returned. pub fn from(st: &SystemTime) -> Result<Self, SystemTimeError> { Ok(PostEpochTime { delta: st.duration_since(SystemTime::UNIX_EPOCH)?, }) } /// Create a `PostEpochTime` for the current instant. The current instant must be /// in the future relative to the unix epoch, or an error will be returned. pub fn now() -> Result<Self, SystemTimeError>
/// Returns the number of milliseconds passed since the unix epoch. pub fn milliseconds_since_epoch(&self) -> u128 { self.delta.as_millis() } /// Returns the number of microseconds passed since the unix epoch. pub fn microseconds_since_epoch(&self) -> u128 { self.delta.as_micros() } /// Returns the number of nanoseconds passed since the unix epoch. pub fn nanoseconds_since_epoch(&self) -> u128 { self.delta.as_nanos() } /// Returns the number of complete seconds passed since the unix epoch. pub fn seconds_since_epoch(&self) -> u64 { self.delta.as_secs() } /// Returns the number of complete days passed since the unix epoch. pub fn days_since_epoch(&self) -> u64 { self.delta.as_secs() / seconds_in_day() } /// Returns the day of the week that this point in time falls on. pub fn day_of_week(&self) -> Day { match self.days_since_epoch() % 7 { 0 => Day::Thursday, 1 => Day::Friday, 2 => Day::Saturday, 3 => Day::Sunday, 4 => Day::Monday, 5 => Day::Tuesday, 6 => Day::Wednesday, _ => panic!("Modulo operator is broken"), } } fn year_split(&self) -> (u64, u64) { let mut days = self.days_since_epoch(); let mut year = 1970; loop { let in_year = days_in_year(year); if days < in_year { break; } days -= in_year; year += 1; } (year, days) } /// Returns the year (e.g. 2020) this point in time falls on. pub fn year(&self) -> u64 { self.year_split().0 } /// Returns the day of the year for this point in time (1-indexed). /// A return value of 1 indicates January 1, a value of 2 indicates January 2, /// and so on. If the year is a leap year the largest returned value /// would be 366, and for non-leap years it would be 365. pub fn day_of_year(&self) -> u64 { self.year_split().1 + 1 } fn month_split(&self) -> (Month, u64) { let (year, mut days) = self.year_split(); let mut month = Month::January; loop { let in_month = days_in_month(year, month); if days < in_month { break; } days -= in_month; month = month_from_index(index_from_month(month) + 1).expect("Month should never overflow"); } (month, days) } /// Returns the month this point in time falls on. pub fn month(&self) -> Month { self.month_split().0 } /// Returns the day of the month for this point in time (1-indexed). /// A return value of 1 means it falls on the first of the month. The maximum /// returned value will be 31. pub fn day_of_month(&self) -> u64 { self.month_split().1 + 1 }
{ Self::from(&SystemTime::now()) }
identifier_body
lib.rs
Day { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", day_string(*self)) } } /// Enum with the months of the year. #[derive(Debug, Clone, Copy)] pub enum Month { January, February, March, April, May, June, July, August, September, October, November, December, } /// Maps the `Month` enum to a string representation, e.g. "January". pub fn month_string(month: Month) -> &'static str { match month { Month::January => "January", Month::February => "February", Month::March => "March", Month::April => "April", Month::May => "May", Month::June => "June", Month::July => "July", Month::August => "August", Month::September => "September", Month::October => "October", Month::November => "November", Month::December => "December", } } /// Maps the `Month` enum to a shortened string representation, e.g. "Jan". pub fn
(month: Month) -> &'static str { &month_string(month)[0..3] } impl fmt::Display for Month { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", month_string(*self)) } } /// Takes in a year (e.g. 2019) and returns the number of days in that year. pub fn days_in_year(year: u64) -> u64 { if year % 400 == 0 { 366 } else if year % 100 == 0 { 365 } else if year % 4 == 0 { 366 } else { 365 } } /// Takes in a year and month (e.g. 2020, February) and returns the number of days in that month. pub fn days_in_month(year: u64, month: Month) -> u64 { match month { Month::January => 31, Month::February if days_in_year(year) == 366 => 29, Month::February => 28, Month::March => 31, Month::April => 30, Month::May => 31, Month::June => 30, Month::July => 31, Month::August => 31, Month::September => 30, Month::October => 31, Month::November => 30, Month::December => 31, } } /// Converts a `Month` enum to an integer in the range 1-12. pub fn index_from_month(month: Month) -> u64 { match month { Month::January => 1, Month::February => 2, Month::March => 3, Month::April => 4, Month::May => 5, Month::June => 6, Month::July => 7, Month::August => 8, Month::September => 9, Month::October => 10, Month::November => 11, Month::December => 12, } } /// Converts an integer in the range 1-12 into the corresponding `Month` enum. /// Values outside the 1-12 range are converted to `None`. pub fn month_from_index(index: u64) -> Option<Month> { match index { 1 => Some(Month::January), 2 => Some(Month::February), 3 => Some(Month::March), 4 => Some(Month::April), 5 => Some(Month::May), 6 => Some(Month::June), 7 => Some(Month::July), 8 => Some(Month::August), 9 => Some(Month::September), 10 => Some(Month::October), 11 => Some(Month::November), 12 => Some(Month::December), _ => None, } } /// Returns the number of seconds in a day. pub fn seconds_in_day() -> u64 { 24 * 60 * 60 } /// Returns the number of seconds in an hour. pub fn seconds_in_hour() -> u64 { 60 * 60 } /// Returns the number of seconds in a minute. pub fn seconds_in_minute() -> u64 { 60 } /// Conceptually this is a thin wrapper for `std::time::SystemTime`, but provides /// more useful functions. The impl of this struct has functions that allow easily /// extracting the year/month/date/etc. for the given point in time. In actual fact /// the internal representation of this struct is a `Duration` since the unix epoch, /// so that error-handling is only required once upon creating the instance, and /// not for each attempt at extracting date/time fields. pub struct PostEpochTime { delta: Duration, } impl PostEpochTime { /// Create a `PostEpochTime` from a `SystemTime`. The `SystemTime` must be temporally /// in the future relative to the unix epoch, or an error will be returned. pub fn from(st: &SystemTime) -> Result<Self, SystemTimeError> { Ok(PostEpochTime { delta: st.duration_since(SystemTime::UNIX_EPOCH)?, }) } /// Create a `PostEpochTime` for the current instant. The current instant must be /// in the future relative to the unix epoch, or an error will be returned. pub fn now() -> Result<Self, SystemTimeError> { Self::from(&SystemTime::now()) } /// Returns the number of milliseconds passed since the unix epoch. pub fn milliseconds_since_epoch(&self) -> u128 { self.delta.as_millis() } /// Returns the number of microseconds passed since the unix epoch. pub fn microseconds_since_epoch(&self) -> u128 { self.delta.as_micros() } /// Returns the number of nanoseconds passed since the unix epoch. pub fn nanoseconds_since_epoch(&self) -> u128 { self.delta.as_nanos() } /// Returns the number of complete seconds passed since the unix epoch. pub fn seconds_since_epoch(&self) -> u64 { self.delta.as_secs() } /// Returns the number of complete days passed since the unix epoch. pub fn days_since_epoch(&self) -> u64 { self.delta.as_secs() / seconds_in_day() } /// Returns the day of the week that this point in time falls on. pub fn day_of_week(&self) -> Day { match self.days_since_epoch() % 7 { 0 => Day::Thursday, 1 => Day::Friday, 2 => Day::Saturday, 3 => Day::Sunday, 4 => Day::Monday, 5 => Day::Tuesday, 6 => Day::Wednesday, _ => panic!("Modulo operator is broken"), } } fn year_split(&self) -> (u64, u64) { let mut days = self.days_since_epoch(); let mut year = 1970; loop { let in_year = days_in_year(year); if days < in_year { break; } days -= in_year; year += 1; } (year, days) } /// Returns the year (e.g. 2020) this point in time falls on. pub fn year(&self) -> u64 { self.year_split().0 } /// Returns the day of the year for this point in time (1-indexed). /// A return value of 1 indicates January 1, a value of 2 indicates January 2, /// and so on. If the year is a leap year the largest returned value /// would be 366, and for non-leap years it would be 365. pub fn day_of_year(&self) -> u64 { self.year_split().1 + 1 } fn month_split(&self) -> (Month, u64) { let (year, mut days) = self.year_split(); let mut month = Month::January; loop { let in_month = days_in_month(year, month); if days < in_month { break; } days -= in_month; month = month_from_index(index_from_month(month) + 1).expect("Month should never overflow"); } (month, days) } /// Returns the month this point in time falls on. pub fn month(&self) -> Month { self.month_split().0 } /// Returns the day of the month for this point in time (1-indexed). /// A return value of 1 means it falls on the first of the month. The maximum /// returned value will be 31. pub fn day_of_month(&self) -> u64 { self.month_split().1 + 1 }
month_abbrev_string
identifier_name
Main.py
.minH ) ) ) face_num= None #初始化人脸序号 for (x, y, w, h) in faces: cv2.rectangle( self.image , (x, y), (x + w, y + h), (0, 0 , 255), 2) id, confidence = self.recognizer.predict(gray[y:y + h, x:x + w]) if confidence < 100 : #50%的识别置信度 result= self.names[id] confidencestr = "{0}%".format(round(100 - confidence)) # go_api( round(100 - confidence) , int( idnum ) , tag , names) else: confidencestr = "{0}%".format(round(100 - confidence)) cv2.putText( self.image, result , (x + 5, y - 5), self.font, 1, (0, 0, 255), 2 ) cv2.putText( self.image, confidencestr , (x + 5, y + h - 5), self.font, 1, (0, 0, 0), 1) #网络识别的工具函数 #对图片的格式进行转换 def transimage( self , image_name ): f = open( image_name ,'rb') img = base64.b64encode(f.read()) return img def logging( self , name ): curren_time = time.asctime(time.localtime(time.time())) f = open('Log.txt','a+') f.write("Person: " + name + " " + "Time:" + str(curren_time)+'\n') f.close() #上传到百度api进行人脸检测 def go_api( self , image): result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP) if result['error_msg'] == 'SUCCESS': name = result['result']['user_list'][0]['user_id'] score = result['result']['user_list'][0]['score'] if score > 80: print("Welcome %s !" % name) self.logging( name ) # recong_result=QMessageBox.information( self ,\ # "登录消息" , "识别成功,是否进入相应功能区" , QMessageBox.Ok |QMessageBox.Cancel ) # if recong_result == QMessageBox.Ok : # pass # else: # self.close() # self.closeCamera() else: print("Sorry...I don't know you !") name = 'Unknow' return name,score if result['error_msg'] == 'pic not has face': print('There is no face in image!') return "NO FACE", None else: print(result['error_code']+' ' + result['error_code']) return "ERROR" , None def recognize_face_intnet( self ): font = cv2.FONT_HERSHEY_SIMPLEX flag,self.image = self.cap.read() #从视频流中读取 self.image = cv2.resize(self.image, (480,320) ) #把读到的帧的大小重新设置为 480*320 gray = cv2.cvtColor( self.image , cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, 1.3, 5) if self.OnceBaiduAPI_flag == False : self.OnceBaiduAPI_flag = True cv2.imwrite("youtemp.png", self.image ) self.name , self.score = self.go_api( self.transimage( image_name ) ) for (x,y,w,h) in faces: cv2.rectangle(self.image,(x,y),(x+w,y+h), (0, 0 , 255), 2) # roi_gray = gray[y:y+h, x:x+w] roi_color = self.image[y:y+h, x:x+w] cv2.putText(self.image, self.name , (x+5,y-5), font, 1, (255,255,255), 2 ) cv2.putText(self.image, str( self.score ), (x+5,y+h-5), font, 1, (255,255,0), 1 ) def closeCamera(self): self.timer_camera.stop() self.cap.release() self.OnceBaiduAPI_flag = False self.cameraLabel.clear() self.cameraButton.setText('打开摄像头') # self.my_thread.terminate() #这里作为功能启动区,代表着识别成功后就启动该功能 def Function_run( self ): pass #这个类主要是管理注册逻辑,这里为什么要用QDialog呢,当然也可以用Qwidget,这俩都是毛坯房,但是 #QDialog有exec方法,Qwidget是没有的。exec_()方法可以让窗口成为模态窗口,而调用show()方法, #窗口是非模态的。模态窗口将程序控制权占据,只有对当前窗口关闭后才能操作其他窗口; class Signin_Dialog( QDialog ): def __init__( self ): super().__init__() #控件的初始化 self.setWindowTitle('注册系统') self.resize( 300 , 250 ) self.user_name_label = QLabel( "user_namer:" ,self ) self.user_line_dialog = QLineEdit( self ) self.passwd_label = QLabel( 'password:' , self ) self.passwd_line_dialog = QLineEdit( self ) self.passwd_re_label = QLabel( 're_password' , self ) self.passwd_re_line = QLineEdit( self ) self.sure_signin_botton = QPushButton( '确认' , self ) self.cancel_button = QPushButton( '取消', self ) #将布局类实例化 self.v_layout = QVBoxLayout() self.h_layout = QHBoxLayout() self.grid_layout = QGridLayout() #将布局初始化 self.__layput_init() #将确认按钮初始化 self.sure_siginin_botton_init() self.line_init() self.sure_botton_init() def __layput_init( self ): self.grid_layout.addWidget( self.user_name_label , 0 , 0 ) self.grid_layout.addWidget( self.user_line_dialog , 0 , 1 ) self.grid_layout.addWidget( self.passwd_label , 1 , 0 ) self.grid_layout.addWidget( self.passwd_line_dialog , 1 , 1 ) self.grid_layout.addWidget( self.passwd_re_label , 2 , 0 ) self.grid_layout.addWidget( self.passwd_re_line , 2 , 1 ) self.h_layout.addWidget( self.sure_signin_botton ) self.h_layout.addWidget( self.cancel_button ) self.v_layout.addLayout( self.grid_layout ) self.v_layout.addLayout( self.h_layout ) self.setLayout( self.v_layout ) def line_init( self ): self.user_line_dialog.setPlaceholderText( "请输入你的用户账号" ) self.passwd_line_dialog.setPlaceholderText( "请输入你的密码") self.passwd_re_line.setPlaceholderText( '请再次输入你的密码' ) self.passwd_line_dialog.setEchoMode( QLineEdit.Password ) self.passwd_re_line.setEchoMode( QLineEdit.Password ) self.user_line_dialog.textChanged.connect( self.check_input ) self.passwd_line_dialog.textChanged.connect( self.check_input ) self.passwd_re_line.textChanged.connect( self.check_input ) def check_input( self ): if self.user_line_dialog.text() and self.passwd_line_dialog.text() and self.passwd_re_line.text(): self.sure_signin_botton.setEnabled( True ) else: self.sure_signin_botton.setEnabled( False ) def sure_siginin_botton_init( self ): self.sure_signin_botton.setEnabled( False ) #确认按钮与数据库的关联初始化 def sure_botton_init( self ): self.sure_signin_botton.clicked.connect( self.check_data ) # def clearText( text_path ): # with open(text_path, 'w') as f1: # f1.seek(0) # f1.truncate() # # print("清空数据") #如果按钮按下 def check_data( self ): #--------判断用户是否存在-------------- f_all = open( user_path , 'r+') read_dict = eval( f_all.read() ) f_all.close() if self.passwd_line_dialog.text( ) != self.passwd_re_line.text( ) : QMessageBox.critical( self , '注册消息' ,'两次密码输入不一致' , QMessageBox.Ok | QMessageBox.Cancel ) elif self.user_line_dialog.text() not in read_dict : read_dict[self.user_line_dialog.text()] = self.passwd_line_dialog.text() # self.clearText( user_path ) with open(user_path, 'w') as f1: f1.write( str( read_dict ) ) QMessageBox.information( self , '注册消息' , '注册成功' , QMessageBox.Ok ) self.close() else: QMessageBox.critical( self , '注册消息', '注册失败,操作有误' , QMessageBox.Ok ) self.user_line_dialog.clear() self.passwd_line_dialog.clear() self.passwd_re_line.clear()
random_line_split
Main.py
recognize_face_intnet( self ): font = cv2.FONT_HERSHEY_SIMPLEX flag,self.image = self.cap.read() #从视频流中读取 self.image = cv2.resize(self.image, (480,320) ) #把读到的帧的大小重新设置为 480*320 gray = cv2.cvtColor( self.image , cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, 1.3, 5) if self.OnceBaiduAPI_flag == False : self.OnceBaiduAPI_flag = True cv2.imwrite("youtemp.png", self.image ) self.name , self.score = self.go_api( self.transimage( image_name ) ) for (x,y,w,h) in faces: cv2.rectangle(self.image,(x,y),(x+w,y+h), (0, 0 , 255), 2) # roi_gray = gray[y:y+h, x:x+w] roi_color = self.image[y:y+h, x:x+w] cv2.putText(self.image, self.name , (x+5,y-5), font, 1, (255,255,255), 2 ) cv2.putText(self.image, str( self.score ), (x+5,y+h-5), font, 1, (255,255,0), 1 ) def closeCamera(self): self.timer_camera.stop() self.cap.release() self.OnceBaiduAPI_flag = False self.cameraLabel.clear() self.cameraButton.setText('打开摄像头') # self.my_thread.terminate() #这里作为功能启动区,代表着识别成功后就启动该功能 def Function_run( self ): pass #这个类主要是管理注册逻辑,这里为什么要用QDialog呢,当然也可以用Qwidget,这俩都是毛坯房,但是 #QDialog有exec方法,Qwidget是没有的。exec_()方法可以让窗口成为模态窗口,而调用show()方法, #窗口是非模态的。模态窗口将程序控制权占据,只有对当前窗口关闭后才能操作其他窗口; class Signin_Dialog( QDialog ): def __init__( self ): super().__init__() #控件的初始化 self.setWindowTitle('注册系统') self.resize( 300 , 250 ) self.user_name_label = QLabel( "user_namer:" ,self ) self.user_line_dialog = QLineEdit( self ) self.passwd_label = QLabel( 'password:' , self ) self.passwd_line_dialog = QLineEdit( self ) self.passwd_re_label = QLabel( 're_password' , self ) self.passwd_re_line = QLineEdit( self ) self.sure_signin_botton = QPushButton( '确认' , self ) self.cancel_button = QPushButton( '取消', self ) #将布局类实例化 self.v_layout = QVBoxLayout() self.h_layout = QHBoxLayout() self.grid_layout = QGridLayout() #将布局初始化 self.__layput_init() #将确认按钮初始化 self.sure_siginin_botton_init() self.line_init() self.sure_botton_init() def __layput_init( self ): self.grid_layout.addWidget( self.user_name_label , 0 , 0 ) self.grid_layout.addWidget( self.user_line_dialog , 0 , 1 ) self.grid_layout.addWidget( self.passwd_label , 1 , 0 ) self.grid_layout.addWidget( self.passwd_line_dialog , 1 , 1 ) self.grid_layout.addWidget( self.passwd_re_label , 2 , 0 ) self.grid_layout.addWidget( self.passwd_re_line , 2 , 1 ) self.h_layout.addWidget( self.sure_signin_botton ) self.h_layout.addWidget( self.cancel_button ) self.v_layout.addLayout( self.grid_layout ) self.v_layout.addLayout( self.h_layout ) self.setLayout( self.v_layout ) def line_init( self ): self.user_line_dialog.setPlaceholderText( "请输入你的用户账号" ) self.passwd_line_dialog.setPlaceholderText( "请输入你的密码") self.passwd_re_line.setPlaceholderText( '请再次输入你的密码' ) self.passwd_line_dialog.setEchoMode( QLineEdit.Password ) self.passwd_re_line.setEchoMode( QLineEdit.Password ) self.user_line_dialog.textChanged.connect( self.check_input ) self.passwd_line_dialog.textChanged.connect( self.check_input ) self.passwd_re_line.textChanged.connect( self.check_input ) def check_input( self ): if self.user_line_dialog.text() and self.passwd_line_dialog.text() and self.passwd_re_line.text(): self.sure_signin_botton.setEnabled( True ) else: self.sure_signin_botton.setEnabled( False ) def sure_siginin_botton_init( self ): self.sure_signin_botton.setEnabled( False ) #确认按钮与数据库的关联初始化 def sure_botton_init( self ): self.sure_signin_botton.clicked.connect( self.check_data ) # def clearText( text_path ): # with open(text_path, 'w') as f1: # f1.seek(0) # f1.truncate() # # print("清空数据") #如果按钮按下 def check_data( self ): #--------判断用户是否存在-------------- f_all = open( user_path , 'r+') read_dict = eval( f_all.read() ) f_all.close() if self.passwd_line_dialog.text( ) != self.passwd_re_line.text( ) : QMessageBox.critical( self , '注册消息' ,'两次密码输入不一致' , QMessageBox.Ok | QMessageBox.Cancel ) elif self.user_line_dialog.text() not in read_dict : read_dict[self.user_line_dialog.text()] = self.passwd_line_dialog.text() # self.clearText( user_path ) with open(user_path, 'w') as f1: f1.write( str( read_dict ) ) QMessageBox.information( self , '注册消息' , '注册成功' , QMessageBox.Ok ) self.close() else: QMessageBox.critical( self , '注册消息', '注册失败,操作有误' , QMessageBox.Ok ) self.user_line_dialog.clear() self.passwd_line_dialog.clear() self.passwd_re_line.clear() class CollectPicture_Page( QDialog ): mysignal = pyqtSignal( ) def __init__( self ): super().__init__() self.setWindowTitle('人脸数据集收集和训练') self.resize( 1000 ,500 ) self.IsHome_button = QRadioButton( "本地收集" , self) self.IsInternet_button = QRadioButton( "网络收集" ,self ) self.collect_start_button = QPushButton("开始收集", self ) self.train_run_button = QPushButton( "开始训练" ,self ) self.return_button = QPushButton( "取消" , self ) self.cameraLabel = QLabel( 'camera' ,self ) self.cameraLabel.resize( 480,320 ) self.cameraLabel.setAlignment( Qt.AlignCenter ) self.h_col_style_layout = QHBoxLayout() self.v_col_styly_layout = QVBoxLayout() self.h_col_layout = QHBoxLayout() self.v_layout = QVBoxLayout() self.cap = cv2.VideoCapture( ) self.collect_time = QTimer() self.layout_init() self.button_init() self.slot_init() def layout_init( self ): self.h_col_style_layout.addWidget( self.IsHome_button ) self.h_col_style_layout.addWidget( self.IsInternet_button ) self.h_col_style_layout.addStretch(1) self.h_col_layout.addWidget( self.collect_start_button ) self.h_col_layout.addWidget( self.train_run_button ) self.h_col_layout.addWidget( self.return_button ) self.v_layout.addWidget( self.cameraLabel ) self.v_layout.addLayout( self.h_col_style_layout ) self.v_layout.addLayout( self.h_col_layout ) self.setLayout( self.v_layout ) def button_init( self ): self.return_button.clicked.connect( self.cancel_task ) self.collect_start_button.clicked.connect( self.openCamera ) self.train_run_button.clicked.connect( self.Training_faces ) self.IsHome_button.setChecked( True ) def slot_init( self ): self.collect_time.timeout.connect( self.show_camera ) self.mysignal.connect( self.collect_signal_run ) def camera_init( self ): self.unregisterFlag = False self.face_detector = cv2.CascadeClassifier( path ) self.count = 0 fl = open( user_path ,
'r+') real_dict = eval( fl.read() ) names = list( real_dict.keys() ) fl.close() self.collect_name , ok = QInputDialog.getText( self , '请输入你的名字' ,'必须是已经注册的名字!' ) if self.collect_name in names: self.face_id = names.index( self.collect_name ) + 1 #face_id = input('\n enter user id:') #输入序号,表示某人的一些列照片 print('\n Initializing face capture. Look at the camera and wait ...') else: QMessageBox.warning( self ,'异常状态' , '请去注册' , QMessageBox.Ok ) self.unregisterFlag = True def cancel_task( self ): self.collect_time.stop()
identifier_body
Main.py
self.SigninPage_exe ) self.face_recongition_button.clicked.connect( self.Face_start_exe ) self.collect_buttton.clicked.connect( self.Collect_page_exe ) def SigninPage_exe( self ): self.sginin_page.exec( )#启动注册页面 # 人脸识别页面 def Face_start_exe( self ): self.face_page.exec( ) # 人脸收集页面启动 def Collect_page_exe( self ): self.colleet_page.exec( ) def check_login_info( self ): f_all = open( user_path , 'r+') read_dict = eval( f_all.read( ) ) f_all.close() if self.user_line.text() not in read_dict: QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的用户名" , QMessageBox.Ok ) elif read_dict[ self.user_line.text() ] == self.passwd_line.text(): QMessageBox.information( self , '登录消息' , "登录成功" , QMessageBox.Ok ) else: QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的密码" , QMessageBox.Ok ) def login_input_init( self ): self.login_button.setEnabled( False ) # def facepass( self ): # get_name=self.recognize_face()#返回识别的人名 # if get_name=="unknown": # reply = QMessageBox.information(self, '提示', '人脸识别失败', QMessageBox.Close) # else: # reply = QMessageBox.information(self, '提示', "欢迎您:"+get_name, QMessageBox.Ok) # print("编写其他程序") # #多线程进行网络监听 # class My_theard( QThread ): # my_signal = pyqtSignal( int ) # def __init__( self ): # super().__init__() # def run( self ): # while True: # exit_code = os.system('ping www.baidu.com') # if exit_code: # print("----------没网,启动本地识别-------------------") # else: # print("----------有网,启动网络识别-------------------") # self.my_signal.emit( exit_code ) # self.sleep( 10 ) faceCascade = cv2.CascadeClassifier( path ) class Face_start( QDialog ): def __init__( self ): super().__init__() self.setWindowTitle( '人脸识别' ) self.resize( 1000 ,500 ) self.cameraLabel = QLabel( 'camera', self ) self.cameraLabel.resize(480 ,320 ) self.cameraLabel.setAlignment( Qt.AlignCenter ) self.timer_camera = QTimer() self.cap = cv2.VideoCapture() #初始化摄像头 self.recognizer = cv2.face.LBPHFaceRecognizer_create() self.recognizer.read( trainer_path ) #识别时间10秒;如果置信度大于60%,则识别成功并退出界面;否则至10秒后识别失败并退出 self.font = cv2.FONT_HERSHEY_SIMPLEX #初始化多线程,进行网络监听 # self.my_thread = My_theard( ) # self.my_thread.start() fl = open( user_path , 'r+') read_dict = eval( fl.read() ) self.names = list( read_dict.keys( ) ) fl.close() # tag = [] # for i in range( len( names ) ) : # tag.append( eval("False") ) self.minW = 0.1 * self.cap.get(3) self.minH = 0.1 * self.cap.get(4) #网络识别一次的初始化标志位 self.OnceBaiduAPI_flag = False self.layout_main = QVBoxLayout() self.layout_fun_button = QHBoxLayout() self.layout_data_show = QHBoxLayout() self.cameraButton = QPushButton(u'打开相机') # self.button_close.setMinimumHeight(50) self.layout_init() self.slot_init() def layout_init( self ): self.layout_data_show.addWidget( self.cameraLabel ) self.layout_fun_button.addWidget( self.cameraButton ) # self.layout_fun_button.addStretch(1) self.layout_main.addLayout( self.layout_data_show ) self.layout_main.addLayout( self.layout_fun_button ) self.setLayout( self.layout_main ) def slot_init(self): self.timer_camera.timeout.connect(self.show_camera) #信号和槽连接 # self.returnButton.clicked.connect(self.returnSignal) self.cameraButton.clicked.connect(self.slotCameraButton) # self.cameraButton.clicked.connect( self.recognize_face ) # self.my_thread.my_signal.connect( self.get_Intnet_code ) def get_Intnet_code( self , exitcode ): self.exit_code = exitcode #打开关闭摄像头控制 def slotCameraButton(self): if self.timer_camera.isActive() == False: #打开摄像头并显示图像信息 self.openCamera() else: #关闭摄像头并清空显示信息 self.closeCamera() def show_camera(self): if exit_code :#没网就用OpenCV self.recognize_face() else: #有网的用百度api self.recognize_face_intnet() self.image = cv2.cvtCol
ge,cv2.COLOR_BGR2RGB ) #视频色彩转换回RGB,这样才是现实的颜色 #pyqt显示逻辑 showImage = QImage( self.image.data, self.image.shape[1] , self.image.shape[0], QImage.Format_RGB888 ) self.cameraLabel.setPixmap(QPixmap.fromImage(showImage)) #打开摄像头 def openCamera(self): flag = self.cap.open( cap_id ) if flag == False: msg = QMessageBox.Warning(self, u'Warning', u'请检测相机与电脑是否连接正确',\ buttons=QMessageBox.Ok, defaultButton=QMessageBox.Ok) else: self.timer_camera.start(30) self.cameraButton.setText('关闭摄像头') def face_recongnition_start( self ): faces = faceCascade.detectMultiScale( self.gray, scaleFactor=1.2, minNeighbors=5, minSize=(20, 20) ) for (x,y,w,h) in faces: cv2.rectangle(self.gray, (x, y), (x + w, y + w), (255,0,0),2 ) roi_gray = self.gray [y:y+h, x:x+w] roi_color = self.image [y:y+h, x:x+w] def recognize_face( self ): flag,self.image = self.cap.read() #从视频流中读取 self.image = cv2.resize(self.image,(480,320)) #把读到的帧的大小重新设置为 640x480 result = "unknown" #初始化识别失败 gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=5, minSize=(int( self.minW), int( self.minH ) ) ) face_num= None #初始化人脸序号 for (x, y, w, h) in faces: cv2.rectangle( self.image , (x, y), (x + w, y + h), (0, 0 , 255), 2) id, confidence = self.recognizer.predict(gray[y:y + h, x:x + w]) if confidence < 100 : #50%的识别置信度 result= self.names[id] confidencestr = "{0}%".format(round(100 - confidence)) # go_api( round(100 - confidence) , int( idnum ) , tag , names) else: confidencestr = "{0}%".format(round(100 - confidence)) cv2.putText( self.image, result , (x + 5, y - 5), self.font, 1, (0, 0, 255), 2 ) cv2.putText( self.image, confidencestr , (x + 5, y + h - 5), self.font, 1, (0, 0, 0), 1) #网络识别的工具函数 #对图片的格式进行转换 def transimage( self , image_name ): f = open( image_name ,'rb') img = base64.b64encode(f.read()) return img def logging( self , name ): curren_time = time.asctime(time.localtime(time.time())) f = open('Log.txt','a+') f.write("Person: " + name + " " + "Time:" + str(curren_time)+'\n') f.close() #上传到百度api进行人脸检测 def go_api( self , image): result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP) if result['error_msg'] == 'SUCCESS': name = result['result']['user_list'][0]['user_id'] score = result['result']['user_list'][0]['score'] if score > 80: print("Welcome %s !" % name) self.logging( name ) # recong_result=
or(self.ima
identifier_name
Main.py
.resize( 1000 ,500 ) self.cameraLabel = QLabel( 'camera', self ) self.cameraLabel.resize(480 ,320 ) self.cameraLabel.setAlignment( Qt.AlignCenter ) self.timer_camera = QTimer() self.cap = cv2.VideoCapture() #初始化摄像头 self.recognizer = cv2.face.LBPHFaceRecognizer_create() self.recognizer.read( trainer_path ) #识别时间10秒;如果置信度大于60%,则识别成功并退出界面;否则至10秒后识别失败并退出 self.font = cv2.FONT_HERSHEY_SIMPLEX #初始化多线程,进行网络监听 # self.my_thread = My_theard( ) # self.my_thread.start() fl = open( user_path , 'r+') read_dict = eval( fl.read() ) self.names = list( read_dict.keys( ) ) fl.close() # tag = [] # for i in range( len( names ) ) : # tag.append( eval("False") ) self.minW = 0.1 * self.cap.get(3) self.minH = 0.1 * self.cap.get(4) #网络识别一次的初始化标志位 self.OnceBaiduAPI_flag = False self.layout_main = QVBoxLayout() self.layout_fun_button = QHBoxLayout() self.layout_data_show = QHBoxLayout() self.cameraButton = QPushButton(u'打开相机') # self.button_close.setMinimumHeight(50) self.layout_init() self.slot_init() def layout_init( self ): self.layout_data_show.addWidget( self.cameraLabel ) self.layout_fun_button.addWidget( self.cameraButton ) # self.layout_fun_button.addStretch(1) self.layout_main.addLayout( self.layout_data_show ) self.layout_main.addLayout( self.layout_fun_button ) self.setLayout( self.layout_main ) def slot_init(self): self.timer_camera.timeout.connect(self.show_camera) #信号和槽连接 # self.returnButton.clicked.connect(self.returnSignal) self.cameraButton.clicked.connect(self.slotCameraButton) # self.cameraButton.clicked.connect( self.recognize_face ) # self.my_thread.my_signal.connect( self.get_Intnet_code ) def get_Intnet_code( self , exitcode ): self.exit_code = exitcode #打开关闭摄像头控制 def slotCameraButton(self): if self.timer_camera.isActive() == False: #打开摄像头并显示图像信息 self.openCamera() else: #关闭摄像头并清空显示信息 self.closeCamera() def show_camera(self): if exit_code :#没网就用OpenCV self.recognize_face() else: #有网的用百度api self.recognize_face_intnet() self.image = cv2.cvtColor(self.image,cv2.COLOR_BGR2RGB ) #视频色彩转换回RGB,这样才是现实的颜色 #pyqt显示逻辑 showImage = QImage( self.image.data, self.image.shape[1] , self.image.shape[0], QImage.Format_RGB888 ) self.cameraLabel.setPixmap(QPixmap.fromImage(showImage)) #打开摄像头 def openCamera(self): flag = self.cap.open( cap_id ) if flag == False: msg = QMessageBox.Warning(self, u'Warning', u'请检测相机与电脑是否连接正确',\ buttons=QMessageBox.Ok, defaultButton=QMessageBox.Ok) else: self.timer_camera.start(30) self.cameraButton.setText('关闭摄像头') def face_recongnition_start( self ): faces = faceCascade.detectMultiScale( self.gray, scaleFactor=1.2, minNeighbors=5, minSize=(20, 20) ) for (x,y,w,h) in faces: cv2.rectangle(self.gray, (x, y), (x + w, y + w), (255,0,0),2 ) roi_gray = self.gray [y:y+h, x:x+w] roi_color = self.image [y:y+h, x:x+w] def recognize_face( self ): flag,self.image = self.cap.read() #从视频流中读取 self.image = cv2.resize(self.image,(480,320)) #把读到的帧的大小重新设置为 640x480 result = "unknown" #初始化识别失败 gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=5, minSize=(int( self.minW), int( self.minH ) ) ) face_num= None #初始化人脸序号 for (x, y, w, h) in faces: cv2.rectangle( self.image , (x, y), (x + w, y + h), (0, 0 , 255), 2) id, confidence = self.recognizer.predict(gray[y:y + h, x:x + w]) if confidence < 100 : #50%的识别置信度 result= self.names[id] confidencestr = "{0}%".format(round(100 - confidence)) # go_api( round(100 - confidence) , int( idnum ) , tag , names) else: confidencestr = "{0}%".format(round(100 - confidence)) cv2.putText( self.image, result , (x + 5, y - 5), self.font, 1, (0, 0, 255), 2 ) cv2.putText( self.image, confidencestr , (x + 5, y + h - 5), self.font, 1, (0, 0, 0), 1) #网络识别的工具函数 #对图片的格式进行转换 def transimage( self , image_name ): f = open( image_name ,'rb') img = base64.b64encode(f.read()) return img def logging( self , name ): curren_time = time.asctime(time.localtime(time.time())) f = open('Log.txt','a+') f.write("Person: " + name + " " + "Time:" + str(curren_time)+'\n') f.close() #上传到百度api进行人脸检测 def go_api( self , image): result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP) if result['error_msg'] == 'SUCCESS': name = result['result']['user_list'][0]['user_id'] score = result['result']['user_list'][0]['score'] if score > 80: print("Welcome %s !" % name) self.logging( name ) # recong_result=QMessageBox.information( self ,\ # "登录消息" , "识别成功,是否进入相应功能区" , QMessageBox.Ok |QMessageBox.Cancel ) # if recong_result == QMessageBox.Ok : # pass # else: # self.close() # self.closeCamera() else: print("Sorry...I don't know you !") name = 'Unknow' return name,score if result['error_msg'] == 'pic not has face': print('There is no face in image!') return "NO FACE", None else: print(result['error_code']+' ' + result['error_code']) return "ERROR" , None def recognize_face_intnet( self ): font = cv2.FONT_HERSHEY_SIMPLEX flag,self.image = self.cap.read() #从视频流中读取 self.image = cv2.resize(self.image, (480,320) ) #把读到的帧的大小重新设置为 480*320 gray = cv2.cvtColor( self.image , cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, 1.3, 5) if self.OnceBaiduAPI_flag == False : self.OnceBaiduAPI_flag = True cv2.imwrite("youtemp.png", self.image ) self.name , self.score = self.go_api( self.transimage( image_name ) ) for (x,y,w,h) in faces: cv2.rectangle(self.image,(x,y),(x+w,y+h), (0, 0 , 255), 2) # roi_gray = gray[y:y+h, x:x+w] roi_color = self.image[y:y+h, x:x+w]
cv2.putText(self.image, self.name , (x+5,y-5), font, 1, (255,255,255), 2 ) cv2.putText(self.image, str( self.score ), (x+5,y+h-5), font, 1, (255,255,0), 1 ) def closeCamera(self): self.timer_camera.stop() self.cap.release() self.OnceBaiduAPI_flag = False self.cameraLabel.clear() self.cameraButton.setText('打开摄像头') # self.my_thread.terminate() #这里作为功能启动区,
conditional_block
SPT_AGN_emcee_sampler_MPI.py
# Phi*(z) = 10**(log(Phi*(z)) phi_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3 # QLF slopes alpha1 = -3.35 # alpha in Table 2 alpha2 = -0.37 # beta in Table 2 Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1 return Phi def model_rate_opted(params, cluster_id, r_r500, j_mag, integral=False): """ Our generating model. Parameters ---------- params : tuple Tuple of (theta, eta, zeta, beta, rc, C) cluster_id : str SPT ID of our cluster in the catalog dictionary r_r500 : array-like A vector of radii of objects within the cluster normalized by the cluster's r500 j_mag : array-like A vector of J-band absolute magnitudes to be used in the luminosity function integral : bool, optional Flag indicating if the luminosity function factor of the model should be integrated. Defaults to `False`. Returns ------- model A surface density profile of objects as a function of radius and luminosity. """ if args.cluster_only: # Unpack our parameters theta, eta, zeta, beta, rc = params # Set background parameter to 0 C = 0 elif args.background_only: # Unpack our parameters C, = params # Set all other parameters to 0 theta, eta, zeta, beta, rc = [0.]*5 else: # Unpack our parameters theta, eta, zeta, beta, rc, C = params # Extract our data from the catalog dictionary z = catalog_dict[cluster_id]['redshift'] m = catalog_dict[cluster_id]['m500'] r500 = catalog_dict[cluster_id]['r500'] # Luminosity function number if integral: lum_funct_value = np.trapz(luminosity_function(j_mag, z), j_mag) else: lum_funct_value = luminosity_function(j_mag, z) if args.no_luminosity or args.poisson_only: LF = 1 else: LF = cosmo.angular_diameter_distance(z) ** 2 * r500 * lum_funct_value # Convert our background surface density from angular units into units of r500^-2 background = (C / u.arcmin ** 2) * cosmo.arcsec_per_kpc_proper(z).to(u.arcmin / u.Mpc) ** 2 * r500 ** 2 # Our amplitude is determined from the cluster data a = theta * (1 + z) ** eta * (m / (1e15 * u.Msun)) ** zeta * LF model = a * (1 + (r_r500 / rc) ** 2) ** (-1.5 * beta + 0.5) + background return model.value # Set our log-likelihood def lnlike(param): lnlike_list = [] for cluster_id in catalog_dict: # Get the good pixel fraction for this cluster gpf_all = catalog_dict[cluster_id]['gpf_rall'] # Get the radial positions of the AGN radial_r500_maxr = catalog_dict[cluster_id]['radial_r500_maxr'] # Get the completeness weights for the AGN completeness_weight_maxr = catalog_dict[cluster_id]['completeness_weight_maxr'] # Get the AGN sample degrees of membership if args.no_selection_membership or args.poisson_only: agn_membership = 1 else: agn_membership = catalog_dict[cluster_id]['agn_membership_maxr'] # Get the J-band absolute magnitudes j_band_abs_mag = catalog_dict[cluster_id]['j_abs_mag'] # Get the radial mesh for integration rall = catalog_dict[cluster_id]['rall'] # Get the luminosity mesh for integration jall = catalog_dict[cluster_id]['jall'] # Compute the completeness ratio for this cluster if args.no_completeness or args.poisson_only: completeness_ratio = 1. else: completeness_ratio = len(completeness_weight_maxr) / np.sum(completeness_weight_maxr) # Compute the model rate at the locations of the AGN. ni = model_rate_opted(param, cluster_id, radial_r500_maxr, j_band_abs_mag) # Compute the full model along the radial direction. # The completeness weight is set to `1` as the model in the integration is assumed to be complete. n_mesh = model_rate_opted(param, cluster_id, rall, jall, integral=True) # Use a spatial poisson point-process log-likelihood cluster_lnlike = (np.sum(np.log(ni * radial_r500_maxr) * agn_membership) - completeness_ratio * trap_weight(n_mesh * 2 * np.pi * rall, rall, weight=gpf_all)) lnlike_list.append(cluster_lnlike) total_lnlike = np.sum(lnlike_list) return total_lnlike # For our prior, we will choose uninformative priors for all our parameters and for the constant field value we will use # a gaussian distribution set by the values obtained from the SDWFS data set. def lnprior(params): # Set our hyperparameters # h_rc = 0.25 # h_rc_err = 0.1 h_C = 0.333 h_C_err = 0.024 # Extract our parameters if args.cluster_only: theta, eta, zeta, beta, rc = params C = 0. elif args.background_only: C, = params theta, eta, zeta, beta, rc = [0.]*5 else: theta, eta, zeta, beta, rc, C = params # Define all priors if (0.0 <= theta <= np.inf and -6. <= eta <= 6. and -3. <= zeta <= 3. and -3. <= beta <= 3. and 0.05 <= rc <= 0.5 and 0.0 <= C < np.inf): theta_lnprior = 0.0 eta_lnprior = 0.0 beta_lnprior = 0.0 zeta_lnprior = 0.0 # rc_lnprior = -0.5 * np.sum((rc - h_rc) ** 2 / h_rc_err ** 2) rc_lnprior = 0.0 if args.cluster_only: C_lnprior = 0. else: C_lnprior = -0.5 * np.sum((C - h_C) ** 2 / h_C_err ** 2) # C_lnprior = 0.0 else: theta_lnprior = -np.inf eta_lnprior = -np.inf beta_lnprior = -np.inf zeta_lnprior = -np.inf rc_lnprior = -np.inf C_lnprior = -np.inf # Assuming all parameters are independent the joint log-prior is total_lnprior = theta_lnprior + eta_lnprior + zeta_lnprior + beta_lnprior + rc_lnprior + C_lnprior return total_lnprior # Define the log-posterior probability def lnpost(params): lp = lnprior(params) # Check the finiteness of the prior. if not np.isfinite(lp): return -np.inf return lp + lnlike(params) hcc_prefix = '/work/mei/bfloyd/SPT_AGN/' # hcc_prefix = '' parser = ArgumentParser(description='Runs MCMC sampler') parser.add_argument('--restart', help='Allows restarting the chain in place rather than resetting the chain.', action='store_true') parser.add_argument('name', help='Chain name', type=str) parser.add_argument('--no-luminosity', action='store_true', help='Deactivate luminosity dependence in model.') parser.add_argument('--no-selection-membership', action='store_true', help='Deactivate fuzzy degree of membership for AGN selection
""" Assef+11 QLF using luminosity and density evolution. Parameters ---------- abs_mag : astropy table-like Rest-frame J-band absolute magnitude. redshift : astropy table-like Cluster redshift Returns ------- Phi : ndarray Luminosity density """ # L/L_*(z) = 10**(0.4 * (M_*(z) - M)) L_L_star = 10 ** (0.4 * (m_star(redshift) - abs_mag))
identifier_body
SPT_AGN_emcee_sampler_MPI.py
phi_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3 # QLF slopes alpha1 = -3.35 # alpha in Table 2 alpha2 = -0.37 # beta in Table 2 Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1 return Phi def model_rate_opted(params, cluster_id, r_r500, j_mag, integral=False): """ Our generating model. Parameters ---------- params : tuple Tuple of (theta, eta, zeta, beta, rc, C) cluster_id : str SPT ID of our cluster in the catalog dictionary r_r500 : array-like A vector of radii of objects within the cluster normalized by the cluster's r500 j_mag : array-like A vector of J-band absolute magnitudes to be used in the luminosity function integral : bool, optional Flag indicating if the luminosity function factor of the model should be integrated. Defaults to `False`. Returns ------- model A surface density profile of objects as a function of radius and luminosity. """ if args.cluster_only: # Unpack our parameters theta, eta, zeta, beta, rc = params # Set background parameter to 0 C = 0 elif args.background_only: # Unpack our parameters C, = params # Set all other parameters to 0 theta, eta, zeta, beta, rc = [0.]*5 else: # Unpack our parameters theta, eta, zeta, beta, rc, C = params # Extract our data from the catalog dictionary z = catalog_dict[cluster_id]['redshift'] m = catalog_dict[cluster_id]['m500'] r500 = catalog_dict[cluster_id]['r500'] # Luminosity function number if integral: lum_funct_value = np.trapz(luminosity_function(j_mag, z), j_mag) else: lum_funct_value = luminosity_function(j_mag, z) if args.no_luminosity or args.poisson_only: LF = 1 else: LF = cosmo.angular_diameter_distance(z) ** 2 * r500 * lum_funct_value # Convert our background surface density from angular units into units of r500^-2 background = (C / u.arcmin ** 2) * cosmo.arcsec_per_kpc_proper(z).to(u.arcmin / u.Mpc) ** 2 * r500 ** 2 # Our amplitude is determined from the cluster data a = theta * (1 + z) ** eta * (m / (1e15 * u.Msun)) ** zeta * LF model = a * (1 + (r_r500 / rc) ** 2) ** (-1.5 * beta + 0.5) + background return model.value # Set our log-likelihood def lnlike(param): lnlike_list = [] for cluster_id in catalog_dict: # Get the good pixel fraction for this cluster gpf_all = catalog_dict[cluster_id]['gpf_rall'] # Get the radial positions of the AGN radial_r500_maxr = catalog_dict[cluster_id]['radial_r500_maxr'] # Get the completeness weights for the AGN completeness_weight_maxr = catalog_dict[cluster_id]['completeness_weight_maxr'] # Get the AGN sample degrees of membership if args.no_selection_membership or args.poisson_only: agn_membership = 1 else: agn_membership = catalog_dict[cluster_id]['agn_membership_maxr'] # Get the J-band absolute magnitudes j_band_abs_mag = catalog_dict[cluster_id]['j_abs_mag'] # Get the radial mesh for integration rall = catalog_dict[cluster_id]['rall'] # Get the luminosity mesh for integration jall = catalog_dict[cluster_id]['jall'] # Compute the completeness ratio for this cluster if args.no_completeness or args.poisson_only: completeness_ratio = 1. else: completeness_ratio = len(completeness_weight_maxr) / np.sum(completeness_weight_maxr) # Compute the model rate at the locations of the AGN. ni = model_rate_opted(param, cluster_id, radial_r500_maxr, j_band_abs_mag) # Compute the full model along the radial direction. # The completeness weight is set to `1` as the model in the integration is assumed to be complete. n_mesh = model_rate_opted(param, cluster_id, rall, jall, integral=True) # Use a spatial poisson point-process log-likelihood cluster_lnlike = (np.sum(np.log(ni * radial_r500_maxr) * agn_membership) - completeness_ratio * trap_weight(n_mesh * 2 * np.pi * rall, rall, weight=gpf_all)) lnlike_list.append(cluster_lnlike) total_lnlike = np.sum(lnlike_list) return total_lnlike # For our prior, we will choose uninformative priors for all our parameters and for the constant field value we will use # a gaussian distribution set by the values obtained from the SDWFS data set. def lnprior(params): # Set our hyperparameters # h_rc = 0.25 # h_rc_err = 0.1 h_C = 0.333 h_C_err = 0.024 # Extract our parameters if args.cluster_only: theta, eta, zeta, beta, rc = params C = 0. elif args.background_only: C, = params theta, eta, zeta, beta, rc = [0.]*5 else:
# Define all priors if (0.0 <= theta <= np.inf and -6. <= eta <= 6. and -3. <= zeta <= 3. and -3. <= beta <= 3. and 0.05 <= rc <= 0.5 and 0.0 <= C < np.inf): theta_lnprior = 0.0 eta_lnprior = 0.0 beta_lnprior = 0.0 zeta_lnprior = 0.0 # rc_lnprior = -0.5 * np.sum((rc - h_rc) ** 2 / h_rc_err ** 2) rc_lnprior = 0.0 if args.cluster_only: C_lnprior = 0. else: C_lnprior = -0.5 * np.sum((C - h_C) ** 2 / h_C_err ** 2) # C_lnprior = 0.0 else: theta_lnprior = -np.inf eta_lnprior = -np.inf beta_lnprior = -np.inf zeta_lnprior = -np.inf rc_lnprior = -np.inf C_lnprior = -np.inf # Assuming all parameters are independent the joint log-prior is total_lnprior = theta_lnprior + eta_lnprior + zeta_lnprior + beta_lnprior + rc_lnprior + C_lnprior return total_lnprior # Define the log-posterior probability def lnpost(params): lp = lnprior(params) # Check the finiteness of the prior. if not np.isfinite(lp): return -np.inf return lp + lnlike(params) hcc_prefix = '/work/mei/bfloyd/SPT_AGN/' # hcc_prefix = '' parser = ArgumentParser(description='Runs MCMC sampler') parser.add_argument('--restart', help='Allows restarting the chain in place rather than resetting the chain.', action='store_true') parser.add_argument('name', help='Chain name', type=str) parser.add_argument('--no-luminosity', action='store_true', help='Deactivate luminosity dependence in model.') parser.add_argument('--no-selection-membership', action='store_true', help='Deactivate fuzzy degree of membership for AGN selection in likelihood function.') parser.add_argument('--no-completeness', action='store_true', help='Deactivate photometric completeness correction in likelihood function.') parser.add_argument('--poisson-only', action='store_true', help='Use a pure Poisson likelihood function with a model that has no luminosity dependence.') parser_grp = parser.add_mutually_exclusive_group() parser_grp.add_argument('--cluster-only', action='store_true', help='Sample only on cluster objects.') parser_grp.add_argument('--background-only', action='store_true', help='Sample only on background objects.') args = parser.parse_args() # Load in the prepossessing file preprocess_file =
theta, eta, zeta, beta, rc, C = params
conditional_block
SPT_AGN_emcee_sampler_MPI.py
_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3 # QLF slopes alpha1 = -3.35 # alpha in Table 2 alpha2 = -0.37 # beta in Table 2 Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1 return Phi def
(params, cluster_id, r_r500, j_mag, integral=False): """ Our generating model. Parameters ---------- params : tuple Tuple of (theta, eta, zeta, beta, rc, C) cluster_id : str SPT ID of our cluster in the catalog dictionary r_r500 : array-like A vector of radii of objects within the cluster normalized by the cluster's r500 j_mag : array-like A vector of J-band absolute magnitudes to be used in the luminosity function integral : bool, optional Flag indicating if the luminosity function factor of the model should be integrated. Defaults to `False`. Returns ------- model A surface density profile of objects as a function of radius and luminosity. """ if args.cluster_only: # Unpack our parameters theta, eta, zeta, beta, rc = params # Set background parameter to 0 C = 0 elif args.background_only: # Unpack our parameters C, = params # Set all other parameters to 0 theta, eta, zeta, beta, rc = [0.]*5 else: # Unpack our parameters theta, eta, zeta, beta, rc, C = params # Extract our data from the catalog dictionary z = catalog_dict[cluster_id]['redshift'] m = catalog_dict[cluster_id]['m500'] r500 = catalog_dict[cluster_id]['r500'] # Luminosity function number if integral: lum_funct_value = np.trapz(luminosity_function(j_mag, z), j_mag) else: lum_funct_value = luminosity_function(j_mag, z) if args.no_luminosity or args.poisson_only: LF = 1 else: LF = cosmo.angular_diameter_distance(z) ** 2 * r500 * lum_funct_value # Convert our background surface density from angular units into units of r500^-2 background = (C / u.arcmin ** 2) * cosmo.arcsec_per_kpc_proper(z).to(u.arcmin / u.Mpc) ** 2 * r500 ** 2 # Our amplitude is determined from the cluster data a = theta * (1 + z) ** eta * (m / (1e15 * u.Msun)) ** zeta * LF model = a * (1 + (r_r500 / rc) ** 2) ** (-1.5 * beta + 0.5) + background return model.value # Set our log-likelihood def lnlike(param): lnlike_list = [] for cluster_id in catalog_dict: # Get the good pixel fraction for this cluster gpf_all = catalog_dict[cluster_id]['gpf_rall'] # Get the radial positions of the AGN radial_r500_maxr = catalog_dict[cluster_id]['radial_r500_maxr'] # Get the completeness weights for the AGN completeness_weight_maxr = catalog_dict[cluster_id]['completeness_weight_maxr'] # Get the AGN sample degrees of membership if args.no_selection_membership or args.poisson_only: agn_membership = 1 else: agn_membership = catalog_dict[cluster_id]['agn_membership_maxr'] # Get the J-band absolute magnitudes j_band_abs_mag = catalog_dict[cluster_id]['j_abs_mag'] # Get the radial mesh for integration rall = catalog_dict[cluster_id]['rall'] # Get the luminosity mesh for integration jall = catalog_dict[cluster_id]['jall'] # Compute the completeness ratio for this cluster if args.no_completeness or args.poisson_only: completeness_ratio = 1. else: completeness_ratio = len(completeness_weight_maxr) / np.sum(completeness_weight_maxr) # Compute the model rate at the locations of the AGN. ni = model_rate_opted(param, cluster_id, radial_r500_maxr, j_band_abs_mag) # Compute the full model along the radial direction. # The completeness weight is set to `1` as the model in the integration is assumed to be complete. n_mesh = model_rate_opted(param, cluster_id, rall, jall, integral=True) # Use a spatial poisson point-process log-likelihood cluster_lnlike = (np.sum(np.log(ni * radial_r500_maxr) * agn_membership) - completeness_ratio * trap_weight(n_mesh * 2 * np.pi * rall, rall, weight=gpf_all)) lnlike_list.append(cluster_lnlike) total_lnlike = np.sum(lnlike_list) return total_lnlike # For our prior, we will choose uninformative priors for all our parameters and for the constant field value we will use # a gaussian distribution set by the values obtained from the SDWFS data set. def lnprior(params): # Set our hyperparameters # h_rc = 0.25 # h_rc_err = 0.1 h_C = 0.333 h_C_err = 0.024 # Extract our parameters if args.cluster_only: theta, eta, zeta, beta, rc = params C = 0. elif args.background_only: C, = params theta, eta, zeta, beta, rc = [0.]*5 else: theta, eta, zeta, beta, rc, C = params # Define all priors if (0.0 <= theta <= np.inf and -6. <= eta <= 6. and -3. <= zeta <= 3. and -3. <= beta <= 3. and 0.05 <= rc <= 0.5 and 0.0 <= C < np.inf): theta_lnprior = 0.0 eta_lnprior = 0.0 beta_lnprior = 0.0 zeta_lnprior = 0.0 # rc_lnprior = -0.5 * np.sum((rc - h_rc) ** 2 / h_rc_err ** 2) rc_lnprior = 0.0 if args.cluster_only: C_lnprior = 0. else: C_lnprior = -0.5 * np.sum((C - h_C) ** 2 / h_C_err ** 2) # C_lnprior = 0.0 else: theta_lnprior = -np.inf eta_lnprior = -np.inf beta_lnprior = -np.inf zeta_lnprior = -np.inf rc_lnprior = -np.inf C_lnprior = -np.inf # Assuming all parameters are independent the joint log-prior is total_lnprior = theta_lnprior + eta_lnprior + zeta_lnprior + beta_lnprior + rc_lnprior + C_lnprior return total_lnprior # Define the log-posterior probability def lnpost(params): lp = lnprior(params) # Check the finiteness of the prior. if not np.isfinite(lp): return -np.inf return lp + lnlike(params) hcc_prefix = '/work/mei/bfloyd/SPT_AGN/' # hcc_prefix = '' parser = ArgumentParser(description='Runs MCMC sampler') parser.add_argument('--restart', help='Allows restarting the chain in place rather than resetting the chain.', action='store_true') parser.add_argument('name', help='Chain name', type=str) parser.add_argument('--no-luminosity', action='store_true', help='Deactivate luminosity dependence in model.') parser.add_argument('--no-selection-membership', action='store_true', help='Deactivate fuzzy degree of membership for AGN selection in likelihood function.') parser.add_argument('--no-completeness', action='store_true', help='Deactivate photometric completeness correction in likelihood function.') parser.add_argument('--poisson-only', action='store_true', help='Use a pure Poisson likelihood function with a model that has no luminosity dependence.') parser_grp = parser.add_mutually_exclusive_group() parser_grp.add_argument('--cluster-only', action='store_true', help='Sample only on cluster objects.') parser_grp.add_argument('--background-only', action='store_true', help='Sample only on background objects.') args = parser.parse_args() # Load in the prepossessing file preprocess_file =
model_rate_opted
identifier_name
SPT_AGN_emcee_sampler_MPI.py
j_mag : array-like A vector of J-band absolute magnitudes to be used in the luminosity function integral : bool, optional Flag indicating if the luminosity function factor of the model should be integrated. Defaults to `False`. Returns ------- model A surface density profile of objects as a function of radius and luminosity. """ if args.cluster_only: # Unpack our parameters theta, eta, zeta, beta, rc = params # Set background parameter to 0 C = 0 elif args.background_only: # Unpack our parameters C, = params # Set all other parameters to 0 theta, eta, zeta, beta, rc = [0.]*5 else: # Unpack our parameters theta, eta, zeta, beta, rc, C = params # Extract our data from the catalog dictionary z = catalog_dict[cluster_id]['redshift'] m = catalog_dict[cluster_id]['m500'] r500 = catalog_dict[cluster_id]['r500'] # Luminosity function number if integral: lum_funct_value = np.trapz(luminosity_function(j_mag, z), j_mag) else: lum_funct_value = luminosity_function(j_mag, z) if args.no_luminosity or args.poisson_only: LF = 1 else: LF = cosmo.angular_diameter_distance(z) ** 2 * r500 * lum_funct_value # Convert our background surface density from angular units into units of r500^-2 background = (C / u.arcmin ** 2) * cosmo.arcsec_per_kpc_proper(z).to(u.arcmin / u.Mpc) ** 2 * r500 ** 2 # Our amplitude is determined from the cluster data a = theta * (1 + z) ** eta * (m / (1e15 * u.Msun)) ** zeta * LF model = a * (1 + (r_r500 / rc) ** 2) ** (-1.5 * beta + 0.5) + background return model.value # Set our log-likelihood def lnlike(param): lnlike_list = [] for cluster_id in catalog_dict: # Get the good pixel fraction for this cluster gpf_all = catalog_dict[cluster_id]['gpf_rall'] # Get the radial positions of the AGN radial_r500_maxr = catalog_dict[cluster_id]['radial_r500_maxr'] # Get the completeness weights for the AGN completeness_weight_maxr = catalog_dict[cluster_id]['completeness_weight_maxr'] # Get the AGN sample degrees of membership if args.no_selection_membership or args.poisson_only: agn_membership = 1 else: agn_membership = catalog_dict[cluster_id]['agn_membership_maxr'] # Get the J-band absolute magnitudes j_band_abs_mag = catalog_dict[cluster_id]['j_abs_mag'] # Get the radial mesh for integration rall = catalog_dict[cluster_id]['rall'] # Get the luminosity mesh for integration jall = catalog_dict[cluster_id]['jall'] # Compute the completeness ratio for this cluster if args.no_completeness or args.poisson_only: completeness_ratio = 1. else: completeness_ratio = len(completeness_weight_maxr) / np.sum(completeness_weight_maxr) # Compute the model rate at the locations of the AGN. ni = model_rate_opted(param, cluster_id, radial_r500_maxr, j_band_abs_mag) # Compute the full model along the radial direction. # The completeness weight is set to `1` as the model in the integration is assumed to be complete. n_mesh = model_rate_opted(param, cluster_id, rall, jall, integral=True) # Use a spatial poisson point-process log-likelihood cluster_lnlike = (np.sum(np.log(ni * radial_r500_maxr) * agn_membership) - completeness_ratio * trap_weight(n_mesh * 2 * np.pi * rall, rall, weight=gpf_all)) lnlike_list.append(cluster_lnlike) total_lnlike = np.sum(lnlike_list) return total_lnlike # For our prior, we will choose uninformative priors for all our parameters and for the constant field value we will use # a gaussian distribution set by the values obtained from the SDWFS data set. def lnprior(params): # Set our hyperparameters # h_rc = 0.25 # h_rc_err = 0.1 h_C = 0.333 h_C_err = 0.024 # Extract our parameters if args.cluster_only: theta, eta, zeta, beta, rc = params C = 0. elif args.background_only: C, = params theta, eta, zeta, beta, rc = [0.]*5 else: theta, eta, zeta, beta, rc, C = params # Define all priors if (0.0 <= theta <= np.inf and -6. <= eta <= 6. and -3. <= zeta <= 3. and -3. <= beta <= 3. and 0.05 <= rc <= 0.5 and 0.0 <= C < np.inf): theta_lnprior = 0.0 eta_lnprior = 0.0 beta_lnprior = 0.0 zeta_lnprior = 0.0 # rc_lnprior = -0.5 * np.sum((rc - h_rc) ** 2 / h_rc_err ** 2) rc_lnprior = 0.0 if args.cluster_only: C_lnprior = 0. else: C_lnprior = -0.5 * np.sum((C - h_C) ** 2 / h_C_err ** 2) # C_lnprior = 0.0 else: theta_lnprior = -np.inf eta_lnprior = -np.inf beta_lnprior = -np.inf zeta_lnprior = -np.inf rc_lnprior = -np.inf C_lnprior = -np.inf # Assuming all parameters are independent the joint log-prior is total_lnprior = theta_lnprior + eta_lnprior + zeta_lnprior + beta_lnprior + rc_lnprior + C_lnprior return total_lnprior # Define the log-posterior probability def lnpost(params): lp = lnprior(params) # Check the finiteness of the prior. if not np.isfinite(lp): return -np.inf return lp + lnlike(params) hcc_prefix = '/work/mei/bfloyd/SPT_AGN/' # hcc_prefix = '' parser = ArgumentParser(description='Runs MCMC sampler') parser.add_argument('--restart', help='Allows restarting the chain in place rather than resetting the chain.', action='store_true') parser.add_argument('name', help='Chain name', type=str) parser.add_argument('--no-luminosity', action='store_true', help='Deactivate luminosity dependence in model.') parser.add_argument('--no-selection-membership', action='store_true', help='Deactivate fuzzy degree of membership for AGN selection in likelihood function.') parser.add_argument('--no-completeness', action='store_true', help='Deactivate photometric completeness correction in likelihood function.') parser.add_argument('--poisson-only', action='store_true', help='Use a pure Poisson likelihood function with a model that has no luminosity dependence.') parser_grp = parser.add_mutually_exclusive_group() parser_grp.add_argument('--cluster-only', action='store_true', help='Sample only on cluster objects.') parser_grp.add_argument('--background-only', action='store_true', help='Sample only on background objects.') args = parser.parse_args() # Load in the prepossessing file preprocess_file = os.path.abspath('SPTcl_IRAGN_preprocessing.json') with open(preprocess_file, 'r') as f: catalog_dict = json.load(f) # Go through the catalog dictionary and recasting the cluster's mass and r500 to quantities and recast all the list-type # data to numpy arrays for cluster_id, cluster_info in catalog_dict.items(): catalog_dict[cluster_id]['m500'] = cluster_info['m500'] * u.Msun catalog_dict[cluster_id]['r500'] = cluster_info['r500'] * u.Mpc for data_name, data in filter(lambda x: isinstance(x[1], list), cluster_info.items()): catalog_dict[cluster_id][data_name] = np.array(data) # Set up our MCMC sampler. # Set the number of dimensions for the parameter space and the number of walkers to use to explore the space.
ndim = 5 if args.cluster_only else (1 if args.background_only else 6)
random_line_split
client_enum_component_type.go
00 ComponentsVendorCategoriesKey = 401 ComponentsVendorSalesKey = 402 ComponentsKiosksKey = 500 ComponentsCurrencyLookupsKey = 600 ComponentsPresentationNodesKey = 700 ComponentsCollectiblesKey = 800 ComponentsRecordsKey = 900 ComponentsTransitoryKey = 1000 ) type ComponentType string func ComponentTypesManyS(keys ...int) (out []string) { for _, e := range ComponentTypesMany(keys...) { out = append(out, string(e)) } return out } func ComponentTypesMany(keys ...int) (out []ComponentType) { for _, key := range keys { eout, _, _ := ComponentTypesE(key) out = append(out, eout) } return out } func ComponentTypes(key int) ComponentType { out, _, _ := ComponentTypesE(key) return out } func ComponentTypesE(key int) (ComponentType, string, error) { switch key { case ComponentsNoneKey: return "None", "", nil case ComponentsProfilesKey: description := ` Profiles is the most basic component, only relevant when calling GetProfile. This returns basic information about the profile, which is almost nothing: - a list of characterIds, - some information about the last time you logged in, and; - that most sobering statistic: how long you've played. ` return "Profiles", description, nil case ComponentsVendorReceiptsKey: description := ` Only applicable for GetProfile, this will return information about receipts for refundable vendor items. ` return "VendorReceipts", description, nil case ComponentsProfileInventoriesKey: description := ` Asking for this will get you the profile-level inventories, such as your Vault buckets (yeah, the Vault is really inventory buckets located on your Profile) ` return "ProfileInventories", description, nil case ComponentsProfileCurrenciesKey: description := ` This will get you a summary of items on your Profile that we consider to be "currencies", such as Glimmer. I mean, if there's Glimmer in Destiny 2. I didn't say there was Glimmer. ` return "ProfileCurrencies", description, nil case ComponentsProfileProgressionKey: description := ` This will get you any progression-related information that exists on a Profile-wide level, across all characters. ` return "ProfileProgression", description, nil case ComponentsPlatformSilverKey: description := ` This will get you information about the silver that this profile has on every platform on which it plays. You may only request this component for the logged in user's Profile, and will not receive it if you request it for another Profile. ` return "PlatformSilver", description, nil case ComponentsCharactersKey: description := ` This will get you summary info about each of the characters in the profile. ` return "Characters", description, nil case ComponentsCharacterInventoriesKey: description := ` This will get you information about any non-equipped items on the character or character(s) in question, if you're allowed to see it. You have to either be authenticated as that user, or that user must allow anonymous viewing of their non-equipped items in Bungie.Net settings to actually get results. ` return "CharacterInventories", description, nil case ComponentsCharacterProgressionsKey: description := ` This will get you information about the progression (faction, experience, etc... "levels") relevant to each character. You have to either be authenticated as that user, or that user must allow anonymous viewing of their progression info in Bungie.Net settings to actually get results. ` return "CharacterProgressions", description, nil case CharacterRenderDataKey: description := ` This will get you just enough information to be able to render the character in 3D if you have written a 3D rendering library for Destiny Characters, or "borrowed" ours. It's okay, I won't tell anyone if you're using it. I'm no snitch. (actually, we don't care if you use it - go to town) ` return "RenderData", description, nil case ComponentsCharacterActivitiesKey: description := ` This will return info about activities that a user can see and gating on it, if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info. Note that the data returned by this can be unfortunately problematic and relatively unreliable in some cases. We'll eventually work on making it more consistently reliable. ` return "CharacterActivities", description, nil case ComponentsCharacterEquipmentKey: description := ` This will return info about the equipped items on the character(s). Everyone can see this. ` return "CharacterEquipment", description, nil case ComponentsItemInstancesKey: description := ` This will return basic info about instanced items - whether they can be equipped, their tracked status, and some info commonly needed in many places (current damage type, primary stat value, etc) ` return "ItemInstances", description, nil case ComponentsItemObjectivesKey: description := ` Items can have Objectives (DestinyObjectiveDefinition) bound to them. If they do, this will return info for items that have such bound objectives. ` return "ItemObjectives", description, nil case ComponentsItemPerksKey: description := ` Items can have perks (DestinyPerkDefinition). If they do, this will return info for what perks are active on items. ` return "ItemPerks", description, nil case ComponentsItemRenderDataKey: description := ` If you just want to render the weapon, this is just enough info to do that rendering. ` return "ItemRenderData", description, nil case ComponentsItemStatsKey: description := ` Items can have stats, like rate of fire. Asking for this component will return requested item's stats if they have stats. ` return "ItemStats", description, nil case ComponentsItemSocketsKey: description := ` Items can have sockets, where plugs can be inserted. Asking for this component will return all info relevant to the sockets on items that have them. ` return "ItemSockets", description, nil case ComponentsItemTalentGridsKey: description := ` Items can have talent grids, though that matters a lot less frequently than it used to. Asking for this component will return all relevant info about activated Nodes and Steps on this talent grid, like the good ol' days. ` return "ItemTalentGrids", description, nil case ComponentsItemCommonDataKey: description := ` Items that *aren't* instanced still have important information you need to know: - how much of it you have,; - the itemHash so you can look up their DestinyInventoryItemDefinition,; - whether they're locked,; - etc... Both instanced and non-instanced items will have these properties. You will get this automatically with Inventory components - you only need to pass this when calling GetItem on a specific item. ` return "ItemCommonData", description, nil case ComponentsItemPlugStatesKey: description := ` Items that are "Plugs" can be inserted into sockets. This returns statuses about those plugs and why they can/can't be inserted. I hear you giggling, there's nothing funny about inserting plugs. Get your head out of the gutter and pay attention! ` return "ItemPlugStates", description, nil case ComponentsVendorsKey: description := ` When obtaining vendor information, this will return summary information about the Vendor or Vendors being returned. ` return "Vendors", description, nil case ComponentsVendorCategoriesKey: description := ` When obtaining vendor information, this will return information about the categories of items provided by the Vendor. ` return "VendorCategories", description, nil case ComponentsVendorSalesKey: description := ` When obtaining vendor information, this will return the information about items being sold by the Vendor. ` return "VendorSales", description, nil case ComponentsKiosksKey: description := ` Asking for this component will return you the account's Kiosk statuses: that is, what items have been filled out/acquired. But only if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info. ` return "Kiosks", description, nil case ComponentsCurrencyLookupsKey: description := ` A "shortcut" component that will give you all of the item hashes/quantities of items that the requested character can use to determine if an action (purchasing, socket insertion) has the required currency. (recall that all currencies are just items, and that some vendor purchases require items that you might not traditionally consider to be a "currency", like plugs/mods!) ` return "CurrencyLookups", description, nil case ComponentsPresentationNodesKey: description := ` Returns summary status information about all "Presentation Nodes". See DestinyPresentationNodeDefinition for more details, but the gist is that these are entities used by the game UI to bucket Collectibles and Records into a hierarchy of categories. You may ask for and use this data if you want to perform similar bucketing in your own UI: or you can skip it and roll your own. ` return "PresentationNodes", description, nil case ComponentsCollectiblesKey: description := ` Returns summary status information about all "Collectibles".
These are records of what items you've discovered while playing Destiny, and some other basic information. For detailed information, you will have to call a separate endpoint devoted to the purpose. `
random_line_split