file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
dicer.py | 2, cv2.LINE_AA)
pos_img = np.zeros(shape=[100, 100, 1], dtype=np.uint8)
cv2.imshow('Press any key to exit', grey)
print('Error - stopping')
cv2.waitKey() # Taste drücken, zum beenden
elif GPIO.input(18) == 0 and gpios == True: # Temperaturreais prüfen wenn RPi vorhanden
print('Temperature relay is offline, stopping')
else:
dicer_ready = True
global_steptime = 0.00015 # Abstand zwischen den Schritten
# blobdetektor konfigurieren
blob_params = cv2.SimpleBlobDetector_Params()
blob_params.filterByColor = True
blob_params.filterByArea = True
blob_params.minArea = 100
blob_params.filterByCircularity = True
blob_params.minCircularity = 0.7
blob_params.filterByInertia = False
blob_params.filterByConvexity = False
all_numbers = [0] * 9 # [one, two, three, four, five, six, errorcnt, rollnumber, std_dev
def interr(channel):
global gpios
global dicer_ready
global interrupted
gpios = False
dicer_ready = False
interrupted = True
print('Interrupt')
def step_plus(steptime):
GPIO.output(17, GPIO.LOW)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
def step_minus(steptime):
GPIO.output(17, GPIO.HIGH)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
GPIO.output(17, GPIO.LOW)
def clock(now):
time_seconds = int((time.time() - now))
t_hr = int(time_seconds / 3600)
t_min = int(time_seconds / 60) - (t_hr * 60)
t_sec = int(time_seconds) - (t_min * 60)
showTime = str(t_hr) + ':' + str(t_min).zfill(2)
print(showTime)
return showTime
def write_email(numbers, ctime, error, header_name):
server = smtplib.SMTP('SERVERADRESSE', PORTNR)
server.starttls()
server.login('LOGIN-BENUTZERNAME', 'PASSWORT')
msg = MIMEMultipart()
msg['From'] = 'ABSENDER'
msg['To'] = 'EMPFAENGER'
if error:
msg['Subject'] = 'Error'
else:
msg['Cc'] = 'KOPIE ADRESSE'
msg['Subject'] = header_name
message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]) + ',' + str(numbers[3]) + ',' + str(
numbers[4]) + ',' + str(numbers[5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(
numbers[7]) + '\n' + 'Zeit: '+ str(ctime)
msg.attach(MIMEText(message))
server.send_message(msg)
def logging(numbers, ctime, log_name):
file = open(log_name, 'w')
file.write('Einz:' + str(numbers[0]) + '\n')
file.write('Zwei:' + str(numbers[1]) + '\n')
file.write("Drei: " + str(numbers[2]) + '\n')
file.write("Vier: " + str(numbers[3]) + '\n')
file.write("Fuenf: " + str(numbers[4]) + '\n')
file.write("Sechs: " + str(numbers[5]) + '\n')
file.write('Fehler: ' + str(numbers[6]) + '\n')
file.write('Gesamt: ' + str(numbers[7]) + '\n')
file.write('Standardabw: ' + str(numbers[8]) + '\n')
file.write('Zeit: ' + str(ctime) + '\n')
file.close()
def get_image | or i in range(5):
ret, frame = cap.read()
#cv2.imwrite('frame.png',frame)
# Bildausschnitte von Würfel und Positionserkennung
y = 160
h = 240
x = 220
w = 240
dice_image = frame[y:y + h, x:x + w]
grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)
#cv2.imshow('input', grey)
#cv2.imwrite('real_image.png',frame)
y = 120
h = 15
pos_img = frame[y:y + h, x:x + w]
pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)
#cv2.imwrite('pos_raw.png',pos_img)
ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)
#cv2.imshow('pos', pos_img)
#cv2.imwrite('pos.png',pos_img)
return grey, pos_img
def hough_detector(input_img):
#cv2.imshow('hough_input', input_image)
img = cv2.medianBlur(input_img, 5) # Bild gätten mit Gauß
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # Farbraum umwandeln (nur für die farbigen Kreise)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200, param2=10, minRadius=5,
maxRadius=25) # param1: Schwellenwert, param2: muss man ausprobieren
h_number = 0
try: # Kreise zählen und markieren
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
# draw the outer circle
cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
h_number += 1
except:
print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')
cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 50), 2, cv2.LINE_AA)
cv2.imshow('hough detector', cimg)
cv2.imwrite('hough detector.png', cimg)
return h_number
def img_processing(image_input): # Bild vorbereitung
image_input = cv2.medianBlur(image_input, 3) # Bild gätten mit Gauß
ret, binary_image = cv2.threshold(image_input, 220, 255,
cv2.THRESH_BINARY) # Schwellenwertbild
#cv2.imwrite('binary1.png', binary_image)
if darknumbers: # Wenn dunkle Würfelaugen, dann Bereich um den Würfel weiß machen
w = binary_image.shape[1] #y
h = binary_image.shape[0] #x
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(binary_image, mask, (0,0), 255);
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(binary_image, mask, (h,w), 255);
else:
binary_image = cv2.bitwise_not(binary_image) # Bei hellen Würfelaugen reicht invertieren des Bildes
#cv2.imwrite('binary2.png', binary_image)
kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, | s():
f | identifier_name |
dicer.py | ptime):
GPIO.output(17, GPIO.LOW)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
def step_minus(steptime):
GPIO.output(17, GPIO.HIGH)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
GPIO.output(17, GPIO.LOW)
def clock(now):
time_seconds = int((time.time() - now))
t_hr = int(time_seconds / 3600)
t_min = int(time_seconds / 60) - (t_hr * 60)
t_sec = int(time_seconds) - (t_min * 60)
showTime = str(t_hr) + ':' + str(t_min).zfill(2)
print(showTime)
return showTime
def write_email(numbers, ctime, error, header_name):
server = smtplib.SMTP('SERVERADRESSE', PORTNR)
server.starttls()
server.login('LOGIN-BENUTZERNAME', 'PASSWORT')
msg = MIMEMultipart()
msg['From'] = 'ABSENDER'
msg['To'] = 'EMPFAENGER'
if error:
msg['Subject'] = 'Error'
else:
msg['Cc'] = 'KOPIE ADRESSE'
msg['Subject'] = header_name
message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]) + ',' + str(numbers[3]) + ',' + str(
numbers[4]) + ',' + str(numbers[5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(
numbers[7]) + '\n' + 'Zeit: '+ str(ctime)
msg.attach(MIMEText(message))
server.send_message(msg)
def logging(numbers, ctime, log_name):
file = open(log_name, 'w')
file.write('Einz:' + str(numbers[0]) + '\n')
file.write('Zwei:' + str(numbers[1]) + '\n')
file.write("Drei: " + str(numbers[2]) + '\n')
file.write("Vier: " + str(numbers[3]) + '\n')
file.write("Fuenf: " + str(numbers[4]) + '\n')
file.write("Sechs: " + str(numbers[5]) + '\n')
file.write('Fehler: ' + str(numbers[6]) + '\n')
file.write('Gesamt: ' + str(numbers[7]) + '\n')
file.write('Standardabw: ' + str(numbers[8]) + '\n')
file.write('Zeit: ' + str(ctime) + '\n')
file.close()
def get_images():
for i in range(5):
ret, frame = cap.read()
#cv2.imwrite('frame.png',frame)
# Bildausschnitte von Würfel und Positionserkennung
y = 160
h = 240
x = 220
w = 240
dice_image = frame[y:y + h, x:x + w]
grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)
#cv2.imshow('input', grey)
#cv2.imwrite('real_image.png',frame)
y = 120
h = 15
pos_img = frame[y:y + h, x:x + w]
pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)
#cv2.imwrite('pos_raw.png',pos_img)
ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)
#cv2.imshow('pos', pos_img)
#cv2.imwrite('pos.png',pos_img)
return grey, pos_img
def hough_detector(input_img):
#cv2.imshow('hough_input', input_image)
img = cv2.medianBlur(input_img, 5) # Bild gätten mit Gauß
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # Farbraum umwandeln (nur für die farbigen Kreise)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200, param2=10, minRadius=5,
maxRadius=25) # param1: Schwellenwert, param2: muss man ausprobieren
h_number = 0
try: # Kreise zählen und markieren
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
# draw the outer circle
cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
h_number += 1
except:
print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')
cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 50), 2, cv2.LINE_AA)
cv2.imshow('hough detector', cimg)
cv2.imwrite('hough detector.png', cimg)
return h_number
def img_processing(image_input): # Bild vorbereitung
image_input = cv2.medianBlur(image_input, 3) # Bild gätten mit Gauß
ret, binary_image = cv2.threshold(image_input, 220, 255,
cv2.THRESH_BINARY) # Schwellenwertbild
#cv2.imwrite('binary1.png', binary_image)
if darknumbers: # Wenn dunkle Würfelaugen, dann Bereich um den Würfel weiß machen
w = binary_image.shape[1] #y
h = binary_image.shape[0] #x
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(binary_image, mask, (0,0), 255);
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(binary_image, mask, (h,w), 255);
else:
binary_image = cv2.bitwise_not(binary_image) # Bei hellen Würfelaugen reicht invertieren des Bildes
#cv2.imwrite('binary2.png', binary_image)
kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0]], dtype=np.uint8) # Kreisförmige Maske erzeugen
dilate = cv2.dilate(binary_image,kernel_round, iterations=3) # Dilatation anwenden
erode = cv2.erode(dilate, kernel_round, iterations=2) # Erosion anwenden
return erode
def counting(image, all_numbers, dice_image, raw_numbers_name):
one = all_numbers[0]
| two = all_numbers[1]
three = all_numbers[2]
four = all_numbers[3]
five = all_numbers[4]
six = all_numbers[5]
errorcnt = all_numbers[6]
success_rolls= all_numbers[7]
detector = cv2.SimpleBlobDetector_create(blob_params)
keypoints = detector.detect(image)
img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
blob_number = 0
for i in keypoints[0:]:
blob_number = blob_number + 1
cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
cv2.LINE_AA)
| identifier_body |
|
dicer.py |
log_name = 'log_seite2' # Name der Log Datei (Zusammenfassung der Messreihe): Wird NICHT fortgesetzt
raw_numbers_name = 'raw_seite2' # Name der Datei, in der alle Würfe einzeln gespeichert werden: Wird fortgesetzt
email_header = 'dicer - seite2' # Emailbetreff
darknumbers = False # Dunkle Würfelaugen?
send_email = True # Email mit Messdaten versenden?
email_log_number = 6000 # Nach wie vielen Würfen soll jeweils eine Email geschrieben werden?
error_logging = True #Bild bei Fehler speichern?
measures = 18000 #Anzahl der Messungen: -1 für unendlich
#Uhrzeit, wenn automatisch beendet werden soll (funktionert, ist aber gerade deaktiviert: Zeile 311):
#endtime_hr = 22
#endtime_min = 45
cap = cv2.VideoCapture(0) # Bildquelle: (Zahl ändern, falls mehrere Kameras angeschlossen sind (auch interne Webcams))
###########################################################################################################################
print('Setting up...')
interrupted = False
dicer_ready = False
ret, frame = cap.read() # Test, ob Kamera funktionert
if ret is not True: #Wenn Kamera nicht geht, Dummy Image laden
dicer_ready = False
grey = cv2.imread('dummy_image.png', 0)
cv2.putText(grey, 'NO CAMERA', (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
pos_img = np.zeros(shape=[100, 100, 1], dtype=np.uint8)
cv2.imshow('Press any key to exit', grey)
print('Error - stopping')
cv2.waitKey() # Taste drücken, zum beenden
elif GPIO.input(18) == 0 and gpios == True: # Temperaturreais prüfen wenn RPi vorhanden
print('Temperature relay is offline, stopping')
else:
dicer_ready = True
global_steptime = 0.00015 # Abstand zwischen den Schritten
# blobdetektor konfigurieren
blob_params = cv2.SimpleBlobDetector_Params()
blob_params.filterByColor = True
blob_params.filterByArea = True
blob_params.minArea = 100
blob_params.filterByCircularity = True
blob_params.minCircularity = 0.7
blob_params.filterByInertia = False
blob_params.filterByConvexity = False
all_numbers = [0] * 9 # [one, two, three, four, five, six, errorcnt, rollnumber, std_dev
def interr(channel):
global gpios
global dicer_ready
global interrupted
gpios = False
dicer_ready = False
interrupted = True
print('Interrupt')
def step_plus(steptime):
GPIO.output(17, GPIO.LOW)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
def step_minus(steptime):
GPIO.output(17, GPIO.HIGH)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
GPIO.output(17, GPIO.LOW)
def clock(now):
time_seconds = int((time.time() - now))
t_hr = int(time_seconds / 3600)
t_min = int(time_seconds / 60) - (t_hr * 60)
t_sec = int(time_seconds) - (t_min * 60)
showTime = str(t_hr) + ':' + str(t_min).zfill(2)
print(showTime)
return showTime
def write_email(numbers, ctime, error, header_name):
server = smtplib.SMTP('SERVERADRESSE', PORTNR)
server.starttls()
server.login('LOGIN-BENUTZERNAME', 'PASSWORT')
msg = MIMEMultipart()
msg['From'] = 'ABSENDER'
msg['To'] = 'EMPFAENGER'
if error:
msg['Subject'] = 'Error'
else:
msg['Cc'] = 'KOPIE ADRESSE'
msg['Subject'] = header_name
message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]) + ',' + str(numbers[3]) + ',' + str(
numbers[4]) + ',' + str(numbers[5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(
numbers[7]) + '\n' + 'Zeit: '+ str(ctime)
msg.attach(MIMEText(message))
server.send_message(msg)
def logging(numbers, ctime, log_name):
file = open(log_name, 'w')
file.write('Einz:' + str(numbers[0]) + '\n')
file.write('Zwei:' + str(numbers[1]) + '\n')
file.write("Drei: " + str(numbers[2]) + '\n')
file.write("Vier: " + str(numbers[3]) + '\n')
file.write("Fuenf: " + str(numbers[4]) + '\n')
file.write("Sechs: " + str(numbers[5]) + '\n')
file.write('Fehler: ' + str(numbers[6]) + '\n')
file.write('Gesamt: ' + str(numbers[7]) + '\n')
file.write('Standardabw: ' + str(numbers[8]) + '\n')
file.write('Zeit: ' + str(ctime) + '\n')
file.close()
def get_images():
for i in range(5):
ret, frame = cap.read()
#cv2.imwrite('frame.png',frame)
# Bildausschnitte von Würfel und Positionserkennung
y = 160
h = 240
x = 220
w = 240
dice_image = frame[y:y + h, x:x + w]
grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)
#cv2.imshow('input', grey)
#cv2.imwrite('real_image.png',frame)
y = 120
h = 15
pos_img = frame[y:y + h, x:x + w]
pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)
#cv2.imwrite('pos_raw.png',pos_img)
ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)
#cv2.imshow('pos', pos_img)
#cv2.imwrite('pos.png',pos_img)
return grey, pos_img
def hough_detector(input_img):
#cv2.imshow('hough_input', input_image)
img = cv2.medianBlur(input_img, 5) # Bild gätten mit Gauß
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # Farbraum umwandeln (nur für die farbigen Kreise)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200, param2=10, minRadius=5,
maxRadius=25) # param1: Schwellenwert, param2: muss man ausprobieren
h_number = 0
try: # Kreise zählen und markieren
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
# draw the outer circle
cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
h_number += 1
except:
print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')
cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 50), 2, cv2.LINE_AA)
cv2.imshow('hough detector', cimg)
cv2.imwrite('hough detector.png', cimg)
return h_number
def img_processing(image_input): # Bild vorbereitung
image_input = cv2.medianBlur(image_input, 3) # Bild gätten mit Gauß
ret, binary_image = cv2.threshold(image_input, 220, 255,
cv2.THRESH_BINARY) # Schwellenwertbild
#cv2.imwrite('binary1.png', binary_image)
if darknumbers: # Wenn dunkle Würfelaugen, dann Bereich um den Würfel weiß machen
w = binary_image.shape[1] #y
h = binary_image.shape[0] # | random_line_split |
||
repository.go | Requested record was not found")
type conflictErr struct {
IDs []string
}
func (e conflictErr) Error() string {
return fmt.Sprintf("Operation failed due to conflicts with: %s", e.IDs)
}
type repository struct {
db *sqlx.DB
closers []io.Closer
listMoodsAsc, listMoodsDesc, findMood, deleteMood, setMood *sqlx.NamedStmt
listConvosAsc, listConvosDesc, insertConvo, getConvo, deleteConvo *sqlx.NamedStmt
findConvoLines, findMoodLines, insertLine, getLine, deleteLine *sqlx.NamedStmt
}
type listArgs struct {
Before, After string
Limit int
}
var builtinMoods = []*Mood{
{"default", "oo", " ", false, 0},
{"borg", "==", " ", false, 0},
{"dead", "xx", "U ", false, 0},
{"greedy", "$$", " ", false, 0},
{"stoned", "**", "U ", false, 0},
{"tired", "--", " ", false, 0},
{"wired", "OO", " ", false, 0},
{"young", "..", " ", false, 0},
}
type moodRec struct {
IntID int
Mood
}
type lineRec struct {
Eyes, Tongue sql.NullString
Line
}
type convoRec struct {
IntID int
Conversation
}
func newRepository(db *sqlx.DB) (*repository, error) {
r := repository{db: db}
stmts := map[string]**sqlx.NamedStmt{
findMood: &r.findMood,
setMood: &r.setMood,
deleteMood: &r.deleteMood,
insertConvo: &r.insertConvo,
getConvo: &r.getConvo,
deleteConvo: &r.deleteConvo,
findConvoLines: &r.findConvoLines,
findMoodLines: &r.findMoodLines,
insertLine: &r.insertLine,
getLine: &r.getLine,
deleteLine: &r.deleteLine,
fmt.Sprintf(listConvos, ">", "ASC"): &r.listConvosAsc,
fmt.Sprintf(listConvos, "<", "DESC"): &r.listConvosDesc,
fmt.Sprintf(listMoods, ">", "ASC"): &r.listMoodsAsc,
fmt.Sprintf(listMoods, "<", "DESC"): &r.listMoodsDesc,
}
for sqlStr, stmt := range stmts {
prepped, err := db.PrepareNamed(sqlStr)
*stmt = prepped
if err != nil {
return nil, fmt.Errorf("preparing statement %q: %v", sqlStr, err)
}
r.closers = append(r.closers, prepped)
}
return &r, nil
}
func (r *repository) Close() error {
for _, closer := range r.closers {
if err := closer.Close(); err != nil {
return fmt.Errorf("closing %s: %v", closer, err)
}
}
return nil
}
func (r *repository) ListMoods(userID string, args listArgs) ([]Mood, bool, error) {
sources := make([]func(bool, listArgs) ([]Mood, bool, error), 2)
var sourceNames []string
userSrc := func(asc bool, args listArgs) ([]Mood, bool, error) {
return r.listUserMoods(userID, asc, args)
}
var asc bool
if sortAsc(args) {
asc = true
sources[0] = userSrc
sources[1] = r.listBuiltinMoods
sourceNames = []string{"user", "built-in"}
} else {
asc = false
sources[1] = userSrc
sources[0] = r.listBuiltinMoods
sourceNames = []string{"built-in", "user"}
}
moods, _, err := sources[0](asc, args)
if err != nil {
if err != errCursorNotFound {
return nil, false, fmt.Errorf("listing %s moods %v", sourceNames[0], err)
}
} else {
args.Limit = args.Limit - len(moods)
args.Before = ""
args.After = ""
if len(moods) == args.Limit {
return moods, true, nil
}
}
moreMoods, hasMore, err := sources[1](asc, args)
if err != nil {
if err != errCursorNotFound {
err = fmt.Errorf("listing %s moods %v", sourceNames[1], err)
}
return nil, false, err
}
for _, mood := range moreMoods {
moods = append(moods, mood)
}
return moods, hasMore, nil
}
func (r *repository) listBuiltinMoods(asc bool, args listArgs) ([]Mood, bool, error) {
var moods []Mood
cursor := args.After
if !asc {
cursor = args.Before
}
limit := args.Limit + 1
found := args.After == "" && args.Before == ""
for i := 0; i < len(builtinMoods); i++ {
var mood *Mood
if asc {
mood = builtinMoods[i]
} else {
mood = builtinMoods[len(builtinMoods)-1-i]
}
if found {
moods = append(moods, *mood)
if len(moods) == limit {
break
}
} else if mood.Name == cursor {
found = true
}
}
if !found {
return nil, false, errCursorNotFound
}
hasMore := len(moods) > args.Limit
if hasMore {
moods = moods[:args.Limit]
}
return moods, hasMore, nil
}
func (r *repository) listUserMoods(userID string, asc bool, args listArgs) ([]Mood, bool, error) {
var moods []Mood
cursor := args.After
query := r.listMoodsAsc
if !asc {
cursor = args.Before
query = r.listMoodsDesc
}
cursorID := -1
if cursor != "" {
var mood moodRec
err := r.findMood.Get(&mood, struct{ UserID, Name string }{userID, cursor})
if err == sql.ErrNoRows {
return nil, false, errCursorNotFound
} else if err != nil {
return nil, false, fmt.Errorf("finding mood cursor %q for user %q: %v", cursor, userID, err)
} else {
cursorID = mood.IntID
}
}
rows, err := query.Queryx(struct {
UserID string
CursorID, Limit int
}{userID, cursorID, args.Limit + 1})
if err != nil {
return nil, false, fmt.Errorf("listing user moods: %v", err)
}
defer rows.Close()
for rows.Next() {
var rec moodRec
if err := rows.StructScan(&rec); err != nil {
return nil, false, fmt.Errorf("scanning user mood: %v", err)
}
rec.UserDefined = true
rec.id = rec.IntID
moods = append(moods, rec.Mood)
}
hasMore := len(moods) > args.Limit
if hasMore {
moods = moods[:args.Limit]
}
return moods, hasMore, nil
}
func (r *repository) GetMood(userID, name string) (*Mood, error) {
for _, builtin := range builtinMoods {
if builtin.Name == name |
}
var rec moodRec
err := r.findMood.Get(&rec, struct{ UserID, Name string }{userID, name})
if err == sql.ErrNoRows {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("getting user mood: %v", err)
}
rec.UserDefined = true
rec.id = rec.IntID
return &rec.Mood, nil
}
func (r *repository) SetMood(userID string, mood *Mood) error {
if isBuiltin(mood.Name) {
return errBuiltinMood
}
var id int
err := r.setMood.QueryRow(struct {
UserID, Name, Eyes, Tongue string
}{
userID, mood.Name, mood.Eyes, mood.Tongue,
}).Scan(&id)
if err != nil {
return fmt.Errorf("upserting user mood: %v", err)
}
if id == 0 {
return fmt.Errorf("unable to update mood %q", mood.Name)
}
mood.id = id
return nil
}
func (r *repository) DeleteMood(userID, name string) error {
if isBuiltin(name) {
| {
// Copy to prevent modifying builtins by the caller
mood := *builtin
return &mood, nil
} | conditional_block |
repository.go | ) error {
if isBuiltin(mood.Name) {
return errBuiltinMood
}
var id int
err := r.setMood.QueryRow(struct {
UserID, Name, Eyes, Tongue string
}{
userID, mood.Name, mood.Eyes, mood.Tongue,
}).Scan(&id)
if err != nil {
return fmt.Errorf("upserting user mood: %v", err)
}
if id == 0 {
return fmt.Errorf("unable to update mood %q", mood.Name)
}
mood.id = id
return nil
}
func (r *repository) DeleteMood(userID, name string) error {
if isBuiltin(name) {
return errBuiltinMood
}
queryArgs := struct{ UserID, Name string }{userID, name}
if err := doDelete(r.deleteMood, queryArgs); err != nil {
if dbErr, ok := err.(*pq.Error); !ok || dbErr.Code != dbErrFKViolation {
return err
}
// List the lines that are preventing us from deleting the mood.
// There's a per-user race condition here but since this is mostly
// meant to provide informative help, it's probably not worth
// wrapping the entire thing in a transaction.
var lineIDs []string
if err := r.findMoodLines.Select(&lineIDs, queryArgs); err != nil {
return fmt.Errorf("listing lines for mood %q and user %q: %v", name, userID, err)
}
return conflictErr{lineIDs}
}
return nil
}
func (r *repository) ListConversations(userID string, args listArgs) ([]Conversation, bool, error) {
var convos []Conversation
cursor := args.After
query := r.listConvosAsc
if !sortAsc(args) {
cursor = args.Before
query = r.listConvosDesc
}
cursorID := -1
if cursor != "" {
var convo convoRec
err := r.getConvo.Get(&convo, struct{ UserID, PublicID string }{userID, cursor})
if err == sql.ErrNoRows {
return nil, false, errCursorNotFound
} else if err != nil {
return nil, false, fmt.Errorf("finding conversation cursor %q for user %q: %v", cursor, userID, err)
} else {
cursorID = convo.IntID
}
}
rows, err := query.Queryx(struct {
UserID string
CursorID, Limit int
}{userID, cursorID, args.Limit + 1})
if err != nil {
return nil, false, fmt.Errorf("listing conversations for user %s: %v", userID, err)
}
defer rows.Close()
for rows.Next() {
var rec convoRec
if err := rows.StructScan(&rec); err != nil {
return nil, false, fmt.Errorf("scanning conversation: %v", err)
}
rec.id = rec.IntID
convos = append(convos, rec.Conversation)
}
hasMore := len(convos) > args.Limit
if hasMore {
convos = convos[:args.Limit]
}
return convos, hasMore, nil
}
func (r *repository) NewConversation(userID, heading string) (*Conversation, error) {
var publicID string
for i := 0; i < maxInsertRetries; i++ {
rv, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
return nil, fmt.Errorf("generating random ID: %v", err)
}
publicID = convoIDPrefix + strconv.FormatUint(rv.Uint64(), 36)
var id int
err = r.insertConvo.QueryRow(struct {
PublicID, UserID, Heading string
}{publicID, userID, heading}).Scan(&id)
if err == nil {
return &Conversation{
ID: publicID,
Heading: heading,
id: id,
}, nil
}
dbErr, ok := err.(*pq.Error)
if !ok || dbErr.Code != dbErrDupUnique {
return nil, fmt.Errorf("inserting conversation: %v", err)
}
}
return nil, errors.New("Unable to insert a new, unique conversation")
}
func (r *repository) GetConversation(userID, convoID string) (*Conversation, error) {
var convo convoRec
err := r.getConvo.Get(&convo, struct{ UserID, PublicID string }{userID, convoID})
if err == sql.ErrNoRows {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("finding conversation %q for user %q: %v", convoID, userID, err)
}
rows, err := r.findConvoLines.Queryx(struct{ ID int }{convo.IntID})
if err != nil {
return nil, fmt.Errorf("retrieving lines for %q: %v", convoID, err)
}
defer rows.Close()
convo.Lines = make([]Line, 0)
for rows.Next() {
var rec lineRec
if err := rows.StructScan(&rec); err != nil {
return nil, fmt.Errorf("scanning line for %q: %v", convoID, err)
}
setLineMood(&rec)
if rec.mood == nil {
return nil, fmt.Errorf("line %s does not have a valid mood", rec.ID)
}
convo.Lines = append(convo.Lines, rec.Line)
}
convo.Conversation.id = convo.IntID
return &convo.Conversation, nil
}
func (r *repository) DeleteConversation(userID, convoID string) error {
if err := doDelete(r.deleteConvo, struct{ UserID, PublicID string }{userID, convoID}); err != nil {
return err
}
return nil
}
func (r *repository) InsertLine(userID, convoID string, line *Line) error {
var publicID string
var convo convoRec
err := r.getConvo.Get(&convo, struct{ UserID, PublicID string }{userID, convoID})
if err != nil {
return fmt.Errorf("finding conversation %s for user %s: %v", convoID, userID, err)
}
for i := 0; i < maxInsertRetries; i++ {
rv, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
return fmt.Errorf("generating random ID: %v", err)
}
publicID = lineIDPrefix + strconv.FormatUint(rv.Uint64(), 36)
var moodID sql.NullInt64
if line.mood.id != 0 {
moodID.Int64 = int64(line.mood.id)
moodID.Valid = true
}
_, err = r.insertLine.Exec(struct {
PublicID, Animal, Text, MoodName string
Think bool
MoodID sql.NullInt64
ConversationID int
}{
publicID, line.Animal, line.Text, line.MoodName,
line.Think,
moodID,
convo.IntID,
})
if err == nil {
line.ID = publicID
return nil
}
dbErr, ok := err.(*pq.Error)
if !ok || dbErr.Code != dbErrDupUnique {
return fmt.Errorf("inserting line: %v", err)
}
}
return errors.New("unable to insert a new, unique line")
}
func (r *repository) GetLine(userID, convoID, lineID string) (*Line, error) {
var rec lineRec
err := r.getLine.Get(&rec, struct{ UserID, ConvoID, LineID string }{userID, convoID, lineID})
if err == sql.ErrNoRows {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("getting line: %v", err)
}
setLineMood(&rec)
if rec.mood == nil {
return nil, fmt.Errorf("Line %s does not have a valid mood", rec.ID)
}
return &rec.Line, nil
}
func (r *repository) DeleteLine(userID, convoID, lineID string) error {
if err := doDelete(r.deleteLine, struct{ UserID, ConvoID, LineID string }{userID, convoID, lineID}); err != nil {
return err
}
return nil
}
func setLineMood(rec *lineRec) {
if rec.Eyes.Valid {
rec.mood = &Mood{
Name: rec.MoodName,
Eyes: rec.Eyes.String,
Tongue: rec.Tongue.String,
UserDefined: true,
}
return
}
for _, mood := range builtinMoods {
if strings.EqualFold(mood.Name, rec.MoodName) {
m := *mood
rec.mood = &m
return
}
}
}
func isBuiltin(name string) bool {
for _, builtin := range builtinMoods {
if strings.EqualFold(builtin.Name, name) {
return true
}
}
return false
}
func | sortAsc | identifier_name |
|
repository.go | ood(userID string, mood *Mood) error {
if isBuiltin(mood.Name) {
return errBuiltinMood
}
var id int
err := r.setMood.QueryRow(struct {
UserID, Name, Eyes, Tongue string
}{
userID, mood.Name, mood.Eyes, mood.Tongue,
}).Scan(&id)
if err != nil {
return fmt.Errorf("upserting user mood: %v", err)
}
if id == 0 {
return fmt.Errorf("unable to update mood %q", mood.Name)
}
mood.id = id
return nil
}
func (r *repository) DeleteMood(userID, name string) error {
if isBuiltin(name) {
return errBuiltinMood
}
queryArgs := struct{ UserID, Name string }{userID, name}
if err := doDelete(r.deleteMood, queryArgs); err != nil {
if dbErr, ok := err.(*pq.Error); !ok || dbErr.Code != dbErrFKViolation {
return err
}
// List the lines that are preventing us from deleting the mood.
// There's a per-user race condition here but since this is mostly
// meant to provide informative help, it's probably not worth
// wrapping the entire thing in a transaction.
var lineIDs []string
if err := r.findMoodLines.Select(&lineIDs, queryArgs); err != nil {
return fmt.Errorf("listing lines for mood %q and user %q: %v", name, userID, err)
}
return conflictErr{lineIDs}
}
return nil
}
func (r *repository) ListConversations(userID string, args listArgs) ([]Conversation, bool, error) {
var convos []Conversation
cursor := args.After
query := r.listConvosAsc
if !sortAsc(args) {
cursor = args.Before
query = r.listConvosDesc
}
cursorID := -1
if cursor != "" {
var convo convoRec
err := r.getConvo.Get(&convo, struct{ UserID, PublicID string }{userID, cursor})
if err == sql.ErrNoRows {
return nil, false, errCursorNotFound
} else if err != nil {
return nil, false, fmt.Errorf("finding conversation cursor %q for user %q: %v", cursor, userID, err)
} else {
cursorID = convo.IntID
}
}
rows, err := query.Queryx(struct {
UserID string
CursorID, Limit int
}{userID, cursorID, args.Limit + 1})
if err != nil {
return nil, false, fmt.Errorf("listing conversations for user %s: %v", userID, err)
}
defer rows.Close()
for rows.Next() {
var rec convoRec
if err := rows.StructScan(&rec); err != nil {
return nil, false, fmt.Errorf("scanning conversation: %v", err)
}
rec.id = rec.IntID
convos = append(convos, rec.Conversation)
}
hasMore := len(convos) > args.Limit
if hasMore {
convos = convos[:args.Limit]
}
return convos, hasMore, nil
}
func (r *repository) NewConversation(userID, heading string) (*Conversation, error) {
var publicID string
for i := 0; i < maxInsertRetries; i++ {
rv, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
return nil, fmt.Errorf("generating random ID: %v", err)
}
publicID = convoIDPrefix + strconv.FormatUint(rv.Uint64(), 36)
var id int
err = r.insertConvo.QueryRow(struct {
PublicID, UserID, Heading string
}{publicID, userID, heading}).Scan(&id)
if err == nil {
return &Conversation{
ID: publicID,
Heading: heading,
id: id,
}, nil
}
dbErr, ok := err.(*pq.Error)
if !ok || dbErr.Code != dbErrDupUnique {
return nil, fmt.Errorf("inserting conversation: %v", err)
}
}
return nil, errors.New("Unable to insert a new, unique conversation")
}
// GetConversation loads the user's conversation identified by its public
// ID, including all of its lines in query order. Returns (nil, nil) when
// no such conversation exists for the user.
func (r *repository) GetConversation(userID, convoID string) (*Conversation, error) {
	var convo convoRec
	err := r.getConvo.Get(&convo, struct{ UserID, PublicID string }{userID, convoID})
	if err == sql.ErrNoRows {
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf("finding conversation %q for user %q: %v", convoID, userID, err)
	}
	rows, err := r.findConvoLines.Queryx(struct{ ID int }{convo.IntID})
	if err != nil {
		return nil, fmt.Errorf("retrieving lines for %q: %v", convoID, err)
	}
	defer rows.Close()
	// Guarantee a non-nil slice even when the conversation has no lines.
	convo.Lines = make([]Line, 0)
	for rows.Next() {
		var rec lineRec
		if err := rows.StructScan(&rec); err != nil {
			return nil, fmt.Errorf("scanning line for %q: %v", convoID, err)
		}
		// Resolve the line's mood (user-defined or built-in); a line whose
		// mood cannot be resolved indicates inconsistent stored data.
		setLineMood(&rec)
		if rec.mood == nil {
			return nil, fmt.Errorf("line %s does not have a valid mood", rec.ID)
		}
		convo.Lines = append(convo.Lines, rec.Line)
	}
	convo.Conversation.id = convo.IntID
	return &convo.Conversation, nil
}
// DeleteConversation removes the user's conversation identified by its
// public ID. Errors from the underlying delete are returned unchanged.
func (r *repository) DeleteConversation(userID, convoID string) error {
	// The original wrapped this in a redundant `if err != nil { return err }`;
	// returning the call directly is equivalent and idiomatic.
	return doDelete(r.deleteConvo, struct{ UserID, PublicID string }{userID, convoID})
}
// InsertLine stores a new line in the user's conversation identified by
// convoID, retrying with fresh random public IDs on uniqueness collisions.
// On success the generated public ID is written back into line.ID.
//
// NOTE(review): dereferences line.mood without a nil check — assumes all
// callers resolve the mood before inserting; confirm.
func (r *repository) InsertLine(userID, convoID string, line *Line) error {
	var publicID string
	var convo convoRec
	err := r.getConvo.Get(&convo, struct{ UserID, PublicID string }{userID, convoID})
	if err != nil {
		return fmt.Errorf("finding conversation %s for user %s: %v", convoID, userID, err)
	}
	for i := 0; i < maxInsertRetries; i++ {
		rv, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
		if err != nil {
			return fmt.Errorf("generating random ID: %v", err)
		}
		// Random 63-bit value rendered in base 36 keeps the public ID short.
		publicID = lineIDPrefix + strconv.FormatUint(rv.Uint64(), 36)
		var moodID sql.NullInt64
		// A zero mood id marks a built-in mood: stored as NULL instead of a
		// foreign key into the user-mood table.
		if line.mood.id != 0 {
			moodID.Int64 = int64(line.mood.id)
			moodID.Valid = true
		}
		_, err = r.insertLine.Exec(struct {
			PublicID, Animal, Text, MoodName string
			Think bool
			MoodID sql.NullInt64
			ConversationID int
		}{
			publicID, line.Animal, line.Text, line.MoodName,
			line.Think,
			moodID,
			convo.IntID,
		})
		if err == nil {
			line.ID = publicID
			return nil
		}
		// Retry only on duplicate-key violations; anything else is fatal.
		dbErr, ok := err.(*pq.Error)
		if !ok || dbErr.Code != dbErrDupUnique {
			return fmt.Errorf("inserting line: %v", err)
		}
	}
	return errors.New("unable to insert a new, unique line")
}
// GetLine fetches a single line by its public ID within the user's
// conversation. Returns (nil, nil) when the line does not exist.
func (r *repository) GetLine(userID, convoID, lineID string) (*Line, error) {
	var rec lineRec
	err := r.getLine.Get(&rec, struct{ UserID, ConvoID, LineID string }{userID, convoID, lineID})
	if err == sql.ErrNoRows {
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf("getting line: %v", err)
	}
	// Resolve the line's mood (user-defined or built-in).
	setLineMood(&rec)
	if rec.mood == nil {
		// Error string lowercased for consistency with the identical
		// message in GetConversation and Go error-string conventions.
		return nil, fmt.Errorf("line %s does not have a valid mood", rec.ID)
	}
	return &rec.Line, nil
}
// DeleteLine removes the line identified by its public ID from the user's
// conversation. Errors from the underlying delete are returned unchanged.
func (r *repository) DeleteLine(userID, convoID, lineID string) error {
	// The original wrapped this in a redundant `if err != nil { return err }`;
	// returning the call directly is equivalent and idiomatic.
	return doDelete(r.deleteLine, struct{ UserID, ConvoID, LineID string }{userID, convoID, lineID})
}
// setLineMood resolves rec.mood from the scanned row: a user-defined mood
// when eye data was joined in, otherwise a copy of the matching built-in
// mood (name compared case-insensitively). rec.mood is left nil when no
// built-in matches.
func setLineMood(rec *lineRec) {
	if rec.Eyes.Valid {
		// The row carried user-defined mood columns; build it directly.
		rec.mood = &Mood{
			Name:        rec.MoodName,
			Eyes:        rec.Eyes.String,
			Tongue:      rec.Tongue.String,
			UserDefined: true,
		}
		return
	}
	for _, builtin := range builtinMoods {
		if !strings.EqualFold(builtin.Name, rec.MoodName) {
			continue
		}
		// Copy so callers cannot mutate the shared built-in entry.
		clone := *builtin
		rec.mood = &clone
		return
	}
}
func isBuiltin(name string) bool {
for _, builtin := range builtinMoods {
if strings.EqualFold(builtin.Name, name) {
return true
}
} | random_line_split |
||
repository.go | },
{"borg", "==", " ", false, 0},
{"dead", "xx", "U ", false, 0},
{"greedy", "$$", " ", false, 0},
{"stoned", "**", "U ", false, 0},
{"tired", "--", " ", false, 0},
{"wired", "OO", " ", false, 0},
{"young", "..", " ", false, 0},
}
// moodRec pairs a Mood with the internal integer primary key scanned from
// the database.
type moodRec struct {
	IntID int
	Mood
}

// lineRec augments a Line with the (possibly NULL) eye/tongue columns
// joined in from a user-defined mood row.
type lineRec struct {
	Eyes, Tongue sql.NullString
	Line
}

// convoRec pairs a Conversation with the internal integer primary key
// scanned from the database.
type convoRec struct {
	IntID int
	Conversation
}
// newRepository prepares every named statement the repository uses against
// db and returns a ready-to-use repository. Each successfully prepared
// statement is tracked in r.closers so Close can release it.
//
// NOTE(review): on a prepare failure, statements prepared earlier in the
// (randomly ordered) map walk are not closed before returning — confirm
// this is acceptable at the call site.
func newRepository(db *sqlx.DB) (*repository, error) {
	r := repository{db: db}
	// Map each SQL string to the repository field that should receive its
	// prepared form.
	stmts := map[string]**sqlx.NamedStmt{
		findMood:       &r.findMood,
		setMood:        &r.setMood,
		deleteMood:     &r.deleteMood,
		insertConvo:    &r.insertConvo,
		getConvo:       &r.getConvo,
		deleteConvo:    &r.deleteConvo,
		findConvoLines: &r.findConvoLines,
		findMoodLines:  &r.findMoodLines,
		insertLine:     &r.insertLine,
		getLine:        &r.getLine,
		deleteLine:     &r.deleteLine,
		fmt.Sprintf(listConvos, ">", "ASC"):  &r.listConvosAsc,
		fmt.Sprintf(listConvos, "<", "DESC"): &r.listConvosDesc,
		fmt.Sprintf(listMoods, ">", "ASC"):   &r.listMoodsAsc,
		fmt.Sprintf(listMoods, "<", "DESC"):  &r.listMoodsDesc,
	}
	for sqlStr, stmt := range stmts {
		prepped, err := db.PrepareNamed(sqlStr)
		if err != nil {
			// Fixed: validate the prepare error BEFORE storing the
			// statement; the original assigned the nil statement first.
			return nil, fmt.Errorf("preparing statement %q: %v", sqlStr, err)
		}
		*stmt = prepped
		r.closers = append(r.closers, prepped)
	}
	return &r, nil
}
// Close releases every prepared statement owned by the repository,
// stopping at (and returning) the first close error encountered.
func (r *repository) Close() error {
	for _, c := range r.closers {
		err := c.Close()
		if err == nil {
			continue
		}
		return fmt.Errorf("closing %s: %v", c, err)
	}
	return nil
}
// ListMoods returns one page of moods for the user, merging two ordered
// sources: the user's custom moods and the built-in set. Ascending pages
// list user moods first; descending pages list built-ins first. The bool
// result reports whether more moods exist beyond the returned page.
func (r *repository) ListMoods(userID string, args listArgs) ([]Mood, bool, error) {
	sources := make([]func(bool, listArgs) ([]Mood, bool, error), 2)
	var sourceNames []string
	userSrc := func(asc bool, args listArgs) ([]Mood, bool, error) {
		return r.listUserMoods(userID, asc, args)
	}
	var asc bool
	if sortAsc(args) {
		asc = true
		sources[0] = userSrc
		sources[1] = r.listBuiltinMoods
		sourceNames = []string{"user", "built-in"}
	} else {
		asc = false
		sources[1] = userSrc
		sources[0] = r.listBuiltinMoods
		sourceNames = []string{"built-in", "user"}
	}
	moods, _, err := sources[0](asc, args)
	if err != nil {
		if err != errCursorNotFound {
			return nil, false, fmt.Errorf("listing %s moods: %v", sourceNames[0], err)
		}
		// Cursor not found in the first source: fall through with args
		// untouched and let the second source resolve it.
	} else {
		// Fixed bug: the original decremented args.Limit BEFORE this
		// comparison, so a half-full first page (len == newLimit) returned
		// early with a bogus hasMore=true and too few results.
		if len(moods) == args.Limit {
			// First source filled the page; report that more remain.
			// NOTE(review): in descending order the second source is the
			// user's moods, which may be empty, so hasMore can be a false
			// positive here — behavior preserved from the original.
			return moods, true, nil
		}
		// Ask the second source for the remainder only, from its start:
		// the cursor (if any) was consumed by the first source.
		args.Limit -= len(moods)
		args.Before = ""
		args.After = ""
	}
	moreMoods, hasMore, err := sources[1](asc, args)
	if err != nil {
		if err != errCursorNotFound {
			err = fmt.Errorf("listing %s moods: %v", sourceNames[1], err)
		}
		return nil, false, err
	}
	moods = append(moods, moreMoods...)
	return moods, hasMore, nil
}
// listBuiltinMoods returns one page of the built-in moods, walked in the
// requested direction. The cursor (args.After ascending, args.Before
// descending) must name a built-in mood exactly; otherwise
// errCursorNotFound is returned. The bool result reports whether more
// built-in moods follow the returned page.
func (r *repository) listBuiltinMoods(asc bool, args listArgs) ([]Mood, bool, error) {
	cursor := args.After
	if !asc {
		cursor = args.Before
	}
	// Collect up to limit+1 entries so a further page can be detected.
	capacity := args.Limit + 1
	// Collecting starts immediately only when no cursor of either kind was
	// supplied; otherwise we skip entries until the cursor is seen.
	started := args.After == "" && args.Before == ""
	total := len(builtinMoods)
	var page []Mood
	for i := 0; i < total; i++ {
		idx := i
		if !asc {
			idx = total - 1 - i
		}
		entry := builtinMoods[idx]
		if !started {
			if entry.Name == cursor {
				started = true
			}
			continue
		}
		page = append(page, *entry)
		if len(page) == capacity {
			break
		}
	}
	if !started {
		return nil, false, errCursorNotFound
	}
	more := len(page) > args.Limit
	if more {
		page = page[:args.Limit]
	}
	return page, more, nil
}
// listUserMoods returns one page of the user's custom moods in the given
// direction using keyset pagination. The cursor (args.After when ascending,
// args.Before when descending) must name an existing user mood; otherwise
// errCursorNotFound is returned.
func (r *repository) listUserMoods(userID string, asc bool, args listArgs) ([]Mood, bool, error) {
	var moods []Mood
	cursor := args.After
	query := r.listMoodsAsc
	if !asc {
		cursor = args.Before
		query = r.listMoodsDesc
	}
	// -1 is the "no cursor" sentinel understood by the paging queries.
	cursorID := -1
	if cursor != "" {
		// Resolve the public mood name to the internal integer key used
		// for the keyset comparison inside the paging queries.
		var mood moodRec
		err := r.findMood.Get(&mood, struct{ UserID, Name string }{userID, cursor})
		if err == sql.ErrNoRows {
			return nil, false, errCursorNotFound
		} else if err != nil {
			return nil, false, fmt.Errorf("finding mood cursor %q for user %q: %v", cursor, userID, err)
		} else {
			cursorID = mood.IntID
		}
	}
	// Fetch one row beyond the requested limit so we can detect whether a
	// further page exists without a second query.
	rows, err := query.Queryx(struct {
		UserID string
		CursorID, Limit int
	}{userID, cursorID, args.Limit + 1})
	if err != nil {
		return nil, false, fmt.Errorf("listing user moods: %v", err)
	}
	defer rows.Close()
	for rows.Next() {
		var rec moodRec
		if err := rows.StructScan(&rec); err != nil {
			return nil, false, fmt.Errorf("scanning user mood: %v", err)
		}
		// Rows from this table are by definition user-defined moods.
		rec.UserDefined = true
		rec.id = rec.IntID
		moods = append(moods, rec.Mood)
	}
	hasMore := len(moods) > args.Limit
	if hasMore {
		// Trim the sentinel extra row before returning the page.
		moods = moods[:args.Limit]
	}
	return moods, hasMore, nil
}
// GetMood returns the named mood for the user: a copy of the built-in mood
// when the name matches one (case-insensitively), otherwise the user's
// custom mood, or (nil, nil) when no such mood exists.
func (r *repository) GetMood(userID, name string) (*Mood, error) {
	for _, builtin := range builtinMoods {
		// Fixed: match case-insensitively for consistency with isBuiltin
		// (used by SetMood/DeleteMood) and setLineMood, which all use
		// strings.EqualFold. With the original case-sensitive check, a
		// name like "Borg" was rejected as built-in by SetMood yet could
		// never resolve to the built-in mood here.
		if strings.EqualFold(builtin.Name, name) {
			// Copy to prevent modifying builtins by the caller
			mood := *builtin
			return &mood, nil
		}
	}
	var rec moodRec
	err := r.findMood.Get(&rec, struct{ UserID, Name string }{userID, name})
	if err == sql.ErrNoRows {
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf("getting user mood: %v", err)
	}
	rec.UserDefined = true
	rec.id = rec.IntID
	return &rec.Mood, nil
}
// SetMood creates or updates a user-defined mood. Names colliding with a
// built-in mood (case-insensitively, per isBuiltin) are rejected with
// errBuiltinMood. On success the mood's internal id is populated from the
// upserted row.
func (r *repository) SetMood(userID string, mood *Mood) error {
	if isBuiltin(mood.Name) {
		return errBuiltinMood
	}
	params := struct {
		UserID, Name, Eyes, Tongue string
	}{userID, mood.Name, mood.Eyes, mood.Tongue}
	var rowID int
	if err := r.setMood.QueryRow(params).Scan(&rowID); err != nil {
		return fmt.Errorf("upserting user mood: %v", err)
	}
	// A zero id means the upsert matched nothing usable.
	if rowID == 0 {
		return fmt.Errorf("unable to update mood %q", mood.Name)
	}
	mood.id = rowID
	return nil
}
func (r *repository) DeleteMood(userID, name string) error | {
if isBuiltin(name) {
return errBuiltinMood
}
queryArgs := struct{ UserID, Name string }{userID, name}
if err := doDelete(r.deleteMood, queryArgs); err != nil {
if dbErr, ok := err.(*pq.Error); !ok || dbErr.Code != dbErrFKViolation {
return err
}
// List the lines that are preventing us from deleting the mood.
// There's a per-user race condition here but since this is mostly
// meant to provide informative help, it's probably not worth
// wrapping the entire thing in a transaction.
var lineIDs []string
if err := r.findMoodLines.Select(&lineIDs, queryArgs); err != nil {
return fmt.Errorf("listing lines for mood %q and user %q: %v", name, userID, err)
}
| identifier_body |
|
The Movies Database.py |
#tmdb_movies = sc.textFile('tmdb_5000_movies.csv')
tmdb_movies = sc.textFile(sys.argv[1], 1)
#Remove header and split data
header = tmdb_movies.first()
#Split by , followed by non-whitespace
regex = re.compile(',(?=\\S)')
tmdb_movies = tmdb_movies.filter(lambda x: x != header).map(lambda x: regex.split(x))
print('Number of rows before cleaning: ', tmdb_movies.count())
#Filter rows based on data type and value
tmdb_movies_filtered = tmdb_movies.filter(correctRows)
#Only keep used columns (title(x[6]), Budget (x[0]), Genre(x[1]), Popularity(x[8]),
# Release Date(x[11]), Revenue(x[12]),
# Profit(x[12]-x[0]), Runtime(x[13]), Average Rating(x[18]))
tmdb_movies_filtered = tmdb_movies_filtered.map(lambda x: (x[6], float(x[0]), genre(x[1]), float(x[8]),
datetime.strptime(x[11], '%Y-%m-%d'),
float(x[12]), float(x[12])-float(x[0]),
float(x[13]), float(x[18])))
#Remove movies with profit <= 0
tmdb_movies_filtered = tmdb_movies_filtered.filter(lambda x: x[6] > 0)
print('Number of rows after cleaning: ', tmdb_movies_filtered.count())
# Updated Columns:
# title(x[0])
# Budget (x[1])
# Genre(x[2])
# Popularity(x[3])
# Release Date(x[4])
# Revenue(x[5])
# Profit (x[6])
# Runtime(x[7])
# Average Rating(x[8])
## Top 10 Most Profitable Movie Titles
profit_title = tmdb_movies_filtered.map(lambda x: (x[0], x[6])).\
reduceByKey(add)
profit_title_top = profit_title.top(20, lambda x: x[1])
print('Titles sorted based on Profit:', profit_title_top)
x = [i[0] for i in profit_title_top]
y = [i[1] for i in profit_title_top]
fig = plt.figure(figsize = (20, 5))
plt.bar(x, y, color ='rosybrown', width = 0.5)
plt.xlabel("Movie Title")
plt.xticks(rotation=70)
plt.ylabel("Profit")
plt.title("Profit per Title")
plt.show()
## Average Rating per Genre
avgRating_genre = tmdb_movies_filtered.map(lambda x: (x[8], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
avgRating_genre_top = avgRating_genre.top(20, lambda x: x[1])
print('Genres sorted based on Average Rating:', avgRating_genre_top)
x = [i[0] for i in avgRating_genre_top]
y = [i[1] for i in avgRating_genre_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='mediumpurple', width = 0.5)
plt.xlabel("Genre")
plt.xticks(rotation=40)
plt.ylabel("Average Rating")
plt.title("Average Rating per Genre")
plt.show()
## Average Profit per Genre
profit_genre = tmdb_movies_filtered.map(lambda x: (x[6], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_genre_top = profit_genre.top(20, lambda x: x[1])
print('Genres sorted based on Average Profit:', profit_genre_top)
x = [i[0] for i in profit_genre_top]
y = [i[1] for i in profit_genre_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='tan', width = 0.5)
plt.xlabel("Genre")
plt.xticks(rotation=30)
plt.ylabel("Profit")
plt.title("Average Profit per Genre")
plt.show()
## Budget vs Profit per Genre
budget_genre = tmdb_movies_filtered.map(lambda x: (x[1], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
budget_genre = budget_genre.top(20)
profit_genre = tmdb_movies_filtered.map(lambda x: (x[6], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_genre = profit_genre.top(20)
x = [i[0] for i in profit_genre]
y1 = [i[1] for i in budget_genre]
y2 = [i[1] for i in profit_genre]
df = pd.DataFrame({'Genre': x, 'Budget': y1, 'Profit': y2}, index = x).sort_values(
by = 'Profit', ascending=False)
fig, ax = plt.subplots(figsize=(20, 5))
position = np.arange(len(x))
budget = ax.bar(position, df['Budget'], width = 0.4, color = 'lightgreen')
# Same thing, but offset the x by the width of the bar.
profit = ax.bar(position + 0.4, df['Profit'], width=0.4, color = 'slategrey')
plt.legend(('Budget per Genre','Profit per Genre'))
plt.title("Budget vs Profit per Genre")
ax.set_xticks(position + 0.4 / 2)
ax.set_xticklabels(df['Genre'])
## Profit per Release Month
profit_month = tmdb_movies_filtered.map(lambda x: (x[4].month, (x[6], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_month_top = profit_month.top(12, lambda x: x[1])
print('Months sorted based on Average Profit:', profit_month_top)
x = [i[0] for i in profit_month_top]
y = [i[1] for i in profit_month_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='sandybrown', width = 0.5)
plt.xlabel("Month")
plt.ylabel("Profit")
plt.title("Average Profit per Month")
plt.show()
## Linear Regression to Predict Profit (Revenue - Budget):
Prediction is based on Budget, Genre, Popularity, Release Month and Runtime
print("Linear Regression started at: ", datetime.now().strftime("%H:%M:%S"))
#Since Genre is categorical, StringIndexer will be used to map each category to a corresponding
#numerical value
#To use StringIndexer, we will need to conver to DF first
linear_profitDF = tmdb_movies_filtered.map(lambda x: (x[6], x[1], x[2], x[3], x[4].month, x[7])).\
toDF(['label', 'Budget', 'Genre', 'Population',
'ReleaseMonth', 'Runtime'])
linear_profitDF = linear_profitDF.select('label', 'Budget', explode('Genre'), 'Population',
'ReleaseMonth', 'Runtime')
#Create StringIndexer instance
genreIndexer = StringIndexer(inputCol="col", outputCol="Genre")
#Fit the indexer and transform to generate the new DF
linear_profitDF = genreIndexer.fit(linear_profitDF).transform(linear_profitDF).drop('col')
#Use VectorAssembler to assemble all features into one column
vecAssembler = VectorAssembler().setInputCols(['Budget', 'Genre', 'Population', 'ReleaseMonth',
'Runtime']).setOutputCol('features')
linear_profitDF = vecAssembler.transform(linear_profitDF).select('label', 'features')
#Split data into train and test (40/60)
linearTrainDF, linearTestDF = linear_profitDF.randomSplit([0.4, 0.6 | if (float(p[0])>0 and float(p[8])>0 and float(p[12])>0 and float(p[13])>0 and float(p[18])>0):
if (len(p[1])>2 and len(p[9])>2):
return p | conditional_block |
|
The Movies Database.py | 2]),
# Profit(x[12]-x[0]), Runtime(x[13]), Average Rating(x[18]))
tmdb_movies_filtered = tmdb_movies_filtered.map(lambda x: (x[6], float(x[0]), genre(x[1]), float(x[8]),
datetime.strptime(x[11], '%Y-%m-%d'),
float(x[12]), float(x[12])-float(x[0]),
float(x[13]), float(x[18])))
#Remove movies with profit <= 0
tmdb_movies_filtered = tmdb_movies_filtered.filter(lambda x: x[6] > 0)
print('Number of rows after cleaning: ', tmdb_movies_filtered.count())
# Updated Columns:
# title(x[0])
# Budget (x[1])
# Genre(x[2]) | # Revenue(x[5])
# Profit (x[6])
# Runtime(x[7])
# Average Rating(x[8])
## Top 10 Most Profitable Movie Titles
profit_title = tmdb_movies_filtered.map(lambda x: (x[0], x[6])).\
reduceByKey(add)
profit_title_top = profit_title.top(20, lambda x: x[1])
print('Titles sorted based on Profit:', profit_title_top)
x = [i[0] for i in profit_title_top]
y = [i[1] for i in profit_title_top]
fig = plt.figure(figsize = (20, 5))
plt.bar(x, y, color ='rosybrown', width = 0.5)
plt.xlabel("Movie Title")
plt.xticks(rotation=70)
plt.ylabel("Profit")
plt.title("Profit per Title")
plt.show()
## Average Rating per Genre
avgRating_genre = tmdb_movies_filtered.map(lambda x: (x[8], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
avgRating_genre_top = avgRating_genre.top(20, lambda x: x[1])
print('Genres sorted based on Average Rating:', avgRating_genre_top)
x = [i[0] for i in avgRating_genre_top]
y = [i[1] for i in avgRating_genre_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='mediumpurple', width = 0.5)
plt.xlabel("Genre")
plt.xticks(rotation=40)
plt.ylabel("Average Rating")
plt.title("Average Rating per Genre")
plt.show()
## Average Profit per Genre
profit_genre = tmdb_movies_filtered.map(lambda x: (x[6], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_genre_top = profit_genre.top(20, lambda x: x[1])
print('Genres sorted based on Average Profit:', profit_genre_top)
x = [i[0] for i in profit_genre_top]
y = [i[1] for i in profit_genre_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='tan', width = 0.5)
plt.xlabel("Genre")
plt.xticks(rotation=30)
plt.ylabel("Profit")
plt.title("Average Profit per Genre")
plt.show()
## Budget vs Profit per Genre
budget_genre = tmdb_movies_filtered.map(lambda x: (x[1], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
budget_genre = budget_genre.top(20)
profit_genre = tmdb_movies_filtered.map(lambda x: (x[6], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_genre = profit_genre.top(20)
x = [i[0] for i in profit_genre]
y1 = [i[1] for i in budget_genre]
y2 = [i[1] for i in profit_genre]
df = pd.DataFrame({'Genre': x, 'Budget': y1, 'Profit': y2}, index = x).sort_values(
by = 'Profit', ascending=False)
fig, ax = plt.subplots(figsize=(20, 5))
position = np.arange(len(x))
budget = ax.bar(position, df['Budget'], width = 0.4, color = 'lightgreen')
# Same thing, but offset the x by the width of the bar.
profit = ax.bar(position + 0.4, df['Profit'], width=0.4, color = 'slategrey')
plt.legend(('Budget per Genre','Profit per Genre'))
plt.title("Budget vs Profit per Genre")
ax.set_xticks(position + 0.4 / 2)
ax.set_xticklabels(df['Genre'])
## Profit per Release Month
profit_month = tmdb_movies_filtered.map(lambda x: (x[4].month, (x[6], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_month_top = profit_month.top(12, lambda x: x[1])
print('Months sorted based on Average Profit:', profit_month_top)
x = [i[0] for i in profit_month_top]
y = [i[1] for i in profit_month_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='sandybrown', width = 0.5)
plt.xlabel("Month")
plt.ylabel("Profit")
plt.title("Average Profit per Month")
plt.show()
## Linear Regression to Predict Profit (Revenue - Budget):
Prediction is based on Budget, Genre, Popularity, Release Month and Runtime
print("Linear Regression started at: ", datetime.now().strftime("%H:%M:%S"))
#Since Genre is categorical, StringIndexer will be used to map each category to a corresponding
#numerical value
#To use StringIndexer, we will need to conver to DF first
linear_profitDF = tmdb_movies_filtered.map(lambda x: (x[6], x[1], x[2], x[3], x[4].month, x[7])).\
toDF(['label', 'Budget', 'Genre', 'Population',
'ReleaseMonth', 'Runtime'])
linear_profitDF = linear_profitDF.select('label', 'Budget', explode('Genre'), 'Population',
'ReleaseMonth', 'Runtime')
#Create StringIndexer instance
genreIndexer = StringIndexer(inputCol="col", outputCol="Genre")
#Fit the indexer and transform to generate the new DF
linear_profitDF = genreIndexer.fit(linear_profitDF).transform(linear_profitDF).drop('col')
#Use VectorAssembler to assemble all features into one column
vecAssembler = VectorAssembler().setInputCols(['Budget', 'Genre', 'Population', 'ReleaseMonth',
'Runtime']).setOutputCol('features')
linear_profitDF = vecAssembler.transform(linear_profitDF).select('label', 'features')
#Split data into train and test (40/60)
linearTrainDF, linearTestDF = linear_profitDF.randomSplit([0.4, 0.6], seed=123)
#Create the model instance
lr = LinearRegression(featuresCol='features', labelCol='label', predictionCol='prediction')
#We use a ParamGridBuilder to construct a grid of parameters to search over.
#(This is from https://spark.apache.org/docs/latest/ml-tuning.html)
paramGrid = ParamGridBuilder()\
.addGrid(lr.regParam, [0.1, 0.01, 0.001, 0.0001]) \
.addGrid(lr.fitIntercept, [False, True])\
.addGrid(lr.elasticNetParam, [0.0, 0.5, 1.0])\
.build()
#CrossValidation with k = 3 folds for hyperparameter tuning: it will try all combinations of values
#and computes the average evaluation metric for the 3 Models produced by fitting the Estimator on
#the 3 different (training, test) dataset pairs
#(This is from https://spark.apache.org/docs/latest/ml-tuning.html)
crossval = CrossValidator(estimator=lr,
| # Popularity(x[3])
# Release Date(x[4]) | random_line_split |
The Movies Database.py | x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
avgRating_genre_top = avgRating_genre.top(20, lambda x: x[1])
print('Genres sorted based on Average Rating:', avgRating_genre_top)
x = [i[0] for i in avgRating_genre_top]
y = [i[1] for i in avgRating_genre_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='mediumpurple', width = 0.5)
plt.xlabel("Genre")
plt.xticks(rotation=40)
plt.ylabel("Average Rating")
plt.title("Average Rating per Genre")
plt.show()
## Average Profit per Genre
profit_genre = tmdb_movies_filtered.map(lambda x: (x[6], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_genre_top = profit_genre.top(20, lambda x: x[1])
print('Genres sorted based on Average Profit:', profit_genre_top)
x = [i[0] for i in profit_genre_top]
y = [i[1] for i in profit_genre_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='tan', width = 0.5)
plt.xlabel("Genre")
plt.xticks(rotation=30)
plt.ylabel("Profit")
plt.title("Average Profit per Genre")
plt.show()
## Budget vs Profit per Genre
budget_genre = tmdb_movies_filtered.map(lambda x: (x[1], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
budget_genre = budget_genre.top(20)
profit_genre = tmdb_movies_filtered.map(lambda x: (x[6], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_genre = profit_genre.top(20)
x = [i[0] for i in profit_genre]
y1 = [i[1] for i in budget_genre]
y2 = [i[1] for i in profit_genre]
df = pd.DataFrame({'Genre': x, 'Budget': y1, 'Profit': y2}, index = x).sort_values(
by = 'Profit', ascending=False)
fig, ax = plt.subplots(figsize=(20, 5))
position = np.arange(len(x))
budget = ax.bar(position, df['Budget'], width = 0.4, color = 'lightgreen')
# Same thing, but offset the x by the width of the bar.
profit = ax.bar(position + 0.4, df['Profit'], width=0.4, color = 'slategrey')
plt.legend(('Budget per Genre','Profit per Genre'))
plt.title("Budget vs Profit per Genre")
ax.set_xticks(position + 0.4 / 2)
ax.set_xticklabels(df['Genre'])
## Profit per Release Month
profit_month = tmdb_movies_filtered.map(lambda x: (x[4].month, (x[6], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_month_top = profit_month.top(12, lambda x: x[1])
print('Months sorted based on Average Profit:', profit_month_top)
x = [i[0] for i in profit_month_top]
y = [i[1] for i in profit_month_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='sandybrown', width = 0.5)
plt.xlabel("Month")
plt.ylabel("Profit")
plt.title("Average Profit per Month")
plt.show()
## Linear Regression to Predict Profit (Revenue - Budget):
Prediction is based on Budget, Genre, Popularity, Release Month and Runtime
print("Linear Regression started at: ", datetime.now().strftime("%H:%M:%S"))
#Since Genre is categorical, StringIndexer will be used to map each category to a corresponding
#numerical value
#To use StringIndexer, we will need to conver to DF first
linear_profitDF = tmdb_movies_filtered.map(lambda x: (x[6], x[1], x[2], x[3], x[4].month, x[7])).\
toDF(['label', 'Budget', 'Genre', 'Population',
'ReleaseMonth', 'Runtime'])
linear_profitDF = linear_profitDF.select('label', 'Budget', explode('Genre'), 'Population',
'ReleaseMonth', 'Runtime')
#Create StringIndexer instance
genreIndexer = StringIndexer(inputCol="col", outputCol="Genre")
#Fit the indexer and transform to generate the new DF
linear_profitDF = genreIndexer.fit(linear_profitDF).transform(linear_profitDF).drop('col')
#Use VectorAssembler to assemble all features into one column
vecAssembler = VectorAssembler().setInputCols(['Budget', 'Genre', 'Population', 'ReleaseMonth',
'Runtime']).setOutputCol('features')
linear_profitDF = vecAssembler.transform(linear_profitDF).select('label', 'features')
#Split data into train and test (40/60)
linearTrainDF, linearTestDF = linear_profitDF.randomSplit([0.4, 0.6], seed=123)
#Create the model instance
lr = LinearRegression(featuresCol='features', labelCol='label', predictionCol='prediction')
#We use a ParamGridBuilder to construct a grid of parameters to search over.
#(This is from https://spark.apache.org/docs/latest/ml-tuning.html)
paramGrid = ParamGridBuilder()\
.addGrid(lr.regParam, [0.1, 0.01, 0.001, 0.0001]) \
.addGrid(lr.fitIntercept, [False, True])\
.addGrid(lr.elasticNetParam, [0.0, 0.5, 1.0])\
.build()
#CrossValidation with k = 3 folds for hyperparameter tuning: it will try all combinations of values
#and computes the average evaluation metric for the 3 Models produced by fitting the Estimator on
#the 3 different (training, test) dataset pairs
#(This is from https://spark.apache.org/docs/latest/ml-tuning.html)
crossval = CrossValidator(estimator=lr,
estimatorParamMaps=paramGrid,
evaluator=RegressionEvaluator(),
numFolds=3)
#Fitting the training data
model = crossval.fit(linearTrainDF)
#Testing the model
ytest_ypred = model.transform(linearTestDF)
#Evaluating the model using testing result
linearEvaluator = RegressionEvaluator(predictionCol="prediction", labelCol="label", metricName="r2")
print("R Squared (r2): ", round(linearEvaluator.evaluate(ytest_ypred), 2)*100, '%')
#This means the model can explain 48% of variability in Profit
print("Linear Regression ended at: ", datetime.now().strftime("%H:%M:%S"))
## Logistic Regression to Evaluate Movie Success:
- 1 (Successful): If Average Rating is >= 6.0
- 0 (Unsuccessful): If Average Rating is < 6.0
---------
Prediction is based on Budget, Popularity and Runtime
Logistic Regression without using Weights (Unbalanced)
print("Unbalanced Logistic Regression started at: ", datetime.now().strftime("%H:%M:%S"))
#Budget x[1], popularity x[3] and runtime x[7]
logit_success = tmdb_movies_filtered.map(lambda x: (1 if x[8] >= 6 else 0, [x[1], x[3], x[7]]))
#Check if data is balanced/unbalanced
print("Number of 1's vs 0's:",
(logit_success.filter(lambda x: x[0] == 1).count(), logit_success.filter(lambda x: x[0] == 0).count()))
#Split data into train and test (40/60)
logitTrain, logitTest = logit_success.randomSplit([0.4, 0.6], seed=123)
#Load and parse the data
def parsePoint(line):
| return LabeledPoint(line[0], line[1]) | identifier_body |
|
The Movies Database.py | flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
avgRating_genre_top = avgRating_genre.top(20, lambda x: x[1])
print('Genres sorted based on Average Rating:', avgRating_genre_top)
x = [i[0] for i in avgRating_genre_top]
y = [i[1] for i in avgRating_genre_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='mediumpurple', width = 0.5)
plt.xlabel("Genre")
plt.xticks(rotation=40)
plt.ylabel("Average Rating")
plt.title("Average Rating per Genre")
plt.show()
## Average Profit per Genre
profit_genre = tmdb_movies_filtered.map(lambda x: (x[6], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_genre_top = profit_genre.top(20, lambda x: x[1])
print('Genres sorted based on Average Profit:', profit_genre_top)
x = [i[0] for i in profit_genre_top]
y = [i[1] for i in profit_genre_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='tan', width = 0.5)
plt.xlabel("Genre")
plt.xticks(rotation=30)
plt.ylabel("Profit")
plt.title("Average Profit per Genre")
plt.show()
## Budget vs Profit per Genre
budget_genre = tmdb_movies_filtered.map(lambda x: (x[1], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
budget_genre = budget_genre.top(20)
profit_genre = tmdb_movies_filtered.map(lambda x: (x[6], x[2])).flatMapValues(lambda x: x).\
map(lambda x: (x[1], (x[0], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_genre = profit_genre.top(20)
x = [i[0] for i in profit_genre]
y1 = [i[1] for i in budget_genre]
y2 = [i[1] for i in profit_genre]
df = pd.DataFrame({'Genre': x, 'Budget': y1, 'Profit': y2}, index = x).sort_values(
by = 'Profit', ascending=False)
fig, ax = plt.subplots(figsize=(20, 5))
position = np.arange(len(x))
budget = ax.bar(position, df['Budget'], width = 0.4, color = 'lightgreen')
# Same thing, but offset the x by the width of the bar.
profit = ax.bar(position + 0.4, df['Profit'], width=0.4, color = 'slategrey')
plt.legend(('Budget per Genre','Profit per Genre'))
plt.title("Budget vs Profit per Genre")
ax.set_xticks(position + 0.4 / 2)
ax.set_xticklabels(df['Genre'])
## Profit per Release Month
profit_month = tmdb_movies_filtered.map(lambda x: (x[4].month, (x[6], 1))).\
reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1])).\
map(lambda x: (x[0], round(x[1][0]/x[1][1],2)))
profit_month_top = profit_month.top(12, lambda x: x[1])
print('Months sorted based on Average Profit:', profit_month_top)
x = [i[0] for i in profit_month_top]
y = [i[1] for i in profit_month_top]
fig = plt.figure(figsize = (15, 5))
plt.bar(x, y, color ='sandybrown', width = 0.5)
plt.xlabel("Month")
plt.ylabel("Profit")
plt.title("Average Profit per Month")
plt.show()
## Linear Regression to Predict Profit (Revenue - Budget):
Prediction is based on Budget, Genre, Popularity, Release Month and Runtime
print("Linear Regression started at: ", datetime.now().strftime("%H:%M:%S"))
#Since Genre is categorical, StringIndexer will be used to map each category to a corresponding
#numerical value
#To use StringIndexer, we will need to conver to DF first
linear_profitDF = tmdb_movies_filtered.map(lambda x: (x[6], x[1], x[2], x[3], x[4].month, x[7])).\
toDF(['label', 'Budget', 'Genre', 'Population',
'ReleaseMonth', 'Runtime'])
linear_profitDF = linear_profitDF.select('label', 'Budget', explode('Genre'), 'Population',
'ReleaseMonth', 'Runtime')
#Create StringIndexer instance
genreIndexer = StringIndexer(inputCol="col", outputCol="Genre")
#Fit the indexer and transform to generate the new DF
linear_profitDF = genreIndexer.fit(linear_profitDF).transform(linear_profitDF).drop('col')
#Use VectorAssembler to assemble all features into one column
vecAssembler = VectorAssembler().setInputCols(['Budget', 'Genre', 'Population', 'ReleaseMonth',
'Runtime']).setOutputCol('features')
linear_profitDF = vecAssembler.transform(linear_profitDF).select('label', 'features')
#Split data into train and test (40/60)
linearTrainDF, linearTestDF = linear_profitDF.randomSplit([0.4, 0.6], seed=123)
#Create the model instance
lr = LinearRegression(featuresCol='features', labelCol='label', predictionCol='prediction')
#We use a ParamGridBuilder to construct a grid of parameters to search over.
#(This is from https://spark.apache.org/docs/latest/ml-tuning.html)
paramGrid = ParamGridBuilder()\
.addGrid(lr.regParam, [0.1, 0.01, 0.001, 0.0001]) \
.addGrid(lr.fitIntercept, [False, True])\
.addGrid(lr.elasticNetParam, [0.0, 0.5, 1.0])\
.build()
#CrossValidation with k = 3 folds for hyperparameter tuning: it will try all combinations of values
#and computes the average evaluation metric for the 3 Models produced by fitting the Estimator on
#the 3 different (training, test) dataset pairs
#(This is from https://spark.apache.org/docs/latest/ml-tuning.html)
crossval = CrossValidator(estimator=lr,
estimatorParamMaps=paramGrid,
evaluator=RegressionEvaluator(),
numFolds=3)
#Fitting the training data
model = crossval.fit(linearTrainDF)
#Testing the model
ytest_ypred = model.transform(linearTestDF)
#Evaluating the model using testing result
linearEvaluator = RegressionEvaluator(predictionCol="prediction", labelCol="label", metricName="r2")
print("R Squared (r2): ", round(linearEvaluator.evaluate(ytest_ypred), 2)*100, '%')
#This means the model can explain 48% of variability in Profit
print("Linear Regression ended at: ", datetime.now().strftime("%H:%M:%S"))
## Logistic Regression to Evaluate Movie Success:
- 1 (Successful): If Average Rating is >= 6.0
- 0 (Unsuccessful): If Average Rating is < 6.0
---------
Prediction is based on Budget, Popularity and Runtime
Logistic Regression without using Weights (Unbalanced)
print("Unbalanced Logistic Regression started at: ", datetime.now().strftime("%H:%M:%S"))
#Budget x[1], popularity x[3] and runtime x[7]
logit_success = tmdb_movies_filtered.map(lambda x: (1 if x[8] >= 6 else 0, [x[1], x[3], x[7]]))
#Check if data is balanced/unbalanced
print("Number of 1's vs 0's:",
(logit_success.filter(lambda x: x[0] == 1).count(), logit_success.filter(lambda x: x[0] == 0).count()))
#Split data into train and test (40/60)
logitTrain, logitTest = logit_success.randomSplit([0.4, 0.6], seed=123)
#Load and parse the data
def | parsePoint | identifier_name |
|
network.py |
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
def train_emnist(u_epochs):
#######################################################
################### Network setup #####################
# batch_size - Number of images given to the model at a particular instance
# v_length - Dimension of flattened input image size i.e. if input image size is [28x28], then v_length = 784
# network inputs
|
trainData = trainData.astype("float32")
testData = testData.astype("float32")
trainData /= 255
testData /= 255
logger.debug("[INFO] after re-shape")
# print new shape
logger.debug("[INFO] train data shape: {}".format(trainData.shape))
logger.debug("[INFO] test data shape: {}".format(testData.shape))
logger.debug("[INFO] train samples: {}".format(trainData.shape[0]))
logger.debug("[INFO] test samples: {}".format(testData.shape[0]))
# convert class vectors to binary class matrices --> one-hot encoding
mTrainLabels = np_utils.to_categorical(trainLabels, n_classes)
mTestLabels = np_utils.to_categorical(testLabels, n_classes)
# create the model
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(n_classes))
model.add(Activation("softmax"))
# summarize the model
model.summary()
# compile
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# fit the model
history = model.fit(trainData, mTrainLabels, validation_data=(testData, mTestLabels), batch_size=batch_size, epochs=epochs, verbose=2)
# print the history keys
logger.debug(history.history.keys())
# evaluate the model
scores = model.evaluate(testData, mTestLabels, verbose=0)
# history plot for accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# history plot for accuracy
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# print the results
logger.debug("[INFO] test score - {}".format(scores[0]))
logger.debug("[INFO] test accuracy - {}".format(scores[1]))
#model.save_weights('first_try.h5')
return model
def load_trained_model():
#######################################################
################### Network setup #####################
n_classes = 62
train_size = 697932
test_size = 116323
v_length = 784
# split the emnist data into train and test
trainData, trainLabels = emnist.extract_training_samples('byclass')
testData, testLabels = emnist.extract_test_samples('byclass')
# reshape the dataset
trainData = trainData.reshape(train_size, v_length)
testData = testData.reshape(test_size, v_length)
trainData = trainData.astype("float32")
testData = testData.astype("float32")
trainData /= 255
testData /= 255
# convert class vectors to binary class matrices --> one-hot encoding
mTrainLabels = np_utils.to_categorical(trainLabels, n_classes)
mTestLabels = np_utils.to_categorical(testLabels, n_classes)
# create the model
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(n_classes))
model.add(Activation("softmax"))
# load weights
model.load_weights('model_weights.h5')
# summarize the model
model.summary()
# compile
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# evaluate the model
scores = model.evaluate(testData, mTestLabels, verbose=0)
return model
def test_emnist(model, n1, n2):
##################### TEST #####################
# EMNIST output infos as numbers, so I created a label dict, so it will output it respective class
label_value = {'0':'0', '1':'1', '2':'2', '3':'3', '4':'4', '5':'5', '6':'6', '7':'7', '8':'8', '9':'9',
'10':'A', '11':'B', '12':'C', '13':'D', '14':'E', '15':'F', '16':'G', '17':'H', '18':'I', '19':'J',
'20':'K', '21':'L', '22':'M', '23':'N', '24':'O', '25':'P', '26':'Q', '27':'R', '28':'S', '29':'T',
'30':'U', '31':'V', '32':'W', '33':'X', '34':'Y', '35':'Z', '36':'a', '37':'b', '38':'c', '39':'d',
'40':'e', '41':'f', '42':'g', '43':'h', '44':'i', '45':'j', '46':'k', '47':'l', '48':'m', '49':'n',
'50':'o', '51':'p', '52':'q', '53':'r', '54':'s', '55':'t', '56':'u', '57':'v', '58':'w', '59':'x',
'60':'y', '61':'z'}
# grab some test images from the test data
a = n1
b = n2
v_length = 784
test_size = 116323
# load train data
testData, testLabels = emnist.extract_test_samples('byclass')
# reshape
testData = testData.reshape(test_size, v_length)
testData = testData.astype("float32")
testData /= 255
test_images = testData[a:b]
# reshape the test images to standard 28x28 format
test_images = test_images.reshape(test_images.shape[0], 28, 28)
# loop over each of the test images
for i, test_image in enumerate(test_images, start=1):
# grab a copy of test image for viewing
org_image = test_image
# reshape the test image to [1x784] format so that our model understands
test_image = test_image.reshape(1,784)
# make prediction on test image using our trained model
prediction = model.predict_classes(test_image, verbose=0)
# display the prediction and image
logger.debug("I think the character is - {}".format(label_value[str(prediction[0])]))
plt.subplot(220+i)
plt.imshow(org_image, cmap=plt.get_cmap('gray'))
logger.debug('Press Q to close')
plt.show()
# params: 1- mlmodel, 2- root path to the prediction imgs, 3- how many imgs we have in imgs_path
def identify_plate(model, imgs_path, test_size):
# EMNIST output infos as numbers, so I created a label dict, so it will output it respective class
label_value = {'0':'0', '1':'1', '2':'2', '3':'3', '4':'4', '5':' | epochs = u_epochs
n_classes = 62
batch_size = 256
train_size = 697932
test_size = 116323
v_length = 784
# split the emnist data into train and test
trainData, trainLabels = emnist.extract_training_samples('byclass')
testData, testLabels = emnist.extract_test_samples('byclass')
# print shapes
logger.debug("[INFO] train data shape: {}".format(trainData.shape))
logger.debug("[INFO] test data shape: {}".format(testData.shape))
logger.debug("[INFO] train samples: {}".format(trainData.shape[0]))
logger.debug("[INFO] test samples: {}".format(testData.shape[0]))
# reshape the dataset
trainData = trainData.reshape(train_size, v_length)
testData = testData.reshape(test_size, v_length) | identifier_body |
network.py |
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
def | (u_epochs):
#######################################################
################### Network setup #####################
# batch_size - Number of images given to the model at a particular instance
# v_length - Dimension of flattened input image size i.e. if input image size is [28x28], then v_length = 784
# network inputs
epochs = u_epochs
n_classes = 62
batch_size = 256
train_size = 697932
test_size = 116323
v_length = 784
# split the emnist data into train and test
trainData, trainLabels = emnist.extract_training_samples('byclass')
testData, testLabels = emnist.extract_test_samples('byclass')
# print shapes
logger.debug("[INFO] train data shape: {}".format(trainData.shape))
logger.debug("[INFO] test data shape: {}".format(testData.shape))
logger.debug("[INFO] train samples: {}".format(trainData.shape[0]))
logger.debug("[INFO] test samples: {}".format(testData.shape[0]))
# reshape the dataset
trainData = trainData.reshape(train_size, v_length)
testData = testData.reshape(test_size, v_length)
trainData = trainData.astype("float32")
testData = testData.astype("float32")
trainData /= 255
testData /= 255
logger.debug("[INFO] after re-shape")
# print new shape
logger.debug("[INFO] train data shape: {}".format(trainData.shape))
logger.debug("[INFO] test data shape: {}".format(testData.shape))
logger.debug("[INFO] train samples: {}".format(trainData.shape[0]))
logger.debug("[INFO] test samples: {}".format(testData.shape[0]))
# convert class vectors to binary class matrices --> one-hot encoding
mTrainLabels = np_utils.to_categorical(trainLabels, n_classes)
mTestLabels = np_utils.to_categorical(testLabels, n_classes)
# create the model
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(n_classes))
model.add(Activation("softmax"))
# summarize the model
model.summary()
# compile
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# fit the model
history = model.fit(trainData, mTrainLabels, validation_data=(testData, mTestLabels), batch_size=batch_size, epochs=epochs, verbose=2)
# print the history keys
logger.debug(history.history.keys())
# evaluate the model
scores = model.evaluate(testData, mTestLabels, verbose=0)
# history plot for accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# history plot for accuracy
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# print the results
logger.debug("[INFO] test score - {}".format(scores[0]))
logger.debug("[INFO] test accuracy - {}".format(scores[1]))
#model.save_weights('first_try.h5')
return model
def load_trained_model():
#######################################################
################### Network setup #####################
n_classes = 62
train_size = 697932
test_size = 116323
v_length = 784
# split the emnist data into train and test
trainData, trainLabels = emnist.extract_training_samples('byclass')
testData, testLabels = emnist.extract_test_samples('byclass')
# reshape the dataset
trainData = trainData.reshape(train_size, v_length)
testData = testData.reshape(test_size, v_length)
trainData = trainData.astype("float32")
testData = testData.astype("float32")
trainData /= 255
testData /= 255
# convert class vectors to binary class matrices --> one-hot encoding
mTrainLabels = np_utils.to_categorical(trainLabels, n_classes)
mTestLabels = np_utils.to_categorical(testLabels, n_classes)
# create the model
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(n_classes))
model.add(Activation("softmax"))
# load weights
model.load_weights('model_weights.h5')
# summarize the model
model.summary()
# compile
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# evaluate the model
scores = model.evaluate(testData, mTestLabels, verbose=0)
return model
def test_emnist(model, n1, n2):
##################### TEST #####################
# EMNIST output infos as numbers, so I created a label dict, so it will output it respective class
label_value = {'0':'0', '1':'1', '2':'2', '3':'3', '4':'4', '5':'5', '6':'6', '7':'7', '8':'8', '9':'9',
'10':'A', '11':'B', '12':'C', '13':'D', '14':'E', '15':'F', '16':'G', '17':'H', '18':'I', '19':'J',
'20':'K', '21':'L', '22':'M', '23':'N', '24':'O', '25':'P', '26':'Q', '27':'R', '28':'S', '29':'T',
'30':'U', '31':'V', '32':'W', '33':'X', '34':'Y', '35':'Z', '36':'a', '37':'b', '38':'c', '39':'d',
'40':'e', '41':'f', '42':'g', '43':'h', '44':'i', '45':'j', '46':'k', '47':'l', '48':'m', '49':'n',
'50':'o', '51':'p', '52':'q', '53':'r', '54':'s', '55':'t', '56':'u', '57':'v', '58':'w', '59':'x',
'60':'y', '61':'z'}
# grab some test images from the test data
a = n1
b = n2
v_length = 784
test_size = 116323
# load train data
testData, testLabels = emnist.extract_test_samples('byclass')
# reshape
testData = testData.reshape(test_size, v_length)
testData = testData.astype("float32")
testData /= 255
test_images = testData[a:b]
# reshape the test images to standard 28x28 format
test_images = test_images.reshape(test_images.shape[0], 28, 28)
# loop over each of the test images
for i, test_image in enumerate(test_images, start=1):
# grab a copy of test image for viewing
org_image = test_image
# reshape the test image to [1x784] format so that our model understands
test_image = test_image.reshape(1,784)
# make prediction on test image using our trained model
prediction = model.predict_classes(test_image, verbose=0)
# display the prediction and image
logger.debug("I think the character is - {}".format(label_value[str(prediction[0])]))
plt.subplot(220+i)
plt.imshow(org_image, cmap=plt.get_cmap('gray'))
logger.debug('Press Q to close')
plt.show()
# params: 1- mlmodel, 2- root path to the prediction imgs, 3- how many imgs we have in imgs_path
def identify_plate(model, imgs_path, test_size):
# EMNIST output infos as numbers, so I created a label dict, so it will output it respective class
label_value = {'0':'0', '1':'1', '2':'2', '3':'3', '4':'4', '5':' | train_emnist | identifier_name |
network.py | verbose=2)
# print the history keys
logger.debug(history.history.keys())
# evaluate the model
scores = model.evaluate(testData, mTestLabels, verbose=0)
# history plot for accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# history plot for accuracy
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# print the results
logger.debug("[INFO] test score - {}".format(scores[0]))
logger.debug("[INFO] test accuracy - {}".format(scores[1]))
#model.save_weights('first_try.h5')
return model
def load_trained_model():
#######################################################
################### Network setup #####################
n_classes = 62
train_size = 697932
test_size = 116323
v_length = 784
# split the emnist data into train and test
trainData, trainLabels = emnist.extract_training_samples('byclass')
testData, testLabels = emnist.extract_test_samples('byclass')
# reshape the dataset
trainData = trainData.reshape(train_size, v_length)
testData = testData.reshape(test_size, v_length)
trainData = trainData.astype("float32")
testData = testData.astype("float32")
trainData /= 255
testData /= 255
# convert class vectors to binary class matrices --> one-hot encoding
mTrainLabels = np_utils.to_categorical(trainLabels, n_classes)
mTestLabels = np_utils.to_categorical(testLabels, n_classes)
# create the model
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(n_classes))
model.add(Activation("softmax"))
# load weights
model.load_weights('model_weights.h5')
# summarize the model
model.summary()
# compile
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# evaluate the model
scores = model.evaluate(testData, mTestLabels, verbose=0)
return model
def test_emnist(model, n1, n2):
##################### TEST #####################
# EMNIST output infos as numbers, so I created a label dict, so it will output it respective class
label_value = {'0':'0', '1':'1', '2':'2', '3':'3', '4':'4', '5':'5', '6':'6', '7':'7', '8':'8', '9':'9',
'10':'A', '11':'B', '12':'C', '13':'D', '14':'E', '15':'F', '16':'G', '17':'H', '18':'I', '19':'J',
'20':'K', '21':'L', '22':'M', '23':'N', '24':'O', '25':'P', '26':'Q', '27':'R', '28':'S', '29':'T',
'30':'U', '31':'V', '32':'W', '33':'X', '34':'Y', '35':'Z', '36':'a', '37':'b', '38':'c', '39':'d',
'40':'e', '41':'f', '42':'g', '43':'h', '44':'i', '45':'j', '46':'k', '47':'l', '48':'m', '49':'n',
'50':'o', '51':'p', '52':'q', '53':'r', '54':'s', '55':'t', '56':'u', '57':'v', '58':'w', '59':'x',
'60':'y', '61':'z'}
# grab some test images from the test data
a = n1
b = n2
v_length = 784
test_size = 116323
# load train data
testData, testLabels = emnist.extract_test_samples('byclass')
# reshape
testData = testData.reshape(test_size, v_length)
testData = testData.astype("float32")
testData /= 255
test_images = testData[a:b]
# reshape the test images to standard 28x28 format
test_images = test_images.reshape(test_images.shape[0], 28, 28)
# loop over each of the test images
for i, test_image in enumerate(test_images, start=1):
# grab a copy of test image for viewing
org_image = test_image
# reshape the test image to [1x784] format so that our model understands
test_image = test_image.reshape(1,784)
# make prediction on test image using our trained model
prediction = model.predict_classes(test_image, verbose=0)
# display the prediction and image
logger.debug("I think the character is - {}".format(label_value[str(prediction[0])]))
plt.subplot(220+i)
plt.imshow(org_image, cmap=plt.get_cmap('gray'))
logger.debug('Press Q to close')
plt.show()
# params: 1- mlmodel, 2- root path to the prediction imgs, 3- how many imgs we have in imgs_path
def identify_plate(model, imgs_path, test_size):
# EMNIST output infos as numbers, so I created a label dict, so it will output it respective class
label_value = {'0':'0', '1':'1', '2':'2', '3':'3', '4':'4', '5':'5', '6':'6', '7':'7', '8':'8', '9':'9',
'10':'A', '11':'B', '12':'C', '13':'D', '14':'E', '15':'F', '16':'G', '17':'H', '18':'I', '19':'J',
'20':'K', '21':'L', '22':'M', '23':'N', '24':'O', '25':'P', '26':'Q', '27':'R', '28':'S', '29':'T',
'30':'U', '31':'V', '32':'W', '33':'X', '34':'Y', '35':'Z', '36':'a', '37':'b', '38':'c', '39':'d',
'40':'e', '41':'f', '42':'g', '43':'h', '44':'i', '45':'j', '46':'k', '47':'l', '48':'m', '49':'n',
'50':'o', '51':'p', '52':'q', '53':'r', '54':'s', '55':'t', '56':'u', '57':'v', '58':'w', '59':'x',
'60':'y', '61':'z'}
# 28*28
v_length = 784
# open imgs
testData = []
for img in imgs_path:
testData.append(cv2.imread(img, 0))
# normalize the img data
testData = np.array(testData)
testData = testData.reshape(test_size, v_length)
testData = testData.astype('float32')
testData /= 255
test_images = testData
test_images = test_images.reshape(test_images.shape[0], 28, 28)
#logger.debug(test_images.shape)
# in pos_predict i will store the original img and its respective prediction
plate_predictions, predict_result = [], []
for test_image in test_images:
# grab a copy of test image for viewing
| original_img = test_image
# reshape the test image to [1x784] format so that our model understands
test_image = test_image.reshape(1, 784)
# make prediction on test image using our trained model
prediction = model.predict_classes(test_image, verbose=0)
plate_predictions.append(label_value[str(prediction[0])])
predict_result.append([original_img, label_value[str(prediction[0])]])
#plt.imshow(org_image, cmap=plt.get_cmap('gray')) | conditional_block |
|
network.py |
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
def train_emnist(u_epochs):
#######################################################
################### Network setup #####################
# batch_size - Number of images given to the model at a particular instance
# v_length - Dimension of flattened input image size i.e. if input image size is [28x28], then v_length = 784
# network inputs
epochs = u_epochs
n_classes = 62
batch_size = 256
train_size = 697932
test_size = 116323
v_length = 784
# split the emnist data into train and test
trainData, trainLabels = emnist.extract_training_samples('byclass')
testData, testLabels = emnist.extract_test_samples('byclass')
# print shapes
logger.debug("[INFO] train data shape: {}".format(trainData.shape))
logger.debug("[INFO] test data shape: {}".format(testData.shape))
logger.debug("[INFO] train samples: {}".format(trainData.shape[0]))
logger.debug("[INFO] test samples: {}".format(testData.shape[0]))
# reshape the dataset
trainData = trainData.reshape(train_size, v_length)
testData = testData.reshape(test_size, v_length)
trainData = trainData.astype("float32")
testData = testData.astype("float32")
trainData /= 255
testData /= 255
logger.debug("[INFO] after re-shape")
# print new shape
logger.debug("[INFO] train data shape: {}".format(trainData.shape))
logger.debug("[INFO] test data shape: {}".format(testData.shape))
logger.debug("[INFO] train samples: {}".format(trainData.shape[0]))
logger.debug("[INFO] test samples: {}".format(testData.shape[0]))
# convert class vectors to binary class matrices --> one-hot encoding
mTrainLabels = np_utils.to_categorical(trainLabels, n_classes)
mTestLabels = np_utils.to_categorical(testLabels, n_classes)
# create the model
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(n_classes))
model.add(Activation("softmax"))
# summarize the model
model.summary()
# compile
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# fit the model
history = model.fit(trainData, mTrainLabels, validation_data=(testData, mTestLabels), batch_size=batch_size, epochs=epochs, verbose=2)
# print the history keys
logger.debug(history.history.keys())
# evaluate the model
scores = model.evaluate(testData, mTestLabels, verbose=0)
# history plot for accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# history plot for accuracy
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# print the results
logger.debug("[INFO] test score - {}".format(scores[0]))
logger.debug("[INFO] test accuracy - {}".format(scores[1]))
#model.save_weights('first_try.h5')
return model
def load_trained_model():
#######################################################
################### Network setup #####################
n_classes = 62
train_size = 697932
test_size = 116323
v_length = 784
# split the emnist data into train and test
trainData, trainLabels = emnist.extract_training_samples('byclass')
testData, testLabels = emnist.extract_test_samples('byclass')
# reshape the dataset
trainData = trainData.reshape(train_size, v_length)
testData = testData.reshape(test_size, v_length)
trainData = trainData.astype("float32")
testData = testData.astype("float32")
trainData /= 255
testData /= 255
# convert class vectors to binary class matrices --> one-hot encoding
mTrainLabels = np_utils.to_categorical(trainLabels, n_classes)
mTestLabels = np_utils.to_categorical(testLabels, n_classes)
# create the model
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(n_classes))
model.add(Activation("softmax"))
# load weights
model.load_weights('model_weights.h5')
# summarize the model
model.summary()
# compile
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# evaluate the model
scores = model.evaluate(testData, mTestLabels, verbose=0)
return model
def test_emnist(model, n1, n2):
##################### TEST #####################
# EMNIST output infos as numbers, so I created a label dict, so it will output it respective class
label_value = {'0':'0', '1':'1', '2':'2', '3':'3', '4':'4', '5':'5', '6':'6', '7':'7', '8':'8', '9':'9',
'10':'A', '11':'B', '12':'C', '13':'D', '14':'E', '15':'F', '16':'G', '17':'H', '18':'I', '19':'J',
'20':'K', '21':'L', '22':'M', '23':'N', '24':'O', '25':'P', '26':'Q', '27':'R', '28':'S', '29':'T',
'30':'U', '31':'V', '32':'W', '33':'X', '34':'Y', '35':'Z', '36':'a', '37':'b', '38':'c', '39':'d',
'40':'e', '41':'f', '42':'g', '43':'h', '44':'i', '45':'j', '46':'k', '47':'l', '48':'m', '49':'n',
'50':'o', '51':'p', '52':'q', '53':'r', '54':'s', '55':'t', '56':'u', '57':'v', '58':'w', '59':'x',
'60':'y', '61':'z'}
# grab some test images from the test data
a = n1
b = n2
v_length = 784
test_size = 116323
# load train data
testData, testLabels = emnist.extract_test_samples('byclass')
# reshape
testData = testData.reshape(test_size, v_length)
testData = testData.astype("float32")
testData /= 255
test_images = testData[a:b]
# reshape the test images to standard 28x28 format
test_images = test_images.reshape(test_images.shape[0], 28, 28)
# loop over each of the test images
for i, test_image in enumerate(test_images, start=1):
# grab a copy of test image for viewing
org_image = test_image
# reshape the test image to [1x784] format so that our model understands
test_image = test_image.reshape(1,784)
# make prediction on test image using our trained model
prediction = model.predict_classes(test_image, verbose=0)
# display the prediction and image
logger.debug("I think the character is - {}".format(label_value[str(prediction[0])])) |
# params: 1- mlmodel, 2- root path to the prediction imgs, 3- how many imgs we have in imgs_path
def identify_plate(model, imgs_path, test_size):
# EMNIST output infos as numbers, so I created a label dict, so it will output it respective class
label_value = {'0':'0', '1':'1', '2':'2', '3':'3', '4':'4', '5':' | plt.subplot(220+i)
plt.imshow(org_image, cmap=plt.get_cmap('gray'))
logger.debug('Press Q to close')
plt.show() | random_line_split |
HOG_SVM_FaceDetection.py | detector was trained. If you are building a webcam or selfie application that uses face detection, you can significantly improve speed by resizing the image to the appropriate size.
#
# ## <font style = "color:rgb(50,120,229)">Classifying a patch</font>
#
# From the previous subsection, we know many patches of the image at many locations and scale are evaluated to check if there is an object inside the patch.
#
# The evaluation is done using the classifier that is able to classify a patch into object vs. background. When an SVM is used as a classifier, the two classes are labeled as -1 (background) and 1 (object). When the response for the SVM classifier is greater than 0, the patch has a greater probability of belonging to the object class. In practice, if the SVM classifier response is greater than a threshold (called the **hitTreshold** in OpenCV) we say it is an object. A high hitTreshold would result in fewer detected objects.
#
# ## <font style = "color:rgb(50,120,229)">Grouping Rectangles ( non-maximum Suppression )</font>
#
# As mentioned before, the detector may detect multiple rectangles for the same object. Grouping of these rectangles is referred to **non-maximum suppression**.
#
# We know from the previous section, each bounding box has an associated SVM response. These responses are often called **weights.** The greater the weights, the more likely it is that the box contains an object.
#
# The goal of non-maximum suppression is to come up with a smaller list of rectangles and a weight associated with every rectangle in the final list.
#
# There are many different ways of doing non-maximum suppression. In OpenCV, the following steps are employed by the HOG::groupRectangles method internally.
#
#
#
# 1. **Clustering**: Rectangles are clustered based on overlap between rectangles. The idea is to represent an entire cluster with just one rectangle.
#
# 1. **Pruning:** If the number of rectangles in a cluster are less than a threshold, the cluster is removed.
#
# 2. **Cluster average:** The average of all rectangles in every cluster is found. This rectangle is added to the final list.
#
# 3. **Pick maximum SVM response:** The SVM response (weight) associated with this average rectangle is the maximum of the weights of all rectangles in the cluster.
#
# 2. **Filtering smaller rectangles:** The final list is further pruned by removing smaller rectangles inside larger ones if the number of rectangles in the original cluster was less than 3.
#
# <font style="color:rgb(255,0,0)">**Note:**</font>From the above description, we can see that not all rectangles completely contained inside another rectangle are moved by OpenCV’s non-maximum suppression.
# # <font style = "color:rgb(50,120,229)">HOG + SVM based Object Detection using OpenCV</font>
#
# In this section, we will learn the methods of the HOGDescriptor class that are useful of using a trained HOG classifier as a detector.
# In[148]:
import os
import glob
import cv2
import numpy as np
import matplotlib.pyplot as plt
from dataPath import DATA_PATH
get_ipython().run_line_magic('matplotlib', 'inline')
# In[149]:
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0,10.0)
matplotlib.rcParams['image.cmap'] = 'gray'
# ## <font style = "color:rgb(50,120,229)">How to train HOG + SVM Pedestrian Detector</font>
#
# In this section we will learn how to train a HOG + SVM based object detector. We will train a People Detector using INRIA’s Pedestrian dataset. The folder contains two sub-folders -
# 1. train_64x128_H96 - contains the cropped images of pedestrians and resized to 64x128 ( Positive Examples )
# 1. test_64x128_H96 - contains cropped images which do not contain pedestrians. ( Negative Examples )
#
# The training data can also be downloaded from **[this link](https://www.filepicker.io/api/file/VQKdmzKqSLWruVhx7Xdd)**. Please unzip the file INRIAPerson.zip and keep it in the `../data/images/` folder before running the code if it is not already present.
# ## <font style="color:rgb(50,120,229)">Data Handling utilities</font>
# Functions to read images from a given directory and return their corresponding labels (+1 or -1)
# In[150]:
# returns image paths in given folder
# with extensions as defined in imgExts
def getImagePaths(folder, imgExts):
imagePaths = []
for x in os.listdir(folder):
xPath = os.path.join(folder, x)
if os.path.splitext(xPath)[1] in imgExts:
imagePaths.append(xPath)
return imagePaths
#change image sizes to match
width = 128
height = 128
dim = (width, height)
# read images in a folder
# return list of images and labels
def getD | der, classLabel):
#change image sizes to match
width = 128
height = 128
dim = (width, height)
images = []
labels = []
imagePaths = getImagePaths(folder, ['.jpg', '.png', '.jpeg'])
for imagePath in imagePaths:
# print(imagePath)
im = cv2.imread(imagePath, cv2.IMREAD_COLOR)
resized = cv2.resize(im, dim, interpolation = cv2.INTER_AREA)
images.append(resized)
labels.append(classLabel)
return images, labels
# ## <font style="color:rgb(50,120,229)">Setup SVM Classifier</font>
# Wrappers around SVM functions to initialize SVM, train on data and labels, predict labels and evaluate model’s error rate on test data
# In[151]:
# Initialize SVM with parameters
def svmInit(C, gamma):
model = cv2.ml.SVM_create()
model.setGamma(gamma)
model.setC(C)
model.setKernel(cv2.ml.SVM_LINEAR)
model.setType(cv2.ml.SVM_C_SVC)
model.setTermCriteria((cv2.TERM_CRITERIA_EPS +
cv2.TERM_CRITERIA_MAX_ITER,
1000, 1e-3))
return model
# Train SVM on data and labels
def svmTrain(model, samples, labels):
model.train(samples, cv2.ml.ROW_SAMPLE, labels)
# predict labels for given samples
def svmPredict(model, samples):
return model.predict(samples)[1]
# evaluate a model by comparing
# predicted labels and ground truth
def svmEvaluate(model, samples, labels):
labels = labels[:, np.newaxis]
pred = model.predict(samples)[1]
correct = np.sum((labels == pred))
err = (labels != pred).mean()
print('label -- 1:{}, -1:{}'.format(np.sum(pred == 1),
np.sum(pred == -1)))
return correct, err * 100
# create a directory if it doesn't exist
def createDir(folder):
try:
os.makedirs(folder)
except OSError:
print('{}: already exists'.format(folder))
except Exception as e:
print(e)
# ## <font style="color:rgb(50,120,229)">Setup HoG Feature Detector</font>
# Functions to compute HOG descriptors for a set of images and convert HOG descriptor to data format used by SVM
#
# In[152]:
# compute HOG features for given images
def computeHOG(hog, images):
hogFeatures = []
for image in images:
hogFeature = hog.compute(image)
hogFeatures.append(hogFeature)
return hogFeatures
# Convert HOG descriptors to format recognized by SVM
def prepareData(hogFeatures):
featureVectorLength = len(hogFeatures[0])
data = np.float32(hogFeatures).reshape(-1, featureVectorLength)
return data
# Initialize HOG with parameters
#
# In[153]:
# Initialize HOG parameters
winSize = (128, 128)
blockSize = (16, 16)
blockStride = (8, 8)
cellSize = (8, 8)
nbins = 9
derivAperture = 1
winSigma = -1
histogramNormType = 0
L2HysThreshold = 0.2
gammaCorrection = True
nlevels = 64
signedGradient = False
# Initialize HOG
hog = cv2.HOGDescriptor(winSize, blockSize, blockStride,
cellSize, nbins,derivAperture,
winSigma, histogramNormType, L2HysThreshold,
gammaCorrection, nlevels,signedGradient)
# ## <font style="color:rgb(50,120 | ataset(fol | identifier_name |
HOG_SVM_FaceDetection.py | this is a good idea.
# In[158]:
# ================================ Query Model =============================================
# Run object detector on a query image to find pedestrians
# We will load the model again and test the model
# This is just to explain how to load an SVM model
# You can use the model directly
if queryModel:
# load model
model = cv2.ml.SVM_load(DATA_PATH + 'models/face.yml')
# extract support vector and rho(bias) from model
sv = model.getSupportVectors()
rho, aplha, svidx = model.getDecisionFunction(0)
svmDetector = np.zeros(sv.shape[1] + 1, dtype=sv.dtype)
svmDetector[:-1] = -sv[:]
svmDetector[-1] = rho
# set our SVMDetector in HOG
hog.setSVMDetector(svmDetector)
filename = DATA_PATH + "images/hillary_clinton.jpg"
queryImage = cv2.imread(filename, cv2.IMREAD_COLOR)
# We will run pedestrian detector at an fixed height image
finalHeight = 800.0
scale = finalHeight / queryImage.shape[0]
queryImage = cv2.resize(queryImage, None, fx=scale, fy=scale)
# detectMultiScale will detect at nlevels of image by scaling up
# and scaling down resized image by scale of 1.05
bboxes, weights = hog.detectMultiScale(queryImage, winStride=(8, 8),
padding=(32, 32), scale=1.05,
finalThreshold=2, hitThreshold=1.0)
# draw detected bounding boxes over image
for bbox in bboxes:
x1, y1, w, h = bbox
x2, y2 = x1 + w, y1 + h
cv2.rectangle(queryImage, (x1, y1), (x2, y2),
(0, 255, 0), thickness=3, lineType=cv2.LINE_AA)
plt.imshow(queryImage[:,:,::-1])
plt.show()
# ## <font style = "color:rgb(50,120,229)">Comparison of our trained model with OpenCV's Pedestrian Detector</font>
#
# We have so far tested our model on 64x128 image patches. Now we will run our model on some real life images. We will also compare our results with OpenCV’s default People Detector. OpenCV’s people detector is also trained on INRIA dataset. We will draw Green rectangles for results from our model and red boxes for results from OpenCV’s people detector.
# In[159]:
# Initialize HOG
# Initialize HOG parameters
winSize = (64, 128)
blockSize = (16, 16)
blockStride = (8, 8)
cellSize = (8, 8)
nbins = 9
derivAperture = 1
winSigma = -1
histogramNormType = 0
L2HysThreshold = 0.2
gammaCorrection = True
nlevels = 64
signedGradient = False
# Initialize HOG
hog = cv2.HOGDescriptor(winSize, blockSize, blockStride,
cellSize, nbins, derivAperture,
winSigma, histogramNormType, L2HysThreshold,
gammaCorrection, nlevels, signedGradient)
# Load model, extract support vectors and decision function (rho) from this model. Now create a SVM detector for HOG by appending rho at the end of support vectors. Now set this SVM detector in HOG.
#
#
# In[160]:
# Load model trained by us
model = cv2.ml.SVM_load(DATA_PATH + 'models/pedestrian.yml')
sv = model.getSupportVectors()
rho, aplha, svidx = model.getDecisionFunction(0)
svmDetectorTrained = np.zeros(sv.shape[1] + 1, dtype=sv.dtype)
svmDetectorTrained[:-1] = -sv[:]
svmDetectorTrained[-1] = rho
# set SVMDetector trained by us in HOG
hog.setSVMDetector(svmDetectorTrained)
# Initialize HOG and set OpenCV’s default People Detector as SVM detector in HOG.
#
#
# In[161]:
# OpenCV's HOG based Pedestrian Detector
hogDefault = cv2.HOGDescriptor(winSize, blockSize, blockStride,
cellSize, nbins, derivAperture,
winSigma, histogramNormType,
L2HysThreshold,gammaCorrection,
nlevels, signedGradient)
svmDetectorDefault = cv2.HOGDescriptor_getDefaultPeopleDetector()
hogDefault.setSVMDetector(svmDetectorDefault)
# Read images for pedestrians directory which has some real world images. We will run detection on images with height = 800
#
#
# In[162]:
# read images from pedestrians directory
imageDir = DATA_PATH + 'images/pedestrians'
imagePaths = glob.glob(os.path.join(imageDir, '*.jpg'))
# We will run pedestrian detector at an fixed height image
finalHeight = 800.0
for imagePath in imagePaths:
print('processing: {}'.format(imagePath))
# read image
im = cv2.imread(imagePath, cv2.IMREAD_COLOR)
# resize image to height finalHeight
scale = finalHeight / im.shape[0]
im = cv2.resize(im, None, fx=scale, fy=scale)
# Detect people using trained and default SVM detectors
# detectMultiScale using detector trained by us
bboxes, weights = hog.detectMultiScale(im, winStride=(8, 8),
padding=(32, 32),scale=1.05,
finalThreshold=2,hitThreshold=1.0)
# detectMultiScale using default detector
bboxes2, weights2 = hogDefault.detectMultiScale(im, winStride=(8, 8),
padding=(32, 32),scale=1.05,
finalThreshold=2,hitThreshold=0)
# Draw found rectangles on image. We will draw
# green boxes for people detected by trained model and
# red boxes for people detected by OpenCV’s default model.
# print pedestrians detected
if len(bboxes) > 0:
print('Trained Detector :: pedestrians detected: {}'
.format(bboxes.shape[0]))
if len(bboxes2) > 0:
print('Default Detector :: pedestrians detected: {}'
.format(bboxes2.shape[0]))
# Draw detected bouunding boxes over image
# Red = default detector, Green = Trained Detector
for bbox in bboxes:
x1, y1, w, h = bbox
x2, y2 = x1 + w, y1 + h
cv2.rectangle(im, (x1, y1), (x2, y2),
(0, 255, 0), thickness=3,
lineType=cv2.LINE_AA)
for bbox in bboxes2:
x1, y1, w, h = bbox
x2, y2 = x1 + w, y1 + h
cv2.rectangle(im, (x1, y1), (x2, y2),
(0, 0, 255), thickness=3,
lineType=cv2.LINE_AA)
# Finally show the result and also write it to disk.
# Show final result
plt.imshow(im[:,:,::-1])
# Write image to disk
imResultPath = os.path.join('results', os.path.basename(imagePath))
cv2.imwrite(imResultPath, im)
plt.show()
# ### <font style = "color:rgb(50,120,229)">Hard Negative Mining</font>
#
# You may have noticed that our model produces slightly more False Positives. It is because we haven’t done negative mining. For negative mining you just have to run this detector on negative images ( You can download some images which do not contain persons).
#
# The model is giving false positives because it is finding it difficult to predict these patches correctly. So, we want to train it on some difficult examples. This is also called Hard Negative mining.
#
# When you run the detector on full negative images, write all those patches ( bbox ) which are predicted as positive (i.e. predicted label = 1) into a directory (let’s call it falsePosPatches). Make sure that you resize this patch to 64x128 before writing to falsePosPatches directory.
#
# Now train the model again this time with images in posPatches as positive images and images in negPatches + falsePosPatches as negative images.
# # <font style = "color:rgb(50,120,229)">References and Further Reading</font> | #
# 1. [https://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf](https://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf)
#
# 3. [https://en.wikipedia.org/wiki/Support_vector_machine](https://en.wikipedia.org/wiki/Support_vector_machine) | random_line_split |
|
HOG_SVM_FaceDetection.py | object detector was trained. If you are building a webcam or selfie application that uses face detection, you can significantly improve speed by resizing the image to the appropriate size.
#
# ## <font style = "color:rgb(50,120,229)">Classifying a patch</font>
#
# From the previous subsection, we know many patches of the image at many locations and scale are evaluated to check if there is an object inside the patch.
#
# The evaluation is done using the classifier that is able to classify a patch into object vs. background. When an SVM is used as a classifier, the two classes are labeled as -1 (background) and 1 (object). When the response for the SVM classifier is greater than 0, the patch has a greater probability of belonging to the object class. In practice, if the SVM classifier response is greater than a threshold (called the **hitTreshold** in OpenCV) we say it is an object. A high hitTreshold would result in fewer detected objects.
#
# ## <font style = "color:rgb(50,120,229)">Grouping Rectangles ( non-maximum Suppression )</font>
#
# As mentioned before, the detector may detect multiple rectangles for the same object. Grouping of these rectangles is referred to **non-maximum suppression**.
#
# We know from the previous section, each bounding box has an associated SVM response. These responses are often called **weights.** The greater the weights, the more likely it is that the box contains an object.
#
# The goal of non-maximum suppression is to come up with a smaller list of rectangles and a weight associated with every rectangle in the final list.
#
# There are many different ways of doing non-maximum suppression. In OpenCV, the following steps are employed by the HOG::groupRectangles method internally.
#
#
#
# 1. **Clustering**: Rectangles are clustered based on overlap between rectangles. The idea is to represent an entire cluster with just one rectangle.
#
# 1. **Pruning:** If the number of rectangles in a cluster are less than a threshold, the cluster is removed.
#
# 2. **Cluster average:** The average of all rectangles in every cluster is found. This rectangle is added to the final list.
#
# 3. **Pick maximum SVM response:** The SVM response (weight) associated with this average rectangle is the maximum of the weights of all rectangles in the cluster.
#
# 2. **Filtering smaller rectangles:** The final list is further pruned by removing smaller rectangles inside larger ones if the number of rectangles in the original cluster was less than 3.
#
# <font style="color:rgb(255,0,0)">**Note:**</font>From the above description, we can see that not all rectangles completely contained inside another rectangle are moved by OpenCV’s non-maximum suppression.
# # <font style = "color:rgb(50,120,229)">HOG + SVM based Object Detection using OpenCV</font>
#
# In this section, we will learn the methods of the HOGDescriptor class that are useful of using a trained HOG classifier as a detector.
# In[148]:
import os
import glob
import cv2
import numpy as np
import matplotlib.pyplot as plt
from dataPath import DATA_PATH
get_ipython().run_line_magic('matplotlib', 'inline')
# In[149]:
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0,10.0)
matplotlib.rcParams['image.cmap'] = 'gray'
# ## <font style = "color:rgb(50,120,229)">How to train HOG + SVM Pedestrian Detector</font>
#
# In this section we will learn how to train a HOG + SVM based object detector. We will train a People Detector using INRIA’s Pedestrian dataset. The folder contains two sub-folders -
# 1. train_64x128_H96 - contains the cropped images of pedestrians and resized to 64x128 ( Positive Examples )
# 1. test_64x128_H96 - contains cropped images which do not contain pedestrians. ( Negative Examples )
#
# The training data can also be downloaded from **[this link](https://www.filepicker.io/api/file/VQKdmzKqSLWruVhx7Xdd)**. Please unzip the file INRIAPerson.zip and keep it in the `../data/images/` folder before running the code if it is not already present.
# ## <font style="color:rgb(50,120,229)">Data Handling utilities</font>
# Functions to read images from a given directory and return their corresponding labels (+1 or -1)
# In[150]:
# returns image paths in given folder
# with extensions as defined in imgExts
def getImagePaths(folder, imgExts):
imagePaths = []
for x in os.listdir(folder):
xPat | eturn imagePaths
#change image sizes to match
width = 128
height = 128
dim = (width, height)
# read images in a folder
# return list of images and labels
def getDataset(folder, classLabel):
#change image sizes to match
width = 128
height = 128
dim = (width, height)
images = []
labels = []
imagePaths = getImagePaths(folder, ['.jpg', '.png', '.jpeg'])
for imagePath in imagePaths:
# print(imagePath)
im = cv2.imread(imagePath, cv2.IMREAD_COLOR)
resized = cv2.resize(im, dim, interpolation = cv2.INTER_AREA)
images.append(resized)
labels.append(classLabel)
return images, labels
# ## <font style="color:rgb(50,120,229)">Setup SVM Classifier</font>
# Wrappers around SVM functions to initialize SVM, train on data and labels, predict labels and evaluate model’s error rate on test data
# In[151]:
# Initialize SVM with parameters
def svmInit(C, gamma):
model = cv2.ml.SVM_create()
model.setGamma(gamma)
model.setC(C)
model.setKernel(cv2.ml.SVM_LINEAR)
model.setType(cv2.ml.SVM_C_SVC)
model.setTermCriteria((cv2.TERM_CRITERIA_EPS +
cv2.TERM_CRITERIA_MAX_ITER,
1000, 1e-3))
return model
# Train SVM on data and labels
def svmTrain(model, samples, labels):
model.train(samples, cv2.ml.ROW_SAMPLE, labels)
# predict labels for given samples
def svmPredict(model, samples):
return model.predict(samples)[1]
# evaluate a model by comparing
# predicted labels and ground truth
def svmEvaluate(model, samples, labels):
labels = labels[:, np.newaxis]
pred = model.predict(samples)[1]
correct = np.sum((labels == pred))
err = (labels != pred).mean()
print('label -- 1:{}, -1:{}'.format(np.sum(pred == 1),
np.sum(pred == -1)))
return correct, err * 100
# create a directory if it doesn't exist
def createDir(folder):
try:
os.makedirs(folder)
except OSError:
print('{}: already exists'.format(folder))
except Exception as e:
print(e)
# ## <font style="color:rgb(50,120,229)">Setup HoG Feature Detector</font>
# Functions to compute HOG descriptors for a set of images and convert HOG descriptor to data format used by SVM
#
# In[152]:
# compute HOG features for given images
def computeHOG(hog, images):
hogFeatures = []
for image in images:
hogFeature = hog.compute(image)
hogFeatures.append(hogFeature)
return hogFeatures
# Convert HOG descriptors to format recognized by SVM
def prepareData(hogFeatures):
featureVectorLength = len(hogFeatures[0])
data = np.float32(hogFeatures).reshape(-1, featureVectorLength)
return data
# Initialize HOG with parameters
#
# In[153]:
# Initialize HOG parameters
winSize = (128, 128)
blockSize = (16, 16)
blockStride = (8, 8)
cellSize = (8, 8)
nbins = 9
derivAperture = 1
winSigma = -1
histogramNormType = 0
L2HysThreshold = 0.2
gammaCorrection = True
nlevels = 64
signedGradient = False
# Initialize HOG
hog = cv2.HOGDescriptor(winSize, blockSize, blockStride,
cellSize, nbins,derivAperture,
winSigma, histogramNormType, L2HysThreshold,
gammaCorrection, nlevels,signedGradient)
# ## <font style="color:rgb(50,120 | h = os.path.join(folder, x)
if os.path.splitext(xPath)[1] in imgExts:
imagePaths.append(xPath)
r | conditional_block |
HOG_SVM_FaceDetection.py | object detector was trained. If you are building a webcam or selfie application that uses face detection, you can significantly improve speed by resizing the image to the appropriate size.
#
# ## <font style = "color:rgb(50,120,229)">Classifying a patch</font>
#
# From the previous subsection, we know many patches of the image at many locations and scale are evaluated to check if there is an object inside the patch.
#
# The evaluation is done using the classifier that is able to classify a patch into object vs. background. When an SVM is used as a classifier, the two classes are labeled as -1 (background) and 1 (object). When the response for the SVM classifier is greater than 0, the patch has a greater probability of belonging to the object class. In practice, if the SVM classifier response is greater than a threshold (called the **hitTreshold** in OpenCV) we say it is an object. A high hitTreshold would result in fewer detected objects.
#
# ## <font style = "color:rgb(50,120,229)">Grouping Rectangles ( non-maximum Suppression )</font>
#
# As mentioned before, the detector may detect multiple rectangles for the same object. Grouping of these rectangles is referred to **non-maximum suppression**.
#
# We know from the previous section, each bounding box has an associated SVM response. These responses are often called **weights.** The greater the weights, the more likely it is that the box contains an object.
#
# The goal of non-maximum suppression is to come up with a smaller list of rectangles and a weight associated with every rectangle in the final list.
#
# There are many different ways of doing non-maximum suppression. In OpenCV, the following steps are employed by the HOG::groupRectangles method internally.
#
#
#
# 1. **Clustering**: Rectangles are clustered based on overlap between rectangles. The idea is to represent an entire cluster with just one rectangle.
#
# 1. **Pruning:** If the number of rectangles in a cluster are less than a threshold, the cluster is removed.
#
# 2. **Cluster average:** The average of all rectangles in every cluster is found. This rectangle is added to the final list.
#
# 3. **Pick maximum SVM response:** The SVM response (weight) associated with this average rectangle is the maximum of the weights of all rectangles in the cluster.
#
# 2. **Filtering smaller rectangles:** The final list is further pruned by removing smaller rectangles inside larger ones if the number of rectangles in the original cluster was less than 3.
#
# <font style="color:rgb(255,0,0)">**Note:**</font>From the above description, we can see that not all rectangles completely contained inside another rectangle are moved by OpenCV’s non-maximum suppression.
# # <font style = "color:rgb(50,120,229)">HOG + SVM based Object Detection using OpenCV</font>
#
# In this section, we will learn the methods of the HOGDescriptor class that are useful of using a trained HOG classifier as a detector.
# In[148]:
import os
import glob
import cv2
import numpy as np
import matplotlib.pyplot as plt
from dataPath import DATA_PATH
get_ipython().run_line_magic('matplotlib', 'inline')
# In[149]:
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0,10.0)
matplotlib.rcParams['image.cmap'] = 'gray'
# ## <font style = "color:rgb(50,120,229)">How to train HOG + SVM Pedestrian Detector</font>
#
# In this section we will learn how to train a HOG + SVM based object detector. We will train a People Detector using INRIA’s Pedestrian dataset. The folder contains two sub-folders -
# 1. train_64x128_H96 - contains the cropped images of pedestrians and resized to 64x128 ( Positive Examples )
# 1. test_64x128_H96 - contains cropped images which do not contain pedestrians. ( Negative Examples )
#
# The training data can also be downloaded from **[this link](https://www.filepicker.io/api/file/VQKdmzKqSLWruVhx7Xdd)**. Please unzip the file INRIAPerson.zip and keep it in the `../data/images/` folder before running the code if it is not already present.
# ## <font style="color:rgb(50,120,229)">Data Handling utilities</font>
# Functions to read images from a given directory and return their corresponding labels (+1 or -1)
# In[150]:
# returns image paths in given folder
# with extensions as defined in imgExts
def getImagePaths(folder, imgExts):
imagePaths = []
for x in os.listdir(folder):
xPath = os.path.join(folder, x)
if os.path.splitext(xPath)[1] in imgExts:
imagePaths.append(xPath)
return imagePaths
#change image sizes to match
width = 128
height = 128
dim = (width, height)
# read images in a folder
# return list of images and labels
def getDataset(folder, classLabel):
#change image sizes to match
width = 128
height = 128
dim = (width, height)
images = []
labels = []
imagePaths = getImagePaths(folder, ['.jpg', '.png', '.jpeg'])
for imagePath in imagePaths:
# print(imagePath)
im = cv2.imread(imagePath, cv2.IMREAD_COLOR)
resized = cv2.resize(im, dim, interpolation = cv2.INTER_AREA)
images.append(resized)
labels.append(classLabel)
return images, labels
# ## <font style="color:rgb(50,120,229)">Setup SVM Classifier</font>
# Wrappers around SVM functions to initialize SVM, train on data and labels, predict labels and evaluate model’s error rate on test data
# In[151]:
# Initialize SVM with parameters
def svmInit(C, gamma):
model = cv2.ml.SVM_create()
model.setGamma(gamma)
model.setC(C)
model.setKernel(cv2.ml.SVM_LINEAR)
model.setType(cv2.ml.SVM_C_SVC)
model.setTermCriteria((cv2.TERM_CRITERIA_EPS +
cv2.TERM_CRITERIA_MAX_ITER,
1000, 1e-3))
return model
# Train SVM on data and labels
def svmTrain(model, samples, labels):
model. | redict labels for given samples
def svmPredict(model, samples):
return model.predict(samples)[1]
# evaluate a model by comparing
# predicted labels and ground truth
def svmEvaluate(model, samples, labels):
labels = labels[:, np.newaxis]
pred = model.predict(samples)[1]
correct = np.sum((labels == pred))
err = (labels != pred).mean()
print('label -- 1:{}, -1:{}'.format(np.sum(pred == 1),
np.sum(pred == -1)))
return correct, err * 100
# create a directory if it doesn't exist
def createDir(folder):
try:
os.makedirs(folder)
except OSError:
print('{}: already exists'.format(folder))
except Exception as e:
print(e)
# ## <font style="color:rgb(50,120,229)">Setup HoG Feature Detector</font>
# Functions to compute HOG descriptors for a set of images and convert HOG descriptor to data format used by SVM
#
# In[152]:
# compute HOG features for given images
def computeHOG(hog, images):
hogFeatures = []
for image in images:
hogFeature = hog.compute(image)
hogFeatures.append(hogFeature)
return hogFeatures
# Convert HOG descriptors to format recognized by SVM
def prepareData(hogFeatures):
featureVectorLength = len(hogFeatures[0])
data = np.float32(hogFeatures).reshape(-1, featureVectorLength)
return data
# Initialize HOG with parameters
#
# In[153]:
# Initialize HOG parameters
winSize = (128, 128)
blockSize = (16, 16)
blockStride = (8, 8)
cellSize = (8, 8)
nbins = 9
derivAperture = 1
winSigma = -1
histogramNormType = 0
L2HysThreshold = 0.2
gammaCorrection = True
nlevels = 64
signedGradient = False
# Initialize HOG
hog = cv2.HOGDescriptor(winSize, blockSize, blockStride,
cellSize, nbins,derivAperture,
winSigma, histogramNormType, L2HysThreshold,
gammaCorrection, nlevels,signedGradient)
# ## <font style="color:rgb(50,120 | train(samples, cv2.ml.ROW_SAMPLE, labels)
# p | identifier_body |
cli.py | publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-----------------------------
This file is part of Zap AppImage Package Manager
"""
import json
import os
import shutil
import sys
import click
import urllib.parse
from .utils import is_valid_url
from zap.config.config import ConfigManager
from zap.execute.execute import Execute
from progressbar import progressbar
from . import __version__
from . import __doc__ as lic
from .zap import Zap, parse_gh_url
from .utils import format_colors as fc
def show_version(ctx, param, value):
"""Prints the version of the utility"""
if not value or ctx.resilient_parsing:
return
click.echo('Zap AppImage utility')
click.echo('version: {}'.format(__version__))
ctx.exit()
def show_license(ctx, param, value):
"""Prints the license of the utility"""
if not value or ctx.resilient_parsing:
return
click.echo(lic) | expose_value=False, is_eager=True)
@click.option('--license', '--lic', is_flag=True, callback=show_license,
expose_value=False, is_eager=True)
def cli():
""" 🗲 Zap: A command line interface to install appimages"""
pass
@cli.command('install')
@click.argument('appname')
@click.option('-d', '--select-default',
'select_default', default=False, is_flag=True,
help="Always select first option while installing.")
@click.option('-e', '--executable',
'executable', default=False,
help="Name of the executable, (default: appname)")
@click.option('-f', '--force/--no-force',
'force_refresh', default=False,
help="Force install the app without checking.")
@click.option('--from',
'from_url', default=False,
help="Install a specific appimage from a URL (url should be "
"downloadable by wget and should end with .AppImage)")
def install(appname, **kwargs):
"""Installs an appimage"""
z = Zap(appname)
z.install(**kwargs)
@cli.command()
@click.argument('appname')
def remove(appname):
"""Removes an appimage"""
z = Zap(appname)
z.remove()
@cli.command()
@click.option('-i', '--interactive/--no-interactive',
'interactive', default=False,
help="Interactively edit the configuration")
def config(interactive=False):
"""Shows the config or allows to configure the configuration"""
cfg = ConfigManager()
if interactive:
cfg.setup_config_interactive()
print(cfg)
@cli.command()
@click.argument('appname')
def appdata(appname):
"""Shows the config of an app"""
z = Zap(appname)
z.appdata(stdout=True)
@cli.command()
@click.argument('appname')
@click.option('-a', '--appimageupdate/--no-appimageupdate',
'use_appimageupdate', default=True,
help="Use AppImageupdate tool to update apps.")
def update(appname, use_appimageupdate=True):
"""Updates an appimage using appimageupdate tool"""
z = Zap(appname)
z.update(use_appimageupdate=use_appimageupdate)
@cli.command()
@click.argument('appname')
@click.option('-a', '--appimageupdate/--no-appimageupdate',
'use_appimageupdate', default=True,
help="Use AppImageupdate tool to update apps.")
def check_for_updates(appname, use_appimageupdate=True):
"""Updates an appimage using appimageupdate tool"""
z = Zap(appname)
z.check_for_updates(use_appimageupdate=use_appimageupdate)
@cli.command()
@click.argument('appname')
def show(appname):
"""Get the url to the app and open it in your web browser ($BROWSER)"""
z = Zap(appname)
z.show()
@cli.command()
def upgrade():
"""Upgrade all appimages using AppImageUpdate"""
config = ConfigManager()
apps = config['apps']
for i, app in progressbar(enumerate(apps), redirect_stdout=True):
z = Zap(app)
if i == 0:
z.update(show_spinner=False)
else:
z.update(check_appimage_update=False, show_spinner=False)
@cli.command()
@click.argument('url')
def xdg(url):
"""Parse xdg url"""
from .gui.xdg import gtk_zap_downloader
p_url = urllib.parse.urlparse(url)
query = urllib.parse.parse_qs(p_url.query)
appname = query.get('app')[0]
tag = query.get('tag')[0]
asset_id = query.get('id')[0]
print(appname, tag, asset_id, type(tag))
z = Zap(appname)
if p_url.netloc == 'install':
print(tag, asset_id)
z.install(tag_name=tag,
download_file_in_tag=asset_id,
downloader=gtk_zap_downloader, always_proceed=True)
elif p_url.netloc == 'remove':
z.remove()
else:
print("Invalid url")
@cli.command()
@click.argument('appname')
def get_md5(appname):
"""Get md5 of an appimage"""
z = Zap(appname)
z.get_md5()
@cli.command()
@click.argument('appname')
def is_integrated(appname):
"""Checks if appimage is integrated with the desktop"""
z = Zap(appname)
z.is_integrated()
@cli.command('list')
def ls():
"""Lists all the appimages"""
cfgmgr = ConfigManager()
apps = cfgmgr['apps']
for i in apps:
print(fc("- {g}{appname}{rst}", appname=i))
@cli.command()
@click.argument('appname')
def integrate(appname):
"""Integrate an installed appimage to the desktop"""
z = Zap(appname)
z.integrate()
@cli.command()
@click.argument('url')
@click.option('-d', '--select-default',
'select_default', default=False, is_flag=True,
help="Always select first option while installing.")
@click.option('-e', '--executable',
'executable', default=False,
help="Name of the executable, (default: last part of url)")
@click.option('-f', '--force/--no-force',
'force_refresh', default=False,
help="Force install the app without checking.")
def install_gh(url, executable, **kwargs):
"""Installs an appimage from GitHub repository URL (caution)"""
# https://stackoverflow.com/q/7160737/
is_valid = is_valid_url(url)
if not is_valid:
print(fc("{r}Error:{rst} Invalid URL"))
sys.exit(1)
cb_data = json.loads(json.dumps(parse_gh_url(url)))
if executable:
appname = executable
else:
appname = url.split('/')[-1]
z = Zap(appname)
z.install(executable=executable, cb_data=cb_data,
additional_data={'url': url, 'executable': executable},
**kwargs)
@cli.command()
def disintegrate():
"""Remove zap and optionally remove all the appimages installed with zap"""
click.confirm('Do you really want to uninstall?', abort=True)
if click.confirm('Do you want to remove installed AppImages?'):
cfgmgr = ConfigManager()
if os.path.exists(cfgmgr['bin']):
print(fc("{y}Removing bin for appimages{rst}"))
shutil.rmtree(cfgmgr['bin'], ignore_errors=True)
if os.path.exists(cfgmgr['storageDirectory']):
print(fc("{y}Removing storageDirectory for appimages{rst}"))
shutil.rmtree(cfgmgr['storageDirectory'], ignore_errors=True)
print(fc("{y}Removing zap binary entrypoint{rst}"))
for path in os.getenv('PATH').split(os.pathsep):
zap_bin = os.path.join(path, 'zap')
if os.path.exists(zap_bin):
os.remove(zap_bin)
break
print(fc("{y}Removing zap AppImage {rst}"))
dot_zap = os.path.join(os.path.expanduser('~'), '.zap')
if os.path.exists(dot_zap):
shutil.rmtree(dot_zap, ignore_errors=True)
@cli.command()
@click.argument('appname')
@click.option('-F', '--firejail',
'firejail', default=False,
help="Sandbox the app with firejail")
def x(appname, firejail=False):
"""Execute a Zap installed app (optionally with sandboxing / firejail)"""
z = Zap(appname)
if not z.is_installed:
print("{} is not installed yet.".format(appname))
return
path_to_appimage = z.appdata().get('path')
Execute(path_to_appimage, use_firejail=firejail)
print("Done!" | ctx.exit()
@click.group()
@click.option('--version', is_flag=True, callback=show_version, | random_line_split |
cli.py | publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-----------------------------
This file is part of Zap AppImage Package Manager
"""
import json
import os
import shutil
import sys
import click
import urllib.parse
from .utils import is_valid_url
from zap.config.config import ConfigManager
from zap.execute.execute import Execute
from progressbar import progressbar
from . import __version__
from . import __doc__ as lic
from .zap import Zap, parse_gh_url
from .utils import format_colors as fc
def show_version(ctx, param, value):
"""Prints the version of the utility"""
if not value or ctx.resilient_parsing:
return
click.echo('Zap AppImage utility')
click.echo('version: {}'.format(__version__))
ctx.exit()
def show_license(ctx, param, value):
"""Prints the license of the utility"""
if not value or ctx.resilient_parsing:
return
click.echo(lic)
ctx.exit()
@click.group()
@click.option('--version', is_flag=True, callback=show_version,
expose_value=False, is_eager=True)
@click.option('--license', '--lic', is_flag=True, callback=show_license,
expose_value=False, is_eager=True)
def cli():
""" 🗲 Zap: A command line interface to install appimages"""
pass
@cli.command('install')
@click.argument('appname')
@click.option('-d', '--select-default',
'select_default', default=False, is_flag=True,
help="Always select first option while installing.")
@click.option('-e', '--executable',
'executable', default=False,
help="Name of the executable, (default: appname)")
@click.option('-f', '--force/--no-force',
'force_refresh', default=False,
help="Force install the app without checking.")
@click.option('--from',
'from_url', default=False,
help="Install a specific appimage from a URL (url should be "
"downloadable by wget and should end with .AppImage)")
def install(appname, **kwargs):
"""Installs an appimage"""
z = Zap(appname)
z.install(**kwargs)
@cli.command()
@click.argument('appname')
def remove(appname):
"""Removes an appimage"""
z = Zap(appname)
z.remove()
@cli.command()
@click.option('-i', '--interactive/--no-interactive',
'interactive', default=False,
help="Interactively edit the configuration")
def config(interactive=False):
"""Shows the config or allows to configure the configuration"""
cfg = ConfigManager()
if interactive:
cfg.setup_config_interactive()
print(cfg)
@cli.command()
@click.argument('appname')
def appdata(appname):
"""Shows the config of an app"""
z = Zap(appname)
z.appdata(stdout=True)
@cli.command()
@click.argument('appname')
@click.option('-a', '--appimageupdate/--no-appimageupdate',
'use_appimageupdate', default=True,
help="Use AppImageupdate tool to update apps.")
def update(appname, use_appimageupdate=True):
"""Updates an appimage using appimageupdate tool"""
z = Zap(appname)
z.update(use_appimageupdate=use_appimageupdate)
@cli.command()
@click.argument('appname')
@click.option('-a', '--appimageupdate/--no-appimageupdate',
'use_appimageupdate', default=True,
help="Use AppImageupdate tool to update apps.")
def check_for_updates(appname, use_appimageupdate=True):
"""Updates an appimage using appimageupdate tool"""
z = Zap(appname)
z.check_for_updates(use_appimageupdate=use_appimageupdate)
@cli.command()
@click.argument('appname')
def show(appname):
"""Get the url to the app and open it in your web browser ($BROWSER)"""
z = Zap(appname)
z.show()
@cli.command()
def upgrade():
"""Upgrade all appimages using AppImageUpdate"""
config = ConfigManager()
apps = config['apps']
for i, app in progressbar(enumerate(apps), redirect_stdout=True):
z = Zap(app)
if i == 0:
z.update(show_spinner=False)
else:
z.update(check_appimage_update=False, show_spinner=False)
@cli.command()
@click.argument('url')
def xdg(url):
"""Parse xdg url"""
from .gui.xdg import gtk_zap_downloader
p_url = urllib.parse.urlparse(url)
query = urllib.parse.parse_qs(p_url.query)
appname = query.get('app')[0]
tag = query.get('tag')[0]
asset_id = query.get('id')[0]
print(appname, tag, asset_id, type(tag))
z = Zap(appname)
if p_url.netloc == 'install':
pri | elif p_url.netloc == 'remove':
z.remove()
else:
print("Invalid url")
@cli.command()
@click.argument('appname')
def get_md5(appname):
"""Get md5 of an appimage"""
z = Zap(appname)
z.get_md5()
@cli.command()
@click.argument('appname')
def is_integrated(appname):
"""Checks if appimage is integrated with the desktop"""
z = Zap(appname)
z.is_integrated()
@cli.command('list')
def ls():
"""Lists all the appimages"""
cfgmgr = ConfigManager()
apps = cfgmgr['apps']
for i in apps:
print(fc("- {g}{appname}{rst}", appname=i))
@cli.command()
@click.argument('appname')
def integrate(appname):
"""Integrate an installed appimage to the desktop"""
z = Zap(appname)
z.integrate()
@cli.command()
@click.argument('url')
@click.option('-d', '--select-default',
'select_default', default=False, is_flag=True,
help="Always select first option while installing.")
@click.option('-e', '--executable',
'executable', default=False,
help="Name of the executable, (default: last part of url)")
@click.option('-f', '--force/--no-force',
'force_refresh', default=False,
help="Force install the app without checking.")
def install_gh(url, executable, **kwargs):
"""Installs an appimage from GitHub repository URL (caution)"""
# https://stackoverflow.com/q/7160737/
is_valid = is_valid_url(url)
if not is_valid:
print(fc("{r}Error:{rst} Invalid URL"))
sys.exit(1)
cb_data = json.loads(json.dumps(parse_gh_url(url)))
if executable:
appname = executable
else:
appname = url.split('/')[-1]
z = Zap(appname)
z.install(executable=executable, cb_data=cb_data,
additional_data={'url': url, 'executable': executable},
**kwargs)
@cli.command()
def disintegrate():
"""Remove zap and optionally remove all the appimages installed with zap"""
click.confirm('Do you really want to uninstall?', abort=True)
if click.confirm('Do you want to remove installed AppImages?'):
cfgmgr = ConfigManager()
if os.path.exists(cfgmgr['bin']):
print(fc("{y}Removing bin for appimages{rst}"))
shutil.rmtree(cfgmgr['bin'], ignore_errors=True)
if os.path.exists(cfgmgr['storageDirectory']):
print(fc("{y}Removing storageDirectory for appimages{rst}"))
shutil.rmtree(cfgmgr['storageDirectory'], ignore_errors=True)
print(fc("{y}Removing zap binary entrypoint{rst}"))
for path in os.getenv('PATH').split(os.pathsep):
zap_bin = os.path.join(path, 'zap')
if os.path.exists(zap_bin):
os.remove(zap_bin)
break
print(fc("{y}Removing zap AppImage {rst}"))
dot_zap = os.path.join(os.path.expanduser('~'), '.zap')
if os.path.exists(dot_zap):
shutil.rmtree(dot_zap, ignore_errors=True)
@cli.command()
@click.argument('appname')
@click.option('-F', '--firejail',
'firejail', default=False,
help="Sandbox the app with firejail")
def x(appname, firejail=False):
"""Execute a Zap installed app (optionally with sandboxing / firejail)"""
z = Zap(appname)
if not z.is_installed:
print("{} is not installed yet.".format(appname))
return
path_to_appimage = z.appdata().get('path')
Execute(path_to_appimage, use_firejail=firejail)
print(" | nt(tag, asset_id)
z.install(tag_name=tag,
download_file_in_tag=asset_id,
downloader=gtk_zap_downloader, always_proceed=True)
| conditional_block |
cli.py | publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-----------------------------
This file is part of Zap AppImage Package Manager
"""
import json
import os
import shutil
import sys
import click
import urllib.parse
from .utils import is_valid_url
from zap.config.config import ConfigManager
from zap.execute.execute import Execute
from progressbar import progressbar
from . import __version__
from . import __doc__ as lic
from .zap import Zap, parse_gh_url
from .utils import format_colors as fc
def show_version(ctx, param, value):
"""Prints the version of the utility"""
if not value or ctx.resilient_parsing:
return
click.echo('Zap AppImage utility')
click.echo('version: {}'.format(__version__))
ctx.exit()
def show_license(ctx, param, value):
"""Prints the license of the utility"""
if not value or ctx.resilient_parsing:
return
click.echo(lic)
ctx.exit()
@click.group()
@click.option('--version', is_flag=True, callback=show_version,
expose_value=False, is_eager=True)
@click.option('--license', '--lic', is_flag=True, callback=show_license,
expose_value=False, is_eager=True)
def cli():
""" 🗲 Zap: A command line interface to install appimages"""
pass
@cli.command('install')
@click.argument('appname')
@click.option('-d', '--select-default',
'select_default', default=False, is_flag=True,
help="Always select first option while installing.")
@click.option('-e', '--executable',
'executable', default=False,
help="Name of the executable, (default: appname)")
@click.option('-f', '--force/--no-force',
'force_refresh', default=False,
help="Force install the app without checking.")
@click.option('--from',
'from_url', default=False,
help="Install a specific appimage from a URL (url should be "
"downloadable by wget and should end with .AppImage)")
def install(appname, **kwargs):
"""Installs an appimage"""
z = Zap(appname)
z.install(**kwargs)
@cli.command()
@click.argument('appname')
def remove(appname):
"""Removes an appimage"""
z = Zap(appname)
z.remove()
@cli.command()
@click.option('-i', '--interactive/--no-interactive',
'interactive', default=False,
help="Interactively edit the configuration")
def config(interactive=False):
"""Shows the config or allows to configure the configuration"""
cfg = ConfigManager()
if interactive:
cfg.setup_config_interactive()
print(cfg)
@cli.command()
@click.argument('appname')
def appdata(appname):
"""Shows the config of an app"""
z = Zap(appname)
z.appdata(stdout=True)
@cli.command()
@click.argument('appname')
@click.option('-a', '--appimageupdate/--no-appimageupdate',
'use_appimageupdate', default=True,
help="Use AppImageupdate tool to update apps.")
def update(appname, use_appimageupdate=True):
"""Updates an appimage using appimageupdate tool"""
z = Zap(appname)
z.update(use_appimageupdate=use_appimageupdate)
@cli.command()
@click.argument('appname')
@click.option('-a', '--appimageupdate/--no-appimageupdate',
'use_appimageupdate', default=True,
help="Use AppImageupdate tool to update apps.")
def check_for_updates(appname, use_appimageupdate=True):
"""Updates an appimage using appimageupdate tool"""
z = Zap(appname)
z.check_for_updates(use_appimageupdate=use_appimageupdate)
@cli.command()
@click.argument('appname')
def show(appname):
"""Get the url to the app and open it in your web browser ($BROWSER)"""
z = Zap(appname)
z.show()
@cli.command()
def upg |
"""Upgrade all appimages using AppImageUpdate"""
config = ConfigManager()
apps = config['apps']
for i, app in progressbar(enumerate(apps), redirect_stdout=True):
z = Zap(app)
if i == 0:
z.update(show_spinner=False)
else:
z.update(check_appimage_update=False, show_spinner=False)
@cli.command()
@click.argument('url')
def xdg(url):
"""Parse xdg url"""
from .gui.xdg import gtk_zap_downloader
p_url = urllib.parse.urlparse(url)
query = urllib.parse.parse_qs(p_url.query)
appname = query.get('app')[0]
tag = query.get('tag')[0]
asset_id = query.get('id')[0]
print(appname, tag, asset_id, type(tag))
z = Zap(appname)
if p_url.netloc == 'install':
print(tag, asset_id)
z.install(tag_name=tag,
download_file_in_tag=asset_id,
downloader=gtk_zap_downloader, always_proceed=True)
elif p_url.netloc == 'remove':
z.remove()
else:
print("Invalid url")
@cli.command()
@click.argument('appname')
def get_md5(appname):
"""Get md5 of an appimage"""
z = Zap(appname)
z.get_md5()
@cli.command()
@click.argument('appname')
def is_integrated(appname):
"""Checks if appimage is integrated with the desktop"""
z = Zap(appname)
z.is_integrated()
@cli.command('list')
def ls():
"""Lists all the appimages"""
cfgmgr = ConfigManager()
apps = cfgmgr['apps']
for i in apps:
print(fc("- {g}{appname}{rst}", appname=i))
@cli.command()
@click.argument('appname')
def integrate(appname):
"""Integrate an installed appimage to the desktop"""
z = Zap(appname)
z.integrate()
@cli.command()
@click.argument('url')
@click.option('-d', '--select-default',
'select_default', default=False, is_flag=True,
help="Always select first option while installing.")
@click.option('-e', '--executable',
'executable', default=False,
help="Name of the executable, (default: last part of url)")
@click.option('-f', '--force/--no-force',
'force_refresh', default=False,
help="Force install the app without checking.")
def install_gh(url, executable, **kwargs):
"""Installs an appimage from GitHub repository URL (caution)"""
# https://stackoverflow.com/q/7160737/
is_valid = is_valid_url(url)
if not is_valid:
print(fc("{r}Error:{rst} Invalid URL"))
sys.exit(1)
cb_data = json.loads(json.dumps(parse_gh_url(url)))
if executable:
appname = executable
else:
appname = url.split('/')[-1]
z = Zap(appname)
z.install(executable=executable, cb_data=cb_data,
additional_data={'url': url, 'executable': executable},
**kwargs)
@cli.command()
def disintegrate():
"""Remove zap and optionally remove all the appimages installed with zap"""
click.confirm('Do you really want to uninstall?', abort=True)
if click.confirm('Do you want to remove installed AppImages?'):
cfgmgr = ConfigManager()
if os.path.exists(cfgmgr['bin']):
print(fc("{y}Removing bin for appimages{rst}"))
shutil.rmtree(cfgmgr['bin'], ignore_errors=True)
if os.path.exists(cfgmgr['storageDirectory']):
print(fc("{y}Removing storageDirectory for appimages{rst}"))
shutil.rmtree(cfgmgr['storageDirectory'], ignore_errors=True)
print(fc("{y}Removing zap binary entrypoint{rst}"))
for path in os.getenv('PATH').split(os.pathsep):
zap_bin = os.path.join(path, 'zap')
if os.path.exists(zap_bin):
os.remove(zap_bin)
break
print(fc("{y}Removing zap AppImage {rst}"))
dot_zap = os.path.join(os.path.expanduser('~'), '.zap')
if os.path.exists(dot_zap):
shutil.rmtree(dot_zap, ignore_errors=True)
@cli.command()
@click.argument('appname')
@click.option('-F', '--firejail',
'firejail', default=False,
help="Sandbox the app with firejail")
def x(appname, firejail=False):
"""Execute a Zap installed app (optionally with sandboxing / firejail)"""
z = Zap(appname)
if not z.is_installed:
print("{} is not installed yet.".format(appname))
return
path_to_appimage = z.appdata().get('path')
Execute(path_to_appimage, use_firejail=firejail)
print | rade(): | identifier_name |
cli.py | publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-----------------------------
This file is part of Zap AppImage Package Manager
"""
import json
import os
import shutil
import sys
import click
import urllib.parse
from .utils import is_valid_url
from zap.config.config import ConfigManager
from zap.execute.execute import Execute
from progressbar import progressbar
from . import __version__
from . import __doc__ as lic
from .zap import Zap, parse_gh_url
from .utils import format_colors as fc
def show_version(ctx, param, value):
"""Prints the version of the utility"""
if not value or ctx.resilient_parsing:
return
click.echo('Zap AppImage utility')
click.echo('version: {}'.format(__version__))
ctx.exit()
def show_license(ctx, param, value):
"""Prints the license of the utility"""
if not value or ctx.resilient_parsing:
return
click.echo(lic)
ctx.exit()
@click.group()
@click.option('--version', is_flag=True, callback=show_version,
expose_value=False, is_eager=True)
@click.option('--license', '--lic', is_flag=True, callback=show_license,
expose_value=False, is_eager=True)
def cli():
""" 🗲 Zap: A command line interface to install appimages"""
pass
@cli.command('install')
@click.argument('appname')
@click.option('-d', '--select-default',
'select_default', default=False, is_flag=True,
help="Always select first option while installing.")
@click.option('-e', '--executable',
'executable', default=False,
help="Name of the executable, (default: appname)")
@click.option('-f', '--force/--no-force',
'force_refresh', default=False,
help="Force install the app without checking.")
@click.option('--from',
'from_url', default=False,
help="Install a specific appimage from a URL (url should be "
"downloadable by wget and should end with .AppImage)")
def install(appname, **kwargs):
"""Installs an appimage"""
z = Zap(appname)
z.install(**kwargs)
@cli.command()
@click.argument('appname')
def remove(appname):
"""Removes an appimage"""
z = Zap(appname)
z.remove()
@cli.command()
@click.option('-i', '--interactive/--no-interactive',
'interactive', default=False,
help="Interactively edit the configuration")
def config(interactive=False):
"""Shows the config or allows to configure the configuration"""
cfg = ConfigManager()
if interactive:
cfg.setup_config_interactive()
print(cfg)
@cli.command()
@click.argument('appname')
def appdata(appname):
"""Shows the config of an app"""
z = Zap(appname)
z.appdata(stdout=True)
@cli.command()
@click.argument('appname')
@click.option('-a', '--appimageupdate/--no-appimageupdate',
'use_appimageupdate', default=True,
help="Use AppImageupdate tool to update apps.")
def update(appname, use_appimageupdate=True):
"""Updates an appimage using appimageupdate tool"""
z = Zap(appname)
z.update(use_appimageupdate=use_appimageupdate)
@cli.command()
@click.argument('appname')
@click.option('-a', '--appimageupdate/--no-appimageupdate',
'use_appimageupdate', default=True,
help="Use AppImageupdate tool to update apps.")
def check_for_updates(appname, use_appimageupdate=True):
"""Updates an appimage using appimageupdate tool"""
z = Zap(appname)
z.check_for_updates(use_appimageupdate=use_appimageupdate)
@cli.command()
@click.argument('appname')
def show(appname):
"""Get the url to the app and open it in your web browser ($BROWSER)"""
z = Zap(appname)
z.show()
@cli.command()
def upgrade():
"""Upgrade all appimages using AppImageUpdate"""
config = ConfigManager()
apps = config['apps']
for i, app in progressbar(enumerate(apps), redirect_stdout=True):
z = Zap(app)
if i == 0:
z.update(show_spinner=False)
else:
z.update(check_appimage_update=False, show_spinner=False)
@cli.command()
@click.argument('url')
def xdg(url):
"""Parse xdg url"""
from .gui.xdg import gtk_zap_downloader
p_url = urllib.parse.urlparse(url)
query = urllib.parse.parse_qs(p_url.query)
appname = query.get('app')[0]
tag = query.get('tag')[0]
asset_id = query.get('id')[0]
print(appname, tag, asset_id, type(tag))
z = Zap(appname)
if p_url.netloc == 'install':
print(tag, asset_id)
z.install(tag_name=tag,
download_file_in_tag=asset_id,
downloader=gtk_zap_downloader, always_proceed=True)
elif p_url.netloc == 'remove':
z.remove()
else:
print("Invalid url")
@cli.command()
@click.argument('appname')
def get_md5(appname):
""" | @cli.command()
@click.argument('appname')
def is_integrated(appname):
"""Checks if appimage is integrated with the desktop"""
z = Zap(appname)
z.is_integrated()
@cli.command('list')
def ls():
"""Lists all the appimages"""
cfgmgr = ConfigManager()
apps = cfgmgr['apps']
for i in apps:
print(fc("- {g}{appname}{rst}", appname=i))
@cli.command()
@click.argument('appname')
def integrate(appname):
"""Integrate an installed appimage to the desktop"""
z = Zap(appname)
z.integrate()
@cli.command()
@click.argument('url')
@click.option('-d', '--select-default',
'select_default', default=False, is_flag=True,
help="Always select first option while installing.")
@click.option('-e', '--executable',
'executable', default=False,
help="Name of the executable, (default: last part of url)")
@click.option('-f', '--force/--no-force',
'force_refresh', default=False,
help="Force install the app without checking.")
def install_gh(url, executable, **kwargs):
"""Installs an appimage from GitHub repository URL (caution)"""
# https://stackoverflow.com/q/7160737/
is_valid = is_valid_url(url)
if not is_valid:
print(fc("{r}Error:{rst} Invalid URL"))
sys.exit(1)
cb_data = json.loads(json.dumps(parse_gh_url(url)))
if executable:
appname = executable
else:
appname = url.split('/')[-1]
z = Zap(appname)
z.install(executable=executable, cb_data=cb_data,
additional_data={'url': url, 'executable': executable},
**kwargs)
@cli.command()
def disintegrate():
"""Remove zap and optionally remove all the appimages installed with zap"""
click.confirm('Do you really want to uninstall?', abort=True)
if click.confirm('Do you want to remove installed AppImages?'):
cfgmgr = ConfigManager()
if os.path.exists(cfgmgr['bin']):
print(fc("{y}Removing bin for appimages{rst}"))
shutil.rmtree(cfgmgr['bin'], ignore_errors=True)
if os.path.exists(cfgmgr['storageDirectory']):
print(fc("{y}Removing storageDirectory for appimages{rst}"))
shutil.rmtree(cfgmgr['storageDirectory'], ignore_errors=True)
print(fc("{y}Removing zap binary entrypoint{rst}"))
for path in os.getenv('PATH').split(os.pathsep):
zap_bin = os.path.join(path, 'zap')
if os.path.exists(zap_bin):
os.remove(zap_bin)
break
print(fc("{y}Removing zap AppImage {rst}"))
dot_zap = os.path.join(os.path.expanduser('~'), '.zap')
if os.path.exists(dot_zap):
shutil.rmtree(dot_zap, ignore_errors=True)
@cli.command()
@click.argument('appname')
@click.option('-F', '--firejail',
'firejail', default=False,
help="Sandbox the app with firejail")
def x(appname, firejail=False):
"""Execute a Zap installed app (optionally with sandboxing / firejail)"""
z = Zap(appname)
if not z.is_installed:
print("{} is not installed yet.".format(appname))
return
path_to_appimage = z.appdata().get('path')
Execute(path_to_appimage, use_firejail=firejail)
print("Done!" | Get md5 of an appimage"""
z = Zap(appname)
z.get_md5()
| identifier_body |
cmpH5Sort.py | keys() for x in alnGroups]
uPulseDatasets = reduce(lambda x,y: set.union(set(x), set(y)), pulseDatasets)
if (not all(map(lambda x : set(x) == uPulseDatasets, pulseDatasets))):
log.error("All alignment groups need to have the same datasets.")
raise Exception("Can only repack cmp.h5 files with consistent datasets across alignment groups.")
readGroupPaths = dict(zip(cH5[format.ALN_GROUP_ID], [ x for x in cH5[format.ALN_GROUP_PATH]]))
refGroupPaths = dict(zip(cH5[format.REF_GROUP_ID], [ x for x in cH5[format.REF_GROUP_PATH]]))
uPDAndType = dict(zip(uPulseDatasets, [ cH5[readGroupPaths.values()[0]][z].dtype for z in uPulseDatasets ]))
def getDataset(read, ds):
return(cH5[readGroupPaths[read[format.ALN_ID]]][ds])
def getRefGroup(gID):
return(cH5[refGroupPaths[gID]])
offsets = cH5[format.REF_OFFSET_TABLE].value
sAI = cH5[format.ALN_INDEX]
orderedRefPaths = [""] * offsets.shape[0]
for row in xrange(0, offsets.shape[0]):
log.msg("Processing reference group: %d of %d" % (row + 1, offsets.shape[0]))
orderedRefPaths[row] = "/".join([getRefGroup(offsets[row, 0]).name, SORTED])
fRow = int(offsets[row, 1])
lRow = int(offsets[row, 2])
## Don't really have to do anything if there are no references
## which aligned.
if (lRow == fRow):
continue
## Make a new Group.
newGroup = getRefGroup(offsets[row, 0]).create_group(SORTED)
log.msg("Created new read group: %s" % SORTED)
## Go through each read and write it into the new vector.
reads = sAI[fRow:lRow, ]
totalSizes = reads[:, format.OFFSET_END] - reads[:, format.OFFSET_BEGIN]
for pulseDataset in uPulseDatasets:
log.msg("Processing dataset: %s" % pulseDataset)
newDS = array([0]*sum(1 + totalSizes), dtype = uPDAndType[pulseDataset])
currentStart = 0
for readIdx in xrange(0, reads.shape[0]):
read = reads[readIdx, ]
gStart, gEnd = currentStart, currentStart + totalSizes[readIdx]
newDS[gStart:gEnd] = getDataset(read, pulseDataset)[read[format.OFFSET_BEGIN]:read[format.OFFSET_END]]
currentStart = gEnd + 1
newGroup.create_dataset(pulseDataset, data = newDS, dtype = uPDAndType[pulseDataset], maxshape = None)
## After we've moved all of the data we can move the offsets.
currentStart = 0
for i in xrange(0, reads.shape[0]):
reads[i, format.OFFSET_BEGIN] = currentStart
reads[i, format.OFFSET_END] = currentStart + totalSizes[i]
reads[i, format.ALN_ID] = row
currentStart = reads[i, format.OFFSET_END] + 1
sAI[fRow:lRow,] = reads
## Now remake the AlnGroup Dataset.
log.msg("Writing new AlnGroupPath values.")
del(cH5[format.ALN_GROUP_PATH])
del(cH5[format.ALN_GROUP_ID])
cH5.create_dataset(format.ALN_GROUP_PATH, data = orderedRefPaths,
dtype = H5.new_vlen(str), maxshape = None)
cH5.create_dataset(format.ALN_GROUP_ID, data = range(0, offsets.shape[0]),
dtype = "int32", maxshape = None)
for rg in readGroupPaths.values():
del(cH5[rg])
def sortCmpH5(inFile, outFile, deep, jobs, log):
"""
This routine takes a cmp.h5 file and sorts the AlignmentIndex
table adding two additional columns for fast access. In addition,
a new top-level attribute is added to the indicate that the file
has been sorted, as well as a table to indicate the blocks of the
alignment index associated with each reference group.
"""
success = False;
if (outFile):
log.msg("Copying: " + inFile + " to " + outFile)
shutil.copyfile(inFile, outFile)
inFile = outFile
try:
cH5 = H5.File(inFile, 'a')
format = CmpH5Format(cH5)
log.msg("Read cmp.h5 with version %s" % format.VERSION)
aI = cH5[format.ALN_INDEX]
originalAttrs = aI.attrs.items()
## empty is a special case. In general, h5py handles
## zero-length slices poorly and therefore I don't want to
## make them. Therefore, I maintain the 'empty' variable to
## indicate that. This makes some code less pleasing, e.g.,
## computing the reference index data structure.
if (aI.shape[0] == 0):
log.warn("Warning: %s empty!" % inFile)
success = True;
return True;
# sort the AlignmentIndex
aord = lexsort([aI[:,format.TARGET_END], aI[:,format.TARGET_START],
aI[:,format.REF_ID]])
assert(len(aord) == aI.shape[0])
sAI = aI.value[aord,:]
del(aI)
log.msg("Sorted AlignmentIndex.")
# construct reference offset datastructure.
refSeqIDs = cH5[format.REF_GROUP_ID]
offsets = computeRefIndexTable(refSeqIDs.value, sAI[:,format.REF_ID])
log.msg("Constructed offset datastructure.")
# fill overlap and back columns.
for row in range(0, offsets.shape[0]):
fRow = int(offsets[row, 1])
lRow = int(offsets[row, 2])
if (lRow - fRow <= 0):
continue
sAI[fRow:lRow, (format.N_BACK, format.N_OVERLAP)] = \
computeIndicesDP(sAI[fRow:lRow, format.TARGET_START],
sAI[fRow:lRow, format.TARGET_END])
log.msg("Constructed indices.")
# modify the cmp.h5 file.
# We want to keep the chunking info on the dataset.
del(cH5[format.ALN_INDEX])
cH5.create_dataset(format.ALN_INDEX, data = sAI, dtype = h5t.NATIVE_UINT32,
maxshape = (None, None))
## If the file is already sorted there's no harm in resorting.
if (__pathExists(cH5, format.REF_OFFSET_TABLE)):
log.msg(format.REF_OFFSET_TABLE + " already exists, deleting.")
del(cH5[format.REF_OFFSET_TABLE])
## create the offset datastructure in the file.
cH5.create_dataset(format.REF_OFFSET_TABLE, data = offsets,
dtype = h5t.NATIVE_UINT32, maxshape = (None, None))
## add the index attribute.
cH5['/'].attrs.create("Index", ['REF_ID', 'TARGET_START', 'TARGET_END'])
## fixup attributes.
for oA in originalAttrs:
cH5[format.ALN_INDEX].attrs.create(oA[0], oA[1])
## deep repacking.
if (deep):
log.msg("Repacking alignment arrays.")
__repackDataArrays(cH5, format, log)
## memory free.
del sAI
## manage any extra datasets.
for extraTable in format.extraTables:
if (__pathExists(cH5, extraTable)):
log.msg("Sorting table: %s" % extraTable)
eTable = cH5[extraTable].value
if (len(eTable.shape) == 1):
eTable = eTable[aord]
else:
eTable = eTable[aord,:]
## save attributes, if any for re-writing below.
originalAttrs = cH5[extraTable].attrs.items()
del(cH5[extraTable])
cH5.create_dataset(extraTable, data = eTable,
maxshape = tuple([None for x in eTable.shape]))
for oA in originalAttrs:
cH5[extraTable].attrs.create(oA[0], oA[1])
## if you make it this far, set the flag.
success = True
except Exception, E:
log.error(E)
if (os.path.exists(outFile)):
pass
finally:
try:
cH5.close()
except:
pass
finally:
return(success)
class Loggy:
def __init__(self, level):
|
def write(self, msg, level):
if (self.level >= level): sys.stderr.write(str(msg) + "\n | self.level = level | identifier_body |
cmpH5Sort.py | keys() for x in alnGroups]
uPulseDatasets = reduce(lambda x,y: set.union(set(x), set(y)), pulseDatasets)
if (not all(map(lambda x : set(x) == uPulseDatasets, pulseDatasets))):
log.error("All alignment groups need to have the same datasets.")
raise Exception("Can only repack cmp.h5 files with consistent datasets across alignment groups.")
readGroupPaths = dict(zip(cH5[format.ALN_GROUP_ID], [ x for x in cH5[format.ALN_GROUP_PATH]]))
refGroupPaths = dict(zip(cH5[format.REF_GROUP_ID], [ x for x in cH5[format.REF_GROUP_PATH]]))
uPDAndType = dict(zip(uPulseDatasets, [ cH5[readGroupPaths.values()[0]][z].dtype for z in uPulseDatasets ]))
def getDataset(read, ds):
return(cH5[readGroupPaths[read[format.ALN_ID]]][ds])
def getRefGroup(gID):
return(cH5[refGroupPaths[gID]])
offsets = cH5[format.REF_OFFSET_TABLE].value
sAI = cH5[format.ALN_INDEX]
orderedRefPaths = [""] * offsets.shape[0]
for row in xrange(0, offsets.shape[0]):
log.msg("Processing reference group: %d of %d" % (row + 1, offsets.shape[0]))
orderedRefPaths[row] = "/".join([getRefGroup(offsets[row, 0]).name, SORTED])
fRow = int(offsets[row, 1])
lRow = int(offsets[row, 2])
## Don't really have to do anything if there are no references
## which aligned.
if (lRow == fRow):
continue
## Make a new Group.
newGroup = getRefGroup(offsets[row, 0]).create_group(SORTED)
log.msg("Created new read group: %s" % SORTED)
## Go through each read and write it into the new vector.
reads = sAI[fRow:lRow, ]
totalSizes = reads[:, format.OFFSET_END] - reads[:, format.OFFSET_BEGIN]
for pulseDataset in uPulseDatasets:
log.msg("Processing dataset: %s" % pulseDataset)
newDS = array([0]*sum(1 + totalSizes), dtype = uPDAndType[pulseDataset])
currentStart = 0
for readIdx in xrange(0, reads.shape[0]):
read = reads[readIdx, ]
gStart, gEnd = currentStart, currentStart + totalSizes[readIdx]
newDS[gStart:gEnd] = getDataset(read, pulseDataset)[read[format.OFFSET_BEGIN]:read[format.OFFSET_END]]
currentStart = gEnd + 1
newGroup.create_dataset(pulseDataset, data = newDS, dtype = uPDAndType[pulseDataset], maxshape = None)
## After we've moved all of the data we can move the offsets.
currentStart = 0
for i in xrange(0, reads.shape[0]):
reads[i, format.OFFSET_BEGIN] = currentStart
reads[i, format.OFFSET_END] = currentStart + totalSizes[i]
reads[i, format.ALN_ID] = row
currentStart = reads[i, format.OFFSET_END] + 1
sAI[fRow:lRow,] = reads
## Now remake the AlnGroup Dataset.
log.msg("Writing new AlnGroupPath values.")
del(cH5[format.ALN_GROUP_PATH])
del(cH5[format.ALN_GROUP_ID])
cH5.create_dataset(format.ALN_GROUP_PATH, data = orderedRefPaths,
dtype = H5.new_vlen(str), maxshape = None)
cH5.create_dataset(format.ALN_GROUP_ID, data = range(0, offsets.shape[0]),
dtype = "int32", maxshape = None)
for rg in readGroupPaths.values():
del(cH5[rg])
def | (inFile, outFile, deep, jobs, log):
"""
This routine takes a cmp.h5 file and sorts the AlignmentIndex
table adding two additional columns for fast access. In addition,
a new top-level attribute is added to the indicate that the file
has been sorted, as well as a table to indicate the blocks of the
alignment index associated with each reference group.
"""
success = False;
if (outFile):
log.msg("Copying: " + inFile + " to " + outFile)
shutil.copyfile(inFile, outFile)
inFile = outFile
try:
cH5 = H5.File(inFile, 'a')
format = CmpH5Format(cH5)
log.msg("Read cmp.h5 with version %s" % format.VERSION)
aI = cH5[format.ALN_INDEX]
originalAttrs = aI.attrs.items()
## empty is a special case. In general, h5py handles
## zero-length slices poorly and therefore I don't want to
## make them. Therefore, I maintain the 'empty' variable to
## indicate that. This makes some code less pleasing, e.g.,
## computing the reference index data structure.
if (aI.shape[0] == 0):
log.warn("Warning: %s empty!" % inFile)
success = True;
return True;
# sort the AlignmentIndex
aord = lexsort([aI[:,format.TARGET_END], aI[:,format.TARGET_START],
aI[:,format.REF_ID]])
assert(len(aord) == aI.shape[0])
sAI = aI.value[aord,:]
del(aI)
log.msg("Sorted AlignmentIndex.")
# construct reference offset datastructure.
refSeqIDs = cH5[format.REF_GROUP_ID]
offsets = computeRefIndexTable(refSeqIDs.value, sAI[:,format.REF_ID])
log.msg("Constructed offset datastructure.")
# fill overlap and back columns.
for row in range(0, offsets.shape[0]):
fRow = int(offsets[row, 1])
lRow = int(offsets[row, 2])
if (lRow - fRow <= 0):
continue
sAI[fRow:lRow, (format.N_BACK, format.N_OVERLAP)] = \
computeIndicesDP(sAI[fRow:lRow, format.TARGET_START],
sAI[fRow:lRow, format.TARGET_END])
log.msg("Constructed indices.")
# modify the cmp.h5 file.
# We want to keep the chunking info on the dataset.
del(cH5[format.ALN_INDEX])
cH5.create_dataset(format.ALN_INDEX, data = sAI, dtype = h5t.NATIVE_UINT32,
maxshape = (None, None))
## If the file is already sorted there's no harm in resorting.
if (__pathExists(cH5, format.REF_OFFSET_TABLE)):
log.msg(format.REF_OFFSET_TABLE + " already exists, deleting.")
del(cH5[format.REF_OFFSET_TABLE])
## create the offset datastructure in the file.
cH5.create_dataset(format.REF_OFFSET_TABLE, data = offsets,
dtype = h5t.NATIVE_UINT32, maxshape = (None, None))
## add the index attribute.
cH5['/'].attrs.create("Index", ['REF_ID', 'TARGET_START', 'TARGET_END'])
## fixup attributes.
for oA in originalAttrs:
cH5[format.ALN_INDEX].attrs.create(oA[0], oA[1])
## deep repacking.
if (deep):
log.msg("Repacking alignment arrays.")
__repackDataArrays(cH5, format, log)
## memory free.
del sAI
## manage any extra datasets.
for extraTable in format.extraTables:
if (__pathExists(cH5, extraTable)):
log.msg("Sorting table: %s" % extraTable)
eTable = cH5[extraTable].value
if (len(eTable.shape) == 1):
eTable = eTable[aord]
else:
eTable = eTable[aord,:]
## save attributes, if any for re-writing below.
originalAttrs = cH5[extraTable].attrs.items()
del(cH5[extraTable])
cH5.create_dataset(extraTable, data = eTable,
maxshape = tuple([None for x in eTable.shape]))
for oA in originalAttrs:
cH5[extraTable].attrs.create(oA[0], oA[1])
## if you make it this far, set the flag.
success = True
except Exception, E:
log.error(E)
if (os.path.exists(outFile)):
pass
finally:
try:
cH5.close()
except:
pass
finally:
return(success)
class Loggy:
def __init__(self, level):
self.level = level
def write(self, msg, level):
if (self.level >= level): sys.stderr.write(str(msg) + "\ | sortCmpH5 | identifier_name |
cmpH5Sort.py | End = currentStart, currentStart + totalSizes[readIdx]
newDS[gStart:gEnd] = getDataset(read, pulseDataset)[read[format.OFFSET_BEGIN]:read[format.OFFSET_END]]
currentStart = gEnd + 1
newGroup.create_dataset(pulseDataset, data = newDS, dtype = uPDAndType[pulseDataset], maxshape = None)
## After we've moved all of the data we can move the offsets.
currentStart = 0
for i in xrange(0, reads.shape[0]):
reads[i, format.OFFSET_BEGIN] = currentStart
reads[i, format.OFFSET_END] = currentStart + totalSizes[i]
reads[i, format.ALN_ID] = row
currentStart = reads[i, format.OFFSET_END] + 1
sAI[fRow:lRow,] = reads
## Now remake the AlnGroup Dataset.
log.msg("Writing new AlnGroupPath values.")
del(cH5[format.ALN_GROUP_PATH])
del(cH5[format.ALN_GROUP_ID])
cH5.create_dataset(format.ALN_GROUP_PATH, data = orderedRefPaths,
dtype = H5.new_vlen(str), maxshape = None)
cH5.create_dataset(format.ALN_GROUP_ID, data = range(0, offsets.shape[0]),
dtype = "int32", maxshape = None)
for rg in readGroupPaths.values():
del(cH5[rg])
def sortCmpH5(inFile, outFile, deep, jobs, log):
"""
This routine takes a cmp.h5 file and sorts the AlignmentIndex
table adding two additional columns for fast access. In addition,
a new top-level attribute is added to the indicate that the file
has been sorted, as well as a table to indicate the blocks of the
alignment index associated with each reference group.
"""
success = False;
if (outFile):
log.msg("Copying: " + inFile + " to " + outFile)
shutil.copyfile(inFile, outFile)
inFile = outFile
try:
cH5 = H5.File(inFile, 'a')
format = CmpH5Format(cH5)
log.msg("Read cmp.h5 with version %s" % format.VERSION)
aI = cH5[format.ALN_INDEX]
originalAttrs = aI.attrs.items()
## empty is a special case. In general, h5py handles
## zero-length slices poorly and therefore I don't want to
## make them. Therefore, I maintain the 'empty' variable to
## indicate that. This makes some code less pleasing, e.g.,
## computing the reference index data structure.
if (aI.shape[0] == 0):
log.warn("Warning: %s empty!" % inFile)
success = True;
return True;
# sort the AlignmentIndex
aord = lexsort([aI[:,format.TARGET_END], aI[:,format.TARGET_START],
aI[:,format.REF_ID]])
assert(len(aord) == aI.shape[0])
sAI = aI.value[aord,:]
del(aI)
log.msg("Sorted AlignmentIndex.")
# construct reference offset datastructure.
refSeqIDs = cH5[format.REF_GROUP_ID]
offsets = computeRefIndexTable(refSeqIDs.value, sAI[:,format.REF_ID])
log.msg("Constructed offset datastructure.")
# fill overlap and back columns.
for row in range(0, offsets.shape[0]):
fRow = int(offsets[row, 1])
lRow = int(offsets[row, 2])
if (lRow - fRow <= 0):
continue
sAI[fRow:lRow, (format.N_BACK, format.N_OVERLAP)] = \
computeIndicesDP(sAI[fRow:lRow, format.TARGET_START],
sAI[fRow:lRow, format.TARGET_END])
log.msg("Constructed indices.")
# modify the cmp.h5 file.
# We want to keep the chunking info on the dataset.
del(cH5[format.ALN_INDEX])
cH5.create_dataset(format.ALN_INDEX, data = sAI, dtype = h5t.NATIVE_UINT32,
maxshape = (None, None))
## If the file is already sorted there's no harm in resorting.
if (__pathExists(cH5, format.REF_OFFSET_TABLE)):
log.msg(format.REF_OFFSET_TABLE + " already exists, deleting.")
del(cH5[format.REF_OFFSET_TABLE])
## create the offset datastructure in the file.
cH5.create_dataset(format.REF_OFFSET_TABLE, data = offsets,
dtype = h5t.NATIVE_UINT32, maxshape = (None, None))
## add the index attribute.
cH5['/'].attrs.create("Index", ['REF_ID', 'TARGET_START', 'TARGET_END'])
## fixup attributes.
for oA in originalAttrs:
cH5[format.ALN_INDEX].attrs.create(oA[0], oA[1])
## deep repacking.
if (deep):
log.msg("Repacking alignment arrays.")
__repackDataArrays(cH5, format, log)
## memory free.
del sAI
## manage any extra datasets.
for extraTable in format.extraTables:
if (__pathExists(cH5, extraTable)):
log.msg("Sorting table: %s" % extraTable)
eTable = cH5[extraTable].value
if (len(eTable.shape) == 1):
eTable = eTable[aord]
else:
eTable = eTable[aord,:]
## save attributes, if any for re-writing below.
originalAttrs = cH5[extraTable].attrs.items()
del(cH5[extraTable])
cH5.create_dataset(extraTable, data = eTable,
maxshape = tuple([None for x in eTable.shape]))
for oA in originalAttrs:
cH5[extraTable].attrs.create(oA[0], oA[1])
## if you make it this far, set the flag.
success = True
except Exception, E:
log.error(E)
if (os.path.exists(outFile)):
pass
finally:
try:
cH5.close()
except:
pass
finally:
return(success)
class Loggy:
def __init__(self, level):
self.level = level
def write(self, msg, level):
if (self.level >= level): sys.stderr.write(str(msg) + "\n")
def error(self, msg): self.write(msg, 0)
def warn(self, msg): self.write(msg, 1)
def msg(self, msg): self.write(msg, 2)
def main():
usage = \
""" %prog [options] input-file [output-file]
Sort cmp.h5 files. If output-file is unspecified the input-file is
overwritten. If there are a number of reference groups then the
indexing processing can occur in parallel.
version: """ + __VERSION__
parser = OptionParser(usage)
parser.add_option("-s", "--silent", dest = "silent", action = "store_false", \
default = False, help = "print nothing.")
parser.add_option("-v", "--verbose", dest = "verbose", action = "store_true", \
default = False, help = "print debugging information")
parser.add_option("-d", "--deep", dest = "deep", action = "store_true", default = False, \
help = "whether a deep sorting should be conducted, i.e. sort the AlignmentArrays")
parser.add_option("-j", "--jobs", dest = "jobs", default = 1, \
help = "Number of child processes to launch. This only speeds up processing if there are multiple references groups. Not yet Implemented.")
parser.add_option("--tmpDir", dest = "tmpdir", default = "/tmp", \
help = "Temporary directory to use when sorting in-place.")
(options, args) = parser.parse_args()
if (not len(args)):
parser.print_help()
exit(1)
infile = args[0]
## we do this in a temporary file because it is safer.
if (len(args) < 2):
ofile = tempfile.NamedTemporaryFile(dir=options.tmpdir)
outfile = ofile.name
else:
outfile = args[1]
log = Loggy(2 if options.verbose else 1 if not options.silent else 0)
success = sortCmpH5(infile, outfile, deep = options.deep, jobs = options.jobs, log = log)
if (not success):
log.error("Error during sorting. Exiting! Original file %s should still be intact." % infile)
exit(1)
else:
## add to the file log.
| cmpH5 = CmpH5Factory.factory.create(outfile, 'a')
cmpH5.log("cmpH5Sort.py", __VERSION__, str(datetime.datetime.now()), ' '.join(sys.argv), "Sorting")
cmpH5.close()
if (len(args) < 2):
shutil.copyfile(outfile, infile)
ofile.close()
exit(0) | conditional_block |
|
cmpH5Sort.py | keys() for x in alnGroups]
uPulseDatasets = reduce(lambda x,y: set.union(set(x), set(y)), pulseDatasets)
if (not all(map(lambda x : set(x) == uPulseDatasets, pulseDatasets))):
log.error("All alignment groups need to have the same datasets.")
raise Exception("Can only repack cmp.h5 files with consistent datasets across alignment groups.")
readGroupPaths = dict(zip(cH5[format.ALN_GROUP_ID], [ x for x in cH5[format.ALN_GROUP_PATH]]))
refGroupPaths = dict(zip(cH5[format.REF_GROUP_ID], [ x for x in cH5[format.REF_GROUP_PATH]]))
uPDAndType = dict(zip(uPulseDatasets, [ cH5[readGroupPaths.values()[0]][z].dtype for z in uPulseDatasets ]))
def getDataset(read, ds):
return(cH5[readGroupPaths[read[format.ALN_ID]]][ds])
def getRefGroup(gID):
return(cH5[refGroupPaths[gID]])
offsets = cH5[format.REF_OFFSET_TABLE].value
sAI = cH5[format.ALN_INDEX]
orderedRefPaths = [""] * offsets.shape[0]
for row in xrange(0, offsets.shape[0]):
log.msg("Processing reference group: %d of %d" % (row + 1, offsets.shape[0]))
orderedRefPaths[row] = "/".join([getRefGroup(offsets[row, 0]).name, SORTED]) | ## Don't really have to do anything if there are no references
## which aligned.
if (lRow == fRow):
continue
## Make a new Group.
newGroup = getRefGroup(offsets[row, 0]).create_group(SORTED)
log.msg("Created new read group: %s" % SORTED)
## Go through each read and write it into the new vector.
reads = sAI[fRow:lRow, ]
totalSizes = reads[:, format.OFFSET_END] - reads[:, format.OFFSET_BEGIN]
for pulseDataset in uPulseDatasets:
log.msg("Processing dataset: %s" % pulseDataset)
newDS = array([0]*sum(1 + totalSizes), dtype = uPDAndType[pulseDataset])
currentStart = 0
for readIdx in xrange(0, reads.shape[0]):
read = reads[readIdx, ]
gStart, gEnd = currentStart, currentStart + totalSizes[readIdx]
newDS[gStart:gEnd] = getDataset(read, pulseDataset)[read[format.OFFSET_BEGIN]:read[format.OFFSET_END]]
currentStart = gEnd + 1
newGroup.create_dataset(pulseDataset, data = newDS, dtype = uPDAndType[pulseDataset], maxshape = None)
## After we've moved all of the data we can move the offsets.
currentStart = 0
for i in xrange(0, reads.shape[0]):
reads[i, format.OFFSET_BEGIN] = currentStart
reads[i, format.OFFSET_END] = currentStart + totalSizes[i]
reads[i, format.ALN_ID] = row
currentStart = reads[i, format.OFFSET_END] + 1
sAI[fRow:lRow,] = reads
## Now remake the AlnGroup Dataset.
log.msg("Writing new AlnGroupPath values.")
del(cH5[format.ALN_GROUP_PATH])
del(cH5[format.ALN_GROUP_ID])
cH5.create_dataset(format.ALN_GROUP_PATH, data = orderedRefPaths,
dtype = H5.new_vlen(str), maxshape = None)
cH5.create_dataset(format.ALN_GROUP_ID, data = range(0, offsets.shape[0]),
dtype = "int32", maxshape = None)
for rg in readGroupPaths.values():
del(cH5[rg])
def sortCmpH5(inFile, outFile, deep, jobs, log):
"""
This routine takes a cmp.h5 file and sorts the AlignmentIndex
table adding two additional columns for fast access. In addition,
a new top-level attribute is added to the indicate that the file
has been sorted, as well as a table to indicate the blocks of the
alignment index associated with each reference group.
"""
success = False;
if (outFile):
log.msg("Copying: " + inFile + " to " + outFile)
shutil.copyfile(inFile, outFile)
inFile = outFile
try:
cH5 = H5.File(inFile, 'a')
format = CmpH5Format(cH5)
log.msg("Read cmp.h5 with version %s" % format.VERSION)
aI = cH5[format.ALN_INDEX]
originalAttrs = aI.attrs.items()
## empty is a special case. In general, h5py handles
## zero-length slices poorly and therefore I don't want to
## make them. Therefore, I maintain the 'empty' variable to
## indicate that. This makes some code less pleasing, e.g.,
## computing the reference index data structure.
if (aI.shape[0] == 0):
log.warn("Warning: %s empty!" % inFile)
success = True;
return True;
# sort the AlignmentIndex
aord = lexsort([aI[:,format.TARGET_END], aI[:,format.TARGET_START],
aI[:,format.REF_ID]])
assert(len(aord) == aI.shape[0])
sAI = aI.value[aord,:]
del(aI)
log.msg("Sorted AlignmentIndex.")
# construct reference offset datastructure.
refSeqIDs = cH5[format.REF_GROUP_ID]
offsets = computeRefIndexTable(refSeqIDs.value, sAI[:,format.REF_ID])
log.msg("Constructed offset datastructure.")
# fill overlap and back columns.
for row in range(0, offsets.shape[0]):
fRow = int(offsets[row, 1])
lRow = int(offsets[row, 2])
if (lRow - fRow <= 0):
continue
sAI[fRow:lRow, (format.N_BACK, format.N_OVERLAP)] = \
computeIndicesDP(sAI[fRow:lRow, format.TARGET_START],
sAI[fRow:lRow, format.TARGET_END])
log.msg("Constructed indices.")
# modify the cmp.h5 file.
# We want to keep the chunking info on the dataset.
del(cH5[format.ALN_INDEX])
cH5.create_dataset(format.ALN_INDEX, data = sAI, dtype = h5t.NATIVE_UINT32,
maxshape = (None, None))
## If the file is already sorted there's no harm in resorting.
if (__pathExists(cH5, format.REF_OFFSET_TABLE)):
log.msg(format.REF_OFFSET_TABLE + " already exists, deleting.")
del(cH5[format.REF_OFFSET_TABLE])
## create the offset datastructure in the file.
cH5.create_dataset(format.REF_OFFSET_TABLE, data = offsets,
dtype = h5t.NATIVE_UINT32, maxshape = (None, None))
## add the index attribute.
cH5['/'].attrs.create("Index", ['REF_ID', 'TARGET_START', 'TARGET_END'])
## fixup attributes.
for oA in originalAttrs:
cH5[format.ALN_INDEX].attrs.create(oA[0], oA[1])
## deep repacking.
if (deep):
log.msg("Repacking alignment arrays.")
__repackDataArrays(cH5, format, log)
## memory free.
del sAI
## manage any extra datasets.
for extraTable in format.extraTables:
if (__pathExists(cH5, extraTable)):
log.msg("Sorting table: %s" % extraTable)
eTable = cH5[extraTable].value
if (len(eTable.shape) == 1):
eTable = eTable[aord]
else:
eTable = eTable[aord,:]
## save attributes, if any for re-writing below.
originalAttrs = cH5[extraTable].attrs.items()
del(cH5[extraTable])
cH5.create_dataset(extraTable, data = eTable,
maxshape = tuple([None for x in eTable.shape]))
for oA in originalAttrs:
cH5[extraTable].attrs.create(oA[0], oA[1])
## if you make it this far, set the flag.
success = True
except Exception, E:
log.error(E)
if (os.path.exists(outFile)):
pass
finally:
try:
cH5.close()
except:
pass
finally:
return(success)
class Loggy:
def __init__(self, level):
self.level = level
def write(self, msg, level):
if (self.level >= level): sys.stderr.write(str(msg) + "\ |
fRow = int(offsets[row, 1])
lRow = int(offsets[row, 2])
| random_line_split |
main.rs | impl TypeMapKey for db::MyDbContext {
type Value = db::MyDbContext;
}
impl TypeMapKey for autopanic::Gramma {
type Value = autopanic::Gramma;
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn guild_create(&self, ctx: Context, guild: Guild, is_new: bool) {
let mut data = ctx.data.write().await;
let mut dbcontext = data
.get_mut::<MyDbContext>()
.expect("Expected MyDbContext in TypeMap.");
let id = &guild.id.0;
if let Some(s) = dbcontext.fetch_settings(id).await {
println!("Found guild {} settings", id);
dbcontext.cache.insert(*id, s);
} else {
println!("Creating a new settings row for guild {}", id);
dbcontext.add_guild(id).await; // also adds to cache
//greet_new_guild(&ctx, &guild).await;
};
set_status(&ctx).await;
}
async fn channel_pins_update(&self, ctx: Context, _pins: ChannelPinsUpdateEvent) {
println!("yeet doing a garbage run");
garbage_collect(&ctx);
println!("done");
}
async fn guild_member_addition(&self, ctx: Context, guild_id: GuildId, mut new_member: Member) {
println!("new member joined {}: {}", guild_id, new_member.user.name);
{
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild_id.0);
mom.recent_users.insert(
new_member
.joined_at
.unwrap()
.timestamp_millis()
.try_into()
.unwrap(),
new_member.user.id.0,
);
}
check_against_joins(&ctx, guild_id.0).await;
check_against_blacklist(&ctx, new_member, guild_id.0).await;
}
async fn message(&self, ctx: Context, new_message: Message) {
/*
if new_message.content.len() > 20_usize {
println!("Message! {}...", &new_message.content[..19]);
} else {
println!("Message! {}", &new_message.content);
}*/
// we use the message timestamp instead of time::now because of potential lag of events
let timestamp: u64 = new_message.timestamp.timestamp_millis().try_into().unwrap();
let guild = new_message.guild_id.unwrap().0;
let author = new_message.author.id.0;
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild);
if !new_message.mentions.is_empty() {
mom.userpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if !new_message.mention_roles.is_empty() {
mom.rollpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if !new_message.mention_roles.is_empty() || !new_message.mentions.is_empty() {
autopanic::check_against_pings(&ctx, mom, guild).await;
}
}
async fn ready(&self, ctx: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
pub async fn better_default_channel(guild: &Guild, uid: UserId) -> Option<Vec<&GuildChannel>> {
let member = guild.members.get(&uid)?;
let mut out = vec![];
for channel in guild.channels.values() {
if channel.kind == ChannelType::Text
&& guild
.user_permissions_in(channel, member)
.ok()?
.send_messages()
&& guild
.user_permissions_in(channel, member)
.ok()?
.read_messages()
{
let x = guild.user_permissions_in(channel, member).expect("goo");
//return Some(channel);
dbg!(x);
println!("{:?}", x.bits);
println!("{}", channel.name);
out.push(channel);
}
}
if out.is_empty() {
None
} else {
Some(out)
}
}
async fn greet_new_guild(ctx: &Context, guild: &Guild) {
println!("h");
if let Some(channelvec) = better_default_channel(guild, UserId(802019556801511424_u64)).await {
println!("i");
for channel in channelvec {
println!("{}", channel.name);
let res = channel.say(&ctx, "
Thanks for adding me to the server! Here's some next steps:\n
Configure who can run most commands (like turning on or off panic mode): run `bb-settings set roll_that_can_panic Staff` for example (if you have a roll called Staff)\n
I recommend that you set up a log channel for me to talk in (and set it like `bb-settings set logs #mychannel` but replace mychannel with the actual one) \n
Also probs tell me a roll for me to ping when I automatically detect a raid and go into panic mode (`bb-settings set notify raidresponders` - replacing raidresponders with that roll)\n
Reviewing default settings is recommended - `bb-settings` and adjust them as you wish. `bb-help` shows all my commands.\n
If you find yourself needing support, there's a support server invite in `bb-about`\
").await;
if res.is_ok() {
return;
}
}
} else {
println!(
"hey i wanted to greet {} {} but they wont let everyone talk",
guild.name, guild.id.0
);
}
}
async fn set_status(ctx: &Context) {
ctx.shard.set_status(OnlineStatus::DoNotDisturb);
let s = format!("to {} guilds | bb-help", ctx.cache.guild_count().await);
ctx.shard.set_activity(Some(Activity::listening(&*s)));
}
#[group]
#[commands(panic, uinfo, forceban, help, delete)]
struct General;
#[group]
#[commands(about, ping, die, update, free, git_push, garbage, foo)] // status)]
struct Meta;
#[group]
// Sets multiple prefixes for a group.
// This requires us to call commands in this group
// via `~emoji` (or `~em`) instead of just `~`.
#[prefixes("settings", "s")]
// Set a description to appear if a user wants to display a single group
// e.g. via help using the group-name or one of its prefixes.
// Summary only appears when listing multiple groups.
// Sets a command that will be executed if only a group-prefix was passed.
#[default_command(show)]
#[commands(reset, set)]
struct Settings;
#[group]
#[prefixes("blacklist", "bl")]
#[default_command(blacklist_show)]
#[commands(remove, add)]
struct Blacklist;
#[hook] // this appears not to work
async fn before(ctx: &Context, msg: &Message, command_name: &str) -> bool {
println!(
"Got command '{}' by user '{}'",
command_name, msg.author.name
);
true // if `before` returns false, command processing doesn't happen.
}
#[hook]
async fn after(_ctx: &Context, _msg: &Message, command_name: &str, command_result: CommandResult) {
match command_result {
Ok(()) => println!("Processed command '{}'", command_name),
Err(why) => println!("Command '{}' returned error {:?}", command_name, why),
}
}
#[hook]
async fn unknown_command(_ctx: &Context, _msg: &Message, unknown_command_name: &str) {
println!("Could not find command named '{}'", unknown_command_name);
}
#[hook]
async fn | (ctx: &Context, msg: &Message, error: DispatchError) {
if let DispatchError::Ratelimited(info) = error {
// We notify them only once.
if info.is_first_try {
let _ = msg
.channel_id
.say(
&ctx.http,
&format!("Try this again in {} seconds.", info.as_secs()),
)
.await;
}
}
}
#[tokio::main]
async fn main() {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
println!("{:?}", since_the_epoch);
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN").expect("Expected a token in the environment");
let http = Http::new_with_token(&token);
// We will fetch your bot's owners and id
let bot_id = match http.get_current_application_info().await {
Ok(_) => match http.get_current_user().await {
Ok(bot_id) => bot_id.id,
Err(why) => panic!("Could not access the bot id: {:?}", why),
},
Err(why) => panic!("Could not access application info: {:?}", why),
};
let framework = StandardFramework::new()
.configure(|c| {
c.with_whitespace(true)
.on_mention(Some(bot_id))
.prefix(" | dispatch_error | identifier_name |
main.rs | impl TypeMapKey for db::MyDbContext {
type Value = db::MyDbContext;
}
impl TypeMapKey for autopanic::Gramma {
type Value = autopanic::Gramma;
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn guild_create(&self, ctx: Context, guild: Guild, is_new: bool) {
let mut data = ctx.data.write().await;
let mut dbcontext = data
.get_mut::<MyDbContext>()
.expect("Expected MyDbContext in TypeMap.");
let id = &guild.id.0;
if let Some(s) = dbcontext.fetch_settings(id).await {
println!("Found guild {} settings", id);
dbcontext.cache.insert(*id, s);
} else {
println!("Creating a new settings row for guild {}", id);
dbcontext.add_guild(id).await; // also adds to cache
//greet_new_guild(&ctx, &guild).await;
};
set_status(&ctx).await;
}
async fn channel_pins_update(&self, ctx: Context, _pins: ChannelPinsUpdateEvent) {
println!("yeet doing a garbage run");
garbage_collect(&ctx);
println!("done");
}
async fn guild_member_addition(&self, ctx: Context, guild_id: GuildId, mut new_member: Member) {
println!("new member joined {}: {}", guild_id, new_member.user.name);
{
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild_id.0);
mom.recent_users.insert(
new_member
.joined_at
.unwrap()
.timestamp_millis()
.try_into()
.unwrap(),
new_member.user.id.0,
);
}
check_against_joins(&ctx, guild_id.0).await;
check_against_blacklist(&ctx, new_member, guild_id.0).await;
}
async fn message(&self, ctx: Context, new_message: Message) {
/*
if new_message.content.len() > 20_usize {
println!("Message! {}...", &new_message.content[..19]);
} else {
println!("Message! {}", &new_message.content);
}*/
// we use the message timestamp instead of time::now because of potential lag of events
let timestamp: u64 = new_message.timestamp.timestamp_millis().try_into().unwrap();
let guild = new_message.guild_id.unwrap().0;
let author = new_message.author.id.0;
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild);
if !new_message.mentions.is_empty() {
mom.userpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if !new_message.mention_roles.is_empty() {
mom.rollpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if !new_message.mention_roles.is_empty() || !new_message.mentions.is_empty() {
autopanic::check_against_pings(&ctx, mom, guild).await;
}
}
async fn ready(&self, ctx: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
pub async fn better_default_channel(guild: &Guild, uid: UserId) -> Option<Vec<&GuildChannel>> {
let member = guild.members.get(&uid)?;
let mut out = vec![];
for channel in guild.channels.values() {
if channel.kind == ChannelType::Text
&& guild
.user_permissions_in(channel, member)
.ok()?
.send_messages()
&& guild
.user_permissions_in(channel, member)
.ok()?
.read_messages()
{
let x = guild.user_permissions_in(channel, member).expect("goo");
//return Some(channel);
dbg!(x);
println!("{:?}", x.bits);
println!("{}", channel.name);
out.push(channel);
}
}
if out.is_empty() | else {
Some(out)
}
}
async fn greet_new_guild(ctx: &Context, guild: &Guild) {
println!("h");
if let Some(channelvec) = better_default_channel(guild, UserId(802019556801511424_u64)).await {
println!("i");
for channel in channelvec {
println!("{}", channel.name);
let res = channel.say(&ctx, "
Thanks for adding me to the server! Here's some next steps:\n
Configure who can run most commands (like turning on or off panic mode): run `bb-settings set roll_that_can_panic Staff` for example (if you have a roll called Staff)\n
I recommend that you set up a log channel for me to talk in (and set it like `bb-settings set logs #mychannel` but replace mychannel with the actual one) \n
Also probs tell me a roll for me to ping when I automatically detect a raid and go into panic mode (`bb-settings set notify raidresponders` - replacing raidresponders with that roll)\n
Reviewing default settings is recommended - `bb-settings` and adjust them as you wish. `bb-help` shows all my commands.\n
If you find yourself needing support, there's a support server invite in `bb-about`\
").await;
if res.is_ok() {
return;
}
}
} else {
println!(
"hey i wanted to greet {} {} but they wont let everyone talk",
guild.name, guild.id.0
);
}
}
async fn set_status(ctx: &Context) {
ctx.shard.set_status(OnlineStatus::DoNotDisturb);
let s = format!("to {} guilds | bb-help", ctx.cache.guild_count().await);
ctx.shard.set_activity(Some(Activity::listening(&*s)));
}
#[group]
#[commands(panic, uinfo, forceban, help, delete)]
struct General;
#[group]
#[commands(about, ping, die, update, free, git_push, garbage, foo)] // status)]
struct Meta;
#[group]
// Sets multiple prefixes for a group.
// This requires us to call commands in this group
// via `~emoji` (or `~em`) instead of just `~`.
#[prefixes("settings", "s")]
// Set a description to appear if a user wants to display a single group
// e.g. via help using the group-name or one of its prefixes.
// Summary only appears when listing multiple groups.
// Sets a command that will be executed if only a group-prefix was passed.
#[default_command(show)]
#[commands(reset, set)]
struct Settings;
#[group]
#[prefixes("blacklist", "bl")]
#[default_command(blacklist_show)]
#[commands(remove, add)]
struct Blacklist;
#[hook] // this appears not to work
async fn before(ctx: &Context, msg: &Message, command_name: &str) -> bool {
println!(
"Got command '{}' by user '{}'",
command_name, msg.author.name
);
true // if `before` returns false, command processing doesn't happen.
}
#[hook]
async fn after(_ctx: &Context, _msg: &Message, command_name: &str, command_result: CommandResult) {
match command_result {
Ok(()) => println!("Processed command '{}'", command_name),
Err(why) => println!("Command '{}' returned error {:?}", command_name, why),
}
}
#[hook]
async fn unknown_command(_ctx: &Context, _msg: &Message, unknown_command_name: &str) {
println!("Could not find command named '{}'", unknown_command_name);
}
#[hook]
async fn dispatch_error(ctx: &Context, msg: &Message, error: DispatchError) {
if let DispatchError::Ratelimited(info) = error {
// We notify them only once.
if info.is_first_try {
let _ = msg
.channel_id
.say(
&ctx.http,
&format!("Try this again in {} seconds.", info.as_secs()),
)
.await;
}
}
}
#[tokio::main]
async fn main() {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
println!("{:?}", since_the_epoch);
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN").expect("Expected a token in the environment");
let http = Http::new_with_token(&token);
// We will fetch your bot's owners and id
let bot_id = match http.get_current_application_info().await {
Ok(_) => match http.get_current_user().await {
Ok(bot_id) => bot_id.id,
Err(why) => panic!("Could not access the bot id: {:?}", why),
},
Err(why) => panic!("Could not access application info: {:?}", why),
};
let framework = StandardFramework::new()
.configure(|c| {
c.with_whitespace(true)
.on_mention(Some(bot_id))
.prefix(" | {
None
} | conditional_block |
main.rs | impl TypeMapKey for db::MyDbContext {
type Value = db::MyDbContext;
}
impl TypeMapKey for autopanic::Gramma {
type Value = autopanic::Gramma;
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn guild_create(&self, ctx: Context, guild: Guild, is_new: bool) {
let mut data = ctx.data.write().await;
let mut dbcontext = data
.get_mut::<MyDbContext>()
.expect("Expected MyDbContext in TypeMap.");
let id = &guild.id.0;
if let Some(s) = dbcontext.fetch_settings(id).await {
println!("Found guild {} settings", id);
dbcontext.cache.insert(*id, s);
} else {
println!("Creating a new settings row for guild {}", id);
dbcontext.add_guild(id).await; // also adds to cache
//greet_new_guild(&ctx, &guild).await;
};
set_status(&ctx).await;
}
async fn channel_pins_update(&self, ctx: Context, _pins: ChannelPinsUpdateEvent) {
println!("yeet doing a garbage run");
garbage_collect(&ctx);
println!("done");
}
async fn guild_member_addition(&self, ctx: Context, guild_id: GuildId, mut new_member: Member) {
println!("new member joined {}: {}", guild_id, new_member.user.name);
{
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild_id.0);
mom.recent_users.insert(
new_member
.joined_at
.unwrap()
.timestamp_millis()
.try_into()
.unwrap(),
new_member.user.id.0,
);
}
check_against_joins(&ctx, guild_id.0).await;
check_against_blacklist(&ctx, new_member, guild_id.0).await;
}
async fn message(&self, ctx: Context, new_message: Message) {
/*
if new_message.content.len() > 20_usize {
println!("Message! {}...", &new_message.content[..19]);
} else {
println!("Message! {}", &new_message.content);
}*/
// we use the message timestamp instead of time::now because of potential lag of events
let timestamp: u64 = new_message.timestamp.timestamp_millis().try_into().unwrap();
let guild = new_message.guild_id.unwrap().0;
let author = new_message.author.id.0;
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild);
if !new_message.mentions.is_empty() {
mom.userpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if !new_message.mention_roles.is_empty() {
mom.rollpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if !new_message.mention_roles.is_empty() || !new_message.mentions.is_empty() {
autopanic::check_against_pings(&ctx, mom, guild).await;
}
}
async fn ready(&self, ctx: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
| pub async fn better_default_channel(guild: &Guild, uid: UserId) -> Option<Vec<&GuildChannel>> {
let member = guild.members.get(&uid)?;
let mut out = vec![];
for channel in guild.channels.values() {
if channel.kind == ChannelType::Text
&& guild
.user_permissions_in(channel, member)
.ok()?
.send_messages()
&& guild
.user_permissions_in(channel, member)
.ok()?
.read_messages()
{
let x = guild.user_permissions_in(channel, member).expect("goo");
//return Some(channel);
dbg!(x);
println!("{:?}", x.bits);
println!("{}", channel.name);
out.push(channel);
}
}
if out.is_empty() {
None
} else {
Some(out)
}
}
async fn greet_new_guild(ctx: &Context, guild: &Guild) {
println!("h");
if let Some(channelvec) = better_default_channel(guild, UserId(802019556801511424_u64)).await {
println!("i");
for channel in channelvec {
println!("{}", channel.name);
let res = channel.say(&ctx, "
Thanks for adding me to the server! Here's some next steps:\n
Configure who can run most commands (like turning on or off panic mode): run `bb-settings set roll_that_can_panic Staff` for example (if you have a roll called Staff)\n
I recommend that you set up a log channel for me to talk in (and set it like `bb-settings set logs #mychannel` but replace mychannel with the actual one) \n
Also probs tell me a roll for me to ping when I automatically detect a raid and go into panic mode (`bb-settings set notify raidresponders` - replacing raidresponders with that roll)\n
Reviewing default settings is recommended - `bb-settings` and adjust them as you wish. `bb-help` shows all my commands.\n
If you find yourself needing support, there's a support server invite in `bb-about`\
").await;
if res.is_ok() {
return;
}
}
} else {
println!(
"hey i wanted to greet {} {} but they wont let everyone talk",
guild.name, guild.id.0
);
}
}
async fn set_status(ctx: &Context) {
ctx.shard.set_status(OnlineStatus::DoNotDisturb);
let s = format!("to {} guilds | bb-help", ctx.cache.guild_count().await);
ctx.shard.set_activity(Some(Activity::listening(&*s)));
}
#[group]
#[commands(panic, uinfo, forceban, help, delete)]
struct General;
#[group]
#[commands(about, ping, die, update, free, git_push, garbage, foo)] // status)]
struct Meta;
#[group]
// Sets multiple prefixes for a group.
// This requires us to call commands in this group
// via `~emoji` (or `~em`) instead of just `~`.
#[prefixes("settings", "s")]
// Set a description to appear if a user wants to display a single group
// e.g. via help using the group-name or one of its prefixes.
// Summary only appears when listing multiple groups.
// Sets a command that will be executed if only a group-prefix was passed.
#[default_command(show)]
#[commands(reset, set)]
struct Settings;
#[group]
#[prefixes("blacklist", "bl")]
#[default_command(blacklist_show)]
#[commands(remove, add)]
struct Blacklist;
#[hook] // this appears not to work
async fn before(ctx: &Context, msg: &Message, command_name: &str) -> bool {
println!(
"Got command '{}' by user '{}'",
command_name, msg.author.name
);
true // if `before` returns false, command processing doesn't happen.
}
#[hook]
async fn after(_ctx: &Context, _msg: &Message, command_name: &str, command_result: CommandResult) {
match command_result {
Ok(()) => println!("Processed command '{}'", command_name),
Err(why) => println!("Command '{}' returned error {:?}", command_name, why),
}
}
#[hook]
async fn unknown_command(_ctx: &Context, _msg: &Message, unknown_command_name: &str) {
println!("Could not find command named '{}'", unknown_command_name);
}
#[hook]
async fn dispatch_error(ctx: &Context, msg: &Message, error: DispatchError) {
if let DispatchError::Ratelimited(info) = error {
// We notify them only once.
if info.is_first_try {
let _ = msg
.channel_id
.say(
&ctx.http,
&format!("Try this again in {} seconds.", info.as_secs()),
)
.await;
}
}
}
#[tokio::main]
async fn main() {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
println!("{:?}", since_the_epoch);
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN").expect("Expected a token in the environment");
let http = Http::new_with_token(&token);
// We will fetch your bot's owners and id
let bot_id = match http.get_current_application_info().await {
Ok(_) => match http.get_current_user().await {
Ok(bot_id) => bot_id.id,
Err(why) => panic!("Could not access the bot id: {:?}", why),
},
Err(why) => panic!("Could not access application info: {:?}", why),
};
let framework = StandardFramework::new()
.configure(|c| {
c.with_whitespace(true)
.on_mention(Some(bot_id))
.prefix("bb | random_line_split |
|
charisma.js | + '<p><b>消息内容:</b>' + obj.guid + '</p>'
+ content;
$('#confirm #txt').html(content);
url = js_getEntry(_project_title, 'Message', 'ajaxdel?guid=' + id);
$('#confirm #btn-submit').attr('data-url', url);
});
$('#confirm #btn-submit').live('click', function(e){
e.preventDefault();
url = $(this).attr('data-url');
$.get(url, function(data){
alert(data.text);
location.reload();
});
});
var servers_txt = $.cookie('current_servers_txt');
//$('#servers a[text="'+servers_txt+'"]').find('i').addClass('icon-ok');
// $('#servers a[data-id="'+servers_txt+'"]').find('i').addClass('icon-ok');
// var o2 = $('#servers a[data-id="'+servers_txt+'ssdsd"]');
// var o3 = $('#servers');
if(servers_txt != null)
$('#current-server').text($.cookie('current_servers_txt'));
$('.li-server').live('click', function(e){
e.preventDefault();
current_server_txt=$(this).attr('data-id');
current_server=$(this).attr('data-value');
$.cookie('current_servers',current_server,{expires:365});
$.cookie('current_servers_txt',current_server_txt,{expires:365});
// switch_theme(current_theme);
$('#current-server').text(current_server_txt);
$('#servers i').removeClass('icon-ok');
$(this).find('i').addClass('icon-ok');
$('#connectpopup').modal('show');
$.get(js_getEntry(_project_title, 'Server', 'ajaxconnect'),
{
name: current_server,
},
function(data){
if(data.result == 1)
//alert(data.str);
$('#connectpopup #txt').text("连接成功:" + data.str);
else
alert("无法连接...");
});
});
$('#connectpopup').on('hidden', function(){
location.reload();
// alert($.cookie("current_server"));
// $('#servers #' + $.cookie("current_server")).removeClass('icon-ok');
});
function switch_theme(theme_name)
{
$('#bs-css').attr('href','css/bootstrap-'+theme_name+'.css');
}
//disbaling some functions for Internet Explorer
if($.browser.msie)
{
$('#is-ajax').prop('checked',false);
$('#for-is-ajax').hide();
$('#toggle-fullscreen').hide();
$('.login-box').find('.input-large'). | 'span10');
}
//highlight current / active link
$('ul.main-menu li a').each(function(){
if($($(this))[0].href==String(window.location))
$(this).parent().addClass('active');
});
//establish history variables
var
History = window.History, // Note: We are using a capital H instead of a lower h
State = History.getState(),
$log = $('#log');
//bind to State Change
History.Adapter.bind(window,'statechange',function(){ // Note: We are using statechange instead of popstate
var State = History.getState(); // Note: We are using History.getState() instead of event.state
$.ajax({
url:State.url,
success:function(msg){
$('#content').html($(msg).find('#content').html());
$('#loading').remove();
$('#content').fadeIn();
var newTitle = $(msg).filter('title').text();
$('title').text(newTitle);
docReady();
}
});
});
//ajaxify menus
$('a.ajax-link').click(function(e){
if($.browser.msie) e.which=1;
if(e.which!=1 || !$('#is-ajax').prop('checked') || $(this).parent().hasClass('active')) return;
e.preventDefault();
if($('.btn-navbar').is(':visible'))
{
$('.btn-navbar').click();
}
$('#loading').remove();
$('#content').fadeOut().parent().append('<div id="loading" class="center">Loading...<div class="center"></div></div>');
var $clink=$(this);
History.pushState(null, null, $clink.attr('href'));
$('ul.main-menu li.active').removeClass('active');
$clink.parent('li').addClass('active');
});
//animating menus on hover
$('ul.main-menu li:not(.nav-header)').hover(function(){
$(this).animate({'margin-left':'+=5'},300);
},
function(){
$(this).animate({'margin-left':'-=5'},300);
});
//other things to do on document ready, seperated for ajax calls
docReady();
});
function docReady(){
//prevent # links from moving to top
$('a[href="#"][data-top!=true]').click(function(e){
e.preventDefault();
});
//rich text editor
$('.cleditor').cleditor({
height: 180,
});
//datepicker
$('.datepicker').datepicker();
// $('.datetimepicker').datetimepicker();
//notifications
$('.noty').click(function(e){
e.preventDefault();
var options = $.parseJSON($(this).attr('data-noty-options'));
noty(options);
});
//uniform - styler for checkbox, radio and file input
$("input:checkbox, input:radio, input:file").not('[data-no-uniform="true"],#uniform-is-ajax').uniform();
//chosen - improves select
$('[data-rel="chosen"],[rel="chosen"]').chosen();
//tabs
$('#myTab a:first').tab('show');
$('#myTab a').click(function (e) {
e.preventDefault();
$(this).tab('show');
});
//makes elements soratble, elements that sort need to have id attribute to save the result
$('.sortable').sortable({
revert:true,
cancel:'.btn,.box-content,.nav-header',
update:function(event,ui){
//line below gives the ids of elements, you can make ajax call here to save it to the database
//console.log($(this).sortable('toArray'));
}
});
//slider
$('.slider').slider({range:true,values:[10,65]});
//tooltip
$('[rel="tooltip"],[data-rel="tooltip"]').tooltip({"placement":"bottom",delay: { show: 400, hide: 200 }});
//auto grow textarea
$('textarea.autogrow').autogrow();
//file manager
var elf = $('.file-manager').elfinder({
url : 'misc/elfinder-connector/connector.php' // connector URL (REQUIRED)
}).elfinder('instance');
//iOS / iPhone style toggle switch
$('.iphone-toggle').iphoneStyle();
//star rating
$('.raty').raty({
score : 4 //default stars
});
//uploadify - multiple uploads
$('#file_upload').uploadify({
'swf' : 'misc/uploadify.swf',
'uploader' : 'misc/uploadify.php'
// Put your options here
});
//gallery controlls container animation
// $('ul.gallery li').hover(function(){
// $('img',this).fadeToggle(1000);
// $(this).find('.gallery-controls').remove();
// $(this).append('<div class="well gallery-controls">'+
// '<p><a href="#" class="gallery-edit btn"><i class="icon-edit"></i></a> <a href="#" class="gallery-delete btn"><i class="icon-remove"></i></a></p>'+
// '</div>');
// $(this).find('.gallery-controls').stop().animate({'margin-top':'-1'},400,'easeInQuint');
// },function(){
// $('img',this).fadeToggle(1000);
// $(this).find('.gallery-controls').stop().animate({'margin-top':'-30'},200,'easeInQuint',function(){
// $(this).remove();
// });
// });
//gallery image controls example
//gallery delete
$('.thumbnails').on('click','.gallery-delete',function(e){
e.preventDefault();
//get image id
//alert($(this).parents('.thumbnail').attr('id'));
$(this).parents('.thumbnail').fadeOut();
});
//gallery edit
$('.thumbnails').on('click','.gallery-edit',function(e){
e.preventDefault();
//get image id
//alert($(this).parents('.thumbnail').attr('id'));
});
//gallery colorbox
// $('.thumbnail a').colorbox({rel:'thumbnail a', transition:"elastic", maxWidth:"95%", maxHeight:"95%"});
//datatable
$('.datatable').dataTable({
"sDom": "<'row-fluid'<'span6'l><'span6'f>r>t<'row-fluid'<'span12'i><'span12 center'p>>",
"sPaginationType": "bootstrap",
"aaSorting": [ [5,'desc'], [6,' | removeClass( | identifier_name |
charisma.js | i += 1)
d1.push([i, parseInt(Math.random() * 30)]);
var d2 = [];
for (var i = 0; i <= 10; i += 1)
d2.push([i, parseInt(Math.random() * 30)]);
var d3 = [];
for (var i = 0; i <= 10; i += 1)
d3.push([i, parseInt(Math.random() * 30)]);
var stack = 0, bars = true, lines = false, steps = false;
function plotWithOptions() {
$.plot($("#stackchart"), [ d1, d2, d3 ], {
series: {
stack: stack,
lines: { show: lines, fill: true, steps: steps },
bars: { show: bars, barWidth: 0.6 }
}
});
}
plotWithOptions();
$(".stackControls input").click(function (e) {
e.preventDefault();
stack = $(this).val() == "With stacking" ? true : null;
plotWithOptions();
});
$(".graphControls input").click(function (e) {
e.preventDefault();
bars = $(this).val().indexOf("Bars") != -1;
lines = $(this).val().indexOf("Lines") != -1;
steps = $(this).val().indexOf("steps") != -1;
plotWithOptions();
});
}
//pie chart
var data = [
{ label: "Internet Explorer", data: 12},
{ label: "Mobile", data: 27},
{ label: "Safari", data: 85},
{ label: "Opera", data: 64},
{ label: "Firefox", data: 90},
{ label: "Chrome", data: 112}
];
if($("#piechart").length)
{
$.plot($("#piechart"), data,
{
series: {
pie: {
show: true
}
},
grid: {
hoverable: true,
clickable: true
},
legend: {
show: false
}
});
function pieHover(event, pos, obj)
{
if (!obj)
return;
percent = parseFloat(obj.series.percent).toFixed(2);
$("#hover").html('<span style="font-weight: bold; color: '+obj.series.color+'">'+obj.series.label+' ('+percent+'%)</span>');
}
$("#piechart").bind("plothover", pieHover);
}
//donut chart
if($("#donutchart").length)
{
$.plot($("#donutchart"), data,
{
series: {
pie: {
innerRadius: 0.5,
show: true
}
},
legend: {
show: false
}
});
}
// we use an inline data source in the example, usually data would
// be fetched from a server
var data = [], totalPoints = 300;
function getRandomData() {
if (data.length > 0)
data = data.slice(1);
// do a random walk
while (data.length < totalPoints) {
var prev = data.length > 0 ? data[data.length - 1] : 50;
var y = prev + Math.random() * 10 - 5;
if (y < 0)
y = 0;
if (y > 100)
y = 100;
data.push(y);
}
// zip the generated y values with the x values
var res = [];
for (var i = 0; i < data.length; ++i)
res.push([i, data[i]])
return res;
}
// setup control widget
var updateInterval = 30;
$("#updateInterval").val(updateInterval).change(function () {
var v = $(this).val();
if (v && !isNaN(+v)) {
updateInterval = +v;
if (updateInterval < 1)
updateInterval = 1;
if (updateInterval > 2000)
updateInterval = 2000;
$(this).val("" + updateInterval);
}
});
//realtime chart
if($("#realtimechart").length)
{
var options = {
series: { shadowSize: 1 }, // drawing is faster without shadows
yaxis: { min: 0, max: 100 },
xaxis: { show: false }
};
var plot = $.plot($("#realtimechart"), [ getRandomData() ], options);
function update() {
plot.setData([ getRandomData() ]);
// since the axes don't change, we don't need to call plot.setupGrid()
plot.draw();
setTimeout(update, updateInterval);
}
update();
}
}
//additional functions for data table
$.fn.dataTableExt.oApi.fnPagingInfo = function ( oSettings )
{
return {
"iStart": oSettings._iDisplayStart,
"iEnd": oSettings.fnDisplayEnd(),
"iLength": oSettings._iDisplayLength,
"iTotal": oSettings.fnRecordsTotal(),
"iFilteredTotal": oSettings.fnRecordsDisplay(),
"iPage": Math.ceil( oSettings._iDisplayStart / oSettings._iDisplayLength ),
"iTotalPages": Math.ceil( oSettings.fnRecordsDisplay() / oSettings._iDisplayLength )
};
}
$.extend( $.fn.dataTableExt.oPagination, {
"bootstrap": {
"fnInit": function( oSettings, nPaging, fnDraw ) {
var oLang = oSettings.oLanguage.oPaginate;
var fnClickHandler = function ( e ) {
e.preventDefault();
if ( oSettings.oApi._fnPageChange(oSettings, e.data.action) ) {
fnDraw( oSettings );
}
};
$(nPaging).addClass('pagination').append(
'<ul>'+
'<li class="prev disabled"><a href="#">← '+oLang.sPrevious+'</a></li>'+
'<li class="next disabled"><a href="#">'+oLang.sNext+' → </a></li>'+
'</ul>'
);
var els = $('a', nPaging);
$(els[0]).bind( 'click.DT', { action: "previous" }, fnClickHandler );
$(els[1]).bind( 'click.DT', { action: "next" }, fnClickHandler );
},
"fnUpdate": function ( oSettings, fnDraw ) {
var iListLength = 5;
var oPaging = oSettings.oInstance.fnPagingInfo();
var an = oSettings.aanFeatures.p;
var i, j, sClass, iStart, iEnd, iHalf=Math.floor(iListLength/2);
if ( oPaging.iTotalPages < iListLength) {
iStart = 1;
iEnd = oPaging.iTotalPages;
}
else if ( oPaging.iPage <= iHalf ) {
iStart = 1;
iEnd = iListLength;
} else if ( oPaging.iPage >= (oPaging.iTotalPages-iHalf) ) {
iStart = oPaging.iTotalPages - iListLength + 1;
iEnd = oPaging.iTotalPages;
} else {
iStart = oPaging.iPage - iHalf + 1;
iEnd = iStart + iListLength - 1;
}
for ( i=0, iLen=an.length ; i<iLen ; i++ ) {
// remove the middle elements
$('li:gt(0)', an[i]).filter(':not(:last)').remove();
// add the new list items and their event handlers
for ( j=iStart ; j<=iEnd ; j++ ) {
sClass = (j==oPaging.iPage+1) ? 'class="active"' : '';
$('<li '+sClass+'><a href="#">'+j+'</a></li>')
.insertBefore( $('li:last', an[i])[0] )
.bind('click', function (e) {
e.preventDefault();
oSettings._iDisplayStart = (parseInt($('a', this).text(),10)-1) * oPaging.iLength;
fnDraw( oSettings );
} );
}
// add / remove disabled classes from the static elements
if ( oPaging.iPage === 0 ) {
$('li:first', an[i]).addClass('disabled');
} else {
$('li:first', an[i]).removeClass('disabled'); | }
if ( oPaging.iPage === oPaging.iTotalPages-1 || oPaging.iTotalPages === 0 ) {
$('li:last', an[i]).addClass('disabled');
} else { | random_line_split |
|
charisma.js | });
$('#addBoard').click(function(e){
e.preventDefault();
$('#myModal1').modal('show');
});
$('#addServer').click(function(e){
e.preventDefault();
$('#myModal2').modal('show');
});
$('.modifyBoard').click(function(e){
e.preventDefault();
var srctxt = $(this).attr('data-json');
var txt = '[' + unescape(srctxt) + ']';
var obj = eval(txt)[0];
$('#myModal').modal('show');
$('form#modifyboardform #boardid').attr("value", obj.guid);
$('form#modifyboardform #boardpriority').attr("value", obj.priority);
$('form#modifyboardform #boardtitle').attr("value", obj.title);
var s1 = $('form#modifyboardform #boardselect').attr("value");
if(obj.isValid == "1")
$('form#modifyboardform #boardselect').get(0).selectedIndex = 0;
else
$('form#modifyboardform #boardselect').get(0).selectedIndex = 1;
$('form#modifyboardform #jmpselect').get(0).selectedIndex = parseInt(obj.type) - 1;
var o = $('form#modifyboardform #boardcontent').cleditor()[0];
$('form#modifyboardform #boardcontent').val(obj.content.replace(/\+/g, " "));
o.updateFrame();
});
$('.modifyServer').click(function(e){
e.preventDefault();
var txt = $(this).attr('data-json');
var obj = eval("[" + txt + "]")[0];
$('#myModal3').modal('show');
$('form#modifyserverform #serverid').attr("value", obj.id);
$('form#modifyserverform #servername').attr("value", obj.name);
$('form#modifyserverform #serverhost').attr("value", obj.host);
$('form#modifyserverform #serverport').attr("value", obj.port);
$('form#modifyserverform #serverinfo').attr("value", obj.text);
$('form#modifyserverform #serveruser').attr("value", obj.username);
$('form#modifyserverform #serverpassword').attr("value", obj.password);
$('form #modifyserverform #serverstatus1').attr("checked", "checked");
$('form #modifyserverform #serverstatus2').attr("checked", "");
//$('#myModal').modal('show');
});
$('.btn-moveup').live("click", function(e){
e.preventDefault();
var tr_obj = $(this).parents("tr");
var index = tr_obj.index();
if(index <= 0)
{
alert("Can not move up!");
}else
{
var tr_pre = tr_obj.prev();
var sortid = tr_obj.attr("priority");
var gid0 = tr_obj.attr("guid");
var gid1 = tr_pre.attr("guid");
$('#connectpopup').modal('show');
var url = js_getEntry(_project_title, "Board", "ajaxsort");
$.get(url,
{
guid0: gid0,
guid1: gid1
},
function(data){
//location.reload();
$('#connectpopup').modal('hide');
var text0 = tr_obj.html();
var text1 = tr_pre.html();
tr_obj.html(text1);
tr_pre.html(text0);
});
}
});
$('.btn-movedown').live("click", function(e){
e.preventDefault();
var tr_obj = $(this).parents("tr");
var index = tr_obj.index();
if(index >= tr_obj.parent().children().size() - 1)
{
alert("Can not move down!");
}else
{
var tr_next = tr_obj.next();
var sortid = tr_obj.attr("priority");
var gid0 = tr_obj.attr("guid");
var gid1 = tr_next.attr("guid");
var url = js_getEntry(_project_title, "Board", "ajaxsort");
$('#connectpopup').modal('show');
$.get(url,
{
guid0: gid0,
guid1: gid1
},
function(data){
// alert(data.result);
// location.reload();
$('#connectpopup').modal('hide');
var text0 = tr_obj.html();
var text1 = tr_next.html();
tr_obj.html(text1);
tr_next.html(text0);
});
//location.reload();
}
});
$('.btn-exchange').live("click", function(e){
e.preventDefault();
var tr_obj = $(this).parents("tr");
var tr_nxt = tr_obj.next();
var text0 = tr_obj.html();
var text1 = tr_nxt.html();
tr_obj.html(text1);
tr_nxt.html(text0);
var tds = tr_obj.children('td').toArray();
});
//initialize the external events for calender
$('#external-events div.external-event').each(function() {
// it doesn't need to have a start or end
var eventObject = {
title: $.trim($(this).text()) // use the element's text as the event title
};
// store the Event Object in the DOM element so we can get to it later
$(this).data('eventObject', eventObject);
// make the event draggable using jQuery UI
$(this).draggable({
zIndex: 999,
revert: true, // will cause the event to go back to its
revertDuration: 0 // original position after the drag
});
});
//stack chart
if($("#stackchart").length)
{
var d1 = [];
for (var i = 0; i <= 10; i += 1)
d1.push([i, parseInt(Math.random() * 30)]);
var d2 = [];
for (var i = 0; i <= 10; i += 1)
d2.push([i, parseInt(Math.random() * 30)]);
var d3 = [];
for (var i = 0; i <= 10; i += 1)
d3.push([i, parseInt(Math.random() * 30)]);
var stack = 0, bars = true, lines = false, steps = false;
function plotWithOptions() {
$.plot($("#stackchart"), [ d1, d2, d3 ], {
series: {
stack: stack,
lines: { show: lines, fill: true, steps: steps },
bars: { show: bars, barWidth: 0.6 }
}
});
}
plotWithOptions();
$(".stackControls input").click(function (e) {
e.preventDefault();
stack = $(this).val() == "With stacking" ? true : null;
plotWithOptions();
});
$(".graphControls input").click(function (e) {
e.preventDefault();
bars = $(this).val().indexOf("Bars") != -1;
lines = $(this).val().indexOf("Lines") != -1;
steps = $(this).val().indexOf("steps") != -1;
plotWithOptions();
});
}
//pie chart
var data = [
{ label: "Internet Explorer", data: 12},
{ label: "Mobile", data: 27},
{ label: "Safari", data: 85},
{ label: "Opera", data: 64},
{ label: "Firefox", data: 90},
{ label: "Chrome", data: 112}
];
if($("#piechart").length)
{
$.plot($("#piechart"), data,
{
series: {
pie: {
show: true
}
},
grid: {
hoverable: true,
clickable: true
},
legend: {
show: false
}
});
function pieHover(event, pos, obj)
{
if (!obj)
return;
percent = parseFloat(obj.series.percent).toFixed(2);
$("#hover").html('<span style="font-weight: bold; color: '+obj.series.color+'">'+obj.series.label+' ('+percent+'%)</span>');
}
$("#piechart").bind("plothover", pieHover);
}
//donut chart
if($("#donutchart").length)
{
$.plot($("#donutchart"), data,
{
series: {
pie: {
innerRadius: 0.5,
show: true
}
},
legend: {
show: false
}
});
| }
// we use an inline data source in the example, usually data would
// be fetched from a server
var data = [], totalPoints = 300;
function getRandomData() {
if (data.length > 0)
data = data.slice(1);
| identifier_body |
|
api_op_CreateRoute.go | 192.0.2.3 , and the route table includes the following two IPv4
// routes:
// - 192.0.2.0/24 (goes to some target A)
// - 192.0.2.0/28 (goes to some target B)
//
// Both routes apply to the traffic destined for 192.0.2.3 . However, the second
// route in the list covers a smaller number of IP addresses and is therefore more
// specific, so we use that route to determine where to target the traffic. For
// more information about route tables, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon VPC User Guide.
func (c *Client) CreateRoute(ctx context.Context, params *CreateRouteInput, optFns ...func(*Options)) (*CreateRouteOutput, error) {
	// Treat a nil input as an empty request so callers may pass nil safely.
	if params == nil {
		params = &CreateRouteInput{}
	}

	// Dispatch through the shared operation pipeline; the middleware stack is
	// assembled by addOperationCreateRouteMiddlewares.
	raw, metadata, err := c.invokeOperation(ctx, "CreateRoute", params, optFns, c.addOperationCreateRouteMiddlewares)
	if err != nil {
		return nil, err
	}

	resp := raw.(*CreateRouteOutput)
	resp.ResultMetadata = metadata
	return resp, nil
}
// CreateRouteInput contains the parameters for the CreateRoute operation.
// Exactly one target (gateway, NAT gateway, instance, network interface, etc.)
// and one destination (CIDR block or prefix list) should be supplied.
type CreateRouteInput struct {
	// The ID of the route table for the route.
	//
	// This member is required.
	RouteTableId *string
	// The ID of the carrier gateway. You can only use this option when the VPC
	// contains a subnet which is associated with a Wavelength Zone.
	CarrierGatewayId *string
	// The Amazon Resource Name (ARN) of the core network.
	CoreNetworkArn *string
	// The IPv4 CIDR address block used for the destination match. Routing decisions
	// are based on the most specific match. We modify the specified CIDR block to its
	// canonical form; for example, if you specify 100.68.0.18/18 , we modify it to
	// 100.68.0.0/18 .
	DestinationCidrBlock *string
	// The IPv6 CIDR block used for the destination match. Routing decisions are based
	// on the most specific match.
	DestinationIpv6CidrBlock *string
	// The ID of a prefix list used for the destination match.
	DestinationPrefixListId *string
	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have the
	// required permissions, the error response is DryRunOperation . Otherwise, it is
	// UnauthorizedOperation .
	DryRun *bool
	// [IPv6 traffic only] The ID of an egress-only internet gateway.
	EgressOnlyInternetGatewayId *string
	// The ID of an internet gateway or virtual private gateway attached to your VPC.
	GatewayId *string
	// The ID of a NAT instance in your VPC. The operation fails if you specify an
	// instance ID unless exactly one network interface is attached.
	InstanceId *string
	// The ID of the local gateway.
	LocalGatewayId *string
	// [IPv4 traffic only] The ID of a NAT gateway.
	NatGatewayId *string
	// The ID of a network interface.
	NetworkInterfaceId *string
	// The ID of a transit gateway.
	TransitGatewayId *string
	// The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.
	VpcEndpointId *string
	// The ID of a VPC peering connection.
	VpcPeeringConnectionId *string
	noSmithyDocumentSerde
}
// CreateRouteOutput contains the response from the CreateRoute operation.
type CreateRouteOutput struct {
	// Returns true if the request succeeds; otherwise, it returns an error.
	Return *bool
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
	noSmithyDocumentSerde
}
func (c *Client) addOperationCreateRouteMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsEc2query_serializeOpCreateRoute{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsEc2query_deserializeOpCreateRoute{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateRouteResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateRouteValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRoute(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateRoute(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ec2",
OperationName: "CreateRoute",
}
}
type opCreateRouteResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateRouteResolveEndpointMiddleware) ID() string |
func (m *opCreateRouteResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "ec2"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "ec2"
} else {
signingName = *v4Scheme | {
return "ResolveEndpointV2"
} | identifier_body |
api_op_CreateRoute.go | address 192.0.2.3 , and the route table includes the following two IPv4
// routes:
// - 192.0.2.0/24 (goes to some target A)
// - 192.0.2.0/28 (goes to some target B)
//
// Both routes apply to the traffic destined for 192.0.2.3 . However, the second
// route in the list covers a smaller number of IP addresses and is therefore more
// specific, so we use that route to determine where to target the traffic. For
// more information about route tables, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon VPC User Guide.
func (c *Client) CreateRoute(ctx context.Context, params *CreateRouteInput, optFns ...func(*Options)) (*CreateRouteOutput, error) {
if params == nil {
params = &CreateRouteInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateRoute", params, optFns, c.addOperationCreateRouteMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateRouteOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateRouteInput struct {
// The ID of the route table for the route.
//
// This member is required.
RouteTableId *string
// The ID of the carrier gateway. You can only use this option when the VPC
// contains a subnet which is associated with a Wavelength Zone.
CarrierGatewayId *string
// The Amazon Resource Name (ARN) of the core network.
CoreNetworkArn *string
// The IPv4 CIDR address block used for the destination match. Routing decisions
// are based on the most specific match. We modify the specified CIDR block to its
// canonical form; for example, if you specify 100.68.0.18/18 , we modify it to
// 100.68.0.0/18 .
DestinationCidrBlock *string
// The IPv6 CIDR block used for the destination match. Routing decisions are based
// on the most specific match.
DestinationIpv6CidrBlock *string
// The ID of a prefix list used for the destination match.
DestinationPrefixListId *string
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have the
// required permissions, the error response is DryRunOperation . Otherwise, it is
// UnauthorizedOperation .
DryRun *bool
// [IPv6 traffic only] The ID of an egress-only internet gateway.
EgressOnlyInternetGatewayId *string
// The ID of an internet gateway or virtual private gateway attached to your VPC.
GatewayId *string
// The ID of a NAT instance in your VPC. The operation fails if you specify an
// instance ID unless exactly one network interface is attached.
InstanceId *string
// The ID of the local gateway.
LocalGatewayId *string
// [IPv4 traffic only] The ID of a NAT gateway.
NatGatewayId *string
// The ID of a network interface.
NetworkInterfaceId *string
// The ID of a transit gateway.
TransitGatewayId *string
// The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.
VpcEndpointId *string
// The ID of a VPC peering connection.
VpcPeeringConnectionId *string
noSmithyDocumentSerde
}
type CreateRouteOutput struct {
// Returns true if the request succeeds; otherwise, it returns an error.
Return *bool
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateRouteMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsEc2query_serializeOpCreateRoute{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsEc2query_deserializeOpCreateRoute{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateRouteResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateRouteValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRoute(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateRoute(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ec2",
OperationName: "CreateRoute",
}
}
type opCreateRouteResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateRouteResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateRouteResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
| for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "ec2"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "ec2"
} else {
signingName = *v4Scheme.Sign | req.URL = &resolvedEndpoint.URI
| random_line_split |
api_op_CreateRoute.go | 192.0.2.3 , and the route table includes the following two IPv4
// routes:
// - 192.0.2.0/24 (goes to some target A)
// - 192.0.2.0/28 (goes to some target B)
//
// Both routes apply to the traffic destined for 192.0.2.3 . However, the second
// route in the list covers a smaller number of IP addresses and is therefore more
// specific, so we use that route to determine where to target the traffic. For
// more information about route tables, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon VPC User Guide.
func (c *Client) CreateRoute(ctx context.Context, params *CreateRouteInput, optFns ...func(*Options)) (*CreateRouteOutput, error) {
if params == nil {
params = &CreateRouteInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateRoute", params, optFns, c.addOperationCreateRouteMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateRouteOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateRouteInput struct {
// The ID of the route table for the route.
//
// This member is required.
RouteTableId *string
// The ID of the carrier gateway. You can only use this option when the VPC
// contains a subnet which is associated with a Wavelength Zone.
CarrierGatewayId *string
// The Amazon Resource Name (ARN) of the core network.
CoreNetworkArn *string
// The IPv4 CIDR address block used for the destination match. Routing decisions
// are based on the most specific match. We modify the specified CIDR block to its
// canonical form; for example, if you specify 100.68.0.18/18 , we modify it to
// 100.68.0.0/18 .
DestinationCidrBlock *string
// The IPv6 CIDR block used for the destination match. Routing decisions are based
// on the most specific match.
DestinationIpv6CidrBlock *string
// The ID of a prefix list used for the destination match.
DestinationPrefixListId *string
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have the
// required permissions, the error response is DryRunOperation . Otherwise, it is
// UnauthorizedOperation .
DryRun *bool
// [IPv6 traffic only] The ID of an egress-only internet gateway.
EgressOnlyInternetGatewayId *string
// The ID of an internet gateway or virtual private gateway attached to your VPC.
GatewayId *string
// The ID of a NAT instance in your VPC. The operation fails if you specify an
// instance ID unless exactly one network interface is attached.
InstanceId *string
// The ID of the local gateway.
LocalGatewayId *string
// [IPv4 traffic only] The ID of a NAT gateway.
NatGatewayId *string
// The ID of a network interface.
NetworkInterfaceId *string
// The ID of a transit gateway.
TransitGatewayId *string
// The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.
VpcEndpointId *string
// The ID of a VPC peering connection.
VpcPeeringConnectionId *string
noSmithyDocumentSerde
}
type CreateRouteOutput struct {
// Returns true if the request succeeds; otherwise, it returns an error.
Return *bool
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateRouteMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsEc2query_serializeOpCreateRoute{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsEc2query_deserializeOpCreateRoute{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateRouteResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateRouteValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRoute(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateRoute(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ec2",
OperationName: "CreateRoute",
}
}
type opCreateRouteResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateRouteResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateRouteResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "ec2"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil | else {
signingName = *v4Scheme.Sign | {
signingName = "ec2"
} | conditional_block |
api_op_CreateRoute.go | 192.0.2.3 , and the route table includes the following two IPv4
// routes:
// - 192.0.2.0/24 (goes to some target A)
// - 192.0.2.0/28 (goes to some target B)
//
// Both routes apply to the traffic destined for 192.0.2.3 . However, the second
// route in the list covers a smaller number of IP addresses and is therefore more
// specific, so we use that route to determine where to target the traffic. For
// more information about route tables, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon VPC User Guide.
func (c *Client) CreateRoute(ctx context.Context, params *CreateRouteInput, optFns ...func(*Options)) (*CreateRouteOutput, error) {
if params == nil {
params = &CreateRouteInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateRoute", params, optFns, c.addOperationCreateRouteMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateRouteOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateRouteInput struct {
// The ID of the route table for the route.
//
// This member is required.
RouteTableId *string
// The ID of the carrier gateway. You can only use this option when the VPC
// contains a subnet which is associated with a Wavelength Zone.
CarrierGatewayId *string
// The Amazon Resource Name (ARN) of the core network.
CoreNetworkArn *string
// The IPv4 CIDR address block used for the destination match. Routing decisions
// are based on the most specific match. We modify the specified CIDR block to its
// canonical form; for example, if you specify 100.68.0.18/18 , we modify it to
// 100.68.0.0/18 .
DestinationCidrBlock *string
// The IPv6 CIDR block used for the destination match. Routing decisions are based
// on the most specific match.
DestinationIpv6CidrBlock *string
// The ID of a prefix list used for the destination match.
DestinationPrefixListId *string
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have the
// required permissions, the error response is DryRunOperation . Otherwise, it is
// UnauthorizedOperation .
DryRun *bool
// [IPv6 traffic only] The ID of an egress-only internet gateway.
EgressOnlyInternetGatewayId *string
// The ID of an internet gateway or virtual private gateway attached to your VPC.
GatewayId *string
// The ID of a NAT instance in your VPC. The operation fails if you specify an
// instance ID unless exactly one network interface is attached.
InstanceId *string
// The ID of the local gateway.
LocalGatewayId *string
// [IPv4 traffic only] The ID of a NAT gateway.
NatGatewayId *string
// The ID of a network interface.
NetworkInterfaceId *string
// The ID of a transit gateway.
TransitGatewayId *string
// The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.
VpcEndpointId *string
// The ID of a VPC peering connection.
VpcPeeringConnectionId *string
noSmithyDocumentSerde
}
type CreateRouteOutput struct {
// Returns true if the request succeeds; otherwise, it returns an error.
Return *bool
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) | (stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsEc2query_serializeOpCreateRoute{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsEc2query_deserializeOpCreateRoute{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateRouteResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateRouteValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRoute(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateRoute(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ec2",
OperationName: "CreateRoute",
}
}
type opCreateRouteResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateRouteResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateRouteResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "ec2"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "ec2"
} else {
signingName = *v4Scheme.Sign | addOperationCreateRouteMiddlewares | identifier_name |
irc_comm.rs | msg: S2,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
| ));
Ok(())
})?;
}
match wrapped_msg.len() {
0 => Ok(None),
1 => Ok(Some(wrapped_msg.remove(0))),
_ => Ok(Some(LibReaction::Multi(wrapped_msg.into_vec()))),
}
}
fn compose_msgs<S1, S2, M>(
&self,
dest: MsgDest,
addressee: S1,
msgs: M,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
M: IntoIterator<Item = S2>,
{
// Not `SmallVec`, because we're guessing that the caller expects multiple messages.
let mut output = Vec::new();
for msg in msgs {
match self.compose_msg(dest, addressee.borrow(), msg)? {
Some(m) => output.push(m),
None => {}
}
}
match output.len() {
0 => Ok(None),
1 => Ok(Some(output.remove(0))),
_ => Ok(Some(LibReaction::Multi(output))),
}
}
fn prefix_len(&self, server_id: ServerId) -> Result<usize> {
Ok(self.read_msg_prefix(server_id)?.len())
}
}
fn wrap_msg<F>(
state: &State,
MsgDest { server_id, target }: MsgDest,
msg: &str,
mut f: F,
) -> Result<()>
where
F: FnMut(&str) -> Result<()>,
{
// :nick!user@host PRIVMSG target :message
// :nick!user@host NOTICE target :message
let raw_len_limit = 512;
let punctuation_len = {
let line_terminator_len = 2;
let spaces = 3;
let colons = 2;
colons + spaces + line_terminator_len
};
let cmd_len = "PRIVMSG".len();
let metadata_len = state.prefix_len(server_id)? + cmd_len + target.len() + punctuation_len;
let msg_len_limit = raw_len_limit - metadata_len;
if msg.len() < msg_len_limit {
return f(msg);
}
let mut split_end_idx = 0;
let lines = msg.match_indices(char::is_whitespace)
.peekable()
.batching(|iter| {
debug_assert!(msg.len() >= msg_len_limit);
let split_start_idx = split_end_idx;
if split_start_idx >= msg.len() {
return None;
}
while let Some(&(next_space_idx, _)) = iter.peek() {
if msg[split_start_idx..next_space_idx].len() < msg_len_limit {
split_end_idx = next_space_idx;
iter.next();
} else {
break;
}
}
if iter.peek().is_none() {
split_end_idx = msg.len()
} else if split_end_idx <= split_start_idx {
split_end_idx = cmp::min(split_start_idx + msg_len_limit, msg.len())
}
Some(msg[split_start_idx..split_end_idx].trim())
});
for line in lines {
f(line)?
}
Ok(())
}
fn handle_reaction(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: &str,
reaction: Reaction,
bot_nick: String,
) -> Result<Option<LibReaction<Message>>> {
let (reply_target, reply_addressee) = if target == bot_nick {
(prefix.parse().nick.unwrap(), "")
} else {
(target, prefix.parse().nick.unwrap_or(""))
};
let reply_dest = MsgDest {
server_id,
target: reply_target,
};
match reaction {
Reaction::None => Ok(None),
Reaction::Msg(s) => state.compose_msg(reply_dest, "", &s),
Reaction::Msgs(a) => state.compose_msgs(reply_dest, "", a.iter()),
Reaction::Reply(s) => state.compose_msg(reply_dest, reply_addressee, &s),
Reaction::Replies(a) => state.compose_msgs(reply_dest, reply_addressee, a.iter()),
Reaction::RawMsg(s) => Ok(Some(LibReaction::RawMsg(s.parse()?))),
Reaction::Quit(msg) => Ok(Some(mk_quit(msg))),
}
}
fn handle_bot_command_or_trigger(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: String,
msg: String,
bot_nick: String,
) -> Option<LibReaction<Message>> {
let reaction = (|| {
let metadata = MsgMetadata {
prefix: prefix.parse(),
dest: MsgDest {
server_id,
target: &target,
},
};
let cmd_ln = parse_msg_to_nick(&msg, metadata.dest.target, &bot_nick).unwrap_or("");
let mut cmd_name_and_args = cmd_ln.splitn(2, char::is_whitespace);
let cmd_name = cmd_name_and_args.next().unwrap_or("");
let cmd_args = cmd_name_and_args.next().unwrap_or("").trim();
if let Some(r) = bot_cmd::run(state, cmd_name, cmd_args, &metadata)? {
Ok(bot_command_reaction(cmd_name, r))
} else if let Some(r) = trigger::run_any_matching(state, cmd_ln, &metadata)? {
Ok(bot_command_reaction("<trigger>", r))
} else {
Ok(Reaction::None)
}
})();
match reaction
.and_then(|reaction| handle_reaction(state, server_id, prefix, &target, reaction, bot_nick))
{
Ok(r) => r,
Err(e) => Some(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
target,
format!("Encountered error while trying to handle message: {}", e),
).into(),
)),
}
}
fn bot_command_reaction(cmd_name: &str, result: BotCmdResult) -> Reaction {
let cmd_result = match result {
BotCmdResult::Ok(r) => Ok(r),
BotCmdResult::Unauthorized => Err(format!(
"My apologies, but you do not appear to have sufficient \
authority to use my {:?} command.",
cmd_name
).into()),
BotCmdResult::SyntaxErr => Err("Syntax error. Try my `help` command.".into()),
BotCmdResult::ArgMissing(arg_name) => Err(format!(
"Syntax error: For command {:?}, the argument {:?} \
is required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::ArgMissing1To1(arg_name) => Err(format!(
"Syntax error: When command {:?} is used \
outside of a channel, the argument {:?} is \
required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::LibErr(e) => Err(format!("Error: {}", e).into()),
BotCmdResult::UserErrMsg(s) => Err(format!("User error: {}", s).into()),
BotCmdResult::BotErrMsg(s) => Err(format!("Internal error: {}", s).into()),
};
match cmd_result {
Ok(r) => r,
Err(s) => Reaction::Msg(s),
}
}
pub fn mk_quit<'a>(msg: Option<Cow<'a, str>>) -> LibReaction<Message> {
let quit = aatxe::Command::QUIT(
msg.map(Cow::into_owned)
.or_else(|| Some(pkg_info::BRIEF_CREDITS_STRING.clone())),
).into();
LibReaction::RawMsg(quit)
}
pub(super) fn handle_msg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
input_msg: Message,
) -> Result<()> {
trace!(
"[{}] Received {:?}",
state.server_socket_addr_dbg_string(server_id),
input_msg.to_string().trim_right_matches("\r\n")
);
match input_msg {
Message {
command: aatxe::Command::PRIVMSG(target, msg),
prefix,
..
} => handle_privmsg(
state,
server_id,
outbox,
OwningMsgPrefix::from_string(prefix.unwrap_or_default()),
target,
msg,
),
Message {
command: aatxe::Command::Response(aatxe::Response::RPL_MYINFO, ..),
..
} => {
push_to_outbox(outbox, server_id, handle_004(state, server_id)?);
Ok(())
}
_ => Ok(()),
}
}
fn handle_privmsg(
state: | {
let final_msg = format!(
"{}{}{}",
addressee.borrow(),
if addressee.borrow().is_empty() {
""
} else {
&self.addressee_suffix
},
msg,
);
info!("Sending message to {:?}: {:?}", dest, final_msg);
let mut wrapped_msg = SmallVec::<[_; 1]>::new();
for input_line in final_msg.lines() {
wrap_msg(self, dest, input_line, |output_line| {
wrapped_msg.push(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(dest.target.to_owned(), output_line.to_owned()).into(), | identifier_body |
irc_comm.rs | msg: S2,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
{
let final_msg = format!(
"{}{}{}",
addressee.borrow(),
if addressee.borrow().is_empty() {
""
} else {
&self.addressee_suffix
},
msg,
);
info!("Sending message to {:?}: {:?}", dest, final_msg);
let mut wrapped_msg = SmallVec::<[_; 1]>::new();
for input_line in final_msg.lines() {
wrap_msg(self, dest, input_line, |output_line| {
wrapped_msg.push(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(dest.target.to_owned(), output_line.to_owned()).into(),
));
Ok(())
})?;
}
match wrapped_msg.len() {
0 => Ok(None),
1 => Ok(Some(wrapped_msg.remove(0))),
_ => Ok(Some(LibReaction::Multi(wrapped_msg.into_vec()))),
}
}
fn compose_msgs<S1, S2, M>(
&self,
dest: MsgDest,
addressee: S1,
msgs: M,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
M: IntoIterator<Item = S2>,
{
// Not `SmallVec`, because we're guessing that the caller expects multiple messages.
let mut output = Vec::new();
for msg in msgs {
match self.compose_msg(dest, addressee.borrow(), msg)? {
Some(m) => output.push(m),
None => {}
}
}
match output.len() {
0 => Ok(None),
1 => Ok(Some(output.remove(0))),
_ => Ok(Some(LibReaction::Multi(output))),
}
}
fn prefix_len(&self, server_id: ServerId) -> Result<usize> {
Ok(self.read_msg_prefix(server_id)?.len())
}
}
fn wrap_msg<F>(
state: &State,
MsgDest { server_id, target }: MsgDest,
msg: &str,
mut f: F,
) -> Result<()>
where
F: FnMut(&str) -> Result<()>,
{
// :nick!user@host PRIVMSG target :message
// :nick!user@host NOTICE target :message
let raw_len_limit = 512;
let punctuation_len = {
let line_terminator_len = 2;
let spaces = 3;
let colons = 2;
colons + spaces + line_terminator_len
};
let cmd_len = "PRIVMSG".len();
let metadata_len = state.prefix_len(server_id)? + cmd_len + target.len() + punctuation_len;
let msg_len_limit = raw_len_limit - metadata_len;
if msg.len() < msg_len_limit {
return f(msg);
}
let mut split_end_idx = 0;
let lines = msg.match_indices(char::is_whitespace)
.peekable()
.batching(|iter| {
debug_assert!(msg.len() >= msg_len_limit);
let split_start_idx = split_end_idx;
if split_start_idx >= msg.len() {
return None;
}
while let Some(&(next_space_idx, _)) = iter.peek() {
if msg[split_start_idx..next_space_idx].len() < msg_len_limit {
split_end_idx = next_space_idx;
iter.next();
} else {
break;
}
}
if iter.peek().is_none() {
split_end_idx = msg.len()
} else if split_end_idx <= split_start_idx {
split_end_idx = cmp::min(split_start_idx + msg_len_limit, msg.len())
}
Some(msg[split_start_idx..split_end_idx].trim())
});
for line in lines {
f(line)?
}
Ok(())
}
fn handle_reaction(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: &str,
reaction: Reaction,
bot_nick: String,
) -> Result<Option<LibReaction<Message>>> {
let (reply_target, reply_addressee) = if target == bot_nick {
(prefix.parse().nick.unwrap(), "")
} else {
(target, prefix.parse().nick.unwrap_or(""))
};
let reply_dest = MsgDest {
server_id,
target: reply_target,
};
match reaction {
Reaction::None => Ok(None),
Reaction::Msg(s) => state.compose_msg(reply_dest, "", &s),
Reaction::Msgs(a) => state.compose_msgs(reply_dest, "", a.iter()),
Reaction::Reply(s) => state.compose_msg(reply_dest, reply_addressee, &s),
Reaction::Replies(a) => state.compose_msgs(reply_dest, reply_addressee, a.iter()),
Reaction::RawMsg(s) => Ok(Some(LibReaction::RawMsg(s.parse()?))),
Reaction::Quit(msg) => Ok(Some(mk_quit(msg))),
}
}
fn handle_bot_command_or_trigger(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: String,
msg: String,
bot_nick: String,
) -> Option<LibReaction<Message>> {
let reaction = (|| {
let metadata = MsgMetadata {
prefix: prefix.parse(),
dest: MsgDest {
server_id,
target: &target,
},
};
let cmd_ln = parse_msg_to_nick(&msg, metadata.dest.target, &bot_nick).unwrap_or("");
let mut cmd_name_and_args = cmd_ln.splitn(2, char::is_whitespace);
let cmd_name = cmd_name_and_args.next().unwrap_or("");
let cmd_args = cmd_name_and_args.next().unwrap_or("").trim();
if let Some(r) = bot_cmd::run(state, cmd_name, cmd_args, &metadata)? {
Ok(bot_command_reaction(cmd_name, r))
} else if let Some(r) = trigger::run_any_matching(state, cmd_ln, &metadata)? {
Ok(bot_command_reaction("<trigger>", r))
} else {
Ok(Reaction::None)
}
})();
match reaction
.and_then(|reaction| handle_reaction(state, server_id, prefix, &target, reaction, bot_nick))
{
Ok(r) => r,
Err(e) => Some(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
target,
format!("Encountered error while trying to handle message: {}", e),
).into(),
)),
}
}
fn bot_command_reaction(cmd_name: &str, result: BotCmdResult) -> Reaction {
let cmd_result = match result {
BotCmdResult::Ok(r) => Ok(r),
BotCmdResult::Unauthorized => Err(format!(
"My apologies, but you do not appear to have sufficient \
authority to use my {:?} command.",
cmd_name
).into()),
BotCmdResult::SyntaxErr => Err("Syntax error. Try my `help` command.".into()),
BotCmdResult::ArgMissing(arg_name) => Err(format!(
"Syntax error: For command {:?}, the argument {:?} \
is required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::ArgMissing1To1(arg_name) => Err(format!(
"Syntax error: When command {:?} is used \
outside of a channel, the argument {:?} is \
required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::LibErr(e) => Err(format!("Error: {}", e).into()),
BotCmdResult::UserErrMsg(s) => Err(format!("User error: {}", s).into()),
BotCmdResult::BotErrMsg(s) => Err(format!("Internal error: {}", s).into()),
};
match cmd_result {
Ok(r) => r,
Err(s) => Reaction::Msg(s),
}
}
pub fn | <'a>(msg: Option<Cow<'a, str>>) -> LibReaction<Message> {
let quit = aatxe::Command::QUIT(
msg.map(Cow::into_owned)
.or_else(|| Some(pkg_info::BRIEF_CREDITS_STRING.clone())),
).into();
LibReaction::RawMsg(quit)
}
pub(super) fn handle_msg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
input_msg: Message,
) -> Result<()> {
trace!(
"[{}] Received {:?}",
state.server_socket_addr_dbg_string(server_id),
input_msg.to_string().trim_right_matches("\r\n")
);
match input_msg {
Message {
command: aatxe::Command::PRIVMSG(target, msg),
prefix,
..
} => handle_privmsg(
state,
server_id,
outbox,
OwningMsgPrefix::from_string(prefix.unwrap_or_default()),
target,
msg,
),
Message {
command: aatxe::Command::Response(aatxe::Response::RPL_MYINFO, ..),
..
} => {
push_to_outbox(outbox, server_id, handle_004(state, server_id)?);
Ok(())
}
_ => Ok(()),
}
}
fn handle_privmsg(
state: | mk_quit | identifier_name |
irc_comm.rs | msg: S2,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
{
let final_msg = format!(
"{}{}{}",
addressee.borrow(),
if addressee.borrow().is_empty() {
""
} else {
&self.addressee_suffix
},
msg,
);
info!("Sending message to {:?}: {:?}", dest, final_msg);
let mut wrapped_msg = SmallVec::<[_; 1]>::new();
for input_line in final_msg.lines() {
wrap_msg(self, dest, input_line, |output_line| {
wrapped_msg.push(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(dest.target.to_owned(), output_line.to_owned()).into(),
));
Ok(())
})?;
}
match wrapped_msg.len() {
0 => Ok(None),
1 => Ok(Some(wrapped_msg.remove(0))),
_ => Ok(Some(LibReaction::Multi(wrapped_msg.into_vec()))),
}
}
fn compose_msgs<S1, S2, M>(
&self,
dest: MsgDest,
addressee: S1,
msgs: M,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
M: IntoIterator<Item = S2>,
{
// Not `SmallVec`, because we're guessing that the caller expects multiple messages.
let mut output = Vec::new();
for msg in msgs {
match self.compose_msg(dest, addressee.borrow(), msg)? {
Some(m) => output.push(m),
None => {}
}
}
match output.len() {
0 => Ok(None),
1 => Ok(Some(output.remove(0))),
_ => Ok(Some(LibReaction::Multi(output))),
}
}
fn prefix_len(&self, server_id: ServerId) -> Result<usize> {
Ok(self.read_msg_prefix(server_id)?.len())
}
}
fn wrap_msg<F>(
state: &State,
MsgDest { server_id, target }: MsgDest,
msg: &str,
mut f: F,
) -> Result<()>
where
F: FnMut(&str) -> Result<()>,
{
// :nick!user@host PRIVMSG target :message
// :nick!user@host NOTICE target :message
let raw_len_limit = 512;
let punctuation_len = {
let line_terminator_len = 2;
let spaces = 3;
let colons = 2;
colons + spaces + line_terminator_len
};
let cmd_len = "PRIVMSG".len();
let metadata_len = state.prefix_len(server_id)? + cmd_len + target.len() + punctuation_len;
let msg_len_limit = raw_len_limit - metadata_len;
if msg.len() < msg_len_limit {
return f(msg);
}
let mut split_end_idx = 0;
let lines = msg.match_indices(char::is_whitespace)
.peekable()
.batching(|iter| {
debug_assert!(msg.len() >= msg_len_limit);
let split_start_idx = split_end_idx;
if split_start_idx >= msg.len() {
return None;
}
while let Some(&(next_space_idx, _)) = iter.peek() {
if msg[split_start_idx..next_space_idx].len() < msg_len_limit {
split_end_idx = next_space_idx;
iter.next();
} else {
break;
}
}
if iter.peek().is_none() {
split_end_idx = msg.len()
} else if split_end_idx <= split_start_idx {
split_end_idx = cmp::min(split_start_idx + msg_len_limit, msg.len())
}
Some(msg[split_start_idx..split_end_idx].trim())
});
for line in lines {
f(line)?
}
Ok(())
}
fn handle_reaction(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: &str,
reaction: Reaction,
bot_nick: String,
) -> Result<Option<LibReaction<Message>>> {
let (reply_target, reply_addressee) = if target == bot_nick {
(prefix.parse().nick.unwrap(), "")
} else {
(target, prefix.parse().nick.unwrap_or(""))
};
let reply_dest = MsgDest {
server_id,
target: reply_target,
};
match reaction {
Reaction::None => Ok(None),
Reaction::Msg(s) => state.compose_msg(reply_dest, "", &s),
Reaction::Msgs(a) => state.compose_msgs(reply_dest, "", a.iter()),
Reaction::Reply(s) => state.compose_msg(reply_dest, reply_addressee, &s),
Reaction::Replies(a) => state.compose_msgs(reply_dest, reply_addressee, a.iter()),
Reaction::RawMsg(s) => Ok(Some(LibReaction::RawMsg(s.parse()?))),
Reaction::Quit(msg) => Ok(Some(mk_quit(msg))),
}
}
fn handle_bot_command_or_trigger(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: String,
msg: String,
bot_nick: String,
) -> Option<LibReaction<Message>> {
let reaction = (|| {
let metadata = MsgMetadata {
prefix: prefix.parse(),
dest: MsgDest {
server_id,
target: &target,
},
};
let cmd_ln = parse_msg_to_nick(&msg, metadata.dest.target, &bot_nick).unwrap_or("");
let mut cmd_name_and_args = cmd_ln.splitn(2, char::is_whitespace);
let cmd_name = cmd_name_and_args.next().unwrap_or("");
let cmd_args = cmd_name_and_args.next().unwrap_or("").trim();
if let Some(r) = bot_cmd::run(state, cmd_name, cmd_args, &metadata)? {
Ok(bot_command_reaction(cmd_name, r))
} else if let Some(r) = trigger::run_any_matching(state, cmd_ln, &metadata)? {
Ok(bot_command_reaction("<trigger>", r))
} else {
Ok(Reaction::None)
}
})();
match reaction
.and_then(|reaction| handle_reaction(state, server_id, prefix, &target, reaction, bot_nick))
{
Ok(r) => r,
Err(e) => Some(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
target,
format!("Encountered error while trying to handle message: {}", e),
).into(),
)),
}
}
fn bot_command_reaction(cmd_name: &str, result: BotCmdResult) -> Reaction {
let cmd_result = match result {
BotCmdResult::Ok(r) => Ok(r),
BotCmdResult::Unauthorized => Err(format!(
"My apologies, but you do not appear to have sufficient \
authority to use my {:?} command.",
cmd_name
).into()),
BotCmdResult::SyntaxErr => Err("Syntax error. Try my `help` command.".into()),
BotCmdResult::ArgMissing(arg_name) => Err(format!(
"Syntax error: For command {:?}, the argument {:?} \
is required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::ArgMissing1To1(arg_name) => Err(format!(
"Syntax error: When command {:?} is used \
outside of a channel, the argument {:?} is \
required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::LibErr(e) => Err(format!("Error: {}", e).into()),
BotCmdResult::UserErrMsg(s) => Err(format!("User error: {}", s).into()),
BotCmdResult::BotErrMsg(s) => Err(format!("Internal error: {}", s).into()),
};
match cmd_result {
Ok(r) => r,
Err(s) => Reaction::Msg(s),
}
}
pub fn mk_quit<'a>(msg: Option<Cow<'a, str>>) -> LibReaction<Message> {
let quit = aatxe::Command::QUIT(
msg.map(Cow::into_owned)
.or_else(|| Some(pkg_info::BRIEF_CREDITS_STRING.clone())),
).into();
LibReaction::RawMsg(quit)
}
pub(super) fn handle_msg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
input_msg: Message,
) -> Result<()> {
trace!(
"[{}] Received {:?}",
state.server_socket_addr_dbg_string(server_id),
input_msg.to_string().trim_right_matches("\r\n")
);
match input_msg {
Message {
command: aatxe::Command::PRIVMSG(target, msg),
prefix,
..
} => handle_privmsg(
state,
server_id,
outbox,
OwningMsgPrefix::from_string(prefix.unwrap_or_default()),
target,
msg,
),
Message {
command: aatxe::Command::Response(aatxe::Response::RPL_MYINFO, ..),
..
} => {
push_to_outbox(outbox, server_id, handle_004(state, server_id)?);
Ok(())
}
_ => Ok(()),
}
}
fn handle_privmsg( | state: | random_line_split |
|
irc_comm.rs | msg: S2,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
{
let final_msg = format!(
"{}{}{}",
addressee.borrow(),
if addressee.borrow().is_empty() {
""
} else {
&self.addressee_suffix
},
msg,
);
info!("Sending message to {:?}: {:?}", dest, final_msg);
let mut wrapped_msg = SmallVec::<[_; 1]>::new();
for input_line in final_msg.lines() {
wrap_msg(self, dest, input_line, |output_line| {
wrapped_msg.push(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(dest.target.to_owned(), output_line.to_owned()).into(),
));
Ok(())
})?;
}
match wrapped_msg.len() {
0 => Ok(None),
1 => Ok(Some(wrapped_msg.remove(0))),
_ => Ok(Some(LibReaction::Multi(wrapped_msg.into_vec()))),
}
}
fn compose_msgs<S1, S2, M>(
&self,
dest: MsgDest,
addressee: S1,
msgs: M,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
M: IntoIterator<Item = S2>,
{
// Not `SmallVec`, because we're guessing that the caller expects multiple messages.
let mut output = Vec::new();
for msg in msgs {
match self.compose_msg(dest, addressee.borrow(), msg)? {
Some(m) => output.push(m),
None => {}
}
}
match output.len() {
0 => Ok(None),
1 => Ok(Some(output.remove(0))),
_ => Ok(Some(LibReaction::Multi(output))),
}
}
fn prefix_len(&self, server_id: ServerId) -> Result<usize> {
Ok(self.read_msg_prefix(server_id)?.len())
}
}
fn wrap_msg<F>(
state: &State,
MsgDest { server_id, target }: MsgDest,
msg: &str,
mut f: F,
) -> Result<()>
where
F: FnMut(&str) -> Result<()>,
{
// :nick!user@host PRIVMSG target :message
// :nick!user@host NOTICE target :message
let raw_len_limit = 512;
let punctuation_len = {
let line_terminator_len = 2;
let spaces = 3;
let colons = 2;
colons + spaces + line_terminator_len
};
let cmd_len = "PRIVMSG".len();
let metadata_len = state.prefix_len(server_id)? + cmd_len + target.len() + punctuation_len;
let msg_len_limit = raw_len_limit - metadata_len;
if msg.len() < msg_len_limit {
return f(msg);
}
let mut split_end_idx = 0;
let lines = msg.match_indices(char::is_whitespace)
.peekable()
.batching(|iter| {
debug_assert!(msg.len() >= msg_len_limit);
let split_start_idx = split_end_idx;
if split_start_idx >= msg.len() {
return None;
}
while let Some(&(next_space_idx, _)) = iter.peek() {
if msg[split_start_idx..next_space_idx].len() < msg_len_limit {
split_end_idx = next_space_idx;
iter.next();
} else {
break;
}
}
if iter.peek().is_none() {
split_end_idx = msg.len()
} else if split_end_idx <= split_start_idx {
split_end_idx = cmp::min(split_start_idx + msg_len_limit, msg.len())
}
Some(msg[split_start_idx..split_end_idx].trim())
});
for line in lines {
f(line)?
}
Ok(())
}
fn handle_reaction(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: &str,
reaction: Reaction,
bot_nick: String,
) -> Result<Option<LibReaction<Message>>> {
let (reply_target, reply_addressee) = if target == bot_nick {
(prefix.parse().nick.unwrap(), "")
} else {
(target, prefix.parse().nick.unwrap_or(""))
};
let reply_dest = MsgDest {
server_id,
target: reply_target,
};
match reaction {
Reaction::None => Ok(None),
Reaction::Msg(s) => state.compose_msg(reply_dest, "", &s),
Reaction::Msgs(a) => state.compose_msgs(reply_dest, "", a.iter()),
Reaction::Reply(s) => state.compose_msg(reply_dest, reply_addressee, &s),
Reaction::Replies(a) => state.compose_msgs(reply_dest, reply_addressee, a.iter()),
Reaction::RawMsg(s) => Ok(Some(LibReaction::RawMsg(s.parse()?))),
Reaction::Quit(msg) => Ok(Some(mk_quit(msg))),
}
}
fn handle_bot_command_or_trigger(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: String,
msg: String,
bot_nick: String,
) -> Option<LibReaction<Message>> {
let reaction = (|| {
let metadata = MsgMetadata {
prefix: prefix.parse(),
dest: MsgDest {
server_id,
target: &target,
},
};
let cmd_ln = parse_msg_to_nick(&msg, metadata.dest.target, &bot_nick).unwrap_or("");
let mut cmd_name_and_args = cmd_ln.splitn(2, char::is_whitespace);
let cmd_name = cmd_name_and_args.next().unwrap_or("");
let cmd_args = cmd_name_and_args.next().unwrap_or("").trim();
if let Some(r) = bot_cmd::run(state, cmd_name, cmd_args, &metadata)? {
Ok(bot_command_reaction(cmd_name, r))
} else if let Some(r) = trigger::run_any_matching(state, cmd_ln, &metadata)? {
Ok(bot_command_reaction("<trigger>", r))
} else {
Ok(Reaction::None)
}
})();
match reaction
.and_then(|reaction| handle_reaction(state, server_id, prefix, &target, reaction, bot_nick))
{
Ok(r) => r,
Err(e) => Some(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
target,
format!("Encountered error while trying to handle message: {}", e),
).into(),
)),
}
}
fn bot_command_reaction(cmd_name: &str, result: BotCmdResult) -> Reaction {
let cmd_result = match result {
BotCmdResult::Ok(r) => Ok(r),
BotCmdResult::Unauthorized => Err(format!(
"My apologies, but you do not appear to have sufficient \
authority to use my {:?} command.",
cmd_name
).into()),
BotCmdResult::SyntaxErr => Err("Syntax error. Try my `help` command.".into()),
BotCmdResult::ArgMissing(arg_name) => Err(format!(
"Syntax error: For command {:?}, the argument {:?} \
is required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::ArgMissing1To1(arg_name) => Err(format!(
"Syntax error: When command {:?} is used \
outside of a channel, the argument {:?} is \
required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::LibErr(e) => Err(format!("Error: {}", e).into()),
BotCmdResult::UserErrMsg(s) => Err(format!("User error: {}", s).into()),
BotCmdResult::BotErrMsg(s) => Err(format!("Internal error: {}", s).into()),
};
match cmd_result {
Ok(r) => r,
Err(s) => Reaction::Msg(s),
}
}
pub fn mk_quit<'a>(msg: Option<Cow<'a, str>>) -> LibReaction<Message> {
let quit = aatxe::Command::QUIT(
msg.map(Cow::into_owned)
.or_else(|| Some(pkg_info::BRIEF_CREDITS_STRING.clone())),
).into();
LibReaction::RawMsg(quit)
}
pub(super) fn handle_msg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
input_msg: Message,
) -> Result<()> {
trace!(
"[{}] Received {:?}",
state.server_socket_addr_dbg_string(server_id),
input_msg.to_string().trim_right_matches("\r\n")
);
match input_msg {
Message {
command: aatxe::Command::PRIVMSG(target, msg),
prefix,
..
} => handle_privmsg(
state,
server_id,
outbox,
OwningMsgPrefix::from_string(prefix.unwrap_or_default()),
target,
msg,
),
Message {
command: aatxe::Command::Response(aatxe::Response::RPL_MYINFO, ..),
..
} => |
_ => Ok(()),
}
}
fn handle_privmsg(
state | {
push_to_outbox(outbox, server_id, handle_004(state, server_id)?);
Ok(())
} | conditional_block |
wechat_mp.go | Config struct {
AppId string `json:"app_id"` // 公众号appId
AppSecret string `json:"app_secret"` // 公众号appSecret
Token string `json:"token"` // 公众号Token
EncodingAESKey string `json:"encoding_aes_key,omitempty"` // 公众号EncodingAESKey
}
WechatMp struct {
Configure WechatMpConfig
AccessToken *WechatAccessToken // 保存微信accessToken
AccessTokenHandler AccessTokenHandlerFunc // 处理微信accessToken,如果有缓存,可以将accessToken存储到缓存中,默认存储到内存中
SubscribeHandler SubscribeHandlerFunc // 关注微信公众号处理方法
UnSubscribeHandler UnSubscribeHandlerFunc // 取消关注公众号处理方法
ScanHandler ScanHandlerFunc // 扫描此微信公众号生成的二维码处理方法
LocationHandler LocationHandlerFunc // 上报地理位置的处理方法
MenuClickHandler MenuClickHandlerFunc // 自定义菜单点击的处理方法
MenuViewHandler MenuViewHandlerFunc // 自定义菜单跳转外链的处理方法
QualificationVerifySuccessHandler QualificationVerifySuccessHandlerFunc // 资质认证成功处理方法
QualificationVerifyFailHandler QualificationVerifyFailHandlerFunc // 资质认证失败处理方法
NamingVerifySuccessHandler NamingVerifySuccessHandlerFunc // 名称认证成功的处理方法
NamingVerifyFailHandler NamingVerifyFailHandlerFunc // 名称认证失败的处理方法
AnnualRenewHandler AnnualRenewHandlerFunc // 年审通知的处理方法
VerifyExpiredHandler VerifyExpireHandlerFunc // 认证过期失效通知的处理方法
SendTemplateFinishHandler SendTemplateFinishHandlerFunc // 发送模板消息结果通知
TextMessageHandler TextMessageHandlerFunc // 发送文本信息的处理方法
ImageMessageHandler ImageMessageHandlerFunc // 发送图片消息的处理方法
VoiceMessageHandler VoiceMessageHandlerFunc // 发送语言消息的处理方法
VideoMessageHandler VideoMessageHandlerFunc // 发送视频消息的处理方法
ShortVideoMessageHandler ShortVideoMessageHandlerFunc // 发送短视频消息的处理方法
LocationMessageHandler LocationMessageHandlerFunc // 上报地理位置的处理方法
LinkMessageHandler LinkMessageHandlerFunc // 发送链接消息的处理方法
}
)
// 新建一个微信公众号
func New(wechatMpConfig *WechatMpConfig) *WechatMp {
var wechatMp = &WechatMp{}
wechatMp.Configure = *wechatMpConfig
wechatMp.SetAccessTokenHandlerFunc(WechatMpDefaultAccessTokenHandlerFunc)
return wechatMp
}
// 用户在设置微信公众号服务器配置,并开启后,微信会发送一次认证请求,此函数即做此验证用
func (wm *WechatMp) AuthWechatServer(r *http.Request) string {
echostr := r.FormValue(WechatRequestEchostr)
if wm.checkWechatSource(r) {
return echostr
}
return WechatResponseStringInvalid
}
// 检验认证来源是否为微信
func (wm *WechatMp) checkWechatSource(r *http.Request) bool {
timestamp := r.FormValue(WechatRequestTimestamp)
nonce := r.FormValue(WechatRequestNonce)
signature := r.FormValue(WechatRequestSignature)
return CheckWechatAuthSign(signature, wm.Configure.Token, timestamp, nonce)
}
// 检验消息来源,并且提取消息
func (wm *WechatMp) checkMessageSource(r *http.Request) (bool, []byte) {
//openid := r.FormValue("openid") // openid,暂时还没想到为什么传值过来
timestamp := r.FormValue(WechatRequestTimestamp)
nonce := r.FormValue(WechatRequestNonce)
// 读取request body
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource ioutil.ReadAll(r.Body) error: %+v\n", err)
return false, nil
}
// 判断消息是否经过加密
encrypt_type := r.FormValue(WechatRequestEncryptType)
if encrypt_type == WechatEncryptType {
// 如果消息已经加密
msg_signature := r.FormValue(WechatRequestMessageSignature)
var msgEncryptRequest MsgEncryptReque | ck := CheckWechatAuthSign(msg_signature, timestamp, nonce, wm.Configure.Token, msgEncryptRequest.Encrypt)
var message []byte
if check {
// 验证成功,解密消息,返回正文的二进制数组格式
message, err = wm.aesDecryptMessage(msgEncryptRequest.Encrypt)
if err != nil {
fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource wm.aesDecryptMessage(msgEncryptBody.Encrypt) error: %+v\n", err)
return false, nil
}
}
return check, message
}
// 如果消息未加密
signature := r.FormValue(WechatRequestSignature)
return CheckWechatAuthSign(signature, wm.Configure.Token, timestamp, nonce), body
}
// 加密后的微信消息结构
type MsgEncryptRequest struct {
XMLName xml.Name `xml:"xml"`
ToUserName string // 开发者微信号
Encrypt string // 加密的消息正文
}
// 响应加密消息的结构
type MsgEncryptResponse struct {
XMLName xml.Name `xml:"xml"`
Encrypt CDATAText // 加密的响应正文
MsgSignature CDATAText // 响应正文加密的签名
TimeStamp int64 // 时间戳
Nonce CDATAText // 随机字符串
}
// 加密发送消息
func (wm *WechatMp) AESEncryptMessage(plainData []byte) (*MsgEncryptResponse, error) {
// 获取正文的length
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, int32(len(plainData)))
if err != nil {
return nil, fmt.Errorf("aesEncryptMessage binary.Write error: %+v\n", err)
}
msgLength := buf.Bytes()
// 获取16位字节数组
randomBytes := common.GetRandomString(16)
plainData = bytes.Join([][]byte{randomBytes, msgLength, plainData, []byte(wm.Configure.AppId)}, nil)
// 微信的EncodingAESKey是被编了码的, 使用前需要base64解码
// = 为占位符
aesKey, err := base64.StdEncoding.DecodeString(wm.Configure.EncodingAESKey + "=")
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode EncodingAESKey error: %+v\n", err)
}
cipherData, err := AESEncrypt(plainData, aesKey)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage AESEncrypt error: %+v\n", err)
}
encryptMessage := base64.StdEncoding.EncodeToString(cipherData)
timeStamp := time.Now().Unix()
nonce := strconv.FormatInt(timeStamp, 10)
msgEncryptResponse := new(MsgEncryptResponse)
msgEncryptResponse.Encrypt = Value2CDATA(encryptMessage)
msgEncryptResponse.MsgSignature = Value2CDATA(SignMsg(wm.Configure.Token, nonce, string(timeStamp), encryptMessage))
msgEncryptResponse.TimeStamp = timeStamp
msgEncryptResponse.Nonce = Value2CDATA(nonce)
return msgEncryptResponse, nil
}
// 解密收到的消息
func (wm *WechatMp) aesDecryptMessage(cipherMessage string) ([]byte, error) {
// 微信的EncodingAESKey是被编了码的, 使用前需要base64解码
// = 为占位符
aesKey, err := base64.StdEncoding.DecodeString(wm.Configure.EncodingAESKey + "=")
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode EncodingAESKey error: %+v\n", err)
}
message, err := base64.StdEncoding.DecodeString(cipherMessage)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode encryptMessage error: %+v\n", err)
}
message, err = AESDecrypt(message, aesKey)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage AESDecrypt error: %+v\n", err)
}
// 解密完成后,提取正文
return wm.extractDecryptMessage(message)
}
// 从解密后的消息中,提取正文msg
// msg_encrypt = Base64_Encode(AES_Encrypt[random(16B) + msg_len(4B) + msg + $AppID])
func (wm *WechatMp) extractDecryptMessage(plainData []byte) ([]byte, error) {
| st
if err = xml.Unmarshal(body, &msgEncryptRequest); err != nil {
fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource xml.Unmarshal(body, &msgEncryptBody) error: %+v\n", err)
return false, nil
}
che | identifier_body |
wechat_mp.go | checkWechatSource(r *http.Request) bool {
timestamp := r.FormValue(WechatRequestTimestamp)
nonce := r.FormValue(WechatRequestNonce)
signature := r.FormValue(WechatRequestSignature)
return CheckWechatAuthSign(signature, wm.Configure.Token, timestamp, nonce)
}
// 检验消息来源,并且提取消息
func (wm *WechatMp) checkMessageSource(r *http.Request) (bool, []byte) {
//openid := r.FormValue("openid") // openid,暂时还没想到为什么传值过来
timestamp := r.FormValue(WechatRequestTimestamp)
nonce := r.FormValue(WechatRequestNonce)
// 读取request body
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource ioutil.ReadAll(r.Body) error: %+v\n", err)
return false, nil
}
// 判断消息是否经过加密
encrypt_type := r.FormValue(WechatRequestEncryptType)
if encrypt_type == WechatEncryptType {
// 如果消息已经加密
msg_signature := r.FormValue(WechatRequestMessageSignature)
var msgEncryptRequest MsgEncryptRequest
if err = xml.Unmarshal(body, &msgEncryptRequest); err != nil {
fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource xml.Unmarshal(body, &msgEncryptBody) error: %+v\n", err)
return false, nil
}
check := CheckWechatAuthSign(msg_signature, timestamp, nonce, wm.Configure.Token, msgEncryptRequest.Encrypt)
var message []byte
if check {
// 验证成功,解密消息,返回正文的二进制数组格式
message, err = wm.aesDecryptMessage(msgEncryptRequest.Encrypt)
if err != nil {
fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource wm.aesDecryptMessage(msgEncryptBody.Encrypt) error: %+v\n", err)
return false, nil
}
}
return check, message
}
// 如果消息未加密
signature := r.FormValue(WechatRequestSignature)
return CheckWechatAuthSign(signature, wm.Configure.Token, timestamp, nonce), body
}
// 加密后的微信消息结构
type MsgEncryptRequest struct {
XMLName xml.Name `xml:"xml"`
ToUserName string // 开发者微信号
Encrypt string // 加密的消息正文
}
// 响应加密消息的结构
type MsgEncryptResponse struct {
XMLName xml.Name `xml:"xml"`
Encrypt CDATAText // 加密的响应正文
MsgSignature CDATAText // 响应正文加密的签名
TimeStamp int64 // 时间戳
Nonce CDATAText // 随机字符串
}
// 加密发送消息
func (wm *WechatMp) AESEncryptMessage(plainData []byte) (*MsgEncryptResponse, error) {
// 获取正文的length
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, int32(len(plainData)))
if err != nil {
return nil, fmt.Errorf("aesEncryptMessage binary.Write error: %+v\n", err)
}
msgLength := buf.Bytes()
// 获取16位字节数组
randomBytes := common.GetRandomString(16)
plainData = bytes.Join([][]byte{randomBytes, msgLength, plainData, []byte(wm.Configure.AppId)}, nil)
// 微信的EncodingAESKey是被编了码的, 使用前需要base64解码
// = 为占位符
aesKey, err := base64.StdEncoding.DecodeString(wm.Configure.EncodingAESKey + "=")
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode EncodingAESKey error: %+v\n", err)
}
cipherData, err := AESEncrypt(plainData, aesKey)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage AESEncrypt error: %+v\n", err)
}
encryptMessage := base64.StdEncoding.EncodeToString(cipherData)
timeStamp := time.Now().Unix()
nonce := strconv.FormatInt(timeStamp, 10)
msgEncryptResponse := new(MsgEncryptResponse)
msgEncryptResponse.Encrypt = Value2CDATA(encryptMessage)
msgEncryptResponse.MsgSignature = Value2CDATA(SignMsg(wm.Configure.Token, nonce, string(timeStamp), encryptMessage))
msgEncryptResponse.TimeStamp = timeStamp
msgEncryptResponse.Nonce = Value2CDATA(nonce)
return msgEncryptResponse, nil
}
// aesDecryptMessage Base64-decodes and AES-decrypts an incoming message,
// then extracts and returns the plaintext message body.
func (wm *WechatMp) aesDecryptMessage(cipherMessage string) ([]byte, error) {
	// WeChat's EncodingAESKey lacks its Base64 "=" padding; restore it first.
	key, keyErr := base64.StdEncoding.DecodeString(wm.Configure.EncodingAESKey + "=")
	if keyErr != nil {
		return nil, fmt.Errorf("aesDecryptMessage base64 decode EncodingAESKey error: %+v\n", keyErr)
	}
	rawCipher, cipherErr := base64.StdEncoding.DecodeString(cipherMessage)
	if cipherErr != nil {
		return nil, fmt.Errorf("aesDecryptMessage base64 decode encryptMessage error: %+v\n", cipherErr)
	}
	plain, aesErr := AESDecrypt(rawCipher, key)
	if aesErr != nil {
		return nil, fmt.Errorf("aesDecryptMessage AESDecrypt error: %+v\n", aesErr)
	}
	// Strip the random prefix / length header and verify the AppId.
	return wm.extractDecryptMessage(plain)
}
// extractDecryptMessage extracts the message body from a decrypted payload.
// Payload layout: random(16B) + msg_len(4B, big-endian) + msg + AppId.
// The trailing AppId is compared against the configured AppId as an extra
// authenticity check.
func (wm *WechatMp) extractDecryptMessage(plainData []byte) ([]byte, error) {
	// ROBUSTNESS: a corrupted or foreign payload may be shorter than the
	// fixed 20-byte header; slicing blindly would panic.
	if len(plainData) < 20 {
		return nil, fmt.Errorf("extractDecryptMessage payload too short: %d bytes\n", len(plainData))
	}
	// Bytes 0-15 are random filler; bytes 16-19 hold the body length.
	buf := bytes.NewBuffer(plainData[16:20])
	var msgLength int32
	err := binary.Read(buf, binary.BigEndian, &msgLength)
	if err != nil {
		return nil, fmt.Errorf("extractDecryptMessage binary.Read(msgLength) error: %+v\n", err)
	}
	// The AppId follows the body; validate the declared length before slicing.
	appIdStart := int(msgLength) + 20
	if msgLength < 0 || appIdStart > len(plainData) {
		return nil, fmt.Errorf("extractDecryptMessage invalid msgLength: %d\n", msgLength)
	}
	appId := string(plainData[appIdStart:])
	if wm.Configure.AppId != appId {
		// AppId embedded in the message does not match ours.
		return nil, fmt.Errorf("local appid (%s) is not equal of message appid (%s)\n", wm.Configure.AppId, appId)
	}
	return plainData[20:appIdStart], nil
}
// CallBackFunc handles messages pushed by the WeChat server: it first
// authenticates the request, then dispatches the body to the message handler.
func (wm *WechatMp) CallBackFunc(r *http.Request) string {
	valid, body := wm.checkMessageSource(r)
	if valid {
		return wm.wechatMessageHandler(body)
	}
	fmt.Fprintln(common.WechatErrorLoggerWriter, WechatResponseStringInvalid)
	return WechatResponseStringFail
}
// SetAccessTokenHandlerFunc sets the global handler used to obtain the WeChat
// access token (e.g. to back it with an external cache instead of memory).
func (wm *WechatMp) SetAccessTokenHandlerFunc(handlerFunc AccessTokenHandlerFunc) {
	wm.AccessTokenHandler = handlerFunc
}
// SetSubscribeHandlerFunc sets the handler for subscribe (follow) events.
func (wm *WechatMp) SetSubscribeHandlerFunc(handlerFunc SubscribeHandlerFunc) {
	wm.SubscribeHandler = handlerFunc
}
// SetUnSubscribeHandlerFunc sets the handler for unsubscribe (unfollow) events.
func (wm *WechatMp) SetUnSubscribeHandlerFunc(handlerFunc UnSubscribeHandlerFunc) {
	wm.UnSubscribeHandler = handlerFunc
}
// SetScanHandlerFunc sets the handler for QR-code scan events.
func (wm *WechatMp) SetScanHandlerFunc(handlerFunc ScanHandlerFunc) {
	wm.ScanHandler = handlerFunc
}
// SetLocationHandlerFunc sets the handler for location-report events.
func (wm *WechatMp) SetLocationHandlerFunc(handlerFunc LocationHandlerFunc) {
	wm.LocationHandler = handlerFunc
}
// SetMenuClickHandlerFunc sets the handler for custom-menu click events.
func (wm *WechatMp) SetMenuClickHandlerFunc(handlerFunc MenuClickHandlerFunc) {
	wm.MenuClickHandler = handlerFunc
}
// SetMenuViewHandlerFunc sets the handler for custom-menu external-link
// (view) events.
func (wm *WechatMp) SetMenuViewHandlerFunc(handlerFunc MenuViewHandlerFunc) {
	wm.MenuViewHandler = handlerFunc
}
// SetTextHandlerFunc sets the handler for incoming text messages.
func (wm *WechatMp) SetTextHandlerFunc(handlerFunc TextMessageHandlerFunc) {
	wm.TextMessageHandler = handlerFunc
}
// SetImageHandlerFunc sets the handler for incoming image messages.
func (wm *WechatMp) SetImageHandlerFunc(handlerFunc ImageMessageHandlerFunc) {
	wm.ImageMessageHandler = handlerFunc
}
// SetVoiceHandlerFunc sets the handler for incoming voice messages.
func (wm *WechatMp) SetVoiceHandlerFunc(handlerFunc VoiceMessageHandlerFunc) {
	wm.VoiceMessageHandler = handlerFunc
}
// SetVideoHandlerFunc sets the handler for incoming video messages.
func (wm *WechatMp) SetVideoHandlerFunc(handlerFunc VideoMessageHandlerFunc) {
	wm.VideoMessageHandler = handlerFunc
}
| identifier_name |
||
wechat_mp.go | 微信公众号服务器配置,并开启后,微信会发送一次认证请求,此函数即做此验证用
// AuthWechatServer answers WeChat's one-time server-configuration challenge:
// it echoes back echostr only when the request is properly signed by WeChat.
func (wm *WechatMp) AuthWechatServer(r *http.Request) string {
	if !wm.checkWechatSource(r) {
		return WechatResponseStringInvalid
	}
	return r.FormValue(WechatRequestEchostr)
}
// checkWechatSource verifies that an auth request originated from WeChat by
// validating its signature over the token, timestamp and nonce.
func (wm *WechatMp) checkWechatSource(r *http.Request) bool {
	return CheckWechatAuthSign(
		r.FormValue(WechatRequestSignature),
		wm.Configure.Token,
		r.FormValue(WechatRequestTimestamp),
		r.FormValue(WechatRequestNonce),
	)
}
// checkMessageSource verifies that a pushed message came from the WeChat
// server and extracts its body.
// For encrypted pushes (encrypt_type=aes) the body is unwrapped from its XML
// envelope, signature-checked against msg_signature and AES-decrypted; for
// plaintext pushes only the plain request signature is checked and the raw
// body is returned. The boolean reports whether the check passed.
func (wm *WechatMp) checkMessageSource(r *http.Request) (bool, []byte) {
	//openid := r.FormValue("openid") // an openid is also sent; purpose unclear so far
	timestamp := r.FormValue(WechatRequestTimestamp)
	nonce := r.FormValue(WechatRequestNonce)
	// Read the raw request body.
	body, err := ioutil.ReadAll(r.Body)
	defer r.Body.Close()
	if err != nil {
		fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource ioutil.ReadAll(r.Body) error: %+v\n", err)
		return false, nil
	}
	// Determine whether the message is encrypted.
	encrypt_type := r.FormValue(WechatRequestEncryptType)
	if encrypt_type == WechatEncryptType {
		// Encrypted message: the signature covers the ciphertext, so it is
		// checked against msg_signature (not the plain signature field).
		msg_signature := r.FormValue(WechatRequestMessageSignature)
		var msgEncryptRequest MsgEncryptRequest
		if err = xml.Unmarshal(body, &msgEncryptRequest); err != nil {
			fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource xml.Unmarshal(body, &msgEncryptBody) error: %+v\n", err)
			return false, nil
		}
		check := CheckWechatAuthSign(msg_signature, timestamp, nonce, wm.Configure.Token, msgEncryptRequest.Encrypt)
		var message []byte
		if check {
			// Signature OK: decrypt and return the plaintext body.
			message, err = wm.aesDecryptMessage(msgEncryptRequest.Encrypt)
			if err != nil {
				fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource wm.aesDecryptMessage(msgEncryptBody.Encrypt) error: %+v\n", err)
				return false, nil
			}
		}
		return check, message
	}
	// Plaintext message: verify the plain request signature.
	signature := r.FormValue(WechatRequestSignature)
	return CheckWechatAuthSign(signature, wm.Configure.Token, timestamp, nonce), body
}
// 加密后的微信消息结构
type MsgEncryptRequest struct {
XMLName xml.Name `xml:"xml"`
ToUserName string // 开发者微信号
Encrypt string // 加密的消息正文
}
// 响应加密消息的结构
type MsgEncryptResponse struct {
XMLName xml.Name `xml:"xml"`
Encrypt CDATAText // 加密的响应正文
MsgSignature CDATAText // 响应正文加密的签名
TimeStamp int64 // 时间戳
Nonce CDATAText // 随机字符串
}
// 加密发送消息
func (wm *WechatMp) AESEncryptMessage(plainData []byte) (*MsgEncryptResponse, error) {
// 获取正文的length
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, int32(len(plainData)))
if err != nil {
return nil, fmt.Errorf("aesEncryptMessage binary.Write error: %+v\n", err)
}
msgLength := buf.Bytes()
// 获取16位字节数组
randomBytes := common.GetRandomString(16)
plainData = bytes.Join([][]byte{randomBytes, msgLength, plainData, []byte(wm.Configure.AppId)}, nil)
// 微信的EncodingAESKey是被编了码的, 使用前需要base64解码
// = 为占位符
aesKey, err := base64.StdEncoding.DecodeString(wm.Configure.EncodingAESKey + "=")
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode EncodingAESKey error: %+v\n", err)
}
cipherData, err := AESEncrypt(plainData, aesKey)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage AESEncrypt error: %+v\n", err)
}
encryptMessage := base64.StdEncoding.EncodeToString(cipherData)
timeStamp := time.Now().Unix()
nonce := strconv.FormatInt(timeStamp, 10)
msgEncryptResponse := new(MsgEncryptResponse)
msgEncryptResponse.Encrypt = Value2CDATA(encryptMessage)
msgEncryptResponse.MsgSignature = Value2CDATA(SignMsg(wm.Configure.Token, nonce, string(timeStamp), encryptMessage))
msgEncryptResponse.TimeStamp = timeStamp
msgEncryptResponse.Nonce = Value2CDATA(nonce)
return msgEncryptResponse, nil
}
// 解密收到的消息
func (wm *WechatMp) aesDecryptMessage(cipherMessage string) ([]byte, error) {
// 微信的EncodingAESKey是被编了码的, 使用前需要base64解码
// = 为占位符
aesKey, err := base64.StdEncoding.DecodeString(wm.Configure.EncodingAESKey + "=")
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode EncodingAESKey error: %+v\n", err)
}
message, err := base64.StdEncoding.DecodeString(cipherMessage)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode encryptMessage error: %+v\n", err)
}
message, err = AESDecrypt(message, aesKey)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage AESDecrypt error: %+v\n", err)
}
// 解密完成后,提取正文
return wm.extractDecryptMessage(message)
}
// 从解密后的消息中,提取正文msg
// msg_encrypt = Base64_Encode(AES_Encrypt[random(16B) + msg_len(4B) + msg + $AppID])
func (wm *WechatMp) extractDecryptMessage(plainData []byte) ([]byte, error) {
// 前16位是随机字符, 直接跳过,17至20是正文的长度,先读取正文的长度
buf := bytes.NewBuffer(plainData[16:20])
var msgLength int32
err := binary.Read(buf, binary.BigEndian, &msgLength)
if err != nil {
return nil, fmt.Errorf("extractDecryptMessage binary.Read(msgLength) error: %+v\n", err)
}
// 正文之后是appid, 可以再次验证,计算appid的起始位置
appIdStart := msgLength + 20
// 获取appid,并进行验证
appId := string(plainData[appIdStart:])
if wm.Configure.AppId != appId {
// 验证消息中的appid未通过
return nil, fmt.Errorf("local appid (%s) is not equal of message appid (%s)\n", wm.Configure.AppId, appId)
}
return plainData[20:appIdStart], nil
}
// 微信服务推送消息接收方法
func (wm *WechatMp) CallBackFunc(r *http.Request) string {
// 首先,验证消息是否从微信服务发出
valid, body := wm.checkMessageSource(r)
if !valid {
fmt.Fprintln(common.WechatErrorLoggerWriter, WechatResponseStringInvalid)
return WechatResponseStringFail
}
return wm.wechatMessageHandler(body)
}
// 设置全局获取微信accessToken的方法
func (wm *WechatMp) SetAccessTokenHandlerFunc(handlerFunc AccessTokenHandlerFunc) {
wm.AccessTokenHandler = handlerFunc
}
// 设置处理关注事件的方法
func (wm *WechatMp) SetSubscribeHandlerFunc(handlerFunc SubscribeHandlerFunc) {
wm.SubscribeHandler = handlerFunc
}
// 设置处理取消关注事件的方法
func (wm *WechatMp) SetUnSubscribeHandlerFunc(handlerFunc UnSubscribeHandlerFunc) {
wm.UnSubscribeHandler = handlerFunc
}
// 设置处理扫描事件的方法
func (wm *WechatMp) SetScanHandlerFunc(handlerFunc ScanHandlerFunc) {
wm.ScanHandler = handlerFunc
}
// 设置处理上报地理位置的方法
func (wm *WechatMp) SetLocationHandlerFunc(handlerFunc LocationHandlerFunc) {
wm.LocationHandler = handlerFunc
}
// 设置处理自定义菜单点击事件的方法
func (wm *WechatMp) SetMenuClickHandlerFunc(handlerFunc MenuClickHandlerFunc) {
wm.MenuClickHandler = handlerFunc
}
// 设置处理自定义菜单跳转外链事件的方法
func (wm *WechatMp) SetMenuViewHandlerFunc(handlerFunc MenuViewHandlerFunc) {
wm.MenuViewHandler = handlerFunc
}
// 设置处理微信text消息事件方法
func (wm *WechatMp) | SetTextHandlerFunc(handlerFunc TextMessageHandlerFunc) {
wm.TextMessageHandler = handlerFunc
}
// 设置处理微信image消息事件方法
func (wm *WechatMp) SetImageHand | conditional_block |
|
wechat_mp.go | MpConfig struct {
AppId string `json:"app_id"` // 公众号appId
AppSecret string `json:"app_secret"` // 公众号appSecret
Token string `json:"token"` // 公众号Token
EncodingAESKey string `json:"encoding_aes_key,omitempty"` // 公众号EncodingAESKey
}
WechatMp struct {
Configure WechatMpConfig
AccessToken *WechatAccessToken // 保存微信accessToken
AccessTokenHandler AccessTokenHandlerFunc // 处理微信accessToken,如果有缓存,可以将accessToken存储到缓存中,默认存储到内存中
SubscribeHandler SubscribeHandlerFunc // 关注微信公众号处理方法
UnSubscribeHandler UnSubscribeHandlerFunc // 取消关注公众号处理方法
ScanHandler ScanHandlerFunc // 扫描此微信公众号生成的二维码处理方法
LocationHandler LocationHandlerFunc // 上报地理位置的处理方法
MenuClickHandler MenuClickHandlerFunc // 自定义菜单点击的处理方法
MenuViewHandler MenuViewHandlerFunc // 自定义菜单跳转外链的处理方法
QualificationVerifySuccessHandler QualificationVerifySuccessHandlerFunc // 资质认证成功处理方法
QualificationVerifyFailHandler QualificationVerifyFailHandlerFunc // 资质认证失败处理方法
NamingVerifySuccessHandler NamingVerifySuccessHandlerFunc // 名称认证成功的处理方法
NamingVerifyFailHandler NamingVerifyFailHandlerFunc // 名称认证失败的处理方法
AnnualRenewHandler AnnualRenewHandlerFunc // 年审通知的处理方法
VerifyExpiredHandler VerifyExpireHandlerFunc // 认证过期失效通知的处理方法
SendTemplateFinishHandler SendTemplateFinishHandlerFunc // 发送模板消息结果通知
TextMessageHandler TextMessageHandlerFunc // 发送文本信息的处理方法
ImageMessageHandler ImageMessageHandlerFunc // 发送图片消息的处理方法
VoiceMessageHandler VoiceMessageHandlerFunc // 发送语言消息的处理方法
VideoMessageHandler VideoMessageHandlerFunc // 发送视频消息的处理方法
ShortVideoMessageHandler ShortVideoMessageHandlerFunc // 发送短视频消息的处理方法
LocationMessageHandler LocationMessageHandlerFunc // 上报地理位置的处理方法
LinkMessageHandler LinkMessageHandlerFunc // 发送链接消息的处理方法
}
)
// New creates a WechatMp instance from the given configuration, installing
// the default access-token handler.
func New(wechatMpConfig *WechatMpConfig) *WechatMp {
	mp := &WechatMp{Configure: *wechatMpConfig}
	mp.SetAccessTokenHandlerFunc(WechatMpDefaultAccessTokenHandlerFunc)
	return mp
}
// 用户在设置微信公众号服务器配置,并开启后,微信会发送一次认证请求,此函数即做此验证用
func (wm *WechatMp) AuthWechatServer(r *http.Request) string {
echostr := r.FormValue(WechatRequestEchostr)
if wm.checkWechatSource(r) {
return echostr
}
return WechatResponseStringInvalid
}
// 检验认证来源是否为微信
func (wm *WechatMp) checkWechatSource(r *http.Request) bool {
timestamp := r.FormValue(WechatRequestTimestamp)
nonce := r.FormValue(WechatRequestNonce)
signature := r.FormValue(WechatRequestSignature)
return CheckWechatAuthSign(signature, wm.Configure.Token, timestamp, nonce)
}
// 检验消息来源,并且提取消息
func (wm *WechatMp) checkMessageSource(r *http.Request) (bool, []byte) {
//openid := r.FormValue("openid") // openid,暂时还没想到为什么传值过来
timestamp := r.FormValue(WechatRequestTimestamp)
nonce := r.FormValue(WechatRequestNonce)
// 读取request body
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource ioutil.ReadAll(r.Body) error: %+v\n", err)
return false, nil
}
// 判断消息是否经过加密
encrypt_type := r.FormValue(WechatRequestEncryptType)
if encrypt_type == WechatEncryptType {
// 如果消息已经加密
msg_signature := r.FormValue(WechatRequestMessageSignature)
var msgEncryptRequest MsgEncryptRequest
if err = xml.Unmarshal(body, &msgEncryptRequest); err != nil {
fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource xml.Unmarshal(body, &msgEncryptBody) error: %+v\n", err)
return false, nil
}
check := CheckWechatAuthSign(msg_signature, timestamp, nonce, wm.Configure.Token, msgEncryptRequest.Encrypt)
var message []byte
if check {
// 验证成功,解密消息,返回正文的二进制数组格式
message, err = wm.aesDecryptMessage(msgEncryptRequest.Encrypt)
if err != nil {
fmt.Fprintf(common.WechatErrorLoggerWriter, "checkMessageSource wm.aesDecryptMessage(msgEncryptBody.Encrypt) error: %+v\n", err)
return false, nil
} | // 如果消息未加密
signature := r.FormValue(WechatRequestSignature)
return CheckWechatAuthSign(signature, wm.Configure.Token, timestamp, nonce), body
}
// 加密后的微信消息结构
type MsgEncryptRequest struct {
XMLName xml.Name `xml:"xml"`
ToUserName string // 开发者微信号
Encrypt string // 加密的消息正文
}
// 响应加密消息的结构
type MsgEncryptResponse struct {
XMLName xml.Name `xml:"xml"`
Encrypt CDATAText // 加密的响应正文
MsgSignature CDATAText // 响应正文加密的签名
TimeStamp int64 // 时间戳
Nonce CDATAText // 随机字符串
}
// 加密发送消息
func (wm *WechatMp) AESEncryptMessage(plainData []byte) (*MsgEncryptResponse, error) {
// 获取正文的length
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, int32(len(plainData)))
if err != nil {
return nil, fmt.Errorf("aesEncryptMessage binary.Write error: %+v\n", err)
}
msgLength := buf.Bytes()
// 获取16位字节数组
randomBytes := common.GetRandomString(16)
plainData = bytes.Join([][]byte{randomBytes, msgLength, plainData, []byte(wm.Configure.AppId)}, nil)
// 微信的EncodingAESKey是被编了码的, 使用前需要base64解码
// = 为占位符
aesKey, err := base64.StdEncoding.DecodeString(wm.Configure.EncodingAESKey + "=")
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode EncodingAESKey error: %+v\n", err)
}
cipherData, err := AESEncrypt(plainData, aesKey)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage AESEncrypt error: %+v\n", err)
}
encryptMessage := base64.StdEncoding.EncodeToString(cipherData)
timeStamp := time.Now().Unix()
nonce := strconv.FormatInt(timeStamp, 10)
msgEncryptResponse := new(MsgEncryptResponse)
msgEncryptResponse.Encrypt = Value2CDATA(encryptMessage)
msgEncryptResponse.MsgSignature = Value2CDATA(SignMsg(wm.Configure.Token, nonce, string(timeStamp), encryptMessage))
msgEncryptResponse.TimeStamp = timeStamp
msgEncryptResponse.Nonce = Value2CDATA(nonce)
return msgEncryptResponse, nil
}
// 解密收到的消息
func (wm *WechatMp) aesDecryptMessage(cipherMessage string) ([]byte, error) {
// 微信的EncodingAESKey是被编了码的, 使用前需要base64解码
// = 为占位符
aesKey, err := base64.StdEncoding.DecodeString(wm.Configure.EncodingAESKey + "=")
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode EncodingAESKey error: %+v\n", err)
}
message, err := base64.StdEncoding.DecodeString(cipherMessage)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage base64 decode encryptMessage error: %+v\n", err)
}
message, err = AESDecrypt(message, aesKey)
if err != nil {
return nil, fmt.Errorf("aesDecryptMessage AESDecrypt error: %+v\n", err)
}
// 解密完成后,提取正文
return wm.extractDecryptMessage(message)
}
// 从解密后的消息中,提取正文msg
// msg_encrypt = Base64_Encode(AES_Encrypt[random(16B) + msg_len(4B) + msg + $AppID])
func (wm *WechatMp) extractDecryptMessage(plainData []byte) ([]byte, error) {
// � | }
return check, message
} | random_line_split |
utils.py | '''
Image with dicome attribute [0028,0004] == MONOCHROME1 needs to
be inverted. Otherwise, our way to detect the knee will not work.
:param image_array:
:return:
'''
print('Invert Monochrome ')
print(image_array.shape, np.mean(image_array), np.min(image_array), np.max(image_array))
# image_array = -image_array + 255.0 # our method
image_array = image_array.max() - image_array
print(image_array.shape, np.mean(image_array), np.min(image_array), np.max(image_array))
return image_array
def interpolate_resolution(image_dicom, scaling_factor=0.2):
    """Resample a DICOM image to a fixed pixel spacing.

    :param image_dicom: DICOM dataset; pixel spacing is read from tag (0028,0030)
    :param scaling_factor: target spacing (same units as the tag values)
    :return: resampled pixel array
    """
    print('Obtain Fix Resolution:')
    pixels = image_dicom.pixel_array
    print(pixels.shape, np.mean(pixels), np.min(pixels), np.max(pixels))
    # Zoom factors are the per-axis ratios of native spacing to target spacing.
    spacing_x = image_dicom[0x28, 0x30].value[0]
    spacing_y = image_dicom[0x28, 0x30].value[1]
    resampled = ndimage.zoom(pixels, [spacing_x / scaling_factor, spacing_y / scaling_factor])
    print(resampled.shape, np.mean(resampled), np.min(resampled), np.max(resampled))
    return resampled
def get_center_image(img, img_size=(2048, 2048)):
    """Crop the central ``img_size`` window out of a 2-D image.

    :param img: 2-D array
    :param img_size: (height, width) of the crop
    :return: the centered crop (a view into ``img``)
    """
    n_rows, n_cols = img.shape
    half_h, half_w = img_size[0] // 2, img_size[1] // 2
    mid_r, mid_c = n_rows // 2, n_cols // 2
    return img[mid_r - half_h:mid_r + half_h, mid_c - half_w:mid_c + half_w]
def padding(img, img_size=(2048, 2048)):
    """Zero-pad a 2-D image up to ``img_size``, centered.

    A dimension already at or above the target size is left unpadded.

    :param img: 2-D array
    :param img_size: target (height, width)
    :return: (padded image, top offset, left offset)
    """
    def _split(target, current):
        # Centered before/after padding amounts for one axis.
        gap = target - current
        if gap <= 0:
            return 0, 0
        return gap // 2, gap - gap // 2

    n_rows, n_cols = img.shape
    before_x, after_x = _split(img_size[0], n_rows)
    before_y, after_y = _split(img_size[1], n_cols)
    padded = np.pad(img, ((before_x, after_x), (before_y, after_y)), 'constant')
    return padded, before_x, before_y
def global_contrast_normalization_oulu(img, lim1, multiplier=255):
    """Contrast-normalize an image in place (OULU lab scheme).

    Shifts by ``lim1``, rescales to [0, 1] by the post-shift maximum, then
    scales by ``multiplier``. NOTE: mutates and returns the input array.
    """
    np.subtract(img, lim1, out=img)
    np.divide(img, img.max(), out=img)
    np.multiply(img, multiplier, out=img)
    return img
def global_contrast_normalization(img, s=1, lambda_=10, epsilon=1e-8):
    """Global contrast normalization of an image array.

    Deprecated: kept for reference; reported not to work well here.
    Centers the image at zero mean, then scales by the regularized RMS
    contrast (``epsilon`` guards against division by ~zero contrast).
    The input array is not modified.
    """
    print('Global contrast normalization:')
    print(img.shape, np.mean(img), np.min(img), np.max(img))
    centered = img - np.mean(img)
    rms = np.sqrt(lambda_ + np.mean(centered * centered))
    normalized = s * centered / max(rms, epsilon)
    print(normalized.shape, np.mean(normalized), np.min(normalized), np.max(normalized))
    return normalized
def hist_truncation(img, cut_min=5, cut_max=99):
    """Clip the image histogram to the [cut_min, cut_max] percentiles, then
    contrast-normalize the clipped copy to [0, 255].

    The input array itself is left untouched.
    """
    print('Trim histogram')
    print(img.shape, np.mean(img), np.min(img), np.max(img))
    low, high = np.percentile(img, [cut_min, cut_max])
    trimmed = img.copy()
    trimmed[trimmed < low] = low
    trimmed[trimmed > high] = high
    print(trimmed.shape, np.mean(trimmed), np.min(trimmed), np.max(trimmed))
    # Normalizes in place and returns the same array.
    trimmed = global_contrast_normalization_oulu(trimmed, low, multiplier=255)
    print(trimmed.shape, np.mean(trimmed), np.min(trimmed), np.max(trimmed))
    return trimmed
def extract_knee(image_array, side, offset = None):
| if row_start < 0 or row_end > (image_array.shape[0] - 1):
row_start = round(image_array.shape[0] / 2) - 512
row_end = round(image_array.shape[0] / 2) + 512
#print('Row Indices Final: ', row_start, row_end)
# For right knee, crop columns to be centered at the maximum sum of the LHS of original image array
# Shift over by 500 columns in edge cases with white outer bars
if side == 1:
col_center = 500 + np.argmax(col_sums[500:round(col_sums.shape[0] / 2)])
#print('Column Indices for Right Original: ', col_center - 512, col_center + 512)
# If column is below original image array size, then start cropping on left hand border and go out 1024 columns
if (col_center - 512) < 0:
#print('Column Indices for Right Final: ', 0, 1024)
if offset:
image_array = image_array[row_start + offset[0]:row_end + offset[0], :1024]
else:
image_array = image_array[row_start:row_end, :1024]
else:
if offset:
image_array = image_array[row_start + offset[0]:row_end + offset[0], (col_center - 512) + offset[1]:(col_center + 512)+ offset[1]]
else:
image_array = image_array[row_start:row_end, (col_center - 512):(col_center + 512)]
#print('Column Indices for Right Final: ', col_center - 512, col_center + 512)
# For left knee, crop columns to be centered at the maximum sum of the RHS of original image array
# Shift over by 500 columns in edge cases with white outer bars
if side == 0:
col_center = round(col_sums.shape[0] / 2) + np.argmax(
col_sums[round(col_sums.shape[0] / 2):col_sums.shape[0] - 500])
#print('Column Indices for Left Original: ', col_center - 512, col_center + 512)
# If column is above original image array size, then start cropping on right hand border and go in 1024 columns
if (col_center + 512) > (image_array.shape[1] - 1):
#print('Column Indices for Left Final: ', image_array.shape[1] - 1024, image_array.shape[1] - 1)
if offset:
image_array = image_array[row_start + offset[0]:row_end + offset[0], image_array.shape[1] - 1024:]
else:
image_array = image_array[row_start :row_end, image_array.shape[1] - 1024:]
else:
if | '''
Extrack knee part from image array
:param image_array:
:param side: 0: left knee; 1: right knee
:param offset: if does not work, you can manually change the shape
:return:
'''
#print('Dimensions of image: ', image_array.shape)
# Compute the sum of each row and column
col_sums = np.sum(image_array, axis=0)
row_sums = np.sum(image_array, axis=1)
# Row index for cropping is centered at the minimum of the row_sums array
row_start = np.argmin(row_sums) - 512
row_end = np.argmin(row_sums) + 512
#print('Row Indices Original: ', row_start, row_end)
# However, if either the start or end of the row values is beyond the original image array shape
# We center the cropped image at the center row of the original image array | identifier_body |
utils.py | after_y)),'constant'),before_x,before_y
def global_contrast_normalization_oulu(img,lim1,multiplier = 255):
'''
This part is taken from oulu's lab. This how they did global contrast normalization.
:param img:
:param lim1:
:param multiplier:
:return:
'''
img -= lim1
img /= img.max()
img *= multiplier
return img
def global_contrast_normalization(img, s=1, lambda_=10, epsilon=1e-8):
'''
Apply global contrast normalization based on image array.
Deprecated since it is not working ...
:param img:
:param s:
:param lambda_:
:param epsilon:
:return:
'''
# replacement for the loop
print('Global contrast normalization:')
print(img.shape, np.mean(img), np.min(img), np.max(img))
X_average = np.mean(img)
#print('Mean: ', X_average)
img_center = img - X_average
# `su` is here the mean, instead of the sum
contrast = np.sqrt(lambda_ + np.mean(img_center ** 2))
img = s * img_center / max(contrast, epsilon)
print(img.shape, np.mean(img), np.min(img), np.max(img))
# scipy can handle it
return img
def hist_truncation(img,cut_min=5,cut_max = 99):
'''
Apply 5th and 99th truncation on the figure.
:param img:
:param cut_min:
:param cut_max:
:return:
'''
print('Trim histogram')
print(img.shape, np.mean(img), np.min(img), np.max(img))
lim1,lim2 = np.percentile(img,[cut_min, cut_max])
img_ = img.copy()
img_[img < lim1] = lim1
img_[img > lim2] = lim2
print(img_.shape, np.mean(img_), np.min(img_), np.max(img_))
img_ = global_contrast_normalization_oulu(img_,lim1,multiplier=255)
print(img_.shape, np.mean(img_), np.min(img_), np.max(img_))
return img_
def extract_knee(image_array, side, offset=None):
    '''
    Extract one 1024x1024 knee region from a full radiograph.

    Rows are centered on the minimum of the per-row sums; columns on the
    maximum of the per-column sums of the relevant image half (presumably
    because the inter-knee gap is dark and bone is bright — confirm on data).

    :param image_array: 2-D image array
    :param side: 0: left knee; 1: right knee
    :param offset: optional (row, col) manual shift if the automatic
        placement does not work
    :return: the cropped knee patch
    '''
    # Intensity profiles along each axis drive the crop placement.
    col_sums = np.sum(image_array, axis=0)
    row_sums = np.sum(image_array, axis=1)
    # Rows: center a 1024-row window at the minimum of the row sums.
    row_start = np.argmin(row_sums) - 512
    row_end = np.argmin(row_sums) + 512
    # If that window falls outside the image, fall back to the center rows.
    if row_start < 0 or row_end > (image_array.shape[0] - 1):
        row_start = round(image_array.shape[0] / 2) - 512
        row_end = round(image_array.shape[0] / 2) + 512
    # Right knee: center columns at the max column sum of the LEFT half;
    # skip the first 500 columns to avoid bright outer border bars.
    if side == 1:
        col_center = 500 + np.argmax(col_sums[500:round(col_sums.shape[0] / 2)])
        # Window would start before column 0: take the first 1024 columns.
        if (col_center - 512) < 0:
            if offset:
                image_array = image_array[row_start + offset[0]:row_end + offset[0], :1024]
            else:
                image_array = image_array[row_start:row_end, :1024]
        else:
            if offset:
                image_array = image_array[row_start + offset[0]:row_end + offset[0], (col_center - 512) + offset[1]:(col_center + 512) + offset[1]]
            else:
                image_array = image_array[row_start:row_end, (col_center - 512):(col_center + 512)]
    # Left knee: center columns at the max column sum of the RIGHT half;
    # skip the last 500 columns to avoid bright outer border bars.
    if side == 0:
        col_center = round(col_sums.shape[0] / 2) + np.argmax(
            col_sums[round(col_sums.shape[0] / 2):col_sums.shape[0] - 500])
        # Window would run past the last column: take the last 1024 columns.
        if (col_center + 512) > (image_array.shape[1] - 1):
            if offset:
                image_array = image_array[row_start + offset[0]:row_end + offset[0], image_array.shape[1] - 1024:]
            else:
                image_array = image_array[row_start:row_end, image_array.shape[1] - 1024:]
        else:
            if offset:
                image_array = image_array[row_start + offset[0]:row_end + offset[0], (col_center - 512) + offset[1]:(col_center + 512) + offset[1]]
            else:
                image_array = image_array[row_start:row_end, (col_center - 512):(col_center + 512)]
    return image_array
'''
The code below is adapted from the OULU lab. It shows how they performed the
preprocessing and extracted the knee regions from the images.
'''
def process_file(data, pad):
    """Preprocess one DICOM dataset: resample, normalize, and zero-pad.

    :param data: DICOM dataset (needs pixel data, (0028,0030) pixel spacing
        and (0028,0004) photometric interpretation)
    :param pad: number of zero rows/columns added on every border
    :return: (padded float image, column scale ratio, row scale ratio)
    :raises ValueError: if photometric interpretation is not MONOCHROME1/2
    """
    raw_img = data.pixel_array
    r_, c_ = raw_img.shape
    # Resample to the fixed pixel spacing and work in float64.
    img = interpolate_resolution(data).astype(np.float64)
    photoInterpretation = data[0x28, 0x04].value  # photometric interpretation string
    # print('######### PHOTO INTER {} #########'.format(photoInterpretation))
    if photoInterpretation not in ['MONOCHROME2', 'MONOCHROME1']:
        raise ValueError('Wrong Value of Photo Interpretation: {}'.format(photoInterpretation))
    elif photoInterpretation == 'MONOCHROME1':
        # MONOCHROME1 stores inverted intensities; invert so the knee
        # detection works (see invert_Monochrome1).
        img = invert_Monochrome1(img)
    r, c = img.shape
    # Scale ratios between the resampled and the raw image sizes.
    ratio_r = r / r_
    ratio_c = c / c_
    img = hist_truncation(img)
    # img = global_contrast_normalization(img)
    # Zero-pad `pad` pixels on every side.
    tmp = np.zeros((img.shape[0] + 2 * pad, img.shape[1] + 2 * pad))
    tmp[pad:pad + img.shape[0], pad:pad + img.shape[1]] = img
    return tmp, ratio_c, ratio_r
def image_preprocessing_oulu(data_folder, file):
    """Run the OULU knee localizer on one DICOM file and return both knee patches.

    :param data_folder: directory containing the DICOM file
    :param file: DICOM file name
    :return: (left patch, right patch); (None, None) when no knee is detected
        (read_file_oulu returns None for a bbox containing -1)
    """
    localizer = KneeLocalizer()
    bbox = worker(file, data_folder, localizer)  # outputs a string: file name + box coordinates
    patch_left, patch_right = read_file_oulu(os.path.join(data_folder, file), bbox)
    return patch_left, patch_right
def read_file_oulu(file_path,bbox,sizemm=140,pad=300):
'''
:param file_path: file path the DICOM Data
:param bbox: file name + box frame corrdinates as a list
:param sizemm: size
:param pad: padding size
:return: pixel data of left knee and right knee
'''
data = dicom.read_file(file_path)
bbox = bbox.split(' ')
bbox = np.array([int(i) for i in bbox[1:]])
print(bbox) | if -1 in bbox: # if the algorithm says there is no knee in the figure.
return None,None
# process_xray | random_line_split |
|
utils.py | '''
Image with dicome attribute [0028,0004] == MONOCHROME1 needs to
be inverted. Otherwise, our way to detect the knee will not work.
:param image_array:
:return:
'''
print('Invert Monochrome ')
print(image_array.shape, np.mean(image_array), np.min(image_array), np.max(image_array))
# image_array = -image_array + 255.0 # our method
image_array = image_array.max() - image_array
print(image_array.shape, np.mean(image_array), np.min(image_array), np.max(image_array))
return image_array
def | (image_dicom, scaling_factor=0.2):
'''
Obtain fixed resolution from image dicom
:param image_dicom:
:param scaling_factor:
:return:
'''
print('Obtain Fix Resolution:')
image_array = image_dicom.pixel_array
print(image_array.shape,np.mean(image_array),np.min(image_array),np.max(image_array))
x = image_dicom[0x28, 0x30].value[0]
y = image_dicom[0x28, 0x30].value[1]
image_array = ndimage.zoom(image_array, [x / scaling_factor, y / scaling_factor])
print(image_array.shape,np.mean(image_array),np.min(image_array),np.max(image_array))
return image_array
def get_center_image(img,img_size = (2048,2048)):
'''
Get the center of image
:param img:
:param img_size:
:return:
'''
rows,cols = img.shape
center_x = rows // 2
center_y = cols // 2
img_crop = img[center_x - img_size[0] // 2: center_x + img_size[0] // 2,
center_y - img_size[1] // 2: center_y + img_size[1] // 2]
return img_crop
def padding(img,img_size = (2048,2048)):
'''
Padding image array to a specific size
:param img:
:param img_size:
:return:
'''
rows,cols = img.shape
x_padding = img_size[0] - rows
y_padding = img_size[1] - cols
if x_padding > 0:
before_x,after_x = x_padding // 2, x_padding - x_padding // 2
else:
before_x,after_x = 0,0
if y_padding > 0:
before_y,after_y = y_padding // 2, y_padding - y_padding // 2
else:
before_y,after_y = 0,0
return np.pad(img,((before_x,after_x),(before_y,after_y)),'constant'),before_x,before_y
def global_contrast_normalization_oulu(img,lim1,multiplier = 255):
'''
This part is taken from oulu's lab. This how they did global contrast normalization.
:param img:
:param lim1:
:param multiplier:
:return:
'''
img -= lim1
img /= img.max()
img *= multiplier
return img
def global_contrast_normalization(img, s=1, lambda_=10, epsilon=1e-8):
'''
Apply global contrast normalization based on image array.
Deprecated since it is not working ...
:param img:
:param s:
:param lambda_:
:param epsilon:
:return:
'''
# replacement for the loop
print('Global contrast normalization:')
print(img.shape, np.mean(img), np.min(img), np.max(img))
X_average = np.mean(img)
#print('Mean: ', X_average)
img_center = img - X_average
# `su` is here the mean, instead of the sum
contrast = np.sqrt(lambda_ + np.mean(img_center ** 2))
img = s * img_center / max(contrast, epsilon)
print(img.shape, np.mean(img), np.min(img), np.max(img))
# scipy can handle it
return img
def hist_truncation(img,cut_min=5,cut_max = 99):
'''
Apply 5th and 99th truncation on the figure.
:param img:
:param cut_min:
:param cut_max:
:return:
'''
print('Trim histogram')
print(img.shape, np.mean(img), np.min(img), np.max(img))
lim1,lim2 = np.percentile(img,[cut_min, cut_max])
img_ = img.copy()
img_[img < lim1] = lim1
img_[img > lim2] = lim2
print(img_.shape, np.mean(img_), np.min(img_), np.max(img_))
img_ = global_contrast_normalization_oulu(img_,lim1,multiplier=255)
print(img_.shape, np.mean(img_), np.min(img_), np.max(img_))
return img_
def extract_knee(image_array, side, offset = None):
'''
Extrack knee part from image array
:param image_array:
:param side: 0: left knee; 1: right knee
:param offset: if does not work, you can manually change the shape
:return:
'''
#print('Dimensions of image: ', image_array.shape)
# Compute the sum of each row and column
col_sums = np.sum(image_array, axis=0)
row_sums = np.sum(image_array, axis=1)
# Row index for cropping is centered at the minimum of the row_sums array
row_start = np.argmin(row_sums) - 512
row_end = np.argmin(row_sums) + 512
#print('Row Indices Original: ', row_start, row_end)
# However, if either the start or end of the row values is beyond the original image array shape
# We center the cropped image at the center row of the original image array
if row_start < 0 or row_end > (image_array.shape[0] - 1):
row_start = round(image_array.shape[0] / 2) - 512
row_end = round(image_array.shape[0] / 2) + 512
#print('Row Indices Final: ', row_start, row_end)
# For right knee, crop columns to be centered at the maximum sum of the LHS of original image array
# Shift over by 500 columns in edge cases with white outer bars
if side == 1:
col_center = 500 + np.argmax(col_sums[500:round(col_sums.shape[0] / 2)])
#print('Column Indices for Right Original: ', col_center - 512, col_center + 512)
# If column is below original image array size, then start cropping on left hand border and go out 1024 columns
if (col_center - 512) < 0:
#print('Column Indices for Right Final: ', 0, 1024)
if offset:
image_array = image_array[row_start + offset[0]:row_end + offset[0], :1024]
else:
image_array = image_array[row_start:row_end, :1024]
else:
if offset:
image_array = image_array[row_start + offset[0]:row_end + offset[0], (col_center - 512) + offset[1]:(col_center + 512)+ offset[1]]
else:
image_array = image_array[row_start:row_end, (col_center - 512):(col_center + 512)]
#print('Column Indices for Right Final: ', col_center - 512, col_center + 512)
# For left knee, crop columns to be centered at the maximum sum of the RHS of original image array
# Shift over by 500 columns in edge cases with white outer bars
if side == 0:
col_center = round(col_sums.shape[0] / 2) + np.argmax(
col_sums[round(col_sums.shape[0] / 2):col_sums.shape[0] - 500])
#print('Column Indices for Left Original: ', col_center - 512, col_center + 512)
# If column is above original image array size, then start cropping on right hand border and go in 1024 columns
if (col_center + 512) > (image_array.shape[1] - 1):
#print('Column Indices for Left Final: ', image_array.shape[1] - 1024, image_array.shape[1] - 1)
if offset:
image_array = image_array[row_start + offset[0]:row_end + offset[0], image_array.shape[1] - 1024:]
else:
image_array = image_array[row_start :row_end, image_array.shape[1] - 1024:]
else:
| interpolate_resolution | identifier_name |
utils.py | '''
Image with dicome attribute [0028,0004] == MONOCHROME1 needs to
be inverted. Otherwise, our way to detect the knee will not work.
:param image_array:
:return:
'''
print('Invert Monochrome ')
print(image_array.shape, np.mean(image_array), np.min(image_array), np.max(image_array))
# image_array = -image_array + 255.0 # our method
image_array = image_array.max() - image_array
print(image_array.shape, np.mean(image_array), np.min(image_array), np.max(image_array))
return image_array
def interpolate_resolution(image_dicom, scaling_factor=0.2):
'''
Obtain fixed resolution from image dicom
:param image_dicom:
:param scaling_factor:
:return:
'''
print('Obtain Fix Resolution:')
image_array = image_dicom.pixel_array
print(image_array.shape,np.mean(image_array),np.min(image_array),np.max(image_array))
x = image_dicom[0x28, 0x30].value[0]
y = image_dicom[0x28, 0x30].value[1]
image_array = ndimage.zoom(image_array, [x / scaling_factor, y / scaling_factor])
print(image_array.shape,np.mean(image_array),np.min(image_array),np.max(image_array))
return image_array
def get_center_image(img,img_size = (2048,2048)):
'''
Get the center of image
:param img:
:param img_size:
:return:
'''
rows,cols = img.shape
center_x = rows // 2
center_y = cols // 2
img_crop = img[center_x - img_size[0] // 2: center_x + img_size[0] // 2,
center_y - img_size[1] // 2: center_y + img_size[1] // 2]
return img_crop
def padding(img,img_size = (2048,2048)):
'''
Padding image array to a specific size
:param img:
:param img_size:
:return:
'''
rows,cols = img.shape
x_padding = img_size[0] - rows
y_padding = img_size[1] - cols
if x_padding > 0:
|
else:
before_x,after_x = 0,0
if y_padding > 0:
before_y,after_y = y_padding // 2, y_padding - y_padding // 2
else:
before_y,after_y = 0,0
return np.pad(img,((before_x,after_x),(before_y,after_y)),'constant'),before_x,before_y
def global_contrast_normalization_oulu(img,lim1,multiplier = 255):
'''
This part is taken from oulu's lab. This how they did global contrast normalization.
:param img:
:param lim1:
:param multiplier:
:return:
'''
img -= lim1
img /= img.max()
img *= multiplier
return img
def global_contrast_normalization(img, s=1, lambda_=10, epsilon=1e-8):
'''
Apply global contrast normalization based on image array.
Deprecated since it is not working ...
:param img:
:param s:
:param lambda_:
:param epsilon:
:return:
'''
# replacement for the loop
print('Global contrast normalization:')
print(img.shape, np.mean(img), np.min(img), np.max(img))
X_average = np.mean(img)
#print('Mean: ', X_average)
img_center = img - X_average
# `su` is here the mean, instead of the sum
contrast = np.sqrt(lambda_ + np.mean(img_center ** 2))
img = s * img_center / max(contrast, epsilon)
print(img.shape, np.mean(img), np.min(img), np.max(img))
# scipy can handle it
return img
def hist_truncation(img,cut_min=5,cut_max = 99):
'''
Apply 5th and 99th truncation on the figure.
:param img:
:param cut_min:
:param cut_max:
:return:
'''
print('Trim histogram')
print(img.shape, np.mean(img), np.min(img), np.max(img))
lim1,lim2 = np.percentile(img,[cut_min, cut_max])
img_ = img.copy()
img_[img < lim1] = lim1
img_[img > lim2] = lim2
print(img_.shape, np.mean(img_), np.min(img_), np.max(img_))
img_ = global_contrast_normalization_oulu(img_,lim1,multiplier=255)
print(img_.shape, np.mean(img_), np.min(img_), np.max(img_))
return img_
def extract_knee(image_array, side, offset = None):
'''
Extrack knee part from image array
:param image_array:
:param side: 0: left knee; 1: right knee
:param offset: if does not work, you can manually change the shape
:return:
'''
#print('Dimensions of image: ', image_array.shape)
# Compute the sum of each row and column
col_sums = np.sum(image_array, axis=0)
row_sums = np.sum(image_array, axis=1)
# Row index for cropping is centered at the minimum of the row_sums array
row_start = np.argmin(row_sums) - 512
row_end = np.argmin(row_sums) + 512
#print('Row Indices Original: ', row_start, row_end)
# However, if either the start or end of the row values is beyond the original image array shape
# We center the cropped image at the center row of the original image array
if row_start < 0 or row_end > (image_array.shape[0] - 1):
row_start = round(image_array.shape[0] / 2) - 512
row_end = round(image_array.shape[0] / 2) + 512
#print('Row Indices Final: ', row_start, row_end)
# For right knee, crop columns to be centered at the maximum sum of the LHS of original image array
# Shift over by 500 columns in edge cases with white outer bars
if side == 1:
col_center = 500 + np.argmax(col_sums[500:round(col_sums.shape[0] / 2)])
#print('Column Indices for Right Original: ', col_center - 512, col_center + 512)
# If column is below original image array size, then start cropping on left hand border and go out 1024 columns
if (col_center - 512) < 0:
#print('Column Indices for Right Final: ', 0, 1024)
if offset:
image_array = image_array[row_start + offset[0]:row_end + offset[0], :1024]
else:
image_array = image_array[row_start:row_end, :1024]
else:
if offset:
image_array = image_array[row_start + offset[0]:row_end + offset[0], (col_center - 512) + offset[1]:(col_center + 512)+ offset[1]]
else:
image_array = image_array[row_start:row_end, (col_center - 512):(col_center + 512)]
#print('Column Indices for Right Final: ', col_center - 512, col_center + 512)
# For left knee, crop columns to be centered at the maximum sum of the RHS of original image array
# Shift over by 500 columns in edge cases with white outer bars
if side == 0:
col_center = round(col_sums.shape[0] / 2) + np.argmax(
col_sums[round(col_sums.shape[0] / 2):col_sums.shape[0] - 500])
#print('Column Indices for Left Original: ', col_center - 512, col_center + 512)
# If column is above original image array size, then start cropping on right hand border and go in 1024 columns
if (col_center + 512) > (image_array.shape[1] - 1):
#print('Column Indices for Left Final: ', image_array.shape[1] - 1024, image_array.shape[1] - 1)
if offset:
image_array = image_array[row_start + offset[0]:row_end + offset[0], image_array.shape[1] - 1024:]
else:
image_array = image_array[row_start :row_end, image_array.shape[1] - 1024:]
else:
| before_x,after_x = x_padding // 2, x_padding - x_padding // 2 | conditional_block |
Assignment 3 notes.py | = (ScimEn.merge(energy, on='Country')
.merge(GDP, on='Country'))
energy[energy['Country'].str.contains('United')]
GDP[GDP['Country'].str.contains('United')]
sub_str = {'^([^\d\(]+).*' : r'\1'}
en2 = energy.iloc[232].replace(to_replace={'Country' : sub_str}, )
energy[energy['Country'].str.contains('United')].replace(to_replace={'Country' : sub_str})
energy[energy['Country'].replace(to_replace='^([^\d\(]+).*', value=r'\1', regex=True)
"""
# Question 2 (6.6%)
"""
The previous question joined three datasets then reduced this to just the top 15 entries. When you joined the datasets, but before you reduced this to the top 15 items, how many entries did you lose?
This function should return a single number.
"""
def answer_two():
# get the dataframes; all indexed to 'Country'
energy = read_and_clean_energy_dataframe()
GDP = read_and_clean_GDP_dataframe()
ScimEn = read_and_clean_ScimEn_dataframe()
# merge sequence to get columns in the requested order
intersection = ScimEn.merge(energy, on='Country').merge(GDP, on='Country')
union = ScimEn.merge(energy, on='Country', how='outer').merge(GDP, on='Country', how='outer')
# return np.max([len(energy), len(GDP), len(ScimEn)]) - len(result) == incorrect answer
return len(union) - len(intersection)
answer_two()
# Question 3 (6.6%)
"""
What is the average GDP over the last 10 years for each country? (exclude missing values from this calculation.)
This function should return a Series named avgGDP with 15 countries and their average GDP sorted in descending order.
"""
def answer_three():
def ave(row):
# data = row[['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']]
data = row[np.r_[2006:2016].astype(str)]
return pd.Series({'avgGDP': np.mean(data)})
Top15 = answer_one()
Top15['avgGDP'] = Top15.apply(ave, axis=1)
Top15.sort_values('avgGDP', ascending=False, inplace=True)
return Top15['avgGDP']
answer_three()
Top15.iloc[[0,-1]]
# Question 4 (6.6%)
"""
By how much had the GDP changed over the 10 year span for the country with the 6th largest average GDP?
From answer_three,
This function should return a single number.
"""
def answer_four():
Top15 = answer_one()
# decade = ['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']
decade = np.r_[2006:2016].astype(str)
sixth_country = answer_three().index[5]
sixth_gdp = Top15.loc[sixth_country][decade]
return max(sixth_gdp) - min(sixth_gdp)
answer_four()
# Question 6 (6.6%)
"""
What country has the maximum % Renewable and what is the percentage?
This function should return a tuple with the name of the country and the percentage.
"""
def answer_six():
Top15 = answer_one()
cty = Top15['% Renewable'].idxmax()
return tuple([cty, Top15.loc[cty]['% Renewable']])
# Too complicated
# lst = (Top15.loc[Top15['% Renewable'] == Top15['% Renewable'].max()]['% Renewable'])
# return tuple([lst.index, lst])
print(answer_six())
type(answer_six())
# Question 7 (6.6%)
"""
Create a new column that is the ratio of Self-Citations to Total Citations. What is the maximum value for this new column, and what country has the highest ratio?
This function should return a tuple with the name of the country and the ratio.
"""
def answer_seven():
Top15 = answer_one()
Top15['Citation Ratio'] = Top15['Self-citations'] / Top15['Citations']
cnty = Top15['Citation Ratio'].idxmax()
return tuple([cnty, Top15.loc[cnty]['Citation Ratio']])
answer_seven()
# Question 8 (6.6%)¶
"""
Create a column that estimates the population using Energy Supply and Energy Supply per capita. What is the third most populous country according to this estimate?
This function should return a single string value.
"""
def answer_eight():
Top15 = answer_one()
Top15['Population Estimate'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
Top15.sort_values('Population Estimate', ascending=False, inplace=True)
return Top15.iloc[2].name
answer_eight()
# Question 9 (6.6%)
"""
Create a column that estimates the number of citable documents per person. What is the correlation between the number of citable documents per capita and the energy supply per capita? Use the .corr() method, (Pearson's correlation).
This function should return a single number.
(Optional: Use the built-in function plot9() to visualize the relationship between Energy Supply per Capita vs. Citable docs per Capita)
"""
def answer_nine():
Top15 = answer_one()
Top15['Population Estimate'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
Top15['Citations per Capita'] = Top15['Citations'] / Top15['Population Estimate']
return Top15['Citations per Capita'].corr(Top15['Energy Supply per Capita'])
answer_nine()
# Question 10 (6.6%)
"""
Create a new column with a 1 if the country's % Renewable value is at or above the median for all countries in the top 15, and a 0 if the country's % Renewable value is below the median.
This function should return a series named HighRenew whose index is the country name sorted in ascending order of rank.
"""
def answer_ten():
Top15 = answer_one()
# Find the mean and create the boolean column converted to int
Top15['HighRenew'] = (Top15['% Renewable'] >= Top15['% Renewable'].median()).astype(int)
# sort df ascending by '% Renewable'
Top15.sort_values('% Renewable', inplace=True)
return Top15['HighRenew']
answer_ten()
#Question 11 (6.6%)¶
"""
Use the following dictionary to group the Countries by Continent, then create a dateframe that displays the sample size (the number of countries in each continent bin), and the sum, mean, and std deviation for the estimated population of each country.
ContinentDict = {'China':'Asia',
'United States':'North America',
'Japan':'Asia',
'United Kingdom':'Europe',
'Russian Federation':'Europe',
'Canada':'North America',
'Germany':'Europe',
'India':'Asia',
'France':'Europe',
'South Korea':'Asia',
'Italy':'Europe',
'Spain':'Europe',
'Iran':'Asia',
'Australia':'Australia',
'Brazil':'South America'}
This function should return a DataFrame with index named Continent ['Asia', 'Australia', 'Europe', 'North America', 'South America'] and columns ['size', 'sum', 'mean', 'std']
"""
def top15():
# create base df
Top15 = answer_one()
# add continents
ContinentDict = {'China':'Asia',
'United States':'North America',
'Japan':'Asia',
'United Kingdom':'Europe',
'Russian Federation':'Europe',
'Canada':'North America',
'Germany':'Europe',
'India':'Asia',
'France':'Europe',
'South Korea':'Asia',
'Italy':'Europe',
'Spain':'Europe',
'Iran':'Asia',
'Australia':'Australia',
'Brazil':'South America'}
# add continents
Top15['Continent'] = pd.Series(ContinentDict, name='Continent')
return Top15
def answer_eleven():
# get the top 15 countries, with continents
Top15 = top15()
# estimate populations
Top15['Population Estimate'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
# filter and reindex
pop_stats = (Top15.filter(['Continent', 'Country', 'Population Estimate'])
.reset_index()
.set_index('Continent')) | """ Terribly ugly solution, but it works """ | random_line_split |
|
Assignment 3 notes.py | ,7,6,14,37,32,7,8,8,18,7,5,11,17,7,7,8,14,16,4,7,6,13,16,5,6,7,7,5,9,6,9,7,10,4,9,8,6,13,6,5,8,7,7,5,9,4,4,7,11,6,5,7,5,6,6,10,5,8,6,10,32,6,7,7,7,5,13,9,10,10,6,8,8,4,5,16,10,10,9,6,10,8,10,10,7,10,7,7,5,5,11,13,11,9,5,7,4,24,6,4,8,5,6,16,8,4,11,6,8,11,5,11,19,7,7,18,6,12,21,11,25,32,5,21,12,7,6,10,12,9,12,8,8,15,7,12,11,5,9,18,5,8,9,6,11,20,10,8,41,11,4,5,19,7,6,12,24,6,6,7,20,14,27,13,28,7,10,7,9,8,25,5,6,8'
outcome = ['Failed\n', 'Passed\n']
energy = pd.DataFrame()
energy['original'] = pd.read_excel('Energy Indicators.xls',
usecols=[1],encoding='utf-8',
index_col=0).loc['Afghanistan':'Zimbabwe'].index.tolist()
energy['tested'] = countries.str.len()
energy['actual'] = encodedC.split(',')
energy['actual'] = energy['actual'].astype(int)
try:
energy['Country'] = countries
except Exception as e:
print('Failed, error: ',e)
res = 'Test number of records: '
res += outcome[len(countries)==len(energy)]
res += 'Test the column name: '
res += outcome [countries.name == 'Country']
res += 'Equality Test: '
res += outcome[energy['tested'].equals(energy['actual'])]
if not energy['tested'].equals(energy['actual']):
res += '\nMismatched countries:\n'
mismatch = energy.loc[energy['tested'] != (energy['actual']), [
'original', 'Country', 'tested', 'actual']].values.tolist()
res += '\n'.join('"{:}" miss-cleaned as "{:}"'.format(o, r)
for o, r, s, v in mismatch)
return res
# print(test_energy(get_energy().loc[:,'Country']))
print(test_energy(energy.loc[:,'Country']))
def test_gdp(countries):
"""
Input: a series/ the Country column in GDP
utf-8 encoded i.e. when reading GDP use
encoding='utf-8'
"""
encodedC = '5,7,11,6,7,10,20,9,7,14,19,9,7,10,7,7,5,12,10,8,7,12,22,7,6,7,7,6,8,17,6,8,24,6,30,11,15,5,5,13,8,11,8,7,10,10,22,4,7,14,6,14,7,8,8,7,18,7,43,26,19,45,21,7,16,9,7,5,7,8,14,40,7,4,6,13,21,5,14,7,5,9,6,11,13,17,6,7,9,9,4,6,11,9,8,38,7,5,7,9,16,9,9,9,8,11,5,14,7,4,4,7,6,5,7,6,5,10,5,15,8,8,19,11,6,6,49,7,7,7,5,9,25,44,10,13,9,19,19,7,25,9,10,6,16,24,7,6,7,10,8,26,6,16,13,14,4,5,7,50,10,8,24,10,10,9,6,8,13,7,13,5,7,9,11,6,5,5,11,12,4,18,8,6,4,11,5,16,6,24,11,25,8,8,27,25,16,5,7,18,6,10,12,5,7,9,15,12,11,10,7,6,42,11,18,12,21,8,15,8,6,9,25,10,20,24,4,42,44,4,8,10,12,52,49,11,5,23,41,19,7,6,6,8,6,7,19,7,13,10,30,13,22,21,7,7,18,5,5,11,12,16,6,8'
outcome = ['Failed\n', 'Passed\n']
GDP = pd.DataFrame()
GDP['original'] = pd.read_csv('world_bank.csv',
usecols=[0],encoding='utf-8',
index_col=0).loc['Aruba':'Zimbabwe'].index.tolist()
GDP['tested'] = countries.str.len()
GDP['actual'] = encodedC.split(',')
GDP['actual'] = GDP['actual'].astype(int)
try:
GDP['Country'] = countries
except Exception as e:
print('Failed, error: ',e)
res = 'Test number of records: '
res += outcome[len(countries)==len(GDP)]
res += 'Test the column name: '
res += outcome [countries.name == 'Country']
res += 'Equality Test: '
res += outcome[GDP['tested'].equals(GDP['actual'])]
if not GDP['tested'].equals(GDP['actual']):
re | return res
print(test_gdp(GDP['Country']))
"""
# Alternative merge strategy
# merge the first two, then the third in the requested order
merged2 = pd.merge(ScimEn, energy, how='inner', left_index=True, right_index=True)
merged3 = pd.merge(merged2, GDP, how='inner', left_index=True, right_index=True)
result = (ScimEn.merge(energy, on='Country')
.merge(GDP, on='Country'))
energy[energy['Country'].str.contains('United')]
GDP[GDP['Country'].str.contains('United')]
sub_str = {'^([^\d\(]+).*' : r'\1'}
en2 = energy.iloc[232].replace(to_replace={'Country' : sub_str}, )
energy[energy['Country'].str.contains('United')].replace(to_replace={'Country' : sub_str})
energy[energy['Country'].replace(to_replace='^([^\d\(]+).*', value=r'\1', regex=True)
"""
# Question 2 (6.6%)
"""
The previous question joined three datasets then reduced this to just the top 15 entries. When you joined the datasets, but before you reduced this to the top 15 items, how many entries did you lose?
This function should return a single number.
"""
def answer_two():
# get the dataframes; all indexed to 'Country'
energy = read_and_clean_energy_dataframe()
GDP = read_and_clean_GDP_dataframe()
ScimEn = read_and_clean_ScimEn_dataframe()
# merge sequence to | s += '\nMismatched countries:\n'
mismatch = GDP.loc[GDP['tested'] != (GDP['actual']), [
'original', 'Country', 'tested', 'actual']].values.tolist()
res += '\n'.join('"{:}" miss-cleaned as "{:}"'.format(o, r)
for o, r, s, v in mismatch)
| conditional_block |
Assignment 3 notes.py | ():
# get the dataframes; all indexed to
energy = read_and_clean_energy_dataframe()
GDP = read_and_clean_GDP_dataframe()
ScimEn = read_and_clean_ScimEn_dataframe()
# merge sequence to get columns in the requested order
result = ScimEn.merge(energy, on='Country').merge(GDP, on='Country')
return result.head(15)
answer_one()
# ----
# From the forums
# Assumes 'Country' is a column, not an index
energy.iloc[[0,-1]]
len(energy) # 227
GDP.iloc[[0,-1]]
len(GDP) # 264
ScimEn.iloc[[0,-1]]
len(ScimEn) # 191
def test_energy(countries):
"""
Input: a series/ the Country column in Energy
utf-8 encoded i.e. when reading Energy use
encoding='utf-8'
"""
encodedC = '11,7,7,14,7,6,8,19,9,7,5,9,7,10,7,7,10,8,7,7,6,5,7,6,7,32,22,8,6,22,17,8,12,7,10,8,8,6,14,24,4,5,5,9,42,8,7,5,12,10,13,7,4,7,6,14,37,32,7,8,8,18,7,5,11,17,7,7,8,14,16,4,7,6,13,16,5,6,7,7,5,9,6,9,7,10,4,9,8,6,13,6,5,8,7,7,5,9,4,4,7,11,6,5,7,5,6,6,10,5,8,6,10,32,6,7,7,7,5,13,9,10,10,6,8,8,4,5,16,10,10,9,6,10,8,10,10,7,10,7,7,5,5,11,13,11,9,5,7,4,24,6,4,8,5,6,16,8,4,11,6,8,11,5,11,19,7,7,18,6,12,21,11,25,32,5,21,12,7,6,10,12,9,12,8,8,15,7,12,11,5,9,18,5,8,9,6,11,20,10,8,41,11,4,5,19,7,6,12,24,6,6,7,20,14,27,13,28,7,10,7,9,8,25,5,6,8'
outcome = ['Failed\n', 'Passed\n']
energy = pd.DataFrame()
energy['original'] = pd.read_excel('Energy Indicators.xls',
usecols=[1],encoding='utf-8',
index_col=0).loc['Afghanistan':'Zimbabwe'].index.tolist()
energy['tested'] = countries.str.len()
energy['actual'] = encodedC.split(',')
energy['actual'] = energy['actual'].astype(int)
try:
energy['Country'] = countries
except Exception as e:
print('Failed, error: ',e)
res = 'Test number of records: '
res += outcome[len(countries)==len(energy)]
res += 'Test the column name: '
res += outcome [countries.name == 'Country']
res += 'Equality Test: '
res += outcome[energy['tested'].equals(energy['actual'])]
if not energy['tested'].equals(energy['actual']):
res += '\nMismatched countries:\n'
mismatch = energy.loc[energy['tested'] != (energy['actual']), [
'original', 'Country', 'tested', 'actual']].values.tolist()
res += '\n'.join('"{:}" miss-cleaned as "{:}"'.format(o, r)
for o, r, s, v in mismatch)
return res
# print(test_energy(get_energy().loc[:,'Country']))
print(test_energy(energy.loc[:,'Country']))
def test_gdp(countries):
"""
Input: a series/ the Country column in GDP
utf-8 encoded i.e. when reading GDP use
encoding='utf-8'
"""
encodedC = '5,7,11,6,7,10,20,9,7,14,19,9,7,10,7,7,5,12,10,8,7,12,22,7,6,7,7,6,8,17,6,8,24,6,30,11,15,5,5,13,8,11,8,7,10,10,22,4,7,14,6,14,7,8,8,7,18,7,43,26,19,45,21,7,16,9,7,5,7,8,14,40,7,4,6,13,21,5,14,7,5,9,6,11,13,17,6,7,9,9,4,6,11,9,8,38,7,5,7,9,16,9,9,9,8,11,5,14,7,4,4,7,6,5,7,6,5,10,5,15,8,8,19,11,6,6,49,7,7,7,5,9,25,44,10,13,9,19,19,7,25,9,10,6,16,24,7,6,7,10,8,26,6,16,13,14,4,5,7,50,10,8,24,10,10,9,6,8,13,7,13,5,7,9,11,6,5,5,11,12,4,18,8,6,4,11,5,16,6,24,11,25,8,8,27,25,16,5,7,18,6,10,12,5,7,9,15,12,11,10,7,6,42,11,18,12,21,8,15,8,6,9,25,10,20,24,4,42,44,4,8,10,12,52,49,11,5,23,41,19,7,6,6,8,6,7,19,7,13,10,30,13,22,21,7,7,18,5,5,11,12,16,6,8'
outcome = ['Failed\n', 'Passed\n']
GDP = pd.DataFrame()
GDP['original'] = pd.read_csv('world_bank.csv',
usecols=[0],encoding='utf-8',
index_col=0).loc['Aruba':'Zimbabwe'].index.tolist()
GDP['tested'] = countries.str.len()
GDP['actual'] = encodedC.split(',')
GDP['actual'] = GDP['actual'].astype(int)
try:
GDP['Country'] = countries
except Exception as e:
print('Failed, error: ',e)
res = 'Test number of records: '
res += outcome[len(countries)==len(GDP)]
res += 'Test the column name: '
res += outcome [countries.name == 'Country']
res += 'Equality Test: '
res += outcome[GDP['tested'].equals(GDP['actual'])]
if not GDP['tested'].equals(GDP['actual']):
res += '\nMismatched countries:\n'
mismatch = GDP.loc[GDP['tested'] != (GDP['actual']), [
'original', 'Country', 'tested', 'actual']].values.tolist()
res += '\n'.join('"{:}" miss-cleaned as "{:}"'.format(o, r)
for o, r, s | answer_one | identifier_name |
|
Assignment 3 notes.py | 1,7,16,9,7,5,7,8,14,40,7,4,6,13,21,5,14,7,5,9,6,11,13,17,6,7,9,9,4,6,11,9,8,38,7,5,7,9,16,9,9,9,8,11,5,14,7,4,4,7,6,5,7,6,5,10,5,15,8,8,19,11,6,6,49,7,7,7,5,9,25,44,10,13,9,19,19,7,25,9,10,6,16,24,7,6,7,10,8,26,6,16,13,14,4,5,7,50,10,8,24,10,10,9,6,8,13,7,13,5,7,9,11,6,5,5,11,12,4,18,8,6,4,11,5,16,6,24,11,25,8,8,27,25,16,5,7,18,6,10,12,5,7,9,15,12,11,10,7,6,42,11,18,12,21,8,15,8,6,9,25,10,20,24,4,42,44,4,8,10,12,52,49,11,5,23,41,19,7,6,6,8,6,7,19,7,13,10,30,13,22,21,7,7,18,5,5,11,12,16,6,8'
outcome = ['Failed\n', 'Passed\n']
GDP = pd.DataFrame()
GDP['original'] = pd.read_csv('world_bank.csv',
usecols=[0],encoding='utf-8',
index_col=0).loc['Aruba':'Zimbabwe'].index.tolist()
GDP['tested'] = countries.str.len()
GDP['actual'] = encodedC.split(',')
GDP['actual'] = GDP['actual'].astype(int)
try:
GDP['Country'] = countries
except Exception as e:
print('Failed, error: ',e)
res = 'Test number of records: '
res += outcome[len(countries)==len(GDP)]
res += 'Test the column name: '
res += outcome [countries.name == 'Country']
res += 'Equality Test: '
res += outcome[GDP['tested'].equals(GDP['actual'])]
if not GDP['tested'].equals(GDP['actual']):
res += '\nMismatched countries:\n'
mismatch = GDP.loc[GDP['tested'] != (GDP['actual']), [
'original', 'Country', 'tested', 'actual']].values.tolist()
res += '\n'.join('"{:}" miss-cleaned as "{:}"'.format(o, r)
for o, r, s, v in mismatch)
return res
print(test_gdp(GDP['Country']))
"""
# Alternative merge strategy
# merge the first two, then the third in the requested order
merged2 = pd.merge(ScimEn, energy, how='inner', left_index=True, right_index=True)
merged3 = pd.merge(merged2, GDP, how='inner', left_index=True, right_index=True)
result = (ScimEn.merge(energy, on='Country')
.merge(GDP, on='Country'))
energy[energy['Country'].str.contains('United')]
GDP[GDP['Country'].str.contains('United')]
sub_str = {'^([^\d\(]+).*' : r'\1'}
en2 = energy.iloc[232].replace(to_replace={'Country' : sub_str}, )
energy[energy['Country'].str.contains('United')].replace(to_replace={'Country' : sub_str})
energy[energy['Country'].replace(to_replace='^([^\d\(]+).*', value=r'\1', regex=True)
"""
# Question 2 (6.6%)
"""
The previous question joined three datasets then reduced this to just the top 15 entries. When you joined the datasets, but before you reduced this to the top 15 items, how many entries did you lose?
This function should return a single number.
"""
def answer_two():
# get the dataframes; all indexed to 'Country'
energy = read_and_clean_energy_dataframe()
GDP = read_and_clean_GDP_dataframe()
ScimEn = read_and_clean_ScimEn_dataframe()
# merge sequence to get columns in the requested order
intersection = ScimEn.merge(energy, on='Country').merge(GDP, on='Country')
union = ScimEn.merge(energy, on='Country', how='outer').merge(GDP, on='Country', how='outer')
# return np.max([len(energy), len(GDP), len(ScimEn)]) - len(result) == incorrect answer
return len(union) - len(intersection)
answer_two()
# Question 3 (6.6%)
"""
What is the average GDP over the last 10 years for each country? (exclude missing values from this calculation.)
This function should return a Series named avgGDP with 15 countries and their average GDP sorted in descending order.
"""
def answer_three():
def ave(row):
# data = row[['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']]
data = row[np.r_[2006:2016].astype(str)]
return pd.Series({'avgGDP': np.mean(data)})
Top15 = answer_one()
Top15['avgGDP'] = Top15.apply(ave, axis=1)
Top15.sort_values('avgGDP', ascending=False, inplace=True)
return Top15['avgGDP']
answer_three()
Top15.iloc[[0,-1]]
# Question 4 (6.6%)
"""
By how much had the GDP changed over the 10 year span for the country with the 6th largest average GDP?
From answer_three,
This function should return a single number.
"""
def answer_four():
Top15 = answer_one()
# decade = ['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']
decade = np.r_[2006:2016].astype(str)
sixth_country = answer_three().index[5]
sixth_gdp = Top15.loc[sixth_country][decade]
return max(sixth_gdp) - min(sixth_gdp)
answer_four()
# Question 6 (6.6%)
"""
What country has the maximum % Renewable and what is the percentage?
This function should return a tuple with the name of the country and the percentage.
"""
def answer_six():
Top15 = answer_one()
cty = Top15['% Renewable'].idxmax()
return tuple([cty, Top15.loc[cty]['% Renewable']])
# Too complicated
# lst = (Top15.loc[Top15['% Renewable'] == Top15['% Renewable'].max()]['% Renewable'])
# return tuple([lst.index, lst])
print(answer_six())
type(answer_six())
# Question 7 (6.6%)
"""
Create a new column that is the ratio of Self-Citations to Total Citations. What is the maximum value for this new column, and what country has the highest ratio?
This function should return a tuple with the name of the country and the ratio.
"""
def answer_seven():
Top15 = answer_one()
Top15['Citation Ratio'] = Top15['Self-citations'] / Top15['Citations']
cnty = Top15['Citation Ratio'].idxmax()
return tuple([cnty, Top15.loc[cnty]['Citation Ratio']])
answer_seven()
# Question 8 (6.6%)¶
"""
Create a column that estimates the population using Energy Supply and Energy Supply per capita. What is the third most populous country according to this estimate?
This function should return a single string value.
"""
def answer_eight():
Top | 15 = answer_one()
Top15['Population Estimate'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
Top15.sort_values('Population Estimate', ascending=False, inplace=True)
return Top15.iloc[2].name
a | identifier_body |
|
estimator.py | flags.DEFINE_string('checkpoint_path', None, 'Path to load checkpoint.')
flags.DEFINE_string('gin_file', None, 'Gin config file.')
flags.DEFINE_multi_string('gin_param', None, 'Gin config parameters.')
FLAGS = flags.FLAGS
_CONFIG_GIN = 'operative_config-0.gin'
def main(_):
    """Entry point: resolve Gin config paths, parse them, then run FLAGS.do.

    Precedence for the Gin config:
      1. an explicit --gin_file;
      2. the operative config saved next to the checkpoint
         (--checkpoint_dir, or the directory of --checkpoint_path);
      3. no config file (bindings from --gin_param only).
    """
    if FLAGS.gin_file:
        gin_paths = [FLAGS.gin_file]
    elif FLAGS.checkpoint_dir or FLAGS.checkpoint_path:
        ckpt_dir = FLAGS.checkpoint_dir
        if ckpt_dir is None:
            ckpt_dir = os.path.dirname(FLAGS.checkpoint_path)
        gin_paths = [os.path.join(ckpt_dir, _CONFIG_GIN)]
    else:
        gin_paths = []
    gin.parse_config_files_and_bindings(gin_paths, FLAGS.gin_param)
    # Dispatch to the Estimator method named by --do (e.g. 'train').
    getattr(Estimator(), FLAGS.do)()
class InputFn(object):
    """Builds tf.data input pipelines and resolves the on-disk layout.

    Directory layout under FLAGS.root_dir:
        data/splits      dataset split definitions
        data/tfrecords   cached TFRecord files
        models/          model checkpoints
        results/         evaluation / prediction outputs
    """

    @staticmethod
    def create_dir(base_dir):
        """Create and return a unique timestamped subdirectory of `base_dir`."""
        dir_path = os.path.join(
            base_dir,
            datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S-%f'))
        os.makedirs(dir_path)
        return dir_path

    @property
    def root_dir(self):
        # Root of the whole experiment tree; all other paths derive from it.
        return FLAGS.root_dir

    @property
    def data_dir(self):
        return os.path.join(self.root_dir, 'data')

    @property
    def model_dir_root(self):
        return os.path.join(self.root_dir, 'models')

    @property
    def result_dir_root(self):
        return os.path.join(self.root_dir, 'results')

    @property
    def split_dir_root(self):
        return os.path.join(self.data_dir, 'splits')

    @property
    def tfrecord_dir_root(self):
        return os.path.join(self.data_dir, 'tfrecords')

    def _write_tfrecord(self, tfrecords, tfrecord_path):
        """Serialize feature dicts as tf.train.Examples into `tfrecord_path`.

        Args:
            tfrecords: a dict (or list of dicts) mapping feature name to an
                ndarray accepted by `ndarray_feature`.
            tfrecord_path: destination file path.
        """
        if not isinstance(tfrecords, list):
            tfrecords = [tfrecords]
        # Use the writer as a context manager so the file is flushed and the
        # handle released even on error (the original leaked the writer).
        with tf.io.TFRecordWriter(tfrecord_path) as writer:
            for (num, tfrecord) in enumerate(tfrecords):
                feature = {
                    key: ndarray_feature(value)
                    for (key, value) in tfrecord.items()}
                if num == 0:
                    # Log the schema once per file, not per record.
                    logging.info(f'Caching features {list(feature)} to {tfrecord_path}')
                example = tf.train.Example(features=tf.train.Features(feature=feature))
                writer.write(example.SerializeToString())

    def _dataset_from_df(self, df, training):
        """Build a tf.data.Dataset yielding one {column: value} dict per row.

        Args:
            df: a pandas DataFrame; dtypes are inferred from the first row,
                so every value in a column is assumed to share that dtype
                (TODO confirm for object columns).
            training: if True, shuffle over the full frame and repeat forever.
        """
        dataset = tf.data.Dataset.range(len(df))
        if training:
            dataset = dataset.shuffle(len(df))
            dataset = dataset.repeat()

        def _get_item(i):
            # Runs eagerly via tf.numpy_function; returns one value per column.
            return df.iloc[i].to_list()

        # tf.as_dtype expects a dtype-like value, not an array, so take the
        # .dtype of the first row's value for each column (the original passed
        # the array itself, which tf.as_dtype cannot convert).
        dtypes = [
            tf.as_dtype(np.asarray(df[column].iloc[0]).dtype)
            for column in df.columns]
        dataset = dataset.map(
            lambda i: tf.numpy_function(_get_item, [i], dtypes),
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(
            lambda *values: dict(zip(df.columns, values)),
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
        return dataset

    def _example_to_features_and_labels(self, example, label_keys):
        """Split an example dict into (features, labels).

        Keys in `label_keys` are moved from a copy of `example` into the
        labels dict; keys that are missing (or map to None) are skipped.
        The input dict is not mutated.
        """
        features = example.copy()
        labels = {}
        for key in label_keys:
            value = features.pop(key, None)
            if value is not None:
                labels[key] = value
        return (features, labels)

    def _input_fn_train_or_eval(self, training, batch_size):
        """Subclass hook: return the dataset for TRAIN/EVAL modes."""
        pass

    def _input_fn_predict(self, batch_size):
        """Subclass hook: return the dataset for PREDICT mode."""
        pass

    def input_fn(self, batch_size, mode):
        """Dispatch to the dataset builder matching `mode` (tf.estimator.ModeKeys)."""
        if mode == tf.estimator.ModeKeys.TRAIN:
            return self._input_fn_train_or_eval(
                training=True, batch_size=batch_size)
        elif mode == tf.estimator.ModeKeys.EVAL:
            return self._input_fn_train_or_eval(
                training=False, batch_size=batch_size)
        elif mode == tf.estimator.ModeKeys.PREDICT:
            return self._input_fn_predict(
                batch_size=batch_size)
class ModelFn(object):
def _get_global_step(self):
return tf_v1.train.get_global_step()
def _register_model_updates(self, model, features):
update_ops = model.get_updates_for(None) + model.get_updates_for(features.values())
for update_op in update_ops:
tf_v1.add_to_collection(tf_v1.GraphKeys.UPDATE_OPS, update_op)
def _regularization_loss(self):
with tf.name_scope('reg_loss'):
return (
self._train_spec.weight_decay *
tf.add_n([
tf.nn.l2_loss(var)
for var in tf_v1.trainable_variables()]))
def model_fn(self, features, labels, mode):
pass
def _build_estimator_spec(self,
mode,
predictions,
model_loss=None,
metrics=None,
print_tensors=None):
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
if print_tensors:
print_ops = [
tf.print(f'{key} =', value, summarize=256)
for (key, value) in print_tensors.items()]
with tf.control_dependencies(print_ops):
model_loss = tf.identity(model_loss)
total_loss = model_loss
metrics = metrics or {}
metrics['loss/model_loss'] = model_loss
if mode == tf.estimator.ModeKeys.TRAIN:
if self._train_spec.weight_decay:
reg_loss = self._regularization_loss()
total_loss = total_loss + reg_loss
metrics['loss/reg_loss'] = reg_loss
global_step = self._get_global_step()
if self._train_spec.lr_decay_steps:
learning_rate = tf_v1.train.exponential_decay(
self._train_spec.lr,
global_step=global_step,
decay_steps=self._train_spec.lr_decay_steps,
decay_rate=self._train_spec.lr_decay_rate,
staircase=True)
else:
learning_rate = tf.constant(self._train_spec.lr)
metrics['learning_rate'] = learning_rate
trainable_variables = tf_v1.get_collection(tf_v1.GraphKeys.TRAINABLE_VARIABLES)
lr_groups = self._train_spec.lr_groups
lr_group_to_variables = collections.defaultdict(list)
for variable in trainable_variables:
has_match = False
for (lr_group_name, lr_scaling) in lr_groups.items():
if (not has_match) and re.match(lr_group_name, variable.name):
has_match = True
if lr_scaling:
lr_group_to_variables[lr_group_name].append(variable)
if not has_match:
lr_group_to_variables['__all__'].append(variable)
logging.info('Learning rate groups')
logging.info(pprint.pformat(lr_group_to_variables))
variables = sum(list(lr_group_to_variables.values()), [])
gradients = tf.gradients(total_loss, variables)
if self._train_spec.gradient_clip:
(gradients, _) = tf.clip_by_global_norm(
gradients, self._train_spec.gradient_clip)
variable_to_gradient = dict(zip(variables, gradients))
apply_ops = []
for lr_group_name in lr_group_to_variables.keys():
lr_scaling = lr_groups.get(lr_group_name, 1.0) # for __all__
if not lr_scaling:
continue
optimizer = self._train_spec.optimizer_cls(
learning_rate=lr_scaling * learning_rate)
optimizer_variables = lr_group_to_variables[lr_group_name]
gradient_variables = [
(variable_to_gradient[variable], variable)
for variable in optimizer_variables]
apply_op = optimizer.apply_gradients(
gradient_variables, global_step=global_step)
apply_ops.append(apply_op)
update_ops = tf_v1.get_collection(tf_v1.GraphKeys.UPDATE_OPS)
train_op = tf.group(update_ops, *apply_ops)
for (key, value) in metrics.items():
if isinstance(value, tuple): # global metric
scalar = value[0]
else: # local metric
scalar = value
tf_v1.summary.scalar(key, scalar)
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {}
for (key, value) in metrics.items():
if isinstance(value, tuple): # global metric
eval_metric_op = value
else: # local metric
eval_metric_op = tf_v1.metrics.mean(value)
eval_metric_ops[key] = eval_metric_op
evaluation_hooks = []
summary_op = tf_v1.summary.merge_all()
if summary_op is not None:
summary_hook = tf.estimator.SummarySaverHook(
save_steps=self._eval_spec.save_summary_per_steps,
output_dir=self._estimator.eval_dir(self._eval_spec.name),
summary_op=summary_op)
evaluation_hooks.append(summary_hook)
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, eval_metric_ops=eval_metric_ops,
evaluation_hooks=evaluation_hooks)
@gin.configurable
class Estimator(InputFn, ModelFn):
def __new__(cls, estimator_cls=None):
if estimator_cls is None:
return super(Estimator, cls).__new__(cls)
elif issubclass(estimator_cls, Estimator):
return super(Estimator, cls).__new__(estimator_cls)
else:
obj = super(estimator_cls, estimator_cls).__new__(estimator_cls)
obj.__init__()
return obj
def train_eval(self):
model_dir = self.create_dir(self.model_dir_root)
shutil.copy(FLAGS | input_fn | identifier_name |
estimator.py | flags.DEFINE_string('checkpoint_path', None, 'Path to load checkpoint.')
flags.DEFINE_string('gin_file', None, 'Gin config file.')
flags.DEFINE_multi_string('gin_param', None, 'Gin config parameters.')
FLAGS = flags.FLAGS
_CONFIG_GIN = 'operative_config-0.gin'
def main(_):
if FLAGS.gin_file:
gin_paths = [FLAGS.gin_file]
elif (FLAGS.checkpoint_dir or FLAGS.checkpoint_path):
|
else:
gin_paths = []
gin.parse_config_files_and_bindings(gin_paths, FLAGS.gin_param)
estimator = Estimator()
getattr(estimator, FLAGS.do)()
class InputFn(object):
@staticmethod
def create_dir(base_dir):
dir_path = os.path.join(
base_dir,
datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S-%f'))
os.makedirs(dir_path)
return dir_path
@property
def root_dir(self):
return FLAGS.root_dir
@property
def data_dir(self):
return os.path.join(self.root_dir, 'data')
@property
def model_dir_root(self):
return os.path.join(self.root_dir, 'models')
@property
def result_dir_root(self):
return os.path.join(self.root_dir, 'results')
@property
def split_dir_root(self):
return os.path.join(self.data_dir, 'splits')
@property
def tfrecord_dir_root(self):
return os.path.join(self.data_dir, 'tfrecords')
def _write_tfrecord(self, tfrecords, tfrecord_path):
if not isinstance(tfrecords, list):
tfrecords = [tfrecords]
writer = tf.io.TFRecordWriter(tfrecord_path)
for (num, tfrecord) in enumerate(tfrecords):
feature = {
key: ndarray_feature(value)
for (key, value) in tfrecord.items()}
if num == 0:
logging.info(f'Caching features {list(feature)} to {tfrecord_path}')
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
def _dataset_from_df(self, df, training):
dataset = tf.data.Dataset.range(len(df))
if training:
dataset = dataset.shuffle(len(df))
dataset = dataset.repeat()
def _get_item(i):
return df.iloc[i].to_list()
dtypes = [
tf.as_dtype(np.asarray(df[column].iloc[0]))
for column in df.columns]
dataset = dataset.map(
lambda i: tf.numpy_function(_get_item, [i], dtypes),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
lambda *values: dict(zip(df.columns, values)),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def _example_to_features_and_labels(self, example, label_keys):
features = example.copy()
labels = {}
for key in label_keys:
value = features.pop(key, None)
if value is not None:
labels[key] = value
return (features, labels)
def _input_fn_train_or_eval(self, training, batch_size):
pass
def _input_fn_predict(self, batch_size):
pass
def input_fn(self, batch_size, mode):
if mode == tf.estimator.ModeKeys.TRAIN:
return self._input_fn_train_or_eval(
training=True, batch_size=batch_size)
elif mode == tf.estimator.ModeKeys.EVAL:
return self._input_fn_train_or_eval(
training=False, batch_size=batch_size)
elif mode == tf.estimator.ModeKeys.PREDICT:
return self._input_fn_predict(
batch_size=batch_size)
class ModelFn(object):
def _get_global_step(self):
return tf_v1.train.get_global_step()
def _register_model_updates(self, model, features):
update_ops = model.get_updates_for(None) + model.get_updates_for(features.values())
for update_op in update_ops:
tf_v1.add_to_collection(tf_v1.GraphKeys.UPDATE_OPS, update_op)
def _regularization_loss(self):
with tf.name_scope('reg_loss'):
return (
self._train_spec.weight_decay *
tf.add_n([
tf.nn.l2_loss(var)
for var in tf_v1.trainable_variables()]))
def model_fn(self, features, labels, mode):
pass
def _build_estimator_spec(self,
mode,
predictions,
model_loss=None,
metrics=None,
print_tensors=None):
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
if print_tensors:
print_ops = [
tf.print(f'{key} =', value, summarize=256)
for (key, value) in print_tensors.items()]
with tf.control_dependencies(print_ops):
model_loss = tf.identity(model_loss)
total_loss = model_loss
metrics = metrics or {}
metrics['loss/model_loss'] = model_loss
if mode == tf.estimator.ModeKeys.TRAIN:
if self._train_spec.weight_decay:
reg_loss = self._regularization_loss()
total_loss = total_loss + reg_loss
metrics['loss/reg_loss'] = reg_loss
global_step = self._get_global_step()
if self._train_spec.lr_decay_steps:
learning_rate = tf_v1.train.exponential_decay(
self._train_spec.lr,
global_step=global_step,
decay_steps=self._train_spec.lr_decay_steps,
decay_rate=self._train_spec.lr_decay_rate,
staircase=True)
else:
learning_rate = tf.constant(self._train_spec.lr)
metrics['learning_rate'] = learning_rate
trainable_variables = tf_v1.get_collection(tf_v1.GraphKeys.TRAINABLE_VARIABLES)
lr_groups = self._train_spec.lr_groups
lr_group_to_variables = collections.defaultdict(list)
for variable in trainable_variables:
has_match = False
for (lr_group_name, lr_scaling) in lr_groups.items():
if (not has_match) and re.match(lr_group_name, variable.name):
has_match = True
if lr_scaling:
lr_group_to_variables[lr_group_name].append(variable)
if not has_match:
lr_group_to_variables['__all__'].append(variable)
logging.info('Learning rate groups')
logging.info(pprint.pformat(lr_group_to_variables))
variables = sum(list(lr_group_to_variables.values()), [])
gradients = tf.gradients(total_loss, variables)
if self._train_spec.gradient_clip:
(gradients, _) = tf.clip_by_global_norm(
gradients, self._train_spec.gradient_clip)
variable_to_gradient = dict(zip(variables, gradients))
apply_ops = []
for lr_group_name in lr_group_to_variables.keys():
lr_scaling = lr_groups.get(lr_group_name, 1.0) # for __all__
if not lr_scaling:
continue
optimizer = self._train_spec.optimizer_cls(
learning_rate=lr_scaling * learning_rate)
optimizer_variables = lr_group_to_variables[lr_group_name]
gradient_variables = [
(variable_to_gradient[variable], variable)
for variable in optimizer_variables]
apply_op = optimizer.apply_gradients(
gradient_variables, global_step=global_step)
apply_ops.append(apply_op)
update_ops = tf_v1.get_collection(tf_v1.GraphKeys.UPDATE_OPS)
train_op = tf.group(update_ops, *apply_ops)
for (key, value) in metrics.items():
if isinstance(value, tuple): # global metric
scalar = value[0]
else: # local metric
scalar = value
tf_v1.summary.scalar(key, scalar)
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {}
for (key, value) in metrics.items():
if isinstance(value, tuple): # global metric
eval_metric_op = value
else: # local metric
eval_metric_op = tf_v1.metrics.mean(value)
eval_metric_ops[key] = eval_metric_op
evaluation_hooks = []
summary_op = tf_v1.summary.merge_all()
if summary_op is not None:
summary_hook = tf.estimator.SummarySaverHook(
save_steps=self._eval_spec.save_summary_per_steps,
output_dir=self._estimator.eval_dir(self._eval_spec.name),
summary_op=summary_op)
evaluation_hooks.append(summary_hook)
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, eval_metric_ops=eval_metric_ops,
evaluation_hooks=evaluation_hooks)
@gin.configurable
class Estimator(InputFn, ModelFn):
def __new__(cls, estimator_cls=None):
if estimator_cls is None:
return super(Estimator, cls).__new__(cls)
elif issubclass(estimator_cls, Estimator):
return super(Estimator, cls).__new__(estimator_cls)
else:
obj = super(estimator_cls, estimator_cls).__new__(estimator_cls)
obj.__init__()
return obj
def train_eval(self):
model_dir = self.create_dir(self.model_dir_root)
shutil.copy(FLAGS | checkpoint_dir = FLAGS.checkpoint_dir
if checkpoint_dir is None:
checkpoint_dir = os.path.dirname(FLAGS.checkpoint_path)
gin_paths = [os.path.join(checkpoint_dir, _CONFIG_GIN)] | conditional_block |
estimator.py | flags.DEFINE_string('checkpoint_path', None, 'Path to load checkpoint.')
flags.DEFINE_string('gin_file', None, 'Gin config file.')
flags.DEFINE_multi_string('gin_param', None, 'Gin config parameters.')
FLAGS = flags.FLAGS
_CONFIG_GIN = 'operative_config-0.gin'
def main(_):
if FLAGS.gin_file:
gin_paths = [FLAGS.gin_file]
elif (FLAGS.checkpoint_dir or FLAGS.checkpoint_path):
checkpoint_dir = FLAGS.checkpoint_dir
if checkpoint_dir is None:
checkpoint_dir = os.path.dirname(FLAGS.checkpoint_path)
gin_paths = [os.path.join(checkpoint_dir, _CONFIG_GIN)]
else:
gin_paths = []
gin.parse_config_files_and_bindings(gin_paths, FLAGS.gin_param)
estimator = Estimator()
getattr(estimator, FLAGS.do)()
class InputFn(object):
@staticmethod
def create_dir(base_dir):
dir_path = os.path.join(
base_dir,
datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S-%f'))
os.makedirs(dir_path)
return dir_path
@property
def root_dir(self):
return FLAGS.root_dir
@property
def data_dir(self):
return os.path.join(self.root_dir, 'data')
@property
def model_dir_root(self):
return os.path.join(self.root_dir, 'models')
@property
def result_dir_root(self):
return os.path.join(self.root_dir, 'results')
@property
def split_dir_root(self):
return os.path.join(self.data_dir, 'splits')
@property
def tfrecord_dir_root(self):
return os.path.join(self.data_dir, 'tfrecords')
def _write_tfrecord(self, tfrecords, tfrecord_path):
if not isinstance(tfrecords, list):
tfrecords = [tfrecords]
writer = tf.io.TFRecordWriter(tfrecord_path)
for (num, tfrecord) in enumerate(tfrecords):
feature = {
key: ndarray_feature(value)
for (key, value) in tfrecord.items()}
if num == 0:
logging.info(f'Caching features {list(feature)} to {tfrecord_path}')
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
def _dataset_from_df(self, df, training):
dataset = tf.data.Dataset.range(len(df))
if training:
dataset = dataset.shuffle(len(df))
dataset = dataset.repeat()
def _get_item(i):
return df.iloc[i].to_list()
dtypes = [
tf.as_dtype(np.asarray(df[column].iloc[0]))
for column in df.columns]
dataset = dataset.map(
lambda i: tf.numpy_function(_get_item, [i], dtypes),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
lambda *values: dict(zip(df.columns, values)),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def _example_to_features_and_labels(self, example, label_keys):
features = example.copy()
labels = {}
for key in label_keys:
value = features.pop(key, None)
if value is not None:
labels[key] = value
return (features, labels)
def _input_fn_train_or_eval(self, training, batch_size):
pass
def _input_fn_predict(self, batch_size):
pass
def input_fn(self, batch_size, mode): | return self._input_fn_train_or_eval(
training=False, batch_size=batch_size)
elif mode == tf.estimator.ModeKeys.PREDICT:
return self._input_fn_predict(
batch_size=batch_size)
class ModelFn(object):
def _get_global_step(self):
return tf_v1.train.get_global_step()
def _register_model_updates(self, model, features):
update_ops = model.get_updates_for(None) + model.get_updates_for(features.values())
for update_op in update_ops:
tf_v1.add_to_collection(tf_v1.GraphKeys.UPDATE_OPS, update_op)
def _regularization_loss(self):
with tf.name_scope('reg_loss'):
return (
self._train_spec.weight_decay *
tf.add_n([
tf.nn.l2_loss(var)
for var in tf_v1.trainable_variables()]))
def model_fn(self, features, labels, mode):
pass
def _build_estimator_spec(self,
mode,
predictions,
model_loss=None,
metrics=None,
print_tensors=None):
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
if print_tensors:
print_ops = [
tf.print(f'{key} =', value, summarize=256)
for (key, value) in print_tensors.items()]
with tf.control_dependencies(print_ops):
model_loss = tf.identity(model_loss)
total_loss = model_loss
metrics = metrics or {}
metrics['loss/model_loss'] = model_loss
if mode == tf.estimator.ModeKeys.TRAIN:
if self._train_spec.weight_decay:
reg_loss = self._regularization_loss()
total_loss = total_loss + reg_loss
metrics['loss/reg_loss'] = reg_loss
global_step = self._get_global_step()
if self._train_spec.lr_decay_steps:
learning_rate = tf_v1.train.exponential_decay(
self._train_spec.lr,
global_step=global_step,
decay_steps=self._train_spec.lr_decay_steps,
decay_rate=self._train_spec.lr_decay_rate,
staircase=True)
else:
learning_rate = tf.constant(self._train_spec.lr)
metrics['learning_rate'] = learning_rate
trainable_variables = tf_v1.get_collection(tf_v1.GraphKeys.TRAINABLE_VARIABLES)
lr_groups = self._train_spec.lr_groups
lr_group_to_variables = collections.defaultdict(list)
for variable in trainable_variables:
has_match = False
for (lr_group_name, lr_scaling) in lr_groups.items():
if (not has_match) and re.match(lr_group_name, variable.name):
has_match = True
if lr_scaling:
lr_group_to_variables[lr_group_name].append(variable)
if not has_match:
lr_group_to_variables['__all__'].append(variable)
logging.info('Learning rate groups')
logging.info(pprint.pformat(lr_group_to_variables))
variables = sum(list(lr_group_to_variables.values()), [])
gradients = tf.gradients(total_loss, variables)
if self._train_spec.gradient_clip:
(gradients, _) = tf.clip_by_global_norm(
gradients, self._train_spec.gradient_clip)
variable_to_gradient = dict(zip(variables, gradients))
apply_ops = []
for lr_group_name in lr_group_to_variables.keys():
lr_scaling = lr_groups.get(lr_group_name, 1.0) # for __all__
if not lr_scaling:
continue
optimizer = self._train_spec.optimizer_cls(
learning_rate=lr_scaling * learning_rate)
optimizer_variables = lr_group_to_variables[lr_group_name]
gradient_variables = [
(variable_to_gradient[variable], variable)
for variable in optimizer_variables]
apply_op = optimizer.apply_gradients(
gradient_variables, global_step=global_step)
apply_ops.append(apply_op)
update_ops = tf_v1.get_collection(tf_v1.GraphKeys.UPDATE_OPS)
train_op = tf.group(update_ops, *apply_ops)
for (key, value) in metrics.items():
if isinstance(value, tuple): # global metric
scalar = value[0]
else: # local metric
scalar = value
tf_v1.summary.scalar(key, scalar)
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {}
for (key, value) in metrics.items():
if isinstance(value, tuple): # global metric
eval_metric_op = value
else: # local metric
eval_metric_op = tf_v1.metrics.mean(value)
eval_metric_ops[key] = eval_metric_op
evaluation_hooks = []
summary_op = tf_v1.summary.merge_all()
if summary_op is not None:
summary_hook = tf.estimator.SummarySaverHook(
save_steps=self._eval_spec.save_summary_per_steps,
output_dir=self._estimator.eval_dir(self._eval_spec.name),
summary_op=summary_op)
evaluation_hooks.append(summary_hook)
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, eval_metric_ops=eval_metric_ops,
evaluation_hooks=evaluation_hooks)
@gin.configurable
class Estimator(InputFn, ModelFn):
def __new__(cls, estimator_cls=None):
if estimator_cls is None:
return super(Estimator, cls).__new__(cls)
elif issubclass(estimator_cls, Estimator):
return super(Estimator, cls).__new__(estimator_cls)
else:
obj = super(estimator_cls, estimator_cls).__new__(estimator_cls)
obj.__init__()
return obj
def train_eval(self):
model_dir = self.create_dir(self.model_dir_root)
shutil.copy(FLAGS.g | if mode == tf.estimator.ModeKeys.TRAIN:
return self._input_fn_train_or_eval(
training=True, batch_size=batch_size)
elif mode == tf.estimator.ModeKeys.EVAL: | random_line_split |
estimator.py | flags.DEFINE_string('checkpoint_path', None, 'Path to load checkpoint.')
flags.DEFINE_string('gin_file', None, 'Gin config file.')
flags.DEFINE_multi_string('gin_param', None, 'Gin config parameters.')
FLAGS = flags.FLAGS
_CONFIG_GIN = 'operative_config-0.gin'
def main(_):
if FLAGS.gin_file:
gin_paths = [FLAGS.gin_file]
elif (FLAGS.checkpoint_dir or FLAGS.checkpoint_path):
checkpoint_dir = FLAGS.checkpoint_dir
if checkpoint_dir is None:
checkpoint_dir = os.path.dirname(FLAGS.checkpoint_path)
gin_paths = [os.path.join(checkpoint_dir, _CONFIG_GIN)]
else:
gin_paths = []
gin.parse_config_files_and_bindings(gin_paths, FLAGS.gin_param)
estimator = Estimator()
getattr(estimator, FLAGS.do)()
class InputFn(object):
@staticmethod
def create_dir(base_dir):
dir_path = os.path.join(
base_dir,
datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S-%f'))
os.makedirs(dir_path)
return dir_path
@property
def root_dir(self):
return FLAGS.root_dir
@property
def data_dir(self):
return os.path.join(self.root_dir, 'data')
@property
def model_dir_root(self):
|
@property
def result_dir_root(self):
return os.path.join(self.root_dir, 'results')
@property
def split_dir_root(self):
return os.path.join(self.data_dir, 'splits')
@property
def tfrecord_dir_root(self):
return os.path.join(self.data_dir, 'tfrecords')
def _write_tfrecord(self, tfrecords, tfrecord_path):
if not isinstance(tfrecords, list):
tfrecords = [tfrecords]
writer = tf.io.TFRecordWriter(tfrecord_path)
for (num, tfrecord) in enumerate(tfrecords):
feature = {
key: ndarray_feature(value)
for (key, value) in tfrecord.items()}
if num == 0:
logging.info(f'Caching features {list(feature)} to {tfrecord_path}')
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
def _dataset_from_df(self, df, training):
dataset = tf.data.Dataset.range(len(df))
if training:
dataset = dataset.shuffle(len(df))
dataset = dataset.repeat()
def _get_item(i):
return df.iloc[i].to_list()
dtypes = [
tf.as_dtype(np.asarray(df[column].iloc[0]))
for column in df.columns]
dataset = dataset.map(
lambda i: tf.numpy_function(_get_item, [i], dtypes),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
lambda *values: dict(zip(df.columns, values)),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def _example_to_features_and_labels(self, example, label_keys):
features = example.copy()
labels = {}
for key in label_keys:
value = features.pop(key, None)
if value is not None:
labels[key] = value
return (features, labels)
def _input_fn_train_or_eval(self, training, batch_size):
pass
def _input_fn_predict(self, batch_size):
pass
def input_fn(self, batch_size, mode):
if mode == tf.estimator.ModeKeys.TRAIN:
return self._input_fn_train_or_eval(
training=True, batch_size=batch_size)
elif mode == tf.estimator.ModeKeys.EVAL:
return self._input_fn_train_or_eval(
training=False, batch_size=batch_size)
elif mode == tf.estimator.ModeKeys.PREDICT:
return self._input_fn_predict(
batch_size=batch_size)
class ModelFn(object):
def _get_global_step(self):
return tf_v1.train.get_global_step()
def _register_model_updates(self, model, features):
update_ops = model.get_updates_for(None) + model.get_updates_for(features.values())
for update_op in update_ops:
tf_v1.add_to_collection(tf_v1.GraphKeys.UPDATE_OPS, update_op)
def _regularization_loss(self):
with tf.name_scope('reg_loss'):
return (
self._train_spec.weight_decay *
tf.add_n([
tf.nn.l2_loss(var)
for var in tf_v1.trainable_variables()]))
def model_fn(self, features, labels, mode):
pass
def _build_estimator_spec(self,
mode,
predictions,
model_loss=None,
metrics=None,
print_tensors=None):
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
if print_tensors:
print_ops = [
tf.print(f'{key} =', value, summarize=256)
for (key, value) in print_tensors.items()]
with tf.control_dependencies(print_ops):
model_loss = tf.identity(model_loss)
total_loss = model_loss
metrics = metrics or {}
metrics['loss/model_loss'] = model_loss
if mode == tf.estimator.ModeKeys.TRAIN:
if self._train_spec.weight_decay:
reg_loss = self._regularization_loss()
total_loss = total_loss + reg_loss
metrics['loss/reg_loss'] = reg_loss
global_step = self._get_global_step()
if self._train_spec.lr_decay_steps:
learning_rate = tf_v1.train.exponential_decay(
self._train_spec.lr,
global_step=global_step,
decay_steps=self._train_spec.lr_decay_steps,
decay_rate=self._train_spec.lr_decay_rate,
staircase=True)
else:
learning_rate = tf.constant(self._train_spec.lr)
metrics['learning_rate'] = learning_rate
trainable_variables = tf_v1.get_collection(tf_v1.GraphKeys.TRAINABLE_VARIABLES)
lr_groups = self._train_spec.lr_groups
lr_group_to_variables = collections.defaultdict(list)
for variable in trainable_variables:
has_match = False
for (lr_group_name, lr_scaling) in lr_groups.items():
if (not has_match) and re.match(lr_group_name, variable.name):
has_match = True
if lr_scaling:
lr_group_to_variables[lr_group_name].append(variable)
if not has_match:
lr_group_to_variables['__all__'].append(variable)
logging.info('Learning rate groups')
logging.info(pprint.pformat(lr_group_to_variables))
variables = sum(list(lr_group_to_variables.values()), [])
gradients = tf.gradients(total_loss, variables)
if self._train_spec.gradient_clip:
(gradients, _) = tf.clip_by_global_norm(
gradients, self._train_spec.gradient_clip)
variable_to_gradient = dict(zip(variables, gradients))
apply_ops = []
for lr_group_name in lr_group_to_variables.keys():
lr_scaling = lr_groups.get(lr_group_name, 1.0) # for __all__
if not lr_scaling:
continue
optimizer = self._train_spec.optimizer_cls(
learning_rate=lr_scaling * learning_rate)
optimizer_variables = lr_group_to_variables[lr_group_name]
gradient_variables = [
(variable_to_gradient[variable], variable)
for variable in optimizer_variables]
apply_op = optimizer.apply_gradients(
gradient_variables, global_step=global_step)
apply_ops.append(apply_op)
update_ops = tf_v1.get_collection(tf_v1.GraphKeys.UPDATE_OPS)
train_op = tf.group(update_ops, *apply_ops)
for (key, value) in metrics.items():
if isinstance(value, tuple): # global metric
scalar = value[0]
else: # local metric
scalar = value
tf_v1.summary.scalar(key, scalar)
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {}
for (key, value) in metrics.items():
if isinstance(value, tuple): # global metric
eval_metric_op = value
else: # local metric
eval_metric_op = tf_v1.metrics.mean(value)
eval_metric_ops[key] = eval_metric_op
evaluation_hooks = []
summary_op = tf_v1.summary.merge_all()
if summary_op is not None:
summary_hook = tf.estimator.SummarySaverHook(
save_steps=self._eval_spec.save_summary_per_steps,
output_dir=self._estimator.eval_dir(self._eval_spec.name),
summary_op=summary_op)
evaluation_hooks.append(summary_hook)
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, eval_metric_ops=eval_metric_ops,
evaluation_hooks=evaluation_hooks)
@gin.configurable
class Estimator(InputFn, ModelFn):
def __new__(cls, estimator_cls=None):
if estimator_cls is None:
return super(Estimator, cls).__new__(cls)
elif issubclass(estimator_cls, Estimator):
return super(Estimator, cls).__new__(estimator_cls)
else:
obj = super(estimator_cls, estimator_cls).__new__(estimator_cls)
obj.__init__()
return obj
def train_eval(self):
model_dir = self.create_dir(self.model_dir_root)
shutil.copy(FLAGS | return os.path.join(self.root_dir, 'models') | identifier_body |
settings.rs | ::ParsedPkcs12 doesn't impl Clone yet
}
/// A DER-encoded PKCS#12 archive (field 0) together with the pass
/// phrase (field 1) needed to parse it. The raw bytes are stored
/// (rather than the parsed archive) because
/// `openssl::pkcs12::ParsedPkcs12` does not implement `Clone`;
/// `TlsSettings::identity` re-parses the bytes on demand.
#[derive(Clone)]
pub struct IdentityStore(Vec<u8>, String);
impl TlsSettings {
/// Generate a filled out settings struct from the given optional
/// option set, interpreted as client options. If `options` is
/// `None`, the result is set to defaults (ie empty).
pub fn from_options(options: &Option<TlsOptions>) -> Result<Self> {
Self::from_options_base(options, false)
}
pub(super) fn from_options_base(
options: &Option<TlsOptions>,
for_server: bool,
) -> Result<Self> {
let default = TlsOptions::default();
let options = options.as_ref().unwrap_or(&default);
if !for_server {
if options.verify_certificate == Some(false) {
warn!(
"`verify_certificate` is DISABLED, this may lead to security vulnerabilities"
);
}
if options.verify_hostname == Some(false) {
warn!("`verify_hostname` is DISABLED, this may lead to security vulnerabilities");
}
}
if options.key_path.is_some() && options.crt_path.is_none() {
return Err(TlsError::MissingCrtKeyFile.into());
}
let authority = match options.ca_path {
None => None,
Some(ref path) => Some(load_x509(path)?),
};
let identity = match options.crt_path {
None => None,
Some(ref crt_path) => {
let name = crt_path.to_string_lossy().to_string();
let cert_data = open_read(crt_path, "certificate")?;
let key_pass: &str = options.key_pass.as_ref().map(|s| s.as_str()).unwrap_or("");
match Pkcs12::from_der(&cert_data) {
// Certificate file is DER encoded PKCS#12 archive
Ok(pkcs12) => {
// Verify password
pkcs12.parse(&key_pass).context(ParsePkcs12)?;
Some(IdentityStore(cert_data, key_pass.to_string()))
}
Err(source) => {
if options.key_path.is_none() {
return Err(TlsError::ParsePkcs12 { source });
}
// Identity is a PEM encoded certficate+key pair
let crt = load_x509(crt_path)?;
let key_path = options.key_path.as_ref().unwrap();
let key = load_key(&key_path, &options.key_pass)?;
let pkcs12 = Pkcs12::builder()
.build("", &name, &key, &crt)
.context(Pkcs12Error)?;
let identity = pkcs12.to_der().context(DerExportError)?;
// Build the resulting parsed PKCS#12 archive,
// but don't store it, as it cannot be cloned.
// This is just for error checking.
pkcs12.parse("").context(TlsIdentityError)?;
Some(IdentityStore(identity, "".into()))
}
}
}
};
Ok(Self {
verify_certificate: options.verify_certificate.unwrap_or(!for_server),
verify_hostname: options.verify_hostname.unwrap_or(!for_server),
authority,
identity,
})
}
fn identity(&self) -> Option<ParsedPkcs12> {
// This data was test-built previously, so we can just use it
// here and expect the results will not fail. This can all be
// reworked when `openssl::pkcs12::ParsedPkcs12` gains the Clone
// impl.
self.identity.as_ref().map(|identity| {
Pkcs12::from_der(&identity.0)
.expect("Could not build PKCS#12 archive from parsed data")
.parse(&identity.1)
.expect("Could not parse stored PKCS#12 archive")
})
}
pub(super) fn apply_context(&self, context: &mut SslContextBuilder) -> Result<()> {
context.set_verify(if self.verify_certificate {
SslVerifyMode::PEER | SslVerifyMode::FAIL_IF_NO_PEER_CERT
} else {
SslVerifyMode::NONE
});
if let Some(identity) = self.identity() {
context
.set_certificate(&identity.cert)
.context(SetCertificate)?;
context
.set_private_key(&identity.pkey)
.context(SetPrivateKey)?;
if let Some(chain) = identity.chain {
for cert in chain {
context
.add_extra_chain_cert(cert)
.context(AddExtraChainCert)?;
}
}
}
if let Some(certificate) = &self.authority {
let mut store = X509StoreBuilder::new().context(NewStoreBuilder)?;
store
.add_cert(certificate.clone())
.context(AddCertToStore)?;
context
.set_verify_cert_store(store.build())
.context(SetVerifyCert)?;
}
Ok(())
}
pub fn apply_connect_configuration(&self, connection: &mut ConnectConfiguration) {
connection.set_verify_hostname(self.verify_hostname);
}
}
impl Debug for TlsSettings {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("TlsSettings")
.field("verify_certificate", &self.verify_certificate)
.field("verify_hostname", &self.verify_hostname)
.finish()
}
}
pub type MaybeTlsSettings = MaybeTls<(), TlsSettings>;
impl MaybeTlsSettings {
/// Generate an optional settings struct from the given optional
/// configuration reference. If `config` is `None`, TLS is
/// disabled. The `for_server` parameter indicates the options
/// should be interpreted as being for a TLS server, which requires
/// an identity certificate and changes the certificate verification
/// default to false.
pub fn from_config(config: &Option<TlsConfig>, for_server: bool) -> Result<Self> {
match config {
None => Ok(Self::Raw(())), // No config, no TLS settings
Some(config) => match config.enabled.unwrap_or(false) {
false => Ok(Self::Raw(())), // Explicitly disabled, still no TLS settings
true => {
let tls =
TlsSettings::from_options_base(&Some(config.options.clone()), for_server)?;
match (for_server, &tls.identity) {
// Servers require an identity certificate
(true, None) => Err(TlsError::MissingRequiredIdentity.into()),
_ => Ok(Self::Tls(tls)),
}
}
},
}
}
}
impl From<TlsSettings> for MaybeTlsSettings {
fn from(tls: TlsSettings) -> Self {
Self::Tls(tls)
}
}
/// Load a private key from a named file
fn load_key(filename: &Path, pass_phrase: &Option<String>) -> Result<PKey<Private>> {
let data = open_read(filename, "key")?;
match pass_phrase {
None => Ok(PKey::private_key_from_der(&data)
.or_else(|_| PKey::private_key_from_pem(&data))
.with_context(|| PrivateKeyParseError { filename })?),
Some(phrase) => Ok(
PKey::private_key_from_pkcs8_passphrase(&data, phrase.as_bytes())
.or_else(|_| PKey::private_key_from_pem_passphrase(&data, phrase.as_bytes()))
.with_context(|| PrivateKeyParseError { filename })?,
),
}
}
/// Load an X.509 certificate from a named file
fn load_x509(filename: &Path) -> Result<X509> {
let data = open_read(filename, "certificate")?;
Ok(X509::from_der(&data)
.or_else(|_| X509::from_pem(&data))
.with_context(|| X509ParseError { filename })?)
}
fn open_read(filename: &Path, note: &'static str) -> Result<Vec<u8>> {
let mut text = Vec::<u8>::new();
File::open(filename)
.with_context(|| FileOpenFailed { note, filename })?
.read_to_end(&mut text)
.with_context(|| FileReadFailed { note, filename })?;
Ok(text)
}
#[cfg(test)]
mod test {
use super::*;
const TEST_PKCS12: &str = "tests/data/localhost.p12";
const TEST_PEM_CRT: &str = "tests/data/localhost.crt";
const TEST_PEM_KEY: &str = "tests/data/localhost.key";
#[test]
fn from_options_pkcs12() {
let options = TlsOptions {
crt_path: Some(TEST_PKCS12.into()),
key_pass: Some("NOPASS".into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PKCS#12 certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_pem() | {
let options = TlsOptions {
crt_path: Some(TEST_PEM_CRT.into()),
key_path: Some(TEST_PEM_KEY.into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PEM certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
} | identifier_body |
|
settings.rs | (super) verify_hostname: bool,
authority: Option<X509>,
pub(super) identity: Option<IdentityStore>, // openssl::pkcs12::ParsedPkcs12 doesn't impl Clone yet
}
#[derive(Clone)]
pub struct IdentityStore(Vec<u8>, String);
impl TlsSettings {
/// Generate a filled out settings struct from the given optional
/// option set, interpreted as client options. If `options` is
/// `None`, the result is set to defaults (ie empty).
pub fn from_options(options: &Option<TlsOptions>) -> Result<Self> {
Self::from_options_base(options, false)
}
pub(super) fn from_options_base(
options: &Option<TlsOptions>,
for_server: bool,
) -> Result<Self> {
let default = TlsOptions::default();
let options = options.as_ref().unwrap_or(&default);
if !for_server {
if options.verify_certificate == Some(false) {
warn!(
"`verify_certificate` is DISABLED, this may lead to security vulnerabilities"
);
}
if options.verify_hostname == Some(false) {
warn!("`verify_hostname` is DISABLED, this may lead to security vulnerabilities");
}
}
if options.key_path.is_some() && options.crt_path.is_none() {
return Err(TlsError::MissingCrtKeyFile.into());
}
let authority = match options.ca_path {
None => None,
Some(ref path) => Some(load_x509(path)?),
};
let identity = match options.crt_path {
None => None,
Some(ref crt_path) => {
let name = crt_path.to_string_lossy().to_string();
let cert_data = open_read(crt_path, "certificate")?;
let key_pass: &str = options.key_pass.as_ref().map(|s| s.as_str()).unwrap_or("");
match Pkcs12::from_der(&cert_data) {
// Certificate file is DER encoded PKCS#12 archive
Ok(pkcs12) => {
// Verify password
pkcs12.parse(&key_pass).context(ParsePkcs12)?;
Some(IdentityStore(cert_data, key_pass.to_string()))
}
Err(source) => {
if options.key_path.is_none() {
return Err(TlsError::ParsePkcs12 { source });
}
// Identity is a PEM encoded certficate+key pair
let crt = load_x509(crt_path)?;
let key_path = options.key_path.as_ref().unwrap();
let key = load_key(&key_path, &options.key_pass)?;
let pkcs12 = Pkcs12::builder()
.build("", &name, &key, &crt)
.context(Pkcs12Error)?;
let identity = pkcs12.to_der().context(DerExportError)?;
// Build the resulting parsed PKCS#12 archive,
// but don't store it, as it cannot be cloned.
// This is just for error checking.
pkcs12.parse("").context(TlsIdentityError)?;
Some(IdentityStore(identity, "".into()))
}
}
}
};
Ok(Self {
verify_certificate: options.verify_certificate.unwrap_or(!for_server),
verify_hostname: options.verify_hostname.unwrap_or(!for_server),
authority,
identity,
})
}
fn identity(&self) -> Option<ParsedPkcs12> {
// This data was test-built previously, so we can just use it
// here and expect the results will not fail. This can all be
// reworked when `openssl::pkcs12::ParsedPkcs12` gains the Clone
// impl.
self.identity.as_ref().map(|identity| {
Pkcs12::from_der(&identity.0)
.expect("Could not build PKCS#12 archive from parsed data")
.parse(&identity.1)
.expect("Could not parse stored PKCS#12 archive")
})
}
pub(super) fn apply_context(&self, context: &mut SslContextBuilder) -> Result<()> {
context.set_verify(if self.verify_certificate {
SslVerifyMode::PEER | SslVerifyMode::FAIL_IF_NO_PEER_CERT
} else {
SslVerifyMode::NONE
});
if let Some(identity) = self.identity() {
context
.set_certificate(&identity.cert)
.context(SetCertificate)?;
context
.set_private_key(&identity.pkey)
.context(SetPrivateKey)?;
if let Some(chain) = identity.chain {
for cert in chain {
context
.add_extra_chain_cert(cert)
.context(AddExtraChainCert)?;
}
}
}
if let Some(certificate) = &self.authority {
let mut store = X509StoreBuilder::new().context(NewStoreBuilder)?;
store
.add_cert(certificate.clone())
.context(AddCertToStore)?;
context
.set_verify_cert_store(store.build())
.context(SetVerifyCert)?;
}
Ok(())
}
pub fn apply_connect_configuration(&self, connection: &mut ConnectConfiguration) {
connection.set_verify_hostname(self.verify_hostname);
}
}
impl Debug for TlsSettings {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("TlsSettings")
.field("verify_certificate", &self.verify_certificate)
.field("verify_hostname", &self.verify_hostname)
.finish()
}
}
pub type MaybeTlsSettings = MaybeTls<(), TlsSettings>;
impl MaybeTlsSettings {
/// Generate an optional settings struct from the given optional
/// configuration reference. If `config` is `None`, TLS is
/// disabled. The `for_server` parameter indicates the options
/// should be interpreted as being for a TLS server, which requires
/// an identity certificate and changes the certificate verification
/// default to false.
pub fn from_config(config: &Option<TlsConfig>, for_server: bool) -> Result<Self> {
match config {
None => Ok(Self::Raw(())), // No config, no TLS settings
Some(config) => match config.enabled.unwrap_or(false) {
false => Ok(Self::Raw(())), // Explicitly disabled, still no TLS settings
true => {
let tls =
TlsSettings::from_options_base(&Some(config.options.clone()), for_server)?;
match (for_server, &tls.identity) {
// Servers require an identity certificate
(true, None) => Err(TlsError::MissingRequiredIdentity.into()),
_ => Ok(Self::Tls(tls)),
}
}
},
}
}
}
impl From<TlsSettings> for MaybeTlsSettings {
fn from(tls: TlsSettings) -> Self {
Self::Tls(tls)
}
}
/// Load a private key from a named file
fn load_key(filename: &Path, pass_phrase: &Option<String>) -> Result<PKey<Private>> {
let data = open_read(filename, "key")?;
match pass_phrase {
None => Ok(PKey::private_key_from_der(&data)
.or_else(|_| PKey::private_key_from_pem(&data))
.with_context(|| PrivateKeyParseError { filename })?),
Some(phrase) => Ok(
PKey::private_key_from_pkcs8_passphrase(&data, phrase.as_bytes())
.or_else(|_| PKey::private_key_from_pem_passphrase(&data, phrase.as_bytes()))
.with_context(|| PrivateKeyParseError { filename })?,
),
}
}
/// Load an X.509 certificate from a named file
fn load_x509(filename: &Path) -> Result<X509> {
let data = open_read(filename, "certificate")?;
Ok(X509::from_der(&data)
.or_else(|_| X509::from_pem(&data))
.with_context(|| X509ParseError { filename })?)
}
fn open_read(filename: &Path, note: &'static str) -> Result<Vec<u8>> {
let mut text = Vec::<u8>::new();
File::open(filename)
.with_context(|| FileOpenFailed { note, filename })?
.read_to_end(&mut text)
.with_context(|| FileReadFailed { note, filename })?;
Ok(text)
}
#[cfg(test)]
mod test {
use super::*;
const TEST_PKCS12: &str = "tests/data/localhost.p12";
const TEST_PEM_CRT: &str = "tests/data/localhost.crt";
const TEST_PEM_KEY: &str = "tests/data/localhost.key";
#[test]
fn | () {
let options = TlsOptions {
crt_path: Some(TEST_PKCS12.into()),
key_pass: Some("NOPASS".into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PKCS#12 certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_pem() {
let options = TlsOptions {
crt_path: Some(TEST_PEM_CRT.into()),
key_path: Some(TEST_PEM_KEY.into()),
..Default::default()
};
let settings =
TlsSettings | from_options_pkcs12 | identifier_name |
settings.rs | (super) verify_hostname: bool,
authority: Option<X509>,
pub(super) identity: Option<IdentityStore>, // openssl::pkcs12::ParsedPkcs12 doesn't impl Clone yet
}
#[derive(Clone)]
pub struct IdentityStore(Vec<u8>, String);
impl TlsSettings {
/// Generate a filled out settings struct from the given optional
/// option set, interpreted as client options. If `options` is
/// `None`, the result is set to defaults (ie empty).
pub fn from_options(options: &Option<TlsOptions>) -> Result<Self> {
Self::from_options_base(options, false)
}
pub(super) fn from_options_base(
options: &Option<TlsOptions>,
for_server: bool,
) -> Result<Self> {
let default = TlsOptions::default();
let options = options.as_ref().unwrap_or(&default);
if !for_server {
if options.verify_certificate == Some(false) {
warn!(
"`verify_certificate` is DISABLED, this may lead to security vulnerabilities"
);
}
if options.verify_hostname == Some(false) {
warn!("`verify_hostname` is DISABLED, this may lead to security vulnerabilities");
}
}
if options.key_path.is_some() && options.crt_path.is_none() {
return Err(TlsError::MissingCrtKeyFile.into());
}
let authority = match options.ca_path {
None => None,
Some(ref path) => Some(load_x509(path)?),
};
| let name = crt_path.to_string_lossy().to_string();
let cert_data = open_read(crt_path, "certificate")?;
let key_pass: &str = options.key_pass.as_ref().map(|s| s.as_str()).unwrap_or("");
match Pkcs12::from_der(&cert_data) {
// Certificate file is DER encoded PKCS#12 archive
Ok(pkcs12) => {
// Verify password
pkcs12.parse(&key_pass).context(ParsePkcs12)?;
Some(IdentityStore(cert_data, key_pass.to_string()))
}
Err(source) => {
if options.key_path.is_none() {
return Err(TlsError::ParsePkcs12 { source });
}
// Identity is a PEM encoded certficate+key pair
let crt = load_x509(crt_path)?;
let key_path = options.key_path.as_ref().unwrap();
let key = load_key(&key_path, &options.key_pass)?;
let pkcs12 = Pkcs12::builder()
.build("", &name, &key, &crt)
.context(Pkcs12Error)?;
let identity = pkcs12.to_der().context(DerExportError)?;
// Build the resulting parsed PKCS#12 archive,
// but don't store it, as it cannot be cloned.
// This is just for error checking.
pkcs12.parse("").context(TlsIdentityError)?;
Some(IdentityStore(identity, "".into()))
}
}
}
};
Ok(Self {
verify_certificate: options.verify_certificate.unwrap_or(!for_server),
verify_hostname: options.verify_hostname.unwrap_or(!for_server),
authority,
identity,
})
}
fn identity(&self) -> Option<ParsedPkcs12> {
// This data was test-built previously, so we can just use it
// here and expect the results will not fail. This can all be
// reworked when `openssl::pkcs12::ParsedPkcs12` gains the Clone
// impl.
self.identity.as_ref().map(|identity| {
Pkcs12::from_der(&identity.0)
.expect("Could not build PKCS#12 archive from parsed data")
.parse(&identity.1)
.expect("Could not parse stored PKCS#12 archive")
})
}
pub(super) fn apply_context(&self, context: &mut SslContextBuilder) -> Result<()> {
context.set_verify(if self.verify_certificate {
SslVerifyMode::PEER | SslVerifyMode::FAIL_IF_NO_PEER_CERT
} else {
SslVerifyMode::NONE
});
if let Some(identity) = self.identity() {
context
.set_certificate(&identity.cert)
.context(SetCertificate)?;
context
.set_private_key(&identity.pkey)
.context(SetPrivateKey)?;
if let Some(chain) = identity.chain {
for cert in chain {
context
.add_extra_chain_cert(cert)
.context(AddExtraChainCert)?;
}
}
}
if let Some(certificate) = &self.authority {
let mut store = X509StoreBuilder::new().context(NewStoreBuilder)?;
store
.add_cert(certificate.clone())
.context(AddCertToStore)?;
context
.set_verify_cert_store(store.build())
.context(SetVerifyCert)?;
}
Ok(())
}
pub fn apply_connect_configuration(&self, connection: &mut ConnectConfiguration) {
connection.set_verify_hostname(self.verify_hostname);
}
}
impl Debug for TlsSettings {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("TlsSettings")
.field("verify_certificate", &self.verify_certificate)
.field("verify_hostname", &self.verify_hostname)
.finish()
}
}
pub type MaybeTlsSettings = MaybeTls<(), TlsSettings>;
impl MaybeTlsSettings {
/// Generate an optional settings struct from the given optional
/// configuration reference. If `config` is `None`, TLS is
/// disabled. The `for_server` parameter indicates the options
/// should be interpreted as being for a TLS server, which requires
/// an identity certificate and changes the certificate verification
/// default to false.
pub fn from_config(config: &Option<TlsConfig>, for_server: bool) -> Result<Self> {
match config {
None => Ok(Self::Raw(())), // No config, no TLS settings
Some(config) => match config.enabled.unwrap_or(false) {
false => Ok(Self::Raw(())), // Explicitly disabled, still no TLS settings
true => {
let tls =
TlsSettings::from_options_base(&Some(config.options.clone()), for_server)?;
match (for_server, &tls.identity) {
// Servers require an identity certificate
(true, None) => Err(TlsError::MissingRequiredIdentity.into()),
_ => Ok(Self::Tls(tls)),
}
}
},
}
}
}
impl From<TlsSettings> for MaybeTlsSettings {
fn from(tls: TlsSettings) -> Self {
Self::Tls(tls)
}
}
/// Load a private key from a named file
fn load_key(filename: &Path, pass_phrase: &Option<String>) -> Result<PKey<Private>> {
let data = open_read(filename, "key")?;
match pass_phrase {
None => Ok(PKey::private_key_from_der(&data)
.or_else(|_| PKey::private_key_from_pem(&data))
.with_context(|| PrivateKeyParseError { filename })?),
Some(phrase) => Ok(
PKey::private_key_from_pkcs8_passphrase(&data, phrase.as_bytes())
.or_else(|_| PKey::private_key_from_pem_passphrase(&data, phrase.as_bytes()))
.with_context(|| PrivateKeyParseError { filename })?,
),
}
}
/// Load an X.509 certificate from a named file
fn load_x509(filename: &Path) -> Result<X509> {
let data = open_read(filename, "certificate")?;
Ok(X509::from_der(&data)
.or_else(|_| X509::from_pem(&data))
.with_context(|| X509ParseError { filename })?)
}
fn open_read(filename: &Path, note: &'static str) -> Result<Vec<u8>> {
let mut text = Vec::<u8>::new();
File::open(filename)
.with_context(|| FileOpenFailed { note, filename })?
.read_to_end(&mut text)
.with_context(|| FileReadFailed { note, filename })?;
Ok(text)
}
#[cfg(test)]
mod test {
use super::*;
const TEST_PKCS12: &str = "tests/data/localhost.p12";
const TEST_PEM_CRT: &str = "tests/data/localhost.crt";
const TEST_PEM_KEY: &str = "tests/data/localhost.key";
#[test]
fn from_options_pkcs12() {
let options = TlsOptions {
crt_path: Some(TEST_PKCS12.into()),
key_pass: Some("NOPASS".into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PKCS#12 certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_pem() {
let options = TlsOptions {
crt_path: Some(TEST_PEM_CRT.into()),
key_path: Some(TEST_PEM_KEY.into()),
..Default::default()
};
let settings =
TlsSettings:: | let identity = match options.crt_path {
None => None,
Some(ref crt_path) => { | random_line_split |
calculate_profiles.py | ,
type=str,
help="Path to the output aligned directory. Required."
)
parser.add_argument("--overview",
default=None,
type=str,
help="Path to the output description csv. Required. Pairs with <--aligned> directory."
)
parser.add_argument("--k",
default=-1,
type=int,
help="Size of the k-mer created by BCALM. Required."
)
parser.add_argument("--input",
default=None,
type=str,
help="Path to the input file."
)
parser.set_defaults(all_sqs_result=False)
args = parser.parse_args([] if "__file__" not in globals() else None)
bases = dict(A=0, C=1, G=2, T=3)
bases['-'] = 4
rev_bases = {v: k for k, v in bases.items()}
global_alignment_ident_no = 0
operations = {
'.' : 0,
'-' : 1,
'|' : 0
}
class AlignmentProfile:
def __init__(self, width, df, identifier):
self.ident = identifier
self.profile = np.zeros((5, width))
self.repre_sq = ""
self.seq_alignments = None # this will be a pandas df
self.seq_align_counter = -1
self.calculate_profile(df)
def calculate_profile(self, df):
self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.shape[1], dtype=np.int8)) for index in df.index])
unwrapped_sq = df['sq'].str.split('', expand=True)
unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0], unwrapped_sq.columns[-1]])
counts = np.stack(df['count'].values)
for base in bases:
a = unwrapped_sq != base
newX = np.ma.array(counts, mask=a)
new_counts = newX.sum(axis=0)
self.profile[bases[base], :] += new_counts
# repre_sq
maxs = np.argmax(self.profile, axis=0)
self.repre_sq = "".join([rev_bases[x] for x in maxs])
def add_sequence(self, new_sq, new_counts, nice, sq_index):
offset = re.search(nice['target_aligned'].replace('-', ''), self.repre_sq).start(0)
x = self.profile
# padding with the following number of observed positions (sum of all bases)
# pad profile with insertions
insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]
for i, index in enumerate(insertions):
if x.shape[1] >= index:
value = 0
else:
value = x[:, index].sum()
x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)
self.seq_alignments.insert(loc=int(index+offset), column=self.seq_align_counter, value=1)
self.seq_align_counter -= 1
# pad new counts with deletions
aligned_query = np.array(list(nice['query_aligned']))
deletions = np.where(aligned_query == '-')[0]
for i, index in enumerate(deletions):
value = new_counts[index]
new_counts = np.insert(new_counts, index, value, axis=0)
i = offset
for base, count in zip(aligned_query, new_counts):
x[bases[base], i] += count
i += 1
self.profile = x
# store new sequence alignment
added_alignment = -np.ones(self.profile.shape[1])
for i, char in enumerate(nice['target_aligned']):
if char == '-':
added_alignment[offset + i] = 1
else:
added_alignment[offset + i] = 0
self.seq_alignments.loc[-1] = [sq_index, *added_alignment] # adding a row
self.seq_alignments.index = self.seq_alignments.index + 1 # shifting index
# recalculate repre_sq -- the most probable one
maxs = np.argmax(self.profile, axis=0)
self.repre_sq = "".join([rev_bases[x] for x in maxs if rev_bases[x] != '-']) # '-' is removed from the sq
def dst_func(x, y):
return (np.array(x) != np.array(y)).sum()
def read_alignment(filename):
for line in open(filename):
sq, count = line.strip('\n').split(';')
yield sq, np.array([int(x) for x in count.split(',')]), count
def | (df_group, l, dst=dst_func):
sqs = df_group.reset_index()['sq']
n = len(sqs)
if n <= 1:
return np.zeros(n)
dst_matrix = np.zeros((n, n))
for i in range(n):
for j in range(i):
d = dst(sqs[i], sqs[j])
dst_matrix[i, j] = d
dst_matrix[j, i] = d
model = AgglomerativeClustering(distance_threshold=threshold * l,
n_clusters=None,
linkage='complete',
affinity='precomputed')
clusters = model.fit_predict(dst_matrix)
return clusters
aligned_sqs_file = args.input
k = args.k
misses = args.misses
pools = args.pools
threshold = misses / k
if args.aligned is None:
output_profile_dir = aligned_sqs_file + "_profiles"
else:
output_profile_dir = args.aligned
if args.overview is None:
output_csv_file = aligned_sqs_file + "_overview.csv"
else:
output_csv_file = args.overview
# read
df = pd.DataFrame(read_alignment(aligned_sqs_file))
df.columns = ['sq', 'count', 'str_count']
df['length'] = df['sq'].str.len()
# df['alignment'] = -1 # every aligned sq has an alignment identification
groups = df.groupby(by='length')
unique_lengths = df['length'].sort_values(ascending=False).unique()
against = []
longest = unique_lengths[0]
df_group = groups.get_group(longest).copy()
clusters = cluster_group(df_group, longest)
df_group['cluster'] = clusters
alignments = {
}
for cluster, cluster_df in df_group.groupby(by='cluster'):
alignment = AlignmentProfile(longest, cluster_df, global_alignment_ident_no)
alignments[global_alignment_ident_no] = alignment
global_alignment_ident_no += 1
against.append(alignment)
# df.loc[df['sq'].isin(cluster_df['sq']), 'alignment'] = alignment.ident
# to each sequence
start = time.time()
# print(df.groupby(by='length').get_group(longest))
# print("running on shorter")
with Bar("Processing length groups...", max=len(unique_lengths) - 1) as bar:
for length in unique_lengths[1:]:
bar.next()
df_group = groups.get_group(length).copy()
def getDistanceAndAlignment(sq):
# this is a fallback, it should not happen
maxval = np.floor(threshold * len(sq))
min = np.inf
min_target = None
if maxval < 1:
return min,min_target
for target in against:
align_res = edlib.align(sq, target.repre_sq, mode='HW', task='distance', k=maxval)
if align_res['editDistance'] != -1:
if min > align_res['editDistance']:
if align_res['editDistance'] == 0:
return align_res['editDistance'], target.ident
min = align_res['editDistance']
min_target = target
if min_target is not None:
min_target = min_target.ident
return min, min_target
x = length * threshold
if length * threshold >= 1:
# try align
with Pool(pools) as pool:
result = pool.map(getDistanceAndAlignment, df_group['sq'])
df_group['aligned'] = result
# add aligned to profiles
aligned = df_group[df_group['aligned'] != (np.inf, None)]
for index, row in aligned.iterrows():
to = alignments[row['aligned'][1]]
align_res = edlib.align(row.sq, to.repre_sq, mode='HW', task='path')
nice = edlib.getNiceAlignment(align_res, row.sq, to.repre_sq)
to.add_sequence(row.sq, row['count'], nice, index)
# df.loc[df['sq'] == row.sq, 'alignment'] = to.ident
# cluster unaligned, add to against
unaligned = df_group[df_group['aligned'] == (np.inf, None)].copy()
clusters = cluster_group(unaligned, length)
unaligned['cluster'] = clusters
for cluster, cluster_df in unaligned.groupby(by='cluster'):
alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)
alignments[global_alignment_ident_no] = alignment
global_alignment_ident_no += 1
against.append(alignment)
else:
# threshold is less than one, no clustering nor alignment takes place
df_group["aligned"] = [(np.inf, None) for _ in range(len(df_group))]
unaligned = df_group.copy()
unaligned["cluster"] = list(range(len(unaligned)))
# print(f"pseudoclustering elapsed: {time.time() - s | cluster_group | identifier_name |
calculate_profiles.py | =None,
type=str,
help="Path to the output aligned directory. Required."
)
parser.add_argument("--overview",
default=None,
type=str,
help="Path to the output description csv. Required. Pairs with <--aligned> directory."
)
parser.add_argument("--k",
default=-1,
type=int,
help="Size of the k-mer created by BCALM. Required."
)
parser.add_argument("--input",
default=None,
type=str,
help="Path to the input file."
)
parser.set_defaults(all_sqs_result=False)
args = parser.parse_args([] if "__file__" not in globals() else None)
bases = dict(A=0, C=1, G=2, T=3)
bases['-'] = 4
rev_bases = {v: k for k, v in bases.items()}
global_alignment_ident_no = 0
operations = {
'.' : 0,
'-' : 1,
'|' : 0
}
class AlignmentProfile:
def __init__(self, width, df, identifier):
self.ident = identifier
self.profile = np.zeros((5, width))
self.repre_sq = ""
self.seq_alignments = None # this will be a pandas df
self.seq_align_counter = -1
self.calculate_profile(df)
def calculate_profile(self, df):
self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.shape[1], dtype=np.int8)) for index in df.index])
unwrapped_sq = df['sq'].str.split('', expand=True)
unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0], unwrapped_sq.columns[-1]])
counts = np.stack(df['count'].values)
for base in bases:
a = unwrapped_sq != base
newX = np.ma.array(counts, mask=a)
new_counts = newX.sum(axis=0)
self.profile[bases[base], :] += new_counts
# repre_sq
maxs = np.argmax(self.profile, axis=0)
self.repre_sq = "".join([rev_bases[x] for x in maxs])
def add_sequence(self, new_sq, new_counts, nice, sq_index):
offset = re.search(nice['target_aligned'].replace('-', ''), self.repre_sq).start(0)
x = self.profile
# padding with the following number of observed positions (sum of all bases)
# pad profile with insertions
insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]
for i, index in enumerate(insertions):
if x.shape[1] >= index:
value = 0
else:
value = x[:, index].sum()
x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)
self.seq_alignments.insert(loc=int(index+offset), column=self.seq_align_counter, value=1)
self.seq_align_counter -= 1
# pad new counts with deletions
aligned_query = np.array(list(nice['query_aligned']))
deletions = np.where(aligned_query == '-')[0]
for i, index in enumerate(deletions):
value = new_counts[index]
new_counts = np.insert(new_counts, index, value, axis=0)
i = offset
for base, count in zip(aligned_query, new_counts):
x[bases[base], i] += count
i += 1
self.profile = x
# store new sequence alignment
added_alignment = -np.ones(self.profile.shape[1])
for i, char in enumerate(nice['target_aligned']):
if char == '-':
added_alignment[offset + i] = 1
else:
added_alignment[offset + i] = 0
self.seq_alignments.loc[-1] = [sq_index, *added_alignment] # adding a row
self.seq_alignments.index = self.seq_alignments.index + 1 # shifting index
# recalculate repre_sq -- the most probable one
maxs = np.argmax(self.profile, axis=0)
self.repre_sq = "".join([rev_bases[x] for x in maxs if rev_bases[x] != '-']) # '-' is removed from the sq
def dst_func(x, y):
return (np.array(x) != np.array(y)).sum()
def read_alignment(filename):
for line in open(filename):
sq, count = line.strip('\n').split(';')
yield sq, np.array([int(x) for x in count.split(',')]), count
def cluster_group(df_group, l, dst=dst_func):
sqs = df_group.reset_index()['sq']
n = len(sqs)
if n <= 1:
return np.zeros(n)
dst_matrix = np.zeros((n, n))
for i in range(n):
for j in range(i):
d = dst(sqs[i], sqs[j])
dst_matrix[i, j] = d
dst_matrix[j, i] = d
model = AgglomerativeClustering(distance_threshold=threshold * l,
n_clusters=None,
linkage='complete',
affinity='precomputed')
clusters = model.fit_predict(dst_matrix)
return clusters
aligned_sqs_file = args.input
k = args.k
misses = args.misses
pools = args.pools
threshold = misses / k
if args.aligned is None:
output_profile_dir = aligned_sqs_file + "_profiles"
else:
output_profile_dir = args.aligned
if args.overview is None:
output_csv_file = aligned_sqs_file + "_overview.csv"
else:
output_csv_file = args.overview
# read
df = pd.DataFrame(read_alignment(aligned_sqs_file))
df.columns = ['sq', 'count', 'str_count']
df['length'] = df['sq'].str.len()
# df['alignment'] = -1 # every aligned sq has an alignment identification
groups = df.groupby(by='length')
unique_lengths = df['length'].sort_values(ascending=False).unique()
against = []
longest = unique_lengths[0]
df_group = groups.get_group(longest).copy()
clusters = cluster_group(df_group, longest)
df_group['cluster'] = clusters
alignments = {
}
for cluster, cluster_df in df_group.groupby(by='cluster'):
alignment = AlignmentProfile(longest, cluster_df, global_alignment_ident_no)
alignments[global_alignment_ident_no] = alignment
global_alignment_ident_no += 1 |
start = time.time()
# print(df.groupby(by='length').get_group(longest))
# print("running on shorter")
with Bar("Processing length groups...", max=len(unique_lengths) - 1) as bar:
for length in unique_lengths[1:]:
bar.next()
df_group = groups.get_group(length).copy()
def getDistanceAndAlignment(sq):
# this is a fallback, it should not happen
maxval = np.floor(threshold * len(sq))
min = np.inf
min_target = None
if maxval < 1:
return min,min_target
for target in against:
align_res = edlib.align(sq, target.repre_sq, mode='HW', task='distance', k=maxval)
if align_res['editDistance'] != -1:
if min > align_res['editDistance']:
if align_res['editDistance'] == 0:
return align_res['editDistance'], target.ident
min = align_res['editDistance']
min_target = target
if min_target is not None:
min_target = min_target.ident
return min, min_target
x = length * threshold
if length * threshold >= 1:
# try align
with Pool(pools) as pool:
result = pool.map(getDistanceAndAlignment, df_group['sq'])
df_group['aligned'] = result
# add aligned to profiles
aligned = df_group[df_group['aligned'] != (np.inf, None)]
for index, row in aligned.iterrows():
to = alignments[row['aligned'][1]]
align_res = edlib.align(row.sq, to.repre_sq, mode='HW', task='path')
nice = edlib.getNiceAlignment(align_res, row.sq, to.repre_sq)
to.add_sequence(row.sq, row['count'], nice, index)
# df.loc[df['sq'] == row.sq, 'alignment'] = to.ident
# cluster unaligned, add to against
unaligned = df_group[df_group['aligned'] == (np.inf, None)].copy()
clusters = cluster_group(unaligned, length)
unaligned['cluster'] = clusters
for cluster, cluster_df in unaligned.groupby(by='cluster'):
alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)
alignments[global_alignment_ident_no] = alignment
global_alignment_ident_no += 1
against.append(alignment)
else:
# threshold is less than one, no clustering nor alignment takes place
df_group["aligned"] = [(np.inf, None) for _ in range(len(df_group))]
unaligned = df_group.copy()
unaligned["cluster"] = list(range(len(unaligned)))
# print(f"pseudoclustering elapsed: {time.time() - s}")
| against.append(alignment)
# df.loc[df['sq'].isin(cluster_df['sq']), 'alignment'] = alignment.ident
# to each sequence | random_line_split |
calculate_profiles.py | ,
type=str,
help="Path to the output aligned directory. Required."
)
parser.add_argument("--overview",
default=None,
type=str,
help="Path to the output description csv. Required. Pairs with <--aligned> directory."
)
parser.add_argument("--k",
default=-1,
type=int,
help="Size of the k-mer created by BCALM. Required."
)
parser.add_argument("--input",
default=None,
type=str,
help="Path to the input file."
)
parser.set_defaults(all_sqs_result=False)
args = parser.parse_args([] if "__file__" not in globals() else None)
bases = dict(A=0, C=1, G=2, T=3)
bases['-'] = 4
rev_bases = {v: k for k, v in bases.items()}
global_alignment_ident_no = 0
operations = {
'.' : 0,
'-' : 1,
'|' : 0
}
class AlignmentProfile:
def __init__(self, width, df, identifier):
self.ident = identifier
self.profile = np.zeros((5, width))
self.repre_sq = ""
self.seq_alignments = None # this will be a pandas df
self.seq_align_counter = -1
self.calculate_profile(df)
def calculate_profile(self, df):
self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.shape[1], dtype=np.int8)) for index in df.index])
unwrapped_sq = df['sq'].str.split('', expand=True)
unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0], unwrapped_sq.columns[-1]])
counts = np.stack(df['count'].values)
for base in bases:
a = unwrapped_sq != base
newX = np.ma.array(counts, mask=a)
new_counts = newX.sum(axis=0)
self.profile[bases[base], :] += new_counts
# repre_sq
maxs = np.argmax(self.profile, axis=0)
self.repre_sq = "".join([rev_bases[x] for x in maxs])
def add_sequence(self, new_sq, new_counts, nice, sq_index):
offset = re.search(nice['target_aligned'].replace('-', ''), self.repre_sq).start(0)
x = self.profile
# padding with the following number of observed positions (sum of all bases)
# pad profile with insertions
insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]
for i, index in enumerate(insertions):
if x.shape[1] >= index:
value = 0
else:
value = x[:, index].sum()
x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)
self.seq_alignments.insert(loc=int(index+offset), column=self.seq_align_counter, value=1)
self.seq_align_counter -= 1
# pad new counts with deletions
aligned_query = np.array(list(nice['query_aligned']))
deletions = np.where(aligned_query == '-')[0]
for i, index in enumerate(deletions):
value = new_counts[index]
new_counts = np.insert(new_counts, index, value, axis=0)
i = offset
for base, count in zip(aligned_query, new_counts):
x[bases[base], i] += count
i += 1
self.profile = x
# store new sequence alignment
added_alignment = -np.ones(self.profile.shape[1])
for i, char in enumerate(nice['target_aligned']):
if char == '-':
added_alignment[offset + i] = 1
else:
added_alignment[offset + i] = 0
self.seq_alignments.loc[-1] = [sq_index, *added_alignment] # adding a row
self.seq_alignments.index = self.seq_alignments.index + 1 # shifting index
# recalculate repre_sq -- the most probable one
maxs = np.argmax(self.profile, axis=0)
self.repre_sq = "".join([rev_bases[x] for x in maxs if rev_bases[x] != '-']) # '-' is removed from the sq
def dst_func(x, y):
return (np.array(x) != np.array(y)).sum()
def read_alignment(filename):
|
def cluster_group(df_group, l, dst=dst_func):
sqs = df_group.reset_index()['sq']
n = len(sqs)
if n <= 1:
return np.zeros(n)
dst_matrix = np.zeros((n, n))
for i in range(n):
for j in range(i):
d = dst(sqs[i], sqs[j])
dst_matrix[i, j] = d
dst_matrix[j, i] = d
model = AgglomerativeClustering(distance_threshold=threshold * l,
n_clusters=None,
linkage='complete',
affinity='precomputed')
clusters = model.fit_predict(dst_matrix)
return clusters
aligned_sqs_file = args.input
k = args.k
misses = args.misses
pools = args.pools
threshold = misses / k
if args.aligned is None:
output_profile_dir = aligned_sqs_file + "_profiles"
else:
output_profile_dir = args.aligned
if args.overview is None:
output_csv_file = aligned_sqs_file + "_overview.csv"
else:
output_csv_file = args.overview
# read
df = pd.DataFrame(read_alignment(aligned_sqs_file))
df.columns = ['sq', 'count', 'str_count']
df['length'] = df['sq'].str.len()
# df['alignment'] = -1 # every aligned sq has an alignment identification
groups = df.groupby(by='length')
unique_lengths = df['length'].sort_values(ascending=False).unique()
against = []
longest = unique_lengths[0]
df_group = groups.get_group(longest).copy()
clusters = cluster_group(df_group, longest)
df_group['cluster'] = clusters
alignments = {
}
for cluster, cluster_df in df_group.groupby(by='cluster'):
alignment = AlignmentProfile(longest, cluster_df, global_alignment_ident_no)
alignments[global_alignment_ident_no] = alignment
global_alignment_ident_no += 1
against.append(alignment)
# df.loc[df['sq'].isin(cluster_df['sq']), 'alignment'] = alignment.ident
# to each sequence
start = time.time()
# print(df.groupby(by='length').get_group(longest))
# print("running on shorter")
with Bar("Processing length groups...", max=len(unique_lengths) - 1) as bar:
for length in unique_lengths[1:]:
bar.next()
df_group = groups.get_group(length).copy()
def getDistanceAndAlignment(sq):
# this is a fallback, it should not happen
maxval = np.floor(threshold * len(sq))
min = np.inf
min_target = None
if maxval < 1:
return min,min_target
for target in against:
align_res = edlib.align(sq, target.repre_sq, mode='HW', task='distance', k=maxval)
if align_res['editDistance'] != -1:
if min > align_res['editDistance']:
if align_res['editDistance'] == 0:
return align_res['editDistance'], target.ident
min = align_res['editDistance']
min_target = target
if min_target is not None:
min_target = min_target.ident
return min, min_target
x = length * threshold
if length * threshold >= 1:
# try align
with Pool(pools) as pool:
result = pool.map(getDistanceAndAlignment, df_group['sq'])
df_group['aligned'] = result
# add aligned to profiles
aligned = df_group[df_group['aligned'] != (np.inf, None)]
for index, row in aligned.iterrows():
to = alignments[row['aligned'][1]]
align_res = edlib.align(row.sq, to.repre_sq, mode='HW', task='path')
nice = edlib.getNiceAlignment(align_res, row.sq, to.repre_sq)
to.add_sequence(row.sq, row['count'], nice, index)
# df.loc[df['sq'] == row.sq, 'alignment'] = to.ident
# cluster unaligned, add to against
unaligned = df_group[df_group['aligned'] == (np.inf, None)].copy()
clusters = cluster_group(unaligned, length)
unaligned['cluster'] = clusters
for cluster, cluster_df in unaligned.groupby(by='cluster'):
alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)
alignments[global_alignment_ident_no] = alignment
global_alignment_ident_no += 1
against.append(alignment)
else:
# threshold is less than one, no clustering nor alignment takes place
df_group["aligned"] = [(np.inf, None) for _ in range(len(df_group))]
unaligned = df_group.copy()
unaligned["cluster"] = list(range(len(unaligned)))
# print(f"pseudoclustering elapsed: {time.time() - s}")
| for line in open(filename):
sq, count = line.strip('\n').split(';')
yield sq, np.array([int(x) for x in count.split(',')]), count | identifier_body |
calculate_profiles.py | ,
type=str,
help="Path to the output aligned directory. Required."
)
parser.add_argument("--overview",
default=None,
type=str,
help="Path to the output description csv. Required. Pairs with <--aligned> directory."
)
parser.add_argument("--k",
default=-1,
type=int,
help="Size of the k-mer created by BCALM. Required."
)
parser.add_argument("--input",
default=None,
type=str,
help="Path to the input file."
)
parser.set_defaults(all_sqs_result=False)
args = parser.parse_args([] if "__file__" not in globals() else None)
bases = dict(A=0, C=1, G=2, T=3)
bases['-'] = 4
rev_bases = {v: k for k, v in bases.items()}
global_alignment_ident_no = 0
operations = {
'.' : 0,
'-' : 1,
'|' : 0
}
class AlignmentProfile:
def __init__(self, width, df, identifier):
self.ident = identifier
self.profile = np.zeros((5, width))
self.repre_sq = ""
self.seq_alignments = None # this will be a pandas df
self.seq_align_counter = -1
self.calculate_profile(df)
def calculate_profile(self, df):
self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.shape[1], dtype=np.int8)) for index in df.index])
unwrapped_sq = df['sq'].str.split('', expand=True)
unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0], unwrapped_sq.columns[-1]])
counts = np.stack(df['count'].values)
for base in bases:
a = unwrapped_sq != base
newX = np.ma.array(counts, mask=a)
new_counts = newX.sum(axis=0)
self.profile[bases[base], :] += new_counts
# repre_sq
maxs = np.argmax(self.profile, axis=0)
self.repre_sq = "".join([rev_bases[x] for x in maxs])
def add_sequence(self, new_sq, new_counts, nice, sq_index):
offset = re.search(nice['target_aligned'].replace('-', ''), self.repre_sq).start(0)
x = self.profile
# padding with the following number of observed positions (sum of all bases)
# pad profile with insertions
insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]
for i, index in enumerate(insertions):
if x.shape[1] >= index:
value = 0
else:
value = x[:, index].sum()
x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)
self.seq_alignments.insert(loc=int(index+offset), column=self.seq_align_counter, value=1)
self.seq_align_counter -= 1
# pad new counts with deletions
aligned_query = np.array(list(nice['query_aligned']))
deletions = np.where(aligned_query == '-')[0]
for i, index in enumerate(deletions):
|
i = offset
for base, count in zip(aligned_query, new_counts):
x[bases[base], i] += count
i += 1
self.profile = x
# store new sequence alignment
added_alignment = -np.ones(self.profile.shape[1])
for i, char in enumerate(nice['target_aligned']):
if char == '-':
added_alignment[offset + i] = 1
else:
added_alignment[offset + i] = 0
self.seq_alignments.loc[-1] = [sq_index, *added_alignment] # adding a row
self.seq_alignments.index = self.seq_alignments.index + 1 # shifting index
# recalculate repre_sq -- the most probable one
maxs = np.argmax(self.profile, axis=0)
self.repre_sq = "".join([rev_bases[x] for x in maxs if rev_bases[x] != '-']) # '-' is removed from the sq
def dst_func(x, y):
return (np.array(x) != np.array(y)).sum()
def read_alignment(filename):
for line in open(filename):
sq, count = line.strip('\n').split(';')
yield sq, np.array([int(x) for x in count.split(',')]), count
def cluster_group(df_group, l, dst=dst_func):
sqs = df_group.reset_index()['sq']
n = len(sqs)
if n <= 1:
return np.zeros(n)
dst_matrix = np.zeros((n, n))
for i in range(n):
for j in range(i):
d = dst(sqs[i], sqs[j])
dst_matrix[i, j] = d
dst_matrix[j, i] = d
model = AgglomerativeClustering(distance_threshold=threshold * l,
n_clusters=None,
linkage='complete',
affinity='precomputed')
clusters = model.fit_predict(dst_matrix)
return clusters
aligned_sqs_file = args.input
k = args.k
misses = args.misses
pools = args.pools
threshold = misses / k
if args.aligned is None:
output_profile_dir = aligned_sqs_file + "_profiles"
else:
output_profile_dir = args.aligned
if args.overview is None:
output_csv_file = aligned_sqs_file + "_overview.csv"
else:
output_csv_file = args.overview
# read
df = pd.DataFrame(read_alignment(aligned_sqs_file))
df.columns = ['sq', 'count', 'str_count']
df['length'] = df['sq'].str.len()
# df['alignment'] = -1 # every aligned sq has an alignment identification
groups = df.groupby(by='length')
unique_lengths = df['length'].sort_values(ascending=False).unique()
against = []
longest = unique_lengths[0]
df_group = groups.get_group(longest).copy()
clusters = cluster_group(df_group, longest)
df_group['cluster'] = clusters
alignments = {
}
for cluster, cluster_df in df_group.groupby(by='cluster'):
alignment = AlignmentProfile(longest, cluster_df, global_alignment_ident_no)
alignments[global_alignment_ident_no] = alignment
global_alignment_ident_no += 1
against.append(alignment)
# df.loc[df['sq'].isin(cluster_df['sq']), 'alignment'] = alignment.ident
# to each sequence
start = time.time()
# print(df.groupby(by='length').get_group(longest))
# print("running on shorter")
with Bar("Processing length groups...", max=len(unique_lengths) - 1) as bar:
for length in unique_lengths[1:]:
bar.next()
df_group = groups.get_group(length).copy()
def getDistanceAndAlignment(sq):
# this is a fallback, it should not happen
maxval = np.floor(threshold * len(sq))
min = np.inf
min_target = None
if maxval < 1:
return min,min_target
for target in against:
align_res = edlib.align(sq, target.repre_sq, mode='HW', task='distance', k=maxval)
if align_res['editDistance'] != -1:
if min > align_res['editDistance']:
if align_res['editDistance'] == 0:
return align_res['editDistance'], target.ident
min = align_res['editDistance']
min_target = target
if min_target is not None:
min_target = min_target.ident
return min, min_target
x = length * threshold
if length * threshold >= 1:
# try align
with Pool(pools) as pool:
result = pool.map(getDistanceAndAlignment, df_group['sq'])
df_group['aligned'] = result
# add aligned to profiles
aligned = df_group[df_group['aligned'] != (np.inf, None)]
for index, row in aligned.iterrows():
to = alignments[row['aligned'][1]]
align_res = edlib.align(row.sq, to.repre_sq, mode='HW', task='path')
nice = edlib.getNiceAlignment(align_res, row.sq, to.repre_sq)
to.add_sequence(row.sq, row['count'], nice, index)
# df.loc[df['sq'] == row.sq, 'alignment'] = to.ident
# cluster unaligned, add to against
unaligned = df_group[df_group['aligned'] == (np.inf, None)].copy()
clusters = cluster_group(unaligned, length)
unaligned['cluster'] = clusters
for cluster, cluster_df in unaligned.groupby(by='cluster'):
alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)
alignments[global_alignment_ident_no] = alignment
global_alignment_ident_no += 1
against.append(alignment)
else:
# threshold is less than one, no clustering nor alignment takes place
df_group["aligned"] = [(np.inf, None) for _ in range(len(df_group))]
unaligned = df_group.copy()
unaligned["cluster"] = list(range(len(unaligned)))
# print(f"pseudoclustering elapsed: {time.time() - s | value = new_counts[index]
new_counts = np.insert(new_counts, index, value, axis=0) | conditional_block |
game.py | _K: logic.down}
self.setWindowFlags(
QtCore.Qt.CustomizeWindowHint |
QtCore.Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground, True)
self.center()
self.settings()
self.restoreStates()
self.fontDatabase = QFontDatabase()
self.fontDatabase.addApplicationFont(APP_FOLDER + "/resources/fonts/JackportCollegeNcv-1MZe.ttf")
self.fontDatabase.addApplicationFont(APP_FOLDER + "/resources/fonts/Rosemary-Bold.ttf")
self.lScore.setFont(QFont("JACKPORT COLLEGE NCV", 40))
self.lHiScore.setFont(QFont("JACKPORT COLLEGE NCV", 40))
self.bExit.setFont(QFont("JACKPORT COLLEGE NCV", 32))
self.lMediaSet.setFont(QFont("Rosemary", 38))
swipeCurImg = QPixmap(APP_FOLDER + "/resources/images/swipeCursor.png")
handCurImg = QPixmap(APP_FOLDER + "/resources/images/handCursor.png")
self.cursors = [handCurImg, swipeCurImg]
self.centralwidget.setCursor(QCursor(self.cursors[0], 15, 2))
self.frame.setCursor(QCursor(self.cursors[1], 35, 35))
self.frame.setAttribute(Qt.WA_StyledBackground, (True))
self.animateBackground()
self.musIcon()
self.sndIcon()
self.playMusic()
self.init_grid()
self.matrix = logic.new_game(c.GRID_LEN)
self.history_matrixs = []
self.update_grid_cells()
self.restoreGame()
# Αντιστοίχιση ενεργειών κίνησης ποντικιού και κλικ
self.bHelp.clicked.connect(self.bHelpClicked)
self.bAnim.clicked.connect(self.animStartStop)
self.sndslider.valueChanged.connect(self.sndslvalchanged)
self.musslider.valueChanged.connect(self.musslvalchanged)
self.frame.installEventFilter(self)
# Μέθοδος ενεργοποίησης/απενεργοποίησης του κινούμενου bakcground
def animStartStop(self):
if self.movie != None and self.movie.state() == QMovie.Running:
self.movie.stop()
self.bAnim.setStyleSheet("QPushButton{\n"
"border-image: url(:/resources/images/anim_off.png)\n"
"}")
else:
self.animateBackground()
self.bAnim.setStyleSheet("QPushButton{\n"
"border-image: url(:/resources/images/anim_on.png)\n"
"}")
# Σχεδίαση του βασικού πλαισίου
def init_grid(self):
for i in range(c.GRID_LEN):
for j in range(c.GRID_LEN):
self.gridBoard.addWidget(self.setTile("empty"), i, j)
def generate_next(self):
index = (gen(), gen())
while self.matrix[index[0]][index[1]] != 0:
index = (gen(), gen())
self.matrix[index[0]][index[1]] = 2
def update_grid_cells(self):
for i in range(c.GRID_LEN):
for j in range(c.GRID_LEN):
new_number = self.matrix[i][j]
if new_number == 0:
self.replaceTile("empty", i, j)
else:
self.replaceTile(new_number, i, j)
# Δημιουργία των αριθμητικών πλακιδίων
def setTile(self, item):
tile = QLabel('')
tile.setFocusPolicy(Qt.NoFocus)
tile.setFixedWidth(135)
tile.setFixedHeight(140)
path = c.CELL_IMAGE_DICT.get(item)
tile.setStyleSheet("QLabel{\n"
"border-image: url(:" + str(path) + ")\n"
"}")
tile.setObjectName(str(item))
return tile
# Μέθοδος αντικατάστασης πλακιδίου
def replaceTile(self, newTile, pX, pY):
item = self.gridBoard.itemAtPosition(pX, pY)
self.gridBoard.removeWidget(item.widget())
self.gridBoard.addWidget(self.setTile(newTile), pX, pY)
# Ενεργοποίηση των πλήκτρων χειρισμού
def keyPressEvent(self, event):
key = event.key()
modifiers = QtWidgets.QApplication.keyboardModifiers()
if modifiers == QtCore.Qt.ControlModifier:
if key == QtCore.Qt.Key_Home:
# print("is up!")
if self.frameGeometry().y() > 0:
self.move(self.frameGeometry().x(), self.frameGeometry().y() - self.speed)
elif key == QtCore.Qt.Key_End:
# print("is down!")
if self.frameGeometry().y() > 0:
self.move(self.frameGeometry().x(), self.frameGeometry().y() + self.speed)
elif key == QtCore.Qt.Key_Delete:
# print("is left!")
if self.frameGeometry().x() > 0:
self.move(self.frameGeometry().x() - self.speed, self.frameGeometry().y())
elif key == QtCore.Qt.Key_PageDown:
# print("is right!")
if self.frameGeometry().x() > 0:
self.move(self.frameGeometry().x() + self.speed, self.frameGeometry().y())
if key == c.KEY_BACK or key == c.KEY_BACK_ALT:
if len(self.history_matrixs) > 1:
self.matrix = self.history_matrixs.pop()
self.update_grid_cells()
#print('back on step total step:', len(self.history_matrixs))
# Έλεγχος Αν το παιχνίδι τελείωσε και αν έχει κερδηθεί ή χαθεί
elif key in self.commands:
self.frame.setCursor(Qt.BlankCursor)
self.matrix, done = self.commands[key](self.matrix)
if done:
self.stateOfGame()
def stateOfGame(self):
self.playSound(APP_FOLDER + c.SOUNDS_DICT["move"])
self.matrix = logic.add_two(self.matrix)
# record last move
self.history_matrixs.append(self.matrix)
#print(self.history_matrixs[-1])
self.update_grid_cells()
if logic.game_state(self.matrix) == 'win':
if logic.winNum != 65535:
print("num: " + str(logic.winNum))
for key, value in c.SOUNDS_DICT.items():
if key == str(logic.winNum):
self.playSound(APP_FOLDER + value)
winLooseDlg.WinLooseDialog(self).dialogTypes("WIN")
print("Κερδίσες")
else:
winLooseDlg.WinLooseDialog(self).dialogTypes("ENDGAME")
if logic.game_state(self.matrix) == 'lose':
self.playSound(APP_FOLDER + c.SOUNDS_DICT["lose"])
winLooseDlg.WinLooseDialog(self).dialogTypes("LOOSE")
print("Έχασες")
self.lScore.setText(str(c.SCORE))
if int(self.lHiScore.text()) < int(self.lScore.text()):
self.lHiScore.setText(self.lScore.text())
# Μέθοδος για χειρσμό με κλικ και σύρσιμο του ποντικιού
def eventFilter(self, source, event):
if event.type() == QtCore.QEvent.MouseMove:
self.frame.setCursor(QCursor(self.cursors[1], 35, 35))
if event.buttons() == QtCore.Qt.LeftButton:
if len(self.points) > 2:
startx, starty = self.points[0][0], self.points[0][1]
for i in range(len(self.points)):
self.points[i] = (self.points[i][0] - startx, self.points[i][1] - starty)
self.points.append((event.localPos().x(), event.localPos().y()))
# print(self.points)
if event.type() == QtCore.QEvent.MouseButtonRelease and event.button() == QtCore.Qt.LeftButton:
# print("Released!")
self.mouseDown = False
strokes = moosegesture.getGesture(self.points)
if len(strokes)>0:
strokeText = str(strokes[-1])
# print(strokeText)
if strokeText == "R":
self.matrix, done = logic.right(self.matrix)
if done:
self.stateOfGame()
elif strokeText == "L":
self.matrix, done = logic.left(self.matrix)
if done:
self.stateOfGame()
elif strokeText == "U":
self.matrix, done = logic.up(self.matrix)
| if done:
self.stateOfGame()
elif strokeText == "D":
| conditional_block |
|
game.py | for j in range(c.GRID_LEN):
new_number = self.matrix[i][j]
if new_number == 0:
self.replaceTile("empty", i, j)
else:
self.replaceTile(new_number, i, j)
# Δημιουργία των αριθμητικών πλακιδίων
def setTile(self, item):
tile = QLabel('')
tile.setFocusPolicy(Qt.NoFocus)
tile.setFixedWidth(135)
tile.setFixedHeight(140)
path = c.CELL_IMAGE_DICT.get(item)
tile.setStyleSheet("QLabel{\n"
"border-image: url(:" + str(path) + ")\n"
"}")
tile.setObjectName(str(item))
return tile
# Μέθοδος αντικατάστασης πλακιδίου
def replaceTile(self, newTile, pX, pY):
item = self.gridBoard.itemAtPosition(pX, pY)
self.gridBoard.removeWidget(item.widget())
self.gridBoard.addWidget(self.setTile(newTile), pX, pY)
# Ενεργοποίηση των πλήκτρων χειρισμού
def keyPressEvent(self, event):
key = event.key()
modifiers = QtWidgets.QApplication.keyboardModifiers()
if modifiers == QtCore.Qt.ControlModifier:
if key == QtCore.Qt.Key_Home:
# print("is up!")
if self.frameGeometry().y() > 0:
self.move(self.frameGeometry().x(), self.frameGeometry().y() - self.speed)
elif key == QtCore.Qt.Key_End:
# print("is down!")
if self.frameGeometry().y() > 0:
self.move(self.frameGeometry().x(), self.frameGeometry().y() + self.speed)
elif key == QtCore.Qt.Key_Delete:
# print("is left!")
if self.frameGeometry().x() > 0:
self.move(self.frameGeometry().x() - self.speed, self.frameGeometry().y())
elif key == QtCore.Qt.Key_PageDown:
# print("is right!")
if self.frameGeometry().x() > 0:
self.move(self.frameGeometry().x() + self.speed, self.frameGeometry().y())
if key == c.KEY_BACK or key == c.KEY_BACK_ALT:
if len(self.history_matrixs) > 1:
self.matrix = self.history_matrixs.pop()
self.update_grid_cells()
#print('back on step total step:', len(self.history_matrixs))
# Έλεγχος Αν το παιχνίδι τελείωσε και αν έχει κερδηθεί ή χαθεί
elif key in self.commands:
self.frame.setCursor(Qt.BlankCursor)
self.matrix, done = self.commands[key](self.matrix)
if done:
self.stateOfGame()
def stateOfGame(self):
self.playSound(APP_FOLDER + c.SOUNDS_DICT["move"])
self.matrix = logic.add_two(self.matrix)
# record last move
self.history_matrixs.append(self.matrix)
#print(self.history_matrixs[-1])
self.update_grid_cells()
if logic.game_state(self.matrix) == 'win':
if logic.winNum != 65535:
print("num: " + str(logic.winNum))
for key, value in c.SOUNDS_DICT.items():
if key == str(logic.winNum):
self.playSound(APP_FOLDER + value)
winLooseDlg.WinLooseDialog(self).dialogTypes("WIN")
print("Κερδίσες")
else:
winLooseDlg.WinLooseDialog(self).dialogTypes("ENDGAME")
if logic.game_state(self.matrix) == 'lose':
self.playSound(APP_FOLDER + c.SOUNDS_DICT["lose"])
winLooseDlg.WinLooseDialog(self).dialogTypes("LOOSE")
print("Έχασες")
self.lScore.setText(str(c.SCORE))
if int(self.lHiScore.text()) < int(self.lScore.text()):
self.lHiScore.setText(self.lScore.text())
# Μέθοδος για χειρσμό με κλικ και σύρσιμο του ποντικιού
def eventFilter(self, source, event):
if event.type() == QtCore.QEvent.MouseMove:
self.frame.setCursor(QCursor(self.cursors[1], 35, 35))
if event.buttons() == QtCore.Qt.LeftButton:
if len(self.points) > 2:
startx, starty = self.points[0][0], self.points[0][1]
for i in range(len(self.points)):
self.points[i] = (self.points[i][0] - startx, self.points[i][1] - starty)
self.points.append((event.localPos().x(), event.localPos().y()))
# print(self.points)
if event.type() == QtCore.QEvent.MouseButtonRelease and event.button() == QtCore.Qt.LeftButton:
# print("Released!")
self.mouseDown = False
strokes = moosegesture.getGesture(self.points)
if len(strokes)>0:
strokeText = str(strokes[-1])
# print(strokeText)
if strokeText == "R":
self.matrix, done = logic.right(self.matrix)
if done:
self.stateOfGame()
elif strokeText == "L":
self.matrix, done = logic.left(self.matrix)
if done:
self.stateOfGame()
elif strokeText == "U":
self.matrix, done = logic.up(self.matrix)
if done:
self.stateOfGame()
elif strokeText == "D":
self.matrix, done = logic.down(self.matrix)
if done:
self.stateOfGame()
strokes.clear()
else:
return True
return self.frame.eventFilter(source, event)
# Μέθοδος για την αναπαραγωγή των ήχων
def playSound(self, sound):
pygame.mixer.pre_init(frequency=44100, size=-16, channels=3, buffer=512)
pygame.mixer.init()
effect = pygame.mixer.Sound(sound)
self.sndslider.setMinimum(0)
self.sndslider.setMaximum(100)
vol = self.sndslider.value()
effect.set_volume(vol / 100)
pygame.mixer.find_channel().play(effect)
# Έλεγχος για αλλαγές στο εικονίδιο ήχου
def sndslvalchanged(self):
self.sndIcon()
# Αλλαγή του εικονιδίου ήχου
def sndIcon(self):
if self.sndslider.value() == 0:
self.lSound.setStyleSheet("QLabel{\n"
"border-image: url(:/resources/images/sound_off.png)\n"
"}")
else:
self.lSound.setStyleSheet("QLabel{\n"
"border-image: url(:/resources/images/sound_on.png)\n"
"}")
# Μέθοδος για την αnaπαραγωγή μουσικής
def playMusic(self):
pygame.mixer.init(frequency=44100, size=-16, channels=1, buffer=512)
pygame.mixer.music.load(os.path.join(APP_FOLDER, "resources/sounds/backmusic.ogg"))
self.musslider.setMinimum(0)
self.musslider.setMaximum(100)
musvol = self.musslider.value()
pygame.mixer.music.set_volume(musvol / 100)
pygame.mixer.music.play(loops=-1)
self.musslider.valueChanged.connect(self.musslvalchanged)
def musslvalchanged(self):
pygame.mixer.music.pause()
pygame.mixer.music.set_volume(self.musslider.value() / 100)
pygame.mixer.music.unpause()
self.musIcon()
def musIcon(self):
if self.musslider.value() == 0:
self.lMusic.setStyleSheet("QLabel{\n"
"border-image: url(:/resources/images/music_off.png)\n"
"}")
else:
self.lMusic.setStyleSheet("QLabel{\n"
"border-image: url(:/resources/images/music_on.png)\n"
"}")
def chkGrid(self):
if len(self.history_matrixs) != 0:
lst = self.history_matrixs[ | -1]
else:
lst=self.settings().value("gameState")
nums=[]
for i in range(len(lst)):
for j in range(len(lst[0])):
if lst[i][j]!=0:
nums.append(lst[i][j])
print(nums)
return nums
def bHelpClicked(self):
helpDlg.HelpDialog(self).exec()
pyqtSlot()
def on_bPlay_clicked(self):
self.gridBoard.bl | identifier_body |
|
game.py | def __init__(self, parent=None):
# Αρχικοποίηση του γραφικού περιβάλλοντος
super(Game, self).__init__(parent)
print(APP_FOLDER)
self.setupUi(self)
c.SCORE=self.settings().value("score", 0, type=int)
# Μεταβλητές
self.points = []
self.speed = 30
self.movie = None
self.commands = {c.KEY_UP: logic.up, c.KEY_DOWN: logic.down,
c.KEY_LEFT: logic.left, c.KEY_RIGHT: logic.right,
c.KEY_UP_ALT: logic.up, c.KEY_DOWN_ALT: logic.down,
c.KEY_LEFT_ALT: logic.left, c.KEY_RIGHT_ALT: logic.right,
c.KEY_J: logic.left, c.KEY_L: logic.right,
c.KEY_I: logic.up, c.KEY_K: logic.down}
self.setWindowFlags(
QtCore.Qt.CustomizeWindowHint |
QtCore.Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground, True)
self.center()
self.settings()
self.restoreStates()
self.fontDatabase = QFontDatabase()
self.fontDatabase.addApplicationFont(APP_FOLDER + "/resources/fonts/JackportCollegeNcv-1MZe.ttf")
self.fontDatabase.addApplicationFont(APP_FOLDER + "/resources/fonts/Rosemary-Bold.ttf")
self.lScore.setFont(QFont("JACKPORT COLLEGE NCV", 40))
self.lHiScore.setFont(QFont("JACKPORT COLLEGE NCV", 40))
self.bExit.setFont(QFont("JACKPORT COLLEGE NCV", 32))
self.lMediaSet.setFont(QFont("Rosemary", 38))
swipeCurImg = QPixmap(APP_FOLDER + "/resources/images/swipeCursor.png")
handCurImg = QPixmap(APP_FOLDER + "/resources/images/handCursor.png")
self.cursors = [handCurImg, swipeCurImg]
self.centralwidget.setCursor(QCursor(self.cursors[0], 15, 2))
self.frame.setCursor(QCursor(self.cursors[1], 35, 35))
self.frame.setAttribute(Qt.WA_StyledBackground, (True))
self.animateBackground()
self.musIcon()
self.sndIcon()
self.playMusic()
self.init_grid()
self.matrix = logic.new_game(c.GRID_LEN)
self.history_matrixs = []
self.update_grid_cells()
self.restoreGame()
# Αντιστοίχιση ενεργειών κίνησης ποντικιού και κλικ
self.bHelp.clicked.connect(self.bHelpClicked)
self.bAnim.clicked.connect(self.animStartStop)
self.sndslider.valueChanged.connect(self.sndslvalchanged)
self.musslider.valueChanged.connect(self.musslvalchanged)
self.frame.installEventFilter(self)
# Μέθοδος ενεργοποίησης/απενεργοποίησης του κινούμενου bakcground
def animStartStop(self):
if self.movie != None and self.movie.state() == QMovie.Running:
self.movie.stop()
self.bAnim.setStyleSheet("QPushButton{\n"
"border-image: url(:/resources/images/anim_off.png)\n"
"}")
else:
self.animateBackground()
self.bAnim.setStyleSheet("QPushButton{\n"
"border-image: url(:/resources/images/anim_on.png)\n"
"}")
# Σχεδίαση του βασικού πλαισίου
def init_grid(self):
for i in range(c.GRID_LEN):
for j in range(c.GRID_LEN):
self.gridBoard.addWidget(self.setTile("empty"), i, j)
def generate_next(self):
index = (gen(), gen())
while self.matrix[index[0]][index[1]] != 0:
index = (gen(), gen())
self.matrix[index[0]][index[1]] = 2
def update_grid_cells(self):
for i in range(c.GRID_LEN):
for j in range(c.GRID_LEN):
new_number = self.matrix[i][j]
if new_number == 0:
self.replaceTile("empty", i, j)
else:
self.replaceTile(new_number, i, j)
# Δημιουργία των αριθμητικών πλακιδίων
def setTile(self, item):
tile = QLabel('')
tile.setFocusPolicy(Qt.NoFocus)
tile.setFixedWidth(135)
tile.setFixedHeight(140)
path = c.CELL_IMAGE_DICT.get(item)
tile.setStyleSheet("QLabel{\n"
"border-image: url(:" + str(path) + ")\n"
"}")
tile.setObjectName(str(item))
return tile
# Μέθοδος αντικατάστασης πλακιδίου
def replaceTile(self, newTile, pX, pY):
item = self.gridBoard.itemAtPosition(pX, pY)
self.gridBoard.removeWidget(item.widget())
self.gridBoard.addWidget(self.setTile(newTile), pX, pY)
# Ενεργοποίηση των πλήκτρων χειρισμού
def keyPressEvent(self, event):
key = event.key()
modifiers = QtWidgets.QApplication.keyboardModifiers()
if modifiers == QtCore.Qt.ControlModifier:
if key == QtCore.Qt.Key_Home:
# print("is up!")
if self.frameGeometry().y() > 0:
self.move(self.frameGeometry().x(), self.frameGeometry().y() - self.speed)
elif key == QtCore.Qt.Key_End:
# print("is down!")
if self.frameGeometry().y() > 0:
self.move(self.frameGeometry().x(), self.frameGeometry().y() + self.speed)
elif key == QtCore.Qt.Key_Delete:
# print("is left!")
if self.frameGeometry().x() > 0:
self.move(self.frameGeometry().x() - self.speed, self.frameGeometry().y())
elif key == QtCore.Qt.Key_PageDown:
# print("is right!")
if self.frameGeometry().x() > 0:
self.move(self.frameGeometry().x() + self.speed, self.frameGeometry().y())
if key == c.KEY_BACK or key == c.KEY_BACK_ALT:
if len(self.history_matrixs) > 1:
self.matrix = self.history_matrixs.pop()
self.update_grid_cells()
#print('back on step total step:', len(self.history_matrixs))
# Έλεγχος Αν το παιχνίδι τελείωσε και αν έχει κερδηθεί ή χαθεί
elif key in self.commands:
self.frame.setCursor(Qt.BlankCursor)
self.matrix, done = self.commands[key](self.matrix)
if done:
self.stateOfGame()
def stateOfGame(self):
self.playSound(APP_FOLDER + c.SOUNDS_DICT["move"])
self.matrix = logic.add_two(self.matrix)
# record last move
self.history_matrixs.append(self.matrix)
#print(self.history_matrixs[-1])
self.update_grid_cells()
if logic.game_state(self.matrix) == 'win':
if logic.winNum != 65535:
print("num: " + str(logic.winNum))
for key, value in c.SOUNDS_DICT.items():
if key == str(logic.winNum):
self.playSound(APP_FOLDER + value)
winLooseDlg.WinLooseDialog(self).dialogTypes("WIN")
print("Κερδίσες")
else:
winLooseDlg.WinLooseDialog(self).dialogTypes("ENDGAME")
if logic.game_state(self.matrix) == 'lose':
self.playSound(APP_FOLDER + c.SOUNDS_DICT["lose"])
winLooseDlg.WinLooseDialog(self).dialogTypes("LOOSE")
print("Έχασες")
self.lScore.setText(str(c.SCORE))
if int(self.lHiScore.text()) < int(self.lScore.text()):
self.lHiScore.setText(self.lScore.text())
# Μέθοδος για χειρσμό με κλικ και σύρσιμο του ποντικιού
def eventFilter(self, source, event):
if event.type() == QtCore.QEvent.MouseMove:
self.frame.setCursor(QCursor(self.cursors[1], 35, 35))
if event.buttons() == QtCore.Qt.LeftButton:
if len(self.points) > 2:
startx, starty = self.points[0][0], self.points[0][ |
def settings(self):
settings = QSettings()
return settings
| random_line_split |
|
game.py | RID_LEN):
for j in range(c.GRID_LEN):
self.gridBoard.addWidget(self.setTile("empty"), i, j)
def generate_next(self):
index = (gen(), gen())
while self.matrix[index[0]][index[1]] != 0:
index = (gen(), gen())
self.matrix[index[0]][index[1]] = 2
def update_grid_cells(self):
for i in range(c.GRID_LEN):
for j in range(c.GRID_LEN):
new_number = self.matrix[i][j]
if new_number == 0:
self.replaceTile("empty", i, j)
else:
self.replaceTile(new_number, i, j)
# Δημιουργία των αριθμητικών πλακιδίων
def setTile(self, item):
tile = QLabel('')
tile.setFocusPolicy(Qt.NoFocus)
tile.setFixedWidth(135)
tile.setFixedHeight(140)
path = c.CELL_IMAGE_DICT.get(item)
tile.setStyleSheet("QLabel{\n"
"border-image: url(:" + str(path) + ")\n"
"}")
tile.setObjectName(str(item))
return tile
# Μέθοδος αντικατάστασης πλακιδίου
def replaceTile(self, newTile, pX, pY):
item = self.gridBoard.itemAtPosition(pX, pY)
self.gridBoard.removeWidget(item.widget())
self.gridBoard.addWidget(self.setTile(newTile), pX, pY)
# Ενεργοποίηση των πλήκτρων χειρισμού
def keyPressEvent(self, event):
key = event.key()
modifiers = QtWidgets.QApplication.keyboardModifiers()
if modifiers == QtCore.Qt.ControlModifier:
if key == QtCore.Qt.Key_Home:
# print("is up!")
if self.frameGeometry().y() > 0:
self.move(self.frameGeometry().x(), self.frameGeometry().y() - self.speed)
elif key == QtCore.Qt.Key_End:
# print("is down!")
if self.frameGeometry().y() > 0:
self.move(self.frameGeometry().x(), self.frameGeometry().y() + self.speed)
elif key == QtCore.Qt.Key_Delete:
# print("is left!")
if self.frameGeometry().x() > 0:
self.move(self.frameGeometry().x() - self.speed, self.frameGeometry().y())
elif key == QtCore.Qt.Key_PageDown:
# print("is right!")
if self.frameGeometry().x() > 0:
self.move(self.frameGeometry().x() + self.speed, self.frameGeometry().y())
if key == c.KEY_BACK or key == c.KEY_BACK_ALT:
if len(self.history_matrixs) > 1:
self.matrix = self.history_matrixs.pop()
self.update_grid_cells()
#print('back on step total step:', len(self.history_matrixs))
# Έλεγχος Αν το παιχνίδι τελείωσε και αν έχει κερδηθεί ή χαθεί
elif key in self.commands:
self.frame.setCursor(Qt.BlankCursor)
self.matrix, done = self.commands[key](self.matrix)
if done:
self.stateOfGame()
def stateOfGame(self):
self.playSound(APP_FOLDER + c.SOUNDS_DICT["move"])
self.matrix = logic.add_two(self.matrix)
# record last move
self.history_matrixs.append(self.matrix)
#print(self.history_matrixs[-1])
self.update_grid_cells()
if logic.game_state(self.matrix) == 'win':
if logic.winNum != 65535:
print("num: " + str(logic.winNum))
for key, value in c.SOUNDS_DICT.items():
if key == str(logic.winNum):
self.playSound(APP_FOLDER + value)
winLooseDlg.WinLooseDialog(self).dialogTypes("WIN")
print("Κερδίσες")
else:
winLooseDlg.WinLooseDialog(self).dialogTypes("ENDGAME")
if logic.game_state(self.matrix) == 'lose':
self.playSound(APP_FOLDER + c.SOUNDS_DICT["lose"])
winLooseDlg.WinLooseDialog(self).dialogTypes("LOOSE")
print("Έχασες")
self.lScore.setText(str(c.SCORE))
if int(self.lHiScore.text()) < int(self.lScore.text()):
self.lHiScore.setText(self.lScore.text())
# Μέθοδος για χειρσμό με κλικ και σύρσιμο του ποντικιού
def eventFilter(self, source, event):
if event.type() == QtCore.QEvent.MouseMove:
self.frame.setCursor(QCursor(self.cursors[1], 35, 35))
if event.buttons() == QtCore.Qt.LeftButton:
if len(self.points) > 2:
startx, starty = self.points[0][0], self.points[0][1]
for i in range(len(self.points)):
self.points[i] = (self.points[i][0] - startx, self.points[i][1] - starty)
self.points.append((event.localPos().x(), event.localPos().y()))
# print(self.points)
if event.type() == QtCore.QEvent.MouseButtonRelease and event.button() == QtCore.Qt.LeftButton:
# print("Released!")
self.mouseDown = False
strokes = moosegesture.getGesture(self.points)
if len(strokes)>0:
strokeText = str(strokes[-1])
# print(strokeText)
if strokeText == "R":
self.matrix, done = logic.right(self.matrix)
if done:
self.stateOfGame()
elif strokeText == "L":
self.matrix, done = logic.left(self.matrix)
if done:
self.stateOfGame()
elif strokeText == "U":
self.matrix, done = logic.up(self.matrix)
if done:
self.stateOfGame()
elif strokeText == "D":
self.matrix, done = logic.down(self.matrix)
if done:
self.stateOfGame()
strokes.clear()
else:
return True
return self.frame.eventFilter(source, event)
# Μέθοδος για την αναπαραγωγή των ήχων
def playSound(self, sound):
pygame.mixer.pre_init(frequency=44100, size=-16, channels=3, buffer=512)
pygame.mixer.init()
effect = pygame.mixer.Sound(sound)
self.sndslider.setMinimum(0)
self.sndslider.setMaximum(100)
vol = self.sndslider.value()
effect.set_volume(vol / 100)
pygame.mixer.find_channel().play(effect)
# Έλεγχος για αλλαγές στο εικονίδιο ήχου
def sndslvalchanged(self):
self.sndIcon()
# Αλλαγή του εικονιδίου ήχου
def sndIcon(self):
if self.sndslider.value() == 0:
self.lSound.setStyleSheet("QLabel{\n"
"border-image: url(:/resources/images/sound_off.png)\n"
"}")
else:
self.lSound.setStyleSheet("QLabel{\n"
"border-image: url(:/resources/images/sound_on.png)\n"
"}")
# Μέθοδος για την αnaπαραγωγή μουσικής
def playMusic(self):
pygame.mixer.init(frequency=44100, size=-16, channels=1, buffer=512)
pygame.mixer.music.load(os.path.join(APP_FOLDER, "resources/sounds/backmusic.ogg"))
self.musslider.setMinimum(0)
self.musslider.setMaximum(100)
musvol = self.musslider.value()
pygame.mixer.music.set_volume(musvol / 100)
pygame.mixer.music.play(loops=-1)
self.musslider.valueChanged.connect(self.musslvalchanged)
def musslvalchanged(self):
pygame.mixer.music.pause()
pygame.mixer.music.set_volume(self.musslider.value() / 100)
pygame.mixer.music.unpause()
self.musIcon()
def musIcon(self):
if self.musslider.value() == 0:
self.lMusic.setStyleSheet("QLabel{\n"
"border-image: url(:/resources/images/music_off.png)\n"
"}")
else:
self.lMusic.setStyleSheet("QLabel{\n"
"border-image: url(:/resources/images/music_on.png)\n"
"}")
def chkGrid(self):
if len(self.history_matrixs) != 0:
lst | = self. | identifier_name |
|
key.go | Now := time.Now().UTC().Truncate(time.Second)
if tokenBuildRequest.UtcNotBefore == nil {
tokenBuildRequest.UtcNotBefore = &utcNow
}
tokenBuildRequest.Claims.Issued = jwt.NewNumericTime(utcNow)
tokenBuildRequest.Claims.NotBefore = jwt.NewNumericTime(*tokenBuildRequest.UtcNotBefore)
if tokenBuildRequest.UtcExpires == nil {
tokenBuildRequest.Claims.Expires = nil
} else {
tokenBuildRequest.Claims.Expires = jwt.NewNumericTime(*tokenBuildRequest.UtcExpires)
}
tokenBuildRequest.Claims.KeyID = cachedItem.CurrentVersionId
tokenBuildRequest.Claims.Issuer = baseUrl
if tokenBuildRequest.Claims.Audiences == nil {
tokenBuildRequest.Claims.Audiences = []string{}
}
tokenBuildRequest.Claims.Audiences = append(tokenBuildRequest.Claims.Audiences, tokenBuildRequest.Claims.Issuer)
keyClient, err := GetKeyClient()
if err != nil {
return
}
keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl") //"https://P7KeyValut.vault.azure.net/"
keyIdentifier := viper.GetString("keyVault.KeyIdentifier") //"P7IdentityServer4SelfSigned"
ctx := context.Background()
byteToken, err := keyClient.Sign2(ctx, &tokenBuildRequest.Claims, azKeyvault.RS256, keyVaultUrl, keyIdentifier, tokenBuildRequest.Claims.KeyID)
if err != nil {
return
}
token = string(byteToken)
return
}
func RSA256AzureSign(ctx context.Context, data []byte) (kid *string, signature *string, err error) {
keyClient := getKeysClient()
sEnc := util.ByteArraySha256Encode64(data)
keyOperationResult, err := keyClient.Sign(ctx, "https://P7KeyValut.vault.azure.net/", "P7IdentityServer4SelfSigned", "", keyvault.KeySignParameters{
Algorithm: azKeyvault.RS256,
Value: &sEnc,
})
if err != nil {
return
}
return keyOperationResult.Kid, keyOperationResult.Result, nil
}
func (keyClient *BaseClient2) Sign2(
ctx context.Context,
claims *jwt.Claims,
alg azKeyvault.JSONWebKeySignatureAlgorithm,
vaultBaseURL string,
keyName string,
keyVersion string) (token []byte, err error) {
tokenWithoutSignature, err := claims.FormatWithoutSign(string(alg))
if err != nil {
return nil, err
}
sEnc := util.ByteArraySha256Encode64(tokenWithoutSignature)
keyOperationResult, err := keyClient.Sign(ctx, vaultBaseURL, keyName, keyVersion, azKeyvault.KeySignParameters{
Algorithm: alg,
Value: &sEnc,
})
if err != nil {
return
}
token = append(tokenWithoutSignature, '.')
token = append(token, []byte(*keyOperationResult.Result)...)
return token, nil
}
func (keyClient *BaseClient2) GetActiveKeysVersion2(ctx context.Context, keyVaultUrl string, keyIdentifier string) (finalResult []azKeyvault.KeyBundle, currentKeyBundle azKeyvault.KeyBundle, err error) {
keyId := "123"
claims := jwt.Claims{
Registered: jwt.Registered{
Subject: "kkazanova",
Audiences: []string{"KGB", "RU"},
},
Set: map[string]interface{}{
"iss": nil,
"sub": "karcher",
"aud": "ISIS",
},
KeyID: keyId,
}
token, _ := keyClient.Sign2(ctx, &claims, azKeyvault.RS256, keyVaultUrl, keyIdentifier, keyId)
sToken := string(token)
fmt.Println(sToken)
// Length requirements defined by 2.2.2.9.1 RSA Private Key BLOB (https://msdn.microsoft.com/en-us/library/cc250013.aspx).
/*
PubExp (4 bytes): Length MUST be 4 bytes.
This field MUST be present as an unsigned integer in little-endian format.
The value of this field MUST be the RSA public key exponent for this key. The client SHOULD set this value to 65,537.
E is comming back as an Base64Url encoded byte[] of size 3.
*/
var maxResults int32 = 10
pageResult, err := keyClient.GetKeyVersions(ctx,
keyVaultUrl,
keyIdentifier,
&maxResults)
if err != nil {
return
}
utcNow := time.Now().UTC()
for {
for _, element := range pageResult.Values() {
// element is the element from someSlice for where we are
if *element.Attributes.Enabled {
var keyExpire time.Time
keyExpire = time.Time(*element.Attributes.Expires)
if keyExpire.After(utcNow) {
parts := strings.Split(*element.Kid, "/")
lastItemVersion := parts[len(parts)-1]
keyBundle, er := keyClient.GetKey(ctx,
keyVaultUrl,
keyIdentifier,
lastItemVersion)
if er != nil {
err = er
return
}
fixedE := fixE(*keyBundle.Key.E)
*keyBundle.Key.E = fixedE
finalResult = append(finalResult, keyBundle)
}
}
}
if !pageResult.NotDone() {
break
}
err = pageResult.Next()
if err != nil {
return
}
}
sort.Slice(finalResult[:], func(i, j int) bool {
notBeforeA := time.Time(*finalResult[i].Attributes.NotBefore)
notBeforeB := time.Time(*finalResult[j].Attributes.NotBefore)
return notBeforeA.After(notBeforeB)
})
for _, element := range finalResult {
notVBefore := time.Time(*element.Attributes.NotBefore)
if notVBefore.Before(utcNow) {
currentKeyBundle = element
break
}
}
return
}
func GetKeyClient() (keyClient BaseClient2, err error) {
baseClient := getKeysClient()
keyClient = newBaseClient2(baseClient)
err = nil
return
}
func GetActiveKeysVersion(ctx context.Context) (finalResult []azKeyvault.KeyBundle, currentKeyBundle azKeyvault.KeyBundle, err error) {
keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl") //"https://P7KeyValut.vault.azure.net/"
keyIdentifier := viper.GetString("keyVault.KeyIdentifier") //"P7IdentityServer4SelfSigned"
//keyClient := getKeysClient()
baseClient2, err := GetKeyClient()
if err != nil {
return
}
finalResult, currentKeyBundle, err = baseClient2.GetActiveKeysVersion2(ctx, keyVaultUrl, keyIdentifier)
return
}
// CreateKeyBundle creates a key in the specified keyvault
func CreateKey(ctx context.Context, vaultName, keyName string) (key azKeyvault.KeyBundle, err error) {
vaultsClient := getVaultsClient()
vault, err := vaultsClient.Get(ctx, config.BaseGroupName(), vaultName)
if err != nil {
return
}
vaultURL := *vault.Properties.VaultURI
keyClient := getKeysClient()
return keyClient.CreateKey(
ctx,
vaultURL,
keyName,
azKeyvault.KeyCreateParameters{
KeyAttributes: &azKeyvault.KeyAttributes{
Enabled: to.BoolPtr(true),
},
KeySize: to.Int32Ptr(2048), // As of writing this sample, 2048 is the only supported KeySize.
KeyOps: &[]azKeyvault.JSONWebKeyOperation{
azKeyvault.Encrypt,
azKeyvault.Decrypt,
},
Kty: azKeyvault.EC,
})
}
func fixE(base64EncodedE string) string {
sDec, _ := b64.StdEncoding.DecodeString(base64EncodedE)
sDec = forceByteArrayLength(sDec, 4)
sEnc := b64.StdEncoding.EncodeToString(sDec)
parts := strings.Split(sEnc, "=")
sEnc = parts[0]
return sEnc
}
func forceByteArrayLength(slice []byte, requireLength int) []byte {
n := len(slice)
if n >= requireLength {
return slice
}
newSlice := make([]byte, requireLength)
offset := requireLength - n
copy(newSlice[offset:], slice)
slice = newSlice
return slice
}
type CachedKeyVersions struct {
CurrentKeyBundle azKeyvault.KeyBundle
CurrentVersionId string
WellKnownOpenidConfigurationJwksResponse renderings.WellKnownOpenidConfigurationJwksResponse
}
func GetCachedKeyVersions() (cachedResponse CachedKeyVersions, err error) | {
var cachedItem interface{}
var found bool
cachedItem, found = cache.Get(cacheKey)
if !found {
err = DoKeyvaultBackground()
if err != nil {
log.Fatalf("failed to DoKeyvaultBackground: %v\n", err.Error())
return
}
cachedItem, found = cache.Get(cacheKey)
if !found {
err = errors.New("critical failure to DoKeyvaultBackground")
log.Fatalln(err.Error())
return
}
} | identifier_body |
|
key.go |
Claims jwt.Claims
}
type BaseClient2 struct {
azKeyvault.BaseClient
}
func newBaseClient2(base azKeyvault.BaseClient) BaseClient2 {
return BaseClient2{
BaseClient: base,
}
}
func getKeysClient() azKeyvault.BaseClient {
keyClient := azKeyvault.New()
a, _ := iam.GetKeyvaultAuthorizer()
keyClient.Authorizer = a
keyClient.AddToUserAgent(config.UserAgent())
return keyClient
}
func GetSecret(name string) (result keyvault.SecretBundle, err error) {
ctx := context.Background()
keyClient := getKeysClient()
return keyClient.GetSecret(ctx, "https://P7KeyValut.vault.azure.net/", name, "")
}
func GetKeysVersion(ctx context.Context) (result azKeyvault.KeyListResultPage, err error) {
keyClient := getKeysClient()
var maxResults int32 = 10
result, err = keyClient.GetKeyVersions(ctx, "https://P7KeyValut.vault.azure.net/", "P7IdentityServer4SelfSigned", &maxResults)
return
}
func MintToken(c echo.Context, tokenBuildRequest *TokenBuildRequest) (token string, err error) {
cachedItem, err := GetCachedKeyVersions()
if err != nil {
return
}
baseUrl := util.GetBaseUrl(c)
utcNow := time.Now().UTC().Truncate(time.Second)
if tokenBuildRequest.UtcNotBefore == nil {
tokenBuildRequest.UtcNotBefore = &utcNow
}
tokenBuildRequest.Claims.Issued = jwt.NewNumericTime(utcNow)
tokenBuildRequest.Claims.NotBefore = jwt.NewNumericTime(*tokenBuildRequest.UtcNotBefore)
if tokenBuildRequest.UtcExpires == nil {
tokenBuildRequest.Claims.Expires = nil
} else {
tokenBuildRequest.Claims.Expires = jwt.NewNumericTime(*tokenBuildRequest.UtcExpires)
}
tokenBuildRequest.Claims.KeyID = cachedItem.CurrentVersionId
tokenBuildRequest.Claims.Issuer = baseUrl
if tokenBuildRequest.Claims.Audiences == nil {
tokenBuildRequest.Claims.Audiences = []string{}
}
tokenBuildRequest.Claims.Audiences = append(tokenBuildRequest.Claims.Audiences, tokenBuildRequest.Claims.Issuer)
keyClient, err := GetKeyClient()
if err != nil {
return
}
keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl") //"https://P7KeyValut.vault.azure.net/"
keyIdentifier := viper.GetString("keyVault.KeyIdentifier") //"P7IdentityServer4SelfSigned"
ctx := context.Background()
byteToken, err := keyClient.Sign2(ctx, &tokenBuildRequest.Claims, azKeyvault.RS256, keyVaultUrl, keyIdentifier, tokenBuildRequest.Claims.KeyID)
if err != nil {
return
}
token = string(byteToken)
return
}
func RSA256AzureSign(ctx context.Context, data []byte) (kid *string, signature *string, err error) {
keyClient := getKeysClient()
sEnc := util.ByteArraySha256Encode64(data)
keyOperationResult, err := keyClient.Sign(ctx, "https://P7KeyValut.vault.azure.net/", "P7IdentityServer4SelfSigned", "", keyvault.KeySignParameters{
Algorithm: azKeyvault.RS256,
Value: &sEnc,
})
if err != nil {
return
}
return keyOperationResult.Kid, keyOperationResult.Result, nil
}
func (keyClient *BaseClient2) Sign2(
ctx context.Context,
claims *jwt.Claims,
alg azKeyvault.JSONWebKeySignatureAlgorithm,
vaultBaseURL string,
keyName string,
keyVersion string) (token []byte, err error) {
tokenWithoutSignature, err := claims.FormatWithoutSign(string(alg))
if err != nil {
return nil, err
}
sEnc := util.ByteArraySha256Encode64(tokenWithoutSignature)
keyOperationResult, err := keyClient.Sign(ctx, vaultBaseURL, keyName, keyVersion, azKeyvault.KeySignParameters{
Algorithm: alg,
Value: &sEnc,
})
if err != nil {
return
}
token = append(tokenWithoutSignature, '.')
token = append(token, []byte(*keyOperationResult.Result)...)
return token, nil
}
func (keyClient *BaseClient2) GetActiveKeysVersion2(ctx context.Context, keyVaultUrl string, keyIdentifier string) (finalResult []azKeyvault.KeyBundle, currentKeyBundle azKeyvault.KeyBundle, err error) {
keyId := "123"
claims := jwt.Claims{
Registered: jwt.Registered{
Subject: "kkazanova",
Audiences: []string{"KGB", "RU"},
},
Set: map[string]interface{}{
"iss": nil,
"sub": "karcher",
"aud": "ISIS",
},
KeyID: keyId,
}
token, _ := keyClient.Sign2(ctx, &claims, azKeyvault.RS256, keyVaultUrl, keyIdentifier, keyId)
sToken := string(token)
fmt.Println(sToken)
// Length requirements defined by 2.2.2.9.1 RSA Private Key BLOB (https://msdn.microsoft.com/en-us/library/cc250013.aspx).
/*
PubExp (4 bytes): Length MUST be 4 bytes.
This field MUST be present as an unsigned integer in little-endian format.
The value of this field MUST be the RSA public key exponent for this key. The client SHOULD set this value to 65,537.
E is comming back as an Base64Url encoded byte[] of size 3.
*/
var maxResults int32 = 10
pageResult, err := keyClient.GetKeyVersions(ctx,
keyVaultUrl,
keyIdentifier,
&maxResults)
if err != nil {
return
}
utcNow := time.Now().UTC()
for {
for _, element := range pageResult.Values() {
// element is the element from someSlice for where we are
if *element.Attributes.Enabled {
var keyExpire time.Time
keyExpire = time.Time(*element.Attributes.Expires)
if keyExpire.After(utcNow) {
parts := strings.Split(*element.Kid, "/")
lastItemVersion := parts[len(parts)-1]
keyBundle, er := keyClient.GetKey(ctx,
keyVaultUrl,
keyIdentifier,
lastItemVersion)
if er != nil {
err = er
return
}
fixedE := fixE(*keyBundle.Key.E)
*keyBundle.Key.E = fixedE
finalResult = append(finalResult, keyBundle)
}
}
}
if !pageResult.NotDone() {
break
}
err = pageResult.Next()
if err != nil {
return
}
}
sort.Slice(finalResult[:], func(i, j int) bool {
notBeforeA := time.Time(*finalResult[i].Attributes.NotBefore)
notBeforeB := time.Time(*finalResult[j].Attributes.NotBefore)
return notBeforeA.After(notBeforeB)
})
for _, element := range finalResult {
notVBefore := time.Time(*element.Attributes.NotBefore)
if notVBefore.Before(utcNow) {
currentKeyBundle = element
break
}
}
return
}
func GetKeyClient() (keyClient BaseClient2, err error) {
baseClient := getKeysClient()
keyClient = newBaseClient2(baseClient)
err = nil
return
}
func GetActiveKeysVersion(ctx context.Context) (finalResult []azKeyvault.KeyBundle, currentKeyBundle azKeyvault.KeyBundle, err error) {
keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl") //"https://P7KeyValut.vault.azure.net/"
keyIdentifier := viper.GetString("keyVault.KeyIdentifier") //"P7IdentityServer4SelfSigned"
//keyClient := getKeysClient()
baseClient2, err := GetKeyClient()
if err != nil {
return
}
finalResult, currentKeyBundle, err = baseClient2.GetActiveKeysVersion2(ctx, keyVaultUrl, keyIdentifier)
return
}
// CreateKeyBundle creates a key in the specified keyvault
func CreateKey(ctx context.Context, vaultName, keyName string) (key azKeyvault.KeyBundle, err error) {
vaultsClient := getVaultsClient()
vault, err := vaultsClient.Get(ctx, config.BaseGroupName(), vaultName)
if err != nil {
return
}
vaultURL := *vault.Properties.VaultURI
keyClient := getKeysClient()
return keyClient.CreateKey(
ctx,
vaultURL,
keyName,
azKeyvault.KeyCreateParameters{
KeyAttributes: &azKeyvault.KeyAttributes{
Enabled: to.BoolPtr(true),
},
KeySize: to.Int32Ptr(2048), // As of writing this sample, 2048 is the only supported KeySize.
KeyOps: &[]azKeyvault.JSONWebKeyOperation{
azKeyvault.Encrypt,
azKeyvault.Decrypt,
},
Kty: azKeyvault.EC,
})
}
func | (base64EncodedE string) string {
sDec, _ := b64.StdEncoding.DecodeString(base64Encoded | fixE | identifier_name |
key.go |
Claims jwt.Claims
}
type BaseClient2 struct {
azKeyvault.BaseClient
}
func newBaseClient2(base azKeyvault.BaseClient) BaseClient2 {
return BaseClient2{
BaseClient: base,
}
}
func getKeysClient() azKeyvault.BaseClient {
keyClient := azKeyvault.New()
a, _ := iam.GetKeyvaultAuthorizer()
keyClient.Authorizer = a
keyClient.AddToUserAgent(config.UserAgent())
return keyClient
}
func GetSecret(name string) (result keyvault.SecretBundle, err error) {
ctx := context.Background()
keyClient := getKeysClient()
return keyClient.GetSecret(ctx, "https://P7KeyValut.vault.azure.net/", name, "")
}
func GetKeysVersion(ctx context.Context) (result azKeyvault.KeyListResultPage, err error) {
keyClient := getKeysClient()
var maxResults int32 = 10
result, err = keyClient.GetKeyVersions(ctx, "https://P7KeyValut.vault.azure.net/", "P7IdentityServer4SelfSigned", &maxResults)
return
}
func MintToken(c echo.Context, tokenBuildRequest *TokenBuildRequest) (token string, err error) {
cachedItem, err := GetCachedKeyVersions()
if err != nil {
return
}
baseUrl := util.GetBaseUrl(c)
utcNow := time.Now().UTC().Truncate(time.Second)
if tokenBuildRequest.UtcNotBefore == nil {
tokenBuildRequest.UtcNotBefore = &utcNow
}
tokenBuildRequest.Claims.Issued = jwt.NewNumericTime(utcNow)
tokenBuildRequest.Claims.NotBefore = jwt.NewNumericTime(*tokenBuildRequest.UtcNotBefore)
if tokenBuildRequest.UtcExpires == nil {
tokenBuildRequest.Claims.Expires = nil
} else {
tokenBuildRequest.Claims.Expires = jwt.NewNumericTime(*tokenBuildRequest.UtcExpires)
}
tokenBuildRequest.Claims.KeyID = cachedItem.CurrentVersionId
tokenBuildRequest.Claims.Issuer = baseUrl
if tokenBuildRequest.Claims.Audiences == nil {
tokenBuildRequest.Claims.Audiences = []string{}
}
tokenBuildRequest.Claims.Audiences = append(tokenBuildRequest.Claims.Audiences, tokenBuildRequest.Claims.Issuer)
keyClient, err := GetKeyClient()
if err != nil {
return
}
keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl") //"https://P7KeyValut.vault.azure.net/"
keyIdentifier := viper.GetString("keyVault.KeyIdentifier") //"P7IdentityServer4SelfSigned"
ctx := context.Background()
byteToken, err := keyClient.Sign2(ctx, &tokenBuildRequest.Claims, azKeyvault.RS256, keyVaultUrl, keyIdentifier, tokenBuildRequest.Claims.KeyID)
if err != nil {
return
}
token = string(byteToken)
return
}
func RSA256AzureSign(ctx context.Context, data []byte) (kid *string, signature *string, err error) {
keyClient := getKeysClient()
sEnc := util.ByteArraySha256Encode64(data)
keyOperationResult, err := keyClient.Sign(ctx, "https://P7KeyValut.vault.azure.net/", "P7IdentityServer4SelfSigned", "", keyvault.KeySignParameters{
Algorithm: azKeyvault.RS256,
Value: &sEnc,
})
if err != nil {
return
}
return keyOperationResult.Kid, keyOperationResult.Result, nil
}
func (keyClient *BaseClient2) Sign2(
ctx context.Context,
claims *jwt.Claims,
alg azKeyvault.JSONWebKeySignatureAlgorithm,
vaultBaseURL string,
keyName string,
keyVersion string) (token []byte, err error) {
tokenWithoutSignature, err := claims.FormatWithoutSign(string(alg))
if err != nil {
return nil, err
}
sEnc := util.ByteArraySha256Encode64(tokenWithoutSignature)
keyOperationResult, err := keyClient.Sign(ctx, vaultBaseURL, keyName, keyVersion, azKeyvault.KeySignParameters{
Algorithm: alg,
Value: &sEnc,
})
if err != nil {
return
}
token = append(tokenWithoutSignature, '.')
token = append(token, []byte(*keyOperationResult.Result)...)
return token, nil
}
func (keyClient *BaseClient2) GetActiveKeysVersion2(ctx context.Context, keyVaultUrl string, keyIdentifier string) (finalResult []azKeyvault.KeyBundle, currentKeyBundle azKeyvault.KeyBundle, err error) {
keyId := "123"
claims := jwt.Claims{
Registered: jwt.Registered{
Subject: "kkazanova",
Audiences: []string{"KGB", "RU"},
},
Set: map[string]interface{}{
"iss": nil,
"sub": "karcher",
"aud": "ISIS",
},
KeyID: keyId,
}
token, _ := keyClient.Sign2(ctx, &claims, azKeyvault.RS256, keyVaultUrl, keyIdentifier, keyId)
sToken := string(token)
fmt.Println(sToken)
// Length requirements defined by 2.2.2.9.1 RSA Private Key BLOB (https://msdn.microsoft.com/en-us/library/cc250013.aspx).
/*
PubExp (4 bytes): Length MUST be 4 bytes.
This field MUST be present as an unsigned integer in little-endian format.
The value of this field MUST be the RSA public key exponent for this key. The client SHOULD set this value to 65,537.
E is comming back as an Base64Url encoded byte[] of size 3.
*/
var maxResults int32 = 10
pageResult, err := keyClient.GetKeyVersions(ctx,
keyVaultUrl,
keyIdentifier,
&maxResults)
if err != nil {
return
}
utcNow := time.Now().UTC()
for {
for _, element := range pageResult.Values() {
// element is the element from someSlice for where we are
if *element.Attributes.Enabled {
var keyExpire time.Time
keyExpire = time.Time(*element.Attributes.Expires)
if keyExpire.After(utcNow) {
parts := strings.Split(*element.Kid, "/")
lastItemVersion := parts[len(parts)-1]
keyBundle, er := keyClient.GetKey(ctx,
keyVaultUrl,
keyIdentifier,
lastItemVersion)
if er != nil {
err = er
return
}
fixedE := fixE(*keyBundle.Key.E)
*keyBundle.Key.E = fixedE
finalResult = append(finalResult, keyBundle)
}
}
}
if !pageResult.NotDone() {
break
}
err = pageResult.Next()
if err != nil {
return
}
}
sort.Slice(finalResult[:], func(i, j int) bool {
notBeforeA := time.Time(*finalResult[i].Attributes.NotBefore)
notBeforeB := time.Time(*finalResult[j].Attributes.NotBefore)
return notBeforeA.After(notBeforeB)
})
for _, element := range finalResult {
notVBefore := time.Time(*element.Attributes.NotBefore)
if notVBefore.Before(utcNow) {
currentKeyBundle = element
break
}
}
return
}
func GetKeyClient() (keyClient BaseClient2, err error) {
baseClient := getKeysClient()
keyClient = newBaseClient2(baseClient)
err = nil
return
}
func GetActiveKeysVersion(ctx context.Context) (finalResult []azKeyvault.KeyBundle, currentKeyBundle azKeyvault.KeyBundle, err error) {
keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl") //"https://P7KeyValut.vault.azure.net/"
keyIdentifier := viper.GetString("keyVault.KeyIdentifier") //"P7IdentityServer4SelfSigned"
//keyClient := getKeysClient()
baseClient2, err := GetKeyClient()
if err != nil {
return
}
finalResult, currentKeyBundle, err = baseClient2.GetActiveKeysVersion2(ctx, keyVaultUrl, keyIdentifier)
return
}
// CreateKeyBundle creates a key in the specified keyvault
func CreateKey(ctx context.Context, vaultName, keyName string) (key azKeyvault.KeyBundle, err error) {
vaultsClient := getVaultsClient()
vault, err := vaultsClient.Get(ctx, config.BaseGroupName(), vaultName)
if err != nil {
return
}
vaultURL := *vault.Properties.VaultURI
keyClient := getKeysClient()
return keyClient.CreateKey(
ctx,
vaultURL,
keyName,
azKeyvault.KeyCreateParameters{
KeyAttributes: &azKeyvault.KeyAttributes{ | azKeyvault.Encrypt,
azKeyvault.Decrypt,
},
Kty: azKeyvault.EC,
})
}
func fixE(base64EncodedE string) string {
sDec, _ := b64.StdEncoding.DecodeString(base64EncodedE | Enabled: to.BoolPtr(true),
},
KeySize: to.Int32Ptr(2048), // As of writing this sample, 2048 is the only supported KeySize.
KeyOps: &[]azKeyvault.JSONWebKeyOperation{ | random_line_split |
key.go | Claims jwt.Claims
}
// BaseClient2 wraps azKeyvault.BaseClient so that extra behavior
// (Sign2, GetActiveKeysVersion2) can be attached as methods in this package.
type BaseClient2 struct {
	azKeyvault.BaseClient
}
// newBaseClient2 wraps the given key vault base client in a BaseClient2.
func newBaseClient2(base azKeyvault.BaseClient) BaseClient2 {
	return BaseClient2{BaseClient: base}
}
// getKeysClient builds an azKeyvault.BaseClient authorized through the local
// iam helper and tagged with this application's user agent string.
func getKeysClient() azKeyvault.BaseClient {
	keyClient := azKeyvault.New()
	// NOTE(review): the authorizer error is discarded; on failure
	// keyClient.Authorizer stays nil and every subsequent vault call will
	// fail — confirm this is acceptable or surface the error to callers.
	a, _ := iam.GetKeyvaultAuthorizer()
	keyClient.Authorizer = a
	// NOTE(review): AddToUserAgent returns an error that is ignored here.
	keyClient.AddToUserAgent(config.UserAgent())
	return keyClient
}
// GetSecret fetches the latest version of the named secret from the key vault.
//
// The vault URL is read from configuration ("keyVault.KeyVaultUrl"), matching
// the pattern used by MintToken and GetActiveKeysVersion, instead of the
// previously hard-coded "https://P7KeyValut.vault.azure.net/".
func GetSecret(name string) (result keyvault.SecretBundle, err error) {
	ctx := context.Background()
	keyClient := getKeysClient()
	keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl") //"https://P7KeyValut.vault.azure.net/"
	return keyClient.GetSecret(ctx, keyVaultUrl, name, "")
}
// GetKeysVersion lists up to 10 versions of the configured key vault key.
//
// The vault URL and key identifier are read from configuration
// ("keyVault.KeyVaultUrl" / "keyVault.KeyIdentifier"), matching the rest of
// this file, instead of the previously hard-coded values.
func GetKeysVersion(ctx context.Context) (result azKeyvault.KeyListResultPage, err error) {
	keyClient := getKeysClient()
	keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl")       //"https://P7KeyValut.vault.azure.net/"
	keyIdentifier := viper.GetString("keyVault.KeyIdentifier")   //"P7IdentityServer4SelfSigned"
	var maxResults int32 = 10
	result, err = keyClient.GetKeyVersions(ctx, keyVaultUrl, keyIdentifier, &maxResults)
	return
}
// MintToken builds and signs a JWT from tokenBuildRequest using the current
// key vault signing key version, returning the compact-serialized token.
//
// Claim handling: Issued is set to now (UTC, truncated to whole seconds);
// NotBefore defaults to now when UtcNotBefore is nil; Expires is cleared when
// UtcExpires is nil. The "kid" is taken from the cached current key version,
// the issuer is derived from the request's base URL, and the issuer is always
// appended to the audience list. Signing uses RS256 via Sign2.
func MintToken(c echo.Context, tokenBuildRequest *TokenBuildRequest) (token string, err error) {
	// Cached item supplies CurrentVersionId, used below as the JWT "kid".
	cachedItem, err := GetCachedKeyVersions()
	if err != nil {
		return
	}
	baseUrl := util.GetBaseUrl(c)
	// Truncate to seconds so NumericDate claims carry no sub-second noise.
	utcNow := time.Now().UTC().Truncate(time.Second)
	if tokenBuildRequest.UtcNotBefore == nil {
		tokenBuildRequest.UtcNotBefore = &utcNow
	}
	tokenBuildRequest.Claims.Issued = jwt.NewNumericTime(utcNow)
	tokenBuildRequest.Claims.NotBefore = jwt.NewNumericTime(*tokenBuildRequest.UtcNotBefore)
	if tokenBuildRequest.UtcExpires == nil {
		tokenBuildRequest.Claims.Expires = nil
	} else {
		tokenBuildRequest.Claims.Expires = jwt.NewNumericTime(*tokenBuildRequest.UtcExpires)
	}
	tokenBuildRequest.Claims.KeyID = cachedItem.CurrentVersionId
	tokenBuildRequest.Claims.Issuer = baseUrl
	// Ensure the audiences slice exists before appending the issuer.
	if tokenBuildRequest.Claims.Audiences == nil {
		tokenBuildRequest.Claims.Audiences = []string{}
	}
	tokenBuildRequest.Claims.Audiences = append(tokenBuildRequest.Claims.Audiences, tokenBuildRequest.Claims.Issuer)
	keyClient, err := GetKeyClient()
	if err != nil {
		return
	}
	keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl") //"https://P7KeyValut.vault.azure.net/"
	keyIdentifier := viper.GetString("keyVault.KeyIdentifier") //"P7IdentityServer4SelfSigned"
	ctx := context.Background()
	byteToken, err := keyClient.Sign2(ctx, &tokenBuildRequest.Claims, azKeyvault.RS256, keyVaultUrl, keyIdentifier, tokenBuildRequest.Claims.KeyID)
	if err != nil {
		return
	}
	token = string(byteToken)
	return
}
// RSA256AzureSign signs the base64-encoded SHA-256 digest of data with the
// configured key vault key (latest version) using RS256, returning the key id
// and the signature produced by Key Vault.
//
// The vault URL and key identifier are read from configuration, consistent
// with MintToken and GetActiveKeysVersion, instead of the previously
// hard-coded values.
func RSA256AzureSign(ctx context.Context, data []byte) (kid *string, signature *string, err error) {
	keyClient := getKeysClient()
	sEnc := util.ByteArraySha256Encode64(data)
	keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl")     //"https://P7KeyValut.vault.azure.net/"
	keyIdentifier := viper.GetString("keyVault.KeyIdentifier") //"P7IdentityServer4SelfSigned"
	keyOperationResult, err := keyClient.Sign(ctx, keyVaultUrl, keyIdentifier, "", keyvault.KeySignParameters{
		Algorithm: azKeyvault.RS256,
		Value:     &sEnc,
	})
	if err != nil {
		return
	}
	return keyOperationResult.Kid, keyOperationResult.Result, nil
}
// Sign2 signs the supplied claims with the named key vault key and returns the
// complete compact-serialized JWT ("header.payload.signature").
//
// The claims are first rendered as the unsigned "header.payload" portion for
// the given algorithm; the base64-encoded SHA-256 digest of that portion is
// sent to Key Vault for signing, and the returned signature is appended after
// a '.' separator to complete the token.
func (keyClient *BaseClient2) Sign2(
	ctx context.Context,
	claims *jwt.Claims,
	alg azKeyvault.JSONWebKeySignatureAlgorithm,
	vaultBaseURL string,
	keyName string,
	keyVersion string) (token []byte, err error) {
	tokenWithoutSignature, err := claims.FormatWithoutSign(string(alg))
	if err != nil {
		return nil, err
	}
	// Key Vault signs a digest, not the raw token bytes.
	sEnc := util.ByteArraySha256Encode64(tokenWithoutSignature)
	keyOperationResult, err := keyClient.Sign(ctx, vaultBaseURL, keyName, keyVersion, azKeyvault.KeySignParameters{
		Algorithm: alg,
		Value:     &sEnc,
	})
	if err != nil {
		return
	}
	// Assemble the compact JWS: unsigned part + '.' + signature.
	token = append(tokenWithoutSignature, '.')
	token = append(token, []byte(*keyOperationResult.Result)...)
	return token, nil
}
// GetActiveKeysVersion2 returns every enabled, unexpired version of the named
// key vault key — with each bundle's RSA public exponent E normalized to a
// 4-byte value via fixE — sorted newest NotBefore first, together with the
// currently active version: the newest one whose NotBefore is already in the
// past.
func (keyClient *BaseClient2) GetActiveKeysVersion2(ctx context.Context, keyVaultUrl string, keyIdentifier string) (finalResult []azKeyvault.KeyBundle, currentKeyBundle azKeyvault.KeyBundle, err error) {
	// NOTE(review): the block from keyId down to fmt.Println looks like
	// leftover debug code — it signs a dummy token with hard-coded claims on
	// every call (an extra network round-trip), discards the Sign2 error, and
	// prints the token to stdout. Consider removing it.
	keyId := "123"
	claims := jwt.Claims{
		Registered: jwt.Registered{
			Subject:   "kkazanova",
			Audiences: []string{"KGB", "RU"},
		},
		Set: map[string]interface{}{
			"iss": nil,
			"sub": "karcher",
			"aud": "ISIS",
		},
		KeyID: keyId,
	}
	token, _ := keyClient.Sign2(ctx, &claims, azKeyvault.RS256, keyVaultUrl, keyIdentifier, keyId)
	sToken := string(token)
	fmt.Println(sToken)
	// Length requirements defined by 2.2.2.9.1 RSA Private Key BLOB (https://msdn.microsoft.com/en-us/library/cc250013.aspx).
	/*
		PubExp (4 bytes): Length MUST be 4 bytes.
		This field MUST be present as an unsigned integer in little-endian format.
		The value of this field MUST be the RSA public key exponent for this key. The client SHOULD set this value to 65,537.
		E is comming back as an Base64Url encoded byte[] of size 3.
	*/
	var maxResults int32 = 10
	pageResult, err := keyClient.GetKeyVersions(ctx,
		keyVaultUrl,
		keyIdentifier,
		&maxResults)
	if err != nil {
		return
	}
	utcNow := time.Now().UTC()
	// Walk every page of key versions, keeping only enabled, unexpired ones.
	for {
		for _, element := range pageResult.Values() {
			if *element.Attributes.Enabled {
				var keyExpire time.Time
				keyExpire = time.Time(*element.Attributes.Expires)
				if keyExpire.After(utcNow) {
					// The version id is the last path segment of the key id URL.
					parts := strings.Split(*element.Kid, "/")
					lastItemVersion := parts[len(parts)-1]
					keyBundle, er := keyClient.GetKey(ctx,
						keyVaultUrl,
						keyIdentifier,
						lastItemVersion)
					if er != nil {
						err = er
						return
					}
					// Normalize E to 4 bytes (Key Vault returns a 3-byte value).
					fixedE := fixE(*keyBundle.Key.E)
					*keyBundle.Key.E = fixedE
					finalResult = append(finalResult, keyBundle)
				}
			}
		}
		if !pageResult.NotDone() {
			break
		}
		err = pageResult.Next()
		if err != nil {
			return
		}
	}
	// Newest NotBefore first.
	sort.Slice(finalResult[:], func(i, j int) bool {
		notBeforeA := time.Time(*finalResult[i].Attributes.NotBefore)
		notBeforeB := time.Time(*finalResult[j].Attributes.NotBefore)
		return notBeforeA.After(notBeforeB)
	})
	// The active version is the newest one already valid (NotBefore in the past).
	for _, element := range finalResult {
		notVBefore := time.Time(*element.Attributes.NotBefore)
		if notVBefore.Before(utcNow) {
			currentKeyBundle = element
			break
		}
	}
	return
}
func GetKeyClient() (keyClient BaseClient2, err error) {
baseClient := getKeysClient()
keyClient = newBaseClient2(baseClient)
err = nil
return
}
func GetActiveKeysVersion(ctx context.Context) (finalResult []azKeyvault.KeyBundle, currentKeyBundle azKeyvault.KeyBundle, err error) {
keyVaultUrl := viper.GetString("keyVault.KeyVaultUrl") //"https://P7KeyValut.vault.azure.net/"
keyIdentifier := viper.GetString("keyVault.KeyIdentifier") //"P7IdentityServer4SelfSigned"
//keyClient := getKeysClient()
baseClient2, err := GetKeyClient()
if err != nil {
return
}
finalResult, currentKeyBundle, err = baseClient2.GetActiveKeysVersion2(ctx, keyVaultUrl, keyIdentifier)
return
}
// CreateKeyBundle creates a key in the specified keyvault
func CreateKey(ctx context.Context, vaultName, keyName string) (key azKeyvault.KeyBundle, err error) {
vaultsClient := getVaultsClient()
vault, err := vaultsClient.Get(ctx, config.BaseGroupName(), vaultName)
if err != nil {
return
}
vaultURL := *vault.Properties.VaultURI
keyClient := getKeysClient()
return keyClient.CreateKey(
ctx,
vaultURL,
keyName,
azKeyvault.KeyCreateParameters{
KeyAttributes: &azKeyvault.KeyAttributes{
Enabled: to.BoolPtr(true),
},
KeySize: to.Int32Ptr(2048), // As of writing this sample, 2048 is the only supported KeySize.
KeyOps: &[]azKeyvault.JSONWebKeyOperation{
azKeyvault.Encrypt,
azKeyvault.Decrypt,
},
Kty: azKeyvault.EC,
})
}
func fixE(base64EncodedE string) string {
sDec, _ := b64.StdEncoding.DecodeString(base64Encoded | {
parts := strings.Split(*element.Kid, "/")
lastItemVersion := parts[len(parts)-1]
keyBundle, er := keyClient.GetKey(ctx,
keyVaultUrl,
keyIdentifier,
lastItemVersion)
if er != nil {
err = er
return
}
fixedE := fixE(*keyBundle.Key.E)
*keyBundle.Key.E = fixedE
finalResult = append(finalResult, keyBundle)
} | conditional_block |
layout_rope.rs | but we might add more stuff.
pub struct Layout(PietTextLayout);
#[derive(Clone, Default)]
pub struct LayoutRope(Node<LayoutInfo>);
pub struct LayoutRopeBuilder(TreeBuilder<LayoutInfo>);
/// The height metric of the rope, which is in raw Height fractions.
struct HeightMetric;
/// The base metric of the rope, which just counts the number of layouts.
pub struct BaseMetric;
// This technically doesn't have to be newtyped, we could impl leaf on
// the Vec directly, but this feels cleaner.
#[derive(Clone, Default)]
struct LayoutLeaf {
data: Vec<(Height, Arc<Layout>)>,
}
#[derive(Clone)]
struct LayoutInfo {
/// The height of this section of rope.
height: Height,
}
impl std::ops::Add for Height {
type Output = Self;
fn add(self, other: Self) -> Self {
Height(self.0 + other.0)
}
}
impl std::ops::AddAssign for Height {
fn add_assign(&mut self, other: Self) {
self.0 += other.0
}
}
impl Height {
/// The number of fractional bits in the representation.
pub const HEIGHT_FRAC_BITS: usize = 8;
/// The scale factor for converting from `f64`.
pub const SCALE_FACTOR: f64 = (1 << Self::HEIGHT_FRAC_BITS) as f64;
pub const ZERO: Height = Height(0);
pub fn from_raw_frac(frac: usize) -> Height {
Height(frac)
}
pub fn as_raw_frac(self) -> usize {
self.0
}
pub fn from_f64(height: f64) -> Height {
Height((height * Self::SCALE_FACTOR).round() as usize)
}
pub fn to_f64(self) -> f64 {
self.0 as f64 / Self::SCALE_FACTOR
}
}
impl Layout {
pub fn new(inner: PietTextLayout) -> Layout {
Layout(inner)
}
pub fn piet_layout(&self) -> &PietTextLayout {
&self.0
}
pub fn height(&self) -> Height {
let size = self.0.size();
Height::from_f64(size.height)
}
}
impl NodeInfo for LayoutInfo {
type L = LayoutLeaf;
fn accumulate(&mut self, other: &Self) {
self.height += other.height;
}
fn compute_info(leaf: &Self::L) -> Self {
let mut height = Height::ZERO;
for (leaf_height, _) in &leaf.data {
height += *leaf_height;
}
LayoutInfo { height }
}
}
impl DefaultMetric for LayoutInfo {
type DefaultMetric = BaseMetric;
}
const MIN_LEAF: usize = 16;
const MAX_LEAF: usize = 32;
impl Leaf for LayoutLeaf {
fn len(&self) -> usize {
self.data.len()
}
fn is_ok_child(&self) -> bool {
self.data.len() >= MIN_LEAF
}
fn push_maybe_split(&mut self, other: &Self, iv: Interval) -> Option<Self> {
let (start, end) = iv.start_end();
self.data.extend_from_slice(&other.data[start..end]);
if self.len() <= MAX_LEAF {
None
} else {
let splitpoint = self.len() / 2;
let right_vec = self.data.split_off(splitpoint);
Some(LayoutLeaf { data: right_vec })
}
}
}
impl From<Vec<(Height, Arc<Layout>)>> for LayoutRope {
fn from(v: Vec<(Height, Arc<Layout>)>) -> Self {
LayoutRope(Node::from_leaf(LayoutLeaf { data: v }))
}
}
impl LayoutRope {
/// The number of layouts in the rope.
pub fn len(&self) -> usize {
self.0.len()
}
/// The total height of the rope.
pub fn height(&self) -> Height {
Height::from_raw_frac(self.0.measure::<HeightMetric>())
}
/// A rope consisting of a single layout.
pub fn singleton(item: Layout) -> LayoutRope {
LayoutRope(Node::from_leaf(Self::singleton_leaf(item)))
}
fn singleton_leaf(item: Layout) -> LayoutLeaf {
let height = item.height();
LayoutLeaf {
data: vec![(height, Arc::new(item))],
}
}
pub fn get(&self, index: usize) -> Option<(Height, &Layout)> {
let cursor = Cursor::new(&self.0, index);
cursor
.get_leaf()
.and_then(|(leaf, offset)| leaf.data.get(offset))
.map(|(height, layout)| (*height, &**layout))
}
// These mutation methods might go away in favor of using the builder.
pub fn push(&mut self, item: Layout) {
let el = Self::singleton(item);
// This could be optimized more.
self.0 = Node::concat(self.0.clone(), el.0)
}
pub fn | (&mut self, index: usize) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn set(&mut self, index: usize, item: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(item));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn insert(&mut self, index: usize, value: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(value));
self.push_subseq(&mut b, Interval::new(index, self.len()));
self.0 = b.build();
}
fn iter_chunks(&self, range: impl IntervalBounds) -> ChunkIter {
let Interval { start, end } = range.into_interval(self.len());
ChunkIter {
cursor: Cursor::new(&self.0, start),
end,
}
}
/// The height at the top of the layout at the given index.
///
/// This is simply the sum of the heights of the layouts that come before
/// it.
pub fn height_of_index(&self, index: usize) -> Height {
Height::from_raw_frac(self.0.count::<HeightMetric>(index))
}
/// The layout at the given height.
///
/// Edge cases get interesting (especially since zero-height layouts are
/// not forbidden), so here is a more precise spec: it is the first layout
/// that either contains (in the closed-open interval sense) the given
/// height, or is a zero-height layout at the given height.
///
/// If the total height is given and the rope does not end on a zero-height
/// layout, then it returns the number of layouts.
///
/// TODO: is there a simpler way to state that? It seems more complicated
/// than it should be.
pub fn index_of_height(&self, height: Height) -> usize {
self.0
.count_base_units::<HeightMetric>(height.as_raw_frac())
}
fn push_subseq(&self, b: &mut TreeBuilder<LayoutInfo>, iv: Interval) {
// TODO: if we make the push_subseq method in xi-rope public, we can save some
// allocations.
b.push(self.0.subseq(iv));
}
}
impl LayoutRopeBuilder {
pub fn new() -> LayoutRopeBuilder {
LayoutRopeBuilder(TreeBuilder::new())
}
#[allow(unused)]
pub fn push_rope_slice(&mut self, other: &LayoutRope, range: Range<usize>) {
// TODO: use push_subseq method on TreeBuilder when that lands.
self.0.push(other.0.subseq(Interval::from(range)))
}
pub fn push_layout(&mut self, layout: Layout) {
// Maybe move the body of singleton_leaf to here?
self.0.push_leaf(LayoutRope::singleton_leaf(layout))
}
pub fn build(self) -> LayoutRope {
LayoutRope(self.0.build())
}
}
impl<'a> IntoIterator for &'a LayoutRope {
// Maybe `(Height, &'a Layout)` would be better, not to expose the internal
// representation, but it's a bit more work.
type Item = &'a (Height, Arc<Layout>);
type IntoIter = std::iter::Flatten<ChunkIter<'a>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_chunks(..).flatten()
}
}
pub struct ChunkIter<'a> {
cursor: Cursor<'a, LayoutInfo>,
end: usize,
}
impl<'a> Iterator for ChunkIter<'a> {
type Item = &'a [(Height, Arc<Layout>)];
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.pos() >= self.end {
return None;
}
let (leaf | remove | identifier_name |
layout_rope.rs | but we might add more stuff.
pub struct Layout(PietTextLayout);
#[derive(Clone, Default)]
pub struct LayoutRope(Node<LayoutInfo>);
pub struct LayoutRopeBuilder(TreeBuilder<LayoutInfo>);
/// The height metric of the rope, which is in raw Height fractions.
struct HeightMetric;
/// The base metric of the rope, which just counts the number of layouts.
pub struct BaseMetric;
// This technically doesn't have to be newtyped, we could impl leaf on
// the Vec directly, but this feels cleaner.
#[derive(Clone, Default)]
struct LayoutLeaf {
data: Vec<(Height, Arc<Layout>)>,
}
#[derive(Clone)]
struct LayoutInfo {
/// The height of this section of rope.
height: Height,
}
impl std::ops::Add for Height {
type Output = Self;
fn add(self, other: Self) -> Self {
Height(self.0 + other.0)
}
}
impl std::ops::AddAssign for Height {
fn add_assign(&mut self, other: Self) {
self.0 += other.0
}
}
impl Height {
/// The number of fractional bits in the representation.
pub const HEIGHT_FRAC_BITS: usize = 8;
/// The scale factor for converting from `f64`.
pub const SCALE_FACTOR: f64 = (1 << Self::HEIGHT_FRAC_BITS) as f64;
pub const ZERO: Height = Height(0);
pub fn from_raw_frac(frac: usize) -> Height {
Height(frac)
}
pub fn as_raw_frac(self) -> usize {
self.0
}
pub fn from_f64(height: f64) -> Height {
Height((height * Self::SCALE_FACTOR).round() as usize)
}
pub fn to_f64(self) -> f64 {
self.0 as f64 / Self::SCALE_FACTOR
}
}
impl Layout {
pub fn new(inner: PietTextLayout) -> Layout {
Layout(inner)
}
pub fn piet_layout(&self) -> &PietTextLayout {
&self.0
}
pub fn height(&self) -> Height {
let size = self.0.size();
Height::from_f64(size.height)
}
}
impl NodeInfo for LayoutInfo {
type L = LayoutLeaf;
fn accumulate(&mut self, other: &Self) {
self.height += other.height;
}
fn compute_info(leaf: &Self::L) -> Self {
let mut height = Height::ZERO;
for (leaf_height, _) in &leaf.data {
height += *leaf_height;
}
LayoutInfo { height }
}
}
impl DefaultMetric for LayoutInfo {
type DefaultMetric = BaseMetric;
}
const MIN_LEAF: usize = 16;
const MAX_LEAF: usize = 32;
impl Leaf for LayoutLeaf {
fn len(&self) -> usize {
self.data.len()
}
fn is_ok_child(&self) -> bool {
self.data.len() >= MIN_LEAF
}
fn push_maybe_split(&mut self, other: &Self, iv: Interval) -> Option<Self> {
let (start, end) = iv.start_end();
self.data.extend_from_slice(&other.data[start..end]);
if self.len() <= MAX_LEAF {
None
} else {
let splitpoint = self.len() / 2;
let right_vec = self.data.split_off(splitpoint);
Some(LayoutLeaf { data: right_vec })
}
}
}
impl From<Vec<(Height, Arc<Layout>)>> for LayoutRope {
fn from(v: Vec<(Height, Arc<Layout>)>) -> Self {
LayoutRope(Node::from_leaf(LayoutLeaf { data: v }))
}
}
impl LayoutRope {
/// The number of layouts in the rope.
pub fn len(&self) -> usize {
self.0.len()
}
/// The total height of the rope.
pub fn height(&self) -> Height {
Height::from_raw_frac(self.0.measure::<HeightMetric>())
}
/// A rope consisting of a single layout.
pub fn singleton(item: Layout) -> LayoutRope {
LayoutRope(Node::from_leaf(Self::singleton_leaf(item)))
}
fn singleton_leaf(item: Layout) -> LayoutLeaf {
let height = item.height();
LayoutLeaf {
data: vec![(height, Arc::new(item))],
}
}
pub fn get(&self, index: usize) -> Option<(Height, &Layout)> {
let cursor = Cursor::new(&self.0, index);
cursor
.get_leaf()
.and_then(|(leaf, offset)| leaf.data.get(offset))
.map(|(height, layout)| (*height, &**layout))
}
// These mutation methods might go away in favor of using the builder.
pub fn push(&mut self, item: Layout) {
let el = Self::singleton(item);
// This could be optimized more.
self.0 = Node::concat(self.0.clone(), el.0)
}
pub fn remove(&mut self, index: usize) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn set(&mut self, index: usize, item: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(item));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn insert(&mut self, index: usize, value: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(value));
self.push_subseq(&mut b, Interval::new(index, self.len()));
self.0 = b.build();
}
fn iter_chunks(&self, range: impl IntervalBounds) -> ChunkIter {
let Interval { start, end } = range.into_interval(self.len());
ChunkIter {
cursor: Cursor::new(&self.0, start),
end,
}
}
/// The height at the top of the layout at the given index.
///
/// This is simply the sum of the heights of the layouts that come before
/// it.
pub fn height_of_index(&self, index: usize) -> Height {
Height::from_raw_frac(self.0.count::<HeightMetric>(index))
}
/// The layout at the given height.
///
/// Edge cases get interesting (especially since zero-height layouts are
/// not forbidden), so here is a more precise spec: it is the first layout
/// that either contains (in the closed-open interval sense) the given
/// height, or is a zero-height layout at the given height.
///
/// If the total height is given and the rope does not end on a zero-height
/// layout, then it returns the number of layouts.
///
/// TODO: is there a simpler way to state that? It seems more complicated
/// than it should be.
pub fn index_of_height(&self, height: Height) -> usize |
fn push_subseq(&self, b: &mut TreeBuilder<LayoutInfo>, iv: Interval) {
// TODO: if we make the push_subseq method in xi-rope public, we can save some
// allocations.
b.push(self.0.subseq(iv));
}
}
impl LayoutRopeBuilder {
pub fn new() -> LayoutRopeBuilder {
LayoutRopeBuilder(TreeBuilder::new())
}
#[allow(unused)]
pub fn push_rope_slice(&mut self, other: &LayoutRope, range: Range<usize>) {
// TODO: use push_subseq method on TreeBuilder when that lands.
self.0.push(other.0.subseq(Interval::from(range)))
}
pub fn push_layout(&mut self, layout: Layout) {
// Maybe move the body of singleton_leaf to here?
self.0.push_leaf(LayoutRope::singleton_leaf(layout))
}
pub fn build(self) -> LayoutRope {
LayoutRope(self.0.build())
}
}
impl<'a> IntoIterator for &'a LayoutRope {
// Maybe `(Height, &'a Layout)` would be better, not to expose the internal
// representation, but it's a bit more work.
type Item = &'a (Height, Arc<Layout>);
type IntoIter = std::iter::Flatten<ChunkIter<'a>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_chunks(..).flatten()
}
}
pub struct ChunkIter<'a> {
cursor: Cursor<'a, LayoutInfo>,
end: usize,
}
impl<'a> Iterator for ChunkIter<'a> {
type Item = &'a [(Height, Arc<Layout>)];
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.pos() >= self.end {
return None;
}
let ( | {
self.0
.count_base_units::<HeightMetric>(height.as_raw_frac())
} | identifier_body |
layout_rope.rs | but we might add more stuff.
pub struct Layout(PietTextLayout);
#[derive(Clone, Default)]
pub struct LayoutRope(Node<LayoutInfo>);
pub struct LayoutRopeBuilder(TreeBuilder<LayoutInfo>);
/// The height metric of the rope, which is in raw Height fractions.
struct HeightMetric;
/// The base metric of the rope, which just counts the number of layouts.
pub struct BaseMetric;
// This technically doesn't have to be newtyped, we could impl leaf on
// the Vec directly, but this feels cleaner.
#[derive(Clone, Default)]
struct LayoutLeaf {
data: Vec<(Height, Arc<Layout>)>,
}
#[derive(Clone)]
struct LayoutInfo {
/// The height of this section of rope.
height: Height,
}
impl std::ops::Add for Height {
type Output = Self;
fn add(self, other: Self) -> Self {
Height(self.0 + other.0)
}
}
impl std::ops::AddAssign for Height {
fn add_assign(&mut self, other: Self) {
self.0 += other.0
}
}
impl Height {
/// The number of fractional bits in the representation.
pub const HEIGHT_FRAC_BITS: usize = 8;
/// The scale factor for converting from `f64`.
pub const SCALE_FACTOR: f64 = (1 << Self::HEIGHT_FRAC_BITS) as f64;
pub const ZERO: Height = Height(0);
pub fn from_raw_frac(frac: usize) -> Height {
Height(frac)
}
pub fn as_raw_frac(self) -> usize {
self.0
}
pub fn from_f64(height: f64) -> Height {
Height((height * Self::SCALE_FACTOR).round() as usize)
}
pub fn to_f64(self) -> f64 {
self.0 as f64 / Self::SCALE_FACTOR
}
}
impl Layout {
pub fn new(inner: PietTextLayout) -> Layout {
Layout(inner)
}
pub fn piet_layout(&self) -> &PietTextLayout {
&self.0
}
pub fn height(&self) -> Height {
let size = self.0.size();
Height::from_f64(size.height)
}
}
impl NodeInfo for LayoutInfo {
type L = LayoutLeaf;
fn accumulate(&mut self, other: &Self) {
self.height += other.height;
}
fn compute_info(leaf: &Self::L) -> Self {
let mut height = Height::ZERO;
for (leaf_height, _) in &leaf.data {
height += *leaf_height;
}
LayoutInfo { height }
}
}
impl DefaultMetric for LayoutInfo {
type DefaultMetric = BaseMetric;
}
const MIN_LEAF: usize = 16;
const MAX_LEAF: usize = 32;
impl Leaf for LayoutLeaf {
fn len(&self) -> usize {
self.data.len()
}
fn is_ok_child(&self) -> bool {
self.data.len() >= MIN_LEAF
}
fn push_maybe_split(&mut self, other: &Self, iv: Interval) -> Option<Self> {
let (start, end) = iv.start_end();
self.data.extend_from_slice(&other.data[start..end]);
if self.len() <= MAX_LEAF {
None
} else |
}
}
impl From<Vec<(Height, Arc<Layout>)>> for LayoutRope {
fn from(v: Vec<(Height, Arc<Layout>)>) -> Self {
LayoutRope(Node::from_leaf(LayoutLeaf { data: v }))
}
}
impl LayoutRope {
/// The number of layouts in the rope.
pub fn len(&self) -> usize {
self.0.len()
}
/// The total height of the rope.
pub fn height(&self) -> Height {
Height::from_raw_frac(self.0.measure::<HeightMetric>())
}
/// A rope consisting of a single layout.
pub fn singleton(item: Layout) -> LayoutRope {
LayoutRope(Node::from_leaf(Self::singleton_leaf(item)))
}
fn singleton_leaf(item: Layout) -> LayoutLeaf {
let height = item.height();
LayoutLeaf {
data: vec![(height, Arc::new(item))],
}
}
pub fn get(&self, index: usize) -> Option<(Height, &Layout)> {
let cursor = Cursor::new(&self.0, index);
cursor
.get_leaf()
.and_then(|(leaf, offset)| leaf.data.get(offset))
.map(|(height, layout)| (*height, &**layout))
}
// These mutation methods might go away in favor of using the builder.
pub fn push(&mut self, item: Layout) {
let el = Self::singleton(item);
// This could be optimized more.
self.0 = Node::concat(self.0.clone(), el.0)
}
pub fn remove(&mut self, index: usize) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn set(&mut self, index: usize, item: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(item));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn insert(&mut self, index: usize, value: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(value));
self.push_subseq(&mut b, Interval::new(index, self.len()));
self.0 = b.build();
}
fn iter_chunks(&self, range: impl IntervalBounds) -> ChunkIter {
let Interval { start, end } = range.into_interval(self.len());
ChunkIter {
cursor: Cursor::new(&self.0, start),
end,
}
}
/// The height at the top of the layout at the given index.
///
/// This is simply the sum of the heights of the layouts that come before
/// it.
pub fn height_of_index(&self, index: usize) -> Height {
Height::from_raw_frac(self.0.count::<HeightMetric>(index))
}
/// The layout at the given height.
///
/// Edge cases get interesting (especially since zero-height layouts are
/// not forbidden), so here is a more precise spec: it is the first layout
/// that either contains (in the closed-open interval sense) the given
/// height, or is a zero-height layout at the given height.
///
/// If the total height is given and the rope does not end on a zero-height
/// layout, then it returns the number of layouts.
///
/// TODO: is there a simpler way to state that? It seems more complicated
/// than it should be.
pub fn index_of_height(&self, height: Height) -> usize {
self.0
.count_base_units::<HeightMetric>(height.as_raw_frac())
}
fn push_subseq(&self, b: &mut TreeBuilder<LayoutInfo>, iv: Interval) {
// TODO: if we make the push_subseq method in xi-rope public, we can save some
// allocations.
b.push(self.0.subseq(iv));
}
}
impl LayoutRopeBuilder {
pub fn new() -> LayoutRopeBuilder {
LayoutRopeBuilder(TreeBuilder::new())
}
#[allow(unused)]
pub fn push_rope_slice(&mut self, other: &LayoutRope, range: Range<usize>) {
// TODO: use push_subseq method on TreeBuilder when that lands.
self.0.push(other.0.subseq(Interval::from(range)))
}
pub fn push_layout(&mut self, layout: Layout) {
// Maybe move the body of singleton_leaf to here?
self.0.push_leaf(LayoutRope::singleton_leaf(layout))
}
pub fn build(self) -> LayoutRope {
LayoutRope(self.0.build())
}
}
impl<'a> IntoIterator for &'a LayoutRope {
// Maybe `(Height, &'a Layout)` would be better, not to expose the internal
// representation, but it's a bit more work.
type Item = &'a (Height, Arc<Layout>);
type IntoIter = std::iter::Flatten<ChunkIter<'a>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_chunks(..).flatten()
}
}
pub struct ChunkIter<'a> {
cursor: Cursor<'a, LayoutInfo>,
end: usize,
}
impl<'a> Iterator for ChunkIter<'a> {
type Item = &'a [(Height, Arc<Layout>)];
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.pos() >= self.end {
return None;
}
let ( | {
let splitpoint = self.len() / 2;
let right_vec = self.data.split_off(splitpoint);
Some(LayoutLeaf { data: right_vec })
} | conditional_block |
layout_rope.rs | , but we might add more stuff.
pub struct Layout(PietTextLayout);
#[derive(Clone, Default)]
pub struct LayoutRope(Node<LayoutInfo>);
pub struct LayoutRopeBuilder(TreeBuilder<LayoutInfo>);
/// The height metric of the rope, which is in raw Height fractions.
struct HeightMetric;
/// The base metric of the rope, which just counts the number of layouts.
pub struct BaseMetric;
// This technically doesn't have to be newtyped, we could impl leaf on
// the Vec directly, but this feels cleaner.
#[derive(Clone, Default)]
struct LayoutLeaf {
data: Vec<(Height, Arc<Layout>)>,
}
#[derive(Clone)]
struct LayoutInfo {
/// The height of this section of rope.
height: Height,
}
impl std::ops::Add for Height { | type Output = Self;
fn add(self, other: Self) -> Self {
Height(self.0 + other.0)
}
}
impl std::ops::AddAssign for Height {
fn add_assign(&mut self, other: Self) {
self.0 += other.0
}
}
impl Height {
/// The number of fractional bits in the representation.
pub const HEIGHT_FRAC_BITS: usize = 8;
/// The scale factor for converting from `f64`.
pub const SCALE_FACTOR: f64 = (1 << Self::HEIGHT_FRAC_BITS) as f64;
pub const ZERO: Height = Height(0);
pub fn from_raw_frac(frac: usize) -> Height {
Height(frac)
}
pub fn as_raw_frac(self) -> usize {
self.0
}
pub fn from_f64(height: f64) -> Height {
Height((height * Self::SCALE_FACTOR).round() as usize)
}
pub fn to_f64(self) -> f64 {
self.0 as f64 / Self::SCALE_FACTOR
}
}
impl Layout {
pub fn new(inner: PietTextLayout) -> Layout {
Layout(inner)
}
pub fn piet_layout(&self) -> &PietTextLayout {
&self.0
}
pub fn height(&self) -> Height {
let size = self.0.size();
Height::from_f64(size.height)
}
}
impl NodeInfo for LayoutInfo {
type L = LayoutLeaf;
fn accumulate(&mut self, other: &Self) {
self.height += other.height;
}
fn compute_info(leaf: &Self::L) -> Self {
let mut height = Height::ZERO;
for (leaf_height, _) in &leaf.data {
height += *leaf_height;
}
LayoutInfo { height }
}
}
impl DefaultMetric for LayoutInfo {
type DefaultMetric = BaseMetric;
}
const MIN_LEAF: usize = 16;
const MAX_LEAF: usize = 32;
impl Leaf for LayoutLeaf {
fn len(&self) -> usize {
self.data.len()
}
fn is_ok_child(&self) -> bool {
self.data.len() >= MIN_LEAF
}
fn push_maybe_split(&mut self, other: &Self, iv: Interval) -> Option<Self> {
let (start, end) = iv.start_end();
self.data.extend_from_slice(&other.data[start..end]);
if self.len() <= MAX_LEAF {
None
} else {
let splitpoint = self.len() / 2;
let right_vec = self.data.split_off(splitpoint);
Some(LayoutLeaf { data: right_vec })
}
}
}
impl From<Vec<(Height, Arc<Layout>)>> for LayoutRope {
fn from(v: Vec<(Height, Arc<Layout>)>) -> Self {
LayoutRope(Node::from_leaf(LayoutLeaf { data: v }))
}
}
impl LayoutRope {
/// The number of layouts in the rope.
pub fn len(&self) -> usize {
self.0.len()
}
/// The total height of the rope.
pub fn height(&self) -> Height {
Height::from_raw_frac(self.0.measure::<HeightMetric>())
}
/// A rope consisting of a single layout.
pub fn singleton(item: Layout) -> LayoutRope {
LayoutRope(Node::from_leaf(Self::singleton_leaf(item)))
}
fn singleton_leaf(item: Layout) -> LayoutLeaf {
let height = item.height();
LayoutLeaf {
data: vec![(height, Arc::new(item))],
}
}
pub fn get(&self, index: usize) -> Option<(Height, &Layout)> {
let cursor = Cursor::new(&self.0, index);
cursor
.get_leaf()
.and_then(|(leaf, offset)| leaf.data.get(offset))
.map(|(height, layout)| (*height, &**layout))
}
// These mutation methods might go away in favor of using the builder.
pub fn push(&mut self, item: Layout) {
let el = Self::singleton(item);
// This could be optimized more.
self.0 = Node::concat(self.0.clone(), el.0)
}
pub fn remove(&mut self, index: usize) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn set(&mut self, index: usize, item: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(item));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn insert(&mut self, index: usize, value: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(value));
self.push_subseq(&mut b, Interval::new(index, self.len()));
self.0 = b.build();
}
fn iter_chunks(&self, range: impl IntervalBounds) -> ChunkIter {
let Interval { start, end } = range.into_interval(self.len());
ChunkIter {
cursor: Cursor::new(&self.0, start),
end,
}
}
/// The height at the top of the layout at the given index.
///
/// This is simply the sum of the heights of the layouts that come before
/// it.
pub fn height_of_index(&self, index: usize) -> Height {
Height::from_raw_frac(self.0.count::<HeightMetric>(index))
}
/// The layout at the given height.
///
/// Edge cases get interesting (especially since zero-height layouts are
/// not forbidden), so here is a more precise spec: it is the first layout
/// that either contains (in the closed-open interval sense) the given
/// height, or is a zero-height layout at the given height.
///
/// If the total height is given and the rope does not end on a zero-height
/// layout, then it returns the number of layouts.
///
/// TODO: is there a simpler way to state that? It seems more complicated
/// than it should be.
pub fn index_of_height(&self, height: Height) -> usize {
self.0
.count_base_units::<HeightMetric>(height.as_raw_frac())
}
fn push_subseq(&self, b: &mut TreeBuilder<LayoutInfo>, iv: Interval) {
// TODO: if we make the push_subseq method in xi-rope public, we can save some
// allocations.
b.push(self.0.subseq(iv));
}
}
impl LayoutRopeBuilder {
pub fn new() -> LayoutRopeBuilder {
LayoutRopeBuilder(TreeBuilder::new())
}
#[allow(unused)]
pub fn push_rope_slice(&mut self, other: &LayoutRope, range: Range<usize>) {
// TODO: use push_subseq method on TreeBuilder when that lands.
self.0.push(other.0.subseq(Interval::from(range)))
}
pub fn push_layout(&mut self, layout: Layout) {
// Maybe move the body of singleton_leaf to here?
self.0.push_leaf(LayoutRope::singleton_leaf(layout))
}
pub fn build(self) -> LayoutRope {
LayoutRope(self.0.build())
}
}
impl<'a> IntoIterator for &'a LayoutRope {
// Maybe `(Height, &'a Layout)` would be better, not to expose the internal
// representation, but it's a bit more work.
type Item = &'a (Height, Arc<Layout>);
type IntoIter = std::iter::Flatten<ChunkIter<'a>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_chunks(..).flatten()
}
}
pub struct ChunkIter<'a> {
cursor: Cursor<'a, LayoutInfo>,
end: usize,
}
impl<'a> Iterator for ChunkIter<'a> {
type Item = &'a [(Height, Arc<Layout>)];
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.pos() >= self.end {
return None;
}
let (leaf | random_line_split |
|
switching_utils.py | iff you would like to save the simulation as a GIF.
'''
#Initialize planners if not yet done
for car in world.cars:
if (isinstance(car, PlannerCar) and car.planner is None):
car.initialize_planner()
if (world.verbose):
print(f"Executing {exp_name} for {time_steps} time steps...")
#Model Switching Car
ms_car = world.cars[ms_car_index]
world.reset(seed)
if (world.verbose):
world.render()
if (experiment_args.save_gif):
frames = []
frames.append(world.render("rgb_array"))
#Reward accrued at each time step.
reward_ts = []
for t in range(time_steps):
'''
Step world and get controls all cars
took and the new state of the world.
'''
_, control, new_state = world.step()
if (world.verbose):
world.render()
if (experiment_args.save_gif):
frames.append(world.render("rgb_array"))
ms_control = control[ms_car_index]
#Reward for the model switching car.
rew = ms_car.reward_fn(tf.stack(new_state), ms_control).numpy()
reward_ts.append(rew)
#Computational Time Breakdown
if (world.verbose):
ct_breakdown = ms_car.planner.avg_comp_time_breakdown()
print(f"T: {t + 1}; R: {rew:.2f}; CT: {ct_breakdown}")
if (experiment_args.num_run == 1):
model_ts = ms_car.planner.models_used
#Average Computational Time Time Series
avg_step_times = ms_car.planner.get_avg_comp_times()
print()
#Gather reward and computation time information
#Single Computational Time Time Series
step_times = ms_car.planner.get_comp_times()
#Display reward and computation time graphs
display_rewards(reward_ts, model_ts)
display_computational_times(step_times['overall'],
avg_step_times['overall'])
if (experiment_args.save_gif):
clip = ImageSequenceClip(frames, fps=int(1 / world.dt))
clip.speedx(0.5).write_gif(f"{exp_name}.gif", program="ffmpeg")
#return np.mean(reward_ts), avg_step_times['overall'][-1], model_usage
return reward_ts
def execute_many_experiments(exp_name, world, time_steps, experiment_args,
ms_car_index = 0):
switching_parameters = {"comp_times": {"Naive": experiment_args.naive_ct,
"Turn": experiment_args.turn_ct,
"Tom": experiment_args.tom_ct},
"cooldowns": {"up": experiment_args.up_cd,
"down": experiment_args.down_cd},
"trust_radius": experiment_args.tr}
world.cars[ms_car_index].initialize_planner()
world.cars[ms_car_index].planner.update_switching_parameteters(switching_parameters)
run_log = dict()
for lambd in experiment_args.lambdas:
print()
print(f"Using Lambda: {lambd}")
world.cars[ms_car_index].planner.update_switching_parameteters({"lambda": float(lambd)})
lambd_log = []
run_time = 0
for i, seed in enumerate(experiment_args.seeds):
if (run_time == 0):
print(f"Running Experiment {i + 1}/{experiment_args.num_run}", end = "\r")
else:
et = (experiment_args.num_run - i) * run_time / (i - 1)
print(f"Running Experiment {i + 1}/{experiment_args.num_run}, Expected Time Left: {et:.0f}s ", end = "\r")
if (i >= 1):
start_time = time.time()
#mean_rew, mean_ct, model_usage
reward_ts = execute_experiment(exp_name, world, time_steps, experiment_args,
seed = seed, ms_car_index = ms_car_index)
if (i >= 1):
run_time += time.time() - start_time
ms_car = world.cars[ms_car_index]
states = [[list(car_s.numpy()) for car_s in all_car_s] for all_car_s in world.past_states]
actions = [list(c) for c in ms_car.control_log]
models = ms_car.planner.models_used
heur_comp_log = ms_car.planner.heuristic_computation_log
if (len(heur_comp_log) < time_steps):
heur_comp_log += [""] * (time_steps - len(heur_comp_log))
planning_times = ms_car.planner.get_comp_times()['planning']
decision_times = ms_car.planner.get_comp_times()['decision']
lambd_log += list(zip(states, actions, reward_ts, models, heur_comp_log, planning_times, decision_times))
assert(len(lambd_log) == time_steps * (i + 1))
run_log[lambd] = lambd_log
print(f"Finished Running {experiment_args.num_run} Experiments!" + " " * 30)
complete_experiment_log= {'name': exp_name, 'time_steps': time_steps,
'num_run': experiment_args.num_run,
'models': ms_car.planner.model_ladder,
'log_indices': ['state', 'action', 'reward',
'model', 'heur_comp_log',
'planning_time','decision_time'],
'runs': run_log}
return complete_experiment_log
def | (reward_ts, model_ts):
'''
Displays reward for each time step gained by the car
in a plot. Color codes by model used and presents
an appropriate legend.
'''
plt.title("Reward by Model")
start_time = 0
cur_model = model_ts[0]
used_models = [cur_model]
for t, model in enumerate(model_ts):
if (model != cur_model):
plt.plot(range(start_time, t + 1),
reward_ts[start_time:t + 1],
color = COLORS[MODELS.index(cur_model)])
start_time = t
cur_model = model
if (model not in used_models):
used_models.append(model)
plt.plot(range(start_time, len(model_ts)),
reward_ts[start_time:],
color = COLORS[MODELS.index(cur_model)])
patch = lambda i: mpatches.Patch(color=COLORS[MODELS.index(used_models[i])],
label=used_models[i])
plt.legend(handles=[patch(i) for i in range(len(used_models))])
plt.xlabel("Time Step Number")
plt.ylabel("Reward")
plt.show()
def display_computational_times(sing_ct_ts, avg_ct_ts):
'''
Displays computation time for each time step
of planning done by the car. Also plots for
every value of t, the average of the first
t computation times.
'''
plt.title("Computational Time")
plt.plot(sing_ct_ts, color = 'r')
plt.plot(avg_ct_ts, color = 'b')
plt.legend(["Single Step", "Average"])
plt.xlabel("Time Step Number")
plt.ylabel("Computational Time")
plt.show()
def default_experiment_params():
'''
Returns a dictionary containing default paramters
for an experiment.
Convention is Car 0 is the robot car, Car 1 is
the human car. Any further cars are either fixed
velocity or we don't bother about.
An experiment may modify the returned dictionary
for any custom parameter settings.
'''
exp_params = dict()
exp_params["Naive"] = {"horizon": 5, "n_iter": 20}
exp_params["Turn"] = {"horizon": 5, "n_iter": 20}
exp_params["Tom"] = {"horizon": 5, "n_iter": 20}
for m in MODELS:
exp_params[m]["h_index"] = 1
return exp_params
def default_visualizer_args(EXP_NAME):
return {'name': EXP_NAME, 'display_model': True,
'display_y': True, 'follow_main_car': True}
def switch_model_pstr(model_name, experiment_params):
# Returns a string describing the model parameters
model_params = experiment_params[model_name]
return f"({model_params['horizon']},{model_params['n_iter']})"
def exp_str(experiment_params, experiment_args):
'''
Return the string capturing the experiment's planning mechanisms
based on the experiment parameters and experiment type.
'''
EXP_STRS = [None] * 11
pstrs = {m: switch_model_pstr(m, experiment_params) for m in MODELS}
# Single Model Experiments
EXP_STRS[0] = f"Naive{pstrs['Naive']}"
EXP_STRS[1] = f"Tu{pstrs['Turn']}"
EXP_STRS[2] = f"Tom{pstrs['Tom']}"
# Model Switching Experiments
EXP_STRS[3] = f"switch({EXP_STRS[0]}, {EXP_STRS[1]})"
EXP_STRS[4] = f"switch({EXP_STRS[1]}, {EXP_STRS[2]})"
EXP_STRS[4] = f"switch({EXP_STRS[0]}, | display_rewards | identifier_name |
switching_utils.py | def execute_many_experiments(exp_name, world, time_steps, experiment_args,
ms_car_index = 0):
switching_parameters = {"comp_times": {"Naive": experiment_args.naive_ct,
"Turn": experiment_args.turn_ct,
"Tom": experiment_args.tom_ct},
"cooldowns": {"up": experiment_args.up_cd,
"down": experiment_args.down_cd},
"trust_radius": experiment_args.tr}
world.cars[ms_car_index].initialize_planner()
world.cars[ms_car_index].planner.update_switching_parameteters(switching_parameters)
run_log = dict()
for lambd in experiment_args.lambdas:
print()
print(f"Using Lambda: {lambd}")
world.cars[ms_car_index].planner.update_switching_parameteters({"lambda": float(lambd)})
lambd_log = []
run_time = 0
for i, seed in enumerate(experiment_args.seeds):
if (run_time == 0):
print(f"Running Experiment {i + 1}/{experiment_args.num_run}", end = "\r")
else:
et = (experiment_args.num_run - i) * run_time / (i - 1)
print(f"Running Experiment {i + 1}/{experiment_args.num_run}, Expected Time Left: {et:.0f}s ", end = "\r")
if (i >= 1):
start_time = time.time()
#mean_rew, mean_ct, model_usage
reward_ts = execute_experiment(exp_name, world, time_steps, experiment_args,
seed = seed, ms_car_index = ms_car_index)
if (i >= 1):
run_time += time.time() - start_time
ms_car = world.cars[ms_car_index]
states = [[list(car_s.numpy()) for car_s in all_car_s] for all_car_s in world.past_states]
actions = [list(c) for c in ms_car.control_log]
models = ms_car.planner.models_used
heur_comp_log = ms_car.planner.heuristic_computation_log
if (len(heur_comp_log) < time_steps):
heur_comp_log += [""] * (time_steps - len(heur_comp_log))
planning_times = ms_car.planner.get_comp_times()['planning']
decision_times = ms_car.planner.get_comp_times()['decision']
lambd_log += list(zip(states, actions, reward_ts, models, heur_comp_log, planning_times, decision_times))
assert(len(lambd_log) == time_steps * (i + 1))
run_log[lambd] = lambd_log
print(f"Finished Running {experiment_args.num_run} Experiments!" + " " * 30)
complete_experiment_log= {'name': exp_name, 'time_steps': time_steps,
'num_run': experiment_args.num_run,
'models': ms_car.planner.model_ladder,
'log_indices': ['state', 'action', 'reward',
'model', 'heur_comp_log',
'planning_time','decision_time'],
'runs': run_log}
return complete_experiment_log
def display_rewards(reward_ts, model_ts):
'''
Displays reward for each time step gained by the car
in a plot. Color codes by model used and presents
an appropriate legend.
'''
plt.title("Reward by Model")
start_time = 0
cur_model = model_ts[0]
used_models = [cur_model]
for t, model in enumerate(model_ts):
if (model != cur_model):
plt.plot(range(start_time, t + 1),
reward_ts[start_time:t + 1],
color = COLORS[MODELS.index(cur_model)])
start_time = t
cur_model = model
if (model not in used_models):
used_models.append(model)
plt.plot(range(start_time, len(model_ts)),
reward_ts[start_time:],
color = COLORS[MODELS.index(cur_model)])
patch = lambda i: mpatches.Patch(color=COLORS[MODELS.index(used_models[i])],
label=used_models[i])
plt.legend(handles=[patch(i) for i in range(len(used_models))])
plt.xlabel("Time Step Number")
plt.ylabel("Reward")
plt.show()
def display_computational_times(sing_ct_ts, avg_ct_ts):
'''
Displays computation time for each time step
of planning done by the car. Also plots for
every value of t, the average of the first
t computation times.
'''
plt.title("Computational Time")
plt.plot(sing_ct_ts, color = 'r')
plt.plot(avg_ct_ts, color = 'b')
plt.legend(["Single Step", "Average"])
plt.xlabel("Time Step Number")
plt.ylabel("Computational Time")
plt.show()
def default_experiment_params():
'''
Returns a dictionary containing default paramters
for an experiment.
Convention is Car 0 is the robot car, Car 1 is
the human car. Any further cars are either fixed
velocity or we don't bother about.
An experiment may modify the returned dictionary
for any custom parameter settings.
'''
exp_params = dict()
exp_params["Naive"] = {"horizon": 5, "n_iter": 20}
exp_params["Turn"] = {"horizon": 5, "n_iter": 20}
exp_params["Tom"] = {"horizon": 5, "n_iter": 20}
for m in MODELS:
exp_params[m]["h_index"] = 1
return exp_params
def default_visualizer_args(EXP_NAME):
return {'name': EXP_NAME, 'display_model': True,
'display_y': True, 'follow_main_car': True}
def switch_model_pstr(model_name, experiment_params):
# Returns a string describing the model parameters
model_params = experiment_params[model_name]
return f"({model_params['horizon']},{model_params['n_iter']})"
def exp_str(experiment_params, experiment_args):
'''
Return the string capturing the experiment's planning mechanisms
based on the experiment parameters and experiment type.
'''
EXP_STRS = [None] * 11
pstrs = {m: switch_model_pstr(m, experiment_params) for m in MODELS}
# Single Model Experiments
EXP_STRS[0] = f"Naive{pstrs['Naive']}"
EXP_STRS[1] = f"Tu{pstrs['Turn']}"
EXP_STRS[2] = f"Tom{pstrs['Tom']}"
# Model Switching Experiments
EXP_STRS[3] = f"switch({EXP_STRS[0]}, {EXP_STRS[1]})"
EXP_STRS[4] = f"switch({EXP_STRS[1]}, {EXP_STRS[2]})"
EXP_STRS[4] = f"switch({EXP_STRS[0]}, {EXP_STRS[2]})"
EXP_STRS[6] = f"switch({EXP_STRS[0]}, {EXP_STRS[1]}, {EXP_STRS[2]})"
return EXP_STRS[experiment_args.exp_type]
def setup_switching(planner_args, use_models):
'''
Sets up appropriate planner arguments for switching along
series of models.
'''
planner_args["init_model"] = use_models[0]
planner_args["use_models"] = set(use_models)
def planner_params(experiment_params, experiment_args = None, exp_type = None):
'''
Supply the planner type and arguments based on the parameters
of the experiment and the type of the experiment.
'''
planner_params = dict()
for model in MODELS:
planner_params[model] = experiment_params[model]
planner_args = {"planner_specific_args": planner_params}
if (experiment_args is not None):
exp_type = experiment_args.exp_type
if (exp_type == 0):
planner_type = "ModelSwitcher"
planner_args["init_model"] = "Naive"
planner_args["enable_switching"] = False
elif (exp_type == 1):
planner_type = "ModelSwitcher"
planner_args["init_model"] = "Turn"
planner_args["enable_switching"] = False
elif (exp_type == 2):
planner_type = "ModelSwitcher"
planner_args["init_model"] = "Tom"
planner_args["enable_switching"] = False
elif (exp_type == 3):
planner_type = "ModelSwitcher"
use_models = ["Naive", "Turn"]
setup_switching(planner_args, use_models)
elif (exp_type == 4):
planner_type = "ModelSwitcher"
use_models = ["Turn", "Tom"]
setup_switching(planner_args, use_models)
elif (exp_type == 5):
planner_type = "ModelSwitcher"
use_models = ["Naive", "Tom"]
setup_switching(planner_args, use_models)
elif (exp_type == 6):
planner_type = "ModelSwitcher"
use_models = ["Naive", "Turn", "Tom"]
setup_switching(planner_args, use_models)
else:
| raise Exception(f"Invalid Experiment Type: {exp_type}") | conditional_block |
|
switching_utils.py | iff you would like to save the simulation as a GIF.
'''
#Initialize planners if not yet done
for car in world.cars:
if (isinstance(car, PlannerCar) and car.planner is None):
car.initialize_planner()
if (world.verbose):
print(f"Executing {exp_name} for {time_steps} time steps...")
#Model Switching Car
ms_car = world.cars[ms_car_index]
world.reset(seed)
if (world.verbose):
world.render()
if (experiment_args.save_gif):
frames = []
frames.append(world.render("rgb_array"))
#Reward accrued at each time step.
reward_ts = []
for t in range(time_steps):
'''
Step world and get controls all cars
took and the new state of the world.
'''
_, control, new_state = world.step()
if (world.verbose):
world.render()
if (experiment_args.save_gif):
frames.append(world.render("rgb_array"))
ms_control = control[ms_car_index]
#Reward for the model switching car.
rew = ms_car.reward_fn(tf.stack(new_state), ms_control).numpy()
reward_ts.append(rew)
#Computational Time Breakdown
if (world.verbose):
ct_breakdown = ms_car.planner.avg_comp_time_breakdown()
print(f"T: {t + 1}; R: {rew:.2f}; CT: {ct_breakdown}")
if (experiment_args.num_run == 1):
model_ts = ms_car.planner.models_used
#Average Computational Time Time Series
avg_step_times = ms_car.planner.get_avg_comp_times()
print()
#Gather reward and computation time information
#Single Computational Time Time Series
step_times = ms_car.planner.get_comp_times()
#Display reward and computation time graphs
display_rewards(reward_ts, model_ts)
display_computational_times(step_times['overall'],
avg_step_times['overall'])
if (experiment_args.save_gif):
clip = ImageSequenceClip(frames, fps=int(1 / world.dt)) | def execute_many_experiments(exp_name, world, time_steps, experiment_args,
ms_car_index = 0):
switching_parameters = {"comp_times": {"Naive": experiment_args.naive_ct,
"Turn": experiment_args.turn_ct,
"Tom": experiment_args.tom_ct},
"cooldowns": {"up": experiment_args.up_cd,
"down": experiment_args.down_cd},
"trust_radius": experiment_args.tr}
world.cars[ms_car_index].initialize_planner()
world.cars[ms_car_index].planner.update_switching_parameteters(switching_parameters)
run_log = dict()
for lambd in experiment_args.lambdas:
print()
print(f"Using Lambda: {lambd}")
world.cars[ms_car_index].planner.update_switching_parameteters({"lambda": float(lambd)})
lambd_log = []
run_time = 0
for i, seed in enumerate(experiment_args.seeds):
if (run_time == 0):
print(f"Running Experiment {i + 1}/{experiment_args.num_run}", end = "\r")
else:
et = (experiment_args.num_run - i) * run_time / (i - 1)
print(f"Running Experiment {i + 1}/{experiment_args.num_run}, Expected Time Left: {et:.0f}s ", end = "\r")
if (i >= 1):
start_time = time.time()
#mean_rew, mean_ct, model_usage
reward_ts = execute_experiment(exp_name, world, time_steps, experiment_args,
seed = seed, ms_car_index = ms_car_index)
if (i >= 1):
run_time += time.time() - start_time
ms_car = world.cars[ms_car_index]
states = [[list(car_s.numpy()) for car_s in all_car_s] for all_car_s in world.past_states]
actions = [list(c) for c in ms_car.control_log]
models = ms_car.planner.models_used
heur_comp_log = ms_car.planner.heuristic_computation_log
if (len(heur_comp_log) < time_steps):
heur_comp_log += [""] * (time_steps - len(heur_comp_log))
planning_times = ms_car.planner.get_comp_times()['planning']
decision_times = ms_car.planner.get_comp_times()['decision']
lambd_log += list(zip(states, actions, reward_ts, models, heur_comp_log, planning_times, decision_times))
assert(len(lambd_log) == time_steps * (i + 1))
run_log[lambd] = lambd_log
print(f"Finished Running {experiment_args.num_run} Experiments!" + " " * 30)
complete_experiment_log= {'name': exp_name, 'time_steps': time_steps,
'num_run': experiment_args.num_run,
'models': ms_car.planner.model_ladder,
'log_indices': ['state', 'action', 'reward',
'model', 'heur_comp_log',
'planning_time','decision_time'],
'runs': run_log}
return complete_experiment_log
def display_rewards(reward_ts, model_ts):
'''
Displays reward for each time step gained by the car
in a plot. Color codes by model used and presents
an appropriate legend.
'''
plt.title("Reward by Model")
start_time = 0
cur_model = model_ts[0]
used_models = [cur_model]
for t, model in enumerate(model_ts):
if (model != cur_model):
plt.plot(range(start_time, t + 1),
reward_ts[start_time:t + 1],
color = COLORS[MODELS.index(cur_model)])
start_time = t
cur_model = model
if (model not in used_models):
used_models.append(model)
plt.plot(range(start_time, len(model_ts)),
reward_ts[start_time:],
color = COLORS[MODELS.index(cur_model)])
patch = lambda i: mpatches.Patch(color=COLORS[MODELS.index(used_models[i])],
label=used_models[i])
plt.legend(handles=[patch(i) for i in range(len(used_models))])
plt.xlabel("Time Step Number")
plt.ylabel("Reward")
plt.show()
def display_computational_times(sing_ct_ts, avg_ct_ts):
'''
Displays computation time for each time step
of planning done by the car. Also plots for
every value of t, the average of the first
t computation times.
'''
plt.title("Computational Time")
plt.plot(sing_ct_ts, color = 'r')
plt.plot(avg_ct_ts, color = 'b')
plt.legend(["Single Step", "Average"])
plt.xlabel("Time Step Number")
plt.ylabel("Computational Time")
plt.show()
def default_experiment_params():
'''
Returns a dictionary containing default paramters
for an experiment.
Convention is Car 0 is the robot car, Car 1 is
the human car. Any further cars are either fixed
velocity or we don't bother about.
An experiment may modify the returned dictionary
for any custom parameter settings.
'''
exp_params = dict()
exp_params["Naive"] = {"horizon": 5, "n_iter": 20}
exp_params["Turn"] = {"horizon": 5, "n_iter": 20}
exp_params["Tom"] = {"horizon": 5, "n_iter": 20}
for m in MODELS:
exp_params[m]["h_index"] = 1
return exp_params
def default_visualizer_args(EXP_NAME):
return {'name': EXP_NAME, 'display_model': True,
'display_y': True, 'follow_main_car': True}
def switch_model_pstr(model_name, experiment_params):
# Returns a string describing the model parameters
model_params = experiment_params[model_name]
return f"({model_params['horizon']},{model_params['n_iter']})"
def exp_str(experiment_params, experiment_args):
'''
Return the string capturing the experiment's planning mechanisms
based on the experiment parameters and experiment type.
'''
EXP_STRS = [None] * 11
pstrs = {m: switch_model_pstr(m, experiment_params) for m in MODELS}
# Single Model Experiments
EXP_STRS[0] = f"Naive{pstrs['Naive']}"
EXP_STRS[1] = f"Tu{pstrs['Turn']}"
EXP_STRS[2] = f"Tom{pstrs['Tom']}"
# Model Switching Experiments
EXP_STRS[3] = f"switch({EXP_STRS[0]}, {EXP_STRS[1]})"
EXP_STRS[4] = f"switch({EXP_STRS[1]}, {EXP_STRS[2]})"
EXP_STRS[4] = f"switch({EXP_STRS[0]}, { | clip.speedx(0.5).write_gif(f"{exp_name}.gif", program="ffmpeg")
#return np.mean(reward_ts), avg_step_times['overall'][-1], model_usage
return reward_ts
| random_line_split |
switching_utils.py | iff you would like to save the simulation as a GIF.
'''
#Initialize planners if not yet done
for car in world.cars:
if (isinstance(car, PlannerCar) and car.planner is None):
car.initialize_planner()
if (world.verbose):
print(f"Executing {exp_name} for {time_steps} time steps...")
#Model Switching Car
ms_car = world.cars[ms_car_index]
world.reset(seed)
if (world.verbose):
world.render()
if (experiment_args.save_gif):
frames = []
frames.append(world.render("rgb_array"))
#Reward accrued at each time step.
reward_ts = []
for t in range(time_steps):
'''
Step world and get controls all cars
took and the new state of the world.
'''
_, control, new_state = world.step()
if (world.verbose):
world.render()
if (experiment_args.save_gif):
frames.append(world.render("rgb_array"))
ms_control = control[ms_car_index]
#Reward for the model switching car.
rew = ms_car.reward_fn(tf.stack(new_state), ms_control).numpy()
reward_ts.append(rew)
#Computational Time Breakdown
if (world.verbose):
ct_breakdown = ms_car.planner.avg_comp_time_breakdown()
print(f"T: {t + 1}; R: {rew:.2f}; CT: {ct_breakdown}")
if (experiment_args.num_run == 1):
model_ts = ms_car.planner.models_used
#Average Computational Time Time Series
avg_step_times = ms_car.planner.get_avg_comp_times()
print()
#Gather reward and computation time information
#Single Computational Time Time Series
step_times = ms_car.planner.get_comp_times()
#Display reward and computation time graphs
display_rewards(reward_ts, model_ts)
display_computational_times(step_times['overall'],
avg_step_times['overall'])
if (experiment_args.save_gif):
clip = ImageSequenceClip(frames, fps=int(1 / world.dt))
clip.speedx(0.5).write_gif(f"{exp_name}.gif", program="ffmpeg")
#return np.mean(reward_ts), avg_step_times['overall'][-1], model_usage
return reward_ts
def execute_many_experiments(exp_name, world, time_steps, experiment_args,
ms_car_index = 0):
| else:
et = (experiment_args.num_run - i) * run_time / (i - 1)
print(f"Running Experiment {i + 1}/{experiment_args.num_run}, Expected Time Left: {et:.0f}s ", end = "\r")
if (i >= 1):
start_time = time.time()
#mean_rew, mean_ct, model_usage
reward_ts = execute_experiment(exp_name, world, time_steps, experiment_args,
seed = seed, ms_car_index = ms_car_index)
if (i >= 1):
run_time += time.time() - start_time
ms_car = world.cars[ms_car_index]
states = [[list(car_s.numpy()) for car_s in all_car_s] for all_car_s in world.past_states]
actions = [list(c) for c in ms_car.control_log]
models = ms_car.planner.models_used
heur_comp_log = ms_car.planner.heuristic_computation_log
if (len(heur_comp_log) < time_steps):
heur_comp_log += [""] * (time_steps - len(heur_comp_log))
planning_times = ms_car.planner.get_comp_times()['planning']
decision_times = ms_car.planner.get_comp_times()['decision']
lambd_log += list(zip(states, actions, reward_ts, models, heur_comp_log, planning_times, decision_times))
assert(len(lambd_log) == time_steps * (i + 1))
run_log[lambd] = lambd_log
print(f"Finished Running {experiment_args.num_run} Experiments!" + " " * 30)
complete_experiment_log= {'name': exp_name, 'time_steps': time_steps,
'num_run': experiment_args.num_run,
'models': ms_car.planner.model_ladder,
'log_indices': ['state', 'action', 'reward',
'model', 'heur_comp_log',
'planning_time','decision_time'],
'runs': run_log}
return complete_experiment_log
def display_rewards(reward_ts, model_ts):
'''
Displays reward for each time step gained by the car
in a plot. Color codes by model used and presents
an appropriate legend.
'''
plt.title("Reward by Model")
start_time = 0
cur_model = model_ts[0]
used_models = [cur_model]
for t, model in enumerate(model_ts):
if (model != cur_model):
plt.plot(range(start_time, t + 1),
reward_ts[start_time:t + 1],
color = COLORS[MODELS.index(cur_model)])
start_time = t
cur_model = model
if (model not in used_models):
used_models.append(model)
plt.plot(range(start_time, len(model_ts)),
reward_ts[start_time:],
color = COLORS[MODELS.index(cur_model)])
patch = lambda i: mpatches.Patch(color=COLORS[MODELS.index(used_models[i])],
label=used_models[i])
plt.legend(handles=[patch(i) for i in range(len(used_models))])
plt.xlabel("Time Step Number")
plt.ylabel("Reward")
plt.show()
def display_computational_times(sing_ct_ts, avg_ct_ts):
'''
Displays computation time for each time step
of planning done by the car. Also plots for
every value of t, the average of the first
t computation times.
'''
plt.title("Computational Time")
plt.plot(sing_ct_ts, color = 'r')
plt.plot(avg_ct_ts, color = 'b')
plt.legend(["Single Step", "Average"])
plt.xlabel("Time Step Number")
plt.ylabel("Computational Time")
plt.show()
def default_experiment_params():
'''
Returns a dictionary containing default paramters
for an experiment.
Convention is Car 0 is the robot car, Car 1 is
the human car. Any further cars are either fixed
velocity or we don't bother about.
An experiment may modify the returned dictionary
for any custom parameter settings.
'''
exp_params = dict()
exp_params["Naive"] = {"horizon": 5, "n_iter": 20}
exp_params["Turn"] = {"horizon": 5, "n_iter": 20}
exp_params["Tom"] = {"horizon": 5, "n_iter": 20}
for m in MODELS:
exp_params[m]["h_index"] = 1
return exp_params
def default_visualizer_args(EXP_NAME):
return {'name': EXP_NAME, 'display_model': True,
'display_y': True, 'follow_main_car': True}
def switch_model_pstr(model_name, experiment_params):
# Returns a string describing the model parameters
model_params = experiment_params[model_name]
return f"({model_params['horizon']},{model_params['n_iter']})"
def exp_str(experiment_params, experiment_args):
'''
Return the string capturing the experiment's planning mechanisms
based on the experiment parameters and experiment type.
'''
EXP_STRS = [None] * 11
pstrs = {m: switch_model_pstr(m, experiment_params) for m in MODELS}
# Single Model Experiments
EXP_STRS[0] = f"Naive{pstrs['Naive']}"
EXP_STRS[1] = f"Tu{pstrs['Turn']}"
EXP_STRS[2] = f"Tom{pstrs['Tom']}"
# Model Switching Experiments
EXP_STRS[3] = f"switch({EXP_STRS[0]}, {EXP_STRS[1]})"
EXP_STRS[4] = f"switch({EXP_STRS[1]}, {EXP_STRS[2]})"
EXP_STRS[4] = f"switch({EXP_STRS[0]}, | switching_parameters = {"comp_times": {"Naive": experiment_args.naive_ct,
"Turn": experiment_args.turn_ct,
"Tom": experiment_args.tom_ct},
"cooldowns": {"up": experiment_args.up_cd,
"down": experiment_args.down_cd},
"trust_radius": experiment_args.tr}
world.cars[ms_car_index].initialize_planner()
world.cars[ms_car_index].planner.update_switching_parameteters(switching_parameters)
run_log = dict()
for lambd in experiment_args.lambdas:
print()
print(f"Using Lambda: {lambd}")
world.cars[ms_car_index].planner.update_switching_parameteters({"lambda": float(lambd)})
lambd_log = []
run_time = 0
for i, seed in enumerate(experiment_args.seeds):
if (run_time == 0):
print(f"Running Experiment {i + 1}/{experiment_args.num_run}", end = "\r") | identifier_body |
sectioning.js | cookieStore.get('username'));
//$('#loginModal').modal('hide');
}
$scope.techTable=[];
$scope.tech={};
$http.get(TECHNICIAN_URL_BASE)
.success(function(data) {
//alert("success loading technicians")
$scope.techTable=data;
console.log($scope.techTable);
})
.error(function(data) {
// alert("Errors in loading technicians");
});
$scope.getAssetDetails=function () {
//var tempString = $scope.npNumberValue + ";";
//var test = tempString.substring(0,tempString.indexOf(";"));
//$scope.npNumberValue=test;
//var test= test1.replace("/","|").replace(" ","|");
//var test= $scope.npNumberValue.replace("/","|").replace(" ","|");
//console.log("port 8081");
console.log("NP Number " + $scope.npNumberValue);
console.log(STATION_URL_BASE+"/scan?assetId="+$scope.npNumberValue+"&stationId=4");
var assetId = $scope.npNumberValue;
$http.get(STATION_URL_BASE+"/scan?assetId="+encodeURIComponent(assetId)+"&stationId=4",{headers: {'Content-Type': 'application/json','Content-Type':'text/html'}})
.then(function successCallback(response) {
$scope.asset=response.data;
console.log($scope.asset);
$scope.populateTable();
$scope.getPatientDetails($scope.asset.reqId);
$scope.step1=false;
},function errorCallBack(err) {
if(err.status>=400 && err.status<=500)
alert("Invalid Scan. Please Scan Again");
/*else
alert("Check your Internet Connection");*/
})
}
$scope.getPatientDetails=function(request_id) {
var root = $scope.npNumberValue.substring(0, $scope.npNumberValue.indexOf(":"));
console.log($scope.npNumberValue);
console.log(root);
$http.get(REQUEST_URL_BASE+"/patientdetails?samplerequestid="+request_id)
.success(function (data) {
$scope.patient=data;
});
}
$scope.alreadyExists=function() {
/* if($scope.asset.nextState!=2)
alert("This is not tissue belonging to Sectioning Station. Press Done to Refresh Page");
*/}
$scope.number=1;
$scope.save=function(){
/*for(var i=1;i<=$scope.asset.quantity;i++)
{
if(flag==true){
$scope.checkNpNumber();
flag=false;
}
else
$scope.create();
}*/
console.log("Inside save");
$scope.generateSlides();
console.log("after create");
$scope.asset.quantity=1;
//$scope.populateTable();
}
$scope.generateSlides=function()
{
console.log($scope.npNumberValue+"hghg");
var url=ASSET_URL_BASE+"/addslide?technician="+$scope.username+"&number="+$scope.number+"&stationId=4"+"&quantity="+$scope.asset.quantity;
console.log(url);
$http.post(url,$scope.asset).success(function(data) {
console.log("successs");
$scope.populateTable();
})
/*$scope.populateTable();*/
}
$scope.assetTable=[];
$scope.populateTable=function ()
{
var url = ASSET_URL_BASE+"/getassets?npBase="+$scope.npNumberValue;
console.log("entered populate " + url);
$scope.assetTable=[];
var test= $scope.npNumberValue;//.replace("/","|").replace(" ","|");
$http.get(url)
.success(function (data) {
for(var asset in data)
{
// TODO : Changed this
//if(data[asset].npNumber.match(/^X?[0-9]*[/][0-9]{2}[:][0][0]/) || data[asset].npNumber.match(/^X?[0-9]*[/][0-9]{2}[:]\w+$/)){
//console.log($scope.assetTable[asset].npNumber.substring($scope.assetTable[asset].npNumber.indexOf(":")+1,$scope.assetTable[asset].npNumber.indexOf(":")+3));
$scope.assetTable.push(data[asset]);
//else
console.log(data[asset]);
}
console.log("data & length: "+data,$scope.assetTable.length);
if($scope.assetTable.length==0)
$scope.generateBlocks();
});
}
$scope.edit = {};
$scope.edit=function (editasset)
{
console.log(editasset);
//$('#editModal').modal('show');
$scope.edit.npNumber = editasset.assetId;
$scope.edit.fixative = editasset.fixative;
$scope.edit.biopsy = editasset.biopsy;
$scope.edit.assetType = editasset.assetType;
}
$scope.update=function(){
var biopsy = $scope.edit.biopsy;
var fixative = $scope.edit.fixative;
var assetId = $scope.edit.npNumber.id.value;
console.log("III "+ $scope.edit.npNumber);
var url = ASSET_URL_BASE + "/updateasset" + "?assetId=" + encodeURIComponent(assetId) + "&biopsy=" + biopsy + "&fixative=" + fixative ;
console.log("URLupdate " + url);
$http.put(url ,{headers: {'Content-Type': 'application/json'}})
.then(function successCallback(response) {
$('#editModal').modal('hide');
$scope.populateTable();
//window.location.reload();
}, function errorCallback(response) {
});
}
$scope.printAsset= function (printNp) {
console.log("printing...");
var array = [
{
Np_Number : printNp//.replace("|","/").replace("|"," ").replace("|"," ").replace("|"," ")
}];
console.log(array);
$scope.queue=$scope.queue.concat(array);
//alasql('SELECT * INTO CSV("BlockNpNumber.csv",{headers:true}) FROM ?',[array]);
};
$scope.printAllAssets= function () {
console.log("printing...All");
var array =[];
array=alasql('SELECT npNumber FROM ?',[$scope.assetTable])
var array2=[];
for(var i=0;i<array.length;i++)
{
array2[i]={Np_Number : array[i].npNumber};
}
// console.log(array);
console.log(array2);
alasql('SELECT * INTO CSV("BlockNpNumber.csv",{headers:true}) FROM ?',[array2]);
};
$scope.queue=[];
$scope.addQueue=function(){
if($scope.assetTable.length==0)
alert("There is no bottles scaned")
else
{
console.log("adding to queue");
var array =[];
array=alasql('SELECT npNumber FROM ?',[$scope.assetTable])
var array2=[];
for(var i=0;i<array.length;i++)
{
array2[i]={Np_Number : array[i].npNumber};
}
// console.log(array);
console.log(array2);
$scope.queue=$scope.queue.concat(array2);
console.log($scope.queue);
$scope.clearAll();
}
}
$scope.printQueue=function(){
alasql('SELECT * INTO CSV("BlockNpNumber.csv",{headers:true}) FROM ?',[$scope.queue]);
}
$scope.clearAll= function(){
$scope.task = false;
$scope.npNumberValue='';
$scope.asset='';
$scope.assetData = {};
$scope.patient={};
$scope.step1=true;
$scope.number=1;
$scope.assetTable=[];
}
$scope.delete = function(asset){
var deleteUser = $window.confirm('Are you absolutely sure you want to delete?');
if(deleteUser){
if(asset.currentState==2) // TODO
{
var assetId = asset.assetId.id.value; //.replace("/#/g", "-");
var url = ASSET_URL_BASE + "/deleteasset" + "?assetId=" + encodeURIComponent(assetId);
console.log("URL delete" + url);
$http.delete(url,{headers: {'Content-Type': 'application/json'}})
.then(function successCallback(response) {
$scope.populateTable();
console.log("deleted...");
}, function errorCallback(response) {
});
}
else
alert("The Block has already passed through Embedding. You are not allowed to delete it");
}
}
$scope.scanScreen=function () {
$scope.task = false;
$scope.pendingTissue=false;
$scope.completedTissue=false;
$scope.scanTissue=true;
}
$scope.scanTissue=true;
$scope.assetTasksTable=[];
$scope.getPendingTasks=function ()
{
$scope.scanTissue=false;
$scope.pendingTissue=true;
$scope.completedTissue=false;
$scope.label="Pending Assets";
$scope.assetTasksTable=[];
$scope.task=true;
var url=STATION_URL_BASE+"/pendingassets?stationId=4";
console.log(url); | $http.get(url)
.success(function (data) { | random_line_split |
|
Object_detection_image.py | is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it Suitable for our application
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
import pandas as pd
from os.path import join
#color detection 1 |
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites(utils folder)
from utils import label_map_util
from utils import visualization_utils as vis_util
#CGFC_functions folder
from CGFC_functions import colorDetector as color_Detector
from CGFC_functions import category_Dic
from CGFC_functions import CGFCConfig
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
# Path to image
#PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 24
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
# crop coth items
def cropDetectedCloths(image,bbox):
#Crop image by bbox
ymin = bbox[0]
xmin = bbox[1]
ymax = bbox[2]
xmax = bbox[3]
(im_height,im_width,im_color) = image.shape
(xminn, xmaxx, yminn, ymaxx) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
bboxWidth=xmaxx-xminn
bboxHeight=ymaxx-yminn
crop_img = image[int(yminn+(bboxHeight*1/10)):int(ymaxx-(bboxHeight*1/10)), int(xminn+(bboxWidth*1/10)):int(xmaxx-(bboxWidth*1/10))]
#cv2.imshow("cropped", crop_img)
return crop_img
#use to detect cloths in image
def Detect_Cloths(image):
# image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
output = pd.DataFrame(
{'image' : [image],
'boxes' : [boxes],
'classes' : [classes],
'scores' : [scores] })
#print(output['scores'][0])
return output
#call colors dominent color detector function in CGFC_Functions\colorDetector.js
def colorRecognition(image,bbox):
#color-Recognition 1
dominet_colors=color_Detector.dominant_color_detector(crop_img,3)
#Cloth detection whole process start from here
def ClothDetectionAnalyse(image,tagData,gender):
min_score_thresh=CGFCConfig.min_score_thresh
detectedData=Detect_Cloths(image)
boxes=detectedData['boxes'][0]
scores=detectedData['scores'][0]
classes=detectedData['classes'][0]
print("###################################################################################")
bestResults=[]
bestBBox=[]
bestScores=[]
bestClasses=[]
UpperOrLower=[]
normBBoxes=np.squeeze(boxes)
normScores=np.squeeze(scores)
normClasses=np.squeeze(classes)
isLowerBodyClothAdded=False
isUpperBodyClothAdded=False
for index,className in enumerate(normClasses):
className=category_index[className]['name']
#if score>=min_score_thresh:
#gender based filter
if((gender=='Male') & (className not in category_Dic.Female_Cloths)&(className not in category_Dic.Attributes)):
if((className in category_Dic.UpperBody) & (isUpperBodyClothAdded==False)):
UpperOrLower.append("Upperbody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
print("isUpper male:",className)
isUpperBodyClothAdded=True;
elif((className in category_Dic.LowerBody) & (isLowerBodyClothAdded==False)):
UpperOrLower.append("LowerBody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
isLowerBodyClothAdded=True;
print("isLower male:",className)
if((isLowerBodyClothAdded==True) & (isUpperBodyClothAdded==True)):
break
elif((gender=='Female') & (className not in category_Dic.Attributes)):
if((className in category_Dic.UpperBody) & (isUpperBodyClothAdded==False)):
UpperOrLower.append("Upperbody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
print("isUpper Female :",className)
isUpperBodyClothAdded=True;
elif((className in category_Dic.LowerBody) & (isLowerBodyClothAdded==False)):
UpperOrLower.append("LowerBody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
isLowerBodyClothAdded=True;
print("isLower Female:",className)
if((isLowerBodyClothAdded==True) & (isUpperBodyClothAdded==True)):
break
className=category_index[normClasses[index]]['name']
for index,score in enumerate(normScores):
if ((score>=min_score_thresh) &(className in category_Dic.Attributes)):
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(score)
bestClasses.append(normClasses[index])
crop_image_Data = pd.DataFrame()
for index,bbox in enumerate(bestBBox):
crop_img=cropDetectedCloths(image,bbox)
dominet_colors=color_Detector.dominant_color_detector(crop_img,3)
colors=[]
colorMax=dominet_colors[0]
#print("dominet_colors : ",dominet_colors)
for color in dominet_colors:
#get Only one value
if(color[1]>colorMax[1]):
colorMax=color
className=category_index[bestClasses[index]]['name']
clothType=None
clothStyle=None
if (className in category_Dic.Attributes):
clothType=className
clothStyle=None
else:
clothType,clothStyle=className.split("_")
print("Final color : ", | #import__color recognition
from sklearn.cluster import KMeans
from sklearn import metrics
| random_line_split |
Object_detection_image.py | copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it Suitable for our application
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
import pandas as pd
from os.path import join
#color detection 1
#import__color recognition
from sklearn.cluster import KMeans
from sklearn import metrics
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites(utils folder)
from utils import label_map_util
from utils import visualization_utils as vis_util
#CGFC_functions folder
from CGFC_functions import colorDetector as color_Detector
from CGFC_functions import category_Dic
from CGFC_functions import CGFCConfig
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
# Path to image
#PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 24
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
# crop coth items
def cropDetectedCloths(image,bbox):
#Crop image by bbox
ymin = bbox[0]
xmin = bbox[1]
ymax = bbox[2]
xmax = bbox[3]
(im_height,im_width,im_color) = image.shape
(xminn, xmaxx, yminn, ymaxx) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
bboxWidth=xmaxx-xminn
bboxHeight=ymaxx-yminn
crop_img = image[int(yminn+(bboxHeight*1/10)):int(ymaxx-(bboxHeight*1/10)), int(xminn+(bboxWidth*1/10)):int(xmaxx-(bboxWidth*1/10))]
#cv2.imshow("cropped", crop_img)
return crop_img
#use to detect cloths in image
def Detect_Cloths(image):
# image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
output = pd.DataFrame(
{'image' : [image],
'boxes' : [boxes],
'classes' : [classes],
'scores' : [scores] })
#print(output['scores'][0])
return output
#call colors dominent color detector function in CGFC_Functions\colorDetector.js
def colorRecognition(image,bbox):
#color-Recognition 1
|
#Cloth detection whole process start from here
def ClothDetectionAnalyse(image,tagData,gender):
min_score_thresh=CGFCConfig.min_score_thresh
detectedData=Detect_Cloths(image)
boxes=detectedData['boxes'][0]
scores=detectedData['scores'][0]
classes=detectedData['classes'][0]
print("###################################################################################")
bestResults=[]
bestBBox=[]
bestScores=[]
bestClasses=[]
UpperOrLower=[]
normBBoxes=np.squeeze(boxes)
normScores=np.squeeze(scores)
normClasses=np.squeeze(classes)
isLowerBodyClothAdded=False
isUpperBodyClothAdded=False
for index,className in enumerate(normClasses):
className=category_index[className]['name']
#if score>=min_score_thresh:
#gender based filter
if((gender=='Male') & (className not in category_Dic.Female_Cloths)&(className not in category_Dic.Attributes)):
if((className in category_Dic.UpperBody) & (isUpperBodyClothAdded==False)):
UpperOrLower.append("Upperbody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
print("isUpper male:",className)
isUpperBodyClothAdded=True;
elif((className in category_Dic.LowerBody) & (isLowerBodyClothAdded==False)):
UpperOrLower.append("LowerBody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
isLowerBodyClothAdded=True;
print("isLower male:",className)
if((isLowerBodyClothAdded==True) & (isUpperBodyClothAdded==True)):
break
elif((gender=='Female') & (className not in category_Dic.Attributes)):
if((className in category_Dic.UpperBody) & (isUpperBodyClothAdded==False)):
UpperOrLower.append("Upperbody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
print("isUpper Female :",className)
isUpperBodyClothAdded=True;
elif((className in category_Dic.LowerBody) & (isLowerBodyClothAdded==False)):
UpperOrLower.append("LowerBody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
isLowerBodyClothAdded=True;
print("isLower Female:",className)
if((isLowerBodyClothAdded==True) & (isUpperBodyClothAdded==True)):
break
className=category_index[normClasses[index]]['name']
for index,score in enumerate(normScores):
if ((score>=min_score_thresh) &(className in category_Dic.Attributes)):
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(score)
bestClasses.append(normClasses[index])
crop_image_Data = pd.DataFrame()
for index,bbox in enumerate(bestBBox):
crop_img=cropDetectedCloths(image,bbox)
dominet_colors=color_Detector.dominant_color_detector(crop_img,3)
colors=[]
colorMax=dominet_colors[0]
#print("dominet_colors : ",dominet_colors)
for color in dominet_colors:
#get Only one value
if(color[1]>colorMax[1]):
colorMax=color
className=category_index[bestClasses[index]]['name']
clothType=None
clothStyle=None
if (className in category_Dic.Attributes):
clothType=className
clothStyle=None
else:
clothType,clothStyle=className.split("_")
print("Final color : ", | dominet_colors=color_Detector.dominant_color_detector(crop_img,3) | identifier_body |
Object_detection_image.py | copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it Suitable for our application
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
import pandas as pd
from os.path import join
#color detection 1
#import__color recognition
from sklearn.cluster import KMeans
from sklearn import metrics
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites(utils folder)
from utils import label_map_util
from utils import visualization_utils as vis_util
#CGFC_functions folder
from CGFC_functions import colorDetector as color_Detector
from CGFC_functions import category_Dic
from CGFC_functions import CGFCConfig
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
# Path to image
#PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 24
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
# crop coth items
def cropDetectedCloths(image,bbox):
#Crop image by bbox
ymin = bbox[0]
xmin = bbox[1]
ymax = bbox[2]
xmax = bbox[3]
(im_height,im_width,im_color) = image.shape
(xminn, xmaxx, yminn, ymaxx) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
bboxWidth=xmaxx-xminn
bboxHeight=ymaxx-yminn
crop_img = image[int(yminn+(bboxHeight*1/10)):int(ymaxx-(bboxHeight*1/10)), int(xminn+(bboxWidth*1/10)):int(xmaxx-(bboxWidth*1/10))]
#cv2.imshow("cropped", crop_img)
return crop_img
#use to detect cloths in image
def Detect_Cloths(image):
# image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
output = pd.DataFrame(
{'image' : [image],
'boxes' : [boxes],
'classes' : [classes],
'scores' : [scores] })
#print(output['scores'][0])
return output
#call colors dominent color detector function in CGFC_Functions\colorDetector.js
def colorRecognition(image,bbox):
#color-Recognition 1
dominet_colors=color_Detector.dominant_color_detector(crop_img,3)
#Cloth detection whole process start from here
def | (image,tagData,gender):
min_score_thresh=CGFCConfig.min_score_thresh
detectedData=Detect_Cloths(image)
boxes=detectedData['boxes'][0]
scores=detectedData['scores'][0]
classes=detectedData['classes'][0]
print("###################################################################################")
bestResults=[]
bestBBox=[]
bestScores=[]
bestClasses=[]
UpperOrLower=[]
normBBoxes=np.squeeze(boxes)
normScores=np.squeeze(scores)
normClasses=np.squeeze(classes)
isLowerBodyClothAdded=False
isUpperBodyClothAdded=False
for index,className in enumerate(normClasses):
className=category_index[className]['name']
#if score>=min_score_thresh:
#gender based filter
if((gender=='Male') & (className not in category_Dic.Female_Cloths)&(className not in category_Dic.Attributes)):
if((className in category_Dic.UpperBody) & (isUpperBodyClothAdded==False)):
UpperOrLower.append("Upperbody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
print("isUpper male:",className)
isUpperBodyClothAdded=True;
elif((className in category_Dic.LowerBody) & (isLowerBodyClothAdded==False)):
UpperOrLower.append("LowerBody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
isLowerBodyClothAdded=True;
print("isLower male:",className)
if((isLowerBodyClothAdded==True) & (isUpperBodyClothAdded==True)):
break
elif((gender=='Female') & (className not in category_Dic.Attributes)):
if((className in category_Dic.UpperBody) & (isUpperBodyClothAdded==False)):
UpperOrLower.append("Upperbody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
print("isUpper Female :",className)
isUpperBodyClothAdded=True;
elif((className in category_Dic.LowerBody) & (isLowerBodyClothAdded==False)):
UpperOrLower.append("LowerBody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
isLowerBodyClothAdded=True;
print("isLower Female:",className)
if((isLowerBodyClothAdded==True) & (isUpperBodyClothAdded==True)):
break
className=category_index[normClasses[index]]['name']
for index,score in enumerate(normScores):
if ((score>=min_score_thresh) &(className in category_Dic.Attributes)):
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(score)
bestClasses.append(normClasses[index])
crop_image_Data = pd.DataFrame()
for index,bbox in enumerate(bestBBox):
crop_img=cropDetectedCloths(image,bbox)
dominet_colors=color_Detector.dominant_color_detector(crop_img,3)
colors=[]
colorMax=dominet_colors[0]
#print("dominet_colors : ",dominet_colors)
for color in dominet_colors:
#get Only one value
if(color[1]>colorMax[1]):
colorMax=color
className=category_index[bestClasses[index]]['name']
clothType=None
clothStyle=None
if (className in category_Dic.Attributes):
clothType=className
clothStyle=None
else:
clothType,clothStyle=className.split("_")
print("Final color : | ClothDetectionAnalyse | identifier_name |
Object_detection_image.py | copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it Suitable for our application
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
import pandas as pd
from os.path import join
#color detection 1
#import__color recognition
from sklearn.cluster import KMeans
from sklearn import metrics
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites(utils folder)
from utils import label_map_util
from utils import visualization_utils as vis_util
#CGFC_functions folder
from CGFC_functions import colorDetector as color_Detector
from CGFC_functions import category_Dic
from CGFC_functions import CGFCConfig
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
# Path to image
#PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 24
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
# crop coth items
def cropDetectedCloths(image,bbox):
#Crop image by bbox
ymin = bbox[0]
xmin = bbox[1]
ymax = bbox[2]
xmax = bbox[3]
(im_height,im_width,im_color) = image.shape
(xminn, xmaxx, yminn, ymaxx) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
bboxWidth=xmaxx-xminn
bboxHeight=ymaxx-yminn
crop_img = image[int(yminn+(bboxHeight*1/10)):int(ymaxx-(bboxHeight*1/10)), int(xminn+(bboxWidth*1/10)):int(xmaxx-(bboxWidth*1/10))]
#cv2.imshow("cropped", crop_img)
return crop_img
#use to detect cloths in image
def Detect_Cloths(image):
# image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
output = pd.DataFrame(
{'image' : [image],
'boxes' : [boxes],
'classes' : [classes],
'scores' : [scores] })
#print(output['scores'][0])
return output
#call colors dominent color detector function in CGFC_Functions\colorDetector.js
def colorRecognition(image,bbox):
#color-Recognition 1
dominet_colors=color_Detector.dominant_color_detector(crop_img,3)
#Cloth detection whole process start from here
def ClothDetectionAnalyse(image,tagData,gender):
min_score_thresh=CGFCConfig.min_score_thresh
detectedData=Detect_Cloths(image)
boxes=detectedData['boxes'][0]
scores=detectedData['scores'][0]
classes=detectedData['classes'][0]
print("###################################################################################")
bestResults=[]
bestBBox=[]
bestScores=[]
bestClasses=[]
UpperOrLower=[]
normBBoxes=np.squeeze(boxes)
normScores=np.squeeze(scores)
normClasses=np.squeeze(classes)
isLowerBodyClothAdded=False
isUpperBodyClothAdded=False
for index,className in enumerate(normClasses):
className=category_index[className]['name']
#if score>=min_score_thresh:
#gender based filter
if((gender=='Male') & (className not in category_Dic.Female_Cloths)&(className not in category_Dic.Attributes)):
if((className in category_Dic.UpperBody) & (isUpperBodyClothAdded==False)):
UpperOrLower.append("Upperbody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
print("isUpper male:",className)
isUpperBodyClothAdded=True;
elif((className in category_Dic.LowerBody) & (isLowerBodyClothAdded==False)):
UpperOrLower.append("LowerBody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
isLowerBodyClothAdded=True;
print("isLower male:",className)
if((isLowerBodyClothAdded==True) & (isUpperBodyClothAdded==True)):
break
elif((gender=='Female') & (className not in category_Dic.Attributes)):
if((className in category_Dic.UpperBody) & (isUpperBodyClothAdded==False)):
UpperOrLower.append("Upperbody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
print("isUpper Female :",className)
isUpperBodyClothAdded=True;
elif((className in category_Dic.LowerBody) & (isLowerBodyClothAdded==False)):
UpperOrLower.append("LowerBody")
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(normScores[index])
bestClasses.append(normClasses[index])
isLowerBodyClothAdded=True;
print("isLower Female:",className)
if((isLowerBodyClothAdded==True) & (isUpperBodyClothAdded==True)):
break
className=category_index[normClasses[index]]['name']
for index,score in enumerate(normScores):
|
crop_image_Data = pd.DataFrame()
for index,bbox in enumerate(bestBBox):
crop_img=cropDetectedCloths(image,bbox)
dominet_colors=color_Detector.dominant_color_detector(crop_img,3)
colors=[]
colorMax=dominet_colors[0]
#print("dominet_colors : ",dominet_colors)
for color in dominet_colors:
#get Only one value
if(color[1]>colorMax[1]):
colorMax=color
className=category_index[bestClasses[index]]['name']
clothType=None
clothStyle=None
if (className in category_Dic.Attributes):
clothType=className
clothStyle=None
else:
clothType,clothStyle=className.split("_")
print("Final color : ", | if ((score>=min_score_thresh) &(className in category_Dic.Attributes)):
bestResults.append(index)
bestBBox.append(normBBoxes[index])
bestScores.append(score)
bestClasses.append(normClasses[index]) | conditional_block |
all8a54.js | -unique-id-'+initIterator;
$t.addClass('swiper-'+index + ' initialized').attr('id', index);
$t.find('.pagination').addClass('pagination-'+index);
var autoPlayVar = parseInt($t.attr('data-autoplay'),10);
var slidesPerViewVar = $t.attr('data-slides-per-view');
if(slidesPerViewVar == 'responsive'){
slidesPerViewVar = updateSlidesPerView($t);
}
else slidesPerViewVar = parseInt(slidesPerViewVar,10);
var directionVar = $t.attr('data-direction');
if(!directionVar){ directionVar='horizontal'; }
var loopVar = parseInt($t.attr('data-loop'),10);
var speedVar = parseInt($t.attr('data-speed'),10);
var centerVar = parseInt($t.attr('data-center'),10);
var mousewheelControl = parseInt($t.attr('data-mousewheel-control'),10);
if(!mousewheelControl){ mousewheelControl = 0;}
swipers['swiper-'+index] = new Swiper('.swiper-'+index,{
speed: speedVar,
pagination: '.pagination-'+index,
loop: loopVar,
mode: directionVar,
paginationClickable: true,
autoplay: autoPlayVar,
slidesPerView: slidesPerViewVar,
keyboardControl: true,
calculateHeight: true,
simulateTouch: true,
roundLengths: true,
mousewheelControl: mousewheelControl,
centeredSlides: centerVar,
onInit: function(swiper){
var activeIndex = (loopVar===true)?swiper.activeIndex:swiper.activeLoopIndex;
if($t.closest('.swiper-6').length) {
$('.prev-item').on('click', function(){
var eqIndex = $(this).closest('.wpb_wrapper').find('.prev-item').index(this);
$('.prev-item').removeClass('active');
$(this).addClass('active');
swiper.swipeTo(eqIndex);
swiper.stopAutoplay();
return false;
});
}
},
onSlideChangeStart: function(swiper) {
var activeIndex = (loopVar===true)?swiper.activeIndex:swiper.activeLoopIndex;
$('.count span i').text(activeIndex+1);
if($t.closest('.swiper-two-bg').length){
$t.closest('.wpb_wrapper').find('.bg-wrapp .clip.active').removeClass('active');
$t.closest('.wpb_wrapper').find('.bg-wrapp .clip').eq(activeIndex).addClass('active');
}
if($t.closest('.anime-slide').length){
$t.find('.swiper-slide.active').removeClass('active');
}
},
onSlideChangeEnd: function(swiper){
var activeIndex = (loopVar===true)?swiper.activeIndex:swiper.activeLoopIndex;
if($t.closest('.swiper-6').length){
var eqIndex = $('.prev-item').index(this);
$('.prev-item').removeClass('active');
$t.closest('.wpb_wrapper').find('.prev-item').eq(activeIndex).addClass('active');
}
if($t.closest('.anime-slide').length){
var qVal = $t.find('.swiper-slide-active').attr('data-val');
$t.find('.swiper-slide[data-val="'+qVal+'"]').addClass('active');
}
},
/*new start*/
onFirstInit: function(swiper){
centering();
}
/* new end*/
});
swipers['swiper-'+index].reInit();
if($t.attr('data-slides-per-view')=='responsive'){
var paginationSpan = $t.find('.pagination span');
var paginationSlice = paginationSpan.hide().slice(0,(paginationSpan.length+1-slidesPerViewVar));
if(paginationSlice.length<=1 || slidesPerViewVar>=$t.find('.swiper-slide').length) $t.addClass('pagination-hidden');
else $t.removeClass('pagination-hidden');
paginationSlice.show();
}
initIterator++;
});
$('.swiper-container.connected-to-bottom-swiper').each(function(){
var $t = $(this);
if($t.closest('.testi-wrapper').find('.connected-to-top-swiper').length){
swipers['swiper-'+$t.attr('id')].addCallback('SlideChangeStart', function(swiper){
swipers['swiper-'+$t.closest('.testi-wrapper').find('.connected-to-top-swiper').attr('id')].swipeTo(swiper.activeIndex);
});
}
});
}
$('.swiper-arrow-left').on('click', function(){
swipers['swiper-'+$(this).closest('.arrows').find('.swiper-container').attr('id')].swipePrev();
});
$('.swiper-arrow-right').on('click', function(){
swipers['swiper-'+$(this).closest('.arrows').find('.swiper-container').attr('id')].swipeNext();
});
/*============================*/
/* DROPDOWN */
/*============================*/
$('.nav-menu-icon a').on('click', function() {
if ($('nav').hasClass('slide-menu')){
$('nav').removeClass('slide-menu');
$(this).removeClass('active');
$('body').css({'overflow':'auto'});
}else {
$('nav').addClass('slide-menu');
$(this).addClass('active');
$('body').css({'overflow':'hidden'});
}
return false;
});
$('nav > ul > li').on('click', function(){
if ($(this).find('.dropmenu').hasClass('slidemenu')) {
$(this).find('.dropmenu').removeClass('slidemenu');
}else{
$('nav > ul > li').find('.dropmenu').removeClass('slidemenu');
$(this).find('.dropmenu').addClass('slidemenu');
}
});
/***********************************/
/*VIDEO POPUP*/
/**********************************/
$(document).on('click', '.video-open', function(){
$('.video-player').addClass('active');
var videoSource = $(this).attr('data-src');
$('.video-player iframe').attr('src', videoSource);
$('body').css({'overflow':'hidden'});
});
$('.video-player .close-iframe').on('click', function(){
$('.video-player iframe').attr('src', '');
setTimeout(function(){$('.video-player').removeClass('active');}, 300);
$('body').css({'overflow':'auto'});
});
/*============================*/
/* WINDOW LOAD */
/*============================*/
function IsJsonString(str) {
try {
JSON.parse(str);
} catch (e) {
return false;
}
return true;
}
function get_content(data_query,callback){
$.ajax({
url: data_query.ajax_url,
success: function(data){
if (IsJsonString(data)) {
data = jQuery.parseJSON(data);
data.post_url = data_query.post_url;
} else {
var data_r = {};
data_r.status = 'ok';
data_r.type = 'html';
data_r.content = data;
data = data_r;
}
callback(data);
},
error: function(error){
$('#pop_up').find('.popup').html('<div class="team-desc"><div class="title"><h1>NO CONNECTION</h1></div></div>');
console.log(error);
$('.preload').fadeOut();
$.fancybox( '#pop_up' );
}
});
}
function | (data){
if (data.status == 'ok') {
var popup_cont = '';
if (data.type == 'ajax') {
if (data.thumbnail) popup_cont += data.thumbnail;
popup_cont += '<div class="team-desc">';
popup_cont += ' <div class="title">';
popup_cont += ' <h4>' + data.time + '</h4>';
popup_cont += ' <h2>' + data.title + '</h2>';
popup_cont += data.content;
if(data.comments) popup_cont += data.comments;
popup_cont += ' </div>';
popup_cont += '</div>';
} else {
popup_cont = data.content;
}
$('#pop_up').find('.popup .content').html(popup_cont);
history.pushState(null, null, data.post_url);
$.fancybox( '#pop_up' , {
afterLoad: function () {
if ( window.the_ID) {
initSwiper();
}
},
afterClose: function(){
history.back();
$("body").css("overflow","auto");
},
beforeShow: function(){
var slides = $('.fancybox-placeholder').closest('.swiper-wrapper').find('.swiper-slide'),
count_slide = slides.length,
current_post_id = $('#pop_up').attr('data-post-id'),
first_slide_id = slides.first().attr('data-post-id'),
last_slide_id = slides.last().attr('data-post-id');
$('.blog_arrow').show();
if (count_slide <= 1) {
$('.blog_arrow').hide();
};
if (current_post_id == first_slide_id) $('.blog_arrow-prev').hide();
if (current_post_id == last_slide_id) $('.blog_arrow-next'). | render_content | identifier_name |
all8a54.js |
if ($('.home-slider.anime-slide').length) {
$('.home-slider.anime-slide').closest('.vc_row').addClass('nrg-prod-row-full-height');
};
if ($('.home-slider.arrow-center').length) {
$('.home-slider.arrow-center').closest('.vc_row').addClass('nrg-prod-row-full-height');
};
pageCalculations();
function updateSlidesPerView(swiperContainer){
if(winW>=addPoint) return parseInt(swiperContainer.attr('data-add-slides'),10);
else if(winW>=lgPoint) return parseInt(swiperContainer.attr('data-lg-slides'),10);
else if(winW>=mdPoint) return parseInt(swiperContainer.attr('data-md-slides'),10);
else if(winW>=smPoint) return parseInt(swiperContainer.attr('data-sm-slides'),10);
else return parseInt(swiperContainer.attr('data-xs-slides'),10);
}
function resizeCall(){
pageCalculations();
$('.swiper-container.initialized[data-slides-per-view="responsive"]').each(function(){
var thisSwiper = swipers['swiper-'+$(this).attr('id')], $t = $(this), slidesPerViewVar = updateSlidesPerView($t), centerVar = thisSwiper.params.centeredSlides;
thisSwiper.params.slidesPerView = slidesPerViewVar;
thisSwiper.reInit();
if(!centerVar){
var paginationSpan = $t.find('.pagination span');
var paginationSlice = paginationSpan.hide().slice(0,(paginationSpan.length+1-slidesPerViewVar));
if(paginationSlice.length<=1 || slidesPerViewVar>=$t.find('.swiper-slide').length) $t.addClass('pagination-hidden');
else $t.removeClass('pagination-hidden');
paginationSlice.show();
}
});
}
if(!_ismobile){
$(window).resize(function(){
resizeCall();
});
} else{
window.addEventListener("orientationchange", function() {
resizeCall();
}, false);
}
if ($('.video-iframe').length) {
$('.video-iframe').html('<iframe class="box-size" src="#"></iframe>')
};
/*=====================*/
/* 07 - swiper sliders */
/*=====================*/
function initSwiper(){
var initIterator = 0;
$('.swiper-container').each(function(){
var $t = $(this);
var index = 'swiper-unique-id-'+initIterator;
$t.addClass('swiper-'+index + ' initialized').attr('id', index);
$t.find('.pagination').addClass('pagination-'+index);
var autoPlayVar = parseInt($t.attr('data-autoplay'),10);
var slidesPerViewVar = $t.attr('data-slides-per-view');
if(slidesPerViewVar == 'responsive'){
slidesPerViewVar = updateSlidesPerView($t);
}
else slidesPerViewVar = parseInt(slidesPerViewVar,10);
var directionVar = $t.attr('data-direction');
if(!directionVar){ directionVar='horizontal'; }
var loopVar = parseInt($t.attr('data-loop'),10);
var speedVar = parseInt($t.attr('data-speed'),10);
var centerVar = parseInt($t.attr('data-center'),10);
var mousewheelControl = parseInt($t.attr('data-mousewheel-control'),10);
if(!mousewheelControl){ mousewheelControl = 0;}
swipers['swiper-'+index] = new Swiper('.swiper-'+index,{
speed: speedVar,
pagination: '.pagination-'+index,
loop: loopVar,
mode: directionVar,
paginationClickable: true,
autoplay: autoPlayVar,
slidesPerView: slidesPerViewVar,
keyboardControl: true,
calculateHeight: true,
simulateTouch: true,
roundLengths: true,
mousewheelControl: mousewheelControl,
centeredSlides: centerVar,
onInit: function(swiper){
var activeIndex = (loopVar===true)?swiper.activeIndex:swiper.activeLoopIndex;
if($t.closest('.swiper-6').length) {
$('.prev-item').on('click', function(){
var eqIndex = $(this).closest('.wpb_wrapper').find('.prev-item').index(this);
$('.prev-item').removeClass('active');
$(this).addClass('active');
swiper.swipeTo(eqIndex);
swiper.stopAutoplay();
return false;
});
}
},
onSlideChangeStart: function(swiper) {
var activeIndex = (loopVar===true)?swiper.activeIndex:swiper.activeLoopIndex;
$('.count span i').text(activeIndex+1);
if($t.closest('.swiper-two-bg').length){
$t.closest('.wpb_wrapper').find('.bg-wrapp .clip.active').removeClass('active');
$t.closest('.wpb_wrapper').find('.bg-wrapp .clip').eq(activeIndex).addClass('active');
}
if($t.closest('.anime-slide').length){
$t.find('.swiper-slide.active').removeClass('active');
}
},
onSlideChangeEnd: function(swiper){
var activeIndex = (loopVar===true)?swiper.activeIndex:swiper.activeLoopIndex;
if($t.closest('.swiper-6').length){
var eqIndex = $('.prev-item').index(this);
$('.prev-item').removeClass('active');
$t.closest('.wpb_wrapper').find('.prev-item').eq(activeIndex).addClass('active');
}
if($t.closest('.anime-slide').length){
var qVal = $t.find('.swiper-slide-active').attr('data-val');
$t.find('.swiper-slide[data-val="'+qVal+'"]').addClass('active');
}
},
/*new start*/
onFirstInit: function(swiper){
centering();
}
/* new end*/
});
swipers['swiper-'+index].reInit();
if($t.attr('data-slides-per-view')=='responsive'){
var paginationSpan = $t.find('.pagination span');
var paginationSlice = paginationSpan.hide().slice(0,(paginationSpan.length+1-slidesPerViewVar));
if(paginationSlice.length<=1 || slidesPerViewVar>=$t.find('.swiper-slide').length) $t.addClass('pagination-hidden');
else $t.removeClass('pagination-hidden');
paginationSlice.show();
}
initIterator++;
});
$('.swiper-container.connected-to-bottom-swiper').each(function(){
var $t = $(this);
if($t.closest('.testi-wrapper').find('.connected-to-top-swiper').length){
swipers['swiper-'+$t.attr('id')].addCallback('SlideChangeStart', function(swiper){
swipers['swiper-'+$t.closest('.testi-wrapper').find('.connected-to-top-swiper').attr('id')].swipeTo(swiper.activeIndex);
});
}
});
}
$('.swiper-arrow-left').on('click', function(){
swipers['swiper-'+$(this).closest('.arrows').find('.swiper-container').attr('id')].swipePrev();
});
$('.swiper-arrow-right').on('click', function(){
swipers['swiper-'+$(this).closest('.arrows').find('.swiper-container').attr('id')].swipeNext();
});
/*============================*/
/* DROPDOWN */
/*============================*/
$('.nav-menu-icon a').on('click', function() {
if ($('nav').hasClass('slide-menu')){
$('nav').removeClass('slide-menu');
$(this).removeClass('active');
$('body').css({'overflow':'auto'});
}else {
$('nav').addClass('slide-menu');
$(this).addClass('active');
$('body').css({'overflow':'hidden'});
}
return false;
});
$('nav > ul > li').on('click', function(){
if ($(this).find('.dropmenu').hasClass('slidemenu')) {
$(this).find('.dropmenu').removeClass('slidemenu');
}else{
$('nav > ul > li').find('.dropmenu').removeClass('slidemenu');
$(this).find('.dropmenu').addClass('slidemenu');
}
});
/***********************************/
/*VIDEO POPUP*/
/**********************************/
$(document).on('click', '.video-open', function(){
$('.video-player').addClass('active');
var videoSource = $(this).attr('data-src');
$('.video-player iframe').attr('src', videoSource);
$('body').css({'overflow':'hidden'});
});
$('.video-player .close-iframe').on('click', function(){
$('.video-player iframe').attr('src', '');
setTimeout(function(){$('.video-player').removeClass('active');}, 300);
$('body').css({'overflow':'auto'});
});
/*============================*/
/* WINDOW LOAD */
/*============================*/
function IsJsonString(str) {
try {
JSON.parse(str);
} catch (e) {
return false;
}
return true;
}
function get_content(data_query,callback){
$.ajax({
url: data_query.ajax_url,
success | {
winW = $(window).width();
winH = $(window).height();
} | identifier_body |
|
all8a54.js | _content(data_query,callback){
$.ajax({
url: data_query.ajax_url,
success: function(data){
if (IsJsonString(data)) {
data = jQuery.parseJSON(data);
data.post_url = data_query.post_url;
} else {
var data_r = {};
data_r.status = 'ok';
data_r.type = 'html';
data_r.content = data;
data = data_r;
}
callback(data);
},
error: function(error){
$('#pop_up').find('.popup').html('<div class="team-desc"><div class="title"><h1>NO CONNECTION</h1></div></div>');
console.log(error);
$('.preload').fadeOut();
$.fancybox( '#pop_up' );
}
});
}
function render_content(data){
if (data.status == 'ok') {
var popup_cont = '';
if (data.type == 'ajax') {
if (data.thumbnail) popup_cont += data.thumbnail;
popup_cont += '<div class="team-desc">';
popup_cont += ' <div class="title">';
popup_cont += ' <h4>' + data.time + '</h4>';
popup_cont += ' <h2>' + data.title + '</h2>';
popup_cont += data.content;
if(data.comments) popup_cont += data.comments;
popup_cont += ' </div>';
popup_cont += '</div>';
} else {
popup_cont = data.content;
}
$('#pop_up').find('.popup .content').html(popup_cont);
history.pushState(null, null, data.post_url);
$.fancybox( '#pop_up' , {
afterLoad: function () {
if ( window.the_ID) {
initSwiper();
}
},
afterClose: function(){
history.back();
$("body").css("overflow","auto");
},
beforeShow: function(){
var slides = $('.fancybox-placeholder').closest('.swiper-wrapper').find('.swiper-slide'),
count_slide = slides.length,
current_post_id = $('#pop_up').attr('data-post-id'),
first_slide_id = slides.first().attr('data-post-id'),
last_slide_id = slides.last().attr('data-post-id');
$('.blog_arrow').show();
if (count_slide <= 1) {
$('.blog_arrow').hide();
};
if (current_post_id == first_slide_id) $('.blog_arrow-prev').hide();
if (current_post_id == last_slide_id) $('.blog_arrow-next').hide();
},
afterShow: function(){
$("body").css("overflow","hidden");
$('.preload').fadeOut();
},
helpers: {
title : { type : 'inside' },
overlay: {
locked: false
}
}
} );
} else {
$('#pop_up').find('.popup').html('<div class="team-desc"><div class="title"><h1>'+data.error+'</h1></div></div>');
$('.preload').fadeOut();
$.fancybox( '#pop_up');
}
}
if ($(".fancybox").length){
// open popup. use fancybox
$(document).on('click','.fancybox', function(){
$.fancybox.close();
if (this.href.indexOf("#team") != '-1') {
$.fancybox( '#'+this.hash , {
afterLoad: function () {
initSwiper();
},
helpers: {
title: { type : 'inside' }
}
});
return false;
};
var data_query= {};
data_query.post_url = this.href;
data_query.ajax_url = $(this).attr('data-ajax-url');
var active_post_id = $(this).closest('.swiper-slide').attr('data-post-id');
$('#pop_up').attr('data-post-id', active_post_id );
if (!$('.preload').is(':visible')) {
$('.preload').css('background-color', 'rgba(26, 26, 26, 0.8)').fadeIn();
}
get_content(data_query,
// callback
function(data){
render_content(data);
}
);
return false;
});
$(document).on('click',".blog_arrow",function(){
var current_slide_id = $('#pop_up').attr('data-post-id'),
current_slide = $('.swiper-slide[data-post-id='+current_slide_id+']');
var second_slide = ( $(this).hasClass('blog_arrow-prev') ) ? current_slide.prev('.swiper-slide') : current_slide.next('.swiper-slide');
if (second_slide.length) {
second_slide.find('a.fancybox').trigger('click');
$(this).show();
} else {
$(this).hide();
}
});
}
$(window).on('load', function(){
// load popup content for single
if (window.the_ID) {
if ($('.swiper-slide[data-post-id='+the_ID()+']').length) {
$('.swiper-slide[data-post-id='+the_ID()+']').find('a.fancybox').trigger('click');
if (!$('.preload').is(':visible')) {
$('.preload').fadeIn();
}
} else {
$.fancybox.close();
if (this.href.indexOf("#team") != '-1') {
$.fancybox( '#'+this.hash , {
afterLoad: function () {
initSwiper();
},
helpers: {
title: { type : 'inside' }
}
});
return false;
};
var data_query= {};
data_query.post_url = this.href;
data_query.ajax_url = $(this).attr('data-ajax-url');
var active_post_id = $(this).closest('.swiper-slide').attr('data-post-id');
$('#pop_up').attr('data-post-id', active_post_id );
//$(this).addClass('active-fancy-slide');
if (!$('.preload').is(':visible')) {
$('.preload').css('background-color', 'rgba(26, 26, 26, 0.8)').fadeIn();
}
get_content(data_query,
// callback
function(data){
render_content(data);
}
);
}
} else {
if ($('body').hasClass('single')) {
};
initSwiper();
$('.preload').fadeOut();
}
});
//Sidebar
$('.show-sidebar').click(function(){
var sidebar = $('#tertiary');
var button = $('.show-out');
sidebar.toggleClass('open');
button.toggleClass('open');
button.find('i').toggleClass('fa-angle-left').toggleClass('fa-angle-right');
if (sidebar.hasClass('open')){
sidebar.animate({'right': 0});
button.animate({'right': '320px'});
} else {
sidebar.animate({'right': '-320px'});
button.animate({'right': 0});
}
});
$('.menu-item-has-children .item_arrow, a.menu-item-has-children[href="#"]').on('touchstart click', function(){
$(this).closest('li').find(' > .sub-menu').toggleClass('active');
$(this).toggleClass('fa-plus fa-minus');
return false;
});
if ($('.clip.active').length) {
if (winW > 991) {
$('.hide-content > .clip > .bg.bg-bg-chrome.act').hide();
} else {
$('.hide-content > .clip > .bg.bg-bg-chrome.act').show();
}
}
function centering() {
var body = $('body');
if ( body.hasClass('single') ) return;
if ( body.hasClass('blog') ) return;
if ( body.hasClass('search') ) return;
$('.home-slider').each(function(index, el) {
var $el = $(el),
_half_height = $el.outerHeight()/2,
_half_width = ($el.outerWidth()/2);
$el.css({
'margin-top': - _half_height,
'margin-left': - _half_width
});
});
}
$(window).resize(function() {
/* Act on the event */
centering();
});
$(window).load(function() {
/* Act on the event */
centering();
});
//AJAX
if (window.load_more_post !== undefined) | {
var pageNum = parseInt(load_more_post.startPage) + 1;
// The maximum number of pages the current query can return.
var max = parseInt(load_more_post.maxPages);
// The link of the next page of posts.
var nextLink = load_more_post.nextLink;
$('.load-more').on('click', function () {
if(pageNum <= max) {
// Show that we're working.
$('.icon-load',this).addClass('load');
$('<div>').load(nextLink + ' .news-slider',
function() {
$('.home-slider.fullheight .swiper-wrapper').append($(this).find('.swiper-wrapper').html()); | conditional_block |
|
all8a54.js | if(slidesPerViewVar == 'responsive'){
slidesPerViewVar = updateSlidesPerView($t);
}
else slidesPerViewVar = parseInt(slidesPerViewVar,10);
var directionVar = $t.attr('data-direction');
if(!directionVar){ directionVar='horizontal'; }
var loopVar = parseInt($t.attr('data-loop'),10);
var speedVar = parseInt($t.attr('data-speed'),10);
var centerVar = parseInt($t.attr('data-center'),10);
var mousewheelControl = parseInt($t.attr('data-mousewheel-control'),10);
if(!mousewheelControl){ mousewheelControl = 0;}
swipers['swiper-'+index] = new Swiper('.swiper-'+index,{
speed: speedVar,
pagination: '.pagination-'+index,
loop: loopVar,
mode: directionVar,
paginationClickable: true,
autoplay: autoPlayVar,
slidesPerView: slidesPerViewVar,
keyboardControl: true,
calculateHeight: true,
simulateTouch: true,
roundLengths: true,
mousewheelControl: mousewheelControl,
centeredSlides: centerVar,
onInit: function(swiper){
var activeIndex = (loopVar===true)?swiper.activeIndex:swiper.activeLoopIndex;
if($t.closest('.swiper-6').length) {
$('.prev-item').on('click', function(){
var eqIndex = $(this).closest('.wpb_wrapper').find('.prev-item').index(this);
$('.prev-item').removeClass('active');
$(this).addClass('active');
swiper.swipeTo(eqIndex);
swiper.stopAutoplay();
return false;
});
}
},
onSlideChangeStart: function(swiper) {
var activeIndex = (loopVar===true)?swiper.activeIndex:swiper.activeLoopIndex;
$('.count span i').text(activeIndex+1);
if($t.closest('.swiper-two-bg').length){
$t.closest('.wpb_wrapper').find('.bg-wrapp .clip.active').removeClass('active');
$t.closest('.wpb_wrapper').find('.bg-wrapp .clip').eq(activeIndex).addClass('active');
}
if($t.closest('.anime-slide').length){
$t.find('.swiper-slide.active').removeClass('active');
}
},
onSlideChangeEnd: function(swiper){
var activeIndex = (loopVar===true)?swiper.activeIndex:swiper.activeLoopIndex;
if($t.closest('.swiper-6').length){
var eqIndex = $('.prev-item').index(this);
$('.prev-item').removeClass('active');
$t.closest('.wpb_wrapper').find('.prev-item').eq(activeIndex).addClass('active');
}
if($t.closest('.anime-slide').length){
var qVal = $t.find('.swiper-slide-active').attr('data-val');
$t.find('.swiper-slide[data-val="'+qVal+'"]').addClass('active');
}
},
/*new start*/
onFirstInit: function(swiper){
centering();
}
/* new end*/
});
swipers['swiper-'+index].reInit();
if($t.attr('data-slides-per-view')=='responsive'){
var paginationSpan = $t.find('.pagination span');
var paginationSlice = paginationSpan.hide().slice(0,(paginationSpan.length+1-slidesPerViewVar));
if(paginationSlice.length<=1 || slidesPerViewVar>=$t.find('.swiper-slide').length) $t.addClass('pagination-hidden');
else $t.removeClass('pagination-hidden');
paginationSlice.show();
}
initIterator++;
});
$('.swiper-container.connected-to-bottom-swiper').each(function(){
var $t = $(this);
if($t.closest('.testi-wrapper').find('.connected-to-top-swiper').length){
swipers['swiper-'+$t.attr('id')].addCallback('SlideChangeStart', function(swiper){
swipers['swiper-'+$t.closest('.testi-wrapper').find('.connected-to-top-swiper').attr('id')].swipeTo(swiper.activeIndex);
});
}
});
}
$('.swiper-arrow-left').on('click', function(){
swipers['swiper-'+$(this).closest('.arrows').find('.swiper-container').attr('id')].swipePrev();
});
$('.swiper-arrow-right').on('click', function(){
swipers['swiper-'+$(this).closest('.arrows').find('.swiper-container').attr('id')].swipeNext();
});
/*============================*/
/* DROPDOWN */
/*============================*/
$('.nav-menu-icon a').on('click', function() {
if ($('nav').hasClass('slide-menu')){
$('nav').removeClass('slide-menu');
$(this).removeClass('active');
$('body').css({'overflow':'auto'});
}else {
$('nav').addClass('slide-menu');
$(this).addClass('active');
$('body').css({'overflow':'hidden'});
}
return false;
});
$('nav > ul > li').on('click', function(){
if ($(this).find('.dropmenu').hasClass('slidemenu')) {
$(this).find('.dropmenu').removeClass('slidemenu');
}else{
$('nav > ul > li').find('.dropmenu').removeClass('slidemenu');
$(this).find('.dropmenu').addClass('slidemenu');
}
});
/***********************************/
/*VIDEO POPUP*/
/**********************************/
$(document).on('click', '.video-open', function(){
$('.video-player').addClass('active');
var videoSource = $(this).attr('data-src');
$('.video-player iframe').attr('src', videoSource);
$('body').css({'overflow':'hidden'});
});
$('.video-player .close-iframe').on('click', function(){
$('.video-player iframe').attr('src', '');
setTimeout(function(){$('.video-player').removeClass('active');}, 300);
$('body').css({'overflow':'auto'});
});
/*============================*/
/* WINDOW LOAD */
/*============================*/
function IsJsonString(str) {
try {
JSON.parse(str);
} catch (e) {
return false;
}
return true;
}
function get_content(data_query,callback){
$.ajax({
url: data_query.ajax_url,
success: function(data){
if (IsJsonString(data)) {
data = jQuery.parseJSON(data);
data.post_url = data_query.post_url;
} else {
var data_r = {};
data_r.status = 'ok';
data_r.type = 'html';
data_r.content = data;
data = data_r;
}
callback(data);
},
error: function(error){
$('#pop_up').find('.popup').html('<div class="team-desc"><div class="title"><h1>NO CONNECTION</h1></div></div>');
console.log(error);
$('.preload').fadeOut();
$.fancybox( '#pop_up' );
}
});
}
function render_content(data){
if (data.status == 'ok') {
var popup_cont = '';
if (data.type == 'ajax') {
if (data.thumbnail) popup_cont += data.thumbnail;
popup_cont += '<div class="team-desc">';
popup_cont += ' <div class="title">';
popup_cont += ' <h4>' + data.time + '</h4>';
popup_cont += ' <h2>' + data.title + '</h2>';
popup_cont += data.content;
if(data.comments) popup_cont += data.comments;
popup_cont += ' </div>';
popup_cont += '</div>';
} else {
popup_cont = data.content;
}
$('#pop_up').find('.popup .content').html(popup_cont);
history.pushState(null, null, data.post_url);
$.fancybox( '#pop_up' , {
afterLoad: function () {
if ( window.the_ID) {
initSwiper();
}
},
afterClose: function(){
history.back();
$("body").css("overflow","auto");
},
beforeShow: function(){
var slides = $('.fancybox-placeholder').closest('.swiper-wrapper').find('.swiper-slide'),
count_slide = slides.length,
current_post_id = $('#pop_up').attr('data-post-id'),
first_slide_id = slides.first().attr('data-post-id'),
last_slide_id = slides.last().attr('data-post-id');
$('.blog_arrow').show();
if (count_slide <= 1) {
$('.blog_arrow').hide();
};
if (current_post_id == first_slide_id) $('.blog_arrow-prev').hide();
if (current_post_id == last_slide_id) $('.blog_arrow-next').hide();
},
afterShow: function(){
$("body").css("overflow","hidden");
$('.preload').fadeOut();
},
helpers: {
title : { type : 'inside' },
overlay: {
locked: false
} | }
} );
} else { | random_line_split |
|
test_dbinterface.py | %s' % __name__)
def tearDownModule():
"""Tear down module after all TestCases are run."""
pass
# logPoint('module %s' % __name__)
class TestDBInterface(unittest.TestCase):
PORT = 29101
HOST = 'localhost'
EXP_ID = 'TEST_EXP_ID'
DATABASE_NAME = 'TFUTILS_TESTDB'
COLLECTION_NAME = 'TFUTILS_TESTCOL'
CACHE_DIR = 'TFUTILS_TEST_CACHE_DIR'
@classmethod
def setUpClass(cls):
"""Set up class once before any test methods are run."""
cls.setup_log()
cls.setup_conn()
cls.setup_cache()
cls.setup_params()
@classmethod
def tearDownClass(cls):
"""Tear down class after all test methods have run."""
cls.remove_directory(cls.CACHE_DIR)
cls.remove_database(cls.DATABASE_NAME)
# Close primary MongoDB connection.
cls.conn.close()
def setUp(self):
"""Set up class before _each_ test method is executed.
Creates a tensorflow session and instantiates a dbinterface.
"""
self.setup_model()
self.sess = tf.Session(
config=tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True),
log_device_placement=self.params['log_device_placement'],
))
# TODO: Determine whether this should be called here or
# in dbinterface.initialize()
self.sess.run(tf.global_variables_initializer())
self.dbinterface = DBInterface(
sess=self.sess,
params=self.params,
cache_dir=self.CACHE_DIR,
save_params=self.save_params,
load_params=self.load_params)
self.step = 0
def tearDown(self):
"""Tear Down is called after _each_ test method is executed."""
self.sess.close()
@unittest.skip("skipping")
def test_init(self):
# TODO: Test all permutations of __init__ params.
pass
@unittest.skip("skipping")
def test_load_rec(self):
pass
@unittest.skip("skipping")
def test_initialize(self):
pass
def test_get_restore_vars(self):
# First, train model and save a checkpoint
self.train_model() # weights_name='Weights'
saved_path = self.save_test_checkpoint()
# Create a new model with different variable names.
self.setup_model(weights_name='Filters')
# Reset var_list in DBInterface
self.dbinterface.var_list = {
var.op.name: var for var in tf.global_variables()}
# Restore first checkpoint vars.
mapping = {'Weights': 'Filters'}
self.dbinterface.load_param_dict = mapping
restore_vars = self.dbinterface.get_restore_vars(saved_path)
self.log.info('restore_vars:')
for name, var in restore_vars.items():
if name in mapping.keys():
|
def test_filter_var_list(self):
var_list = {var.op.name: var for var in tf.global_variables()}
# Test None
self.dbinterface.to_restore = None
filtered_var_list = self.dbinterface.filter_var_list(var_list)
self.assertEqual(filtered_var_list, var_list)
# Test list of strings
self.dbinterface.to_restore = ['Weights']
filtered_var_list = self.dbinterface.filter_var_list(var_list)
for name, var in filtered_var_list.items():
self.assertIn(name, ['Weights'])
self.assertNotIn(name, ['Bias', 'global_step'])
# Test regex
self.dbinterface.to_restore = re.compile(r'Bias')
filtered_var_list = self.dbinterface.filter_var_list(var_list)
for name, var in filtered_var_list.items():
self.assertIn(name, ['Bias'])
self.assertNotIn(name, ['Weights', 'global_step'])
# Test invalid type (should raise TypeError)
self.dbinterface.to_restore = {'invalid_key': 'invalid_value'}
with self.assertRaises(TypeError):
filtered_var_list = self.dbinterface.filter_var_list(var_list)
@unittest.skip("skipping")
def test_tf_saver(self):
pass
@unittest.skip("skipping")
def test_load_from_db(self):
pass
@unittest.skip("skipping")
def test_save(self):
self.dbinterface.initialize()
self.dbinterface.start_time_step = time.time()
train_res = self.train_model(num_steps=100)
self.dbinterface.save(train_res=train_res, step=self.step)
@unittest.skip("skipping")
def test_sync_with_host(self):
pass
@unittest.skip("skipping")
def test_save_thread(self):
pass
@unittest.skip("skipping")
def test_initialize_from_ckpt(self):
save_path = self.save_test_checkpoint()
self.load_test_checkpoint(save_path)
def train_model(self, num_steps=100):
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
x = tf.get_default_graph().get_tensor_by_name('x:0')
y = tf.get_default_graph().get_tensor_by_name('y:0')
feed_dict = {x: x_train, y: y_train}
pre_global_step = self.sess.run(self.global_step)
for step in range(num_steps):
train_res = self.sess.run(self.train_targets, feed_dict=feed_dict)
self.log.info('Step: {}, loss: {}'.format(step, train_res['loss']))
post_global_step = self.sess.run(self.global_step)
self.assertEqual(pre_global_step + num_steps, post_global_step)
self.step += num_steps
return train_res
def save_test_checkpoint(self):
self.log.info('Saving checkpoint to {}'.format(self.save_path))
saved_checkpoint_path = self.dbinterface.tf_saver.save(self.sess,
save_path=self.save_path,
global_step=self.global_step,
write_meta_graph=False)
self.log.info('Checkpoint saved to {}'.format(saved_checkpoint_path))
return saved_checkpoint_path
def load_test_checkpoint(self, save_path):
reader = tf.train.NewCheckpointReader(save_path)
saved_shapes = reader.get_variable_to_shape_map()
self.log.info('Saved Vars:\n' + str(saved_shapes.keys()))
for name in saved_shapes.keys():
self.log.info(
'Name: {}, Tensor: {}'.format(name, reader.get_tensor(name)))
def setup_model(self, weights_name='Weights', bias_name='Bias'):
"""Set up simple tensorflow model."""
tf.reset_default_graph()
self.global_step = tf.get_variable(
'global_step', [],
dtype=tf.int64, trainable=False,
initializer=tf.constant_initializer(0))
# Model parameters and placeholders.
x = tf.placeholder(tf.float32, name='x')
y = tf.placeholder(tf.float32, name='y')
W = tf.get_variable(weights_name, [1], dtype=tf.float32)
b = tf.get_variable(bias_name, [1], dtype=tf.float32)
# Model output, loss and optimizer.
linear_model = W * x + b
loss = tf.reduce_sum(tf.square(linear_model - y))
optimizer_base = tf.train.GradientDescentOptimizer(0.01)
# Model train op.
optimizer = optimizer_base.minimize(
loss, global_step=self.global_step)
# Train targets.
self.train_targets = {'loss': loss,
'optimizer': optimizer}
@classmethod
def setup_log(cls):
cls.log = logging.getLogger(':'.join([__name__, cls.__name__]))
cls.log.setLevel('DEBUG')
@classmethod
def setup_conn(cls):
cls.conn = pymongo.MongoClient(host=cls.HOST, port=cls.PORT)
@classmethod
def setup_cache(cls):
cls.cache_dir = os.path.join(cls.CACHE_DIR,
'%s:%d' % (cls.HOST, cls.PORT),
cls.DATABASE_NAME,
cls.COLLECTION_NAME,
cls.EXP_ID)
cls.makedirs(cls.cache_dir)
cls.save_path = os.path.join(cls.cache_dir, 'checkpoint')
@classmethod
def setup_params(cls):
cls.model_params = {'func': model.mnist_tfutils_new,
'devices': ['/gpu:0', '/gpu:1'],
'prefix': 'model_0'}
cls.save_params = {
'host': cls.HOST,
'port': cls.PORT,
'dbname': cls.DATABASE_NAME,
'collname': cls.COLLECTION_NAME,
'exp_id': cls.EXP_ID,
'save_valid_freq': 20,
'save_filters_freq': 200,
'cache_filters_freq': 100}
cls.train_params = {
'data_params': {'func': data.build_data,
'batch_size': 100,
'group': 'train',
'directory': TFUTILS_HOME},
'num_steps': 500}
cls.loss_params = {
'targets': ['labels'],
'agg_func': tf.reduce_mean,
'loss_per_case_func': tf.nn.sparse_softmax_cross_entropy_with_logits}
cls.load_params = {'do_restore': True}
cls.optimizer_params = {'func': optimizer.ClipOptimizer,
'optimizer_class': tf.train.MomentumOptimizer,
'clip': True,
'optimizer_kwargs':{'momentum': 0.9}}
cls.learning_rate_params = {'learning_rate': 0.05,
'decay_steps': | self.log.info('(name, var.name): ({}, {})'.format(name, var.name))
self.assertEqual(var.op.name, mapping[name]) | conditional_block |
test_dbinterface.py | %s' % __name__)
def tearDownModule():
"""Tear down module after all TestCases are run."""
pass
# logPoint('module %s' % __name__)
class TestDBInterface(unittest.TestCase):
PORT = 29101
HOST = 'localhost'
EXP_ID = 'TEST_EXP_ID'
DATABASE_NAME = 'TFUTILS_TESTDB'
COLLECTION_NAME = 'TFUTILS_TESTCOL'
CACHE_DIR = 'TFUTILS_TEST_CACHE_DIR'
@classmethod
def setUpClass(cls):
"""Set up class once before any test methods are run."""
cls.setup_log()
cls.setup_conn()
cls.setup_cache()
cls.setup_params()
@classmethod
def tearDownClass(cls):
"""Tear down class after all test methods have run."""
cls.remove_directory(cls.CACHE_DIR)
cls.remove_database(cls.DATABASE_NAME)
# Close primary MongoDB connection.
cls.conn.close()
def setUp(self):
"""Set up class before _each_ test method is executed. | """
self.setup_model()
self.sess = tf.Session(
config=tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True),
log_device_placement=self.params['log_device_placement'],
))
# TODO: Determine whether this should be called here or
# in dbinterface.initialize()
self.sess.run(tf.global_variables_initializer())
self.dbinterface = DBInterface(
sess=self.sess,
params=self.params,
cache_dir=self.CACHE_DIR,
save_params=self.save_params,
load_params=self.load_params)
self.step = 0
def tearDown(self):
"""Tear Down is called after _each_ test method is executed."""
self.sess.close()
@unittest.skip("skipping")
def test_init(self):
# TODO: Test all permutations of __init__ params.
pass
@unittest.skip("skipping")
def test_load_rec(self):
pass
@unittest.skip("skipping")
def test_initialize(self):
pass
def test_get_restore_vars(self):
# First, train model and save a checkpoint
self.train_model() # weights_name='Weights'
saved_path = self.save_test_checkpoint()
# Create a new model with different variable names.
self.setup_model(weights_name='Filters')
# Reset var_list in DBInterface
self.dbinterface.var_list = {
var.op.name: var for var in tf.global_variables()}
# Restore first checkpoint vars.
mapping = {'Weights': 'Filters'}
self.dbinterface.load_param_dict = mapping
restore_vars = self.dbinterface.get_restore_vars(saved_path)
self.log.info('restore_vars:')
for name, var in restore_vars.items():
if name in mapping.keys():
self.log.info('(name, var.name): ({}, {})'.format(name, var.name))
self.assertEqual(var.op.name, mapping[name])
def test_filter_var_list(self):
var_list = {var.op.name: var for var in tf.global_variables()}
# Test None
self.dbinterface.to_restore = None
filtered_var_list = self.dbinterface.filter_var_list(var_list)
self.assertEqual(filtered_var_list, var_list)
# Test list of strings
self.dbinterface.to_restore = ['Weights']
filtered_var_list = self.dbinterface.filter_var_list(var_list)
for name, var in filtered_var_list.items():
self.assertIn(name, ['Weights'])
self.assertNotIn(name, ['Bias', 'global_step'])
# Test regex
self.dbinterface.to_restore = re.compile(r'Bias')
filtered_var_list = self.dbinterface.filter_var_list(var_list)
for name, var in filtered_var_list.items():
self.assertIn(name, ['Bias'])
self.assertNotIn(name, ['Weights', 'global_step'])
# Test invalid type (should raise TypeError)
self.dbinterface.to_restore = {'invalid_key': 'invalid_value'}
with self.assertRaises(TypeError):
filtered_var_list = self.dbinterface.filter_var_list(var_list)
@unittest.skip("skipping")
def test_tf_saver(self):
pass
@unittest.skip("skipping")
def test_load_from_db(self):
pass
@unittest.skip("skipping")
def test_save(self):
self.dbinterface.initialize()
self.dbinterface.start_time_step = time.time()
train_res = self.train_model(num_steps=100)
self.dbinterface.save(train_res=train_res, step=self.step)
@unittest.skip("skipping")
def test_sync_with_host(self):
pass
@unittest.skip("skipping")
def test_save_thread(self):
pass
@unittest.skip("skipping")
def test_initialize_from_ckpt(self):
save_path = self.save_test_checkpoint()
self.load_test_checkpoint(save_path)
def train_model(self, num_steps=100):
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
x = tf.get_default_graph().get_tensor_by_name('x:0')
y = tf.get_default_graph().get_tensor_by_name('y:0')
feed_dict = {x: x_train, y: y_train}
pre_global_step = self.sess.run(self.global_step)
for step in range(num_steps):
train_res = self.sess.run(self.train_targets, feed_dict=feed_dict)
self.log.info('Step: {}, loss: {}'.format(step, train_res['loss']))
post_global_step = self.sess.run(self.global_step)
self.assertEqual(pre_global_step + num_steps, post_global_step)
self.step += num_steps
return train_res
def save_test_checkpoint(self):
self.log.info('Saving checkpoint to {}'.format(self.save_path))
saved_checkpoint_path = self.dbinterface.tf_saver.save(self.sess,
save_path=self.save_path,
global_step=self.global_step,
write_meta_graph=False)
self.log.info('Checkpoint saved to {}'.format(saved_checkpoint_path))
return saved_checkpoint_path
def load_test_checkpoint(self, save_path):
reader = tf.train.NewCheckpointReader(save_path)
saved_shapes = reader.get_variable_to_shape_map()
self.log.info('Saved Vars:\n' + str(saved_shapes.keys()))
for name in saved_shapes.keys():
self.log.info(
'Name: {}, Tensor: {}'.format(name, reader.get_tensor(name)))
def setup_model(self, weights_name='Weights', bias_name='Bias'):
"""Set up simple tensorflow model."""
tf.reset_default_graph()
self.global_step = tf.get_variable(
'global_step', [],
dtype=tf.int64, trainable=False,
initializer=tf.constant_initializer(0))
# Model parameters and placeholders.
x = tf.placeholder(tf.float32, name='x')
y = tf.placeholder(tf.float32, name='y')
W = tf.get_variable(weights_name, [1], dtype=tf.float32)
b = tf.get_variable(bias_name, [1], dtype=tf.float32)
# Model output, loss and optimizer.
linear_model = W * x + b
loss = tf.reduce_sum(tf.square(linear_model - y))
optimizer_base = tf.train.GradientDescentOptimizer(0.01)
# Model train op.
optimizer = optimizer_base.minimize(
loss, global_step=self.global_step)
# Train targets.
self.train_targets = {'loss': loss,
'optimizer': optimizer}
@classmethod
def setup_log(cls):
cls.log = logging.getLogger(':'.join([__name__, cls.__name__]))
cls.log.setLevel('DEBUG')
@classmethod
def setup_conn(cls):
cls.conn = pymongo.MongoClient(host=cls.HOST, port=cls.PORT)
@classmethod
def setup_cache(cls):
cls.cache_dir = os.path.join(cls.CACHE_DIR,
'%s:%d' % (cls.HOST, cls.PORT),
cls.DATABASE_NAME,
cls.COLLECTION_NAME,
cls.EXP_ID)
cls.makedirs(cls.cache_dir)
cls.save_path = os.path.join(cls.cache_dir, 'checkpoint')
@classmethod
def setup_params(cls):
cls.model_params = {'func': model.mnist_tfutils_new,
'devices': ['/gpu:0', '/gpu:1'],
'prefix': 'model_0'}
cls.save_params = {
'host': cls.HOST,
'port': cls.PORT,
'dbname': cls.DATABASE_NAME,
'collname': cls.COLLECTION_NAME,
'exp_id': cls.EXP_ID,
'save_valid_freq': 20,
'save_filters_freq': 200,
'cache_filters_freq': 100}
cls.train_params = {
'data_params': {'func': data.build_data,
'batch_size': 100,
'group': 'train',
'directory': TFUTILS_HOME},
'num_steps': 500}
cls.loss_params = {
'targets': ['labels'],
'agg_func': tf.reduce_mean,
'loss_per_case_func': tf.nn.sparse_softmax_cross_entropy_with_logits}
cls.load_params = {'do_restore': True}
cls.optimizer_params = {'func': optimizer.ClipOptimizer,
'optimizer_class': tf.train.MomentumOptimizer,
'clip': True,
'optimizer_kwargs':{'momentum': 0.9}}
cls.learning_rate_params = {'learning_rate': 0.05,
'decay_steps': |
Creates a tensorflow session and instantiates a dbinterface.
| random_line_split |
test_dbinterface.py | %s' % __name__)
def tearDownModule():
"""Tear down module after all TestCases are run."""
pass
# logPoint('module %s' % __name__)
class TestDBInterface(unittest.TestCase):
PORT = 29101
HOST = 'localhost'
EXP_ID = 'TEST_EXP_ID'
DATABASE_NAME = 'TFUTILS_TESTDB'
COLLECTION_NAME = 'TFUTILS_TESTCOL'
CACHE_DIR = 'TFUTILS_TEST_CACHE_DIR'
@classmethod
def setUpClass(cls):
"""Set up class once before any test methods are run."""
cls.setup_log()
cls.setup_conn()
cls.setup_cache()
cls.setup_params()
@classmethod
def tearDownClass(cls):
"""Tear down class after all test methods have run."""
cls.remove_directory(cls.CACHE_DIR)
cls.remove_database(cls.DATABASE_NAME)
# Close primary MongoDB connection.
cls.conn.close()
def setUp(self):
"""Set up class before _each_ test method is executed.
Creates a tensorflow session and instantiates a dbinterface.
"""
self.setup_model()
self.sess = tf.Session(
config=tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True),
log_device_placement=self.params['log_device_placement'],
))
# TODO: Determine whether this should be called here or
# in dbinterface.initialize()
self.sess.run(tf.global_variables_initializer())
self.dbinterface = DBInterface(
sess=self.sess,
params=self.params,
cache_dir=self.CACHE_DIR,
save_params=self.save_params,
load_params=self.load_params)
self.step = 0
def tearDown(self):
"""Tear Down is called after _each_ test method is executed."""
self.sess.close()
@unittest.skip("skipping")
def test_init(self):
# TODO: Test all permutations of __init__ params.
pass
@unittest.skip("skipping")
def test_load_rec(self):
pass
@unittest.skip("skipping")
def test_initialize(self):
pass
def test_get_restore_vars(self):
# First, train model and save a checkpoint
self.train_model() # weights_name='Weights'
saved_path = self.save_test_checkpoint()
# Create a new model with different variable names.
self.setup_model(weights_name='Filters')
# Reset var_list in DBInterface
self.dbinterface.var_list = {
var.op.name: var for var in tf.global_variables()}
# Restore first checkpoint vars.
mapping = {'Weights': 'Filters'}
self.dbinterface.load_param_dict = mapping
restore_vars = self.dbinterface.get_restore_vars(saved_path)
self.log.info('restore_vars:')
for name, var in restore_vars.items():
if name in mapping.keys():
self.log.info('(name, var.name): ({}, {})'.format(name, var.name))
self.assertEqual(var.op.name, mapping[name])
def test_filter_var_list(self):
var_list = {var.op.name: var for var in tf.global_variables()}
# Test None
self.dbinterface.to_restore = None
filtered_var_list = self.dbinterface.filter_var_list(var_list)
self.assertEqual(filtered_var_list, var_list)
# Test list of strings
self.dbinterface.to_restore = ['Weights']
filtered_var_list = self.dbinterface.filter_var_list(var_list)
for name, var in filtered_var_list.items():
self.assertIn(name, ['Weights'])
self.assertNotIn(name, ['Bias', 'global_step'])
# Test regex
self.dbinterface.to_restore = re.compile(r'Bias')
filtered_var_list = self.dbinterface.filter_var_list(var_list)
for name, var in filtered_var_list.items():
self.assertIn(name, ['Bias'])
self.assertNotIn(name, ['Weights', 'global_step'])
# Test invalid type (should raise TypeError)
self.dbinterface.to_restore = {'invalid_key': 'invalid_value'}
with self.assertRaises(TypeError):
filtered_var_list = self.dbinterface.filter_var_list(var_list)
@unittest.skip("skipping")
def test_tf_saver(self):
pass
@unittest.skip("skipping")
def test_load_from_db(self):
pass
@unittest.skip("skipping")
def test_save(self):
self.dbinterface.initialize()
self.dbinterface.start_time_step = time.time()
train_res = self.train_model(num_steps=100)
self.dbinterface.save(train_res=train_res, step=self.step)
@unittest.skip("skipping")
def test_sync_with_host(self):
pass
@unittest.skip("skipping")
def test_save_thread(self):
pass
@unittest.skip("skipping")
def test_initialize_from_ckpt(self):
save_path = self.save_test_checkpoint()
self.load_test_checkpoint(save_path)
def train_model(self, num_steps=100):
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
x = tf.get_default_graph().get_tensor_by_name('x:0')
y = tf.get_default_graph().get_tensor_by_name('y:0')
feed_dict = {x: x_train, y: y_train}
pre_global_step = self.sess.run(self.global_step)
for step in range(num_steps):
train_res = self.sess.run(self.train_targets, feed_dict=feed_dict)
self.log.info('Step: {}, loss: {}'.format(step, train_res['loss']))
post_global_step = self.sess.run(self.global_step)
self.assertEqual(pre_global_step + num_steps, post_global_step)
self.step += num_steps
return train_res
def | (self):
self.log.info('Saving checkpoint to {}'.format(self.save_path))
saved_checkpoint_path = self.dbinterface.tf_saver.save(self.sess,
save_path=self.save_path,
global_step=self.global_step,
write_meta_graph=False)
self.log.info('Checkpoint saved to {}'.format(saved_checkpoint_path))
return saved_checkpoint_path
def load_test_checkpoint(self, save_path):
reader = tf.train.NewCheckpointReader(save_path)
saved_shapes = reader.get_variable_to_shape_map()
self.log.info('Saved Vars:\n' + str(saved_shapes.keys()))
for name in saved_shapes.keys():
self.log.info(
'Name: {}, Tensor: {}'.format(name, reader.get_tensor(name)))
def setup_model(self, weights_name='Weights', bias_name='Bias'):
"""Set up simple tensorflow model."""
tf.reset_default_graph()
self.global_step = tf.get_variable(
'global_step', [],
dtype=tf.int64, trainable=False,
initializer=tf.constant_initializer(0))
# Model parameters and placeholders.
x = tf.placeholder(tf.float32, name='x')
y = tf.placeholder(tf.float32, name='y')
W = tf.get_variable(weights_name, [1], dtype=tf.float32)
b = tf.get_variable(bias_name, [1], dtype=tf.float32)
# Model output, loss and optimizer.
linear_model = W * x + b
loss = tf.reduce_sum(tf.square(linear_model - y))
optimizer_base = tf.train.GradientDescentOptimizer(0.01)
# Model train op.
optimizer = optimizer_base.minimize(
loss, global_step=self.global_step)
# Train targets.
self.train_targets = {'loss': loss,
'optimizer': optimizer}
@classmethod
def setup_log(cls):
cls.log = logging.getLogger(':'.join([__name__, cls.__name__]))
cls.log.setLevel('DEBUG')
@classmethod
def setup_conn(cls):
cls.conn = pymongo.MongoClient(host=cls.HOST, port=cls.PORT)
@classmethod
def setup_cache(cls):
cls.cache_dir = os.path.join(cls.CACHE_DIR,
'%s:%d' % (cls.HOST, cls.PORT),
cls.DATABASE_NAME,
cls.COLLECTION_NAME,
cls.EXP_ID)
cls.makedirs(cls.cache_dir)
cls.save_path = os.path.join(cls.cache_dir, 'checkpoint')
@classmethod
def setup_params(cls):
cls.model_params = {'func': model.mnist_tfutils_new,
'devices': ['/gpu:0', '/gpu:1'],
'prefix': 'model_0'}
cls.save_params = {
'host': cls.HOST,
'port': cls.PORT,
'dbname': cls.DATABASE_NAME,
'collname': cls.COLLECTION_NAME,
'exp_id': cls.EXP_ID,
'save_valid_freq': 20,
'save_filters_freq': 200,
'cache_filters_freq': 100}
cls.train_params = {
'data_params': {'func': data.build_data,
'batch_size': 100,
'group': 'train',
'directory': TFUTILS_HOME},
'num_steps': 500}
cls.loss_params = {
'targets': ['labels'],
'agg_func': tf.reduce_mean,
'loss_per_case_func': tf.nn.sparse_softmax_cross_entropy_with_logits}
cls.load_params = {'do_restore': True}
cls.optimizer_params = {'func': optimizer.ClipOptimizer,
'optimizer_class': tf.train.MomentumOptimizer,
'clip': True,
'optimizer_kwargs':{'momentum': 0.9}}
cls.learning_rate_params = {'learning_rate': 0.05,
'decay_steps': | save_test_checkpoint | identifier_name |
test_dbinterface.py |
def tearDownModule():
"""Tear down module after all TestCases are run."""
pass
# logPoint('module %s' % __name__)
class TestDBInterface(unittest.TestCase):
PORT = 29101
HOST = 'localhost'
EXP_ID = 'TEST_EXP_ID'
DATABASE_NAME = 'TFUTILS_TESTDB'
COLLECTION_NAME = 'TFUTILS_TESTCOL'
CACHE_DIR = 'TFUTILS_TEST_CACHE_DIR'
@classmethod
def setUpClass(cls):
"""Set up class once before any test methods are run."""
cls.setup_log()
cls.setup_conn()
cls.setup_cache()
cls.setup_params()
@classmethod
def tearDownClass(cls):
"""Tear down class after all test methods have run."""
cls.remove_directory(cls.CACHE_DIR)
cls.remove_database(cls.DATABASE_NAME)
# Close primary MongoDB connection.
cls.conn.close()
def setUp(self):
"""Set up class before _each_ test method is executed.
Creates a tensorflow session and instantiates a dbinterface.
"""
self.setup_model()
self.sess = tf.Session(
config=tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True),
log_device_placement=self.params['log_device_placement'],
))
# TODO: Determine whether this should be called here or
# in dbinterface.initialize()
self.sess.run(tf.global_variables_initializer())
self.dbinterface = DBInterface(
sess=self.sess,
params=self.params,
cache_dir=self.CACHE_DIR,
save_params=self.save_params,
load_params=self.load_params)
self.step = 0
def tearDown(self):
"""Tear Down is called after _each_ test method is executed."""
self.sess.close()
@unittest.skip("skipping")
def test_init(self):
# TODO: Test all permutations of __init__ params.
pass
@unittest.skip("skipping")
def test_load_rec(self):
pass
@unittest.skip("skipping")
def test_initialize(self):
pass
def test_get_restore_vars(self):
# First, train model and save a checkpoint
self.train_model() # weights_name='Weights'
saved_path = self.save_test_checkpoint()
# Create a new model with different variable names.
self.setup_model(weights_name='Filters')
# Reset var_list in DBInterface
self.dbinterface.var_list = {
var.op.name: var for var in tf.global_variables()}
# Restore first checkpoint vars.
mapping = {'Weights': 'Filters'}
self.dbinterface.load_param_dict = mapping
restore_vars = self.dbinterface.get_restore_vars(saved_path)
self.log.info('restore_vars:')
for name, var in restore_vars.items():
if name in mapping.keys():
self.log.info('(name, var.name): ({}, {})'.format(name, var.name))
self.assertEqual(var.op.name, mapping[name])
def test_filter_var_list(self):
var_list = {var.op.name: var for var in tf.global_variables()}
# Test None
self.dbinterface.to_restore = None
filtered_var_list = self.dbinterface.filter_var_list(var_list)
self.assertEqual(filtered_var_list, var_list)
# Test list of strings
self.dbinterface.to_restore = ['Weights']
filtered_var_list = self.dbinterface.filter_var_list(var_list)
for name, var in filtered_var_list.items():
self.assertIn(name, ['Weights'])
self.assertNotIn(name, ['Bias', 'global_step'])
# Test regex
self.dbinterface.to_restore = re.compile(r'Bias')
filtered_var_list = self.dbinterface.filter_var_list(var_list)
for name, var in filtered_var_list.items():
self.assertIn(name, ['Bias'])
self.assertNotIn(name, ['Weights', 'global_step'])
# Test invalid type (should raise TypeError)
self.dbinterface.to_restore = {'invalid_key': 'invalid_value'}
with self.assertRaises(TypeError):
filtered_var_list = self.dbinterface.filter_var_list(var_list)
@unittest.skip("skipping")
def test_tf_saver(self):
pass
@unittest.skip("skipping")
def test_load_from_db(self):
pass
@unittest.skip("skipping")
def test_save(self):
self.dbinterface.initialize()
self.dbinterface.start_time_step = time.time()
train_res = self.train_model(num_steps=100)
self.dbinterface.save(train_res=train_res, step=self.step)
@unittest.skip("skipping")
def test_sync_with_host(self):
pass
@unittest.skip("skipping")
def test_save_thread(self):
pass
@unittest.skip("skipping")
def test_initialize_from_ckpt(self):
save_path = self.save_test_checkpoint()
self.load_test_checkpoint(save_path)
def train_model(self, num_steps=100):
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
x = tf.get_default_graph().get_tensor_by_name('x:0')
y = tf.get_default_graph().get_tensor_by_name('y:0')
feed_dict = {x: x_train, y: y_train}
pre_global_step = self.sess.run(self.global_step)
for step in range(num_steps):
train_res = self.sess.run(self.train_targets, feed_dict=feed_dict)
self.log.info('Step: {}, loss: {}'.format(step, train_res['loss']))
post_global_step = self.sess.run(self.global_step)
self.assertEqual(pre_global_step + num_steps, post_global_step)
self.step += num_steps
return train_res
def save_test_checkpoint(self):
self.log.info('Saving checkpoint to {}'.format(self.save_path))
saved_checkpoint_path = self.dbinterface.tf_saver.save(self.sess,
save_path=self.save_path,
global_step=self.global_step,
write_meta_graph=False)
self.log.info('Checkpoint saved to {}'.format(saved_checkpoint_path))
return saved_checkpoint_path
def load_test_checkpoint(self, save_path):
reader = tf.train.NewCheckpointReader(save_path)
saved_shapes = reader.get_variable_to_shape_map()
self.log.info('Saved Vars:\n' + str(saved_shapes.keys()))
for name in saved_shapes.keys():
self.log.info(
'Name: {}, Tensor: {}'.format(name, reader.get_tensor(name)))
def setup_model(self, weights_name='Weights', bias_name='Bias'):
"""Set up simple tensorflow model."""
tf.reset_default_graph()
self.global_step = tf.get_variable(
'global_step', [],
dtype=tf.int64, trainable=False,
initializer=tf.constant_initializer(0))
# Model parameters and placeholders.
x = tf.placeholder(tf.float32, name='x')
y = tf.placeholder(tf.float32, name='y')
W = tf.get_variable(weights_name, [1], dtype=tf.float32)
b = tf.get_variable(bias_name, [1], dtype=tf.float32)
# Model output, loss and optimizer.
linear_model = W * x + b
loss = tf.reduce_sum(tf.square(linear_model - y))
optimizer_base = tf.train.GradientDescentOptimizer(0.01)
# Model train op.
optimizer = optimizer_base.minimize(
loss, global_step=self.global_step)
# Train targets.
self.train_targets = {'loss': loss,
'optimizer': optimizer}
@classmethod
def setup_log(cls):
cls.log = logging.getLogger(':'.join([__name__, cls.__name__]))
cls.log.setLevel('DEBUG')
@classmethod
def setup_conn(cls):
cls.conn = pymongo.MongoClient(host=cls.HOST, port=cls.PORT)
@classmethod
def setup_cache(cls):
cls.cache_dir = os.path.join(cls.CACHE_DIR,
'%s:%d' % (cls.HOST, cls.PORT),
cls.DATABASE_NAME,
cls.COLLECTION_NAME,
cls.EXP_ID)
cls.makedirs(cls.cache_dir)
cls.save_path = os.path.join(cls.cache_dir, 'checkpoint')
@classmethod
def setup_params(cls):
cls.model_params = {'func': model.mnist_tfutils_new,
'devices': ['/gpu:0', '/gpu:1'],
'prefix': 'model_0'}
cls.save_params = {
'host': cls.HOST,
'port': cls.PORT,
'dbname': cls.DATABASE_NAME,
'collname': cls.COLLECTION_NAME,
'exp_id': cls.EXP_ID,
'save_valid_freq': 20,
'save_filters_freq': 200,
'cache_filters_freq': 100}
cls.train_params = {
'data_params': {'func': data.build_data,
'batch_size': 100,
'group': 'train',
'directory': TFUTILS_HOME},
'num_steps': 500}
cls.loss_params = {
'targets': ['labels'],
'agg_func': tf.reduce_mean,
'loss_per_case_func': tf.nn.sparse_softmax_cross_entropy_with_logits}
cls.load_params = {'do_restore': True}
cls.optimizer_params = {'func': optimizer.ClipOptimizer,
'optimizer_class': tf.train.MomentumOptimizer,
'clip': True,
'optimizer_kwargs':{'momentum': 0.9 | """Set up module once, before any TestCases are run."""
logging.basicConfig()
# logPoint('module %s' % __name__) | identifier_body |
|
space_invaders.py | = screen
self.sprite = sprite
self.rect = rect
self.update_dimensions()
self.update_mask()
self.set_exists(True)
# Assigns an id using current time in microseconds
def create_random_id(self):
self.id = int(time() * SECONDS_TO_MICRO_SECONDS)
# Ensures destroyed object won't be redrawn; however, object needs to be
# removed externally (i.e. where it is stored)
def destroy(self):
self.set_exists(False)
def update_rect(self):
self.rect = self.sprite.get_rect()
def update_dimensions(self):
self.dimensions = self.sprite.get_size()
def update_mask(self):
self.mask = pygame.mask.from_surface(self.sprite)
#print("\nmask {}\n".format(self.mask.__dir__()))
def get_mask(self):
return self.mask
def get_coordinates(self):
return (
self.rect.left,
self.rect.right,
self.rect.top,
self.rect.bottom)
# Distance between left side and left side of screen
def get_left_gap(self):
return self.rect.left
# Distance between right side and right side of screen
def get_right_gap(self):
return (WIDTH_SCREEN - self.rect.right)
def exists(self):
return self._exists
# Determine if two sprites overlap/collide
def check_overlaps(self, basic_sprite):
value = self.mask.overlap(
basic_sprite.get_mask(),
(basic_sprite.rect.left - self.rect.left,
basic_sprite.rect.top - self.rect.top))
return value
def set_exists(self, exists):
self._exists = exists
# Move to position unless outside of allowed coordinates; returns actual
# position delta in contrast with asked
def set_location(self, x, y):
center_change = [
self.rect.centerx,
self.rect.centery]
self.rect.centerx = x
self.rect.centery = y
# Ensure within allowed coordinates
if self.rect.left < MARGIN_SCREEN:
self.rect.centerx = MARGIN_SCREEN + self.dimensions[0] // 2
elif self.rect.right > (WIDTH_SCREEN - MARGIN_SCREEN):
self.rect.centerx = (
(WIDTH_SCREEN - MARGIN_SCREEN) - self.dimensions[0] // 2)
# Return true position delta
center_change[0] = self.rect.centerx - center_change[0]
center_change[1] = self.rect.centery - center_change[1]
return center_change
# Scale sprite to box container (max_dimension X max_dimension)
def scale_to_fit(self, max_dimension):
scale_factor = (
float(max_dimension) / float(max(*self.dimensions)))
width = int(float(self.dimensions[0]) * scale_factor)
height = int(float(self.dimensions[1]) * scale_factor)
self.sprite = pygame.transform.scale(self.sprite, (width, height))
self.update_rect()
self.update_dimensions()
self.update_mask()
# Translate by some delta ensuring to stay within allowed range
def translate(self, x, y):
return self.set_location(self.rect.centerx + x, self.rect.centery + y)
# Only redraw if 'exists'
def redraw(self):
if self.exists():
self.screen.blit(self.sprite, self.rect)
return self.exists()
class Background(BasicSprite):
def __init__(self, screen):
super().__init__(
screen,
pygame.Surface(screen.get_size()),
(0, 0))
self.sprite.fill(TUPLE_COLOR_BLACK)
# Simple Text Label
class Text(BasicSprite):
def __init__(self, screen, text, color, font, size):
self.text = text
self.color = color
self.font = font
self.size = size
self.my_font = pygame.font.SysFont(font, size)
self.label = self.my_font.render(text, 1, color)
super().__init__(
screen,
self.label,
self.label.get_rect())
# Base spaceship
class SpaceShip(BasicSprite):
def __init__(self, screen, ship_image, default_square_color):
# Attempt to load image
try:
sprite = pygame.image.load(ship_image)
# Create rect instead
except Exception as e:
print("{}\nLoading default square".format(e))
sprite = pygame.Surface((LENGTH_BOX_SHIP, LENGTH_BOX_SHIP))
# Set color
sprite.fill(default_square_color)
super().__init__(screen, sprite, sprite.get_rect())
self.scale_to_fit(LENGTH_BOX_SHIP)
# default location
self.set_location(0, 0)
class HumanSpaceShip(SpaceShip):
def __init__(self, screen):
super().__init__(screen, IMAGE_RICHARD_SIMMONS, TUPLE_COLOR_GREEN)
# Floor division set to middle
self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Set 0 acceleration
self.acceleration = [0, 0]
# Center within allowed human coordinates
def center(self):
x = WIDTH_FRAME_PLAYER / 2
y = (
HEIGHT_SCREEN -
(HEIGHT_FRAME_PLAYER / 2))
self.set_location(x, y)
# Accelerate in only -/+ x direction; inhibit acceleration while
# approaching sides
def accelerate(self, x, y):
# X
self.acceleration[0] += x
gap = WIDTH_FRAME_PLAYER_HALF
if sign(self.acceleration[0]) > 0:
gap = (
WIDTH_FRAME_PLAYER -
self.rect.centerx -
self.dimensions[0] // 2)
elif sign(self.acceleration[0]) < 0:
gap = (
self.rect.centerx -
self.dimensions[0] +
self.dimensions[0] // 2)
gap = int(float(gap) * 0.75)
limit_x = min(
int(ACCELERATION_VALUE_MAX *
gap * ACCELERATION_MULTIPLIER / WIDTH_FRAME_PLAYER),
ACCELERATION_VALUE_MAX)
self.acceleration[0] = (
sign(self.acceleration[0]) *
min(abs(self.acceleration[0]), limit_x))
# Y - Unfinished since restricted y-movement
self.acceleration[1] += y
self.acceleration[1] = (
sign(self.acceleration[1]) *
min(self.acceleration[1], ACCELERATION_VALUE_MAX))
# Decrement acceleration to inhibit continuous movement
def deccelerate(self):
if abs(self.acceleration[0]) > 0:
self.acceleration[0] = int(
float(self.acceleration[0]) * DECCELERATION_FACTOR)
if abs(self.acceleration[1]) > 0:
self.acceleration[1] = int(
float(self.acceleration[1]) * DECCELERATION_FACTOR)
def redraw(self):
self.translate(self.acceleration[0], self.acceleration[1])
super(SpaceShip, self).redraw()
self.deccelerate()
class OpponentSpaceShip(SpaceShip):
def __init__(self, screen):
super().__init__(screen, IMAGE_BIG_MAC, TUPLE_COLOR_RED)
# Floor division set to middle
self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Handles all opponent space ships
class OpponentSquadron:
def __init__(self, screen, row_and_column_size):
|
# Return front ships
def get_front_line_ships(self):
return self.front_line
# Evenly space out ships within initial allowed range
def setup_ships(self):
start_bottom_edge = int(
float(HEIGHT_FRAME_OPPONENTS) * FACTOR_HEIGHT_FRAME_OPPONENTS)
horizontal_separation = (
(WIDTH_SCREEN - (2 * MARGIN_OPPONENTS)) / self.row_and_column_size)
vertical_separation = start_bottom_edge / self.row_and_column_size
for r in range(0, self.row_and_column_size):
for c in range(0, self.row_and_column_size):
ship = OpponentSpaceShip(self.screen)
id = ship.id
x = int(
(0.5 + float(r)) * horizontal_separation +
MARGIN_OPPONENTS)
y = int((0.5 + float(c)) * vertical_separation)
ship.set_location(x, y)
if r == 0:
self.left[id] = ship
if r == (self.row_and_column_size - 1):
self.right[id] = ship
if c == (self.row_and_column_size - 1):
self.front_line[id] = ship
self.ships[id] = ship
# Check whether left or right ships reached allowed edge/coordinates
def check_reached_boundary(self):
ships = self.left
if self.direction == DIRECTION_RIGHT:
ships = self.right
ship = list(ships.values())[0]
#
gap = MARGIN_SCREEN * 2
if self.direction == DIRECTION_RIGHT:
gap = ship.get_right_gap()
else:
gap = ship.get_left_gap()
#
return (gap <= MARGIN_SCREEN)
# Update which direction | self.direction = DIRECTION_RIGHT
self.direction_previous = self.direction
self.screen = screen
self.row_and_column_size = row_and_column_size
self.ships = {}
self.left = {}
self.right = {}
self.front_line = {}
self.setup_ships() | identifier_body |
space_invaders.py | = screen
self.sprite = sprite
self.rect = rect
self.update_dimensions()
self.update_mask()
self.set_exists(True)
# Assigns an id using current time in microseconds
def create_random_id(self):
self.id = int(time() * SECONDS_TO_MICRO_SECONDS)
# Ensures destroyed object won't be redrawn; however, object needs to be
# removed externally (i.e. where it is stored)
def destroy(self):
self.set_exists(False)
def update_rect(self):
self.rect = self.sprite.get_rect()
def update_dimensions(self):
self.dimensions = self.sprite.get_size()
def update_mask(self):
self.mask = pygame.mask.from_surface(self.sprite)
#print("\nmask {}\n".format(self.mask.__dir__()))
def get_mask(self):
return self.mask
def get_coordinates(self):
return (
self.rect.left,
self.rect.right,
self.rect.top,
self.rect.bottom)
# Distance between left side and left side of screen
def get_left_gap(self):
return self.rect.left
# Distance between right side and right side of screen
def get_right_gap(self):
return (WIDTH_SCREEN - self.rect.right)
def exists(self):
return self._exists
# Determine if two sprites overlap/collide
def check_overlaps(self, basic_sprite):
value = self.mask.overlap(
basic_sprite.get_mask(),
(basic_sprite.rect.left - self.rect.left,
basic_sprite.rect.top - self.rect.top))
return value
def set_exists(self, exists):
self._exists = exists
# Move to position unless outside of allowed coordinates; returns actual
# position delta in contrast with asked
def set_location(self, x, y):
center_change = [
self.rect.centerx,
self.rect.centery]
self.rect.centerx = x
self.rect.centery = y
# Ensure within allowed coordinates
if self.rect.left < MARGIN_SCREEN:
self.rect.centerx = MARGIN_SCREEN + self.dimensions[0] // 2
elif self.rect.right > (WIDTH_SCREEN - MARGIN_SCREEN):
self.rect.centerx = (
(WIDTH_SCREEN - MARGIN_SCREEN) - self.dimensions[0] // 2)
# Return true position delta
center_change[0] = self.rect.centerx - center_change[0]
center_change[1] = self.rect.centery - center_change[1]
return center_change
# Scale sprite to box container (max_dimension X max_dimension)
def scale_to_fit(self, max_dimension):
scale_factor = (
float(max_dimension) / float(max(*self.dimensions)))
width = int(float(self.dimensions[0]) * scale_factor)
height = int(float(self.dimensions[1]) * scale_factor)
self.sprite = pygame.transform.scale(self.sprite, (width, height))
self.update_rect()
self.update_dimensions()
self.update_mask()
# Translate by some delta ensuring to stay within allowed range
def translate(self, x, y):
return self.set_location(self.rect.centerx + x, self.rect.centery + y)
# Only redraw if 'exists'
def redraw(self):
if self.exists():
self.screen.blit(self.sprite, self.rect)
return self.exists()
class Background(BasicSprite):
def __init__(self, screen):
super().__init__(
screen,
pygame.Surface(screen.get_size()),
(0, 0))
self.sprite.fill(TUPLE_COLOR_BLACK)
# Simple Text Label
class Text(BasicSprite):
def __init__(self, screen, text, color, font, size):
self.text = text
self.color = color
self.font = font
self.size = size
self.my_font = pygame.font.SysFont(font, size)
self.label = self.my_font.render(text, 1, color)
super().__init__(
screen,
self.label,
self.label.get_rect())
# Base spaceship
class SpaceShip(BasicSprite):
def __init__(self, screen, ship_image, default_square_color):
# Attempt to load image
try:
sprite = pygame.image.load(ship_image)
# Create rect instead
except Exception as e:
print("{}\nLoading default square".format(e))
sprite = pygame.Surface((LENGTH_BOX_SHIP, LENGTH_BOX_SHIP))
# Set color
sprite.fill(default_square_color)
super().__init__(screen, sprite, sprite.get_rect())
self.scale_to_fit(LENGTH_BOX_SHIP)
# default location
self.set_location(0, 0)
class HumanSpaceShip(SpaceShip):
def __init__(self, screen):
super().__init__(screen, IMAGE_RICHARD_SIMMONS, TUPLE_COLOR_GREEN)
# Floor division set to middle
self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Set 0 acceleration
self.acceleration = [0, 0]
# Center within allowed human coordinates
def center(self):
x = WIDTH_FRAME_PLAYER / 2
y = (
HEIGHT_SCREEN -
(HEIGHT_FRAME_PLAYER / 2))
self.set_location(x, y)
# Accelerate in only -/+ x direction; inhibit acceleration while
# approaching sides
def accelerate(self, x, y):
# X
self.acceleration[0] += x
gap = WIDTH_FRAME_PLAYER_HALF
if sign(self.acceleration[0]) > 0:
gap = (
WIDTH_FRAME_PLAYER -
self.rect.centerx -
self.dimensions[0] // 2)
elif sign(self.acceleration[0]) < 0:
gap = (
self.rect.centerx -
self.dimensions[0] +
self.dimensions[0] // 2)
gap = int(float(gap) * 0.75)
limit_x = min(
int(ACCELERATION_VALUE_MAX *
gap * ACCELERATION_MULTIPLIER / WIDTH_FRAME_PLAYER),
ACCELERATION_VALUE_MAX)
self.acceleration[0] = (
sign(self.acceleration[0]) *
min(abs(self.acceleration[0]), limit_x))
# Y - Unfinished since restricted y-movement
self.acceleration[1] += y
self.acceleration[1] = (
sign(self.acceleration[1]) *
min(self.acceleration[1], ACCELERATION_VALUE_MAX))
# Decrement acceleration to inhibit continuous movement
def deccelerate(self):
if abs(self.acceleration[0]) > 0:
self.acceleration[0] = int(
float(self.acceleration[0]) * DECCELERATION_FACTOR)
if abs(self.acceleration[1]) > 0:
self.acceleration[1] = int(
float(self.acceleration[1]) * DECCELERATION_FACTOR)
def redraw(self):
self.translate(self.acceleration[0], self.acceleration[1])
super(SpaceShip, self).redraw()
self.deccelerate()
class OpponentSpaceShip(SpaceShip):
def __init__(self, screen):
super().__init__(screen, IMAGE_BIG_MAC, TUPLE_COLOR_RED)
# Floor division set to middle
self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Handles all opponent space ships
class OpponentSquadron:
def __init__(self, screen, row_and_column_size):
self.direction = DIRECTION_RIGHT
self.direction_previous = self.direction
self.screen = screen
self.row_and_column_size = row_and_column_size
self.ships = {}
self.left = {}
self.right = {}
self.front_line = {}
self.setup_ships()
# Return front ships
def get_front_line_ships(self):
return self.front_line
# Evenly space out ships within initial allowed range
def setup_ships(self):
start_bottom_edge = int(
float(HEIGHT_FRAME_OPPONENTS) * FACTOR_HEIGHT_FRAME_OPPONENTS)
horizontal_separation = (
(WIDTH_SCREEN - (2 * MARGIN_OPPONENTS)) / self.row_and_column_size)
vertical_separation = start_bottom_edge / self.row_and_column_size
for r in range(0, self.row_and_column_size):
for c in range(0, self.row_and_column_size):
ship = OpponentSpaceShip(self.screen)
id = ship.id
x = int(
(0.5 + float(r)) * horizontal_separation +
MARGIN_OPPONENTS)
y = int((0.5 + float(c)) * vertical_separation)
ship.set_location(x, y)
if r == 0:
|
if r == (self.row_and_column_size - 1):
self.right[id] = ship
if c == (self.row_and_column_size - 1):
self.front_line[id] = ship
self.ships[id] = ship
# Check whether left or right ships reached allowed edge/coordinates
def check_reached_boundary(self):
ships = self.left
if self.direction == DIRECTION_RIGHT:
ships = self.right
ship = list(ships.values())[0]
#
gap = MARGIN_SCREEN * 2
if self.direction == DIRECTION_RIGHT:
gap = ship.get_right_gap()
else:
gap = ship.get_left_gap()
#
return (gap <= MARGIN_SCREEN)
# Update which direction ships | self.left[id] = ship | conditional_block |
space_invaders.py | Label
class Text(BasicSprite):
def __init__(self, screen, text, color, font, size):
self.text = text
self.color = color
self.font = font
self.size = size
self.my_font = pygame.font.SysFont(font, size)
self.label = self.my_font.render(text, 1, color)
super().__init__(
screen,
self.label,
self.label.get_rect())
# Base spaceship
class SpaceShip(BasicSprite):
def __init__(self, screen, ship_image, default_square_color):
# Attempt to load image
try:
sprite = pygame.image.load(ship_image)
# Create rect instead
except Exception as e:
print("{}\nLoading default square".format(e))
sprite = pygame.Surface((LENGTH_BOX_SHIP, LENGTH_BOX_SHIP))
# Set color
sprite.fill(default_square_color)
super().__init__(screen, sprite, sprite.get_rect())
self.scale_to_fit(LENGTH_BOX_SHIP)
# default location
self.set_location(0, 0)
class HumanSpaceShip(SpaceShip):
def __init__(self, screen):
super().__init__(screen, IMAGE_RICHARD_SIMMONS, TUPLE_COLOR_GREEN)
# Floor division set to middle
self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Set 0 acceleration
self.acceleration = [0, 0]
# Center within allowed human coordinates
def center(self):
x = WIDTH_FRAME_PLAYER / 2
y = (
HEIGHT_SCREEN -
(HEIGHT_FRAME_PLAYER / 2))
self.set_location(x, y)
# Accelerate in only -/+ x direction; inhibit acceleration while
# approaching sides
def accelerate(self, x, y):
# X
self.acceleration[0] += x
gap = WIDTH_FRAME_PLAYER_HALF
if sign(self.acceleration[0]) > 0:
gap = (
WIDTH_FRAME_PLAYER -
self.rect.centerx -
self.dimensions[0] // 2)
elif sign(self.acceleration[0]) < 0:
gap = (
self.rect.centerx -
self.dimensions[0] +
self.dimensions[0] // 2)
gap = int(float(gap) * 0.75)
limit_x = min(
int(ACCELERATION_VALUE_MAX *
gap * ACCELERATION_MULTIPLIER / WIDTH_FRAME_PLAYER),
ACCELERATION_VALUE_MAX)
self.acceleration[0] = (
sign(self.acceleration[0]) *
min(abs(self.acceleration[0]), limit_x))
# Y - Unfinished since restricted y-movement
self.acceleration[1] += y
self.acceleration[1] = (
sign(self.acceleration[1]) *
min(self.acceleration[1], ACCELERATION_VALUE_MAX))
# Decrement acceleration to inhibit continuous movement
def deccelerate(self):
if abs(self.acceleration[0]) > 0:
self.acceleration[0] = int(
float(self.acceleration[0]) * DECCELERATION_FACTOR)
if abs(self.acceleration[1]) > 0:
self.acceleration[1] = int(
float(self.acceleration[1]) * DECCELERATION_FACTOR)
def redraw(self):
self.translate(self.acceleration[0], self.acceleration[1])
super(SpaceShip, self).redraw()
self.deccelerate()
class OpponentSpaceShip(SpaceShip):
def __init__(self, screen):
super().__init__(screen, IMAGE_BIG_MAC, TUPLE_COLOR_RED)
# Floor division set to middle
self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Handles all opponent space ships
class OpponentSquadron:
def __init__(self, screen, row_and_column_size):
self.direction = DIRECTION_RIGHT
self.direction_previous = self.direction
self.screen = screen
self.row_and_column_size = row_and_column_size
self.ships = {}
self.left = {}
self.right = {}
self.front_line = {}
self.setup_ships()
# Return front ships
def get_front_line_ships(self):
return self.front_line
# Evenly space out ships within initial allowed range
def setup_ships(self):
start_bottom_edge = int(
float(HEIGHT_FRAME_OPPONENTS) * FACTOR_HEIGHT_FRAME_OPPONENTS)
horizontal_separation = (
(WIDTH_SCREEN - (2 * MARGIN_OPPONENTS)) / self.row_and_column_size)
vertical_separation = start_bottom_edge / self.row_and_column_size
for r in range(0, self.row_and_column_size):
for c in range(0, self.row_and_column_size):
ship = OpponentSpaceShip(self.screen)
id = ship.id
x = int(
(0.5 + float(r)) * horizontal_separation +
MARGIN_OPPONENTS)
y = int((0.5 + float(c)) * vertical_separation)
ship.set_location(x, y)
if r == 0:
self.left[id] = ship
if r == (self.row_and_column_size - 1):
self.right[id] = ship
if c == (self.row_and_column_size - 1):
self.front_line[id] = ship
self.ships[id] = ship
# Check whether left or right ships reached allowed edge/coordinates
def check_reached_boundary(self):
ships = self.left
if self.direction == DIRECTION_RIGHT:
ships = self.right
ship = list(ships.values())[0]
#
gap = MARGIN_SCREEN * 2
if self.direction == DIRECTION_RIGHT:
gap = ship.get_right_gap()
else:
gap = ship.get_left_gap()
#
return (gap <= MARGIN_SCREEN)
# Update which direction ships are flying in
def update_direction(self):
tmp_direction = self.direction
# Currently moving left
if ((self.direction == DIRECTION_LEFT) or
(self.direction == DIRECTION_RIGHT)):
if self.check_reached_boundary():
self.direction = DIRECTION_DOWN
self.direction_previous = tmp_direction
# Switch to left or right?
elif self.direction == DIRECTION_DOWN:
if self.direction_previous == DIRECTION_LEFT:
self.direction = DIRECTION_RIGHT
else:
self.direction = DIRECTION_LEFT
self.direction_previous = tmp_direction
# Calculate translation delta and move
def move_ships(self):
translation = [0, 0]
#
self.update_direction()
#
if self.direction == DIRECTION_LEFT:
translation = [-1 * INCREMENT_MOVE_X_OPPONENT, 0]
elif self.direction == DIRECTION_RIGHT:
translation = [INCREMENT_MOVE_X_OPPONENT, 0]
elif self.direction == DIRECTION_DOWN:
translation = [0, INCREMENT_MOVE_Y_OPPONENT]
#
'''
ships_to_move = {
id: ship
for id, ship in ships_to_move.items() if ship not in ships_moved}
'''
#for id, ship in ships_to_move.items():
for id, ship in self.ships.items():
ship.translate(translation[0], translation[1])
def update(self):
self.move_ships()
for id, ship in self.ships.items():
ship.redraw()
# print("{} coords: {}".format(ship.id, ship.get_coordinates()))
class Game:
def __init__(self):
pygame.init()
self.init_winner()
self.init_screen()
self.init_human_ship()
self.init_opponent_squadron()
def init_winner(self):
self.winner = WINNER_NONE
self.winner_text = None
def init_screen(self):
self.screen = pygame.display.set_mode(
(WIDTH_SCREEN, HEIGHT_SCREEN))
self.background = Background(self.screen)
def init_human_ship(self):
self.human_ship = HumanSpaceShip(self.screen)
self.human_ship.center()
def init_opponent_squadron(self):
self.opponent_squadron = OpponentSquadron(
self.screen, COUNT_COLUMN_AND_ROW_OPPONENT)
def check_collisions(self):
if self.human_ship is not None:
collided = False
ships = self.opponent_squadron.get_front_line_ships().items()
for id, ship in ships:
if self.human_ship.check_overlaps(ship):
ship.destroy()
collided = True
#
if collided:
self.human_ship.destroy()
def clean_up(self):
if self.human_ship is not None:
if not self.human_ship.exists():
self.human_ship = None
def update_winner(self):
if self.winner == WINNER_NONE:
text = None
color = None
if self.human_ship is None:
self.winner = WINNER_OPPONENT
text = "Opponent"
color = TUPLE_COLOR_RED
elif self.opponent_squadron is None:
self.winner = WINNER_HUMAN
text = "Human"
color = TUPLE_COLOR_GREEN
else:
self.winner = WINNER_NONE
#
if self.winner != WINNER_NONE:
text = "{} Wins!".format(text)
self.winner_text = Text( | self.screen, text, color, "arial", 60) | random_line_split |
|
space_invaders.py |
# Move to position unless outside of allowed coordinates; returns actual
# position delta in contrast with asked
def set_location(self, x, y):
center_change = [
self.rect.centerx,
self.rect.centery]
self.rect.centerx = x
self.rect.centery = y
# Ensure within allowed coordinates
if self.rect.left < MARGIN_SCREEN:
self.rect.centerx = MARGIN_SCREEN + self.dimensions[0] // 2
elif self.rect.right > (WIDTH_SCREEN - MARGIN_SCREEN):
self.rect.centerx = (
(WIDTH_SCREEN - MARGIN_SCREEN) - self.dimensions[0] // 2)
# Return true position delta
center_change[0] = self.rect.centerx - center_change[0]
center_change[1] = self.rect.centery - center_change[1]
return center_change
# Scale sprite to box container (max_dimension X max_dimension)
def scale_to_fit(self, max_dimension):
scale_factor = (
float(max_dimension) / float(max(*self.dimensions)))
width = int(float(self.dimensions[0]) * scale_factor)
height = int(float(self.dimensions[1]) * scale_factor)
self.sprite = pygame.transform.scale(self.sprite, (width, height))
self.update_rect()
self.update_dimensions()
self.update_mask()
# Translate by some delta ensuring to stay within allowed range
def translate(self, x, y):
return self.set_location(self.rect.centerx + x, self.rect.centery + y)
# Only redraw if 'exists'
def redraw(self):
if self.exists():
self.screen.blit(self.sprite, self.rect)
return self.exists()
class Background(BasicSprite):
def __init__(self, screen):
super().__init__(
screen,
pygame.Surface(screen.get_size()),
(0, 0))
self.sprite.fill(TUPLE_COLOR_BLACK)
# Simple Text Label
class Text(BasicSprite):
def __init__(self, screen, text, color, font, size):
self.text = text
self.color = color
self.font = font
self.size = size
self.my_font = pygame.font.SysFont(font, size)
self.label = self.my_font.render(text, 1, color)
super().__init__(
screen,
self.label,
self.label.get_rect())
# Base spaceship
class SpaceShip(BasicSprite):
def __init__(self, screen, ship_image, default_square_color):
# Attempt to load image
try:
sprite = pygame.image.load(ship_image)
# Create rect instead
except Exception as e:
print("{}\nLoading default square".format(e))
sprite = pygame.Surface((LENGTH_BOX_SHIP, LENGTH_BOX_SHIP))
# Set color
sprite.fill(default_square_color)
super().__init__(screen, sprite, sprite.get_rect())
self.scale_to_fit(LENGTH_BOX_SHIP)
# default location
self.set_location(0, 0)
class HumanSpaceShip(SpaceShip):
def __init__(self, screen):
super().__init__(screen, IMAGE_RICHARD_SIMMONS, TUPLE_COLOR_GREEN)
# Floor division set to middle
self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Set 0 acceleration
self.acceleration = [0, 0]
# Center within allowed human coordinates
def center(self):
x = WIDTH_FRAME_PLAYER / 2
y = (
HEIGHT_SCREEN -
(HEIGHT_FRAME_PLAYER / 2))
self.set_location(x, y)
# Accelerate in only -/+ x direction; inhibit acceleration while
# approaching sides
def accelerate(self, x, y):
# X
self.acceleration[0] += x
gap = WIDTH_FRAME_PLAYER_HALF
if sign(self.acceleration[0]) > 0:
gap = (
WIDTH_FRAME_PLAYER -
self.rect.centerx -
self.dimensions[0] // 2)
elif sign(self.acceleration[0]) < 0:
gap = (
self.rect.centerx -
self.dimensions[0] +
self.dimensions[0] // 2)
gap = int(float(gap) * 0.75)
limit_x = min(
int(ACCELERATION_VALUE_MAX *
gap * ACCELERATION_MULTIPLIER / WIDTH_FRAME_PLAYER),
ACCELERATION_VALUE_MAX)
self.acceleration[0] = (
sign(self.acceleration[0]) *
min(abs(self.acceleration[0]), limit_x))
# Y - Unfinished since restricted y-movement
self.acceleration[1] += y
self.acceleration[1] = (
sign(self.acceleration[1]) *
min(self.acceleration[1], ACCELERATION_VALUE_MAX))
# Decrement acceleration to inhibit continuous movement
def deccelerate(self):
if abs(self.acceleration[0]) > 0:
self.acceleration[0] = int(
float(self.acceleration[0]) * DECCELERATION_FACTOR)
if abs(self.acceleration[1]) > 0:
self.acceleration[1] = int(
float(self.acceleration[1]) * DECCELERATION_FACTOR)
def redraw(self):
self.translate(self.acceleration[0], self.acceleration[1])
super(SpaceShip, self).redraw()
self.deccelerate()
class OpponentSpaceShip(SpaceShip):
def __init__(self, screen):
super().__init__(screen, IMAGE_BIG_MAC, TUPLE_COLOR_RED)
# Floor division set to middle
self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Handles all opponent space ships
class OpponentSquadron:
def __init__(self, screen, row_and_column_size):
self.direction = DIRECTION_RIGHT
self.direction_previous = self.direction
self.screen = screen
self.row_and_column_size = row_and_column_size
self.ships = {}
self.left = {}
self.right = {}
self.front_line = {}
self.setup_ships()
# Return front ships
def get_front_line_ships(self):
return self.front_line
# Evenly space out ships within initial allowed range
def setup_ships(self):
start_bottom_edge = int(
float(HEIGHT_FRAME_OPPONENTS) * FACTOR_HEIGHT_FRAME_OPPONENTS)
horizontal_separation = (
(WIDTH_SCREEN - (2 * MARGIN_OPPONENTS)) / self.row_and_column_size)
vertical_separation = start_bottom_edge / self.row_and_column_size
for r in range(0, self.row_and_column_size):
for c in range(0, self.row_and_column_size):
ship = OpponentSpaceShip(self.screen)
id = ship.id
x = int(
(0.5 + float(r)) * horizontal_separation +
MARGIN_OPPONENTS)
y = int((0.5 + float(c)) * vertical_separation)
ship.set_location(x, y)
if r == 0:
self.left[id] = ship
if r == (self.row_and_column_size - 1):
self.right[id] = ship
if c == (self.row_and_column_size - 1):
self.front_line[id] = ship
self.ships[id] = ship
# Check whether left or right ships reached allowed edge/coordinates
def check_reached_boundary(self):
ships = self.left
if self.direction == DIRECTION_RIGHT:
ships = self.right
ship = list(ships.values())[0]
#
gap = MARGIN_SCREEN * 2
if self.direction == DIRECTION_RIGHT:
gap = ship.get_right_gap()
else:
gap = ship.get_left_gap()
#
return (gap <= MARGIN_SCREEN)
# Update which direction ships are flying in
def update_direction(self):
tmp_direction = self.direction
# Currently moving left
if ((self.direction == DIRECTION_LEFT) or
(self.direction == DIRECTION_RIGHT)):
if self.check_reached_boundary():
self.direction = DIRECTION_DOWN
self.direction_previous = tmp_direction
# Switch to left or right?
elif self.direction == DIRECTION_DOWN:
if self.direction_previous == DIRECTION_LEFT:
self.direction = DIRECTION_RIGHT
else:
self.direction = DIRECTION_LEFT
self.direction_previous = tmp_direction
# Calculate translation delta and move
def move_ships(self):
translation = [0, 0]
#
self.update_direction()
#
if self.direction == DIRECTION_LEFT:
translation = [-1 * INCREMENT_MOVE_X_OPPONENT, 0]
elif self.direction == DIRECTION_RIGHT:
translation = [INCREMENT_MOVE_X_OPPONENT, 0]
elif self.direction == DIRECTION_DOWN:
translation = [0, INCREMENT_MOVE_Y_OPPONENT]
#
'''
ships_to_move = {
id: ship
for id, ship in ships_to_move.items() if ship not in ships_moved}
'''
#for id, ship in ships_to_move.items():
for id, ship in self.ships.items():
ship.translate(translation[0], translation[1])
def update(self):
self.move_ships()
for id, ship in self.ships.items():
ship.redraw()
# print("{} coords: {}".format(ship.id, ship.get_coordinates()))
class Game:
def | __init__ | identifier_name |
|
full-site.js | $choicesModal.find('.modal-footer').html("");
var $firstButton;
for (var i in buttons) {
var btn = buttons[i];
var attrsString = "";
for (var key in btn.attrs) {
var value = btn.attrs[key];
attrsString += key + '="' + value + '" ';
}
var $button = $('<a target="_self" ' + attrsString + ' onclick="' + btn.clickAction + '">' + btn.textValue + '</a>');
if (!$firstButton) {
$firstButton = $button;
}
$choicesModal.find('.modal-footer').append($button);
}
$choicesModal.modal({keyboard: true});
$choicesModal.on('shown.bs.modal', function () {
if ($firstButton && window.location == window.parent.location) {
$firstButton.focus();
}
});
$choicesModal.modal('show');
$(".btnPrint").printPage();
$choicesModal.off('hidden.bs.modal');
$choicesModal.on('hidden.bs.modal', function (e) {
if (onCancelFunction)
onCancelFunction();
});
}
function htmlEncode(str) {
return str.replace(/</g, '<').replace(/>/g, '>').replace(/'/g, ''').replace(/"/g, '"');
}
function closeDialog() {
$('#choices-modal').modal('hide');
}
function userStateChange(data, triggerLoginEvent) {
var data = typeof data == "undefined" ? null : data;
// $('.alert-danger').remove();
$('.login-slid-div').slideUp(300);
if (data) {
if(data.user.avatar){
$(".userImage").html('<i><img src="/'+data.user.avatar+'" /></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i><img class="img-circle dev-profile-image" src="/'+data.user.avatar+'"/></i> '+data.user.username+'<span class="caret"></span>')
}else{
$(".userImage").html('<i class="fas fa-user-circle" ></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i class="fas fa-user-circle fa-2x" style="margin-top: 5px;"></i> '+data.user.username+'<span class="caret"></span>')
}
$('.dev-anon-container').addClass('hide');
$('.dev-login-in').removeClass('hide');
// responsive
$('.userNotLogged').addClass('hide');
$('.userLogged').removeClass('hide');
if (data.user.materialCreate) {
$('.dev-material-create').removeClass('hide');
}
if (data.user.backend) {
$('.dev-backend-control').removeClass('hide');
}
if (data.user.comicsCreate) {
$('.dev-comics-create').removeClass('hide');
}
isLoggedIn = true;
if (triggerLoginEvent) {
$(window).trigger('user.loggedin');
}
$('.top-note').addClass('hidden');
for (var variableName in data.injectJSVariables) {
window[variableName] = data.injectJSVariables[variableName];
}
for (var fileId in data.injectFiles) {
loadScript(data.injectFiles[fileId], null, fileId);
onLogoutRemoveIds.push(fileId);
}
if (typeof afterLoginPerformAction === 'function') {
afterLoginPerformAction();
afterLoginPerformAction = null;
}
// if($('#login-popup').is(':visible')){
// lightcase.close();
// }
} else {
$('.dev-user-profile').html("");
// $('[type="password"]').val("");
$('.dev-anon-container').removeClass('hide');
$('.dev-login-in').addClass('hide');
$('#dev-material-create').addClass('hide');
$('#dev-backend-control').addClass('hide');
$('#dev-comics-create').addClass('hide');
if (typeof timerNotificationsInterval !== 'undefined' && timerNotificationsInterval) {
clearInterval(timerNotificationsInterval);
}
var userStatusLognout = isLoggedIn;
isLoggedIn = false;
if (userStatusLognout) {
$(window).trigger('user.loggedout');
}
$('.top-note').removeClass('hidden');
for (var fileIdIndex in onLogoutRemoveIds) {
$('#' + onLogoutRemoveIds[fileIdIndex]).remove();
}
}
}
function showAuthError(error) {
if (++failCount >= 3 || error.indexOf("Captcha") != -1) {
location.href = loginUrl;
} else {
showNotification('error',error);
// $('.dev-login-li').find('.alert').remove();
// $('.dev-login-li').prepend('<div class="alert alert-danger remove-5s">'
// + error + '</div>');
// if($('#ajax-form-login-resp').is(':visible')) $('#login-popup').lightcase('resize');
}
}
function SocialNetworkConnect(element) {
newWindow = window.open($(element).attr("data-url"), '', 'height=800, width=1000');
if (window.focus) {
newWindow.focus();
}
timer = setInterval(checkChild, 500);
}
function checkChild() {
if (errorMessage != false) {
if (newWindow.closed) {
msg = '<div class="alert alert-danger remove-5s">' + socialNetworkErrorMessage + '</div>';
if ($('.dev-login-li .alert').length > 0) {
$('.dev-login-li .alert').remove();
}
$('.dev-login-li').prepend(msg);
clearInterval(timer);
}
}
}
function show_email_modal() {
document.getElementById('form_email').value = "";
// $('#form_email').css('text-indent', '35px');
$('#form-modal .help-error').remove();
$('#form-modal .form-group').removeClass('is-invalid');
$('#form-modal').modal('show');
}
function getprayerTimeData() {
$.ajax({
url: getPrayerInfoUrl,
success: preparePrayerTimeWidget
});
}
// increaseFontSize and decreaseFontSize
var min = 16;
var max = 20;
function increaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != max) {
s += 1;
}
p[i].style.fontSize = s + "px"
}
}
function decreaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != min) {
s -= 1;
}
p[i].style.fontSize = s + "px"
}
}
function resetFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
p[i].style.fontSize = "18px"
}
}
$('body').on('click','.largeFont',function () {
increaseFontSize();
});
$('body').on('click','.smallFont',function () {
decreaseFontSize();
});
$('body').on('click','.normalFont',function () {
resetFontSize(); | function sharePopup(url, w, h) {
var left = (screen.width / 2) - (w / 2);
var top = (screen.height / 2) - (h / 2);
return window.open(url, "share window", 'toolbar=no, location=no, directories=no, status=no, menubar=no, scrollbars=yes, copyhistory=no, width=' + w + ', height=' + h + ', top=' + top + ', left=' + left);
}
function loginToChat() {
$.ajax({
url: chatLoginUrl,
success: function (data) {
if (reoloadPageForChat && data.loggedIn) {
window.location.reload(true);
return;
}
loadScript('https://repository.chatwee.com/scripts/72e4b84d2ef104b50494d305ab4bde88.js', null, 'chatwee-js-tag');
}
});
}
function logoutFromChat() {
$.ajax({
url: chatLogoutUrl,
success: function() {
$('#chatwee-js-tag').remove();
}
});
}
$(document).on('shown.bs.tab', 'a[data-toggle="tab"]',function (e) {
var target = $(e.target).attr("href") // activated tab
if(target=='#tab_default_2'){
setTimeout(function(){
initFormValidation() ;
},200)
}
});
jQuery(document).ready(function ($) {
// $(window).on('user.loggedin', loginToChat);
// $(window).on('user.loggedout', logoutFromChat);
$('form[name=searchForm]').submit(function (e) {
if (typeof inAngularLayout === 'undefined') {
e.preventDefault();
$(this).data('submitted', true | });
| random_line_split |
full-site.js | choicesModal.find('.modal-footer').html("");
var $firstButton;
for (var i in buttons) {
var btn = buttons[i];
var attrsString = "";
for (var key in btn.attrs) {
var value = btn.attrs[key];
attrsString += key + '="' + value + '" ';
}
var $button = $('<a target="_self" ' + attrsString + ' onclick="' + btn.clickAction + '">' + btn.textValue + '</a>');
if (!$firstButton) {
$firstButton = $button;
}
$choicesModal.find('.modal-footer').append($button);
}
$choicesModal.modal({keyboard: true});
$choicesModal.on('shown.bs.modal', function () {
if ($firstButton && window.location == window.parent.location) {
$firstButton.focus();
}
});
$choicesModal.modal('show');
$(".btnPrint").printPage();
$choicesModal.off('hidden.bs.modal');
$choicesModal.on('hidden.bs.modal', function (e) {
if (onCancelFunction)
onCancelFunction();
});
}
function htmlEncode(str) {
return str.replace(/</g, '<').replace(/>/g, '>').replace(/'/g, ''').replace(/"/g, '"');
}
function closeDialog() {
$('#choices-modal').modal('hide');
}
function userStateChange(data, triggerLoginEvent) {
var data = typeof data == "undefined" ? null : data;
// $('.alert-danger').remove();
$('.login-slid-div').slideUp(300);
if (data) {
if(data.user.avatar){
$(".userImage").html('<i><img src="/'+data.user.avatar+'" /></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i><img class="img-circle dev-profile-image" src="/'+data.user.avatar+'"/></i> '+data.user.username+'<span class="caret"></span>')
}else{
$(".userImage").html('<i class="fas fa-user-circle" ></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i class="fas fa-user-circle fa-2x" style="margin-top: 5px;"></i> '+data.user.username+'<span class="caret"></span>')
}
$('.dev-anon-container').addClass('hide');
$('.dev-login-in').removeClass('hide');
// responsive
$('.userNotLogged').addClass('hide');
$('.userLogged').removeClass('hide');
if (data.user.materialCreate) {
$('.dev-material-create').removeClass('hide');
}
if (data.user.backend) {
$('.dev-backend-control').removeClass('hide');
}
if (data.user.comicsCreate) {
$('.dev-comics-create').removeClass('hide');
}
isLoggedIn = true;
if (triggerLoginEvent) {
$(window).trigger('user.loggedin');
}
$('.top-note').addClass('hidden');
for (var variableName in data.injectJSVariables) {
window[variableName] = data.injectJSVariables[variableName];
}
for (var fileId in data.injectFiles) {
loadScript(data.injectFiles[fileId], null, fileId);
onLogoutRemoveIds.push(fileId);
}
if (typeof afterLoginPerformAction === 'function') {
afterLoginPerformAction();
afterLoginPerformAction = null;
}
// if($('#login-popup').is(':visible')){
// lightcase.close();
// }
} else {
$('.dev-user-profile').html("");
// $('[type="password"]').val("");
$('.dev-anon-container').removeClass('hide');
$('.dev-login-in').addClass('hide');
$('#dev-material-create').addClass('hide');
$('#dev-backend-control').addClass('hide');
$('#dev-comics-create').addClass('hide');
if (typeof timerNotificationsInterval !== 'undefined' && timerNotificationsInterval) {
clearInterval(timerNotificationsInterval);
}
var userStatusLognout = isLoggedIn;
isLoggedIn = false;
if (userStatusLognout) {
$(window).trigger('user.loggedout');
}
$('.top-note').removeClass('hidden');
for (var fileIdIndex in onLogoutRemoveIds) {
$('#' + onLogoutRemoveIds[fileIdIndex]).remove();
}
}
}
function showAuthError(error) {
if (++failCount >= 3 || error.indexOf("Captcha") != -1) {
location.href = loginUrl;
} else {
showNotification('error',error);
// $('.dev-login-li').find('.alert').remove();
// $('.dev-login-li').prepend('<div class="alert alert-danger remove-5s">'
// + error + '</div>');
// if($('#ajax-form-login-resp').is(':visible')) $('#login-popup').lightcase('resize');
}
}
function SocialNetworkConnect(element) {
newWindow = window.open($(element).attr("data-url"), '', 'height=800, width=1000');
if (window.focus) {
newWindow.focus();
}
timer = setInterval(checkChild, 500);
}
function checkChild() {
if (errorMessage != false) {
if (newWindow.closed) {
msg = '<div class="alert alert-danger remove-5s">' + socialNetworkErrorMessage + '</div>';
if ($('.dev-login-li .alert').length > 0) {
$('.dev-login-li .alert').remove();
}
$('.dev-login-li').prepend(msg);
clearInterval(timer);
}
}
}
function show_email_modal() |
function getprayerTimeData() {
$.ajax({
url: getPrayerInfoUrl,
success: preparePrayerTimeWidget
});
}
// increaseFontSize and decreaseFontSize
var min = 16;
var max = 20;
function increaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != max) {
s += 1;
}
p[i].style.fontSize = s + "px"
}
}
function decreaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != min) {
s -= 1;
}
p[i].style.fontSize = s + "px"
}
}
function resetFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
p[i].style.fontSize = "18px"
}
}
$('body').on('click','.largeFont',function () {
increaseFontSize();
});
$('body').on('click','.smallFont',function () {
decreaseFontSize();
});
$('body').on('click','.normalFont',function () {
resetFontSize();
});
function sharePopup(url, w, h) {
var left = (screen.width / 2) - (w / 2);
var top = (screen.height / 2) - (h / 2);
return window.open(url, "share window", 'toolbar=no, location=no, directories=no, status=no, menubar=no, scrollbars=yes, copyhistory=no, width=' + w + ', height=' + h + ', top=' + top + ', left=' + left);
}
function loginToChat() {
$.ajax({
url: chatLoginUrl,
success: function (data) {
if (reoloadPageForChat && data.loggedIn) {
window.location.reload(true);
return;
}
loadScript('https://repository.chatwee.com/scripts/72e4b84d2ef104b50494d305ab4bde88.js', null, 'chatwee-js-tag');
}
});
}
function logoutFromChat() {
$.ajax({
url: chatLogoutUrl,
success: function() {
$('#chatwee-js-tag').remove();
}
});
}
$(document).on('shown.bs.tab', 'a[data-toggle="tab"]',function (e) {
var target = $(e.target).attr("href") // activated tab
if(target=='#tab_default_2'){
setTimeout(function(){
initFormValidation() ;
},200)
}
});
jQuery(document).ready(function ($) {
// $(window).on('user.loggedin', loginToChat);
// $(window).on('user.loggedout', logoutFromChat);
$('form[name=searchForm]').submit(function (e) {
if (typeof inAngularLayout === 'undefined') {
e.preventDefault();
$(this).data('submitted', | {
document.getElementById('form_email').value = "";
// $('#form_email').css('text-indent', '35px');
$('#form-modal .help-error').remove();
$('#form-modal .form-group').removeClass('is-invalid');
$('#form-modal').modal('show');
} | identifier_body |
full-site.js | $choicesModal.find('.modal-footer').html("");
var $firstButton;
for (var i in buttons) {
var btn = buttons[i];
var attrsString = "";
for (var key in btn.attrs) {
var value = btn.attrs[key];
attrsString += key + '="' + value + '" ';
}
var $button = $('<a target="_self" ' + attrsString + ' onclick="' + btn.clickAction + '">' + btn.textValue + '</a>');
if (!$firstButton) {
$firstButton = $button;
}
$choicesModal.find('.modal-footer').append($button);
}
$choicesModal.modal({keyboard: true});
$choicesModal.on('shown.bs.modal', function () {
if ($firstButton && window.location == window.parent.location) {
$firstButton.focus();
}
});
$choicesModal.modal('show');
$(".btnPrint").printPage();
$choicesModal.off('hidden.bs.modal');
$choicesModal.on('hidden.bs.modal', function (e) {
if (onCancelFunction)
onCancelFunction();
});
}
function htmlEncode(str) {
return str.replace(/</g, '<').replace(/>/g, '>').replace(/'/g, ''').replace(/"/g, '"');
}
function | () {
$('#choices-modal').modal('hide');
}
function userStateChange(data, triggerLoginEvent) {
var data = typeof data == "undefined" ? null : data;
// $('.alert-danger').remove();
$('.login-slid-div').slideUp(300);
if (data) {
if(data.user.avatar){
$(".userImage").html('<i><img src="/'+data.user.avatar+'" /></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i><img class="img-circle dev-profile-image" src="/'+data.user.avatar+'"/></i> '+data.user.username+'<span class="caret"></span>')
}else{
$(".userImage").html('<i class="fas fa-user-circle" ></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i class="fas fa-user-circle fa-2x" style="margin-top: 5px;"></i> '+data.user.username+'<span class="caret"></span>')
}
$('.dev-anon-container').addClass('hide');
$('.dev-login-in').removeClass('hide');
// responsive
$('.userNotLogged').addClass('hide');
$('.userLogged').removeClass('hide');
if (data.user.materialCreate) {
$('.dev-material-create').removeClass('hide');
}
if (data.user.backend) {
$('.dev-backend-control').removeClass('hide');
}
if (data.user.comicsCreate) {
$('.dev-comics-create').removeClass('hide');
}
isLoggedIn = true;
if (triggerLoginEvent) {
$(window).trigger('user.loggedin');
}
$('.top-note').addClass('hidden');
for (var variableName in data.injectJSVariables) {
window[variableName] = data.injectJSVariables[variableName];
}
for (var fileId in data.injectFiles) {
loadScript(data.injectFiles[fileId], null, fileId);
onLogoutRemoveIds.push(fileId);
}
if (typeof afterLoginPerformAction === 'function') {
afterLoginPerformAction();
afterLoginPerformAction = null;
}
// if($('#login-popup').is(':visible')){
// lightcase.close();
// }
} else {
$('.dev-user-profile').html("");
// $('[type="password"]').val("");
$('.dev-anon-container').removeClass('hide');
$('.dev-login-in').addClass('hide');
$('#dev-material-create').addClass('hide');
$('#dev-backend-control').addClass('hide');
$('#dev-comics-create').addClass('hide');
if (typeof timerNotificationsInterval !== 'undefined' && timerNotificationsInterval) {
clearInterval(timerNotificationsInterval);
}
var userStatusLognout = isLoggedIn;
isLoggedIn = false;
if (userStatusLognout) {
$(window).trigger('user.loggedout');
}
$('.top-note').removeClass('hidden');
for (var fileIdIndex in onLogoutRemoveIds) {
$('#' + onLogoutRemoveIds[fileIdIndex]).remove();
}
}
}
function showAuthError(error) {
if (++failCount >= 3 || error.indexOf("Captcha") != -1) {
location.href = loginUrl;
} else {
showNotification('error',error);
// $('.dev-login-li').find('.alert').remove();
// $('.dev-login-li').prepend('<div class="alert alert-danger remove-5s">'
// + error + '</div>');
// if($('#ajax-form-login-resp').is(':visible')) $('#login-popup').lightcase('resize');
}
}
function SocialNetworkConnect(element) {
newWindow = window.open($(element).attr("data-url"), '', 'height=800, width=1000');
if (window.focus) {
newWindow.focus();
}
timer = setInterval(checkChild, 500);
}
function checkChild() {
if (errorMessage != false) {
if (newWindow.closed) {
msg = '<div class="alert alert-danger remove-5s">' + socialNetworkErrorMessage + '</div>';
if ($('.dev-login-li .alert').length > 0) {
$('.dev-login-li .alert').remove();
}
$('.dev-login-li').prepend(msg);
clearInterval(timer);
}
}
}
function show_email_modal() {
document.getElementById('form_email').value = "";
// $('#form_email').css('text-indent', '35px');
$('#form-modal .help-error').remove();
$('#form-modal .form-group').removeClass('is-invalid');
$('#form-modal').modal('show');
}
function getprayerTimeData() {
$.ajax({
url: getPrayerInfoUrl,
success: preparePrayerTimeWidget
});
}
// increaseFontSize and decreaseFontSize
var min = 16;
var max = 20;
function increaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != max) {
s += 1;
}
p[i].style.fontSize = s + "px"
}
}
function decreaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != min) {
s -= 1;
}
p[i].style.fontSize = s + "px"
}
}
function resetFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
p[i].style.fontSize = "18px"
}
}
$('body').on('click','.largeFont',function () {
increaseFontSize();
});
$('body').on('click','.smallFont',function () {
decreaseFontSize();
});
$('body').on('click','.normalFont',function () {
resetFontSize();
});
function sharePopup(url, w, h) {
var left = (screen.width / 2) - (w / 2);
var top = (screen.height / 2) - (h / 2);
return window.open(url, "share window", 'toolbar=no, location=no, directories=no, status=no, menubar=no, scrollbars=yes, copyhistory=no, width=' + w + ', height=' + h + ', top=' + top + ', left=' + left);
}
function loginToChat() {
$.ajax({
url: chatLoginUrl,
success: function (data) {
if (reoloadPageForChat && data.loggedIn) {
window.location.reload(true);
return;
}
loadScript('https://repository.chatwee.com/scripts/72e4b84d2ef104b50494d305ab4bde88.js', null, 'chatwee-js-tag');
}
});
}
function logoutFromChat() {
$.ajax({
url: chatLogoutUrl,
success: function() {
$('#chatwee-js-tag').remove();
}
});
}
$(document).on('shown.bs.tab', 'a[data-toggle="tab"]',function (e) {
var target = $(e.target).attr("href") // activated tab
if(target=='#tab_default_2'){
setTimeout(function(){
initFormValidation() ;
},200)
}
});
jQuery(document).ready(function ($) {
// $(window).on('user.loggedin', loginToChat);
// $(window).on('user.loggedout', logoutFromChat);
$('form[name=searchForm]').submit(function (e) {
if (typeof inAngularLayout === 'undefined') {
e.preventDefault();
$(this).data('submitted', | closeDialog | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.