Columns:
    code         - source file contents (string, lengths 31 to 1.05M characters)
    apis         - list of fully qualified APIs called in the file
    extract_api  - per-call extraction records with character offsets (string, lengths 97 to 1.91M characters)
#Author: <NAME>
#Contact: <EMAIL>
#Date: Aug 02, 2020

import numpy as np

def cal_emp_cdf(insamples):
    '''
    This is function to calcualte emperical CDF of Dirichlet distributed facies proportion samples.
    Variables:
        insamples - input samples of facies proportions, 3D array, [n_seis_features, n_posterior_samples, n_facies]
    '''
    smpls_cdf_libs = []
    for i in range(len(insamples)):
        samples = insamples[i]
        cdfs = [np.count_nonzero(samples[j,0]>samples[:,0])/samples.shape[0] \
                for j in range(samples.shape[0])]
        cdfs = np.asarray(cdfs)
        smpls_cdf_libs.append(np.c_[samples, cdfs])
    return smpls_cdf_libs
[ "numpy.count_nonzero", "numpy.asarray" ]
[((598, 614), 'numpy.asarray', 'np.asarray', (['cdfs'], {}), '(cdfs)\n', (608, 614), True, 'import numpy as np\n'), ((469, 516), 'numpy.count_nonzero', 'np.count_nonzero', (['(samples[j, 0] > samples[:, 0])'], {}), '(samples[j, 0] > samples[:, 0])\n', (485, 516), True, 'import numpy as np\n')]
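Each row above pairs a source string (code) with the qualified library calls it makes (apis) and, in extract_api, per-call records that include character offsets into that source plus the originating import statement. The dump does not show the extraction tooling itself; the following is only a rough, hypothetical sketch of how such spans could be recovered with Python's ast module. The function name extract_api_calls and its exact output format are illustrative, not the dataset's actual pipeline.

import ast

def extract_api_calls(source):
    """Return (start, end, qualified_name) spans for calls rooted at an imported module alias."""
    tree = ast.parse(source)
    lines = source.splitlines(keepends=True)
    # absolute character offset of the start of each source line
    line_start = [0]
    for ln in lines:
        line_start.append(line_start[-1] + len(ln))

    # local alias -> module name, e.g. {'np': 'numpy', 'mt': 'mahotas'}; 'from x import y' is ignored here
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                aliases[a.asname or a.name] = a.name

    spans = []
    for node in ast.walk(tree):
        if not isinstance(node, ast.Call):
            continue
        # flatten dotted chains such as np.lib.Arrayterator -> ['Arrayterator', 'lib'] with root 'np'
        parts, func = [], node.func
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name) and func.id in aliases:
            qualified = ".".join([aliases[func.id]] + parts[::-1])
            start = line_start[node.lineno - 1] + node.col_offset
            end = line_start[node.end_lineno - 1] + node.end_col_offset
            spans.append((start, end, qualified))
    return spans

Run on the cal_emp_cdf file above, a walker along these lines would report numpy.count_nonzero and numpy.asarray together with their character spans, which is the shape of the apis and extract_api columns.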
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.filters import gabor
import mahotas as mt
import pandas as pd
from glob import glob
from skimage.feature import local_binary_pattern

def fun1(img_mask,Label):
    count = 0
    gaborenergy1 = []
    gaborentropy1 = []
    w1=[]
    h1=[]
    area1 = []
    perimeter1 = []
    rectArea1= []
    aspectratio1 = []
    rectangularity1 = []
    circularity1 = []
    equi_diameter1 = []
    red_mean1 = []
    green_mean1 = []
    blue_mean1 = []
    red_var1 = []
    blue_var1 = []
    green_var1 = []
    contrast1 = []
    correlation1 = []
    inversedifferencemoments1 = []
    entropy1 = []
    Label1 = []
    LBP = []
    extent1= []
    solidity1=[]
    hull_area1=[]
    equi_diameter1 = []
    radius = 3
    no_points = 8 * radius
    img_names = glob(img_mask)
    iasd=0
    for fn in img_names:
        #print('processing %s...' % fn,i)
        print(iasd,end="\t")
        iasd=iasd+1
        img = cv2.imread(fn)
        #cv2.imshow("original",img)

        ####### Converting image to grayscale #########
        gs = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

        # GABOR filter....................................................................
        gaborFilt_real, gaborFilt_imag = gabor(gs, frequency=0.6)
        gaborFilt = (gaborFilt_real ** 2 + gaborFilt_imag ** 2) // 2
        #fig, ax = plt.subplots(1, 3)
        #ax[0].imshow(gaborFilt_real, cmap='gray')
        #ax[1].imshow(gaborFilt_imag, cmap='gray')
        #ax[2].imshow(gaborFilt, cmap='gray')
        #plt.show()

        # energy and entropy of GABOR filter response......................................
        gabor_hist, _ = np.histogram(gaborFilt, 8)
        gabor_hist = np.array(gabor_hist, dtype=float)
        gabor_prob = np.divide(gabor_hist, np.sum(gabor_hist))
        gabor_energy = np.sum(gabor_prob ** 2)
        gabor_entropy = -np.sum(np.multiply(gabor_prob, np.log2(gabor_prob)))
        #print("gabor_energy:" + str(gabor_energy))
        #print("gabor_entropy:" + str(gabor_entropy))

        count = count+1
        #print(count)

        #########################local_binary_pattern#########################
        lbp = local_binary_pattern(gs, no_points, radius, method='uniform')

        ###### Smoothing image using Guassian filter
        blur = cv2.GaussianBlur(gs, (25,25),0)
        #print(gs.shape)

        ####Adaptive image thresholding using Otsu's thresholding method
        ret_otsu,im_bw_otsu = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
        #cv2.imshow("Thresholding",im_bw_otsu)

        ####Boundary extraction using sobel filters
        sobelx64f = cv2.Sobel(im_bw_otsu,cv2.CV_64F,1,0,ksize=5)
        abs_sobel64f = np.absolute(sobelx64f)
        sobel_8u = np.uint8(abs_sobel64f)
        #cv2.imshow("Boundary Extraction",abs_sobel64f)
        ret_sobel,im_bw_sobel = cv2.threshold(sobel_8u,1,255,cv2.THRESH_BINARY)
        #cv2.imshow("boundary",im_bw_sobel)
        kernel_edge = np.ones((15,15),np.uint8)
        closing_edge = cv2.morphologyEx(im_bw_sobel, cv2.MORPH_CLOSE, kernel_edge)
        #cv2.imshow("Closing Edge",closing_edge)
        #cv2.imshow("Boundary ",im_bw_otsu)

        ##### Boundary extraction using contours
        ret, thresh = cv2.threshold(gs, 127, 255, 0)
        contours, hierarchy = cv2.findContours(im_bw_otsu, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        len(contours)
        cnt=contours[0]
        len(cnt)
        plottedContour = cv2.drawContours(gs,contours,-1,(0,255,0),10)
        #cv2.imshow("Plotted Contour",plottedContour)

        ##### Shape based features
        M = cv2.moments(cnt)
        #print("MOments: ",M)
        area = cv2.contourArea(cnt)
        #print("Area",area)
        perimeter = cv2.arcLength(cnt,True)
        #print("Perimeter",perimeter)
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        contours_im = cv2.drawContours(im_bw_otsu,[box],0,(255,255,255),2)
        #cv2.imshow("best fit rect",contours_im)
        #ellipse = cv2.fitEllipse(cnt)
        #im = cv2.ellipse(im_bw_otsu,ellipse,(255,255,255),2)
        #cv2.imshow("")
        x,y,w,h = cv2.boundingRect(cnt)
        aspect_ratio = float(w)/h
        #print("Aspect Ratio: ",aspect_ratio)

        ######### Extent#############
        rect_area = w * h
        extent = float(area) / rect_area

        ######### solidity #############
        hull = cv2.convexHull(cnt)
        hull_area = cv2.contourArea(hull)
        if hull_area != 0:
            solidity = float(area) / hull_area
        else:
            solidity = 0

        ####Shape based features calculated - Aspect ratio, rectangularity, circularity
        if area !=0:
            rectangularity =w*h/area
            circularity = ((perimeter) ** 2) / area
        else:
            rectangularity=0
            circularity = 0
        #print("rectangularity: ",rectangularity)
        #print("circularity: ",circularity)
        equi_diameter = np.sqrt(4*area/np.pi)
        #print("equi_diameter:",equi_diameter)
        #(x,y),(MA,ma),angle = cv2.fitEllipse(cnt)
        #cv2.imshow("Original1",img)

        ###Calculating color based features - mean, std-dev of the RGB channels
        red_channel = img[:,:,0]
        #cv2.imshow("red channel: ",red_channel)
        green_channel = img[:,:,1]
        #cv2.imshow("green channel: ",green_channel)
        blue_channel = img[:,:,2]
        #cv2.imshow("blue channel: ",blue_channel)
        g=np.mean(blue_channel)
        h = np.mean(red_channel)
        i = np.mean(green_channel)
        #print("RedMean",h)
        #print("BlueMean",g)
        #print("GreenMean", i)
        blue_channel[blue_channel == 255] = 0
        green_channel[green_channel == 255] = 0
        red_channel[red_channel == 255] = 0
        red_mean = np.mean(red_channel)
        #print("red_mean: ",red_mean)
        green_mean = np.mean(green_channel)
        #print("green_mean",green_mean)
        blue_mean = np.mean(blue_channel)
        #print("blue_mean: ",blue_mean)
        red_var = np.std(red_channel)
        #print("red_var: ",red_var)
        blue_var = np.std(blue_channel)
        green_var = np.std(green_channel)

        ######### Texture Features ##########
        textures = mt.features.haralick(gs)
        ht_mean = textures.mean(axis=0)
        #print(ht_mean)
        #print(ht_mean[1]) #contrast
        #print(ht_mean[2]) #correlation
        #print(ht_mean[4]) #inverse difference moments
        #print(ht_mean[8]) #entropy

        gaborenergy1.append(gabor_energy)
        gaborentropy1.append(gabor_entropy)
        w1.append(w)
        h1.append(h)
        area1.append(area)
        rectArea1.append(rect_area)
        perimeter1.append(perimeter)
        aspectratio1.append(aspect_ratio)
        rectangularity1.append(rectangularity)
        circularity1.append(circularity)
        equi_diameter1.append(equi_diameter)
        red_mean1.append(red_mean)
        green_mean1.append(green_mean)
        blue_mean1.append(blue_mean)
        red_var1.append(red_var)
        blue_var1.append(blue_var)
        green_var1.append(green_var)
        contrast1.append(ht_mean[1])
        correlation1.append(ht_mean[2])
        inversedifferencemoments1.append(ht_mean[4])
        entropy1.append(ht_mean[8])
        LBP.append(lbp)
        extent1.append(extent)
        solidity1.append(solidity)
        hull_area1.append(hull_area)

    # dictionary of lists
    dict1 = {'Label':Label,'gaborenergy': gaborenergy1, 'gaborentropy': gaborentropy1,'width':w1,'Length':h1,
             'area': area1,'Rect_Area':rectArea1, 'perimeter': perimeter1,'Extent': extent1,
             'Solidity':solidity1,'Hull_Area':hull_area1,'AspectRatio': aspectratio1,
             'Rectangularity': rectangularity1, 'Circularity': circularity1, 'EquiDimeter': equi_diameter1,
             'RedMean': red_mean1, 'GreenMean': green_mean1, 'BlueMean': blue_mean1,
             'RedVar': red_var1,'BlueVar': blue_var1,'GreenVar': green_var1,
             'contrast': contrast1, 'correlation': correlation1,
             'inverse difference moments': inversedifferencemoments1, 'entropy': entropy1}

    df = pd.DataFrame(dict1)
    # f=open("f1.csv","a")
    # saving the dataframe
    df.to_csv("Labled_DATAUpdate1.csv", mode='a', header=False)
[ "numpy.uint8", "numpy.sqrt", "numpy.array", "mahotas.features.haralick", "numpy.mean", "numpy.histogram", "cv2.threshold", "cv2.arcLength", "cv2.contourArea", "cv2.minAreaRect", "pandas.DataFrame", "glob.glob", "cv2.drawContours", "numpy.ones", "cv2.boxPoints", "cv2.boundingRect", "numpy.int0", "cv2.morphologyEx", "cv2.cvtColor", "skimage.filters.gabor", "cv2.moments", "numpy.std", "numpy.log2", "cv2.GaussianBlur", "cv2.imread", "cv2.convexHull", "numpy.absolute", "numpy.sum", "cv2.findContours", "cv2.Sobel", "skimage.feature.local_binary_pattern" ]
[((879, 893), 'glob.glob', 'glob', (['img_mask'], {}), '(img_mask)\n', (883, 893), False, 'from glob import glob\n'), ((8435, 8454), 'pandas.DataFrame', 'pd.DataFrame', (['dict1'], {}), '(dict1)\n', (8447, 8454), True, 'import pandas as pd\n'), ((1041, 1055), 'cv2.imread', 'cv2.imread', (['fn'], {}), '(fn)\n', (1051, 1055), False, 'import cv2\n'), ((1160, 1197), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (1172, 1197), False, 'import cv2\n'), ((1334, 1358), 'skimage.filters.gabor', 'gabor', (['gs'], {'frequency': '(0.6)'}), '(gs, frequency=0.6)\n', (1339, 1358), False, 'from skimage.filters import gabor\n'), ((1754, 1780), 'numpy.histogram', 'np.histogram', (['gaborFilt', '(8)'], {}), '(gaborFilt, 8)\n', (1766, 1780), True, 'import numpy as np\n'), ((1803, 1836), 'numpy.array', 'np.array', (['gabor_hist'], {'dtype': 'float'}), '(gabor_hist, dtype=float)\n', (1811, 1836), True, 'import numpy as np\n'), ((1925, 1948), 'numpy.sum', 'np.sum', (['(gabor_prob ** 2)'], {}), '(gabor_prob ** 2)\n', (1931, 1948), True, 'import numpy as np\n'), ((2281, 2342), 'skimage.feature.local_binary_pattern', 'local_binary_pattern', (['gs', 'no_points', 'radius'], {'method': '"""uniform"""'}), "(gs, no_points, radius, method='uniform')\n", (2301, 2342), False, 'from skimage.feature import local_binary_pattern\n'), ((2413, 2446), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gs', '(25, 25)', '(0)'], {}), '(gs, (25, 25), 0)\n', (2429, 2446), False, 'import cv2\n'), ((2576, 2644), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (2589, 2644), False, 'import cv2\n'), ((2762, 2810), 'cv2.Sobel', 'cv2.Sobel', (['im_bw_otsu', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(5)'}), '(im_bw_otsu, cv2.CV_64F, 1, 0, ksize=5)\n', (2771, 2810), False, 'import cv2\n'), ((2831, 2853), 'numpy.absolute', 'np.absolute', (['sobelx64f'], {}), '(sobelx64f)\n', (2842, 2853), True, 'import numpy as np\n'), ((2874, 2896), 'numpy.uint8', 'np.uint8', (['abs_sobel64f'], {}), '(abs_sobel64f)\n', (2882, 2896), True, 'import numpy as np\n'), ((2987, 3037), 'cv2.threshold', 'cv2.threshold', (['sobel_8u', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(sobel_8u, 1, 255, cv2.THRESH_BINARY)\n', (3000, 3037), False, 'import cv2\n'), ((3101, 3128), 'numpy.ones', 'np.ones', (['(15, 15)', 'np.uint8'], {}), '((15, 15), np.uint8)\n', (3108, 3128), True, 'import numpy as np\n'), ((3151, 3210), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_bw_sobel', 'cv2.MORPH_CLOSE', 'kernel_edge'], {}), '(im_bw_sobel, cv2.MORPH_CLOSE, kernel_edge)\n', (3167, 3210), False, 'import cv2\n'), ((3373, 3403), 'cv2.threshold', 'cv2.threshold', (['gs', '(127)', '(255)', '(0)'], {}), '(gs, 127, 255, 0)\n', (3386, 3403), False, 'import cv2\n'), ((3435, 3503), 'cv2.findContours', 'cv2.findContours', (['im_bw_otsu', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(im_bw_otsu, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3451, 3503), False, 'import cv2\n'), ((3596, 3647), 'cv2.drawContours', 'cv2.drawContours', (['gs', 'contours', '(-1)', '(0, 255, 0)', '(10)'], {}), '(gs, contours, -1, (0, 255, 0), 10)\n', (3612, 3647), False, 'import cv2\n'), ((3744, 3760), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (3755, 3760), False, 'import cv2\n'), ((3810, 3830), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (3825, 3830), False, 'import cv2\n'), ((3887, 3911), 'cv2.arcLength', 'cv2.arcLength', 
(['cnt', '(True)'], {}), '(cnt, True)\n', (3900, 3911), False, 'import cv2\n'), ((3968, 3988), 'cv2.minAreaRect', 'cv2.minAreaRect', (['cnt'], {}), '(cnt)\n', (3983, 3988), False, 'import cv2\n'), ((4004, 4023), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (4017, 4023), False, 'import cv2\n'), ((4039, 4051), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (4046, 4051), True, 'import numpy as np\n'), ((4075, 4133), 'cv2.drawContours', 'cv2.drawContours', (['im_bw_otsu', '[box]', '(0)', '(255, 255, 255)', '(2)'], {}), '(im_bw_otsu, [box], 0, (255, 255, 255), 2)\n', (4091, 4133), False, 'import cv2\n'), ((4315, 4336), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (4331, 4336), False, 'import cv2\n'), ((4591, 4610), 'cv2.convexHull', 'cv2.convexHull', (['cnt'], {}), '(cnt)\n', (4605, 4610), False, 'import cv2\n'), ((4632, 4653), 'cv2.contourArea', 'cv2.contourArea', (['hull'], {}), '(hull)\n', (4647, 4653), False, 'import cv2\n'), ((5180, 5205), 'numpy.sqrt', 'np.sqrt', (['(4 * area / np.pi)'], {}), '(4 * area / np.pi)\n', (5187, 5205), True, 'import numpy as np\n'), ((5693, 5714), 'numpy.mean', 'np.mean', (['blue_channel'], {}), '(blue_channel)\n', (5700, 5714), True, 'import numpy as np\n'), ((5728, 5748), 'numpy.mean', 'np.mean', (['red_channel'], {}), '(red_channel)\n', (5735, 5748), True, 'import numpy as np\n'), ((5762, 5784), 'numpy.mean', 'np.mean', (['green_channel'], {}), '(green_channel)\n', (5769, 5784), True, 'import numpy as np\n'), ((6041, 6061), 'numpy.mean', 'np.mean', (['red_channel'], {}), '(red_channel)\n', (6048, 6061), True, 'import numpy as np\n'), ((6125, 6147), 'numpy.mean', 'np.mean', (['green_channel'], {}), '(green_channel)\n', (6132, 6147), True, 'import numpy as np\n'), ((6212, 6233), 'numpy.mean', 'np.mean', (['blue_channel'], {}), '(blue_channel)\n', (6219, 6233), True, 'import numpy as np\n'), ((6296, 6315), 'numpy.std', 'np.std', (['red_channel'], {}), '(red_channel)\n', (6302, 6315), True, 'import numpy as np\n'), ((6375, 6395), 'numpy.std', 'np.std', (['blue_channel'], {}), '(blue_channel)\n', (6381, 6395), True, 'import numpy as np\n'), ((6417, 6438), 'numpy.std', 'np.std', (['green_channel'], {}), '(green_channel)\n', (6423, 6438), True, 'import numpy as np\n'), ((6506, 6530), 'mahotas.features.haralick', 'mt.features.haralick', (['gs'], {}), '(gs)\n', (6526, 6530), True, 'import mahotas as mt\n'), ((1881, 1899), 'numpy.sum', 'np.sum', (['gabor_hist'], {}), '(gabor_hist)\n', (1887, 1899), True, 'import numpy as np\n'), ((2006, 2025), 'numpy.log2', 'np.log2', (['gabor_prob'], {}), '(gabor_prob)\n', (2013, 2025), True, 'import numpy as np\n')]
from __future__ import annotations

from typing import Any

import numpy as np

AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10)

ar_iter = np.lib.Arrayterator(AR_i8)

ar_iter.var
ar_iter.buf_size
ar_iter.start
ar_iter.stop
ar_iter.step
ar_iter.shape
ar_iter.flat

ar_iter.__array__()

for i in ar_iter:
    pass

ar_iter[0]
ar_iter[...]
ar_iter[:]
ar_iter[0, 0, 0]
ar_iter[..., 0, :]
[ "numpy.lib.Arrayterator", "numpy.arange" ]
[((124, 137), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (133, 137), True, 'import numpy as np\n'), ((148, 174), 'numpy.lib.Arrayterator', 'np.lib.Arrayterator', (['AR_i8'], {}), '(AR_i8)\n', (167, 174), True, 'import numpy as np\n')]
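The row above only touches np.lib.Arrayterator attributes for static type checking. As a reminder of what the class does at runtime, here is a small usage sketch (the array shape and buf_size are arbitrary choices, not from the row): it walks a large array in buffered chunks instead of materialising everything at once.

import numpy as np

a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
# read at most 8 elements per chunk
it = np.lib.Arrayterator(a, buf_size=8)

total = 0
for chunk in it:
    total += chunk.sum()   # each chunk is a small ndarray

assert total == a.sum()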
import numpy as np
import eqsig

from liquepy.element.models import ShearTest
from liquepy.element import assess


def test_with_one_cycle_no_dissipation():
    strs = np.array([0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0])
    tau = np.array([0, -2, -4, -6, -8, -6, -4, -2, 0, 2, 4, 6, 8, 6, 4, 2, 0])
    expected_energy = 0
    assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])


def test_with_one_cycle_no_dissipation_with_offset():
    strs = np.array([0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0]) + 4
    tau = np.array([0, -2, -4, -6, -8, -6, -4, -2, 0, 2, 4, 6, 8, 6, 4, 2, 0])
    expected_energy = 0
    assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])


def test_with_one_cycle_circle():
    angle = np.linspace(0, 2 * np.pi, 3600)
    strs = 4 * np.sin(angle)
    tau = 4 * np.cos(angle)
    expected_energy = 4 ** 2 * np.pi
    assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])


def test_with_one_cycle_circle_with_offset():
    angle = np.linspace(0, 2 * np.pi, 3600)
    strs = 4 * np.sin(angle) + 4
    tau = 4 * np.cos(angle) + 10
    expected_energy = 4 ** 2 * np.pi
    assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])


def test_with_one_cycle_triangles():
    strs = np.array([0, -1, -2, -3, -4, -4, -3, -2, -1, 0, 1, 2, 3, 4, 4, 3, 2, 1, 0])
    tau = np.array([0, -2, -4, -6, -8, 0, 0, 0, 0, 0, 2, 4, 6, 8, 0, 0, 0, 0, 0])
    expected_energy = 8 * 4.
    assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])


def test_average_of_absolute_simple():
    values = np.array([4, -3])
    expected = 12.5 / 7
    av_abs = assess.average_of_absolute_via_trapz(values)
    assert np.isclose(av_abs, expected), (av_abs, expected)


def test_average_of_absolute_matching_neg():
    values = np.array([3, -3, 3])
    expected = 1.5
    av_abs = assess.average_of_absolute_via_trapz(values)
    assert np.isclose(av_abs[0], expected), (av_abs[0], expected)
    assert np.isclose(av_abs[1], expected), (av_abs[1], expected)


def test_determine_cum_stored_energy_series_simple():
    gamma = np.array([0, 4, 0, -3, 0])
    tau = np.array([0, 4, 0, -3, 0])
    two_times_triangle_1 = 2 * (4 * 4 / 2)
    two_times_triangle_2 = 2 * (3 * 3 / 2)
    expected_energy = two_times_triangle_1 + two_times_triangle_2
    et = ShearTest(tau, gamma, 1)
    energy = assess.calc_case_et(et)
    assert energy[-1] == expected_energy, (energy[-1], expected_energy)


def test_small_cycle_behaviour_increases_case():
    gamma_1 = np.array([0, 4, -2, 0])
    tau_1 = np.array([0, 4, -4, 0])
    et_1 = ShearTest(tau_1, gamma_1, 1)
    energy_1 = assess.calc_case_et(et_1)
    gamma_2 = np.array([0, 4, 3, 4, -2, 0])
    tau_2 = np.array([0, 4, 1, 1, -4, 0])
    et_2 = ShearTest(tau_2, gamma_2, 1)
    energy_2 = assess.calc_case_et(et_2)
    assert energy_2[-1] > energy_1[-1]


def skip_test_strain_bulge_behaviour_increases_case():
    gamma_1 = np.array([0, 4, -2, 0])
    tau_1 = np.array([0, 4, -4, 0])
    et_1 = ShearTest(tau_1, gamma_1, 1)
    energy_1 = assess.calc_case_et(et_1)
    gamma_2 = np.array([0, 4, 4.1, -2, 0])
    tau_2 = np.array([0, 4, 1, -4, 0])
    et_2 = ShearTest(tau_2, gamma_2, 1)
    energy_2 = assess.calc_case_et(et_2)
    assert energy_2[-1] > energy_1[-1]


def test_determine_cum_stored_energy_series_simple_up_down():
    """ /\ :return: """
    gamma = np.array([0., 1., 0.5])
    tau = np.array([0., 1., 0])
    expected_delta_e = 0.75  # two triangles (1x1x0.5 + 1x0.5x0.5)
    et = ShearTest(tau, gamma)
    energy = assess.calc_case_et(et)
    assert energy[-1] == expected_delta_e, energy


def test_determine_cum_stored_energy_series_simple_up_down_neg():
    gamma = np.array([0., 1., -1])
    tau = np.array([0., 1., -1])
    expected_delta_e = 1.5
    et = ShearTest(tau, gamma)
    energy = assess.calc_case_et(et)
    assert energy[-1] == expected_delta_e, energy


def test_determine_cum_stored_energy_series_simple_close_loop():
    gamma = np.array([1., -1, 1])
    tau = np.array([1., -1, 1])
    expected_delta_e = 2
    et = ShearTest(tau, gamma)
    energy = assess.calc_case_et(et)
    assert energy[-1] == expected_delta_e, energy


def test_determine_cum_stored_energy_series_simple_4points():
    gamma = np.array([0, 1, -1, 2])
    tau = np.array([0, 1, -1, 1])
    step_1 = (0 + 1) / 2 * (1 - 0)
    step_2 = (0 + 2) / 2 * (1 - 0)
    expected_delta_e = step_1 * 4 + step_2
    et = ShearTest(tau, gamma)
    energy = assess.calc_case_et(et)
    assert energy[-1] == expected_delta_e, (energy, expected_delta_e)


def test_determine_cum_stored_energy_series_simple_trapz_zero():
    gamma = np.array([0, 2, 1])
    tau = np.array([0, 2, 1])
    step_1 = (0 + 2) / 2 * (2 - 0)
    step_2 = (2 + 1) / 2 * abs(2 - 1)
    expected_delta_e = step_1 + step_2
    et = ShearTest(tau, gamma)
    energy = assess.calc_case_et(et)
    assert energy[-1] == expected_delta_e, (energy, expected_delta_e)


def test_determine_cum_stored_energy_series_simple_trapz():
    gamma = np.array([1, 3, 2])
    tau = np.array([1, 2, 0])
    step_1 = (0 + 1) / 2 * (2 - 0)
    step_2 = (2 + 1) / 2 * abs(2 - 0)
    expected_delta_e = step_1 + step_2
    et = ShearTest(tau, gamma)
    energy = assess.calc_case_et(et)
    assert energy[-1] == expected_delta_e, (energy, expected_delta_e)


def test_determine_cum_stored_energy_series_simple_5points():
    gamma = np.array([0, 2, 1, 3, 2])
    tau = np.array([0, 2, 1, 2, 0])
    step_1 = (0 + 2) / 2 * (2 - 0)
    step_2 = (2 + 1) / 2 * abs(2 - 1)
    step_3 = (0 + 1) / 2 * (2 - 0)
    step_4 = (2 + 1) / 2 * abs(2 - 0)
    expected_delta_e = step_1 + step_2 + step_3 + step_4
    et = ShearTest(tau, gamma)
    energy = assess.calc_case_et(et)
    assert energy[-1] == expected_delta_e, (energy, expected_delta_e)


def test_case_et_simple_6points():
    gamma = np.array([0, 1, 0.5, 1.5, -1, 2])
    tau = np.array([0, 1, 0.5, 1, -1, 1])
    expected_delta_e = 4.375
    et = ShearTest(tau, gamma)
    energy = assess.calc_case_et(et)
    assert energy[-1] == expected_delta_e, (energy, expected_delta_e)


def test_get_energy_peaks_for_cyclic_loading():
    fs = np.array([0, 1., 2., 3., 4., 5., 5.5, 5.5, 4., 3., 2.5, 2.0, 1., 0., -1, -2, -5, 1, 3, 3.5, 2.5, 3.5, 2.5, -1, -3])
    ds = np.array([0, 0.5, 1., 1.5, 2.5, 3., 4.25, 5.5, 5.5, 5.25, 5.5, 5.25, 4., 3., 1.5, 0.5, -3, -2, -1, -0.5, -0.75, 1.5, 1., -1.5, -5])
    inds = assess.get_energy_peaks_for_cyclic_loading(-fs, -ds)
    expected = np.array([0, 7, 16, 21, 24])
    assert np.sum(abs(inds - expected)) == 0
[ "liquepy.element.assess.calc_diss_energy_fd", "numpy.isclose", "liquepy.element.assess.get_energy_peaks_for_cyclic_loading", "liquepy.element.models.ShearTest", "numpy.array", "numpy.linspace", "liquepy.element.assess.average_of_absolute_via_trapz", "numpy.cos", "numpy.sin", "liquepy.element.assess.calc_case_et" ]
[((169, 237), 'numpy.array', 'np.array', (['[0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0]'], {}), '([0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0])\n', (177, 237), True, 'import numpy as np\n'), ((248, 316), 'numpy.array', 'np.array', (['[0, -2, -4, -6, -8, -6, -4, -2, 0, 2, 4, 6, 8, 6, 4, 2, 0]'], {}), '([0, -2, -4, -6, -8, -6, -4, -2, 0, 2, 4, 6, 8, 6, 4, 2, 0])\n', (256, 316), True, 'import numpy as np\n'), ((573, 641), 'numpy.array', 'np.array', (['[0, -2, -4, -6, -8, -6, -4, -2, 0, 2, 4, 6, 8, 6, 4, 2, 0]'], {}), '([0, -2, -4, -6, -8, -6, -4, -2, 0, 2, 4, 6, 8, 6, 4, 2, 0])\n', (581, 641), True, 'import numpy as np\n'), ((796, 827), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(3600)'], {}), '(0, 2 * np.pi, 3600)\n', (807, 827), True, 'import numpy as np\n'), ((1064, 1095), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(3600)'], {}), '(0, 2 * np.pi, 3600)\n', (1075, 1095), True, 'import numpy as np\n'), ((1331, 1406), 'numpy.array', 'np.array', (['[0, -1, -2, -3, -4, -4, -3, -2, -1, 0, 1, 2, 3, 4, 4, 3, 2, 1, 0]'], {}), '([0, -1, -2, -3, -4, -4, -3, -2, -1, 0, 1, 2, 3, 4, 4, 3, 2, 1, 0])\n', (1339, 1406), True, 'import numpy as np\n'), ((1417, 1488), 'numpy.array', 'np.array', (['[0, -2, -4, -6, -8, 0, 0, 0, 0, 0, 2, 4, 6, 8, 0, 0, 0, 0, 0]'], {}), '([0, -2, -4, -6, -8, 0, 0, 0, 0, 0, 2, 4, 6, 8, 0, 0, 0, 0, 0])\n', (1425, 1488), True, 'import numpy as np\n'), ((1654, 1671), 'numpy.array', 'np.array', (['[4, -3]'], {}), '([4, -3])\n', (1662, 1671), True, 'import numpy as np\n'), ((1709, 1753), 'liquepy.element.assess.average_of_absolute_via_trapz', 'assess.average_of_absolute_via_trapz', (['values'], {}), '(values)\n', (1745, 1753), False, 'from liquepy.element import assess\n'), ((1765, 1793), 'numpy.isclose', 'np.isclose', (['av_abs', 'expected'], {}), '(av_abs, expected)\n', (1775, 1793), True, 'import numpy as np\n'), ((1874, 1894), 'numpy.array', 'np.array', (['[3, -3, 3]'], {}), '([3, -3, 3])\n', (1882, 1894), True, 'import numpy as np\n'), ((1927, 1971), 'liquepy.element.assess.average_of_absolute_via_trapz', 'assess.average_of_absolute_via_trapz', (['values'], {}), '(values)\n', (1963, 1971), False, 'from liquepy.element import assess\n'), ((1983, 2014), 'numpy.isclose', 'np.isclose', (['av_abs[0]', 'expected'], {}), '(av_abs[0], expected)\n', (1993, 2014), True, 'import numpy as np\n'), ((2049, 2080), 'numpy.isclose', 'np.isclose', (['av_abs[1]', 'expected'], {}), '(av_abs[1], expected)\n', (2059, 2080), True, 'import numpy as np\n'), ((2173, 2199), 'numpy.array', 'np.array', (['[0, 4, 0, -3, 0]'], {}), '([0, 4, 0, -3, 0])\n', (2181, 2199), True, 'import numpy as np\n'), ((2210, 2236), 'numpy.array', 'np.array', (['[0, 4, 0, -3, 0]'], {}), '([0, 4, 0, -3, 0])\n', (2218, 2236), True, 'import numpy as np\n'), ((2398, 2422), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau', 'gamma', '(1)'], {}), '(tau, gamma, 1)\n', (2407, 2422), False, 'from liquepy.element.models import ShearTest\n'), ((2436, 2459), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et'], {}), '(et)\n', (2455, 2459), False, 'from liquepy.element import assess\n'), ((2598, 2621), 'numpy.array', 'np.array', (['[0, 4, -2, 0]'], {}), '([0, 4, -2, 0])\n', (2606, 2621), True, 'import numpy as np\n'), ((2634, 2657), 'numpy.array', 'np.array', (['[0, 4, -4, 0]'], {}), '([0, 4, -4, 0])\n', (2642, 2657), True, 'import numpy as np\n'), ((2669, 2697), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau_1', 'gamma_1', '(1)'], {}), '(tau_1, gamma_1, 
1)\n', (2678, 2697), False, 'from liquepy.element.models import ShearTest\n'), ((2713, 2738), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et_1'], {}), '(et_1)\n', (2732, 2738), False, 'from liquepy.element import assess\n'), ((2753, 2782), 'numpy.array', 'np.array', (['[0, 4, 3, 4, -2, 0]'], {}), '([0, 4, 3, 4, -2, 0])\n', (2761, 2782), True, 'import numpy as np\n'), ((2795, 2824), 'numpy.array', 'np.array', (['[0, 4, 1, 1, -4, 0]'], {}), '([0, 4, 1, 1, -4, 0])\n', (2803, 2824), True, 'import numpy as np\n'), ((2836, 2864), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau_2', 'gamma_2', '(1)'], {}), '(tau_2, gamma_2, 1)\n', (2845, 2864), False, 'from liquepy.element.models import ShearTest\n'), ((2880, 2905), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et_2'], {}), '(et_2)\n', (2899, 2905), False, 'from liquepy.element import assess\n'), ((3017, 3040), 'numpy.array', 'np.array', (['[0, 4, -2, 0]'], {}), '([0, 4, -2, 0])\n', (3025, 3040), True, 'import numpy as np\n'), ((3053, 3076), 'numpy.array', 'np.array', (['[0, 4, -4, 0]'], {}), '([0, 4, -4, 0])\n', (3061, 3076), True, 'import numpy as np\n'), ((3088, 3116), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau_1', 'gamma_1', '(1)'], {}), '(tau_1, gamma_1, 1)\n', (3097, 3116), False, 'from liquepy.element.models import ShearTest\n'), ((3132, 3157), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et_1'], {}), '(et_1)\n', (3151, 3157), False, 'from liquepy.element import assess\n'), ((3172, 3200), 'numpy.array', 'np.array', (['[0, 4, 4.1, -2, 0]'], {}), '([0, 4, 4.1, -2, 0])\n', (3180, 3200), True, 'import numpy as np\n'), ((3213, 3239), 'numpy.array', 'np.array', (['[0, 4, 1, -4, 0]'], {}), '([0, 4, 1, -4, 0])\n', (3221, 3239), True, 'import numpy as np\n'), ((3251, 3279), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau_2', 'gamma_2', '(1)'], {}), '(tau_2, gamma_2, 1)\n', (3260, 3279), False, 'from liquepy.element.models import ShearTest\n'), ((3295, 3320), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et_2'], {}), '(et_2)\n', (3314, 3320), False, 'from liquepy.element import assess\n'), ((3472, 3497), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.5]'], {}), '([0.0, 1.0, 0.5])\n', (3480, 3497), True, 'import numpy as np\n'), ((3506, 3529), 'numpy.array', 'np.array', (['[0.0, 1.0, 0]'], {}), '([0.0, 1.0, 0])\n', (3514, 3529), True, 'import numpy as np\n'), ((3604, 3625), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau', 'gamma'], {}), '(tau, gamma)\n', (3613, 3625), False, 'from liquepy.element.models import ShearTest\n'), ((3639, 3662), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et'], {}), '(et)\n', (3658, 3662), False, 'from liquepy.element import assess\n'), ((3793, 3817), 'numpy.array', 'np.array', (['[0.0, 1.0, -1]'], {}), '([0.0, 1.0, -1])\n', (3801, 3817), True, 'import numpy as np\n'), ((3826, 3850), 'numpy.array', 'np.array', (['[0.0, 1.0, -1]'], {}), '([0.0, 1.0, -1])\n', (3834, 3850), True, 'import numpy as np\n'), ((3885, 3906), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau', 'gamma'], {}), '(tau, gamma)\n', (3894, 3906), False, 'from liquepy.element.models import ShearTest\n'), ((3920, 3943), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et'], {}), '(et)\n', (3939, 3943), False, 'from liquepy.element import assess\n'), ((4073, 4095), 'numpy.array', 'np.array', (['[1.0, -1, 1]'], {}), '([1.0, -1, 1])\n', (4081, 4095), True, 'import numpy as np\n'), ((4105, 4127), 
'numpy.array', 'np.array', (['[1.0, -1, 1]'], {}), '([1.0, -1, 1])\n', (4113, 4127), True, 'import numpy as np\n'), ((4162, 4183), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau', 'gamma'], {}), '(tau, gamma)\n', (4171, 4183), False, 'from liquepy.element.models import ShearTest\n'), ((4197, 4220), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et'], {}), '(et)\n', (4216, 4220), False, 'from liquepy.element import assess\n'), ((4348, 4371), 'numpy.array', 'np.array', (['[0, 1, -1, 2]'], {}), '([0, 1, -1, 2])\n', (4356, 4371), True, 'import numpy as np\n'), ((4382, 4405), 'numpy.array', 'np.array', (['[0, 1, -1, 1]'], {}), '([0, 1, -1, 1])\n', (4390, 4405), True, 'import numpy as np\n'), ((4528, 4549), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau', 'gamma'], {}), '(tau, gamma)\n', (4537, 4549), False, 'from liquepy.element.models import ShearTest\n'), ((4563, 4586), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et'], {}), '(et)\n', (4582, 4586), False, 'from liquepy.element import assess\n'), ((4737, 4756), 'numpy.array', 'np.array', (['[0, 2, 1]'], {}), '([0, 2, 1])\n', (4745, 4756), True, 'import numpy as np\n'), ((4767, 4786), 'numpy.array', 'np.array', (['[0, 2, 1]'], {}), '([0, 2, 1])\n', (4775, 4786), True, 'import numpy as np\n'), ((4908, 4929), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau', 'gamma'], {}), '(tau, gamma)\n', (4917, 4929), False, 'from liquepy.element.models import ShearTest\n'), ((4943, 4966), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et'], {}), '(et)\n', (4962, 4966), False, 'from liquepy.element import assess\n'), ((5112, 5131), 'numpy.array', 'np.array', (['[1, 3, 2]'], {}), '([1, 3, 2])\n', (5120, 5131), True, 'import numpy as np\n'), ((5142, 5161), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (5150, 5161), True, 'import numpy as np\n'), ((5283, 5304), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau', 'gamma'], {}), '(tau, gamma)\n', (5292, 5304), False, 'from liquepy.element.models import ShearTest\n'), ((5318, 5341), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et'], {}), '(et)\n', (5337, 5341), False, 'from liquepy.element import assess\n'), ((5489, 5514), 'numpy.array', 'np.array', (['[0, 2, 1, 3, 2]'], {}), '([0, 2, 1, 3, 2])\n', (5497, 5514), True, 'import numpy as np\n'), ((5525, 5550), 'numpy.array', 'np.array', (['[0, 2, 1, 2, 0]'], {}), '([0, 2, 1, 2, 0])\n', (5533, 5550), True, 'import numpy as np\n'), ((5763, 5784), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau', 'gamma'], {}), '(tau, gamma)\n', (5772, 5784), False, 'from liquepy.element.models import ShearTest\n'), ((5798, 5821), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et'], {}), '(et)\n', (5817, 5821), False, 'from liquepy.element import assess\n'), ((5942, 5975), 'numpy.array', 'np.array', (['[0, 1, 0.5, 1.5, -1, 2]'], {}), '([0, 1, 0.5, 1.5, -1, 2])\n', (5950, 5975), True, 'import numpy as np\n'), ((5986, 6017), 'numpy.array', 'np.array', (['[0, 1, 0.5, 1, -1, 1]'], {}), '([0, 1, 0.5, 1, -1, 1])\n', (5994, 6017), True, 'import numpy as np\n'), ((6056, 6077), 'liquepy.element.models.ShearTest', 'ShearTest', (['tau', 'gamma'], {}), '(tau, gamma)\n', (6065, 6077), False, 'from liquepy.element.models import ShearTest\n'), ((6091, 6114), 'liquepy.element.assess.calc_case_et', 'assess.calc_case_et', (['et'], {}), '(et)\n', (6110, 6114), False, 'from liquepy.element import assess\n'), ((6244, 6373), 'numpy.array', 'np.array', (['[0, 
1.0, 2.0, 3.0, 4.0, 5.0, 5.5, 5.5, 4.0, 3.0, 2.5, 2.0, 1.0, 0.0, -1, -2,\n -5, 1, 3, 3.5, 2.5, 3.5, 2.5, -1, -3]'], {}), '([0, 1.0, 2.0, 3.0, 4.0, 5.0, 5.5, 5.5, 4.0, 3.0, 2.5, 2.0, 1.0, \n 0.0, -1, -2, -5, 1, 3, 3.5, 2.5, 3.5, 2.5, -1, -3])\n', (6252, 6373), True, 'import numpy as np\n'), ((6388, 6528), 'numpy.array', 'np.array', (['[0, 0.5, 1.0, 1.5, 2.5, 3.0, 4.25, 5.5, 5.5, 5.25, 5.5, 5.25, 4.0, 3.0, 1.5,\n 0.5, -3, -2, -1, -0.5, -0.75, 1.5, 1.0, -1.5, -5]'], {}), '([0, 0.5, 1.0, 1.5, 2.5, 3.0, 4.25, 5.5, 5.5, 5.25, 5.5, 5.25, 4.0,\n 3.0, 1.5, 0.5, -3, -2, -1, -0.5, -0.75, 1.5, 1.0, -1.5, -5])\n', (6396, 6528), True, 'import numpy as np\n'), ((6550, 6602), 'liquepy.element.assess.get_energy_peaks_for_cyclic_loading', 'assess.get_energy_peaks_for_cyclic_loading', (['(-fs)', '(-ds)'], {}), '(-fs, -ds)\n', (6592, 6602), False, 'from liquepy.element import assess\n'), ((6618, 6646), 'numpy.array', 'np.array', (['[0, 7, 16, 21, 24]'], {}), '([0, 7, 16, 21, 24])\n', (6626, 6646), True, 'import numpy as np\n'), ((490, 558), 'numpy.array', 'np.array', (['[0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0]'], {}), '([0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0])\n', (498, 558), True, 'import numpy as np\n'), ((843, 856), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (849, 856), True, 'import numpy as np\n'), ((871, 884), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (877, 884), True, 'import numpy as np\n'), ((380, 417), 'liquepy.element.assess.calc_diss_energy_fd', 'assess.calc_diss_energy_fd', (['tau', 'strs'], {}), '(tau, strs)\n', (406, 417), False, 'from liquepy.element import assess\n'), ((705, 742), 'liquepy.element.assess.calc_diss_energy_fd', 'assess.calc_diss_energy_fd', (['tau', 'strs'], {}), '(tau, strs)\n', (731, 742), False, 'from liquepy.element import assess\n'), ((961, 998), 'liquepy.element.assess.calc_diss_energy_fd', 'assess.calc_diss_energy_fd', (['tau', 'strs'], {}), '(tau, strs)\n', (987, 998), False, 'from liquepy.element import assess\n'), ((1111, 1124), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1117, 1124), True, 'import numpy as np\n'), ((1143, 1156), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1149, 1156), True, 'import numpy as np\n'), ((1238, 1275), 'liquepy.element.assess.calc_diss_energy_fd', 'assess.calc_diss_energy_fd', (['tau', 'strs'], {}), '(tau, strs)\n', (1264, 1275), False, 'from liquepy.element import assess\n'), ((1557, 1594), 'liquepy.element.assess.calc_diss_energy_fd', 'assess.calc_diss_energy_fd', (['tau', 'strs'], {}), '(tau, strs)\n', (1583, 1594), False, 'from liquepy.element import assess\n')]
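The tests above compare assess.calc_diss_energy_fd against closed-form loop areas; for instance, the circular stress-strain loop of radius 4 is expected to dissipate 4 ** 2 * pi per cycle. The snippet below is only a sketch of that quantity, the running integral of tau over d(gamma) accumulated with the trapezoidal rule, and is not liquepy's implementation.

import numpy as np

def cum_dissipated_energy(tau, gamma):
    # trapezoidal accumulation of the hysteresis-loop area, integral of tau d(gamma)
    d_gamma = np.diff(gamma)
    avg_tau = 0.5 * (tau[1:] + tau[:-1])
    return np.concatenate(([0.0], np.cumsum(avg_tau * d_gamma)))

angle = np.linspace(0, 2 * np.pi, 3600)
strs = 4 * np.sin(angle)
tau = 4 * np.cos(angle)
print(cum_dissipated_energy(tau, strs)[-1])  # ~ 16 * pi = 50.27 for one full loop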
import numpy as np
import cv2


def nms(bboxs, thresh):
    # get all parameters
    x1, y1, x2, y2, scores = [bboxs[:, i] for i in range(len(bboxs[0]))]
    # calculate all areas of boxed
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # sort boxes according to their class score
    sorted_index = scores.argsort()[::-1]
    # result list
    result = []
    while sorted_index.size > 0:
        # get the box with largest score
        max_box = bboxs[sorted_index[0]]
        # add it to our result
        result.append(max_box)
        # calculate intersection coordinates
        xx1 = np.maximum(max_box[0], x1[sorted_index[1:]])
        yy1 = np.maximum(max_box[1], y1[sorted_index[1:]])
        xx2 = np.minimum(max_box[2], x2[sorted_index[1:]])
        yy2 = np.minimum(max_box[3], y2[sorted_index[1:]])
        # calculate intersection area
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        intersection = w * h
        # calculate ious
        ious = intersection / (areas[sorted_index[0]] + areas[sorted_index[1:]] - intersection)
        # retain all the boxes whose ious are less than the threshold
        sorted_index = sorted_index[1:][ious <= thresh]
    return result


def draw_bbox(bboxs, pic_name):
    pic = np.zeros((850, 850), np.uint8)
    for bbox in bboxs:
        x1, y1, x2, y2 = map(int, bbox[:-1])
        pic = cv2.rectangle(pic, (x1, y1), (x2, y2), (255, 0, 0), 2)
    cv2.imshow(pic_name,pic)
    cv2.waitKey(0)


if __name__ == "__main__":
    bboxs = np.array([
        [720, 690, 820, 800, 0.5],
        [204, 102, 358, 250, 0.5],
        [257, 118, 380, 250, 0.8],
        [700, 700, 800, 800, 0.4],
        [280, 135, 400, 250, 0.7],
        [255, 118, 360, 235, 0.7]])
    thresh = 0.3
    draw_bbox(bboxs, "Before_NMS")
    result = nms(bboxs, thresh)
    draw_bbox(result, "After_NMS")
[ "cv2.rectangle", "numpy.minimum", "cv2.imshow", "numpy.array", "numpy.zeros", "numpy.maximum", "cv2.waitKey" ]
[((1286, 1316), 'numpy.zeros', 'np.zeros', (['(850, 850)', 'np.uint8'], {}), '((850, 850), np.uint8)\n', (1294, 1316), True, 'import numpy as np\n'), ((1458, 1483), 'cv2.imshow', 'cv2.imshow', (['pic_name', 'pic'], {}), '(pic_name, pic)\n', (1468, 1483), False, 'import cv2\n'), ((1487, 1501), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1498, 1501), False, 'import cv2\n'), ((1543, 1725), 'numpy.array', 'np.array', (['[[720, 690, 820, 800, 0.5], [204, 102, 358, 250, 0.5], [257, 118, 380, 250,\n 0.8], [700, 700, 800, 800, 0.4], [280, 135, 400, 250, 0.7], [255, 118, \n 360, 235, 0.7]]'], {}), '([[720, 690, 820, 800, 0.5], [204, 102, 358, 250, 0.5], [257, 118, \n 380, 250, 0.8], [700, 700, 800, 800, 0.4], [280, 135, 400, 250, 0.7], [\n 255, 118, 360, 235, 0.7]])\n', (1551, 1725), True, 'import numpy as np\n'), ((598, 642), 'numpy.maximum', 'np.maximum', (['max_box[0]', 'x1[sorted_index[1:]]'], {}), '(max_box[0], x1[sorted_index[1:]])\n', (608, 642), True, 'import numpy as np\n'), ((657, 701), 'numpy.maximum', 'np.maximum', (['max_box[1]', 'y1[sorted_index[1:]]'], {}), '(max_box[1], y1[sorted_index[1:]])\n', (667, 701), True, 'import numpy as np\n'), ((716, 760), 'numpy.minimum', 'np.minimum', (['max_box[2]', 'x2[sorted_index[1:]]'], {}), '(max_box[2], x2[sorted_index[1:]])\n', (726, 760), True, 'import numpy as np\n'), ((775, 819), 'numpy.minimum', 'np.minimum', (['max_box[3]', 'y2[sorted_index[1:]]'], {}), '(max_box[3], y2[sorted_index[1:]])\n', (785, 819), True, 'import numpy as np\n'), ((871, 901), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (881, 901), True, 'import numpy as np\n'), ((914, 944), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (924, 944), True, 'import numpy as np\n'), ((1399, 1453), 'cv2.rectangle', 'cv2.rectangle', (['pic', '(x1, y1)', '(x2, y2)', '(255, 0, 0)', '(2)'], {}), '(pic, (x1, y1), (x2, y2), (255, 0, 0), 2)\n', (1412, 1453), False, 'import cv2\n')]
# Copyright (c) Facebook, Inc. and its affiliates.

import os

'''
This forces the environment to use only 1 cpu when running.
This could be helpful when launching multiple environment simulatenously.
'''
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

import numpy as np
import copy

import pybullet as pb
import pybullet_data

from bullet import bullet_client
from bullet import bullet_utils as bu

from fairmotion.ops import conversions
from fairmotion.ops import math
from fairmotion.utils import constants

import sim_agent
import sim_obstacle
import importlib.util


class Env(object):
    '''
    This environment defines a base environment where the simulated characters
    exist and they are controlled by tracking controllers
    '''
    def __init__(self,
                 fps_sim,
                 fps_act,
                 char_info_module,
                 sim_char_file,
                 ref_motion_scale,
                 actuation,
                 self_collision=None,
                 contactable_body=None,
                 verbose=False,
                 ):
        self._num_agent = len(sim_char_file)
        assert self._num_agent > 0
        assert self._num_agent == len(char_info_module)
        assert self._num_agent == len(ref_motion_scale)

        self._char_info = []
        for i in range(self._num_agent):
            ''' Load Character Info Moudle '''
            spec = importlib.util.spec_from_file_location(
                "char_info%d"%(i), char_info_module[i])
            char_info = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(char_info)
            self._char_info.append(char_info)

            ''' Modfiy Contactable Body Parts '''
            if contactable_body:
                contact_allow_all = True if 'all' in contactable_body else False
                for joint in list(char_info.contact_allow_map.keys()):
                    char_info.contact_allow_map[joint] = \
                        contact_allow_all or char_info.joint_name[joint] in contactable_body

        self._v_up = self._char_info[0].v_up_env

        ''' Define PyBullet Client '''
        self._pb_client = bullet_client.BulletClient(
            connection_mode=pb.DIRECT, options=' --opengl2')
        self._pb_client.setAdditionalSearchPath(pybullet_data.getDataPath())

        ''' timestep for physics simulation '''
        self._dt_sim = 1.0/fps_sim
        ''' timestep for control of dynamic controller '''
        self._dt_act = 1.0/fps_act

        if fps_sim%fps_act != 0:
            raise Exception('FPS_SIM should be a multiples of FPS_ACT')
        self._num_substep = fps_sim//fps_act

        self._verbose = verbose

        self.setup_physics_scene(sim_char_file, self._char_info, ref_motion_scale, self_collision, actuation)

        ''' Elapsed time after the environment starts '''
        self._elapsed_time = 0.0
        ''' For tracking the length of current episode '''
        self._episode_len = 0.0

        ''' Create a Manager for Handling Obstacles '''
        self._obs_manager = sim_obstacle.ObstacleManager(
            self._pb_client, self._dt_act, self._char_info[0].v_up_env)

        ''' Save the initial pybullet state to clear all thing before calling reset '''
        self._init_state = None
        self.reset()
        self._init_state = self._pb_client.saveState()

    def setup_physics_scene(self, sim_char_file, char_info, ref_motion_scale, self_collision, actuation):
        self._pb_client.resetSimulation()

        self.create_ground()

        self._agent = []
        for i in range(self._num_agent):
            self._agent.append(sim_agent.SimAgent(name='sim_agent_%d'%(i),
                                                  pybullet_client=self._pb_client,
                                                  model_file=sim_char_file[i],
                                                  char_info=char_info[i],
                                                  ref_scale=ref_motion_scale[i],
                                                  self_collision=self_collision[i],
                                                  actuation=actuation[i],
                                                  kinematic_only=False,
                                                  verbose=self._verbose))

    def create_ground(self):
        ''' Create Plane '''
        if np.allclose(np.array([0.0, 0.0, 1.0]), self._v_up):
            R_plane = constants.eye_R()
        else:
            R_plane = math.R_from_vectors(np.array([0.0, 0.0, 1.0]), self._v_up)
        self._plane_id = \
            self._pb_client.loadURDF(
                "plane_implicit.urdf", [0, 0, 0], conversions.R2Q(R_plane), useMaximalCoordinates=True)
        self._pb_client.changeDynamics(self._plane_id, linkIndex=-1, lateralFriction=0.9)

        ''' Dynamics parameters '''
        assert np.allclose(np.linalg.norm(self._v_up), 1.0)
        gravity = -9.8 * self._v_up
        self._pb_client.setGravity(gravity[0], gravity[1], gravity[2])
        self._pb_client.setTimeStep(self._dt_sim)
        self._pb_client.setPhysicsEngineParameter(numSubSteps=2)
        self._pb_client.setPhysicsEngineParameter(numSolverIterations=10)
        # self._pb_client.setPhysicsEngineParameter(solverResidualThreshold=1e-10)

    def check_collision(self, body_id1, body_id2, link_id1=None, link_id2=None):
        ''' collision between two bodies '''
        pts = self._pb_client.getContactPoints(
            bodyA=body_id1, bodyB=body_id2, linkIndexA=link_id1, linkIndexB=link_id2)
        return len(p) > 0

    # def check_falldown(self, agent, plane_id=None):
    #     ''' check if any non-allowed body part hits the ground '''
    #     if plane_id is None: plane_id = self._plane_id
    #     pts = self._pb_client.getContactPoints()
    #     for p in pts:
    #         part = None
    #         #ignore self-collision
    #         if p[1] == p[2]: continue
    #         if p[1] == agent._body_id and p[2] == plane_id: part = p[3]
    #         if p[2] == agent._body_id and p[1] == plane_id: part = p[4]
    #         #ignore collision of other agents
    #         if part == None: continue
    #         if not agent._char_info.contact_allow_map[part]: return True
    #     return False

    def check_falldown(self, agent, plane_id=None):
        ''' check if any non-allowed body part hits the ground '''
        if plane_id is None: plane_id = self._plane_id
        pts = self._pb_client.getContactPoints(
            bodyA=agent._body_id, bodyB=plane_id)
        for p in pts:
            part = p[3] if p[1] == agent._body_id else p[4]
            if agent._char_info.contact_allow_map[part]:
                continue
            else:
                return True
        return False

    def is_sim_div(self, agent):
        ''' TODO: check divergence of simulation '''
        return False

    def step(self, target_poses=[]):
        ''' One Step-forward Simulation '''

        ''' Increase elapsed time '''
        self._elapsed_time += self._dt_act
        self._episode_len += self._dt_act

        ''' Update simulation '''
        for _ in range(self._num_substep):
            for i, target_pose in enumerate(target_poses):
                self._agent[i].actuate(pose=target_pose, vel=None)
            self._pb_client.stepSimulation()

        self._obs_manager.update()

    def reset(self, time=0.0, poses=None, vels=None, pb_state_id=None):
        ''' remove obstacles in the scene '''
        self._obs_manager.clear()

        ''' Restore internal pybullet state by uisng the saved info when Env was initially created '''
        if pb_state_id is not None:
            self._pb_client.restoreState(pb_state_id)

        self._elapsed_time = time

        if poses is None:
            if self._init_state is not None:
                self._pb_client.restoreState(self._init_state)
        else:
            for i in range(self._num_agent):
                pose = poses[i]
                vel = None if vels is None else vels[i]
                self._agent[i].set_pose(pose, vel)

        self._episode_len = 0.0

    def add_noise_to_pose_vel(self, agent, pose, vel=None, return_as_copied=True):
        ''' Add a little bit of noise to the given pose and velocity '''
        ref_pose = copy.deepcopy(pose) if return_as_copied else pose
        if vel:
            ref_vel = copy.deepcopy(vel) if return_as_copied else vel
        dof_cnt = 0
        for j in agent._joint_indices:
            joint_type = agent.get_joint_type(j)
            ''' Ignore fixed joints '''
            if joint_type == self._pb_client.JOINT_FIXED:
                continue
            ''' Ignore if there is no corresponding joint '''
            if agent._char_info.bvh_map[j] == None:
                continue
            T = ref_pose.get_transform(agent._char_info.bvh_map[j], local=True)
            R, p = conversions.T2Rp(T)
            if joint_type == self._pb_client.JOINT_SPHERICAL:
                dR = math.random_rotation(
                    mu_theta=agent._char_info.noise_pose[j][0],
                    sigma_theta=agent._char_info.noise_pose[j][1],
                    lower_theta=agent._char_info.noise_pose[j][2],
                    upper_theta=agent._char_info.noise_pose[j][3])
                dof_cnt += 3
            elif joint_type == self._pb_client.JOINT_REVOLUTE:
                theta = math.truncnorm(
                    mu=agent._char_info.noise_pose[j][0],
                    sigma=agent._char_info.noise_pose[j][1],
                    lower=agent._char_info.noise_pose[j][2],
                    upper=agent._char_info.noise_pose[j][3])
                joint_axis = agent.get_joint_axis(j)
                dR = conversions.A2R(joint_axis*theta)
                dof_cnt += 1
            else:
                raise NotImplementedError
            T_new = conversions.Rp2T(np.dot(R, dR), p)
            ref_pose.set_transform(agent._char_info.bvh_map[j], T_new, do_ortho_norm=False, local=True)
            if vel is not None:
                dw = math.truncnorm(
                    mu=np.full(3, agent._char_info.noise_vel[j][0]),
                    sigma=np.full(3, agent._char_info.noise_vel[j][1]),
                    lower=np.full(3, agent._char_info.noise_vel[j][2]),
                    upper=np.full(3, agent._char_info.noise_vel[j][3]))
                ref_vel.data_local[j][:3] += dw
        return ref_pose, ref_vel

    def render(self, rm, ground_height=0.0):
        colors = rm.COLORS_FOR_AGENTS

        rm.gl.glEnable(rm.gl.GL_LIGHTING)
        rm.gl.glEnable(rm.gl.GL_BLEND)
        rm.gl.glBlendFunc(rm.gl.GL_SRC_ALPHA, rm.gl.GL_ONE_MINUS_SRC_ALPHA)

        for i in range(self._num_agent):
            sim_agent = self._agent[i]
            char_info = self._char_info[i]
            if rm.flag['sim_model']:
                rm.gl.glEnable(rm.gl.GL_DEPTH_TEST)
                if rm.flag['shadow']:
                    rm.gl.glPushMatrix()
                    d = np.array([1, 1, 1])
                    d = d - math.projectionOnVector(d, char_info.v_up_env)
                    offset = (0.001 + ground_height) * char_info.v_up_env
                    rm.gl.glTranslatef(offset[0], offset[1], offset[2])
                    rm.gl.glScalef(d[0], d[1], d[2])
                    rm.bullet_render.render_model(self._pb_client,
                                                  sim_agent._body_id,
                                                  draw_link=True,
                                                  draw_link_info=False,
                                                  draw_joint=False,
                                                  draw_joint_geom=False,
                                                  ee_indices=None,
                                                  color=[0.5,0.5,0.5,1.0],
                                                  lighting=False)
                    rm.gl.glPopMatrix()

                rm.bullet_render.render_model(self._pb_client,
                                              sim_agent._body_id,
                                              draw_link=True,
                                              draw_link_info=True,
                                              draw_joint=rm.flag['joint'],
                                              draw_joint_geom=True,
                                              ee_indices=char_info.end_effector_indices,
                                              color=colors[i])

                if rm.flag['collision'] and self._elapsed_time > 0.0:
                    rm.gl.glPushAttrib(rm.gl.GL_LIGHTING|rm.gl.GL_DEPTH_TEST|rm.gl.GL_BLEND)
                    rm.gl.glEnable(rm.gl.GL_BLEND)
                    rm.bullet_render.render_contacts(self._pb_client, sim_agent._body_id)
                    rm.gl.glPopAttrib()

                if rm.flag['com_vel']:
                    p, Q, v, w = sim_agent.get_root_state()
                    p, v = sim_agent.get_com_and_com_vel()
                    rm.gl_render.render_arrow(p, p+v, D=0.01, color=[0, 0, 0, 1])

                if rm.flag['facing_frame']:
                    rm.gl.glPushAttrib(rm.gl.GL_LIGHTING|rm.gl.GL_DEPTH_TEST|rm.gl.GL_BLEND)
                    rm.gl.glEnable(rm.gl.GL_BLEND)
                    rm.gl_render.render_transform(
                        sim_agent.get_facing_transform(ground_height),
                        scale=0.5,
                        use_arrow=True)
                    rm.gl.glPopAttrib()

        if rm.flag['obstacle']:
            self._obs_manager.render()


if __name__ == '__main__':

    import env_renderer as er
    import render_module as rm
    from fairmotion.viz.utils import TimeChecker

    rm.initialize()

    class EnvRenderer(er.EnvRenderer):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.time_checker_auto_play = TimeChecker()
            self.reset()
        def reset(self):
            self.env.reset()
        def one_step(self):
            # a = np.zeros(100)
            self.env.step()
        def extra_render_callback(self):
            self.env.render(self.rm)
        def extra_idle_callback(self):
            time_elapsed = self.time_checker_auto_play.get_time(restart=False)
            if self.rm.flag['auto_play'] and time_elapsed >= self.env._dt_act:
                self.time_checker_auto_play.begin()
                self.one_step()
        def extra_keyboard_callback(self, key):
            if key == b'r':
                self.reset()
            elif key == b'O':
                size = np.random.uniform(0.1, 0.3, 3)
                p, Q, v, w = self.env._agent[0].get_root_state()
                self.env._obs_manager.throw(p, size=size)

    print('=====Motion Tracking Controller=====')

    env = Env(fps_sim=480,
              fps_act=30,
              verbose=False,
              char_info_module=['amass_char_info.py'],
              sim_char_file=['data/character/amass.urdf'],
              ref_motion_scale=[1.0],
              self_collision=[True],
              actuation=["spd"])

    cam = rm.camera.Camera(pos=np.array([12.0, 0.0, 12.0]),
                          origin=np.array([0.0, 0.0, 0.0]),
                          vup=np.array([0.0, 0.0, 1.0]),
                          fov=30.0)

    renderer = EnvRenderer(env=env, cam=cam)
    renderer.run()
[ "pybullet_data.getDataPath", "fairmotion.ops.math.projectionOnVector", "numpy.array", "copy.deepcopy", "numpy.linalg.norm", "sim_agent.get_root_state", "render_module.gl.glPushAttrib", "render_module.bullet_render.render_contacts", "render_module.initialize", "render_module.gl.glBlendFunc", "fairmotion.viz.utils.TimeChecker", "render_module.gl_render.render_arrow", "numpy.dot", "fairmotion.ops.conversions.T2Rp", "render_module.gl.glEnable", "sim_agent.SimAgent", "render_module.bullet_render.render_model", "render_module.gl.glPushMatrix", "fairmotion.ops.conversions.A2R", "sim_agent.get_facing_transform", "fairmotion.ops.math.truncnorm", "render_module.gl.glPopMatrix", "bullet.bullet_client.BulletClient", "fairmotion.utils.constants.eye_R", "fairmotion.ops.conversions.R2Q", "render_module.gl.glPopAttrib", "render_module.gl.glScalef", "sim_obstacle.ObstacleManager", "fairmotion.ops.math.random_rotation", "numpy.random.uniform", "numpy.full", "render_module.gl.glTranslatef", "sim_agent.get_com_and_com_vel" ]
[((14150, 14165), 'render_module.initialize', 'rm.initialize', ([], {}), '()\n', (14163, 14165), True, 'import render_module as rm\n'), ((2242, 2317), 'bullet.bullet_client.BulletClient', 'bullet_client.BulletClient', ([], {'connection_mode': 'pb.DIRECT', 'options': '""" --opengl2"""'}), "(connection_mode=pb.DIRECT, options=' --opengl2')\n", (2268, 2317), False, 'from bullet import bullet_client\n'), ((3292, 3385), 'sim_obstacle.ObstacleManager', 'sim_obstacle.ObstacleManager', (['self._pb_client', 'self._dt_act', 'self._char_info[0].v_up_env'], {}), '(self._pb_client, self._dt_act, self._char_info\n [0].v_up_env)\n', (3320, 3385), False, 'import sim_obstacle\n'), ((10957, 10990), 'render_module.gl.glEnable', 'rm.gl.glEnable', (['rm.gl.GL_LIGHTING'], {}), '(rm.gl.GL_LIGHTING)\n', (10971, 10990), True, 'import render_module as rm\n'), ((10999, 11029), 'render_module.gl.glEnable', 'rm.gl.glEnable', (['rm.gl.GL_BLEND'], {}), '(rm.gl.GL_BLEND)\n', (11013, 11029), True, 'import render_module as rm\n'), ((11038, 11105), 'render_module.gl.glBlendFunc', 'rm.gl.glBlendFunc', (['rm.gl.GL_SRC_ALPHA', 'rm.gl.GL_ONE_MINUS_SRC_ALPHA'], {}), '(rm.gl.GL_SRC_ALPHA, rm.gl.GL_ONE_MINUS_SRC_ALPHA)\n', (11055, 11105), True, 'import render_module as rm\n'), ((2379, 2406), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (2404, 2406), False, 'import pybullet_data\n'), ((4634, 4659), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (4642, 4659), True, 'import numpy as np\n'), ((4696, 4713), 'fairmotion.utils.constants.eye_R', 'constants.eye_R', ([], {}), '()\n', (4711, 4713), False, 'from fairmotion.utils import constants\n'), ((4958, 4982), 'fairmotion.ops.conversions.R2Q', 'conversions.R2Q', (['R_plane'], {}), '(R_plane)\n', (4973, 4982), False, 'from fairmotion.ops import conversions\n'), ((5183, 5209), 'numpy.linalg.norm', 'np.linalg.norm', (['self._v_up'], {}), '(self._v_up)\n', (5197, 5209), True, 'import numpy as np\n'), ((8705, 8724), 'copy.deepcopy', 'copy.deepcopy', (['pose'], {}), '(pose)\n', (8718, 8724), False, 'import copy\n'), ((9310, 9329), 'fairmotion.ops.conversions.T2Rp', 'conversions.T2Rp', (['T'], {}), '(T)\n', (9326, 9329), False, 'from fairmotion.ops import conversions\n'), ((14325, 14338), 'fairmotion.viz.utils.TimeChecker', 'TimeChecker', ([], {}), '()\n', (14336, 14338), False, 'from fairmotion.viz.utils import TimeChecker\n'), ((15570, 15597), 'numpy.array', 'np.array', (['[12.0, 0.0, 12.0]'], {}), '([12.0, 0.0, 12.0])\n', (15578, 15597), True, 'import numpy as np\n'), ((15633, 15658), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (15641, 15658), True, 'import numpy as np\n'), ((15692, 15717), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (15700, 15717), True, 'import numpy as np\n'), ((3884, 4161), 'sim_agent.SimAgent', 'sim_agent.SimAgent', ([], {'name': "('sim_agent_%d' % i)", 'pybullet_client': 'self._pb_client', 'model_file': 'sim_char_file[i]', 'char_info': 'char_info[i]', 'ref_scale': 'ref_motion_scale[i]', 'self_collision': 'self_collision[i]', 'actuation': 'actuation[i]', 'kinematic_only': '(False)', 'verbose': 'self._verbose'}), "(name='sim_agent_%d' % i, pybullet_client=self._pb_client,\n model_file=sim_char_file[i], char_info=char_info[i], ref_scale=\n ref_motion_scale[i], self_collision=self_collision[i], actuation=\n actuation[i], kinematic_only=False, verbose=self._verbose)\n", (3902, 4161), False, 'import sim_agent\n'), ((4770, 4795), 'numpy.array', 
'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (4778, 4795), True, 'import numpy as np\n'), ((8793, 8811), 'copy.deepcopy', 'copy.deepcopy', (['vel'], {}), '(vel)\n', (8806, 8811), False, 'import copy\n'), ((9413, 9627), 'fairmotion.ops.math.random_rotation', 'math.random_rotation', ([], {'mu_theta': 'agent._char_info.noise_pose[j][0]', 'sigma_theta': 'agent._char_info.noise_pose[j][1]', 'lower_theta': 'agent._char_info.noise_pose[j][2]', 'upper_theta': 'agent._char_info.noise_pose[j][3]'}), '(mu_theta=agent._char_info.noise_pose[j][0],\n sigma_theta=agent._char_info.noise_pose[j][1], lower_theta=agent.\n _char_info.noise_pose[j][2], upper_theta=agent._char_info.noise_pose[j][3])\n', (9433, 9627), False, 'from fairmotion.ops import math\n'), ((10307, 10320), 'numpy.dot', 'np.dot', (['R', 'dR'], {}), '(R, dR)\n', (10313, 10320), True, 'import numpy as np\n'), ((11283, 11318), 'render_module.gl.glEnable', 'rm.gl.glEnable', (['rm.gl.GL_DEPTH_TEST'], {}), '(rm.gl.GL_DEPTH_TEST)\n', (11297, 11318), True, 'import render_module as rm\n'), ((12380, 12607), 'render_module.bullet_render.render_model', 'rm.bullet_render.render_model', (['self._pb_client', 'sim_agent._body_id'], {'draw_link': '(True)', 'draw_link_info': '(True)', 'draw_joint': "rm.flag['joint']", 'draw_joint_geom': '(True)', 'ee_indices': 'char_info.end_effector_indices', 'color': 'colors[i]'}), "(self._pb_client, sim_agent._body_id,\n draw_link=True, draw_link_info=True, draw_joint=rm.flag['joint'],\n draw_joint_geom=True, ee_indices=char_info.end_effector_indices, color=\n colors[i])\n", (12409, 12607), True, 'import render_module as rm\n'), ((9816, 10001), 'fairmotion.ops.math.truncnorm', 'math.truncnorm', ([], {'mu': 'agent._char_info.noise_pose[j][0]', 'sigma': 'agent._char_info.noise_pose[j][1]', 'lower': 'agent._char_info.noise_pose[j][2]', 'upper': 'agent._char_info.noise_pose[j][3]'}), '(mu=agent._char_info.noise_pose[j][0], sigma=agent._char_info\n .noise_pose[j][1], lower=agent._char_info.noise_pose[j][2], upper=agent\n ._char_info.noise_pose[j][3])\n', (9830, 10001), False, 'from fairmotion.ops import math\n'), ((10147, 10182), 'fairmotion.ops.conversions.A2R', 'conversions.A2R', (['(joint_axis * theta)'], {}), '(joint_axis * theta)\n', (10162, 10182), False, 'from fairmotion.ops import conversions\n'), ((11377, 11397), 'render_module.gl.glPushMatrix', 'rm.gl.glPushMatrix', ([], {}), '()\n', (11395, 11397), True, 'import render_module as rm\n'), ((11422, 11441), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (11430, 11441), True, 'import numpy as np\n'), ((11611, 11662), 'render_module.gl.glTranslatef', 'rm.gl.glTranslatef', (['offset[0]', 'offset[1]', 'offset[2]'], {}), '(offset[0], offset[1], offset[2])\n', (11629, 11662), True, 'import render_module as rm\n'), ((11683, 11715), 'render_module.gl.glScalef', 'rm.gl.glScalef', (['d[0]', 'd[1]', 'd[2]'], {}), '(d[0], d[1], d[2])\n', (11697, 11715), True, 'import render_module as rm\n'), ((11736, 11951), 'render_module.bullet_render.render_model', 'rm.bullet_render.render_model', (['self._pb_client', 'sim_agent._body_id'], {'draw_link': '(True)', 'draw_link_info': '(False)', 'draw_joint': '(False)', 'draw_joint_geom': '(False)', 'ee_indices': 'None', 'color': '[0.5, 0.5, 0.5, 1.0]', 'lighting': '(False)'}), '(self._pb_client, sim_agent._body_id,\n draw_link=True, draw_link_info=False, draw_joint=False, draw_joint_geom\n =False, ee_indices=None, color=[0.5, 0.5, 0.5, 1.0], lighting=False)\n', (11765, 11951), True, 'import render_module as 
rm\n'), ((12343, 12362), 'render_module.gl.glPopMatrix', 'rm.gl.glPopMatrix', ([], {}), '()\n', (12360, 12362), True, 'import render_module as rm\n'), ((13013, 13089), 'render_module.gl.glPushAttrib', 'rm.gl.glPushAttrib', (['(rm.gl.GL_LIGHTING | rm.gl.GL_DEPTH_TEST | rm.gl.GL_BLEND)'], {}), '(rm.gl.GL_LIGHTING | rm.gl.GL_DEPTH_TEST | rm.gl.GL_BLEND)\n', (13031, 13089), True, 'import render_module as rm\n'), ((13106, 13136), 'render_module.gl.glEnable', 'rm.gl.glEnable', (['rm.gl.GL_BLEND'], {}), '(rm.gl.GL_BLEND)\n', (13120, 13136), True, 'import render_module as rm\n'), ((13157, 13226), 'render_module.bullet_render.render_contacts', 'rm.bullet_render.render_contacts', (['self._pb_client', 'sim_agent._body_id'], {}), '(self._pb_client, sim_agent._body_id)\n', (13189, 13226), True, 'import render_module as rm\n'), ((13247, 13266), 'render_module.gl.glPopAttrib', 'rm.gl.glPopAttrib', ([], {}), '()\n', (13264, 13266), True, 'import render_module as rm\n'), ((13339, 13365), 'sim_agent.get_root_state', 'sim_agent.get_root_state', ([], {}), '()\n', (13363, 13365), False, 'import sim_agent\n'), ((13393, 13424), 'sim_agent.get_com_and_com_vel', 'sim_agent.get_com_and_com_vel', ([], {}), '()\n', (13422, 13424), False, 'import sim_agent\n'), ((13445, 13508), 'render_module.gl_render.render_arrow', 'rm.gl_render.render_arrow', (['p', '(p + v)'], {'D': '(0.01)', 'color': '[0, 0, 0, 1]'}), '(p, p + v, D=0.01, color=[0, 0, 0, 1])\n', (13470, 13508), True, 'import render_module as rm\n'), ((13571, 13647), 'render_module.gl.glPushAttrib', 'rm.gl.glPushAttrib', (['(rm.gl.GL_LIGHTING | rm.gl.GL_DEPTH_TEST | rm.gl.GL_BLEND)'], {}), '(rm.gl.GL_LIGHTING | rm.gl.GL_DEPTH_TEST | rm.gl.GL_BLEND)\n', (13589, 13647), True, 'import render_module as rm\n'), ((13664, 13694), 'render_module.gl.glEnable', 'rm.gl.glEnable', (['rm.gl.GL_BLEND'], {}), '(rm.gl.GL_BLEND)\n', (13678, 13694), True, 'import render_module as rm\n'), ((13914, 13933), 'render_module.gl.glPopAttrib', 'rm.gl.glPopAttrib', ([], {}), '()\n', (13931, 13933), True, 'import render_module as rm\n'), ((15023, 15053), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(0.3)', '(3)'], {}), '(0.1, 0.3, 3)\n', (15040, 15053), True, 'import numpy as np\n'), ((10521, 10565), 'numpy.full', 'np.full', (['(3)', 'agent._char_info.noise_vel[j][0]'], {}), '(3, agent._char_info.noise_vel[j][0])\n', (10528, 10565), True, 'import numpy as np\n'), ((10593, 10637), 'numpy.full', 'np.full', (['(3)', 'agent._char_info.noise_vel[j][1]'], {}), '(3, agent._char_info.noise_vel[j][1])\n', (10600, 10637), True, 'import numpy as np\n'), ((10665, 10709), 'numpy.full', 'np.full', (['(3)', 'agent._char_info.noise_vel[j][2]'], {}), '(3, agent._char_info.noise_vel[j][2])\n', (10672, 10709), True, 'import numpy as np\n'), ((10737, 10781), 'numpy.full', 'np.full', (['(3)', 'agent._char_info.noise_vel[j][3]'], {}), '(3, agent._char_info.noise_vel[j][3])\n', (10744, 10781), True, 'import numpy as np\n'), ((11470, 11516), 'fairmotion.ops.math.projectionOnVector', 'math.projectionOnVector', (['d', 'char_info.v_up_env'], {}), '(d, char_info.v_up_env)\n', (11493, 11516), False, 'from fairmotion.ops import math\n'), ((13770, 13815), 'sim_agent.get_facing_transform', 'sim_agent.get_facing_transform', (['ground_height'], {}), '(ground_height)\n', (13800, 13815), False, 'import sim_agent\n')]
import logging import os import sys sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../"))) sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../../FedML"))) try: from fedml_core.distributed.client.client_manager import ClientManager from fedml_core.distributed.communication.message import Message except ImportError: from FedML.fedml_core.distributed.client.client_manager import ClientManager from FedML.fedml_core.distributed.communication.message import Message from .message_define import MyMessage from .utils import random_matrix,transform_list_to_tensor, post_complete_message_to_sweep_process from .GoWrappers import * import numpy as np import time class FedAVGClientManager(ClientManager): def __init__(self,trainer,worker_num,robust,log_degree, log_scale, resiliency,params_count,args, comm, rank, size, backend="MPI"): super().__init__(args, comm, rank, size, backend) self.worker_num = worker_num self.num_rounds = args.comm_round self.robust = robust self.status = 1 if not self.robust: self.status = 1 self.log_degree = log_degree self.log_scale = log_scale self.resiliency = resiliency self.trainer = trainer self.params_count = params_count #print("params_count",params_count) self.shamirshare_list = [] self.SSstr = None self.collective_shamirshare = dict() self.flag_shamirshare_uploaded_dict = dict() for idx in range(self.worker_num): self.flag_shamirshare_uploaded_dict[idx] = False self.compression = args.compression self.rate = args.compression_rate if self.compression == 0: self.rate = 1.0 self.samples = int(self.params_count / self.rate) self.error = np.zeros((self.params_count,1)) self.alpha = args.compression_alpha self.beta = 1 / self.alpha / (self.rate + 1 + 1 / self.alpha) def register_message_receive_handlers(self): self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_PUBLIC_KEY_TO_CLIENT,self.handle_message_public_key_from_server) self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_INIT_CONFIG,self.handle_message_init) self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_SEND_AGGR_ENCRYPTED_MODEL,self.handle_message_enc_aggregated_model_from_server) self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_SEND_DECRYPTION_INFO,self.handle_message_decryption_info_from_server) self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT,self.handle_message_receive_model_from_server) self.register_message_receive_handler(MyMessage.MSG_TYPE_C2C_SEND_PROCESSED_SS,self.handle_message_shamirshares) def run(self): super().run() def handle_message_shamirshares(self,msg_params): sender_id = msg_params.get(MyMessage.MSG_ARG_KEY_SENDER) #logging.info("handle_message_client %d receive_ss_from_client %d."% (self.get_sender_id(),sender_id)) shamirshares = msg_params.get(MyMessage.MSG_ARG_KEY_SS) self.flag_shamirshare_uploaded_dict[sender_id-1] = True self.collective_shamirshare[sender_id-1] = shamirshares all_received = self.check_whether_all_receive() self.shamirshare_list.append(shamirshares) if all_received: collecitve_shamirshare = ':'.join(self.shamirshare_list) collecitve_shamirshare += "\n" #print("gen css of client", self.get_sender_id()) self.SSstr = genShamirShareString_robust(collecitve_shamirshare, self.worker_num, self.log_degree,self.log_scale) #print("gen shamirshare string") self.send_message_CPK_to_server(0,self.CPK) #def handle_message_receive_model_from_server(self): def handle_message_receive_model_from_server(self, msg_params): #logging.info("handle_message_receive_model_from_server.") 
model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS) client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX) if self.args.is_mobile == 1: model_params = transform_list_to_tensor(model_params) self.trainer.update_model(model_params) self.trainer.update_dataset(int(client_index)) self.round_idx += 1 self.__train(self.round_idx) if self.round_idx == self.num_rounds - 1: # post_complete_message_to_sweep_process(self.args) self.finish() def handle_message_init(self, msg_params): global_model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS) client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX) if self.args.is_mobile == 1: global_model_params = transform_list_to_tensor(global_model_params) self.trainer.update_model(global_model_params) self.trainer.update_dataset(int(client_index)) self.round_idx = 0 self.__train(self.round_idx) def check_whether_all_receive(self): for idx in range(self.worker_num): if not self.flag_shamirshare_uploaded_dict[idx]: return False for idx in range(self.worker_num): self.flag_shamirshare_uploaded_dict[idx] = False return True def __train(self,round_idx): logging.info("#######training########### round_id = %d" % self.round_idx) weights, local_sample_num = self.trainer.train(self.round_idx) #print(weights[0:10]) weights = weights.reshape(-1,1) error_compensated = weights + self.error if self.compression==1: phi = random_matrix(self.alpha/2/self.samples, self.samples,self.params_count,seed = round_idx) compressed = self.beta * phi.dot(error_compensated) recov = phi.transpose().dot(compressed) self.error = error_compensated - recov else: compressed = weights enc_weights = self.encrypt(compressed) self.send_model_to_server(0, enc_weights, local_sample_num) def handle_message_decryption_info_from_server(self,msg_params): decryptionParticipation = msg_params.get(MyMessage.MSG_ARG_KEY_DECRYPTION_PARTICIPATION) decryptionCoefficients = msg_params.get(MyMessage.MSG_ARG_KEY_DECRYPTION_COEFFI) if decryptionParticipation == 1: tpk = msg_params.get(MyMessage.MSG_ARG_KEY_TPK) PCKSShare = genPCKSShare(self.enc_aggregated_model,tpk,self.SSstr, decryptionCoefficients, self.samples, self.robust, self.log_degree, self.log_scale) self.send_PCKS_share_to_server(PCKSShare) def handle_message_public_key_from_server(self,msg_params): print("Setup Phase time", time.time() - self.init) self.pk = msg_params.get(MyMessage.MSG_ARG_KEY_PUBLIC_KEY) self.send_message_phase1_done_to_server() def send_message_phase1_done_to_server(self): message = Message(MyMessage.MSG_TYPE_C2S_PHASE1_DONE, self.get_sender_id(), 0) message.add_params(MyMessage.MSG_ARG_KEY_PHASE1_FLAG, "1") self.send_message(message) def send_PCKS_share_to_server(self,PCKS_shair): message = Message(MyMessage.MSG_TYPE_C2S_PCKS_SHARE, self.get_sender_id(), 0) message.add_params(MyMessage.MSG_ARG_KEY_PCKS_SHARE, PCKS_shair) self.send_message(message) def handle_message_enc_aggregated_model_from_server(self,msg_params): #client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX) self.enc_aggregated_model = msg_params.get(MyMessage.MSG_ARG_KEY_ENCRYPTED_MODEL_PARAMS) self.announce_liveness_status() def announce_liveness_status(self): message = Message(MyMessage.MSG_TYPE_C2S_SEND_LIVENESS_STATUS, self.get_sender_id(), 0) message.add_params(MyMessage.MSG_ARG_KEY_LIVENESS_STATUS,self.status) self.send_message(message) def send_SS(self): self.init = time.time() ShamirShares, self.CPK = genShamirShares(self.worker_num,self.log_degree,self.log_scale, self.resiliency) ShamirShares = 
ShamirShares.decode() sharesArr = ShamirShares.split(':') assert len(sharesArr)-1==self.worker_num for partyCntr in range(self.worker_num): sharedParts = sharesArr[partyCntr].split('/') assert len(sharedParts)==2 if int(sharedParts[0])+1 == self.get_sender_id(): self.flag_shamirshare_uploaded_dict[int(sharedParts[0])] = True self.collective_shamirshare[int(sharedParts[0])] = sharedParts[1] self.shamirshare_list.append(sharedParts[1]) else: self.send_message_ShamirShares(int(sharedParts[0])+1,sharedParts[1]) def send_pk_to_server(self): self.init = time.time() CPK, self.SSstr= genCollectiveKeyShare_not_robust(self.worker_num,self.log_degree,self.log_scale, self.resiliency) self.send_message_CPK_to_server(0,CPK) def send_message_ShamirShares(self, receive_id, ShamirShares): message = Message(MyMessage.MSG_TYPE_C2C_SEND_PROCESSED_SS, self.get_sender_id(), receive_id) message.add_params(MyMessage.MSG_ARG_KEY_SS, ShamirShares) self.send_message(message) def send_message_CPK_to_server(self, receive_id, CPK): #logging.info("send_message_CPK_to_server. receive_id = %d" % receive_id) message = Message(MyMessage.MSG_TYPE_C2S_SEND_CPK_TO_SERVER, self.get_sender_id(), receive_id) message.add_params(MyMessage.MSG_ARG_KEY_CPK, CPK) self.send_message(message) def send_model_to_server(self, receive_id, weights, local_sample_num): message = Message(MyMessage.MSG_TYPE_C2S_SEND_ENC_MODEL_TO_SERVER, self.get_sender_id(), receive_id) message.add_params(MyMessage.MSG_ARG_KEY_ENCRYPTED_MODEL_PARAMS, weights) message.add_params(MyMessage.MSG_ARG_KEY_NUM_SAMPLES, local_sample_num) self.send_message(message) def encrypt(self,weights): ct = encrypt(weights.reshape(-1), self.pk, self.SSstr, self.robust,self.log_degree, self.log_scale, self.resiliency) return ct
[ "numpy.zeros", "os.getcwd", "logging.info", "time.time" ]
[((1855, 1887), 'numpy.zeros', 'np.zeros', (['(self.params_count, 1)'], {}), '((self.params_count, 1))\n', (1863, 1887), True, 'import numpy as np\n'), ((5430, 5503), 'logging.info', 'logging.info', (["('#######training########### round_id = %d' % self.round_idx)"], {}), "('#######training########### round_id = %d' % self.round_idx)\n", (5442, 5503), False, 'import logging\n'), ((8057, 8068), 'time.time', 'time.time', ([], {}), '()\n', (8066, 8068), False, 'import time\n'), ((8910, 8921), 'time.time', 'time.time', ([], {}), '()\n', (8919, 8921), False, 'import time\n'), ((85, 96), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (94, 96), False, 'import os\n'), ((161, 172), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (170, 172), False, 'import os\n'), ((6839, 6850), 'time.time', 'time.time', ([], {}), '()\n', (6848, 6850), False, 'import time\n')]
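Aside: every row repeats the same three fields seen above — the raw code string, the list of fully-qualified APIs it calls, and a string holding a literal list of extraction tuples whose first element is a character span into the code. Below is a minimal sketch of how such a row could be consumed; the dict keys `code` and `extract_api` and the exact tuple layout are assumptions read off the rows shown here, not a documented loader API.

import ast

def iter_api_spans(record):
    # record is assumed to be a dict shaped like the rows above:
    #   record["code"]        -> source string
    #   record["extract_api"] -> string containing a Python literal list of tuples,
    #                            each starting with a (start, end) character span
    #                            followed by the dotted API name
    for entry in ast.literal_eval(record["extract_api"]):
        (start, end), api_name = entry[0], entry[1]
        # slice of the original source that the extractor attributed to this call
        yield (start, end), api_name, record["code"][start:end]

# hypothetical usage on one parsed row:
# for span, api, snippet in iter_api_spans(row):
#     print(span, api, snippet)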
# -*- coding: utf-8 -*- import pandas as pd import re import pickle from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer import numpy as np from dl_architecture import make_charvec, build_model from keras.callbacks import ModelCheckpoint from keras import backend as K from sklearn.preprocessing import Normalizer from sklearn import pipeline from sklearn.base import BaseEstimator, TransformerMixin from sklearn import preprocessing from sklearn.metrics import f1_score, accuracy_score, confusion_matrix from collections import defaultdict from bm25 import BM25Transformer import gc def remove_email(text, replace_token): return re.sub(r'[\w\.-]+@[\w\.-]+', replace_token, text) def remove_url(text, replace_token): regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' return re.sub(regex, replace_token, text) def preprocess(df_data): df_data['text_clean'] = df_data['text'].map(lambda x: remove_url(x, "HTTPURL")) df_data['text_clean'] = df_data['text_clean'].map(lambda x: remove_email(x, 'EMAIL')) return df_data def preprocess_data(df_data, target, drop, tags_to_idx = []): df_data = preprocess(df_data) # shuffle the corpus and optionaly choose the chunk you want to use if you don't want to use the whole thing - will be much faster df_data = df_data.sample(frac=1, random_state=1) tags = df_data[target].tolist() if len(tags_to_idx) < 1: tags_to_idx = list(set(df_data[target].tolist())) df_data = df_data.drop([target], axis=1) if len(drop) > 0: df_data = df_data.drop(drop, axis=1) y = np.array([tags_to_idx.index(tmp_y) for tmp_y in tags]) return df_data, y, tags_to_idx class text_col(BaseEstimator, TransformerMixin): def __init__(self, key): self.key = key def fit(self, x, y=None): return self def transform(self, data_dict): return data_dict[self.key] #fit and transform numeric features, used in scikit Feature union class digit_col(BaseEstimator, TransformerMixin): def fit(self, x, y=None): return self def transform(self, hd_searches): d_col_drops=['text', 'no_punctuation', 'no_stopwords', 'text_clean', 'affixes', 'affix_punct'] hd_searches = hd_searches.drop(d_col_drops,axis=1).values scaler = preprocessing.MinMaxScaler().fit(hd_searches) return scaler.transform(hd_searches) def train(xtrain, ytrain, xval, yval, lang, tags_to_idx, weighting): if weighting =='tfidf': path = "./models/model_" + lang + "_weights.hdf5" elif weighting == 'bm25': path = "./models/model_" + lang + "_bm25_weights.hdf5" checkpointer = ModelCheckpoint(filepath=path, verbose=1, monitor="val_acc", save_best_only=True, mode="max") #print("Train and dev shape: ", xtrain.shape, xval.shape) counts = defaultdict(int) for c in ytrain.tolist(): counts[c] += 1 if lang!='all': character_vectorizer = CountVectorizer(analyzer='char', ngram_range=(3,6), lowercase=False, min_df=5, max_df=0.3) else: character_vectorizer = CountVectorizer(analyzer='char_wb', ngram_range=(3,5), lowercase=False, min_df=5, max_df=0.3) if weighting == 'tfidf': transformer = TfidfTransformer(sublinear_tf=True) elif weighting == 'bm25': transformer = BM25Transformer() tfidf_matrix = pipeline.Pipeline([ ('character', pipeline.Pipeline( [('s5', text_col(key='text_clean')), ('character_vectorizer', character_vectorizer), ('tfidf_character', transformer)])), ('scale', Normalizer())]) tfidf_matrix = tfidf_matrix.fit(xtrain) tfidf_matrix_test = tfidf_matrix.transform(xtrain) print('tfidf matrix size: ', tfidf_matrix_test.shape) ngrams_matrix_shape = tfidf_matrix_test.shape[1] tfidf_matrix_val = 
tfidf_matrix.transform(xval) charvec, char_vocab, max_train_len_char = make_charvec(xtrain.text_clean.tolist()) char_vocab_size = len(char_vocab) + 2 charvec_shape = charvec.shape[1] charvec_val, _, _ = make_charvec(xval.text_clean.tolist(), train=False, char_vocab=char_vocab, max_text_len=max_train_len_char) num_classes = len(set(yval.tolist())) textmodel_data = ngrams_matrix_shape, num_classes, charvec_shape, char_vocab_size, tfidf_matrix, char_vocab, max_train_len_char, tags_to_idx if weighting == 'tfidf': data_path = 'models/model_' + lang + '_data.pk' elif weighting == 'bm25': data_path = 'models/model_' + lang + '_bm25_data.pk' with open(data_path, 'wb') as f: pickle.dump(textmodel_data, f, protocol=2) if lang != 'all': if lang not in ['sg', 'ar']: num_epoch = 20 else: num_epoch = 80 else: num_epoch = 10 model = build_model(ngrams_matrix_shape, num_classes, charvec_shape, char_vocab_size) model.fit([tfidf_matrix_test, charvec], ytrain, validation_data=([tfidf_matrix_val, charvec_val], yval), batch_size=16, epochs=num_epoch, verbose=0, callbacks=[checkpointer]) K.clear_session() gc.collect() return model def test_trained_model(data_test, target, drop, lang, weighting): if weighting == 'tfidf': data_path = 'models/model_' + lang + '_data.pk' elif weighting == 'bm25': data_path = 'models/model_' + lang + '_bm25_data.pk' textmodel_data = pickle.load(open(data_path, 'rb')) unigrams_shape, num_classes, charvec_shape, char_vocab_size,tfidf_matrix, char_vocab, max_train_len_char, tags_to_idx = textmodel_data xtest, ytest, _ = preprocess_data(data_test, target, drop, tags_to_idx=tags_to_idx) tfidf_matrix_test = tfidf_matrix.transform(xtest) charvec_test, _, _ = make_charvec(xtest.text_clean.tolist(), train=False, char_vocab=char_vocab, max_text_len=max_train_len_char) model = build_model(unigrams_shape, num_classes, charvec_shape, char_vocab_size) if weighting =='tfidf': path = "./models/model_" + lang + "_weights.hdf5" elif weighting == 'bm25': path = "./models/model_" + lang + "_bm25_weights.hdf5" model.load_weights(path) predictions = model.predict([tfidf_matrix_test, charvec_test]).argmax(axis=-1) macro = str(f1_score(ytest, predictions, average='macro')) micro = str(f1_score(ytest, predictions, average='micro')) weighted = str(f1_score(ytest, predictions, average='weighted')) accuracy = str(accuracy_score(ytest, predictions)) print('Test F1 macro:', macro) print('Test F1 micro:', micro) print('Test F1 weighted:', weighted) print('Test accuracy:', accuracy) print('Test confusion matrix:', confusion_matrix(ytest, predictions)) def test_all(data_test, target, drop, langs=['es','fa','fr','idmy','pt','slavic'], weighting='tfidf'): if weighting == 'tfidf': data_path = 'models/model_all_data.pk' elif weighting == 'bm25': data_path = 'models/model_all_bm25_data.pk' textmodel_data_all = pickle.load(open(data_path, 'rb')) unigrams_shape, num_classes, charvec_shape, char_vocab_size, tfidf_matrix, char_vocab, max_train_len_char, group_tags_to_idx = textmodel_data_all xtest, ytest, _ = preprocess_data(data_test, target, drop, tags_to_idx=group_tags_to_idx) tfidf_matrix_test = tfidf_matrix.transform(xtest) charvec_test, _, _ = make_charvec(xtest.text_clean.tolist(), train=False, char_vocab=char_vocab, max_text_len=max_train_len_char) model = build_model(unigrams_shape, num_classes, charvec_shape, char_vocab_size) if weighting =='tfidf': path = "./models/model_all_weights.hdf5" elif weighting == 'bm25': path = "./models/model_all_bm25_weights.hdf5" model.load_weights(path) predictions = 
model.predict([tfidf_matrix_test, charvec_test]).argmax(axis=-1) print('Test F1 macro lang group:', f1_score(ytest, predictions, average='macro')) print('Test F1 micro lang group:', f1_score(ytest, predictions, average='micro')) print('Test F1 weighted lang group:', f1_score(ytest, predictions, average='weighted')) print('Test accuracy lang group:', accuracy_score(ytest, predictions)) print('Test confusion matrix lang group:', confusion_matrix(ytest, predictions)) df_predictions = pd.DataFrame({'lang_group_pred': predictions}) xtest.reset_index(drop=True, inplace=True) df_true = pd.DataFrame({'lang_group': ytest}) df_data = pd.concat([xtest, df_true, df_predictions], axis=1) K.clear_session() gc.collect() all_predictions = [] for lang in langs: lang_idx = group_tags_to_idx.index(lang) filtered_data = df_data.loc[df_data['lang_group_pred'] == lang_idx] if weighting == 'tfidf': data_path = 'models/model_' + lang + '_data.pk' elif weighting == 'bm25': data_path = 'models/model_' + lang + '_bm25_data.pk' textmodel_data = pickle.load(open(data_path, 'rb')) unigrams_shape, num_classes, charvec_shape, char_vocab_size, tfidf_matrix, char_vocab, max_train_len_char, tags_to_idx = textmodel_data tfidf_matrix_test = tfidf_matrix.transform(filtered_data).toarray() charvec_test, _, _ = make_charvec(filtered_data.text_clean.tolist(), train=False, char_vocab=char_vocab, max_text_len=max_train_len_char) model = build_model(unigrams_shape, num_classes, charvec_shape, char_vocab_size) if weighting == 'tfidf': path = "./models/model_" + lang + "_weights.hdf5" elif weighting == 'bm25': path = "./models/model_" + lang + "_bm25_weights.hdf5" model.load_weights(path) predictions = model.predict([tfidf_matrix_test, charvec_test]).argmax(axis=-1) predictions = np.array([tags_to_idx[prediction] for prediction in predictions]) df_predictions = pd.DataFrame({'predictions': predictions}) df_predictions.reset_index(drop=True, inplace=True) ytest = filtered_data.variety df_ytest = pd.DataFrame({'y': ytest}) df_ytest.reset_index(drop=True, inplace=True) results = pd.concat([df_ytest, df_predictions], axis=1) all_predictions.append(results) all_data = pd.concat(all_predictions, axis=0) all_y = all_data.y all_preds = all_data.predictions print('Test all macro F1 score:', f1_score(all_y, all_preds, average='macro')) print('Test all micro F1 score:', f1_score(all_y, all_preds, average='micro')) print('Test all weighted F1 score:', f1_score(all_y, all_preds, average='weighted')) print('Test all accuracy score:', accuracy_score(all_y, all_preds)) print('Test all confusion matrix score:', confusion_matrix(all_y, all_preds))
[ "bm25.BM25Transformer", "sklearn.feature_extraction.text.TfidfTransformer", "sklearn.metrics.f1_score", "pickle.dump", "keras.callbacks.ModelCheckpoint", "sklearn.feature_extraction.text.CountVectorizer", "numpy.array", "collections.defaultdict", "keras.backend.clear_session", "dl_architecture.build_model", "gc.collect", "pandas.DataFrame", "re.sub", "sklearn.preprocessing.Normalizer", "pandas.concat", "sklearn.metrics.accuracy_score", "sklearn.preprocessing.MinMaxScaler", "sklearn.metrics.confusion_matrix" ]
[((663, 715), 're.sub', 're.sub', (['"""[\\\\w\\\\.-]+@[\\\\w\\\\.-]+"""', 'replace_token', 'text'], {}), "('[\\\\w\\\\.-]+@[\\\\w\\\\.-]+', replace_token, text)\n", (669, 715), False, 'import re\n'), ((856, 890), 're.sub', 're.sub', (['regex', 'replace_token', 'text'], {}), '(regex, replace_token, text)\n', (862, 890), False, 'import re\n'), ((2710, 2808), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'path', 'verbose': '(1)', 'monitor': '"""val_acc"""', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath=path, verbose=1, monitor='val_acc', save_best_only\n =True, mode='max')\n", (2725, 2808), False, 'from keras.callbacks import ModelCheckpoint\n'), ((3020, 3036), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3031, 3036), False, 'from collections import defaultdict\n'), ((4981, 5058), 'dl_architecture.build_model', 'build_model', (['ngrams_matrix_shape', 'num_classes', 'charvec_shape', 'char_vocab_size'], {}), '(ngrams_matrix_shape, num_classes, charvec_shape, char_vocab_size)\n', (4992, 5058), False, 'from dl_architecture import make_charvec, build_model\n'), ((5243, 5260), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (5258, 5260), True, 'from keras import backend as K\n'), ((5265, 5277), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5275, 5277), False, 'import gc\n'), ((6026, 6098), 'dl_architecture.build_model', 'build_model', (['unigrams_shape', 'num_classes', 'charvec_shape', 'char_vocab_size'], {}), '(unigrams_shape, num_classes, charvec_shape, char_vocab_size)\n', (6037, 6098), False, 'from dl_architecture import make_charvec, build_model\n'), ((7634, 7706), 'dl_architecture.build_model', 'build_model', (['unigrams_shape', 'num_classes', 'charvec_shape', 'char_vocab_size'], {}), '(unigrams_shape, num_classes, charvec_shape, char_vocab_size)\n', (7645, 7706), False, 'from dl_architecture import make_charvec, build_model\n'), ((8427, 8473), 'pandas.DataFrame', 'pd.DataFrame', (["{'lang_group_pred': predictions}"], {}), "({'lang_group_pred': predictions})\n", (8439, 8473), True, 'import pandas as pd\n'), ((8535, 8570), 'pandas.DataFrame', 'pd.DataFrame', (["{'lang_group': ytest}"], {}), "({'lang_group': ytest})\n", (8547, 8570), True, 'import pandas as pd\n'), ((8585, 8636), 'pandas.concat', 'pd.concat', (['[xtest, df_true, df_predictions]'], {'axis': '(1)'}), '([xtest, df_true, df_predictions], axis=1)\n', (8594, 8636), True, 'import pandas as pd\n'), ((8642, 8659), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (8657, 8659), True, 'from keras import backend as K\n'), ((8664, 8676), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8674, 8676), False, 'import gc\n'), ((10350, 10384), 'pandas.concat', 'pd.concat', (['all_predictions'], {'axis': '(0)'}), '(all_predictions, axis=0)\n', (10359, 10384), True, 'import pandas as pd\n'), ((3143, 3238), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""char"""', 'ngram_range': '(3, 6)', 'lowercase': '(False)', 'min_df': '(5)', 'max_df': '(0.3)'}), "(analyzer='char', ngram_range=(3, 6), lowercase=False,\n min_df=5, max_df=0.3)\n", (3158, 3238), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((3275, 3373), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""char_wb"""', 'ngram_range': '(3, 5)', 'lowercase': '(False)', 'min_df': '(5)', 'max_df': '(0.3)'}), "(analyzer='char_wb', ngram_range=(3, 5), lowercase=False,\n 
min_df=5, max_df=0.3)\n", (3290, 3373), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((3421, 3456), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'sublinear_tf': '(True)'}), '(sublinear_tf=True)\n', (3437, 3456), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((4764, 4806), 'pickle.dump', 'pickle.dump', (['textmodel_data', 'f'], {'protocol': '(2)'}), '(textmodel_data, f, protocol=2)\n', (4775, 4806), False, 'import pickle\n'), ((6407, 6452), 'sklearn.metrics.f1_score', 'f1_score', (['ytest', 'predictions'], {'average': '"""macro"""'}), "(ytest, predictions, average='macro')\n", (6415, 6452), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((6470, 6515), 'sklearn.metrics.f1_score', 'f1_score', (['ytest', 'predictions'], {'average': '"""micro"""'}), "(ytest, predictions, average='micro')\n", (6478, 6515), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((6536, 6584), 'sklearn.metrics.f1_score', 'f1_score', (['ytest', 'predictions'], {'average': '"""weighted"""'}), "(ytest, predictions, average='weighted')\n", (6544, 6584), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((6605, 6639), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['ytest', 'predictions'], {}), '(ytest, predictions)\n', (6619, 6639), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((6826, 6862), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['ytest', 'predictions'], {}), '(ytest, predictions)\n', (6842, 6862), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((8020, 8065), 'sklearn.metrics.f1_score', 'f1_score', (['ytest', 'predictions'], {'average': '"""macro"""'}), "(ytest, predictions, average='macro')\n", (8028, 8065), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((8106, 8151), 'sklearn.metrics.f1_score', 'f1_score', (['ytest', 'predictions'], {'average': '"""micro"""'}), "(ytest, predictions, average='micro')\n", (8114, 8151), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((8195, 8243), 'sklearn.metrics.f1_score', 'f1_score', (['ytest', 'predictions'], {'average': '"""weighted"""'}), "(ytest, predictions, average='weighted')\n", (8203, 8243), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((8284, 8318), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['ytest', 'predictions'], {}), '(ytest, predictions)\n', (8298, 8318), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((8367, 8403), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['ytest', 'predictions'], {}), '(ytest, predictions)\n', (8383, 8403), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((9486, 9558), 'dl_architecture.build_model', 'build_model', (['unigrams_shape', 'num_classes', 'charvec_shape', 'char_vocab_size'], {}), '(unigrams_shape, num_classes, charvec_shape, char_vocab_size)\n', (9497, 9558), False, 'from dl_architecture import make_charvec, build_model\n'), ((9899, 9964), 'numpy.array', 'np.array', (['[tags_to_idx[prediction] for prediction in predictions]'], {}), '([tags_to_idx[prediction] for prediction in predictions])\n', (9907, 9964), True, 'import numpy as np\n'), ((9990, 10032), 'pandas.DataFrame', 'pd.DataFrame', (["{'predictions': 
predictions}"], {}), "({'predictions': predictions})\n", (10002, 10032), True, 'import pandas as pd\n'), ((10150, 10176), 'pandas.DataFrame', 'pd.DataFrame', (["{'y': ytest}"], {}), "({'y': ytest})\n", (10162, 10176), True, 'import pandas as pd\n'), ((10249, 10294), 'pandas.concat', 'pd.concat', (['[df_ytest, df_predictions]'], {'axis': '(1)'}), '([df_ytest, df_predictions], axis=1)\n', (10258, 10294), True, 'import pandas as pd\n'), ((10483, 10526), 'sklearn.metrics.f1_score', 'f1_score', (['all_y', 'all_preds'], {'average': '"""macro"""'}), "(all_y, all_preds, average='macro')\n", (10491, 10526), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((10566, 10609), 'sklearn.metrics.f1_score', 'f1_score', (['all_y', 'all_preds'], {'average': '"""micro"""'}), "(all_y, all_preds, average='micro')\n", (10574, 10609), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((10652, 10698), 'sklearn.metrics.f1_score', 'f1_score', (['all_y', 'all_preds'], {'average': '"""weighted"""'}), "(all_y, all_preds, average='weighted')\n", (10660, 10698), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((10739, 10771), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['all_y', 'all_preds'], {}), '(all_y, all_preds)\n', (10753, 10771), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((10819, 10853), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['all_y', 'all_preds'], {}), '(all_y, all_preds)\n', (10835, 10853), False, 'from sklearn.metrics import f1_score, accuracy_score, confusion_matrix\n'), ((3509, 3526), 'bm25.BM25Transformer', 'BM25Transformer', ([], {}), '()\n', (3524, 3526), False, 'from bm25 import BM25Transformer\n'), ((2350, 2378), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (2376, 2378), False, 'from sklearn import preprocessing\n'), ((3775, 3787), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {}), '()\n', (3785, 3787), False, 'from sklearn.preprocessing import Normalizer\n')]
from RNNs import QIFExpAddNoiseSyns
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter1d

# STEP 0: Define simulation condition
#####################################

# parse worker indices from script arguments
idx_cond = 570

# STEP 1: Load pre-generated RNN parameters
###########################################

path = "/home/rgast/PycharmProjects/BrainNetworks/RC/results"
config = pickle.load(open(f"{path}/qif_micro_config.pkl", 'rb'))

# connectivity matrix
C = config['C']

# input
inp = config['inp']

# input weights
W_in = config['W_in']

# simulation config
T = config['T']
dt = config['dt']
dts = config['dts']
cutoff = config['cutoff']

# target values
targets = config['targets']

# adaptation strength
alpha = 0.5  # config['alphas'][idx_cond]

# eta
eta = -3.8  # config['etas'][idx_cond]

# STEP 2: define remaining network parameters
#############################################

# general parameters
N = C.shape[0]
m = W_in.shape[0]
n_folds = 5
ridge_alpha = 1e-3

# qif parameters
Delta = 2.0
J = 15.0*np.sqrt(Delta)
D = 0.0
tau_a = 10.0
tau_s = 0.8

# STEP 3: Evaluate classification performance of RNN
####################################################

# setup QIF RNN
qif_rnn = QIFExpAddNoiseSyns(C, eta, J, Delta=Delta, alpha=alpha, D=D, tau_s=tau_s, tau_a=tau_a)

# perform simulation
W_in[:, :] = 0.0
X = qif_rnn.run(T, dt, dts, inp=inp, W_in=W_in, state_record_key='t1', cutoff=cutoff)
r_qif = np.mean(X, axis=1)

# prepare training data
buffer_val = 0
for i in range(X.shape[1]):
    X[:, i] = gaussian_filter1d(X[:, i], 0.05 / dts, mode='constant', cval=buffer_val)
y = targets
r_qif2 = np.mean(X, axis=1)

# split into test and training data
split = int(np.round(X.shape[0]*0.75, decimals=0))
X_train = X[:split, :]
y_train = y[:split]
X_test = X[split:, :]
y_test = y[split:]

# train RNN
key, scores, coefs = qif_rnn.ridge_fit(X=X_train, y=y_train, alpha=ridge_alpha, k=n_folds, fit_intercept=False, copy_X=True, solver='lsqr')
score, _ = qif_rnn.test(X=X_test, y=y_test, readout_key=key)
y_predict = qif_rnn.predict(X=X, readout_key=key)
print(f"Classification performance on test data: {score}")

# plotting
fig, axes = plt.subplots(nrows=4)
ax1 = axes[0]
ax1.plot(np.mean(X, axis=1))
ax2 = axes[1]
im = ax2.imshow(X.T, aspect='auto', cmap="plasma", vmin=0, vmax=0.005)
#plt.colorbar(im, ax=ax2, shrink=0.5)
ax3 = axes[2]
ax3.plot(y)
ax3.plot(y_predict)
plt.legend(['target', 'output'])
ax4 = axes[3]
start = int(cutoff/dt)
ax4.plot(inp[0, start:])
ax4.plot(inp[1, start:])
plt.legend(['lorenz', 'stula'])
plt.tight_layout()

# plot connectivity
fig2, ax = plt.subplots()
im1 = ax.imshow(C, aspect='auto', cmap="plasma", vmin=0, vmax=np.max(C[:]))
plt.colorbar(im1, ax=ax, shrink=0.5)
plt.title('C')
plt.tight_layout()

print(f'Synaptic sparseness: {np.sum(C[:] == 0)/N**2}')

plt.show()
[ "numpy.mean", "RNNs.QIFExpAddNoiseSyns", "scipy.ndimage.gaussian_filter1d", "numpy.sqrt", "numpy.round", "matplotlib.pyplot.colorbar", "numpy.max", "numpy.sum", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ]
[((1264, 1354), 'RNNs.QIFExpAddNoiseSyns', 'QIFExpAddNoiseSyns', (['C', 'eta', 'J'], {'Delta': 'Delta', 'alpha': 'alpha', 'D': 'D', 'tau_s': 'tau_s', 'tau_a': 'tau_a'}), '(C, eta, J, Delta=Delta, alpha=alpha, D=D, tau_s=tau_s,\n tau_a=tau_a)\n', (1282, 1354), False, 'from RNNs import QIFExpAddNoiseSyns\n'), ((1484, 1502), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (1491, 1502), True, 'import numpy as np\n'), ((1679, 1697), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (1686, 1697), True, 'import numpy as np\n'), ((2256, 2277), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(4)'}), '(nrows=4)\n', (2268, 2277), True, 'import matplotlib.pyplot as plt\n'), ((2493, 2525), 'matplotlib.pyplot.legend', 'plt.legend', (["['target', 'output']"], {}), "(['target', 'output'])\n", (2503, 2525), True, 'import matplotlib.pyplot as plt\n'), ((2614, 2645), 'matplotlib.pyplot.legend', 'plt.legend', (["['lorenz', 'stula']"], {}), "(['lorenz', 'stula'])\n", (2624, 2645), True, 'import matplotlib.pyplot as plt\n'), ((2647, 2665), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2663, 2665), True, 'import matplotlib.pyplot as plt\n'), ((2698, 2712), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2710, 2712), True, 'import matplotlib.pyplot as plt\n'), ((2790, 2826), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im1'], {'ax': 'ax', 'shrink': '(0.5)'}), '(im1, ax=ax, shrink=0.5)\n', (2802, 2826), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2841), 'matplotlib.pyplot.title', 'plt.title', (['"""C"""'], {}), "('C')\n", (2836, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2843, 2861), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2859, 2861), True, 'import matplotlib.pyplot as plt\n'), ((2918, 2928), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2926, 2928), True, 'import matplotlib.pyplot as plt\n'), ((1082, 1096), 'numpy.sqrt', 'np.sqrt', (['Delta'], {}), '(Delta)\n', (1089, 1096), True, 'import numpy as np\n'), ((1585, 1657), 'scipy.ndimage.gaussian_filter1d', 'gaussian_filter1d', (['X[:, i]', '(0.05 / dts)'], {'mode': '"""constant"""', 'cval': 'buffer_val'}), "(X[:, i], 0.05 / dts, mode='constant', cval=buffer_val)\n", (1602, 1657), False, 'from scipy.ndimage import gaussian_filter1d\n'), ((1747, 1786), 'numpy.round', 'np.round', (['(X.shape[0] * 0.75)'], {'decimals': '(0)'}), '(X.shape[0] * 0.75, decimals=0)\n', (1755, 1786), True, 'import numpy as np\n'), ((2302, 2320), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (2309, 2320), True, 'import numpy as np\n'), ((2776, 2788), 'numpy.max', 'np.max', (['C[:]'], {}), '(C[:])\n', (2782, 2788), True, 'import numpy as np\n'), ((2892, 2909), 'numpy.sum', 'np.sum', (['(C[:] == 0)'], {}), '(C[:] == 0)\n', (2898, 2909), True, 'import numpy as np\n')]
import pandas as pd
import numpy as np
import scipy.stats as ss  # required by cramers_v (ss.chi2_contingency); missing in the original
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
import os
import json


def load_data(data_path):
    with open(data_path, "r") as fp:
        data = json.load(fp)

    # convert list -> np.array()
    inputs = np.array(data["features"])
    targets = np.array(data["mms"])

    print(inputs.shape, targets.shape)

    return inputs, targets


def cramers_v(x, y):
    confusion_matrix = pd.crosstab(x, y)
    chi2, p, dof, ex = ss.chi2_contingency(confusion_matrix)
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
    rcorr = r - ((r - 1) ** 2) / (n - 1)
    kcorr = k - ((k - 1) ** 2) / (n - 1)
    # bias-corrected Cramer's V: use the corrected phi2 and dimensions computed above
    return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))


if __name__ == "__main__":
    data_path = os.path.abspath("json/data.json")
    inputs, targets = load_data(data_path=data_path)

    for val in range(0, len(inputs[0])):
        X = (inputs[1], inputs[2])
        Y = (targets[1], targets[2])
        model = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X, Y)
        rfe = RFE(model, 5)
        fit = rfe.fit(X, Y)
        print(fit.n_features_)
        print(f'Observing frame # {val}')
        print("Selected Features: %s" % fit.support_)
        print("Feature Ranking: %s" % fit.ranking_)
    #cramers_v(inputs, targets)
[ "pandas.crosstab", "sklearn.linear_model.LogisticRegression", "json.load", "numpy.array", "sklearn.feature_selection.RFE", "os.path.abspath" ]
[((294, 320), 'numpy.array', 'np.array', (["data['features']"], {}), "(data['features'])\n", (302, 320), True, 'import numpy as np\n'), ((335, 356), 'numpy.array', 'np.array', (["data['mms']"], {}), "(data['mms'])\n", (343, 356), True, 'import numpy as np\n'), ((470, 487), 'pandas.crosstab', 'pd.crosstab', (['x', 'y'], {}), '(x, y)\n', (481, 487), True, 'import pandas as pd\n'), ((830, 863), 'os.path.abspath', 'os.path.abspath', (['"""json/data.json"""'], {}), "('json/data.json')\n", (845, 863), False, 'import os\n'), ((234, 247), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (243, 247), False, 'import json\n'), ((1148, 1161), 'sklearn.feature_selection.RFE', 'RFE', (['model', '(5)'], {}), '(model, 5)\n', (1151, 1161), False, 'from sklearn.feature_selection import RFE\n'), ((1047, 1124), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""lbfgs"""', 'multi_class': '"""multinomial"""'}), "(random_state=0, solver='lbfgs', multi_class='multinomial')\n", (1065, 1124), False, 'from sklearn.linear_model import LogisticRegression\n')]
from . import TorchModel, NUM_GESTURES import torch from torch import nn import numpy as np class ConvNet(TorchModel): def define_model(self, dim_in): self.conv = nn.Conv1d(dim_in[0], self.conv_filters, kernel_size=self.conv_kernel_size, stride=self.conv_stride, padding=self.conv_padding) self.conv_shape = [self.conv_filters, int(np.floor( (dim_in[1] - self.conv_kernel_size + 2 * self.conv_padding) / self.conv_stride) + 1)] model = torch.nn.Sequential( torch.nn.BatchNorm1d(dim_in), torch.nn.Linear(dim_in, dim_in * 32), torch.nn.LeakyReLU(), torch.nn.Linear(dim_in * 32, dim_in * 64), torch.nn.BatchNorm1d(dim_in * 64), torch.nn.LeakyReLU(), torch.nn.Linear(dim_in * 64, NUM_GESTURES), torch.nn.Softmax(dim=1) ) return model def forward_pass(self, sample): targets = torch.LongTensor(sample[1].type(torch.LongTensor)).to(self.device) predictions = self.model(sample[0].to(self.device)) return torch.nn.functional.cross_entropy(predictions, targets), [predictions, None] from . import TorchModel, NUM_GESTURES import torch class Structure(torch.nn.Module): """ The classifier with the best known performance on the NinaPro dataset thus far (using a variation of PaddedMultiRMS). """ def __init__(self, input_size, classes, convnet_filters, convnet_kernel_size, convnet_stride, convnet_padding, convnet_maxpooling, convnet_fc_num): """ In the constructor we instantiate two nn.Linear modules and assign them as member variables. """ super(Structure, self).__init__() # Layer 0: Batch Norm self.batch_norm1 = torch.nn.BatchNorm1d(np.product(input_size)) # Layer 1: Conv Layer self.conv = nn.Conv1d(input_size[0], convnet_filters, kernel_size=convnet_kernel_size, stride=convnet_stride, padding=convnet_padding) self.conv_shape = [convnet_filters, int(np.floor( (input_size[1] - convnet_kernel_size + 2 * convnet_padding) / convnet_stride) + 1)] # Layer 1.0: Maxpooling Layer self.maxpool = nn.MaxPool1d(convnet_maxpooling) self.maxpool_shape = [self.conv_shape[0], self.conv_shape[1] // convnet_maxpooling] # Layer 2: FC Layer self.fcn = nn.Sequential( nn.Linear(np.product(self.maxpool_shape), convnet_fc_num), nn.ReLU(inplace=True), nn.Linear(convnet_fc_num, classes), nn.Sigmoid() ) self.relu = nn.ReLU() def forward(self, x): if x.shape[0] > 1: x = self.batch_norm1(x.flatten(1)).view(*x.shape) x = self.relu(self.conv(x)) x = self.maxpool(x) y = self.fcn(x.flatten(1)) return y # # Yet another variation of FullyConnectedNNV2, leveraging the CustomNet module # class ConvNet(TorchModel): def define_model(self, dim_in): model = Structure(dim_in, NUM_GESTURES, self.convnet_filters, self.convnet_kernel_size, self.convnet_stride, self.convnet_padding, self.convnet_maxpooling, self.convnet_fc_num) return model def forward_pass(self, sample): targets = torch.LongTensor(sample[1].type(torch.LongTensor)).to(self.device) predictions = self.model(sample[0].to(self.device)) return torch.nn.functional.cross_entropy(predictions, targets), [predictions, None]
[ "torch.nn.MaxPool1d", "numpy.product", "torch.nn.ReLU", "torch.nn.Sigmoid", "torch.nn.LeakyReLU", "torch.nn.Softmax", "numpy.floor", "torch.nn.BatchNorm1d", "torch.nn.Linear", "torch.nn.functional.cross_entropy", "torch.nn.Conv1d" ]
[((178, 308), 'torch.nn.Conv1d', 'nn.Conv1d', (['dim_in[0]', 'self.conv_filters'], {'kernel_size': 'self.conv_kernel_size', 'stride': 'self.conv_stride', 'padding': 'self.conv_padding'}), '(dim_in[0], self.conv_filters, kernel_size=self.conv_kernel_size,\n stride=self.conv_stride, padding=self.conv_padding)\n', (187, 308), False, 'from torch import nn\n'), ((1917, 2043), 'torch.nn.Conv1d', 'nn.Conv1d', (['input_size[0]', 'convnet_filters'], {'kernel_size': 'convnet_kernel_size', 'stride': 'convnet_stride', 'padding': 'convnet_padding'}), '(input_size[0], convnet_filters, kernel_size=convnet_kernel_size,\n stride=convnet_stride, padding=convnet_padding)\n', (1926, 2043), False, 'from torch import nn\n'), ((2286, 2318), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['convnet_maxpooling'], {}), '(convnet_maxpooling)\n', (2298, 2318), False, 'from torch import nn\n'), ((2684, 2693), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2691, 2693), False, 'from torch import nn\n'), ((543, 571), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['dim_in'], {}), '(dim_in)\n', (563, 571), False, 'import torch\n'), ((585, 621), 'torch.nn.Linear', 'torch.nn.Linear', (['dim_in', '(dim_in * 32)'], {}), '(dim_in, dim_in * 32)\n', (600, 621), False, 'import torch\n'), ((635, 655), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', ([], {}), '()\n', (653, 655), False, 'import torch\n'), ((669, 710), 'torch.nn.Linear', 'torch.nn.Linear', (['(dim_in * 32)', '(dim_in * 64)'], {}), '(dim_in * 32, dim_in * 64)\n', (684, 710), False, 'import torch\n'), ((724, 757), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(dim_in * 64)'], {}), '(dim_in * 64)\n', (744, 757), False, 'import torch\n'), ((771, 791), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', ([], {}), '()\n', (789, 791), False, 'import torch\n'), ((805, 847), 'torch.nn.Linear', 'torch.nn.Linear', (['(dim_in * 64)', 'NUM_GESTURES'], {}), '(dim_in * 64, NUM_GESTURES)\n', (820, 847), False, 'import torch\n'), ((861, 884), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (877, 884), False, 'import torch\n'), ((1117, 1172), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['predictions', 'targets'], {}), '(predictions, targets)\n', (1150, 1172), False, 'import torch\n'), ((1842, 1864), 'numpy.product', 'np.product', (['input_size'], {}), '(input_size)\n', (1852, 1864), True, 'import numpy as np\n'), ((2557, 2578), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2564, 2578), False, 'from torch import nn\n'), ((2592, 2626), 'torch.nn.Linear', 'nn.Linear', (['convnet_fc_num', 'classes'], {}), '(convnet_fc_num, classes)\n', (2601, 2626), False, 'from torch import nn\n'), ((2640, 2652), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2650, 2652), False, 'from torch import nn\n'), ((3487, 3542), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['predictions', 'targets'], {}), '(predictions, targets)\n', (3520, 3542), False, 'import torch\n'), ((2496, 2526), 'numpy.product', 'np.product', (['self.maxpool_shape'], {}), '(self.maxpool_shape)\n', (2506, 2526), True, 'import numpy as np\n'), ((385, 478), 'numpy.floor', 'np.floor', (['((dim_in[1] - self.conv_kernel_size + 2 * self.conv_padding) / self.conv_stride\n )'], {}), '((dim_in[1] - self.conv_kernel_size + 2 * self.conv_padding) / self\n .conv_stride)\n', (393, 478), True, 'import numpy as np\n'), ((2118, 2208), 'numpy.floor', 'np.floor', (['((input_size[1] - convnet_kernel_size + 2 * convnet_padding) / convnet_stride)'], 
{}), '((input_size[1] - convnet_kernel_size + 2 * convnet_padding) /\n convnet_stride)\n', (2126, 2208), True, 'import numpy as np\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created: March 2020 Python without class! @author: <NAME> (RRCC) """ import numpy as np import matplotlib.pyplot as plt import argparse def readFile(fName): """ Returns ------- nDumps : TYPE DESCRIPTION. nPars : TYPE DESCRIPTION. times : TYPE DESCRIPTION. x : TYPE DESCRIPTION. vx : TYPE DESCRIPTION. mass : TYPE DESCRIPTION. rho : TYPE DESCRIPTION. p : TYPE DESCRIPTION. ie : TYPE DESCRIPTION. xm : TYPE DESCRIPTION. dx : TYPE DESCRIPTION. """ print('Opening File: ',fName) f = open(fName,'r') times = [] nDumps = 0 for lin in f: if lin.find("DONE") != -1: nDumps += 1 if lin.find("NP") != -1: iNP = lin.split()[1] print(nDumps," SPH1D output records found.") print(iNP, " particles used in simulation.") # iNP assumed constant! nPars = int(iNP) x = np.zeros((nDumps,nPars)) xm = np.zeros((nDumps,nPars-1)) dx = np.zeros((nDumps,nPars-1)) vx = np.zeros((nDumps,nPars)) mass = np.zeros((nDumps,nPars)) rho = np.zeros((nDumps,nPars)) p = np.zeros((nDumps,nPars)) ie = np.zeros((nDumps,nPars)) f.seek(0,0) for i in np.arange(nDumps): lin1 = f.readline().split() t = lin1[1] times.append(t) print("Processing record from time = ",t," s") f.readline() f.readline() j = 0 while j < nPars: lin = f.readline() nums = lin.split() x[i][j] = nums[0] vx[i][j] = nums[1] mass[i][j] = nums[2] rho[i][j] = nums[3] p[i][j] = nums[4] ie[i][j] = nums[5] j+=1 f.readline() dx[i] = x[i][1:] - x[i][0:-1] xm[i] = (x[i][1:] + x[i][0:-1])/2.0 f.close() return nDumps,nPars,times,x,vx,mass,rho,p,ie,xm,dx def plotAll(t,x,vx,mass,rho,p,ie,xm,dx): """ Parameters ---------- t : TYPE DESCRIPTION. x : TYPE DESCRIPTION. vx : TYPE DESCRIPTION. mass : TYPE DESCRIPTION. rho : TYPE DESCRIPTION. p : TYPE DESCRIPTION. ie : TYPE DESCRIPTION. xm : TYPE DESCRIPTION. dx : TYPE DESCRIPTION. Returns ------- None. """ plt.plot(xm,dx,label='delta-X') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('delta-X') plt.xlim(-.4,.4) plt.ylim(-.1,3) plt.legend() plt.show() plt.plot(x,vx,label='X-Velocity') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('Velocity') plt.xlim(-.4,.4) plt.ylim(-.5,1) plt.legend() plt.show() plt.plot(x,mass,label='Mass') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('Mass') plt.xlim(-.4,.4) plt.ylim(-.1,3) plt.legend() plt.show() plt.plot(x,rho,label='Density') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('Density') plt.xlim(-.4,.4) plt.ylim(-.1,3) plt.legend() plt.show() plt.plot(x,p,label='Pressure') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('Pressure') plt.xlim(-.4,.4) plt.ylim(-.1,1.2) plt.legend() plt.show() plt.plot(x,ie,label='Internal Energy') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('Internal Energy') plt.xlim(-.4,.4) plt.ylim(1,3) plt.legend() plt.show() def plotIE(t,x,ie): """ Parameters ---------- t : TYPE DESCRIPTION. x : TYPE DESCRIPTION. ie : TYPE DESCRIPTION. Returns ------- None. """ plt.plot(x,ie,label='Internal Energy') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('Internal Energy') plt.xlim(-.4,.4) plt.ylim(1,3) plt.legend() plt.show() def plotVEL(t,x,vx): """ Parameters ---------- t : TYPE DESCRIPTION. x : TYPE DESCRIPTION. vx : TYPE DESCRIPTION. Returns ------- None. 
""" plt.plot(x,vx,label='X-Velocity') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('Velocity') plt.xlim(-.4,.4) plt.ylim(-.5,1) plt.legend() plt.show() def plotP(t,x,p): """ Parameters ---------- t : TYPE DESCRIPTION. x : TYPE DESCRIPTION. p : TYPE DESCRIPTION. Returns ------- None. """ plt.plot(x,p,label='Pressure') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('Pressure') plt.xlim(-.4,.4) plt.ylim(-.1,1.2) plt.legend() plt.show() def plotDEN(t,x,rho): """ Parameters ---------- t : TYPE DESCRIPTION. x : TYPE DESCRIPTION. rho : TYPE DESCRIPTION. Returns ------- None. """ plt.plot(x,rho,label='Density') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('Density') plt.xlim(-.4,.4) plt.ylim(-.1,3) plt.legend() plt.show() def plotDX(t,xm,dx): """ Parameters ---------- t : TYPE DESCRIPTION. xm : TYPE DESCRIPTION. dx : TYPE DESCRIPTION. Returns ------- None. """ plt.plot(xm,dx,label='delta-X') plt.title('Simulation Time = '+t+' s') plt.xlabel('Position') plt.ylabel('delta-X') plt.xlim(-.4,.4) plt.ylim(-.1,3) plt.legend() plt.show() def getUserOptions(): """ Returns ------- TYPE DESCRIPTION. """ parser = argparse.ArgumentParser( description='Welcome to sphPlot help...', epilog='Example:\n>python sphPlot.py -i <input_file>') # Required argument parser.add_argument( '-i', required=True, help="'file/location/SPH1D_Output.txt' is required") return parser.parse_args() def main(args): """ Parameters ---------- args : TYPE DESCRIPTION. Returns ------- None. """ nDumps,nPars,t,x,vx,mass,rho,p,ie,xm,dx = readFile(args.i) for i in np.arange(nDumps): print("Plotting Time: ",t[i]," s") plotVEL(t[i],x[i],vx[i]) if __name__ == '__main__': args = getUserOptions() main(args)
[ "argparse.ArgumentParser", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.legend", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "numpy.zeros", "matplotlib.pyplot.title", "matplotlib.pyplot.xlim", "matplotlib.pyplot.ylim", "numpy.arange", "matplotlib.pyplot.show" ]
[((1063, 1088), 'numpy.zeros', 'np.zeros', (['(nDumps, nPars)'], {}), '((nDumps, nPars))\n', (1071, 1088), True, 'import numpy as np\n'), ((1098, 1127), 'numpy.zeros', 'np.zeros', (['(nDumps, nPars - 1)'], {}), '((nDumps, nPars - 1))\n', (1106, 1127), True, 'import numpy as np\n'), ((1135, 1164), 'numpy.zeros', 'np.zeros', (['(nDumps, nPars - 1)'], {}), '((nDumps, nPars - 1))\n', (1143, 1164), True, 'import numpy as np\n'), ((1172, 1197), 'numpy.zeros', 'np.zeros', (['(nDumps, nPars)'], {}), '((nDumps, nPars))\n', (1180, 1197), True, 'import numpy as np\n'), ((1209, 1234), 'numpy.zeros', 'np.zeros', (['(nDumps, nPars)'], {}), '((nDumps, nPars))\n', (1217, 1234), True, 'import numpy as np\n'), ((1245, 1270), 'numpy.zeros', 'np.zeros', (['(nDumps, nPars)'], {}), '((nDumps, nPars))\n', (1253, 1270), True, 'import numpy as np\n'), ((1279, 1304), 'numpy.zeros', 'np.zeros', (['(nDumps, nPars)'], {}), '((nDumps, nPars))\n', (1287, 1304), True, 'import numpy as np\n'), ((1314, 1339), 'numpy.zeros', 'np.zeros', (['(nDumps, nPars)'], {}), '((nDumps, nPars))\n', (1322, 1339), True, 'import numpy as np\n'), ((1370, 1387), 'numpy.arange', 'np.arange', (['nDumps'], {}), '(nDumps)\n', (1379, 1387), True, 'import numpy as np\n'), ((2544, 2577), 'matplotlib.pyplot.plot', 'plt.plot', (['xm', 'dx'], {'label': '"""delta-X"""'}), "(xm, dx, label='delta-X')\n", (2552, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2581, 2623), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (2590, 2623), True, 'import matplotlib.pyplot as plt\n'), ((2625, 2647), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (2635, 2647), True, 'import matplotlib.pyplot as plt\n'), ((2653, 2674), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""delta-X"""'], {}), "('delta-X')\n", (2663, 2674), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2699), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (2688, 2699), True, 'import matplotlib.pyplot as plt\n'), ((2702, 2719), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(3)'], {}), '(-0.1, 3)\n', (2710, 2719), True, 'import matplotlib.pyplot as plt\n'), ((2723, 2735), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2733, 2735), True, 'import matplotlib.pyplot as plt\n'), ((2741, 2751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2749, 2751), True, 'import matplotlib.pyplot as plt\n'), ((2757, 2792), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'vx'], {'label': '"""X-Velocity"""'}), "(x, vx, label='X-Velocity')\n", (2765, 2792), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2838), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (2805, 2838), True, 'import matplotlib.pyplot as plt\n'), ((2840, 2862), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (2850, 2862), True, 'import matplotlib.pyplot as plt\n'), ((2868, 2890), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity"""'], {}), "('Velocity')\n", (2878, 2890), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2915), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (2904, 2915), True, 'import matplotlib.pyplot as plt\n'), ((2918, 2935), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.5)', '(1)'], {}), '(-0.5, 1)\n', (2926, 2935), True, 'import matplotlib.pyplot as plt\n'), ((2939, 2951), 'matplotlib.pyplot.legend', 
'plt.legend', ([], {}), '()\n', (2949, 2951), True, 'import matplotlib.pyplot as plt\n'), ((2957, 2967), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2965, 2967), True, 'import matplotlib.pyplot as plt\n'), ((2973, 3004), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'mass'], {'label': '"""Mass"""'}), "(x, mass, label='Mass')\n", (2981, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3008, 3050), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (3017, 3050), True, 'import matplotlib.pyplot as plt\n'), ((3052, 3074), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (3062, 3074), True, 'import matplotlib.pyplot as plt\n'), ((3080, 3098), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mass"""'], {}), "('Mass')\n", (3090, 3098), True, 'import matplotlib.pyplot as plt\n'), ((3104, 3123), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (3112, 3123), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3143), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(3)'], {}), '(-0.1, 3)\n', (3134, 3143), True, 'import matplotlib.pyplot as plt\n'), ((3147, 3159), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3157, 3159), True, 'import matplotlib.pyplot as plt\n'), ((3165, 3175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3173, 3175), True, 'import matplotlib.pyplot as plt\n'), ((3181, 3214), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'rho'], {'label': '"""Density"""'}), "(x, rho, label='Density')\n", (3189, 3214), True, 'import matplotlib.pyplot as plt\n'), ((3218, 3260), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (3227, 3260), True, 'import matplotlib.pyplot as plt\n'), ((3262, 3284), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (3272, 3284), True, 'import matplotlib.pyplot as plt\n'), ((3290, 3311), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (3300, 3311), True, 'import matplotlib.pyplot as plt\n'), ((3317, 3336), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (3325, 3336), True, 'import matplotlib.pyplot as plt\n'), ((3339, 3356), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(3)'], {}), '(-0.1, 3)\n', (3347, 3356), True, 'import matplotlib.pyplot as plt\n'), ((3360, 3372), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3370, 3372), True, 'import matplotlib.pyplot as plt\n'), ((3378, 3388), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3386, 3388), True, 'import matplotlib.pyplot as plt\n'), ((3394, 3426), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'p'], {'label': '"""Pressure"""'}), "(x, p, label='Pressure')\n", (3402, 3426), True, 'import matplotlib.pyplot as plt\n'), ((3430, 3472), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (3439, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3474, 3496), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (3484, 3496), True, 'import matplotlib.pyplot as plt\n'), ((3502, 3524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure"""'], {}), "('Pressure')\n", (3512, 3524), True, 'import matplotlib.pyplot as plt\n'), ((3530, 3549), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (3538, 3549), 
True, 'import matplotlib.pyplot as plt\n'), ((3552, 3571), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.2)'], {}), '(-0.1, 1.2)\n', (3560, 3571), True, 'import matplotlib.pyplot as plt\n'), ((3575, 3587), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3585, 3587), True, 'import matplotlib.pyplot as plt\n'), ((3593, 3603), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3601, 3603), True, 'import matplotlib.pyplot as plt\n'), ((3609, 3649), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ie'], {'label': '"""Internal Energy"""'}), "(x, ie, label='Internal Energy')\n", (3617, 3649), True, 'import matplotlib.pyplot as plt\n'), ((3653, 3695), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (3662, 3695), True, 'import matplotlib.pyplot as plt\n'), ((3697, 3719), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (3707, 3719), True, 'import matplotlib.pyplot as plt\n'), ((3725, 3754), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Internal Energy"""'], {}), "('Internal Energy')\n", (3735, 3754), True, 'import matplotlib.pyplot as plt\n'), ((3760, 3779), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (3768, 3779), True, 'import matplotlib.pyplot as plt\n'), ((3782, 3796), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1)', '(3)'], {}), '(1, 3)\n', (3790, 3796), True, 'import matplotlib.pyplot as plt\n'), ((3801, 3813), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3811, 3813), True, 'import matplotlib.pyplot as plt\n'), ((3819, 3829), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3827, 3829), True, 'import matplotlib.pyplot as plt\n'), ((4064, 4104), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ie'], {'label': '"""Internal Energy"""'}), "(x, ie, label='Internal Energy')\n", (4072, 4104), True, 'import matplotlib.pyplot as plt\n'), ((4108, 4150), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (4117, 4150), True, 'import matplotlib.pyplot as plt\n'), ((4152, 4174), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (4162, 4174), True, 'import matplotlib.pyplot as plt\n'), ((4180, 4209), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Internal Energy"""'], {}), "('Internal Energy')\n", (4190, 4209), True, 'import matplotlib.pyplot as plt\n'), ((4215, 4234), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (4223, 4234), True, 'import matplotlib.pyplot as plt\n'), ((4237, 4251), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1)', '(3)'], {}), '(1, 3)\n', (4245, 4251), True, 'import matplotlib.pyplot as plt\n'), ((4256, 4268), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4266, 4268), True, 'import matplotlib.pyplot as plt\n'), ((4274, 4284), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4282, 4284), True, 'import matplotlib.pyplot as plt\n'), ((4520, 4555), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'vx'], {'label': '"""X-Velocity"""'}), "(x, vx, label='X-Velocity')\n", (4528, 4555), True, 'import matplotlib.pyplot as plt\n'), ((4559, 4601), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (4568, 4601), True, 'import matplotlib.pyplot as plt\n'), ((4603, 4625), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (4613, 4625), True, 
'import matplotlib.pyplot as plt\n'), ((4631, 4653), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity"""'], {}), "('Velocity')\n", (4641, 4653), True, 'import matplotlib.pyplot as plt\n'), ((4659, 4678), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (4667, 4678), True, 'import matplotlib.pyplot as plt\n'), ((4681, 4698), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.5)', '(1)'], {}), '(-0.5, 1)\n', (4689, 4698), True, 'import matplotlib.pyplot as plt\n'), ((4702, 4714), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4712, 4714), True, 'import matplotlib.pyplot as plt\n'), ((4720, 4730), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4728, 4730), True, 'import matplotlib.pyplot as plt\n'), ((4962, 4994), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'p'], {'label': '"""Pressure"""'}), "(x, p, label='Pressure')\n", (4970, 4994), True, 'import matplotlib.pyplot as plt\n'), ((4998, 5040), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (5007, 5040), True, 'import matplotlib.pyplot as plt\n'), ((5042, 5064), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (5052, 5064), True, 'import matplotlib.pyplot as plt\n'), ((5070, 5092), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure"""'], {}), "('Pressure')\n", (5080, 5092), True, 'import matplotlib.pyplot as plt\n'), ((5098, 5117), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (5106, 5117), True, 'import matplotlib.pyplot as plt\n'), ((5120, 5139), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.2)'], {}), '(-0.1, 1.2)\n', (5128, 5139), True, 'import matplotlib.pyplot as plt\n'), ((5143, 5155), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5153, 5155), True, 'import matplotlib.pyplot as plt\n'), ((5161, 5171), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5169, 5171), True, 'import matplotlib.pyplot as plt\n'), ((5409, 5442), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'rho'], {'label': '"""Density"""'}), "(x, rho, label='Density')\n", (5417, 5442), True, 'import matplotlib.pyplot as plt\n'), ((5446, 5488), 'matplotlib.pyplot.title', 'plt.title', (["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (5455, 5488), True, 'import matplotlib.pyplot as plt\n'), ((5490, 5512), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (5500, 5512), True, 'import matplotlib.pyplot as plt\n'), ((5518, 5539), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (5528, 5539), True, 'import matplotlib.pyplot as plt\n'), ((5545, 5564), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (5553, 5564), True, 'import matplotlib.pyplot as plt\n'), ((5567, 5584), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(3)'], {}), '(-0.1, 3)\n', (5575, 5584), True, 'import matplotlib.pyplot as plt\n'), ((5588, 5600), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5598, 5600), True, 'import matplotlib.pyplot as plt\n'), ((5606, 5616), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5614, 5616), True, 'import matplotlib.pyplot as plt\n'), ((5853, 5886), 'matplotlib.pyplot.plot', 'plt.plot', (['xm', 'dx'], {'label': '"""delta-X"""'}), "(xm, dx, label='delta-X')\n", (5861, 5886), True, 'import matplotlib.pyplot as plt\n'), ((5890, 5932), 'matplotlib.pyplot.title', 'plt.title', 
(["('Simulation Time = ' + t + ' s')"], {}), "('Simulation Time = ' + t + ' s')\n", (5899, 5932), True, 'import matplotlib.pyplot as plt\n'), ((5934, 5956), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (5944, 5956), True, 'import matplotlib.pyplot as plt\n'), ((5962, 5983), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""delta-X"""'], {}), "('delta-X')\n", (5972, 5983), True, 'import matplotlib.pyplot as plt\n'), ((5989, 6008), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (5997, 6008), True, 'import matplotlib.pyplot as plt\n'), ((6011, 6028), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(3)'], {}), '(-0.1, 3)\n', (6019, 6028), True, 'import matplotlib.pyplot as plt\n'), ((6032, 6044), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6042, 6044), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6060), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6058, 6060), True, 'import matplotlib.pyplot as plt\n'), ((6184, 6312), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Welcome to sphPlot help..."""', 'epilog': '"""Example:\n>python sphPlot.py -i <input_file>"""'}), '(description=\'Welcome to sphPlot help...\', epilog=\n """Example:\n>python sphPlot.py -i <input_file>""")\n', (6207, 6312), False, 'import argparse\n'), ((6741, 6758), 'numpy.arange', 'np.arange', (['nDumps'], {}), '(nDumps)\n', (6750, 6758), True, 'import numpy as np\n')]
from __future__ import print_function import json import logging import sys import time from utils.chronograph import Chronograph import grpc import numpy as np from grpc._channel import _Rendezvous import taranis_pb2 import taranis_pb2_grpc DB_NAME = 'db3' INDEX_NAME = 'basic_index' DIMENSION = 128 # dimension N_LISTS = 4096 n_batch = 10000 n_training_vectors = 1000 # DB_NAME = 'db2' # INDEX_NAME = 'basic_index' # DIMENSION = 128 # dimension # N_LISTS = 4 # n_batch = 100 # n_training_vectors = 1000 # set up logger logger = logging.getLogger() logger.setLevel(logging.DEBUG) # anything debug and above passes through to the handler level fh = logging.StreamHandler(stream=sys.stdout) fh.setLevel(logging.DEBUG) logger.addHandler(fh) def run(): with grpc.insecure_channel('localhost:50051') as channel: stub = taranis_pb2_grpc.TaranisStub(channel) try: # # Delete the database if it already exists, and recreate it # try: # my_database = stub.getDatabase(taranis_pb2.DatabaseNameModel(name=DB_NAME)) # logging.info("Found database {}".format(my_database.name)) # stub.deleteDatabase(taranis_pb2.DatabaseNameModel(name=DB_NAME)) # logging.info("Deleted database {}".format(DB_NAME)) # except _Rendezvous as e: # logging.info("{} : {}".format(e.code(), e.details())) # # response = stub.createDatabase(taranis_pb2.NewDatabaseModel(name=DB_NAME)) # logging.info("Created database {} at {}".format(response.name, response.created_at)) # # # Check if database exists # my_database = stub.getDatabase(taranis_pb2.DatabaseNameModel(name=DB_NAME)) # logging.info("Found database {}".format(my_database.name)) # # # Delete the index if it already exists and recreate it # try: # my_index = stub.getIndex(taranis_pb2.IndexQueryModel(db_name=DB_NAME, index_name=INDEX_NAME)) # logging.info("Found Index {}".format(my_index.index_name)) # stub.deleteIndex(taranis_pb2.IndexQueryModel(db_name=DB_NAME, index_name=INDEX_NAME)) # logging.info("Deleted Index {}".format(my_index.index_name)) # except _Rendezvous as e: # logging.info("{} : {}".format(e.code(), e.details())) # # response = stub.createIndex(taranis_pb2.NewIndexModel(db_name=DB_NAME, index_name=INDEX_NAME, # config=json.dumps(dict(index_type="IVFPQ", # dimension=DIMENSION, # n_list=N_LISTS, # metric="METRIC_L2", # n_probes=4)))) # logging.info("Created index {} at {}".format(response.index_name, response.created_at)) # # my_index = stub.getIndex(taranis_pb2.IndexQueryModel(db_name=DB_NAME, index_name=INDEX_NAME)) # logging.info("Found Index {}".format(my_index.index_name)) # # vid = 0 # for b in range(0, n_batch): # logging.info("Batch {} on {}".format(b, n_batch)) # payload = taranis_pb2.NewVectorsModel() # payload.db_name = DB_NAME # for i in range(b * n_training_vectors, (b + 1) * n_training_vectors): # v = payload.vectors.add() # v.id = vid # v.data = np.random.Generator().random((DIMENSION,), dtype=np.float32).tobytes() # # v.data = np.random.random_sample((DIMENSION,)).tobytes() # v.metadata = json.dumps(dict(aaa="aaa", bbb="bbb")) # vid += 1 # response = stub.addVectors(payload) # logging.info("Added {} vectors".format(n_training_vectors)) # # # Train the index # response = stub.trainIndex(taranis_pb2.IndexQueryModel(db_name=DB_NAME, index_name=INDEX_NAME)) # logging.info("Trained index {} for db {}".format(INDEX_NAME, DB_NAME)) # # # reencode all vectors in database # response = stub.reindex(taranis_pb2.IndexQueryModel(db_name=DB_NAME, index_name=INDEX_NAME)) cg = Chronograph(name="Testing Chronograph", verbosity=1, logger=logger, log_lvl="INFO", 
start_timing=False) for b in range(0, 100): query = taranis_pb2.VectorsQueryModel(db_name=DB_NAME) for i in np.random.randint(0, n_batch * n_training_vectors, 100, np.int64).tolist(): query.ids.append(i) random_vectors: taranis_pb2.VectorsReplyModel = stub.getVectors(query) search_request = taranis_pb2.SearchRequestModel(db_name=DB_NAME, index_name=INDEX_NAME, k=100, n_probe=5) for v in random_vectors.vectors: search_request.vectors.append(v.data) cg.start("searchVectors") result_list: taranis_pb2.SearchResultListModel = stub.searchVectors(search_request) cg.stop() for sr, qid in zip(result_list.results, query.ids): print("{} : {}".format(qid, sr.knn[0])) cg.report(printout=True) except _Rendezvous as e: logging.error("{} : {}".format(e.code(), e.details())) if __name__ == '__main__': logging.basicConfig(level="INFO") run()
[ "logging.getLogger", "logging.basicConfig", "taranis_pb2_grpc.TaranisStub", "logging.StreamHandler", "utils.chronograph.Chronograph", "grpc.insecure_channel", "numpy.random.randint", "taranis_pb2.SearchRequestModel", "taranis_pb2.VectorsQueryModel" ]
[((541, 560), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (558, 560), False, 'import logging\n'), ((661, 701), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (682, 701), False, 'import logging\n'), ((5803, 5836), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""INFO"""'}), "(level='INFO')\n", (5822, 5836), False, 'import logging\n'), ((773, 813), 'grpc.insecure_channel', 'grpc.insecure_channel', (['"""localhost:50051"""'], {}), "('localhost:50051')\n", (794, 813), False, 'import grpc\n'), ((841, 878), 'taranis_pb2_grpc.TaranisStub', 'taranis_pb2_grpc.TaranisStub', (['channel'], {}), '(channel)\n', (869, 878), False, 'import taranis_pb2_grpc\n'), ((4662, 4770), 'utils.chronograph.Chronograph', 'Chronograph', ([], {'name': '"""Testing Chronograph"""', 'verbosity': '(1)', 'logger': 'logger', 'log_lvl': '"""INFO"""', 'start_timing': '(False)'}), "(name='Testing Chronograph', verbosity=1, logger=logger, log_lvl\n ='INFO', start_timing=False)\n", (4673, 4770), False, 'from utils.chronograph import Chronograph\n'), ((4827, 4873), 'taranis_pb2.VectorsQueryModel', 'taranis_pb2.VectorsQueryModel', ([], {'db_name': 'DB_NAME'}), '(db_name=DB_NAME)\n', (4856, 4873), False, 'import taranis_pb2\n'), ((5137, 5230), 'taranis_pb2.SearchRequestModel', 'taranis_pb2.SearchRequestModel', ([], {'db_name': 'DB_NAME', 'index_name': 'INDEX_NAME', 'k': '(100)', 'n_probe': '(5)'}), '(db_name=DB_NAME, index_name=INDEX_NAME, k=\n 100, n_probe=5)\n', (5167, 5230), False, 'import taranis_pb2\n'), ((4899, 4964), 'numpy.random.randint', 'np.random.randint', (['(0)', '(n_batch * n_training_vectors)', '(100)', 'np.int64'], {}), '(0, n_batch * n_training_vectors, 100, np.int64)\n', (4916, 4964), True, 'import numpy as np\n')]
# read delta G values from equilibrator_results.tsv
import numpy as np

infile = open('equilibrator_results.tsv', 'r')


def read_dg(infile):
    # Collect the delta-G value from every data line and return them as a NumPy array.
    dg_list = []
    for line in infile:
        if not line.startswith("'"):
            # skips lines with headers
            continue
        line = line.strip("\n")
        line_list = line.split("\t")
        # the third column holds the delta-G estimate; keep the first token and drop the leading "("
        dg = line_list[2].split(" ")[0]
        dg = dg.strip("(")
        dg_list.append(float(dg))
    dg_array = np.array(dg_list)
    return dg_array
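# Minimal usage sketch (an assumed entry point, not taken from the original script):
# parse the file handle opened above and print the collected delta-G values.
dg_values = read_dg(infile)
print(dg_values)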
[ "numpy.array" ]
[((408, 425), 'numpy.array', 'np.array', (['dg_list'], {}), '(dg_list)\n', (416, 425), True, 'import numpy as np\n')]
import torch import numpy as np import torch.nn as nn import torch.distributed as dist import torch.nn.functional as F from torch import Tensor from typing import Any from typing import Dict from typing import Tuple from typing import Optional from cftool.misc import update_dict from cftool.misc import shallow_copy_dict from torch.nn.parallel import DistributedDataParallel as DDP from ..encoder import Encoder1DBase from ....data import CVLoader from ....types import tensor_dict_type from ....protocol import StepOutputs from ....protocol import TrainerState from ....protocol import MetricsOutputs from ....protocol import ModelWithCustomSteps from ....constants import LOSS_KEY from ....constants import INPUT_KEY from ....constants import LATENT_KEY from ....misc.toolkit import to_device from ....misc.toolkit import l2_normalize from ....misc.toolkit import get_world_size from ....misc.toolkit import has_batch_norms def _get_dino_defaults(name: str) -> Dict[str, Any]: if name == "vit": return {"patch_size": 16, "drop_path_rate": 0.1} return {} class Scheduler: def __init__(self, values: np.ndarray): self.values = values self.max_idx = len(values) - 1 def __getitem__(self, index: int) -> Any: return self.values[min(index, self.max_idx)] def cosine_scheduler( base_value: float, final_value: float, epochs: int, num_step_per_epoch: int, warmup_epochs: int = 0, start_warmup_value: int = 0, ) -> Scheduler: warmup_schedule = np.array([]) warmup_iters = warmup_epochs * num_step_per_epoch if warmup_epochs > 0: warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters) iters = np.arange(epochs * num_step_per_epoch - warmup_iters) diff = base_value - final_value schedule = final_value + 0.5 * diff * (1.0 + np.cos(np.pi * iters / len(iters))) schedule = np.concatenate((warmup_schedule, schedule)) assert len(schedule) == epochs * num_step_per_epoch return Scheduler(schedule) class MultiCropWrapper(nn.Module): def __init__(self, backbone: nn.Module, head: nn.Module): super().__init__() backbone.fc, backbone.head = nn.Identity(), nn.Identity() self.backbone = backbone self.head = head def forward( self, batch_idx: int, batch: tensor_dict_type, state: Optional[TrainerState] = None, *, img_end_idx: Optional[int] = None, **kwargs: Any, ) -> Tensor: img_crops = batch[INPUT_KEY] if not isinstance(img_crops, list): img_crops = batch[INPUT_KEY] = [img_crops] if img_end_idx is not None: img_crops = img_crops[:img_end_idx] idx_crops = torch.cumsum( torch.unique_consecutive( torch.tensor([img_crop.shape[-1] for img_crop in img_crops]), return_counts=True, )[1], 0, ) outputs = [] start_idx = 0 for end_idx in idx_crops: local_batch = shallow_copy_dict(batch) local_batch[INPUT_KEY] = torch.cat(img_crops[start_idx:end_idx]) idx_rs = self.backbone(batch_idx, local_batch, state, **kwargs) idx_out = idx_rs[LATENT_KEY] if isinstance(idx_out, tuple): idx_out = idx_out[0] outputs.append(idx_out) start_idx = end_idx return self.head(torch.cat(outputs)) class DINOHead(nn.Module): def __init__( self, in_dim: int, out_dim: int, batch_norm: bool = False, norm_last_layer: bool = True, *, num_layers: int = 3, latent_dim: int = 2048, bottleneck_dim: int = 256, ): super().__init__() num_layers = max(num_layers, 1) if num_layers == 1: self.mapping = nn.Linear(in_dim, bottleneck_dim) else: blocks = [nn.Linear(in_dim, latent_dim)] if batch_norm: blocks.append(nn.BatchNorm1d(latent_dim)) blocks.append(nn.GELU()) for _ in range(num_layers - 2): blocks.append(nn.Linear(latent_dim, latent_dim)) if batch_norm: 
blocks.append(nn.BatchNorm1d(latent_dim)) blocks.append(nn.GELU()) blocks.append(nn.Linear(latent_dim, bottleneck_dim)) self.mapping = nn.Sequential(*blocks) self.apply(self._init_weights) last = nn.Linear(bottleneck_dim, out_dim, bias=False) self.last_layer = nn.utils.weight_norm(last) self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad = False def _init_weights(self, m: nn.Module) -> None: if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, net: Tensor) -> Tensor: net = self.mapping(net) net = nn.functional.normalize(net, dim=-1, p=2) net = self.last_layer(net) return net class DINOLoss(nn.Module): center: torch.Tensor def __init__( self, out_dim: int, teacher_temp: float, warmup_teacher_temp: float, warmup_teacher_temp_epochs: int, teacher_temp_epochs: int, *, student_temp: float = 0.1, center_momentum: float = 0.9, ): super().__init__() self.student_temp = student_temp self.center_momentum = center_momentum self.register_buffer("center", torch.zeros(1, out_dim)) teacher_temp_constant_epochs = teacher_temp_epochs - warmup_teacher_temp_epochs self.teacher_temp_schedule = Scheduler( np.concatenate( ( np.linspace( warmup_teacher_temp, teacher_temp, warmup_teacher_temp_epochs, ), np.ones(teacher_temp_constant_epochs) * teacher_temp, ) ) ) self.num_epochs = teacher_temp_epochs def forward( self, epoch: int, num_crops: int, student_output: Tensor, teacher_output: Tensor, ) -> Tensor: student_logits = student_output / self.student_temp student_logits_list = student_logits.chunk(num_crops) temp = self.teacher_temp_schedule[epoch] teacher_logits = F.softmax((teacher_output - self.center) / temp, dim=-1) teacher_logits_list = teacher_logits.detach().chunk(2) total_loss = 0.0 num_loss_terms = 0 for it, t_logit in enumerate(teacher_logits_list): for iv, v_logit in enumerate(student_logits_list): if iv == it: continue loss = torch.sum(-t_logit * F.log_softmax(v_logit, dim=-1), dim=-1) total_loss += loss.mean() num_loss_terms += 1 total_loss /= num_loss_terms self.update_center(teacher_output) return total_loss @torch.no_grad() def update_center(self, teacher_output: Tensor) -> None: batch_center = torch.sum(teacher_output, dim=0, keepdim=True) if dist.is_initialized(): dist.all_reduce(batch_center) batch_center = batch_center / (len(teacher_output) * get_world_size()) m = self.center_momentum self.center = self.center * m + batch_center * (1.0 - m) class DINOEvaluateLoss: def __init__(self, train_loss: DINOLoss): self.train_loss = train_loss def __call__( self, epoch: int, student_output: Tensor, teacher_output: Tensor, ) -> float: s_logits = student_output / self.train_loss.student_temp temp = self.train_loss.teacher_temp_schedule[epoch] centered = teacher_output - self.train_loss.center t_logits = F.softmax(centered / temp, dim=-1) loss = torch.sum(-t_logits * F.log_softmax(s_logits, dim=-1), dim=-1).mean() return loss.item() @ModelWithCustomSteps.register("dino") class DINO(ModelWithCustomSteps): custom_params_groups = True custom_ddp_initialization = True lr_schedule: Optional[Scheduler] wd_schedule: Optional[Scheduler] momentum_schedule: Optional[Scheduler] def __init__( self, encoder1d: str = "vit", encoder1d_config: Optional[Dict[str, Any]] = None, student_specific: Optional[Dict[str, Any]] = None, teacher_specific: Optional[Dict[str, Any]] = None, *, out_dim: int = 65536, use_bn_in_head: bool = False, norm_last_layer: bool = True, 
teacher_temp: float = 0.07, momentum_teacher: float = 0.996, warmup_teacher_temp: float = 0.04, warmup_teacher_temp_epochs: int = 30, teacher_temp_epochs: int, freeze_last_layer: int = 1, weight_decay: float = 0.04, weight_decay_end: float = 0.4, warmup_epochs: int = 10, ): super().__init__() base = update_dict(encoder1d_config or {}, _get_dino_defaults(encoder1d)) student_cfg = update_dict(student_specific or {}, shallow_copy_dict(base)) teacher_cfg = update_dict(teacher_specific or {}, shallow_copy_dict(base)) student = Encoder1DBase.make(encoder1d, student_cfg) teacher = Encoder1DBase.make(encoder1d, teacher_cfg) self.ddp_student = self.ddp_teacher = None self.student = MultiCropWrapper( student, DINOHead( student.latent_dim, out_dim, use_bn_in_head, norm_last_layer, ), ) self.teacher = MultiCropWrapper( teacher, DINOHead(teacher.latent_dim, out_dim, use_bn_in_head), ) self.freeze_last_layer = freeze_last_layer self.teacher.load_state_dict(self.student.state_dict()) self.loss = DINOLoss( out_dim, teacher_temp, warmup_teacher_temp, warmup_teacher_temp_epochs, teacher_temp_epochs, ) self.evaluate_loss = DINOEvaluateLoss(self.loss) self.momentum_teacher = momentum_teacher self.teacher_temp_epochs = teacher_temp_epochs self.weight_decay = weight_decay self.weight_decay_end = weight_decay_end self.warmup_epochs = warmup_epochs self.lr_schedule = None self.wd_schedule = None self.momentum_schedule = None @property def student_for_training(self) -> MultiCropWrapper: return self.ddp_student or self.student @property def teacher_for_training(self) -> MultiCropWrapper: return self.ddp_teacher or self.teacher def forward( self, batch_idx: int, batch: tensor_dict_type, state: Optional[TrainerState] = None, **kwargs: Any, ) -> tensor_dict_type: net = self.student.backbone(batch_idx, batch, state, **kwargs)[LATENT_KEY] net = l2_normalize(net) return {LATENT_KEY: net} def onnx_forward(self, batch: tensor_dict_type) -> Any: inp = batch[INPUT_KEY] net = self.get_latent(inp, determinate=True) return net.view(inp.shape[0], self.student.backbone.latent_dim) def get_latent(self, net: Tensor, **kwargs: Any) -> Tensor: return self.forward(0, {INPUT_KEY: net}, **kwargs)[LATENT_KEY] def get_logits(self, net: Tensor) -> Tensor: return self.student(0, {INPUT_KEY: net}) def state_dict( self, destination: Any = None, prefix: str = "", keep_vars: bool = False, ) -> Any: states = super().state_dict(destination, prefix, keep_vars) for k in list(states.keys()): if k.startswith("ddp"): states.pop(k) return states def summary_forward(self, batch_idx: int, batch: tensor_dict_type) -> None: self.student(batch_idx, to_device(batch, self.device)) def _get_outputs( self, batch_idx: int, batch: tensor_dict_type, trainer: Any, forward_kwargs: Dict[str, Any], ) -> tensor_dict_type: teacher_output = self.teacher_for_training( batch_idx, batch, trainer.state, img_end_idx=2, **forward_kwargs, ) student_output = self.student_for_training( batch_idx, batch, trainer.state, **forward_kwargs, ) return {"student": student_output, "teacher": teacher_output} def _get_loss( self, batch_idx: int, batch: tensor_dict_type, trainer: Any, forward_kwargs: Dict[str, Any], ) -> Tuple[tensor_dict_type, Tensor]: with torch.cuda.amp.autocast(enabled=trainer.use_amp): outputs = self._get_outputs(batch_idx, batch, trainer, forward_kwargs) epoch = trainer.state.epoch num_crops = len(batch[INPUT_KEY]) student_output = outputs["student"] teacher_output = outputs["teacher"] loss = self.loss(epoch, num_crops, student_output, teacher_output) return outputs, 
loss def train_step( self, batch_idx: int, batch: tensor_dict_type, trainer: Any, forward_kwargs: Dict[str, Any], loss_kwargs: Dict[str, Any], ) -> StepOutputs: state = trainer.state if self.lr_schedule is None: self.lr_schedule = cosine_scheduler( self.lr * (len(batch[INPUT_KEY][0]) * get_world_size()) / 256.0, # type: ignore self.min_lr, self.teacher_temp_epochs, state.num_step_per_epoch, warmup_epochs=self.warmup_epochs, ) if self.wd_schedule is None: self.wd_schedule = cosine_scheduler( self.weight_decay, self.weight_decay_end, self.teacher_temp_epochs, state.num_step_per_epoch, ) # manual scheduling optimizer = trainer.optimizers["all"] for i, param_group in enumerate(optimizer.param_groups): param_group["lr"] = self.lr_schedule[state.step] if i == 0: param_group["weight_decay"] = self.wd_schedule[state.step] # forward pass rs, loss = self._get_loss(batch_idx, batch, trainer, forward_kwargs) # backward pass optimizer.zero_grad() trainer.grad_scaler.scale(loss).backward() # clip norm if trainer.clip_norm > 0.0: trainer.grad_scaler.unscale_(optimizer) nn.utils.clip_grad_norm_( self.student_for_training.parameters(), max_norm=trainer.clip_norm, ) # freeze last layer if state.epoch <= self.freeze_last_layer: for n, p in self.student.named_parameters(): if "last_layer" in n: p.grad = None # update parameters trainer.grad_scaler.step(optimizer) trainer.grad_scaler.update() # update momentum teacher if self.momentum_schedule is None: self.momentum_schedule = cosine_scheduler( self.momentum_teacher, 1.0, self.teacher_temp_epochs, state.num_step_per_epoch, ) with torch.no_grad(): m = self.momentum_schedule[state.step] for param_q, param_k in zip( self.student.parameters(), self.teacher.parameters(), ): param_k.data.mul_(m).add_((1.0 - m) * param_q.detach().data) # return return StepOutputs(rs, {LOSS_KEY: loss.item()}) def evaluate_step( # type: ignore self, loader: CVLoader, portion: float, trainer: Any, ) -> MetricsOutputs: losses = [] for i, batch in enumerate(loader): if i / len(loader) >= portion: break batch = to_device(batch, self.device) outputs = self._get_outputs(i, batch, trainer, {}) losses.append( self.evaluate_loss( trainer.state.epoch, outputs["student"], outputs["teacher"], ) ) # gather mean_loss = sum(losses) / len(losses) return MetricsOutputs( -mean_loss, { "loss": mean_loss, "lr": self.lr_schedule[trainer.state.step], # type: ignore "wd": self.wd_schedule[trainer.state.step], # type: ignore }, ) @staticmethod def params_groups(m: nn.Module) -> Any: regularized = [] bias_and_norm = [] for name, param in m.named_parameters(): if not param.requires_grad: continue if name.endswith(".bias") or len(param.shape) == 1: bias_and_norm.append(param) else: regularized.append(param) return [{"params": regularized}, {"params": bias_and_norm, "weight_decay": 0.0}] def _init_with_trainer(self, trainer: Any) -> None: self.teacher_for_training.requires_grad_(False) def init_ddp(self, trainer: Any) -> None: if has_batch_norms(self.student): self.student = nn.SyncBatchNorm.convert_sync_batchnorm(self.student) self.teacher = nn.SyncBatchNorm.convert_sync_batchnorm(self.teacher) self.ddp_student = DDP(self.student, device_ids=[trainer.rank]) self.ddp_teacher = DDP(self.teacher, device_ids=[trainer.rank]) self.ddp_teacher.requires_grad_(False) # type: ignore def permute_trainer_config(self, trainer_config: Dict[str, Any]) -> None: # TODO : make `permute_trainer_config` more general if trainer_config["clip_norm"] == 0.0: trainer_config["clip_norm"] = 3.0 if trainer_config["lr"] is None: 
trainer_config["lr"] = 0.0005 self.lr = trainer_config["lr"] self.min_lr = trainer_config.pop("min_lr", 1.0e-6) if trainer_config["optimizer_name"] is None: trainer_config["optimizer_name"] = "adamw" trainer_config["scheduler_name"] = "none" __all__ = [ "DINO", ]
[ "torch.nn.GELU", "torch.nn.init.constant_", "torch.nn.Sequential", "numpy.array", "torch.nn.BatchNorm1d", "torch.sum", "torch.nn.init.trunc_normal_", "torch.nn.functional.softmax", "numpy.arange", "numpy.linspace", "torch.cuda.amp.autocast", "numpy.concatenate", "torch.nn.Identity", "torch.nn.parallel.DistributedDataParallel", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "numpy.ones", "torch.distributed.all_reduce", "torch.nn.functional.normalize", "torch.nn.functional.log_softmax", "torch.cat", "torch.distributed.is_initialized", "cftool.misc.shallow_copy_dict", "torch.tensor", "torch.nn.Linear", "torch.no_grad", "torch.nn.utils.weight_norm", "torch.zeros" ]
[((1525, 1537), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1533, 1537), True, 'import numpy as np\n'), ((1714, 1767), 'numpy.arange', 'np.arange', (['(epochs * num_step_per_epoch - warmup_iters)'], {}), '(epochs * num_step_per_epoch - warmup_iters)\n', (1723, 1767), True, 'import numpy as np\n'), ((1904, 1947), 'numpy.concatenate', 'np.concatenate', (['(warmup_schedule, schedule)'], {}), '((warmup_schedule, schedule))\n', (1918, 1947), True, 'import numpy as np\n'), ((7235, 7250), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7248, 7250), False, 'import torch\n'), ((1644, 1701), 'numpy.linspace', 'np.linspace', (['start_warmup_value', 'base_value', 'warmup_iters'], {}), '(start_warmup_value, base_value, warmup_iters)\n', (1655, 1701), True, 'import numpy as np\n'), ((4525, 4571), 'torch.nn.Linear', 'nn.Linear', (['bottleneck_dim', 'out_dim'], {'bias': '(False)'}), '(bottleneck_dim, out_dim, bias=False)\n', (4534, 4571), True, 'import torch.nn as nn\n'), ((4598, 4624), 'torch.nn.utils.weight_norm', 'nn.utils.weight_norm', (['last'], {}), '(last)\n', (4618, 4624), True, 'import torch.nn as nn\n'), ((5104, 5145), 'torch.nn.functional.normalize', 'nn.functional.normalize', (['net'], {'dim': '(-1)', 'p': '(2)'}), '(net, dim=-1, p=2)\n', (5127, 5145), True, 'import torch.nn as nn\n'), ((6608, 6664), 'torch.nn.functional.softmax', 'F.softmax', (['((teacher_output - self.center) / temp)'], {'dim': '(-1)'}), '((teacher_output - self.center) / temp, dim=-1)\n', (6617, 6664), True, 'import torch.nn.functional as F\n'), ((7335, 7381), 'torch.sum', 'torch.sum', (['teacher_output'], {'dim': '(0)', 'keepdim': '(True)'}), '(teacher_output, dim=0, keepdim=True)\n', (7344, 7381), False, 'import torch\n'), ((7393, 7414), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (7412, 7414), True, 'import torch.distributed as dist\n'), ((8080, 8114), 'torch.nn.functional.softmax', 'F.softmax', (['(centered / temp)'], {'dim': '(-1)'}), '(centered / temp, dim=-1)\n', (8089, 8114), True, 'import torch.nn.functional as F\n'), ((17892, 17936), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['self.student'], {'device_ids': '[trainer.rank]'}), '(self.student, device_ids=[trainer.rank])\n', (17895, 17936), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((17964, 18008), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['self.teacher'], {'device_ids': '[trainer.rank]'}), '(self.teacher, device_ids=[trainer.rank])\n', (17967, 18008), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((2198, 2211), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (2209, 2211), True, 'import torch.nn as nn\n'), ((2213, 2226), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (2224, 2226), True, 'import torch.nn as nn\n'), ((3066, 3090), 'cftool.misc.shallow_copy_dict', 'shallow_copy_dict', (['batch'], {}), '(batch)\n', (3083, 3090), False, 'from cftool.misc import shallow_copy_dict\n'), ((3128, 3167), 'torch.cat', 'torch.cat', (['img_crops[start_idx:end_idx]'], {}), '(img_crops[start_idx:end_idx])\n', (3137, 3167), False, 'import torch\n'), ((3458, 3476), 'torch.cat', 'torch.cat', (['outputs'], {}), '(outputs)\n', (3467, 3476), False, 'import torch\n'), ((3890, 3923), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'bottleneck_dim'], {}), '(in_dim, bottleneck_dim)\n', (3899, 3923), True, 'import torch.nn as nn\n'), ((4448, 4470), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (4461, 4470), True, 'import torch.nn 
as nn\n'), ((4860, 4901), 'torch.nn.init.trunc_normal_', 'nn.init.trunc_normal_', (['m.weight'], {'std': '(0.02)'}), '(m.weight, std=0.02)\n', (4881, 4901), True, 'import torch.nn as nn\n'), ((5694, 5717), 'torch.zeros', 'torch.zeros', (['(1)', 'out_dim'], {}), '(1, out_dim)\n', (5705, 5717), False, 'import torch\n'), ((7428, 7457), 'torch.distributed.all_reduce', 'dist.all_reduce', (['batch_center'], {}), '(batch_center)\n', (7443, 7457), True, 'import torch.distributed as dist\n'), ((9366, 9389), 'cftool.misc.shallow_copy_dict', 'shallow_copy_dict', (['base'], {}), '(base)\n', (9383, 9389), False, 'from cftool.misc import shallow_copy_dict\n'), ((9449, 9472), 'cftool.misc.shallow_copy_dict', 'shallow_copy_dict', (['base'], {}), '(base)\n', (9466, 9472), False, 'from cftool.misc import shallow_copy_dict\n'), ((13037, 13085), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'trainer.use_amp'}), '(enabled=trainer.use_amp)\n', (13060, 13085), False, 'import torch\n'), ((15703, 15718), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15716, 15718), False, 'import torch\n'), ((17730, 17783), 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', 'nn.SyncBatchNorm.convert_sync_batchnorm', (['self.student'], {}), '(self.student)\n', (17769, 17783), True, 'import torch.nn as nn\n'), ((17811, 17864), 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', 'nn.SyncBatchNorm.convert_sync_batchnorm', (['self.teacher'], {}), '(self.teacher)\n', (17850, 17864), True, 'import torch.nn as nn\n'), ((3960, 3989), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'latent_dim'], {}), '(in_dim, latent_dim)\n', (3969, 3989), True, 'import torch.nn as nn\n'), ((4102, 4111), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', (4109, 4111), True, 'import torch.nn as nn\n'), ((4382, 4419), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', 'bottleneck_dim'], {}), '(latent_dim, bottleneck_dim)\n', (4391, 4419), True, 'import torch.nn as nn\n'), ((4982, 5010), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (4999, 5010), True, 'import torch.nn as nn\n'), ((2822, 2882), 'torch.tensor', 'torch.tensor', (['[img_crop.shape[-1] for img_crop in img_crops]'], {}), '([img_crop.shape[-1] for img_crop in img_crops])\n', (2834, 2882), False, 'import torch\n'), ((4048, 4074), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['latent_dim'], {}), '(latent_dim)\n', (4062, 4074), True, 'import torch.nn as nn\n'), ((4187, 4220), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', 'latent_dim'], {}), '(latent_dim, latent_dim)\n', (4196, 4220), True, 'import torch.nn as nn\n'), ((4345, 4354), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', (4352, 4354), True, 'import torch.nn as nn\n'), ((5921, 5995), 'numpy.linspace', 'np.linspace', (['warmup_teacher_temp', 'teacher_temp', 'warmup_teacher_temp_epochs'], {}), '(warmup_teacher_temp, teacher_temp, warmup_teacher_temp_epochs)\n', (5932, 5995), True, 'import numpy as np\n'), ((4287, 4313), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['latent_dim'], {}), '(latent_dim)\n', (4301, 4313), True, 'import torch.nn as nn\n'), ((6112, 6149), 'numpy.ones', 'np.ones', (['teacher_temp_constant_epochs'], {}), '(teacher_temp_constant_epochs)\n', (6119, 6149), True, 'import numpy as np\n'), ((7005, 7035), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['v_logit'], {'dim': '(-1)'}), '(v_logit, dim=-1)\n', (7018, 7035), True, 'import torch.nn.functional as F\n'), ((8152, 8183), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['s_logits'], {'dim': '(-1)'}), '(s_logits, 
dim=-1)\n', (8165, 8183), True, 'import torch.nn.functional as F\n')]
from __future__ import absolute_import

# external modules
from past.builtins import basestring
import numpy as num

# ANUGA modules
import anuga.utilities.log as log
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
                         netcdf_float
from .asc2dem import asc2dem


def dem2array(filename, variable_name='elevation',
              easting_min=None, easting_max=None,
              northing_min=None, northing_max=None,
              use_cache=False, verbose=False,):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    filename should be the .dem file to be read.
    """

    import os
    from anuga.file.netcdf import NetCDFFile

    msg = 'Filename must be a text string'
    assert isinstance(filename, basestring), msg

    msg = 'Extension should be .dem'
    assert os.path.splitext(filename)[1] in ['.dem'], msg

    msg = 'Variable name must be a text string'
    assert isinstance(variable_name, basestring), msg

    # Get NetCDF
    infile = NetCDFFile(filename, netcdf_mode_r)

    if verbose:
        log.critical('Reading DEM from %s' % (filename))

    ncols = int(infile.ncols)
    nrows = int(infile.nrows)
    xllcorner = float(infile.xllcorner)    # Easting of lower left corner
    yllcorner = float(infile.yllcorner)    # Northing of lower left corner
    cellsize = float(infile.cellsize)
    NODATA_value = float(infile.NODATA_value)
    zone = int(infile.zone)
    false_easting = float(infile.false_easting)
    false_northing = float(infile.false_northing)

    # Text strings
    projection = infile.projection
    datum = infile.datum
    units = infile.units

    Z = infile.variables[variable_name][:]
    Z = Z.reshape(nrows, ncols)
    Z = num.where(Z == NODATA_value, num.nan, Z)

    # changed the orientation of Z array to make it consistent with grd2array result
    Z = num.fliplr(Z.T)

    # print ncols, nrows, xllcorner, yllcorner, cellsize, NODATA_value, zone

    x = num.linspace(xllcorner, xllcorner + (ncols - 1) * cellsize, ncols)
    y = num.linspace(yllcorner, yllcorner + (nrows - 1) * cellsize, nrows)

    return x, y, Z
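# Minimal usage sketch (an assumption for illustration only; 'example.dem' is a placeholder
# file name, e.g. a NetCDF .dem produced by asc2dem):
if __name__ == '__main__':
    x, y, Z = dem2array('example.dem', verbose=True)
    print(x.shape, y.shape, Z.shape)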
[ "numpy.where", "numpy.fliplr", "anuga.file.netcdf.NetCDFFile", "os.path.splitext", "numpy.linspace", "anuga.utilities.log.critical" ]
[((1287, 1322), 'anuga.file.netcdf.NetCDFFile', 'NetCDFFile', (['filename', 'netcdf_mode_r'], {}), '(filename, netcdf_mode_r)\n', (1297, 1322), False, 'from anuga.file.netcdf import NetCDFFile\n'), ((2004, 2044), 'numpy.where', 'num.where', (['(Z == NODATA_value)', 'num.nan', 'Z'], {}), '(Z == NODATA_value, num.nan, Z)\n', (2013, 2044), True, 'import numpy as num\n'), ((2138, 2153), 'numpy.fliplr', 'num.fliplr', (['Z.T'], {}), '(Z.T)\n', (2148, 2153), True, 'import numpy as num\n'), ((2239, 2305), 'numpy.linspace', 'num.linspace', (['xllcorner', '(xllcorner + (ncols - 1) * cellsize)', 'ncols'], {}), '(xllcorner, xllcorner + (ncols - 1) * cellsize, ncols)\n', (2251, 2305), True, 'import numpy as num\n'), ((2308, 2374), 'numpy.linspace', 'num.linspace', (['yllcorner', '(yllcorner + (nrows - 1) * cellsize)', 'nrows'], {}), '(yllcorner, yllcorner + (nrows - 1) * cellsize, nrows)\n', (2320, 2374), True, 'import numpy as num\n'), ((1341, 1387), 'anuga.utilities.log.critical', 'log.critical', (["('Reading DEM from %s' % filename)"], {}), "('Reading DEM from %s' % filename)\n", (1353, 1387), True, 'import anuga.utilities.log as log\n'), ((1095, 1121), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1111, 1121), False, 'import os\n')]
from htm_rl.modules.htm.pattern_memory import PatternMemory
from htm.bindings.sdr import SDR
import numpy as np
from tqdm import tqdm

EPS = 1e-12


def get_labels(pm: PatternMemory, data, input_size):
    labels = dict()
    input_pattern = SDR(input_size)
    for i, item in enumerate(data):
        input_pattern.sparse = item
        labels[i] = pm.compute(input_pattern, False)
    return labels


def train(pm: PatternMemory, data, epochs, input_size, noise=0.0):
    input_pattern = SDR(input_size)
    indices = np.arange(len(data))
    for epoch in tqdm(range(epochs)):
        np.random.shuffle(indices)
        for i in indices:
            if noise > 0:
                n_bits = int(noise * len(data[i]))
                bits_to_remove = np.random.choice(data[i], n_bits, replace=False)
                bits_to_add = np.random.choice(np.arange(input_size), n_bits, replace=False)
                noisy_sample = np.setdiff1d(data[i], bits_to_remove)
                noisy_sample = np.union1d(noisy_sample, bits_to_add)
            else:
                noisy_sample = data[i]
            input_pattern.sparse = noisy_sample
            pm.compute(input_pattern, True)
        # print(f'epoch {epoch}: {get_labels(pm, data, input_size)}')

    labels = get_labels(pm, data, input_size)
    return labels


def test_retrieval(pm: PatternMemory, data, labels):
    iou = list()
    for i, item in enumerate(data):
        if labels[i] is not None:
            pattern = pm.get_pattern(labels[i])
            iou.append(np.intersect1d(pattern, item).size / (np.union1d(pattern, item).size + EPS))
        else:
            iou.append(0)
    return sum(iou) / len(iou)


def generate_data(input_size, n_patterns, sparsity):
    data = [np.random.choice(np.arange(0, input_size),
                             max(int(input_size * sparsity), 1),
                             replace=False)
            for _ in range(n_patterns)]
    return data


def main():
    input_size = 1000
    epochs = 20
    seed = 5436
    n_patterns = 1000
    sparsity = 0.05

    config = dict(
        input_size=input_size,
        max_segments=1000,
        min_distance=0.1,
        permanence_increment=0.1,
        permanence_decrement=0.01,
        segment_decrement=0.1,
        permanence_connected_threshold=0.5,
        seed=seed
    )

    data = generate_data(input_size, n_patterns, sparsity)
    pm = PatternMemory(**config)

    labels = train(pm, data, epochs, input_size, noise=0.09)
    mean_iou = test_retrieval(pm, data, labels)
    print(mean_iou)


if __name__ == '__main__':
    main()
[ "numpy.intersect1d", "numpy.union1d", "numpy.random.choice", "htm.bindings.sdr.SDR", "htm_rl.modules.htm.pattern_memory.PatternMemory", "numpy.setdiff1d", "numpy.arange", "numpy.random.shuffle" ]
[((243, 258), 'htm.bindings.sdr.SDR', 'SDR', (['input_size'], {}), '(input_size)\n', (246, 258), False, 'from htm.bindings.sdr import SDR\n'), ((491, 506), 'htm.bindings.sdr.SDR', 'SDR', (['input_size'], {}), '(input_size)\n', (494, 506), False, 'from htm.bindings.sdr import SDR\n'), ((2326, 2349), 'htm_rl.modules.htm.pattern_memory.PatternMemory', 'PatternMemory', ([], {}), '(**config)\n', (2339, 2349), False, 'from htm_rl.modules.htm.pattern_memory import PatternMemory\n'), ((588, 614), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (605, 614), True, 'import numpy as np\n'), ((1756, 1780), 'numpy.arange', 'np.arange', (['(0)', 'input_size'], {}), '(0, input_size)\n', (1765, 1780), True, 'import numpy as np\n'), ((751, 799), 'numpy.random.choice', 'np.random.choice', (['data[i]', 'n_bits'], {'replace': '(False)'}), '(data[i], n_bits, replace=False)\n', (767, 799), True, 'import numpy as np\n'), ((924, 961), 'numpy.setdiff1d', 'np.setdiff1d', (['data[i]', 'bits_to_remove'], {}), '(data[i], bits_to_remove)\n', (936, 961), True, 'import numpy as np\n'), ((993, 1030), 'numpy.union1d', 'np.union1d', (['noisy_sample', 'bits_to_add'], {}), '(noisy_sample, bits_to_add)\n', (1003, 1030), True, 'import numpy as np\n'), ((847, 868), 'numpy.arange', 'np.arange', (['input_size'], {}), '(input_size)\n', (856, 868), True, 'import numpy as np\n'), ((1528, 1557), 'numpy.intersect1d', 'np.intersect1d', (['pattern', 'item'], {}), '(pattern, item)\n', (1542, 1557), True, 'import numpy as np\n'), ((1564, 1589), 'numpy.union1d', 'np.union1d', (['pattern', 'item'], {}), '(pattern, item)\n', (1574, 1589), True, 'import numpy as np\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- # # ade: # Asynchronous Differential Evolution. # # Copyright (C) 2018-19 by <NAME>, # http://edsuom.com/ade # # See edsuom.com for API documentation as well as information about # Ed's background and other projects, software and otherwise. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS # IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language # governing permissions and limitations under the License. """ Example script I{voc.py}: Identifying coefficients for the open-circuit voltage of an AGM lead-acid battery over time. This example reads a two-item-per-line CSV file. Each line contains: (B{1}) the time in seconds from some arbitrary starting time, (B{2}) the battery voltage with no charge or discharge current. Then it uses asynchronous differential evolution to efficiently find a nonlinear best-fit curve. """ import time import numpy as np from scipy import signal from twisted.internet import reactor, defer from asynqueue.process import ProcessQueue from yampex.plot import Plotter from ade.population import Population from ade.de import DifferentialEvolution from ade.image import ImageViewer from ade.util import * from data import TimeData class BatteryData(TimeData): """ Run L{setup} on my instance to decompress and load the voc.csv.bz2 CSV file. The CSV file isn't included in the I{ade} package and will automatically be downloaded from U{edsuom.com}. Here's the privacy policy for my site (it's short, as all good privacy policies should be):: Privacy policy: I don’t sniff out, track, or share anything identifying individual visitors to this site. There are no cookies or anything in place to let me see where you go on the Internetthat’s creepy. All I get (like anyone else with a web server), is plain vanilla server logs with “referral” info about which web page sent you to this one. @see: The L{Data} base class. """ basename = "voc" class Reporter(object): """ An instance of me is called each time a combination of parameters is found that's better than any of the others thus far. Prints the sum-of-squared error and parameter values to the console and updates a plot image (PNG) at I{plotFilePath}. @cvar plotFilePath: The file name in the current directory of a PNG file to write an update with a Matplotlib plot image of the actual vs. modeled temperature versus thermistor resistance curves. """ plotFilePath = "voc.png" N_curve_plot = 200 extrapolationMultiple = 3 def __init__(self, evaluator, population): """ C{Reporter(evaluator, population)} """ self.ev = evaluator self.prettyValues = population.pm.prettyValues self.pt = Plotter( 2, filePath=self.plotFilePath, width=15, height=10) self.pt.use_grid() self.pt.use_timex() ImageViewer(self.plotFilePath) def __call__(self, values, counter, SSE): """ Prints out a new best parameter combination and its curve vs observations, with lots of extrapolation to the right. 
""" def titlePart(*args): titleParts.append(sub(*args)) SSE_info = sub("SSE={:g}", SSE) titleParts = [] titlePart("Voltage vs Time (sec)") titlePart(SSE_info) titlePart("k={:d}", counter) msg(0, self.prettyValues(values, SSE_info+", with"), 0) with self.pt as sp: sp.set_title(", ".join(titleParts)) t = self.ev.t V = self.ev.X[:,0] # Model versus observations sp.add_line('-', 1) sp.set_ylabel("V") sp.set_zeroLine(values[-1]) sp.add_annotation(0, "Battery disconnect") sp.add_annotation(-1, "Last observation") sp.add_textBox("NE", "Estimated VOC: {:.2f} V", values[-1]) ax = sp(t, V) tm = np.linspace( t.min(), self.extrapolationMultiple*t.max(), self.N_curve_plot) V_curve = self.ev.curve(tm, *values) ax.plot(tm, V_curve, color='red', marker='o', markersize=2) # Residuals res = self.ev.curve(t, *values) - V sp.set_ylabel("dV") sp.set_zeroLine() k = np.argmax(np.abs(res[2:])) + 2 resPercentage = 100 * res[k]/V[k] sp(t, res) self.pt.show() class Evaluator(Picklable): """ I evaluate battery VOC model fitness. Construct an instance of me, run the L{setup} method, and wait (in non-blocking Twisted-friendly fashion) for the C{Deferred} it returns to fire. Then call the instance a bunch of times with parameter values for a L{curve} to get a (deferred) sum-of-squared-error fitness of the curve to the thermistor data. """ scale_SSE = 100 bounds = { # Initial rapid drop with up to 10 minute time constant 'a1': (0, 40), 'b1': (1, 10*60), 'c1': (1, 200), # Middle drop with 20 min to 2 hour time constant 'a2': (0, 600), 'b2': (20*60, 2*3600), 'c2': (50, 1000), # Slow settling with 1-12 hour time constant 'a3': (0, 800), 'b3': (3600, 12*3600), 'c3': (100, 4000), # A bit beyond the extremes for VOC of an AGM lead acid battery 'voc': (45, 54), } def setup(self): """ Returns a C{Deferred} that fires with two equal-length sequences, the names and bounds of all parameters to be determined. Also creates a dict of I{indices} in those sequences, keyed by parameter name. """ def done(null): for name in ('t', 'X'): setattr(self, name, getattr(data, name)) return names, bounds bounds = [] names = sorted(self.bounds.keys()) for name in names: bounds.append(self.bounds[name]) # The data data = BatteryData() return data.setup().addCallbacks(done, oops) def curve(self, t, *args): """ Given a 1-D time vector followed by arguments defining curve parameters, returns a 1-D vector of battery voltage over that time with with no charge or discharge current, with one particular but unknown SOC. The model implements this equation: M{V = a1*exp(-t/b1+c1) + ... ak*exp(-t/bk+ck) + voc} """ V = args[-1] for k in range(3): a, b, c = args[3*k:3*k+3] V += a*np.exp(-(t+c)/b) return V def __call__(self, values): """ Evaluation function for the parameter I{values}. """ V = self.X[:,0] V_curve = self.curve(self.t, *values) return self.scale_SSE * np.sum(np.square(V_curve - V)) class Runner(object): """ I run everything to fit a curve to thermistor data using asynchronous differential evolution. Construct an instance of me with an instance of L{Args} that has parsed command-line options, then have the Twisted reactor call the instance when it starts. Then start the reactor and watch the fun. """ def __init__(self, args): """ C{Runner(args)} """ self.args = args self.ev = Evaluator() N = args.N if args.N else ProcessQueue.cores()-1 self.q = ProcessQueue(N, returnFailure=True) self.fh = open("voc.log", 'w') if args.l else True msg(self.fh) @defer.inlineCallbacks def shutdown(self): """ Call this to shut me down when I'm done. 
Shuts down my C{ProcessQueue}, which can take a moment. Repeated calls have no effect. """ if self.q is not None: msg("Shutting down...") yield self.q.shutdown() msg("Task Queue is shut down") self.q = None msg("Goodbye") def evaluate(self, values): """ The function that gets called with each combination of parameters to be evaluated for fitness. """ if values is None: return self.shutdown() values = list(values) if self.q: return self.q.call(self.ev, values) @defer.inlineCallbacks def __call__(self): t0 = time.time() args = self.args names_bounds = yield self.ev.setup().addErrback(oops) self.p = Population( self.evaluate, names_bounds[0], names_bounds[1], popsize=args.p) yield self.p.setup().addErrback(oops) reporter = Reporter(self.ev, self.p) self.p.addCallback(reporter) F = [float(x) for x in args.F.split(',')] de = DifferentialEvolution( self.p, CR=args.C, F=F, maxiter=args.m, randomBase=not args.b, uniform=args.u, adaptive=not args.n, bitterEnd=args.e, logHandle=self.fh) yield de() yield self.shutdown() msg(0, "Final population:\n{}", self.p) msg(0, "Elapsed time: {:.2f} seconds", time.time()-t0, 0) msg(None) reactor.stop() def run(self): return self().addErrback(oops) def main(): """ Called when this module is run as a script. """ if args.h: return r = Runner(args) reactor.callWhenRunning(r.run) reactor.run() args = Args( """ Parameter finder for AGM lead-acid battery open-circuit voltage model using Differential Evolution. Downloads a compressed CSV file of real VOC data points from edsuom.com to the current directory (if it's not already present). The data points and the current best-fit curves are plotted in the PNG file (also in the current directory) pfinder.png. You can see the plots, automatically updated, with the Linux command "qiv -Te thermistor.png". (Possibly that other OS may have something that works, too.) Press the Enter key to quit early. """ ) args('-m', '--maxiter', 800, "Maximum number of DE generations to run") args('-e', '--bitter-end', "Keep working to the end even with little progress") args('-p', '--popsize', 20, "Population: # individuals per unknown parameter") args('-C', '--CR', 0.8, "DE Crossover rate CR") args('-F', '--F', "0.5,1.0", "DE mutation scaling F: two values for range") args('-b', '--best', "Use DE/best/1 instead of DE/rand/1") args('-n', '--not-adaptive', "Don't use automatic F adaptation") args('-u', '--uniform', "Initialize population uniformly instead of with LHS") args('-N', '--N-cores', 0, "Limit the number of CPU cores") args('-l', '--logfile', "Write results to logfile 'voc.log' instead of STDOUT") args(main)
[ "numpy.abs", "asynqueue.process.ProcessQueue", "twisted.internet.reactor.stop", "numpy.square", "numpy.exp", "yampex.plot.Plotter", "ade.de.DifferentialEvolution", "asynqueue.process.ProcessQueue.cores", "twisted.internet.reactor.run", "ade.population.Population", "twisted.internet.reactor.callWhenRunning", "time.time", "ade.image.ImageViewer" ]
[((9904, 9934), 'twisted.internet.reactor.callWhenRunning', 'reactor.callWhenRunning', (['r.run'], {}), '(r.run)\n', (9927, 9934), False, 'from twisted.internet import reactor, defer\n'), ((9939, 9952), 'twisted.internet.reactor.run', 'reactor.run', ([], {}), '()\n', (9950, 9952), False, 'from twisted.internet import reactor, defer\n'), ((3223, 3282), 'yampex.plot.Plotter', 'Plotter', (['(2)'], {'filePath': 'self.plotFilePath', 'width': '(15)', 'height': '(10)'}), '(2, filePath=self.plotFilePath, width=15, height=10)\n', (3230, 3282), False, 'from yampex.plot import Plotter\n'), ((3359, 3389), 'ade.image.ImageViewer', 'ImageViewer', (['self.plotFilePath'], {}), '(self.plotFilePath)\n', (3370, 3389), False, 'from ade.image import ImageViewer\n'), ((7956, 7991), 'asynqueue.process.ProcessQueue', 'ProcessQueue', (['N'], {'returnFailure': '(True)'}), '(N, returnFailure=True)\n', (7968, 7991), False, 'from asynqueue.process import ProcessQueue\n'), ((8892, 8903), 'time.time', 'time.time', ([], {}), '()\n', (8901, 8903), False, 'import time\n'), ((9008, 9083), 'ade.population.Population', 'Population', (['self.evaluate', 'names_bounds[0]', 'names_bounds[1]'], {'popsize': 'args.p'}), '(self.evaluate, names_bounds[0], names_bounds[1], popsize=args.p)\n', (9018, 9083), False, 'from ade.population import Population\n'), ((9300, 9467), 'ade.de.DifferentialEvolution', 'DifferentialEvolution', (['self.p'], {'CR': 'args.C', 'F': 'F', 'maxiter': 'args.m', 'randomBase': '(not args.b)', 'uniform': 'args.u', 'adaptive': '(not args.n)', 'bitterEnd': 'args.e', 'logHandle': 'self.fh'}), '(self.p, CR=args.C, F=F, maxiter=args.m, randomBase=\n not args.b, uniform=args.u, adaptive=not args.n, bitterEnd=args.e,\n logHandle=self.fh)\n', (9321, 9467), False, 'from ade.de import DifferentialEvolution\n'), ((9697, 9711), 'twisted.internet.reactor.stop', 'reactor.stop', ([], {}), '()\n', (9709, 9711), False, 'from twisted.internet import reactor, defer\n'), ((7097, 7117), 'numpy.exp', 'np.exp', (['(-(t + c) / b)'], {}), '(-(t + c) / b)\n', (7103, 7117), True, 'import numpy as np\n'), ((7358, 7380), 'numpy.square', 'np.square', (['(V_curve - V)'], {}), '(V_curve - V)\n', (7367, 7380), True, 'import numpy as np\n'), ((7916, 7936), 'asynqueue.process.ProcessQueue.cores', 'ProcessQueue.cores', ([], {}), '()\n', (7934, 7936), False, 'from asynqueue.process import ProcessQueue\n'), ((9652, 9663), 'time.time', 'time.time', ([], {}), '()\n', (9661, 9663), False, 'import time\n'), ((4780, 4795), 'numpy.abs', 'np.abs', (['res[2:]'], {}), '(res[2:])\n', (4786, 4795), True, 'import numpy as np\n')]
"""Calculations involving a pair of Cu atoms """ from typing import Union, Callable import numpy as np from ase import Atoms from ase.units import Ang try: from Morse import MorsePotential from util import map_func except ModuleNotFoundError: from .Morse import MorsePotential from .util import map_func def build_pair(d0: Union[float, int] = 1) -> Callable: """Closure to store the atoms object Args: d0 (Union[float, int], optional): default unit cell length Returns: Callable: function to apply strain """ calc = MorsePotential() a = Atoms('2Cu', positions=[(0., 0., 0.), (0., 0., d0 * Ang)]) a.set_calculator(calc) def change_distance(d: Union[float, int]) -> Atoms: """Function that returns the deformed unit cell under a given hydrostatic strain Args: d (Union[float, int]): distance (Å) Returns: Atoms: deformed atom pair """ a.positions[1, 2] = d * Ang return a return change_distance get_pair = build_pair() # set up the closure def get_pairwise_pe(d: Union[float, int]) -> float: """Calculate the potential energy of two atoms separated by the given distance Args: d (Union[float, int]): distance (Å) Returns: float: potential energy (eV) """ return get_pair(d).get_potential_energy() def get_pairwise_pes(arr: np.ndarray) -> np.ndarray: """Apply pairwise potential energy calculation to an array of distances Args: arr (np.ndarray): array of distances (Å) Returns: np.ndarray: array of potential energies (eV) """ return map_func(get_pairwise_pe, arr) def get_pairwise_force(d: Union[float, int]) -> float: """Calculate the force between two atoms separated by the given distance Args: d (Union[float, int]): distance (Å) Returns: float: force (eV/Å) """ return get_pair(d).get_forces()[1, 2] def get_pairwise_forces(arr: np.ndarray) -> np.ndarray: """Apply pairwise force calculation to an array of distances Args: arr (np.ndarray): array of distances (Å) Returns: np.ndarray: array of forces (eV/Å) """ return map_func(get_pairwise_force, arr) if __name__ == "__main__": print(get_pairwise_pe(2.5)) print(get_pairwise_pes(np.linspace(0, 5, 10)))
[ "ase.Atoms", "Morse.MorsePotential", "numpy.linspace", "util.map_func" ]
[((574, 590), 'Morse.MorsePotential', 'MorsePotential', ([], {}), '()\n', (588, 590), False, 'from Morse import MorsePotential\n'), ((599, 662), 'ase.Atoms', 'Atoms', (['"""2Cu"""'], {'positions': '[(0.0, 0.0, 0.0), (0.0, 0.0, d0 * Ang)]'}), "('2Cu', positions=[(0.0, 0.0, 0.0), (0.0, 0.0, d0 * Ang)])\n", (604, 662), False, 'from ase import Atoms\n'), ((1665, 1695), 'util.map_func', 'map_func', (['get_pairwise_pe', 'arr'], {}), '(get_pairwise_pe, arr)\n', (1673, 1695), False, 'from util import map_func\n'), ((2236, 2269), 'util.map_func', 'map_func', (['get_pairwise_force', 'arr'], {}), '(get_pairwise_force, arr)\n', (2244, 2269), False, 'from util import map_func\n'), ((2358, 2379), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(10)'], {}), '(0, 5, 10)\n', (2369, 2379), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- """ Created on Fri May 5 16:20:14 2017 @author: <NAME> Program for WOS Cited References Analysis """ import pandas as pd import matplotlib.pyplot as plt import numpy as np from collections import Counter df = pd.read_pickle('concatenated.pkl') df = df.dropna(subset = ['PY','CR']) # Get rid of badly imported data cited_ref = df.CR orig_art_yr = df.PY a = cited_ref.size refs_per = np.zeros(a) # Citations per article name = [] # Citation author year = [] # Cited article year of pub age = [] # Cited article age wrt published art. journal = [] # Journal name of cited article for i, row in enumerate(cited_ref.values): auths = cited_ref.values[i] # Read the cell with all the citations for one article parts = auths.split(';') # Split the citations based on semi-colon refs_per[i] = 0; # Count the number of citations # Split the citation into parts based on comma to get the year and journal name for j in parts: if len(j.split(',')) == 3: n,y,jou = j.split(',') elif len(j.split(',')) == 4: n,y,jou,ver = j.split(',') elif len(j.split(',')) == 5: n,y,jou,ver,page = j.split(',') elif len(j.split(',')) == 6: n,y,jou,ver,page,doi = j.split(',') y = y.strip() if y.isdigit(): # Some citations don't have a year, throw them away name.append(n) year.append(y) year = [int(i) for i in year] temp = orig_art_yr.values[i] - float(y) age.append(temp) journal.append(jou) refs_per[i] += 1 else: pass ## Write the Top Most Cited Journals to csv file journal = [x.upper() for x in journal] # Convert all names to uppercase cc = Counter(journal) p = cc.most_common() cols = ['name','count'] pp = pd.DataFrame(p,columns = cols) pp['name'] = pp['name'].str.upper() # Convert all names to uppercase pp = pp.set_index('name') pp = pp.groupby(pp.index).sum() # Find duplicate names and add the counts pp = pp.sort_values(['count'], ascending = [False]) # Sort list by counts pp.to_csv('MaxCitedJournals.csv') # Write to csv file ############################################################################# # Let's make some figures # Number of articles published per year orig_art_yr = np.array(orig_art_yr,int) plt.figure() bins=np.arange(min(orig_art_yr), max(orig_art_yr)+2) plt.hist(orig_art_yr, bins) plt.xticks(orig_art_yr+0.5, orig_art_yr, rotation = 90) plt.ylabel('Number of articles published per year') # Year of publication of cited articles year = np.array(year) plt.figure() plt.hist(year, bins = np.arange(min(year), max(year) + 2, 1)) plt.xlabel('Year of publication of cited articles') # Age of cited references wrt. published article age = np.array(age) plt.figure() #plt.hist(age, bins=np.arange(min(age), max(age) + 2, 1)) plt.hist(age, bins = np.arange(0, 100, 1)) plt.xlabel('Age of cited articles (years)') plt.ylabel('Count') # Total number of cited references per year, and # Average number of cited references per article per year ref_peryear = [] avgref_peryear = [] xx = np.unique(orig_art_yr) i = min(orig_art_yr) for i in xx: ii = orig_art_yr == i p = refs_per[ii].sum() pp = refs_per[ii].mean() ref_peryear.append(p) avgref_peryear.append(pp) ref_peryear = np.array(ref_peryear) avgref_peryear = np.array(avgref_peryear) plt.figure() plt.plot(xx,ref_peryear,'o-') plt.xticks(xx, xx, rotation=90) plt.ylabel('Number of citations per year') plt.figure() plt.plot(xx,avgref_peryear,'o-') plt.xticks(xx, xx, rotation=90) plt.ylabel('Avg. 
number of citations per article, per year') ## Write to file temp1 = pd.DataFrame({'Original article year': orig_art_yr,'references per article': refs_per}) temp1.to_csv('OriginalArticle_Year_RefCount.csv') del temp1 temp1 = pd.DataFrame({'Cited journal year': year,'Cited journal age': age}) temp1.to_csv('CitedJournalAge.csv') del temp1 temp1 = pd.DataFrame({'Year': xx, 'Total refs per year': ref_peryear,'Ave refs per article per year': avgref_peryear}) temp1.to_csv('ReferenceStats.csv') del temp1
[ "pandas.read_pickle", "matplotlib.pyplot.hist", "numpy.unique", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "collections.Counter", "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure", "pandas.DataFrame", "numpy.arange" ]
[((238, 272), 'pandas.read_pickle', 'pd.read_pickle', (['"""concatenated.pkl"""'], {}), "('concatenated.pkl')\n", (252, 272), True, 'import pandas as pd\n'), ((413, 424), 'numpy.zeros', 'np.zeros', (['a'], {}), '(a)\n', (421, 424), True, 'import numpy as np\n'), ((1901, 1917), 'collections.Counter', 'Counter', (['journal'], {}), '(journal)\n', (1908, 1917), False, 'from collections import Counter\n'), ((1968, 1997), 'pandas.DataFrame', 'pd.DataFrame', (['p'], {'columns': 'cols'}), '(p, columns=cols)\n', (1980, 1997), True, 'import pandas as pd\n'), ((2484, 2510), 'numpy.array', 'np.array', (['orig_art_yr', 'int'], {}), '(orig_art_yr, int)\n', (2492, 2510), True, 'import numpy as np\n'), ((2510, 2522), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2520, 2522), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2603), 'matplotlib.pyplot.hist', 'plt.hist', (['orig_art_yr', 'bins'], {}), '(orig_art_yr, bins)\n', (2584, 2603), True, 'import matplotlib.pyplot as plt\n'), ((2604, 2659), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(orig_art_yr + 0.5)', 'orig_art_yr'], {'rotation': '(90)'}), '(orig_art_yr + 0.5, orig_art_yr, rotation=90)\n', (2614, 2659), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2725), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of articles published per year"""'], {}), "('Number of articles published per year')\n", (2684, 2725), True, 'import matplotlib.pyplot as plt\n'), ((2777, 2791), 'numpy.array', 'np.array', (['year'], {}), '(year)\n', (2785, 2791), True, 'import numpy as np\n'), ((2792, 2804), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2802, 2804), True, 'import matplotlib.pyplot as plt\n'), ((2867, 2918), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year of publication of cited articles"""'], {}), "('Year of publication of cited articles')\n", (2877, 2918), True, 'import matplotlib.pyplot as plt\n'), ((2978, 2991), 'numpy.array', 'np.array', (['age'], {}), '(age)\n', (2986, 2991), True, 'import numpy as np\n'), ((2992, 3004), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3002, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3149), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Age of cited articles (years)"""'], {}), "('Age of cited articles (years)')\n", (3116, 3149), True, 'import matplotlib.pyplot as plt\n'), ((3152, 3171), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (3162, 3171), True, 'import matplotlib.pyplot as plt\n'), ((3325, 3347), 'numpy.unique', 'np.unique', (['orig_art_yr'], {}), '(orig_art_yr)\n', (3334, 3347), True, 'import numpy as np\n'), ((3539, 3560), 'numpy.array', 'np.array', (['ref_peryear'], {}), '(ref_peryear)\n', (3547, 3560), True, 'import numpy as np\n'), ((3578, 3602), 'numpy.array', 'np.array', (['avgref_peryear'], {}), '(avgref_peryear)\n', (3586, 3602), True, 'import numpy as np\n'), ((3603, 3615), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3613, 3615), True, 'import matplotlib.pyplot as plt\n'), ((3616, 3647), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'ref_peryear', '"""o-"""'], {}), "(xx, ref_peryear, 'o-')\n", (3624, 3647), True, 'import matplotlib.pyplot as plt\n'), ((3646, 3677), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xx', 'xx'], {'rotation': '(90)'}), '(xx, xx, rotation=90)\n', (3656, 3677), True, 'import matplotlib.pyplot as plt\n'), ((3692, 3734), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of citations per year"""'], {}), "('Number of citations per year')\n", (3702, 3734), 
True, 'import matplotlib.pyplot as plt\n'), ((3747, 3759), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3757, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3760, 3794), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'avgref_peryear', '"""o-"""'], {}), "(xx, avgref_peryear, 'o-')\n", (3768, 3794), True, 'import matplotlib.pyplot as plt\n'), ((3793, 3824), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xx', 'xx'], {'rotation': '(90)'}), '(xx, xx, rotation=90)\n', (3803, 3824), True, 'import matplotlib.pyplot as plt\n'), ((3839, 3899), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Avg. number of citations per article, per year"""'], {}), "('Avg. number of citations per article, per year')\n", (3849, 3899), True, 'import matplotlib.pyplot as plt\n'), ((3929, 4021), 'pandas.DataFrame', 'pd.DataFrame', (["{'Original article year': orig_art_yr, 'references per article': refs_per}"], {}), "({'Original article year': orig_art_yr,\n 'references per article': refs_per})\n", (3941, 4021), True, 'import pandas as pd\n'), ((4086, 4154), 'pandas.DataFrame', 'pd.DataFrame', (["{'Cited journal year': year, 'Cited journal age': age}"], {}), "({'Cited journal year': year, 'Cited journal age': age})\n", (4098, 4154), True, 'import pandas as pd\n'), ((4209, 4324), 'pandas.DataFrame', 'pd.DataFrame', (["{'Year': xx, 'Total refs per year': ref_peryear,\n 'Ave refs per article per year': avgref_peryear}"], {}), "({'Year': xx, 'Total refs per year': ref_peryear,\n 'Ave refs per article per year': avgref_peryear})\n", (4221, 4324), True, 'import pandas as pd\n'), ((3084, 3104), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(1)'], {}), '(0, 100, 1)\n', (3093, 3104), True, 'import numpy as np\n')]
import torch import torch.nn as nn import torch.nn.functional as F from getter import getter from CURE.CURE import CURELearner import numpy as np from matplotlib import pyplot as plt from pathlib import Path def lossplot(config: dict, save_path: str = None) -> None: """Plots the negative of the loss surface. One axis represents the normal direction; the other is a random direction.""" device = config["device"] get_dataloader, get_transformer, get_inverse_transformer, get_model = getter( config["dataset"], config["model_name"]) trainloader = get_dataloader(split="train", batch_size=config["batch_size_train"], shuffle=config["shuffle_train"]) testloader = get_dataloader(split="test", batch_size=config["batch_size_test"], shuffle=False) model = get_model() if config["use_checkpoint"]: checkpoint_path = Path("./data/checkpoints/") transformer = get_transformer() net_CURE = CURELearner(model, trainloader, testloader, lambda_0=config["lambda_0"], lambda_1=config["lambda_1"], lambda_2=config["lambda_2"], transformer=transformer, trial=None, image_min=config["image_min"], image_max=config["image_max"], device=config["device"], path=checkpoint_path / "best_model.data", acc=config["accuracy"]) net_CURE.set_optimizer(optim_alg=config["optimization_algorithm"], args=config["optimizer_arguments"]) net_CURE.import_state(checkpoint_path / config["checkpoint_file"]) model = net_CURE.net model = model.to(device) total_params = sum(p.numel() for p in model.parameters()) print("Total number of parameters: {}".format(total_params)) trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) print("Number of trainable parameters: {}".format(trainable_params)) transformer = get_transformer() inverse_transformer = get_inverse_transformer() L = nn.CrossEntropyLoss() img_shape = (3, 32, 32) flatten = False delta = 30 res = 101 n_points = res**2 for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) inputs.requires_grad_() outputs = model.eval()(inputs) loss = L(outputs, targets) gradient = torch.autograd.grad(loss, inputs, create_graph=True)[0] gradient = torch.flatten(gradient, start_dim=1) normal = F.normalize(gradient, dim=1) v = torch.rand_like(torch.zeros(normal.shape), device=device) v = F.normalize(v, dim=1) if not flatten: normal = normal.reshape(inputs.shape) v = v.reshape(inputs.shape) for k, x in enumerate(inputs): scalars = np.linspace(-delta, delta, res) grid = torch.empty(res, res) for i in range(res): for j in range(res): x_star = x if flatten: x_star = torch.flatten(x, start_dim=0) x_star = x_star + scalars[i]*normal[k] + scalars[j]*v[k] x_star = x_star.reshape((1,)+img_shape) y_star = model.eval()(x_star) y_true = torch.zeros_like(y_star) grid[i, j] = L(y_star, targets[k].unsqueeze(0)).detach() grid = grid.detach().numpy() scalars = np.outer(scalars, np.ones(res)) masks = [scalars, scalars.T] plt.figure() ax = plt.axes(projection='3d') ax.plot_surface(masks[0], masks[1], grid, cmap='viridis', edgecolor='none') ax.scatter(0, 0, grid[res // 2, res // 2]) ax.set_xlabel('Surface Normal Direction') ax.set_ylabel('Random Direction') if save_path is not None: plt.savefig(save_path + f"loss_{k}") plt.show() plt.pause(.001) # Prevents blocking if k > 3: exit()
[ "matplotlib.pyplot.savefig", "torch.nn.CrossEntropyLoss", "CURE.CURE.CURELearner", "getter.getter", "pathlib.Path", "numpy.ones", "torch.nn.functional.normalize", "numpy.linspace", "matplotlib.pyplot.figure", "torch.autograd.grad", "matplotlib.pyplot.axes", "matplotlib.pyplot.pause", "torch.zeros_like", "torch.empty", "torch.zeros", "torch.flatten", "matplotlib.pyplot.show" ]
[((500, 547), 'getter.getter', 'getter', (["config['dataset']", "config['model_name']"], {}), "(config['dataset'], config['model_name'])\n", (506, 547), False, 'from getter import getter\n'), ((1978, 1999), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1997, 1999), True, 'import torch.nn as nn\n'), ((865, 892), 'pathlib.Path', 'Path', (['"""./data/checkpoints/"""'], {}), "('./data/checkpoints/')\n", (869, 892), False, 'from pathlib import Path\n'), ((961, 1299), 'CURE.CURE.CURELearner', 'CURELearner', (['model', 'trainloader', 'testloader'], {'lambda_0': "config['lambda_0']", 'lambda_1': "config['lambda_1']", 'lambda_2': "config['lambda_2']", 'transformer': 'transformer', 'trial': 'None', 'image_min': "config['image_min']", 'image_max': "config['image_max']", 'device': "config['device']", 'path': "(checkpoint_path / 'best_model.data')", 'acc': "config['accuracy']"}), "(model, trainloader, testloader, lambda_0=config['lambda_0'],\n lambda_1=config['lambda_1'], lambda_2=config['lambda_2'], transformer=\n transformer, trial=None, image_min=config['image_min'], image_max=\n config['image_max'], device=config['device'], path=checkpoint_path /\n 'best_model.data', acc=config['accuracy'])\n", (972, 1299), False, 'from CURE.CURE import CURELearner\n'), ((2432, 2468), 'torch.flatten', 'torch.flatten', (['gradient'], {'start_dim': '(1)'}), '(gradient, start_dim=1)\n', (2445, 2468), False, 'import torch\n'), ((2486, 2514), 'torch.nn.functional.normalize', 'F.normalize', (['gradient'], {'dim': '(1)'}), '(gradient, dim=1)\n', (2497, 2514), True, 'import torch.nn.functional as F\n'), ((2598, 2619), 'torch.nn.functional.normalize', 'F.normalize', (['v'], {'dim': '(1)'}), '(v, dim=1)\n', (2609, 2619), True, 'import torch.nn.functional as F\n'), ((2357, 2409), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', 'inputs'], {'create_graph': '(True)'}), '(loss, inputs, create_graph=True)\n', (2376, 2409), False, 'import torch\n'), ((2544, 2569), 'torch.zeros', 'torch.zeros', (['normal.shape'], {}), '(normal.shape)\n', (2555, 2569), False, 'import torch\n'), ((2797, 2828), 'numpy.linspace', 'np.linspace', (['(-delta)', 'delta', 'res'], {}), '(-delta, delta, res)\n', (2808, 2828), True, 'import numpy as np\n'), ((2848, 2869), 'torch.empty', 'torch.empty', (['res', 'res'], {}), '(res, res)\n', (2859, 2869), False, 'import torch\n'), ((3533, 3545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3543, 3545), True, 'from matplotlib import pyplot as plt\n'), ((3563, 3588), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (3571, 3588), True, 'from matplotlib import pyplot as plt\n'), ((3919, 3929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3927, 3929), True, 'from matplotlib import pyplot as plt\n'), ((3942, 3958), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (3951, 3958), True, 'from matplotlib import pyplot as plt\n'), ((3466, 3478), 'numpy.ones', 'np.ones', (['res'], {}), '(res)\n', (3473, 3478), True, 'import numpy as np\n'), ((3870, 3906), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_path + f'loss_{k}')"], {}), "(save_path + f'loss_{k}')\n", (3881, 3906), True, 'from matplotlib import pyplot as plt\n'), ((3282, 3306), 'torch.zeros_like', 'torch.zeros_like', (['y_star'], {}), '(y_star)\n', (3298, 3306), False, 'import torch\n'), ((3036, 3065), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(0)'}), '(x, start_dim=0)\n', (3049, 3065), False, 'import torch\n')]
#!/usr/bin/python # encoding: utf-8 import torch import albumentations as A import torch.nn as nn from torch.autograd import Variable from torch.utils.data import Dataset import collections from PIL import Image, ImageFilter import matplotlib.pyplot as plt import math import random import numpy as np import cv2 import os import PIL from data_gen import generate_rbox, generate_rbox2 from data_gen import load_gt_annoataion from data_gen import get_images import torchvision.transforms as transforms import tensorflow as tf from data_gen import draw_box_points class ImgAugTransform: def __init__(self): self.aug = A.Compose([ A.OneOf([ A.Blur(blur_limit=10,p=0.5), A.MedianBlur(blur_limit=5,p=0.5), ]), A.OneOf([ A.CoarseDropout(max_holes=7, min_holes=3, min_height=3, min_width=1, max_height=16, max_width=4, fill_value=255, p=0.5), A.CoarseDropout(max_holes=7, min_holes=3, min_height=3, min_width=1, max_height=16, max_width=4, fill_value=170, p=0.5), A.CoarseDropout(max_holes=7, min_holes=3, min_height=3, min_width=1, max_height=16, max_width=4, fill_value=85, p=0.5), A.CoarseDropout(max_holes=7, min_holes=3, min_height=3, min_width=1, max_height=16, max_width=4, fill_value=0, p=0.5), ]), # A.RandomContrast(limit=0.05, p=0.75), # A.RandomBrightness(limit=0.05, p=0.75), # A.RandomBrightnessContrast(contrast_limit=0.05, brightness_limit=0.05, p=0.75), ]) def __call__(self, img): img = np.array(img) transformed_img = self.aug(image=img)['image'] return Image.fromarray(transformed_img) # return transformed_img def to_array(img): img = np.asarray(img, dtype=np.float32) img /= 128 img -= 1 return img # return img def random_dilate(img): img = np.array(img) img = cv2.dilate(img, np.ones(shape=(random.randint(1, 3), random.randint(1, 3)), dtype=np.uint8)) return Image.fromarray(img) # return img def random_erode(img): img = np.array(img) img = cv2.erode(img, np.ones(shape=(random.randint(1, 3), random.randint(1, 3)), dtype=np.uint8)) return Image.fromarray(img) # return img def train_transforms(is_crnn = True): if is_crnn: transform = transforms.Compose([ transforms.RandomApply( [ random_dilate, ], p=0.15), transforms.RandomApply( [ random_erode, ], p=0.15), transforms.RandomAffine(degrees=3, scale=(0.95, 1.05), shear=3, resample=Image.NEAREST, fillcolor=(255,255,255)), # transforms.RandomApply( # [ # to_array, # ], # p=1), transforms.RandomApply( [ ImgAugTransform(), ], p=0.6), transforms.Grayscale(), transforms.ToTensor() ]) else: transform = transforms.Compose([ transforms.RandomApply( [ random_dilate, ], p=0.15), transforms.RandomApply( [ random_erode, ], p=0.15), transforms.RandomAffine(degrees=3, scale=(0.95, 1.05), shear=3, resample=Image.NEAREST, fillcolor=(255, 255, 255)), # transforms.RandomApply( # [ # to_array, # ], # p=1), transforms.RandomApply( [ ImgAugTransform(), ], p=0.6), transforms.ToTensor() ]) return transform def test_transforms(is_crnn = True): if is_crnn: transform = transforms.Compose([ transforms.Grayscale(), transforms.ToTensor() ]) else: transform = transforms.Compose([ transforms.ToTensor() ]) return transform def cut_image(img, new_size, word_gto): if len(word_gto) > 0: rep = True cnt = 0 while rep: if cnt > 30: return img text_poly = word_gto[random.randint(0, len(word_gto) - 1)] center = text_poly.sum(0) / 4 xs = int(center[0] - random.uniform(-100, 100) - new_size[1] / 2) xs = max(xs, 1) ys = int(center[1] - random.uniform(-100, 100) - new_size[0] / 2) ys = max(ys, 1) crop_rect = (xs, ys, xs + new_size[1], ys + new_size[0]) crop_img = img[crop_rect[1]:crop_rect[3], 
crop_rect[0]:crop_rect[2]] # cv2.imshow('dasd',crop_img) if crop_img.shape[0] == crop_img.shape[1]: rep = False else: cnt += 1 else: xs = int(random.uniform(0, img.shape[1])) ys = int(random.uniform(0, img.shape[0])) crop_rect = (xs, ys, xs + new_size[1], ys + new_size[0]) crop_img = img[crop_rect[1]:crop_rect[3], crop_rect[0]:crop_rect[2]] if len(word_gto) > 0: word_gto[:, :, 0] -= xs word_gto[:, :, 1] -= ys return crop_img class ocrDataset(Dataset): def __init__(self, root, norm_height = 48,in_train = True, target_transform=None,is_crnn = True): self.norm_height = norm_height self.path = self.get_path(root) self.root = root self.train_transform = train_transforms(is_crnn) self.test_transform = test_transforms(is_crnn) self.in_train = in_train self.target_transform = target_transform def get_path(self,data_path): base_dir = os.path.dirname(data_path) files_out = [] cnt = 0 with open(data_path) as f: while True: line = f.readline() if not line: break line = line.strip() if len(line) == 0: continue if not line[0] == '/': line = '{0}/{1}'.format(base_dir, line) files_out.append(line) cnt += 1 # if cnt > 100: # break return files_out def get_data(self,image_name): src_del = " " spl = image_name.split(" ") if len(spl) == 1: spl = image_name.split(",") src_del = "," image_name = spl[0].strip() # image_name = (spl[0] + ' '+ spl[1]).strip() gt_txt = '' if len(spl) > 1: gt_txt = "" delim = "" for k in range(1, len(spl)): gt_txt += delim + spl[k] delim = src_del if len(gt_txt) > 1 and gt_txt[0] == '"' and gt_txt[-1] == '"': gt_txt = gt_txt[1:-1] if image_name[len(image_name) - 1] == ',': image_name = image_name[0:-1] im = cv2.imread(image_name) im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) # im = im[2:im.shape[0]-2,:,:] # im_zero = np.zeros((self.norm_height,im.shape[1],3), np.uint8) if self.norm_height > im.shape[0]: im_samp = np.ones((self.norm_height,im.shape[1],3), np.uint8)*255 rand = np.random.randint(0,self.norm_height - im.shape[0]) im_samp[rand:rand + im.shape[0],:,:] = im else: scale = self.norm_height / float(im.shape[0]) width = int(im.shape[1] * scale) im_samp = cv2.resize(im, (int(width), self.norm_height)) image = PIL.Image.fromarray(np.uint8(im_samp)) # image = np.asarray(im, dtype=np.float32) # image /= 128 # image -= 1 #get labels # gt_labels = [] # for k in range(len(gt_txt)): # if gt_txt[k] in self.codec_rev: # gt_labels.append(self.codec_rev[gt_txt[k]]) # else: # gt_labels.append(3) # # return image,gt_labels return image,gt_txt def __len__(self): return len(self.path) def __getitem__(self, index): try: image,label = self.get_data(self.path[index]) except IOError: print('Corrupted image for %d' % index) return self[index + 1] if self.in_train : image = self.train_transform(image) else: image = self.test_transform(image) # image = np.array(image) if self.target_transform is not None: label = self.target_transform(label) return (image, label) class alignCollate(object): def __init__(self): pass def __call__(self, batch): images, labels = zip(*batch) c = images[0].size(0) h = max([p.size(1) for p in images]) w = max([p.size(2) for p in images]) batch_images = torch.zeros(len(images), c, h, w).fill_(1) for i, image in enumerate(images): started_h = max(0, random.randint(0, h - image.size(1))) started_w = max(0, random.randint(0, w - image.size(2))) batch_images[i, :, started_h:started_h + image.size(1), started_w:started_w + image.size(2)] = image return batch_images, labels # Transform class E2Edataset(Dataset): def __init__(self, train_list, input_size=512, normalize = True): 
super(E2Edataset, self).__init__() self.image_list = np.array(get_images(train_list)) self.input_size = input_size self.normalize = normalize print('{} training images in {}'.format(self.image_list.shape[0], train_list)) self.transform = transforms.Compose([ transforms.ColorJitter(.3,.3,.3,.3), transforms.RandomGrayscale(p=0.1) ]) def __len__(self): return len(self.image_list) def __getitem__(self, index): im_name = self.image_list[index] im = cv2.imread(im_name) # image # cv2.imshow('sad',im) txt_fn = im_name.replace(os.path.basename(im_name).split('.')[1], 'txt') base_name = os.path.basename(txt_fn) txt_fn_gt = '{0}/gt_{1}'.format(os.path.dirname(im_name), base_name) text_polys, text_tags, labels_txt = load_gt_annoataion(txt_fn_gt, True) # load annotation info resize_w = self.input_size resize_h = self.input_size scaled = cut_image(im, (self.input_size, self.input_size), text_polys) if scaled.shape[0] == 0 or scaled.shape[1] == 0: scaled = im if scaled.shape[1] != resize_w or scaled.shape[0] != resize_h: ratio_img= min(scaled.shape[1]/scaled.shape[0],scaled.shape[0]/scaled.shape[1]) if scaled.shape[0] > scaled.shape[1]: resize_w =resize_w * ratio_img scalex = scaled.shape[1] / resize_w scaley = scaled.shape[0] / resize_h scaled = cv2.resize(scaled, dsize=(int(resize_w), int(resize_h))) else : resize_h = resize_h * ratio_img scalex = scaled.shape[1] / resize_w scaley = scaled.shape[0] / resize_h scaled = cv2.resize(scaled, dsize=(int(resize_w), int(resize_h))) # continue if len(text_polys) > 0: text_polys[:, :, 0] /= scalex text_polys[:, :, 1] /= scaley scaled = cv2.copyMakeBorder(scaled, 0, self.input_size - scaled.shape[0], 0, self.input_size - scaled.shape[1], cv2.BORDER_CONSTANT,value=[255,255,255]) # scaled_show = scaled.copy() # for (i, c) in enumerate(text_polys): # draw_box_points(scaled_show, c, color=(0, 255, 0), thickness=1) # # instead of creating a new image, I simply modify the old one # # # # show the modified image with all the rectangles at the end.
# cv2.imshow('img', scaled_show) pim = PIL.Image.fromarray(np.uint8(scaled)) if self.transform: pim = self.transform(pim) im = np.array(pim) new_h, new_w, _ = im.shape score_map, geo_map, training_mask, gt_idx, gt_out, labels_out = generate_rbox(im, (new_h, new_w), text_polys, text_tags, labels_txt, vis=False) im = np.asarray(im) ## if self.normalize: im = im.astype(np.float32) im /= 128 im -= 1 im = torch.from_numpy(im).permute(2,0,1) return im, score_map, geo_map, training_mask, gt_idx, gt_out, labels_out def E2Ecollate(batch): img = [] gt_boxes = [] texts = [] scores = [] training_masks = [] for per_batch in batch: img.append(per_batch[0]) scores.append(per_batch[1]) training_masks.append(per_batch[3]) gt_boxes.append(per_batch[5]) texts.append(per_batch[6]) return torch.stack(img, 0), gt_boxes, texts,scores,training_masks class BeamSearchDecoder(): def __init__(self, lib, corpus, chars, word_chars, beam_width=20, lm_type='Words', lm_smoothing=0.01, tfsess=None): word_beam_search_module = tf.load_op_library(lib) self.mat = tf.placeholder(tf.float32) corpus = open(corpus).read() chars = open(chars).read() word_chars = open(word_chars).read() self.beamsearch_decoder = word_beam_search_module.word_beam_search(self.mat, beam_width, lm_type, lm_smoothing, corpus, chars, word_chars) self.tfsess = tfsess or tf.Session() self.idx2char = dict(zip(range(0, len(chars)), chars)) def beamsearch(self, mat): mat = np.concatenate((mat[:, :, 1:], mat[:, :, :1]), axis=-1) results = self.tfsess.run(self.beamsearch_decoder, {self.mat: mat}) return results def decode(self, preds_idx): return [''.join([self.idx2char[idx] for idx in row if idx < len(self.idx2char)]) for row in preds_idx] if __name__ == '__main__': llist = './data/ICDAR2015.txt' data = E2Edataset(train_list=llist) E2Edataloader = torch.utils.data.DataLoader(data, batch_size=2, shuffle=False, collate_fn=E2Ecollate) for index, data in enumerate(E2Edataloader): im = data
[ "numpy.uint8", "albumentations.MedianBlur", "albumentations.Blur", "torchvision.transforms.Grayscale", "torch.from_numpy", "numpy.array", "torchvision.transforms.ColorJitter", "tensorflow.load_op_library", "tensorflow.placeholder", "tensorflow.Session", "numpy.asarray", "data_gen.generate_rbox", "numpy.concatenate", "torchvision.transforms.ToTensor", "random.randint", "random.uniform", "torchvision.transforms.RandomAffine", "data_gen.get_images", "numpy.ones", "albumentations.CoarseDropout", "data_gen.load_gt_annoataion", "os.path.dirname", "cv2.cvtColor", "cv2.imread", "torchvision.transforms.RandomApply", "PIL.Image.fromarray", "torchvision.transforms.RandomGrayscale", "cv2.copyMakeBorder", "torch.stack", "numpy.random.randint", "os.path.basename", "torch.utils.data.DataLoader" ]
[((1965, 1998), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (1975, 1998), True, 'import numpy as np\n'), ((2094, 2107), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2102, 2107), True, 'import numpy as np\n'), ((2222, 2242), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2237, 2242), False, 'from PIL import Image, ImageFilter\n'), ((2295, 2308), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2303, 2308), True, 'import numpy as np\n'), ((2422, 2442), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2437, 2442), False, 'from PIL import Image, ImageFilter\n'), ((14413, 14503), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['data'], {'batch_size': '(2)', 'shuffle': '(False)', 'collate_fn': 'E2Ecollate'}), '(data, batch_size=2, shuffle=False, collate_fn=\n E2Ecollate)\n', (14440, 14503), False, 'import torch\n'), ((1782, 1795), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1790, 1795), True, 'import numpy as np\n'), ((1868, 1900), 'PIL.Image.fromarray', 'Image.fromarray', (['transformed_img'], {}), '(transformed_img)\n', (1883, 1900), False, 'from PIL import Image, ImageFilter\n'), ((5941, 5967), 'os.path.dirname', 'os.path.dirname', (['data_path'], {}), '(data_path)\n', (5956, 5967), False, 'import os\n'), ((7205, 7227), 'cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (7215, 7227), False, 'import cv2\n'), ((7241, 7276), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (7253, 7276), False, 'import cv2\n'), ((10200, 10219), 'cv2.imread', 'cv2.imread', (['im_name'], {}), '(im_name)\n', (10210, 10219), False, 'import cv2\n'), ((10372, 10396), 'os.path.basename', 'os.path.basename', (['txt_fn'], {}), '(txt_fn)\n', (10388, 10396), False, 'import os\n'), ((10519, 10554), 'data_gen.load_gt_annoataion', 'load_gt_annoataion', (['txt_fn_gt', '(True)'], {}), '(txt_fn_gt, True)\n', (10537, 10554), False, 'from data_gen import load_gt_annoataion\n'), ((12358, 12371), 'numpy.array', 'np.array', (['pim'], {}), '(pim)\n', (12366, 12371), True, 'import numpy as np\n'), ((12481, 12560), 'data_gen.generate_rbox', 'generate_rbox', (['im', '(new_h, new_w)', 'text_polys', 'text_tags', 'labels_txt'], {'vis': '(False)'}), '(im, (new_h, new_w), text_polys, text_tags, labels_txt, vis=False)\n', (12494, 12560), False, 'from data_gen import generate_rbox, generate_rbox2\n'), ((12576, 12590), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (12586, 12590), True, 'import numpy as np\n'), ((13181, 13200), 'torch.stack', 'torch.stack', (['img', '(0)'], {}), '(img, 0)\n', (13192, 13200), False, 'import torch\n'), ((13423, 13446), 'tensorflow.load_op_library', 'tf.load_op_library', (['lib'], {}), '(lib)\n', (13441, 13446), True, 'import tensorflow as tf\n'), ((13466, 13492), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (13480, 13492), True, 'import tensorflow as tf\n'), ((13987, 14042), 'numpy.concatenate', 'np.concatenate', (['(mat[:, :, 1:], mat[:, :, :1])'], {'axis': '(-1)'}), '((mat[:, :, 1:], mat[:, :, :1]), axis=-1)\n', (14001, 14042), True, 'import numpy as np\n'), ((5146, 5177), 'random.uniform', 'random.uniform', (['(0)', 'img.shape[1]'], {}), '(0, img.shape[1])\n', (5160, 5177), False, 'import random\n'), ((5192, 5223), 'random.uniform', 'random.uniform', (['(0)', 'img.shape[0]'], {}), '(0, img.shape[0])\n', (5206, 5223), False, 'import random\n'), ((7531, 7583), 
'numpy.random.randint', 'np.random.randint', (['(0)', '(self.norm_height - im.shape[0])'], {}), '(0, self.norm_height - im.shape[0])\n', (7548, 7583), True, 'import numpy as np\n'), ((7861, 7878), 'numpy.uint8', 'np.uint8', (['im_samp'], {}), '(im_samp)\n', (7869, 7878), True, 'import numpy as np\n'), ((9704, 9726), 'data_gen.get_images', 'get_images', (['train_list'], {}), '(train_list)\n', (9714, 9726), False, 'from data_gen import get_images\n'), ((10437, 10461), 'os.path.dirname', 'os.path.dirname', (['im_name'], {}), '(im_name)\n', (10452, 10461), False, 'import os\n'), ((11676, 11827), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['scaled', '(0)', '(self.input_size - scaled.shape[0])', '(0)', '(self.input_size - scaled.shape[1])', 'cv2.BORDER_CONSTANT'], {'value': '[255, 255, 255]'}), '(scaled, 0, self.input_size - scaled.shape[0], 0, self.\n input_size - scaled.shape[1], cv2.BORDER_CONSTANT, value=[255, 255, 255])\n', (11694, 11827), False, 'import cv2\n'), ((12262, 12278), 'numpy.uint8', 'np.uint8', (['scaled'], {}), '(scaled)\n', (12270, 12278), True, 'import numpy as np\n'), ((13865, 13877), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13875, 13877), True, 'import tensorflow as tf\n'), ((2569, 2616), 'torchvision.transforms.RandomApply', 'transforms.RandomApply', (['[random_dilate]'], {'p': '(0.15)'}), '([random_dilate], p=0.15)\n', (2591, 2616), True, 'import torchvision.transforms as transforms\n'), ((2703, 2749), 'torchvision.transforms.RandomApply', 'transforms.RandomApply', (['[random_erode]'], {'p': '(0.15)'}), '([random_erode], p=0.15)\n', (2725, 2749), True, 'import torchvision.transforms as transforms\n'), ((2836, 2955), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '(3)', 'scale': '(0.95, 1.05)', 'shear': '(3)', 'resample': 'Image.NEAREST', 'fillcolor': '(255, 255, 255)'}), '(degrees=3, scale=(0.95, 1.05), shear=3, resample=\n Image.NEAREST, fillcolor=(255, 255, 255))\n', (2859, 2955), True, 'import torchvision.transforms as transforms\n'), ((3234, 3256), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {}), '()\n', (3254, 3256), True, 'import torchvision.transforms as transforms\n'), ((3270, 3291), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3289, 3291), True, 'import torchvision.transforms as transforms\n'), ((3372, 3419), 'torchvision.transforms.RandomApply', 'transforms.RandomApply', (['[random_dilate]'], {'p': '(0.15)'}), '([random_dilate], p=0.15)\n', (3394, 3419), True, 'import torchvision.transforms as transforms\n'), ((3506, 3552), 'torchvision.transforms.RandomApply', 'transforms.RandomApply', (['[random_erode]'], {'p': '(0.15)'}), '([random_erode], p=0.15)\n', (3528, 3552), True, 'import torchvision.transforms as transforms\n'), ((3639, 3758), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '(3)', 'scale': '(0.95, 1.05)', 'shear': '(3)', 'resample': 'Image.NEAREST', 'fillcolor': '(255, 255, 255)'}), '(degrees=3, scale=(0.95, 1.05), shear=3, resample=\n Image.NEAREST, fillcolor=(255, 255, 255))\n', (3662, 3758), True, 'import torchvision.transforms as transforms\n'), ((4075, 4096), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4094, 4096), True, 'import torchvision.transforms as transforms\n'), ((4233, 4255), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {}), '()\n', (4253, 4255), True, 'import torchvision.transforms as transforms\n'), ((4265, 4286), 
'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4284, 4286), True, 'import torchvision.transforms as transforms\n'), ((4355, 4376), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4374, 4376), True, 'import torchvision.transforms as transforms\n'), ((7456, 7509), 'numpy.ones', 'np.ones', (['(self.norm_height, im.shape[1], 3)', 'np.uint8'], {}), '((self.norm_height, im.shape[1], 3), np.uint8)\n', (7463, 7509), True, 'import numpy as np\n'), ((9955, 9997), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.3)', '(0.3)', '(0.3)', '(0.3)'], {}), '(0.3, 0.3, 0.3, 0.3)\n', (9977, 9997), True, 'import torchvision.transforms as transforms\n'), ((10012, 10045), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {'p': '(0.1)'}), '(p=0.1)\n', (10038, 10045), True, 'import torchvision.transforms as transforms\n'), ((12723, 12743), 'torch.from_numpy', 'torch.from_numpy', (['im'], {}), '(im)\n', (12739, 12743), False, 'import torch\n'), ((2149, 2169), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2163, 2169), False, 'import random\n'), ((2171, 2191), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2185, 2191), False, 'import random\n'), ((2349, 2369), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2363, 2369), False, 'import random\n'), ((2371, 2391), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2385, 2391), False, 'import random\n'), ((674, 702), 'albumentations.Blur', 'A.Blur', ([], {'blur_limit': '(10)', 'p': '(0.5)'}), '(blur_limit=10, p=0.5)\n', (680, 702), True, 'import albumentations as A\n'), ((715, 748), 'albumentations.MedianBlur', 'A.MedianBlur', ([], {'blur_limit': '(5)', 'p': '(0.5)'}), '(blur_limit=5, p=0.5)\n', (727, 748), True, 'import albumentations as A\n'), ((791, 914), 'albumentations.CoarseDropout', 'A.CoarseDropout', ([], {'max_holes': '(7)', 'min_holes': '(3)', 'min_height': '(3)', 'min_width': '(1)', 'max_height': '(16)', 'max_width': '(4)', 'fill_value': '(255)', 'p': '(0.5)'}), '(max_holes=7, min_holes=3, min_height=3, min_width=1,\n max_height=16, max_width=4, fill_value=255, p=0.5)\n', (806, 914), True, 'import albumentations as A\n'), ((980, 1103), 'albumentations.CoarseDropout', 'A.CoarseDropout', ([], {'max_holes': '(7)', 'min_holes': '(3)', 'min_height': '(3)', 'min_width': '(1)', 'max_height': '(16)', 'max_width': '(4)', 'fill_value': '(170)', 'p': '(0.5)'}), '(max_holes=7, min_holes=3, min_height=3, min_width=1,\n max_height=16, max_width=4, fill_value=170, p=0.5)\n', (995, 1103), True, 'import albumentations as A\n'), ((1169, 1291), 'albumentations.CoarseDropout', 'A.CoarseDropout', ([], {'max_holes': '(7)', 'min_holes': '(3)', 'min_height': '(3)', 'min_width': '(1)', 'max_height': '(16)', 'max_width': '(4)', 'fill_value': '(85)', 'p': '(0.5)'}), '(max_holes=7, min_holes=3, min_height=3, min_width=1,\n max_height=16, max_width=4, fill_value=85, p=0.5)\n', (1184, 1291), True, 'import albumentations as A\n'), ((1357, 1478), 'albumentations.CoarseDropout', 'A.CoarseDropout', ([], {'max_holes': '(7)', 'min_holes': '(3)', 'min_height': '(3)', 'min_width': '(1)', 'max_height': '(16)', 'max_width': '(4)', 'fill_value': '(0)', 'p': '(0.5)'}), '(max_holes=7, min_holes=3, min_height=3, min_width=1,\n max_height=16, max_width=4, fill_value=0, p=0.5)\n', (1372, 1478), True, 'import albumentations as A\n'), ((4688, 4713), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), 
'(-100, 100)\n', (4702, 4713), False, 'import random\n'), ((4782, 4807), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (4796, 4807), False, 'import random\n'), ((10304, 10329), 'os.path.basename', 'os.path.basename', (['im_name'], {}), '(im_name)\n', (10320, 10329), False, 'import os\n')]
import abc import numpy as np import xarray as xr from .registry import register class Regridder(object): """Generic regridder interface.""" __metaclass__ = abc.ABCMeta def __init__(self, input_grid, output_grid, method=None, **kwargs): self.input_grid = input_grid self.output_grid = output_grid self.method = method self._params = kwargs @property def input_grid(self): return self._input_grid @input_grid.setter def input_grid(self, value): self.cached = False self._input_grid = value @property def output_grid(self): return self._output_grid @output_grid.setter def output_grid(self, value): self.cached = False self._output_grid = value def setup(self): pass def regrid(self, data): pass def clean(self): pass def __call__(self, data): if ((self.output_grid.lat.shape == self.input_grid.lat.shape) and (self.output_grid.lon.shape == self.input_grid.lon.shape) and (self.output_grid.lat.values == self.input_grid.lat.values).all() and (self.output_grid.lon.values == self.input_grid.lon.values).all()): return data if not self.cached: self.setup() self.cached = True return self.regrid(data) @register('regridder.esmf') class XesmfRegridder(Regridder): """xESMF regridder""" def setup(self): import xesmf as xe self._regridder = xe.Regridder(self.input_grid, self.output_grid, self.method, **self._params) def regrid(self, data): attrs = data.attrs result = self._regridder(data) result.attrs.update(attrs) return result def clean(self): self._regridder.clean_weight_file() @register('regridder.scipy') class ScipyRegridder(Regridder): """Scipy regridder""" def regrid(self, data): return data.interp(x=self.output_grid.x, y=self.output_grid.y, method=self.method) @register('regridder.pyresample') class PyresampleRegridder(Regridder): """Pyresample regridder""" def setup(self): from pyresample import geometry, kd_tree, bilinear input_def = geometry.SwathDefinition(lons=self.input_grid.lon.values, lats=self.input_grid.lat.values) output_def = geometry.SwathDefinition(lons=self.output_grid.lon.values, lats=self.output_grid.lat.values) if not self.method or self.method == 'nearest': # Set default neighbours used in stencil to 1. Normal default is # 8, which won't work if the input and output grids are similar in # size and resolution. self._params.setdefault('neighbours', 1) self._args = kd_tree.get_neighbour_info(input_def, output_def, 50000, **self._params) self._regridder = kd_tree.get_sample_from_neighbour_info else: raise NotImplementedError('Only nearest-neighbor regridding is ' 'currently supported for pyresample backend') def regrid(self, data): nt = len(data.time) shp = [nt] + list(self.output_grid.lon.shape) out = xr.DataArray(np.zeros(shp), dims=data.dims, coords={'lon': self.output_grid.lon, 'lat': self.output_grid.lat}, name=data.name, attrs=data.attrs) for i in range(nt): tstep = data.isel(time=i) if tstep.isnull().any(): tstep = tstep.to_masked_array() else: tstep = tstep.values out[i] = self._regridder('nn', shp[1:], tstep, *self._args) return out
[ "xesmf.Regridder", "pyresample.geometry.SwathDefinition", "pyresample.kd_tree.get_neighbour_info", "numpy.zeros" ]
[((1591, 1667), 'xesmf.Regridder', 'xe.Regridder', (['self.input_grid', 'self.output_grid', 'self.method'], {}), '(self.input_grid, self.output_grid, self.method, **self._params)\n', (1603, 1667), True, 'import xesmf as xe\n'), ((2392, 2487), 'pyresample.geometry.SwathDefinition', 'geometry.SwathDefinition', ([], {'lons': 'self.input_grid.lon.values', 'lats': 'self.input_grid.lat.values'}), '(lons=self.input_grid.lon.values, lats=self.\n input_grid.lat.values)\n', (2416, 2487), False, 'from pyresample import geometry, kd_tree, bilinear\n'), ((2551, 2648), 'pyresample.geometry.SwathDefinition', 'geometry.SwathDefinition', ([], {'lons': 'self.output_grid.lon.values', 'lats': 'self.output_grid.lat.values'}), '(lons=self.output_grid.lon.values, lats=self.\n output_grid.lat.values)\n', (2575, 2648), False, 'from pyresample import geometry, kd_tree, bilinear\n'), ((3022, 3094), 'pyresample.kd_tree.get_neighbour_info', 'kd_tree.get_neighbour_info', (['input_def', 'output_def', '(50000)'], {}), '(input_def, output_def, 50000, **self._params)\n', (3048, 3094), False, 'from pyresample import geometry, kd_tree, bilinear\n'), ((3539, 3552), 'numpy.zeros', 'np.zeros', (['shp'], {}), '(shp)\n', (3547, 3552), True, 'import numpy as np\n')]
import os import numpy as np import tensorflow as tf from config import TRAINING_CONFIG from core import GameConfig as Game from core import Board class PolicyValueNetwork: def __init__(self, model_name=None): with tf.variable_scope("Dataset"): input_shape = Board().encoded_states().shape # (6, 15, 15) self.iter = tf.data.Iterator.from_structure( (tf.float32, tf.float32, tf.float32), ( tf.TensorShape((None, *input_shape)), tf.TensorShape((None, )), tf.TensorShape((None, Game["board_size"])) ) ) self.mini_batch = self.iter.get_next() self.inputs, self.state_value, self.action_probs = self.mini_batch inputs_t = tf.transpose(self.inputs, [0, 2, 3, 1]) # channel_last self.inputs_t = inputs_t with tf.variable_scope("SharedNet"): conv_blocks = [] for i in range(3): conv_blocks.append( tf.layers.conv2d( inputs=inputs_t if i == 0 else conv_blocks[i - 1], filters=2**(i + 5), # 32, 64, 128 kernel_size=(3, 3), padding="same", activation=tf.nn.relu, name="conv_{}".format(i) ) ) shared_output = conv_blocks[-1] with tf.variable_scope("PolicyHead"): policy_conv = tf.layers.conv2d( inputs=shared_output, filters=4, kernel_size=(1, 1), padding="same", activation=tf.nn.relu ) policy_flatten = tf.layers.flatten(policy_conv) self.policy_logits = tf.layers.dense(policy_flatten, Game["board_size"]) self.policy_output = tf.nn.softmax(self.policy_logits, name="policy_output") with tf.variable_scope("ValueHead"): value_conv = tf.layers.conv2d( inputs=shared_output, filters=2, kernel_size=(1, 1), padding="same", activation=tf.nn.relu ) value_flatten = tf.layers.flatten(value_conv) value_hidden = tf.layers.dense(value_flatten, 64, tf.nn.relu) value_logits = tf.layers.dense(value_hidden, 1) self.value_output = tf.reshape(tf.nn.tanh(value_logits), [-1], name="value_output") self.session = tf.Session() self.saver = tf.train.Saver() self.model_file = self._parse_path(model_name) self.initialized = False def compile(self): """ Loss/Entropy and Optimization definition """ with tf.variable_scope("Loss-Entropy"): # losses value_loss = tf.losses.mean_squared_error(self.state_value, self.value_output) policy_loss = tf.losses.softmax_cross_entropy(self.action_probs, self.policy_logits) l2_reg = tf.contrib.layers.apply_regularization( tf.contrib.layers.l2_regularizer(1e-4), [v for v in tf.trainable_variables() if "bias" not in v.name] ) # final loss self.loss = value_loss + policy_loss + l2_reg self.entropy = tf.reduce_mean( -tf.reduce_sum(self.policy_output * tf.log(self.policy_output + 1e-10), 1) ) with tf.variable_scope("Optimizer"): self.lr = tf.placeholder(tf.float32, name="learning_rate") self.opt = tf.train.AdamOptimizer(self.lr).minimize(self.loss) self._lazy_initialize() def build_dataset(self, generator, num_epoches): """ Generator output: ([state_batch, value_batch, policy_batch]*num_epoches...) """ placeholders = [self.inputs, self.value_output, self.policy_output] self.dataset = tf.data.Dataset.from_generator( generator, tuple(data.dtype for data in placeholders), tuple(data.shape for data in placeholders) ) self.dataset = self.dataset.prefetch(num_epoches) self.session.run(self.iter.make_initializer(self.dataset)) def train_step(self, optimizer, num_epoches): """ Multiple epoches of network training steps. 
""" old_probs = None # will be assigned later for i in range(num_epoches): new_probs, loss, entropy, _ = self.session.run( [self.policy_output, self.loss, self.entropy, self.opt], feed_dict={self.lr: optimizer["lr"]} ) if i == 0: old_probs = new_probs + 1e-10 kl = 0.0 # KL divergence is apparently zero else: new_probs += 1e-10 kl = (old_probs * np.log(old_probs / new_probs)).sum(1).mean() # early stop when KL diverges too much if kl > 4 * optimizer["kl_target"]: for _ in range(num_epoches - 1 - i): self.session.run(self.mini_batch) # skip redundant data break return loss, entropy, kl, i + 1 def eval_state(self, state): """ Evaluate a board state. """ self._lazy_initialize() value, probs = self.session.run( [self.value_output, self.policy_output], feed_dict={self.inputs: state.encoded_states()[np.newaxis, :]} ) return value[0], probs[0] def save_model(self, model_name): self.saver.save(self.session, self._parse_path(model_name)) def restore_model(self, model_name): model_file = self._parse_path(model_name) if model_file is not None: self.saver.restore(self.session, model_file) self.initialized = True def _parse_path(self, model_name): if '/' in model_name: return model_name # already parsed elif model_name == "latest": checkpoint = tf.train.get_checkpoint_state(self._parse_path("")) return checkpoint.model_checkpoint_path if checkpoint else None elif model_name is not None: return f"{TRAINING_CONFIG['model_path']}/tf/{model_name}" else: return None def _lazy_initialize(self): if self.initialized: return if not self.model_file or not os.path.exists(self.model_file): self.session.run(tf.global_variables_initializer()) self.initialized = True else: self.restore_model(self.model_file)
[ "tensorflow.layers.flatten", "tensorflow.transpose", "tensorflow.contrib.layers.l2_regularizer", "numpy.log", "tensorflow.nn.softmax", "tensorflow.log", "os.path.exists", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.layers.conv2d", "tensorflow.trainable_variables", "tensorflow.train.AdamOptimizer", "tensorflow.nn.tanh", "tensorflow.variable_scope", "tensorflow.layers.dense", "tensorflow.losses.softmax_cross_entropy", "tensorflow.train.Saver", "tensorflow.global_variables_initializer", "core.Board", "tensorflow.losses.mean_squared_error", "tensorflow.TensorShape" ]
[((2581, 2593), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2591, 2593), True, 'import tensorflow as tf\n'), ((2615, 2631), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2629, 2631), True, 'import tensorflow as tf\n'), ((231, 259), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Dataset"""'], {}), "('Dataset')\n", (248, 259), True, 'import tensorflow as tf\n'), ((814, 853), 'tensorflow.transpose', 'tf.transpose', (['self.inputs', '[0, 2, 3, 1]'], {}), '(self.inputs, [0, 2, 3, 1])\n', (826, 853), True, 'import tensorflow as tf\n'), ((921, 951), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""SharedNet"""'], {}), "('SharedNet')\n", (938, 951), True, 'import tensorflow as tf\n'), ((1499, 1530), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""PolicyHead"""'], {}), "('PolicyHead')\n", (1516, 1530), True, 'import tensorflow as tf\n'), ((1558, 1670), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'shared_output', 'filters': '(4)', 'kernel_size': '(1, 1)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=shared_output, filters=4, kernel_size=(1, 1),\n padding='same', activation=tf.nn.relu)\n", (1574, 1670), True, 'import tensorflow as tf\n'), ((1790, 1820), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['policy_conv'], {}), '(policy_conv)\n', (1807, 1820), True, 'import tensorflow as tf\n'), ((1854, 1905), 'tensorflow.layers.dense', 'tf.layers.dense', (['policy_flatten', "Game['board_size']"], {}), "(policy_flatten, Game['board_size'])\n", (1869, 1905), True, 'import tensorflow as tf\n'), ((1939, 1994), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.policy_logits'], {'name': '"""policy_output"""'}), "(self.policy_logits, name='policy_output')\n", (1952, 1994), True, 'import tensorflow as tf\n'), ((2009, 2039), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ValueHead"""'], {}), "('ValueHead')\n", (2026, 2039), True, 'import tensorflow as tf\n'), ((2066, 2178), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'shared_output', 'filters': '(2)', 'kernel_size': '(1, 1)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=shared_output, filters=2, kernel_size=(1, 1),\n padding='same', activation=tf.nn.relu)\n", (2082, 2178), True, 'import tensorflow as tf\n'), ((2297, 2326), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['value_conv'], {}), '(value_conv)\n', (2314, 2326), True, 'import tensorflow as tf\n'), ((2354, 2400), 'tensorflow.layers.dense', 'tf.layers.dense', (['value_flatten', '(64)', 'tf.nn.relu'], {}), '(value_flatten, 64, tf.nn.relu)\n', (2369, 2400), True, 'import tensorflow as tf\n'), ((2428, 2460), 'tensorflow.layers.dense', 'tf.layers.dense', (['value_hidden', '(1)'], {}), '(value_hidden, 1)\n', (2443, 2460), True, 'import tensorflow as tf\n'), ((2830, 2863), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Loss-Entropy"""'], {}), "('Loss-Entropy')\n", (2847, 2863), True, 'import tensorflow as tf\n'), ((2911, 2976), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['self.state_value', 'self.value_output'], {}), '(self.state_value, self.value_output)\n', (2939, 2976), True, 'import tensorflow as tf\n'), ((3003, 3073), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['self.action_probs', 'self.policy_logits'], {}), '(self.action_probs, self.policy_logits)\n', (3034, 3073), True, 'import tensorflow as tf\n'), ((3528, 3558), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Optimizer"""'], 
{}), "('Optimizer')\n", (3545, 3558), True, 'import tensorflow as tf\n'), ((3582, 3630), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""learning_rate"""'}), "(tf.float32, name='learning_rate')\n", (3596, 3630), True, 'import tensorflow as tf\n'), ((2504, 2528), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['value_logits'], {}), '(value_logits)\n', (2514, 2528), True, 'import tensorflow as tf\n'), ((3151, 3191), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.0001)'], {}), '(0.0001)\n', (3183, 3191), True, 'import tensorflow as tf\n'), ((6514, 6545), 'os.path.exists', 'os.path.exists', (['self.model_file'], {}), '(self.model_file)\n', (6528, 6545), False, 'import os\n'), ((6576, 6609), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6607, 6609), True, 'import tensorflow as tf\n'), ((482, 518), 'tensorflow.TensorShape', 'tf.TensorShape', (['(None, *input_shape)'], {}), '((None, *input_shape))\n', (496, 518), True, 'import tensorflow as tf\n'), ((540, 563), 'tensorflow.TensorShape', 'tf.TensorShape', (['(None,)'], {}), '((None,))\n', (554, 563), True, 'import tensorflow as tf\n'), ((586, 628), 'tensorflow.TensorShape', 'tf.TensorShape', (["(None, Game['board_size'])"], {}), "((None, Game['board_size']))\n", (600, 628), True, 'import tensorflow as tf\n'), ((3654, 3685), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (3676, 3685), True, 'import tensorflow as tf\n'), ((287, 294), 'core.Board', 'Board', ([], {}), '()\n', (292, 294), False, 'from core import Board\n'), ((3219, 3243), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3241, 3243), True, 'import tensorflow as tf\n'), ((3461, 3495), 'tensorflow.log', 'tf.log', (['(self.policy_output + 1e-10)'], {}), '(self.policy_output + 1e-10)\n', (3467, 3495), True, 'import tensorflow as tf\n'), ((4942, 4971), 'numpy.log', 'np.log', (['(old_probs / new_probs)'], {}), '(old_probs / new_probs)\n', (4948, 4971), True, 'import numpy as np\n')]
""" Atmosphere Spectral Response ============================ This class calculates the output flux of an astronomical object as a funtction of the atmosphere spectral response. """ import os import numpy as np import pandas as pd from scipy.interpolate import splev, splrep class Atmosphere_Spectral_Response: """Atmosphere Spectral Response Class.""" _SPECTRAL_RESPONSE_FILE = os.path.join( "Atmosphere_Spectral_Response", "atmosphere_spectral_response.xlsx" ) def __init__(self): """Initialize the class.""" pass def _read_spreadsheet(self): ss = np.asarray(pd.read_excel(self._SPECTRAL_RESPONSE_FILE)) atm_wavelength_interval = [float(value) for value in ss[1:, 0]] transmitance = [float(value) for value in ss[1:, 1]] self.atm_wavelength_interval = np.asarray(atm_wavelength_interval) self.transmitance = np.asarray(transmitance) def _calculate_spline(self, wavelength_interval): spl = splrep(self.atm_wavelength_interval, self.transmitance) transmitance = splev(wavelength_interval, spl) return transmitance def apply_atmosphere_spectral_response( self, star_specific_flux, l_init, l_final, l_step ): """Apply the atmosphere spectral response. This function applies the atmosphere spectral response on the calculated star specific flux Parameters ---------- star_specific_flux : array like Specific flux of the star Returns ------- star_specific_flux : array like Specific flux of the star after the application atmosphere spectral response. l_init: int Initial wavelength in nanometers. l_final: int Final wavelength in nanometers. l_step: int Step for the wavelength interval in nanometers. """ self.star_specific_flux = star_specific_flux wavelength_interval = range(l_init, l_final, l_step) self._read_spreadsheet() transmitance = self._calculate_spline(wavelength_interval) new_specific_flux = np.multiply(star_specific_flux[0, :], transmitance) self.star_specific_flux[0, :] = new_specific_flux return self.star_specific_flux
[ "numpy.multiply", "numpy.asarray", "os.path.join", "scipy.interpolate.splrep", "scipy.interpolate.splev", "pandas.read_excel" ]
[((392, 477), 'os.path.join', 'os.path.join', (['"""Atmosphere_Spectral_Response"""', '"""atmosphere_spectral_response.xlsx"""'], {}), "('Atmosphere_Spectral_Response',\n 'atmosphere_spectral_response.xlsx')\n", (404, 477), False, 'import os\n'), ((837, 872), 'numpy.asarray', 'np.asarray', (['atm_wavelength_interval'], {}), '(atm_wavelength_interval)\n', (847, 872), True, 'import numpy as np\n'), ((901, 925), 'numpy.asarray', 'np.asarray', (['transmitance'], {}), '(transmitance)\n', (911, 925), True, 'import numpy as np\n'), ((995, 1050), 'scipy.interpolate.splrep', 'splrep', (['self.atm_wavelength_interval', 'self.transmitance'], {}), '(self.atm_wavelength_interval, self.transmitance)\n', (1001, 1050), False, 'from scipy.interpolate import splev, splrep\n'), ((1074, 1105), 'scipy.interpolate.splev', 'splev', (['wavelength_interval', 'spl'], {}), '(wavelength_interval, spl)\n', (1079, 1105), False, 'from scipy.interpolate import splev, splrep\n'), ((2164, 2215), 'numpy.multiply', 'np.multiply', (['star_specific_flux[0, :]', 'transmitance'], {}), '(star_specific_flux[0, :], transmitance)\n', (2175, 2215), True, 'import numpy as np\n'), ((620, 663), 'pandas.read_excel', 'pd.read_excel', (['self._SPECTRAL_RESPONSE_FILE'], {}), '(self._SPECTRAL_RESPONSE_FILE)\n', (633, 663), True, 'import pandas as pd\n')]
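The apply_atmosphere_spectral_response path above needs the bundled Excel spreadsheet on disk, but the interpolation step can be exercised on its own. A minimal sketch, assuming a synthetic (made-up) transmitance curve is injected in place of the spreadsheet data:

import numpy as np

asr = Atmosphere_Spectral_Response()
# bypass _read_spreadsheet() by injecting an illustrative transmitance curve
asr.atm_wavelength_interval = np.linspace(350, 1100, 20)   # wavelengths in nm
asr.transmitance = np.linspace(0.60, 0.95, 20)        # made-up transmitance values

transmitance = asr._calculate_spline(range(400, 1000, 50))
print(np.round(transmitance, 3))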
########################################################################
#
# Date: Sept 2009  Authors: <NAME>
#
#    <EMAIL>
#
#    The Scripps Research Institute (TSRI)
#    Molecular Graphics Lab
#    La Jolla, CA 92037, USA
#
# Copyright: <NAME> and TSRI
#
#########################################################################


def saveInstancesMatsToFile(filename, matrices):
    """
    save a list of instance matrices to a file

    status = saveInstancesMatsToFile(filename, matrices)

    status will be 1 if it worked
    """
    f = open(filename, 'w')
    if not f:
        return 0
    for mat in matrices:
        for v in mat.flatten():
            f.write("%f " % v)
        f.write("\n")
    f.close()
    return 1


import numpy


def readInstancesMatsFromFile(filename):
    """
    read a list of instance matrices from a file

    matrices = readInstancesMatsFromFile(filename)
    """
    f = open(filename)
    if not f:
        return None
    data = f.readlines()
    f.close()
    mats = []
    for line in data:
        # wrap map() in list() so numpy builds a numeric array under Python 3
        mats.append(list(map(float, line.split())))
    mats = numpy.array(mats)
    mats.shape = (-1, 4, 4)
    return mats
[ "numpy.array" ]
[((1112, 1129), 'numpy.array', 'numpy.array', (['mats'], {}), '(mats)\n', (1123, 1129), False, 'import numpy\n')]
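A quick round-trip sketch of the two helpers above (the 4x4 matrices and the temporary file path are illustrative only, and it relies on the list(map(...)) adjustment noted in the reader):

import os
import tempfile
import numpy

mats_in = [numpy.eye(4), numpy.eye(4) * 2.0]
path = os.path.join(tempfile.gettempdir(), "instance_mats.txt")

assert saveInstancesMatsToFile(path, mats_in) == 1
mats_out = readInstancesMatsFromFile(path)
print(mats_out.shape)   # -> (2, 4, 4)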
from .road_list import * import numpy as np from litdrive.selfdriving.enums import ManeuverState def fitPolysToPoly(list_p1d_u, list_p1d_v, list_hdg, list_x, list_y): pspace=np.linspace(0.0, 1.0, 100) pnts_x=list() pnts_y=list() if(len(list_p1d_v)!=len(list_p1d_u) or len(list_p1d_v)!=len(list_hdg) or len(list_p1d_v)!=len(list_x) or len(list_p1d_v)!=len(list_y)): raise Exception("ERROR: Parameter lists must have equal lengths!") for i in range(0,len(list_p1d_v)): p1d_u=np.poly1d(list_p1d_u[i]) p1d_v=np.poly1d(list_p1d_v[i]) p1d_x=p1d_u*np.cos(list_hdg[i])-p1d_v*np.sin(list_hdg[i]) p1d_y=p1d_u*np.sin(list_hdg[i])+p1d_v*np.cos(list_hdg[i]) p1d_x[0]+=list_x[i] p1d_y[0]+=list_y[i] pnts_x.append(p1d_x(pspace)) pnts_y.append(p1d_y(pspace)) pnts_x=np.concatenate(pnts_x) pnts_y=np.concatenate(pnts_y) pspace=np.linspace(0.0, 1.0, 100*len(list_p1d_u)) p1d_fx = np.poly1d(np.polyfit(pspace, pnts_x, 3)) p1d_fy = np.poly1d(np.polyfit(pspace, pnts_y, 3)) return (p1d_fx[0],p1d_fx[1],p1d_fx[2],p1d_fx[3],p1d_fy[0],p1d_fy[1],p1d_fy[2],p1d_fy[3]) def getPoly3Params(geo, scaling_factor, road_id): try: geo_x=float(geo.get("x"))*scaling_factor geo_y=float(geo.get("y"))*scaling_factor except (TypeError, ValueError) as e: raise Exception("Road {} Geometry has no or invalid x or y coordinates!".format(road_id)) # Try to get a paramPoly3 geo_pp3 = geo.findall("paramPoly3") if(geo_pp3 is None or len(geo_pp3)>1): raise Exception("Road {} Geometry has no or multiple paramPoly3. Only one paramPoly3 per road is supported!".format(road_id)) geo_pp3=geo_pp3[0] try: geo_hdg = float(geo.get("hdg")) geo_length = float(geo.get("length")) except (TypeError, ValueError) as e: raise Exception("Road {} Geometry has an invalid hdg or length entry!".format(road_id)) try: pp3_aU = float(geo_pp3.get("aU"))*scaling_factor pp3_bU = float(geo_pp3.get("bU"))*scaling_factor pp3_cU = float(geo_pp3.get("cU"))*scaling_factor pp3_dU = float(geo_pp3.get("dU"))*scaling_factor pp3_aV = float(geo_pp3.get("aV"))*scaling_factor pp3_bV = float(geo_pp3.get("bV"))*scaling_factor pp3_cV = float(geo_pp3.get("cV"))*scaling_factor pp3_dV = float(geo_pp3.get("dV"))*scaling_factor except (TypeError, ValueError) as e: raise Exception("Road {} paramPoly3 has invalid {a,b,c,d}{U,V} entries!".format(road_id)) return (pp3_aU, pp3_bU, pp3_cU, pp3_dU, pp3_aV, pp3_bV, pp3_cV, pp3_dV, geo_hdg, geo_x, geo_y) def ReadOpenDrive(xml_tree, scaling_factor=1, lane_offsets=(0.0, 0.25, 0.75), angle_left_threshold=np.pi/4, angle_right_threshold=-np.pi/4): road_list = RoadList() new_lane_id=1 #new_road_id=0 new_junction_id=0 if(xml_tree is None): raise Exception("OpenDRIVE not loaded, exiting...") root_node=xml_tree.getroot() if(root_node is None): raise Exception("ERROR: OpenDRIVE has no root-node, quitting!") header = root_node.find("header") if(header is not None): out_str=list() out_str.append("OpenDRIVE File Header Information:\n") if header.get('revMajor') is not None: out_str.append("Major Revision: ") out_str.append(str(header.get('revMajor'))) out_str.append("\n") if header.get('revMinor') is not None: out_str.append("Minor Revision: ") out_str.append(str(header.get('revMinor'))) out_str.append("\n") if header.get('name') is not None: out_str.append("Name: ") out_str.append(str(header.get('name'))) out_str.append("\n") if header.get('version') is not None: out_str.append("Version: ") out_str.append(str(header.get('version'))) out_str.append("\n") if header.get('date') is not None: out_str.append("Date: ") out_str.append(str(header.get('revMajor'))) out_str.append("\n") 
if header.get('north') is not None: out_str.append("North: ") out_str.append(str(header.get('north'))) out_str.append("\n") if header.get('south') is not None: out_str.append("South: ") out_str.append(str(header.get('south'))) out_str.append("\n") if header.get('east') is not None: out_str.append("East: ") out_str.append(str(header.get('east'))) out_str.append("\n") if header.get('west') is not None: out_str.append("West: ") out_str.append(str(header.get('west'))) out_str.append("\n") if header.get('vendor') is not None: out_str.append("Vendor: ") out_str.append(str(header.get('vendor'))) out_str.append("\n") print("".join(out_str)) else: print("WARNING: OpenDRIVE file has no header tag!") print("INFO: Loading junctions...") #Dictionary with the junction-id as index #each entry then holds another dict with the connection id as index. #the third dict then holds all values. #Clarification: incomingRoad is the Road ID that comes to the junction, connectingRoad is the Road ID inside the junction. #If the contactPoint is end, then the Polynomial of the connectingRoad is reversed. junction_dict=dict() #First, iterate over all junctions and store them for later usage. junctions = root_node.findall("junction") if(junctions is None): print("WARNING: OpenDRIVE has no junctions!") for junction in junctions: try: junction_id=int(junction.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Junction with invalid ID") connections=junction.findall("connection") if(connections is None): raise Exception("ERROR: Junction {} has no connection entries!") junction_dict[junction_id]=dict() for connection in connections: try: connecting_road=int(connection.get("connectingRoad")) incoming_road=int(connection.get("incomingRoad")) connection_id=int(connection.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Junction {} has a connection with invalid connectingRoad, incomingRoad or id!".format(junction_id)) contact_point = connection.get("contactPoint") if(contact_point is None): raise Exception("ERROR: Junction {} has no contactPoint!".format(junction_id)) lane_links=connection.findall("laneLink") if(lane_links is None or len(lane_links)!=1): raise Exception("ERROR: Junction {} connection {} has no or multiple laneLink tags, only one supported!".format(junction_id, connection_id)) lane_link=lane_links[0] try: link_from=int(lane_link.get("from")) link_to=int(lane_link.get("to")) except (ValueError, TypeError) as e: raise Exception("ERROR: Junction {} connection {} laneLink has invalid from or to tags!".format(junction_id, connection_id)) junction_dict[junction_id][connection_id]={"connectingRoad":connecting_road, "incomingRoad": incoming_road, "from":link_from, "to": link_to, "contactPoint": contact_point} # roads is a list of "road" tags print("INFO: Loading roads...") roads = root_node.findall("road") lane_pre_od_ids=dict() lane_suc_od_ids=dict() if(roads is None): raise Exception("ERROR: OpenDRIVE has no roads!") else: print("INFO: OpenDRIVE has {} roads!".format(len(roads))) overall_len=0.0 overall_lanes=0 #This holds all junction lane id's with the successor as first index and predecessor as second index. inside there is a list of roads. 
This is used to identify what is left and what is right junction_lane_table=dict() for road in roads: # Get the Road-ID and the id of the junction it belongs to try: road_id=int(road.get("id")) except (TypeError, ValueError) as e: raise Exception("ERROR: Found a Road with no valid ID, quitting!") try: road_junction = int(road.get("junction")) except (TypeError, ValueError) as e: raise Exception("ERROR: Road {} Junction entry is invalid!".format(road_id)) try: road_len=float(road.get("length"))*scaling_factor overall_len+=road_len except: print("WARNING: Road {} has no valid length, ignoring!".format(road_id)) plan_view=road.find("planView") if(plan_view is None): raise Exception("Road {} has no plan view!".format(road_id)) geometries = plan_view.findall("geometry") if(geometries is None): raise Exception("ERROR: Road {} PlanView has no Geometry entries!".format(road_id)) if(len(geometries)!=1): print("WARNING: Road {} PlanView has multiple Geometry entries! Fitting poly".format(road_id)) #raise Exception("ERROR: Road {} PlanView has no or multiple Geometry entries!".format(road_id)) list_p1d_u=list() list_p1d_v=list() list_hdg=list() list_x=list() list_y=list() for geo in geometries: poly_params=getPoly3Params(geo, scaling_factor, road_id) #list_p1d_u.append((pp3_dU,pp3_cU,pp3_bU,pp3_aU)) list_p1d_u.append(poly_params[3::-1]) #same with V list_p1d_v.append(poly_params[7:3:-1]) list_hdg.append(poly_params[8]) list_x.append(poly_params[9]) list_y.append(poly_params[10]) pp3_aU, pp3_bU, pp3_cU, pp3_dU, pp3_aV, pp3_bV, pp3_cV, pp3_dV = fitPolysToPoly(list_p1d_u, list_p1d_v, list_hdg, list_x, list_y) geo_hdg=0 geo_x=0 geo_y=0 else: geo=geometries[0] pp3_aU, pp3_bU, pp3_cU, pp3_dU, pp3_aV, pp3_bV, pp3_cV, pp3_dV, geo_hdg, geo_x, geo_y = getPoly3Params(geo, scaling_factor, road_id) if(road_junction>=0): #To have the right "polynomial direction" of a junction element, we look up the "contactPoint" in the junction for this road #it this value is "end" we reverse the Polynom to deliver the function values from 1 to 0, so we have the right direction. #print("INFO: Road {} is a junction road and becomes lane {}".format(road_id, new_lane_id)) if(road_junction not in junction_dict): raise Exception("Road {} uses junction id {} which does not exist!".format(road_id, road_junction)) junction_active=junction_dict[road_junction].items() lep3=LaneElementPoly3.fromOpenDrive(road_id, pp3_aU, pp3_bU, pp3_cU, pp3_dU, pp3_aV, pp3_bV, pp3_cV, pp3_dV, geo_hdg, geo_x, geo_y) lep3.setAsJunctionElement() for key, val in junction_active: if("connectingRoad" not in val or "contactPoint" not in val): raise Exception("WARNING: junction_dict is malformed, entry Junction ID {} connection id {} has no \"contactPoint\" or \"connectingRoad\"!".format(road_junction, key)) if(val["connectingRoad"]==road_id and val["contactPoint"]=="end"): #print("INFO: contactPoint for Junction ID {} connection id {} is end, reversing road id {}".format(road_junction, key, road_id)) lep3.reverse() break lanes = road.find("lanes") if(lanes is None): raise Exception("ERROR: Road {} has no lanes tag!".format(road_id)) # for laneOffset in lanes.findall("laneOffset"): # sPos = float(laneOffset.get("s")) # a = float(laneOffset.get("a")) # b = float(laneOffset.get("b")) # c = float(laneOffset.get("c")) # d = float(laneOffset.get("d")) # if(abs(a)>0.001 or abs(b)>0.001 or abs(c)>0.001 or abs(d)>0.001): # print("WARNING: Road {} has an laneOffset that could be important! 
Ignoring!".format(road_id)) rl = RoadElement() lane_sections=lanes.findall("laneSection") if(lane_sections is None or len(lane_sections)>1): raise Exception("WARNING: Road {} has no or multiple lane sections!".format(road_id)) #could be replaced by a for-each loop, if required... lane_section=lane_sections[0] section_right=lane_section.find("right") section_left=lane_section.find("left") if(section_left is not None and section_right is not None): raise Exception("ERROR: Road {} is a junction and has left and right lanes! Unsupported!".format(road_id)) elif(section_right is not None): if(len(section_right.findall("lane"))>1): print("WARNING: Road {} right has multiple lanes but is a crossing, ignoring!".format(road_id)) # Add this lane as the inner right lane rl.addLane(new_lane_id, -1) right_lane=section_right.find("lane") right_link=right_lane.find("link") if(right_link is None): print("WARNING: Junction Road {} right lane has no link!".format(road_id)) else: right_suc=right_link.find("successor") if(right_suc is not None): try: lane_suc=int(right_suc.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} link successor has an invalid id!".format(road_id)) right_pre=right_link.find("predecessor") if(right_pre is not None): try: lane_pre=int(right_pre.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} link predecessor has an invalid id!".format(road_id)) elif(section_left is not None): if(len(section_left.findall("lane"))>1): print("WARNING: Road {} left has multiple lanes but is a crossing, ignoring!".format(road_id)) # Add this lane as the inner left lane rl.addLane(new_lane_id, 1) left_lane=section_left.find("lane") left_link=left_lane.find("link") if(left_link is None): print("WARNING: Junction Road {} left lane has no link!".format(road_id)) else: left_suc=left_link.find("successor") if(left_suc is not None): try: lane_suc=int(left_suc.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} link successor has an invalid id!".format(road_id)) left_pre=left_link.find("predecessor") if(left_pre is not None): try: lane_pre=int(left_pre.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} link predecessor has an invalid id!".format(road_id)) else: raise Exception("ERROR: Road {} has no left or right, but one is required for this junction!".format(road_id)) road_list.addLaneElement(lep3, new_lane_id, None, None) lane_pre_od_ids[new_lane_id]=lane_pre lane_suc_od_ids[new_lane_id]=lane_suc road_list.addRoad(rl, road_id) new_lane_id+=1 road_link=road else: lanes = road.find("lanes") if(lanes is None): raise Exception("ERROR: Road {} has no lanes tag!".format(road_id)) for laneOffset in lanes.findall("laneOffset"): sPos = float(laneOffset.get("s")) a = float(laneOffset.get("a")) b = float(laneOffset.get("b")) c = float(laneOffset.get("c")) d = float(laneOffset.get("d")) if(abs(a)>0.001 or abs(b)>0.001 or abs(c)>0.001 or abs(d)>0.001): print("WARNING: Road {} has an laneOffset that could be important! Ignoring!".format(road_id)) lane_sections=lanes.findall("laneSection") if(lane_sections is None or len(lane_sections)>1): raise Exception("WARNING: Road {} has no multiple lane sections!".format(road_id)) #could be replaced by a for-each loop, if required... 
lane_section=lane_sections[0] rl = RoadElement() section_left=lane_section.find("left") if(section_left is not None): lanes_left=section_left.findall("lane") if(lanes_left is None): raise Exception("ERROR: Road {} left has no lanes!".format(road_id)) for lane_left in lanes_left: try: left_id=int(lane_left.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} has a left entry with invalid id!".format(road_id)) if(not rl.addLane(new_lane_id, left_id)): raise Exception("ERROR: Road {} left id {} is not supported!".format(road_id, left_id)) lep3=LaneElementPoly3.fromOpenDrive(road_id, pp3_aU, pp3_bU, pp3_cU, pp3_dU, pp3_aV, pp3_bV, pp3_cV, pp3_dV, geo_hdg, geo_x, geo_y) lep3.addOffsetAndFit(lane_offsets, left_id) lane_pre=None lane_suc=None left_link=lane_left.find("link") if(left_link is None): print("WARNING: Road {} left lane id {} has no link entry!".format(road_id, left_id)) else: left_pre=left_link.find("predecessor") if(left_pre is not None): try: lane_pre=int(left_pre.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} left lane id {} predecessor has an invalid id!".format(road_id, left_id)) left_suc=left_link.find("successor") if(left_suc is not None): try: lane_suc=int(left_suc.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} left lane id {} successor has an invalid id!".format(road_id, left_id)) road_list.addLaneElement(lep3, new_lane_id, None, None) lane_pre_od_ids[new_lane_id]=lane_pre lane_suc_od_ids[new_lane_id]=lane_suc new_lane_id+=1 else: print("WARNING: Road {} has a laneSection without a left lane!".format(road_id)) section_right=lane_section.find("right") if(section_right is not None): lanes_right=section_right.findall("lane") if(lanes_right is None): raise Exception("ERROR: in Road {} right has no lanes!".format(road_id)) for lane_right in lanes_right: try: right_id=int(lane_right.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} has a right entry with invalid id!".format(road_id)) if(not rl.addLane(new_lane_id, right_id)): raise Exception("ERROR: Road {} right id {} is not supported!".format(road_id, right_id)) lep3=LaneElementPoly3.fromOpenDrive(road_id, pp3_aU, pp3_bU, pp3_cU, pp3_dU, pp3_aV, pp3_bV, pp3_cV, pp3_dV, geo_hdg, geo_x, geo_y) lep3.addOffsetAndFit(lane_offsets, right_id) lane_pre=None lane_suc=None right_link=lane_right.find("link") if(right_link is None): print("WARNING: Road {} right lane id {} has no link entry!".format(road_id, right_id)) else: right_pre=right_link.find("predecessor") if(right_pre is not None): try: lane_pre=int(right_pre.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} right lane id {} predecessor has an invalid id!".format(road_id, right_id)) right_suc=right_link.find("successor") if(right_suc is not None): try: lane_suc=int(right_suc.get("id")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} right lane id {} successor has an invalid id!".format(road_id, right_id)) road_list.addLaneElement(lep3, new_lane_id, None, None) lane_pre_od_ids[new_lane_id]=lane_pre lane_suc_od_ids[new_lane_id]=lane_suc new_lane_id+=1 else: print("WARNING: Road {} has a laneSection without a right lane!".format(road_id)) road_list.addRoad(rl, road_id) for key,lane in road_list.lanes.items(): print("Road: {} lane {}". 
format(lane.road_id, key)) print(road_list.successors) print(road_list.predecessors) print("INFO: Creating successor lists...") #Loop all roads a second time to generate the successor list. for road in roads: # Get the Road-ID and the id of the junction it belongs to try: road_id=int(road.get("id")) except (TypeError, ValueError) as e: raise Exception("ERROR: Found a Road with no valid ID, quitting!") try: road_junction = int(road.get("junction")) except (TypeError, ValueError) as e: raise Exception("ERROR: Road {} Junction entry is invalid!".format(road_id)) #get the link-tag. none or one exsits road_link=road.find("link") if(road_link is None): print("WARNING: Road {} has no link-tag!".format(road_id)) else: #process the sucessor tag road_link_suc=road_link.find("successor") if(road_link_suc is None): print("WARNING: Road {} link has no successor-tag!".format(road_id)) else: try: suc_id=int(road_link_suc.get("elementId")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} succecessor has no or invalid elementId".format(road_id)) suc_type=road_link_suc.get("elementType") suc_contact_point=road_link_suc.get("contactPoint") if(suc_type is None or suc_contact_point is None): raise Exception("ERROR: Road {} succecessor has no elementType or contactPoint".format(road_id)) if(suc_type.lower()=="road"): #if the predecessor is a road, the next id can just be used. if(suc_id not in road_list.roads): raise Exception("ERROR: Road {} successor road {} does not exist!".format(road_id, suc_id)) road=road_list.roads[road_id] suc_road=road_list.roads[suc_id] #do the right lanes for i in [-2,-1]: lane_id=road.getLaneByOpenDriveID(i) if(lane_id is not None): od_lane_id=lane_suc_od_ids[lane_id] if(od_lane_id is None): print("WARNING: Road {} has no successor!") road_list.setLaneSuccessorDict(lane_id, dict()) else: suc_lane_id=suc_road.getLaneByOpenDriveID(od_lane_id) if(suc_lane_id is None): raise Exception("ERROR: everythings fucked up here. I hate opendrive! lane_id {}, od_lane_id {}".format(lane_id,od_lane_id)) road_list.setLaneSuccessorDict(lane_id,{ManeuverState.NEXT: suc_lane_id}) for i in [1,2]: lane_id=road.getLaneByOpenDriveID(i) if(lane_id is not None): od_lane_id=lane_suc_od_ids[lane_id] if(od_lane_id is None): print("WARNING: Road {} has no successor!") road_list.setLanePredecessorDict(lane_id, dict()) else: suc_lane_id=suc_road.getLaneByOpenDriveID(od_lane_id) if(suc_lane_id is None): raise Exception("ERROR: everythings fucked up here. I hate opendrive! lane_id {}, od_lane_id {}".format(lane_id,od_lane_id)) road_list.setLanePredecessorDict(lane_id,{ManeuverState.NEXT: suc_lane_id}) elif(suc_type.lower()=="junction"): #construct a decision #print("INFO: Road {} has junction as successor!".format(road_id)) #first calculate the successor for the right incomingRoad junction_roads_list=list() if(suc_id not in junction_dict or junction_dict[suc_id] is None): raise Exception("ERROR Road {} uses unkown junction {} as successor!".format(road_id, suc_id)) junction_active=junction_dict[suc_id].items() for index, connection_active in junction_active: if(connection_active["incomingRoad"]==road_id): print("Adding road id {} to successor list!".format(connection_active["connectingRoad"])) junction_roads_list.append(connection_active["connectingRoad"]) #the successors are junction-roads which only have one lane. Easy to identify. 
suc_roads_dict=dict() if(len(junction_roads_list)==1): suc_road_id=junction_roads_list[0] suc_lane_id=road_list.roads[suc_road_id].getLaneByOpenDriveID(-1) if(suc_lane_id is None): suc_lane_id=road_list.roads[suc_road_id].getLaneByOpenDriveID(1) if(suc_lane_id is None): raise Exception("ERROR: No lane id for road {} junction successor road {} found!".format(road_id, suc_road_id)) suc_lane=road_list.lanes[suc_lane_id] suc_roads_dict[ManeuverState.MERGE]=suc_lane_id lane_id=road_list.roads[road_id].getLaneByOpenDriveID(-1) if(lane_id is None): raise Exception("ERROR: road {} has junction {} as successor, but no right lane!".format(road_id, suc_id)) road_list.setLaneSuccessorDict(lane_id, suc_roads_dict) elif(len(junction_roads_list)>1): for suc_road_id in junction_roads_list: #Junction-Roads do only have one lane, so we do not need to lookup them but just guess. suc_lane_id=road_list.roads[suc_road_id].getLaneByOpenDriveID(-1) if(suc_lane_id is None): suc_lane_id=road_list.roads[suc_road_id].getLaneByOpenDriveID(1) if(suc_lane_id is None): raise Exception("ERROR: No lane id for road {} junction successor road {} found!".format(road_id, suc_road_id)) suc_lane=road_list.lanes[suc_lane_id] if(suc_lane.getAngleChange()>angle_left_threshold): if(ManeuverState.LEFT in suc_roads_dict): raise Exception("ERROR: road {} junction successor {} road has already a LEFT entry! {}".format(road_id, suc_id, suc_roads_dict)) suc_roads_dict[ManeuverState.LEFT]=suc_lane_id elif(suc_lane.getAngleChange()<angle_right_threshold): if(ManeuverState.RIGHT in suc_roads_dict): raise Exception("ERROR: road {} junction successor {} road has already a RIGHT entry! {}".format(road_id, suc_id, suc_roads_dict)) suc_roads_dict[ManeuverState.RIGHT]=suc_lane_id else: if(ManeuverState.STRAIGHT in suc_roads_dict): raise Exception("ERROR: road {} junction successor {} road has already a STRAIGHT entry! {}".format(road_id, suc_id, suc_roads_dict)) suc_roads_dict[ManeuverState.STRAIGHT]=suc_lane_id else: raise Exception("ERROR Road {} junction {} successor has no valid roads!".format(road_id, suc_id)) #The Predecessor entry for a junction is always for the right lane from opendrive. lane_id=road_list.roads[road_id].getLaneByOpenDriveID(-1) if(lane_id is None): raise Exception("ERROR: road {} has junction {} as successor, but no right lane!".format(road_id, suc_id)) road_list.setLaneSuccessorDict(lane_id, suc_roads_dict) else: raise Exception("ERROR: Road {} prececessor elementType is invalid!".format(road_id)) #print("INFO: Creating predecessor lists...") #Loop all roads a third time to generate the predecessor list. pre_roads_list=list() for road in roads: # Get the Road-ID and the id of the junction it belongs to try: road_id=int(road.get("id")) except (TypeError, ValueError) as e: raise Exception("ERROR: Found a Road with no valid ID, quitting!") try: road_junction = int(road.get("junction")) except (TypeError, ValueError) as e: raise Exception("ERROR: Road {} Junction entry is invalid!".format(road_id)) #get the link-tag. 
none or one exsits road_link=road.find("link") if(road_link is None): print("WARNING: Road {} has no link-tag!".format(road_id)) else: #Process the predecessor tag road_link_pre=road_link.find("predecessor") if(road_link_pre is None): print("WARNING: Road {} link as no predecessor-tag!".format(road_id)) else: try: pre_id=int(road_link_pre.get("elementId")) except (ValueError, TypeError) as e: raise Exception("ERROR: Road {} predecessor has no or invalid elementId".format(road_id)) pre_type=road_link_pre.get("elementType") pre_contact_point=road_link_pre.get("contactPoint") if(pre_type is None or pre_contact_point is None): raise Exception("ERROR: Road {} predecessor has no elementType or contactPoint".format(road_id)) if(pre_type.lower()=="road"): if(pre_id not in road_list.roads): raise Exception("ERROR: Road {} successor road {} does not exist!".format(road_id, pre_id)) road=road_list.roads[road_id] suc_road=road_list.roads[pre_id] #do the right lanes for i in [-2,-1]: lane_id=road.getLaneByOpenDriveID(i) if(lane_id is not None): od_lane_id=lane_pre_od_ids[lane_id] if(od_lane_id is None): print("WARNING: Road {} has no successor!") road_list.setLanePredecessorDict(lane_id, dict()) else: suc_lane_id=suc_road.getLaneByOpenDriveID(od_lane_id) if(suc_lane_id is None): raise Exception("ERROR: everythings fucked up here. I hate opendrive! lane_id {}, od_lane_id {}".format(lane_id,od_lane_id)) road_list.setLanePredecessorDict(lane_id,{ManeuverState.NEXT: suc_lane_id}) for i in [1,2]: lane_id=road.getLaneByOpenDriveID(i) if(lane_id is not None): od_lane_id=lane_pre_od_ids[lane_id] if(od_lane_id is None): print("WARNING: Road {} lane id {} has no successor!".format(road_id, lane_id)) road_list.setLaneSuccessorDict(lane_id, dict()) else: suc_lane_id=suc_road.getLaneByOpenDriveID(od_lane_id) if(suc_lane_id is None): raise Exception("ERROR: everythings fucked up here. I hate opendrive! lane_id {}, od_lane_id {}".format(lane_id,od_lane_id)) print("Found left predecessor, road {}, lane {}, od_lane {}, suc_lane {}, pre_id {}".format(road_id, lane_id, od_lane_id, suc_lane_id, pre_id)) road_list.setLaneSuccessorDict(lane_id,{ManeuverState.NEXT: suc_lane_id}) elif(pre_type.lower()=="junction"): #construct a decision print("INFO: Road {} has junction as predecessor!".format(road_id)) junction_roads_list=list() if(pre_id not in junction_dict or junction_dict[pre_id] is None): raise Exception("ERROR Road {} uses unkown junction {} as predecessor!".format(road_id, pre_id)) junction_active=junction_dict[pre_id].items() for index, connection_active in junction_active: if(connection_active["incomingRoad"]==road_id): print("Adding road id {} to predecessor list!".format(connection_active["connectingRoad"])) junction_roads_list.append(connection_active["connectingRoad"]) #the predecessors are junction-roads which only have one lane. Easy to identify. 
pre_roads_dict=dict() if(len(junction_roads_list)==1): pre_road_id=junction_roads_list[0] print("Getting predecessor for merge, road id is {}, pre_road id is {}".format(road_id, pre_road_id)) pre_lane_id=road_list.roads[pre_road_id].getLaneByOpenDriveID(-1) if(pre_lane_id is None): pre_lane_id=road_list.roads[pre_road_id].getLaneByOpenDriveID(1) if(pre_lane_id is None): raise Exception("ERROR: No lane id for road {} junction predecessor road {} found!".format(road_id, pre_road_id)) print("Found lane id {} ".format(pre_lane_id)) pre_lane=road_list.lanes[pre_lane_id] pre_roads_dict[ManeuverState.MERGE]=pre_lane_id lane_id=road_list.roads[road_id].getLaneByOpenDriveID(-1) if(lane_id is None): raise Exception("ERROR: road {} has junction {} as predecessor, but no right lane!".format(road_id, pre_id)) road_list.setLaneSuccessorDict(lane_id, pre_roads_dict) elif(len(junction_roads_list)>1): for pre_road_id in junction_roads_list: #Junction-Roads do only have one lane, so we do not need to lookup them but just guess. pre_lane_id=road_list.roads[pre_road_id].getLaneByOpenDriveID(-1) if(pre_lane_id is None): pre_lane_id=road_list.roads[pre_road_id].getLaneByOpenDriveID(1) if(pre_lane_id is None): raise Exception("ERROR: No lane id for road {} junction predecessor road {} found!".format(road_id, pre_road_id)) pre_lane=road_list.lanes[pre_lane_id] #print("Adding lane {} with angle change {}".format(pre_lane_id, pre_lane.getAngleChange())) if(pre_lane.getAngleChange()>angle_left_threshold): #the angles are reverted here! if(ManeuverState.LEFT in pre_roads_dict): raise Exception("ERROR: road {} junction predecessor {} road has already a LEFT entry! {}".format(road_id, pre_id, pre_roads_dict)) #print("Adding {} as left for {}!".format()) pre_roads_dict[ManeuverState.LEFT]=pre_lane_id elif(pre_lane.getAngleChange()<angle_right_threshold): if(ManeuverState.RIGHT in pre_roads_dict): raise Exception("ERROR: road {} junction predecessor {} road has already a RIGHT entry! {}".format(road_id, pre_id, pre_roads_dict)) pre_roads_dict[ManeuverState.RIGHT]=pre_lane_id else: if(ManeuverState.STRAIGHT in pre_roads_dict): raise Exception("ERROR: road {} junction predecessor {} road has already a STRAIGHT entry! {}".format(road_id, pre_id, pre_roads_dict)) pre_roads_dict[ManeuverState.STRAIGHT]=pre_lane_id else: raise Exception("ERROR Road {} junction {} predecessor has no valid roads!".format(road_id, pre_id)) #The Predecessor entry for a junction is always for the left lane from opendrive. 
lane_id=road_list.roads[road_id].getLaneByOpenDriveID(1) if(lane_id is None): raise Exception("ERROR: road {} has junction {} as predecessor, but no right lane!".format(road_id, pre_id)) road_list.setLaneSuccessorDict(lane_id, pre_roads_dict) else: raise Exception("ERROR: Road {} prececessor elementType is invalid!".format(road_id)) for key,lane_suc in road_list.successors.items(): if(lane_suc is None): raise Exception("Lane {} successors is none!".format(key)) for key, lane_pre in road_list.predecessors.items(): if(lane_pre is None): print("Lane {} predecessor list is None, generating...".format(key)) pre_roads_dict=dict() pre_roads_list=list() for key2, lane_suc_dict in road_list.successors.items(): for key3, lane_suc in lane_suc_dict.items(): if(lane_suc == key): pre_roads_list.append(key2) if(len(pre_roads_list)==1): pre_roads_dict[ManeuverState.NEXT] = pre_roads_list[0] else: for lane_suc in pre_roads_list: pre_lane=road_list.lanes[lane_suc] print("Adding lane {} with angle change {}".format(lane_suc, pre_lane.getAngleChange())) if(pre_lane.getAngleChange()<-angle_left_threshold): #the angles are reverted here! if(ManeuverState.LEFT in pre_roads_dict and pre_roads_dict[ManeuverState.LEFT]!=lane_suc): raise Exception("ERROR: lane {} predecessor has already a LEFT entry! {}".format(key, pre_roads_dict)) #print("Adding {} as left for {}!".format()) pre_roads_dict[ManeuverState.LEFT]=lane_suc elif(pre_lane.getAngleChange()>-angle_right_threshold): if(ManeuverState.RIGHT in pre_roads_dict and pre_roads_dict[ManeuverState.RIGHT]!=lane_suc): raise Exception("ERROR: lane {} predecessor has already a RIGHT entry! {}".format(key, pre_roads_dict)) pre_roads_dict[ManeuverState.RIGHT]=lane_suc else: if(ManeuverState.STRAIGHT in pre_roads_dict and pre_roads_dict[ManeuverState.STRAIGHT]!=lane_suc): print("ERROR: lane {} predecessor lane {} has already a STRAIGHT entry (could be merge lane)! {}".format(key, lane_suc, pre_roads_dict)) else: pre_roads_dict[ManeuverState.STRAIGHT]=lane_suc print("Generated: {}".format(pre_roads_dict)) road_list.setLanePredecessorDict(key, pre_roads_dict) # print() # print("Successors:") # for suc in road_list.successors.items(): # print(suc) # print() # print("Predecessors:") # for pre in road_list.predecessors.items(): # print(pre) print("INFO: Overall road network length is {0}m.".format(overall_len)) print("INFO: DONE!") return road_list
[ "numpy.polyfit", "numpy.sin", "numpy.linspace", "numpy.cos", "numpy.concatenate", "numpy.poly1d" ]
[((180, 206), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(100)'], {}), '(0.0, 1.0, 100)\n', (191, 206), True, 'import numpy as np\n'), ((848, 870), 'numpy.concatenate', 'np.concatenate', (['pnts_x'], {}), '(pnts_x)\n', (862, 870), True, 'import numpy as np\n'), ((882, 904), 'numpy.concatenate', 'np.concatenate', (['pnts_y'], {}), '(pnts_y)\n', (896, 904), True, 'import numpy as np\n'), ((511, 535), 'numpy.poly1d', 'np.poly1d', (['list_p1d_u[i]'], {}), '(list_p1d_u[i])\n', (520, 535), True, 'import numpy as np\n'), ((550, 574), 'numpy.poly1d', 'np.poly1d', (['list_p1d_v[i]'], {}), '(list_p1d_v[i])\n', (559, 574), True, 'import numpy as np\n'), ((982, 1011), 'numpy.polyfit', 'np.polyfit', (['pspace', 'pnts_x', '(3)'], {}), '(pspace, pnts_x, 3)\n', (992, 1011), True, 'import numpy as np\n'), ((1036, 1065), 'numpy.polyfit', 'np.polyfit', (['pspace', 'pnts_y', '(3)'], {}), '(pspace, pnts_y, 3)\n', (1046, 1065), True, 'import numpy as np\n'), ((595, 614), 'numpy.cos', 'np.cos', (['list_hdg[i]'], {}), '(list_hdg[i])\n', (601, 614), True, 'import numpy as np\n'), ((621, 640), 'numpy.sin', 'np.sin', (['list_hdg[i]'], {}), '(list_hdg[i])\n', (627, 640), True, 'import numpy as np\n'), ((661, 680), 'numpy.sin', 'np.sin', (['list_hdg[i]'], {}), '(list_hdg[i])\n', (667, 680), True, 'import numpy as np\n'), ((687, 706), 'numpy.cos', 'np.cos', (['list_hdg[i]'], {}), '(list_hdg[i])\n', (693, 706), True, 'import numpy as np\n')]
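fitPolysToPoly above samples each paramPoly3 segment, rotates and translates it into world coordinates, and fits a single cubic in x and y over the concatenated points. A toy illustration follows; the two straight unit segments are hypothetical and not taken from any real OpenDRIVE file:

import numpy as np

# poly1d coefficient tuples are highest-order first, i.e. (d, c, b, a),
# so these segments describe u(t) = t and v(t) = 0
list_p1d_u = [(0.0, 0.0, 1.0, 0.0), (0.0, 0.0, 1.0, 0.0)]
list_p1d_v = [(0.0, 0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 0.0)]
list_hdg = [0.0, 0.0]
list_x = [0.0, 1.0]   # the second segment starts where the first one ends
list_y = [0.0, 0.0]

# returns the fitted x(t) and y(t) coefficients, constant term first
coeffs = fitPolysToPoly(list_p1d_u, list_p1d_v, list_hdg, list_x, list_y)
print(np.round(coeffs, 3))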
import numpy as np from wholeslidedata.annotation.structures import Point from wholeslidedata.annotation.wholeslideannotation import WholeSlideAnnotation from wholeslidedata.image.wholeslideimage import WholeSlideImage from wholeslidedata.labels import Label def non_max_suppression_fast(boxes, overlapThresh): """Very efficient NMS function taken from pyimagesearch""" # if there are no boxes, return an empty list if len(boxes) == 0: return [] # if the bounding boxes integers, convert them to floats -- # this is important since we'll be doing a bunch of divisions if boxes.dtype.kind == "i": boxes = boxes.astype("float") # initialize the list of picked indexes pick = [] # grab the coordinates of the bounding boxes x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] # compute the area of the bounding boxes and sort the bounding # boxes by the bottom-right y-coordinate of the bounding box area = (x2 - x1 + 1) * (y2 - y1 + 1) idxs = np.argsort(y2) # keep looping while some indexes still remain in the indexes # list while len(idxs) > 0: # grab the last index in the indexes list and add the # index value to the list of picked indexes last = len(idxs) - 1 i = idxs[last] pick.append(i) # find the largest (x, y) coordinates for the start of # the bounding box and the smallest (x, y) coordinates # for the end of the bounding box xx1 = np.maximum(x1[i], x1[idxs[:last]]) yy1 = np.maximum(y1[i], y1[idxs[:last]]) xx2 = np.minimum(x2[i], x2[idxs[:last]]) yy2 = np.minimum(y2[i], y2[idxs[:last]]) # compute the width and height of the bounding box w = np.maximum(0, xx2 - xx1 + 1) h = np.maximum(0, yy2 - yy1 + 1) # compute the ratio of overlap overlap = (w * h) / area[idxs[:last]] # delete all indexes from the index list that have idxs = np.delete( idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])) ) # return only the bounding boxes that were picked using the # integer data type return boxes[pick].astype("int") def get_centerpoints(box, dist): """Returns centerpoints of box""" return (box[0] + dist, box[1] + dist) def point_to_box(x, y, size): """Convert centerpoint to bounding box of fixed size""" return np.array([x - size, y - size, x + size, y + size]) def slide_nms(slide_path, wsa_path, tile_size): """Iterate over WholeSlideAnnotation and perform NMS. For this to properly work, tiles need to be larger than model inference patches.""" wsi = WholeSlideImage(slide_path, backend="asap") wsa = WholeSlideAnnotation(wsa_path) shape = wsi.shapes[0] center_nms_points = [] for y_pos in range(0, shape[1], tile_size): for x_pos in range(0, shape[0], tile_size): wsa_patch = wsa.select_annotations( int(x_pos + tile_size // 2), int(y_pos + tile_size // 2), tile_size, tile_size, ) if wsa_patch: wsa_patch_coords = [point.coordinates for point in wsa_patch] if len(wsa_patch_coords) < 2: continue boxes = np.array( [point_to_box(x[0], x[1], 8) for x in wsa_patch_coords] ) nms_boxes = non_max_suppression_fast(boxes, 0.7) for box in nms_boxes: center_nms_points.append(get_centerpoints(box, 8)) return center_nms_points def to_wsd(points): """Convert list of coordinates into WSD points""" new_points = [] for i, point in enumerate(points): p = Point( index=i, label=Label("til", 1, color="blue"), coordinates=[point], ) new_points.append(p) return new_points
[ "wholeslidedata.annotation.wholeslideannotation.WholeSlideAnnotation", "numpy.minimum", "numpy.where", "numpy.argsort", "numpy.array", "wholeslidedata.labels.Label", "numpy.maximum", "wholeslidedata.image.wholeslideimage.WholeSlideImage" ]
[((1044, 1058), 'numpy.argsort', 'np.argsort', (['y2'], {}), '(y2)\n', (1054, 1058), True, 'import numpy as np\n'), ((2459, 2509), 'numpy.array', 'np.array', (['[x - size, y - size, x + size, y + size]'], {}), '([x - size, y - size, x + size, y + size])\n', (2467, 2509), True, 'import numpy as np\n'), ((2712, 2755), 'wholeslidedata.image.wholeslideimage.WholeSlideImage', 'WholeSlideImage', (['slide_path'], {'backend': '"""asap"""'}), "(slide_path, backend='asap')\n", (2727, 2755), False, 'from wholeslidedata.image.wholeslideimage import WholeSlideImage\n'), ((2766, 2796), 'wholeslidedata.annotation.wholeslideannotation.WholeSlideAnnotation', 'WholeSlideAnnotation', (['wsa_path'], {}), '(wsa_path)\n', (2786, 2796), False, 'from wholeslidedata.annotation.wholeslideannotation import WholeSlideAnnotation\n'), ((1532, 1566), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[idxs[:last]]'], {}), '(x1[i], x1[idxs[:last]])\n', (1542, 1566), True, 'import numpy as np\n'), ((1581, 1615), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[idxs[:last]]'], {}), '(y1[i], y1[idxs[:last]])\n', (1591, 1615), True, 'import numpy as np\n'), ((1630, 1664), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[idxs[:last]]'], {}), '(x2[i], x2[idxs[:last]])\n', (1640, 1664), True, 'import numpy as np\n'), ((1679, 1713), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[idxs[:last]]'], {}), '(y2[i], y2[idxs[:last]])\n', (1689, 1713), True, 'import numpy as np\n'), ((1785, 1813), 'numpy.maximum', 'np.maximum', (['(0)', '(xx2 - xx1 + 1)'], {}), '(0, xx2 - xx1 + 1)\n', (1795, 1813), True, 'import numpy as np\n'), ((1826, 1854), 'numpy.maximum', 'np.maximum', (['(0)', '(yy2 - yy1 + 1)'], {}), '(0, yy2 - yy1 + 1)\n', (1836, 1854), True, 'import numpy as np\n'), ((3862, 3891), 'wholeslidedata.labels.Label', 'Label', (['"""til"""', '(1)'], {'color': '"""blue"""'}), "('til', 1, color='blue')\n", (3867, 3891), False, 'from wholeslidedata.labels import Label\n'), ((2067, 2100), 'numpy.where', 'np.where', (['(overlap > overlapThresh)'], {}), '(overlap > overlapThresh)\n', (2075, 2100), True, 'import numpy as np\n')]
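A small sanity check for non_max_suppression_fast above: two heavily overlapping boxes plus one isolated box (the coordinates are arbitrary); with overlapThresh=0.5 only one of the overlapping pair survives:

import numpy as np

boxes = np.array([[10, 10, 30, 30],
                  [12, 12, 32, 32],        # overlaps the first box by ~80%
                  [100, 100, 120, 120]])
kept = non_max_suppression_fast(boxes, overlapThresh=0.5)
print(kept)   # two boxes remain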
""" Functions to estimate observed ACA magnitudes """ import sys import traceback import logging import collections import scipy.stats import scipy.special import numpy as np import numba from astropy.table import Table, vstack from Chandra.Time import DateTime from cheta import fetch from Quaternion import Quat import Ska.quatutil from mica.archive import aca_l0 from mica.archive.aca_dark.dark_cal import get_dark_cal_image from chandra_aca.transform import count_rate_to_mag, pixels_to_yagzag from cxotime import CxoTime from kadi import events from . import star_obs_catalogs from agasc import get_star logger = logging.getLogger('agasc.supplement') MAX_MAG = 15 MASK = { 'mouse_bit': np.array([[True, True, True, True, True, True, True, True], [True, True, False, False, False, False, True, True], [True, False, False, False, False, False, False, True], [True, False, False, False, False, False, False, True], [True, False, False, False, False, False, False, True], [True, False, False, False, False, False, False, True], [True, True, False, False, False, False, True, True], [True, True, True, True, True, True, True, True]]) } EXCEPTION_MSG = { -1: 'Unknown', 0: 'OK', 1: 'No level 0 data', 2: 'No telemetry data', 3: 'Mismatch in telemetry between aca_l0 and cheta', 4: 'Time mismatch between cheta and level0', 5: 'Failed job', 6: 'Suspect observation' } EXCEPTION_CODES = collections.defaultdict(lambda: -1) EXCEPTION_CODES.update({msg: code for code, msg in EXCEPTION_MSG.items() if code > 0}) class MagStatsException(Exception): def __init__(self, msg='', agasc_id=None, obsid=None, timeline_id=None, mp_starcat_time=None, **kwargs): super().__init__(msg) self.error_code = EXCEPTION_CODES[msg] self.msg = msg self.agasc_id = agasc_id self.obsid = obsid[0] if type(obsid) is list and len(obsid) == 1 else obsid self.timeline_id = timeline_id self.mp_starcat_time = (mp_starcat_time[0] if type(mp_starcat_time) is list and len(mp_starcat_time) == 1 else mp_starcat_time) for k in kwargs: setattr(self, k, kwargs[k]) def __str__(self): return f'MagStatsException: {self.msg} (agasc_id: {self.agasc_id}, ' \ f'obsid: {self.obsid}, mp_starcat_time: {self.mp_starcat_time})' def __iter__(self): yield 'error_code', self.error_code yield 'msg', self.msg yield 'agasc_id', self.agasc_id yield 'obsid', self.obsid yield 'timeline_id', self.timeline_id yield 'mp_starcat_time', self.mp_starcat_time def _magnitude_correction(time, mag_aca): """ Get a time-dependent correction to AOACMAG (prior to dynamic background subtraction). :param time: Chandra.Time.DateTime :param mag_aca: np.array :return: np.array """ params = {"t_ref": "2011-01-01 12:00:00.000", "p": [0.005899340720522751, 0.12029019332761458, -2.99386247406073e-10, -6.9534637950633265, 0.7916261423307238]} q = params['p'] t_ref = DateTime(params['t_ref']) dmag = (q[0] + (q[1] + q[2] * np.atleast_1d(time)) * np.exp(q[3] + q[4] * np.atleast_1d(mag_aca))) dmag[np.atleast_1d(time) < t_ref.secs] = 0 return np.squeeze(dmag) def get_responsivity(time): """ ACA magnitude response over time. This was estimated with bright stars that were observed more than a hundred times during the mission. More details in the `responsivity notebook`_: .. 
_responsivity notebook: https://nbviewer.jupyter.org/urls/cxc.cfa.harvard.edu/mta/ASPECT/jgonzalez/mag_stats/notebooks/03-high_mag_responsivity-fit.ipynb # noqa :param time: float Time in CXC seconds :return: """ a, b, c = [3.19776750e-02, 5.35201479e+08, 8.49670756e+07] return - a * (1 + scipy.special.erf((time - b) / c)) / 2 def get_droop_systematic_shift(magnitude): """ Difference between the magnitude determined from DC-subtracted image telemetry and the catalog ACA magnitude. The magnitude shift is time-independent. It depends only on the catalog magnitude and is zero for bright stars. More details in the `droop notebook`_: .. _droop notebook: https://nbviewer.jupyter.org/urls/cxc.cfa.harvard.edu/mta/ASPECT/jgonzalez/mag_stats/notebooks/04-DroopAfterSubtractionAndResponsivity-fit.ipynb # noqa :param magnitude: float Catalog ACA magnitude :return: """ a, b = [11.25572, 0.59486369] return np.exp((magnitude - a) / b) def rolling_mean(t, f, window, selection=None): """ Calculate the rolling mean of the 'f' array, using a centered square window in time. :param t: np.array the time array. :param f: np.array the array to average. :param window: float the window size (in the same units as the time array). :param selection: np.array An optional array of bool. :return: np.array An array with the same type and shape as 'f' """ result = np.ones_like(f) * np.nan if selection is None: selection = np.ones_like(f, dtype=bool) assert len(f) == len(t) assert len(f) == len(selection) assert len(selection.shape) == 1 _rolling_mean_(result, t, f, window, selection) return result @numba.jit(nopython=True) def _rolling_mean_(result, t, f, window, selection): i_min = 0 i_max = 0 n = 0 f_sum = 0 for i in range(len(f)): if not selection[i]: continue while i_max < len(f) and t[i_max] < t[i] + window / 2: if selection[i_max]: f_sum += f[i_max] n += 1 i_max += 1 while t[i_min] < t[i] - window / 2: if selection[i_min]: f_sum -= f[i_min] n -= 1 i_min += 1 result[i] = f_sum / n def get_star_position(star, telem): """ Residuals for a given AGASC record at a given slot/time. :param star: Table Row of one AGASC entry :param telem: table Table with columns AOATTQT1, AOATTQT2, AOATTQT3, AOATTQT4. :return: """ aca_misalign = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) rad_to_arcsec = 206264.81 q = np.array([telem['AOATTQT1'], telem['AOATTQT2'], telem['AOATTQT3'], telem['AOATTQT4']]).transpose() norm = np.sum(q**2, axis=1, keepdims=True) # I am just normalizing q, just in case. n = np.squeeze(np.sqrt(norm)) q[n != 0] /= np.sqrt(norm)[n != 0] # prevent warning when dividing by zero (it happens) q_att = Quat(q=q) ts = q_att.transform star_pos_eci = Ska.quatutil.radec2eci(star['RA_PMCORR'], star['DEC_PMCORR']) d_aca = np.dot(np.dot(aca_misalign, ts.transpose(0, 2, 1)), star_pos_eci).transpose() yag = np.arctan2(d_aca[:, 1], d_aca[:, 0]) * rad_to_arcsec zag = np.arctan2(d_aca[:, 2], d_aca[:, 0]) * rad_to_arcsec logger.debug(f' star position. 
AGASC_ID={star["AGASC_ID"]}, ' f'{len(yag)} samples, ({yag[0]}, {zag[0]})...') return { 'yang_star': yag, 'zang_star': zag, } # this is in case one has to return empty telemetry _telem_dtype = [('times', 'float64'), ('IMGSIZE', 'int32'), ('IMGROW0', 'int16'), ('IMGCOL0', 'int16'), ('IMGRAW', 'float32'), ('AOACASEQ', '<U4'), ('AOPCADMD', '<U4'), ('AOATTQT1', 'float64'), ('AOATTQT2', 'float64'), ('AOATTQT3', 'float64'), ('AOATTQT4', 'float64'), ('AOACIIR', '<U3'), ('AOACISP', '<U3'), ('AOACYAN', 'float64'), ('AOACZAN', 'float64'), ('AOACMAG', 'float32'), ('AOACFCT', '<U4'), ('mags_img', 'float64'), ('yang_img', 'float64'), ('zang_img', 'float64'), ('yang_star', 'float64'), ('zang_star', 'float64'), ('mags', 'float64'), ('dy', 'float64'), ('dz', 'float64'), ('dr', 'float64')] def get_telemetry(obs): """ Get all telemetry relevant for the magnitude estimation task. This gets: - AOACASEQ - AOPCADMD - AOACMAG (ACA estimated magnitude) - AOACIIR (ACA ionizing radiation flag) - AOACISP (ACA saturated pixel flag) MSIDs are renamed to remove the slot number. This assumes all MSIDs occur at the same times (they do) :param obs: astropy.table.Row It must have the following columns: 'agasc_id', 'mp_starcat_time', 'mag', 'slot' :return: dict """ star_obs_catalogs.load() dwell = star_obs_catalogs.DWELLS_NP[star_obs_catalogs.DWELLS_MAP[obs['mp_starcat_time']]] star = get_star(obs['agasc_id'], date=dwell['tstart'], use_supplement=False) start = dwell['tstart'] stop = dwell['tstop'] slot = obs['slot'] logger.debug(f' Getting telemetry for AGASC ID={obs["agasc_id"]}, OBSID={obs["obsid"]}, ' f'mp_starcat_time={obs["mp_starcat_time"]}') # first we get slot data from mica and magnitudes from cheta and match them in time # to match them in time, we assume they come in steps of 1.025 seconds, starting from the first # time sample. slot_data_cols = ['TIME', 'END_INTEG_TIME', 'IMGSIZE', 'IMGROW0', 'IMGCOL0', 'TEMPCCD', 'IMGRAW'] slot_data = aca_l0.get_slot_data(start, stop, slot=obs['slot'], centered_8x8=True, columns=slot_data_cols) names = ['AOACASEQ', 'AOPCADMD', 'CVCMJCTR', 'CVCMNCTR', f'AOACIIR{slot}', f'AOACISP{slot}', f'AOACMAG{slot}', f'AOACFCT{slot}', f'AOACZAN{slot}', f'AOACYAN{slot}'] + [f'AOATTQT{i}' for i in range(1, 5)] msids = fetch.Msidset(names, start, stop) if len(slot_data) == 0: raise MagStatsException('No level 0 data', agasc_id=obs["agasc_id"], obsid=obs["obsid"], mp_starcat_time=obs["mp_starcat_time"], time_range=[start, stop], slot=obs['slot']) times = msids[f'AOACMAG{slot}'].times tmin = np.min([np.min(slot_data['END_INTEG_TIME']), np.min(times)]) t1 = np.round((times - tmin) / 1.025) t2 = np.round((slot_data['END_INTEG_TIME'].data - tmin) / 1.025) _, i1, i2 = np.intersect1d(t1, t2, return_indices=True) times = times[i1] slot_data = slot_data[i2] if len(times) == 0: # the intersection was null. 
raise MagStatsException('Either no telemetry or no matching times between cheta and level0', agasc_id=obs["agasc_id"], obsid=obs["obsid"], mp_starcat_time=obs["mp_starcat_time"]) # Now that we have the times, we get the rest of the MSIDs telem = { 'times': times } telem.update({k: slot_data[k] for k in slot_data_cols[2:]}) telem.update({ name: msids[name].vals[np.in1d(msids[name].times, times)] for name in names }) # get the normal sun and safe sun mode intervals, which will be removed excluded_ranges = [] for event in [events.normal_suns, events.safe_suns]: excluded_ranges += event.intervals(times[0] - 4, times[-1] + 4) excluded_ranges = [(CxoTime(t[0]).cxcsec, CxoTime(t[1]).cxcsec) for t in excluded_ranges] if excluded_ranges: excluded = np.zeros_like(times, dtype=bool) for excluded_range in excluded_ranges: excluded |= ((times >= excluded_range[0]) & (times <= excluded_range[1])) telem.update({k: telem[k][~excluded] for k in telem}) slot_data = slot_data[~excluded] if len(slot_data) == 0: # the intersection was null. raise MagStatsException('Nothing left after removing excluded ranges', agasc_id=obs["agasc_id"], obsid=obs["obsid"], mp_starcat_time=obs["mp_starcat_time"]) for name in ['AOACIIR', 'AOACISP', 'AOACYAN', 'AOACZAN', 'AOACMAG', 'AOACFCT']: telem[name] = telem[f'{name}{slot}'] del telem[f'{name}{slot}'] for name in ['AOACIIR', 'AOACISP']: telem[name] = np.char.rstrip(telem[name]) ok = (telem['AOACASEQ'] == 'KALM') & (telem['AOACIIR'] == 'OK') & \ (telem['AOPCADMD'] == 'NPNT') & (telem['AOACFCT'] == 'TRAK') # etc... logger.debug(' Adding magnitude estimates') telem.update(get_mag_from_img(slot_data, start, ok)) logger.debug(' Adding star position') telem.update(get_star_position(star=star, telem=telem)) logger.debug(' Correcting for droop') droop_shift = get_droop_systematic_shift(star['MAG_ACA']) logger.debug(' Correcting for responsivity') responsivity = get_responsivity(start) telem['mags'] = telem['mags_img'] - responsivity - droop_shift telem['mags'][~ok] = 0. telem['ok'] = ok telem['dy'] = np.ones(len(ok)) * np.inf telem['dz'] = np.ones(len(ok)) * np.inf telem['dr'] = np.ones(len(ok)) * np.inf yang = telem['yang_img'] - telem['yang_star'] zang = telem['zang_img'] - telem['zang_star'] rang = np.sqrt(yang**2 + zang**2) if np.any(ok & (rang < 10)): y25, y50, y75 = np.quantile(yang[ok & (rang < 10)], [0.25, 0.5, 0.75]) z25, z50, z75 = np.quantile(zang[ok & (rang < 10)], [0.25, 0.5, 0.75]) centroid_outlier = ((yang > y75 + 3 * (y75 - y25)) | (yang < y25 - 3 * (y75 - y25)) | (zang > z75 + 3 * (z75 - z25)) | (zang < z25 - 3 * (z75 - z25))) telem['dy'] = yang - np.mean(yang[ok & ~centroid_outlier]) telem['dz'] = zang - np.mean(zang[ok & ~centroid_outlier]) telem['dr'] = (telem['dy'] ** 2 + telem['dz'] ** 2) ** .5 return telem def get_telemetry_by_agasc_id(agasc_id, obsid=None, ignore_exceptions=False): """ Get all telemetry relevant for the magnitude estimation, given an AGASC ID. This gets all observations of a given star, it gets the telemetry for each, and stacks them. :param agasc_id: int :param obsid: int (optional) :param ignore_exceptions: bool if True, any exception is ignored. Useful in some cases. Default is False. 
:return: dict """ logger.debug(f' Getting telemetry for AGASC ID={agasc_id}') star_obs_catalogs.load() if obsid is None: obs = star_obs_catalogs.STARS_OBS[ (star_obs_catalogs.STARS_OBS['agasc_id'] == agasc_id)] else: obs = star_obs_catalogs.STARS_OBS[(star_obs_catalogs.STARS_OBS['agasc_id'] == agasc_id) & (star_obs_catalogs.STARS_OBS['obsid'] == obsid)] if len(obs) > 1: obs = obs.loc['mp_starcat_time', sorted(obs['mp_starcat_time'])] telem = [] for i, o in enumerate(obs): try: t = Table(get_telemetry(o)) t['obsid'] = o['obsid'] t['agasc_id'] = agasc_id telem.append(t) except Exception: if not ignore_exceptions: logger.info(f'{agasc_id=}, obsid={o["obsid"]} failed') exc_type, exc_value, exc_traceback = sys.exc_info() trace = traceback.extract_tb(exc_traceback) logger.info(f'{exc_type.__name__} {exc_value}') for step in trace: logger.info(f' in {step.filename}:{step.lineno}/{step.name}:') logger.info(f' {step.line}') raise return vstack(telem) def add_obs_info(telem, obs_stats): """ Add observation-specific information to a telemetry table (ok flag, and outlier flag). This is done as part of get_agasc_id_stats. It is a convenience for writing reports. :param telem: list of tables One or more telemetry tables (potentially many observations) :param obs_stats: table The result of calc_obs_stats. :return: """ logger.debug(' Adding observation info to telemetry...') obs_stats['obs_ok'] = ( (obs_stats['n'] > 10) & (obs_stats['f_track'] > 0.3) & (obs_stats['lf_variability_100s'] < 1) ) obs_stats['comments'] = np.zeros(len(obs_stats), dtype='<U80') telem = vstack(telem) telem['obs_ok'] = True telem['obs_outlier'] = False for s in obs_stats: obsid = s['obsid'] o = (telem['obsid'] == obsid) telem['obs_ok'][o] = np.ones(np.sum(o), dtype=bool) * s['obs_ok'] if (np.any(telem['ok'][o]) and s['f_track'] > 0 and np.isfinite(s['q75']) and np.isfinite(s['q25'])): iqr = s['q75'] - s['q25'] telem['obs_outlier'][o] = ( telem[o]['ok'] & (iqr > 0) & ((telem[o]['mags'] < s['q25'] - 1.5 * iqr) | (telem[o]['mags'] > s['q75'] + 1.5 * iqr)) ) logger.debug(f' Adding observation info to telemetry {obsid=}') return telem @numba.jit(nopython=True) def staggered_aca_slice(array_in, array_out, row, col): for i in np.arange(len(row)): if row[i] + 8 < 1024 and col[i] + 8 < 1024: array_out[i] = array_in[row[i]:row[i] + 8, col[i]:col[i] + 8] def get_mag_from_img(slot_data, t_start, ok=True): """ Vectorized estimate of the magnitude from mica archive image telemetry data. :param slot_data: astropy.Table. The data returned by mica.archive.aca_l0.get_slot_data :param t_start: The starting time of the observation (by convention, the starcat time) :param ok: np.array. A boolean array with the same length as slot_data. Only magnitudes for entries with ok=True are calculated. The rest are set to MAX_MAG. :return: """ logger.debug(' magnitude from images...') dark_cal = get_dark_cal_image(t_start, 'nearest', t_ccd_ref=np.mean(slot_data['TEMPCCD'] - 273.16), aca_image=False) # all images will be 8x8, with a centered mask; imgrow will always be that of the 8x8 corner.
imgrow_8x8 = np.where(slot_data['IMGSIZE'] == 8, slot_data['IMGROW0'], slot_data['IMGROW0'] - 1 ) imgcol_8x8 = np.where(slot_data['IMGSIZE'] == 8, slot_data['IMGCOL0'], slot_data['IMGCOL0'] - 1 ) # subtract closest dark cal dark = np.zeros([len(slot_data), 8, 8], dtype=np.float64) staggered_aca_slice(dark_cal.astype(float), dark, 512 + imgrow_8x8, 512 + imgcol_8x8) img_sub = slot_data['IMGRAW'] - dark * 1.696 / 5 img_sub.mask |= MASK['mouse_bit'] # calculate magnitude mag = np.ones(len(slot_data)) * MAX_MAG counts = np.ma.sum(np.ma.sum(img_sub, axis=1), axis=1) m = ok & np.isfinite(counts) & (counts > 0) mag[m] = count_rate_to_mag(counts[m] * 5 / 1.7) mag[mag > MAX_MAG] = MAX_MAG # this extra step is to investigate the background scale dark = np.ma.array(dark * 1.696 / 5, mask=img_sub.mask) img_raw = np.ma.array(slot_data['IMGRAW'], mask=img_sub.mask) dark_count = np.ma.sum(np.ma.sum(dark, axis=1), axis=1) img_count = np.ma.sum(np.ma.sum(img_raw, axis=1), axis=1) # centroids yag = np.zeros(len(slot_data)) zag = np.zeros(len(slot_data)) pixel_center = np.arange(8) + 0.5 projected_image = np.ma.sum(slot_data['IMGRAW'], axis=1) col = np.ma.sum(pixel_center * projected_image, axis=1) / np.ma.sum(projected_image, axis=1) projected_image = np.ma.sum(slot_data['IMGRAW'], axis=2) row = np.ma.sum(pixel_center * projected_image, axis=1) / np.ma.sum(projected_image, axis=1) y_pixel = row + imgrow_8x8 z_pixel = col + imgcol_8x8 yag[m], zag[m] = pixels_to_yagzag(y_pixel[m], z_pixel[m]) logger.debug(f' magnitude from images... {len(mag)} samples: {mag[0]:.2f}...') return { 'mags_img': mag, 'yang_img': yag, 'zang_img': zag, 'counts_img': img_count, 'counts_dark': dark_count } OBS_STATS_INFO = { 'agasc_id': 'AGASC ID of the star', 'obsid': 'OBSID corresponding to the dwell when the observation is made', 'slot': 'Slot number', 'type': 'GUI/ACQ/BOT', 'mp_starcat_time': 'Timestamp (from kadi.commands) for starcat command ' 'preceding the dwell of an observation.', 'timeline_id': 'starcat command timeline_id from kadi.commands.get_cmds', 'tstart': 'Dwell start time from kadi.events.manvrs', 'tstop': 'Dwell end time from kadi.events.manvrs', 'mag_correction': 'Overall correction applied to the magnitude estimate', 'responsivity': 'Responsivity correction applied to the magnitude estimate', 'droop_shift': 'Droop shift correction applied to the magnitude estimate', 'mag_aca': 'ACA star magnitude from the AGASC catalog', 'mag_aca_err': 'ACA star magnitude uncertainty from the AGASC catalog', 'row': 'Expected row number, based on star location and yang/zang from mica.archive.starcheck DB', 'col': 'Expected col number, based on star location and yang/zang from mica.archive.starcheck DB', 'mag_img': 'Magnitude estimate from image telemetry (uncorrected)', 'mag_obs': 'Estimated ACA star magnitude', 'mag_obs_err': 'Estimated ACA star magnitude uncertainty', 'aoacmag_mean': 'Mean of AOACMAG from telemetry', 'aoacmag_err': 'Standard deviation of AOACMAG from telemetry', 'aoacmag_q25': '1st quartile of AOACMAG from telemetry', 'aoacmag_median': 'Median of AOACMAG from telemetry', 'aoacmag_q75': '3rd quartile of AOACMAG from telemetry', 'counts_img': 'Raw counts from image telemetry, summed over the mouse-bit window', 'counts_dark': 'Expected counts from background, summed over the mouse-bit window', 'f_kalman': 'Fraction of all samples where AOACASEQ == "KALM" and AOPCADMD == "NPNT" (n_kalman/n)', 'f_track': 'Fraction of kalman samples with AOACIIR == "OK" and AOACFCT == "TRAK" (n_track/n_kalman)', 'f_dr5': 'Fraction of
"track" samples with angle residual less than 5 arcsec (n_dr5/n_track)', 'f_dr3': 'Fraction of "track" samples with angle residual less than 3 arcsec (n_dr3/n_track)', 'f_ok': 'Fraction of all samples with (kalman & track & dr5) == True (n_ok/n)', 'q25': '1st quartile of estimated magnitude', 'median': 'Median of estimated magnitude', 'q75': '3rd quartile of estimated magnitude', 'mean': 'Mean of estimated magnitude', 'mean_err': 'Uncertainty in the mean of estimated magnitude', 'std': 'Standard deviation of estimated magnitude', 'skew': 'Skewness of estimated magnitude', 'kurt': 'Kurtosis of estimated magnitude', 't_mean': 'Mean of estimated magnitude after removing outliers', 't_mean_err': 'Uncertainty in the mean of estimated magnitude after removing outliers', 't_std': 'Standard deviation of estimated magnitude after removing outliers', 't_skew': 'Skewness of estimated magnitude after removing outliers', 't_kurt': 'Kurtosis of estimated magnitude after removing outliers', 'n': 'Number of samples', 'n_ok': 'Number of samples with (kalman & track & dr5) == True', 'outliers': 'Number of outliers (+- 3 IQR)', 'lf_variability_100s': 'Rolling mean of OK magnitudes with a 100 second window', 'lf_variability_500s': 'Rolling mean of OK magnitudes with a 500 second window', 'lf_variability_1000s': 'Rolling mean of OK magnitudes with a 1000 second window', 'tempccd': 'CCD temperature', 'dr_star': 'Angle residual', 'obs_ok': 'Boolean flag: everything OK with this observation', 'obs_suspect': 'Boolean flag: this observation is "suspect"', 'obs_fail': 'Boolean flag: a processing error when estimating magnitude for this observation', 'comments': '', 'w': 'Weight to be used on a weighted mean (1/std)', 'mean_corrected': 'Corrected mean used in weighted mean (t_mean + mag_correction)', 'weighted_mean': 'Mean weighted by inverse of standard deviation (mean/std)', } def get_obs_stats(obs, telem=None): """ Get summary magnitude statistics for an observation. :param obs: astropy.table.Row a "star observation" row.
From the join of starcheck catalog and starcat commands It must have the following columns: 'agasc_id', 'mp_starcat_time', 'mag', 'slot' :param telem: dict Dictionary with telemetry (output of get_telemetry) :return: dict dictionary with stats """ logger.debug(f' Getting OBS stats for AGASC ID {obs["agasc_id"]},' f' OBSID {obs["obsid"]} at {obs["mp_starcat_time"]}') star_obs_catalogs.load() star = get_star(obs['agasc_id'], use_supplement=False) dwell = star_obs_catalogs.DWELLS_NP[star_obs_catalogs.DWELLS_MAP[obs['mp_starcat_time']]] start = dwell['tstart'] stop = dwell['tstop'] stats = {k: obs[k] for k in ['agasc_id', 'obsid', 'slot', 'type', 'mp_starcat_time', 'timeline_id']} stats['mp_starcat_time'] = stats['mp_starcat_time'] droop_shift = get_droop_systematic_shift(star['MAG_ACA']) responsivity = get_responsivity(start) stats.update({'tstart': start, 'tstop': stop, 'mag_correction': - responsivity - droop_shift, 'responsivity': responsivity, 'droop_shift': droop_shift, 'mag_aca': star['MAG_ACA'], 'mag_aca_err': star['MAG_ACA_ERR'] / 100, 'row': obs['row'], 'col': obs['col'], }) # other default values stats.update({ 'mag_img': np.inf, 'mag_obs': np.inf, 'mag_obs_err': np.inf, 'aoacmag_mean': np.inf, 'aoacmag_err': np.inf, 'aoacmag_q25': np.inf, 'aoacmag_median': np.inf, 'aoacmag_q75': np.inf, 'counts_img': np.inf, 'counts_dark': np.inf, 'f_kalman': 0., 'f_track': 0., 'f_dr5': 0., 'f_dr3': 0., 'f_ok': 0., 'q25': np.inf, 'median': np.inf, 'q75': np.inf, 'mean': np.inf, 'mean_err': np.inf, 'std': np.inf, 'skew': np.inf, 'kurt': np.inf, 't_mean': np.inf, 't_mean_err': np.inf, 't_std': np.inf, 't_skew': np.inf, 't_kurt': np.inf, 'n': 0, 'n_ok': 0, 'outliers': -1, 'lf_variability_100s': np.inf, 'lf_variability_500s': np.inf, 'lf_variability_1000s': np.inf, 'tempccd': np.nan, 'dr_star': np.inf, }) if telem is None: telem = get_telemetry(obs) if len(telem) > 0: stats.update(calc_obs_stats(telem)) logger.debug(f' slot={stats["slot"]}, f_ok={stats["f_ok"]:.3f}, ' f'f_track={stats["f_track"]:.3f}, f_dr3={stats["f_dr3"]:.3f},' f' mag={stats["mag_obs"]:.2f}') return stats def calc_obs_stats(telem): """ Get summary magnitude statistics for an observation.
:param telem: dict Dictionary with telemetry (output of get_telemetry) :return: dict dictionary with stats """ times = telem['times'] kalman = (telem['AOACASEQ'] == 'KALM') & (telem['AOPCADMD'] == 'NPNT') track = (telem['AOACIIR'] == 'OK') & (telem['AOACFCT'] == 'TRAK') dr3 = (telem['dr'] < 3) dr5 = (telem['dr'] < 5) f_kalman = np.sum(kalman) / len(kalman) n_kalman = np.sum(kalman) f_track = np.sum(kalman & track) / n_kalman if n_kalman else 0 n_track = np.sum(kalman & track) f_3 = (np.sum(kalman & track & dr3) / n_track) if n_track else 0 f_5 = (np.sum(kalman & track & dr5) / n_track) if n_track else 0 ok = kalman & track & dr5 f_ok = np.sum(ok) / len(ok) if np.any(ok): yang_mean = np.mean(telem['yang_img'][ok] - telem['yang_star'][ok]) zang_mean = np.mean(telem['zang_img'][ok] - telem['zang_star'][ok]) dr_star = np.sqrt(yang_mean**2 + zang_mean**2) else: dr_star = np.inf stats = { 'f_kalman': f_kalman, 'f_track': f_track, 'f_dr5': f_5, 'f_dr3': f_3, 'f_ok': f_ok, 'n': len(telem['AOACMAG']), 'n_ok': np.sum(ok), 'dr_star': dr_star, } if stats['n_ok'] < 10: return stats aoacmag_q25, aoacmag_q50, aoacmag_q75 = np.quantile(telem['AOACMAG'][ok], [0.25, 0.5, 0.75]) mags = telem['mags'] q25, q50, q75 = np.quantile(mags[ok], [0.25, 0.5, 0.75]) iqr = q75 - q25 outlier = ok & ((mags > q75 + 3 * iqr) | (mags < q25 - 3 * iqr)) s_100s = rolling_mean(times, mags, window=100, selection=ok & ~outlier) s_500s = rolling_mean(times, mags, window=500, selection=ok & ~outlier) s_1000s = rolling_mean(times, mags, window=1000, selection=ok & ~outlier) s_100s = s_100s[~np.isnan(s_100s)] s_500s = s_500s[~np.isnan(s_500s)] s_1000s = s_1000s[~np.isnan(s_1000s)] stats.update({ 'aoacmag_mean': np.mean(telem['AOACMAG'][ok]), 'aoacmag_err': np.std(telem['AOACMAG'][ok]), 'aoacmag_q25': aoacmag_q25, 'aoacmag_median': aoacmag_q50, 'aoacmag_q75': aoacmag_q75, 'q25': q25, 'median': q50, 'q75': q75, 'counts_img': np.mean(telem['counts_img'][ok]), 'counts_dark': np.mean(telem['counts_dark'][ok]), 'mean': np.mean(mags[ok]), 'mean_err': scipy.stats.sem(mags[ok]), 'std': np.std(mags[ok]), 'skew': scipy.stats.skew(mags), 'kurt': scipy.stats.kurtosis(mags), 't_mean': np.mean(mags[ok & (~outlier)]), 't_mean_err': scipy.stats.sem(mags[ok & (~outlier)]), 't_std': np.std(mags[ok & (~outlier)]), 't_skew': scipy.stats.skew(mags[ok & (~outlier)]), 't_kurt': scipy.stats.kurtosis(mags[ok & (~outlier)]), 'outliers': np.sum(outlier), 'lf_variability_100s': np.max(s_100s) - np.min(s_100s), 'lf_variability_500s': np.max(s_500s) - np.min(s_500s), 'lf_variability_1000s': np.max(s_1000s) - np.min(s_1000s), 'tempccd': np.mean(telem['TEMPCCD'][ok]) - 273.16, }) stats.update({ 'mag_img': np.mean(telem['mags_img'][ok & (~outlier)]), 'mag_obs': stats['t_mean'], 'mag_obs_err': stats['t_mean_err'] }) return stats AGASC_ID_STATS_INFO = { 'last_obs_time': 'CXC seconds corresponding to the last mp_starcat_time for the star', 'agasc_id': 'AGASC ID of the star', 'mag_aca': 'ACA star magnitude from the AGASC catalog', 'mag_aca_err': 'ACA star magnitude uncertainty from the AGASC catalog', 'mag_obs': 'Estimated ACA star magnitude', 'mag_obs_err': 'Estimated ACA star magnitude uncertainty', 'mag_obs_std': 'Estimated ACA star magnitude standard deviation', 'color': 'Star color from the AGASC catalog', 'n_obsids': 'Number of observations for the star', 'n_obsids_fail': 'Number of observations which give an unexpected error', 'n_obsids_suspect': 'Number of observations deemed "suspect" and ignored in the magnitude estimate', 'n_obsids_ok': 'Number of observations 
considered in the magnitude estimate', 'n_no_track': 'Number of observations where the star was never tracked', 'n': 'Total number of image samples for the star', 'n_ok': 'Total number of image samples included in magnitude estimate for the star', 'f_ok': 'Fraction of the total samples included in magnitude estimate', 'median': 'Median magnitude over OK image samples', 'sigma_minus': '15.8% quantile of magnitude over OK image samples', 'sigma_plus': '84.2% quantile of magnitude over OK image samples', 'mean': 'Mean of magnitude over OK image samples', 'std': 'Standard deviation of magnitude over OK image samples', 'mag_weighted_mean': 'Average of magnitudes over observations, weighed by the inverse of its standard deviation', 'mag_weighted_std': 'Uncertainty in the weighted magnitude mean', 't_mean': 'Mean magnitude after removing outliers on a per-observation basis', 't_std': 'Magnitude standard deviation after removing outliers on a per-observation basis', 'n_outlier': 'Number of outliers, removed on a per-observation basis', 't_mean_1': 'Mean magnitude after removing 1.5*IQR outliers', 't_std_1': 'Magnitude standard deviation after removing 1.5*IQR outliers', 'n_outlier_1': 'Number of 1.5*IQR outliers', 't_mean_2': 'Mean magnitude after removing 3*IQR outliers', 't_std_2': 'Magnitude standard deviation after removing 3*IQR outliers', 'n_outlier_2': 'Number of 3*IQR outliers', 'selected_atol': 'abs(mag_obs - mag_aca) > 0.3', 'selected_rtol': 'abs(mag_obs - mag_aca) > 3 * mag_aca_err', 'selected_mag_aca_err': 'mag_aca_err > 0.2', 'selected_color': '(color == 1.5) | (color == 0.7)', 't_mean_dr3': 'Truncated mean magnitude after removing outliers and samples with ' 'angular residual > 3 arcsec on a per-observation basis', 't_std_dr3': 'Truncated magnitude standard deviation after removing outliers and samples with ' 'angular residual > 3 arcsec on a per-observation basis', 'mean_dr3': 'Mean magnitude after removing outliers and samples with ' 'angular residual > 3 arcsec on a per-observation basis', 'std_dr3': 'Magnitude standard deviation after removing outliers and samples with ' 'angular residual > 3 arcsec on a per-observation basis', 'f_dr3': 'Fraction of OK image samples with angular residual less than 3 arcsec', 'n_dr3': 'Number of OK image samples with angular residual less than 3 arcsec', 'n_dr3_outliers': 'Number of magnitude outliers after removing outliers and samples with ' 'angular residual > 3 arcsec on a per-observation basis', 'median_dr3': 'Median magnitude after removing outliers and samples with ' 'angular residual > 3 arcsec on a per-observation basis', 'sigma_minus_dr3': '15.8% quantile of magnitude after removing outliers and samples with ' 'angular residual > 3 arcsec on a per-observation basis', 'sigma_plus_dr3': '84.2% quantile of magnitude after removing outliers and samples with ' 'angular residual > 3 arcsec on a per-observation basis', 't_mean_dr5': 'Truncated mean magnitude after removing outliers and samples with ' 'angular residual > 5 arcsec on a per-observation basis', 't_std_dr5': 'Truncated magnitude standard deviation after removing outliers and samples with ' 'angular residual > 5 arcsec on a per-observation basis', 'mean_dr5': 'Mean magnitude after removing outliers and samples with ' 'angular residual > 5 arcsec on a per-observation basis', 'std_dr5': 'Magnitude standard deviation after removing outliers and samples with ' 'angular residual > 5 arcsec on a per-observation basis', 'f_dr5': 'Fraction of OK image samples with angular residual less than 
5 arcsec', 'n_dr5': 'Number of OK image samples with angular residual less than 5 arcsec', 'n_dr5_outliers': 'Number of magnitude outliers after removing outliers and samples with ' 'angular residual > 5 arcsec on a per-observation basis', 'median_dr5': 'Median magnitude after removing outliers and samples with ' 'angular residual > 5 arcsec on a per-observation basis', 'sigma_minus_dr5': '15.8% quantile of magnitude after removing outliers and samples with ' 'angular residual > 5 arcsec on a per-observation basis', 'sigma_plus_dr5': '84.2% quantile of magnitude after removing outliers and samples with ' 'angular residual > 5 arcsec on a per-observation basis', } def get_agasc_id_stats(agasc_id, obs_status_override=None, tstop=None): """ Get summary magnitude statistics for an AGASC ID. :param agasc_id: int :param obs_status_override: dict. Dictionary overriding the OK flag for specific observations. Keys are (OBSID, AGASC ID) pairs, values are dictionaries like {'obs_ok': True, 'comments': 'some comment'} :param tstop: cxotime-compatible timestamp Only entries in catalogs.STARS_OBS prior to this timestamp are considered. :return: dict dictionary with stats """ logger.debug(f'Getting stats for AGASC ID {agasc_id}...') min_mag_obs_err = 0.03 if not obs_status_override: obs_status_override = {} star_obs_catalogs.load(tstop=tstop) # Get a table of every time the star has been observed idx0, idx1 = star_obs_catalogs.STARS_OBS_MAP[agasc_id] star_obs = star_obs_catalogs.STARS_OBS[idx0:idx1] if len(star_obs) > 1: star_obs = star_obs.loc['mp_starcat_time', sorted(star_obs['mp_starcat_time'])] # this is the default result, if nothing gets calculated result = { 'last_obs_time': 0, 'agasc_id': agasc_id, 'mag_aca': np.nan, 'mag_aca_err': np.nan, 'mag_obs': 0., 'mag_obs_err': np.nan, 'mag_obs_std': 0., 'color': np.nan, 'n_obsids': 0, 'n_obsids_fail': 0, 'n_obsids_suspect': 0, 'n_obsids_ok': 0, 'n_no_track': 0, 'n': 0, 'n_ok': 0, 'f_ok': 0., 'median': 0, 'sigma_minus': 0, 'sigma_plus': 0, 'mean': 0, 'std': 0, 'mag_weighted_mean': 0, 'mag_weighted_std': 0, 't_mean': 0, 't_std': 0, 'n_outlier': 0, 't_mean_1': 0, 't_std_1': 0, 'n_outlier_1': 0, 't_mean_2': 0, 't_std_2': 0, 'n_outlier_2': 0, # these are the criteria for including in supplement 'selected_atol': False, 'selected_rtol': False, 'selected_mag_aca_err': False, 'selected_color': False } for dr in [3, 5]: result.update({ f't_mean_dr{dr}': 0, f't_std_dr{dr}': 0, f't_mean_dr{dr}_not': 0, f't_std_dr{dr}_not': 0, f'mean_dr{dr}': 0, f'std_dr{dr}': 0, f'f_dr{dr}': 0, f'n_dr{dr}': 0, f'n_dr{dr}_outliers': 0, f'median_dr{dr}': 0, f'sigma_minus_dr{dr}': 0, f'sigma_plus_dr{dr}': 0, }) n_obsids = len(star_obs) # exclude star_obs that are in obs_status_override with status != 0 excluded_obs = np.array([((oi, ai) in obs_status_override and obs_status_override[(oi, ai)]['status'] != 0) for oi, ai in star_obs[['mp_starcat_time', 'agasc_id']]]) if np.any(excluded_obs): logger.debug(' Excluding observations flagged in obs-status table: ' f'{list(star_obs[excluded_obs]["obsid"])}') included_obs = np.array([((oi, ai) in obs_status_override and obs_status_override[(oi, ai)]['status'] == 0) for oi, ai in star_obs[['mp_starcat_time', 'agasc_id']]]) if np.any(included_obs): logger.debug(' Including observations marked OK in obs-status table: ' f'{list(star_obs[included_obs]["obsid"])}') failures = [] all_telem = [] stats = [] last_obs_time = 0 for i, obs in enumerate(star_obs): oi, ai = obs['mp_starcat_time', 'agasc_id'] comment = '' if (oi, ai) in obs_status_override: 
status = obs_status_override[(oi, ai)] logger.debug(f' overriding status for (AGASC ID {ai}, starcat time {oi}): ' f'{status["status"]}, {status["comments"]}') comment = status['comments'] try: last_obs_time = CxoTime(obs['mp_starcat_time']).cxcsec telem = Table(get_telemetry(obs)) obs_stat = get_obs_stats(obs, telem={k: telem[k] for k in telem.colnames}) obs_stat.update({ 'obs_ok': ( included_obs[i] | ( ~excluded_obs[i] & (obs_stat['n'] > 10) & (obs_stat['f_track'] > 0.3) & (obs_stat['lf_variability_100s'] < 1) ) ), 'obs_suspect': False, 'obs_fail': False, 'comments': comment }) all_telem.append(telem) stats.append(obs_stat) if not obs_stat['obs_ok'] and not excluded_obs[i]: obs_stat['obs_suspect'] = True failures.append( dict(MagStatsException(msg='Suspect observation', agasc_id=obs['agasc_id'], obsid=obs['obsid'], mp_starcat_time=obs["mp_starcat_time"],))) except MagStatsException as e: # this except branch deals with exceptions thrown by get_telemetry all_telem.append(None) # length-zero telemetry short-circuits any new call to get_telemetry obs_stat = get_obs_stats(obs, telem=[]) obs_stat.update({ 'obs_ok': False, 'obs_suspect': False, 'obs_fail': True, 'comments': comment if excluded_obs[i] else f'Error: {e.msg}.' }) stats.append(obs_stat) if not excluded_obs[i]: logger.debug( f' Error in get_agasc_id_stats({agasc_id=}, obsid={obs["obsid"]}): {e}') failures.append(dict(e)) stats = Table(stats) stats['w'] = np.nan stats['mean_corrected'] = np.nan stats['weighted_mean'] = np.nan star = get_star(agasc_id, use_supplement=False) result.update({ 'last_obs_time': last_obs_time, 'mag_aca': star['MAG_ACA'], 'mag_aca_err': star['MAG_ACA_ERR'] / 100, 'color': star['COLOR1'], 'n_obsids_fail': len(failures), 'n_obsids_suspect': np.sum(stats['obs_suspect']), 'n_obsids': n_obsids, }) if not np.any(~excluded_obs): # this happens when all observations have been flagged as not OK a priori (obs-status). logger.debug(f' Skipping star in get_agasc_id_stats({agasc_id=}).' ' All observations are flagged as not good.') return result, stats, failures if len(all_telem) - len(failures) <= 0: # and we reach here if some observations were not flagged as bad, but all failed.
logger.debug(f' Error in get_agasc_id_stats({agasc_id=}):' ' There is no OK observation.') return result, stats, failures excluded_obs += np.array([t is None for t in all_telem]) logger.debug(' identifying outlying observations...') for i, (s, t) in enumerate(zip(stats, all_telem)): if excluded_obs[i]: continue t['obs_ok'] = np.ones_like(t['ok'], dtype=bool) * s['obs_ok'] logger.debug(' identifying outlying observations ' f'(OBSID={s["obsid"]}, mp_starcat_time={s["mp_starcat_time"]})') t['obs_outlier'] = np.zeros_like(t['ok']) if np.any(t['ok']) and s['f_track'] > 0 and s['obs_ok']: iqr = s['q75'] - s['q25'] t['obs_outlier'] = ( t['ok'] & (iqr > 0) & ((t['mags'] < s['q25'] - 1.5 * iqr) | (t['mags'] > s['q75'] + 1.5 * iqr)) ) all_telem = vstack([Table(t) for i, t in enumerate(all_telem) if not excluded_obs[i]]) mags = all_telem['mags'] ok = all_telem['ok'] & all_telem['obs_ok'] f_ok = np.sum(ok) / len(ok) result.update({ 'mag_obs_err': min_mag_obs_err, 'n_obsids_ok': np.sum(stats['obs_ok']), 'n_no_track': np.sum((~stats['obs_ok'])) + np.sum(stats['f_ok'][stats['obs_ok']] < 0.3), 'n': len(ok), 'n_ok': np.sum(ok), 'f_ok': f_ok, }) if result['n_ok'] < 10: return result, stats, failures sigma_minus, q25, median, q75, sigma_plus = np.quantile(mags[ok], [0.158, 0.25, 0.5, 0.75, 0.842]) iqr = q75 - q25 outlier_1 = ok & ((mags > q75 + 1.5 * iqr) | (mags < q25 - 1.5 * iqr)) outlier_2 = ok & ((mags > q75 + 3 * iqr) | (mags < q25 - 3 * iqr)) outlier = all_telem['obs_outlier'] # combine measurements using a weighted mean obs_ok = stats['obs_ok'] min_std = max(0.1, stats[obs_ok]['std'].min()) stats['w'][obs_ok] = np.where(stats['std'][obs_ok] != 0, 1. / stats['std'][obs_ok], 1. / min_std) stats['mean_corrected'][obs_ok] = stats['t_mean'][obs_ok] + stats['mag_correction'][obs_ok] stats['weighted_mean'][obs_ok] = stats['mean_corrected'][obs_ok] * stats['w'][obs_ok] mag_weighted_mean = (stats[obs_ok]['weighted_mean'].sum() / stats[obs_ok]['w'].sum()) mag_weighted_std = ( np.sqrt(((stats[obs_ok]['mean'] - mag_weighted_mean)**2 * stats[obs_ok]['w']).sum() / stats[obs_ok]['w'].sum()) ) result.update({ 'agasc_id': agasc_id, 'n': len(ok), 'n_ok': np.sum(ok), 'f_ok': f_ok, 'median': median, 'sigma_minus': sigma_minus, 'sigma_plus': sigma_plus, 'mean': np.mean(mags[ok]), 'std': np.std(mags[ok]), 'mag_weighted_mean': mag_weighted_mean, 'mag_weighted_std': mag_weighted_std, 't_mean': np.mean(mags[ok & (~outlier)]), 't_std': np.std(mags[ok & (~outlier)]), 'n_outlier': np.sum(ok & outlier), 't_mean_1': np.mean(mags[ok & (~outlier_1)]), 't_std_1': np.std(mags[ok & (~outlier_1)]), 'n_outlier_1': np.sum(ok & outlier_1), 't_mean_2': np.mean(mags[ok & (~outlier_2)]), 't_std_2': np.std(mags[ok & (~outlier_2)]), 'n_outlier_2': np.sum(ok & outlier_2), }) for dr in [3, 5]: k = ok & (all_telem['dr'] < dr) k2 = ok & (all_telem['dr'] >= dr) if not np.any(k): continue sigma_minus, q25, median, q75, sigma_plus = np.quantile(mags[k], [0.158, 0.25, 0.5, 0.75, 0.842]) outlier = ok & all_telem['obs_outlier'] mag_not = np.nanmean(mags[k2 & (~outlier)]) if np.sum(k2 & (~outlier)) else np.nan std_not = np.nanstd(mags[k2 & (~outlier)]) if np.sum(k2 & (~outlier)) else np.nan result.update({ f't_mean_dr{dr}': np.mean(mags[k & (~outlier)]), f't_std_dr{dr}': np.std(mags[k & (~outlier)]), f't_mean_dr{dr}_not': mag_not, f't_std_dr{dr}_not': std_not, f'mean_dr{dr}': np.mean(mags[k]), f'std_dr{dr}': np.std(mags[k]), f'f_dr{dr}': np.sum(k) / np.sum(ok), f'n_dr{dr}': np.sum(k), f'n_dr{dr}_outliers': np.sum(k & outlier), f'median_dr{dr}': median, 
f'sigma_minus_dr{dr}': sigma_minus, f'sigma_plus_dr{dr}': sigma_plus, }) result.update({ 'mag_obs': result['t_mean_dr5'], 'mag_obs_err': np.sqrt(result['t_std_dr5']**2 + min_mag_obs_err**2), 'mag_obs_std': result['t_std_dr5'], }) # these are the criteria for including in supplement result.update({ 'selected_atol': np.abs(result['mag_obs'] - result['mag_aca']) > 0.3, 'selected_rtol': np.abs(result['mag_obs'] - result['mag_aca']) > 3 * result['mag_aca_err'], 'selected_mag_aca_err': result['mag_aca_err'] > 0.2, 'selected_color': (result['color'] == 1.5) | (np.isclose(result['color'], 0.7)) }) logger.debug(f' stats for AGASC ID {agasc_id}: ' f' {stats["mag_obs"][0]}') return result, stats, failures
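The functions above are the main entry points of this module; the short sketch below is not part of the original source and is added only to illustrate how they might be called. It assumes the functions defined above are importable in the current namespace, that the mica/cheta archives and star_obs_catalogs data are available locally, and the AGASC ID shown is purely hypothetical.
# Illustrative usage sketch -- assumptions as noted above; not part of the original module.
import logging

logging.getLogger('agasc.supplement').setLevel(logging.DEBUG)

agasc_id = 658152032  # hypothetical AGASC ID, for illustration only
result, obs_stats, failures = get_agasc_id_stats(agasc_id)

# result is a dict of per-star statistics, obs_stats a table with one row per
# observation, and failures a list of per-observation error records.
if failures:
    print(f'{len(failures)} observation(s) could not be processed')
print(f"mag_aca={result['mag_aca']:.2f}, "
      f"mag_obs={result['mag_obs']:.2f} +/- {result['mag_obs_err']:.2f}, "
      f"n_obsids={result['n_obsids']}")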
[ "logging.getLogger", "numpy.char.rstrip", "numpy.sqrt", "astropy.table.Table", "cxotime.CxoTime", "numpy.array", "numpy.nanmean", "sys.exc_info", "numpy.arctan2", "numpy.isfinite", "astropy.table.vstack", "cheta.fetch.Msidset", "numpy.arange", "Quaternion.Quat", "numpy.mean", "numpy.where", "chandra_aca.transform.count_rate_to_mag", "numpy.max", "numpy.exp", "numpy.min", "numpy.round", "chandra_aca.transform.pixels_to_yagzag", "numpy.abs", "numpy.nanstd", "numpy.ma.array", "numpy.in1d", "numpy.any", "numpy.squeeze", "numba.jit", "numpy.isnan", "numpy.std", "agasc.get_star", "mica.archive.aca_l0.get_slot_data", "traceback.extract_tb", "numpy.atleast_1d", "Chandra.Time.DateTime", "numpy.intersect1d", "numpy.ones_like", "numpy.isclose", "numpy.ma.sum", "numpy.sum", "numpy.quantile", "collections.defaultdict", "numpy.zeros_like" ]
[((624, 661), 'logging.getLogger', 'logging.getLogger', (['"""agasc.supplement"""'], {}), "('agasc.supplement')\n", (641, 661), False, 'import logging\n'), ((1619, 1655), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : -1)'], {}), '(lambda : -1)\n', (1642, 1655), False, 'import collections\n'), ((5635, 5659), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5644, 5659), False, 'import numba\n'), ((17652, 17676), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (17661, 17676), False, 'import numba\n'), ((703, 1174), 'numpy.array', 'np.array', (['[[True, True, True, True, True, True, True, True], [True, True, False, \n False, False, False, True, True], [True, False, False, False, False, \n False, False, True], [True, False, False, False, False, False, False, \n True], [True, False, False, False, False, False, False, True], [True, \n False, False, False, False, False, False, True], [True, True, False, \n False, False, False, True, True], [True, True, True, True, True, True, \n True, True]]'], {}), '([[True, True, True, True, True, True, True, True], [True, True, \n False, False, False, False, True, True], [True, False, False, False, \n False, False, False, True], [True, False, False, False, False, False, \n False, True], [True, False, False, False, False, False, False, True], [\n True, False, False, False, False, False, False, True], [True, True, \n False, False, False, False, True, True], [True, True, True, True, True,\n True, True, True]])\n', (711, 1174), True, 'import numpy as np\n'), ((3383, 3408), 'Chandra.Time.DateTime', 'DateTime', (["params['t_ref']"], {}), "(params['t_ref'])\n", (3391, 3408), False, 'from Chandra.Time import DateTime\n'), ((3582, 3598), 'numpy.squeeze', 'np.squeeze', (['dmag'], {}), '(dmag)\n', (3592, 3598), True, 'import numpy as np\n'), ((4833, 4860), 'numpy.exp', 'np.exp', (['((magnitude - a) / b)'], {}), '((magnitude - a) / b)\n', (4839, 4860), True, 'import numpy as np\n'), ((6500, 6543), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (6508, 6543), True, 'import numpy as np\n'), ((6747, 6784), 'numpy.sum', 'np.sum', (['(q ** 2)'], {'axis': '(1)', 'keepdims': '(True)'}), '(q ** 2, axis=1, keepdims=True)\n', (6753, 6784), True, 'import numpy as np\n'), ((6967, 6976), 'Quaternion.Quat', 'Quat', ([], {'q': 'q'}), '(q=q)\n', (6971, 6976), False, 'from Quaternion import Quat\n'), ((9251, 9320), 'agasc.get_star', 'get_star', (["obs['agasc_id']"], {'date': "dwell['tstart']", 'use_supplement': '(False)'}), "(obs['agasc_id'], date=dwell['tstart'], use_supplement=False)\n", (9259, 9320), False, 'from agasc import get_star\n'), ((9903, 10001), 'mica.archive.aca_l0.get_slot_data', 'aca_l0.get_slot_data', (['start', 'stop'], {'slot': "obs['slot']", 'centered_8x8': '(True)', 'columns': 'slot_data_cols'}), "(start, stop, slot=obs['slot'], centered_8x8=True,\n columns=slot_data_cols)\n", (9923, 10001), False, 'from mica.archive import aca_l0\n'), ((10282, 10315), 'cheta.fetch.Msidset', 'fetch.Msidset', (['names', 'start', 'stop'], {}), '(names, start, stop)\n', (10295, 10315), False, 'from cheta import fetch\n'), ((10808, 10840), 'numpy.round', 'np.round', (['((times - tmin) / 1.025)'], {}), '((times - tmin) / 1.025)\n', (10816, 10840), True, 'import numpy as np\n'), ((10850, 10909), 'numpy.round', 'np.round', (["((slot_data['END_INTEG_TIME'].data - tmin) / 1.025)"], {}), "((slot_data['END_INTEG_TIME'].data - tmin) / 1.025)\n", (10858, 
10909), True, 'import numpy as np\n'), ((10926, 10969), 'numpy.intersect1d', 'np.intersect1d', (['t1', 't2'], {'return_indices': '(True)'}), '(t1, t2, return_indices=True)\n', (10940, 10969), True, 'import numpy as np\n'), ((13810, 13840), 'numpy.sqrt', 'np.sqrt', (['(yang ** 2 + zang ** 2)'], {}), '(yang ** 2 + zang ** 2)\n', (13817, 13840), True, 'import numpy as np\n'), ((13844, 13868), 'numpy.any', 'np.any', (['(ok & (rang < 10))'], {}), '(ok & (rang < 10))\n', (13850, 13868), True, 'import numpy as np\n'), ((16210, 16223), 'astropy.table.vstack', 'vstack', (['telem'], {}), '(telem)\n', (16216, 16223), False, 'from astropy.table import Table, vstack\n'), ((16935, 16948), 'astropy.table.vstack', 'vstack', (['telem'], {}), '(telem)\n', (16941, 16948), False, 'from astropy.table import Table, vstack\n'), ((18792, 18880), 'numpy.where', 'np.where', (["(slot_data['IMGSIZE'] == 8)", "slot_data['IMGROW0']", "(slot_data['IMGROW0'] - 1)"], {}), "(slot_data['IMGSIZE'] == 8, slot_data['IMGROW0'], slot_data[\n 'IMGROW0'] - 1)\n", (18800, 18880), True, 'import numpy as np\n'), ((18972, 19060), 'numpy.where', 'np.where', (["(slot_data['IMGSIZE'] == 8)", "slot_data['IMGCOL0']", "(slot_data['IMGCOL0'] - 1)"], {}), "(slot_data['IMGSIZE'] == 8, slot_data['IMGCOL0'], slot_data[\n 'IMGCOL0'] - 1)\n", (18980, 19060), True, 'import numpy as np\n'), ((19602, 19640), 'chandra_aca.transform.count_rate_to_mag', 'count_rate_to_mag', (['(counts[m] * 5 / 1.7)'], {}), '(counts[m] * 5 / 1.7)\n', (19619, 19640), False, 'from chandra_aca.transform import count_rate_to_mag, pixels_to_yagzag\n'), ((19746, 19794), 'numpy.ma.array', 'np.ma.array', (['(dark * 1.696 / 5)'], {'mask': 'img_sub.mask'}), '(dark * 1.696 / 5, mask=img_sub.mask)\n', (19757, 19794), True, 'import numpy as np\n'), ((19809, 19860), 'numpy.ma.array', 'np.ma.array', (["slot_data['IMGRAW']"], {'mask': 'img_sub.mask'}), "(slot_data['IMGRAW'], mask=img_sub.mask)\n", (19820, 19860), True, 'import numpy as np\n'), ((20130, 20168), 'numpy.ma.sum', 'np.ma.sum', (["slot_data['IMGRAW']"], {'axis': '(1)'}), "(slot_data['IMGRAW'], axis=1)\n", (20139, 20168), True, 'import numpy as np\n'), ((20288, 20326), 'numpy.ma.sum', 'np.ma.sum', (["slot_data['IMGRAW']"], {'axis': '(2)'}), "(slot_data['IMGRAW'], axis=2)\n", (20297, 20326), True, 'import numpy as np\n'), ((20508, 20548), 'chandra_aca.transform.pixels_to_yagzag', 'pixels_to_yagzag', (['y_pixel[m]', 'z_pixel[m]'], {}), '(y_pixel[m], z_pixel[m])\n', (20524, 20548), False, 'from chandra_aca.transform import count_rate_to_mag, pixels_to_yagzag\n'), ((25469, 25516), 'agasc.get_star', 'get_star', (["obs['agasc_id']"], {'use_supplement': '(False)'}), "(obs['agasc_id'], use_supplement=False)\n", (25477, 25516), False, 'from agasc import get_star\n'), ((28281, 28295), 'numpy.sum', 'np.sum', (['kalman'], {}), '(kalman)\n', (28287, 28295), True, 'import numpy as np\n'), ((28377, 28399), 'numpy.sum', 'np.sum', (['(kalman & track)'], {}), '(kalman & track)\n', (28383, 28399), True, 'import numpy as np\n'), ((28609, 28619), 'numpy.any', 'np.any', (['ok'], {}), '(ok)\n', (28615, 28619), True, 'import numpy as np\n'), ((29193, 29245), 'numpy.quantile', 'np.quantile', (["telem['AOACMAG'][ok]", '[0.25, 0.5, 0.75]'], {}), "(telem['AOACMAG'][ok], [0.25, 0.5, 0.75])\n", (29204, 29245), True, 'import numpy as np\n'), ((29292, 29332), 'numpy.quantile', 'np.quantile', (['mags[ok]', '[0.25, 0.5, 0.75]'], {}), '(mags[ok], [0.25, 0.5, 0.75])\n', (29303, 29332), True, 'import numpy as np\n'), ((39352, 39505), 'numpy.array', 'np.array', 
(["[((oi, ai) in obs_status_override and obs_status_override[oi, ai]['status'] !=\n 0) for oi, ai in star_obs[['mp_starcat_time', 'agasc_id']]]"], {}), "([((oi, ai) in obs_status_override and obs_status_override[oi, ai][\n 'status'] != 0) for oi, ai in star_obs[['mp_starcat_time', 'agasc_id']]])\n", (39360, 39505), True, 'import numpy as np\n'), ((39568, 39588), 'numpy.any', 'np.any', (['excluded_obs'], {}), '(excluded_obs)\n', (39574, 39588), True, 'import numpy as np\n'), ((39753, 39906), 'numpy.array', 'np.array', (["[((oi, ai) in obs_status_override and obs_status_override[oi, ai]['status'] ==\n 0) for oi, ai in star_obs[['mp_starcat_time', 'agasc_id']]]"], {}), "([((oi, ai) in obs_status_override and obs_status_override[oi, ai][\n 'status'] == 0) for oi, ai in star_obs[['mp_starcat_time', 'agasc_id']]])\n", (39761, 39906), True, 'import numpy as np\n'), ((39969, 39989), 'numpy.any', 'np.any', (['included_obs'], {}), '(included_obs)\n', (39975, 39989), True, 'import numpy as np\n'), ((42567, 42579), 'astropy.table.Table', 'Table', (['stats'], {}), '(stats)\n', (42572, 42579), False, 'from astropy.table import Table, vstack\n'), ((42689, 42729), 'agasc.get_star', 'get_star', (['agasc_id'], {'use_supplement': '(False)'}), '(agasc_id, use_supplement=False)\n', (42697, 42729), False, 'from agasc import get_star\n'), ((43674, 43716), 'numpy.array', 'np.array', (['[(t is None) for t in all_telem]'], {}), '([(t is None) for t in all_telem])\n', (43682, 43716), True, 'import numpy as np\n'), ((45042, 45096), 'numpy.quantile', 'np.quantile', (['mags[ok]', '[0.158, 0.25, 0.5, 0.75, 0.842]'], {}), '(mags[ok], [0.158, 0.25, 0.5, 0.75, 0.842])\n', (45053, 45096), True, 'import numpy as np\n'), ((45517, 45595), 'numpy.where', 'np.where', (["(stats['std'][obs_ok] != 0)", "(1.0 / stats['std'][obs_ok])", '(1.0 / min_std)'], {}), "(stats['std'][obs_ok] != 0, 1.0 / stats['std'][obs_ok], 1.0 / min_std)\n", (45525, 45595), True, 'import numpy as np\n'), ((5360, 5375), 'numpy.ones_like', 'np.ones_like', (['f'], {}), '(f)\n', (5372, 5375), True, 'import numpy as np\n'), ((5431, 5458), 'numpy.ones_like', 'np.ones_like', (['f'], {'dtype': 'bool'}), '(f, dtype=bool)\n', (5443, 5458), True, 'import numpy as np\n'), ((6847, 6860), 'numpy.sqrt', 'np.sqrt', (['norm'], {}), '(norm)\n', (6854, 6860), True, 'import numpy as np\n'), ((6879, 6892), 'numpy.sqrt', 'np.sqrt', (['norm'], {}), '(norm)\n', (6886, 6892), True, 'import numpy as np\n'), ((7203, 7239), 'numpy.arctan2', 'np.arctan2', (['d_aca[:, 1]', 'd_aca[:, 0]'], {}), '(d_aca[:, 1], d_aca[:, 0])\n', (7213, 7239), True, 'import numpy as np\n'), ((7266, 7302), 'numpy.arctan2', 'np.arctan2', (['d_aca[:, 2]', 'd_aca[:, 0]'], {}), '(d_aca[:, 2], d_aca[:, 0])\n', (7276, 7302), True, 'import numpy as np\n'), ((12026, 12058), 'numpy.zeros_like', 'np.zeros_like', (['times'], {'dtype': 'bool'}), '(times, dtype=bool)\n', (12039, 12058), True, 'import numpy as np\n'), ((12849, 12876), 'numpy.char.rstrip', 'np.char.rstrip', (['telem[name]'], {}), '(telem[name])\n', (12863, 12876), True, 'import numpy as np\n'), ((13894, 13948), 'numpy.quantile', 'np.quantile', (['yang[ok & (rang < 10)]', '[0.25, 0.5, 0.75]'], {}), '(yang[ok & (rang < 10)], [0.25, 0.5, 0.75])\n', (13905, 13948), True, 'import numpy as np\n'), ((13973, 14027), 'numpy.quantile', 'np.quantile', (['zang[ok & (rang < 10)]', '[0.25, 0.5, 0.75]'], {}), '(zang[ok & (rang < 10)], [0.25, 0.5, 0.75])\n', (13984, 14027), True, 'import numpy as np\n'), ((19505, 19531), 'numpy.ma.sum', 'np.ma.sum', (['img_sub'], {'axis': 
'(1)'}), '(img_sub, axis=1)\n', (19514, 19531), True, 'import numpy as np\n'), ((19888, 19911), 'numpy.ma.sum', 'np.ma.sum', (['dark'], {'axis': '(1)'}), '(dark, axis=1)\n', (19897, 19911), True, 'import numpy as np\n'), ((19947, 19973), 'numpy.ma.sum', 'np.ma.sum', (['img_raw'], {'axis': '(1)'}), '(img_raw, axis=1)\n', (19956, 19973), True, 'import numpy as np\n'), ((20089, 20101), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (20098, 20101), True, 'import numpy as np\n'), ((20179, 20228), 'numpy.ma.sum', 'np.ma.sum', (['(pixel_center * projected_image)'], {'axis': '(1)'}), '(pixel_center * projected_image, axis=1)\n', (20188, 20228), True, 'import numpy as np\n'), ((20231, 20265), 'numpy.ma.sum', 'np.ma.sum', (['projected_image'], {'axis': '(1)'}), '(projected_image, axis=1)\n', (20240, 20265), True, 'import numpy as np\n'), ((20337, 20386), 'numpy.ma.sum', 'np.ma.sum', (['(pixel_center * projected_image)'], {'axis': '(1)'}), '(pixel_center * projected_image, axis=1)\n', (20346, 20386), True, 'import numpy as np\n'), ((20389, 20423), 'numpy.ma.sum', 'np.ma.sum', (['projected_image'], {'axis': '(1)'}), '(projected_image, axis=1)\n', (20398, 20423), True, 'import numpy as np\n'), ((28237, 28251), 'numpy.sum', 'np.sum', (['kalman'], {}), '(kalman)\n', (28243, 28251), True, 'import numpy as np\n'), ((28580, 28590), 'numpy.sum', 'np.sum', (['ok'], {}), '(ok)\n', (28586, 28590), True, 'import numpy as np\n'), ((28641, 28696), 'numpy.mean', 'np.mean', (["(telem['yang_img'][ok] - telem['yang_star'][ok])"], {}), "(telem['yang_img'][ok] - telem['yang_star'][ok])\n", (28648, 28696), True, 'import numpy as np\n'), ((28717, 28772), 'numpy.mean', 'np.mean', (["(telem['zang_img'][ok] - telem['zang_star'][ok])"], {}), "(telem['zang_img'][ok] - telem['zang_star'][ok])\n", (28724, 28772), True, 'import numpy as np\n'), ((28791, 28831), 'numpy.sqrt', 'np.sqrt', (['(yang_mean ** 2 + zang_mean ** 2)'], {}), '(yang_mean ** 2 + zang_mean ** 2)\n', (28798, 28831), True, 'import numpy as np\n'), ((29054, 29064), 'numpy.sum', 'np.sum', (['ok'], {}), '(ok)\n', (29060, 29064), True, 'import numpy as np\n'), ((43057, 43078), 'numpy.any', 'np.any', (['(~excluded_obs)'], {}), '(~excluded_obs)\n', (43063, 43078), True, 'import numpy as np\n'), ((44122, 44144), 'numpy.zeros_like', 'np.zeros_like', (["t['ok']"], {}), "(t['ok'])\n", (44135, 44144), True, 'import numpy as np\n'), ((44619, 44629), 'numpy.sum', 'np.sum', (['ok'], {}), '(ok)\n', (44625, 44629), True, 'import numpy as np\n'), ((47145, 47198), 'numpy.quantile', 'np.quantile', (['mags[k]', '[0.158, 0.25, 0.5, 0.75, 0.842]'], {}), '(mags[k], [0.158, 0.25, 0.5, 0.75, 0.842])\n', (47156, 47198), True, 'import numpy as np\n'), ((3533, 3552), 'numpy.atleast_1d', 'np.atleast_1d', (['time'], {}), '(time)\n', (3546, 3552), True, 'import numpy as np\n'), ((6583, 6674), 'numpy.array', 'np.array', (["[telem['AOATTQT1'], telem['AOATTQT2'], telem['AOATTQT3'], telem['AOATTQT4']]"], {}), "([telem['AOATTQT1'], telem['AOATTQT2'], telem['AOATTQT3'], telem[\n 'AOATTQT4']])\n", (6591, 6674), True, 'import numpy as np\n'), ((10746, 10781), 'numpy.min', 'np.min', (["slot_data['END_INTEG_TIME']"], {}), "(slot_data['END_INTEG_TIME'])\n", (10752, 10781), True, 'import numpy as np\n'), ((10783, 10796), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (10789, 10796), True, 'import numpy as np\n'), ((14301, 14338), 'numpy.mean', 'np.mean', (['yang[ok & ~centroid_outlier]'], {}), '(yang[ok & ~centroid_outlier])\n', (14308, 14338), True, 'import numpy as np\n'), ((14368, 14405), 
'numpy.mean', 'np.mean', (['zang[ok & ~centroid_outlier]'], {}), '(zang[ok & ~centroid_outlier])\n', (14375, 14405), True, 'import numpy as np\n'), ((17185, 17207), 'numpy.any', 'np.any', (["telem['ok'][o]"], {}), "(telem['ok'][o])\n", (17191, 17207), True, 'import numpy as np\n'), ((17249, 17270), 'numpy.isfinite', 'np.isfinite', (["s['q75']"], {}), "(s['q75'])\n", (17260, 17270), True, 'import numpy as np\n'), ((17275, 17296), 'numpy.isfinite', 'np.isfinite', (["s['q25']"], {}), "(s['q25'])\n", (17286, 17296), True, 'import numpy as np\n'), ((18582, 18620), 'numpy.mean', 'np.mean', (["(slot_data['TEMPCCD'] - 273.16)"], {}), "(slot_data['TEMPCCD'] - 273.16)\n", (18589, 18620), True, 'import numpy as np\n'), ((19554, 19573), 'numpy.isfinite', 'np.isfinite', (['counts'], {}), '(counts)\n', (19565, 19573), True, 'import numpy as np\n'), ((28310, 28332), 'numpy.sum', 'np.sum', (['(kalman & track)'], {}), '(kalman & track)\n', (28316, 28332), True, 'import numpy as np\n'), ((28411, 28439), 'numpy.sum', 'np.sum', (['(kalman & track & dr3)'], {}), '(kalman & track & dr3)\n', (28417, 28439), True, 'import numpy as np\n'), ((28480, 28508), 'numpy.sum', 'np.sum', (['(kalman & track & dr5)'], {}), '(kalman & track & dr5)\n', (28486, 28508), True, 'import numpy as np\n'), ((29675, 29691), 'numpy.isnan', 'np.isnan', (['s_100s'], {}), '(s_100s)\n', (29683, 29691), True, 'import numpy as np\n'), ((29714, 29730), 'numpy.isnan', 'np.isnan', (['s_500s'], {}), '(s_500s)\n', (29722, 29730), True, 'import numpy as np\n'), ((29755, 29772), 'numpy.isnan', 'np.isnan', (['s_1000s'], {}), '(s_1000s)\n', (29763, 29772), True, 'import numpy as np\n'), ((29818, 29847), 'numpy.mean', 'np.mean', (["telem['AOACMAG'][ok]"], {}), "(telem['AOACMAG'][ok])\n", (29825, 29847), True, 'import numpy as np\n'), ((29872, 29900), 'numpy.std', 'np.std', (["telem['AOACMAG'][ok]"], {}), "(telem['AOACMAG'][ok])\n", (29878, 29900), True, 'import numpy as np\n'), ((30098, 30130), 'numpy.mean', 'np.mean', (["telem['counts_img'][ok]"], {}), "(telem['counts_img'][ok])\n", (30105, 30130), True, 'import numpy as np\n'), ((30155, 30188), 'numpy.mean', 'np.mean', (["telem['counts_dark'][ok]"], {}), "(telem['counts_dark'][ok])\n", (30162, 30188), True, 'import numpy as np\n'), ((30206, 30223), 'numpy.mean', 'np.mean', (['mags[ok]'], {}), '(mags[ok])\n', (30213, 30223), True, 'import numpy as np\n'), ((30287, 30303), 'numpy.std', 'np.std', (['mags[ok]'], {}), '(mags[ok])\n', (30293, 30303), True, 'import numpy as np\n'), ((30407, 30435), 'numpy.mean', 'np.mean', (['mags[ok & ~outlier]'], {}), '(mags[ok & ~outlier])\n', (30414, 30435), True, 'import numpy as np\n'), ((30518, 30545), 'numpy.std', 'np.std', (['mags[ok & ~outlier]'], {}), '(mags[ok & ~outlier])\n', (30524, 30545), True, 'import numpy as np\n'), ((30691, 30706), 'numpy.sum', 'np.sum', (['outlier'], {}), '(outlier)\n', (30697, 30706), True, 'import numpy as np\n'), ((31008, 31049), 'numpy.mean', 'np.mean', (["telem['mags_img'][ok & ~outlier]"], {}), "(telem['mags_img'][ok & ~outlier])\n", (31015, 31049), True, 'import numpy as np\n'), ((42978, 43006), 'numpy.sum', 'np.sum', (["stats['obs_suspect']"], {}), "(stats['obs_suspect'])\n", (42984, 43006), True, 'import numpy as np\n'), ((43901, 43934), 'numpy.ones_like', 'np.ones_like', (["t['ok']"], {'dtype': 'bool'}), "(t['ok'], dtype=bool)\n", (43913, 43934), True, 'import numpy as np\n'), ((44156, 44171), 'numpy.any', 'np.any', (["t['ok']"], {}), "(t['ok'])\n", (44162, 44171), True, 'import numpy as np\n'), ((44463, 44471), 
'astropy.table.Table', 'Table', (['t'], {}), '(t)\n', (44468, 44471), False, 'from astropy.table import Table, vstack\n'), ((44724, 44747), 'numpy.sum', 'np.sum', (["stats['obs_ok']"], {}), "(stats['obs_ok'])\n", (44730, 44747), True, 'import numpy as np\n'), ((44884, 44894), 'numpy.sum', 'np.sum', (['ok'], {}), '(ok)\n', (44890, 44894), True, 'import numpy as np\n'), ((46195, 46205), 'numpy.sum', 'np.sum', (['ok'], {}), '(ok)\n', (46201, 46205), True, 'import numpy as np\n'), ((46341, 46358), 'numpy.mean', 'np.mean', (['mags[ok]'], {}), '(mags[ok])\n', (46348, 46358), True, 'import numpy as np\n'), ((46375, 46391), 'numpy.std', 'np.std', (['mags[ok]'], {}), '(mags[ok])\n', (46381, 46391), True, 'import numpy as np\n'), ((46505, 46533), 'numpy.mean', 'np.mean', (['mags[ok & ~outlier]'], {}), '(mags[ok & ~outlier])\n', (46512, 46533), True, 'import numpy as np\n'), ((46554, 46581), 'numpy.std', 'np.std', (['mags[ok & ~outlier]'], {}), '(mags[ok & ~outlier])\n', (46560, 46581), True, 'import numpy as np\n'), ((46606, 46626), 'numpy.sum', 'np.sum', (['(ok & outlier)'], {}), '(ok & outlier)\n', (46612, 46626), True, 'import numpy as np\n'), ((46648, 46678), 'numpy.mean', 'np.mean', (['mags[ok & ~outlier_1]'], {}), '(mags[ok & ~outlier_1])\n', (46655, 46678), True, 'import numpy as np\n'), ((46701, 46730), 'numpy.std', 'np.std', (['mags[ok & ~outlier_1]'], {}), '(mags[ok & ~outlier_1])\n', (46707, 46730), True, 'import numpy as np\n'), ((46757, 46779), 'numpy.sum', 'np.sum', (['(ok & outlier_1)'], {}), '(ok & outlier_1)\n', (46763, 46779), True, 'import numpy as np\n'), ((46801, 46831), 'numpy.mean', 'np.mean', (['mags[ok & ~outlier_2]'], {}), '(mags[ok & ~outlier_2])\n', (46808, 46831), True, 'import numpy as np\n'), ((46854, 46883), 'numpy.std', 'np.std', (['mags[ok & ~outlier_2]'], {}), '(mags[ok & ~outlier_2])\n', (46860, 46883), True, 'import numpy as np\n'), ((46910, 46932), 'numpy.sum', 'np.sum', (['(ok & outlier_2)'], {}), '(ok & outlier_2)\n', (46916, 46932), True, 'import numpy as np\n'), ((47061, 47070), 'numpy.any', 'np.any', (['k'], {}), '(k)\n', (47067, 47070), True, 'import numpy as np\n'), ((47366, 47387), 'numpy.sum', 'np.sum', (['(k2 & ~outlier)'], {}), '(k2 & ~outlier)\n', (47372, 47387), True, 'import numpy as np\n'), ((47329, 47360), 'numpy.nanmean', 'np.nanmean', (['mags[k2 & ~outlier]'], {}), '(mags[k2 & ~outlier])\n', (47339, 47360), True, 'import numpy as np\n'), ((47456, 47477), 'numpy.sum', 'np.sum', (['(k2 & ~outlier)'], {}), '(k2 & ~outlier)\n', (47462, 47477), True, 'import numpy as np\n'), ((47420, 47450), 'numpy.nanstd', 'np.nanstd', (['mags[k2 & ~outlier]'], {}), '(mags[k2 & ~outlier])\n', (47429, 47450), True, 'import numpy as np\n'), ((48179, 48235), 'numpy.sqrt', 'np.sqrt', (["(result['t_std_dr5'] ** 2 + min_mag_obs_err ** 2)"], {}), "(result['t_std_dr5'] ** 2 + min_mag_obs_err ** 2)\n", (48186, 48235), True, 'import numpy as np\n'), ((11589, 11622), 'numpy.in1d', 'np.in1d', (['msids[name].times', 'times'], {}), '(msids[name].times, times)\n', (11596, 11622), True, 'import numpy as np\n'), ((11912, 11925), 'cxotime.CxoTime', 'CxoTime', (['t[0]'], {}), '(t[0])\n', (11919, 11925), False, 'from cxotime import CxoTime\n'), ((11934, 11947), 'cxotime.CxoTime', 'CxoTime', (['t[1]'], {}), '(t[1])\n', (11941, 11947), False, 'from cxotime import CxoTime\n'), ((17136, 17145), 'numpy.sum', 'np.sum', (['o'], {}), '(o)\n', (17142, 17145), True, 'import numpy as np\n'), ((30739, 30753), 'numpy.max', 'np.max', (['s_100s'], {}), '(s_100s)\n', (30745, 30753), True, 'import 
numpy as np\n'), ((30756, 30770), 'numpy.min', 'np.min', (['s_100s'], {}), '(s_100s)\n', (30762, 30770), True, 'import numpy as np\n'), ((30803, 30817), 'numpy.max', 'np.max', (['s_500s'], {}), '(s_500s)\n', (30809, 30817), True, 'import numpy as np\n'), ((30820, 30834), 'numpy.min', 'np.min', (['s_500s'], {}), '(s_500s)\n', (30826, 30834), True, 'import numpy as np\n'), ((30868, 30883), 'numpy.max', 'np.max', (['s_1000s'], {}), '(s_1000s)\n', (30874, 30883), True, 'import numpy as np\n'), ((30886, 30901), 'numpy.min', 'np.min', (['s_1000s'], {}), '(s_1000s)\n', (30892, 30901), True, 'import numpy as np\n'), ((30922, 30951), 'numpy.mean', 'np.mean', (["telem['TEMPCCD'][ok]"], {}), "(telem['TEMPCCD'][ok])\n", (30929, 30951), True, 'import numpy as np\n'), ((40659, 40690), 'cxotime.CxoTime', 'CxoTime', (["obs['mp_starcat_time']"], {}), "(obs['mp_starcat_time'])\n", (40666, 40690), False, 'from cxotime import CxoTime\n'), ((44771, 44795), 'numpy.sum', 'np.sum', (["(~stats['obs_ok'])"], {}), "(~stats['obs_ok'])\n", (44777, 44795), True, 'import numpy as np\n'), ((44800, 44844), 'numpy.sum', 'np.sum', (["(stats['f_ok'][stats['obs_ok']] < 0.3)"], {}), "(stats['f_ok'][stats['obs_ok']] < 0.3)\n", (44806, 44844), True, 'import numpy as np\n'), ((47546, 47573), 'numpy.mean', 'np.mean', (['mags[k & ~outlier]'], {}), '(mags[k & ~outlier])\n', (47553, 47573), True, 'import numpy as np\n'), ((47606, 47632), 'numpy.std', 'np.std', (['mags[k & ~outlier]'], {}), '(mags[k & ~outlier])\n', (47612, 47632), True, 'import numpy as np\n'), ((47749, 47765), 'numpy.mean', 'np.mean', (['mags[k]'], {}), '(mags[k])\n', (47756, 47765), True, 'import numpy as np\n'), ((47794, 47809), 'numpy.std', 'np.std', (['mags[k]'], {}), '(mags[k])\n', (47800, 47809), True, 'import numpy as np\n'), ((47885, 47894), 'numpy.sum', 'np.sum', (['k'], {}), '(k)\n', (47891, 47894), True, 'import numpy as np\n'), ((47930, 47949), 'numpy.sum', 'np.sum', (['(k & outlier)'], {}), '(k & outlier)\n', (47936, 47949), True, 'import numpy as np\n'), ((48387, 48432), 'numpy.abs', 'np.abs', (["(result['mag_obs'] - result['mag_aca'])"], {}), "(result['mag_obs'] - result['mag_aca'])\n", (48393, 48432), True, 'import numpy as np\n'), ((48465, 48510), 'numpy.abs', 'np.abs', (["(result['mag_obs'] - result['mag_aca'])"], {}), "(result['mag_obs'] - result['mag_aca'])\n", (48471, 48510), True, 'import numpy as np\n'), ((48655, 48687), 'numpy.isclose', 'np.isclose', (["result['color']", '(0.7)'], {}), "(result['color'], 0.7)\n", (48665, 48687), True, 'import numpy as np\n'), ((3443, 3462), 'numpy.atleast_1d', 'np.atleast_1d', (['time'], {}), '(time)\n', (3456, 3462), True, 'import numpy as np\n'), ((15867, 15881), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (15879, 15881), False, 'import sys\n'), ((15906, 15941), 'traceback.extract_tb', 'traceback.extract_tb', (['exc_traceback'], {}), '(exc_traceback)\n', (15926, 15941), False, 'import traceback\n'), ((47836, 47845), 'numpy.sum', 'np.sum', (['k'], {}), '(k)\n', (47842, 47845), True, 'import numpy as np\n'), ((47848, 47858), 'numpy.sum', 'np.sum', (['ok'], {}), '(ok)\n', (47854, 47858), True, 'import numpy as np\n'), ((3499, 3521), 'numpy.atleast_1d', 'np.atleast_1d', (['mag_aca'], {}), '(mag_aca)\n', (3512, 3521), True, 'import numpy as np\n')]
"""Conversion tool from EDF, EDF+, BDF to FIF.""" # Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # License: BSD (3-clause) import calendar import datetime import os import re import numpy as np from ...utils import verbose, logger, warn from ..utils import _blk_read_lims from ..base import BaseRaw, _check_update_montage from ..meas_info import _empty_info from ..constants import FIFF from ...filter import resample from ...externals.six.moves import zip class RawEDF(BaseRaw): """Raw object from EDF, EDF+, BDF file. Parameters ---------- input_fname : str Path to the EDF+,BDF file. montage : str | None | instance of Montage Path or instance of montage containing electrode positions. If None, sensor locations are (0,0,0). See the documentation of :func:`mne.channels.read_montage` for more information. eog : list or tuple Names of channels or list of indices that should be designated EOG channels. Values should correspond to the electrodes in the edf file. Default is None. misc : list or tuple Names of channels or list of indices that should be designated MISC channels. Values should correspond to the electrodes in the edf file. Default is None. stim_channel : str | int | None The channel name or channel index (starting at 0). -1 corresponds to the last channel (default). If None, there will be no stim channel added. annot : str | None Path to annotation file. If None, no derived stim channel will be added (for files requiring annotation file to interpret stim channel). annotmap : str | None Path to annotation map file containing mapping from label to trigger. Must be specified if annot is not None. exclude : list of str Channel names to exclude. This can help when reading data with different sampling rates to avoid unnecessary resampling. preload : bool or str (default False) Preload data into memory for data manipulation and faster indexing. If True, the data will be preloaded into memory (fast, requires large amount of memory). If preload is a string, preload is the file name of a memory-mapped file which is used to store the data on the hard drive (slower, requires less memory). verbose : bool, str, int, or None If not None, override default verbose level (see :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>` for more). See Also -------- mne.io.Raw : Documentation of attribute and methods. """ @verbose def __init__(self, input_fname, montage, eog=None, misc=None, stim_channel=-1, annot=None, annotmap=None, exclude=(), preload=False, verbose=None): # noqa: D102 logger.info('Extracting edf Parameters from %s...' % input_fname) input_fname = os.path.abspath(input_fname) info, edf_info = _get_edf_info(input_fname, stim_channel, annot, annotmap, eog, misc, exclude, preload) logger.info('Creating Raw.info structure...') _check_update_montage(info, montage) if bool(annot) != bool(annotmap): warn("Stimulus Channel will not be annotated. 
Both 'annot' and " "'annotmap' must be specified.") # Raw attributes last_samps = [edf_info['nsamples'] - 1] super(RawEDF, self).__init__( info, preload, filenames=[input_fname], raw_extras=[edf_info], last_samps=last_samps, orig_format='int', verbose=verbose) logger.info('Ready.') @verbose def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" from scipy.interpolate import interp1d if mult is not None: # XXX "cals" here does not function the same way as in RawFIF, # and for efficiency we want to be able to combine mult and cals # so proj support will have to wait until this is resolved raise NotImplementedError('mult is not supported yet') exclude = self._raw_extras[fi]['exclude'] sel = np.arange(self.info['nchan'])[idx] n_samps = self._raw_extras[fi]['n_samps'] buf_len = int(self._raw_extras[fi]['max_samp']) sfreq = self.info['sfreq'] data_size = self._raw_extras[fi]['data_size'] data_offset = self._raw_extras[fi]['data_offset'] stim_channel = self._raw_extras[fi]['stim_channel'] tal_channels = self._raw_extras[fi]['tal_channel'] annot = self._raw_extras[fi]['annot'] annotmap = self._raw_extras[fi]['annotmap'] subtype = self._raw_extras[fi]['subtype'] # gain constructor physical_range = np.array([ch['range'] for ch in self.info['chs']]) cal = np.array([ch['cal'] for ch in self.info['chs']]) gains = np.atleast_2d(self._raw_extras[fi]['units'] * (physical_range / cal)) # physical dimension in uV physical_min = np.atleast_2d(self._raw_extras[fi]['units'] * self._raw_extras[fi]['physical_min']) digital_min = self._raw_extras[fi]['digital_min'] offsets = np.atleast_2d(physical_min - (digital_min * gains)).T if tal_channels is not None: for tal_channel in tal_channels: offsets[tal_channel] = 0 # This is needed to rearrange the indices to correspond to correct # chunks on the file if excluded channels exist: selection = sel.copy() idx_map = np.argsort(selection) for ei in sorted(exclude): for ii, si in enumerate(sorted(selection)): if si >= ei: selection[idx_map[ii]] += 1 if tal_channels is not None: tal_channels = [tc + 1 if tc >= ei else tc for tc in sorted(tal_channels)] # We could read this one EDF block at a time, which would be this: ch_offsets = np.cumsum(np.concatenate([[0], n_samps])) block_start_idx, r_lims, d_lims = _blk_read_lims(start, stop, buf_len) # But to speed it up, we really need to read multiple blocks at once, # Otherwise we can end up with e.g. 18,181 chunks for a 20 MB file! # Let's do ~10 MB chunks: n_per = max(10 * 1024 * 1024 // (ch_offsets[-1] * data_size), 1) with open(self._filenames[fi], 'rb', buffering=0) as fid: # extract data start_offset = (data_offset + block_start_idx * ch_offsets[-1] * data_size) for ai in range(0, len(r_lims), n_per): block_offset = ai * ch_offsets[-1] * data_size n_read = min(len(r_lims) - ai, n_per) fid.seek(start_offset + block_offset, 0) # Read and reshape to (n_chunks_read, ch0_ch1_ch2_ch3...) many_chunk = _read_ch(fid, subtype, ch_offsets[-1] * n_read, data_size).reshape(n_read, -1) for ii, ci in enumerate(selection): # This now has size (n_chunks_read, n_samp[ci]) ch_data = many_chunk[:, ch_offsets[ci]:ch_offsets[ci + 1]] r_sidx = r_lims[ai][0] r_eidx = (buf_len * (n_read - 1) + r_lims[ai + n_read - 1][1]) d_sidx = d_lims[ai][0] d_eidx = d_lims[ai + n_read - 1][1] if n_samps[ci] != buf_len: if tal_channels is not None and ci in tal_channels: # don't resample tal_channels, zero-pad instead. 
if n_samps[ci] < buf_len: z = np.zeros((len(ch_data), buf_len - n_samps[ci])) ch_data = np.append(ch_data, z, -1) else: ch_data = ch_data[:, :buf_len] elif ci == stim_channel: if annot and annotmap or tal_channels is not None: # don't resample, it gets overwritten later ch_data = np.zeros((len(ch_data, buf_len))) else: # Stim channel will be interpolated old = np.linspace(0, 1, n_samps[ci] + 1, True) new = np.linspace(0, 1, buf_len, False) ch_data = np.append( ch_data, np.zeros((len(ch_data), 1)), -1) ch_data = interp1d(old, ch_data, kind='zero', axis=-1)(new) else: # XXX resampling each chunk isn't great, # it forces edge artifacts to appear at # each buffer boundary :( # it can also be very slow... ch_data = resample( ch_data, buf_len, n_samps[ci], npad=0, axis=-1) assert ch_data.shape == (len(ch_data), buf_len) data[ii, d_sidx:d_eidx] = ch_data.ravel()[r_sidx:r_eidx] data *= gains.T[sel] data += offsets[sel] # only try to read the stim channel if it's not None and it's # actually one of the requested channels read_size = len(r_lims) * buf_len if stim_channel is not None and (sel == stim_channel).sum() > 0: stim_channel_idx = np.where(sel == stim_channel)[0] if annot and annotmap: evts = _read_annot(annot, annotmap, sfreq, self._last_samps[fi]) data[stim_channel_idx, :] = evts[start:stop + 1] elif tal_channels is not None: tal_channel_idx = np.intersect1d(sel, tal_channels) evts = _parse_tal_channel(np.atleast_2d(data[tal_channel_idx])) self._raw_extras[fi]['events'] = evts unique_annots = sorted(set([e[2] for e in evts])) mapping = dict((a, n + 1) for n, a in enumerate(unique_annots)) stim = np.zeros(read_size) for t_start, t_duration, annotation in evts: evid = mapping[annotation] n_start = int(t_start * sfreq) n_stop = int(t_duration * sfreq) + n_start - 1 # make sure events without duration get one sample n_stop = n_stop if n_stop > n_start else n_start + 1 if any(stim[n_start:n_stop]): warn('EDF+ with overlapping events' ' are not fully supported') stim[n_start:n_stop] += evid data[stim_channel_idx, :] = stim[start:stop] else: # Allows support for up to 17-bit trigger values (2 ** 17 - 1) stim = np.bitwise_and(data[stim_channel_idx].astype(int), 131071) data[stim_channel_idx, :] = stim def _read_ch(fid, subtype, samp, data_size): """Read a number of samples for a single channel.""" if subtype in ('24BIT', 'bdf'): ch_data = np.fromfile(fid, dtype=np.uint8, count=samp * data_size) ch_data = ch_data.reshape(-1, 3).astype(np.int32) ch_data = ((ch_data[:, 0]) + (ch_data[:, 1] << 8) + (ch_data[:, 2] << 16)) # 24th bit determines the sign ch_data[ch_data >= (1 << 23)] -= (1 << 24) # edf data: 16bit data else: ch_data = np.fromfile(fid, dtype='<i2', count=samp) return ch_data def _parse_tal_channel(tal_channel_data): """Parse time-stamped annotation lists (TALs) in stim_channel. Parameters ---------- tal_channel_data : ndarray, shape = [n_chans, n_samples] channel data in EDF+ TAL format Returns ------- events : list List of events. Each event contains [start, duration, annotation]. 
References ---------- http://www.edfplus.info/specs/edfplus.html#tal """ # convert tal_channel to an ascii string tals = bytearray() for chan in tal_channel_data: for s in chan: i = int(s) tals.extend(np.uint8([i % 256, i // 256])) regex_tal = '([+-]\d+\.?\d*)(\x15(\d+\.?\d*))?(\x14.*?)\x14\x00' # use of latin-1 because characters are only encoded for the first 256 # code points and utf-8 can triggers an "invalid continuation byte" error tal_list = re.findall(regex_tal, tals.decode('latin-1')) events = [] for ev in tal_list: onset = float(ev[0]) duration = float(ev[2]) if ev[2] else 0 for annotation in ev[3].split('\x14')[1:]: if annotation: events.append([onset, duration, annotation]) return events def _get_edf_info(fname, stim_channel, annot, annotmap, eog, misc, exclude, preload): """Extract all the information from the EDF+,BDF file.""" if eog is None: eog = [] if misc is None: misc = [] edf_info = dict() edf_info['annot'] = annot edf_info['annotmap'] = annotmap edf_info['events'] = [] with open(fname, 'rb') as fid: assert(fid.tell() == 0) fid.seek(168) # Seek 8 + 80 bytes for Subject id + 80 bytes for rec id day, month, year = [int(x) for x in re.findall('(\d+)', fid.read(8).decode())] hour, minute, sec = [int(x) for x in re.findall('(\d+)', fid.read(8).decode())] century = 2000 if year < 50 else 1900 date = datetime.datetime(year + century, month, day, hour, minute, sec) edf_info['data_offset'] = header_nbytes = int(fid.read(8).decode()) subtype = fid.read(44).strip().decode()[:5] if len(subtype) > 0: edf_info['subtype'] = subtype else: edf_info['subtype'] = os.path.splitext(fname)[1][1:].lower() edf_info['n_records'] = n_records = int(fid.read(8).decode()) # record length in seconds record_length = float(fid.read(8).decode()) if record_length == 0: edf_info['record_length'] = record_length = 1. warn('Header information is incorrect for record length. 
Default ' 'record length set to 1.') else: edf_info['record_length'] = record_length nchan = int(fid.read(4).decode()) channels = list(range(nchan)) ch_names = [fid.read(16).strip().decode() for ch in channels] exclude = [ch_names.index(idx) for idx in exclude] for ch in channels: fid.read(80) # transducer units = [fid.read(8).strip().decode() for ch in channels] edf_info['units'] = list() edf_info['exclude'] = exclude include = list() for i, unit in enumerate(units): if i in exclude: continue if unit == 'uV': edf_info['units'].append(1e-6) else: edf_info['units'].append(1) include.append(i) ch_names = [ch_names[idx] for idx in include] physical_min = np.array([float(fid.read(8).decode()) for ch in channels])[include] edf_info['physical_min'] = physical_min physical_max = np.array([float(fid.read(8).decode()) for ch in channels])[include] digital_min = np.array([float(fid.read(8).decode()) for ch in channels])[include] edf_info['digital_min'] = digital_min digital_max = np.array([float(fid.read(8).decode()) for ch in channels])[include] prefiltering = [fid.read(80).strip().decode() for ch in channels][:-1] highpass = np.ravel([re.findall('HP:\s+(\w+)', filt) for filt in prefiltering]) lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt) for filt in prefiltering]) # number of samples per record n_samps = np.array([int(fid.read(8).decode()) for ch in channels]) edf_info['n_samps'] = n_samps n_samps = n_samps[include] fid.read(32 * nchan).decode() # reserved assert fid.tell() == header_nbytes physical_ranges = physical_max - physical_min cals = digital_max - digital_min if edf_info['subtype'] in ('24BIT', 'bdf'): edf_info['data_size'] = 3 # 24-bit (3 byte) integers else: edf_info['data_size'] = 2 # 16-bit (2 byte) integers # Creates a list of dicts of eeg channels for raw.info logger.info('Setting channel info structure...') chs = list() tal_ch_name = 'EDF Annotations' tal_chs = np.where(np.array(ch_names) == tal_ch_name)[0] if len(tal_chs) > 0: if len(tal_chs) > 1: warn('Channel names are not unique, found duplicates for: %s. ' 'Adding running numbers to duplicate channel names.' % tal_ch_name) for idx, tal_ch in enumerate(tal_chs, 1): ch_names[tal_ch] = ch_names[tal_ch] + '-%s' % idx tal_channel = tal_chs else: tal_channel = None edf_info['tal_channel'] = tal_channel if tal_channel is not None and stim_channel is not None and not preload: raise RuntimeError('%s' % ('EDF+ Annotations (TAL) channel needs to be' ' parsed completely on loading.' ' You must set preload parameter to True.')) if stim_channel == -1: stim_channel = len(include) - 1 pick_mask = np.ones(len(ch_names)) for idx, ch_info in enumerate(zip(ch_names, physical_ranges, cals)): ch_name, physical_range, cal = ch_info chan_info = {} chan_info['cal'] = cal chan_info['logno'] = idx + 1 chan_info['scanno'] = idx + 1 chan_info['range'] = physical_range chan_info['unit_mul'] = 0. 
chan_info['ch_name'] = ch_name chan_info['unit'] = FIFF.FIFF_UNIT_V chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG chan_info['kind'] = FIFF.FIFFV_EEG_CH chan_info['loc'] = np.zeros(12) if ch_name in eog or idx in eog or idx - nchan in eog: chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE chan_info['kind'] = FIFF.FIFFV_EOG_CH pick_mask[idx] = False if ch_name in misc or idx in misc or idx - nchan in misc: chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE chan_info['kind'] = FIFF.FIFFV_MISC_CH pick_mask[idx] = False check1 = stim_channel == ch_name check2 = stim_channel == idx check3 = nchan > 1 stim_check = np.logical_and(np.logical_or(check1, check2), check3) if stim_check: chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE chan_info['unit'] = FIFF.FIFF_UNIT_NONE chan_info['kind'] = FIFF.FIFFV_STIM_CH pick_mask[idx] = False chan_info['ch_name'] = 'STI 014' ch_names[idx] = chan_info['ch_name'] edf_info['units'][idx] = 1 if isinstance(stim_channel, str): stim_channel = idx if tal_channel is not None and idx in tal_channel: chan_info['range'] = 1 chan_info['cal'] = 1 chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE chan_info['unit'] = FIFF.FIFF_UNIT_NONE chan_info['kind'] = FIFF.FIFFV_MISC_CH pick_mask[idx] = False chs.append(chan_info) edf_info['stim_channel'] = stim_channel if any(pick_mask): picks = [item for item, mask in zip(range(nchan), pick_mask) if mask] edf_info['max_samp'] = max_samp = n_samps[picks].max() else: edf_info['max_samp'] = max_samp = n_samps.max() # sfreq defined as the max sampling rate of eeg sfreq = n_samps.max() / record_length info = _empty_info(sfreq) info['meas_date'] = calendar.timegm(date.utctimetuple()) info['chs'] = chs if highpass.size == 0: pass elif all(highpass): if highpass[0] == 'NaN': pass # Placeholder for future use. Highpass set in _empty_info. elif highpass[0] == 'DC': info['highpass'] = 0. else: info['highpass'] = float(highpass[0]) else: info['highpass'] = float(np.max(highpass)) warn('Channels contain different highpass filters. Highest filter ' 'setting will be stored.') if lowpass.size == 0: pass elif all(lowpass): if lowpass[0] == 'NaN': pass # Placeholder for future use. Lowpass set in _empty_info. else: info['lowpass'] = float(lowpass[0]) else: info['lowpass'] = float(np.min(lowpass)) warn('Channels contain different lowpass filters. Lowest filter ' 'setting will be stored.') # Some keys to be consistent with FIF measurement info info['description'] = None info['buffer_size_sec'] = 1. edf_info['nsamples'] = int(n_records * max_samp) # These are the conditions under which a stim channel will be interpolated if stim_channel is not None and not (annot and annotmap) and \ tal_channel is None and n_samps[stim_channel] != int(max_samp): warn('Interpolating stim channel. Events may jitter.') info._update_redundant() return info, edf_info def _read_annot(annot, annotmap, sfreq, data_length): """Annotation File Reader. Parameters ---------- annot : str Path to annotation file. annotmap : str Path to annotation map file containing mapping from label to trigger. sfreq : float Sampling frequency. data_length : int Length of the data file. Returns ------- stim_channel : ndarray An array containing stimulus trigger events. 
""" pat = '([+/-]\d+.\d+),(\w+)' annot = open(annot).read() triggers = re.findall(pat, annot) times, values = zip(*triggers) times = [float(time) * sfreq for time in times] pat = '(\w+):(\d+)' annotmap = open(annotmap).read() mappings = re.findall(pat, annotmap) maps = {} for mapping in mappings: maps[mapping[0]] = mapping[1] triggers = [int(maps[value]) for value in values] stim_channel = np.zeros(data_length) for time, trigger in zip(times, triggers): stim_channel[time] = trigger return stim_channel def read_raw_edf(input_fname, montage=None, eog=None, misc=None, stim_channel=-1, annot=None, annotmap=None, exclude=(), preload=False, verbose=None): """Reader function for EDF+, BDF conversion to FIF. Parameters ---------- input_fname : str Path to the EDF+,BDF file. montage : str | None | instance of Montage Path or instance of montage containing electrode positions. If None, sensor locations are (0,0,0). See the documentation of :func:`mne.channels.read_montage` for more information. eog : list or tuple Names of channels or list of indices that should be designated EOG channels. Values should correspond to the electrodes in the edf file. Default is None. misc : list or tuple Names of channels or list of indices that should be designated MISC channels. Values should correspond to the electrodes in the edf file. Default is None. stim_channel : str | int | None The channel name or channel index (starting at 0). -1 corresponds to the last channel (default). If None, there will be no stim channel added. annot : str | None Path to annotation file. If None, no derived stim channel will be added (for files requiring annotation file to interpret stim channel). annotmap : str | None Path to annotation map file containing mapping from label to trigger. Must be specified if annot is not None. exclude : list of str Channel names to exclude. This can help when reading data with different sampling rates to avoid unnecessary resampling. preload : bool or str (default False) Preload data into memory for data manipulation and faster indexing. If True, the data will be preloaded into memory (fast, requires large amount of memory). If preload is a string, preload is the file name of a memory-mapped file which is used to store the data on the hard drive (slower, requires less memory). verbose : bool, str, int, or None If not None, override default verbose level (see :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>` for more). Returns ------- raw : Instance of RawEDF A Raw object containing EDF data. See Also -------- mne.io.Raw : Documentation of attribute and methods. """ return RawEDF(input_fname=input_fname, montage=montage, eog=eog, misc=misc, stim_channel=stim_channel, annot=annot, annotmap=annotmap, exclude=exclude, preload=preload, verbose=verbose)
[ "numpy.uint8", "numpy.fromfile", "scipy.interpolate.interp1d", "numpy.argsort", "numpy.array", "numpy.arange", "datetime.datetime", "numpy.atleast_2d", "numpy.where", "numpy.max", "numpy.linspace", "numpy.concatenate", "numpy.min", "os.path.splitext", "re.findall", "numpy.intersect1d", "numpy.logical_or", "numpy.append", "numpy.zeros", "os.path.abspath" ]
[((22757, 22779), 're.findall', 're.findall', (['pat', 'annot'], {}), '(pat, annot)\n', (22767, 22779), False, 'import re\n'), ((22944, 22969), 're.findall', 're.findall', (['pat', 'annotmap'], {}), '(pat, annotmap)\n', (22954, 22969), False, 'import re\n'), ((23125, 23146), 'numpy.zeros', 'np.zeros', (['data_length'], {}), '(data_length)\n', (23133, 23146), True, 'import numpy as np\n'), ((2965, 2993), 'os.path.abspath', 'os.path.abspath', (['input_fname'], {}), '(input_fname)\n', (2980, 2993), False, 'import os\n'), ((4882, 4932), 'numpy.array', 'np.array', (["[ch['range'] for ch in self.info['chs']]"], {}), "([ch['range'] for ch in self.info['chs']])\n", (4890, 4932), True, 'import numpy as np\n'), ((4947, 4995), 'numpy.array', 'np.array', (["[ch['cal'] for ch in self.info['chs']]"], {}), "([ch['cal'] for ch in self.info['chs']])\n", (4955, 4995), True, 'import numpy as np\n'), ((5012, 5081), 'numpy.atleast_2d', 'np.atleast_2d', (["(self._raw_extras[fi]['units'] * (physical_range / cal))"], {}), "(self._raw_extras[fi]['units'] * (physical_range / cal))\n", (5025, 5081), True, 'import numpy as np\n'), ((5171, 5259), 'numpy.atleast_2d', 'np.atleast_2d', (["(self._raw_extras[fi]['units'] * self._raw_extras[fi]['physical_min'])"], {}), "(self._raw_extras[fi]['units'] * self._raw_extras[fi][\n 'physical_min'])\n", (5184, 5259), True, 'import numpy as np\n'), ((5728, 5749), 'numpy.argsort', 'np.argsort', (['selection'], {}), '(selection)\n', (5738, 5749), True, 'import numpy as np\n'), ((11658, 11714), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.uint8', 'count': '(samp * data_size)'}), '(fid, dtype=np.uint8, count=samp * data_size)\n', (11669, 11714), True, 'import numpy as np\n'), ((12069, 12110), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '"""<i2"""', 'count': 'samp'}), "(fid, dtype='<i2', count=samp)\n", (12080, 12110), True, 'import numpy as np\n'), ((14201, 14265), 'datetime.datetime', 'datetime.datetime', (['(year + century)', 'month', 'day', 'hour', 'minute', 'sec'], {}), '(year + century, month, day, hour, minute, sec)\n', (14218, 14265), False, 'import datetime\n'), ((18915, 18927), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (18923, 18927), True, 'import numpy as np\n'), ((4273, 4302), 'numpy.arange', 'np.arange', (["self.info['nchan']"], {}), "(self.info['nchan'])\n", (4282, 4302), True, 'import numpy as np\n'), ((5369, 5418), 'numpy.atleast_2d', 'np.atleast_2d', (['(physical_min - digital_min * gains)'], {}), '(physical_min - digital_min * gains)\n', (5382, 5418), True, 'import numpy as np\n'), ((6189, 6219), 'numpy.concatenate', 'np.concatenate', (['[[0], n_samps]'], {}), '([[0], n_samps])\n', (6203, 6219), True, 'import numpy as np\n'), ((19485, 19514), 'numpy.logical_or', 'np.logical_or', (['check1', 'check2'], {}), '(check1, check2)\n', (19498, 19514), True, 'import numpy as np\n'), ((9902, 9931), 'numpy.where', 'np.where', (['(sel == stim_channel)'], {}), '(sel == stim_channel)\n', (9910, 9931), True, 'import numpy as np\n'), ((12753, 12782), 'numpy.uint8', 'np.uint8', (['[i % 256, i // 256]'], {}), '([i % 256, i // 256])\n', (12761, 12782), True, 'import numpy as np\n'), ((16443, 16476), 're.findall', 're.findall', (['"""HP:\\\\s+(\\\\w+)"""', 'filt'], {}), "('HP:\\\\s+(\\\\w+)', filt)\n", (16453, 16476), False, 'import re\n'), ((16559, 16592), 're.findall', 're.findall', (['"""LP:\\\\s+(\\\\w+)"""', 'filt'], {}), "('LP:\\\\s+(\\\\w+)', filt)\n", (16569, 16592), False, 'import re\n'), ((17417, 17435), 'numpy.array', 'np.array', (['ch_names'], 
{}), '(ch_names)\n', (17425, 17435), True, 'import numpy as np\n'), ((21142, 21158), 'numpy.max', 'np.max', (['highpass'], {}), '(highpass)\n', (21148, 21158), True, 'import numpy as np\n'), ((21551, 21566), 'numpy.min', 'np.min', (['lowpass'], {}), '(lowpass)\n', (21557, 21566), True, 'import numpy as np\n'), ((10228, 10261), 'numpy.intersect1d', 'np.intersect1d', (['sel', 'tal_channels'], {}), '(sel, tal_channels)\n', (10242, 10261), True, 'import numpy as np\n'), ((10567, 10586), 'numpy.zeros', 'np.zeros', (['read_size'], {}), '(read_size)\n', (10575, 10586), True, 'import numpy as np\n'), ((10304, 10340), 'numpy.atleast_2d', 'np.atleast_2d', (['data[tal_channel_idx]'], {}), '(data[tal_channel_idx])\n', (10317, 10340), True, 'import numpy as np\n'), ((14514, 14537), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (14530, 14537), False, 'import os\n'), ((8096, 8121), 'numpy.append', 'np.append', (['ch_data', 'z', '(-1)'], {}), '(ch_data, z, -1)\n', (8105, 8121), True, 'import numpy as np\n'), ((8639, 8679), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(n_samps[ci] + 1)', '(True)'], {}), '(0, 1, n_samps[ci] + 1, True)\n', (8650, 8679), True, 'import numpy as np\n'), ((8718, 8751), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'buf_len', '(False)'], {}), '(0, 1, buf_len, False)\n', (8729, 8751), True, 'import numpy as np\n'), ((8925, 8969), 'scipy.interpolate.interp1d', 'interp1d', (['old', 'ch_data'], {'kind': '"""zero"""', 'axis': '(-1)'}), "(old, ch_data, kind='zero', axis=-1)\n", (8933, 8969), False, 'from scipy.interpolate import interp1d\n')]
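A minimal usage sketch for the read_raw_edf() reader defined in the row above. The file name is a placeholder, the surrounding mne package that the relative imports point to is assumed to be installed, and preload=True is required whenever the recording carries an 'EDF Annotations' (TAL) channel and a stim channel is requested, per the RuntimeError raised in _get_edf_info.

# hypothetical file path; any EDF/EDF+/BDF recording would do
raw = read_raw_edf("recording.edf", stim_channel=-1, preload=True)
print(raw.info["sfreq"])   # max per-channel samples per record / record_length
print(raw.info["nchan"])   # number of channels kept after applying `exclude`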
from fastapi import FastAPI from typing import List from fastapi import FastAPI, UploadFile, File import numpy as np from starlette.requests import Request import io from PIL import Image import base64 import cv2 app = FastAPI() @app.post("/predict") async def analyse(image_file_read: bytes = File(...)): file = base64.b64encode(image_file_read) jpg_original = base64.b64decode(file) jpg_as_np = np.frombuffer(jpg_original, dtype=np.uint8) original_image = cv2.imdecode(jpg_as_np, flags=1) return original_image.shape
[ "fastapi.FastAPI", "base64.b64encode", "base64.b64decode", "cv2.imdecode", "numpy.frombuffer", "fastapi.File" ]
[((219, 228), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (226, 228), False, 'from fastapi import FastAPI, UploadFile, File\n'), ((295, 304), 'fastapi.File', 'File', (['...'], {}), '(...)\n', (299, 304), False, 'from fastapi import FastAPI, UploadFile, File\n'), ((318, 351), 'base64.b64encode', 'base64.b64encode', (['image_file_read'], {}), '(image_file_read)\n', (334, 351), False, 'import base64\n'), ((371, 393), 'base64.b64decode', 'base64.b64decode', (['file'], {}), '(file)\n', (387, 393), False, 'import base64\n'), ((410, 453), 'numpy.frombuffer', 'np.frombuffer', (['jpg_original'], {'dtype': 'np.uint8'}), '(jpg_original, dtype=np.uint8)\n', (423, 453), True, 'import numpy as np\n'), ((475, 507), 'cv2.imdecode', 'cv2.imdecode', (['jpg_as_np'], {'flags': '(1)'}), '(jpg_as_np, flags=1)\n', (487, 507), False, 'import cv2\n')]
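Since base64.b64decode(base64.b64encode(x)) returns x unchanged, the handler above effectively feeds the uploaded bytes straight into cv2.imdecode and returns the decoded image shape. A hedged sketch of exercising the endpoint with FastAPI's TestClient, assuming the code above is importable as a module named app_module and that test.jpg is any local JPEG:

from fastapi.testclient import TestClient
from app_module import app  # hypothetical module name for the row above

client = TestClient(app)
with open("test.jpg", "rb") as fh:  # placeholder image file
    resp = client.post(
        "/predict",
        files={"image_file_read": ("test.jpg", fh, "image/jpeg")},
    )
print(resp.json())  # e.g. [720, 1280, 3] for a 1280x720 colour JPEG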
# modules we'll need import numpy as np import os import glob import pandas as pd from subprocess import call from librosa import load, stft # config INPUT_DIR = "/mnt/d/datasets/Looking-to-Listen_small/all_wavs/" INPUT_DIR_VISUAL = "/mnt/d/datasets/Looking-to-Listen_small/all_vector/" OUTPUT_DIR = "/mnt/d/datasets/Looking-to-Listen_small/mixed_wavs/" OUTPUT_DIR_SPEC = "/mnt/d/datasets/Looking-to-Listen_small/spectrogram/" OUTPUT_DIR_VISUAL = "/mnt/d/datasets/Looking-to-Listen_small/visual/" MIX_INFO_CSV_PATH = "/mnt/d/datasets/Looking-to-Listen_small/mix_info.csv" NUM_MIX = 100 DURATION = 3 # seconds SR = 16000 # Hz FFT_SIZE = 512 HOP_LEN = 160 # 10ms (10ms*16000Hz=160frames) WIN_LEN = 400 # 25ms # utils def getAllwavpaths(directory): wav_paths = glob.glob(os.path.join(directory, "*.wav")) return wav_paths def main(): # check directory exsistence if not os.path.exists(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) if not os.path.exists(OUTPUT_DIR_SPEC): os.makedirs(OUTPUT_DIR_SPEC) if not os.path.exists(OUTPUT_DIR_VISUAL): os.makedirs(OUTPUT_DIR_VISUAL) ### SYNTHESIS PART ### # motivation: generate mixed sounds print("synthesis part...") speech_paths = getAllwavpaths(directory=INPUT_DIR) n_speech = len(speech_paths) speech_list1 = [] speech_list2 = [] mix_list = [] # generate synthesised sounds for i in range(NUM_MIX): ### AUDIO STREAM ### # choose one clean sound and noise sound rand_speech1 = np.random.randint(0, n_speech) rand_speech2 = np.random.randint(0, n_speech) if rand_speech1 == rand_speech2: rand_speech2 = np.random.randint(0, n_speech) _speech1 = speech_paths[rand_speech1] _speech2 = speech_paths[rand_speech2] speech_list1.append(_speech1) speech_list2.append(_speech2) # synthesis sounds topath = os.path.join(OUTPUT_DIR, "{0}.wav".format(i)) mix_list.append(topath) cmd = 'ffmpeg -i {0} -i {1} -t 00:00:{2} -filter_complex amix=2 -ar {3} -ac 1 -y {4}'.format(_speech1, _speech2, DURATION, SR, topath) call(cmd, shell=True) # load speeches audio_speech1, _ = load(_speech1, sr=SR) audio_speech2, _ = load(_speech2, sr=SR) audio_mix, _ = load(topath, sr=SR) # convert spectrograms spectrogram_speech1 = np.abs(stft(audio_speech1, n_fft=FFT_SIZE, hop_length=HOP_LEN, win_length=WIN_LEN)) spectrogram_speech2 = np.abs(stft(audio_speech2, n_fft=FFT_SIZE, hop_length=HOP_LEN, win_length=WIN_LEN)) spectrogram_mix = np.abs(stft(audio_mix, n_fft=FFT_SIZE, hop_length=HOP_LEN, win_length=WIN_LEN)) spectrogram_speech = np.concatenate((spectrogram_speech1, spectrogram_speech2), axis=0) # scaling m = np.max(spectrogram_mix) spectrogram_mix /= m spectrogram_speech /= m # save topath = os.path.join(OUTPUT_DIR_SPEC, "{0}.npz".format(i)) np.savez(topath, mix=spectrogram_mix, true=spectrogram_speech) ### VISUAL STREAM ### todir = os.path.join(OUTPUT_DIR_VISUAL, "{}".format(i)) if not os.path.exists(todir): os.makedirs(todir) _speech1 = os.path.join(INPUT_DIR_VISUAL, os.path.basename(_speech1).replace(".wav", ".csv")) _speech2 = os.path.join(INPUT_DIR_VISUAL, os.path.basename(_speech2).replace(".wav", ".csv")) cmd = 'cp {0} {1}'.format(_speech1, os.path.join(todir, "speech1.csv")) call(cmd, shell=True) cmd = 'cp {0} {1}'.format(_speech2, os.path.join(todir, "speech2.csv")) call(cmd, shell=True) # save synthesis information df = pd.DataFrame({ "i": range(NUM_MIX), "speech1": speech_list1, "speech2": speech_list2, "mix": mix_list }) df.to_csv(MIX_INFO_CSV_PATH, index=False) if __name__ == "__main__": main()
[ "os.path.exists", "numpy.savez", "os.makedirs", "os.path.join", "numpy.max", "numpy.random.randint", "subprocess.call", "numpy.concatenate", "os.path.basename", "librosa.stft", "librosa.load" ]
[((774, 806), 'os.path.join', 'os.path.join', (['directory', '"""*.wav"""'], {}), "(directory, '*.wav')\n", (786, 806), False, 'import os\n'), ((886, 912), 'os.path.exists', 'os.path.exists', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (900, 912), False, 'import os\n'), ((922, 945), 'os.makedirs', 'os.makedirs', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (933, 945), False, 'import os\n'), ((957, 988), 'os.path.exists', 'os.path.exists', (['OUTPUT_DIR_SPEC'], {}), '(OUTPUT_DIR_SPEC)\n', (971, 988), False, 'import os\n'), ((998, 1026), 'os.makedirs', 'os.makedirs', (['OUTPUT_DIR_SPEC'], {}), '(OUTPUT_DIR_SPEC)\n', (1009, 1026), False, 'import os\n'), ((1038, 1071), 'os.path.exists', 'os.path.exists', (['OUTPUT_DIR_VISUAL'], {}), '(OUTPUT_DIR_VISUAL)\n', (1052, 1071), False, 'import os\n'), ((1081, 1111), 'os.makedirs', 'os.makedirs', (['OUTPUT_DIR_VISUAL'], {}), '(OUTPUT_DIR_VISUAL)\n', (1092, 1111), False, 'import os\n'), ((1539, 1569), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_speech'], {}), '(0, n_speech)\n', (1556, 1569), True, 'import numpy as np\n'), ((1593, 1623), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_speech'], {}), '(0, n_speech)\n', (1610, 1623), True, 'import numpy as np\n'), ((2173, 2194), 'subprocess.call', 'call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (2177, 2194), False, 'from subprocess import call\n'), ((2255, 2276), 'librosa.load', 'load', (['_speech1'], {'sr': 'SR'}), '(_speech1, sr=SR)\n', (2259, 2276), False, 'from librosa import load, stft\n'), ((2304, 2325), 'librosa.load', 'load', (['_speech2'], {'sr': 'SR'}), '(_speech2, sr=SR)\n', (2308, 2325), False, 'from librosa import load, stft\n'), ((2349, 2368), 'librosa.load', 'load', (['topath'], {'sr': 'SR'}), '(topath, sr=SR)\n', (2353, 2368), False, 'from librosa import load, stft\n'), ((2772, 2838), 'numpy.concatenate', 'np.concatenate', (['(spectrogram_speech1, spectrogram_speech2)'], {'axis': '(0)'}), '((spectrogram_speech1, spectrogram_speech2), axis=0)\n', (2786, 2838), True, 'import numpy as np\n'), ((2878, 2901), 'numpy.max', 'np.max', (['spectrogram_mix'], {}), '(spectrogram_mix)\n', (2884, 2901), True, 'import numpy as np\n'), ((3063, 3125), 'numpy.savez', 'np.savez', (['topath'], {'mix': 'spectrogram_mix', 'true': 'spectrogram_speech'}), '(topath, mix=spectrogram_mix, true=spectrogram_speech)\n', (3071, 3125), True, 'import numpy as np\n'), ((3590, 3611), 'subprocess.call', 'call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (3594, 3611), False, 'from subprocess import call\n'), ((3700, 3721), 'subprocess.call', 'call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (3704, 3721), False, 'from subprocess import call\n'), ((1692, 1722), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_speech'], {}), '(0, n_speech)\n', (1709, 1722), True, 'import numpy as np\n'), ((2446, 2521), 'librosa.stft', 'stft', (['audio_speech1'], {'n_fft': 'FFT_SIZE', 'hop_length': 'HOP_LEN', 'win_length': 'WIN_LEN'}), '(audio_speech1, n_fft=FFT_SIZE, hop_length=HOP_LEN, win_length=WIN_LEN)\n', (2450, 2521), False, 'from librosa import load, stft\n'), ((2560, 2635), 'librosa.stft', 'stft', (['audio_speech2'], {'n_fft': 'FFT_SIZE', 'hop_length': 'HOP_LEN', 'win_length': 'WIN_LEN'}), '(audio_speech2, n_fft=FFT_SIZE, hop_length=HOP_LEN, win_length=WIN_LEN)\n', (2564, 2635), False, 'from librosa import load, stft\n'), ((2670, 2741), 'librosa.stft', 'stft', (['audio_mix'], {'n_fft': 'FFT_SIZE', 'hop_length': 'HOP_LEN', 'win_length': 'WIN_LEN'}), '(audio_mix, n_fft=FFT_SIZE, 
hop_length=HOP_LEN, win_length=WIN_LEN)\n', (2674, 2741), False, 'from librosa import load, stft\n'), ((3244, 3265), 'os.path.exists', 'os.path.exists', (['todir'], {}), '(todir)\n', (3258, 3265), False, 'import os\n'), ((3279, 3297), 'os.makedirs', 'os.makedirs', (['todir'], {}), '(todir)\n', (3290, 3297), False, 'import os\n'), ((3546, 3580), 'os.path.join', 'os.path.join', (['todir', '"""speech1.csv"""'], {}), "(todir, 'speech1.csv')\n", (3558, 3580), False, 'import os\n'), ((3656, 3690), 'os.path.join', 'os.path.join', (['todir', '"""speech2.csv"""'], {}), "(todir, 'speech2.csv')\n", (3668, 3690), False, 'import os\n'), ((3348, 3374), 'os.path.basename', 'os.path.basename', (['_speech1'], {}), '(_speech1)\n', (3364, 3374), False, 'import os\n'), ((3450, 3476), 'os.path.basename', 'os.path.basename', (['_speech2'], {}), '(_speech2)\n', (3466, 3476), False, 'import os\n')]
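A self-contained sketch of the spectrogram step used in the mixing script above, run on a synthetic tone instead of the wav files so no dataset paths are needed; the parameters mirror FFT_SIZE, HOP_LEN and WIN_LEN from the script:

import numpy as np
from librosa import stft

SR, DURATION = 16000, 3                                    # Hz, seconds
t = np.linspace(0, DURATION, SR * DURATION, endpoint=False)
audio = 0.5 * np.sin(2 * np.pi * 440 * t)                # synthetic 440 Hz tone
spec = np.abs(stft(audio, n_fft=512, hop_length=160, win_length=400))
spec /= np.max(spec)                                       # same normalisation idea as the script
print(spec.shape)   # (257, 301): n_fft // 2 + 1 bins by about SR * DURATION / HOP_LEN frames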
from math import ceil import warnings from collections import namedtuple import numpy as np from .basis import conv_basis, delta_stim, boxcar_stim, make_nonlinear_raised_cosine __all__ = ['Design', 'Covariate'] class Design: covariates = {} bias = False def __init__(self, experiment): self.experiment = experiment @property def edim(self): return sum((covar.edim for covar in self.covariates.values())) def add_constant(self, bias=True): self.bias = bias def add_covariate(self, label, description, handler, basis, offset, condition, **kwargs): self.covariates[label] = Covariate(self, label, description, handler, basis, offset, condition, **kwargs) def add_covariate_timing(self, label, description, var_label, value_label, *args, **kwargs): binfun = self.experiment.binfun if value_label is None: self.covariates[label] = Covariate( self, label, description, lambda trial: delta_stim( binfun(trial[var_label]), binfun(trial.duration)), *args, **kwargs) else: self.covariates[label] = Covariate( self, label, description, lambda trial: trial[value_label] * delta_stim( binfun(trial[var_label]), binfun(trial.duration)), *args, **kwargs) def add_covariate_spike(self, label, description, var_label, basis, **kwargs): offset = 1 # make sure causal. no instantaneous interaction binfun = self.experiment.binfun if basis is None: basis = make_nonlinear_raised_cosine(10, self.experiment.binsize, (0., 100.), 2) covar = Covariate( self, label, description, lambda trial: delta_stim( binfun(trial[var_label]), binfun(trial.duration)), basis, offset, **kwargs) self.covariates[label] = covar def add_covariate_raw(self, label, description, *args, **kwargs): self.covariates[label] = Covariate(self, label, description, lambda trial: trial[label], *args, **kwargs) def add_covariate_boxcar(self, label, description, on_label, off_label, value_label, *args, **kwargs): binfun = self.experiment.binfun if value_label is None: covar = Covariate( self, label, description, lambda trial: boxcar_stim( binfun(trial[on_label]), binfun(trial[off_label]), binfun(trial.duration)), *args, **kwargs) else: covar = Covariate( self, label, description, lambda trial: trial[value_label] * boxcar_stim( binfun(trial[on_label]), binfun(trial[off_label]), binfun(trial.duration)), *args, **kwargs) self.covariates[label] = covar def _filter_trials(self, trial_indices): expt = self.experiment if trial_indices is not None: trials = [expt.trials[idx] for idx in trial_indices] else: trials = expt.trials.values() return trials def get_response(self, label, trial_indices=None): trials = self._filter_trials(trial_indices) # print(sum([trial[label].shape[0] for trial in trials]), # sum([self.experiment.binfun(trial.duration) for trial in trials])) return np.concatenate([trial[label] for trial in trials]) def get_binned_spike(self, label, trial_indices=None, concat=True): trials = self._filter_trials(trial_indices) expt = self.experiment s = [ _time2bin(trial[label], binwidth=expt.binsize, start=0, stop=trial.duration) for trial in trials ] if concat: s = np.concatenate(s) return s def compile_design_matrix(self, trial_indices=None, concat=True): expt = self.experiment trials = self._filter_trials(trial_indices) # total_bins = sum([expt.binfun(trial.duration) for trial in trials]) # print(total_bins) dm = [] for trial in trials: nbin = expt.binfun(trial.duration) dmt = [] for covar in self.covariates.values(): if covar.condition is not None and not covar.condition( trial): # skip trial continue stim = covar.handler(trial) if covar.basis is None: dmc = stim 
else: dmc = conv_basis(stim, covar.basis, covar.offset) # print(dmc.shape) dmt.append(dmc) dmt = np.concatenate(dmt, axis=1) assert dmt.shape == (nbin, self.edim) if np.any(np.isnan(dmt)) or np.any(np.isinf(dmt)): warnings.warn('Design matrix contains NaN or Inf') if self.bias: dmt = np.column_stack([np.ones(dmt.shape[0]), dmt]) dm.append(dmt) if concat: dm = np.concatenate(dm, axis=0) return dm def combine_weights(self, w, axis=1): ws = np.split( w, np.cumsum([covar.edim for covar in self.covariates.values()])[:-1], axis=axis) W = namedtuple('Weight', [covar.label for covar in self.covariates.values()]) return W(*ws) class Covariate: def __init__(self, design, label, description, handler, basis=None, offset=0, condition=None, **kwargs): self.design = design self.label = label self.description = description self.handler = handler # function of trial self.basis = basis self.offset = offset self.condition = condition sdim = np.shape(handler(next(iter( design.experiment.trials.values()))))[1] self.sdim = sdim if basis is None: edim = sdim else: edim = basis.edim * sdim self.edim = edim def _time2bin(timing, binwidth, start, stop): duration = stop - start nbin = ceil(duration / binwidth) bins = start + np.arange(nbin + 1) * binwidth # add the last bin edge s = np.histogram(timing, bins=bins)[0] s = s.astype(np.float) return s
[ "numpy.histogram", "math.ceil", "numpy.ones", "numpy.isnan", "numpy.concatenate", "warnings.warn", "numpy.isinf", "numpy.arange" ]
[((6575, 6600), 'math.ceil', 'ceil', (['(duration / binwidth)'], {}), '(duration / binwidth)\n', (6579, 6600), False, 'from math import ceil\n'), ((3665, 3715), 'numpy.concatenate', 'np.concatenate', (['[trial[label] for trial in trials]'], {}), '([trial[label] for trial in trials])\n', (3679, 3715), True, 'import numpy as np\n'), ((6684, 6715), 'numpy.histogram', 'np.histogram', (['timing'], {'bins': 'bins'}), '(timing, bins=bins)\n', (6696, 6715), True, 'import numpy as np\n'), ((4107, 4124), 'numpy.concatenate', 'np.concatenate', (['s'], {}), '(s)\n', (4121, 4124), True, 'import numpy as np\n'), ((5011, 5038), 'numpy.concatenate', 'np.concatenate', (['dmt'], {'axis': '(1)'}), '(dmt, axis=1)\n', (5025, 5038), True, 'import numpy as np\n'), ((5376, 5402), 'numpy.concatenate', 'np.concatenate', (['dm'], {'axis': '(0)'}), '(dm, axis=0)\n', (5390, 5402), True, 'import numpy as np\n'), ((6620, 6639), 'numpy.arange', 'np.arange', (['(nbin + 1)'], {}), '(nbin + 1)\n', (6629, 6639), True, 'import numpy as np\n'), ((5168, 5218), 'warnings.warn', 'warnings.warn', (['"""Design matrix contains NaN or Inf"""'], {}), "('Design matrix contains NaN or Inf')\n", (5181, 5218), False, 'import warnings\n'), ((5111, 5124), 'numpy.isnan', 'np.isnan', (['dmt'], {}), '(dmt)\n', (5119, 5124), True, 'import numpy as np\n'), ((5136, 5149), 'numpy.isinf', 'np.isinf', (['dmt'], {}), '(dmt)\n', (5144, 5149), True, 'import numpy as np\n'), ((5284, 5305), 'numpy.ones', 'np.ones', (['dmt.shape[0]'], {}), '(dmt.shape[0])\n', (5291, 5305), True, 'import numpy as np\n')]
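The binning done by the _time2bin helper at the bottom of the module above reduces to a numpy histogram over fixed-width bins; a self-contained sketch with made-up event times:

import numpy as np
from math import ceil

spike_times = np.array([0.01, 0.02, 0.55, 1.90])        # seconds within one trial
binwidth, start, stop = 0.5, 0.0, 2.0
nbin = ceil((stop - start) / binwidth)                # 4 bins
bins = start + np.arange(nbin + 1) * binwidth       # [0.  0.5 1.  1.5 2. ]
counts = np.histogram(spike_times, bins=bins)[0].astype(float)
print(counts)   # [2. 1. 0. 1.], what _time2bin returns for this trial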
#!/usr/bin/env python3 # std from pathlib import Path import unittest # 3rd import numpy as np # ours from clusterking.util.testing import MyTestCase from clusterking.data.dwe import DataWithErrors class TestDataWithErrors(MyTestCase): def setUp(self): dpath = Path(__file__).parent / "data" / "test.sql" self.data = [[100.0, 200.0], [400.0, 500.0]] self.dwe = DataWithErrors(dpath) def ndwe(self): return self.dwe.copy(deep=True) def test_empty(self): dwe = self.ndwe() self.assertEqual(dwe.abs_cov.shape, (2, 2)) self.assertAllClose(dwe.rel_cov, np.zeros((2, 2))) self.assertFalse(dwe.poisson_errors) def test_data_no_errors(self): dwe = self.ndwe() self.assertAllClose(dwe.data(), self.data) self.assertAllClose( dwe.data(normalize=True), [[1 / 3, 2 / 3], [4 / 9, 5 / 9]] ) all_zero = np.zeros((2, 2)) unit = np.eye(2) self.assertAllClose(dwe.rel_cov, all_zero) self.assertAllClose(dwe.abs_cov, all_zero) self.assertAllClose(dwe.cov(), all_zero) self.assertAllClose(dwe.corr(), unit) # ------------------------------------------------------------------------- def test_reset_errors(self): dwe = self.ndwe() cov = [[4.0, 4.0], [4.0, 16.0]] dwe.add_err_cov(cov) dwe.add_err_corr(1, np.identity(2)) dwe.add_err_uncorr(0.3) dwe.add_err_poisson(normalization_scale=25) dwe.reset_errors() self.assertEqual(np.count_nonzero(dwe.cov()), 0) self.assertEqual(np.count_nonzero(dwe.abs_cov), 0) self.assertEqual(np.count_nonzero(dwe.rel_cov), 0) self.assertFalse(dwe.poisson_errors) self.assertEqual(dwe.poisson_errors_scale, 1.0) self.assertAllClose( dwe.corr(), np.tile(np.eye(dwe.nbins), (dwe.n, 1, 1)) ) def test_add_err_cov(self): dwe = self.ndwe() # Equal for all data points cov = [[4.0, 4.0], [4.0, 16.0]] dwe.add_err_cov(cov) self.assertAllClose(dwe.cov(), cov) self.assertAllClose(dwe.corr(), [[1.0, 1 / 2], [1 / 2, 1.0]]) self.assertAllClose(dwe.err(), [2.0, 4.0]) def test_add_err_corr(self): dwe = self.ndwe() dwe.add_err_corr(1, np.identity(2)) self.assertAllClose(dwe.corr(), np.identity(2)) corr = [[1.0, 0.32], [0.4, 1.0]] dwe = self.ndwe() dwe.add_err_corr(1.0, corr) self.assertAllClose(dwe.corr(), corr) self.assertAllClose(dwe.err(), 1) dwe = self.ndwe() err = [1.52, 2.34] dwe.add_err_corr(err, corr) self.assertAllClose(dwe.err(), err) self.assertAllClose(dwe.corr(), corr) dwe.add_err_corr(err, corr) self.assertAllClose(dwe.corr(), corr) def test_add_err_uncorr(self): dwe = self.ndwe() dwe.add_err_uncorr(0.3) self.assertAllClose(dwe.corr(), np.identity(2)) self.assertAllClose(dwe.err(), 0.3) dwe = self.ndwe() err = [0.3, 1.5] dwe.add_err_uncorr(err) self.assertAllClose(dwe.corr(), np.identity(2)) self.assertAllClose(dwe.err(), err) def test_add_err_maxcorr(self): dwe = self.ndwe() dwe.add_err_maxcorr(0.3) self.assertAllClose(dwe.corr(), np.ones((2, 2, 2))) self.assertAllClose(dwe.err(), 0.3) dwe = self.ndwe() err = [0.3, 1.5] dwe.add_err_maxcorr(err) self.assertAllClose(dwe.corr(), np.ones((2, 2, 2))) self.assertAllClose(dwe.err(), err) # todo: test rel_err # -------------------------------------------------------------------------- def test_add_err_poisson(self): dwe = self.ndwe() dwe.add_err_poisson() self.assertAllClose(dwe.err(), np.sqrt(self.data)) self.assertAllClose(dwe.err(relative=True), 1 / np.sqrt(self.data)) self.assertAllClose(dwe.corr(), np.eye(len(self.data))) def test_add_err_poisson_scaled_relative(self): # Now we increase the statistics by a factor of 4 and expect that the # Normed errors are reduced by a factor of 2. 
dwe1 = self.ndwe() dwe1.add_err_poisson() rel_err1 = dwe1.err(relative=True) dwe2 = self.ndwe() dwe2.add_err_poisson(normalization_scale=4) rel_err2 = dwe2.err(relative=True) self.assertAllClose(rel_err1, rel_err2 * 2) # -------------------------------------------------------------------------- def test_plot_dist_err(self): self.dwe.plot_dist_err() if __name__ == "__main__": unittest.main()
[ "numpy.identity", "numpy.eye", "numpy.sqrt", "numpy.ones", "pathlib.Path", "numpy.count_nonzero", "numpy.zeros", "clusterking.data.dwe.DataWithErrors", "unittest.main" ]
[((4666, 4681), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4679, 4681), False, 'import unittest\n'), ((394, 415), 'clusterking.data.dwe.DataWithErrors', 'DataWithErrors', (['dpath'], {}), '(dpath)\n', (408, 415), False, 'from clusterking.data.dwe import DataWithErrors\n'), ((928, 944), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (936, 944), True, 'import numpy as np\n'), ((960, 969), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (966, 969), True, 'import numpy as np\n'), ((623, 639), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (631, 639), True, 'import numpy as np\n'), ((1405, 1419), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (1416, 1419), True, 'import numpy as np\n'), ((1614, 1643), 'numpy.count_nonzero', 'np.count_nonzero', (['dwe.abs_cov'], {}), '(dwe.abs_cov)\n', (1630, 1643), True, 'import numpy as np\n'), ((1673, 1702), 'numpy.count_nonzero', 'np.count_nonzero', (['dwe.rel_cov'], {}), '(dwe.rel_cov)\n', (1689, 1702), True, 'import numpy as np\n'), ((2330, 2344), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2341, 2344), True, 'import numpy as np\n'), ((2386, 2400), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2397, 2400), True, 'import numpy as np\n'), ((2991, 3005), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (3002, 3005), True, 'import numpy as np\n'), ((3175, 3189), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (3186, 3189), True, 'import numpy as np\n'), ((3371, 3389), 'numpy.ones', 'np.ones', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (3378, 3389), True, 'import numpy as np\n'), ((3560, 3578), 'numpy.ones', 'np.ones', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (3567, 3578), True, 'import numpy as np\n'), ((3864, 3882), 'numpy.sqrt', 'np.sqrt', (['self.data'], {}), '(self.data)\n', (3871, 3882), True, 'import numpy as np\n'), ((1869, 1886), 'numpy.eye', 'np.eye', (['dwe.nbins'], {}), '(dwe.nbins)\n', (1875, 1886), True, 'import numpy as np\n'), ((3940, 3958), 'numpy.sqrt', 'np.sqrt', (['self.data'], {}), '(self.data)\n', (3947, 3958), True, 'import numpy as np\n'), ((278, 292), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (282, 292), False, 'from pathlib import Path\n')]
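The error-handling assertions above rest on the standard decomposition of a covariance matrix into per-bin errors and a correlation matrix, cov[i, j] = corr[i, j] * err[i] * err[j]; a small numpy sketch with the same numbers as test_add_err_cov:

import numpy as np

cov = np.array([[4.0, 4.0], [4.0, 16.0]])
err = np.sqrt(np.diag(cov))            # [2. 4.], matching dwe.err()
corr = cov / np.outer(err, err)        # [[1.  0.5] [0.5 1. ]], matching dwe.corr()
print(err)
print(corr)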
import os import numpy as np import cv2 import matplotlib.pyplot as plt import matplotlib.animation as animation try: from urllib.request import urlopen from urllib.error import HTTPError except ImportError: from urllib2 import urlopen, HTTPError from load_inningdata import get_inning, get_pitch_by_pitch_per_atbat from load_games import get_games from load_players import get_players from visualize.Pitch import course, pitch_history, pitcher_info, batter_info from xmlio.parser import parse, parse_path from xmlio.writer import writer as xmlwriter HEIGHT = 720 WIDTH = 1280 def get_player(players, id): for player in players: if player.id == id: return player return None def inning(atbat_list, players, dst_folder, top_bottom): for atbat in atbat_list: # pitch_by_pitch = get_pitch_by_pitch_per_atbat(atbat.num) pitch_by_pitch = atbat.pitch_by_pitch pitcher_id = atbat.pitcher batter_id = atbat.batter pitcher = get_player(players, pitcher_id) batter = get_player(players, batter_id) print("pitcher", pitcher) print("batter", batter) atbat_img = np.zeros((220,800,3), dtype=np.uint8) picthing_img = np.zeros((500,800,3), dtype=np.uint8) if top_bottom == "top": atbat_img = pitcher_info(atbat_img, pitcher, atbat.p_throws, True) atbat_img = batter_info(atbat_img, batter, atbat.stand, False) else: atbat_img = pitcher_info(atbat_img, pitcher, atbat.p_throws, False) atbat_img = batter_info(atbat_img, batter, atbat.stand, True) for idx, pitch in enumerate(pitch_by_pitch): # print(pitch) course(picthing_img, atbat, pitch) atbat_img = pitch_history(atbat_img, idx, pitch) img = cv2.vconcat([picthing_img, atbat_img]) cv2.imshow("course", img) cv2.waitKey(100) os.makedirs(dst_folder, exist_ok=True) cv2.imwrite(os.path.join(dst_folder, f"{atbat.num:03d}.jpg"), img) BASE_URL = 'http://gd2.mlb.com/components/game/mlb/year_{0}/month_{1:02d}/day_{2:02d}/' GAME_URL = BASE_URL + 'gid_{3}/{4}' BASE_PATH = './game/mlb/year_{0}/month_{1:02d}/day_{2:02d}/' GAME_PATH = BASE_PATH + 'gid_{3}/{4}' def download_xmldata(url): data = urlopen(url) data_str = data.read().decode("utf-8") xmldata = parse(data_str) return xmldata def download_inningdata(year, month, day, game_id): path = GAME_PATH.format(year, month, day, game_id, 'inning/inning_all.xml') if os.path.exists(path): xmldata = parse_path(path) else: xmldata = download_xmldata(GAME_URL.format(year, month, day, game_id, 'inning/inning_all.xml')) xmlwriter(path, xmldata) inningdata = {} for i in range(1, 9+1): top_atbat_list, bot_atbat_list = get_inning(xmldata, i) inningdata[i] = { "top":top_atbat_list, "bottom":bot_atbat_list } return inningdata def download_players(year, month, day, game_id): path = GAME_PATH.format(year, month, day, game_id, 'players.xml') if os.path.exists(path): xmldata = parse_path(path) else: xmldata = download_xmldata(GAME_URL.format(year, month, day, game_id, 'players.xml')) xmlwriter(path, xmldata) players = get_players(xmldata) return players def download_games(year, month, day): path = BASE_PATH.format(year, month, day) + 'scoreboard.xml' if os.path.exists(path): xmldata = parse_path(path) else: xmldata = download_xmldata(BASE_URL.format(year, month, day) + 'scoreboard.xml') xmlwriter(path, xmldata) games = get_games(xmldata) return games if __name__ == "__main__": year = 2021 month = 5 day = 5 games = download_games(year, month, day) for game in games: players = download_players(year, month, day, game.game_id) inningdata = download_inningdata(year, month, day, game.game_id) dst_folder = f"dst_atbat/{game.game_id}" for i, _inning in 
inningdata.items(): top_atbat_list = _inning.get("top") inning(top_atbat_list, players, dst_folder, "top") bot_atbat_list = _inning.get("bottom") inning(bot_atbat_list, players, dst_folder, "bottom") break
[ "os.path.exists", "cv2.vconcat", "xmlio.writer.writer", "urllib2.urlopen", "xmlio.parser.parse", "os.makedirs", "visualize.Pitch.batter_info", "visualize.Pitch.course", "xmlio.parser.parse_path", "os.path.join", "cv2.imshow", "load_games.get_games", "numpy.zeros", "visualize.Pitch.pitcher_info", "visualize.Pitch.pitch_history", "load_inningdata.get_inning", "load_players.get_players", "cv2.waitKey" ]
[((2403, 2415), 'urllib2.urlopen', 'urlopen', (['url'], {}), '(url)\n', (2410, 2415), False, 'from urllib2 import urlopen, HTTPError\n'), ((2475, 2490), 'xmlio.parser.parse', 'parse', (['data_str'], {}), '(data_str)\n', (2480, 2490), False, 'from xmlio.parser import parse, parse_path\n'), ((2655, 2675), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2669, 2675), False, 'import os\n'), ((2834, 2858), 'xmlio.writer.writer', 'xmlwriter', (['path', 'xmldata'], {}), '(path, xmldata)\n', (2843, 2858), True, 'from xmlio.writer import writer as xmlwriter\n'), ((3246, 3266), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3260, 3266), False, 'import os\n'), ((3415, 3439), 'xmlio.writer.writer', 'xmlwriter', (['path', 'xmldata'], {}), '(path, xmldata)\n', (3424, 3439), True, 'from xmlio.writer import writer as xmlwriter\n'), ((3457, 3477), 'load_players.get_players', 'get_players', (['xmldata'], {}), '(xmldata)\n', (3468, 3477), False, 'from load_players import get_players\n'), ((3613, 3633), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3627, 3633), False, 'import os\n'), ((3777, 3801), 'xmlio.writer.writer', 'xmlwriter', (['path', 'xmldata'], {}), '(path, xmldata)\n', (3786, 3801), True, 'from xmlio.writer import writer as xmlwriter\n'), ((3817, 3835), 'load_games.get_games', 'get_games', (['xmldata'], {}), '(xmldata)\n', (3826, 3835), False, 'from load_games import get_games\n'), ((1219, 1258), 'numpy.zeros', 'np.zeros', (['(220, 800, 3)'], {'dtype': 'np.uint8'}), '((220, 800, 3), dtype=np.uint8)\n', (1227, 1258), True, 'import numpy as np\n'), ((1281, 1320), 'numpy.zeros', 'np.zeros', (['(500, 800, 3)'], {'dtype': 'np.uint8'}), '((500, 800, 3), dtype=np.uint8)\n', (1289, 1320), True, 'import numpy as np\n'), ((2015, 2053), 'os.makedirs', 'os.makedirs', (['dst_folder'], {'exist_ok': '(True)'}), '(dst_folder, exist_ok=True)\n', (2026, 2053), False, 'import os\n'), ((2696, 2712), 'xmlio.parser.parse_path', 'parse_path', (['path'], {}), '(path)\n', (2706, 2712), False, 'from xmlio.parser import parse, parse_path\n'), ((2953, 2975), 'load_inningdata.get_inning', 'get_inning', (['xmldata', 'i'], {}), '(xmldata, i)\n', (2963, 2975), False, 'from load_inningdata import get_inning, get_pitch_by_pitch_per_atbat\n'), ((3287, 3303), 'xmlio.parser.parse_path', 'parse_path', (['path'], {}), '(path)\n', (3297, 3303), False, 'from xmlio.parser import parse, parse_path\n'), ((3654, 3670), 'xmlio.parser.parse_path', 'parse_path', (['path'], {}), '(path)\n', (3664, 3670), False, 'from xmlio.parser import parse, parse_path\n'), ((1379, 1433), 'visualize.Pitch.pitcher_info', 'pitcher_info', (['atbat_img', 'pitcher', 'atbat.p_throws', '(True)'], {}), '(atbat_img, pitcher, atbat.p_throws, True)\n', (1391, 1433), False, 'from visualize.Pitch import course, pitch_history, pitcher_info, batter_info\n'), ((1459, 1509), 'visualize.Pitch.batter_info', 'batter_info', (['atbat_img', 'batter', 'atbat.stand', '(False)'], {}), '(atbat_img, batter, atbat.stand, False)\n', (1470, 1509), False, 'from visualize.Pitch import course, pitch_history, pitcher_info, batter_info\n'), ((1550, 1605), 'visualize.Pitch.pitcher_info', 'pitcher_info', (['atbat_img', 'pitcher', 'atbat.p_throws', '(False)'], {}), '(atbat_img, pitcher, atbat.p_throws, False)\n', (1562, 1605), False, 'from visualize.Pitch import course, pitch_history, pitcher_info, batter_info\n'), ((1631, 1680), 'visualize.Pitch.batter_info', 'batter_info', (['atbat_img', 'batter', 'atbat.stand', '(True)'], {}), '(atbat_img, 
batter, atbat.stand, True)\n', (1642, 1680), False, 'from visualize.Pitch import course, pitch_history, pitcher_info, batter_info\n'), ((1778, 1812), 'visualize.Pitch.course', 'course', (['picthing_img', 'atbat', 'pitch'], {}), '(picthing_img, atbat, pitch)\n', (1784, 1812), False, 'from visualize.Pitch import course, pitch_history, pitcher_info, batter_info\n'), ((1838, 1874), 'visualize.Pitch.pitch_history', 'pitch_history', (['atbat_img', 'idx', 'pitch'], {}), '(atbat_img, idx, pitch)\n', (1851, 1874), False, 'from visualize.Pitch import course, pitch_history, pitcher_info, batter_info\n'), ((1896, 1934), 'cv2.vconcat', 'cv2.vconcat', (['[picthing_img, atbat_img]'], {}), '([picthing_img, atbat_img])\n', (1907, 1934), False, 'import cv2\n'), ((1948, 1973), 'cv2.imshow', 'cv2.imshow', (['"""course"""', 'img'], {}), "('course', img)\n", (1958, 1973), False, 'import cv2\n'), ((1987, 2003), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (1998, 2003), False, 'import cv2\n'), ((2075, 2123), 'os.path.join', 'os.path.join', (['dst_folder', 'f"""{atbat.num:03d}.jpg"""'], {}), "(dst_folder, f'{atbat.num:03d}.jpg')\n", (2087, 2123), False, 'import os\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import pandas as pd import numpy as np import scipy import collections import itertools import json from bokeh.transform import linear_cmap, transform from bokeh.palettes import Set3, Viridis256 from bokeh.models import ( LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool, ) from bokeh.models.ranges import FactorRange from bokeh.layouts import row, column from bokeh import plotting from stats import cluster_gnn_map def _get_palette(cmap=Set3[12], n=12, start=0, end=1): import matplotlib, numpy as np linspace = np.linspace(start, end, n) cmap = matplotlib.colors.LinearSegmentedColormap.from_list("customcmap", cmap) palette = cmap(linspace) hex_palette = [matplotlib.colors.rgb2hex(c) for c in palette] return hex_palette class Figure: def __init__(self, data, *args, **kw): self._data = data self._fig = None self._kw = kw self._source = None @property def source(self): return self._source class MatrixFigure(Figure): def __init__(self, *args, **kw): super().__init__(*args, **kw) def _process_colors(self, colors, axis=0): labels = None if colors is not None: if colors is True: if axis == 0: index = self._data.data.index else: index = self._data.data.T.index d = {} for x in index.names: levels = sorted(index.get_level_values(x).unique()) d[x] = list(map(str, _get_palette(n=len(levels)))) colors = pd.DataFrame(d, index=levels) if isinstance(colors, (pd.Series, pd.DataFrame)): if axis == 0: colors = colors.reindex(self._data.data.index) else: colors = colors.reindex(self._data.data.columns) if isinstance(colors, pd.DataFrame): labels = list(colors.columns) else: if colors.name is None: labels = [""] else: labels = [colors.name] return colors, labels def _add_dendrogram(self, padding, axis=0): """Add dendrogram""" results = scipy.cluster.hierarchy.dendrogram( self._data.linkage(axis=axis), no_plot=True ) if axis == 0: ymax = float(len(self._data.linkage(axis=axis))) + 0.5 xmax = padding else: ymax = padding xmax = float(len(self._data.linkage(axis=axis))) + 0.5 ycoord = pd.DataFrame(results["icoord"]) ycoord = ycoord * (ymax / ycoord.max().max()) ycoord = -ycoord.values + ymax + 0.5 xcoord = pd.DataFrame(results["dcoord"]) xcoord = xcoord * (xmax / xcoord.max().max()) - xmax xcoord = xcoord.values for x, y in zip(xcoord, ycoord): x = list(map(lambda z: -z, x)) self._fig.line(x=x, y=y, line_color="black") def heatmap( self, low=None, high=None, palette="Viridis256", row_colors=None, col_colors=None, color_bar=True, col_cluster=False, row_cluster=False, cbar_title=None, dendrogram=True, dendrogram_ratio=0.2, *args, **kwargs, ): """Make heatmap of data, possibly with dendrogram Args: data (:class:`~pd.DataFrame`) : A DataFrame matrix with 'z' values to plot palette (str or seq[color], optional) : A palette to use to colormap z Any additional keyword arguments are passed to :func:`bokeh.plotting.rect` """ data = self._data padding_cols, padding_rows = 0, 0 row_colors, row_labels = self._process_colors(row_colors) col_colors, col_labels = self._process_colors(col_colors, axis=1) if row_cluster: data.cluster() if col_cluster: data.cluster(axis=1) def _factors(labels, cluster, axis=0): padding = 0 factors = list(data.linkage_index(axis)) if labels: factors = labels + factors if cluster and dendrogram: padding = int(dendrogram_ratio * len(factors)) factors = list(map(lambda x: f"__{x}", range(padding))) + list(factors) return factors, padding row_factors, row_padding = 
_factors(row_labels, row_cluster) col_factors, col_padding = _factors(col_labels, col_cluster, axis=1) # Setup ranges if "x_range" not in self._kw.keys(): self._kw["x_range"] = FactorRange(factors=row_factors) if "y_range" not in self._kw.keys(): self._kw["y_range"] = FactorRange(factors=list(reversed(col_factors))) self._fig = plotting.figure(**self._kw) # Apparently reordering is necessary? I thought it would # suffice to set the factors on the x/y ranges datastack = data.reorder().data.stack() # Recall: want to reflect the matrix so y/x shift datastack.index.names = ["y", "x"] df = pd.DataFrame(datastack, columns=["z"]).reset_index() self._source = ColumnDataSource(df) # Setup color mapper if low is None: low = df.z.min() if high is None: high = df.z.max() color_mapper = LinearColorMapper(palette=palette, low=low, high=high) # Make heatmap self._fig.rect( x="x", y="y", width=1, height=1, line_color="black", line_alpha=0.2, alpha=0.8, source=self.source, fill_color=transform("z", color_mapper), *args, **kwargs, ) # Add dendrogram if dendrogram: if row_cluster: self._add_dendrogram(row_padding, axis=0) if col_cluster: # FIXME: currently not correct # self._add_dendrogram(col_padding, axis=1) pass # Setup hover tooltips hover = HoverTool() hover.tooltips = [("Row", "@y"), ("Column", "@x"), ("Value", "@z")] self._fig.add_tools(hover) # Add colorbar if color_bar: color_bar = ColorBar( color_mapper=color_mapper, location=(1, 0), ticker=BasicTicker(), border_line_color=None, title=cbar_title, ) self._fig.add_layout(color_bar, "right") # Add group colors if row_colors is not None: row_colors = row_colors.stack() row_colors = row_colors.reset_index() row_colors.columns = ["y", "x", "color"] self._fig.rect( x="x", y="y", fill_color="color", line_color=None, line_alpha=0.2, alpha=0.8, source=row_colors, width=0.6, height=1, ) # Add col colors; transpose may be needed if col_colors is not None: col_colors = col_colors.stack() col_colors = col_colors.reset_index() col_colors.columns = ["y", "x", "color"] self._fig.rect( x="x", y="y", fill_color="color", line_color=None, line_alpha=0.2, alpha=0.8, source=col_colors, width=0.6, height=1, ) # Configure axes self._fig.axis.major_tick_line_color = None self._fig.axis.minor_tick_line_color = None self._fig.xaxis.major_label_overrides = dict( zip( row_factors, map(lambda x: "" if x.startswith("__") else x, row_factors) ) ) self._fig.xaxis.major_label_orientation = 1.0 self._fig.axis.axis_line_color = None self._fig.grid.grid_line_color = None self._fig.outline_line_color = None return self._fig def vbar_stack(self, factor_levels=None, groups=None): data = self._data.copy().data # Factor is the index levels = data.index.names if factor_levels is None: factor_levels = list(range(len(data.index.names))) factors = list(tuple([x[i] for i in factor_levels]) for x in data.index) if groups is None: groups = sorted(data.columns) data.reset_index(inplace=True) data["x"] = factors self._source = ColumnDataSource(data) if "x_range" not in self._kw.keys(): self._kw["x_range"] = FactorRange(*factors) self._fig = plotting.figure(**self._kw) self._fig.vbar_stack( groups, source=self.source, x="x", color=_get_palette(n=len(groups)), legend_label=groups, width=1, line_color="black", line_alpha=0.2, line_width=0.5, ) self._fig.add_layout(self._fig.legend[0], "right") hover = HoverTool() hover.tooltips = list(map(lambda x: (x[0], f"@{x[1]}"), zip(levels, levels))) hover.tooltips.extend( list(map(lambda x: (x[0], f"@{x[1]}{{%0.1f}}"), zip(groups, groups))) ) 
self._fig.add_tools(hover) self._fig.add_tools(BoxSelectTool()) self._fig.axis.major_tick_line_color = None self._fig.axis.minor_tick_line_color = None self._fig.xaxis.group_label_orientation = 1.0 self._fig.xaxis.subgroup_label_orientation = 1.0 self._fig.xaxis.major_label_orientation = 1.0 self._fig.xaxis.major_label_text_font_size = "0pt" self._fig.axis.axis_line_color = None self._fig.grid.grid_line_color = None self._fig.outline_line_color = "black" return self._fig def world_map(self): self._kw["plot_height"] = 600 self._kw["title"] = "World map" self._kw.pop("y_axis_label", None) self._kw.pop("x_range", None) self._fig = plotting.figure(**self._kw) from data import natural_earth geosource = GeoJSONDataSource(geojson=json.dumps(natural_earth())) hover = HoverTool( names=["samples"], tooltips=[ ("sample_node_population", "@sample_node_population"), ("sample_name", "@sample_name"), ], ) hover_map = HoverTool(names=["choropleth"], tooltips=[("country", "@country")]) # FIXME: get rid of explicit key groups = sorted(list(set(self.source.data["sample_node_population"]))) palette = dict(zip(groups, _get_palette(n=len(groups)))) self.source.data["colors"] = [ palette[x] for x in self.source.data["sample_node_population"] ] self._fig.add_tools(hover) self._fig.add_tools(hover_map) self._fig.add_tools(BoxSelectTool()) self._fig.xgrid.grid_line_color = None self._fig.ygrid.grid_line_color = None self._fig.patches( "xs", "ys", source=geosource, fill_alpha=0.1, line_width=0.7, line_alpha=0.5, line_color="black", color="white", name="choropleth", ) self._fig.circle( x="longitude", y="latitude", size=7, fill_alpha=1, source=self.source, name="samples", color="colors", ) return self._fig class Matrix: def __init__(self, data=None, *args, **kw): self.data = data self._row_linkage = None self._col_linkage = None self._row_colors = None self._col_colors = None @property def is_square(self): return self.data.shape[0] == self.data.shape[1] def order(self, axis=0): """Return order of axis indices. 
Will change if clustering""" if axis == 1: return self.col_order return self.row_order def linkage_index(self, axis=0): """Return linkage index""" # By default return other axis if no linkage and square matrix if self.linkage(axis) is None and self.is_square: if self.linkage(1 - axis) is not None: axis = 1 - axis order = self.order(axis=axis) if axis == 0: return self.data.index.values[order] elif axis == 1: return self.data.T.index.values[order] def linkage(self, axis=0): if axis == 0: return self.row_linkage elif axis == 1: return self.col_linkage @property def row_order(self): if self.row_linkage is None: return list(range(self.data.shape[0])) return scipy.cluster.hierarchy.leaves_list(self.row_linkage) @property def col_order(self): if self.col_linkage is None: return list(range(self.data.shape[1])) return scipy.cluster.hierarchy.leaves_list(self.col_linkage) @property def row_colors(self): return self._row_colors @row_colors.setter def row_colors(self, value): self._row_colors = value @property def col_colors(self): return self._col_colors @col_colors.setter def col_colors(self, value): self._col_colors = value @property def data(self): return self._data @data.setter def data(self, data): if not isinstance(data, pd.DataFrame): print("only data frames allowed") raise Exception self._data = data if self.rowname is None: self.rowname = "Row" if self.colname is None: self.colname = "Column" # Will fail on multiindices @property def rowname(self): return self._data.index.name @rowname.setter def rowname(self, value): self._data.index.name = value @property def colname(self): return self._data.columns.name @colname.setter def colname(self, value): self._data.columns.name = value @property def row_linkage(self): return self._row_linkage @row_linkage.setter def row_linkage(self, value): self._row_linkage = value @property def col_linkage(self): return self._col_linkage @col_linkage.setter def col_linkage(self, value): self._col_linkage = value def copy(self): obj = Matrix(self.data) # Check and set all other attributes? obj.row_linkage = self.row_linkage obj.col_linkage = self.col_linkage return obj # Unnecessary? In any case make use of linkage_order def reorder(self, axis=0, inplace=False, both=True): if inplace: obj = self else: obj = self.copy() data = obj.data if axis == 0: order = obj.row_order elif axis == 1: data = data.T order = obj.col_order data = data.reindex(obj.linkage_index(axis=axis)) if both: data = data.T indexnames = data.index.names data.reindex(index=data.T.index) data.index.names = indexnames data = data.T if axis == 0: obj.data = data elif axis == 1: obj.data = data.T return obj def _zscore(self, axis=1): """Standardize mean and variance. 
0=rows, 1=columns""" if axis == 1: zscore = self.data.copy() else: zscore = self.data.copy().T for column in list(zscore): zscore[column] = scipy.stats.zscore(zscore[column]) if axis == 1: self.data = zscore else: self.data = zscore.T return self def rescale(self, zscore=True, standardize=False, axis=1): if zscore: return self._zscore(axis=axis) return self def cluster(self, axis=0, method="average", metric="euclidean", **kw): optimal_ordering = kw.pop("optimal_ordering", True) return self._calculate_linkage( axis=axis, method=method, optimal_ordering=optimal_ordering, **kw ) def _calculate_linkage(self, axis=0, method="average", optimal_ordering=True, **kw): data = self.data.copy() if axis == 1: data = data.T linkage = scipy.cluster.hierarchy.linkage( data, method=method, optimal_ordering=optimal_ordering, **kw ) if axis == 0: self.row_linkage = linkage else: self.col_linkage = linkage return self class TSData: def __init__(self, ts, *args, **kw): self._ts = ts self._data = None @property def data(self): return self._data @data.setter def data(self, data): if not isinstance(data, Matrix): print("only Matrix class allowed") raise Exception self._data = data @property def ts(self): return self._ts def _group_samples(self, by="population"): sample_group_set_map = collections.defaultdict(list) for population in self.ts.populations(): md = json.loads(population.metadata.decode()) key = md[by] sample_group_set_map[key].extend( list(self.ts.samples(population=population.id)) ) groups = list(sample_group_set_map.keys()) sample_group_sets = [sample_group_set_map[k] for k in groups] return groups, sample_group_sets def fst(self, by="population", **kw): groups, sample_group_sets = self._group_samples(by=by) k = len(list(self.ts.populations())) i = list(itertools.product(list(range(k)), list(range(k)))) fst = self.ts.Fst(sample_group_sets, indexes=i, **kw) df = pd.DataFrame( np.reshape(fst, newshape=(k, k)), columns=groups, index=groups ) df.index.name = by df.columns.name = by self.data = Matrix(df) return self def figure(data, **kwargs): if isinstance(data, Matrix): return MatrixFigure(data, **kwargs)
[ "bokeh.transform.transform", "scipy.cluster.hierarchy.leaves_list", "bokeh.models.BasicTicker", "data.natural_earth", "bokeh.models.BoxSelectTool", "bokeh.plotting.figure", "numpy.reshape", "bokeh.models.LinearColorMapper", "numpy.linspace", "scipy.stats.zscore", "bokeh.models.ColumnDataSource", "scipy.cluster.hierarchy.linkage", "collections.defaultdict", "matplotlib.colors.rgb2hex", "pandas.DataFrame", "bokeh.models.ranges.FactorRange", "matplotlib.colors.LinearSegmentedColormap.from_list", "bokeh.models.HoverTool" ]
[((698, 724), 'numpy.linspace', 'np.linspace', (['start', 'end', 'n'], {}), '(start, end, n)\n', (709, 724), True, 'import matplotlib, numpy as np\n'), ((736, 807), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'matplotlib.colors.LinearSegmentedColormap.from_list', (['"""customcmap"""', 'cmap'], {}), "('customcmap', cmap)\n", (787, 807), False, 'import matplotlib, numpy as np\n'), ((856, 884), 'matplotlib.colors.rgb2hex', 'matplotlib.colors.rgb2hex', (['c'], {}), '(c)\n', (881, 884), False, 'import matplotlib, numpy as np\n'), ((2801, 2832), 'pandas.DataFrame', 'pd.DataFrame', (["results['icoord']"], {}), "(results['icoord'])\n", (2813, 2832), True, 'import pandas as pd\n'), ((2950, 2981), 'pandas.DataFrame', 'pd.DataFrame', (["results['dcoord']"], {}), "(results['dcoord'])\n", (2962, 2981), True, 'import pandas as pd\n'), ((5109, 5136), 'bokeh.plotting.figure', 'plotting.figure', ([], {}), '(**self._kw)\n', (5124, 5136), False, 'from bokeh import plotting\n'), ((5496, 5516), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (5512, 5516), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((5678, 5732), 'bokeh.models.LinearColorMapper', 'LinearColorMapper', ([], {'palette': 'palette', 'low': 'low', 'high': 'high'}), '(palette=palette, low=low, high=high)\n', (5695, 5732), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((6420, 6431), 'bokeh.models.HoverTool', 'HoverTool', ([], {}), '()\n', (6429, 6431), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((8956, 8978), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['data'], {}), '(data)\n', (8972, 8978), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((9101, 9128), 'bokeh.plotting.figure', 'plotting.figure', ([], {}), '(**self._kw)\n', (9116, 9128), False, 'from bokeh import plotting\n'), ((9505, 9516), 'bokeh.models.HoverTool', 'HoverTool', ([], {}), '()\n', (9514, 9516), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((10506, 10533), 'bokeh.plotting.figure', 'plotting.figure', ([], {}), '(**self._kw)\n', (10521, 10533), False, 'from bokeh import plotting\n'), ((10665, 10796), 'bokeh.models.HoverTool', 'HoverTool', ([], {'names': "['samples']", 'tooltips': "[('sample_node_population', '@sample_node_population'), ('sample_name',\n '@sample_name')]"}), "(names=['samples'], tooltips=[('sample_node_population',\n '@sample_node_population'), ('sample_name', '@sample_name')])\n", (10674, 10796), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((10895, 10962), 'bokeh.models.HoverTool', 'HoverTool', ([], {'names': "['choropleth']", 'tooltips': "[('country', '@country')]"}), "(names=['choropleth'], tooltips=[('country', '@country')])\n", (10904, 10962), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, 
BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((13275, 13328), 'scipy.cluster.hierarchy.leaves_list', 'scipy.cluster.hierarchy.leaves_list', (['self.row_linkage'], {}), '(self.row_linkage)\n', (13310, 13328), False, 'import scipy\n'), ((13472, 13525), 'scipy.cluster.hierarchy.leaves_list', 'scipy.cluster.hierarchy.leaves_list', (['self.col_linkage'], {}), '(self.col_linkage)\n', (13507, 13525), False, 'import scipy\n'), ((16915, 17013), 'scipy.cluster.hierarchy.linkage', 'scipy.cluster.hierarchy.linkage', (['data'], {'method': 'method', 'optimal_ordering': 'optimal_ordering'}), '(data, method=method, optimal_ordering=\n optimal_ordering, **kw)\n', (16946, 17013), False, 'import scipy\n'), ((17653, 17682), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (17676, 17682), False, 'import collections\n'), ((4928, 4960), 'bokeh.models.ranges.FactorRange', 'FactorRange', ([], {'factors': 'row_factors'}), '(factors=row_factors)\n', (4939, 4960), False, 'from bokeh.models.ranges import FactorRange\n'), ((9059, 9080), 'bokeh.models.ranges.FactorRange', 'FactorRange', (['*factors'], {}), '(*factors)\n', (9070, 9080), False, 'from bokeh.models.ranges import FactorRange\n'), ((9789, 9804), 'bokeh.models.BoxSelectTool', 'BoxSelectTool', ([], {}), '()\n', (9802, 9804), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((11376, 11391), 'bokeh.models.BoxSelectTool', 'BoxSelectTool', ([], {}), '()\n', (11389, 11391), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((16162, 16196), 'scipy.stats.zscore', 'scipy.stats.zscore', (['zscore[column]'], {}), '(zscore[column])\n', (16180, 16196), False, 'import scipy\n'), ((18421, 18453), 'numpy.reshape', 'np.reshape', (['fst'], {'newshape': '(k, k)'}), '(fst, newshape=(k, k))\n', (18431, 18453), True, 'import matplotlib, numpy as np\n'), ((1771, 1800), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {'index': 'levels'}), '(d, index=levels)\n', (1783, 1800), True, 'import pandas as pd\n'), ((5420, 5458), 'pandas.DataFrame', 'pd.DataFrame', (['datastack'], {'columns': "['z']"}), "(datastack, columns=['z'])\n", (5432, 5458), True, 'import pandas as pd\n'), ((6000, 6028), 'bokeh.transform.transform', 'transform', (['"""z"""', 'color_mapper'], {}), "('z', color_mapper)\n", (6009, 6028), False, 'from bokeh.transform import linear_cmap, transform\n'), ((6722, 6735), 'bokeh.models.BasicTicker', 'BasicTicker', ([], {}), '()\n', (6733, 6735), False, 'from bokeh.models import LinearColorMapper, ColumnDataSource, HoverTool, ColorBar, BasicTicker, CategoricalScale, CategoricalAxis, GeoJSONDataSource, BoxSelectTool\n'), ((10631, 10646), 'data.natural_earth', 'natural_earth', ([], {}), '()\n', (10644, 10646), False, 'from data import natural_earth\n')]
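# A minimal, untested usage sketch for the Matrix / figure() heatmap API defined
# above. Assumptions not taken from the source: the module is saved and imported
# as `viz`, its local `stats` import resolves, a small symmetric random matrix is
# a reasonable stand-in for an Fst matrix, and the installed Bokeh version accepts
# the ColorBar / FactorRange / HoverTool calls used above.
import numpy as np
import pandas as pd
from bokeh.io import show

import viz  # hypothetical module name for the code above

labels = [f"pop{i}" for i in range(6)]
values = np.random.rand(6, 6)
values = (values + values.T) / 2            # symmetric, Fst-like matrix
df = pd.DataFrame(values, index=labels, columns=labels)

m = viz.Matrix(df)
fig = viz.figure(m)                         # dispatches to MatrixFigure
show(fig.heatmap(row_cluster=True, cbar_title="Fst"))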
import asyncio

import cv2
import numpy as np

from numpysocket import NumpySocket

THREADS = 3
frames = [None] * THREADS


async def send(sen, ack, i):
    """Send the i-th frame strip over `sen`, then wait for the receiver's ack."""
    global frames
    await sen.send_numpy(frames[i])
    ack.receive_ack()


async def main():
    global frames
    # host_ip = '172.27.3.3'
    # host_ip = '172.27.3.38'
    host_ip = 'localhost'

    cap = cv2.VideoCapture(0)

    # One sender socket per strip, connecting to consecutive ports on the host.
    senders = []
    for i in range(THREADS):
        tmp = NumpySocket()
        await tmp.start_client(host_ip, 9999 - i)
        senders.append(tmp)

    # One local server socket per strip, used to receive acknowledgements.
    receivers = []
    for i in range(THREADS):
        tmp = NumpySocket()
        tmp.start_server(8000 + i)
        receivers.append(tmp)

    # Read until the capture stops delivering frames.
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        ref_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame_resize = ref_frame[::2, ::2]   # halve the resolution before sending

        # Split the frame column-wise into one strip per sender socket.
        frames = np.array_split(frame_resize, THREADS, axis=1)

        try:
            # await asyncio.gather(*[senders[i].send_numpy(frames[i]) for i in range(THREADS)],
            #                      return_exceptions=False)
            await asyncio.gather(*[send(senders[i], receivers[i], i) for i in range(THREADS)],
                                 return_exceptions=False)
        except Exception:
            break

        for i in range(THREADS):
            receivers[i].receive_ack()

    # When everything is done, release the sockets and the video capture object.
    for i in range(THREADS):
        receivers[i].end_server()
    cap.release()


asyncio.run(main())
[ "numpy.array_split", "numpysocket.NumpySocket", "cv2.VideoCapture", "cv2.cvtColor" ]
[((362, 381), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (378, 381), False, 'import cv2\n'), ((443, 456), 'numpysocket.NumpySocket', 'NumpySocket', ([], {}), '()\n', (454, 456), False, 'from numpysocket import NumpySocket\n'), ((598, 611), 'numpysocket.NumpySocket', 'NumpySocket', ([], {}), '()\n', (609, 611), False, 'from numpysocket import NumpySocket\n'), ((808, 847), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (820, 847), False, 'import cv2\n'), ((988, 1033), 'numpy.array_split', 'np.array_split', (['frame_resize', 'THREADS'], {'axis': '(1)'}), '(frame_resize, THREADS, axis=1)\n', (1002, 1033), True, 'import numpy as np\n')]
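# Illustrative sketch (not from the script above, plain NumPy only -- no sockets)
# of the frame-splitting idea it relies on: each grayscale frame is split
# column-wise into THREADS strips, and a receiver is assumed to reassemble them
# in order.
import numpy as np

frame = np.arange(12, dtype=np.uint8).reshape(3, 4)   # stand-in for a tiny frame
strips = np.array_split(frame, 3, axis=1)              # one strip per sender socket
restored = np.concatenate(strips, axis=1)              # what a receiver would rebuild
assert np.array_equal(frame, restored)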
# pip install pycocotools opencv-python opencv-contrib-python # wget https://github.com/opencv/opencv_extra/raw/master/testdata/cv/ximgproc/model.yml.gz import os import copy import time import argparse import contextlib import multiprocessing import numpy as np import cv2 import cv2.ximgproc import matplotlib.patches import matplotlib.pyplot as plt import torch from torchvision.datasets import CocoDetection from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval def imshow_with_boxes(img, boxes_xywh, savefig): plt.figure() plt.imshow(img) plt.axis('off') for x, y, w, h in boxes_xywh.tolist(): plt.gca().add_patch(matplotlib.patches.Rectangle((x, y), w, h, linewidth=1, edgecolor='r', facecolor='none')) plt.savefig(savefig) plt.close() return savefig def selective_search(img, fast, topk): algo = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation() algo.setBaseImage(img) if fast: algo.switchToSelectiveSearchFast() else: algo.switchToSelectiveSearchQuality() boxes_xywh = algo.process().astype(np.float32) scores = np.ones( (len(boxes_xywh), ) ) return boxes_xywh[:topk], scores[:topk] def edge_boxes(img, fast, topk, bgr2rgb = (2, 1, 0), algo_edgedet = cv2.ximgproc.createStructuredEdgeDetection('model.yml.gz') if os.path.exists('model.yml.gz') else None): edges = algo_edgedet.detectEdges(img[..., bgr2rgb].astype(np.float32) / 255.0) orimap = algo_edgedet.computeOrientation(edges) edges = algo_edgedet.edgesNms(edges, orimap) algo_edgeboxes = cv2.ximgproc.createEdgeBoxes() algo_edgeboxes.setMaxBoxes(topk) boxes_xywh, scores = algo_edgeboxes.getBoundingBoxes(edges, orimap) if scores is None: boxes_xywh, scores = np.array([[0, 0.0, img.shape[1], img.shape[0]]]), np.ones((1, )) return boxes_xywh, scores.squeeze() def process_image(image_id, img_extra, fast, resize, algo, rgb2bgr = (2, 1, 0), category_other = -1, topk = 1000): img = np.asarray(img_extra[0])[..., rgb2bgr] h, w = img.shape[:2] img_det = img if resize == 1 else cv2.resize(img, (resize, resize)) boxes_xywh, scores = algo(img_det, fast, topk) boxes_xywh = boxes_xywh.astype(np.float32) * (1 if resize == 1 else np.array([w, h, w, h]) / resize) labels = np.full((len(boxes_xywh), ), category_other, dtype = int) return image_id, dict(boxes = boxes_xywh, scores = scores, labels = labels) def process_loaded(image_id, loaded, category_other = -1): boxes_xyxy = loaded['pred_boxes_'].clamp(min = 0) boxes_xywh = torch.stack([boxes_xyxy[:, 0], boxes_xyxy[:, 1], boxes_xyxy[:, 2] - boxes_xyxy[:, 0], boxes_xyxy[:, 3] - boxes_xyxy[:, 1]], dim = -1) labels = np.full((len(boxes_xywh), ), category_other, dtype = int) num_classes = loaded['pred_logits'].shape[-1] scores = loaded['pred_logits'][:, 1:: num_classes - 2][:, 0] I = scores.argsort(descending = True) scores = scores[I] boxes_xywh = boxes_xywh[I] labels = labels[I] return image_id, dict(boxes = boxes_xywh, scores = scores, labels = labels) class CocoEvaluator(object): def __init__(self, coco_gt, iou_type = 'bbox', useCats = 0, maxDets = 100): self.coco_gt = copy.deepcopy(coco_gt) self.coco_eval = COCOeval(coco_gt, iouType = iou_type) if maxDets != [100]: self.coco_eval.params.maxDets = maxDets if not useCats: self.coco_eval.params.useCats = useCats self.coco_eval.params.catIds = [-1] coco_gt.loadAnns = lambda imgIds, loadAnns = coco_gt.loadAnns: [gt.update(dict(category_id = -1)) or gt for gt in loadAnns(imgIds)] self.accumulate, self.summarize = self.coco_eval.accumulate, self.coco_eval.summarize @staticmethod def call_without_stdout(func, *args): with open(os.devnull, 
'w') as devnull: with contextlib.redirect_stdout(devnull): return func(*args) def update(self, predictions): tolist = lambda a: [a.tolist()] if a.ndim == 0 else a.tolist() detection_results = [dict(image_id = image_id, bbox = bbox, score = score, category_id = category_id) for image_id, pred in predictions.items() if pred for bbox, score, category_id in zip(pred['boxes'].tolist(), tolist(pred['scores']), pred['labels'].tolist())] self.coco_eval.cocoDt = self.call_without_stdout(COCO.loadRes, self.coco_gt, detection_results) if detection_results else COCO() self.coco_eval.params.imgIds = list(predictions) self.call_without_stdout(self.coco_eval.evaluate) def main(args): coco_mode = 'instances' PATHS = dict( train = (os.path.join(args.dataset_root, f'train{args.dataset_year}'), os.path.join(args.dataset_root, 'annotations', f'{coco_mode}_train{args.dataset_year}.json')), val = (os.path.join(args.dataset_root, f'val{args.dataset_year}'), os.path.join(args.dataset_root, 'annotations', f'{coco_mode}_val{args.dataset_year}.json')), ) dataset = CocoDetection(*PATHS[args.dataset_split]) coco_evaluator = CocoEvaluator(dataset.coco, maxDets = args.max_dets) tic = time.time() if args.output_dir: os.makedirs(args.output_dir, exist_ok = True) if args.algo != 'process_loaded': preds = dict(multiprocessing.Pool(processes = args.num_workers).starmap(process_image, zip(dataset.ids, dataset, [args.fast] * len(dataset), [args.resize] * len(dataset), [globals()[args.algo]] * len(dataset)))) else: preds = [] for i, t in enumerate(zip(dataset.ids, dataset, [args.fast] * len(dataset), [args.resize] * len(dataset), [globals()[args.algo]] * len(dataset))): loaded = torch.load(os.path.join(args.input_dir, str(t[0]) + '.pt'), map_location = 'cpu') preds.append(process_loaded(t[0], loaded)) if args.output_dir: imshow_with_boxes(t[1][0], preds[-1][1]['boxes'][:5], os.path.join(args.output_dir, str(t[0]) + '.jpg')) print(i) if i % 50 == 0 else None preds = dict(preds) print('proposals', time.time() - tic); tic = time.time() coco_evaluator.update(preds) coco_evaluator.accumulate() coco_evaluator.summarize() print('evaluator', time.time() - tic) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input-dir', '-i') parser.add_argument('--output-dir', '-o') parser.add_argument('--dataset-root') parser.add_argument('--dataset-split', default = 'val', choices = ['train', 'val']) parser.add_argument('--dataset-year', type = int, default = 2017) parser.add_argument('--num-workers', type = int, default = 16) parser.add_argument('--algo', default = 'selective_search', choices = ['selective_search', 'edge_boxes', 'process_loaded']) parser.add_argument('--fast', action = 'store_true') parser.add_argument('--resize', type = int, default = 128) parser.add_argument('--max-dets', type = int, nargs = '*', default = [100]) args = parser.parse_args() print(args) main(args)
[ "pycocotools.cocoeval.COCOeval", "numpy.array", "copy.deepcopy", "matplotlib.pyplot.imshow", "os.path.exists", "cv2.ximgproc.createStructuredEdgeDetection", "argparse.ArgumentParser", "numpy.asarray", "pycocotools.coco.COCO", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "cv2.ximgproc.segmentation.createSelectiveSearchSegmentation", "cv2.ximgproc.createEdgeBoxes", "contextlib.redirect_stdout", "matplotlib.pyplot.savefig", "numpy.ones", "matplotlib.pyplot.gca", "cv2.resize", "time.time", "os.makedirs", "torch.stack", "os.path.join", "matplotlib.pyplot.figure", "multiprocessing.Pool", "torchvision.datasets.CocoDetection" ]
[((547, 559), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (557, 559), True, 'import matplotlib.pyplot as plt\n'), ((564, 579), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (574, 579), True, 'import matplotlib.pyplot as plt\n'), ((584, 599), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (592, 599), True, 'import matplotlib.pyplot as plt\n'), ((765, 785), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savefig'], {}), '(savefig)\n', (776, 785), True, 'import matplotlib.pyplot as plt\n'), ((790, 801), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (799, 801), True, 'import matplotlib.pyplot as plt\n'), ((872, 933), 'cv2.ximgproc.segmentation.createSelectiveSearchSegmentation', 'cv2.ximgproc.segmentation.createSelectiveSearchSegmentation', ([], {}), '()\n', (931, 933), False, 'import cv2\n'), ((1592, 1622), 'cv2.ximgproc.createEdgeBoxes', 'cv2.ximgproc.createEdgeBoxes', ([], {}), '()\n', (1620, 1622), False, 'import cv2\n'), ((2619, 2754), 'torch.stack', 'torch.stack', (['[boxes_xyxy[:, 0], boxes_xyxy[:, 1], boxes_xyxy[:, 2] - boxes_xyxy[:, 0], \n boxes_xyxy[:, 3] - boxes_xyxy[:, 1]]'], {'dim': '(-1)'}), '([boxes_xyxy[:, 0], boxes_xyxy[:, 1], boxes_xyxy[:, 2] -\n boxes_xyxy[:, 0], boxes_xyxy[:, 3] - boxes_xyxy[:, 1]], dim=-1)\n', (2630, 2754), False, 'import torch\n'), ((5075, 5116), 'torchvision.datasets.CocoDetection', 'CocoDetection', (['*PATHS[args.dataset_split]'], {}), '(*PATHS[args.dataset_split])\n', (5088, 5116), False, 'from torchvision.datasets import CocoDetection\n'), ((5207, 5218), 'time.time', 'time.time', ([], {}), '()\n', (5216, 5218), False, 'import time\n'), ((6179, 6190), 'time.time', 'time.time', ([], {}), '()\n', (6188, 6190), False, 'import time\n'), ((6370, 6395), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6393, 6395), False, 'import argparse\n'), ((1344, 1374), 'os.path.exists', 'os.path.exists', (['"""model.yml.gz"""'], {}), "('model.yml.gz')\n", (1358, 1374), False, 'import os\n'), ((1282, 1340), 'cv2.ximgproc.createStructuredEdgeDetection', 'cv2.ximgproc.createStructuredEdgeDetection', (['"""model.yml.gz"""'], {}), "('model.yml.gz')\n", (1324, 1340), False, 'import cv2\n'), ((2025, 2049), 'numpy.asarray', 'np.asarray', (['img_extra[0]'], {}), '(img_extra[0])\n', (2035, 2049), True, 'import numpy as np\n'), ((2132, 2165), 'cv2.resize', 'cv2.resize', (['img', '(resize, resize)'], {}), '(img, (resize, resize))\n', (2142, 2165), False, 'import cv2\n'), ((3292, 3314), 'copy.deepcopy', 'copy.deepcopy', (['coco_gt'], {}), '(coco_gt)\n', (3305, 3314), False, 'import copy\n'), ((3340, 3375), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['coco_gt'], {'iouType': 'iou_type'}), '(coco_gt, iouType=iou_type)\n', (3348, 3375), False, 'from pycocotools.cocoeval import COCOeval\n'), ((5252, 5295), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (5263, 5295), False, 'import os\n'), ((1789, 1837), 'numpy.array', 'np.array', (['[[0, 0.0, img.shape[1], img.shape[0]]]'], {}), '([[0, 0.0, img.shape[1], img.shape[0]]])\n', (1797, 1837), True, 'import numpy as np\n'), ((1839, 1852), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (1846, 1852), True, 'import numpy as np\n'), ((4527, 4533), 'pycocotools.coco.COCO', 'COCO', ([], {}), '()\n', (4531, 4533), False, 'from pycocotools.coco import COCO\n'), ((6153, 6164), 'time.time', 'time.time', ([], {}), '()\n', (6162, 6164), False, 'import time\n'), ((6310, 6321), 
'time.time', 'time.time', ([], {}), '()\n', (6319, 6321), False, 'import time\n'), ((651, 660), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (658, 660), True, 'import matplotlib.pyplot as plt\n'), ((2294, 2316), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (2302, 2316), True, 'import numpy as np\n'), ((3947, 3982), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['devnull'], {}), '(devnull)\n', (3973, 3982), False, 'import contextlib\n'), ((4730, 4790), 'os.path.join', 'os.path.join', (['args.dataset_root', 'f"""train{args.dataset_year}"""'], {}), "(args.dataset_root, f'train{args.dataset_year}')\n", (4742, 4790), False, 'import os\n'), ((4792, 4888), 'os.path.join', 'os.path.join', (['args.dataset_root', '"""annotations"""', 'f"""{coco_mode}_train{args.dataset_year}.json"""'], {}), "(args.dataset_root, 'annotations',\n f'{coco_mode}_train{args.dataset_year}.json')\n", (4804, 4888), False, 'import os\n'), ((4902, 4960), 'os.path.join', 'os.path.join', (['args.dataset_root', 'f"""val{args.dataset_year}"""'], {}), "(args.dataset_root, f'val{args.dataset_year}')\n", (4914, 4960), False, 'import os\n'), ((4962, 5056), 'os.path.join', 'os.path.join', (['args.dataset_root', '"""annotations"""', 'f"""{coco_mode}_val{args.dataset_year}.json"""'], {}), "(args.dataset_root, 'annotations',\n f'{coco_mode}_val{args.dataset_year}.json')\n", (4974, 5056), False, 'import os\n'), ((5358, 5406), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'args.num_workers'}), '(processes=args.num_workers)\n', (5378, 5406), False, 'import multiprocessing\n')]
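# Illustrative check (values made up) of the box conversion performed by
# process_loaded() above: corner-format (x1, y1, x2, y2) boxes become
# COCO-style (x, y, width, height).
import torch

boxes_xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
boxes_xywh = torch.stack([boxes_xyxy[:, 0],
                         boxes_xyxy[:, 1],
                         boxes_xyxy[:, 2] - boxes_xyxy[:, 0],
                         boxes_xyxy[:, 3] - boxes_xyxy[:, 1]], dim=-1)
print(boxes_xywh)  # tensor([[10., 20., 40., 60.]])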
# --------------------------------------------------------------------# # --------------------------------------------------------------------# # ---------- Made by <NAME> @ircam on 11/2015 # ---------- Analyse audio and return soudn features # ---------- to us this don't forget to include these lines before your script: # ---------- # ---------- # --------------------------------------------------------------------# # --------------------------------------------------------------------# from __future__ import absolute_import from __future__ import print_function import numpy as np from pandas import DataFrame import os #sdif import eaSDIF import sys from fileio.sdif.FSdifLoadFile import FSdifLoadFile from six.moves import range def get_f0_info(f0file) : """ load f0 from ascii or SDIF file return tuple of np.arrays as follows return f0times, f0harm, f0val f0harm will be None if input is file is ASCII format """ try: (f0times, f0data) = FSdifLoadFile(f0file) f0times = np.array(f0times) dd = np.array(f0data) if len(dd.shape) > 1 and dd.shape[1] > 2 : f0harm = dd[:,2] else: f0harm = None f0val = np.array(f0data)[:,0] except RuntimeError as rr : print("failed reading "+f0file+" as sdif try reading as txt!") f0times_data = np.loadtxt(f0file) f0times = np.array(f0times_data)[:,0] f0val = np.array(f0times_data)[:,1] f0harm = None return f0times, f0harm, f0val def get_nb_formants(formant_file): """ get the number of formants of an sdif file """ try: ent = eaSDIF.Entity() ent.OpenRead(formant_file) frame = eaSDIF.Frame() #take the length of the fifth matrix, sometimes the first ones don't have the good number of formants =P ent.ReadNextSelectedFrame(frame) ent.ReadNextSelectedFrame(frame) ent.ReadNextSelectedFrame(frame) ent.ReadNextSelectedFrame(frame) ent.ReadNextSelectedFrame(frame) try : mat = frame.GetMatrixWithSig("1RES") except IndexError : pass par_mat = mat.GetMatrixData() return len(par_mat) except EOFError : pass return 0 def get_formants_info(formant_file): """ load formant_file from SDIF file return an array of panadas data frames with the formants in the sdif file return: Array of pandas data frames with formants """ ts = [] # analysis times cols_names = ("Frequency", "Amplitude", "Bw", "Saliance") nb_formants = get_nb_formants(formant_file) try: formants = [] for i in range(nb_formants): formant = DataFrame(columns=cols_names) formants.append(formant) ent = eaSDIF.Entity() ent.OpenRead(formant_file) frame = eaSDIF.Frame() ent.ReadNextSelectedFrame(frame) except EOFError : print("In get_formants_info first exception") pass try: while ent.ReadNextSelectedFrame(frame): try : mat = frame.GetMatrixWithSig("1RES") except IndexError : print("Index Error dans get_formants_info in parse_sdif") # matrix is not present so we continue continue frame_time = frame.GetTime() ts.append(frame_time) par_mat = mat.GetMatrixData() if par_mat.shape[1] != 4 : raise RuntimeError("partial data number of columns "+str(cols)+" unequal to 4 !") if len(par_mat) == nb_formants: for i in range(nb_formants): formants[i] = formants[i].append(DataFrame([par_mat[i].tolist()], columns=cols_names, index = [frame_time])) except EOFError : pass return formants def get_matrix_values(sdif): """ load data from ascii or SDIF file return time-tagged values and matrix data return tlist, Matrix_data This can be used to extract data from lpc or true env .sdif files """ inent = eaSDIF.Entity() res = inent.OpenRead(sdif); if res == False: raise RuntimeError("get_lpc:: "+ sdif +" is no sdif file or does not exist") dlist = []; 
tlist = []; vec = eaSDIF.Vector() frame = eaSDIF.Frame() #fft size #intypes = inent.GetTypeString() # Very practical line fr printing what is inside for frame in inent: has_IGBG = frame.MatrixExists("IGBG") if has_IGBG: mat = frame.GetMatrixWithSig("IGBG") mat.GetRow(vec, 0) sample_rate = vec[1] fftsize = vec[3] fftsize = int(fftsize / 2) #Extract time tag values for frame in inent: mat = frame.GetMatrix(1); nrow = mat.GetNbRows(); ncol = mat.GetNbCols(); if nrow > 1 and ncol > 0 : tlist.append(frame.GetTime()); #Extract Matrix data values for frame in inent: for i in range (0,fftsize + 1): mat = frame.GetMatrix(1); nrow = mat.GetNbRows(); ncol = mat.GetNbCols(); if nrow > 1 and ncol >= 0 : mat.GetRow(vec, i) dlist.append(float(np.array((vec)[0]))); #Convert dlist into a matrix sample_nb = len(tlist) - 1 # Because the first value in tlist should be ignored fftsize_range = fftsize + 1 sample_nb_range =sample_nb + 1 matrix_data = np.zeros((sample_nb_range, fftsize_range)) for row in range (0, sample_nb_range): for col in range (0, fftsize_range): matrix_data[row][col] = dlist[row*(fftsize_range) + col] #when using the flag -OS1 in super vp the amplitude values are in linear, here we transform it to db so the amplitudes are in db from conversions import lin2db matrix_data = lin2db(matrix_data) return tlist, matrix_data def get_formants_parameter(sdif, parameter): """ This is a function for quick access to the parameters of formants parameters can be: "Frequency", "Amplitude", "Bw", "Saliance" """ formants = get_formants_info(sdif) parameters = [] for formant in formants: parameters.append(formant[parameter]) return parameters #-------------------- #-------------------- #---------- Compute mean for sound features #---------- #-------------------- #-------------------- def mean_matrix(sdif): tlist, matrix_data = get_matrix_values(sdif); matrix_data = matrix_data.mean(axis=0); return matrix_data def formant_from_sdif(sdif): formants = get_formants_info(sdif) return formants def mean_formant_from_sdif(sdif): formants = get_formants_info(sdif) mean_formants =[] for formant in formants: mean_formants.append(formant.mean()) return mean_formants def median_formant_from_sdif(sdif): formants = get_formants_info(sdif) median_formants =[] for formant in formants: median_formants.append(formant.median()) return median_formants def mean_pitch(sdif): f_name = analysis_path + subject_tag(sdif) + str(sdif) + f0_tag f0times, f0harm, f0val = get_f0_info(str(f_name)) return f0val.mean(0)
[ "eaSDIF.Entity", "fileio.sdif.FSdifLoadFile.FSdifLoadFile", "six.moves.range", "conversions.lin2db", "eaSDIF.Frame", "numpy.array", "numpy.zeros", "pandas.DataFrame", "numpy.loadtxt", "eaSDIF.Vector" ]
[((4218, 4233), 'eaSDIF.Entity', 'eaSDIF.Entity', ([], {}), '()\n', (4231, 4233), False, 'import eaSDIF\n'), ((4414, 4429), 'eaSDIF.Vector', 'eaSDIF.Vector', ([], {}), '()\n', (4427, 4429), False, 'import eaSDIF\n'), ((4442, 4456), 'eaSDIF.Frame', 'eaSDIF.Frame', ([], {}), '()\n', (4454, 4456), False, 'import eaSDIF\n'), ((5614, 5656), 'numpy.zeros', 'np.zeros', (['(sample_nb_range, fftsize_range)'], {}), '((sample_nb_range, fftsize_range))\n', (5622, 5656), True, 'import numpy as np\n'), ((5672, 5697), 'six.moves.range', 'range', (['(0)', 'sample_nb_range'], {}), '(0, sample_nb_range)\n', (5677, 5697), False, 'from six.moves import range\n'), ((6001, 6020), 'conversions.lin2db', 'lin2db', (['matrix_data'], {}), '(matrix_data)\n', (6007, 6020), False, 'from conversions import lin2db\n'), ((990, 1011), 'fileio.sdif.FSdifLoadFile.FSdifLoadFile', 'FSdifLoadFile', (['f0file'], {}), '(f0file)\n', (1003, 1011), False, 'from fileio.sdif.FSdifLoadFile import FSdifLoadFile\n'), ((1030, 1047), 'numpy.array', 'np.array', (['f0times'], {}), '(f0times)\n', (1038, 1047), True, 'import numpy as np\n'), ((1061, 1077), 'numpy.array', 'np.array', (['f0data'], {}), '(f0data)\n', (1069, 1077), True, 'import numpy as np\n'), ((1686, 1701), 'eaSDIF.Entity', 'eaSDIF.Entity', ([], {}), '()\n', (1699, 1701), False, 'import eaSDIF\n'), ((1753, 1767), 'eaSDIF.Frame', 'eaSDIF.Frame', ([], {}), '()\n', (1765, 1767), False, 'import eaSDIF\n'), ((2743, 2761), 'six.moves.range', 'range', (['nb_formants'], {}), '(nb_formants)\n', (2748, 2761), False, 'from six.moves import range\n'), ((2868, 2883), 'eaSDIF.Entity', 'eaSDIF.Entity', ([], {}), '()\n', (2881, 2883), False, 'import eaSDIF\n'), ((2935, 2949), 'eaSDIF.Frame', 'eaSDIF.Frame', ([], {}), '()\n', (2947, 2949), False, 'import eaSDIF\n'), ((5144, 5165), 'six.moves.range', 'range', (['(0)', '(fftsize + 1)'], {}), '(0, fftsize + 1)\n', (5149, 5165), False, 'from six.moves import range\n'), ((5719, 5742), 'six.moves.range', 'range', (['(0)', 'fftsize_range'], {}), '(0, fftsize_range)\n', (5724, 5742), False, 'from six.moves import range\n'), ((1217, 1233), 'numpy.array', 'np.array', (['f0data'], {}), '(f0data)\n', (1225, 1233), True, 'import numpy as np\n'), ((1365, 1383), 'numpy.loadtxt', 'np.loadtxt', (['f0file'], {}), '(f0file)\n', (1375, 1383), True, 'import numpy as np\n'), ((2785, 2814), 'pandas.DataFrame', 'DataFrame', ([], {'columns': 'cols_names'}), '(columns=cols_names)\n', (2794, 2814), False, 'from pandas import DataFrame\n'), ((1402, 1424), 'numpy.array', 'np.array', (['f0times_data'], {}), '(f0times_data)\n', (1410, 1424), True, 'import numpy as np\n'), ((1448, 1470), 'numpy.array', 'np.array', (['f0times_data'], {}), '(f0times_data)\n', (1456, 1470), True, 'import numpy as np\n'), ((3760, 3778), 'six.moves.range', 'range', (['nb_formants'], {}), '(nb_formants)\n', (3765, 3778), False, 'from six.moves import range\n'), ((5389, 5405), 'numpy.array', 'np.array', (['vec[0]'], {}), '(vec[0])\n', (5397, 5405), True, 'import numpy as np\n')]
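# Illustrative sketch (values made up) of the ASCII fallback handled by
# get_f0_info() above: a plain two-column text file of (time, f0) pairs,
# and the f0val.mean(0) value that mean_pitch() ultimately returns.
import numpy as np

f0times_data = np.array([[0.00, 220.0],
                          [0.01, 221.5],
                          [0.02, 219.8]])
f0times = f0times_data[:, 0]
f0val = f0times_data[:, 1]
print(f0val.mean(0))   # ~220.43 Hz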
#!/usr/bin/env python # encoding: utf-8 """ response """ import numpy as np import scipy as sp from scipy import signal, interpolate import pandas as pd import warnings from .utils import (get_proper_interval, double_gamma_with_d, get_time_to_peak_from_timecourse, double_gamma_with_d_time_derivative) import logging def _get_timepoints(interval, sample_rate, oversample): total_length = interval[1] - interval[0] timepoints = np.linspace(interval[0], interval[1], int(total_length * sample_rate * oversample), endpoint=False) return timepoints def _create_fir_basis(interval, sample_rate, n_regressors, oversample=1): """""" regressor_labels = ['fir_%d' % i for i in np.arange(n_regressors)] basis = np.eye(n_regressors) basis = np.vstack((basis, basis[-1])) orig_timepoints = np.linspace(interval[0], interval[1], n_regressors + 1, # Include endpoint to allow interpolation endpoint=True) # below timepoints = _get_timepoints(interval, sample_rate, oversample) fir = interpolate.interp1d(orig_timepoints, basis, kind='nearest', axis=0)(timepoints) return pd.DataFrame(fir, index=timepoints, columns=regressor_labels) \ .rename_axis('time') \ .rename_axis('basis function', axis=1) def _create_canonical_hrf_basis(interval, sample_rate, n_regressors, oversample=1): timepoints = _get_timepoints(interval, sample_rate, oversample) basis_function = double_gamma_with_d(timepoints)[:, np.newaxis] return pd.DataFrame(basis_function, index=timepoints, columns=['canonical HRF']) \ .rename_axis('time') \ .rename_axis('basis function', axis=1) def _create_canonical_hrf_with_time_derivative_basis(interval, sample_rate, n_regressors, oversample=1): timepoints = _get_timepoints(interval, sample_rate, oversample) hrf = double_gamma_with_d(timepoints) dt_hrf = double_gamma_with_d_time_derivative(timepoints) return pd.DataFrame(np.array([hrf, dt_hrf]).T, index=timepoints, columns=['HRF', 'HRF (derivative wrt time-to-peak)']) \ .rename_axis('time') \ .rename_axis('basis function', axis=1) def _create_fourier_basis(interval, sample_rate, n_regressors, oversample=1): """""" timepoints = _get_timepoints(interval, sample_rate, oversample) L_fourier = np.zeros((len(timepoints), n_regressors)) L_fourier[:, 0] = 1 for r in range(int(n_regressors/2)): x = np.linspace(0, 2.0*np.pi*(r+1), len(timepoints)) # sin_regressor L_fourier[:, 1+r] = np.sqrt(2) * np.sin(x) # cos_regressor L_fourier[:, 1+r+int(n_regressors/2)] = np.sqrt(2) * np.cos(x) regressor_labels = ['fourier_intercept'] regressor_labels += ['fourier_sin_%d_period' % period for period in np.arange(1, n_regressors//2 + 1)] regressor_labels += ['fourier_cos_%d_period' % period for period in np.arange(1, n_regressors//2 + 1)] return pd.DataFrame(L_fourier, index=timepoints, columns=regressor_labels) \ .rename_axis('time') \ .rename_axis('basis function', axis=1) def _create_legendre_basis(interval, sample_rate, n_regressors, oversample=1): """""" regressor_labels = ['legendre_%d' % poly for poly in np.arange(1, self.n_regressors + 1)] x = np.linspace(-1, 1, int(np.diff(interval)) * oversample + 1, endpoint=True) L_legendre = np.polynomial.legendre.legval(x=x, c=np.eye(n_regressors)).T return pd.DataFrame(L_legendre, index=timepoints, columns=regressor_labels) \ .rename_axis('time') \ .rename_axis('basis function', axis=1) class Regressor(object): def __init__(self, name, fitter): self.name = name self.fitter = fitter def create_design_matrix(): pass class Confound(Regressor): def __init__(self, name, fitter, confounds): super(Confound, self).__init__(name, fitter) self.confounds 
= pd.DataFrame(confounds) def create_design_matrix(self, oversample=1): self.X = self.confounds self.X.columns = pd.MultiIndex.from_product([['confounds'], [self.name], self.X.columns], names=['event type', 'covariate', 'regressor']) self.X.set_index(self.fitter.input_signal.index, inplace=True) self.X.index.rename('time', inplace=True) class Intercept(Confound): def __init__(self, name, fitter): confound = pd.DataFrame(np.ones(len(fitter.input_signal)), columns=['intercept']) super(Intercept, self).__init__(name, fitter, confound) class Event(Regressor): """Event is a class that encapsulates the creation and conversion of design matrices and resulting beta weights for specific event_types. Design matrices for an event_type can be built up of different basis sets, and one can choose the time interval over which to fit the response. """ def __init__(self, name, fitter, basis_set='fir', interval=[0, 10], n_regressors=None, onsets=None, durations=None, covariates=None): """ Initialize a ResponseFitter object. Parameters ---------- fitter : ResponseFitter object the response fitter object needed to feed the Event its parameters. basis_set : string ['fir', 'fourier', 'legendre'] or np.array (1D) basis set to use in the fitting. interval : list (2) the minimum and maximum timepoints relative to the event onset times that delineate the interval for which to estimate the response time-course n_regressors : int for fourier and legendre basis sets, this argument determines the number of regressors to use. More regressors adds more precision, either in terms of added, higher, frequencies (fourier) or higher polynomial order (legendre) onsets : np.array (1D) onset times, in seconds, of all the events to estimate the response to durations : np.array (1D), optional durations of each of the events in onsets. covariates : dict, optional dictionary of covariates for each of the events in onsets. that is, the keys are the names of the covariates, the values are 1D numpy arrays of length identical to onsets; these are the covariate values of each of the events in onsets. """ super(Event, self).__init__(name, fitter) self.basis_set = basis_set self.interval = interval self.n_regressors = n_regressors self.onsets = pd.Series(onsets) if self.onsets.dtype != float: logging.warning('Onsets should be floats (currently {})! Converting...'.format( self.onsets.dtype)) self.onsets = self.onsets.astype(float) self.durations = durations if durations is not None: self.durations = pd.Series(self.durations) if self.durations.dtype != float: logging.warning('Durations should be floats (currently {})! Converting...'.format( self.durations.dtype)) self.durations = self.durations.astype(float) self.interval_duration = self.interval[1] - self.interval[0] self.sample_duration = self.fitter.sample_duration self.sample_rate = self.fitter.sample_rate # Check whether the interval is proper if ~np.isclose(self.interval_duration % self.sample_duration, 0) and \ ~np.isclose(self.interval_duration % self.sample_duration, self.sample_duration): old_interval = self.interval self.interval = get_proper_interval( old_interval, self.sample_duration) self.interval_duration = self.interval[1] - self.interval[0] warning = '\nWARNING: The duration of the interval %s is not a multiple of ' \ 'the sample duration %s.\n\r' \ 'Interval is now automatically set to %s.' 
\ % (old_interval, self.sample_duration, self.interval) warnings.warn(warning) if covariates is None: self.covariates = pd.DataFrame( {'intercept': np.ones(self.onsets.shape[0])}) else: self.covariates = pd.DataFrame(covariates) if type(self.basis_set) is not str: self.n_regressors = self.basis_set.shape[1] self.basis_set = pd.DataFrame(self.basis_set, index=np.linspace(*self.interval, num=len( self.basis_set), endpoint=True)) else: if self.basis_set == 'fir': length_interval = self.interval[1] - self.interval[0] if self.n_regressors is None: self.n_regressors = int( length_interval / self.sample_duration) warnings.warn('Number of FIR regressors has automatically been set to %d ' 'per covariate' % self.n_regressors) if self.n_regressors > (length_interval / self.sample_duration): warnings.warn('Number of FIR regressors ({}) is larger than the number of timepoints in the interval ' '({}). ' 'This model can only be fit using regularized methods.'.format(self.n_regressors, int(length_interval / self.sample_rate))) # legendre and fourier basis sets should be odd elif self.basis_set in ('fourier', 'legendre'): if self.n_regressors is None: raise Exception('Please provide number of regressors!') elif (self.n_regressors % 2) == 0: self.n_regressors += 1 warnings.warn('Number of {} regressors has to be uneven and has automatically ' 'been set to {} per covariate'.format(self.basis_set, self.n_regressors)) elif self.basis_set == 'canonical_hrf': if (self.n_regressors is not None) and (self.n_regressors != 1): warnings.warn('With the canonical HRF as a basis set, you can have only ONE ' 'regressors per covariate!') self.n_regressors = 1 elif self.basis_set == 'canonical_hrf_with_time_derivative': if (self.n_regressors is not None) and (self.n_regressors != 2): warnings.warn('With the canonical HRF with time derivative as a basis set,' 'you can have only TWO ' 'regressors per covariate!') self.n_regressors = 2 def event_timecourse(self, covariate=None, oversample=1): """ event_timecourse creates a timecourse of events of nr_samples by n_regressors, which has to be converted to the basis of choice. Parameters ---------- covariate : string, optional Name of the covariate that will be used in the regression. Is set to ones if not providedt.h Returns ------- event_timepoints : np.array (n_regressors, n_timepoints) An array that depicts the occurrence of each of the events in the time-space of the signal. """ if self.durations is None: durations = np.ones_like(self.onsets) * \ self.sample_duration / oversample else: durations = self.durations event_timepoints = np.zeros( self.fitter.input_signal.shape[0] * oversample) if covariate is None: covariate = self.covariates['intercept'] else: covariate = self.covariates[covariate] for e, d, c in zip(self.onsets, durations, covariate): et = int((e + self.interval[0]) * self.sample_rate * oversample) dt = np.max((d * self.sample_rate * oversample, 1), 0).astype(int) event_timepoints[et:et+dt] = c return event_timepoints def create_design_matrix(self, oversample=1): """ create_design_matrix creates the design matrix for this event_type by iterating over covariates. 
""" # create empty design matrix self.X = np.zeros((self.fitter.input_signal.shape[0] * oversample, self.n_regressors * self.covariates.shape[1])) L = self.get_basis_function(oversample) columns = pd.MultiIndex.from_product(([self.name], self.covariates.columns, L.columns), names=['event type', 'covariate', 'regressor']) oversampled_timepoints = np.linspace(0, self.fitter.input_signal.shape[0] * self.sample_duration, self.fitter.input_signal.shape[0] * oversample, endpoint=False) self.X = pd.DataFrame(self.X, columns=columns, index=oversampled_timepoints) for covariate in self.covariates.columns: event_timepoints = self.event_timecourse(covariate=covariate, oversample=oversample) for regressor in L.columns: self.X[self.name, covariate, regressor] = sp.signal.convolve(event_timepoints, L[regressor], 'full')[:len(self.X)] if oversample != 1: self.downsample_design_matrix() def get_timecourses(self, oversample=1): """ takes betas, given from response_fitter object, and restructures the beta weights to the interval that we're trying to fit, using the L basis function matrix. """ assert hasattr( self, 'betas'), 'no betas found, please run regression before rsq' L = self.get_basis_function(oversample) return self.betas.groupby(level=['event type', 'covariate']).apply(_dotproduct_timecourse, L) def get_basis_function(self, oversample=1): # only for fir, the nr of regressors is dictated by the interval and sample rate if type(self.basis_set) is str: if self.basis_set == 'fir': L = _create_fir_basis( self.interval, self.sample_rate, self.n_regressors, oversample) elif self.basis_set == 'fourier': L = _create_fourier_basis( self.interval, self.sample_rate, self.n_regressors, oversample) elif self.basis_set == 'legendre': L = _create_legendre_basis( self.interval, self.sample_rate, self.n_regressors, oversample) elif self.basis_set == 'canonical_hrf': L = _create_canonical_hrf_basis(self.interval, self.sample_rate, 1, oversample) regressor_labels = ['canonical_hrf'] elif self.basis_set == 'canonical_hrf_with_time_derivative': L = _create_canonical_hrf_with_time_derivative_basis(self.interval, self.sample_rate, 2, oversample) regressor_labels = ['canonical_hrf', 'canonical_hrf_time_derivative'] else: regressor_labels = ['custom_basis_function_%d' % i for i in range(1, self.n_regressors+1)] L = np.zeros( (self.basis_set.shape[0] * oversample, self.n_regressors)) interp = sp.interpolate.interp1d( self.basis_set.index, self.basis_set.values, axis=0) L = interp(timepoints) # L = pd.DataFrame(L, # columns=pd.Index(regressor_labels, name='basis_function'),) # index=L.index) return L def downsample_design_matrix(self): interp = sp.interpolate.interp1d(self.X.index, self.X.values, axis=0) X_ = interp(self.fitter.input_signal.index) self.X = pd.DataFrame(X_, columns=self.X.columns, index=self.fitter.input_signal.index) def get_time_to_peak(self, oversample=20, cutoff=1.0, negative_peak=False): return self.get_timecourses(oversample=oversample)\ .groupby(['event type', 'covariate'], as_index=False)\ .apply(get_time_to_peak_from_timecourse, negative_peak=negative_peak, cutoff=cutoff)\ .reset_index(level=[-1], drop=True)\ .pivot(columns='area', index='peak') def _dotproduct_timecourse(d, L): return L.dot(d.reset_index(level=['event type', 'covariate'], drop=True))
[ "scipy.signal.convolve", "numpy.sqrt", "scipy.interpolate.interp1d", "numpy.array", "numpy.sin", "numpy.arange", "pandas.MultiIndex.from_product", "numpy.diff", "numpy.max", "numpy.linspace", "numpy.vstack", "pandas.DataFrame", "warnings.warn", "numpy.eye", "numpy.ones", "numpy.cos", "pandas.Series", "numpy.ones_like", "numpy.isclose", "numpy.zeros" ]
[((921, 941), 'numpy.eye', 'np.eye', (['n_regressors'], {}), '(n_regressors)\n', (927, 941), True, 'import numpy as np\n'), ((954, 983), 'numpy.vstack', 'np.vstack', (['(basis, basis[-1])'], {}), '((basis, basis[-1]))\n', (963, 983), True, 'import numpy as np\n'), ((1007, 1077), 'numpy.linspace', 'np.linspace', (['interval[0]', 'interval[1]', '(n_regressors + 1)'], {'endpoint': '(True)'}), '(interval[0], interval[1], n_regressors + 1, endpoint=True)\n', (1018, 1077), True, 'import numpy as np\n'), ((1328, 1396), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['orig_timepoints', 'basis'], {'kind': '"""nearest"""', 'axis': '(0)'}), "(orig_timepoints, basis, kind='nearest', axis=0)\n", (1348, 1396), False, 'from scipy import signal, interpolate\n'), ((4675, 4698), 'pandas.DataFrame', 'pd.DataFrame', (['confounds'], {}), '(confounds)\n', (4687, 4698), True, 'import pandas as pd\n'), ((4807, 4931), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['confounds'], [self.name], self.X.columns]"], {'names': "['event type', 'covariate', 'regressor']"}), "([['confounds'], [self.name], self.X.columns],\n names=['event type', 'covariate', 'regressor'])\n", (4833, 4931), True, 'import pandas as pd\n'), ((7606, 7623), 'pandas.Series', 'pd.Series', (['onsets'], {}), '(onsets)\n', (7615, 7623), True, 'import pandas as pd\n'), ((13036, 13092), 'numpy.zeros', 'np.zeros', (['(self.fitter.input_signal.shape[0] * oversample)'], {}), '(self.fitter.input_signal.shape[0] * oversample)\n', (13044, 13092), True, 'import numpy as np\n'), ((13795, 13903), 'numpy.zeros', 'np.zeros', (['(self.fitter.input_signal.shape[0] * oversample, self.n_regressors * self.\n covariates.shape[1])'], {}), '((self.fitter.input_signal.shape[0] * oversample, self.n_regressors *\n self.covariates.shape[1]))\n', (13803, 13903), True, 'import numpy as np\n'), ((13995, 14125), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['([self.name], self.covariates.columns, L.columns)'], {'names': "['event type', 'covariate', 'regressor']"}), "(([self.name], self.covariates.columns, L.columns\n ), names=['event type', 'covariate', 'regressor'])\n", (14021, 14125), True, 'import pandas as pd\n'), ((14200, 14341), 'numpy.linspace', 'np.linspace', (['(0)', '(self.fitter.input_signal.shape[0] * self.sample_duration)', '(self.fitter.input_signal.shape[0] * oversample)'], {'endpoint': '(False)'}), '(0, self.fitter.input_signal.shape[0] * self.sample_duration, \n self.fitter.input_signal.shape[0] * oversample, endpoint=False)\n', (14211, 14341), True, 'import numpy as np\n'), ((14580, 14647), 'pandas.DataFrame', 'pd.DataFrame', (['self.X'], {'columns': 'columns', 'index': 'oversampled_timepoints'}), '(self.X, columns=columns, index=oversampled_timepoints)\n', (14592, 14647), True, 'import pandas as pd\n'), ((17987, 18047), 'scipy.interpolate.interp1d', 'sp.interpolate.interp1d', (['self.X.index', 'self.X.values'], {'axis': '(0)'}), '(self.X.index, self.X.values, axis=0)\n', (18010, 18047), True, 'import scipy as sp\n'), ((18117, 18195), 'pandas.DataFrame', 'pd.DataFrame', (['X_'], {'columns': 'self.X.columns', 'index': 'self.fitter.input_signal.index'}), '(X_, columns=self.X.columns, index=self.fitter.input_signal.index)\n', (18129, 18195), True, 'import pandas as pd\n'), ((883, 906), 'numpy.arange', 'np.arange', (['n_regressors'], {}), '(n_regressors)\n', (892, 906), True, 'import numpy as np\n'), ((3074, 3084), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3081, 3084), True, 'import numpy as np\n'), ((3087, 
3096), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3093, 3096), True, 'import numpy as np\n'), ((3174, 3184), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3181, 3184), True, 'import numpy as np\n'), ((3187, 3196), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (3193, 3196), True, 'import numpy as np\n'), ((3340, 3375), 'numpy.arange', 'np.arange', (['(1)', '(n_regressors // 2 + 1)'], {}), '(1, n_regressors // 2 + 1)\n', (3349, 3375), True, 'import numpy as np\n'), ((3472, 3507), 'numpy.arange', 'np.arange', (['(1)', '(n_regressors // 2 + 1)'], {}), '(1, n_regressors // 2 + 1)\n', (3481, 3507), True, 'import numpy as np\n'), ((3889, 3924), 'numpy.arange', 'np.arange', (['(1)', '(self.n_regressors + 1)'], {}), '(1, self.n_regressors + 1)\n', (3898, 3924), True, 'import numpy as np\n'), ((7944, 7969), 'pandas.Series', 'pd.Series', (['self.durations'], {}), '(self.durations)\n', (7953, 7969), True, 'import pandas as pd\n'), ((9188, 9210), 'warnings.warn', 'warnings.warn', (['warning'], {}), '(warning)\n', (9201, 9210), False, 'import warnings\n'), ((9393, 9417), 'pandas.DataFrame', 'pd.DataFrame', (['covariates'], {}), '(covariates)\n', (9405, 9417), True, 'import pandas as pd\n'), ((17541, 17608), 'numpy.zeros', 'np.zeros', (['(self.basis_set.shape[0] * oversample, self.n_regressors)'], {}), '((self.basis_set.shape[0] * oversample, self.n_regressors))\n', (17549, 17608), True, 'import numpy as np\n'), ((17648, 17724), 'scipy.interpolate.interp1d', 'sp.interpolate.interp1d', (['self.basis_set.index', 'self.basis_set.values'], {'axis': '(0)'}), '(self.basis_set.index, self.basis_set.values, axis=0)\n', (17671, 17724), True, 'import scipy as sp\n'), ((4083, 4103), 'numpy.eye', 'np.eye', (['n_regressors'], {}), '(n_regressors)\n', (4089, 4103), True, 'import numpy as np\n'), ((8460, 8520), 'numpy.isclose', 'np.isclose', (['(self.interval_duration % self.sample_duration)', '(0)'], {}), '(self.interval_duration % self.sample_duration, 0)\n', (8470, 8520), True, 'import numpy as np\n'), ((8567, 8646), 'numpy.isclose', 'np.isclose', (['(self.interval_duration % self.sample_duration)', 'self.sample_duration'], {}), '(self.interval_duration % self.sample_duration, self.sample_duration)\n', (8577, 8646), True, 'import numpy as np\n'), ((1514, 1575), 'pandas.DataFrame', 'pd.DataFrame', (['fir'], {'index': 'timepoints', 'columns': 'regressor_labels'}), '(fir, index=timepoints, columns=regressor_labels)\n', (1526, 1575), True, 'import pandas as pd\n'), ((1938, 2011), 'pandas.DataFrame', 'pd.DataFrame', (['basis_function'], {'index': 'timepoints', 'columns': "['canonical HRF']"}), "(basis_function, index=timepoints, columns=['canonical HRF'])\n", (1950, 2011), True, 'import pandas as pd\n'), ((3519, 3586), 'pandas.DataFrame', 'pd.DataFrame', (['L_fourier'], {'index': 'timepoints', 'columns': 'regressor_labels'}), '(L_fourier, index=timepoints, columns=regressor_labels)\n', (3531, 3586), True, 'import pandas as pd\n'), ((3957, 3974), 'numpy.diff', 'np.diff', (['interval'], {}), '(interval)\n', (3964, 3974), True, 'import numpy as np\n'), ((4119, 4187), 'pandas.DataFrame', 'pd.DataFrame', (['L_legendre'], {'index': 'timepoints', 'columns': 'regressor_labels'}), '(L_legendre, index=timepoints, columns=regressor_labels)\n', (4131, 4187), True, 'import pandas as pd\n'), ((9317, 9346), 'numpy.ones', 'np.ones', (['self.onsets.shape[0]'], {}), '(self.onsets.shape[0])\n', (9324, 9346), True, 'import numpy as np\n'), ((10179, 10296), 'warnings.warn', 'warnings.warn', (["('Number of FIR regressors has 
automatically been set to %d per covariate' %\n self.n_regressors)"], {}), "(\n 'Number of FIR regressors has automatically been set to %d per covariate' %\n self.n_regressors)\n", (10192, 10296), False, 'import warnings\n'), ((12876, 12901), 'numpy.ones_like', 'np.ones_like', (['self.onsets'], {}), '(self.onsets)\n', (12888, 12901), True, 'import numpy as np\n'), ((13413, 13462), 'numpy.max', 'np.max', (['(d * self.sample_rate * oversample, 1)', '(0)'], {}), '((d * self.sample_rate * oversample, 1), 0)\n', (13419, 13462), True, 'import numpy as np\n'), ((15008, 15066), 'scipy.signal.convolve', 'sp.signal.convolve', (['event_timepoints', 'L[regressor]', '"""full"""'], {}), "(event_timepoints, L[regressor], 'full')\n", (15026, 15066), True, 'import scipy as sp\n'), ((2444, 2467), 'numpy.array', 'np.array', (['[hrf, dt_hrf]'], {}), '([hrf, dt_hrf])\n', (2452, 2467), True, 'import numpy as np\n'), ((11526, 11639), 'warnings.warn', 'warnings.warn', (['"""With the canonical HRF as a basis set, you can have only ONE regressors per covariate!"""'], {}), "(\n 'With the canonical HRF as a basis set, you can have only ONE regressors per covariate!'\n )\n", (11539, 11639), False, 'import warnings\n'), ((11881, 12014), 'warnings.warn', 'warnings.warn', (['"""With the canonical HRF with time derivative as a basis set,you can have only TWO regressors per covariate!"""'], {}), "(\n 'With the canonical HRF with time derivative as a basis set,you can have only TWO regressors per covariate!'\n )\n", (11894, 12014), False, 'import warnings\n')]
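The source file for the record above is not reproduced in this excerpt; only its extracted API calls are listed. As a hedged, illustrative sketch of the FIR basis-set construction those calls imply (an identity matrix over the regressors via np.eye, np.vstack repeating the last row, np.linspace timepoints, and nearest-neighbour upsampling with scipy.interpolate.interp1d) — all variable names and parameter values below are assumptions, not taken from the original file:

# Illustrative only: names and numeric values are assumed, not the original module's.
import numpy as np
from scipy import interpolate

n_regressors = 5
interval = (0.0, 20.0)      # assumed response window in seconds
oversample = 10             # assumed oversampling factor

basis = np.eye(n_regressors)                  # one identity column per FIR regressor
basis = np.vstack((basis, basis[-1]))         # repeat last row so interpolation covers the right edge
orig_timepoints = np.linspace(interval[0], interval[1], n_regressors + 1, endpoint=True)

# Nearest-neighbour upsampling onto a finer time grid, as in the extracted interp1d call.
f = interpolate.interp1d(orig_timepoints, basis, kind='nearest', axis=0)
fine_t = np.linspace(interval[0], interval[1], n_regressors * oversample, endpoint=False)
fir = f(fine_t)
print(fir.shape)   # (n_regressors * oversample, n_regressors)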
import numpy as np import torch import torch.utils.data as data import torch.nn.functional as F import os import cv2 import math import random import json import csv import pickle import os.path as osp from glob import glob import raft3d.projective_ops as pops from . import frame_utils from .augmentation import RGBDAugmentor, SparseAugmentor class KITTIEval(data.Dataset): crop = 80 def __init__(self, image_size=None, root='datasets/KITTI', do_augment=True): self.init_seed = None mode = "testing" self.image1_list = sorted(glob(osp.join(root, mode, "image_2/*10.png"))) self.image2_list = sorted(glob(osp.join(root, mode, "image_2/*11.png"))) self.disp1_ga_list = sorted(glob(osp.join(root, mode, "disp_ganet_{}/*10.png".format(mode)))) self.disp2_ga_list = sorted(glob(osp.join(root, mode, "disp_ganet_{}/*11.png".format(mode)))) self.calib_list = sorted(glob(osp.join(root, mode, "calib_cam_to_cam/*.txt"))) self.intrinsics_list = [] for calib_file in self.calib_list: with open(calib_file) as f: reader = csv.reader(f, delimiter=' ') for row in reader: if row[0] == 'K_02:': K = np.array(row[1:], dtype=np.float32).reshape(3,3) kvec = np.array([K[0,0], K[1,1], K[0,2], K[1,2]]) self.intrinsics_list.append(kvec) @staticmethod def write_prediction(index, disp1, disp2, flow): def writeFlowKITTI(filename, uv): uv = 64.0 * uv + 2**15 valid = np.ones([uv.shape[0], uv.shape[1], 1]) uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16) cv2.imwrite(filename, uv[..., ::-1]) def writeDispKITTI(filename, disp): disp = (256 * disp).astype(np.uint16) cv2.imwrite(filename, disp) disp1 = np.pad(disp1, ((KITTIEval.crop,0),(0,0)), mode='edge') disp2 = np.pad(disp2, ((KITTIEval.crop, 0), (0,0)), mode='edge') flow = np.pad(flow, ((KITTIEval.crop, 0), (0,0),(0,0)), mode='edge') disp1_path = 'kitti_submission/disp_0/%06d_10.png' % index disp2_path = 'kitti_submission/disp_1/%06d_10.png' % index flow_path = 'kitti_submission/flow/%06d_10.png' % index writeDispKITTI(disp1_path, disp1) writeDispKITTI(disp2_path, disp2) writeFlowKITTI(flow_path, flow) def __len__(self): return len(self.image1_list) def __getitem__(self, index): intrinsics = self.intrinsics_list[index] image1 = cv2.imread(self.image1_list[index]) image2 = cv2.imread(self.image2_list[index]) disp1 = cv2.imread(self.disp1_ga_list[index], cv2.IMREAD_ANYDEPTH) / 256.0 disp2 = cv2.imread(self.disp2_ga_list[index], cv2.IMREAD_ANYDEPTH) / 256.0 image1 = image1[self.crop:] image2 = image2[self.crop:] disp1 = disp1[self.crop:] disp2 = disp2[self.crop:] intrinsics[3] -= self.crop image1 = torch.from_numpy(image1).float().permute(2,0,1) image2 = torch.from_numpy(image2).float().permute(2,0,1) disp1 = torch.from_numpy(disp1).float() disp2 = torch.from_numpy(disp2).float() intrinsics = torch.from_numpy(intrinsics).float() return image1, image2, disp1, disp2, intrinsics class KITTI(data.Dataset): def __init__(self, image_size=None, root='datasets/KITTI', do_augment=True): import csv self.init_seed = None self.crop = 80 if do_augment: self.augmentor = SparseAugmentor(image_size) else: self.augmentor = None self.image1_list = sorted(glob(osp.join(root, "training", "image_2/*10.png"))) self.image2_list = sorted(glob(osp.join(root, "training", "image_2/*11.png"))) self.disp1_list = sorted(glob(osp.join(root, "training", "disp_occ_0/*10.png"))) self.disp2_list = sorted(glob(osp.join(root, "training", "disp_occ_1/*10.png"))) self.disp1_ga_list = sorted(glob(osp.join(root, "training", "disp_ganet/*10.png"))) self.disp2_ga_list = sorted(glob(osp.join(root, "training", 
"disp_ganet/*11.png"))) self.flow_list = sorted(glob(osp.join(root, "training", "flow_occ/*10.png"))) self.calib_list = sorted(glob(osp.join(root, "training", "calib_cam_to_cam/*.txt"))) self.intrinsics_list = [] for calib_file in self.calib_list: with open(calib_file) as f: reader = csv.reader(f, delimiter=' ') for row in reader: if row[0] == 'K_02:': K = np.array(row[1:], dtype=np.float32).reshape(3,3) kvec = np.array([K[0,0], K[1,1], K[0,2], K[1,2]]) self.intrinsics_list.append(kvec) def __len__(self): return len(self.image1_list) def __getitem__(self, index): if not self.init_seed: worker_info = torch.utils.data.get_worker_info() if worker_info is not None: torch.manual_seed(worker_info.id) np.random.seed(worker_info.id) random.seed(worker_info.id) self.init_seed = True image1 = cv2.imread(self.image1_list[index]) image2 = cv2.imread(self.image2_list[index]) disp1 = cv2.imread(self.disp1_list[index], cv2.IMREAD_ANYDEPTH) / 256.0 disp2 = cv2.imread(self.disp2_list[index], cv2.IMREAD_ANYDEPTH) / 256.0 disp1_dense = cv2.imread(self.disp1_ga_list[index], cv2.IMREAD_ANYDEPTH) / 256.0 disp2_dense = cv2.imread(self.disp2_ga_list[index], cv2.IMREAD_ANYDEPTH) / 256.0 flow, valid = frame_utils.readFlowKITTI(self.flow_list[index]) intrinsics = self.intrinsics_list[index] SCALE = np.random.uniform(0.08, 0.15) # crop top 80 pixels, no ground truth information image1 = image1[self.crop:] image2 = image2[self.crop:] disp1 = disp1[self.crop:] disp2 = disp2[self.crop:] flow = flow[self.crop:] valid = valid[self.crop:] disp1_dense = disp1_dense[self.crop:] disp2_dense = disp2_dense[self.crop:] intrinsics[3] -= self.crop image1 = torch.from_numpy(image1).float().permute(2,0,1) image2 = torch.from_numpy(image2).float().permute(2,0,1) disp1 = torch.from_numpy(disp1 / intrinsics[0]) / SCALE disp2 = torch.from_numpy(disp2 / intrinsics[0]) / SCALE disp1_dense = torch.from_numpy(disp1_dense / intrinsics[0]) / SCALE disp2_dense = torch.from_numpy(disp2_dense / intrinsics[0]) / SCALE dz = (disp2 - disp1_dense).unsqueeze(dim=-1) depth1 = 1.0 / disp1_dense.clamp(min=0.01).float() depth2 = 1.0 / disp2_dense.clamp(min=0.01).float() intrinsics = torch.from_numpy(intrinsics) valid = torch.from_numpy(valid) flow = torch.from_numpy(flow) valid = valid * (disp2 > 0).float() flow = torch.cat([flow, dz], -1) if self.augmentor is not None: image1, image2, depth1, depth2, flow, valid, intrinsics = \ self.augmentor(image1, image2, depth1, depth2, flow, valid, intrinsics) return image1, image2, depth1, depth2, flow, valid, intrinsics
[ "cv2.imwrite", "torch.manual_seed", "numpy.ones", "torch.utils.data.get_worker_info", "os.path.join", "torch.from_numpy", "random.seed", "numpy.array", "numpy.random.seed", "csv.reader", "numpy.random.uniform", "numpy.concatenate", "numpy.pad", "cv2.imread", "torch.cat" ]
[((1925, 1982), 'numpy.pad', 'np.pad', (['disp1', '((KITTIEval.crop, 0), (0, 0))'], {'mode': '"""edge"""'}), "(disp1, ((KITTIEval.crop, 0), (0, 0)), mode='edge')\n", (1931, 1982), True, 'import numpy as np\n'), ((1996, 2053), 'numpy.pad', 'np.pad', (['disp2', '((KITTIEval.crop, 0), (0, 0))'], {'mode': '"""edge"""'}), "(disp2, ((KITTIEval.crop, 0), (0, 0)), mode='edge')\n", (2002, 2053), True, 'import numpy as np\n'), ((2068, 2132), 'numpy.pad', 'np.pad', (['flow', '((KITTIEval.crop, 0), (0, 0), (0, 0))'], {'mode': '"""edge"""'}), "(flow, ((KITTIEval.crop, 0), (0, 0), (0, 0)), mode='edge')\n", (2074, 2132), True, 'import numpy as np\n'), ((2641, 2676), 'cv2.imread', 'cv2.imread', (['self.image1_list[index]'], {}), '(self.image1_list[index])\n', (2651, 2676), False, 'import cv2\n'), ((2694, 2729), 'cv2.imread', 'cv2.imread', (['self.image2_list[index]'], {}), '(self.image2_list[index])\n', (2704, 2729), False, 'import cv2\n'), ((5361, 5396), 'cv2.imread', 'cv2.imread', (['self.image1_list[index]'], {}), '(self.image1_list[index])\n', (5371, 5396), False, 'import cv2\n'), ((5414, 5449), 'cv2.imread', 'cv2.imread', (['self.image2_list[index]'], {}), '(self.image2_list[index])\n', (5424, 5449), False, 'import cv2\n'), ((5927, 5956), 'numpy.random.uniform', 'np.random.uniform', (['(0.08)', '(0.15)'], {}), '(0.08, 0.15)\n', (5944, 5956), True, 'import numpy as np\n'), ((6955, 6983), 'torch.from_numpy', 'torch.from_numpy', (['intrinsics'], {}), '(intrinsics)\n', (6971, 6983), False, 'import torch\n'), ((7000, 7023), 'torch.from_numpy', 'torch.from_numpy', (['valid'], {}), '(valid)\n', (7016, 7023), False, 'import torch\n'), ((7039, 7061), 'torch.from_numpy', 'torch.from_numpy', (['flow'], {}), '(flow)\n', (7055, 7061), False, 'import torch\n'), ((7122, 7147), 'torch.cat', 'torch.cat', (['[flow, dz]', '(-1)'], {}), '([flow, dz], -1)\n', (7131, 7147), False, 'import torch\n'), ((1613, 1651), 'numpy.ones', 'np.ones', (['[uv.shape[0], uv.shape[1], 1]'], {}), '([uv.shape[0], uv.shape[1], 1])\n', (1620, 1651), True, 'import numpy as np\n'), ((1736, 1772), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'uv[..., ::-1]'], {}), '(filename, uv[..., ::-1])\n', (1747, 1772), False, 'import cv2\n'), ((1880, 1907), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'disp'], {}), '(filename, disp)\n', (1891, 1907), False, 'import cv2\n'), ((2747, 2805), 'cv2.imread', 'cv2.imread', (['self.disp1_ga_list[index]', 'cv2.IMREAD_ANYDEPTH'], {}), '(self.disp1_ga_list[index], cv2.IMREAD_ANYDEPTH)\n', (2757, 2805), False, 'import cv2\n'), ((2830, 2888), 'cv2.imread', 'cv2.imread', (['self.disp2_ga_list[index]', 'cv2.IMREAD_ANYDEPTH'], {}), '(self.disp2_ga_list[index], cv2.IMREAD_ANYDEPTH)\n', (2840, 2888), False, 'import cv2\n'), ((5089, 5123), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (5121, 5123), False, 'import torch\n'), ((5467, 5522), 'cv2.imread', 'cv2.imread', (['self.disp1_list[index]', 'cv2.IMREAD_ANYDEPTH'], {}), '(self.disp1_list[index], cv2.IMREAD_ANYDEPTH)\n', (5477, 5522), False, 'import cv2\n'), ((5547, 5602), 'cv2.imread', 'cv2.imread', (['self.disp2_list[index]', 'cv2.IMREAD_ANYDEPTH'], {}), '(self.disp2_list[index], cv2.IMREAD_ANYDEPTH)\n', (5557, 5602), False, 'import cv2\n'), ((5633, 5691), 'cv2.imread', 'cv2.imread', (['self.disp1_ga_list[index]', 'cv2.IMREAD_ANYDEPTH'], {}), '(self.disp1_ga_list[index], cv2.IMREAD_ANYDEPTH)\n', (5643, 5691), False, 'import cv2\n'), ((5722, 5780), 'cv2.imread', 'cv2.imread', (['self.disp2_ga_list[index]', 'cv2.IMREAD_ANYDEPTH'], 
{}), '(self.disp2_ga_list[index], cv2.IMREAD_ANYDEPTH)\n', (5732, 5780), False, 'import cv2\n'), ((6497, 6536), 'torch.from_numpy', 'torch.from_numpy', (['(disp1 / intrinsics[0])'], {}), '(disp1 / intrinsics[0])\n', (6513, 6536), False, 'import torch\n'), ((6561, 6600), 'torch.from_numpy', 'torch.from_numpy', (['(disp2 / intrinsics[0])'], {}), '(disp2 / intrinsics[0])\n', (6577, 6600), False, 'import torch\n'), ((6631, 6676), 'torch.from_numpy', 'torch.from_numpy', (['(disp1_dense / intrinsics[0])'], {}), '(disp1_dense / intrinsics[0])\n', (6647, 6676), False, 'import torch\n'), ((6707, 6752), 'torch.from_numpy', 'torch.from_numpy', (['(disp2_dense / intrinsics[0])'], {}), '(disp2_dense / intrinsics[0])\n', (6723, 6752), False, 'import torch\n'), ((571, 610), 'os.path.join', 'osp.join', (['root', 'mode', '"""image_2/*10.png"""'], {}), "(root, mode, 'image_2/*10.png')\n", (579, 610), True, 'import os.path as osp\n'), ((652, 691), 'os.path.join', 'osp.join', (['root', 'mode', '"""image_2/*11.png"""'], {}), "(root, mode, 'image_2/*11.png')\n", (660, 691), True, 'import os.path as osp\n'), ((936, 982), 'os.path.join', 'osp.join', (['root', 'mode', '"""calib_cam_to_cam/*.txt"""'], {}), "(root, mode, 'calib_cam_to_cam/*.txt')\n", (944, 982), True, 'import os.path as osp\n'), ((1128, 1156), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""" """'}), "(f, delimiter=' ')\n", (1138, 1156), False, 'import csv\n'), ((3220, 3243), 'torch.from_numpy', 'torch.from_numpy', (['disp1'], {}), '(disp1)\n', (3236, 3243), False, 'import torch\n'), ((3268, 3291), 'torch.from_numpy', 'torch.from_numpy', (['disp2'], {}), '(disp2)\n', (3284, 3291), False, 'import torch\n'), ((3321, 3349), 'torch.from_numpy', 'torch.from_numpy', (['intrinsics'], {}), '(intrinsics)\n', (3337, 3349), False, 'import torch\n'), ((3775, 3820), 'os.path.join', 'osp.join', (['root', '"""training"""', '"""image_2/*10.png"""'], {}), "(root, 'training', 'image_2/*10.png')\n", (3783, 3820), True, 'import os.path as osp\n'), ((3862, 3907), 'os.path.join', 'osp.join', (['root', '"""training"""', '"""image_2/*11.png"""'], {}), "(root, 'training', 'image_2/*11.png')\n", (3870, 3907), True, 'import os.path as osp\n'), ((3949, 3997), 'os.path.join', 'osp.join', (['root', '"""training"""', '"""disp_occ_0/*10.png"""'], {}), "(root, 'training', 'disp_occ_0/*10.png')\n", (3957, 3997), True, 'import os.path as osp\n'), ((4038, 4086), 'os.path.join', 'osp.join', (['root', '"""training"""', '"""disp_occ_1/*10.png"""'], {}), "(root, 'training', 'disp_occ_1/*10.png')\n", (4046, 4086), True, 'import os.path as osp\n'), ((4131, 4179), 'os.path.join', 'osp.join', (['root', '"""training"""', '"""disp_ganet/*10.png"""'], {}), "(root, 'training', 'disp_ganet/*10.png')\n", (4139, 4179), True, 'import os.path as osp\n'), ((4223, 4271), 'os.path.join', 'osp.join', (['root', '"""training"""', '"""disp_ganet/*11.png"""'], {}), "(root, 'training', 'disp_ganet/*11.png')\n", (4231, 4271), True, 'import os.path as osp\n'), ((4312, 4358), 'os.path.join', 'osp.join', (['root', '"""training"""', '"""flow_occ/*10.png"""'], {}), "(root, 'training', 'flow_occ/*10.png')\n", (4320, 4358), True, 'import os.path as osp\n'), ((4399, 4451), 'os.path.join', 'osp.join', (['root', '"""training"""', '"""calib_cam_to_cam/*.txt"""'], {}), "(root, 'training', 'calib_cam_to_cam/*.txt')\n", (4407, 4451), True, 'import os.path as osp\n'), ((4597, 4625), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""" """'}), "(f, delimiter=' ')\n", (4607, 4625), False, 'import csv\n'), ((5180, 5213), 
'torch.manual_seed', 'torch.manual_seed', (['worker_info.id'], {}), '(worker_info.id)\n', (5197, 5213), False, 'import torch\n'), ((5230, 5260), 'numpy.random.seed', 'np.random.seed', (['worker_info.id'], {}), '(worker_info.id)\n', (5244, 5260), True, 'import numpy as np\n'), ((5277, 5304), 'random.seed', 'random.seed', (['worker_info.id'], {}), '(worker_info.id)\n', (5288, 5304), False, 'import random\n'), ((1669, 1705), 'numpy.concatenate', 'np.concatenate', (['[uv, valid]'], {'axis': '(-1)'}), '([uv, valid], axis=-1)\n', (1683, 1705), True, 'import numpy as np\n'), ((1342, 1388), 'numpy.array', 'np.array', (['[K[0, 0], K[1, 1], K[0, 2], K[1, 2]]'], {}), '([K[0, 0], K[1, 1], K[0, 2], K[1, 2]])\n', (1350, 1388), True, 'import numpy as np\n'), ((3091, 3115), 'torch.from_numpy', 'torch.from_numpy', (['image1'], {}), '(image1)\n', (3107, 3115), False, 'import torch\n'), ((3156, 3180), 'torch.from_numpy', 'torch.from_numpy', (['image2'], {}), '(image2)\n', (3172, 3180), False, 'import torch\n'), ((4811, 4857), 'numpy.array', 'np.array', (['[K[0, 0], K[1, 1], K[0, 2], K[1, 2]]'], {}), '([K[0, 0], K[1, 1], K[0, 2], K[1, 2]])\n', (4819, 4857), True, 'import numpy as np\n'), ((6367, 6391), 'torch.from_numpy', 'torch.from_numpy', (['image1'], {}), '(image1)\n', (6383, 6391), False, 'import torch\n'), ((6432, 6456), 'torch.from_numpy', 'torch.from_numpy', (['image2'], {}), '(image2)\n', (6448, 6456), False, 'import torch\n'), ((1262, 1297), 'numpy.array', 'np.array', (['row[1:]'], {'dtype': 'np.float32'}), '(row[1:], dtype=np.float32)\n', (1270, 1297), True, 'import numpy as np\n'), ((4731, 4766), 'numpy.array', 'np.array', (['row[1:]'], {'dtype': 'np.float32'}), '(row[1:], dtype=np.float32)\n', (4739, 4766), True, 'import numpy as np\n')]
""" Test the maximum a posteriori estimates """ import time import numpy as np from .test_model import prepare_dla_model def test_DLA_MAP(): # test 1 dla_gp = prepare_dla_model(plate=5309, mjd=55929, fiber_id=362, z_qso=3.166) tic = time.time() max_dlas = 4 log_likelihoods_dla = dla_gp.log_model_evidences(max_dlas) toc = time.time() # very time consuming: ~ 4 mins for a single spectrum without parallelized. print("spent {} mins; {} seconds".format((toc - tic) // 60, (toc - tic) % 60)) catalog_MAP_log_nhis = np.array( [ [22.28420156, np.nan, np.nan, np.nan], [20.63417494, 22.28420156, np.nan, np.nan], [20.60601572, 22.28420156, 20.63417494, np.nan], [20.12721363, 22.28420156, 20.63417494, 20.36967609], ] ) catalog_MAP_z_dlas = np.array( [ [3.03175723, np.nan, np.nan, np.nan], [2.52182382, 3.03175723, np.nan, np.nan], [2.39393537, 3.03175723, 2.52182382, np.nan], [2.94786938, 3.03175723, 2.52182382, 2.38944805], ] ) mapind = np.nanargmax(log_likelihoods_dla) MAP_z_dla, MAP_log_nhi = dla_gp.maximum_a_posteriori() nanind = np.isnan(catalog_MAP_z_dlas[mapind]) assert np.all( np.abs(MAP_z_dla[mapind][~nanind] - catalog_MAP_z_dlas[mapind][~nanind]) < 1e-1 ) assert np.all( np.abs(MAP_log_nhi[mapind][~nanind] - catalog_MAP_log_nhis[mapind][~nanind]) < 1e-1 ) # test 2 dla_gp = prepare_dla_model(plate=3816, mjd=55272, fiber_id=76, z_qso=3.68457627) tic = time.time() max_dlas = 4 log_likelihoods_dla = dla_gp.log_model_evidences(max_dlas) toc = time.time() # very time consuming: ~ 4 mins for a single spectrum without parallelized. print("spent {} mins; {} seconds".format((toc - tic) // 60, (toc - tic) % 60)) catalog_MAP_log_nhis = np.array( [ [21.05371292, np.nan, np.nan, np.nan], [20.0073665, 20.94707037, np.nan, np.nan], [20.00838815, 20.94707037, 20.0073665, np.nan], [20.20539934, 20.94707037, 20.0073665, 20.0134955], ] ) catalog_MAP_z_dlas = np.array( [ [3.42520566, np.nan, np.nan, np.nan], [2.69422714, 3.42710284, np.nan, np.nan], [3.41452521, 3.42710284, 2.69422714, np.nan], [3.43813463, 3.42710284, 2.69422714, 3.41262802], ] ) mapind = np.nanargmax(log_likelihoods_dla) MAP_z_dla, MAP_log_nhi = dla_gp.maximum_a_posteriori() nanind = np.isnan(catalog_MAP_z_dlas[mapind]) assert np.all( np.abs(MAP_z_dla[mapind][~nanind] - catalog_MAP_z_dlas[mapind][~nanind]) < 1e-1 ) assert np.all( np.abs(MAP_log_nhi[mapind][~nanind] - catalog_MAP_log_nhis[mapind][~nanind]) < 1e-1 )
[ "numpy.nanargmax", "numpy.abs", "numpy.array", "numpy.isnan", "time.time" ]
[((249, 260), 'time.time', 'time.time', ([], {}), '()\n', (258, 260), False, 'import time\n'), ((353, 364), 'time.time', 'time.time', ([], {}), '()\n', (362, 364), False, 'import time\n'), ((556, 761), 'numpy.array', 'np.array', (['[[22.28420156, np.nan, np.nan, np.nan], [20.63417494, 22.28420156, np.nan,\n np.nan], [20.60601572, 22.28420156, 20.63417494, np.nan], [20.12721363,\n 22.28420156, 20.63417494, 20.36967609]]'], {}), '([[22.28420156, np.nan, np.nan, np.nan], [20.63417494, 22.28420156,\n np.nan, np.nan], [20.60601572, 22.28420156, 20.63417494, np.nan], [\n 20.12721363, 22.28420156, 20.63417494, 20.36967609]])\n', (564, 761), True, 'import numpy as np\n'), ((852, 1048), 'numpy.array', 'np.array', (['[[3.03175723, np.nan, np.nan, np.nan], [2.52182382, 3.03175723, np.nan, np.\n nan], [2.39393537, 3.03175723, 2.52182382, np.nan], [2.94786938, \n 3.03175723, 2.52182382, 2.38944805]]'], {}), '([[3.03175723, np.nan, np.nan, np.nan], [2.52182382, 3.03175723, np\n .nan, np.nan], [2.39393537, 3.03175723, 2.52182382, np.nan], [\n 2.94786938, 3.03175723, 2.52182382, 2.38944805]])\n', (860, 1048), True, 'import numpy as np\n'), ((1126, 1159), 'numpy.nanargmax', 'np.nanargmax', (['log_likelihoods_dla'], {}), '(log_likelihoods_dla)\n', (1138, 1159), True, 'import numpy as np\n'), ((1234, 1270), 'numpy.isnan', 'np.isnan', (['catalog_MAP_z_dlas[mapind]'], {}), '(catalog_MAP_z_dlas[mapind])\n', (1242, 1270), True, 'import numpy as np\n'), ((1619, 1630), 'time.time', 'time.time', ([], {}), '()\n', (1628, 1630), False, 'import time\n'), ((1723, 1734), 'time.time', 'time.time', ([], {}), '()\n', (1732, 1734), False, 'import time\n'), ((1926, 2127), 'numpy.array', 'np.array', (['[[21.05371292, np.nan, np.nan, np.nan], [20.0073665, 20.94707037, np.nan,\n np.nan], [20.00838815, 20.94707037, 20.0073665, np.nan], [20.20539934, \n 20.94707037, 20.0073665, 20.0134955]]'], {}), '([[21.05371292, np.nan, np.nan, np.nan], [20.0073665, 20.94707037,\n np.nan, np.nan], [20.00838815, 20.94707037, 20.0073665, np.nan], [\n 20.20539934, 20.94707037, 20.0073665, 20.0134955]])\n', (1934, 2127), True, 'import numpy as np\n'), ((2218, 2414), 'numpy.array', 'np.array', (['[[3.42520566, np.nan, np.nan, np.nan], [2.69422714, 3.42710284, np.nan, np.\n nan], [3.41452521, 3.42710284, 2.69422714, np.nan], [3.43813463, \n 3.42710284, 2.69422714, 3.41262802]]'], {}), '([[3.42520566, np.nan, np.nan, np.nan], [2.69422714, 3.42710284, np\n .nan, np.nan], [3.41452521, 3.42710284, 2.69422714, np.nan], [\n 3.43813463, 3.42710284, 2.69422714, 3.41262802]])\n', (2226, 2414), True, 'import numpy as np\n'), ((2492, 2525), 'numpy.nanargmax', 'np.nanargmax', (['log_likelihoods_dla'], {}), '(log_likelihoods_dla)\n', (2504, 2525), True, 'import numpy as np\n'), ((2600, 2636), 'numpy.isnan', 'np.isnan', (['catalog_MAP_z_dlas[mapind]'], {}), '(catalog_MAP_z_dlas[mapind])\n', (2608, 2636), True, 'import numpy as np\n'), ((1298, 1370), 'numpy.abs', 'np.abs', (['(MAP_z_dla[mapind][~nanind] - catalog_MAP_z_dlas[mapind][~nanind])'], {}), '(MAP_z_dla[mapind][~nanind] - catalog_MAP_z_dlas[mapind][~nanind])\n', (1304, 1370), True, 'import numpy as np\n'), ((1411, 1487), 'numpy.abs', 'np.abs', (['(MAP_log_nhi[mapind][~nanind] - catalog_MAP_log_nhis[mapind][~nanind])'], {}), '(MAP_log_nhi[mapind][~nanind] - catalog_MAP_log_nhis[mapind][~nanind])\n', (1417, 1487), True, 'import numpy as np\n'), ((2665, 2737), 'numpy.abs', 'np.abs', (['(MAP_z_dla[mapind][~nanind] - catalog_MAP_z_dlas[mapind][~nanind])'], {}), '(MAP_z_dla[mapind][~nanind] - 
catalog_MAP_z_dlas[mapind][~nanind])\n', (2671, 2737), True, 'import numpy as np\n'), ((2778, 2854), 'numpy.abs', 'np.abs', (['(MAP_log_nhi[mapind][~nanind] - catalog_MAP_log_nhis[mapind][~nanind])'], {}), '(MAP_log_nhi[mapind][~nanind] - catalog_MAP_log_nhis[mapind][~nanind])\n', (2784, 2854), True, 'import numpy as np\n')]
""" """ import datetime import os # import sys import logging import numpy as np import scipy as sp import scipy.optimize # noqa import tqdm import h5py import zcode.inout as zio import zcode.math as zmath from . import spectra, radiation # , utils from . import PATH_DATA, MASS_EXTR, FEDD_EXTR, RADS_EXTR from . constants import MSOL, MELC, MPRT, SPLC, K_BLTZ, H_PLNK NUM = 10 np.seterr(divide='ignore', invalid='ignore', over='raise') # MASS_EXTR = [1e6, 5e10] # FEDD_EXTR = [1e-5, 1e-1] # RADS_EXTR = [3.0, 1e5] GRID_NAMES = ['mass', 'fedd', 'rmin', 'rmax'] ALPHA_VISC = 0.1 BETA_GP = 0.5 FRAC_ADV = 0.5 GAMMA_SH = (32 - 24*BETA_GP - 3*BETA_GP**2) / (24 - 21*BETA_GP) EPS = (5/3 - GAMMA_SH) / (GAMMA_SH - 1.0) EPS_PRIME = EPS / FRAC_ADV DELTA = MELC/MPRT GAE = np.sqrt(1.0 + 18.0 * np.square(ALPHA_VISC/(5.0 + 2*EPS_PRIME))) - 1.0 C1 = GAE * (5 + 2*EPS_PRIME) / (3 * np.square(ALPHA_VISC)) # C2 = np.sqrt(2 * EPS_PRIME * C1 / 3) C3 = 2 * C1 / 3 MEC2 = MELC * SPLC**2 S1 = 1.42e9 * np.sqrt(1 - BETA_GP) * np.sqrt(C3 / C1 / ALPHA_VISC) S3 = 1.05e-24 KB_OVER_MEC2 = K_BLTZ / MEC2 META = dict(ALPHA_VISC=ALPHA_VISC, BETA_GP=BETA_GP, FRAC_ADV=FRAC_ADV) def main(num=None, recreate=True): if num is None: num = NUM fname = grid_fname(num) exists = os.path.exists(fname) logging.warning("Grid for num={} exists: {} ({})".format(num, exists, fname)) logging.info("recreate: {}".format(recreate)) if not exists or recreate: grid, grid_names, grid_temps, grid_valid = get_temp_grid(num) save_grid(fname, grid, grid_names, grid_temps, grid_valid) return def get_interp(num=None): if num is None: num = NUM fname = grid_fname(num) grid, grid_names, grid_temps, grid_valid = load_grid(fname=fname) grid_temps[~grid_valid] = np.mean(grid_temps[grid_valid]) # mesh = np.meshgrid(*grid) # mesh = np.log10(mesh) mesh = [np.log10(gg) for gg in grid] grid_temps = np.log10(grid_temps) interp_ll = sp.interpolate.RegularGridInterpolator(mesh, grid_temps) def interp(xx): try: res = 10**interp_ll(np.log10(xx)) except ValueError: logging.error("ValueError for argument: '{}'".format(xx)) logging.error("ValueError for argument: log: '{}'".format(np.log10(xx))) for gg in interp_ll.grid: logging.error("\t{}".format(zmath.minmax(gg))) raise return res return interp def grid_fname(num): fname = "temp_grid_n{}.hdf5".format(num) fname = os.path.join(PATH_DATA, fname) return fname def save_grid(fname, grid, grid_names, grid_temps, grid_valid): fname = os.path.abspath(fname) with h5py.File(fname, 'w') as out: group = out.create_group('grid') for nn, vv in zip(grid_names, grid): group.create_dataset(nn, data=vv) group = out.create_group('parameters') for nn, vv in META.items(): group.create_dataset(nn, data=vv) out.create_dataset('temps', data=grid_temps) out.create_dataset('valid', data=grid_valid) logging.info("Saved to '{}' size '{}'".format(fname, zio.get_file_size(fname))) return def load_grid(*args, num=None, fname=None): if len(args): raise ValueError("Only passed kwargs to `load_grid()`!") if fname is None: if num is None: num = NUM fname = grid_fname(num) fname = os.path.abspath(fname) if not os.path.exists(fname): raise ValueError("fname '{}' does not exist!".format(fname)) with h5py.File(fname, 'r') as h5: grid_group = h5['grid'] # grid_names = list(grid_group.keys()) grid_names = [] grid = [] for nn in GRID_NAMES: grid.append(grid_group[nn][:]) grid_names.append(nn) grid_temps = h5['temps'][:] grid_valid = h5['valid'][:] return grid, grid_names, grid_temps, grid_valid def get_temp_grid(num, fix=True): grid_extr = [np.array(MASS_EXTR)*MSOL, 
FEDD_EXTR, RADS_EXTR, RADS_EXTR] grid_names = ['mass', 'fedd', 'rmin', 'rmax'] grid = [np.logspace(*np.log10(extr), num) for extr in grid_extr] shape = [num for ii in range(len(grid))] tot = np.product(shape) grid_temps = np.zeros(shape) grid_valid = np.ones(shape, dtype=bool) cnt = 0 beg = datetime.datetime.now() for idx in tqdm.tqdm(np.ndindex(*shape), total=tot): # print(idx) vals = [gg[ii] for gg, ii in zip(grid, idx)] if vals[2] >= vals[3]: grid_valid[idx] = False continue tt = solve_adaf_temp(*vals) if tt is not None: grid_temps[idx] = tt cnt += 1 end = datetime.datetime.now() dur = (end - beg) dur_per = dur.total_seconds()/cnt bads_nan = np.isnan(grid_temps) grid_temps = np.nan_to_num(grid_temps) bads = grid_valid & np.isclose(grid_temps, 0.0) logging.warning("Success on : {}".format(zmath.frac_str(grid_temps[grid_valid] > 0.0))) logging.warning("nan values: {}".format(zmath.frac_str(bads_nan))) logging.warning("Bad values: {}".format(zmath.frac_str(bads))) logging.warning("Done after {}, per iteration: {}".format(str(dur), dur_per)) if fix: grid_temps = interp_bad_grid_vals(grid, grid_temps, grid_valid) return grid, grid_names, grid_temps, grid_valid def solve_adaf_temp(mass, fedd, rmin, rmax, debug=False): msol = mass / MSOL lvl = logging.WARNING def heat_cool(temp): """Calculate heating and cooling rates for disk as a whole. """ nonlocal mass, fedd, rmin, rmax, msol alpha = ALPHA_VISC beta = BETA_GP eps_prime = EPS_PRIME delta = DELTA rmin = rmin rmax = rmax theta_e = KB_OVER_MEC2 * temp xm = spectra.xm_from_te(temp, msol, fedd) tau_es = 23.87 * fedd * (0.3 / alpha) * (0.5 / C1) * np.sqrt(3/rmin) mean_amp_a = 1.0 + 4.0 * theta_e + 16*np.square(theta_e) alpha_crit = - np.log(tau_es) / np.log(mean_amp_a) s2 = 1.19e-13 * xm # Viscous Heating # --------------- _ge = radiation._heat_func_g(theta_e) q1 = 1.2e38 * _ge * C3 * beta * msol * np.square(fedd) / np.square(alpha*C1) / rmin q2 = delta * 9.39e38 * eps_prime * C3 * msol * fedd / rmin heat_elc = q1 + q2 # Synchrotron # ----------- # Eq. 24 [Hz] f_p = S1 * s2 * np.sqrt(fedd/msol) * np.square(temp) * np.power(rmin, -1.25) lum_synch_peak = np.power(S1 * s2, 3) * S3 * np.power(rmin, -1.75) * np.sqrt(msol) lum_synch_peak *= np.power(fedd, 1.5) * np.power(temp, 7) / f_p # Eq. 26 power_synch = 5.3e35 * np.power(xm/1000, 3) * np.power(alpha/0.3, -1.5) power_synch *= np.power((1 - beta)/0.5, 1.5) * np.power(C1/0.5, -1.5) # Bremsstrahlung # -------------- # Eq. 29 power_brems = 4.78e34 * np.log(rmax/rmin) / np.square(alpha * C1) power_brems *= radiation._brems_fit_func_f(theta_e) * fedd * msol # Compton # ------- power_compt = lum_synch_peak * f_p / (1 - alpha_crit) power_compt *= (np.power(6.2e7 * (temp/1e9) / (f_p/1e12), 1 - alpha_crit) - 1.0) return heat_elc, power_synch, power_brems, power_compt def _func(logt): tt = np.power(10.0, logt) qv, qs, qb, qc = heat_cool(tt) rv = qv - (qs + qb + qc) return rv start_temps = [1e11, 1e10, 1e12, 1e9, 1e8] success = False for ii, t0 in enumerate(start_temps): try: logt = sp.optimize.newton(_func, np.log10(t0), tol=1e-4, maxiter=100) temp_e = np.power(10.0, logt) except (RuntimeError, FloatingPointError) as err: if debug: logging.warn("Trial '{}' (t={:.1e}) optimization failed: {}".format( ii, t0, str(err))) else: success = True break if success: # logging.log(lvl, "Success with `t0`={:.2e} ==> t={:.2e}".format(t0, temp_e)) pass else: err = ("Unable to find electron temperature!" 
"\nIf the eddington factor is larger than 1e-2, " "this may be expected!") if debug: logging.log(lvl, "FAILED to find electron temperature!") logging.log(lvl, "m = {:.2e}, f = {:.2e}".format(msol, fedd)) logging.log(lvl, err) # raise RuntimeError(err) return None qv, qs, qb, qc = heat_cool(temp_e) heat = qv cool = qs + qb + qc diff = np.fabs(heat - cool) / heat if diff < 1e-2: if debug: logging.log(lvl, "Heating vs. cooling frac-diff: {:.2e}".format(diff)) else: if debug: err = "Electron temperature seems inconsistent (Te = {:.2e})!".format(temp_e) err += "\n\tm: {:.2e}, f: {:.2e}".format(msol, fedd) err += "\n\tHeating: {:.2e}, Cooling: {:.2e}, diff: {:.4e}".format(heat, cool, diff) err += "\n\tThis may mean there is an input error (e.g. mdot may be too large... or small?)." logging.log(lvl, err) return None return temp_e def interp_bad_grid_vals(grid, grid_temps, grid_valid): grid_temps = np.copy(grid_temps) bads = grid_valid & np.isclose(grid_temps, 0.0) shape = [len(gg) for gg in grid] logging.warning("Fixing bad values: {}".format(zmath.frac_str(bads))) neighbors = [] good_neighbors = [] bads_inds = np.array(np.where(bads)).T for bad in tqdm.tqdm(bads_inds): nbs = [] # print(bad) cnt = 0 for dim in range(4): for side in [-1, +1]: test = [bb for bb in bad] test[dim] += side if test[dim] < 0 or test[dim] >= shape[dim]: continue test = tuple(test) # print("\t", test) # print("\t", temps[test]) nbs.append(test) if grid_temps[test] > 0.0: cnt += 1 neighbors.append(nbs) good_neighbors.append(cnt) num_nbs = [len(nbs) for nbs in neighbors] logging.warning("All neighbors: {}".format(zmath.stats_str(num_nbs))) logging.warning("Good neighbors: {}".format(zmath.stats_str(good_neighbors))) goods = np.zeros(len(neighbors)) MAX_TRIES = 10 still_bad = list(np.argsort(good_neighbors)[::-1]) tries = 0 while len(still_bad) > 0 and tries < MAX_TRIES: keep_bad = [] for kk, ii in enumerate(still_bad): values = np.zeros(num_nbs[ii]) for jj, nbr in enumerate(neighbors[ii]): values[jj] = grid_temps[nbr] cnt = np.count_nonzero(values) if cnt == 0: keep_bad.append(kk) continue new = np.sum(np.log10(values[values > 0])) / cnt loc = tuple(bads_inds[ii]) # print("\t", loc, new, cnt) grid_temps[loc] = 10**new goods[ii] = cnt still_bad = [still_bad[kk] for kk in keep_bad] num_still = len(still_bad) logging.warning("Try: {}, still_bad: {}".format(tries, num_still)) if (tries+1 >= MAX_TRIES) and (num_still > 0): logging.error("After {} tries, still {} bad!!".format(tries, num_still)) tries += 1 logging.warning("Filled neighbors: {}".format(zmath.stats_str(goods))) logging.warning("Full temps array: {}".format(zmath.stats_str(grid_temps[grid_valid]))) return grid_temps def plot_grid(grid, grid_names, temps, valid, interp=None): import matplotlib.pyplot as plt import zcode.plot as zplot extr = zmath.minmax(temps, filter='>') smap = zplot.colormap(extr, 'viridis') # bads = valid & np.isclose(temps, 0.0) num = len(grid) fig, axes = plt.subplots(figsize=[14, 14], nrows=num, ncols=num) plt.subplots_adjust(hspace=0.4, wspace=0.4) def_idx = [-4, -4, 4, -4] for (ii, jj), ax in np.ndenumerate(axes): if ii < jj: ax.set_visible(False) continue ax.set(xscale='log', yscale='log') xx = grid[jj] if ii == jj: # print(grid_names[ii], zmath.minmax(grid[ii], filter='>')) # idx = list(range(num)) # idx.pop(ii) # idx = tuple(idx) # vals = np.mean(temps, axis=idx) idx = [slice(None) if aa == ii else def_idx[aa] for aa in range(num)] vals = temps[tuple(idx)] ax.plot(xx, vals, 'k-') if interp is not None: num_test = 10 test = 
[np.ones(num_test)*grid[aa][def_idx[aa]] for aa in range(num)] test[ii] = zmath.spacing(grid[ii], 'log', num_test) test_vals = [interp(tt) for tt in np.array(test).T] ax.plot(test[ii], test_vals, 'r--') # bad_vals = np.count_nonzero(bads, axis=idx) # tw = ax.twinx() # tw.plot(xx, bad_vals, 'r--') else: # print(ii, jj) # print("\t", ii, grid_names[ii], zmath.minmax(grid[ii], filter='>')) # print("\t", jj, grid_names[jj], zmath.minmax(grid[jj], filter='>')) # idx = [0, 1, 2, 3] # idx.pop(np.max([ii, jj])) # idx.pop(np.min([ii, jj])) # vals = np.mean(temps, axis=tuple(idx)) # idx = [slice(None) if aa in [ii, jj] else num//2 for aa in range(num)] idx = [slice(None) if aa in [ii, jj] else def_idx[aa] for aa in range(num)] vals = temps[tuple(idx)] if len(vals) == 0: continue yy = grid[ii] xx, yy = np.meshgrid(xx, yy, indexing='ij') ax.pcolor(xx, yy, vals, cmap=smap.cmap, norm=smap.norm) if np.count_nonzero(vals > 0.0) == 0: continue tit = "{:.1e}, {:.1e}".format(*zmath.minmax(vals, filter='>')) ax.set_title(tit, size=10) # bad_vals = np.count_nonzero(bads, axis=tuple(idx)) # idx = (bad_vals > 0.0) # aa = xx[idx] # bb = yy[idx] # cc = bad_vals[idx] # ax.scatter(aa, bb, s=2*cc**2, color='0.5', alpha=0.5) # ax.scatter(aa, bb, s=cc**2, color='r') if interp is not None: for kk in range(10): idx = (vals > 0.0) x0 = 10**np.random.uniform(*zmath.minmax(np.log10(xx[idx]))) y0 = 10**np.random.uniform(*zmath.minmax(np.log10(yy[idx]))) # y0 = np.random.choice(yy[idx]) temp = [grid[ll][def_idx[ll]] for ll in range(num)] temp[ii] = y0 temp[jj] = x0 if temp[2] >= temp[3]: temp[2] = 3.1 iv = interp(temp) if not np.isfinite(iv) or np.isclose(iv, 0.0): print("\nBAD") print(temp) print(iv) for kk in range(num): if def_idx[kk] == 0: temp[kk] = temp[kk] * 1.11 elif def_idx[kk] == -1: temp[kk] = 0.99 * temp[kk] iv = interp(temp) print("\t", temp) print("\t", iv) cc = smap.to_rgba(iv) ss = 20 ax.scatter(temp[jj], temp[ii], color='0.5', s=2*ss) ax.scatter(temp[jj], temp[ii], color=cc, s=ss) if ii == num-1: ax.set_xlabel(grid_names[jj]) if jj == 0 and ii != 0: ax.set_ylabel(grid_names[ii]) return fig class Fast_Mahadevan96: def __init__(self, mass, fedd, rmin, rmax, temp_e=None, interp=None): """ """ self.mass = mass # Mass in units of solar=masses self.msol = mass/MSOL self.fedd = fedd self.rmin = rmin self.rmax = rmax if temp_e is None: if interp is None: interp = get_interp() temp_e = interp([mass, fedd, rmin, rmax]) self.temp_e = temp_e xm_e = spectra.xm_from_te(temp_e, self.msol, fedd) self.s2 = 1.19e-13 * xm_e theta_e = radiation.dimensionless_temperature_theta(temp_e, MELC) # Eq. 31 tau_es = 23.87 * fedd * (0.3 / ALPHA_VISC) * (0.5 / C1) * np.sqrt(3/rmin) # Eq. 32 mean_amp_a = 1.0 + 4.0 * theta_e + 16*np.square(theta_e) # Eq. 34 self.alpha_crit = - np.log(tau_es) / np.log(mean_amp_a) return def spectrum(self, freqs): synch = self._calc_spectrum_synch(freqs) brems = self._calc_spectrum_brems(freqs) compt = self._calc_spectrum_compt(freqs) spectrum = synch + brems + compt return spectrum def _calc_spectrum_synch(self, freqs): """Mahadevan 1996 - Eq. 25 Cutoff above peak frequency (i.e. ignore exponential portion). Ignore low-frequency transition to steeper (22/13 slope) from rmax. 
""" msol = self.msol fedd = self.fedd scalar = np.isscalar(freqs) freqs = np.atleast_1d(freqs) lnu = S3 * np.power(S1*self.s2, 1.6) lnu *= np.power(msol, 1.2) * np.power(fedd, 0.8) lnu *= np.power(self.temp_e, 4.2) * np.power(freqs, 0.4) nu_p = self._freq_synch_peak(self.temp_e, msol, fedd) lnu[freqs > nu_p] = 0.0 if scalar: lnu = np.squeeze(lnu) return lnu def _calc_spectrum_brems(self, freqs): """Mahadevan 1996 - Eq. 30 """ msol = self.msol fedd = self.fedd temp = self.temp_e const = 2.29e24 # erg/s/Hz scalar = np.isscalar(freqs) freqs = np.atleast_1d(freqs) t1 = np.log(self.rmax/self.rmin) / np.square(ALPHA_VISC * C1) t2 = np.exp(-H_PLNK*freqs / (K_BLTZ * temp)) * msol * np.square(fedd) / temp fe = radiation._brems_fit_func_f(temp) lbrems = const * t1 * fe * t2 if scalar: lbrems = np.squeeze(lbrems) return lbrems def _calc_spectrum_compt(self, freqs): """Compton Scattering spectrum from upscattering of Synchrotron photons. Mahadevan 1996 - Eq. 38 """ fedd = self.fedd temp = self.temp_e scalar = np.isscalar(freqs) freqs = np.atleast_1d(freqs) f_p, l_p = self._synch_peak(fedd, self.msol, temp) lsp = np.power(freqs/f_p, -self.alpha_crit) * l_p lsp[freqs < f_p] = 0.0 # See Eq. 35 max_freq = 3*K_BLTZ*temp/H_PLNK lsp[freqs > max_freq] = 0.0 if scalar: lsp = np.squeeze(lsp) return lsp def _freq_synch_peak(self, temp, msol, fedd): """Mahadevan 1996 Eq. 24 """ nu_p = S1 * self.s2 * np.sqrt(fedd/msol) * np.square(temp) * np.power(self.rmin, -1.25) return nu_p def _synch_peak(self, fedd, msol, temp): f_p = self._freq_synch_peak(temp, msol, fedd) l_p = np.power(S1 * self.s2, 3) * S3 * np.power(self.rmin, -1.75) * np.sqrt(msol) l_p *= np.power(fedd, 1.5) * np.power(temp, 7) / f_p return f_p, l_p class Fast_Mahadevan96_Array: def __init__(self, mass, fedd, rmin, rmax, temp_e=None, interp=None): """ """ self.mass = mass # Mass in units of solar=masses self.msol = mass/MSOL self.fedd = fedd self.rmin = rmin self.rmax = rmax if temp_e is None: if interp is None: interp = get_interp() args = [mass, fedd, rmin, rmax] shp = np.shape(args[0]) if not np.all([shp == np.shape(aa) for aa in args]): all_shps = [np.shape(aa) for aa in args] print("all shapes = ", all_shps) raise ValueError("Shape mismatch!") args = [aa.flatten() for aa in args] args = np.array(args).T temp_e = interp(args) temp_e = temp_e.reshape(shp) assert np.shape(temp_e) == np.shape(mass), "Output shape mismatch!" self.temp_e = temp_e xm_e = spectra.xm_from_te(temp_e, self.msol, fedd) self.s2 = 1.19e-13 * xm_e theta_e = radiation.dimensionless_temperature_theta(temp_e, MELC) # Eq. 31 tau_es = 23.87 * fedd * (0.3 / ALPHA_VISC) * (0.5 / C1) * np.sqrt(3/rmin) # Eq. 32 mean_amp_a = 1.0 + 4.0 * theta_e + 16*np.square(theta_e) # Eq. 34 self.alpha_crit = - np.log(tau_es) / np.log(mean_amp_a) return def spectrum(self, freqs): synch = self._calc_spectrum_synch(freqs) brems = self._calc_spectrum_brems(freqs) compt = self._calc_spectrum_compt(freqs) spectrum = synch + brems + compt return spectrum def _calc_spectrum_synch(self, freqs): """Mahadevan 1996 - Eq. 25 Cutoff above peak frequency (i.e. ignore exponential portion). Ignore low-frequency transition to steeper (22/13 slope) from rmax. 
""" msol = self.msol fedd = self.fedd scalar = np.isscalar(freqs) freqs = np.atleast_1d(freqs) lnu = S3 * np.power(S1*self.s2, 1.6) # lnu *= np.power(msol, 1.2) * np.power(fedd, 0.8) # lnu *= np.power(self.temp_e, 4.2) * np.power(freqs, 0.4) lnu = lnu * np.power(msol, 1.2) * np.power(fedd, 0.8) lnu = lnu * np.power(self.temp_e, 4.2) * np.power(freqs, 0.4) nu_p = self._freq_synch_peak(self.temp_e, msol, fedd) lnu[freqs > nu_p] = 0.0 if scalar: lnu = np.squeeze(lnu) return lnu def _calc_spectrum_brems(self, freqs): """Mahadevan 1996 - Eq. 30 """ msol = self.msol fedd = self.fedd temp = self.temp_e const = 2.29e24 # erg/s/Hz scalar = np.isscalar(freqs) freqs = np.atleast_1d(freqs) t1 = np.log(self.rmax/self.rmin) / np.square(ALPHA_VISC * C1) t2 = np.exp(-H_PLNK*freqs / (K_BLTZ * temp)) * msol * np.square(fedd) / temp fe = radiation._brems_fit_func_f(temp) lbrems = const * t1 * fe * t2 if scalar: lbrems = np.squeeze(lbrems) return lbrems def _calc_spectrum_compt(self, freqs): """Compton Scattering spectrum from upscattering of Synchrotron photons. Mahadevan 1996 - Eq. 38 """ fedd = self.fedd temp = self.temp_e scalar = np.isscalar(freqs) freqs = np.atleast_1d(freqs) f_p, l_p = self._synch_peak(fedd, self.msol, temp) lsp = np.power(freqs/f_p, -self.alpha_crit) * l_p lsp[freqs < f_p] = 0.0 # See Eq. 35 max_freq = 3*K_BLTZ*temp/H_PLNK lsp[freqs > max_freq] = 0.0 if scalar: lsp = np.squeeze(lsp) return lsp def _freq_synch_peak(self, temp, msol, fedd): """Mahadevan 1996 Eq. 24 """ nu_p = S1 * self.s2 * np.sqrt(fedd/msol) * np.square(temp) * np.power(self.rmin, -1.25) return nu_p def _synch_peak(self, fedd, msol, temp): f_p = self._freq_synch_peak(temp, msol, fedd) l_p = np.power(S1 * self.s2, 3) * S3 * np.power(self.rmin, -1.75) * np.sqrt(msol) l_p *= np.power(fedd, 1.5) * np.power(temp, 7) / f_p return f_p, l_p if __name__ == "__main__": main()
[ "numpy.product", "zcode.plot.colormap", "numpy.log10", "numpy.sqrt", "numpy.log", "zcode.math.stats_str", "numpy.argsort", "numpy.array", "logging.log", "numpy.count_nonzero", "numpy.isfinite", "zcode.inout.get_file_size", "os.path.exists", "numpy.mean", "numpy.isscalar", "scipy.interpolate.RegularGridInterpolator", "numpy.where", "numpy.ndenumerate", "numpy.exp", "zcode.math.minmax", "numpy.meshgrid", "numpy.ones", "zcode.math.spacing", "numpy.square", "h5py.File", "numpy.squeeze", "numpy.isnan", "numpy.shape", "matplotlib.pyplot.subplots_adjust", "numpy.atleast_1d", "numpy.copy", "numpy.fabs", "numpy.isclose", "numpy.power", "zcode.math.frac_str", "tqdm.tqdm", "os.path.join", "numpy.ndindex", "datetime.datetime.now", "numpy.zeros", "os.path.abspath", "numpy.seterr", "matplotlib.pyplot.subplots", "numpy.nan_to_num" ]
[((384, 442), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""', 'over': '"""raise"""'}), "(divide='ignore', invalid='ignore', over='raise')\n", (393, 442), True, 'import numpy as np\n'), ((1018, 1047), 'numpy.sqrt', 'np.sqrt', (['(C3 / C1 / ALPHA_VISC)'], {}), '(C3 / C1 / ALPHA_VISC)\n', (1025, 1047), True, 'import numpy as np\n'), ((1281, 1302), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (1295, 1302), False, 'import os\n'), ((1810, 1841), 'numpy.mean', 'np.mean', (['grid_temps[grid_valid]'], {}), '(grid_temps[grid_valid])\n', (1817, 1841), True, 'import numpy as np\n'), ((1961, 1981), 'numpy.log10', 'np.log10', (['grid_temps'], {}), '(grid_temps)\n', (1969, 1981), True, 'import numpy as np\n'), ((1998, 2054), 'scipy.interpolate.RegularGridInterpolator', 'sp.interpolate.RegularGridInterpolator', (['mesh', 'grid_temps'], {}), '(mesh, grid_temps)\n', (2036, 2054), True, 'import scipy as sp\n'), ((2554, 2584), 'os.path.join', 'os.path.join', (['PATH_DATA', 'fname'], {}), '(PATH_DATA, fname)\n', (2566, 2584), False, 'import os\n'), ((2680, 2702), 'os.path.abspath', 'os.path.abspath', (['fname'], {}), '(fname)\n', (2695, 2702), False, 'import os\n'), ((3449, 3471), 'os.path.abspath', 'os.path.abspath', (['fname'], {}), '(fname)\n', (3464, 3471), False, 'import os\n'), ((4253, 4270), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (4263, 4270), True, 'import numpy as np\n'), ((4288, 4303), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (4296, 4303), True, 'import numpy as np\n'), ((4321, 4347), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'bool'}), '(shape, dtype=bool)\n', (4328, 4347), True, 'import numpy as np\n'), ((4371, 4394), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4392, 4394), False, 'import datetime\n'), ((4738, 4761), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4759, 4761), False, 'import datetime\n'), ((4837, 4857), 'numpy.isnan', 'np.isnan', (['grid_temps'], {}), '(grid_temps)\n', (4845, 4857), True, 'import numpy as np\n'), ((4875, 4900), 'numpy.nan_to_num', 'np.nan_to_num', (['grid_temps'], {}), '(grid_temps)\n', (4888, 4900), True, 'import numpy as np\n'), ((9339, 9358), 'numpy.copy', 'np.copy', (['grid_temps'], {}), '(grid_temps)\n', (9346, 9358), True, 'import numpy as np\n'), ((9624, 9644), 'tqdm.tqdm', 'tqdm.tqdm', (['bads_inds'], {}), '(bads_inds)\n', (9633, 9644), False, 'import tqdm\n'), ((11798, 11829), 'zcode.math.minmax', 'zmath.minmax', (['temps'], {'filter': '""">"""'}), "(temps, filter='>')\n", (11810, 11829), True, 'import zcode.math as zmath\n'), ((11841, 11872), 'zcode.plot.colormap', 'zplot.colormap', (['extr', '"""viridis"""'], {}), "(extr, 'viridis')\n", (11855, 11872), True, 'import zcode.plot as zplot\n'), ((11955, 12007), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[14, 14]', 'nrows': 'num', 'ncols': 'num'}), '(figsize=[14, 14], nrows=num, ncols=num)\n', (11967, 12007), True, 'import matplotlib.pyplot as plt\n'), ((12012, 12055), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.4)', 'wspace': '(0.4)'}), '(hspace=0.4, wspace=0.4)\n', (12031, 12055), True, 'import matplotlib.pyplot as plt\n'), ((12112, 12132), 'numpy.ndenumerate', 'np.ndenumerate', (['axes'], {}), '(axes)\n', (12126, 12132), True, 'import numpy as np\n'), ((881, 902), 'numpy.square', 'np.square', (['ALPHA_VISC'], {}), '(ALPHA_VISC)\n', (890, 902), True, 'import numpy as np\n'), ((995, 1015), 'numpy.sqrt', 
'np.sqrt', (['(1 - BETA_GP)'], {}), '(1 - BETA_GP)\n', (1002, 1015), True, 'import numpy as np\n'), ((1915, 1927), 'numpy.log10', 'np.log10', (['gg'], {}), '(gg)\n', (1923, 1927), True, 'import numpy as np\n'), ((2712, 2733), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (2721, 2733), False, 'import h5py\n'), ((3483, 3504), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (3497, 3504), False, 'import os\n'), ((3584, 3605), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (3593, 3605), False, 'import h5py\n'), ((4420, 4438), 'numpy.ndindex', 'np.ndindex', (['*shape'], {}), '(*shape)\n', (4430, 4438), True, 'import numpy as np\n'), ((4925, 4952), 'numpy.isclose', 'np.isclose', (['grid_temps', '(0.0)'], {}), '(grid_temps, 0.0)\n', (4935, 4952), True, 'import numpy as np\n'), ((7410, 7430), 'numpy.power', 'np.power', (['(10.0)', 'logt'], {}), '(10.0, logt)\n', (7418, 7430), True, 'import numpy as np\n'), ((8655, 8675), 'numpy.fabs', 'np.fabs', (['(heat - cool)'], {}), '(heat - cool)\n', (8662, 8675), True, 'import numpy as np\n'), ((9383, 9410), 'numpy.isclose', 'np.isclose', (['grid_temps', '(0.0)'], {}), '(grid_temps, 0.0)\n', (9393, 9410), True, 'import numpy as np\n'), ((17426, 17444), 'numpy.isscalar', 'np.isscalar', (['freqs'], {}), '(freqs)\n', (17437, 17444), True, 'import numpy as np\n'), ((17461, 17481), 'numpy.atleast_1d', 'np.atleast_1d', (['freqs'], {}), '(freqs)\n', (17474, 17481), True, 'import numpy as np\n'), ((18041, 18059), 'numpy.isscalar', 'np.isscalar', (['freqs'], {}), '(freqs)\n', (18052, 18059), True, 'import numpy as np\n'), ((18076, 18096), 'numpy.atleast_1d', 'np.atleast_1d', (['freqs'], {}), '(freqs)\n', (18089, 18096), True, 'import numpy as np\n'), ((18660, 18678), 'numpy.isscalar', 'np.isscalar', (['freqs'], {}), '(freqs)\n', (18671, 18678), True, 'import numpy as np\n'), ((18695, 18715), 'numpy.atleast_1d', 'np.atleast_1d', (['freqs'], {}), '(freqs)\n', (18708, 18715), True, 'import numpy as np\n'), ((21493, 21511), 'numpy.isscalar', 'np.isscalar', (['freqs'], {}), '(freqs)\n', (21504, 21511), True, 'import numpy as np\n'), ((21528, 21548), 'numpy.atleast_1d', 'np.atleast_1d', (['freqs'], {}), '(freqs)\n', (21541, 21548), True, 'import numpy as np\n'), ((22244, 22262), 'numpy.isscalar', 'np.isscalar', (['freqs'], {}), '(freqs)\n', (22255, 22262), True, 'import numpy as np\n'), ((22279, 22299), 'numpy.atleast_1d', 'np.atleast_1d', (['freqs'], {}), '(freqs)\n', (22292, 22299), True, 'import numpy as np\n'), ((22863, 22881), 'numpy.isscalar', 'np.isscalar', (['freqs'], {}), '(freqs)\n', (22874, 22881), True, 'import numpy as np\n'), ((22898, 22918), 'numpy.atleast_1d', 'np.atleast_1d', (['freqs'], {}), '(freqs)\n', (22911, 22918), True, 'import numpy as np\n'), ((3169, 3193), 'zcode.inout.get_file_size', 'zio.get_file_size', (['fname'], {}), '(fname)\n', (3186, 3193), True, 'import zcode.inout as zio\n'), ((4020, 4039), 'numpy.array', 'np.array', (['MASS_EXTR'], {}), '(MASS_EXTR)\n', (4028, 4039), True, 'import numpy as np\n'), ((4999, 5043), 'zcode.math.frac_str', 'zmath.frac_str', (['(grid_temps[grid_valid] > 0.0)'], {}), '(grid_temps[grid_valid] > 0.0)\n', (5013, 5043), True, 'import zcode.math as zmath\n'), ((5090, 5114), 'zcode.math.frac_str', 'zmath.frac_str', (['bads_nan'], {}), '(bads_nan)\n', (5104, 5114), True, 'import zcode.math as zmath\n'), ((5161, 5181), 'zcode.math.frac_str', 'zmath.frac_str', (['bads'], {}), '(bads)\n', (5175, 5181), True, 'import zcode.math as zmath\n'), ((5962, 
5979), 'numpy.sqrt', 'np.sqrt', (['(3 / rmin)'], {}), '(3 / rmin)\n', (5969, 5979), True, 'import numpy as np\n'), ((6083, 6101), 'numpy.log', 'np.log', (['mean_amp_a'], {}), '(mean_amp_a)\n', (6089, 6101), True, 'import numpy as np\n'), ((6546, 6567), 'numpy.power', 'np.power', (['rmin', '(-1.25)'], {}), '(rmin, -1.25)\n', (6554, 6567), True, 'import numpy as np\n'), ((6645, 6658), 'numpy.sqrt', 'np.sqrt', (['msol'], {}), '(msol)\n', (6652, 6658), True, 'import numpy as np\n'), ((6803, 6830), 'numpy.power', 'np.power', (['(alpha / 0.3)', '(-1.5)'], {}), '(alpha / 0.3, -1.5)\n', (6811, 6830), True, 'import numpy as np\n'), ((6852, 6883), 'numpy.power', 'np.power', (['((1 - beta) / 0.5)', '(1.5)'], {}), '((1 - beta) / 0.5, 1.5)\n', (6860, 6883), True, 'import numpy as np\n'), ((6884, 6908), 'numpy.power', 'np.power', (['(C1 / 0.5)', '(-1.5)'], {}), '(C1 / 0.5, -1.5)\n', (6892, 6908), True, 'import numpy as np\n'), ((7027, 7048), 'numpy.square', 'np.square', (['(alpha * C1)'], {}), '(alpha * C1)\n', (7036, 7048), True, 'import numpy as np\n'), ((7246, 7336), 'numpy.power', 'np.power', (['(62000000.0 * (temp / 1000000000.0) / (f_p / 1000000000000.0))', '(1 - alpha_crit)'], {}), '(62000000.0 * (temp / 1000000000.0) / (f_p / 1000000000000.0), 1 -\n alpha_crit)\n', (7254, 7336), True, 'import numpy as np\n'), ((7747, 7767), 'numpy.power', 'np.power', (['(10.0)', 'logt'], {}), '(10.0, logt)\n', (7755, 7767), True, 'import numpy as np\n'), ((8347, 8403), 'logging.log', 'logging.log', (['lvl', '"""FAILED to find electron temperature!"""'], {}), "(lvl, 'FAILED to find electron temperature!')\n", (8358, 8403), False, 'import logging\n'), ((8490, 8511), 'logging.log', 'logging.log', (['lvl', 'err'], {}), '(lvl, err)\n', (8501, 8511), False, 'import logging\n'), ((9203, 9224), 'logging.log', 'logging.log', (['lvl', 'err'], {}), '(lvl, err)\n', (9214, 9224), False, 'import logging\n'), ((9499, 9519), 'zcode.math.frac_str', 'zmath.frac_str', (['bads'], {}), '(bads)\n', (9513, 9519), True, 'import zcode.math as zmath\n'), ((9591, 9605), 'numpy.where', 'np.where', (['bads'], {}), '(bads)\n', (9599, 9605), True, 'import numpy as np\n'), ((10309, 10333), 'zcode.math.stats_str', 'zmath.stats_str', (['num_nbs'], {}), '(num_nbs)\n', (10324, 10333), True, 'import zcode.math as zmath\n'), ((10384, 10415), 'zcode.math.stats_str', 'zmath.stats_str', (['good_neighbors'], {}), '(good_neighbors)\n', (10399, 10415), True, 'import zcode.math as zmath\n'), ((10496, 10522), 'numpy.argsort', 'np.argsort', (['good_neighbors'], {}), '(good_neighbors)\n', (10506, 10522), True, 'import numpy as np\n'), ((10683, 10704), 'numpy.zeros', 'np.zeros', (['num_nbs[ii]'], {}), '(num_nbs[ii])\n', (10691, 10704), True, 'import numpy as np\n'), ((10822, 10846), 'numpy.count_nonzero', 'np.count_nonzero', (['values'], {}), '(values)\n', (10838, 10846), True, 'import numpy as np\n'), ((11518, 11540), 'zcode.math.stats_str', 'zmath.stats_str', (['goods'], {}), '(goods)\n', (11533, 11540), True, 'import zcode.math as zmath\n'), ((11593, 11632), 'zcode.math.stats_str', 'zmath.stats_str', (['grid_temps[grid_valid]'], {}), '(grid_temps[grid_valid])\n', (11608, 11632), True, 'import zcode.math as zmath\n'), ((13824, 13858), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {'indexing': '"""ij"""'}), "(xx, yy, indexing='ij')\n", (13835, 13858), True, 'import numpy as np\n'), ((16680, 16697), 'numpy.sqrt', 'np.sqrt', (['(3 / rmin)'], {}), '(3 / rmin)\n', (16687, 16697), True, 'import numpy as np\n'), ((16840, 16858), 'numpy.log', 'np.log', 
(['mean_amp_a'], {}), '(mean_amp_a)\n', (16846, 16858), True, 'import numpy as np\n'), ((17502, 17529), 'numpy.power', 'np.power', (['(S1 * self.s2)', '(1.6)'], {}), '(S1 * self.s2, 1.6)\n', (17510, 17529), True, 'import numpy as np\n'), ((17543, 17562), 'numpy.power', 'np.power', (['msol', '(1.2)'], {}), '(msol, 1.2)\n', (17551, 17562), True, 'import numpy as np\n'), ((17565, 17584), 'numpy.power', 'np.power', (['fedd', '(0.8)'], {}), '(fedd, 0.8)\n', (17573, 17584), True, 'import numpy as np\n'), ((17600, 17626), 'numpy.power', 'np.power', (['self.temp_e', '(4.2)'], {}), '(self.temp_e, 4.2)\n', (17608, 17626), True, 'import numpy as np\n'), ((17629, 17649), 'numpy.power', 'np.power', (['freqs', '(0.4)'], {}), '(freqs, 0.4)\n', (17637, 17649), True, 'import numpy as np\n'), ((17782, 17797), 'numpy.squeeze', 'np.squeeze', (['lnu'], {}), '(lnu)\n', (17792, 17797), True, 'import numpy as np\n'), ((18111, 18140), 'numpy.log', 'np.log', (['(self.rmax / self.rmin)'], {}), '(self.rmax / self.rmin)\n', (18117, 18140), True, 'import numpy as np\n'), ((18141, 18167), 'numpy.square', 'np.square', (['(ALPHA_VISC * C1)'], {}), '(ALPHA_VISC * C1)\n', (18150, 18167), True, 'import numpy as np\n'), ((18378, 18396), 'numpy.squeeze', 'np.squeeze', (['lbrems'], {}), '(lbrems)\n', (18388, 18396), True, 'import numpy as np\n'), ((18790, 18829), 'numpy.power', 'np.power', (['(freqs / f_p)', '(-self.alpha_crit)'], {}), '(freqs / f_p, -self.alpha_crit)\n', (18798, 18829), True, 'import numpy as np\n'), ((19000, 19015), 'numpy.squeeze', 'np.squeeze', (['lsp'], {}), '(lsp)\n', (19010, 19015), True, 'import numpy as np\n'), ((19201, 19227), 'numpy.power', 'np.power', (['self.rmin', '(-1.25)'], {}), '(self.rmin, -1.25)\n', (19209, 19227), True, 'import numpy as np\n'), ((19424, 19437), 'numpy.sqrt', 'np.sqrt', (['msol'], {}), '(msol)\n', (19431, 19437), True, 'import numpy as np\n'), ((19985, 20002), 'numpy.shape', 'np.shape', (['args[0]'], {}), '(args[0])\n', (19993, 20002), True, 'import numpy as np\n'), ((20747, 20764), 'numpy.sqrt', 'np.sqrt', (['(3 / rmin)'], {}), '(3 / rmin)\n', (20754, 20764), True, 'import numpy as np\n'), ((20907, 20925), 'numpy.log', 'np.log', (['mean_amp_a'], {}), '(mean_amp_a)\n', (20913, 20925), True, 'import numpy as np\n'), ((21569, 21596), 'numpy.power', 'np.power', (['(S1 * self.s2)', '(1.6)'], {}), '(S1 * self.s2, 1.6)\n', (21577, 21596), True, 'import numpy as np\n'), ((21763, 21782), 'numpy.power', 'np.power', (['fedd', '(0.8)'], {}), '(fedd, 0.8)\n', (21771, 21782), True, 'import numpy as np\n'), ((21832, 21852), 'numpy.power', 'np.power', (['freqs', '(0.4)'], {}), '(freqs, 0.4)\n', (21840, 21852), True, 'import numpy as np\n'), ((21985, 22000), 'numpy.squeeze', 'np.squeeze', (['lnu'], {}), '(lnu)\n', (21995, 22000), True, 'import numpy as np\n'), ((22314, 22343), 'numpy.log', 'np.log', (['(self.rmax / self.rmin)'], {}), '(self.rmax / self.rmin)\n', (22320, 22343), True, 'import numpy as np\n'), ((22344, 22370), 'numpy.square', 'np.square', (['(ALPHA_VISC * C1)'], {}), '(ALPHA_VISC * C1)\n', (22353, 22370), True, 'import numpy as np\n'), ((22581, 22599), 'numpy.squeeze', 'np.squeeze', (['lbrems'], {}), '(lbrems)\n', (22591, 22599), True, 'import numpy as np\n'), ((22993, 23032), 'numpy.power', 'np.power', (['(freqs / f_p)', '(-self.alpha_crit)'], {}), '(freqs / f_p, -self.alpha_crit)\n', (23001, 23032), True, 'import numpy as np\n'), ((23203, 23218), 'numpy.squeeze', 'np.squeeze', (['lsp'], {}), '(lsp)\n', (23213, 23218), True, 'import numpy as np\n'), ((23404, 23430), 
'numpy.power', 'np.power', (['self.rmin', '(-1.25)'], {}), '(self.rmin, -1.25)\n', (23412, 23430), True, 'import numpy as np\n'), ((23627, 23640), 'numpy.sqrt', 'np.sqrt', (['msol'], {}), '(msol)\n', (23634, 23640), True, 'import numpy as np\n'), ((796, 841), 'numpy.square', 'np.square', (['(ALPHA_VISC / (5.0 + 2 * EPS_PRIME))'], {}), '(ALPHA_VISC / (5.0 + 2 * EPS_PRIME))\n', (805, 841), True, 'import numpy as np\n'), ((4154, 4168), 'numpy.log10', 'np.log10', (['extr'], {}), '(extr)\n', (4162, 4168), True, 'import numpy as np\n'), ((6024, 6042), 'numpy.square', 'np.square', (['theta_e'], {}), '(theta_e)\n', (6033, 6042), True, 'import numpy as np\n'), ((6066, 6080), 'numpy.log', 'np.log', (['tau_es'], {}), '(tau_es)\n', (6072, 6080), True, 'import numpy as np\n'), ((6294, 6315), 'numpy.square', 'np.square', (['(alpha * C1)'], {}), '(alpha * C1)\n', (6303, 6315), True, 'import numpy as np\n'), ((6528, 6543), 'numpy.square', 'np.square', (['temp'], {}), '(temp)\n', (6537, 6543), True, 'import numpy as np\n'), ((6621, 6642), 'numpy.power', 'np.power', (['rmin', '(-1.75)'], {}), '(rmin, -1.75)\n', (6629, 6642), True, 'import numpy as np\n'), ((6685, 6704), 'numpy.power', 'np.power', (['fedd', '(1.5)'], {}), '(fedd, 1.5)\n', (6693, 6704), True, 'import numpy as np\n'), ((6707, 6724), 'numpy.power', 'np.power', (['temp', '(7)'], {}), '(temp, 7)\n', (6715, 6724), True, 'import numpy as np\n'), ((6780, 6802), 'numpy.power', 'np.power', (['(xm / 1000)', '(3)'], {}), '(xm / 1000, 3)\n', (6788, 6802), True, 'import numpy as np\n'), ((7007, 7026), 'numpy.log', 'np.log', (['(rmax / rmin)'], {}), '(rmax / rmin)\n', (7013, 7026), True, 'import numpy as np\n'), ((7689, 7701), 'numpy.log10', 'np.log10', (['t0'], {}), '(t0)\n', (7697, 7701), True, 'import numpy as np\n'), ((12843, 12883), 'zcode.math.spacing', 'zmath.spacing', (['grid[ii]', '"""log"""', 'num_test'], {}), "(grid[ii], 'log', num_test)\n", (12856, 12883), True, 'import zcode.math as zmath\n'), ((13943, 13971), 'numpy.count_nonzero', 'np.count_nonzero', (['(vals > 0.0)'], {}), '(vals > 0.0)\n', (13959, 13971), True, 'import numpy as np\n'), ((16759, 16777), 'numpy.square', 'np.square', (['theta_e'], {}), '(theta_e)\n', (16768, 16777), True, 'import numpy as np\n'), ((16823, 16837), 'numpy.log', 'np.log', (['tau_es'], {}), '(tau_es)\n', (16829, 16837), True, 'import numpy as np\n'), ((18230, 18245), 'numpy.square', 'np.square', (['fedd'], {}), '(fedd)\n', (18239, 18245), True, 'import numpy as np\n'), ((19183, 19198), 'numpy.square', 'np.square', (['temp'], {}), '(temp)\n', (19192, 19198), True, 'import numpy as np\n'), ((19395, 19421), 'numpy.power', 'np.power', (['self.rmin', '(-1.75)'], {}), '(self.rmin, -1.75)\n', (19403, 19421), True, 'import numpy as np\n'), ((19453, 19472), 'numpy.power', 'np.power', (['fedd', '(1.5)'], {}), '(fedd, 1.5)\n', (19461, 19472), True, 'import numpy as np\n'), ((19475, 19492), 'numpy.power', 'np.power', (['temp', '(7)'], {}), '(temp, 7)\n', (19483, 19492), True, 'import numpy as np\n'), ((20294, 20308), 'numpy.array', 'np.array', (['args'], {}), '(args)\n', (20302, 20308), True, 'import numpy as np\n'), ((20405, 20421), 'numpy.shape', 'np.shape', (['temp_e'], {}), '(temp_e)\n', (20413, 20421), True, 'import numpy as np\n'), ((20425, 20439), 'numpy.shape', 'np.shape', (['mass'], {}), '(mass)\n', (20433, 20439), True, 'import numpy as np\n'), ((20826, 20844), 'numpy.square', 'np.square', (['theta_e'], {}), '(theta_e)\n', (20835, 20844), True, 'import numpy as np\n'), ((20890, 20904), 'numpy.log', 'np.log', 
(['tau_es'], {}), '(tau_es)\n', (20896, 20904), True, 'import numpy as np\n'), ((21741, 21760), 'numpy.power', 'np.power', (['msol', '(1.2)'], {}), '(msol, 1.2)\n', (21749, 21760), True, 'import numpy as np\n'), ((21803, 21829), 'numpy.power', 'np.power', (['self.temp_e', '(4.2)'], {}), '(self.temp_e, 4.2)\n', (21811, 21829), True, 'import numpy as np\n'), ((22433, 22448), 'numpy.square', 'np.square', (['fedd'], {}), '(fedd)\n', (22442, 22448), True, 'import numpy as np\n'), ((23386, 23401), 'numpy.square', 'np.square', (['temp'], {}), '(temp)\n', (23395, 23401), True, 'import numpy as np\n'), ((23598, 23624), 'numpy.power', 'np.power', (['self.rmin', '(-1.75)'], {}), '(self.rmin, -1.75)\n', (23606, 23624), True, 'import numpy as np\n'), ((23656, 23675), 'numpy.power', 'np.power', (['fedd', '(1.5)'], {}), '(fedd, 1.5)\n', (23664, 23675), True, 'import numpy as np\n'), ((23678, 23695), 'numpy.power', 'np.power', (['temp', '(7)'], {}), '(temp, 7)\n', (23686, 23695), True, 'import numpy as np\n'), ((2121, 2133), 'numpy.log10', 'np.log10', (['xx'], {}), '(xx)\n', (2129, 2133), True, 'import numpy as np\n'), ((6276, 6291), 'numpy.square', 'np.square', (['fedd'], {}), '(fedd)\n', (6285, 6291), True, 'import numpy as np\n'), ((6507, 6527), 'numpy.sqrt', 'np.sqrt', (['(fedd / msol)'], {}), '(fedd / msol)\n', (6514, 6527), True, 'import numpy as np\n'), ((6593, 6613), 'numpy.power', 'np.power', (['(S1 * s2)', '(3)'], {}), '(S1 * s2, 3)\n', (6601, 6613), True, 'import numpy as np\n'), ((10959, 10987), 'numpy.log10', 'np.log10', (['values[values > 0]'], {}), '(values[values > 0])\n', (10967, 10987), True, 'import numpy as np\n'), ((14047, 14077), 'zcode.math.minmax', 'zmath.minmax', (['vals'], {'filter': '""">"""'}), "(vals, filter='>')\n", (14059, 14077), True, 'import zcode.math as zmath\n'), ((18181, 18222), 'numpy.exp', 'np.exp', (['(-H_PLNK * freqs / (K_BLTZ * temp))'], {}), '(-H_PLNK * freqs / (K_BLTZ * temp))\n', (18187, 18222), True, 'import numpy as np\n'), ((19162, 19182), 'numpy.sqrt', 'np.sqrt', (['(fedd / msol)'], {}), '(fedd / msol)\n', (19169, 19182), True, 'import numpy as np\n'), ((19362, 19387), 'numpy.power', 'np.power', (['(S1 * self.s2)', '(3)'], {}), '(S1 * self.s2, 3)\n', (19370, 19387), True, 'import numpy as np\n'), ((20096, 20108), 'numpy.shape', 'np.shape', (['aa'], {}), '(aa)\n', (20104, 20108), True, 'import numpy as np\n'), ((22384, 22425), 'numpy.exp', 'np.exp', (['(-H_PLNK * freqs / (K_BLTZ * temp))'], {}), '(-H_PLNK * freqs / (K_BLTZ * temp))\n', (22390, 22425), True, 'import numpy as np\n'), ((23365, 23385), 'numpy.sqrt', 'np.sqrt', (['(fedd / msol)'], {}), '(fedd / msol)\n', (23372, 23385), True, 'import numpy as np\n'), ((23565, 23590), 'numpy.power', 'np.power', (['(S1 * self.s2)', '(3)'], {}), '(S1 * self.s2, 3)\n', (23573, 23590), True, 'import numpy as np\n'), ((2302, 2314), 'numpy.log10', 'np.log10', (['xx'], {}), '(xx)\n', (2310, 2314), True, 'import numpy as np\n'), ((12754, 12771), 'numpy.ones', 'np.ones', (['num_test'], {}), '(num_test)\n', (12761, 12771), True, 'import numpy as np\n'), ((15063, 15082), 'numpy.isclose', 'np.isclose', (['iv', '(0.0)'], {}), '(iv, 0.0)\n', (15073, 15082), True, 'import numpy as np\n'), ((2399, 2415), 'zcode.math.minmax', 'zmath.minmax', (['gg'], {}), '(gg)\n', (2411, 2415), True, 'import zcode.math as zmath\n'), ((12934, 12948), 'numpy.array', 'np.array', (['test'], {}), '(test)\n', (12942, 12948), True, 'import numpy as np\n'), ((15044, 15059), 'numpy.isfinite', 'np.isfinite', (['iv'], {}), '(iv)\n', (15055, 15059), True, 
'import numpy as np\n'), ((20037, 20049), 'numpy.shape', 'np.shape', (['aa'], {}), '(aa)\n', (20045, 20049), True, 'import numpy as np\n'), ((14602, 14619), 'numpy.log10', 'np.log10', (['xx[idx]'], {}), '(xx[idx])\n', (14610, 14619), True, 'import numpy as np\n'), ((14683, 14700), 'numpy.log10', 'np.log10', (['yy[idx]'], {}), '(yy[idx])\n', (14691, 14700), True, 'import numpy as np\n')]
import numpy as np def zShift(seq, pos): """Return components of Z curve shift. zCurve[0] = (A+G)-(C+T) # purine/pyrimidine zCurve[1] = (A+C)-(G+T) # amino/keto zCurve[2] = (A+T)-(G+C) # weak/strong """ if seq[pos] == "A": return np.array([1, 1, 1]) if seq[pos] == "G": return np.array([1, -1, -1]) if seq[pos] == "C": return np.array([-1, 1, -1]) if seq[pos] == "T": return np.array([-1, -1, 1]) def zCurve(seq): """Return 3-dimensional Z curve corresponding to sequence. zcurve[n] = zcurve[n-1] + zShift[n] """ zcurve = np.zeros((len(seq), 3), dtype=int) zcurve[0] = zShift(seq, 0) for pos in range(1, len(seq)): zcurve[pos] = np.add(zcurve[pos - 1], zShift(seq, pos)) return zcurve
[ "numpy.array" ]
[((265, 284), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (273, 284), True, 'import numpy as np\n'), ((324, 345), 'numpy.array', 'np.array', (['[1, -1, -1]'], {}), '([1, -1, -1])\n', (332, 345), True, 'import numpy as np\n'), ((385, 406), 'numpy.array', 'np.array', (['[-1, 1, -1]'], {}), '([-1, 1, -1])\n', (393, 406), True, 'import numpy as np\n'), ((446, 467), 'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (454, 467), True, 'import numpy as np\n')]
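A minimal usage sketch of the Z-curve encoder in the code sample above (illustrative only; the toy sequence and the printed values are assumptions, not part of the dataset row, and the snippet assumes zShift/zCurve are in scope):

# Illustrative sketch: encode a toy DNA string with the zCurve function above.
seq = "ATGCGTA"
curve = zCurve(seq)
print(curve.shape)  # (7, 3): one cumulative (purine, amino, weak) point per base
print(curve[-1])    # net composition of the whole sequence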
# coding: utf-8 -*- ''' GFS.py contains utility functions for GFS ''' __all__ = ['get_akbk', 'get_pcoord', 'read_atcf'] import numpy as _np import pandas as _pd def get_akbk(): ''' Returns ak,bk for 64 level GFS model vcoord is obtained from global_fcst.fd/gfsio_module.f ak,bk are as computed from treadeo.gfsio.f for hybrid = .true. and idvc == 2 ''' vcoord = _np.array([1.0000000,0.99467099,0.98863202,0.98180002,0.97408301, \ 0.96538502,0.95560300,0.94463098,0.93235999,0.91867799,0.90347999, \ 0.88666302,0.86813903,0.84783000,0.82568502,0.80167699,0.77581102, \ 0.74813300,0.71872902,0.68773103,0.65531600,0.62170500,0.58715999, \ 0.55197400,0.51646298,0.48095500,0.44577801,0.41124901,0.37765899, \ 0.34526899,0.31430000,0.28492799,0.25728399,0.23145400,0.20748200, \ 0.18537199,0.16509899,0.14660800,0.12982300,0.11465500,0.10100200, \ 0.88756002E-01,0.77808000E-01,0.68048999E-01,0.59370000E-01, \ 0.51670998E-01,0.44854999E-01,0.38830999E-01,0.33514999E-01, \ 0.28829999E-01,0.24707999E-01,0.21083999E-01,0.17901000E-01, \ 0.15107000E-01,0.12658000E-01,0.10511000E-01,0.86310003E-02, \ 0.69849999E-02,0.55439998E-02,0.42840000E-02,0.31830000E-02, \ 0.22199999E-02,0.13780000E-02,0.64200000E-03,0.0000000]) ak = vcoord / 1000. bk = vcoord / 1. return ak,bk def get_pcoord(): ''' Returns the pressure levels in hPa of the native GFS model with 64 levels. OUTPUT: pres = pressure levels (hPa) assuming pref=1013.0 ''' ak,bk = get_akbk() pref = 101.3 pres = ak[:-1] + bk[:-1]*pref return pres * 10. def read_atcf(filename): ''' Read an ATCF file into a dataframe for ease of processing. INPUT: filename = ATCF filename The file contents are specified at: http://www.nrlmry.navy.mil/atcf_web/docs/database/new/abdeck.html OUTPUT: df = DataFrame containing the file contents ''' def _to_number(s): tmp = 0.1 * _np.float(s[:-1]) if s[-1] in ['S','W']: v = -1.0 * tmp if s[-1] in ['S'] else 360.0 - tmp else: v = tmp return v # column names names = ['BASIN','CY','YYYYMMDDHH','TECHNUM','TECH','TAU','LAT','LON','VMAX','MSLP','TY','RAD','WINDCODE','RAD1','RAD2','RAD3','RAD4','POUTER','ROUTER','RMW','GUSTS','EYE','SUBREGION','MAXSEAS','INITIALS','DIR','SPEED','STORMNAME','DEPTH','SEAS','SEASCODE','SEAS1','SEAS2','SEAS3','SEAS4','USERDEFINE1','USERDATA1','USERDEFINE2','USERDATA2','USERDEFINE3','USERDATA3','USERDEFINE4','USERDATA4','USERDEFINE5','USERDATA5'] # column datatypes dtypes = {'BASIN':str,'CY':str,'YYYYMMDDHH':str,'TECHNUM':_np.float,'TECH':str,'TAU':_np.float,'LAT':str,'LON':str,'VMAX':_np.float,'MSLP':_np.float,'TY':str,'RAD':_np.float,'WINDCODE':str,'RAD1':_np.float,'RAD2':_np.float,'RAD3':_np.float,'RAD4':_np.float,'POUTER':_np.float,'ROUTER':_np.float,'RMW':_np.float,'GUSTS':_np.float,'EYE':_np.float,'SUBREGION':str,'MAXSEAS':_np.float,'INITIALS':str,'DIR':_np.float,'SPEED':_np.float,'STORMNAME':str,'DEPTH':str,'SEAS':_np.float,'SEASCODE':str,'SEAS1':_np.float,'SEAS2':_np.float,'SEAS3':_np.float,'SEAS4':_np.float,'USERDEFINE1':str,'USERDATA1':str,'USERDEFINE2':str,'USERDATA2':str,'USERDEFINE3':str,'USERDATA3':str,'USERDEFINE4':str,'USERDATA4':str,'USERDEFINE5':str,'USERDATA5':str} df = _pd.read_csv(filename,skipinitialspace=True,header=None,names=names,dtype=dtypes) # convert YYYYMMDDHH into datetime df['YYYYMMDDHH'] = _pd.to_datetime(df['YYYYMMDDHH'], format='%Y%m%d%H') # set index columns index_cols = ['BASIN','CY','YYYYMMDDHH','TECHNUM','TECH','TAU','TY','SUBREGION'] df.set_index(index_cols, inplace=True) # drop columns that have no information df.dropna(axis=1,how='all',inplace=True) # convert Lat/Lon to 
floats from hemisphere info df['LAT'] = df['LAT'].apply(lambda f: _to_number(f)) df['LON'] = df['LON'].apply(lambda f: _to_number(f)) return df
[ "numpy.array", "pandas.to_datetime", "numpy.float", "pandas.read_csv" ]
[((417, 1206), 'numpy.array', '_np.array', (['[1.0, 0.99467099, 0.98863202, 0.98180002, 0.97408301, 0.96538502, 0.955603,\n 0.94463098, 0.93235999, 0.91867799, 0.90347999, 0.88666302, 0.86813903,\n 0.84783, 0.82568502, 0.80167699, 0.77581102, 0.748133, 0.71872902, \n 0.68773103, 0.655316, 0.621705, 0.58715999, 0.551974, 0.51646298, \n 0.480955, 0.44577801, 0.41124901, 0.37765899, 0.34526899, 0.3143, \n 0.28492799, 0.25728399, 0.231454, 0.207482, 0.18537199, 0.16509899, \n 0.146608, 0.129823, 0.114655, 0.101002, 0.088756002, 0.077808, \n 0.068048999, 0.05937, 0.051670998, 0.044854999, 0.038830999, \n 0.033514999, 0.028829999, 0.024707999, 0.021083999, 0.017901, 0.015107,\n 0.012658, 0.010511, 0.0086310003, 0.0069849999, 0.0055439998, 0.004284,\n 0.003183, 0.0022199999, 0.001378, 0.000642, 0.0]'], {}), '([1.0, 0.99467099, 0.98863202, 0.98180002, 0.97408301, 0.96538502,\n 0.955603, 0.94463098, 0.93235999, 0.91867799, 0.90347999, 0.88666302, \n 0.86813903, 0.84783, 0.82568502, 0.80167699, 0.77581102, 0.748133, \n 0.71872902, 0.68773103, 0.655316, 0.621705, 0.58715999, 0.551974, \n 0.51646298, 0.480955, 0.44577801, 0.41124901, 0.37765899, 0.34526899, \n 0.3143, 0.28492799, 0.25728399, 0.231454, 0.207482, 0.18537199, \n 0.16509899, 0.146608, 0.129823, 0.114655, 0.101002, 0.088756002, \n 0.077808, 0.068048999, 0.05937, 0.051670998, 0.044854999, 0.038830999, \n 0.033514999, 0.028829999, 0.024707999, 0.021083999, 0.017901, 0.015107,\n 0.012658, 0.010511, 0.0086310003, 0.0069849999, 0.0055439998, 0.004284,\n 0.003183, 0.0022199999, 0.001378, 0.000642, 0.0])\n', (426, 1206), True, 'import numpy as _np\n'), ((3502, 3591), 'pandas.read_csv', '_pd.read_csv', (['filename'], {'skipinitialspace': '(True)', 'header': 'None', 'names': 'names', 'dtype': 'dtypes'}), '(filename, skipinitialspace=True, header=None, names=names,\n dtype=dtypes)\n', (3514, 3591), True, 'import pandas as _pd\n'), ((3647, 3699), 'pandas.to_datetime', '_pd.to_datetime', (["df['YYYYMMDDHH']"], {'format': '"""%Y%m%d%H"""'}), "(df['YYYYMMDDHH'], format='%Y%m%d%H')\n", (3662, 3699), True, 'import pandas as _pd\n'), ((2125, 2142), 'numpy.float', '_np.float', (['s[:-1]'], {}), '(s[:-1])\n', (2134, 2142), True, 'import numpy as _np\n')]
from itertools import combinations from sklearn.metrics.pairwise import cosine_similarity import numpy as np embeddings = {} with open("scripts/etm_w2v_embedding.txt", "r") as file: for line in file.readlines(): splitted = line.split() word = splitted[0] embeddings[word] = np.array([float(n) for n in splitted[1:]]) # print(f'Embeddings: {list(embeddings.keys())[:5]}') topics = [] with open("scripts/topics.txt", "r") as file: for line in file.readlines(): topics.append(line.split()) print(f'Topics: {topics[:5]}') topic_embeddings = [[embeddings[word] for word in topic] for topic in topics] # print(f'Topic embeddings: {topic_embeddings[:5]}') print(f'Topic embeddings length: {len(topic_embeddings)}') combs = list(combinations(range(len(topic_embeddings)), 2)) # print(f'total combinations = {list(combs)}') print(f'combs length = {len(combs)}') similarities = np.array([]) for xi, yi in combs: print(f'xi={xi}') print(f'yi={yi}') print(f'topic_embeddings[xi]={topic_embeddings[xi]}') print(f'topic_embeddings[yi]={topic_embeddings[yi]}') similarity = np.average(cosine_similarity(topic_embeddings[xi], topic_embeddings[yi])) print(f'avg similarity = {similarity}') print(f'avg cos = {np.average(similarity)}') similarities = np.append(similarities, similarity) print(f'similarities length = {len(similarities)}') print(similarities) max_idx = np.argmax(similarities) print(f'max idx similarities = {max_idx}') print(f'max similarity = {similarities[max_idx]}') first_topic_idx, second_topic_idx = combs[max_idx] print(f'best comb = first: {first_topic_idx}, second: {second_topic_idx}') print(f'most similar topics: 1 - {", ".join(topics[first_topic_idx])}\n2 - {", ".join(topics[second_topic_idx])}\n') # X = np.array([[1, 1, 1], [0.98, 0.1, 0.21], [0, 0, 0], [0.8, 0, 1]]) # Y = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 1], [0.99, 1, 0.7]]) # print(X[0:1]) # similarity = cosine_similarity(X, Y) # print(f'avg cos = {np.average(similarity)}') # print(f'cos = {similarity}\n')
[ "sklearn.metrics.pairwise.cosine_similarity", "numpy.average", "numpy.argmax", "numpy.append", "numpy.array" ]
[((914, 926), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (922, 926), True, 'import numpy as np\n'), ((1429, 1452), 'numpy.argmax', 'np.argmax', (['similarities'], {}), '(similarities)\n', (1438, 1452), True, 'import numpy as np\n'), ((1311, 1346), 'numpy.append', 'np.append', (['similarities', 'similarity'], {}), '(similarities, similarity)\n', (1320, 1346), True, 'import numpy as np\n'), ((1136, 1197), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['topic_embeddings[xi]', 'topic_embeddings[yi]'], {}), '(topic_embeddings[xi], topic_embeddings[yi])\n', (1153, 1197), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((1266, 1288), 'numpy.average', 'np.average', (['similarity'], {}), '(similarity)\n', (1276, 1288), True, 'import numpy as np\n')]
from __future__ import print_function, absolute_import import os.path as osp import numpy as np from ..utils.data import Dataset from ..utils.osutils import mkdir_if_missing from ..utils.serialization import write_json, read_json from ..utils.data.dataset import _pluck class SynergyReID(Dataset): md5 = '05050b5d9388563021315a81b531db7d' def __init__(self, root, split_id=0, num_val=100, download=True): super(SynergyReID, self).__init__(root, split_id=split_id) if download: self.download() if not self._check_integrity(): raise RuntimeError("Dataset not found or corrupted. " + "You can use download=True to download it.") self.load(num_val) def download(self): if self._check_integrity(): print("Files already downloaded and verified") return import hashlib import shutil from glob import glob from zipfile import ZipFile raw_dir = osp.join(self.root, 'raw') mkdir_if_missing(raw_dir) # Open the raw zip file fpath = osp.join(raw_dir, 'synergyreid_data.zip') if osp.isfile(fpath) and \ hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5: print("Using downloaded file: " + fpath) else: raise RuntimeError("Please move data to {} " .format(fpath)) # Extract the file exdir = osp.join(raw_dir, 'data_reid') if not osp.isdir(exdir): print("Extracting zip file") with ZipFile(fpath) as z: z.extractall(path=raw_dir) # Format images_dir = osp.join(self.root, 'images') mkdir_if_missing(images_dir) # 487 identities (+1 for background) with 2 camera views each # Here we use the convention that camera 0 is for query and # camera 1 is for gallery identities = [[[] for _ in range(2)] for _ in range(487)] def register(subdir): fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpeg'))) pids = set() for fpath in fpaths: fname = osp.basename(fpath) pid = int(fname.split('_')[0]) cam = 1 if 'gallery' in subdir else 0 pids.add(pid) fname = ('{:08d}_{:02d}_{:04d}.jpg' .format(pid, cam, len(identities[pid][cam]))) identities[pid][cam].append(fname) shutil.copy(fpath, osp.join(images_dir, fname)) return pids trainval_pids = register('reid_training') query_val_pids = register('reid_val/query') gallery_val_pids = register('reid_val/gallery') assert query_val_pids <= gallery_val_pids assert trainval_pids.isdisjoint(query_val_pids) identities_test = [[[] for _ in range(2)] for _ in range(9172)] def register_test(subdir, n=0): fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpeg'))) pids = set() for pindx, fpath in enumerate(fpaths): fname = osp.basename(fpath) pid = int(fname.split('.')[0]) cam = 1 if 'gallery' in subdir else 0 pids.add(pid) fname = ('{:08d}_{:02d}_{:04d}.jpg' .format(pid, cam, 0)) identities_test[pindx+n][cam].append(fname) shutil.copy(fpath, osp.join(images_dir, fname)) return pids query_test_pids = register_test('reid_test/query') gallery_test_pids = register_test('reid_test/gallery', n=len(query_test_pids)) # Save the training / val / test splits splits = [{ 'trainval': sorted(list(trainval_pids)), 'query_val': sorted(list(query_val_pids)), 'gallery_val': sorted(list(gallery_val_pids)), 'query_test': sorted(list(query_test_pids)), 'gallery_test': sorted(list(gallery_test_pids))}] write_json(splits, osp.join(self.root, 'splits.json')) # Save meta information into a json file meta = {'name': 'SynergyReID', 'shot': 'multiple', 'num_cameras': 2, 'identities': identities, 'identities_test': identities_test} write_json(meta, osp.join(self.root, 'meta.json')) def load(self, verbose=True): splits = read_json(osp.join(self.root, 'splits.json')) if self.split_id >= len(splits): raise ValueError("split_id exceeds 
total splits {}" .format(len(splits))) self.split = splits[self.split_id] trainval_pids = np.concatenate((np.asarray(self.split['trainval']), np.asarray(self.split['query_val']))) def _pluck_val(identities, indices, relabel=False, cam=0): ret = [] for index, pid in enumerate(indices): pid_images = identities[pid] for camid, cam_images in enumerate(pid_images): if camid == cam: for fname in cam_images: name = osp.splitext(fname)[0] x, y, _ = map(int, name.split('_')) assert pid == x and camid == y if relabel: ret.append((fname, index, camid)) else: ret.append((fname, pid, camid)) return ret def _pluck_test(identities, indices, n=0): ret = [] for index, pid in enumerate(indices): pid_images = identities[index+n] for camid, cam_images in enumerate(pid_images): for fname in cam_images: ret.append((fname, pid, camid)) return ret self.meta = read_json(osp.join(self.root, 'meta.json')) identities = self.meta['identities'] identities_test = self.meta['identities_test'] self.train = _pluck(identities, self.split['trainval'], relabel=True) self.trainval = _pluck(identities, trainval_pids, relabel=True) self.query_val = _pluck_val(identities, self.split['query_val'], cam=0) self.gallery_val = _pluck_val(identities, self.split['gallery_val'], cam=1) self.query_test = _pluck_test(identities_test, self.split['query_test']) self.gallery_test = _pluck_test(identities_test, self.split['gallery_test'], n=len(self.split['query_test'])) self.num_train_ids = len(self.split['trainval']) self.num_val_ids = len(self.split['query_val']) self.num_trainval_ids = len(trainval_pids) if verbose: print(self.__class__.__name__, "dataset loaded") print(" subset | # ids | # images") print(" ---------------------------") print(" train | {:5d} | {:8d}" .format(self.num_train_ids, len(self.train))) print(" query val | {:5d} | {:8d}" .format(len(self.split['query_val']), len(self.query_val))) print(" gallery val | {:5d} | {:8d}" .format(len(self.split['gallery_val']), len(self.gallery_val))) print(" trainval | {:5d} | {:8d}" .format(self.num_trainval_ids, len(self.trainval))) print(" ---------------------------") print(" query test | {:5d} | {:8d}" .format(len(self.split['query_test']), len(self.query_test))) print(" gallery test | {:5d} | {:8d}" .format(len(self.split['gallery_test']), len(self.gallery_test)))
[ "zipfile.ZipFile", "os.path.join", "numpy.asarray", "os.path.splitext", "os.path.isfile", "os.path.isdir", "os.path.basename" ]
[((1017, 1043), 'os.path.join', 'osp.join', (['self.root', '"""raw"""'], {}), "(self.root, 'raw')\n", (1025, 1043), True, 'import os.path as osp\n'), ((1127, 1168), 'os.path.join', 'osp.join', (['raw_dir', '"""synergyreid_data.zip"""'], {}), "(raw_dir, 'synergyreid_data.zip')\n", (1135, 1168), True, 'import os.path as osp\n'), ((1493, 1523), 'os.path.join', 'osp.join', (['raw_dir', '"""data_reid"""'], {}), "(raw_dir, 'data_reid')\n", (1501, 1523), True, 'import os.path as osp\n'), ((1718, 1747), 'os.path.join', 'osp.join', (['self.root', '"""images"""'], {}), "(self.root, 'images')\n", (1726, 1747), True, 'import os.path as osp\n'), ((1180, 1197), 'os.path.isfile', 'osp.isfile', (['fpath'], {}), '(fpath)\n', (1190, 1197), True, 'import os.path as osp\n'), ((1539, 1555), 'os.path.isdir', 'osp.isdir', (['exdir'], {}), '(exdir)\n', (1548, 1555), True, 'import os.path as osp\n'), ((4136, 4170), 'os.path.join', 'osp.join', (['self.root', '"""splits.json"""'], {}), "(self.root, 'splits.json')\n", (4144, 4170), True, 'import os.path as osp\n'), ((4402, 4434), 'os.path.join', 'osp.join', (['self.root', '"""meta.json"""'], {}), "(self.root, 'meta.json')\n", (4410, 4434), True, 'import os.path as osp\n'), ((4498, 4532), 'os.path.join', 'osp.join', (['self.root', '"""splits.json"""'], {}), "(self.root, 'splits.json')\n", (4506, 4532), True, 'import os.path as osp\n'), ((6020, 6052), 'os.path.join', 'osp.join', (['self.root', '"""meta.json"""'], {}), "(self.root, 'meta.json')\n", (6028, 6052), True, 'import os.path as osp\n'), ((1615, 1629), 'zipfile.ZipFile', 'ZipFile', (['fpath'], {}), '(fpath)\n', (1622, 1629), False, 'from zipfile import ZipFile\n'), ((2206, 2225), 'os.path.basename', 'osp.basename', (['fpath'], {}), '(fpath)\n', (2218, 2225), True, 'import os.path as osp\n'), ((3168, 3187), 'os.path.basename', 'osp.basename', (['fpath'], {}), '(fpath)\n', (3180, 3187), True, 'import os.path as osp\n'), ((4774, 4808), 'numpy.asarray', 'np.asarray', (["self.split['trainval']"], {}), "(self.split['trainval'])\n", (4784, 4808), True, 'import numpy as np\n'), ((4849, 4884), 'numpy.asarray', 'np.asarray', (["self.split['query_val']"], {}), "(self.split['query_val'])\n", (4859, 4884), True, 'import numpy as np\n'), ((2088, 2121), 'os.path.join', 'osp.join', (['exdir', 'subdir', '"""*.jpeg"""'], {}), "(exdir, subdir, '*.jpeg')\n", (2096, 2121), True, 'import os.path as osp\n'), ((2566, 2593), 'os.path.join', 'osp.join', (['images_dir', 'fname'], {}), '(images_dir, fname)\n', (2574, 2593), True, 'import os.path as osp\n'), ((3032, 3065), 'os.path.join', 'osp.join', (['exdir', 'subdir', '"""*.jpeg"""'], {}), "(exdir, subdir, '*.jpeg')\n", (3040, 3065), True, 'import os.path as osp\n'), ((3513, 3540), 'os.path.join', 'osp.join', (['images_dir', 'fname'], {}), '(images_dir, fname)\n', (3521, 3540), True, 'import os.path as osp\n'), ((5256, 5275), 'os.path.splitext', 'osp.splitext', (['fname'], {}), '(fname)\n', (5268, 5275), True, 'import os.path as osp\n')]
from PIL import Image
import numpy as np
import os

def main():
    # Image.open (not Image(...)) is the PIL call that loads a file from disk.
    img = Image.open(os.path.join('..', 'img', 'paras_prf_pic.jpeg'))
    aray = np.array(img)
    r, g, b = np.split(aray, 3, axis=2)
    r = r.reshape(-1)
    g = g.reshape(-1)
    b = b.reshape(-1)
    # Luma-weighted grayscale conversion of each (r, g, b) pixel.
    bitmap = list(map(lambda x: 0.299*x[0] + 0.587*x[1] + 0.114*x[2], zip(r, g, b)))
    bitmap = np.array(bitmap).reshape([aray.shape[0], aray.shape[1]])
    # Binarize at the 128 threshold and scale back to 0/255.
    bitmap = np.dot((bitmap > 128).astype(float), 255)
    im = Image.fromarray(bitmap.astype(np.uint8))

if __name__ == "__main__":
    main()
[ "numpy.array", "numpy.split", "os.path.join" ]
[((140, 153), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (148, 153), True, 'import numpy as np\n'), ((168, 193), 'numpy.split', 'np.split', (['aray', '(3)'], {'axis': '(2)'}), '(aray, 3, axis=2)\n', (176, 193), True, 'import numpy as np\n'), ((80, 127), 'os.path.join', 'os.path.join', (['""".."""', '"""img"""', '"""paras_prf_pic.jpeg"""'], {}), "('..', 'img', 'paras_prf_pic.jpeg')\n", (92, 127), False, 'import os\n'), ((354, 370), 'numpy.array', 'np.array', (['bitmap'], {}), '(bitmap)\n', (362, 370), True, 'import numpy as np\n')]
import subprocess import PIL from PIL import Image import numpy as np import os import shutil import re script_path = os.path.dirname(os.path.realpath(__file__)) temp_img_dir_path = os.path.join(script_path, 'temp_imgs') def arr_to_mp4(arr, output_path, framerate=30, resolution_str=None, temp_dir=temp_img_dir_path): ''' arr shape should be (frames, height, width, 3) ''' use_res = resolution_str != None if use_res: match = re.match(r'\d+x\d+', resolution_str) if not match: use_res = False try: os.mkdir(temp_dir) except Exception as e: print(e) arr = arr.astype('uint8') for i in range(arr.shape[0]): imgarr = arr[i] img = Image.fromarray(imgarr) img.save(os.path.join(temp_dir, str(i)+'.png')) cmd = '' if use_res: cmd = 'ffmpeg -framerate {0} -i {1}/%d.png -pix_fmt yuv420p -s {2} {3}'.format(framerate, temp_dir, resolution_str, output_path) else: cmd = 'ffmpeg -framerate {0} -i {1}/%d.png -pix_fmt yuv420p {2}'.format(framerate, temp_dir, output_path) subprocess.call(cmd.split(' ')) shutil.rmtree(temp_dir) if __name__ == "__main__": arr = np.random.randint(0, 255, (120, 256, 256, 3), dtype="uint8") arr_to_mp4(arr, 'out1.mp4', resolution_str="256x256") # produces out.mp4 which is 4 seconds long of image noise
[ "PIL.Image.fromarray", "os.path.join", "re.match", "os.path.realpath", "numpy.random.randint", "os.mkdir", "shutil.rmtree" ]
[((182, 220), 'os.path.join', 'os.path.join', (['script_path', '"""temp_imgs"""'], {}), "(script_path, 'temp_imgs')\n", (194, 220), False, 'import os\n'), ((134, 160), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (150, 160), False, 'import os\n'), ((1134, 1157), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (1147, 1157), False, 'import shutil\n'), ((1196, 1256), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(120, 256, 256, 3)'], {'dtype': '"""uint8"""'}), "(0, 255, (120, 256, 256, 3), dtype='uint8')\n", (1213, 1256), True, 'import numpy as np\n'), ((455, 492), 're.match', 're.match', (['"""\\\\d+x\\\\d+"""', 'resolution_str'], {}), "('\\\\d+x\\\\d+', resolution_str)\n", (463, 492), False, 'import re\n'), ((559, 577), 'os.mkdir', 'os.mkdir', (['temp_dir'], {}), '(temp_dir)\n', (567, 577), False, 'import os\n'), ((724, 747), 'PIL.Image.fromarray', 'Image.fromarray', (['imgarr'], {}), '(imgarr)\n', (739, 747), False, 'from PIL import Image\n')]
import sys sys.path.append('../') import caffe2_paths import numpy as np import glob from itertools import product import pinn.preproc as preproc import pinn.data_reader as data_reader import matplotlib.pyplot as plt import pickle import os # ----------------- Preprocessing -------------------- vds = np.concatenate((np.linspace(-0.1, -0.01, 10),np.linspace(0.01, 0.3, 30))) # print(vds) vbg = np.linspace(-0.1, 0.3, 41) # print(vbg) vtg = np.linspace(-0.1, 0.3, 41) # print(vtg) id_file = glob.glob('./transiXOR_data/current_D9.npy') db_path = 'db/' id_data = np.load(id_file[0]) # !!CAUTION!! If use batch direct weighted L1 loss, # make sure no zero label in the training data # Future version will address this issue internally. # selected_vds_idx = [1, 5, 9, 12, 15, 17, 18, 19, 20] # vds = vds[selected_vds_idx] # id_data = id_data[selected_vds_idx,:,:] id_data = np.concatenate((id_data[0:10,:,:],id_data[11:,:,:])) ## Check whether zero label exit assert np.min(np.abs(id_data).flatten()) > 1e-9, "Zero exist in labels" # vds, vbg, vtg, id print('original data shape: ' + str(id_data.shape) + '; ' + str(id_data.shape[0] * id_data.shape[1] * id_data.shape[2]) ) iter_lst = list(product(vds, vbg, vtg)) vds_train = np.expand_dims(np.array([e[0] for e in iter_lst], dtype=np.float32), axis=1) vbg_train = np.array([e[1] for e in iter_lst], dtype=np.float32) vtg_train = np.array([e[2] for e in iter_lst], dtype=np.float32) id_train = np.expand_dims(id_data.flatten(), axis=1).astype(np.float32) vg_train = np.column_stack((vtg_train, vbg_train)) print('--- Original shape: ') print(vg_train.shape) print(vds_train.shape) print(id_train.shape) ## Using the fact that vtg and vbg are interchangeable ## CAUTION: This invariance may not be true for experimental data vg_train = np.sum(vg_train, axis=1, keepdims=True) ## random select train/eval = 0.9/0.1 np.random.seed = 42 data_arrays = [vg_train, vds_train, id_train] permu = np.random.permutation(len(data_arrays[0])) num_eval = int(len(data_arrays[0])*0.1) data_arrays = [e[permu] for e in data_arrays] data_arrays_eval = [e[0:num_eval] for e in data_arrays] data_arrays_train = [e[num_eval:] for e in data_arrays] ## Odd for train, even for eval # vg_eval = vg_train[::2]; vg_train = vg_train[1::2] # vds_eval = vds_train[::2]; vds_train = vds_train[1::2] # id_eval = id_train[::2]; id_train = id_train[1::2] # data_arrays_train = [vg_train, vds_train, id_train] # data_arrays_eval = [vg_eval, vds_eval, id_eval] ## Check shape of train and eval dataset print('--- Train/Eval shape: ') print( data_arrays_train[0].shape, data_arrays_train[1].shape, data_arrays_train[2].shape ) print( data_arrays_eval[0].shape, data_arrays_eval[1].shape, data_arrays_eval[2].shape ) scale, vg_shift = preproc.compute_dc_meta(*data_arrays_train) preproc_param = { 'scale' : scale, 'vg_shift' : vg_shift, } print(preproc_param) ## Saving the preproc param preproc_data_arrays_train = preproc.dc_iv_preproc( data_arrays_train[0], data_arrays_train[1], data_arrays_train[2], preproc_param['scale'], preproc_param['vg_shift'] ) preproc_data_arrays_eval = preproc.dc_iv_preproc( data_arrays_eval[0], data_arrays_eval[1], data_arrays_eval[2], preproc_param['scale'], preproc_param['vg_shift'] ) # Only expand the dim if the number of dimension is 1 preproc_data_arrays_train = [np.expand_dims( x, axis=1) if x.ndim == 1 else x for x in preproc_data_arrays_train] preproc_data_arrays_eval = [np.expand_dims( x, axis=1) if x.ndim == 1 else x for x in preproc_data_arrays_eval] # Write to database if 
os.path.isfile(db_path+'train.minidb'): print("XXX Delete the old train database...") os.remove(db_path+'train.minidb') if os.path.isfile(db_path+'eval.minidb'): print("XXX Delete the old eval database...") os.remove(db_path+'eval.minidb') data_reader.write_db('minidb', db_path+'train.minidb', preproc_data_arrays_train) data_reader.write_db('minidb', db_path+'eval.minidb', preproc_data_arrays_eval) pickle.dump(preproc_param, open(db_path+'preproc_param.p', 'wb'))
[ "pinn.data_reader.write_db", "numpy.abs", "itertools.product", "numpy.column_stack", "os.path.isfile", "numpy.array", "numpy.linspace", "numpy.sum", "numpy.concatenate", "pinn.preproc.dc_iv_preproc", "numpy.expand_dims", "numpy.load", "sys.path.append", "pinn.preproc.compute_dc_meta", "glob.glob", "os.remove" ]
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((396, 422), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(0.3)', '(41)'], {}), '(-0.1, 0.3, 41)\n', (407, 422), True, 'import numpy as np\n'), ((442, 468), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(0.3)', '(41)'], {}), '(-0.1, 0.3, 41)\n', (453, 468), True, 'import numpy as np\n'), ((492, 536), 'glob.glob', 'glob.glob', (['"""./transiXOR_data/current_D9.npy"""'], {}), "('./transiXOR_data/current_D9.npy')\n", (501, 536), False, 'import glob\n'), ((564, 583), 'numpy.load', 'np.load', (['id_file[0]'], {}), '(id_file[0])\n', (571, 583), True, 'import numpy as np\n'), ((898, 955), 'numpy.concatenate', 'np.concatenate', (['(id_data[0:10, :, :], id_data[11:, :, :])'], {}), '((id_data[0:10, :, :], id_data[11:, :, :]))\n', (912, 955), True, 'import numpy as np\n'), ((1346, 1398), 'numpy.array', 'np.array', (['[e[1] for e in iter_lst]'], {'dtype': 'np.float32'}), '([e[1] for e in iter_lst], dtype=np.float32)\n', (1354, 1398), True, 'import numpy as np\n'), ((1411, 1463), 'numpy.array', 'np.array', (['[e[2] for e in iter_lst]'], {'dtype': 'np.float32'}), '([e[2] for e in iter_lst], dtype=np.float32)\n', (1419, 1463), True, 'import numpy as np\n'), ((1547, 1586), 'numpy.column_stack', 'np.column_stack', (['(vtg_train, vbg_train)'], {}), '((vtg_train, vbg_train))\n', (1562, 1586), True, 'import numpy as np\n'), ((1817, 1856), 'numpy.sum', 'np.sum', (['vg_train'], {'axis': '(1)', 'keepdims': '(True)'}), '(vg_train, axis=1, keepdims=True)\n', (1823, 1856), True, 'import numpy as np\n'), ((2793, 2836), 'pinn.preproc.compute_dc_meta', 'preproc.compute_dc_meta', (['*data_arrays_train'], {}), '(*data_arrays_train)\n', (2816, 2836), True, 'import pinn.preproc as preproc\n'), ((2979, 3121), 'pinn.preproc.dc_iv_preproc', 'preproc.dc_iv_preproc', (['data_arrays_train[0]', 'data_arrays_train[1]', 'data_arrays_train[2]', "preproc_param['scale']", "preproc_param['vg_shift']"], {}), "(data_arrays_train[0], data_arrays_train[1],\n data_arrays_train[2], preproc_param['scale'], preproc_param['vg_shift'])\n", (3000, 3121), True, 'import pinn.preproc as preproc\n'), ((3152, 3291), 'pinn.preproc.dc_iv_preproc', 'preproc.dc_iv_preproc', (['data_arrays_eval[0]', 'data_arrays_eval[1]', 'data_arrays_eval[2]', "preproc_param['scale']", "preproc_param['vg_shift']"], {}), "(data_arrays_eval[0], data_arrays_eval[1],\n data_arrays_eval[2], preproc_param['scale'], preproc_param['vg_shift'])\n", (3173, 3291), True, 'import pinn.preproc as preproc\n'), ((3600, 3640), 'os.path.isfile', 'os.path.isfile', (["(db_path + 'train.minidb')"], {}), "(db_path + 'train.minidb')\n", (3614, 3640), False, 'import os\n'), ((3725, 3764), 'os.path.isfile', 'os.path.isfile', (["(db_path + 'eval.minidb')"], {}), "(db_path + 'eval.minidb')\n", (3739, 3764), False, 'import os\n'), ((3844, 3931), 'pinn.data_reader.write_db', 'data_reader.write_db', (['"""minidb"""', "(db_path + 'train.minidb')", 'preproc_data_arrays_train'], {}), "('minidb', db_path + 'train.minidb',\n preproc_data_arrays_train)\n", (3864, 3931), True, 'import pinn.data_reader as data_reader\n'), ((3926, 4011), 'pinn.data_reader.write_db', 'data_reader.write_db', (['"""minidb"""', "(db_path + 'eval.minidb')", 'preproc_data_arrays_eval'], {}), "('minidb', db_path + 'eval.minidb',\n preproc_data_arrays_eval)\n", (3946, 4011), True, 'import pinn.data_reader as data_reader\n'), ((1221, 1243), 'itertools.product', 'product', (['vds', 'vbg', 'vtg'], {}), '(vds, vbg, 
vtg)\n', (1228, 1243), False, 'from itertools import product\n'), ((1272, 1324), 'numpy.array', 'np.array', (['[e[0] for e in iter_lst]'], {'dtype': 'np.float32'}), '([e[0] for e in iter_lst], dtype=np.float32)\n', (1280, 1324), True, 'import numpy as np\n'), ((3688, 3723), 'os.remove', 'os.remove', (["(db_path + 'train.minidb')"], {}), "(db_path + 'train.minidb')\n", (3697, 3723), False, 'import os\n'), ((3811, 3845), 'os.remove', 'os.remove', (["(db_path + 'eval.minidb')"], {}), "(db_path + 'eval.minidb')\n", (3820, 3845), False, 'import os\n'), ((319, 347), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(-0.01)', '(10)'], {}), '(-0.1, -0.01, 10)\n', (330, 347), True, 'import numpy as np\n'), ((348, 374), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.3)', '(30)'], {}), '(0.01, 0.3, 30)\n', (359, 374), True, 'import numpy as np\n'), ((3378, 3403), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (3392, 3403), True, 'import numpy as np\n'), ((3492, 3517), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (3506, 3517), True, 'import numpy as np\n'), ((999, 1014), 'numpy.abs', 'np.abs', (['id_data'], {}), '(id_data)\n', (1005, 1014), True, 'import numpy as np\n')]
# Copyright 2022 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """NMS implementation""" import numpy as np def oks_iou(g, d, a_g, a_d, sigmas=None, vis_thr=None): """Calculate oks ious. Args: g: Ground truth keypoints. d: Detected keypoints. a_g: Area of the ground truth object. a_d: Area of the detected object. sigmas: standard deviation of keypoint labelling. vis_thr: threshold of the keypoint visibility. Returns: list: The oks ious. """ if sigmas is None: sigmas = np.array([ .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89 ]) / 10.0 key_vars = (sigmas * 2)**2 xg = g[0::3] yg = g[1::3] vg = g[2::3] ious = np.zeros(len(d), dtype=np.float32) for n_d in range(0, len(d)): xd = d[n_d, 0::3] yd = d[n_d, 1::3] vd = d[n_d, 2::3] dx = xd - xg dy = yd - yg e = (dx**2 + dy**2) / key_vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2 if vis_thr is not None: ind = list(vg > vis_thr) and list(vd > vis_thr) e = e[ind] ious[n_d] = np.sum(np.exp(-e)) / len(e) if e.size != 0 else 0.0 return ious def oks_nms(kpts_db, thr, sigmas=None, vis_thr=None): """OKS NMS implementations. Args: kpts_db: keypoints. thr: Retain overlap < thr. sigmas: standard deviation of keypoint labelling. vis_thr: threshold of the keypoint visibility. Returns: np.ndarray: indexes to keep. """ if not kpts_db: return [] scores = np.array([k['score'] for k in kpts_db]) kpts = np.array([k['keypoints'].flatten() for k in kpts_db]) areas = np.array([k['area'] for k in kpts_db]) order = scores.argsort()[::-1] keep = [] while order.size > 0: i = order[0] keep.append(i) oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], sigmas, vis_thr) inds = np.where(oks_ovr <= thr)[0] order = order[inds + 1] keep = np.array(keep) return keep def _rescore(overlap, scores, thr, key_type='gaussian'): """Rescoring mechanism gaussian or linear. Args: overlap: calculated ious scores: target scores. thr: retain oks overlap < thr. key_type: 'gaussian' or 'linear' Returns: np.ndarray: indexes to keep """ assert len(overlap) == len(scores) assert key_type in ['gaussian', 'linear'] if key_type == 'linear': inds = np.where(overlap >= thr)[0] scores[inds] = scores[inds] * (1 - overlap[inds]) else: scores = scores * np.exp(-overlap**2 / thr) return scores def soft_oks_nms(kpts_db, thr, max_dets=20, sigmas=None, vis_thr=None): """Soft OKS NMS implementations. Args: kpts_db thr: retain oks overlap < thr. max_dets: max number of detections to keep. sigmas: Keypoint labelling uncertainty. Returns: np.ndarray: indexes to keep. 
""" if not kpts_db: return [] scores = np.array([k['score'] for k in kpts_db]) kpts = np.array([k['keypoints'].flatten() for k in kpts_db]) areas = np.array([k['area'] for k in kpts_db]) order = scores.argsort()[::-1] scores = scores[order] keep = np.zeros(max_dets, dtype=np.intp) keep_cnt = 0 while order.size > 0 and keep_cnt < max_dets: i = order[0] oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], sigmas, vis_thr) order = order[1:] scores = _rescore(oks_ovr, scores[1:], thr) tmp = scores.argsort()[::-1] order = order[tmp] scores = scores[tmp] keep[keep_cnt] = i keep_cnt += 1 keep = keep[:keep_cnt] return keep
[ "numpy.where", "numpy.exp", "numpy.array", "numpy.zeros", "numpy.spacing" ]
[((2253, 2292), 'numpy.array', 'np.array', (["[k['score'] for k in kpts_db]"], {}), "([k['score'] for k in kpts_db])\n", (2261, 2292), True, 'import numpy as np\n'), ((2370, 2408), 'numpy.array', 'np.array', (["[k['area'] for k in kpts_db]"], {}), "([k['area'] for k in kpts_db])\n", (2378, 2408), True, 'import numpy as np\n'), ((2742, 2756), 'numpy.array', 'np.array', (['keep'], {}), '(keep)\n', (2750, 2756), True, 'import numpy as np\n'), ((3774, 3813), 'numpy.array', 'np.array', (["[k['score'] for k in kpts_db]"], {}), "([k['score'] for k in kpts_db])\n", (3782, 3813), True, 'import numpy as np\n'), ((3891, 3929), 'numpy.array', 'np.array', (["[k['area'] for k in kpts_db]"], {}), "([k['area'] for k in kpts_db])\n", (3899, 3929), True, 'import numpy as np\n'), ((4005, 4038), 'numpy.zeros', 'np.zeros', (['max_dets'], {'dtype': 'np.intp'}), '(max_dets, dtype=np.intp)\n', (4013, 4038), True, 'import numpy as np\n'), ((1162, 1278), 'numpy.array', 'np.array', (['[0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, \n 1.07, 0.87, 0.87, 0.89, 0.89]'], {}), '([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62,\n 1.07, 1.07, 0.87, 0.87, 0.89, 0.89])\n', (1170, 1278), True, 'import numpy as np\n'), ((2670, 2694), 'numpy.where', 'np.where', (['(oks_ovr <= thr)'], {}), '(oks_ovr <= thr)\n', (2678, 2694), True, 'import numpy as np\n'), ((3221, 3245), 'numpy.where', 'np.where', (['(overlap >= thr)'], {}), '(overlap >= thr)\n', (3229, 3245), True, 'import numpy as np\n'), ((3343, 3370), 'numpy.exp', 'np.exp', (['(-overlap ** 2 / thr)'], {}), '(-overlap ** 2 / thr)\n', (3349, 3370), True, 'import numpy as np\n'), ((1647, 1660), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (1657, 1660), True, 'import numpy as np\n'), ((1808, 1818), 'numpy.exp', 'np.exp', (['(-e)'], {}), '(-e)\n', (1814, 1818), True, 'import numpy as np\n')]
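A hedged usage sketch for the OKS-NMS code sample above (illustrative only; the two detections are fabricated just to show the expected kpts_db format of dicts with 'score', 'keypoints' and 'area', and the snippet assumes oks_nms is in scope):

# Illustrative sketch: suppress a near-duplicate 17-keypoint detection.
import numpy as np
det_a = {'score': 0.9, 'keypoints': np.random.rand(17, 3), 'area': 100.0}
det_b = {'score': 0.7, 'keypoints': det_a['keypoints'] + 0.01, 'area': 100.0}
keep = oks_nms([det_a, det_b], thr=0.9)
print(keep)  # typically [0]: the lower-scored near-duplicate is dropped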
# import the necessary packages import numpy as np import cv2 cap=cv2.VideoCapture(1) def order_points(pts): # initialzie a list of coordinates that will be ordered # such that the first entry in the list is the top-left, # the second entry is the top-right, the third is the # bottom-right, and the fourth is the bottom-left rect = np.zeros((4, 2), dtype = "float32") # the top-left point will have the smallest sum, whereas # the bottom-right point will have the largest sum s = pts.sum(axis = 1) rect[0] = pts[np.argmin(s)] rect[2] = pts[np.argmax(s)] # now, compute the difference between the points, the # top-right point will have the smallest difference, # whereas the bottom-left will have the largest difference diff = np.diff(pts, axis = 1) rect[1] = pts[np.argmin(diff)] rect[3] = pts[np.argmax(diff)] # return the ordered coordinates return rect def four_point_transform(image, pts): # obtain a consistent order of the points and unpack them # individually rect = order_points(pts) (tl, tr, br, bl) = rect # compute the width of the new image, which will be the # maximum distance between bottom-right and bottom-left # x-coordiates or the top-right and top-left x-coordinates widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2)) widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2)) maxWidth = max(int(widthA), int(widthB)) # compute the height of the new image, which will be the # maximum distance between the top-right and bottom-right # y-coordinates or the top-left and bottom-left y-coordinates heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2)) heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2)) maxHeight = max(int(heightA), int(heightB)) # now that we have the dimensions of the new image, construct # the set of destination points to obtain a "birds eye view", # (i.e. 
top-down view) of the image, again specifying points # in the top-left, top-right, bottom-right, and bottom-left # order dst = np.array([ [0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]], dtype = "float32") # compute the perspective transform matrix and then apply it M = cv2.getPerspectiveTransform(rect, dst) warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) # return the warped image return warped #============================================================================================================================================= # mouse callback function def draw_circle(event,x,y,flags,param): global pts global num global frame if event == cv2.EVENT_LBUTTONDBLCLK: cv2.circle(frame,(x,y),10,(255,0,0),-1) pts[num][0]= x pts[num][1]= y num=num+1 print(num) print(pts) #============================================================================================================================================= pts=np.array([(0,0),(0,1),(1,1),(1,0)]) num=0 def points_inverse(): global pts global num global frame while(1): ret,frame=cap.read() cv2.imshow("image_demo",frame) k = cv2.waitKey(20) & 0xFF if k == ord('a'): break cv2.destroyAllWindows() print("out") cv2.namedWindow("image") cv2.setMouseCallback("image",draw_circle) print("out**") while(num<4): cv2.imshow("image",frame) cv2.waitKey(30) cv2.destroyAllWindows() points_inverse() print("point define") while(1): ret,frame=cap.read() global pts # apply the four point tranform to obtain a "birds eye view" of # the image warped = four_point_transform(frame, pts) # show the original and warped images cv2.imshow("Original", frame) cv2.imshow("Warped", warped) k = cv2.waitKey(20) & 0xFF if k == ord('a'): break cv2.destroyAllWindows()
[ "cv2.setMouseCallback", "numpy.sqrt", "cv2.getPerspectiveTransform", "numpy.diff", "numpy.argmax", "cv2.imshow", "numpy.array", "numpy.zeros", "cv2.warpPerspective", "cv2.destroyAllWindows", "cv2.VideoCapture", "cv2.circle", "numpy.argmin", "cv2.waitKey", "cv2.namedWindow" ]
[((66, 85), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (82, 85), False, 'import cv2\n'), ((2964, 3006), 'numpy.array', 'np.array', (['[(0, 0), (0, 1), (1, 1), (1, 0)]'], {}), '([(0, 0), (0, 1), (1, 1), (1, 0)])\n', (2972, 3006), True, 'import numpy as np\n'), ((3787, 3810), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3808, 3810), False, 'import cv2\n'), ((339, 372), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': '"""float32"""'}), "((4, 2), dtype='float32')\n", (347, 372), True, 'import numpy as np\n'), ((747, 767), 'numpy.diff', 'np.diff', (['pts'], {'axis': '(1)'}), '(pts, axis=1)\n', (754, 767), True, 'import numpy as np\n'), ((1236, 1288), 'numpy.sqrt', 'np.sqrt', (['((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)'], {}), '((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)\n', (1243, 1288), True, 'import numpy as np\n'), ((1303, 1355), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)'], {}), '((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)\n', (1310, 1355), True, 'import numpy as np\n'), ((1595, 1647), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)'], {}), '((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)\n', (1602, 1647), True, 'import numpy as np\n'), ((1663, 1715), 'numpy.sqrt', 'np.sqrt', (['((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)'], {}), '((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)\n', (1670, 1715), True, 'import numpy as np\n'), ((2032, 2142), 'numpy.array', 'np.array', (['[[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, \n maxHeight - 1]], dtype='float32')\n", (2040, 2142), True, 'import numpy as np\n'), ((2218, 2256), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['rect', 'dst'], {}), '(rect, dst)\n', (2245, 2256), False, 'import cv2\n'), ((2267, 2319), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'M', '(maxWidth, maxHeight)'], {}), '(image, M, (maxWidth, maxHeight))\n', (2286, 2319), False, 'import cv2\n'), ((3207, 3230), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3228, 3230), False, 'import cv2\n'), ((3246, 3270), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (3261, 3270), False, 'import cv2\n'), ((3272, 3314), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'draw_circle'], {}), "('image', draw_circle)\n", (3292, 3314), False, 'import cv2\n'), ((3394, 3417), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3415, 3417), False, 'import cv2\n'), ((3670, 3699), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'frame'], {}), "('Original', frame)\n", (3680, 3699), False, 'import cv2\n'), ((3701, 3729), 'cv2.imshow', 'cv2.imshow', (['"""Warped"""', 'warped'], {}), "('Warped', warped)\n", (3711, 3729), False, 'import cv2\n'), ((525, 537), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (534, 537), True, 'import numpy as np\n'), ((554, 566), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (563, 566), True, 'import numpy as np\n'), ((785, 800), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (794, 800), True, 'import numpy as np\n'), ((817, 832), 'numpy.argmax', 'np.argmax', (['diff'], {}), '(diff)\n', (826, 832), True, 'import numpy as np\n'), ((2669, 2715), 'cv2.circle', 'cv2.circle', (['frame', '(x, y)', '(10)', '(255, 0, 0)', '(-1)'], {}), '(frame, (x, y), 10, (255, 0, 0), -1)\n', (2679, 2715), False, 'import cv2\n'), ((3108, 
3139), 'cv2.imshow', 'cv2.imshow', (['"""image_demo"""', 'frame'], {}), "('image_demo', frame)\n", (3118, 3139), False, 'import cv2\n'), ((3349, 3375), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'frame'], {}), "('image', frame)\n", (3359, 3375), False, 'import cv2\n'), ((3377, 3392), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (3388, 3392), False, 'import cv2\n'), ((3735, 3750), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (3746, 3750), False, 'import cv2\n'), ((3147, 3162), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (3158, 3162), False, 'import cv2\n')]
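A hedged, offline sketch of the perspective warp in the code sample above (illustrative only; the synthetic image and the four corner coordinates are assumptions chosen instead of the webcam/mouse input used by the script, and the snippet assumes four_point_transform is in scope):

# Illustrative sketch: rectify a quadrilateral region of a synthetic frame.
import numpy as np
img = np.zeros((480, 640, 3), dtype=np.uint8)
quad_pts = np.array([(100, 80), (540, 60), (560, 420), (120, 440)], dtype='float32')
top_down = four_point_transform(img, quad_pts)
print(top_down.shape)  # (maxHeight, maxWidth, 3) of the rectified patch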
import numpy as np from deap import benchmarks from BayesOpt import BO from BayesOpt.Surrogate import RandomForest from BayesOpt.SearchSpace import ContinuousSpace, OrdinalSpace, NominalSpace from BayesOpt.base import Solution np.random.seed(42) def obj_func(x): x_r, x_i, x_d = np.array(x[:2]), x[2], x[3] if x_d == 'OK': tmp = 0 else: tmp = 1 return np.sum((x_r + np.array([2, 2])) ** 2) + abs(x_i - 10) * 10 + tmp def eq_func(x): x_r = np.array(x[:2]) return np.sum(x_r ** 2) - 2 def ineq_func(x): x_r = np.array(x[:2]) return np.sum(x_r) + 1 space = ((ContinuousSpace([-10, 10]) * 2) + OrdinalSpace([5, 15]) + NominalSpace(['OK', 'A', 'B', 'C', 'D', 'E', 'F', 'G'])) warm_data = Solution([4.6827082694127835, 9.87885354178838, 5, 'A'], var_name=["r_0", "r_1", "i", "d"], n_eval=1, fitness=236.76575128) warm_data += Solution([-8.99187067168115, 8.317469942991558, 5, 'D'], var_name=["r_0", "r_1", "i", "d"], n_eval=1, fitness=206.33644151) warm_data += Solution([-2.50919762305275, 9.014286128198322, 12, 'G'], var_name=["r_0", "r_1", "i", "d"], n_eval=1, fitness=142.57378113) warm_data += Solution([4.639878836228101, 1.973169683940732, 9, 'G'], var_name=["r_0", "r_1", "i", "d"], n_eval=1, fitness=70.8740683) if 11 < 2: model = RandomForest(levels=space.levels) opt = BO(space, obj_func, model, minimize=True, n_init_sample=3, max_eval=50, verbose=True, optimizer='MIES', warm_data=warm_data) xopt, fopt, stop_dict = opt.run() else: model = RandomForest(levels=space.levels) opt = BO(space, obj_func, model, minimize=True, n_init_sample=3, max_eval=50, verbose=True, optimizer='MIES', warm_data="test_warmdata.data") xopt, fopt, stop_dict = opt.run()
[ "BayesOpt.SearchSpace.ContinuousSpace", "BayesOpt.Surrogate.RandomForest", "BayesOpt.BO", "BayesOpt.SearchSpace.OrdinalSpace", "numpy.array", "numpy.sum", "BayesOpt.base.Solution", "numpy.random.seed", "BayesOpt.SearchSpace.NominalSpace" ]
[((230, 248), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (244, 248), True, 'import numpy as np\n'), ((745, 872), 'BayesOpt.base.Solution', 'Solution', (["[4.6827082694127835, 9.87885354178838, 5, 'A']"], {'var_name': "['r_0', 'r_1', 'i', 'd']", 'n_eval': '(1)', 'fitness': '(236.76575128)'}), "([4.6827082694127835, 9.87885354178838, 5, 'A'], var_name=['r_0',\n 'r_1', 'i', 'd'], n_eval=1, fitness=236.76575128)\n", (753, 872), False, 'from BayesOpt.base import Solution\n'), ((882, 1009), 'BayesOpt.base.Solution', 'Solution', (["[-8.99187067168115, 8.317469942991558, 5, 'D']"], {'var_name': "['r_0', 'r_1', 'i', 'd']", 'n_eval': '(1)', 'fitness': '(206.33644151)'}), "([-8.99187067168115, 8.317469942991558, 5, 'D'], var_name=['r_0',\n 'r_1', 'i', 'd'], n_eval=1, fitness=206.33644151)\n", (890, 1009), False, 'from BayesOpt.base import Solution\n'), ((1019, 1147), 'BayesOpt.base.Solution', 'Solution', (["[-2.50919762305275, 9.014286128198322, 12, 'G']"], {'var_name': "['r_0', 'r_1', 'i', 'd']", 'n_eval': '(1)', 'fitness': '(142.57378113)'}), "([-2.50919762305275, 9.014286128198322, 12, 'G'], var_name=['r_0',\n 'r_1', 'i', 'd'], n_eval=1, fitness=142.57378113)\n", (1027, 1147), False, 'from BayesOpt.base import Solution\n'), ((1157, 1282), 'BayesOpt.base.Solution', 'Solution', (["[4.639878836228101, 1.973169683940732, 9, 'G']"], {'var_name': "['r_0', 'r_1', 'i', 'd']", 'n_eval': '(1)', 'fitness': '(70.8740683)'}), "([4.639878836228101, 1.973169683940732, 9, 'G'], var_name=['r_0',\n 'r_1', 'i', 'd'], n_eval=1, fitness=70.8740683)\n", (1165, 1282), False, 'from BayesOpt.base import Solution\n'), ((481, 496), 'numpy.array', 'np.array', (['x[:2]'], {}), '(x[:2])\n', (489, 496), True, 'import numpy as np\n'), ((558, 573), 'numpy.array', 'np.array', (['x[:2]'], {}), '(x[:2])\n', (566, 573), True, 'import numpy as np\n'), ((674, 729), 'BayesOpt.SearchSpace.NominalSpace', 'NominalSpace', (["['OK', 'A', 'B', 'C', 'D', 'E', 'F', 'G']"], {}), "(['OK', 'A', 'B', 'C', 'D', 'E', 'F', 'G'])\n", (686, 729), False, 'from BayesOpt.SearchSpace import ContinuousSpace, OrdinalSpace, NominalSpace\n'), ((1304, 1337), 'BayesOpt.Surrogate.RandomForest', 'RandomForest', ([], {'levels': 'space.levels'}), '(levels=space.levels)\n', (1316, 1337), False, 'from BayesOpt.Surrogate import RandomForest\n'), ((1348, 1476), 'BayesOpt.BO', 'BO', (['space', 'obj_func', 'model'], {'minimize': '(True)', 'n_init_sample': '(3)', 'max_eval': '(50)', 'verbose': '(True)', 'optimizer': '"""MIES"""', 'warm_data': 'warm_data'}), "(space, obj_func, model, minimize=True, n_init_sample=3, max_eval=50,\n verbose=True, optimizer='MIES', warm_data=warm_data)\n", (1350, 1476), False, 'from BayesOpt import BO\n'), ((1555, 1588), 'BayesOpt.Surrogate.RandomForest', 'RandomForest', ([], {'levels': 'space.levels'}), '(levels=space.levels)\n', (1567, 1588), False, 'from BayesOpt.Surrogate import RandomForest\n'), ((1599, 1738), 'BayesOpt.BO', 'BO', (['space', 'obj_func', 'model'], {'minimize': '(True)', 'n_init_sample': '(3)', 'max_eval': '(50)', 'verbose': '(True)', 'optimizer': '"""MIES"""', 'warm_data': '"""test_warmdata.data"""'}), "(space, obj_func, model, minimize=True, n_init_sample=3, max_eval=50,\n verbose=True, optimizer='MIES', warm_data='test_warmdata.data')\n", (1601, 1738), False, 'from BayesOpt import BO\n'), ((287, 302), 'numpy.array', 'np.array', (['x[:2]'], {}), '(x[:2])\n', (295, 302), True, 'import numpy as np\n'), ((508, 524), 'numpy.sum', 'np.sum', (['(x_r ** 2)'], {}), '(x_r ** 2)\n', (514, 524), True, 'import 
numpy as np\n'), ((585, 596), 'numpy.sum', 'np.sum', (['x_r'], {}), '(x_r)\n', (591, 596), True, 'import numpy as np\n'), ((646, 667), 'BayesOpt.SearchSpace.OrdinalSpace', 'OrdinalSpace', (['[5, 15]'], {}), '([5, 15])\n', (658, 667), False, 'from BayesOpt.SearchSpace import ContinuousSpace, OrdinalSpace, NominalSpace\n'), ((612, 638), 'BayesOpt.SearchSpace.ContinuousSpace', 'ContinuousSpace', (['[-10, 10]'], {}), '([-10, 10])\n', (627, 638), False, 'from BayesOpt.SearchSpace import ContinuousSpace, OrdinalSpace, NominalSpace\n'), ((402, 418), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (410, 418), True, 'import numpy as np\n')]
import numpy as num
import scipy.sparse.linalg as alg
import scipy.linalg as algnorm
import scipy.sparse as smat
import random


# Graph operations - maybe move these to a separate module?
def to_adiacency_row(neighbours, n):
    row = num.zeros(n)
    row[neighbours] = 1
    return row


def graph_to_matrix(graph):
    # Builds a sparse matrix describing the graph
    n = len(graph)
    rows = []
    cols = []
    for i in range(n):
        rows.extend([i]*len(graph[i]))
        cols.extend(graph[i])
    data = [1.0 for v in rows]
    matrix = smat.csr_matrix((data, (rows, cols)), (n, n), 'd')
    # for i in range(n):
    #     matrix[i, graph[i]] = 1
    return matrix
    # return num.array([to_adiacency_row(vertex, len(graph)) for vertex in graph])


def matrix_to_graph(A):
    rows, cols = A.nonzero()
    n = A.get_shape()[0]
    graph = [[]] * n
    vert = [0] * n
    for (row, col) in zip(rows, cols):
        if graph[row].count(col) == 0:
            if len(graph[row]) == 0:
                graph[row] = [col]
            else:
                graph[row].append(col)
        if graph[col].count(row) == 0:
            if len(graph[col]) == 0:
                graph[col] = [row]
            else:
                graph[col].append(row)
    return graph


def __dfs(graph, v, visited):
    for w in graph[v]:
        if visited[w] == 0:
            visited[w] = visited[v]
            __dfs(graph, w, visited)


def extract_connected_component(graph, vertex):  # Possibly a redundant function
    n = len(graph)
    member = [0 for v in graph]
    member[vertex] = 1
    __dfs(graph, vertex, member)
    number = [0 for v in graph]
    offset = 0
    j = 0
    component = []
    for i in range(n):
        if member[i]:
            number[i] = i - offset
            component.append(graph[i])
        else:
            number[i] = -1
            offset -= 1
    for v in component:
        for i in range(len(v)):
            v[i] = number[v[i]]
    return component


def get_all_components(graph):
    n = len(graph)
    member = [0 for v in graph]
    vertex = 0
    comp_id = 1
    while vertex < n and member[vertex] == 0:
        member[vertex] = comp_id
        comp_id += 1
        __dfs(graph, vertex, member)
        while vertex < n and member[vertex] > 0:
            vertex += 1
    components = []
    number = [0 for v in graph]
    index = [0 for c in range(1, comp_id)]
    for i in range(n):
        comp = member[i]-1
        if index[comp] == 0:
            components.append([graph[i]])
        else:
            components[comp].append(graph[i])
        number[i] = index[comp]
        index[comp] += 1
    for component in components:
        for v in component:
            for i in range(len(v)):
                v[i] = number[v[i]]
    return components


# Computing MERW and SimRanks
def compute_merw(A):  # Archaic way of computing MERW
    n = A.get_shape()[0]
    w, v = alg.eigsh(A, 1, )  # The matrix is symmetric
    evalue = w[0]
    evector = v[:, 0]
    evector = evector / algnorm.norm(evector)
    P = smat.lil_matrix((n, n))
    for row in range(n):
        denom = evalue * evector[row]
        for col in range(n):
            if A[row, col] != 0:
                P[row, col] = A[row, col] * evector[col] / denom
    return P, evector, evalue, [evector[i]*evector[i] for i in range(n)]


def power_method(A, precision=1e-11):
    n = A.get_shape()[0]
    v0 = num.array([random.random()+.1 for i in range(n)])
    eps = 1
    iter = 0
    while iter < 20 or eps > precision:
        v1 = v0*A
        iter += 1
        eval = 0
        eps = 0
        for i in range(n):
            if v0[i] == 0:
                continue
            div = v1[i]/v0[i]
            eval = max(eval, div)
            eps += eval - div
        v0 = v1/eval
    return v0/algnorm.norm(v0), eval, iter


def scipy_method(A):
    w, v = alg.eigsh(A, k=1, which='LA')  # The matrix is symmetric
    evalue = w[0]
    evector = v[:, 0]
    if evector[0] < 0:
        evector *= -1
    return evector / algnorm.norm(evector), evalue, 1


def _inv(x, y):
    if x != 0:
        return 1/(x*y)
    else:
        return 0.0


def compute_merw_matrix(A, method=power_method):
    n = A.get_shape()[0]
    evector, evalue, iter = method(A)
    print('({} itr.)'.format(iter), end='')
    mat1 = smat.diags([evector], [0], shape=(n, n), format='csc')
    mat2 = smat.diags([[_inv(v, evalue) for v in evector]],  # Something like the inverse of a diagonal matrix
                      [0], shape=(n, n), format='csc')
    return mat2*A*mat1, evector, evalue, [v*v for v in evector]


def compute_grw(A):
    # Computes the transition probabilities and stationary distribution of an ordinary random walk
    n = A.get_shape()[0]
    degrees = smat.diags(A.sum(axis=0), [0], shape=(n, n), format='csr').power(-1)
    P = degrees * A
    vals, stationary = alg.eigs(P.transpose(), k=1, sigma=0.9999999)
    inorm = 1/num.sum(stationary[:, 0]).real
    return P, [x.real * inorm for x in stationary[:, 0]]


def compute_merw_simrank(graph, alpha, precision=1e-5, maxiter=100):
    n = len(graph)
    R = num.identity(n)
    P, v, val, sdist = compute_merw_matrix(graph_to_matrix(graph))
    R = num.identity(n)
    S = num.zeros((n, n))
    denom = [[v[x] * v[y] for x in range(n)] for y in range(n)]
    alpha = alpha / val / val
    for iteration in range(maxiter):
        # S.fill(0)
        # S = num.zeros((n, n))
        for y in range(n):
            S[y, y] = 1.0
            for x in range(y):
                # if denom[x][y] != 0:  # This may not hold if the graph is not connected
                S[x, y] = 0.0
                for a in graph[x]:
                    for b in graph[y]:
                        S[x, y] += R[a, b] / denom[a][b]
                S[x, y] *= alpha * denom[x][y]
                S[y, x] = S[x, y]
        t = R
        R = S
        S = t
    return R, algnorm.norm(R - S)


def compute_basic_simrank(graph, alpha, precision=1e-5, maxiter=20):
    n = len(graph)
    R = num.identity(n)
    S = num.zeros((n, n))
    for iteration in range(maxiter):
        for y in range(n):
            S[y, y] = 1.0
            if len(graph[y]) > 0:
                for x in range(y):
                    S[x, y] = 0.0
                    if len(graph[x]) > 0:
                        for a in graph[x]:
                            for b in graph[y]:
                                S[x, y] += R[a, b]
                        S[x, y] *= alpha / (len(graph[x])*len(graph[y]))
                    S[y, x] = S[x, y]
        t = R
        R = S
        S = t
    return R, algnorm.norm(R - S)


def compute_merw_simrank_ofmatrix(matrix, alpha, precision=1e-5, maxiter=20, method=power_method):
    graph = matrix_to_graph(matrix)
    n = len(graph)
    P, v, val, sdist = compute_merw_matrix(matrix, method=method)
    R = num.identity(n)
    S = num.zeros((n, n))
    denom = [[v[x]*v[y] for x in range(n)] for y in range(n)]
    alpha = alpha / val / val
    for iteration in range(maxiter):
        # S.fill(0)
        # S = num.zeros((n, n))
        for y in range(n):
            S[y, y] = 1.0
            for x in range(y):
                if denom[x][y] != 0:  # This may not hold if the graph is not connected
                    S[x, y] = 0.0
                    for a in graph[x]:
                        for b in graph[y]:
                            S[x, y] += R[a, b] / denom[a][b]
                    S[x, y] *= alpha * denom[x][y]
                    S[y, x] = S[x, y]
        t = R
        R = S
        S = t
    return R, algnorm.norm(R - S)


def compute_P_distance_iterative(P, alpha=0.8, maxiter=100, precision=1e-6):
    # Archaic and imprecise
    if alpha <= 0 or alpha > 1:
        raise ValueError()
    D = powr = P*alpha
    result = smat.identity(P.get_shape()[0], format='csr') + D
    for i in range(maxiter):
        powr *= D
        result = result + powr
        eps = alg.norm(powr)
        if eps < precision:
            return result, eps
    return result, eps


def compute_P_distance(P, alpha=0.8):
    D = smat.identity(P.get_shape()[0], format='csc')
    D -= P * alpha
    return alg.inv(D)
[ "numpy.identity", "scipy.sparse.lil_matrix", "scipy.sparse.linalg.inv", "scipy.sparse.linalg.eigsh", "numpy.sum", "numpy.zeros", "scipy.linalg.norm", "scipy.sparse.linalg.norm", "random.random", "scipy.sparse.diags", "scipy.sparse.csr_matrix" ]
[((214, 226), 'numpy.zeros', 'num.zeros', (['n'], {}), '(n)\n', (223, 226), True, 'import numpy as num\n'), ((519, 569), 'scipy.sparse.csr_matrix', 'smat.csr_matrix', (['(data, (rows, cols))', '(n, n)', '"""d"""'], {}), "((data, (rows, cols)), (n, n), 'd')\n", (534, 569), True, 'import scipy.sparse as smat\n'), ((2878, 2893), 'scipy.sparse.linalg.eigsh', 'alg.eigsh', (['A', '(1)'], {}), '(A, 1)\n', (2887, 2893), True, 'import scipy.sparse.linalg as alg\n'), ((3018, 3041), 'scipy.sparse.lil_matrix', 'smat.lil_matrix', (['(n, n)'], {}), '((n, n))\n', (3033, 3041), True, 'import scipy.sparse as smat\n'), ((3834, 3863), 'scipy.sparse.linalg.eigsh', 'alg.eigsh', (['A'], {'k': '(1)', 'which': '"""LA"""'}), "(A, k=1, which='LA')\n", (3843, 3863), True, 'import scipy.sparse.linalg as alg\n'), ((4285, 4339), 'scipy.sparse.diags', 'smat.diags', (['[evector]', '[0]'], {'shape': '(n, n)', 'format': '"""csc"""'}), "([evector], [0], shape=(n, n), format='csc')\n", (4295, 4339), True, 'import scipy.sparse as smat\n'), ((5067, 5082), 'numpy.identity', 'num.identity', (['n'], {}), '(n)\n', (5079, 5082), True, 'import numpy as num\n'), ((5158, 5173), 'numpy.identity', 'num.identity', (['n'], {}), '(n)\n', (5170, 5173), True, 'import numpy as num\n'), ((5182, 5199), 'numpy.zeros', 'num.zeros', (['(n, n)'], {}), '((n, n))\n', (5191, 5199), True, 'import numpy as num\n'), ((5970, 5985), 'numpy.identity', 'num.identity', (['n'], {}), '(n)\n', (5982, 5985), True, 'import numpy as num\n'), ((5994, 6011), 'numpy.zeros', 'num.zeros', (['(n, n)'], {}), '((n, n))\n', (6003, 6011), True, 'import numpy as num\n'), ((6801, 6816), 'numpy.identity', 'num.identity', (['n'], {}), '(n)\n', (6813, 6816), True, 'import numpy as num\n'), ((6825, 6842), 'numpy.zeros', 'num.zeros', (['(n, n)'], {}), '((n, n))\n', (6834, 6842), True, 'import numpy as num\n'), ((8096, 8106), 'scipy.sparse.linalg.inv', 'alg.inv', (['D'], {}), '(D)\n', (8103, 8106), True, 'import scipy.sparse.linalg as alg\n'), ((2988, 3009), 'scipy.linalg.norm', 'algnorm.norm', (['evector'], {}), '(evector)\n', (3000, 3009), True, 'import scipy.linalg as algnorm\n'), ((5852, 5871), 'scipy.linalg.norm', 'algnorm.norm', (['(R - S)'], {}), '(R - S)\n', (5864, 5871), True, 'import scipy.linalg as algnorm\n'), ((6551, 6570), 'scipy.linalg.norm', 'algnorm.norm', (['(R - S)'], {}), '(R - S)\n', (6563, 6570), True, 'import scipy.linalg as algnorm\n'), ((7514, 7533), 'scipy.linalg.norm', 'algnorm.norm', (['(R - S)'], {}), '(R - S)\n', (7526, 7533), True, 'import scipy.linalg as algnorm\n'), ((7875, 7889), 'scipy.sparse.linalg.norm', 'alg.norm', (['powr'], {}), '(powr)\n', (7883, 7889), True, 'import scipy.sparse.linalg as alg\n'), ((3771, 3787), 'scipy.linalg.norm', 'algnorm.norm', (['v0'], {}), '(v0)\n', (3783, 3787), True, 'import scipy.linalg as algnorm\n'), ((3998, 4019), 'scipy.linalg.norm', 'algnorm.norm', (['evector'], {}), '(evector)\n', (4010, 4019), True, 'import scipy.linalg as algnorm\n'), ((4881, 4906), 'numpy.sum', 'num.sum', (['stationary[:, 0]'], {}), '(stationary[:, 0])\n', (4888, 4906), True, 'import numpy as num\n'), ((3390, 3405), 'random.random', 'random.random', ([], {}), '()\n', (3403, 3405), False, 'import random\n')]
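The entry above builds the maximal-entropy random walk (MERW) transition matrix as P[i, j] = A[i, j] * psi[j] / (lam * psi[i]), where (lam, psi) is the leading eigenpair of the symmetric adjacency matrix A; this is what compute_merw_matrix returns via diag(1/(lam*psi)) * A * diag(psi). The following is a minimal, self-contained sketch of that same construction using only numpy/scipy; the 3-node example graph and variable names are illustrative assumptions and are not part of the dataset row.

# Sketch of the MERW construction (assumed example, mirrors compute_merw_matrix):
# P[i, j] = A[i, j] * psi[j] / (lam * psi[i]) with (lam, psi) the leading eigenpair of A.
import numpy as np
import scipy.sparse as smat
import scipy.sparse.linalg as alg

# Undirected path graph 0-1-2 as a sparse adjacency matrix (illustrative).
A = smat.csr_matrix(np.array([[0., 1., 0.],
                           [1., 0., 1.],
                           [0., 1., 0.]]))

w, v = alg.eigsh(A, k=1, which='LA')   # leading eigenvalue / eigenvector
lam, psi = w[0], np.abs(v[:, 0])

P = smat.diags(1.0 / (lam * psi)) @ A @ smat.diags(psi)

print(P.toarray().sum(axis=1))           # each row of P sums to ~1.0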
import keras
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import time

start_time=time.time()

location="dataforDl.csv"
data=pd.read_csv(location)
data_columns=data.columns
xtrain = data[data_columns[data_columns != 'typeoffraud']]
ytrain=data['typeoffraud']

location1="dataforDl1.csv"
data1=pd.read_csv(location1)
data1_columns=data1.columns
xtest = data1[data1_columns[data1_columns != 'typeoffraud']]
ytest=data1['typeoffraud']

xtrain_norm = (xtrain - xtrain.mean()) / xtrain.std()
xtest_norm = (xtest - xtest.mean()) / xtest.std()
n_cols = xtrain_norm.shape[1]

ytrain=to_categorical(ytrain)
ytest=to_categorical(ytest)
num_classes=ytrain.shape[1]
print(num_classes)

def classification_model():
    # create model
    model = Sequential()
    model.add(Dense(100,activation='relu', input_shape=(n_cols,)))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    # compile model
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model

# build the model
model = classification_model()

# fit the model
model.fit(xtrain_norm, ytrain, validation_data=(xtest_norm, ytest), epochs=10, verbose=1)

# evaluate the model
# test_loss,test_acc=model.evaluate(xtest_norm, ytest)
test_labels_p=model.predict(xtest_norm)
test_labels_p=np.argmax(test_labels_p,axis=1)

print('---%s seconds---'%(time.time()-start_time))
[ "pandas.read_csv", "numpy.argmax", "keras.models.Sequential", "keras.utils.to_categorical", "keras.layers.Dense", "time.time" ]
[((213, 224), 'time.time', 'time.time', ([], {}), '()\n', (222, 224), False, 'import time\n'), ((255, 276), 'pandas.read_csv', 'pd.read_csv', (['location'], {}), '(location)\n', (266, 276), True, 'import pandas as pd\n'), ((425, 447), 'pandas.read_csv', 'pd.read_csv', (['location1'], {}), '(location1)\n', (436, 447), True, 'import pandas as pd\n'), ((708, 730), 'keras.utils.to_categorical', 'to_categorical', (['ytrain'], {}), '(ytrain)\n', (722, 730), False, 'from keras.utils import to_categorical\n'), ((737, 758), 'keras.utils.to_categorical', 'to_categorical', (['ytest'], {}), '(ytest)\n', (751, 758), False, 'from keras.utils import to_categorical\n'), ((1478, 1510), 'numpy.argmax', 'np.argmax', (['test_labels_p'], {'axis': '(1)'}), '(test_labels_p, axis=1)\n', (1487, 1510), True, 'import numpy as np\n'), ((867, 879), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (877, 879), False, 'from keras.models import Sequential\n'), ((894, 946), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""', 'input_shape': '(n_cols,)'}), "(100, activation='relu', input_shape=(n_cols,))\n", (899, 946), False, 'from keras.layers import Dense\n'), ((961, 990), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (966, 990), False, 'from keras.layers import Dense\n'), ((1006, 1046), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (1011, 1046), False, 'from keras.layers import Dense\n'), ((1536, 1547), 'time.time', 'time.time', ([], {}), '()\n', (1545, 1547), False, 'import time\n')]
import subprocess import json import os import csv import numpy as np import pandas as pd import pysam from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord def get_orf(input_genome, output_genome, orf): orf = int(orf) record = SeqIO.read(input_genome, 'fasta') record.seq = record.seq[orf:] SeqIO.write(record, output_genome, 'fasta') def backtranslate(input_nucleotide, input_protein, output_codon): nucleotides = SeqIO.parse(input_nucleotide, 'fasta') proteins = SeqIO.parse(input_protein, 'fasta') codons = [] for protein_record, nucleotide_record in zip(proteins, nucleotides): i = 0 codon_list = [] for character in protein_record.seq: if character != '-': codon = str(nucleotide_record.seq[3*i:3*i+3]) codon_list.append(codon) i += 1 else: codon_list.append('---') codon_record = SeqRecord( Seq(''.join(codon_list)), id=protein_record.id, description=protein_record.description ) codons.append(codon_record) SeqIO.write(codons, output_codon, 'fasta') def select_simulated_gene(dataset, gene, output): aligned_filename = "output/simulation/%s/aligned_%s_orf-%d_codon.fasta" nucleotide_genome_filename = "output/simulation/%s/genome.fasta" % dataset nucleotide_genome = SeqIO.read(nucleotide_genome_filename, 'fasta') max_percent_identity = 0 for i in range(3): non_gaps = 0 matches = 0 codon_list = [] records = SeqIO.parse(aligned_filename % (dataset, gene, i), 'fasta') translated_genome = next(records) reference = next(records) genome_i = 0 for j in range(len(reference)): if reference[j] != '-': non_gaps += 1 codon = str(nucleotide_genome[3*genome_i+i:3*genome_i+i+3].seq) codon_list.append(codon) if reference[j] == translated_genome[j]: matches += 1 if translated_genome[j] != '-': genome_i += 1 percent_identity = matches/non_gaps if percent_identity > max_percent_identity: max_percent_identity = percent_identity desired_codons = ''.join(codon_list) record = SeqRecord( Seq(desired_codons).ungap('-'), id=nucleotide_genome.id, description=gene ) SeqIO.write(record, output, 'fasta') def write_abayesqr_config(sam_filename, reference_filename, output): config_string = ("""filename of reference sequence (FASTA) : %s filname of the aligned reads (sam format) : %s paired-end (1 = true, 0 = false) : 0 SNV_thres : 0.01 reconstruction_start : 1 reconstruction_stop: 1300 min_mapping_qual : 20 min_read_length : 50 max_insert_length : 250 characteristic zone name : test seq_err (assumed sequencing error rate(%%)) : 0.1 MEC improvement threshold : 0.0395 """ % (reference_filename, sam_filename)) with open(output, 'w') as config_file: config_file.write(config_string) def parse_abayesqr_output(input_text, output_fasta): with open(input_text) as input_file: lines = input_file.readlines() records = [] for i, line in enumerate(lines): if i % 2 == 0: freq = float(line.split()[-1]) number = int(i/2)+1 header = 'haplotype-%d_freq-%f' % (number, freq) if i % 2 == 1: seq = Seq(line.strip()) record = SeqRecord(seq, id=header, description='') records.append(record) SeqIO.write(records, output_fasta, 'fasta') def pairwise_distance_csv(fasta_filename, csv_filename): records = list(SeqIO.parse(fasta_filename, 'fasta')) np_seqs = np.array( [list(str(record.seq)) for record in records], dtype='<U1' ) first_records = [] second_records = [] distances = [] search_term = 'quasispecies' for i in range(len(records)): for j in range(len(records)): if records[j].name[: len(search_term)] == search_term: continue first_records.append(records[i].id) second_records.append(records[j].id) distance = (np_seqs[i, :] != 
np_seqs[j, :]).sum() distances.append(distance) pd.DataFrame({ 'first_record': first_records, 'second_record': second_records, 'distance': distances, }).to_csv(csv_filename) def add_subtype_information(input_csv, output_csv): df = pd.read_csv(input_csv) df['Subtype1'] = df['ID1'].apply(lambda row: row.split('.')[0]) df['Subtype2'] = df['ID2'].apply(lambda row: row.split('.')[0]) df.to_csv(output_csv, index=False) def extract_truth( input_fasta, reference_path, dataset, reference, output_path, output_json_path ): sequences = list(SeqIO.parse(input_fasta, "fasta")) aligned_sequences = [] output_dir = os.path.join("output", "truth", dataset) tmp_dir = os.path.join( output_dir, "truth-%s-%s-temp" % (dataset, reference) ) os.mkdir(tmp_dir) for sequence in sequences: sequence_path = os.path.join(tmp_dir, "ref.fasta") alignment_path = os.path.join(tmp_dir, "aligned.fasta") SeqIO.write(sequence, sequence_path, "fasta") command = [ "water", "-asequence", sequence_path, "-bsequence", reference_path, "-gapopen", "10.0", "-gapextend", ".5", "-aformat", "fasta", "-outfile", alignment_path ] subprocess.run(command) aligned_sequence = list(SeqIO.parse(alignment_path, "fasta"))[0] aligned_sequence.seq = aligned_sequence.seq.ungap('-') aligned_sequences.append(aligned_sequence) os.remove(sequence_path) os.remove(alignment_path) os.rmdir(tmp_dir) sequence_length = min([len(record.seq) for record in aligned_sequences]) for record in aligned_sequences: record.seq = record.seq[:sequence_length] SeqIO.write(aligned_sequences, output_path, "fasta") pairwise_distances = [] for i in range(len(aligned_sequences)): first_sequence = aligned_sequences[i] first_np = np.array(list(first_sequence.seq), dtype='<U1') for j in range(i+1, len(aligned_sequences)): second_sequence = aligned_sequences[j] second_np = np.array(list(second_sequence.seq), dtype='<U1') disagreement = int((first_np != second_np).sum()) pairwise_distances.append({ 'sequenceA': first_sequence.name, 'sequenceB': second_sequence.name, 'disagreement': disagreement }) with open(output_json_path, 'w') as json_file: json.dump(pairwise_distances, json_file, indent=2) def covarying_truth( input_computed, input_actual, input_reference, output_json ): reference = SeqIO.read(input_reference, 'fasta') rl = len(reference.seq) with open(input_computed) as input_file: cvs = json.load(input_file) with open(input_actual) as input_file: true_cvs = json.load(input_file) tp = [] fp = [] tn = [] fn = [] for i in range(rl): if i in true_cvs and i in cvs: tp.append(i) elif i in true_cvs and i not in cvs: fn.append(i) elif i not in true_cvs and i in cvs: fp.append(i) elif i not in true_cvs and i not in cvs: tn.append(i) precision = len(tp)/(len(tp)+len(fp)) recall = len(tp)/(len(tp)+len(fn)) result = { 'true_positives': tp, 'true_negative': tn, 'false_positives': fp, 'false_negatives': fn, 'precision': precision, 'recall': recall } with open(output_json, 'w') as output_file: json.dump(result, output_file, indent=2) def restrict_fasta_to_cvs(input_fasta, input_cvs, output_fasta): with open(input_cvs) as json_file: cvs = json.load(json_file) records = list(SeqIO.parse(input_fasta, 'fasta')) for record in records: record.seq = Seq(''.join([record.seq[site] for site in cvs])) SeqIO.write(records, output_fasta, 'fasta') def downsample_bam(input_bam_path, output_bam_path, downsample_amount): downsample_percentage = 1 - int(downsample_amount) / 100 input_bam = pysam.AlignmentFile(input_bam_path, 'rb') number_of_reads = input_bam.count() downsample_number = 
np.ceil(downsample_percentage * number_of_reads) \ .astype(np.int) np.random.seed(1) downsample_indices = np.random.choice( number_of_reads, downsample_number, replace=False ) downsample_indices.sort() downsample_index = 0 output_bam = pysam.AlignmentFile( output_bam_path, 'wb', header=input_bam.header ) for i, read in enumerate(input_bam.fetch()): if i == downsample_indices[downsample_index]: output_bam.write(read) downsample_index += 1 if downsample_index == len(downsample_indices): break output_bam.close() pysam.index(output_bam_path) input_bam.close() def pluck_record(input_fasta_path, output_fasta_path, record): all_records = SeqIO.parse(input_fasta_path, 'fasta') desired_record = SeqIO.to_dict(all_records)[record] SeqIO.write(desired_record, output_fasta_path, 'fasta') def single_mapping_dataset(bam_path, ref_path, output_path): bam = pysam.AlignmentFile(bam_path) ref = SeqIO.read(ref_path, 'fasta') percent_identity = np.zeros(bam.mapped, dtype=np.float) differences = np.zeros(bam.mapped, dtype=np.float) number_of_aligned_pairs = np.zeros(bam.mapped, dtype=np.float) for i, read in enumerate(bam.fetch()): aligned_pairs = read.get_aligned_pairs(matches_only=True) aligned_query = np.array([ read.query[pair[0]] for pair in aligned_pairs ], dtype='<U1') aligned_reference = np.array([ ref[pair[1]] for pair in aligned_pairs ], dtype='<U1') agreement = (aligned_query == aligned_reference).sum() number_of_aligned_pairs[i] = len(aligned_pairs) differences[i] = number_of_aligned_pairs[i] - agreement percent_identity[i] = agreement/number_of_aligned_pairs[i] quality = np.array([ read.mapping_quality for read in bam.fetch() ], dtype=np.int) query_length = np.array([ read.query_length for read in bam.fetch() ], dtype=np.int) result = pd.DataFrame({ 'mapping_quality': quality, 'differences': differences, 'number_of_aligned_pairs': number_of_aligned_pairs, 'percent_identity': percent_identity, 'query_length': query_length }, index=[read.query_name for read in bam.fetch()]) result.to_csv(output_path, index_label='read_id') def full_fvm_mapping_dataset(dataset_paths, output_csv_path): all_datasets = list(map( lambda path: pd.read_csv(path, index_col='read_id'), dataset_paths )) for dataset_path, dataset in zip(dataset_paths, all_datasets): dataset_name = dataset_path.split('/')[-2] dataset['reference'] = dataset_name pd.concat(all_datasets, axis=0, sort=False, ignore_index=True) \ .to_csv(output_csv_path) def true_covarying_kmers(input_fasta, input_json, output_csv, k): k = int(k) records = np.array([ list(record.seq) for record in SeqIO.parse(input_fasta, 'fasta') ], dtype='<U1') data = { **{'index_%d' % i: [] for i in range(k)}, **{'character_%d' % i: [] for i in range(k)} } with open(input_json) as json_file: covarying_sites = np.array(json.load(json_file), dtype=np.int) for i in range(len(covarying_sites) - k): covarying_indices = covarying_sites[i:i+k] covarying_kmers = set() for row_index in range(records.shape[0]): covarying_kmer = ''.join(records[row_index, covarying_indices]) covarying_kmers.add(covarying_kmer) for covarying_kmer in list(covarying_kmers): for i in range(k): data['index_%d' % i].append(covarying_indices[i]) data['character_%d' % i].append(covarying_kmer[i]) pd.DataFrame(data).to_csv(output_csv, index=False) def kmers_in_reads(input_bam, input_csv, output_csv, k): k = int(k) bam = pysam.AlignmentFile(input_bam) df = pd.read_csv(input_csv) df['support'] = np.zeros(len(df), dtype=np.int) for read in bam.fetch(): starts_after = df.index_0 >= read.reference_start ends_before = 
df['index_%d' % (k-1)] <= read.reference_end relevent_kmers = df.loc[starts_after & ends_before, :] for i, row in relevent_kmers.iterrows(): inds = list(row[['index_%d' % i for i in range(k)]]) vacs = ''.join([ read.query[pair[0]] for pair in read.get_aligned_pairs(matches_only=True) if pair[1] in inds ]) kmer = ''.join(row[['character_%d' % i for i in range(k)]]) if vacs == kmer: df.loc[i, 'support'] += 1 df.to_csv(output_csv) def result_json(distance_csv, output_json): df = pd.read_csv(distance_csv) not_quasispecies = df.first_record.apply(lambda x: x[:3] != 'qua') desired_records = list(set(df.first_record[not_quasispecies])) second_is_quasispecies = df.second_record.apply(lambda x: x[:3] == 'qua') results = {} for record in desired_records: if record[:3] != 'qua': continue first_is_desired = df.first_record == record best_match_index = df.loc[ first_is_desired & second_is_quasispecies, 'distance' ].idxmin() results[record] = { 'best_match': str(df.loc[best_match_index, 'second_record']), 'distance': int(df.loc[best_match_index, 'distance']), } with open(output_json, 'w') as json_file: json.dump(results, json_file, indent=2) def covarying_fasta(input_json, input_fasta, output_fasta, end_correction=10): with open(input_json) as json_file: covarying_sites = json.load(json_file) records = list(SeqIO.parse(input_fasta, 'fasta')) for record in records: last_site = len(record.seq) - end_correction record.seq = Seq( ''.join([ record.seq[i] for i in covarying_sites if i > end_correction and i < last_site ]) ) SeqIO.write(records, output_fasta, 'fasta') def report(input_files, output_csv, report_type): csvfile = open(output_csv, 'w') field_names = ['dataset', 'gene', 'worst_distance', 'report_type'] writer = csv.DictWriter(csvfile, field_names) writer.writeheader() for file_path in input_files: with open(file_path) as json_file: result_data = json.load(json_file) dataset = file_path.split('/')[1] gene = file_path.split('/')[4] worst_distance = 0 for key, value in result_data.items(): if value['distance'] > worst_distance: worst_distance = value['distance'] if report_type == 'reconstructing' and worst_distance > 5: raise Exception('A reconstruction dataset failed!', dataset) writer.writerow({ 'dataset': dataset, 'gene': gene, 'worst_distance': worst_distance, 'report_type': report_type }) csvfile.close() def haplotyper_report(input_files, output_csv): csvfile = open(output_csv, 'w') field_names = ['dataset', 'worst_distance'] writer = csv.DictWriter(csvfile, field_names) writer.writeheader() for file_path in input_files: file_path = file_path.split('.')[0] + '.csv' if not os.path.exists(file_path): continue with open(file_path) as json_file: result_data = json.load(json_file) for key, value in result_data.items(): if value['distance'] > worst_distance: worst_distance = value['distance'] writer.writerow({ 'dataset': file_path, 'worst_distance': worst_distance, }) csvfile.close() def superread_agreement(input_superreads, input_fasta, input_json, output_csv): superreads = list(SeqIO.parse(input_superreads, 'fasta')) truth = list(SeqIO.parse(input_fasta, 'fasta')) with open(input_json) as json_file: sites = np.array(json.load(json_file), dtype=np.int) csvfile = open(output_csv, 'w') csvwriter = csv.DictWriter( csvfile, fieldnames=[ 'superread_id', 'weight', 'true_id', 'smallest_diff', 'smallest_recomb', 'start', 'stop' ] ) csvwriter.writeheader() n_char = len(sites) for superread in superreads: smallest_diff = 1e6 superread_id, weight = superread.name.split('_') weight = int(weight.split('-')[1]) 
superread_np = np.array(list(superread.seq), dtype='<U1')[sites] start = (superread_np != '-').argmax() stop = ((np.arange(n_char) >= start) & (superread_np == '-')).argmax() smallest_recomb = 1e6 for true_sequence_a in truth: true_a_np = np.array(list(true_sequence_a.seq), dtype='<U1') diff = (superread_np[start:stop] != true_a_np[start:stop]).sum() if diff < smallest_diff: smallest_diff = diff smallest_id = true_sequence_a.name for true_sequence_b in truth: true_b_np = np.array(list(true_sequence_b.seq), dtype='<U1') for i in range(start, stop): first = true_a_np[start:i] != superread_np[start:i] second = true_b_np[i:stop] != superread_np[i:stop] recomb = first.sum() + second.sum() if recomb < smallest_recomb: smallest_recomb = recomb csvwriter.writerow({ 'superread_id': superread_id, 'weight': weight, 'true_id': smallest_id, 'smallest_diff': smallest_diff, 'smallest_recomb': smallest_recomb, 'start': start, 'stop': stop }) csvfile.close() def superread_scatter_data(superread_path, output_csv): with open(superread_path) as json_file: superreads = json.load(json_file) pd.DataFrame({ 'weight': [sr['weight'] for sr in superreads], 'vacs_length': [len(sr['vacs']) for sr in superreads], }).to_csv(output_csv)
[ "csv.DictWriter", "pandas.read_csv", "Bio.Seq.Seq", "pysam.AlignmentFile", "numpy.array", "numpy.arange", "os.remove", "os.path.exists", "subprocess.run", "Bio.SeqIO.read", "Bio.SeqIO.write", "os.mkdir", "numpy.random.seed", "pandas.DataFrame", "pysam.index", "numpy.ceil", "numpy.random.choice", "Bio.SeqIO.to_dict", "Bio.SeqRecord.SeqRecord", "os.path.join", "os.rmdir", "numpy.zeros", "Bio.SeqIO.parse", "json.load", "pandas.concat", "json.dump" ]
[((267, 300), 'Bio.SeqIO.read', 'SeqIO.read', (['input_genome', '"""fasta"""'], {}), "(input_genome, 'fasta')\n", (277, 300), False, 'from Bio import SeqIO\n'), ((339, 382), 'Bio.SeqIO.write', 'SeqIO.write', (['record', 'output_genome', '"""fasta"""'], {}), "(record, output_genome, 'fasta')\n", (350, 382), False, 'from Bio import SeqIO\n'), ((469, 507), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input_nucleotide', '"""fasta"""'], {}), "(input_nucleotide, 'fasta')\n", (480, 507), False, 'from Bio import SeqIO\n'), ((523, 558), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input_protein', '"""fasta"""'], {}), "(input_protein, 'fasta')\n", (534, 558), False, 'from Bio import SeqIO\n'), ((1158, 1200), 'Bio.SeqIO.write', 'SeqIO.write', (['codons', 'output_codon', '"""fasta"""'], {}), "(codons, output_codon, 'fasta')\n", (1169, 1200), False, 'from Bio import SeqIO\n'), ((1432, 1479), 'Bio.SeqIO.read', 'SeqIO.read', (['nucleotide_genome_filename', '"""fasta"""'], {}), "(nucleotide_genome_filename, 'fasta')\n", (1442, 1479), False, 'from Bio import SeqIO\n'), ((2492, 2528), 'Bio.SeqIO.write', 'SeqIO.write', (['record', 'output', '"""fasta"""'], {}), "(record, output, 'fasta')\n", (2503, 2528), False, 'from Bio import SeqIO\n'), ((3639, 3682), 'Bio.SeqIO.write', 'SeqIO.write', (['records', 'output_fasta', '"""fasta"""'], {}), "(records, output_fasta, 'fasta')\n", (3650, 3682), False, 'from Bio import SeqIO\n'), ((4586, 4608), 'pandas.read_csv', 'pd.read_csv', (['input_csv'], {}), '(input_csv)\n', (4597, 4608), True, 'import pandas as pd\n'), ((5011, 5051), 'os.path.join', 'os.path.join', (['"""output"""', '"""truth"""', 'dataset'], {}), "('output', 'truth', dataset)\n", (5023, 5051), False, 'import os\n'), ((5066, 5133), 'os.path.join', 'os.path.join', (['output_dir', "('truth-%s-%s-temp' % (dataset, reference))"], {}), "(output_dir, 'truth-%s-%s-temp' % (dataset, reference))\n", (5078, 5133), False, 'import os\n'), ((5152, 5169), 'os.mkdir', 'os.mkdir', (['tmp_dir'], {}), '(tmp_dir)\n', (5160, 5169), False, 'import os\n'), ((5890, 5907), 'os.rmdir', 'os.rmdir', (['tmp_dir'], {}), '(tmp_dir)\n', (5898, 5907), False, 'import os\n'), ((6076, 6128), 'Bio.SeqIO.write', 'SeqIO.write', (['aligned_sequences', 'output_path', '"""fasta"""'], {}), "(aligned_sequences, output_path, 'fasta')\n", (6087, 6128), False, 'from Bio import SeqIO\n'), ((6981, 7017), 'Bio.SeqIO.read', 'SeqIO.read', (['input_reference', '"""fasta"""'], {}), "(input_reference, 'fasta')\n", (6991, 7017), False, 'from Bio import SeqIO\n'), ((8236, 8279), 'Bio.SeqIO.write', 'SeqIO.write', (['records', 'output_fasta', '"""fasta"""'], {}), "(records, output_fasta, 'fasta')\n", (8247, 8279), False, 'from Bio import SeqIO\n'), ((8431, 8472), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['input_bam_path', '"""rb"""'], {}), "(input_bam_path, 'rb')\n", (8450, 8472), False, 'import pysam\n'), ((8616, 8633), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (8630, 8633), True, 'import numpy as np\n'), ((8659, 8726), 'numpy.random.choice', 'np.random.choice', (['number_of_reads', 'downsample_number'], {'replace': '(False)'}), '(number_of_reads, downsample_number, replace=False)\n', (8675, 8726), True, 'import numpy as np\n'), ((8813, 8880), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['output_bam_path', '"""wb"""'], {'header': 'input_bam.header'}), "(output_bam_path, 'wb', header=input_bam.header)\n", (8832, 8880), False, 'import pysam\n'), ((9168, 9196), 'pysam.index', 'pysam.index', (['output_bam_path'], {}), '(output_bam_path)\n', (9179, 9196), 
False, 'import pysam\n'), ((9302, 9340), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input_fasta_path', '"""fasta"""'], {}), "(input_fasta_path, 'fasta')\n", (9313, 9340), False, 'from Bio import SeqIO\n'), ((9401, 9456), 'Bio.SeqIO.write', 'SeqIO.write', (['desired_record', 'output_fasta_path', '"""fasta"""'], {}), "(desired_record, output_fasta_path, 'fasta')\n", (9412, 9456), False, 'from Bio import SeqIO\n'), ((9530, 9559), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bam_path'], {}), '(bam_path)\n', (9549, 9559), False, 'import pysam\n'), ((9570, 9599), 'Bio.SeqIO.read', 'SeqIO.read', (['ref_path', '"""fasta"""'], {}), "(ref_path, 'fasta')\n", (9580, 9599), False, 'from Bio import SeqIO\n'), ((9623, 9659), 'numpy.zeros', 'np.zeros', (['bam.mapped'], {'dtype': 'np.float'}), '(bam.mapped, dtype=np.float)\n', (9631, 9659), True, 'import numpy as np\n'), ((9678, 9714), 'numpy.zeros', 'np.zeros', (['bam.mapped'], {'dtype': 'np.float'}), '(bam.mapped, dtype=np.float)\n', (9686, 9714), True, 'import numpy as np\n'), ((9745, 9781), 'numpy.zeros', 'np.zeros', (['bam.mapped'], {'dtype': 'np.float'}), '(bam.mapped, dtype=np.float)\n', (9753, 9781), True, 'import numpy as np\n'), ((12474, 12504), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['input_bam'], {}), '(input_bam)\n', (12493, 12504), False, 'import pysam\n'), ((12514, 12536), 'pandas.read_csv', 'pd.read_csv', (['input_csv'], {}), '(input_csv)\n', (12525, 12536), True, 'import pandas as pd\n'), ((13329, 13354), 'pandas.read_csv', 'pd.read_csv', (['distance_csv'], {}), '(distance_csv)\n', (13340, 13354), True, 'import pandas as pd\n'), ((14616, 14659), 'Bio.SeqIO.write', 'SeqIO.write', (['records', 'output_fasta', '"""fasta"""'], {}), "(records, output_fasta, 'fasta')\n", (14627, 14659), False, 'from Bio import SeqIO\n'), ((14832, 14868), 'csv.DictWriter', 'csv.DictWriter', (['csvfile', 'field_names'], {}), '(csvfile, field_names)\n', (14846, 14868), False, 'import csv\n'), ((15762, 15798), 'csv.DictWriter', 'csv.DictWriter', (['csvfile', 'field_names'], {}), '(csvfile, field_names)\n', (15776, 15798), False, 'import csv\n'), ((16699, 16829), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': "['superread_id', 'weight', 'true_id', 'smallest_diff', 'smallest_recomb',\n 'start', 'stop']"}), "(csvfile, fieldnames=['superread_id', 'weight', 'true_id',\n 'smallest_diff', 'smallest_recomb', 'start', 'stop'])\n", (16713, 16829), False, 'import csv\n'), ((1615, 1674), 'Bio.SeqIO.parse', 'SeqIO.parse', (['(aligned_filename % (dataset, gene, i))', '"""fasta"""'], {}), "(aligned_filename % (dataset, gene, i), 'fasta')\n", (1626, 1674), False, 'from Bio import SeqIO\n'), ((3761, 3797), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fasta_filename', '"""fasta"""'], {}), "(fasta_filename, 'fasta')\n", (3772, 3797), False, 'from Bio import SeqIO\n'), ((4932, 4965), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input_fasta', '"""fasta"""'], {}), "(input_fasta, 'fasta')\n", (4943, 4965), False, 'from Bio import SeqIO\n'), ((5225, 5259), 'os.path.join', 'os.path.join', (['tmp_dir', '"""ref.fasta"""'], {}), "(tmp_dir, 'ref.fasta')\n", (5237, 5259), False, 'import os\n'), ((5285, 5323), 'os.path.join', 'os.path.join', (['tmp_dir', '"""aligned.fasta"""'], {}), "(tmp_dir, 'aligned.fasta')\n", (5297, 5323), False, 'import os\n'), ((5332, 5377), 'Bio.SeqIO.write', 'SeqIO.write', (['sequence', 'sequence_path', '"""fasta"""'], {}), "(sequence, sequence_path, 'fasta')\n", (5343, 5377), False, 'from Bio import SeqIO\n'), ((5608, 5631), 'subprocess.run', 'subprocess.run', 
(['command'], {}), '(command)\n', (5622, 5631), False, 'import subprocess\n'), ((5827, 5851), 'os.remove', 'os.remove', (['sequence_path'], {}), '(sequence_path)\n', (5836, 5851), False, 'import os\n'), ((5860, 5885), 'os.remove', 'os.remove', (['alignment_path'], {}), '(alignment_path)\n', (5869, 5885), False, 'import os\n'), ((6814, 6864), 'json.dump', 'json.dump', (['pairwise_distances', 'json_file'], {'indent': '(2)'}), '(pairwise_distances, json_file, indent=2)\n', (6823, 6864), False, 'import json\n'), ((7105, 7126), 'json.load', 'json.load', (['input_file'], {}), '(input_file)\n', (7114, 7126), False, 'import json\n'), ((7189, 7210), 'json.load', 'json.load', (['input_file'], {}), '(input_file)\n', (7198, 7210), False, 'import json\n'), ((7899, 7939), 'json.dump', 'json.dump', (['result', 'output_file'], {'indent': '(2)'}), '(result, output_file, indent=2)\n', (7908, 7939), False, 'import json\n'), ((8060, 8080), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (8069, 8080), False, 'import json\n'), ((8100, 8133), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input_fasta', '"""fasta"""'], {}), "(input_fasta, 'fasta')\n", (8111, 8133), False, 'from Bio import SeqIO\n'), ((9362, 9388), 'Bio.SeqIO.to_dict', 'SeqIO.to_dict', (['all_records'], {}), '(all_records)\n', (9375, 9388), False, 'from Bio import SeqIO\n'), ((9915, 9985), 'numpy.array', 'np.array', (['[read.query[pair[0]] for pair in aligned_pairs]'], {'dtype': '"""<U1"""'}), "([read.query[pair[0]] for pair in aligned_pairs], dtype='<U1')\n", (9923, 9985), True, 'import numpy as np\n'), ((10036, 10099), 'numpy.array', 'np.array', (['[ref[pair[1]] for pair in aligned_pairs]'], {'dtype': '"""<U1"""'}), "([ref[pair[1]] for pair in aligned_pairs], dtype='<U1')\n", (10044, 10099), True, 'import numpy as np\n'), ((14070, 14109), 'json.dump', 'json.dump', (['results', 'json_file'], {'indent': '(2)'}), '(results, json_file, indent=2)\n', (14079, 14109), False, 'import json\n'), ((14257, 14277), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (14266, 14277), False, 'import json\n'), ((14297, 14330), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input_fasta', '"""fasta"""'], {}), "(input_fasta, 'fasta')\n", (14308, 14330), False, 'from Bio import SeqIO\n'), ((16454, 16492), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input_superreads', '"""fasta"""'], {}), "(input_superreads, 'fasta')\n", (16465, 16492), False, 'from Bio import SeqIO\n'), ((16511, 16544), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input_fasta', '"""fasta"""'], {}), "(input_fasta, 'fasta')\n", (16522, 16544), False, 'from Bio import SeqIO\n'), ((18586, 18606), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (18595, 18606), False, 'import json\n'), ((3558, 3599), 'Bio.SeqRecord.SeqRecord', 'SeqRecord', (['seq'], {'id': 'header', 'description': '""""""'}), "(seq, id=header, description='')\n", (3567, 3599), False, 'from Bio.SeqRecord import SeqRecord\n'), ((4369, 4474), 'pandas.DataFrame', 'pd.DataFrame', (["{'first_record': first_records, 'second_record': second_records, 'distance':\n distances}"], {}), "({'first_record': first_records, 'second_record':\n second_records, 'distance': distances})\n", (4381, 4474), True, 'import pandas as pd\n'), ((8537, 8585), 'numpy.ceil', 'np.ceil', (['(downsample_percentage * number_of_reads)'], {}), '(downsample_percentage * number_of_reads)\n', (8544, 8585), True, 'import numpy as np\n'), ((11275, 11337), 'pandas.concat', 'pd.concat', (['all_datasets'], {'axis': '(0)', 'sort': '(False)', 'ignore_index': '(True)'}), 
'(all_datasets, axis=0, sort=False, ignore_index=True)\n', (11284, 11337), True, 'import pandas as pd\n'), ((11779, 11799), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (11788, 11799), False, 'import json\n'), ((12339, 12357), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (12351, 12357), True, 'import pandas as pd\n'), ((14997, 15017), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (15006, 15017), False, 'import json\n'), ((15926, 15951), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (15940, 15951), False, 'import os\n'), ((16043, 16063), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (16052, 16063), False, 'import json\n'), ((16611, 16631), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (16620, 16631), False, 'import json\n'), ((2392, 2411), 'Bio.Seq.Seq', 'Seq', (['desired_codons'], {}), '(desired_codons)\n', (2395, 2411), False, 'from Bio.Seq import Seq\n'), ((5664, 5700), 'Bio.SeqIO.parse', 'SeqIO.parse', (['alignment_path', '"""fasta"""'], {}), "(alignment_path, 'fasta')\n", (5675, 5700), False, 'from Bio import SeqIO\n'), ((11040, 11078), 'pandas.read_csv', 'pd.read_csv', (['path'], {'index_col': '"""read_id"""'}), "(path, index_col='read_id')\n", (11051, 11078), True, 'import pandas as pd\n'), ((11528, 11561), 'Bio.SeqIO.parse', 'SeqIO.parse', (['input_fasta', '"""fasta"""'], {}), "(input_fasta, 'fasta')\n", (11539, 11561), False, 'from Bio import SeqIO\n'), ((17284, 17301), 'numpy.arange', 'np.arange', (['n_char'], {}), '(n_char)\n', (17293, 17301), True, 'import numpy as np\n')]
import unittest

import numpy as np
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras.layers import Dense, BatchNormalization, Dropout, Softmax
from sklearn.metrics import accuracy_score

from nncv.data_loader import *
from nncv.loss import *


class TestTFFunction(unittest.TestCase):
    _xyz = np.ones([100, 3])
    nfeature = _xyz.shape[1]
    _data = {'x': _xyz}
    data = tf.data.Dataset.from_tensor_slices(_data).batch(49)
    iterator = data.make_one_shot_iterator()
    model = K.models.Sequential([Dense(2, input_shape=(nfeature,), activation=tf.nn.relu)])

    def test_place_holder(self):
        ph_X = tf.keras.Input(shape=(self.nfeature, ))
        ph_Y = self.model(ph_X)

    def test_iteration(self):
        nextx = self.iterator.get_next()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            try:
                idb = 0
                while True:
                    x = sess.run(nextx)
                    print("batch", idb, "batch shape", x['x'].shape)
                    idb += 1
            except tf.errors.OutOfRangeError:
                pass

    def test_session(self):
        ''' test how the session and iterator works together with feed dict'''
        ph_X = tf.keras.Input(shape=(self.nfeature, ))
        ph_Y = self.model(ph_X)
        nextx = self.iterator.get_next()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            try:
                idb = 0
                while True:
                    x = sess.run(nextx)
                    Y = sess.run(ph_Y, feed_dict={ph_X: x['x']})
                    idb += 1
            except tf.errors.OutOfRangeError:
                pass

    def test_feeddict(self):
        ''' test whether the return value can be a dict for the session '''
        def combo(x):
            a = x + 3
            b = x
            return {'a': a, 'b': b}

        ph_X = tf.keras.Input(shape=(self.nfeature, ))
        # ph_Y = self.model(ph_X)
        # ph_dict = {'a': ph_a, 'b': ph_b}
        ph_dict = combo(ph_X)
        nextx = self.iterator.get_next()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            try:
                idb = 0
                while True:
                    x = sess.run(nextx)
                    Y = sess.run(ph_dict, feed_dict={ph_X: x['x']})
                    idb += 1
            except tf.errors.OutOfRangeError:
                pass

    def test_optimization(self):
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = np.float32(x_train)
        y_train = np.float32(y_train)
        x_test = np.float32(x_test)
        y_test = np.float32(y_test)
        x_train, x_test = x_train / 255.0, x_test / 255.0

        train_batch = tf.data.Dataset.from_tensor_slices({
            'x': x_train, 'y': y_train})
        train_batch = train_batch.batch(32)
        iterator = train_batch.make_initializable_iterator()
        nextx = iterator.get_next()

        model = tf.keras.models.Sequential([
            tf.keras.layers.Flatten(input_shape=(28, 28)),
            tf.keras.layers.Dense(512, activation=tf.nn.relu),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(10, activation=tf.nn.softmax)
        ])

        opt = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999,
                                     epsilon=1e-08, use_locking=False, name='Adam')

        def loss(model, data):
            y_pred = model(data['x'])
            y_true = data['y']
            l = tf.keras.backend.sparse_categorical_crossentropy(
                y_true, y_pred)
            g = tf.gradients(l, model.trainable_variables)
            return l, g, y_pred, y_true

        ph_l, ph_g, ph_p, ph_y = loss(model, nextx)
        training_op = opt.minimize(ph_l)

        init = tf.global_variables_initializer()
        sess = tf.keras.backend.get_session()
        sess.run(init)
        for epoch in range(5):
            sess.run(iterator.initializer)
            for batch in range(10):
                _, l, g, p, y = sess.run((training_op, ph_l, ph_g, ph_p, ph_y))
            p_cont = np.argmax(p, axis=-1)
            e = accuracy_score(p_cont, y)
            print("epoch {}, loss {:.4f}, accuracy{:.4f}".format(
                epoch, np.average(l), e))
[ "sklearn.metrics.accuracy_score", "numpy.ones", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.backend.get_session", "numpy.average", "tensorflow.Session", "tensorflow.keras.layers.Dropout", "numpy.argmax", "tensorflow.global_variables_initializer", "tensorflow.gradients", "tensorflow.keras.layers.Dense", "tensorflow.keras.Input", "tensorflow.keras.backend.sparse_categorical_crossentropy", "tensorflow.keras.layers.Flatten", "tensorflow.train.AdamOptimizer", "numpy.float32" ]
[((322, 339), 'numpy.ones', 'np.ones', (['[100, 3]'], {}), '([100, 3])\n', (329, 339), True, 'import numpy as np\n'), ((684, 722), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(self.nfeature,)'}), '(shape=(self.nfeature,))\n', (698, 722), True, 'import tensorflow as tf\n'), ((1319, 1357), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(self.nfeature,)'}), '(shape=(self.nfeature,))\n', (1333, 1357), True, 'import tensorflow as tf\n'), ((2016, 2054), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(self.nfeature,)'}), '(shape=(self.nfeature,))\n', (2030, 2054), True, 'import tensorflow as tf\n'), ((2729, 2748), 'numpy.float32', 'np.float32', (['x_train'], {}), '(x_train)\n', (2739, 2748), True, 'import numpy as np\n'), ((2767, 2786), 'numpy.float32', 'np.float32', (['y_train'], {}), '(y_train)\n', (2777, 2786), True, 'import numpy as np\n'), ((2804, 2822), 'numpy.float32', 'np.float32', (['x_test'], {}), '(x_test)\n', (2814, 2822), True, 'import numpy as np\n'), ((2840, 2858), 'numpy.float32', 'np.float32', (['y_test'], {}), '(y_test)\n', (2850, 2858), True, 'import numpy as np\n'), ((2940, 3004), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["{'x': x_train, 'y': y_train}"], {}), "({'x': x_train, 'y': y_train})\n", (2974, 3004), True, 'import tensorflow as tf\n'), ((3459, 3578), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)', 'beta1': '(0.9)', 'beta2': '(0.999)', 'epsilon': '(1e-08)', 'use_locking': '(False)', 'name': '"""Adam"""'}), "(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon\n =1e-08, use_locking=False, name='Adam')\n", (3481, 3578), True, 'import tensorflow as tf\n'), ((4056, 4089), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4087, 4089), True, 'import tensorflow as tf\n'), ((4105, 4135), 'tensorflow.keras.backend.get_session', 'tf.keras.backend.get_session', ([], {}), '()\n', (4133, 4135), True, 'import tensorflow as tf\n'), ((404, 445), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['_data'], {}), '(_data)\n', (438, 445), True, 'import tensorflow as tf\n'), ((536, 592), 'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'input_shape': '(nfeature,)', 'activation': 'tf.nn.relu'}), '(2, input_shape=(nfeature,), activation=tf.nn.relu)\n', (541, 592), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Dropout, Softmax\n'), ((843, 855), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (853, 855), True, 'import tensorflow as tf\n'), ((1447, 1459), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1457, 1459), True, 'import tensorflow as tf\n'), ((2219, 2231), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2229, 2231), True, 'import tensorflow as tf\n'), ((3765, 3829), 'tensorflow.keras.backend.sparse_categorical_crossentropy', 'tf.keras.backend.sparse_categorical_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3813, 3829), True, 'import tensorflow as tf\n'), ((3863, 3905), 'tensorflow.gradients', 'tf.gradients', (['l', 'model.trainable_variables'], {}), '(l, model.trainable_variables)\n', (3875, 3905), True, 'import tensorflow as tf\n'), ((886, 919), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (917, 919), True, 'import tensorflow as tf\n'), ((1490, 1523), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1521, 
1523), True, 'import tensorflow as tf\n'), ((2262, 2295), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2293, 2295), True, 'import tensorflow as tf\n'), ((3217, 3262), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (3240, 3262), True, 'import tensorflow as tf\n'), ((3276, 3325), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': 'tf.nn.relu'}), '(512, activation=tf.nn.relu)\n', (3297, 3325), True, 'import tensorflow as tf\n'), ((3339, 3367), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (3362, 3367), True, 'import tensorflow as tf\n'), ((3381, 3432), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), '(10, activation=tf.nn.softmax)\n', (3402, 3432), True, 'import tensorflow as tf\n'), ((4375, 4396), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(-1)'}), '(p, axis=-1)\n', (4384, 4396), True, 'import numpy as np\n'), ((4417, 4442), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['p_cont', 'y'], {}), '(p_cont, y)\n', (4431, 4442), False, 'from sklearn.metrics import accuracy_score\n'), ((4532, 4545), 'numpy.average', 'np.average', (['l'], {}), '(l)\n', (4542, 4545), True, 'import numpy as np\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- """Definition of the NSDE algorithm""" import numpy as np try: from openmdao.utils.concurrent import concurrent_eval except ModuleNotFoundError: import warnings warnings.warn("OpenMDAO is not installed. Concurrent evaluation is not available.") from . import sorting, hv from .strategies import EvolutionStrategy def mpi_fobj_wrapper(fobj): """ Wrapper for the objective function to keep track of individual indices when running under MPI. Parameters ---------- fobj : callable Original objective function Returns ------- callable Wrapped function which, in addition to x, takes the individual's index and returns it along with f """ def wrapped(x, ii): return fobj(x), ii return wrapped class NSDE: """ Non-dominated Sorting Differential Evolution (NSDE) Algorithm. Attributes ---------- fobj : callable Objective function. Should have a single argument of type array_like which corresponds to the design vector. Should have either a single float or 1D array output corresponding to the objective function value(s), or two array_like outputs, the first of which corresponds to the objective function value(s) and the second to the constraint violations. Constraints are assumed to be satisfied if constraint violations <= constraint tolerance. lb, ub : array_like Lower and upper bounds range : array_like Distances between the lower and upper bounds f, cr : float Mutation rate and crossover probabilities adaptivity : int Method of self-adaptivity. - 0: No self-adaptivity. Specified mutation rate and crossover probability are used. - 1: Simple self-adaptability. Mutation rate and crossover probability are optimized Mote-Carlo style. - 2: Complex self-adaptability. Mutation rate and crossover probability are mutated with specified strategy. max_gen : int Maximum number of generations tolx, tolf : float Tolerances on the design vectors' and objective function values' spreads tolc : float Constraint violation tolerance. 
n_dim : int Number of dimension of the problem n_pop : int Population size rng : np.random.Generator Random number generator comm : MPI communicator or None The MPI communicator that will be used objective evaluation for each generation model_mpi : None or tuple If the model in fobj is also parallel, then this will contain a tuple with the the total number of population points to evaluate concurrently, and the color of the point to evaluate on this rank strategy : EvolutionStrategy Evolution strategy to use for procreation pop : np.array List of the individuals' chromosomes making up the current population fit : np.array Fitness of the individuals in the population con : np.array Constraint violations of the individuals in the population generation : int Generation counter """ def __init__( self, strategy=None, mut=0.85, crossp=1.0, adaptivity=0, max_gen=1000, tolx=1e-8, tolf=1e-8, tolc=1e-6, n_pop=None, seed=None, comm=None, model_mpi=None, ): self.fobj = None self.lb, self.ub = None, None self.range = 0 self.f = mut self.cr = crossp self.max_gen = max_gen self.tolx = tolx self.tolf = tolf self.tolc = tolc self.n_dim = 0 self.n_obj = 0 self.n_con = 0 self.n_pop = n_pop self.rng = np.random.default_rng(seed) if adaptivity not in [0, 1, 2]: raise ValueError("self_adaptivity must be one of (0, 1, 2).") self.adaptivity = adaptivity self.comm = comm self.model_mpi = model_mpi if strategy is None: self.strategy = EvolutionStrategy("rand-to-best/1/bin/random") elif isinstance(strategy, EvolutionStrategy): self.strategy = strategy elif isinstance(strategy, str): self.strategy = EvolutionStrategy(strategy) else: raise ValueError( "Argument `strategy` should be None, a str, or an instance of EvolutionStrategy." ) self.pop = None self.fit = None self.con = None self.fronts = None self.dx, self.df, self.hv = np.inf, np.inf, np.inf self.pareto_lb = +np.inf self.pareto_ub = -np.inf self.generation = 0 self._is_initialized = False self._running_under_mpi = comm is not None and hasattr(comm, "bcast") def init(self, fobj, bounds, pop=None): """ Initialize the algorithm. Parameters ---------- fobj : callable Objective function bounds : list of 2-tuples List of (lower, upper) bounds pop : None or array_like, optional Initial population. If None, it will be created at random. 
""" # Set default values for the mutation and crossover parameters if self.f is None or 0.0 > self.f > 1.0: self.f = 0.85 if self.cr is None or 0.0 > self.cr > 1.0: self.cr = 1.0 # Prepare the objective function and compute the bounds and variable range self.fobj = fobj if self.comm is None else mpi_fobj_wrapper(fobj) self.lb, self.ub = np.asarray(bounds).T self.range = self.ub - self.lb # Compute the number of dimensions self.n_dim = len(bounds) def create_f_cr(adaptivity, f, cr, n, rng): # Create random mutation/crossover parameters if self-adaptivity is used if adaptivity == 0: f = f * np.ones(n) cr = cr * np.ones(n) elif adaptivity == 1: f = rng.uniform(size=n) * 0.9 + 0.1 cr = rng.uniform(size=n) elif adaptivity == 2: f = rng.uniform(size=n) * 0.15 + 0.5 cr = rng.uniform(size=n) * 0.15 + 0.5 return f, cr adjust_pop = False if pop is not None: self.n_pop = pop.shape[0] self.pop = pop self.f, self.cr = create_f_cr( self.adaptivity, self.f, self.cr, self.n_pop, self.rng ) else: if self.n_pop is None or self.n_pop <= 0: self.pop = self.rng.uniform(self.lb, self.ub, size=(1, self.n_dim)) adjust_pop = True self.n_pop = 1 else: self.pop = self.rng.uniform( self.lb, self.ub, size=(self.n_pop, self.n_dim) ) self.f, self.cr = create_f_cr( self.adaptivity, self.f, self.cr, self.n_pop, self.rng ) # Ensure all processors have the same population and mutation/crossover parameters if self._running_under_mpi: self.pop, self.f, self.cr = self.comm.bcast( (self.pop, self.f, self.cr), root=0 ) self.fit, self.con = self(self.pop) self.n_obj = self.fit.shape[1] if self.con is not None: self.n_con = self.con.shape[1] if adjust_pop: self.n_pop = 5 * self.n_dim * self.n_obj # If we are running under MPI, expand population to fully exploit all processors if self._running_under_mpi: self.n_pop = int(np.ceil(self.n_pop / self.comm.size) * self.comm.size) self.pop = np.concatenate( ( self.pop, self.rng.uniform( self.lb, self.ub, size=(self.n_pop - 1, self.n_dim) ), ) ) self.f, self.cr = create_f_cr( self.adaptivity, self.f, self.cr, self.n_pop, self.rng ) if self._running_under_mpi: self.pop, self.f, self.cr = self.comm.bcast( (self.pop, self.f, self.cr), root=0 ) self.fit, self.con = self(self.pop) self.update() # Set generation counter to 0 self.generation = 0 # Mark class as initialized self._is_initialized = True @property def is_initialized(self): """bool: True if the algorithm has been initialized, False if not.""" return self._is_initialized def __iter__(self): """ This class is an iterator itself. Raises ------ RuntimeError If this class is being used as an iterator before it has been initialized. """ if not self._is_initialized: raise RuntimeError("NSDE is not yet initialized.") return self def __next__(self): """ Main iteration. Returns ------- NSDE The new state at the next generation. 
""" if ( self.generation < self.max_gen and self.dx > self.tolx and self.df > self.tolf ): # Create a new population and mutation/crossover parameters pop_new, f_new, cr_new = self.procreate() # Ensure all processors have the same updated population and mutation/crossover parameters if self._running_under_mpi: pop_new, f_new, cr_new = self.comm.bcast( (pop_new, f_new, cr_new), root=0 ) # Evaluate the fitness of the new population fit_new, con_new = self(pop_new) # Update the class with the new data self.update(pop_new, fit_new, con_new, f_new, cr_new) # Compute spreads and update generation counter if self.n_obj == 1: self.dx = np.linalg.norm(self.pop[0] - self.pop[-1]) self.df = np.abs(self.fit[0] - self.fit[-1]) else: pareto = self.fit[self.fronts[0]] self.pareto_lb = np.minimum(self.pareto_lb, np.min(pareto, axis=0, keepdims=True)) self.pareto_ub = np.maximum(self.pareto_ub, np.max(pareto, axis=0, keepdims=True)) pareto_norm = 1 + (pareto - self.pareto_lb) / (self.pareto_ub - self.pareto_lb) self.hv = hv.hv(pareto_norm, 2.1 * np.ones(self.n_obj)) self.generation += 1 # Return the new state return self else: raise StopIteration def __call__(self, pop): """ Evaluate the fitness of the given population. Parameters ---------- pop : array_like List of chromosomes of the individuals in the population Returns ------- fit : np.array Fitness of the individuals in the given population con : np.array or None Constraint violations of the individuals in the given population if present. None otherwise. Notes ----- If this class has an MPI communicator the individuals will be evaluated in parallel. Otherwise function evaluation will be serial. """ if self.is_initialized: fit = np.empty((self.n_pop, self.n_obj)) con = None if self.n_con is None else np.empty((self.n_pop, self.n_con)) else: fit = pop.shape[0] * [None] con = None def handle_result(_v, _i, _fit, _con): if isinstance(_v, tuple): _fit[_i] = np.asarray(_v[0]) c = np.asarray(_v[1]) if _con is None: _con = np.empty((pop.shape[0], c.size)) _con[_i] = c else: _fit[_i] = _v return _fit, _con # Evaluate generation if self._running_under_mpi: # Construct run cases cases = [((item, ii), None) for ii, item in enumerate(pop)] # Pad the cases with some dummy cases to make the cases divisible amongst the procs. extra = len(cases) % self.comm.size if extra > 0: for j in range(self.comm.size - extra): cases.append(cases[-1]) # Compute the fitness of all individuals in parallel using MPI results = concurrent_eval( self.fobj, cases, self.comm, allgather=True, model_mpi=self.model_mpi ) # Gather the results for result in results: retval, err = result if err is not None or retval is None: raise Exception(err) else: fit, con = handle_result(*retval, fit, con) else: # Evaluate the population in serial for idx, ind in enumerate(pop): val = self.fobj(ind) fit, con = handle_result(val, idx, fit, con) # Turn all NaNs in the fitnesses into infs fit = np.reshape(np.where(np.isnan(fit), np.inf, fit), (pop.shape[0], -1)) if con is not None: con = np.reshape(np.where(np.isnan(con), np.inf, con), (pop.shape[0], -1)) return fit, con def run(self): for _ in self: pass def procreate(self): """ Generate a new population using the selected evolution strategy. 
Returns ------- pop_new : np.array Chromosomes of the individuals in the next generation f_new : np.array New set of mutation rates cr_new : np.array New set of crossover probabilities """ pop_old_norm = (np.copy(self.pop) - self.lb) / self.range pop_new_norm = np.empty_like(pop_old_norm) # If there are constraints, augment the fitness to penalize infeasible individuals while procreating. # This stops the best and rand-to-best strategies to keep the best infeasible individual alive indefinitely. if self.n_con and False: fit = np.where( np.any(self.con >= 1e-6, axis=1, keepdims=True), np.linalg.norm(self.con, axis=1, keepdims=True) + np.max(self.fit), self.fit, ) else: fit = self.fit if self.adaptivity == 0 or self.adaptivity == 1: if self.adaptivity == 0: # No adaptivity. Use static f and cr. f_new = self.f cr_new = self.cr else: # Simple adaptivity. Use new f and cr. f_new = np.where( self.rng.uniform(size=self.n_pop) < 0.9, self.f, self.rng.uniform(size=self.n_pop) * 0.9 + 0.1, ) cr_new = np.where( self.rng.uniform(size=self.n_pop) < 0.9, self.cr, self.rng.uniform(size=self.n_pop), ) for idx in range(self.n_pop): pop_new_norm[idx], _, _ = self.strategy( idx, pop_old_norm, fit, self.fronts, f_new, cr_new, self.rng, False ) else: # Complex adaptivity. Mutate f and cr. f_new = np.copy(self.f) cr_new = np.copy(self.cr) for idx in range(self.n_pop): pop_new_norm[idx], f_new[idx], cr_new[idx] = self.strategy( idx, pop_old_norm, fit, self.fronts, self.f, self.cr, self.rng, True ) pop_new = self.lb + self.range * np.asarray(pop_new_norm) return pop_new, f_new, cr_new def update(self, pop_new=None, fit_new=None, con_new=None, f_new=None, cr_new=None): """ Update the population (and f/cr if self-adaptive). Parameters ---------- pop_new : np.array or None, optional Proposed new population resulting from procreation fit_new : np.array or None, optional Fitness of the individuals in the new population con_new : np.array or None, optional Constraint violations of the individuals in the new population f_new : np.array or None, optional New set of mutation rates cr_new : np.array or None, optional New set of crossover probabilities Notes ----- Individuals in the old population will only be replaced by the new ones if they have improved fitness. Mutation rate and crossover probabilities will only be replaced if self-adaptivity is turned on and if their corresponding individuals have improved fitness. 
""" if self.n_obj == 1: self._update_single(pop_new, fit_new, con_new, f_new, cr_new) else: self._update_multi(pop_new, fit_new, con_new, f_new, cr_new) def _update_single( self, pop_new=None, fit_new=None, con_new=None, f_new=None, cr_new=None ): if self.n_con: cs = np.sum( np.where(np.greater(self.con, self.tolc), self.con, 0.0), axis=1 ) else: cs = 0 if ( pop_new is not None and fit_new is not None and f_new is not None and cr_new is not None ): if self.n_con: c_new = np.all(con_new <= self.tolc, axis=1) c_old = np.all(self.con <= self.tolc, axis=1) cs_new = np.sum( np.where(np.greater(con_new, self.tolc), con_new, 0.0), axis=1 ) improved_indices = np.argwhere( ((c_new & c_old) & (fit_new <= self.fit).flatten()) + (c_new & ~c_old) + ((~c_new & ~c_old) & (cs_new <= cs)) ) self.con[improved_indices] = con_new[improved_indices] cs[improved_indices] = cs_new[improved_indices] else: improved_indices = np.argwhere((fit_new <= self.fit).flatten()) self.pop[improved_indices] = pop_new[improved_indices] self.fit[improved_indices] = fit_new[improved_indices] if self.adaptivity != 0: self.f[improved_indices] = f_new[improved_indices] self.cr[improved_indices] = cr_new[improved_indices] # Sort population so the best individual is always the first idx_sort = np.argsort( self.fit.flatten() + np.where(cs != 0.0, cs * np.max(self.fit), 0.0) ) self.pop = self.pop[idx_sort] self.fit = self.fit[idx_sort] if self.n_con: self.con = self.con[idx_sort] if self.adaptivity != 0: self.f = self.f[idx_sort] self.cr = self.cr[idx_sort] def _update_multi( self, pop_new=None, fit_new=None, con_new=None, f_new=None, cr_new=None ): if ( pop_new is not None and fit_new is not None and f_new is not None and cr_new is not None ): self.pop = np.concatenate((self.pop, pop_new)) self.fit = np.concatenate((self.fit, fit_new)) if self.n_con: self.con = np.concatenate((self.con, con_new)) if self.adaptivity != 0: self.f = np.concatenate((self.f, f_new)) self.cr = np.concatenate((self.cr, cr_new)) if self.n_con: fronts = sorting.nonDominatedSorting(self.fit, self.con, self.n_pop) else: fronts = sorting.nonDominatedSorting(self.fit, self.n_pop) fronts[-1] = np.asarray(fronts[-1])[ sorting.crowdingDistanceSorting(self.fit[fronts[-1]])[ : (self.n_pop - sum(len(f) for f in fronts[:-1])) ] ].tolist() new_idxs = [] counter = 0 self.fronts = [] for front in fronts: new_idxs += front self.fronts += [list(range(counter, counter + len(front)))] counter += len(front) self.pop = self.pop[new_idxs] self.fit = self.fit[new_idxs] if self.n_con: self.con = self.con[new_idxs] if self.adaptivity != 0: self.f = self.f[new_idxs] self.cr = self.cr[new_idxs]
[ "numpy.copy", "numpy.abs", "numpy.greater", "numpy.ceil", "numpy.random.default_rng", "numpy.ones", "numpy.asarray", "numpy.min", "numpy.any", "numpy.max", "numpy.empty_like", "numpy.empty", "numpy.concatenate", "numpy.linalg.norm", "warnings.warn", "numpy.isnan", "numpy.all", "openmdao.utils.concurrent.concurrent_eval" ]
[((221, 309), 'warnings.warn', 'warnings.warn', (['"""OpenMDAO is not installed. Concurrent evaluation is not available."""'], {}), "(\n 'OpenMDAO is not installed. Concurrent evaluation is not available.')\n", (234, 309), False, 'import warnings\n'), ((3826, 3853), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (3847, 3853), True, 'import numpy as np\n'), ((14070, 14097), 'numpy.empty_like', 'np.empty_like', (['pop_old_norm'], {}), '(pop_old_norm)\n', (14083, 14097), True, 'import numpy as np\n'), ((5681, 5699), 'numpy.asarray', 'np.asarray', (['bounds'], {}), '(bounds)\n', (5691, 5699), True, 'import numpy as np\n'), ((11545, 11579), 'numpy.empty', 'np.empty', (['(self.n_pop, self.n_obj)'], {}), '((self.n_pop, self.n_obj))\n', (11553, 11579), True, 'import numpy as np\n'), ((12654, 12745), 'openmdao.utils.concurrent.concurrent_eval', 'concurrent_eval', (['self.fobj', 'cases', 'self.comm'], {'allgather': '(True)', 'model_mpi': 'self.model_mpi'}), '(self.fobj, cases, self.comm, allgather=True, model_mpi=self\n .model_mpi)\n', (12669, 12745), False, 'from openmdao.utils.concurrent import concurrent_eval\n'), ((15600, 15615), 'numpy.copy', 'np.copy', (['self.f'], {}), '(self.f)\n', (15607, 15615), True, 'import numpy as np\n'), ((15637, 15653), 'numpy.copy', 'np.copy', (['self.cr'], {}), '(self.cr)\n', (15644, 15653), True, 'import numpy as np\n'), ((19477, 19512), 'numpy.concatenate', 'np.concatenate', (['(self.pop, pop_new)'], {}), '((self.pop, pop_new))\n', (19491, 19512), True, 'import numpy as np\n'), ((19536, 19571), 'numpy.concatenate', 'np.concatenate', (['(self.fit, fit_new)'], {}), '((self.fit, fit_new))\n', (19550, 19571), True, 'import numpy as np\n'), ((10146, 10188), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.pop[0] - self.pop[-1])'], {}), '(self.pop[0] - self.pop[-1])\n', (10160, 10188), True, 'import numpy as np\n'), ((10215, 10249), 'numpy.abs', 'np.abs', (['(self.fit[0] - self.fit[-1])'], {}), '(self.fit[0] - self.fit[-1])\n', (10221, 10249), True, 'import numpy as np\n'), ((11630, 11664), 'numpy.empty', 'np.empty', (['(self.n_pop, self.n_con)'], {}), '((self.n_pop, self.n_con))\n', (11638, 11664), True, 'import numpy as np\n'), ((11855, 11872), 'numpy.asarray', 'np.asarray', (['_v[0]'], {}), '(_v[0])\n', (11865, 11872), True, 'import numpy as np\n'), ((11893, 11910), 'numpy.asarray', 'np.asarray', (['_v[1]'], {}), '(_v[1])\n', (11903, 11910), True, 'import numpy as np\n'), ((13348, 13361), 'numpy.isnan', 'np.isnan', (['fit'], {}), '(fit)\n', (13356, 13361), True, 'import numpy as np\n'), ((14005, 14022), 'numpy.copy', 'np.copy', (['self.pop'], {}), '(self.pop)\n', (14012, 14022), True, 'import numpy as np\n'), ((14403, 14451), 'numpy.any', 'np.any', (['(self.con >= 1e-06)'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.con >= 1e-06, axis=1, keepdims=True)\n', (14409, 14451), True, 'import numpy as np\n'), ((15922, 15946), 'numpy.asarray', 'np.asarray', (['pop_new_norm'], {}), '(pop_new_norm)\n', (15932, 15946), True, 'import numpy as np\n'), ((17723, 17759), 'numpy.all', 'np.all', (['(con_new <= self.tolc)'], {'axis': '(1)'}), '(con_new <= self.tolc, axis=1)\n', (17729, 17759), True, 'import numpy as np\n'), ((17784, 17821), 'numpy.all', 'np.all', (['(self.con <= self.tolc)'], {'axis': '(1)'}), '(self.con <= self.tolc, axis=1)\n', (17790, 17821), True, 'import numpy as np\n'), ((19626, 19661), 'numpy.concatenate', 'np.concatenate', (['(self.con, con_new)'], {}), '((self.con, con_new))\n', (19640, 19661), True, 'import numpy as 
np\n'), ((19725, 19756), 'numpy.concatenate', 'np.concatenate', (['(self.f, f_new)'], {}), '((self.f, f_new))\n', (19739, 19756), True, 'import numpy as np\n'), ((19783, 19816), 'numpy.concatenate', 'np.concatenate', (['(self.cr, cr_new)'], {}), '((self.cr, cr_new))\n', (19797, 19816), True, 'import numpy as np\n'), ((6012, 6022), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (6019, 6022), True, 'import numpy as np\n'), ((6049, 6059), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (6056, 6059), True, 'import numpy as np\n'), ((10378, 10415), 'numpy.min', 'np.min', (['pareto'], {'axis': '(0)', 'keepdims': '(True)'}), '(pareto, axis=0, keepdims=True)\n', (10384, 10415), True, 'import numpy as np\n'), ((10477, 10514), 'numpy.max', 'np.max', (['pareto'], {'axis': '(0)', 'keepdims': '(True)'}), '(pareto, axis=0, keepdims=True)\n', (10483, 10514), True, 'import numpy as np\n'), ((11971, 12003), 'numpy.empty', 'np.empty', (['(pop.shape[0], c.size)'], {}), '((pop.shape[0], c.size))\n', (11979, 12003), True, 'import numpy as np\n'), ((13463, 13476), 'numpy.isnan', 'np.isnan', (['con'], {}), '(con)\n', (13471, 13476), True, 'import numpy as np\n'), ((14468, 14515), 'numpy.linalg.norm', 'np.linalg.norm', (['self.con'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.con, axis=1, keepdims=True)\n', (14482, 14515), True, 'import numpy as np\n'), ((14518, 14534), 'numpy.max', 'np.max', (['self.fit'], {}), '(self.fit)\n', (14524, 14534), True, 'import numpy as np\n'), ((17391, 17422), 'numpy.greater', 'np.greater', (['self.con', 'self.tolc'], {}), '(self.con, self.tolc)\n', (17401, 17422), True, 'import numpy as np\n'), ((20028, 20050), 'numpy.asarray', 'np.asarray', (['fronts[-1]'], {}), '(fronts[-1])\n', (20038, 20050), True, 'import numpy as np\n'), ((7764, 7800), 'numpy.ceil', 'np.ceil', (['(self.n_pop / self.comm.size)'], {}), '(self.n_pop / self.comm.size)\n', (7771, 7800), True, 'import numpy as np\n'), ((10663, 10682), 'numpy.ones', 'np.ones', (['self.n_obj'], {}), '(self.n_obj)\n', (10670, 10682), True, 'import numpy as np\n'), ((17884, 17914), 'numpy.greater', 'np.greater', (['con_new', 'self.tolc'], {}), '(con_new, self.tolc)\n', (17894, 17914), True, 'import numpy as np\n'), ((18895, 18911), 'numpy.max', 'np.max', (['self.fit'], {}), '(self.fit)\n', (18901, 18911), True, 'import numpy as np\n')]
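A minimal usage sketch for the NSDE driver in the sample above. Only the constructor, init and run signatures are taken from the code; the objective function, bounds and parameter values are illustrative.

import numpy as np

def sphere(x):
    # Scalar fitness to minimise; returning a (fitness, constraints) tuple is also accepted.
    return np.sum(x ** 2)

de = NSDE(strategy="rand-to-best/1/bin/random", max_gen=200, tolf=1e-10, seed=42)
de.init(sphere, bounds=[(-5.0, 5.0)] * 4)   # 4 design variables in [-5, 5]
de.run()                                    # iterate until max_gen or the x/f tolerances are hit
best_x, best_f = de.pop[0], de.fit[0]       # single-objective runs keep the population sorted, best first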
import sys import os import numpy as np import cv2 from PIL import Image from skimage.morphology import binary_dilation import time def result_fusion(data_list,label_list=None,save_path=None): len_ = len(os.listdir(data_list[0])) count = 0 for item in os.scandir(data_list[0]): img_list = [item.path] + [os.path.join(case, item.name) for case in data_list[1:]] palette = Image.open('./result/pspnet/results/A151678.png').getpalette() mask = np.array(Image.open(img_list[0]),dtype=np.uint8) for label in label_list: tmp_mask = np.zeros_like(mask,dtype=np.uint8) for img_path in img_list: tmp_mask += (np.array(Image.open(img_path)) == label).astype(np.uint8) binary_mask = (tmp_mask > len(data_list)/2).astype(np.uint8) if label == 4 or label==5: binary_mask = binary_dilation(binary_mask) mask[binary_mask == 1] = label mask = Image.fromarray(mask,mode='P') mask.putpalette(palette) mask.save(os.path.join(save_path,item.name)) count += 1 sys.stdout.write('\rCurrent %d/%d'%(count, len_)) sys.stdout.write('\n') def result_fusion_v2(data_list,label_list=None,save_path=None,shape=(256,256),weight=None): len_ = len(os.listdir(data_list[0])) count = 0 for item in os.scandir(data_list[0]): img_list = [item.path] + [os.path.join(case, item.name) for case in data_list[1:]] palette = Image.open('./result/pspnet/results/A151678.png').getpalette() mask = np.zeros((len(label_list),) + shape,dtype=np.uint8) for i, img_path in enumerate(img_list): tmp_mask = np.zeros_like(mask,dtype=np.uint8) for label in label_list: temp = (np.array(Image.open(img_path)) == label).astype(np.uint8) tmp_mask[label,...] = temp mask[tmp_mask == 1] += weight[i] mask = Image.fromarray(np.argmax(mask,axis=0),mode='P') mask.putpalette(palette) mask.save(os.path.join(save_path,item.name)) count += 1 sys.stdout.write('\rCurrent %d/%d'%(count, len_)) sys.stdout.write('\n') if __name__ == "__main__": start = time.time() result_list = ['./result/t4/results','./result/t3/results','./result/pspnet/results','./result/deeplab_rs/results'] # result_fusion(result_list,list(range(7)),'./result/results') result_fusion_v2(result_list,list(range(7)),'./result/results',weight=[9,8,7,5]) print('Run time: %.4f'%(time.time() - start))
[ "skimage.morphology.binary_dilation", "PIL.Image.fromarray", "os.listdir", "PIL.Image.open", "os.scandir", "numpy.zeros_like", "os.path.join", "numpy.argmax", "time.time", "sys.stdout.write" ]
[((266, 290), 'os.scandir', 'os.scandir', (['data_list[0]'], {}), '(data_list[0])\n', (276, 290), False, 'import os\n'), ((1172, 1194), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (1188, 1194), False, 'import sys\n'), ((1360, 1384), 'os.scandir', 'os.scandir', (['data_list[0]'], {}), '(data_list[0])\n', (1370, 1384), False, 'import os\n'), ((2171, 2193), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2187, 2193), False, 'import sys\n'), ((2239, 2250), 'time.time', 'time.time', ([], {}), '()\n', (2248, 2250), False, 'import time\n'), ((210, 234), 'os.listdir', 'os.listdir', (['data_list[0]'], {}), '(data_list[0])\n', (220, 234), False, 'import os\n'), ((973, 1004), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {'mode': '"""P"""'}), "(mask, mode='P')\n", (988, 1004), False, 'from PIL import Image\n'), ((1117, 1168), 'sys.stdout.write', 'sys.stdout.write', (["('\\rCurrent %d/%d' % (count, len_))"], {}), "('\\rCurrent %d/%d' % (count, len_))\n", (1133, 1168), False, 'import sys\n'), ((1304, 1328), 'os.listdir', 'os.listdir', (['data_list[0]'], {}), '(data_list[0])\n', (1314, 1328), False, 'import os\n'), ((2116, 2167), 'sys.stdout.write', 'sys.stdout.write', (["('\\rCurrent %d/%d' % (count, len_))"], {}), "('\\rCurrent %d/%d' % (count, len_))\n", (2132, 2167), False, 'import sys\n'), ((488, 511), 'PIL.Image.open', 'Image.open', (['img_list[0]'], {}), '(img_list[0])\n', (498, 511), False, 'from PIL import Image\n'), ((584, 619), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'np.uint8'}), '(mask, dtype=np.uint8)\n', (597, 619), True, 'import numpy as np\n'), ((1055, 1089), 'os.path.join', 'os.path.join', (['save_path', 'item.name'], {}), '(save_path, item.name)\n', (1067, 1089), False, 'import os\n'), ((1696, 1731), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'np.uint8'}), '(mask, dtype=np.uint8)\n', (1709, 1731), True, 'import numpy as np\n'), ((1970, 1993), 'numpy.argmax', 'np.argmax', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (1979, 1993), True, 'import numpy as np\n'), ((2054, 2088), 'os.path.join', 'os.path.join', (['save_path', 'item.name'], {}), '(save_path, item.name)\n', (2066, 2088), False, 'import os\n'), ((326, 355), 'os.path.join', 'os.path.join', (['case', 'item.name'], {}), '(case, item.name)\n', (338, 355), False, 'import os\n'), ((401, 450), 'PIL.Image.open', 'Image.open', (['"""./result/pspnet/results/A151678.png"""'], {}), "('./result/pspnet/results/A151678.png')\n", (411, 450), False, 'from PIL import Image\n'), ((886, 914), 'skimage.morphology.binary_dilation', 'binary_dilation', (['binary_mask'], {}), '(binary_mask)\n', (901, 914), False, 'from skimage.morphology import binary_dilation\n'), ((1420, 1449), 'os.path.join', 'os.path.join', (['case', 'item.name'], {}), '(case, item.name)\n', (1432, 1449), False, 'import os\n'), ((1495, 1544), 'PIL.Image.open', 'Image.open', (['"""./result/pspnet/results/A151678.png"""'], {}), "('./result/pspnet/results/A151678.png')\n", (1505, 1544), False, 'from PIL import Image\n'), ((2551, 2562), 'time.time', 'time.time', ([], {}), '()\n', (2560, 2562), False, 'import time\n'), ((695, 715), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (705, 715), False, 'from PIL import Image\n'), ((1801, 1821), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1811, 1821), False, 'from PIL import Image\n')]
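The weighted voting inside result_fusion_v2 can be seen on a toy array, without any PNG input or palette handling; the predictions and weights below are made up.

import numpy as np

preds = np.array([[0, 1, 2, 2],      # per-pixel labels from model 1
                  [0, 1, 1, 2],      # ... model 2
                  [1, 1, 2, 0]])     # ... model 3
weights = [9, 8, 7]                  # one vote weight per model, analogous to the __main__ call
n_labels = 3

votes = np.zeros((n_labels, preds.shape[1]))
for w, p in zip(weights, preds):
    votes[p, np.arange(preds.shape[1])] += w   # accumulate weighted votes per label
fused = votes.argmax(axis=0)                 # -> array([0, 1, 2, 2])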
import numpy as np import matplotlib.pyplot as plt from .single_unit import PSTH def shiftappend(arr, shift, end=None, direction='left'): if isinstance(end, type(None)): end = arr[-1] if direction == 'left': return np.hstack((arr[arr > shift]-shift, arr[arr < shift]+end-shift)) elif direction == 'right': return np.hstack((arr[arr < shift]-end+shift, arr[arr < shift]+shift)) else: raise ValueError('unknown direction: %s'%direction) def crosscorrelogram(target, reference, ROI=(-0.5,0.5), binsize=.01, shift=None, skip_plot=False): """ Cross Correlation between two unit, optionally corrected by shift predictor. arguments: - target: the target spike train as 1d numpy.array - reference: the reference spike train as 1d numpy.array keyword arguments: - shift: shift size, if None then skip the shift predictor correction [default: None] - ROI: region of interest as tuple [default: (-0.5, 0.5)] - binsize: the size of each bin [default: 0.01] - skip_plot: if True then skip auto plot crosscorrelogram [default: False] return: - crosscorrelogram: as in 1d numpy.array """ _xcorr, _ = PSTH(target, reference, ROI, binsize, True) if isinstance(shift, int) or isinstance(shift, float): _shift_reference = shiftappend(reference, shift) _xcorr_shift, _ = PSTH(target, _shift_reference, ROI, binsize, True) _xcorr = _xcorr - _xcorr_shift elif isinstance(shift, list) or isinstance(shift, np.ndarray): _xcorr_shift = np.zeros_like(_xcorr) for item in shift: _shift_reference = shiftappend(reference, item) _xcorr_shift_item, _ = PSTH(target, _shift_reference, ROI, binsize, True) _xcorr_shift = _xcorr_shift + _xcorr_shift_item/np.size(shift) _xcorr = _xcorr - _xcorr_shift else: _xcorr_shift = None if not skip_plot: plt.figure(figsize=(16,4)) plt.subplot(1,2,2) _tspec = np.linspace(ROI[0], ROI[1]-1/int((ROI[1]-ROI[0])/binsize), int((ROI[1]-ROI[0])/binsize)) plt.bar(_tspec+binsize/2, _xcorr, width=binsize) plt.vlines([0], 0, np.max(_xcorr)*1.05, linestyle='--', alpha=0.5) plt.xlim((ROI[0], ROI[-1])) plt.title('crosscorrelogram') if not isinstance(_xcorr_shift, type(None)): plt.subplot(1,2,1) plt.bar(_tspec+binsize/2, _xcorr_shift, width=binsize) plt.vlines([0], 0, np.max(_xcorr)*1.05, linestyle='--', alpha=0.5) plt.xlim((ROI[0], ROI[-1])) plt.title('shift predictor') plt.show() return _xcorr
[ "numpy.hstack", "numpy.size", "numpy.zeros_like", "numpy.max", "matplotlib.pyplot.figure", "matplotlib.pyplot.bar", "matplotlib.pyplot.title", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ]
[((246, 315), 'numpy.hstack', 'np.hstack', (['(arr[arr > shift] - shift, arr[arr < shift] + end - shift)'], {}), '((arr[arr > shift] - shift, arr[arr < shift] + end - shift))\n', (255, 315), True, 'import numpy as np\n'), ((1975, 2002), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 4)'}), '(figsize=(16, 4))\n', (1985, 2002), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2030), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2021, 2030), True, 'import matplotlib.pyplot as plt\n'), ((2143, 2195), 'matplotlib.pyplot.bar', 'plt.bar', (['(_tspec + binsize / 2)', '_xcorr'], {'width': 'binsize'}), '(_tspec + binsize / 2, _xcorr, width=binsize)\n', (2150, 2195), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2302), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(ROI[0], ROI[-1])'], {}), '((ROI[0], ROI[-1]))\n', (2283, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2311, 2340), 'matplotlib.pyplot.title', 'plt.title', (['"""crosscorrelogram"""'], {}), "('crosscorrelogram')\n", (2320, 2340), True, 'import matplotlib.pyplot as plt\n'), ((2678, 2688), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2686, 2688), True, 'import matplotlib.pyplot as plt\n'), ((356, 425), 'numpy.hstack', 'np.hstack', (['(arr[arr < shift] - end + shift, arr[arr < shift] + shift)'], {}), '((arr[arr < shift] - end + shift, arr[arr < shift] + shift))\n', (365, 425), True, 'import numpy as np\n'), ((1597, 1618), 'numpy.zeros_like', 'np.zeros_like', (['_xcorr'], {}), '(_xcorr)\n', (1610, 1618), True, 'import numpy as np\n'), ((2415, 2435), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2426, 2435), True, 'import matplotlib.pyplot as plt\n'), ((2446, 2504), 'matplotlib.pyplot.bar', 'plt.bar', (['(_tspec + binsize / 2)', '_xcorr_shift'], {'width': 'binsize'}), '(_tspec + binsize / 2, _xcorr_shift, width=binsize)\n', (2453, 2504), True, 'import matplotlib.pyplot as plt\n'), ((2592, 2619), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(ROI[0], ROI[-1])'], {}), '((ROI[0], ROI[-1]))\n', (2600, 2619), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2660), 'matplotlib.pyplot.title', 'plt.title', (['"""shift predictor"""'], {}), "('shift predictor')\n", (2641, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2219, 2233), 'numpy.max', 'np.max', (['_xcorr'], {}), '(_xcorr)\n', (2225, 2233), True, 'import numpy as np\n'), ((2532, 2546), 'numpy.max', 'np.max', (['_xcorr'], {}), '(_xcorr)\n', (2538, 2546), True, 'import numpy as np\n'), ((1852, 1866), 'numpy.size', 'np.size', (['shift'], {}), '(shift)\n', (1859, 1866), True, 'import numpy as np\n')]
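A usage sketch for the sample above, assuming the function is imported from its package (PSTH comes from the relative .single_unit module). The spike trains are synthetic.

import numpy as np

rng = np.random.default_rng(0)
reference = np.sort(rng.uniform(0.0, 100.0, 500))             # reference spike times in seconds
target = np.sort(reference + rng.normal(0.010, 0.003, 500))   # fires ~10 ms after the reference
xcorr = crosscorrelogram(target, reference,
                         ROI=(-0.1, 0.1), binsize=0.005,
                         shift=[20.0, 40.0],    # average two shift predictors
                         skip_plot=True)
# The imposed 10 ms lag should appear as a peak just to the right of zero.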
import os import subprocess import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.nn.functional import interpolate from loguru import logger from tqdm import tqdm import numpy as np import wandb from draw_concat import draw_concat from generate_noise import generate_spatial_noise from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world from minecraft.level_renderer import render_minecraft from models import calc_gradient_penalty, save_networks from utils import interpolate3D def update_noise_amplitude(z_prev, real, opt): """ Update the amplitude of the noise for the current scale according to the previous noise map. """ RMSE = torch.sqrt(F.mse_loss(real, z_prev)) return opt.noise_update * RMSE def train_single_scale(D, G, reals, generators, noise_maps, input_from_prev_scale, noise_amplitudes, opt): """ Train one scale. D and G are the current discriminator and generator, reals are the scaled versions of the original level, generators and noise_maps contain information from previous scales and will receive information in this scale, input_from_previous_scale holds the noise map and images from the previous scale, noise_amplitudes hold the amplitudes for the noise in all the scales. opt is a namespace that holds all necessary parameters. """ current_scale = len(generators) clear_empty_world(opt.output_dir, 'Curr_Empty_World') # reset tmp world if opt.use_multiple_inputs: real_group = [] nzx_group = [] nzy_group = [] nz_group = [] for scale_group in reals: real_group.append(scale_group[current_scale]) nzx_group.append(scale_group[current_scale].shape[2]) nzy_group.append(scale_group[current_scale].shape[3]) nz_group.append((scale_group[current_scale].shape[2], scale_group[current_scale].shape[3])) curr_noises = [0 for _ in range(len(real_group))] curr_prevs = [0 for _ in range(len(real_group))] curr_z_prevs = [0 for _ in range(len(real_group))] else: real = reals[current_scale] nz = real.shape[2:] padsize = int(1 * opt.num_layer) # As kernel size is always 3 currently, padsize goes up by one per layer if not opt.pad_with_noise: # pad_noise = nn.ConstantPad3d(padsize, 0) # pad_image = nn.ConstantPad3d(padsize, 0) pad_noise = nn.ReplicationPad3d(padsize) pad_image = nn.ReplicationPad3d(padsize) else: pad_noise = nn.ReplicationPad3d(padsize) pad_image = nn.ReplicationPad3d(padsize) # setup optimizer optimizerD = optim.Adam(D.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999)) optimizerG = optim.Adam(G.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999)) schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[1600, 2500], gamma=opt.gamma) schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[1600, 2500], gamma=opt.gamma) if current_scale == 0: # Generate new noise if opt.use_multiple_inputs: z_opt_group = [] for nzx, nzy in zip(nzx_group, nzy_group): z_opt = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device) z_opt = pad_noise(z_opt) z_opt_group.append(z_opt) else: z_opt = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device) z_opt = pad_noise(z_opt) else: # Add noise to previous output if opt.use_multiple_inputs: z_opt_group = [] for nzx, nzy in zip(nzx_group, nzy_group): z_opt = torch.zeros([1, opt.nc_current, nzx, nzy]).to(opt.device) z_opt = pad_noise(z_opt) z_opt_group.append(z_opt) else: z_opt = torch.zeros((1, opt.nc_current) + nz).to(opt.device) z_opt = pad_noise(z_opt) logger.info("Training at scale 
{}", current_scale) grad_d_real = [] grad_d_fake = [] grad_g = [] for p in D.parameters(): grad_d_real.append(torch.zeros(p.shape).to(opt.device)) grad_d_fake.append(torch.zeros(p.shape).to(opt.device)) for p in G.parameters(): grad_g.append(torch.zeros(p.shape).to(opt.device)) for epoch in tqdm(range(opt.niter)): step = current_scale * opt.niter + epoch if opt.use_multiple_inputs: group_steps = len(real_group) noise_group = [] for nzx, nzy in zip(nzx_group, nzy_group): noise_ = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device) noise_ = pad_noise(noise_) noise_group.append(noise_) else: group_steps = 1 noise_ = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device) noise_ = pad_noise(noise_) for curr_inp in range(group_steps): if opt.use_multiple_inputs: real = real_group[curr_inp] nz = nz_group[curr_inp] z_opt = z_opt_group[curr_inp] noise_ = noise_group[curr_inp] prev_scale_results = input_from_prev_scale[curr_inp] opt.curr_inp = curr_inp else: prev_scale_results = input_from_prev_scale ############################ # (1) Update D network: maximize D(x) + D(G(z)) ########################### for j in range(opt.Dsteps): # train with real D.zero_grad() output = D(real).to(opt.device) errD_real = -output.mean() errD_real.backward(retain_graph=True) grads_after = [] cos_sim = [] for i, p in enumerate(D.parameters()): grads_after.append(p.grad) cos_sim.append(nn.CosineSimilarity(-1)(grad_d_real[i], p.grad).mean().item()) diff_d_real = np.mean(cos_sim) grad_d_real = grads_after # train with fake if (j == 0) & (epoch == 0): if current_scale == 0: # If we are in the lowest scale, noise is generated from scratch prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device) prev_scale_results = prev prev = pad_image(prev) z_prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device) z_prev = pad_noise(z_prev) opt.noise_amp = 1 else: # First step in NOT the lowest scale # We need to adapt our inputs from the previous scale and add noise to it prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results, "rand", pad_noise, pad_image, opt) prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=True) prev = pad_image(prev) z_prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results, "rec", pad_noise, pad_image, opt) z_prev = interpolate3D(z_prev, real.shape[-3:], mode="bilinear", align_corners=True) opt.noise_amp = update_noise_amplitude(z_prev, real, opt) z_prev = pad_image(z_prev) else: # Any other step if opt.use_multiple_inputs: z_prev = curr_z_prevs[curr_inp] prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results, "rand", pad_noise, pad_image, opt) prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=False) prev = pad_image(prev) # After creating our correct noise input, we feed it to the generator: noise = opt.noise_amp * noise_ + prev fake = G(noise.detach(), prev) # Then run the result through the discriminator output = D(fake.detach()) errD_fake = output.mean() # Backpropagation errD_fake.backward(retain_graph=False) # Gradient Penalty gradient_penalty = calc_gradient_penalty(D, real, fake, opt.lambda_grad, opt.device) gradient_penalty.backward(retain_graph=False) grads_after = [] cos_sim = [] for i, p in enumerate(D.parameters()): grads_after.append(p.grad) cos_sim.append(nn.CosineSimilarity(-1)(grad_d_fake[i], p.grad).mean().item()) diff_d_fake = np.mean(cos_sim) grad_d_fake = grads_after # Logging: if step % 10 == 0: 
wandb.log({f"D(G(z))@{current_scale}": errD_fake.item(), f"D(x)@{current_scale}": -errD_real.item(), f"gradient_penalty@{current_scale}": gradient_penalty.item(), f"D_real_grad@{current_scale}": diff_d_real, f"D_fake_grad@{current_scale}": diff_d_fake, }, step=step, sync=False) optimizerD.step() if opt.use_multiple_inputs: z_opt_group[curr_inp] = z_opt input_from_prev_scale[curr_inp] = prev_scale_results curr_noises[curr_inp] = noise curr_prevs[curr_inp] = prev curr_z_prevs[curr_inp] = z_prev ############################ # (2) Update G network: maximize D(G(z)) ########################### for j in range(opt.Gsteps): G.zero_grad() fake = G(noise.detach(), prev.detach(), temperature=1) output = D(fake) errG = -output.mean() errG.backward(retain_graph=False) grads_after = [] cos_sim = [] for i, p in enumerate(G.parameters()): grads_after.append(p.grad) cos_sim.append(nn.CosineSimilarity(-1)(grad_g[i], p.grad).mean().item()) diff_g = np.mean(cos_sim) grad_g = grads_after if opt.alpha != 0: # i. e. we are trying to find an exact recreation of our input in the lat space Z_opt = opt.noise_amp * z_opt + z_prev G_rec = G(Z_opt.detach(), z_prev, temperature=1) rec_loss = opt.alpha * F.mse_loss(G_rec, real) rec_loss.backward(retain_graph=False) # TODO: Check for unexpected argument retain_graph=True rec_loss = rec_loss.detach() else: # We are not trying to find an exact recreation rec_loss = torch.zeros([]) Z_opt = z_opt optimizerG.step() # More Logging: if step % 10 == 0: wandb.log({f"noise_amplitude@{current_scale}": opt.noise_amp, f"rec_loss@{current_scale}": rec_loss.item(), f"G_grad@{current_scale}": diff_g}, step=step, sync=False, commit=True) # Rendering and logging images of levels if epoch % 500 == 0 or epoch == (opt.niter - 1): token_list = opt.token_list to_level = one_hot_to_blockdata_level try: subprocess.call(["wine", '--version']) real_scaled = to_level(real.detach(), token_list, opt.block2repr, opt.repr_type) # Minecraft World worldname = 'Curr_Empty_World' clear_empty_world(opt.output_dir, worldname) # reset tmp world to_render = [real_scaled, to_level(fake.detach(), token_list, opt.block2repr, opt.repr_type), to_level(G(Z_opt.detach(), z_prev), token_list, opt.block2repr, opt.repr_type)] render_names = [f"real@{current_scale}", f"G(z)@{current_scale}", f"G(z_opt)@{current_scale}"] obj_pth = os.path.join(opt.out_, f"objects/{current_scale}") os.makedirs(obj_pth, exist_ok=True) for n, level in enumerate(to_render): pos = n * (level.shape[0] + 5) save_level_to_world(opt.output_dir, worldname, (pos, 0, 0), level, token_list, opt.props) curr_coords = [[pos, pos + real_scaled.shape[0]], [0, real_scaled.shape[1]], [0, real_scaled.shape[2]]] render_pth = render_minecraft(worldname, curr_coords, obj_pth, render_names[n]) wandb.log({render_names[n]: wandb.Object3D(open(render_pth))}, commit=False) except OSError: pass # Learning Rate scheduler step schedulerD.step() schedulerG.step() # Save networks if opt.use_multiple_inputs: z_opt = z_opt_group torch.save(z_opt, "%s/z_opt.pth" % opt.outf) save_networks(G, D, z_opt, opt) wandb.save(opt.outf) return z_opt, input_from_prev_scale, G
[ "torch.optim.lr_scheduler.MultiStepLR", "models.save_networks", "numpy.mean", "generate_noise.generate_spatial_noise", "minecraft.level_utils.clear_empty_world", "torch.nn.ReplicationPad3d", "minecraft.level_renderer.render_minecraft", "subprocess.call", "models.calc_gradient_penalty", "draw_concat.draw_concat", "torch.nn.functional.mse_loss", "minecraft.level_utils.save_level_to_world", "wandb.save", "torch.save", "utils.interpolate3D", "torch.nn.CosineSimilarity", "loguru.logger.info", "os.makedirs", "os.path.join", "torch.zeros" ]
[((1431, 1484), 'minecraft.level_utils.clear_empty_world', 'clear_empty_world', (['opt.output_dir', '"""Curr_Empty_World"""'], {}), "(opt.output_dir, 'Curr_Empty_World')\n", (1448, 1484), False, 'from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world\n'), ((2866, 2970), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', ([], {'optimizer': 'optimizerD', 'milestones': '[1600, 2500]', 'gamma': 'opt.gamma'}), '(optimizer=optimizerD, milestones=[1600,\n 2500], gamma=opt.gamma)\n', (2902, 2970), False, 'import torch\n'), ((2984, 3088), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', ([], {'optimizer': 'optimizerG', 'milestones': '[1600, 2500]', 'gamma': 'opt.gamma'}), '(optimizer=optimizerG, milestones=[1600,\n 2500], gamma=opt.gamma)\n', (3020, 3088), False, 'import torch\n'), ((4030, 4080), 'loguru.logger.info', 'logger.info', (['"""Training at scale {}"""', 'current_scale'], {}), "('Training at scale {}', current_scale)\n", (4041, 4080), False, 'from loguru import logger\n'), ((13579, 13623), 'torch.save', 'torch.save', (['z_opt', "('%s/z_opt.pth' % opt.outf)"], {}), "(z_opt, '%s/z_opt.pth' % opt.outf)\n", (13589, 13623), False, 'import torch\n'), ((13628, 13659), 'models.save_networks', 'save_networks', (['G', 'D', 'z_opt', 'opt'], {}), '(G, D, z_opt, opt)\n', (13641, 13659), False, 'from models import calc_gradient_penalty, save_networks\n'), ((13664, 13684), 'wandb.save', 'wandb.save', (['opt.outf'], {}), '(opt.outf)\n', (13674, 13684), False, 'import wandb\n'), ((754, 778), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['real', 'z_prev'], {}), '(real, z_prev)\n', (764, 778), True, 'import torch.nn.functional as F\n'), ((2473, 2501), 'torch.nn.ReplicationPad3d', 'nn.ReplicationPad3d', (['padsize'], {}), '(padsize)\n', (2492, 2501), True, 'import torch.nn as nn\n'), ((2522, 2550), 'torch.nn.ReplicationPad3d', 'nn.ReplicationPad3d', (['padsize'], {}), '(padsize)\n', (2541, 2550), True, 'import torch.nn as nn\n'), ((2582, 2610), 'torch.nn.ReplicationPad3d', 'nn.ReplicationPad3d', (['padsize'], {}), '(padsize)\n', (2601, 2610), True, 'import torch.nn as nn\n'), ((2631, 2659), 'torch.nn.ReplicationPad3d', 'nn.ReplicationPad3d', (['padsize'], {}), '(padsize)\n', (2650, 2659), True, 'import torch.nn as nn\n'), ((3469, 3536), 'generate_noise.generate_spatial_noise', 'generate_spatial_noise', (['((1, opt.nc_current) + nz)'], {'device': 'opt.device'}), '((1, opt.nc_current) + nz, device=opt.device)\n', (3491, 3536), False, 'from generate_noise import generate_spatial_noise\n'), ((4885, 4952), 'generate_noise.generate_spatial_noise', 'generate_spatial_noise', (['((1, opt.nc_current) + nz)'], {'device': 'opt.device'}), '((1, opt.nc_current) + nz, device=opt.device)\n', (4907, 4952), False, 'from generate_noise import generate_spatial_noise\n'), ((3279, 3351), 'generate_noise.generate_spatial_noise', 'generate_spatial_noise', (['[1, opt.nc_current, nzx, nzy]'], {'device': 'opt.device'}), '([1, opt.nc_current, nzx, nzy], device=opt.device)\n', (3301, 3351), False, 'from generate_noise import generate_spatial_noise\n'), ((4663, 4735), 'generate_noise.generate_spatial_noise', 'generate_spatial_noise', (['[1, opt.nc_current, nzx, nzy]'], {'device': 'opt.device'}), '([1, opt.nc_current, nzx, nzy], device=opt.device)\n', (4685, 4735), False, 'from generate_noise import generate_spatial_noise\n'), ((6128, 6144), 'numpy.mean', 'np.mean', (['cos_sim'], {}), '(cos_sim)\n', (6135, 6144), True, 'import numpy 
as np\n'), ((8650, 8715), 'models.calc_gradient_penalty', 'calc_gradient_penalty', (['D', 'real', 'fake', 'opt.lambda_grad', 'opt.device'], {}), '(D, real, fake, opt.lambda_grad, opt.device)\n', (8671, 8715), False, 'from models import calc_gradient_penalty, save_networks\n'), ((9072, 9088), 'numpy.mean', 'np.mean', (['cos_sim'], {}), '(cos_sim)\n', (9079, 9088), True, 'import numpy as np\n'), ((10715, 10731), 'numpy.mean', 'np.mean', (['cos_sim'], {}), '(cos_sim)\n', (10722, 10731), True, 'import numpy as np\n'), ((11977, 12015), 'subprocess.call', 'subprocess.call', (["['wine', '--version']"], {}), "(['wine', '--version'])\n", (11992, 12015), False, 'import subprocess\n'), ((12211, 12255), 'minecraft.level_utils.clear_empty_world', 'clear_empty_world', (['opt.output_dir', 'worldname'], {}), '(opt.output_dir, worldname)\n', (12228, 12255), False, 'from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world\n'), ((12630, 12680), 'os.path.join', 'os.path.join', (['opt.out_', 'f"""objects/{current_scale}"""'], {}), "(opt.out_, f'objects/{current_scale}')\n", (12642, 12680), False, 'import os\n'), ((12697, 12732), 'os.makedirs', 'os.makedirs', (['obj_pth'], {'exist_ok': '(True)'}), '(obj_pth, exist_ok=True)\n', (12708, 12732), False, 'import os\n'), ((3935, 3972), 'torch.zeros', 'torch.zeros', (['((1, opt.nc_current) + nz)'], {}), '((1, opt.nc_current) + nz)\n', (3946, 3972), False, 'import torch\n'), ((4195, 4215), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (4206, 4215), False, 'import torch\n'), ((4259, 4279), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (4270, 4279), False, 'import torch\n'), ((4348, 4368), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (4359, 4368), False, 'import torch\n'), ((7849, 7968), 'draw_concat.draw_concat', 'draw_concat', (['generators', 'noise_maps', 'reals', 'noise_amplitudes', 'prev_scale_results', '"""rand"""', 'pad_noise', 'pad_image', 'opt'], {}), "(generators, noise_maps, reals, noise_amplitudes,\n prev_scale_results, 'rand', pad_noise, pad_image, opt)\n", (7860, 7968), False, 'from draw_concat import draw_concat\n'), ((8032, 8106), 'utils.interpolate3D', 'interpolate3D', (['prev', 'real.shape[-3:]'], {'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(prev, real.shape[-3:], mode='bilinear', align_corners=False)\n", (8045, 8106), False, 'from utils import interpolate3D\n'), ((11348, 11363), 'torch.zeros', 'torch.zeros', (['[]'], {}), '([])\n', (11359, 11363), False, 'import torch\n'), ((12858, 12951), 'minecraft.level_utils.save_level_to_world', 'save_level_to_world', (['opt.output_dir', 'worldname', '(pos, 0, 0)', 'level', 'token_list', 'opt.props'], {}), '(opt.output_dir, worldname, (pos, 0, 0), level,\n token_list, opt.props)\n', (12877, 12951), False, 'from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world\n'), ((13175, 13241), 'minecraft.level_renderer.render_minecraft', 'render_minecraft', (['worldname', 'curr_coords', 'obj_pth', 'render_names[n]'], {}), '(worldname, curr_coords, obj_pth, render_names[n])\n', (13191, 13241), False, 'from minecraft.level_renderer import render_minecraft\n'), ((3760, 3802), 'torch.zeros', 'torch.zeros', (['[1, opt.nc_current, nzx, nzy]'], {}), '([1, opt.nc_current, nzx, nzy])\n', (3771, 3802), False, 'import torch\n'), ((6929, 7048), 'draw_concat.draw_concat', 'draw_concat', (['generators', 'noise_maps', 'reals', 'noise_amplitudes', 'prev_scale_results', '"""rand"""', 
'pad_noise', 'pad_image', 'opt'], {}), "(generators, noise_maps, reals, noise_amplitudes,\n prev_scale_results, 'rand', pad_noise, pad_image, opt)\n", (6940, 7048), False, 'from draw_concat import draw_concat\n'), ((7120, 7193), 'utils.interpolate3D', 'interpolate3D', (['prev', 'real.shape[-3:]'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(prev, real.shape[-3:], mode='bilinear', align_corners=True)\n", (7133, 7193), False, 'from utils import interpolate3D\n'), ((7275, 7393), 'draw_concat.draw_concat', 'draw_concat', (['generators', 'noise_maps', 'reals', 'noise_amplitudes', 'prev_scale_results', '"""rec"""', 'pad_noise', 'pad_image', 'opt'], {}), "(generators, noise_maps, reals, noise_amplitudes,\n prev_scale_results, 'rec', pad_noise, pad_image, opt)\n", (7286, 7393), False, 'from draw_concat import draw_concat\n'), ((7468, 7543), 'utils.interpolate3D', 'interpolate3D', (['z_prev', 'real.shape[-3:]'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(z_prev, real.shape[-3:], mode='bilinear', align_corners=True)\n", (7481, 7543), False, 'from utils import interpolate3D\n'), ((11058, 11081), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['G_rec', 'real'], {}), '(G_rec, real)\n', (11068, 11081), True, 'import torch.nn.functional as F\n'), ((6407, 6444), 'torch.zeros', 'torch.zeros', (['((1, opt.nc_current) + nz)'], {}), '((1, opt.nc_current) + nz)\n', (6418, 6444), False, 'import torch\n'), ((6590, 6627), 'torch.zeros', 'torch.zeros', (['((1, opt.nc_current) + nz)'], {}), '((1, opt.nc_current) + nz)\n', (6601, 6627), False, 'import torch\n'), ((6034, 6057), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', (['(-1)'], {}), '(-1)\n', (6053, 6057), True, 'import torch.nn as nn\n'), ((8978, 9001), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', (['(-1)'], {}), '(-1)\n', (8997, 9001), True, 'import torch.nn as nn\n'), ((10631, 10654), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', (['(-1)'], {}), '(-1)\n', (10650, 10654), True, 'import torch.nn as nn\n')]
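One detail of the training loop above that is easy to isolate is the per-scale noise amplitude: it grows with the RMSE between the real level and the previous scale's upsampled reconstruction. Shapes and values below are made up.

import torch
import torch.nn.functional as F

real = torch.rand(1, 8, 16, 16, 16)      # one-hot level at the current scale (N, C, D, H, W)
z_prev = torch.rand(1, 8, 16, 16, 16)    # reconstruction upsampled from the previous scale
noise_update = 0.1                       # plays the role of opt.noise_update
noise_amp = noise_update * torch.sqrt(F.mse_loss(real, z_prev))
print(float(noise_amp))                  # larger reconstruction error -> more noise injected at this scale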
from PIL import Image
import numpy as np
from skimage import transform

IMG_HEIGHT = 100
IMG_WIDTH = 100

def load(filename):
    # Read an image, scale its pixel values to [0, 1], resize it to
    # (IMG_HEIGHT, IMG_WIDTH, 3) and add a leading batch axis.
    np_image = Image.open(filename)
    np_image = np.array(np_image).astype('float32') / 255
    np_image = transform.resize(np_image, (IMG_HEIGHT, IMG_WIDTH, 3))
    np_image = np.expand_dims(np_image, axis=0)
    return np_image
[ "numpy.array", "numpy.expand_dims", "PIL.Image.open", "skimage.transform.resize" ]
[((142, 162), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (152, 162), False, 'from PIL import Image\n'), ((268, 322), 'skimage.transform.resize', 'transform.resize', (['np_image', '(IMG_HEIGHT, IMG_WIDTH, 3)'], {}), '(np_image, (IMG_HEIGHT, IMG_WIDTH, 3))\n', (284, 322), False, 'from skimage import transform\n'), ((338, 370), 'numpy.expand_dims', 'np.expand_dims', (['np_image'], {'axis': '(0)'}), '(np_image, axis=0)\n', (352, 370), True, 'import numpy as np\n'), ((178, 196), 'numpy.array', 'np.array', (['np_image'], {}), '(np_image)\n', (186, 196), True, 'import numpy as np\n')]
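Example call for the loader above; the file name is hypothetical. The result is a single-image batch resized to 100x100 with values scaled to [0, 1], ready for a Keras-style model.predict.

batch = load("leaf.jpg")       # any RGB image on disk
print(batch.shape)             # (1, 100, 100, 3)
# prediction = model.predict(batch)   # with whatever classifier the project trained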
import copy import numpy as np from PIL import Image from torch.utils.data.dataset import Dataset class CleanLabelDataset(Dataset): """Clean-label dataset. Args: dataset (Dataset): The dataset to be wrapped. adv_dataset_path (str): The adversarially perturbed dataset path. transform (callable): The backdoor transformations. poison_idx (np.array): An 0/1 (clean/poisoned) array with shape `(len(dataset), )`. target_label (int): The target label. """ def __init__(self, dataset, adv_dataset_path, transform, poison_idx, target_label): super(CleanLabelDataset, self).__init__() self.clean_dataset = copy.deepcopy(dataset) self.adv_data = np.load(adv_dataset_path)["data"] self.clean_data = self.clean_dataset.data self.train = self.clean_dataset.train if self.train: self.data = np.where( (poison_idx == 1)[..., None, None, None], self.adv_data, self.clean_data, ) self.targets = self.clean_dataset.targets self.poison_idx = poison_idx else: # Only fetch poison data when testing. self.data = self.clean_data[np.nonzero(poison_idx)[0]] self.targets = self.clean_dataset.targets[np.nonzero(poison_idx)[0]] self.poison_idx = poison_idx[poison_idx == 1] self.transform = self.clean_dataset.transform self.bd_transform = transform self.target_label = target_label def __getitem__(self, index): img = self.data[index] target = self.targets[index] if self.poison_idx[index] == 1: img = self.augment(img, bd_transform=self.bd_transform) # If `self.train` is `True`, it will not modify `target` for poison data # only in the target class; If `self.train` is `False`, it will flip `target` # to `self.target_label` for testing purpose. target = self.target_label else: img = self.augment(img, bd_transform=None) item = {"img": img, "target": target} return item def __len__(self): return len(self.data) def augment(self, img, bd_transform=None): if bd_transform is not None: img = bd_transform(img) img = Image.fromarray(img) img = self.transform(img) return img
[ "PIL.Image.fromarray", "numpy.where", "numpy.nonzero", "copy.deepcopy", "numpy.load" ]
[((688, 710), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (701, 710), False, 'import copy\n'), ((2366, 2386), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2381, 2386), False, 'from PIL import Image\n'), ((735, 760), 'numpy.load', 'np.load', (['adv_dataset_path'], {}), '(adv_dataset_path)\n', (742, 760), True, 'import numpy as np\n'), ((912, 999), 'numpy.where', 'np.where', (['(poison_idx == 1)[..., None, None, None]', 'self.adv_data', 'self.clean_data'], {}), '((poison_idx == 1)[..., None, None, None], self.adv_data, self.\n clean_data)\n', (920, 999), True, 'import numpy as np\n'), ((1258, 1280), 'numpy.nonzero', 'np.nonzero', (['poison_idx'], {}), '(poison_idx)\n', (1268, 1280), True, 'import numpy as np\n'), ((1339, 1361), 'numpy.nonzero', 'np.nonzero', (['poison_idx'], {}), '(poison_idx)\n', (1349, 1361), True, 'import numpy as np\n')]
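A hypothetical wiring of the dataset above around torchvision's CIFAR-10 train split. The trigger function, poison rate and adversarial file are placeholders (here the "adversarial" images are simply the clean ones saved under the expected "data" key); only the constructor arguments follow the class.

import numpy as np
from torchvision import datasets, transforms

def add_trigger(img):
    # Placeholder trigger: stamp a small white square into the corner (HWC uint8 in/out).
    img = img.copy()
    img[-4:, -4:, :] = 255
    return img

clean = datasets.CIFAR10("./data", train=True, download=True,
                 transform=transforms.ToTensor())

adv_path = "adv_cifar10.npz"                 # placeholder adversarial dataset
np.savez(adv_path, data=clean.data)          # a real clean-label attack would store perturbed images here

poison_idx = np.zeros(len(clean), dtype=np.int64)   # 0 = clean, 1 = poisoned
poison_idx[np.random.choice(len(clean), len(clean) // 20, replace=False)] = 1   # poison 5%

backdoored = CleanLabelDataset(clean, adv_path, add_trigger, poison_idx, target_label=0)
sample = backdoored[0]                       # {"img": tensor, "target": int}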
# This file is mainly derived from https://github.com/openai/baselines. from collections import deque import os.path as osp import time import csv import json from gym.core import Wrapper import numpy as np from .vec_env import VecEnvWrapper class Monitor(Wrapper): EXT = "monitor.csv" f = None def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()): Wrapper.__init__(self, env=env) self.tstart = time.time() if filename: self.results_writer = ResultsWriter( filename, header={ 't_start': time.time(), 'env_id': env.spec and env.spec.id}, extra_keys=reset_keywords + info_keywords) else: self.results_writer = None self.reset_keywords = reset_keywords self.info_keywords = info_keywords self.allow_early_resets = allow_early_resets self.rewards = None self.needs_reset = True self.episode_rewards = [] self.episode_lengths = [] self.episode_times = [] self.total_steps = 0 self.current_reset_info = {} def reset(self, **kwargs): self.reset_state() for k in self.reset_keywords: v = kwargs.get(k) if v is None: raise ValueError(f'Expected you to pass kwarg {k} into reset') self.current_reset_info[k] = v return self.env.reset(**kwargs) def reset_state(self): if not self.allow_early_resets and not self.needs_reset: raise RuntimeError( "Tried to reset an environment before done. " "If you want to allow early resets, wrap your env with " "Monitor(env, path, allow_early_resets=True)") self.rewards = [] self.needs_reset = False def step(self, action): if self.needs_reset: raise RuntimeError("Tried to step environment that needs reset") ob, rew, done, info = self.env.step(action) self.update(ob, rew, done, info) return (ob, rew, done, info) def update(self, ob, rew, done, info): self.rewards.append(rew) if done: self.needs_reset = True eprew = sum(self.rewards) eplen = len(self.rewards) epinfo = { "r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)} for k in self.info_keywords: epinfo[k] = info[k] self.episode_rewards.append(eprew) self.episode_lengths.append(eplen) self.episode_times.append(time.time() - self.tstart) epinfo.update(self.current_reset_info) if self.results_writer: self.results_writer.write_row(epinfo) assert isinstance(info, dict) if isinstance(info, dict): info['episode'] = epinfo self.total_steps += 1 def close(self): super(Monitor, self).close() if self.f is not None: self.f.close() def get_total_steps(self): return self.total_steps def get_episode_rewards(self): return self.episode_rewards def get_episode_lengths(self): return self.episode_lengths def get_episode_times(self): return self.episode_times class ResultsWriter(object): def __init__(self, filename, header='', extra_keys=()): self.extra_keys = extra_keys assert filename is not None if not filename.endswith(Monitor.EXT): if osp.isdir(filename): filename = osp.join(filename, Monitor.EXT) else: filename = filename + "." 
+ Monitor.EXT self.f = open(filename, "wt") if isinstance(header, dict): header = '# {} \n'.format(json.dumps(header)) self.f.write(header) self.logger = csv.DictWriter( self.f, fieldnames=('r', 'l', 't') + tuple(extra_keys)) self.logger.writeheader() self.f.flush() def write_row(self, epinfo): if self.logger: self.logger.writerow(epinfo) self.f.flush() class VecMonitor(VecEnvWrapper): def __init__(self, venv, filename=None, keep_buf=0, info_keywords=()): VecEnvWrapper.__init__(self, venv) self.eprets = None self.eplens = None self.epcount = 0 self.tstart = time.time() if filename: self.results_writer = ResultsWriter( filename, header={'t_start': self.tstart}, extra_keys=info_keywords) else: self.results_writer = None self.info_keywords = info_keywords self.keep_buf = keep_buf if self.keep_buf: self.epret_buf = deque([], maxlen=keep_buf) self.eplen_buf = deque([], maxlen=keep_buf) def reset(self): obs = self.venv.reset() self.eprets = np.zeros(self.num_envs, 'f') self.eplens = np.zeros(self.num_envs, 'i') return obs def step_wait(self): obs, rews, dones, infos = self.venv.step_wait() self.eprets += rews self.eplens += 1 newinfos = list(infos[:]) for i in range(len(dones)): if dones[i]: info = infos[i].copy() ret = self.eprets[i] eplen = self.eplens[i] epinfo = { 'r': ret, 'l': eplen, 't': round(time.time() - self.tstart, 6)} for k in self.info_keywords: epinfo[k] = info[k] info['episode'] = epinfo if self.keep_buf: self.epret_buf.append(ret) self.eplen_buf.append(eplen) self.epcount += 1 self.eprets[i] = 0 self.eplens[i] = 0 if self.results_writer: self.results_writer.write_row(epinfo) newinfos[i] = info return obs, rews, dones, newinfos
[ "collections.deque", "json.dumps", "os.path.join", "gym.core.Wrapper.__init__", "numpy.zeros", "os.path.isdir", "time.time" ]
[((435, 466), 'gym.core.Wrapper.__init__', 'Wrapper.__init__', (['self'], {'env': 'env'}), '(self, env=env)\n', (451, 466), False, 'from gym.core import Wrapper\n'), ((490, 501), 'time.time', 'time.time', ([], {}), '()\n', (499, 501), False, 'import time\n'), ((4528, 4539), 'time.time', 'time.time', ([], {}), '()\n', (4537, 4539), False, 'import time\n'), ((5054, 5082), 'numpy.zeros', 'np.zeros', (['self.num_envs', '"""f"""'], {}), "(self.num_envs, 'f')\n", (5062, 5082), True, 'import numpy as np\n'), ((5105, 5133), 'numpy.zeros', 'np.zeros', (['self.num_envs', '"""i"""'], {}), "(self.num_envs, 'i')\n", (5113, 5133), True, 'import numpy as np\n'), ((3669, 3688), 'os.path.isdir', 'osp.isdir', (['filename'], {}), '(filename)\n', (3678, 3688), True, 'import os.path as osp\n'), ((4895, 4921), 'collections.deque', 'deque', (['[]'], {'maxlen': 'keep_buf'}), '([], maxlen=keep_buf)\n', (4900, 4921), False, 'from collections import deque\n'), ((4951, 4977), 'collections.deque', 'deque', (['[]'], {'maxlen': 'keep_buf'}), '([], maxlen=keep_buf)\n', (4956, 4977), False, 'from collections import deque\n'), ((3717, 3748), 'os.path.join', 'osp.join', (['filename', 'Monitor.EXT'], {}), '(filename, Monitor.EXT)\n', (3725, 3748), True, 'import os.path as osp\n'), ((3936, 3954), 'json.dumps', 'json.dumps', (['header'], {}), '(header)\n', (3946, 3954), False, 'import json\n'), ((2729, 2740), 'time.time', 'time.time', ([], {}), '()\n', (2738, 2740), False, 'import time\n'), ((654, 665), 'time.time', 'time.time', ([], {}), '()\n', (663, 665), False, 'import time\n'), ((2489, 2500), 'time.time', 'time.time', ([], {}), '()\n', (2498, 2500), False, 'import time\n'), ((5619, 5630), 'time.time', 'time.time', ([], {}), '()\n', (5628, 5630), False, 'import time\n')]
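The Monitor wrapper above is applied to a single environment before training; this sketch assumes the classic gym API (reset returning only the observation, step returning a 4-tuple), which is what the wrapper expects.

import gym

env = Monitor(gym.make("CartPole-v1"), filename="cartpole_run", allow_early_resets=True)
obs = env.reset()
done = False
while not done:
    obs, reward, done, info = env.step(env.action_space.sample())
print(info["episode"])   # e.g. {'r': 22.0, 'l': 22, 't': 0.01}; the same row is appended to cartpole_run.monitor.csv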
#!/usr/bin/env python import numpy as np p = np.pi def DH_to_T(DH): """! Computes the transformation matrices given the DH table of the serial link. @param DH: devavitt-hartemberg parameters. @return T: transformation matrices of a joint with respect to previous joint. """ # Get the number of rows, to know how many T matrices should create. rows = len(DH) T = [] for i in range(rows): Tmp = np.array([[np.cos(DH[i,3]), -np.sin(DH[i,3]), 0, DH[i,1]], [np.sin(DH[i,3])*np.cos(DH[i,0]), np.cos(DH[i,3])*np.cos(DH[i,0]), -np.sin(DH[i,0]), -DH[i,2]*np.sin(DH[i,0])], [np.sin(DH[i,3])*np.sin(DH[i,0]), np.cos(DH[i,3])*np.sin(DH[i,0]), np.cos(DH[i,0]), DH[i,2]*np.cos(DH[i,0])], [0, 0, 0, 1]]) T.append(Tmp) return T def transformations(T_rel_ini, q, info): """! Computes tranformations given T_relatives, q's and the info. @param T_rel_ini: the ones computed with DH_to_T. @param q: current configuration of baxter's arm. @param info: 1->revolute, 0->prismatic. @return T: transformation matrices of a joint with respect to previous joint in the new configuration. """ row_q = q.size row_info = info.size T = [] if row_q != row_info: print("Warning. q and info must have same size.") return for i in range(row_q): if info[i] == 1: Tel = np.array([[np.cos(q[i]), -np.sin(q[i]), 0 , 0], [np.sin(q[i]), np.cos(q[i]), 0 , 0], [0, 0, 1, 0], [0, 0, 0, 1]]) # else: # Case in which there are prismatic joints. ## Tel = np.array([[1, 0, 0, 0], ## [0, 1, 0, 0], ## [0, 0, 0, q[i]] ## [0, 0, 0, 1]]) Tmp = np.dot(T_rel_ini[i], Tel) T.append(Tmp) # Last matrix is constant in time. T_7,e.e T.append(T_rel_ini[row_q]) return T def abs_trans(T_rel): """! Computes trasformations matrices w.r.t. 0 frame. @param T_rel: trasformation matrices of a joint with respect to previous one. @return T: absolute transformation matrices. """ T = [] # First is the same. T.append(T_rel[0]) for i in range(1, len(T_rel)): Tmp = np.dot(T[i-1], T_rel[i]) T.append(Tmp) return T
[ "numpy.sin", "numpy.dot", "numpy.cos" ]
[((1758, 1783), 'numpy.dot', 'np.dot', (['T_rel_ini[i]', 'Tel'], {}), '(T_rel_ini[i], Tel)\n', (1764, 1783), True, 'import numpy as np\n'), ((2206, 2232), 'numpy.dot', 'np.dot', (['T[i - 1]', 'T_rel[i]'], {}), '(T[i - 1], T_rel[i])\n', (2212, 2232), True, 'import numpy as np\n'), ((431, 447), 'numpy.cos', 'np.cos', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (437, 447), True, 'import numpy as np\n'), ((682, 698), 'numpy.cos', 'np.cos', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (688, 698), True, 'import numpy as np\n'), ((449, 465), 'numpy.sin', 'np.sin', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (455, 465), True, 'import numpy as np\n'), ((492, 508), 'numpy.sin', 'np.sin', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (498, 508), True, 'import numpy as np\n'), ((508, 524), 'numpy.cos', 'np.cos', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (514, 524), True, 'import numpy as np\n'), ((525, 541), 'numpy.cos', 'np.cos', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (531, 541), True, 'import numpy as np\n'), ((541, 557), 'numpy.cos', 'np.cos', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (547, 557), True, 'import numpy as np\n'), ((559, 575), 'numpy.sin', 'np.sin', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (565, 575), True, 'import numpy as np\n'), ((585, 601), 'numpy.sin', 'np.sin', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (591, 601), True, 'import numpy as np\n'), ((616, 632), 'numpy.sin', 'np.sin', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (622, 632), True, 'import numpy as np\n'), ((632, 648), 'numpy.sin', 'np.sin', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (638, 648), True, 'import numpy as np\n'), ((649, 665), 'numpy.cos', 'np.cos', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (655, 665), True, 'import numpy as np\n'), ((665, 681), 'numpy.sin', 'np.sin', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (671, 681), True, 'import numpy as np\n'), ((707, 723), 'numpy.cos', 'np.cos', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (713, 723), True, 'import numpy as np\n'), ((1362, 1374), 'numpy.cos', 'np.cos', (['q[i]'], {}), '(q[i])\n', (1368, 1374), True, 'import numpy as np\n'), ((1422, 1434), 'numpy.sin', 'np.sin', (['q[i]'], {}), '(q[i])\n', (1428, 1434), True, 'import numpy as np\n'), ((1436, 1448), 'numpy.cos', 'np.cos', (['q[i]'], {}), '(q[i])\n', (1442, 1448), True, 'import numpy as np\n'), ((1377, 1389), 'numpy.sin', 'np.sin', (['q[i]'], {}), '(q[i])\n', (1383, 1389), True, 'import numpy as np\n')]
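# Editor's note: a brief usage sketch for the kinematics helpers above (DH_to_T,
# transformations, abs_trans); it is not part of the original sample. The two-joint DH
# table, the joint angles, and the reading of the columns as (alpha, a, d, theta) are
# illustrative assumptions inferred from how the code indexes DH.
import numpy as np

DH = np.array([[0.0, 0.0, 0.0, 0.0],    # joint 1: alpha, a, d, theta
               [0.0, 0.3, 0.0, 0.0],    # joint 2
               [0.0, 0.2, 0.0, 0.0]])   # fixed last frame (e.g. the end effector)
q = np.array([np.pi / 4, np.pi / 6])   # current joint angles
info = np.array([1, 1])                # both joints revolute

T_rel_ini = DH_to_T(DH)                      # per-joint transforms from the DH table
T_rel = transformations(T_rel_ini, q, info)  # re-evaluated at configuration q
T_abs = abs_trans(T_rel)                     # every frame expressed w.r.t. frame 0
print(T_abs[-1][:3, 3])                      # position of the last frame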
# -*- coding: utf-8 -*- # # This code is part of Qiskit. # # (C) Copyright IBM 2019, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. # pylint: disable=missing-docstring,invalid-name import unittest import numpy as np from qiskit import Aer from qiskit.compiler import assemble from qiskit.ignis.verification.tomography import GatesetTomographyFitter from qiskit.ignis.verification.tomography import gateset_tomography_circuits from qiskit.ignis.verification.tomography.basis import default_gateset_basis from qiskit.providers.aer.noise import NoiseModel from qiskit.extensions import HGate, SGate from qiskit.quantum_info import PTM class TestGatesetTomography(unittest.TestCase): @staticmethod def collect_tomography_data(shots=10000, noise_model=None, gateset_basis='Default'): backend_qasm = Aer.get_backend('qasm_simulator') circuits = gateset_tomography_circuits(gateset_basis=gateset_basis) qobj = assemble(circuits, shots=shots) result = backend_qasm.run(qobj, noise_model=noise_model).result() fitter = GatesetTomographyFitter(result, circuits, gateset_basis) return fitter @staticmethod def expected_linear_inversion_gates(Gs, Fs): rho = Gs['rho'] E = Gs['E'] B = np.array([(F @ rho).T[0] for F in Fs]).T BB = np.linalg.inv(B) gates = {label: BB @ G @ B for (label, G) in Gs.items() if label not in ['E', 'rho']} gates['E'] = E @ B gates['rho'] = BB @ rho return gates @staticmethod def hs_distance(A, B): return sum([np.abs(x) ** 2 for x in np.nditer(A-B)]) @staticmethod def convert_from_ptm(vector): Id = np.sqrt(0.5) * np.array([[1, 0], [0, 1]]) X = np.sqrt(0.5) * np.array([[0, 1], [1, 0]]) Y = np.sqrt(0.5) * np.array([[0, -1j], [1j, 0]]) Z = np.sqrt(0.5) * np.array([[1, 0], [0, -1]]) v = vector.reshape(4) return v[0] * Id + v[1] * X + v[2] * Y + v[3] * Z def compare_gates(self, expected_gates, result_gates, labels, delta=0.2): for label in labels: expected_gate = expected_gates[label] result_gate = result_gates[label].data msg = "Failure on gate {}: Expected gate = \n{}\n" \ "vs Actual gate = \n{}".format(label, expected_gate, result_gate) distance = self.hs_distance(expected_gate, result_gate) self.assertAlmostEqual(distance, 0, delta=delta, msg=msg) def run_test_on_basis_and_noise(self, gateset_basis='Default', noise_model=None, noise_ptm=None): if gateset_basis == 'Default': gateset_basis = default_gateset_basis() labels = gateset_basis.gate_labels gates = gateset_basis.gate_matrices gates['rho'] = np.array([[np.sqrt(0.5)], [0], [0], [np.sqrt(0.5)]]) gates['E'] = np.array([[np.sqrt(0.5), 0, 0, np.sqrt(0.5)]]) # apply noise if given for label in labels: if label != "Id" and noise_ptm is not None: gates[label] = noise_ptm @ gates[label] Fs = [gateset_basis.spam_matrix(label) for label in gateset_basis.spam_labels] # prepare the fitter fitter = self.collect_tomography_data(shots=10000, noise_model=noise_model, gateset_basis=gateset_basis) # linear inversion test result_gates = fitter.linear_inversion() expected_gates = self.expected_linear_inversion_gates(gates, Fs) self.compare_gates(expected_gates, result_gates, labels + ['E', 'rho']) # fitter optimization test result_gates = fitter.fit() expected_gates = gates expected_gates['E'] 
= self.convert_from_ptm(expected_gates['E']) expected_gates['rho'] = self.convert_from_ptm(expected_gates['rho']) self.compare_gates(expected_gates, result_gates, labels + ['E', 'rho']) def test_noiseless_standard_basis(self): self.run_test_on_basis_and_noise() def test_noiseless_h_gate_standard_basis(self): basis = default_gateset_basis() basis.add_gate(HGate()) self.run_test_on_basis_and_noise(gateset_basis=basis) def test_noiseless_s_gate_standard_basis(self): basis = default_gateset_basis() basis.add_gate(SGate()) self.run_test_on_basis_and_noise(gateset_basis=basis) def test_amplitude_damping_standard_basis(self): gamma = 0.05 noise_ptm = PTM(np.array([[1, 0, 0, 0], [0, np.sqrt(1-gamma), 0, 0], [0, 0, np.sqrt(1-gamma), 0], [gamma, 0, 0, 1-gamma]])) noise_model = NoiseModel() noise_model.add_all_qubit_quantum_error(noise_ptm, ['u1', 'u2', 'u3']) self.run_test_on_basis_and_noise(noise_model=noise_model, noise_ptm=np.real(noise_ptm.data)) def test_depolarization_standard_basis(self): p = 0.05 noise_ptm = PTM(np.array([[1, 0, 0, 0], [0, 1-p, 0, 0], [0, 0, 1-p, 0], [0, 0, 0, 1-p]])) noise_model = NoiseModel() noise_model.add_all_qubit_quantum_error(noise_ptm, ['u1', 'u2', 'u3']) self.run_test_on_basis_and_noise(noise_model=noise_model, noise_ptm=np.real(noise_ptm.data)) if __name__ == '__main__': unittest.main()
[ "numpy.abs", "qiskit.ignis.verification.tomography.basis.default_gateset_basis", "qiskit.ignis.verification.tomography.GatesetTomographyFitter", "numpy.sqrt", "qiskit.ignis.verification.tomography.gateset_tomography_circuits", "numpy.nditer", "qiskit.compiler.assemble", "qiskit.extensions.SGate", "numpy.array", "numpy.real", "numpy.linalg.inv", "qiskit.extensions.HGate", "unittest.main", "qiskit.providers.aer.noise.NoiseModel", "qiskit.Aer.get_backend" ]
[((6227, 6242), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6240, 6242), False, 'import unittest\n'), ((1256, 1289), 'qiskit.Aer.get_backend', 'Aer.get_backend', (['"""qasm_simulator"""'], {}), "('qasm_simulator')\n", (1271, 1289), False, 'from qiskit import Aer\n'), ((1309, 1365), 'qiskit.ignis.verification.tomography.gateset_tomography_circuits', 'gateset_tomography_circuits', ([], {'gateset_basis': 'gateset_basis'}), '(gateset_basis=gateset_basis)\n', (1336, 1365), False, 'from qiskit.ignis.verification.tomography import gateset_tomography_circuits\n'), ((1381, 1412), 'qiskit.compiler.assemble', 'assemble', (['circuits'], {'shots': 'shots'}), '(circuits, shots=shots)\n', (1389, 1412), False, 'from qiskit.compiler import assemble\n'), ((1504, 1560), 'qiskit.ignis.verification.tomography.GatesetTomographyFitter', 'GatesetTomographyFitter', (['result', 'circuits', 'gateset_basis'], {}), '(result, circuits, gateset_basis)\n', (1527, 1560), False, 'from qiskit.ignis.verification.tomography import GatesetTomographyFitter\n'), ((1761, 1777), 'numpy.linalg.inv', 'np.linalg.inv', (['B'], {}), '(B)\n', (1774, 1777), True, 'import numpy as np\n'), ((4800, 4823), 'qiskit.ignis.verification.tomography.basis.default_gateset_basis', 'default_gateset_basis', ([], {}), '()\n', (4821, 4823), False, 'from qiskit.ignis.verification.tomography.basis import default_gateset_basis\n'), ((4987, 5010), 'qiskit.ignis.verification.tomography.basis.default_gateset_basis', 'default_gateset_basis', ([], {}), '()\n', (5008, 5010), False, 'from qiskit.ignis.verification.tomography.basis import default_gateset_basis\n'), ((5436, 5448), 'qiskit.providers.aer.noise.NoiseModel', 'NoiseModel', ([], {}), '()\n', (5446, 5448), False, 'from qiskit.providers.aer.noise import NoiseModel\n'), ((5960, 5972), 'qiskit.providers.aer.noise.NoiseModel', 'NoiseModel', ([], {}), '()\n', (5970, 5972), False, 'from qiskit.providers.aer.noise import NoiseModel\n'), ((1707, 1745), 'numpy.array', 'np.array', (['[(F @ rho).T[0] for F in Fs]'], {}), '([(F @ rho).T[0] for F in Fs])\n', (1715, 1745), True, 'import numpy as np\n'), ((2142, 2154), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2149, 2154), True, 'import numpy as np\n'), ((2157, 2183), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (2165, 2183), True, 'import numpy as np\n'), ((2196, 2208), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2203, 2208), True, 'import numpy as np\n'), ((2211, 2237), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (2219, 2237), True, 'import numpy as np\n'), ((2250, 2262), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2257, 2262), True, 'import numpy as np\n'), ((2265, 2298), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (2273, 2298), True, 'import numpy as np\n'), ((2307, 2319), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2314, 2319), True, 'import numpy as np\n'), ((2322, 2349), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (2330, 2349), True, 'import numpy as np\n'), ((3310, 3333), 'qiskit.ignis.verification.tomography.basis.default_gateset_basis', 'default_gateset_basis', ([], {}), '()\n', (3331, 3333), False, 'from qiskit.ignis.verification.tomography.basis import default_gateset_basis\n'), ((4847, 4854), 'qiskit.extensions.HGate', 'HGate', ([], {}), '()\n', (4852, 4854), False, 'from qiskit.extensions import HGate, SGate\n'), ((5034, 5041), 'qiskit.extensions.SGate', 
'SGate', ([], {}), '()\n', (5039, 5041), False, 'from qiskit.extensions import HGate, SGate\n'), ((5762, 5840), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1 - p, 0, 0], [0, 0, 1 - p, 0], [0, 0, 0, 1 - p]]'], {}), '([[1, 0, 0, 0], [0, 1 - p, 0, 0], [0, 0, 1 - p, 0], [0, 0, 0, 1 - p]])\n', (5770, 5840), True, 'import numpy as np\n'), ((5645, 5668), 'numpy.real', 'np.real', (['noise_ptm.data'], {}), '(noise_ptm.data)\n', (5652, 5668), True, 'import numpy as np\n'), ((6169, 6192), 'numpy.real', 'np.real', (['noise_ptm.data'], {}), '(noise_ptm.data)\n', (6176, 6192), True, 'import numpy as np\n'), ((2035, 2044), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2041, 2044), True, 'import numpy as np\n'), ((2059, 2075), 'numpy.nditer', 'np.nditer', (['(A - B)'], {}), '(A - B)\n', (2068, 2075), True, 'import numpy as np\n'), ((3456, 3468), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (3463, 3468), True, 'import numpy as np\n'), ((3482, 3494), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (3489, 3494), True, 'import numpy as np\n'), ((3530, 3542), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (3537, 3542), True, 'import numpy as np\n'), ((3550, 3562), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (3557, 3562), True, 'import numpy as np\n'), ((5266, 5284), 'numpy.sqrt', 'np.sqrt', (['(1 - gamma)'], {}), '(1 - gamma)\n', (5273, 5284), True, 'import numpy as np\n'), ((5332, 5350), 'numpy.sqrt', 'np.sqrt', (['(1 - gamma)'], {}), '(1 - gamma)\n', (5339, 5350), True, 'import numpy as np\n')]
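# Editor's note: a minimal end-to-end sketch of the gateset-tomography flow exercised by the
# tests above, assembled only from calls that already appear in the sample (the qiskit-ignis
# API of that era); treat it as an illustration, not a maintained recipe.
from qiskit import Aer
from qiskit.compiler import assemble
from qiskit.ignis.verification.tomography import (GatesetTomographyFitter,
                                                  gateset_tomography_circuits)
from qiskit.ignis.verification.tomography.basis import default_gateset_basis

basis = default_gateset_basis()
circuits = gateset_tomography_circuits(gateset_basis=basis)
backend = Aer.get_backend('qasm_simulator')
result = backend.run(assemble(circuits, shots=10000)).result()
fitter = GatesetTomographyFitter(result, circuits, basis)
gates = fitter.fit()   # reconstructed gates keyed by label, plus 'E' and 'rho'
print(list(gates))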
from scipy.optimize import fsolve
import numpy as np

from household_dist import HOUSEHOLD_DIST


def compute_household_infection_prob(prevalence, household_dist, SAR=0.3741):
    """
    computes the probability that a household is infected given population level prevalence,
    household size distribution and household secondary attack rate

    INPUT:
    prevalence = population level prevalence
    household_dist = array-like, probability distribution of household sizes 1, 2, 3, ...
    SAR = household secondary attack rate
    """
    assert(np.absolute(np.sum(household_dist) - 1) < 1e-6)

    exp_household_size = 0
    exp_household_infection_multiplier = 0
    for i in range(len(household_dist)):
        exp_household_size += (i + 1) * household_dist[i]
        exp_household_infection_multiplier += (1 + (i + 1 - 1) * SAR) * household_dist[i]

    p = prevalence * exp_household_size / exp_household_infection_multiplier
    return p


# deprecated, modified from Massey's group testing code
def match_prevalence(p_index, target_prevalence, household_dist, SAR):
    # computes probability of a primary case given population level prevalence, household size distribution,
    # and household secondary attack rate
    # INPUT:
    # p_index = probability of a primary case in the household
    # target_prevalence = population level prevalence
    # household_dist = probability distribution of household sizes 1,2,3,...
    # SAR = household secondary attack rate
    assert(np.absolute(np.sum(household_dist) - 1) < 1e-6)

    exp_household_size = 0
    for i in range(len(household_dist)):
        exp_household_size += (i + 1) * household_dist[i]

    frac_tot_infected = 0
    for i in range(len(household_dist)):
        frac_tot_infected += (i + 1) * (p_index + SAR * (1 - p_index) - SAR * (1 - p_index) ** (i + 1)) * household_dist[i] / exp_household_size

    return frac_tot_infected - target_prevalence


# deprecated, modified from Massey's group testing code
def eval_p_index(match_prevalence, target_prevalence, household_dist, SAR=0.3741):
    return fsolve(match_prevalence, 0.005, args=(target_prevalence, household_dist, SAR))


if __name__ == '__main__':
    household_dist = HOUSEHOLD_DIST['US']
    print("household infection probability (US population): " + str(compute_household_infection_prob(0.01, household_dist)))
    print("household infection probability (household size = 3): " + str(compute_household_infection_prob(0.01, household_dist=[0, 0, 1])))
[ "scipy.optimize.fsolve", "numpy.sum" ]
[((2099, 2177), 'scipy.optimize.fsolve', 'fsolve', (['match_prevalence', '(0.005)'], {'args': '(target_prevalence, household_dist, SAR)'}), '(match_prevalence, 0.005, args=(target_prevalence, household_dist, SAR))\n', (2105, 2177), False, 'from scipy.optimize import fsolve\n'), ((564, 586), 'numpy.sum', 'np.sum', (['household_dist'], {}), '(household_dist)\n', (570, 586), True, 'import numpy as np\n'), ((1508, 1530), 'numpy.sum', 'np.sum', (['household_dist'], {}), '(household_dist)\n', (1514, 1530), True, 'import numpy as np\n')]
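# Editor's note: a small worked example of the closed form implemented by
# compute_household_infection_prob above; the toy household-size distribution
# (60% one-person, 40% three-person households) is invented for illustration.
prevalence, SAR = 0.01, 0.3741
household_dist = [0.6, 0.0, 0.4]   # P(size=1)=0.6, P(size=2)=0.0, P(size=3)=0.4

# E[household size] and E[1 + (size - 1) * SAR], exactly as in the loops above
exp_size = sum((i + 1) * p_i for i, p_i in enumerate(household_dist))        # 0.6*1 + 0.4*3 = 1.8
exp_mult = sum((1 + i * SAR) * p_i for i, p_i in enumerate(household_dist))  # 0.6 + 0.4*(1 + 2*SAR) ≈ 1.2993

p_household = prevalence * exp_size / exp_mult
print(p_household)   # ≈ 0.01 * 1.8 / 1.2993 ≈ 0.0139, vs. the 0.01 individual-level prevalence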
import torch import torch.nn as nn # from torch.nn import init import functools # from torch.autograd import Variable import numpy as np import pdb ############################################################################### # Functions ############################################################################### def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.02) if hasattr(m.bias, 'data'): m.bias.data.fill_(0) elif classname.find('BatchNorm2d') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) def get_norm_layer(norm_type='instance'): if norm_type == 'batch': norm_layer = functools.partial(nn.BatchNorm2d, affine=True) elif norm_type == 'instance': norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True) else: raise NotImplementedError('normalization layer [%s] is not found' % norm_type) return norm_layer def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=[], use_parallel=True, learn_residual=False): netG = None use_gpu = len(gpu_ids) > 0 norm_layer = get_norm_layer(norm_type=norm) if use_gpu: assert (torch.cuda.is_available()) # pdb.set_trace() # (Pdb) a # input_nc = 3 # output_nc = 3 # ngf = 64 # which_model_netG = 'resnet_9blocks' # norm = 'instance' # use_dropout = True # gpu_ids = [0] # use_parallel = True # learn_residual = True if which_model_netG == 'resnet_9blocks': netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids, use_parallel=use_parallel, learn_residual=learn_residual) elif which_model_netG == 'resnet_6blocks': netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids, use_parallel=use_parallel, learn_residual=learn_residual) elif which_model_netG == 'unet_128': netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids, use_parallel=use_parallel, learn_residual=learn_residual) elif which_model_netG == 'unet_256': netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids, use_parallel=use_parallel, learn_residual=learn_residual) else: raise NotImplementedError( 'Generator model name [%s] is not recognized' % which_model_netG) if len(gpu_ids) > 0: netG.cuda(gpu_ids[0]) netG.apply(weights_init) return netG def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch', use_sigmoid=False, gpu_ids=[], use_parallel=True): netD = None use_gpu = len(gpu_ids) > 0 norm_layer = get_norm_layer(norm_type=norm) # pdb.set_trace() # (Pdb) a # input_nc = 3 # ndf = 64 # which_model_netD = 'basic' # n_layers_D = 3 # norm = 'instance' # use_sigmoid = True # gpu_ids = [0] # use_parallel = True if use_gpu: assert (torch.cuda.is_available()) if which_model_netD == 'basic': netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids, use_parallel=use_parallel) elif which_model_netD == 'n_layers': netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids, use_parallel=use_parallel) else: raise NotImplementedError( 'Discriminator model name [%s] is not recognized' % which_model_netD) if use_gpu: netD.cuda(gpu_ids[0]) netD.apply(weights_init) return netD def print_network(net): num_params = 0 for param in net.parameters(): num_params += param.numel() print(net) 
print('Total number of parameters: %d' % num_params) ############################################################################## # Classes ############################################################################## # Defines the generator that consists of Resnet blocks between a few # downsampling/upsampling operations. # Code and idea originally from <NAME>'s architecture. # https://github.com/jcjohnson/fast-neural-style/ class ResnetGenerator(nn.Module): def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], use_parallel=True, learn_residual=False, padding_type='reflect'): assert (n_blocks >= 0) super(ResnetGenerator, self).__init__() # pdb.set_trace() # (Pdb) a # self = ResnetGenerator() # input_nc = 3 # output_nc = 3 # ngf = 64 # norm_layer = functools.partial(<class # 'torch.nn.modules.instancenorm.InstanceNorm2d'>, affine=False, track_running_stats=True) # use_dropout = True # n_blocks = 9 # gpu_ids = [0] # use_parallel = True # learn_residual = True # padding_type = 'reflect' self.input_nc = input_nc self.output_nc = output_nc self.ngf = ngf self.gpu_ids = gpu_ids self.use_parallel = use_parallel self.learn_residual = learn_residual if type(norm_layer) == functools.partial: use_bias = norm_layer.func == nn.InstanceNorm2d else: use_bias = norm_layer == nn.InstanceNorm2d model = [ nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), norm_layer(ngf), nn.ReLU(True) ] n_downsampling = 2 # 下采样 # for i in range(n_downsampling): # [0,1] # mult = 2**i # # model += [ # nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), # norm_layer(ngf * mult * 2), # nn.ReLU(True) # ] model += [ nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(128), nn.ReLU(True), nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(256), nn.ReLU(True) ] # 中间的残差网络 # mult = 2**n_downsampling for i in range(n_blocks): # model += [ # ResnetBlock( # ngf * mult, padding_type=padding_type, norm_layer=norm_layer, # use_dropout=use_dropout, use_bias=use_bias) # ] model += [ ResnetBlock(256, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias) ] # 上采样 # for i in range(n_downsampling): # mult = 2**(n_downsampling - i) # # model += [ # nn.ConvTranspose2d( # ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, # padding=1, output_padding=1, bias=use_bias), # norm_layer(int(ngf * mult / 2)), # nn.ReLU(True) # ] model += [ nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias), norm_layer(128), nn.ReLU(True), nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias), norm_layer(64), nn.ReLU(True), ] model += [ nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, kernel_size=7, padding=0), nn.Tanh() ] self.model = nn.Sequential(*model) def forward(self, input): if self.gpu_ids and isinstance( input.data, torch.cuda.FloatTensor) and self.use_parallel: output = nn.parallel.data_parallel(self.model, input, self.gpu_ids) else: output = self.model(input) if self.learn_residual: # output = input + output output = torch.clamp(input + output, min=-1, max=1) return output # Define a resnet block class ResnetBlock(nn.Module): def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): super(ResnetBlock, self).__init__() # pdb.set_trace() # (Pdb) a # self = ResnetBlock() # dim = 256 # padding_type = 'reflect' # 
norm_layer = functools.partial(<class 'torch.nn.modules. # instancenorm.InstanceNorm2d'>, affine=False, track_running_stats=True) # use_dropout = True # use_bias = True padAndConv = { 'reflect': [ nn.ReflectionPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias) ], 'replicate': [ nn.ReplicationPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias) ], 'zero': [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=use_bias)] } try: blocks = padAndConv[padding_type] + [ norm_layer(dim), nn.ReLU(True) ] + [nn.Dropout(0.5) ] if use_dropout else [] + padAndConv[padding_type] + [ norm_layer(dim) ] except: raise NotImplementedError('padding [%s] is not implemented' % padding_type) self.conv_block = nn.Sequential(*blocks) # self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) # def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): # padAndConv = { # 'reflect': [nn.ReflectionPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias)], # 'replicate': [nn.ReplicationPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias)], # 'zero': [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=use_bias)] # } # try: # blocks = [ # padAndConv[padding_type], # # norm_layer(dim), # nn.ReLU(True), # nn.Dropout(0.5) if use_dropout else None, # # padAndConv[padding_type], # # norm_layer(dim) # ] # except: # raise NotImplementedError('padding [%s] is not implemented' % padding_type) # # return nn.Sequential(*blocks) # blocks = [] # if padding_type == 'reflect': # blocks += [nn.ReflectionPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias)] # elif padding_type == 'replicate': # blocks += [nn.ReplicationPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias)] # elif padding_type == 'zero': # blocks += [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=use_bias)] # else: # raise NotImplementedError('padding [%s] is not implemented' % padding_type) # # blocks += [ # norm_layer(dim), # nn.ReLU(True), # nn.Dropout(0.5) if use_dropout else None # ] # # if padding_type == 'reflect': # blocks += [nn.ReflectionPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias)] # elif padding_type == 'replicate': # blocks += [nn.ReplicationPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias)] # elif padding_type == 'zero': # blocks += [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=use_bias)] # else: # raise NotImplementedError('padding [%s] is not implemented' % padding_type) # # blocks += [ # norm_layer(dim) # ] # # return nn.Sequential(*blocks) def forward(self, x): out = x + self.conv_block(x) return out # Defines the Unet generator. # |num_downs|: number of downsamplings in UNet. 
For example, # if |num_downs| == 7, image of size 128x128 will become of size 1x1 # at the bottleneck class UnetGenerator(nn.Module): def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[], use_parallel=True, learn_residual=False): super(UnetGenerator, self).__init__() pdb.set_trace() self.gpu_ids = gpu_ids self.use_parallel = use_parallel self.learn_residual = learn_residual # currently support only input_nc == output_nc assert (input_nc == output_nc) # construct unet structure unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, norm_layer=norm_layer, innermost=True) for i in range(num_downs - 5): unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, unet_block, norm_layer=norm_layer, use_dropout=use_dropout) unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(output_nc, ngf, unet_block, outermost=True, norm_layer=norm_layer) self.model = unet_block def forward(self, input): if self.gpu_ids and isinstance( input.data, torch.cuda.FloatTensor) and self.use_parallel: output = nn.parallel.data_parallel(self.model, input, self.gpu_ids) else: output = self.model(input) if self.learn_residual: output = input + output output = torch.clamp(output, min=-1, max=1) return output # Defines the submodule with skip connection. # X -------------------identity---------------------- X # |-- downsampling -- |submodule| -- upsampling --| class UnetSkipConnectionBlock(nn.Module): def __init__(self, outer_nc, inner_nc, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): super(UnetSkipConnectionBlock, self).__init__() pdb.set_trace() self.outermost = outermost if type(norm_layer) == functools.partial: use_bias = norm_layer.func == nn.InstanceNorm2d else: use_bias = norm_layer == nn.InstanceNorm2d dConv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) dRelu = nn.LeakyReLU(0.2, True) dNorm = norm_layer(inner_nc) uRelu = nn.ReLU(True) uNorm = norm_layer(outer_nc) if outermost: uConv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1) dModel = [dConv] uModel = [uRelu, uConv, nn.Tanh()] model = [dModel, submodule, uModel] # model = [ # # Down # nn.Conv2d( outer_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias), # # submodule, # # Up # nn.ReLU(True), # nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1), # nn.Tanh() # ] elif innermost: uConv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) dModel = [dRelu, dConv] uModel = [uRelu, uConv, uNorm] model = [dModel, uModel] # model = [ # # down # nn.LeakyReLU(0.2, True), # # up # nn.ReLU(True), # nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias), # norm_layer(outer_nc) # ] else: uConv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) dModel = [dRelu, dConv, dNorm] uModel = [uRelu, uConv, uNorm] model = [dModel, submodule, uModel] model += [nn.Dropout(0.5)] if use_dropout else [] # if use_dropout: # model = down + [submodule] + up + [nn.Dropout(0.5)] # else: # model = down + [submodule] + up self.model = nn.Sequential(*model) def forward(self, x): if self.outermost: return self.model(x) 
else: return torch.cat([self.model(x), x], 1) # Defines the PatchGAN discriminator with the specified arguments. class NLayerDiscriminator(nn.Module): def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[], use_parallel=True): super(NLayerDiscriminator, self).__init__() # pdb.set_trace() # (Pdb) a # self = NLayerDiscriminator() # input_nc = 3 # ndf = 64 # n_layers = 3 # norm_layer = functools.partial(<class # 'torch.nn.modules.instancenorm.InstanceNorm2d'>, affine=False, track_running_stats=True) # use_sigmoid = True # gpu_ids = [0] # use_parallel = True self.gpu_ids = gpu_ids self.use_parallel = use_parallel if type(norm_layer) == functools.partial: use_bias = norm_layer.func == nn.InstanceNorm2d else: use_bias = norm_layer == nn.InstanceNorm2d kw = 4 padw = int(np.ceil((kw - 1) / 2)) sequence = [ nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True) ] nf_mult = 1 nf_mult_prev = 1 for n in range(1, n_layers): nf_mult_prev = nf_mult nf_mult = min(2**n, 8) sequence += [ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True) ] nf_mult_prev = nf_mult nf_mult = min(2**n_layers, 8) sequence += [ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True) ] sequence += [ nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw) ] if use_sigmoid: sequence += [nn.Sigmoid()] self.model = nn.Sequential(*sequence) def forward(self, input): if len(self.gpu_ids) and isinstance( input.data, torch.cuda.FloatTensor) and self.use_parallel: return nn.parallel.data_parallel(self.model, input, self.gpu_ids) else: return self.model(input)
[ "torch.nn.Sigmoid", "torch.nn.ReLU", "torch.nn.parallel.data_parallel", "numpy.ceil", "torch.nn.Tanh", "torch.nn.LeakyReLU", "torch.nn.Dropout", "torch.nn.Sequential", "torch.nn.ReflectionPad2d", "torch.nn.Conv2d", "torch.cuda.is_available", "functools.partial", "torch.nn.ReplicationPad2d", "pdb.set_trace", "torch.nn.ConvTranspose2d", "torch.clamp" ]
[((739, 785), 'functools.partial', 'functools.partial', (['nn.BatchNorm2d'], {'affine': '(True)'}), '(nn.BatchNorm2d, affine=True)\n', (756, 785), False, 'import functools\n'), ((1527, 1552), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1550, 1552), False, 'import torch\n'), ((4480, 4505), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4503, 4505), False, 'import torch\n'), ((10402, 10423), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (10415, 10423), True, 'import torch.nn as nn\n'), ((12238, 12260), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (12251, 12260), True, 'import torch.nn as nn\n'), ((15338, 15353), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (15351, 15353), False, 'import pdb\n'), ((18169, 18184), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (18182, 18184), False, 'import pdb\n'), ((18417, 18502), 'torch.nn.Conv2d', 'nn.Conv2d', (['outer_nc', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias\n )\n', (18426, 18502), True, 'import torch.nn as nn\n'), ((18644, 18667), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (18656, 18667), True, 'import torch.nn as nn\n'), ((18721, 18734), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (18728, 18734), True, 'import torch.nn as nn\n'), ((20917, 20938), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (20930, 20938), True, 'import torch.nn as nn\n'), ((23460, 23484), 'torch.nn.Sequential', 'nn.Sequential', (['*sequence'], {}), '(*sequence)\n', (23473, 23484), True, 'import torch.nn as nn\n'), ((841, 917), 'functools.partial', 'functools.partial', (['nn.InstanceNorm2d'], {'affine': '(False)', 'track_running_stats': '(True)'}), '(nn.InstanceNorm2d, affine=False, track_running_stats=True)\n', (858, 917), False, 'import functools\n'), ((7472, 7493), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (7490, 7493), True, 'import torch.nn as nn\n'), ((7507, 7572), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ngf'], {'kernel_size': '(7)', 'padding': '(0)', 'bias': 'use_bias'}), '(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias)\n', (7516, 7572), True, 'import torch.nn as nn\n'), ((7615, 7628), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7622, 7628), True, 'import torch.nn as nn\n'), ((8015, 8084), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(64, 128, kernel_size=3, stride=2, padding=1, bias=use_bias)\n', (8024, 8084), True, 'import torch.nn as nn\n'), ((8237, 8250), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8244, 8250), True, 'import torch.nn as nn\n'), ((8264, 8334), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(128, 256, kernel_size=3, stride=2, padding=1, bias=use_bias)\n', (8273, 8334), True, 'import torch.nn as nn\n'), ((8487, 8500), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8494, 8500), True, 'import torch.nn as nn\n'), ((9527, 9628), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)', 'bias': 'use_bias'}), '(256, 128, kernel_size=3, stride=2, padding=1,\n output_padding=1, 
bias=use_bias)\n', (9545, 9628), True, 'import torch.nn as nn\n'), ((9853, 9866), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (9860, 9866), True, 'import torch.nn as nn\n'), ((9880, 9980), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)', 'bias': 'use_bias'}), '(128, 64, kernel_size=3, stride=2, padding=1,\n output_padding=1, bias=use_bias)\n', (9898, 9980), True, 'import torch.nn as nn\n'), ((10204, 10217), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (10211, 10217), True, 'import torch.nn as nn\n'), ((10261, 10282), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (10279, 10282), True, 'import torch.nn as nn\n'), ((10296, 10346), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'output_nc'], {'kernel_size': '(7)', 'padding': '(0)'}), '(64, output_nc, kernel_size=7, padding=0)\n', (10305, 10346), True, 'import torch.nn as nn\n'), ((10360, 10369), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (10367, 10369), True, 'import torch.nn as nn\n'), ((10591, 10649), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (10616, 10649), True, 'import torch.nn as nn\n'), ((10794, 10836), 'torch.clamp', 'torch.clamp', (['(input + output)'], {'min': '(-1)', 'max': '(1)'}), '(input + output, min=-1, max=1)\n', (10805, 10836), False, 'import torch\n'), ((17388, 17446), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (17413, 17446), True, 'import torch.nn as nn\n'), ((17589, 17623), 'torch.clamp', 'torch.clamp', (['output'], {'min': '(-1)', 'max': '(1)'}), '(output, min=-1, max=1)\n', (17600, 17623), False, 'import torch\n'), ((18815, 18893), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)\n', (18833, 18893), True, 'import torch.nn as nn\n'), ((22170, 22191), 'numpy.ceil', 'np.ceil', (['((kw - 1) / 2)'], {}), '((kw - 1) / 2)\n', (22177, 22191), True, 'import numpy as np\n'), ((22226, 22290), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ndf'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)\n', (22235, 22290), True, 'import torch.nn as nn\n'), ((22304, 22327), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (22316, 22327), True, 'import torch.nn as nn\n'), ((22965, 23068), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw', 'bias': 'use_bias'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,\n padding=padw, bias=use_bias)\n', (22974, 23068), True, 'import torch.nn as nn\n'), ((23227, 23250), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (23239, 23250), True, 'import torch.nn as nn\n'), ((23296, 23363), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult)', '(1)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)\n', (23305, 23363), True, 'import torch.nn as nn\n'), ((23655, 23713), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, 
input, self.gpu_ids)\n', (23680, 23713), True, 'import torch.nn as nn\n'), ((11439, 11460), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (11457, 11460), True, 'import torch.nn as nn\n'), ((11478, 11527), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'bias': 'use_bias'}), '(dim, dim, kernel_size=3, bias=use_bias)\n', (11487, 11527), True, 'import torch.nn as nn\n'), ((11586, 11608), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['(1)'], {}), '(1)\n', (11605, 11608), True, 'import torch.nn as nn\n'), ((11626, 11675), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'bias': 'use_bias'}), '(dim, dim, kernel_size=3, bias=use_bias)\n', (11635, 11675), True, 'import torch.nn as nn\n'), ((11724, 11784), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': 'use_bias'}), '(dim, dim, kernel_size=3, padding=1, bias=use_bias)\n', (11733, 11784), True, 'import torch.nn as nn\n'), ((19115, 19124), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (19122, 19124), True, 'import torch.nn as nn\n'), ((19555, 19648), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['inner_nc', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1,\n bias=use_bias)\n', (19573, 19648), True, 'import torch.nn as nn\n'), ((20258, 20356), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding\n =1, bias=use_bias)\n', (20276, 20356), True, 'import torch.nn as nn\n'), ((22533, 22636), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw', 'bias': 'use_bias'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,\n padding=padw, bias=use_bias)\n', (22542, 22636), True, 'import torch.nn as nn\n'), ((22823, 22846), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (22835, 22846), True, 'import torch.nn as nn\n'), ((23424, 23436), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (23434, 23436), True, 'import torch.nn as nn\n'), ((11924, 11939), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (11934, 11939), True, 'import torch.nn as nn\n'), ((20704, 20719), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (20714, 20719), True, 'import torch.nn as nn\n'), ((11893, 11906), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (11900, 11906), True, 'import torch.nn as nn\n')]
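# Editor's note: a CPU-only smoke test for the generator/discriminator factories defined in
# the sample above; it is not part of the original file. It assumes define_G/define_D return
# the network when gpu_ids is empty, and the 1x3x256x256 input is an arbitrary size chosen
# for illustration.
import torch

netG = define_G(input_nc=3, output_nc=3, ngf=64, which_model_netG='resnet_9blocks',
                norm='instance', use_dropout=True, gpu_ids=[], learn_residual=True)
netD = define_D(input_nc=3, ndf=64, which_model_netD='basic',
                norm='instance', use_sigmoid=True, gpu_ids=[])

x = torch.randn(1, 3, 256, 256)   # a fake batch with one RGB image
with torch.no_grad():
    y = netG(x)   # restored image, same shape as x (residual output clamped to [-1, 1])
    d = netD(y)   # PatchGAN score map
print(y.shape, d.shape)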
import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt
from dataset import mydataset_PCA
from torch.utils.data import DataLoader
from tqdm import tqdm
import pickle
import sklearn.decomposition as dc
import argparse

parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--aug', help='if processing data augmentation or not', required=True, default=False, type=bool)
args = parser.parse_args()

image_dir = "shopee-product-matching/train_images"
text_path = "shopee-product-matching/train.csv"

epoch_num = 1   # number of passes over the whole training set
batch_size = 1  # size of one batch of data during training

# Load the dataset and take 10% of it as a mini dataset; only test the easy mode.
train_dataset = mydataset_PCA(image_dir=image_dir, text_path=text_path, is_train=True, is_augmentation=args.aug)
test_dataset = mydataset_PCA(image_dir=image_dir, text_path=text_path, is_train=False)

feas_train = []  # create the dataset feature base in low dimension
labels_train = []
train_loader = DataLoader(dataset=train_dataset, batch_size=1, shuffle=True)
pca_estimator = dc.PCA(n_components=100)

# for image, text in train_loader:  # iterate over every batch of data
img_train = []
for batch_data in tqdm(train_loader):
    image, text = batch_data
    # img = np.squeeze(image.numpy())
    img = image.numpy()
    img = img[0, ...]  # only using the
    # img_train.append(img.reshape(1, -1))
    img_train.append(img)
    img_num = img.shape[0]
    # labels_train.append(text[0])
    labels_train.extend([text[0] for _ in range(img_num)])

# do the PCA
labels_train = np.array(labels_train)
img_train = np.concatenate(img_train, axis=0)
img_mean = np.mean(img_train, axis=0, keepdims=True)
img_train = img_train - img_mean
trainned_base = pca_estimator.fit_transform(img_train)
components_ = pca_estimator.components_

# do the test
test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=True)
acc5 = 0
acc1 = 0
for batch_data in tqdm(test_loader):
    image, text = batch_data
    # Process the image data and extract features (the original comment mentions SIFT,
    # but this script projects the raw image onto the PCA basis instead).
    # img_test = np.squeeze(image.numpy())
    img_test = image.numpy()
    # img_c = img.reshape(1, -1)
    img_c = img_test[0, ...]
    img_c = img_c - img_mean
    img_feature = pca_estimator.transform(img_c)

    distance_s = np.sum((img_feature - trainned_base) ** 2, axis=-1)
    idx_sort = np.argsort(distance_s)
    idx_top5 = idx_sort[:5]
    pred_label = labels_train[idx_top5]
    if text[0] in pred_label:  # TODO: text need to be further index
        acc5 = acc5 + 1
    if text[0] == pred_label[0]:
        acc1 = acc1 + 1

# err_rate = err / len(test_dataset)
# acc = 1 - err_rate
acc_rate5 = acc5 / len(test_dataset)
acc_rate1 = acc1 / len(test_dataset)
print('----------------------------')
# print(f"err = {err_rate:.4f}")
print(f"acc1 = {acc_rate1:.4f}")
print(f"acc5 = {acc_rate5:.4f}")
[ "numpy.mean", "argparse.ArgumentParser", "sklearn.decomposition.PCA", "tqdm.tqdm", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.concatenate", "torch.utils.data.DataLoader", "dataset.mydataset_PCA" ]
[((251, 312), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (274, 312), False, 'import argparse\n'), ((688, 788), 'dataset.mydataset_PCA', 'mydataset_PCA', ([], {'image_dir': 'image_dir', 'text_path': 'text_path', 'is_train': '(True)', 'is_augmentation': 'args.aug'}), '(image_dir=image_dir, text_path=text_path, is_train=True,\n is_augmentation=args.aug)\n', (701, 788), False, 'from dataset import mydataset_PCA\n'), ((801, 872), 'dataset.mydataset_PCA', 'mydataset_PCA', ([], {'image_dir': 'image_dir', 'text_path': 'text_path', 'is_train': '(False)'}), '(image_dir=image_dir, text_path=text_path, is_train=False)\n', (814, 872), False, 'from dataset import mydataset_PCA\n'), ((975, 1036), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': '(1)', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=1, shuffle=True)\n', (985, 1036), False, 'from torch.utils.data import DataLoader\n'), ((1058, 1082), 'sklearn.decomposition.PCA', 'dc.PCA', ([], {'n_components': '(100)'}), '(n_components=100)\n', (1064, 1082), True, 'import sklearn.decomposition as dc\n'), ((1159, 1177), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (1163, 1177), False, 'from tqdm import tqdm\n'), ((1528, 1550), 'numpy.array', 'np.array', (['labels_train'], {}), '(labels_train)\n', (1536, 1550), True, 'import numpy as np\n'), ((1563, 1596), 'numpy.concatenate', 'np.concatenate', (['img_train'], {'axis': '(0)'}), '(img_train, axis=0)\n', (1577, 1596), True, 'import numpy as np\n'), ((1607, 1648), 'numpy.mean', 'np.mean', (['img_train'], {'axis': '(0)', 'keepdims': '(True)'}), '(img_train, axis=0, keepdims=True)\n', (1614, 1648), True, 'import numpy as np\n'), ((1805, 1865), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': '(1)', 'shuffle': '(True)'}), '(dataset=test_dataset, batch_size=1, shuffle=True)\n', (1815, 1865), False, 'from torch.utils.data import DataLoader\n'), ((1907, 1924), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (1911, 1924), False, 'from tqdm import tqdm\n'), ((2200, 2251), 'numpy.sum', 'np.sum', (['((img_feature - trainned_base) ** 2)'], {'axis': '(-1)'}), '((img_feature - trainned_base) ** 2, axis=-1)\n', (2206, 2251), True, 'import numpy as np\n'), ((2267, 2289), 'numpy.argsort', 'np.argsort', (['distance_s'], {}), '(distance_s)\n', (2277, 2289), True, 'import numpy as np\n')]
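# Editor's note: a short aside on the projection used in the script above, not part of the
# original file. sklearn's PCA.transform(X) computes (X - mean_) @ components_.T (without
# whitening), so once the script centres the images itself the fitted mean_ is ~0 and
# transform() reduces to a plain projection onto components_. The random toy data below only
# demonstrates that equivalence.
import numpy as np
import sklearn.decomposition as dc

X = np.random.rand(50, 20)
pca = dc.PCA(n_components=5)
Z = pca.fit_transform(X)
Z_manual = (X - pca.mean_) @ pca.components_.T
print(np.allclose(Z, Z_manual))   # expected: True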
import ipywidgets as widgets import ipywidgets from traitlets import Unicode import traitlets from traittypes import Array import logging import numpy as np from .serialize import array_cube_png_serialization, array_serialization from .transferfunction import * import warnings logger = logging.getLogger("ipyvolume") _last_volume_renderer = None @widgets.register('ipyvolume.Scatter') class Scatter(widgets.DOMWidget): _view_name = Unicode('ScatterView').tag(sync=True) _view_module = Unicode('ipyvolume').tag(sync=True) _model_name = Unicode('ScatterModel').tag(sync=True) _model_module = Unicode('ipyvolume').tag(sync=True) x = Array(default_value=None).tag(sync=True, **array_serialization) y = Array(default_value=None).tag(sync=True, **array_serialization) z = Array(default_value=None).tag(sync=True, **array_serialization) vx = Array(default_value=None,allow_none=True).tag(sync=True, **array_serialization) vy = Array(default_value=None,allow_none=True).tag(sync=True, **array_serialization) vz = Array(default_value=None,allow_none=True).tag(sync=True, **array_serialization) selected = Array(default_value=None,allow_none=True).tag(sync=True, **array_serialization) size = traitlets.Float(0.01).tag(sync=True) size_selected = traitlets.Float(0.02).tag(sync=True) color = traitlets.Unicode(default_value="red").tag(sync=True) color_selected = traitlets.Unicode(default_value="white").tag(sync=True) geo = traitlets.Unicode('diamond').tag(sync=True) default_style = dict() default_style["figure.facecolor"] = "black" default_style["xaxis.color"] = "red" default_style["yaxis.color"] = "green" default_style["zaxis.color"] = "blue" default_style["axes.color"] = "grey" @widgets.register('ipyvolume.VolumeRendererThree') class VolumeRendererThree(widgets.DOMWidget): """Widget class representing a volume (rendering) using three.js""" _view_name = Unicode('VolumeRendererThreeView').tag(sync=True) _view_module = Unicode('ipyvolume').tag(sync=True) _model_name = Unicode('VolumeRendererThreeModel').tag(sync=True) _model_module = Unicode('ipyvolume').tag(sync=True) data = Array(default_value=None, allow_none=True).tag(sync=True, **array_cube_png_serialization) data_min = traitlets.CFloat().tag(sync=True) data_max = traitlets.CFloat().tag(sync=True) tf = traitlets.Instance(TransferFunction, allow_none=True).tag(sync=True, **ipywidgets.widget_serialization) angle1 = traitlets.Float(0.1).tag(sync=True) angle2 = traitlets.Float(0.2).tag(sync=True) scatters = traitlets.List(traitlets.Instance(Scatter), [], allow_none=False).tag(sync=True, **ipywidgets.widget_serialization) animation = traitlets.Float(1000.0).tag(sync=True) ambient_coefficient = traitlets.Float(0.5).tag(sync=True) diffuse_coefficient = traitlets.Float(0.8).tag(sync=True) specular_coefficient = traitlets.Float(0.5).tag(sync=True) specular_exponent = traitlets.Float(5).tag(sync=True) stereo = traitlets.Bool(False).tag(sync=True) fullscreen = traitlets.Bool(False).tag(sync=True) width = traitlets.CInt(500).tag(sync=True) height = traitlets.CInt(400).tag(sync=True) downscale = traitlets.CInt(1).tag(sync=True) show = traitlets.Unicode("Volume").tag(sync=True) # for debugging xlim = traitlets.List(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2).tag(sync=True) ylim = traitlets.List(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2).tag(sync=True) zlim = traitlets.List(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2).tag(sync=True) xlabel = traitlets.Unicode("x").tag(sync=True) ylabel = traitlets.Unicode("y").tag(sync=True) zlabel = 
traitlets.Unicode("z").tag(sync=True) style = traitlets.Dict(default_value=default_style).tag(sync=True) #xlim = traitlets.Tuple(traitlets.CFloat(0), traitlets.CFloat(1)).tag(sync=True) #y#lim = traitlets.Tuple(traitlets.CFloat(0), traitlets.CFloat(1)).tag(sync=True) #zlim = traitlets.Tuple(traitlets.CFloat(0), traitlets.CFloat(1)).tag(sync=True) def _volume_widets(v, lighting=False): import ipywidgets #angle1 = ipywidgets.FloatSlider(min=0, max=np.pi*2, value=v.angle1, description="angle1") #angle2 = ipywidgets.FloatSlider(min=0, max=np.pi*2, value=v.angle2, description="angle2") #ipywidgets.jslink((v, 'angle1'), (angle1, 'value')) #ipywidgets.jslink((v, 'angle2'), (angle2, 'value')) if lighting: ambient_coefficient = ipywidgets.FloatSlider(min=0, max=1, step=0.001, value=v.ambient_coefficient, description="ambient") diffuse_coefficient = ipywidgets.FloatSlider(min=0, max=1, step=0.001, value=v.diffuse_coefficient, description="diffuse") specular_coefficient = ipywidgets.FloatSlider(min=0, max=1, step=0.001, value=v.specular_coefficient, description="specular") specular_exponent = ipywidgets.FloatSlider(min=0, max=10, step=0.001, value=v.specular_exponent, description="specular exp") #angle2 = ipywidgets.FloatSlider(min=0, max=np.pi*2, value=v.angle2, description="angle2") ipywidgets.jslink((v, 'ambient_coefficient'), (ambient_coefficient, 'value')) ipywidgets.jslink((v, 'diffuse_coefficient'), (diffuse_coefficient, 'value')) ipywidgets.jslink((v, 'specular_coefficient'), (specular_coefficient, 'value')) ipywidgets.jslink((v, 'specular_exponent'), (specular_exponent, 'value')) widgets_bottom = [ipywidgets.HBox([ambient_coefficient, diffuse_coefficient]), ipywidgets.HBox([specular_coefficient, specular_exponent])] else: widgets_bottom = [] v.ambient_coefficient = 1 v.diffuse_coefficient = 0 v.specular_coefficient = 0 if 1: stereo = widgets.ToggleButton(value=v.stereo, description='stereo', icon='eye') fullscreen = widgets.ToggleButton(value=v.stereo, description='fullscreen', icon='arrows-alt') ipywidgets.jslink((v, 'stereo'), (stereo, 'value')) ipywidgets.jslink((v, 'fullscreen'), (fullscreen, 'value')) widgets_bottom += [ipywidgets.HBox([stereo,fullscreen])] return ipywidgets.VBox( [v.tf.control(), v, ] + widgets_bottom# , ipywidgets.HBox([angle1, angle2]) ) def volshow(*args, **kwargs): """Deprecated: please use ipyvolume.quickvol or use the ipyvolume.pylab interface""" warnings.warn("Please use ipyvolume.quickvol or use the ipyvolume.pylab interface", DeprecationWarning, stacklevel=2) return quickvolshow(*args, **kwargs) def quickquiver(x, y, z, u, v, w, **kwargs): import ipyvolume.pylab as p3 p3.figure() p3.quiver(x, y, z, u, v, w, **kwargs) return p3.current.container def quickscatter(x, y, z, **kwargs): import ipyvolume.pylab as p3 p3.figure() p3.scatter(x, y, z, **kwargs) return p3.current.container def quickvolshow(data, lighting=False, data_min=None, data_max=None, tf=None, stereo=False, width=400, height=500, ambient_coefficient=0.5, diffuse_coefficient=0.8, specular_coefficient=0.5, specular_exponent=5, downscale=1, level=[0.1, 0.5, 0.9], opacity=[0.01, 0.05, 0.1], level_width=0.1, **kwargs): """ Visualize a 3d array using volume rendering :param data: 3d numpy array :param lighting: boolean, to use lighting or not, if set to false, lighting parameters will be overriden :param data_min: minimum value to consider for data, if None, computed using np.nanmin :param data_max: maximum value to consider for data, if None, computed using np.nanmax :param tf: transfer function (see 
ipyvolume.transfer_function, or use the argument below) :param stereo: stereo view for virtual reality (cardboard and similar VR head mount) :param width: width of rendering surface :param height: height of rendering surface :param ambient_coefficient: lighting parameter :param diffuse_coefficient: lighting parameter :param specular_coefficient: lighting parameter :param specular_exponent: lighting parameter :param downscale: downscale the rendering for better performance, for instance when set to 2, a 512x512 canvas will show a 256x256 rendering upscaled, but it will render twice as fast. :param level: level(s) for the where the opacity in the volume peaks, maximum sequence of length 3 :param opacity: opacity(ies) for each level, scalar or sequence of max length 3 :param level_width: width of the (gaussian) bumps where the opacity peaks, scalar or sequence of max length 3 :param kwargs: extra argument passed to Volume and default transfer function :return: """ if tf is None: # TODO: should this just call the pylab interface? #tf = TransferFunctionJsBumps(**kwargs) tf_kwargs = {} # level, opacity and widths can be scalars try: level[0] except: level = [level] try: opacity[0] except: opacity = [opacity] * 3 try: level_width[0] except: level_width = [level_width] * 3 #clip off lists min_length = min(len(level), len(level_width), len(opacity)) level = list(level[:min_length]) opacity = list(opacity[:min_length]) level_width = list(level_width[:min_length]) # append with zeros while len(level) < 3: level.append(0) while len(opacity) < 3: opacity.append(0) while len(level_width) < 3: level_width.append(0) for i in range(1,4): tf_kwargs["level"+str(i)] = level[i-1] tf_kwargs["opacity"+str(i)] = opacity[i-1] tf_kwargs["width"+str(i)] = level_width[i-1] tf = TransferFunctionWidgetJs3(**tf_kwargs) if data_min is None: data_min = np.nanmin(data) if data_max is None: data_max = np.nanmax(data) v = VolumeRendererThree(data=data, data_min=data_min, data_max=data_max, stereo=stereo, width=width, height=height, ambient_coefficient=ambient_coefficient, diffuse_coefficient=diffuse_coefficient, specular_coefficient=specular_coefficient, specular_exponent=specular_exponent, tf=tf, **kwargs) box = _volume_widets(v, lighting=lighting) return box def scatter(x, y, z, color=(1,0,0), s=0.01): global _last_figure; fig = _last_figure if fig is None: fig = volshow(None) fig.scatter = Scatter(x=x, y=y, z=z, color=color, size=s) fig.volume.scatter = fig.scatter return fig
[ "logging.getLogger", "traitlets.Instance", "traitlets.List", "ipywidgets.FloatSlider", "numpy.nanmin", "ipywidgets.HBox", "traittypes.Array", "ipywidgets.register", "numpy.nanmax", "warnings.warn", "ipyvolume.pylab.figure", "traitlets.Unicode", "traitlets.CInt", "ipywidgets.jslink", "traitlets.Dict", "ipywidgets.ToggleButton", "traitlets.Float", "ipyvolume.pylab.quiver", "ipyvolume.pylab.scatter", "traitlets.Bool", "traitlets.CFloat" ]
[((289, 319), 'logging.getLogger', 'logging.getLogger', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (306, 319), False, 'import logging\n'), ((352, 389), 'ipywidgets.register', 'widgets.register', (['"""ipyvolume.Scatter"""'], {}), "('ipyvolume.Scatter')\n", (368, 389), True, 'import ipywidgets as widgets\n'), ((1748, 1797), 'ipywidgets.register', 'widgets.register', (['"""ipyvolume.VolumeRendererThree"""'], {}), "('ipyvolume.VolumeRendererThree')\n", (1764, 1797), True, 'import ipywidgets as widgets\n'), ((6410, 6536), 'warnings.warn', 'warnings.warn', (['"""Please use ipyvolume.quickvol or use the ipyvolume.pylab interface"""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'Please use ipyvolume.quickvol or use the ipyvolume.pylab interface',\n DeprecationWarning, stacklevel=2)\n", (6423, 6536), False, 'import warnings\n'), ((6652, 6663), 'ipyvolume.pylab.figure', 'p3.figure', ([], {}), '()\n', (6661, 6663), True, 'import ipyvolume.pylab as p3\n'), ((6668, 6705), 'ipyvolume.pylab.quiver', 'p3.quiver', (['x', 'y', 'z', 'u', 'v', 'w'], {}), '(x, y, z, u, v, w, **kwargs)\n', (6677, 6705), True, 'import ipyvolume.pylab as p3\n'), ((6813, 6824), 'ipyvolume.pylab.figure', 'p3.figure', ([], {}), '()\n', (6822, 6824), True, 'import ipyvolume.pylab as p3\n'), ((6829, 6858), 'ipyvolume.pylab.scatter', 'p3.scatter', (['x', 'y', 'z'], {}), '(x, y, z, **kwargs)\n', (6839, 6858), True, 'import ipyvolume.pylab as p3\n'), ((4526, 4631), 'ipywidgets.FloatSlider', 'ipywidgets.FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'step': '(0.001)', 'value': 'v.ambient_coefficient', 'description': '"""ambient"""'}), "(min=0, max=1, step=0.001, value=v.\n ambient_coefficient, description='ambient')\n", (4548, 4631), False, 'import ipywidgets\n'), ((4657, 4762), 'ipywidgets.FloatSlider', 'ipywidgets.FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'step': '(0.001)', 'value': 'v.diffuse_coefficient', 'description': '"""diffuse"""'}), "(min=0, max=1, step=0.001, value=v.\n diffuse_coefficient, description='diffuse')\n", (4679, 4762), False, 'import ipywidgets\n'), ((4789, 4896), 'ipywidgets.FloatSlider', 'ipywidgets.FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'step': '(0.001)', 'value': 'v.specular_coefficient', 'description': '"""specular"""'}), "(min=0, max=1, step=0.001, value=v.\n specular_coefficient, description='specular')\n", (4811, 4896), False, 'import ipywidgets\n'), ((4920, 5028), 'ipywidgets.FloatSlider', 'ipywidgets.FloatSlider', ([], {'min': '(0)', 'max': '(10)', 'step': '(0.001)', 'value': 'v.specular_exponent', 'description': '"""specular exp"""'}), "(min=0, max=10, step=0.001, value=v.specular_exponent,\n description='specular exp')\n", (4942, 5028), False, 'import ipywidgets\n'), ((5132, 5209), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'ambient_coefficient')", "(ambient_coefficient, 'value')"], {}), "((v, 'ambient_coefficient'), (ambient_coefficient, 'value'))\n", (5149, 5209), False, 'import ipywidgets\n'), ((5218, 5295), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'diffuse_coefficient')", "(diffuse_coefficient, 'value')"], {}), "((v, 'diffuse_coefficient'), (diffuse_coefficient, 'value'))\n", (5235, 5295), False, 'import ipywidgets\n'), ((5304, 5383), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'specular_coefficient')", "(specular_coefficient, 'value')"], {}), "((v, 'specular_coefficient'), (specular_coefficient, 'value'))\n", (5321, 5383), False, 'import ipywidgets\n'), ((5392, 5465), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'specular_exponent')", 
"(specular_exponent, 'value')"], {}), "((v, 'specular_exponent'), (specular_exponent, 'value'))\n", (5409, 5465), False, 'import ipywidgets\n'), ((5791, 5861), 'ipywidgets.ToggleButton', 'widgets.ToggleButton', ([], {'value': 'v.stereo', 'description': '"""stereo"""', 'icon': '"""eye"""'}), "(value=v.stereo, description='stereo', icon='eye')\n", (5811, 5861), True, 'import ipywidgets as widgets\n'), ((5883, 5969), 'ipywidgets.ToggleButton', 'widgets.ToggleButton', ([], {'value': 'v.stereo', 'description': '"""fullscreen"""', 'icon': '"""arrows-alt"""'}), "(value=v.stereo, description='fullscreen', icon=\n 'arrows-alt')\n", (5903, 5969), True, 'import ipywidgets as widgets\n'), ((5973, 6024), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'stereo')", "(stereo, 'value')"], {}), "((v, 'stereo'), (stereo, 'value'))\n", (5990, 6024), False, 'import ipywidgets\n'), ((6033, 6092), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'fullscreen')", "(fullscreen, 'value')"], {}), "((v, 'fullscreen'), (fullscreen, 'value'))\n", (6050, 6092), False, 'import ipywidgets\n'), ((9903, 9918), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (9912, 9918), True, 'import numpy as np\n'), ((9963, 9978), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (9972, 9978), True, 'import numpy as np\n'), ((441, 463), 'traitlets.Unicode', 'Unicode', (['"""ScatterView"""'], {}), "('ScatterView')\n", (448, 463), False, 'from traitlets import Unicode\n'), ((498, 518), 'traitlets.Unicode', 'Unicode', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (505, 518), False, 'from traitlets import Unicode\n'), ((552, 575), 'traitlets.Unicode', 'Unicode', (['"""ScatterModel"""'], {}), "('ScatterModel')\n", (559, 575), False, 'from traitlets import Unicode\n'), ((611, 631), 'traitlets.Unicode', 'Unicode', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (618, 631), False, 'from traitlets import Unicode\n'), ((655, 680), 'traittypes.Array', 'Array', ([], {'default_value': 'None'}), '(default_value=None)\n', (660, 680), False, 'from traittypes import Array\n'), ((727, 752), 'traittypes.Array', 'Array', ([], {'default_value': 'None'}), '(default_value=None)\n', (732, 752), False, 'from traittypes import Array\n'), ((799, 824), 'traittypes.Array', 'Array', ([], {'default_value': 'None'}), '(default_value=None)\n', (804, 824), False, 'from traittypes import Array\n'), ((872, 914), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (877, 914), False, 'from traittypes import Array\n'), ((961, 1003), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (966, 1003), False, 'from traittypes import Array\n'), ((1050, 1092), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (1055, 1092), False, 'from traittypes import Array\n'), ((1145, 1187), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (1150, 1187), False, 'from traittypes import Array\n'), ((1236, 1257), 'traitlets.Float', 'traitlets.Float', (['(0.01)'], {}), '(0.01)\n', (1251, 1257), False, 'import traitlets\n'), ((1293, 1314), 'traitlets.Float', 'traitlets.Float', (['(0.02)'], {}), '(0.02)\n', (1308, 1314), False, 'import traitlets\n'), ((1342, 1380), 'traitlets.Unicode', 'traitlets.Unicode', ([], {'default_value': '"""red"""'}), "(default_value='red')\n", (1359, 1380), 
False, 'import traitlets\n'), ((1417, 1457), 'traitlets.Unicode', 'traitlets.Unicode', ([], {'default_value': '"""white"""'}), "(default_value='white')\n", (1434, 1457), False, 'import traitlets\n'), ((1483, 1511), 'traitlets.Unicode', 'traitlets.Unicode', (['"""diamond"""'], {}), "('diamond')\n", (1500, 1511), False, 'import traitlets\n'), ((1933, 1967), 'traitlets.Unicode', 'Unicode', (['"""VolumeRendererThreeView"""'], {}), "('VolumeRendererThreeView')\n", (1940, 1967), False, 'from traitlets import Unicode\n'), ((2002, 2022), 'traitlets.Unicode', 'Unicode', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (2009, 2022), False, 'from traitlets import Unicode\n'), ((2056, 2091), 'traitlets.Unicode', 'Unicode', (['"""VolumeRendererThreeModel"""'], {}), "('VolumeRendererThreeModel')\n", (2063, 2091), False, 'from traitlets import Unicode\n'), ((2127, 2147), 'traitlets.Unicode', 'Unicode', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (2134, 2147), False, 'from traitlets import Unicode\n'), ((2175, 2217), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (2180, 2217), False, 'from traittypes import Array\n'), ((2280, 2298), 'traitlets.CFloat', 'traitlets.CFloat', ([], {}), '()\n', (2296, 2298), False, 'import traitlets\n'), ((2329, 2347), 'traitlets.CFloat', 'traitlets.CFloat', ([], {}), '()\n', (2345, 2347), False, 'import traitlets\n'), ((2372, 2425), 'traitlets.Instance', 'traitlets.Instance', (['TransferFunction'], {'allow_none': '(True)'}), '(TransferFunction, allow_none=True)\n', (2390, 2425), False, 'import traitlets\n'), ((2489, 2509), 'traitlets.Float', 'traitlets.Float', (['(0.1)'], {}), '(0.1)\n', (2504, 2509), False, 'import traitlets\n'), ((2538, 2558), 'traitlets.Float', 'traitlets.Float', (['(0.2)'], {}), '(0.2)\n', (2553, 2558), False, 'import traitlets\n'), ((2723, 2746), 'traitlets.Float', 'traitlets.Float', (['(1000.0)'], {}), '(1000.0)\n', (2738, 2746), False, 'import traitlets\n'), ((2789, 2809), 'traitlets.Float', 'traitlets.Float', (['(0.5)'], {}), '(0.5)\n', (2804, 2809), False, 'import traitlets\n'), ((2851, 2871), 'traitlets.Float', 'traitlets.Float', (['(0.8)'], {}), '(0.8)\n', (2866, 2871), False, 'import traitlets\n'), ((2914, 2934), 'traitlets.Float', 'traitlets.Float', (['(0.5)'], {}), '(0.5)\n', (2929, 2934), False, 'import traitlets\n'), ((2974, 2992), 'traitlets.Float', 'traitlets.Float', (['(5)'], {}), '(5)\n', (2989, 2992), False, 'import traitlets\n'), ((3021, 3042), 'traitlets.Bool', 'traitlets.Bool', (['(False)'], {}), '(False)\n', (3035, 3042), False, 'import traitlets\n'), ((3075, 3096), 'traitlets.Bool', 'traitlets.Bool', (['(False)'], {}), '(False)\n', (3089, 3096), False, 'import traitlets\n'), ((3125, 3144), 'traitlets.CInt', 'traitlets.CInt', (['(500)'], {}), '(500)\n', (3139, 3144), False, 'import traitlets\n'), ((3173, 3192), 'traitlets.CInt', 'traitlets.CInt', (['(400)'], {}), '(400)\n', (3187, 3192), False, 'import traitlets\n'), ((3224, 3241), 'traitlets.CInt', 'traitlets.CInt', (['(1)'], {}), '(1)\n', (3238, 3241), False, 'import traitlets\n'), ((3268, 3295), 'traitlets.Unicode', 'traitlets.Unicode', (['"""Volume"""'], {}), "('Volume')\n", (3285, 3295), False, 'import traitlets\n'), ((3339, 3413), 'traitlets.List', 'traitlets.List', (['traitlets.CFloat'], {'default_value': '[0, 1]', 'minlen': '(2)', 'maxlen': '(2)'}), '(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2)\n', (3353, 3413), False, 'import traitlets\n'), ((3440, 3514), 'traitlets.List', 
'traitlets.List', (['traitlets.CFloat'], {'default_value': '[0, 1]', 'minlen': '(2)', 'maxlen': '(2)'}), '(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2)\n', (3454, 3514), False, 'import traitlets\n'), ((3541, 3615), 'traitlets.List', 'traitlets.List', (['traitlets.CFloat'], {'default_value': '[0, 1]', 'minlen': '(2)', 'maxlen': '(2)'}), '(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2)\n', (3555, 3615), False, 'import traitlets\n'), ((3645, 3667), 'traitlets.Unicode', 'traitlets.Unicode', (['"""x"""'], {}), "('x')\n", (3662, 3667), False, 'import traitlets\n'), ((3696, 3718), 'traitlets.Unicode', 'traitlets.Unicode', (['"""y"""'], {}), "('y')\n", (3713, 3718), False, 'import traitlets\n'), ((3747, 3769), 'traitlets.Unicode', 'traitlets.Unicode', (['"""z"""'], {}), "('z')\n", (3764, 3769), False, 'import traitlets\n'), ((3798, 3841), 'traitlets.Dict', 'traitlets.Dict', ([], {'default_value': 'default_style'}), '(default_value=default_style)\n', (3812, 3841), False, 'import traitlets\n'), ((5492, 5551), 'ipywidgets.HBox', 'ipywidgets.HBox', (['[ambient_coefficient, diffuse_coefficient]'], {}), '([ambient_coefficient, diffuse_coefficient])\n', (5507, 5551), False, 'import ipywidgets\n'), ((5562, 5620), 'ipywidgets.HBox', 'ipywidgets.HBox', (['[specular_coefficient, specular_exponent]'], {}), '([specular_coefficient, specular_exponent])\n', (5577, 5620), False, 'import ipywidgets\n'), ((6120, 6157), 'ipywidgets.HBox', 'ipywidgets.HBox', (['[stereo, fullscreen]'], {}), '([stereo, fullscreen])\n', (6135, 6157), False, 'import ipywidgets\n'), ((2605, 2632), 'traitlets.Instance', 'traitlets.Instance', (['Scatter'], {}), '(Scatter)\n', (2623, 2632), False, 'import traitlets\n')]
from abc import ABC, abstractmethod import logging from typing import Any from ROAR.utilities_module.module import Module from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl from collections import deque import numpy as np class ROARManiaPlanner(Module): def __init__(self, agent, **kwargs): super().__init__(**kwargs) self.logger = logging self.logger = logging.getLogger(__name__) self.agent = agent self.last_error = None # boundary of detecting future turn self.turn_boundary = 0.75 def run_in_series(self, scene) -> Any: """ Return the error to PID on. """ # Decide between lane or patch first, # then sort patches by distance and type and return one of them # scene = {"lane_error": error_lane, "patches": [(type, side, y_offset)], "on_patch": type]} # type = ["ice", "boost"], side = ["left", "right", "center"], y_offset = float # Algorithm: # 1. Follow main lane if a patch is not present. # 2. If patch is present and desirable, go for it and give the correct lat_error to controller # 3. After you've gone over patch, return back to main lane as quickly as possible. # 4. If can't see main lane, repeat previous action. # CAVEAT: We don't handle the case that we can see patches but not the lane error = None if scene["lane_point"] is not None: #translate lane point into error for pid error = self.point_to_error(scene["lane_point"]) else: error = self.last_error turn_exist = False if scene["backup_lane_point"] is not None: #turn_exist = abs(self.point_to_error(scene["backup_lane_point"])) > self.turn_boundary #print("backup error: ", self.point_to_error(scene["backup_lane_point"])) pass else: turn_exist = True #print("turn: ", turn_exist) # We know where the lane is, and there are patches if scene["patches"]: scene["patches"].sort(key=lambda patch: patch[1][1]) # patch[1][0] is the y_offset print("sorted patches: ", scene["patches"]) for i, patch in enumerate(scene["patches"]): patch_t, patch_point = patch # y, x = patch_point if patch_t == "ice" and turn_exist is False: error = self.avoid(patch_point, error) # break if patch_t == "boost" and turn_exist is False: error = self.pursue(patch_point, error) self.last_error = error return error def avoid(self, point, error): to_patch = self.point_to_error(point) return error + (0.4*(error-to_patch)) def pursue(self, point, error): return 0.5*self.point_to_error(point) def point_to_error(self, point): #get pixel_offset from center pixel_offset = point[1] - self.agent.center_x print("pixel_offset: ", pixel_offset) #normalize to [-1, 1] norm_offset = pixel_offset / 360 print("norm_offset: ", norm_offset) #scale to have smaller errors be less significant scaled_error = np.sign(norm_offset) * (abs(norm_offset)**2) print("scaled_error: ", scaled_error) return scaled_error def repeat_prev_action(self): return None def run_in_threaded(self, **kwargs): pass def save(self, **kwargs): pass
[ "logging.getLogger", "numpy.sign" ]
[((406, 433), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (423, 433), False, 'import logging\n'), ((3298, 3318), 'numpy.sign', 'np.sign', (['norm_offset'], {}), '(norm_offset)\n', (3305, 3318), True, 'import numpy as np\n')]
from collections import namedtuple import numpy as np import pandas as pd import rdkit.Chem as Chem from tqdm import tqdm from neural_fingerprint import NFPRegressor from neural_fingerprint.chemutils import rf_evaluation from neural_fingerprint.models.ecfp import ECFP max_val = 1000 train_idx = 800 def read_data(max_val, train_idx): df_zinc = pd.read_table("./data/train.txt", header=None) target = pd.read_table("./data/train.logP-SA", header=None) df_zinc.columns = ["smiles"] target.columns = ["target"] df = pd.concat([df_zinc.iloc[0:max_val, :], target.iloc[0:max_val, :]], axis=1) # train_smiles, test_smiles = df.smiles[0:train_idx], df.smiles[train_idx:] train_y, test_y = df.target[0:train_idx], df.target[train_idx:] mols = [Chem.MolFromSmiles(smi) for smi in tqdm(df.smiles)] train_mols, test_mols = mols[0:train_idx], mols[train_idx:] return df, train_mols, test_mols, train_y, test_y def benchmark(): _, train_mols, test_mols, train_y, test_y = read_data(max_val, train_idx) # Neural Fingerprint print("Neural fingerprint") model = NFPRegressor(hidden_dim=64, depth=2, nbits=16) model.fit(train_mols, train_y, epochs=10, verbose=True) train_pred, train_fps = model.predict(train_mols, return_fps=True) test_pred, test_fps = model.predict(test_mols, return_fps=True) # NFP + MLP print("Neural fingerprint + MLP") rf_evaluation(train_pred, test_pred, train_y, test_y) # NFP + Random Forest print("Neural fingerprint + Random Forest") rf_evaluation(train_fps, test_fps, train_y, test_y) def mapping_nodes_eample(train_fps, test_fps): from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.manifold import TSNE df, _, _, train_y, test_y = read_data(max_val, train_idx) fps = np.vstack((train_fps, test_fps)) label = np.hstack([np.zeros(800), np.ones(200)]) tsne = TSNE(n_components=2).fit_transform(fps) tes = np.c_[tsne, df.target.to_numpy(), label] gp = GaussianProcessRegressor() gp.fit(train_fps, train_y) xmin, xmax = min(tes[:, 0]), max(tes[:, 0]) ymin, ymax = min(tes[:, 1]), max(tes[:, 1]) zmin, zmax = min(tes[:, 2]), max(tes[:, 2]) gp = GaussianProcessRegressor() gp.fit(tes[:, 0:2], tes[:, 2]) import matplotlib.cm as cm import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D xx, yy = np.meshgrid( np.linspace(xmin - 1, xmax + 1, 200), np.linspace(ymin - 1, ymax + 1, 200) ) xxyy = np.array([xx.ravel(), yy.ravel()]).T z1 = gp.predict(xxyy) z1 = z1.reshape(-1, 200) # plt.scatter(tes[:, 0], tes[:, 1]) plt.pcolor(xx, yy, z1, alpha=0.5, cmap=cm.jet, vmin=zmin, vmax=zmax) plt.colorbar() plt.show() if __name__ == "__main__": benchmark()
[ "sklearn.gaussian_process.GaussianProcessRegressor", "neural_fingerprint.chemutils.rf_evaluation", "numpy.ones", "matplotlib.pyplot.pcolor", "matplotlib.pyplot.colorbar", "rdkit.Chem.MolFromSmiles", "tqdm.tqdm", "neural_fingerprint.NFPRegressor", "sklearn.manifold.TSNE", "numpy.linspace", "numpy.zeros", "numpy.vstack", "pandas.read_table", "pandas.concat", "matplotlib.pyplot.show" ]
[((354, 400), 'pandas.read_table', 'pd.read_table', (['"""./data/train.txt"""'], {'header': 'None'}), "('./data/train.txt', header=None)\n", (367, 400), True, 'import pandas as pd\n'), ((414, 464), 'pandas.read_table', 'pd.read_table', (['"""./data/train.logP-SA"""'], {'header': 'None'}), "('./data/train.logP-SA', header=None)\n", (427, 464), True, 'import pandas as pd\n'), ((540, 614), 'pandas.concat', 'pd.concat', (['[df_zinc.iloc[0:max_val, :], target.iloc[0:max_val, :]]'], {'axis': '(1)'}), '([df_zinc.iloc[0:max_val, :], target.iloc[0:max_val, :]], axis=1)\n', (549, 614), True, 'import pandas as pd\n'), ((1112, 1158), 'neural_fingerprint.NFPRegressor', 'NFPRegressor', ([], {'hidden_dim': '(64)', 'depth': '(2)', 'nbits': '(16)'}), '(hidden_dim=64, depth=2, nbits=16)\n', (1124, 1158), False, 'from neural_fingerprint import NFPRegressor\n'), ((1417, 1470), 'neural_fingerprint.chemutils.rf_evaluation', 'rf_evaluation', (['train_pred', 'test_pred', 'train_y', 'test_y'], {}), '(train_pred, test_pred, train_y, test_y)\n', (1430, 1470), False, 'from neural_fingerprint.chemutils import rf_evaluation\n'), ((1550, 1601), 'neural_fingerprint.chemutils.rf_evaluation', 'rf_evaluation', (['train_fps', 'test_fps', 'train_y', 'test_y'], {}), '(train_fps, test_fps, train_y, test_y)\n', (1563, 1601), False, 'from neural_fingerprint.chemutils import rf_evaluation\n'), ((1829, 1861), 'numpy.vstack', 'np.vstack', (['(train_fps, test_fps)'], {}), '((train_fps, test_fps))\n', (1838, 1861), True, 'import numpy as np\n'), ((2028, 2054), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {}), '()\n', (2052, 2054), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((2240, 2266), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {}), '()\n', (2264, 2266), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((2677, 2745), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['xx', 'yy', 'z1'], {'alpha': '(0.5)', 'cmap': 'cm.jet', 'vmin': 'zmin', 'vmax': 'zmax'}), '(xx, yy, z1, alpha=0.5, cmap=cm.jet, vmin=zmin, vmax=zmax)\n', (2687, 2745), True, 'import matplotlib.pyplot as plt\n'), ((2750, 2764), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2762, 2764), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2779), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2777, 2779), True, 'import matplotlib.pyplot as plt\n'), ((775, 798), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (793, 798), True, 'import rdkit.Chem as Chem\n'), ((2449, 2485), 'numpy.linspace', 'np.linspace', (['(xmin - 1)', '(xmax + 1)', '(200)'], {}), '(xmin - 1, xmax + 1, 200)\n', (2460, 2485), True, 'import numpy as np\n'), ((2487, 2523), 'numpy.linspace', 'np.linspace', (['(ymin - 1)', '(ymax + 1)', '(200)'], {}), '(ymin - 1, ymax + 1, 200)\n', (2498, 2523), True, 'import numpy as np\n'), ((810, 825), 'tqdm.tqdm', 'tqdm', (['df.smiles'], {}), '(df.smiles)\n', (814, 825), False, 'from tqdm import tqdm\n'), ((1885, 1898), 'numpy.zeros', 'np.zeros', (['(800)'], {}), '(800)\n', (1893, 1898), True, 'import numpy as np\n'), ((1900, 1912), 'numpy.ones', 'np.ones', (['(200)'], {}), '(200)\n', (1907, 1912), True, 'import numpy as np\n'), ((1927, 1947), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (1931, 1947), False, 'from sklearn.manifold import TSNE\n')]
#!/usr/bin/env python # manual """ This script allows you to manually control the simulator or Duckiebot using the keyboard arrows. """ import os os.sys.path.append("../../gym-duckietown") import cv2 import sys import argparse import pyglet from pyglet.window import key import numpy as np import gym import gym_duckietown #from gym_duckietown.envs import DuckietownEnv from gym_duckietown.envs.duckietown_env import * from gym_duckietown.wrappers import UndistortWrapper from NaiveLaneDetection import NaiveLaneDetection steer = 0 throttle = 0 # from experiments.utils import save_img env = DuckietownLF(map_name='straight_road', max_steps=1500, draw_curve=False, draw_bbox=False, domain_rand=False, frame_rate=30, frame_skip=1, camera_width=640, camera_height=480, robot_speed=1.20, #MAXIMUM FORWARD ROBOT SPEED accept_start_angle_deg=5, full_transparency=False, user_tile_start=None, seed=None, distortion=False, randomize_maps_on_reset=False ) env.reset() env.render() vertices = np.array([[(0,200), (640,200), (640,480), (0,480)]]) kernel = 5 low_threshold = 50 high_threshold = 150 rho = 1 theta = np.pi/180 threshold = 10 min_line_len = 10 max_line_gap = 10 NLD = NaiveLaneDetection(vertices, kernel, low_threshold, high_threshold, rho, theta, threshold, min_line_len, max_line_gap) cv2.namedWindow("Lines") cv2.namedWindow("Averaged Line") @env.unwrapped.window.event def on_key_press(symbol, modifiers): """ This handler processes keyboard commands that control the simulation """ if symbol == key.BACKSPACE or symbol == key.SLASH: print('RESET') env.reset() env.render() elif symbol == key.PAGEUP: env.unwrapped.cam_angle[0] = 0 elif symbol == key.ESCAPE: env.close() sys.exit(0) # Take a screenshot # UNCOMMENT IF NEEDED - Skimage dependency # elif symbol == key.RETURN: # print('saving screenshot') # img = env.render('rgb_array') # save_img('screenshot.png', img) # Register a keyboard handler key_handler = key.KeyStateHandler() env.unwrapped.window.push_handlers(key_handler) def weighted_img(img, initial_img, α=0.8, β=1., γ=0.): return cv2.addWeighted(initial_img, α, img, β, γ) def update(dt): """ This function is called at every frame to handle movement/stepping and redrawing """ action = np.array([0.0, 0.0]) if key_handler[key.UP]: action = np.array([0.44, 0.0]) if key_handler[key.DOWN]: action = np.array([-0.44, 0]) if key_handler[key.LEFT]: action = np.array([0.35, +1]) if key_handler[key.RIGHT]: action = np.array([0.35, -1]) if key_handler[key.SPACE]: action = np.array([0, 0]) # Speed boost if key_handler[key.LSHIFT]: action *= 1.5 obs, reward, done, info = env.step(action) lines, avg_lines, left_line, right_line = NLD.Perceive(obs) final = weighted_img(avg_lines, obs) cv2.imshow("Lines", lines) cv2.imshow("Averaged Line", cv2.cvtColor(final, cv2.COLOR_BGR2RGB)) cv2.waitKey(1) print('step_count = %s, reward=%.3f' % (env.unwrapped.step_count, reward)) if key_handler[key.RETURN]: from PIL import Image im = Image.fromarray(obs) im.save('screen.png') if done: print('done!') env.reset() env.render() env.render() pyglet.clock.schedule_interval(update, 1.0 / env.unwrapped.frame_rate) # Enter main event loop pyglet.app.run() env.close()
[ "PIL.Image.fromarray", "pyglet.window.key.KeyStateHandler", "pyglet.clock.schedule_interval", "pyglet.app.run", "NaiveLaneDetection.NaiveLaneDetection", "os.sys.path.append", "cv2.imshow", "numpy.array", "cv2.addWeighted", "cv2.cvtColor", "sys.exit", "cv2.waitKey", "cv2.namedWindow" ]
[((147, 189), 'os.sys.path.append', 'os.sys.path.append', (['"""../../gym-duckietown"""'], {}), "('../../gym-duckietown')\n", (165, 189), False, 'import os\n'), ((1290, 1346), 'numpy.array', 'np.array', (['[[(0, 200), (640, 200), (640, 480), (0, 480)]]'], {}), '([[(0, 200), (640, 200), (640, 480), (0, 480)]])\n', (1298, 1346), True, 'import numpy as np\n'), ((1478, 1600), 'NaiveLaneDetection.NaiveLaneDetection', 'NaiveLaneDetection', (['vertices', 'kernel', 'low_threshold', 'high_threshold', 'rho', 'theta', 'threshold', 'min_line_len', 'max_line_gap'], {}), '(vertices, kernel, low_threshold, high_threshold, rho,\n theta, threshold, min_line_len, max_line_gap)\n', (1496, 1600), False, 'from NaiveLaneDetection import NaiveLaneDetection\n'), ((1597, 1621), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Lines"""'], {}), "('Lines')\n", (1612, 1621), False, 'import cv2\n'), ((1622, 1654), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Averaged Line"""'], {}), "('Averaged Line')\n", (1637, 1654), False, 'import cv2\n'), ((2345, 2366), 'pyglet.window.key.KeyStateHandler', 'key.KeyStateHandler', ([], {}), '()\n', (2364, 2366), False, 'from pyglet.window import key\n'), ((3680, 3750), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['update', '(1.0 / env.unwrapped.frame_rate)'], {}), '(update, 1.0 / env.unwrapped.frame_rate)\n', (3710, 3750), False, 'import pyglet\n'), ((3776, 3792), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (3790, 3792), False, 'import pyglet\n'), ((2482, 2524), 'cv2.addWeighted', 'cv2.addWeighted', (['initial_img', 'α', 'img', 'β', 'γ'], {}), '(initial_img, α, img, β, γ)\n', (2497, 2524), False, 'import cv2\n'), ((2660, 2680), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2668, 2680), True, 'import numpy as np\n'), ((3258, 3284), 'cv2.imshow', 'cv2.imshow', (['"""Lines"""', 'lines'], {}), "('Lines', lines)\n", (3268, 3284), False, 'import cv2\n'), ((3361, 3375), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3372, 3375), False, 'import cv2\n'), ((2727, 2748), 'numpy.array', 'np.array', (['[0.44, 0.0]'], {}), '([0.44, 0.0])\n', (2735, 2748), True, 'import numpy as np\n'), ((2796, 2816), 'numpy.array', 'np.array', (['[-0.44, 0]'], {}), '([-0.44, 0])\n', (2804, 2816), True, 'import numpy as np\n'), ((2864, 2884), 'numpy.array', 'np.array', (['[0.35, +1]'], {}), '([0.35, +1])\n', (2872, 2884), True, 'import numpy as np\n'), ((2933, 2953), 'numpy.array', 'np.array', (['[0.35, -1]'], {}), '([0.35, -1])\n', (2941, 2953), True, 'import numpy as np\n'), ((3002, 3018), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (3010, 3018), True, 'import numpy as np\n'), ((3317, 3355), 'cv2.cvtColor', 'cv2.cvtColor', (['final', 'cv2.COLOR_BGR2RGB'], {}), '(final, cv2.COLOR_BGR2RGB)\n', (3329, 3355), False, 'import cv2\n'), ((3531, 3551), 'PIL.Image.fromarray', 'Image.fromarray', (['obs'], {}), '(obs)\n', (3546, 3551), False, 'from PIL import Image\n'), ((2064, 2075), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2072, 2075), False, 'import sys\n')]
import math import os import time import numpy as np from torch.utils.tensorboard import SummaryWriter import utils.loss as loss import utils.tensorboard as utb def get_probs(length, exp): probs = (np.arange(1, length + 1) / 100) ** exp last_x = int(0.9 * length) probs[last_x:] = probs[last_x] return probs / probs.sum() def get_probs2(length, dist, prop): probs = (np.arange(1, length + 1) / 100) ** dist last_x = int(0.999 * length) probs[last_x:] = probs[last_x] return (1 - prop) / length + prop * probs / probs.sum() def get_probs_from_ll(base_dir, T, l_dis): event_acc = utb.load_event_accumulator(os.path.join(base_dir, 'train', 'loglikelihood', str(T))) ll_list, sigma_list = utb.load_loglikelihood(event_acc) idx_sorted = np.argsort(ll_list)[::-1] # From larger to smaller probs_ordered = get_probs(len(idx_sorted), l_dis) # From smaller to larger probs = np.ones(len(probs_ordered)) count = 0 # The one with the larger likelihood is assigned the smaller probability for i in idx_sorted: probs[i] = probs_ordered[count] count = count + 1 assert np.sum(probs == 1) == 0 return probs def log_sum_exp_trick(value_list): max_value = np.max(value_list) ds = value_list - max_value sumOfExp = np.exp(ds).sum() return max_value + np.log(sumOfExp) SIGMA_LIST = np.linspace(0.01, 0.3, 200) class ApproxLL: def __init__(self, model, scaler, folder, N=128, N_max=10000, T=40, sigma_list=None): self.folder = os.path.join(folder, 'loglikelihood', str(T)) self.model = model self.scaler = scaler self.bs = 256 self.k = 5 self.sigma_list = sigma_list if sigma_list else SIGMA_LIST n_iter = math.ceil(N / self.bs) self.N = n_iter * self.bs self.n_sigma = len(self.sigma_list) n_iter = math.ceil(N_max / self.bs) self.N_max = n_iter * self.bs self.T = T self.writer = SummaryWriter(log_dir=self.folder) def process_output(self, x): assert x.shape[1] <= 3, 'Error: Data should be in CHW format' dims = x.shape x = self.scaler.inverse_transform(x.reshape(dims[0], -1)).reshape(dims).astype(int) x = np.transpose(x, [0, 2, 3, 1]) # CHW --> HWC return x def compute_ll(self, x_recons, z_infer): n_imgs, z_dim = z_infer.shape for i in range(len(self.sigma_list)): self.writer.add_scalar('sigma_list', self.sigma_list[i], i) print('Starting analysis non-isotropic | n_imgs: {}'.format(n_imgs)) if self.N > self.N_max: print('Error: N > N_max {} > {}'.format(self.N, self.N_max)) init_time = time.time() # ll_list, sigma_i_list, ll_evolution_list = self.load_data(n_imgs) event_acc = utb.load_event_accumulator(self.folder) ll, _ = utb.load_loglikelihood(event_acc) init_img = max(0, len(ll) - 1) for i in range(init_img, n_imgs): # if ll_list[i] < 0: # summary_str = '\n[{}] LL={} | sigma={}' # print(summary_str.format(i, ll_list[i], sigma_i_list[i])) # continue cur_time = time.time() ll = self.compute_ll_img(x_recons[i], z_infer[i, :], str(i)) time_total = time.time() - init_time time_epoch = time.time() - cur_time min_epochs = int(time_epoch / 60) summary_str = '\n[{}]Time: {}:{} | Total time: {} log10 = {}' print(summary_str.format(i, min_epochs, int(time_epoch), int(time_total / 60), ll)) print('Analysis non-isotropic completed | n_imgs: {}'.format(n_imgs)) self.writer.close() return def compute_ll_img(self, x_recons, z_c, img_idx): z_dim = z_c.shape[-1] x_tmp = np.tile(x_recons, [self.bs, 1, 1, 1]) N_i = self.N j = 0 while j < self.n_sigma: sigma = self.sigma_list[j] accepted_samples_count = 0 tries = 0 while accepted_samples_count == 0: psnr_tmp, log_ratio_p_q, log_ratio_1_q = self.get_psnr_ratio(z_c, x_tmp, N_i, sigma, z_dim) accepted_samples 
= psnr_tmp > self.T accepted_samples_count = np.sum(accepted_samples) assert tries < 5, 'There are not accepted samples in img with id {}'.format(img_idx) tries += 1 N = np.log(len(log_ratio_p_q)) ll_i = self.get_loglikelihood(log_ratio_p_q[accepted_samples], N) # print('IDX {} ll {} sigma {}'.format(img_idx, ll_i, sigma)) self.writer.add_histogram('log(weights)/{}'.format(img_idx), log_ratio_p_q, j) self.writer.add_scalar('loglikelihood/{}'.format(img_idx), ll_i, j) self.writer.add_scalar('N_i/{}'.format(img_idx), N_i, j) if accepted_samples_count < 0.95 * N_i or self.T == 40: j = j + 1 else: self.writer.add_scalar('loglikelihood/{}'.format(img_idx), ll_i, j + 1) self.writer.add_scalar('loglikelihood/{}'.format(img_idx), ll_i, j + 2) self.writer.add_scalar('N_i/{}'.format(img_idx), N_i, j + 1) self.writer.add_scalar('N_i/{}'.format(img_idx), N_i, j + 2) j = j + 3 if accepted_samples_count <= N_i / 10: if N_i == self.N_max: break n_iter = math.ceil(N_i * self.k / self.bs) N_i = min(n_iter * self.bs, self.N_max) print(N_i) return ll_i / np.log(10) def get_psnr_ratio(self, z_c, x_tmp, N_i, sigma, z_dim): n_iter = math.ceil(N_i / self.bs) psnr_tmp = np.zeros(N_i) log_ratio_p_q = np.zeros(N_i) log_ratio_1_q = np.zeros(N_i) for n in range(n_iter): z_c_tile = np.tile(z_c, [self.bs, 1]) z_tmp = z_c_tile + np.random.normal(0, sigma, [self.bs, z_dim]) x_gener = self.model.sample3(z_tmp) x_gener = self.process_output(x_gener) log_ratio_p_q[n * self.bs:(n + 1) * self.bs] = self.log_ratio_p_q(z_tmp, z_c_tile, sigma) log_ratio_1_q[n * self.bs:(n + 1) * self.bs] = self.log_ratio_1_q(z_tmp, z_c_tile, sigma) psnr_tmp[n * self.bs:(n + 1) * self.bs] = loss.PSNR(x_gener, x_tmp, axis=(1, 2, 3)) return psnr_tmp, log_ratio_p_q, log_ratio_1_q def log_ratio_p_q(self, z, z_c, sigma): z_dim = z.shape[-1] return 1 / 2 * (np.sum((z - z_c) ** 2, axis=-1) / (sigma ** 2) - np.sum(z ** 2, axis=-1)) + z_dim * np.log( sigma) def log_ratio_1_q(self, z, z_c, sigma): z_dim = z.shape[-1] a = 1 / 2 * (np.sum((z - z_c) ** 2, axis=-1) / (sigma ** 2)) a2 = z_dim * np.log(sigma) a3 = z_dim * np.log(2 * np.pi) / 2 return a + a2 + a3 def get_loglikelihood(self, log_ratio_p_q_accepted, N): """ Compute the log likelihood using the log_sum_trick :param log_ratio_p_q_accepted: :param N: :return: """ return log_sum_exp_trick(log_ratio_p_q_accepted) - N
[ "numpy.random.normal", "torch.utils.tensorboard.SummaryWriter", "numpy.tile", "math.ceil", "utils.loss.PSNR", "numpy.log", "numpy.max", "numpy.argsort", "numpy.sum", "numpy.linspace", "utils.tensorboard.load_event_accumulator", "numpy.zeros", "numpy.exp", "numpy.transpose", "time.time", "numpy.arange", "utils.tensorboard.load_loglikelihood" ]
[((1385, 1412), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.3)', '(200)'], {}), '(0.01, 0.3, 200)\n', (1396, 1412), True, 'import numpy as np\n'), ((733, 766), 'utils.tensorboard.load_loglikelihood', 'utb.load_loglikelihood', (['event_acc'], {}), '(event_acc)\n', (755, 766), True, 'import utils.tensorboard as utb\n'), ((1247, 1265), 'numpy.max', 'np.max', (['value_list'], {}), '(value_list)\n', (1253, 1265), True, 'import numpy as np\n'), ((785, 804), 'numpy.argsort', 'np.argsort', (['ll_list'], {}), '(ll_list)\n', (795, 804), True, 'import numpy as np\n'), ((1152, 1170), 'numpy.sum', 'np.sum', (['(probs == 1)'], {}), '(probs == 1)\n', (1158, 1170), True, 'import numpy as np\n'), ((1353, 1369), 'numpy.log', 'np.log', (['sumOfExp'], {}), '(sumOfExp)\n', (1359, 1369), True, 'import numpy as np\n'), ((1771, 1793), 'math.ceil', 'math.ceil', (['(N / self.bs)'], {}), '(N / self.bs)\n', (1780, 1793), False, 'import math\n'), ((1890, 1916), 'math.ceil', 'math.ceil', (['(N_max / self.bs)'], {}), '(N_max / self.bs)\n', (1899, 1916), False, 'import math\n'), ((1997, 2031), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'self.folder'}), '(log_dir=self.folder)\n', (2010, 2031), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2263, 2292), 'numpy.transpose', 'np.transpose', (['x', '[0, 2, 3, 1]'], {}), '(x, [0, 2, 3, 1])\n', (2275, 2292), True, 'import numpy as np\n'), ((2730, 2741), 'time.time', 'time.time', ([], {}), '()\n', (2739, 2741), False, 'import time\n'), ((2839, 2878), 'utils.tensorboard.load_event_accumulator', 'utb.load_event_accumulator', (['self.folder'], {}), '(self.folder)\n', (2865, 2878), True, 'import utils.tensorboard as utb\n'), ((2895, 2928), 'utils.tensorboard.load_loglikelihood', 'utb.load_loglikelihood', (['event_acc'], {}), '(event_acc)\n', (2917, 2928), True, 'import utils.tensorboard as utb\n'), ((3851, 3888), 'numpy.tile', 'np.tile', (['x_recons', '[self.bs, 1, 1, 1]'], {}), '(x_recons, [self.bs, 1, 1, 1])\n', (3858, 3888), True, 'import numpy as np\n'), ((5733, 5757), 'math.ceil', 'math.ceil', (['(N_i / self.bs)'], {}), '(N_i / self.bs)\n', (5742, 5757), False, 'import math\n'), ((5777, 5790), 'numpy.zeros', 'np.zeros', (['N_i'], {}), '(N_i)\n', (5785, 5790), True, 'import numpy as np\n'), ((5815, 5828), 'numpy.zeros', 'np.zeros', (['N_i'], {}), '(N_i)\n', (5823, 5828), True, 'import numpy as np\n'), ((5853, 5866), 'numpy.zeros', 'np.zeros', (['N_i'], {}), '(N_i)\n', (5861, 5866), True, 'import numpy as np\n'), ((206, 230), 'numpy.arange', 'np.arange', (['(1)', '(length + 1)'], {}), '(1, length + 1)\n', (215, 230), True, 'import numpy as np\n'), ((393, 417), 'numpy.arange', 'np.arange', (['(1)', '(length + 1)'], {}), '(1, length + 1)\n', (402, 417), True, 'import numpy as np\n'), ((1313, 1323), 'numpy.exp', 'np.exp', (['ds'], {}), '(ds)\n', (1319, 1323), True, 'import numpy as np\n'), ((3229, 3240), 'time.time', 'time.time', ([], {}), '()\n', (3238, 3240), False, 'import time\n'), ((5643, 5653), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (5649, 5653), True, 'import numpy as np\n'), ((5922, 5948), 'numpy.tile', 'np.tile', (['z_c', '[self.bs, 1]'], {}), '(z_c, [self.bs, 1])\n', (5929, 5948), True, 'import numpy as np\n'), ((6382, 6423), 'utils.loss.PSNR', 'loss.PSNR', (['x_gener', 'x_tmp'], {'axis': '(1, 2, 3)'}), '(x_gener, x_tmp, axis=(1, 2, 3))\n', (6391, 6423), True, 'import utils.loss as loss\n'), ((6850, 6863), 'numpy.log', 'np.log', (['sigma'], {}), '(sigma)\n', (6856, 6863), True, 'import numpy as np\n'), ((3339, 
3350), 'time.time', 'time.time', ([], {}), '()\n', (3348, 3350), False, 'import time\n'), ((3388, 3399), 'time.time', 'time.time', ([], {}), '()\n', (3397, 3399), False, 'import time\n'), ((4305, 4329), 'numpy.sum', 'np.sum', (['accepted_samples'], {}), '(accepted_samples)\n', (4311, 4329), True, 'import numpy as np\n'), ((5503, 5536), 'math.ceil', 'math.ceil', (['(N_i * self.k / self.bs)'], {}), '(N_i * self.k / self.bs)\n', (5512, 5536), False, 'import math\n'), ((5980, 6024), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', '[self.bs, z_dim]'], {}), '(0, sigma, [self.bs, z_dim])\n', (5996, 6024), True, 'import numpy as np\n'), ((6660, 6673), 'numpy.log', 'np.log', (['sigma'], {}), '(sigma)\n', (6666, 6673), True, 'import numpy as np\n'), ((6781, 6812), 'numpy.sum', 'np.sum', (['((z - z_c) ** 2)'], {'axis': '(-1)'}), '((z - z_c) ** 2, axis=-1)\n', (6787, 6812), True, 'import numpy as np\n'), ((6885, 6902), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (6891, 6902), True, 'import numpy as np\n'), ((6625, 6648), 'numpy.sum', 'np.sum', (['(z ** 2)'], {'axis': '(-1)'}), '(z ** 2, axis=-1)\n', (6631, 6648), True, 'import numpy as np\n'), ((6576, 6607), 'numpy.sum', 'np.sum', (['((z - z_c) ** 2)'], {'axis': '(-1)'}), '((z - z_c) ** 2, axis=-1)\n', (6582, 6607), True, 'import numpy as np\n')]
#%% Setup from matplotlib import rc rc("font", **{"family": "serif", "serif": ["Computer Modern Roman"], "size": 20}) rc("text", usetex=True) import os import numpy as np import mpmath as mp import matplotlib.pyplot as plt from scipy.special import lambertw from scipy import optimize # path = os.path.expanduser( # "~\\Documents\\Git\\mattwthomas.com\\assets\\images\\posts\\2021\\" # ) # os.chdir(path) #%% alpha plot nn = 50 # resolution of plot xmin = 0 xmax = 1 def alphabar(dk): logdk = np.log(dk) W = lambertw(-dk * logdk) return 1 - W / logdk x = np.linspace(1 / nn, xmax, nn) y = alphabar(x) figure = plt.figure() plt.plot(x, y) plt.fill_between(x, y, alpha=0.2) plt.axis([xmin, xmax, 0, 2]) plt.ylabel(r"$\bar{r}(\delta / k)$") plt.xlabel(r"$\delta / k$") plt.xticks([0, 0.25, 0.5, 0.75, 1], [0, 0.25, 0.5, 0.75, 1]) plt.yticks([0, 1, 2]) figure.set_dpi(100) figure.set_size_inches(4, 2.5) figure.savefig("tullock-maximum-r.svg", optimize=True, bbox_inches="tight") # %% direct discrimination nn = 50 # resolution of plot xmin = 0 xmax = 4 def s1(d, k=2): return (k * d) / (k + d) ** 2 x = np.linspace(xmin, xmax, nn) y = s1(x, k=2) figure = plt.figure() plt.plot(x, y) plt.axis([xmin, xmax, 0, 0.3]) plt.ylabel(r"$s_1$") plt.xlabel(r"$\delta$") plt.xticks([0, 1, 2], ["0", "1", "k"]) plt.yticks([0]) figure.set_dpi(100) figure.set_size_inches(4, 2.5) figure.savefig("tullock-direct-discrimination.svg", bbox_inches="tight") # %% covert discrimination nn = 1000 # resolution of plot xmin = 1 xmax = 10 x = np.linspace(xmin + 1 / nn, xmax, nn) # Create inner loop def alphainner(a, k): f1 = 1 / np.log(k) f2 = 1 - (2 / (1 + k ** a)) return a - f1 / f2 # make an initial guess f10 = 1 / np.log(x) f20 = 1 - (2 / (1 + x)) y = optimize.root(alphainner, x0=(f10 / f20), args=(x)) figure = plt.figure() plt.plot(x, y.x) plt.plot(x, alphabar(1 / x)) plt.axis([xmin, xmax, 0, 2]) plt.ylabel(r"$r$") plt.xlabel(r"$k$") plt.xticks(np.linspace(1, xmax, xmax)) plt.yticks([0, 1, 2]) plt.legend([r"$r^\star(k)$", r"$\bar{r}(1/k)$"]) figure.set_dpi(100) figure.set_size_inches(4, 2.5) figure.savefig("tullock-covert-discrimination.svg", bbox_inches="tight") # %% revenue graph nn = 1000 # resolution of plot xmin = 0 xmax = 3 @np.vectorize def revenue(r, k=2, d=1): if r <= alphabar(d / k): return (1 + 1 / k) * ((r * (k * d) ** r) / (k ** r + d ** r) ** 2) elif r > 2: return (d / k) * ((1 + k) / (2 * k)) else: alphar = (2 / r) * (r - 1) ** ((r - 1) / r) return alphar * (d / k) * ((1 + k) / (2 * k)) x = np.linspace(xmin, xmax, nn) y2 = revenue(x, k=1.5) y6 = revenue(x, k=8) figure = plt.figure() plt.plot(x, y2) plt.plot(x, y6) # plt.axis([xmin, xmax, 0, 0.42]) plt.ylabel("Revenue") plt.xlabel(r"$r$") plt.xticks( [0, alphabar(1 / 8), alphabar(1 / 1.5), 2], ["0", r"$\bar{r}(1/8)$", r"$\bar{r}(2/3)$", "2"], ) plt.vlines(alphabar(1 / 1.5), ymin=0, ymax=max(y2), color="C0", linestyle="dashed") plt.vlines(alphabar(1 / 8), ymin=0, ymax=revenue(alphabar(1 / 8), k=8), color="C1", linestyle="dashed") plt.vlines(2, ymin=0, ymax=max(y2), color="grey", linestyle="dashed") plt.yticks([0]) plt.legend([r"$\delta/k = 2/3$", r"$\delta/k = 1/8$"], loc="upper left") figure.set_dpi(100) figure.set_size_inches(4, 2.5) figure.savefig("tullock-covert-revenue.svg", bbox_inches="tight") # %%
[ "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "scipy.special.lambertw", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "numpy.log", "matplotlib.pyplot.fill_between", "numpy.linspace", "matplotlib.pyplot.figure", "matplotlib.pyplot.yticks", "matplotlib.rc", "scipy.optimize.root", "matplotlib.pyplot.axis", "matplotlib.pyplot.legend" ]
[((37, 122), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Computer Modern Roman'], 'size':\n 20})\n", (39, 122), False, 'from matplotlib import rc\n'), ((119, 142), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (121, 142), False, 'from matplotlib import rc\n'), ((581, 610), 'numpy.linspace', 'np.linspace', (['(1 / nn)', 'xmax', 'nn'], {}), '(1 / nn, xmax, nn)\n', (592, 610), True, 'import numpy as np\n'), ((637, 649), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (647, 649), True, 'import matplotlib.pyplot as plt\n'), ((650, 664), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (658, 664), True, 'import matplotlib.pyplot as plt\n'), ((665, 698), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'y'], {'alpha': '(0.2)'}), '(x, y, alpha=0.2)\n', (681, 698), True, 'import matplotlib.pyplot as plt\n'), ((699, 727), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin, xmax, 0, 2]'], {}), '([xmin, xmax, 0, 2])\n', (707, 727), True, 'import matplotlib.pyplot as plt\n'), ((728, 765), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\bar{r}(\\\\delta / k)$"""'], {}), "('$\\\\bar{r}(\\\\delta / k)$')\n", (738, 765), True, 'import matplotlib.pyplot as plt\n'), ((765, 792), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta / k$"""'], {}), "('$\\\\delta / k$')\n", (775, 792), True, 'import matplotlib.pyplot as plt\n'), ((793, 853), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 0.25, 0.5, 0.75, 1]', '[0, 0.25, 0.5, 0.75, 1]'], {}), '([0, 0.25, 0.5, 0.75, 1], [0, 0.25, 0.5, 0.75, 1])\n', (803, 853), True, 'import matplotlib.pyplot as plt\n'), ((854, 875), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (864, 875), True, 'import matplotlib.pyplot as plt\n'), ((1139, 1166), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nn'], {}), '(xmin, xmax, nn)\n', (1150, 1166), True, 'import numpy as np\n'), ((1192, 1204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1202, 1204), True, 'import matplotlib.pyplot as plt\n'), ((1205, 1219), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1213, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1220, 1250), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin, xmax, 0, 0.3]'], {}), '([xmin, xmax, 0, 0.3])\n', (1228, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1270), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$s_1$"""'], {}), "('$s_1$')\n", (1261, 1270), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1295), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta$"""'], {}), "('$\\\\delta$')\n", (1282, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1334), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2]', "['0', '1', 'k']"], {}), "([0, 1, 2], ['0', '1', 'k'])\n", (1306, 1334), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1350), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0]'], {}), '([0])\n', (1345, 1350), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1596), 'numpy.linspace', 'np.linspace', (['(xmin + 1 / nn)', 'xmax', 'nn'], {}), '(xmin + 1 / nn, xmax, nn)\n', (1571, 1596), True, 'import numpy as np\n'), ((1793, 1840), 'scipy.optimize.root', 'optimize.root', (['alphainner'], {'x0': '(f10 / f20)', 'args': 'x'}), '(alphainner, x0=f10 / f20, args=x)\n', (1806, 1840), False, 'from scipy import optimize\n'), ((1855, 1867), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1865, 1867), True, 'import 
matplotlib.pyplot as plt\n'), ((1868, 1884), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y.x'], {}), '(x, y.x)\n', (1876, 1884), True, 'import matplotlib.pyplot as plt\n'), ((1914, 1942), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin, xmax, 0, 2]'], {}), '([xmin, xmax, 0, 2])\n', (1922, 1942), True, 'import matplotlib.pyplot as plt\n'), ((1943, 1960), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$r$"""'], {}), "('$r$')\n", (1953, 1960), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1979), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {}), "('$k$')\n", (1972, 1979), True, 'import matplotlib.pyplot as plt\n'), ((2020, 2041), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2030, 2041), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2090), 'matplotlib.pyplot.legend', 'plt.legend', (["['$r^\\\\star(k)$', '$\\\\bar{r}(1/k)$']"], {}), "(['$r^\\\\star(k)$', '$\\\\bar{r}(1/k)$'])\n", (2052, 2090), True, 'import matplotlib.pyplot as plt\n'), ((2615, 2642), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nn'], {}), '(xmin, xmax, nn)\n', (2626, 2642), True, 'import numpy as np\n'), ((2697, 2709), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2707, 2709), True, 'import matplotlib.pyplot as plt\n'), ((2710, 2725), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {}), '(x, y2)\n', (2718, 2725), True, 'import matplotlib.pyplot as plt\n'), ((2726, 2741), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y6'], {}), '(x, y6)\n', (2734, 2741), True, 'import matplotlib.pyplot as plt\n'), ((2776, 2797), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Revenue"""'], {}), "('Revenue')\n", (2786, 2797), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2815), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$r$"""'], {}), "('$r$')\n", (2808, 2815), True, 'import matplotlib.pyplot as plt\n'), ((3191, 3206), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0]'], {}), '([0])\n', (3201, 3206), True, 'import matplotlib.pyplot as plt\n'), ((3207, 3279), 'matplotlib.pyplot.legend', 'plt.legend', (["['$\\\\delta/k = 2/3$', '$\\\\delta/k = 1/8$']"], {'loc': '"""upper left"""'}), "(['$\\\\delta/k = 2/3$', '$\\\\delta/k = 1/8$'], loc='upper left')\n", (3217, 3279), True, 'import matplotlib.pyplot as plt\n'), ((509, 519), 'numpy.log', 'np.log', (['dk'], {}), '(dk)\n', (515, 519), True, 'import numpy as np\n'), ((528, 549), 'scipy.special.lambertw', 'lambertw', (['(-dk * logdk)'], {}), '(-dk * logdk)\n', (536, 549), False, 'from scipy.special import lambertw\n'), ((1754, 1763), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1760, 1763), True, 'import numpy as np\n'), ((1992, 2018), 'numpy.linspace', 'np.linspace', (['(1)', 'xmax', 'xmax'], {}), '(1, xmax, xmax)\n', (2003, 2018), True, 'import numpy as np\n'), ((1653, 1662), 'numpy.log', 'np.log', (['k'], {}), '(k)\n', (1659, 1662), True, 'import numpy as np\n')]
#!/usr/bin/env python3 import cudasift import cv2 import numpy as np from profiling import TaggedTimer def main(): sift = cudasift.PyCudaSift(dev_num=0) timr = TaggedTimer() filename = "../data/CY_279b46b9_1575825158217_1575825184058.jpg" # filename = "/home/jfinken/projects/here/sp/jfinken/faiss_gpu/AIC_query2.jpg" data = cv2.imread(filename, cv2.IMREAD_GRAYSCALE) # for writing out keypoints img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE) height, width = data.shape print(f"Input image: {width}x{height}") # data = np.ascontiguousarray(data, dtype=np.float32).ravel() # data = np.array(data, dtype=np.float32).ravel() data = img.astype(np.float32).ravel() timr("np.ascontiguousarray") NUM_RUNS = 3 # Allocate CUDA memory for the source image: once sift.allocate_cuda_image( width, height, cudasift.i_align_up(width, 128), False, None, None ) timr("allocate_cuda_image") # Allocate storage for internal results sift.init_sift_data(max_pts=32768, host=True, dev=True) sift.allocate_sift_temp_memory(width, height, 5, False) timr("allocate_sift_temp_memory") for j in range(NUM_RUNS): # Convenient and temporally performant optimization: # Reuse CUDA malloc-ed device memory # Simply download this input image to the device sift.download_cuda_image(data) timr("download_cuda_image") # Run sift.extract_sift( # num_octaves=5, init_blur=1.0, thresh=2.0, lowest_scale=0.0, scale_up=False num_octaves=5, init_blur=1.0, thresh=2.0, lowest_scale=0.0, scale_up=False, ) timr("extract_sift") print(timr) # Get descriptors and keypoints desc, kp = sift.get_features() desc_np = np.asarray(desc) kp_np = np.asarray(kp) timr( f"get_features done (num_pts={desc_np.shape[0]}, desc_np.shape={desc_np.shape}, kp_np.shape={kp_np.shape})" ) print(timr) """ # Debug: make cv2 keypoints kps = [] for i in range(kp_np.shape[0]): # print(f"keypt @ {desc[i].get('xpos')}, {desc[i].get('ypos')}") kps.append( cv2.KeyPoint( x=int(kp_np[i, 0]), y=int(kp_np[i, 1]), _size=kp_np[i, 2], _angle=kp_np[i, 3], ) ) timr("for-loop over keypoints") print(timr) img = cv2.drawKeypoints( img, kps, outImage=np.array([]), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, ) timr("cv2.drawKeypoints") cv2.imwrite(f"woo.jpg", img) # timr("cv2.imwrite") """ if __name__ == "__main__": main()
[ "numpy.asarray", "cudasift.i_align_up", "cudasift.PyCudaSift", "profiling.TaggedTimer", "cv2.imread" ]
[((129, 159), 'cudasift.PyCudaSift', 'cudasift.PyCudaSift', ([], {'dev_num': '(0)'}), '(dev_num=0)\n', (148, 159), False, 'import cudasift\n'), ((171, 184), 'profiling.TaggedTimer', 'TaggedTimer', ([], {}), '()\n', (182, 184), False, 'from profiling import TaggedTimer\n'), ((349, 391), 'cv2.imread', 'cv2.imread', (['filename', 'cv2.IMREAD_GRAYSCALE'], {}), '(filename, cv2.IMREAD_GRAYSCALE)\n', (359, 391), False, 'import cv2\n'), ((434, 476), 'cv2.imread', 'cv2.imread', (['filename', 'cv2.IMREAD_GRAYSCALE'], {}), '(filename, cv2.IMREAD_GRAYSCALE)\n', (444, 476), False, 'import cv2\n'), ((873, 904), 'cudasift.i_align_up', 'cudasift.i_align_up', (['width', '(128)'], {}), '(width, 128)\n', (892, 904), False, 'import cudasift\n'), ((1862, 1878), 'numpy.asarray', 'np.asarray', (['desc'], {}), '(desc)\n', (1872, 1878), True, 'import numpy as np\n'), ((1895, 1909), 'numpy.asarray', 'np.asarray', (['kp'], {}), '(kp)\n', (1905, 1909), True, 'import numpy as np\n')]
#!/usr/bin/env python """ Perform the Mann-Whitney U test, the Kolmogorov-Smirnov test, and the Student's t-test for the following ensembles: - GPU double precision (reference & control) - CPU double precision - GPU single precision - GPU double precision with additional explicit diffusion Make sure to compile the cpp files for the Mann-Whitney U test and the Kolmogorov-Smirnov test first before running this script (see mannwhitneyu.cpp and kolmogorov_smirnov.cpp). Copyright (c) 2021 ETH Zurich, <NAME> MIT License """ import numpy as np import xarray as xr import pickle import mannwhitneyu as mwu import kolmogorov_smirnov as ks rpert = 'e4' # prefix n_runs = 50 # total number of runs n_sel = 100 # how many times we randomly select runs alpha = 0.05 # significance level nm = 20 # members per ensemble u_crit = 127 # nm = 20 t_crit = 2.024 # nm = 20 replace = False # to bootstrap or not to bootstrap nbins = 100 # Kolmogorov-Smirnov # Some arrays to make life easier tests = ['mwu', 'ks', 't'] comparisons = ['c', 'cpu', 'sp', 'diff'] # Variable variables = ['t_850hPa', 'fi_500hPa', 'u_10m', 't_2m', 'precip', 'asob_t', 'athb_t', 'ps'] path_gpu = '../data/10d_gpu_cpu_sp_diff/gpu_dycore/' path_cpu = '../data/10d_gpu_cpu_sp_diff/cpu_nodycore/' path_gpu_sp = '../data/10d_gpu_cpu_sp_diff/gpu_dycore_sp/' path_gpu_diff = '../data/10d_gpu_cpu_sp_diff/gpu_dycore_diff/' # Final rejection rates rej_rates = {} for comp in comparisons: rej_rates[comp] = {} for vname in variables: rej_rates[comp][vname] = {} runs_r = {} runs_c = {} runs_cpu = {} runs_sp = {} runs_diff = {} # Load data for gpu (reference and control) and cpu for i in range(n_runs): i_str_r = str(i).zfill(4) i_str_c = str(i+n_runs).zfill(4) fname_r = path_gpu + rpert + '_' + i_str_r + '.nc' fname_c = path_gpu + rpert + '_' + i_str_c + '.nc' fname_cpu = path_cpu + rpert + '_' + i_str_r + '.nc' fname_sp = path_gpu_sp + rpert + '_' + i_str_r + '.nc' fname_diff = path_gpu_diff + rpert + '_' + i_str_r + '.nc' runs_r[i] = {} runs_c[i] = {} runs_cpu[i] = {} runs_sp[i] = {} runs_diff[i] = {} runs_r[i]['dset'] = xr.open_dataset(fname_r) runs_c[i]['dset'] = xr.open_dataset(fname_c) runs_cpu[i]['dset'] = xr.open_dataset(fname_cpu) runs_sp[i]['dset'] = xr.open_dataset(fname_sp) runs_diff[i]['dset'] = xr.open_dataset(fname_diff) # Test for each variable for vname in variables: print("----------------------------") print("Working on " + vname + " ...") print("----------------------------") # initialize arrays nt, ny, nx = runs_r[0]['dset'][vname].shape values_r = np.zeros((nt, ny, nx, nm)) values_c = np.zeros((nt, ny, nx, nm)) values_cpu = np.zeros((nt, ny, nx, nm)) values_sp = np.zeros((nt, ny, nx, nm)) values_diff = np.zeros((nt, ny, nx, nm)) # For the results results = {} for test in tests: results[test] = {} for comp in comparisons: results[test][comp] = np.zeros((n_sel, nt)) # Do test multiple times with random selection of ensemble members for s in range(n_sel): if ((s+1) % 10 == 0): print(str(s+1) + " / " + str(n_sel)) # Pick random samples for comparison idxs_r = np.random.choice(np.arange(n_runs), nm, replace=replace) idxs_c = np.random.choice(np.arange(n_runs), nm, replace=replace) idxs_cpu = np.random.choice(np.arange(n_runs), nm, replace=replace) idxs_sp = np.random.choice(np.arange(n_runs), nm, replace=replace) idxs_diff = np.random.choice(np.arange(n_runs), nm, replace=replace) # ============================================================ # Mann-Whitney U test # ============================================================ test = 'mwu' # Put 
together arrays for i in range(nm): values_r[:,:,:,i] = runs_r[idxs_r[i]]['dset'][vname].values values_c[:,:,:,i] = runs_c[idxs_c[i]]['dset'][vname].values values_cpu[:,:,:,i] = runs_cpu[idxs_cpu[i]]['dset'][vname].values values_sp[:,:,:,i] = runs_sp[idxs_sp[i]]['dset'][vname].values values_diff[:,:,:,i] = runs_diff[idxs_diff[i]]['dset'][vname].values # Call test reject_c = mwu.mwu(values_r, values_c, u_crit) reject_cpu = mwu.mwu(values_r, values_cpu, u_crit) reject_sp = mwu.mwu(values_r, values_sp, u_crit) reject_diff = mwu.mwu(values_r, values_diff, u_crit) results[test]['c'][s] = np.mean(reject_c, axis=(1,2)) results[test]['cpu'][s] = np.mean(reject_cpu, axis=(1,2)) results[test]['sp'][s] = np.mean(reject_sp, axis=(1,2)) results[test]['diff'][s] = np.mean(reject_diff, axis=(1,2)) # ============================================================ # Kolmogorov-Smirnov test # ============================================================ test = 'ks' # Call test reject_c = ks.ks(values_r, values_c, nbins) reject_cpu = ks.ks(values_r, values_cpu, nbins) reject_sp = ks.ks(values_r, values_sp, nbins) reject_diff = ks.ks(values_r, values_diff, nbins) results[test]['c'][s] = np.mean(reject_c, axis=(1,2)) results[test]['cpu'][s] = np.mean(reject_cpu, axis=(1,2)) results[test]['sp'][s] = np.mean(reject_sp, axis=(1,2)) results[test]['diff'][s] = np.mean(reject_diff, axis=(1,2)) # ============================================================ # Student's t-test # ============================================================ test = 't' # Means mean_r = np.mean(values_r, axis=-1) mean_c = np.mean(values_c, axis=-1) mean_cpu = np.mean(values_cpu, axis=-1) mean_sp = np.mean(values_sp, axis=-1) mean_diff = np.mean(values_diff, axis=-1) # Variance var_r = np.zeros((nt, ny, nx)) var_c = np.zeros((nt, ny, nx)) var_cpu = np.zeros((nt, ny, nx)) var_sp = np.zeros((nt, ny, nx)) var_diff = np.zeros((nt, ny, nx)) for i in range(nm): var_r += (values_r[:,:,:,i] - mean_r)**2 var_c += (values_c[:,:,:,i] - mean_c)**2 var_cpu += (values_cpu[:,:,:,i] - mean_cpu)**2 var_sp += (values_sp[:,:,:,i] - mean_sp)**2 var_diff += (values_diff[:,:,:,i] - mean_diff)**2 # Unbiased estimator for standard deviation var_r /= (nm-1) var_c /= (nm-1) var_cpu /= (nm-1) var_sp /= (nm-1) var_diff /= (nm-1) stdev_c = np.sqrt(((nm-1) * var_r + (nm-1) * var_c) / (2*nm - 2)) stdev_cpu = np.sqrt(((nm-1) * var_r + (nm-1) * var_cpu) / (2*nm - 2)) stdev_sp = np.sqrt(((nm-1) * var_r + (nm-1) * var_sp) / (2*nm - 2)) stdev_diff = np.sqrt(((nm-1) * var_r + (nm-1) * var_diff) / (2*nm - 2)) # t-value t_c = np.abs((mean_r - mean_c) / (stdev_c * np.sqrt(2/nm))) t_cpu = np.abs((mean_r - mean_cpu) / (stdev_cpu * np.sqrt(2/nm))) t_sp = np.abs((mean_r - mean_sp) / (stdev_sp * np.sqrt(2/nm))) t_diff = np.abs((mean_r - mean_diff) / (stdev_diff * np.sqrt(2/nm))) # Rejection arrays reject_c = t_c > t_crit reject_cpu = t_cpu > t_crit reject_sp = t_sp > t_crit reject_diff = t_diff > t_crit results[test]['c'][s] = np.mean(reject_c, axis=(1,2)) results[test]['cpu'][s] = np.mean(reject_cpu, axis=(1,2)) results[test]['sp'][s] = np.mean(reject_sp, axis=(1,2)) results[test]['diff'][s] = np.mean(reject_diff, axis=(1,2)) # Store results for comp in comparisons: for test in tests: res = results[test][comp] rej_rates[comp][vname][test] = {} rr = rej_rates[comp][vname][test] rr['q_05'] = np.quantile(res, 0.5, axis=0) rr['q_005'] = np.quantile(res, 0.05, axis=0) rr['q_095'] = np.quantile(res, 0.95, axis=0) rr['mean'] = np.mean(res, axis=0) rr['min'] = np.min(res, axis=0) rr['max'] = 
np.max(res, axis=0) rr['reject'] = res # Save rejection rates with open('rr_mwu_ks_studt.pickle', 'wb') as handle: pickle.dump(rej_rates, handle)
[ "numpy.mean", "pickle.dump", "numpy.sqrt", "kolmogorov_smirnov.ks", "numpy.max", "mannwhitneyu.mwu", "numpy.zeros", "numpy.quantile", "numpy.min", "xarray.open_dataset", "numpy.arange" ]
[((2243, 2267), 'xarray.open_dataset', 'xr.open_dataset', (['fname_r'], {}), '(fname_r)\n', (2258, 2267), True, 'import xarray as xr\n'), ((2292, 2316), 'xarray.open_dataset', 'xr.open_dataset', (['fname_c'], {}), '(fname_c)\n', (2307, 2316), True, 'import xarray as xr\n'), ((2343, 2369), 'xarray.open_dataset', 'xr.open_dataset', (['fname_cpu'], {}), '(fname_cpu)\n', (2358, 2369), True, 'import xarray as xr\n'), ((2395, 2420), 'xarray.open_dataset', 'xr.open_dataset', (['fname_sp'], {}), '(fname_sp)\n', (2410, 2420), True, 'import xarray as xr\n'), ((2448, 2475), 'xarray.open_dataset', 'xr.open_dataset', (['fname_diff'], {}), '(fname_diff)\n', (2463, 2475), True, 'import xarray as xr\n'), ((2740, 2766), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2748, 2766), True, 'import numpy as np\n'), ((2782, 2808), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2790, 2808), True, 'import numpy as np\n'), ((2826, 2852), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2834, 2852), True, 'import numpy as np\n'), ((2869, 2895), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2877, 2895), True, 'import numpy as np\n'), ((2914, 2940), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2922, 2940), True, 'import numpy as np\n'), ((8375, 8405), 'pickle.dump', 'pickle.dump', (['rej_rates', 'handle'], {}), '(rej_rates, handle)\n', (8386, 8405), False, 'import pickle\n'), ((4390, 4425), 'mannwhitneyu.mwu', 'mwu.mwu', (['values_r', 'values_c', 'u_crit'], {}), '(values_r, values_c, u_crit)\n', (4397, 4425), True, 'import mannwhitneyu as mwu\n'), ((4447, 4484), 'mannwhitneyu.mwu', 'mwu.mwu', (['values_r', 'values_cpu', 'u_crit'], {}), '(values_r, values_cpu, u_crit)\n', (4454, 4484), True, 'import mannwhitneyu as mwu\n'), ((4505, 4541), 'mannwhitneyu.mwu', 'mwu.mwu', (['values_r', 'values_sp', 'u_crit'], {}), '(values_r, values_sp, u_crit)\n', (4512, 4541), True, 'import mannwhitneyu as mwu\n'), ((4564, 4602), 'mannwhitneyu.mwu', 'mwu.mwu', (['values_r', 'values_diff', 'u_crit'], {}), '(values_r, values_diff, u_crit)\n', (4571, 4602), True, 'import mannwhitneyu as mwu\n'), ((4635, 4665), 'numpy.mean', 'np.mean', (['reject_c'], {'axis': '(1, 2)'}), '(reject_c, axis=(1, 2))\n', (4642, 4665), True, 'import numpy as np\n'), ((4699, 4731), 'numpy.mean', 'np.mean', (['reject_cpu'], {'axis': '(1, 2)'}), '(reject_cpu, axis=(1, 2))\n', (4706, 4731), True, 'import numpy as np\n'), ((4764, 4795), 'numpy.mean', 'np.mean', (['reject_sp'], {'axis': '(1, 2)'}), '(reject_sp, axis=(1, 2))\n', (4771, 4795), True, 'import numpy as np\n'), ((4830, 4863), 'numpy.mean', 'np.mean', (['reject_diff'], {'axis': '(1, 2)'}), '(reject_diff, axis=(1, 2))\n', (4837, 4863), True, 'import numpy as np\n'), ((5099, 5131), 'kolmogorov_smirnov.ks', 'ks.ks', (['values_r', 'values_c', 'nbins'], {}), '(values_r, values_c, nbins)\n', (5104, 5131), True, 'import kolmogorov_smirnov as ks\n'), ((5153, 5187), 'kolmogorov_smirnov.ks', 'ks.ks', (['values_r', 'values_cpu', 'nbins'], {}), '(values_r, values_cpu, nbins)\n', (5158, 5187), True, 'import kolmogorov_smirnov as ks\n'), ((5208, 5241), 'kolmogorov_smirnov.ks', 'ks.ks', (['values_r', 'values_sp', 'nbins'], {}), '(values_r, values_sp, nbins)\n', (5213, 5241), True, 'import kolmogorov_smirnov as ks\n'), ((5264, 5299), 'kolmogorov_smirnov.ks', 'ks.ks', (['values_r', 'values_diff', 'nbins'], {}), '(values_r, values_diff, nbins)\n', (5269, 5299), 
True, 'import kolmogorov_smirnov as ks\n'), ((5332, 5362), 'numpy.mean', 'np.mean', (['reject_c'], {'axis': '(1, 2)'}), '(reject_c, axis=(1, 2))\n', (5339, 5362), True, 'import numpy as np\n'), ((5396, 5428), 'numpy.mean', 'np.mean', (['reject_cpu'], {'axis': '(1, 2)'}), '(reject_cpu, axis=(1, 2))\n', (5403, 5428), True, 'import numpy as np\n'), ((5461, 5492), 'numpy.mean', 'np.mean', (['reject_sp'], {'axis': '(1, 2)'}), '(reject_sp, axis=(1, 2))\n', (5468, 5492), True, 'import numpy as np\n'), ((5527, 5560), 'numpy.mean', 'np.mean', (['reject_diff'], {'axis': '(1, 2)'}), '(reject_diff, axis=(1, 2))\n', (5534, 5560), True, 'import numpy as np\n'), ((5782, 5808), 'numpy.mean', 'np.mean', (['values_r'], {'axis': '(-1)'}), '(values_r, axis=-1)\n', (5789, 5808), True, 'import numpy as np\n'), ((5826, 5852), 'numpy.mean', 'np.mean', (['values_c'], {'axis': '(-1)'}), '(values_c, axis=-1)\n', (5833, 5852), True, 'import numpy as np\n'), ((5872, 5900), 'numpy.mean', 'np.mean', (['values_cpu'], {'axis': '(-1)'}), '(values_cpu, axis=-1)\n', (5879, 5900), True, 'import numpy as np\n'), ((5919, 5946), 'numpy.mean', 'np.mean', (['values_sp'], {'axis': '(-1)'}), '(values_sp, axis=-1)\n', (5926, 5946), True, 'import numpy as np\n'), ((5967, 5996), 'numpy.mean', 'np.mean', (['values_diff'], {'axis': '(-1)'}), '(values_diff, axis=-1)\n', (5974, 5996), True, 'import numpy as np\n'), ((6033, 6055), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6041, 6055), True, 'import numpy as np\n'), ((6072, 6094), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6080, 6094), True, 'import numpy as np\n'), ((6113, 6135), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6121, 6135), True, 'import numpy as np\n'), ((6153, 6175), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6161, 6175), True, 'import numpy as np\n'), ((6195, 6217), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6203, 6217), True, 'import numpy as np\n'), ((6726, 6787), 'numpy.sqrt', 'np.sqrt', (['(((nm - 1) * var_r + (nm - 1) * var_c) / (2 * nm - 2))'], {}), '(((nm - 1) * var_r + (nm - 1) * var_c) / (2 * nm - 2))\n', (6733, 6787), True, 'import numpy as np\n'), ((6802, 6865), 'numpy.sqrt', 'np.sqrt', (['(((nm - 1) * var_r + (nm - 1) * var_cpu) / (2 * nm - 2))'], {}), '(((nm - 1) * var_r + (nm - 1) * var_cpu) / (2 * nm - 2))\n', (6809, 6865), True, 'import numpy as np\n'), ((6879, 6941), 'numpy.sqrt', 'np.sqrt', (['(((nm - 1) * var_r + (nm - 1) * var_sp) / (2 * nm - 2))'], {}), '(((nm - 1) * var_r + (nm - 1) * var_sp) / (2 * nm - 2))\n', (6886, 6941), True, 'import numpy as np\n'), ((6957, 7021), 'numpy.sqrt', 'np.sqrt', (['(((nm - 1) * var_r + (nm - 1) * var_diff) / (2 * nm - 2))'], {}), '(((nm - 1) * var_r + (nm - 1) * var_diff) / (2 * nm - 2))\n', (6964, 7021), True, 'import numpy as np\n'), ((7525, 7555), 'numpy.mean', 'np.mean', (['reject_c'], {'axis': '(1, 2)'}), '(reject_c, axis=(1, 2))\n', (7532, 7555), True, 'import numpy as np\n'), ((7589, 7621), 'numpy.mean', 'np.mean', (['reject_cpu'], {'axis': '(1, 2)'}), '(reject_cpu, axis=(1, 2))\n', (7596, 7621), True, 'import numpy as np\n'), ((7654, 7685), 'numpy.mean', 'np.mean', (['reject_sp'], {'axis': '(1, 2)'}), '(reject_sp, axis=(1, 2))\n', (7661, 7685), True, 'import numpy as np\n'), ((7720, 7753), 'numpy.mean', 'np.mean', (['reject_diff'], {'axis': '(1, 2)'}), '(reject_diff, axis=(1, 2))\n', (7727, 7753), True, 'import numpy as np\n'), ((3098, 3119), 'numpy.zeros', 'np.zeros', 
(['(n_sel, nt)'], {}), '((n_sel, nt))\n', (3106, 3119), True, 'import numpy as np\n'), ((3378, 3395), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3387, 3395), True, 'import numpy as np\n'), ((3452, 3469), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3461, 3469), True, 'import numpy as np\n'), ((3528, 3545), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3537, 3545), True, 'import numpy as np\n'), ((3603, 3620), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3612, 3620), True, 'import numpy as np\n'), ((3680, 3697), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3689, 3697), True, 'import numpy as np\n'), ((7985, 8014), 'numpy.quantile', 'np.quantile', (['res', '(0.5)'], {'axis': '(0)'}), '(res, 0.5, axis=0)\n', (7996, 8014), True, 'import numpy as np\n'), ((8041, 8071), 'numpy.quantile', 'np.quantile', (['res', '(0.05)'], {'axis': '(0)'}), '(res, 0.05, axis=0)\n', (8052, 8071), True, 'import numpy as np\n'), ((8098, 8128), 'numpy.quantile', 'np.quantile', (['res', '(0.95)'], {'axis': '(0)'}), '(res, 0.95, axis=0)\n', (8109, 8128), True, 'import numpy as np\n'), ((8154, 8174), 'numpy.mean', 'np.mean', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (8161, 8174), True, 'import numpy as np\n'), ((8199, 8218), 'numpy.min', 'np.min', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (8205, 8218), True, 'import numpy as np\n'), ((8243, 8262), 'numpy.max', 'np.max', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (8249, 8262), True, 'import numpy as np\n'), ((7087, 7102), 'numpy.sqrt', 'np.sqrt', (['(2 / nm)'], {}), '(2 / nm)\n', (7094, 7102), True, 'import numpy as np\n'), ((7161, 7176), 'numpy.sqrt', 'np.sqrt', (['(2 / nm)'], {}), '(2 / nm)\n', (7168, 7176), True, 'import numpy as np\n'), ((7232, 7247), 'numpy.sqrt', 'np.sqrt', (['(2 / nm)'], {}), '(2 / nm)\n', (7239, 7247), True, 'import numpy as np\n'), ((7309, 7324), 'numpy.sqrt', 'np.sqrt', (['(2 / nm)'], {}), '(2 / nm)\n', (7316, 7324), True, 'import numpy as np\n')]
from matplotlib.gridspec import GridSpec import matplotlib.pyplot as plt import numpy as np import matplotlib import math from ._default_matplotlib_figure_dimensions import _default_matplotlib_figure_dimensions def _calculate_nrows(nplots, ncols): return math.ceil(nplots / ncols) def _initialize_plot_with_dimensions(ncols, nrows, figsize_width, figsize_height): """ Parameters: ----------- ncols Number of columns in the figure. type: int nrows Number of rows in the figure. type: int figsize_width Scaler adjustment of figure width default: 1 type: float figsize_height Scaler adjustment of figure height default: 1 type: float Returns: -------- fig type: matplotlib.figure.Figure Notes: ------ """ fig_dimensions = _default_matplotlib_figure_dimensions()*np.array([ncols * figsize_width, nrows * figsize_height]) fig = plt.figure(figsize=fig_dimensions) return fig def _construct_plot_layout( nplots, ncols=4, figsize_width=1, figsize_height=1, grid_hspace=0.2, grid_wspace=0, width_ratios=False, height_ratios=False, ): """ Creates Axes for each desired plot. Parameters: ----------- nplots ncols Number of columns. default: 4 type: int Returns: -------- Notes: ------ """ if np.any(width_ratios) == False: if nplots <= ncols: width_ratios = np.ones(ncols) nrows = _calculate_nrows(nplots, ncols) if not height_ratios: height_ratios = np.ones(nrows) fig = _initialize_plot_with_dimensions(ncols, nrows, figsize_width, figsize_height) gridspec = GridSpec(nrows, ncols, width_ratios=width_ratios, height_ratios=height_ratios, hspace=grid_hspace, wspace=grid_wspace) plot_count = 0 AxesDict = {} for ax_i in range(nrows): AxesDict[ax_i] = {} for ax_j in range(ncols): plot_count += 1 AxesDict[ax_i][ax_j] = fig.add_subplot(gridspec[ax_i, ax_j]) if plot_count >= nplots: break return fig, AxesDict
[ "math.ceil", "numpy.ones", "numpy.any", "numpy.array", "matplotlib.pyplot.figure", "matplotlib.gridspec.GridSpec" ]
[((261, 286), 'math.ceil', 'math.ceil', (['(nplots / ncols)'], {}), '(nplots / ncols)\n', (270, 286), False, 'import math\n'), ((980, 1014), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_dimensions'}), '(figsize=fig_dimensions)\n', (990, 1014), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1906), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['nrows', 'ncols'], {'width_ratios': 'width_ratios', 'height_ratios': 'height_ratios', 'hspace': 'grid_hspace', 'wspace': 'grid_wspace'}), '(nrows, ncols, width_ratios=width_ratios, height_ratios=\n height_ratios, hspace=grid_hspace, wspace=grid_wspace)\n', (1791, 1906), False, 'from matplotlib.gridspec import GridSpec\n'), ((912, 969), 'numpy.array', 'np.array', (['[ncols * figsize_width, nrows * figsize_height]'], {}), '([ncols * figsize_width, nrows * figsize_height])\n', (920, 969), True, 'import numpy as np\n'), ((1451, 1471), 'numpy.any', 'np.any', (['width_ratios'], {}), '(width_ratios)\n', (1457, 1471), True, 'import numpy as np\n'), ((1652, 1666), 'numpy.ones', 'np.ones', (['nrows'], {}), '(nrows)\n', (1659, 1666), True, 'import numpy as np\n'), ((1537, 1551), 'numpy.ones', 'np.ones', (['ncols'], {}), '(ncols)\n', (1544, 1551), True, 'import numpy as np\n')]
#!/usr/bin/env python3 """ corrections.py: Script to apply corrections to the images. """ import os from argparse import ArgumentParser from datetime import date, datetime from typing import Optional, Sequence import numpy as np from astropy.io import fits from dresscode.utils import load_config def main(argv: Optional[Sequence[str]] = None) -> int: parser = ArgumentParser() parser.add_argument( "-c", "--config", help="path to config.txt", default="config.txt" ) args = parser.parse_args(argv) config = load_config(args.config) galaxy = config["galaxy"] path = config["path"] + galaxy + "/working_dir/" years = config["years"] # Loop over the different years. for year in years: print("Year: " + year) yearpath = path + year + "/" # PART 1: Apply a coincidence loss correction. print("Applying coincidence loss corrections...") if os.path.isfile(yearpath + "sum_um2_nm.img"): coicorr(yearpath + "sum_um2_nm.img") if os.path.isfile(yearpath + "sum_uw2_nm.img"): coicorr(yearpath + "sum_uw2_nm.img") if os.path.isfile(yearpath + "sum_uw1_nm.img"): coicorr(yearpath + "sum_uw1_nm.img") # PART 2: Apply a large scale sensitivity correction. print("Applying large scale sensitivity corrections...") if os.path.isfile(yearpath + "sum_um2_nm_coi.img"): lsscorr(yearpath + "sum_um2_nm_coi.img") if os.path.isfile(yearpath + "sum_uw2_nm_coi.img"): lsscorr(yearpath + "sum_uw2_nm_coi.img") if os.path.isfile(yearpath + "sum_uw1_nm_coi.img"): lsscorr(yearpath + "sum_uw1_nm_coi.img") # PART 3: Apply a zero point correction. print("Applying zero point corrections...") if os.path.isfile(yearpath + "sum_um2_nm_coilss.img"): zeropoint(yearpath + "sum_um2_nm_coilss.img", -2.330e-3, -1.361e-3) if os.path.isfile(yearpath + "sum_uw2_nm_coilss.img"): zeropoint(yearpath + "sum_uw2_nm_coilss.img", 1.108e-3, -1.960e-3) if os.path.isfile(yearpath + "sum_uw1_nm_coilss.img"): zeropoint(yearpath + "sum_uw1_nm_coilss.img", 2.041e-3, -1.748e-3) return 0 # Functions for PART 1: Coincidence loss correction. def coicorr(filename): # Open the image. Create arrays with zeros with the shape of the image. hdulist = fits.open(filename) data = hdulist[0].data header = hdulist[0].header total_flux = np.full_like(data, np.nan, dtype=np.float64) std = np.full_like(data, np.nan, dtype=np.float64) # Loop over all pixels and for each pixel: sum the flux densities (count rates) of # the 9x9 surrounding pixels: Craw (counts/s). Calculate the standard deviation in # the 9x9 pixels box. for x in range(5, data.shape[1] - 5): for y in range(5, data.shape[0] - 5): total_flux[y, x] = np.sum(data[y - 4 : y + 5, x - 4 : x + 5]) std[y, x] = np.std(data[y - 4 : y + 5, x - 4 : x + 5]) # Obtain the dead time correction factor and the frame time (in s) from the header # of the image. alpha = header["DEADC"] ft = header["FRAMTIME"] # Calculate the total number of counts in the 9x9 pixels box: x = Craw*ft (counts). # Calculate the minimum and maximum possible number of counts in the 9x9 pixels box. total_counts = ft * total_flux total_counts_min = ft * (total_flux - 81 * std) total_counts_max = ft * (total_flux + 81 * std) # Calculate the polynomial correction factor and the minimum and maximum possible # polynomial correction factor. f = polynomial(total_counts) f_min = polynomial(total_counts_min) f_max = polynomial(total_counts_max) # If alpha*total_counts_max is larger than 1, replace this value by 0.99. Otherwise, # the maximum possible theoretical coincidence-loss-corrected count rate will be NaN # in these pixels. 
if np.sum(alpha * total_counts_max >= 1.0) != 0: print( "Warning: The following pixels have very high fluxes. The uncertainty on " "the correction factor for these pixels is not to be trusted!", np.where(alpha * total_counts_max >= 1.0), ) total_counts_max[alpha * total_counts_max >= 1.0] = 0.99 / alpha # Calculate the theoretical coincidence-loss-corrected count rate: # Ctheory = -ln(1 - alpha*Craw*ft) / (alpha*ft) (counts/s). # Calculate the minimum and maximum possible theoretical coincidence-loss-corrected # count rate. Ctheory = -np.log1p(-alpha * total_counts) / (alpha * ft) Ctheory_min = -np.log1p(-alpha * total_counts_min) / (alpha * ft) Ctheory_max = -np.log1p(-alpha * total_counts_max) / (alpha * ft) # Calculate the coincidence loss correction factor: # Ccorrfactor = Ctheory*f(x)/Craw. # Calculate the minimum and maximum possible coincidence loss correction factor. corrfactor = (Ctheory * f) / total_flux corrfactor_min = (Ctheory_min * f_min) / (total_flux - 81 * std) corrfactor_max = (Ctheory_max * f_max) / (total_flux + 81 * std) # Apply the coincidence loss correction to the data. Apply the minimum and maximum # coincidence loss correction to the data. new_data = corrfactor * data new_data_min = corrfactor_min * data new_data_max = corrfactor_max * data # Calculate the uncertainty and the relative uncertainty on the coincidence loss # correction. Put the relative uncertainty to 0 if the uncertainty is 0 (because in # those pixels the flux is also 0 and the relative uncertainty would be NaN). coicorr_unc = np.maximum( np.abs(new_data - new_data_min), np.abs(new_data_max - new_data) ) coicorr_rel = coicorr_unc / new_data coicorr_rel[coicorr_unc == 0.0] = 0.0 print( "The median coincidence loss correction factor for image " + os.path.basename(filename) + " is " + str(np.nanmedian(corrfactor)) + " and the median relative uncertainty on the corrected data is " + str(np.nanmedian(coicorr_rel)) + "." ) # Adapt the header. Write the corrected data, the applied coincidence loss # correction and the relative uncertainty to a new image. header["PLANE0"] = "primary (counts/s)" header["PLANE1"] = "coincidence loss correction factor" header["PLANE2"] = "relative coincidence loss correction uncertainty (fraction)" datacube = [new_data, corrfactor, coicorr_rel] new_hdu = fits.PrimaryHDU(datacube, header) new_hdu.writeto(filename.replace(".img", "_coi.img"), overwrite=True) print(os.path.basename(filename) + " has been corrected for coincidence loss.") # Function to calculate the empirical polynomial correction to account for the # differences between the observed and theoretical coincidence loss correction: # f(x) = 1 + a1x + a2x**2 + a3x**3 + a4x**4. def polynomial(x): a1 = 0.0658568 a2 = -0.0907142 a3 = 0.0285951 a4 = 0.0308063 return 1 + (a1 * x) + (a2 * x ** 2) + (a3 * x ** 3) + (a4 * x ** 4) # Function for PART 2: Large scale sensitivity correction. def lsscorr(filename): # Open the image and the large scale sensitivity map. hdulist = fits.open(filename) data = hdulist[0].data[0] coicorr = hdulist[0].data[1] coicorr_rel = hdulist[0].data[2] header = hdulist[0].header lss_hdulist = fits.open(filename.replace("nm_coi", "lss")) lss_data = lss_hdulist[1].data # Apply the large scale sensitivity correction to the data. new_data = data / lss_data new_datacube = [new_data, coicorr, coicorr_rel] # Write the corrected data to a new image. 
new_hdu = fits.PrimaryHDU(new_datacube, header) new_hdu.writeto(filename.replace(".img", "lss.img"), overwrite=True) print( os.path.basename(filename) + " has been corrected for large scale sensitivity variations." ) # Function for PART 3: Zero point correction. def zeropoint(filename, param1, param2): # Open the file. hdulist = fits.open(filename) data = hdulist[0].data[0] coicorr = hdulist[0].data[1] coicorr_rel = hdulist[0].data[2] header = hdulist[0].header # Calculate the average date of observation. start_date = datetime.strptime(header["DATE-OBS"].split("T")[0], "%Y-%m-%d").date() end_date = datetime.strptime(header["DATE-END"].split("T")[0], "%Y-%m-%d").date() obs_date = (end_date - start_date) / 2 + start_date # Calculate the number of years that have elapsed since the 1st of January 2005. first_date = date(2005, 1, 1) elapsed_time = obs_date - first_date years_passed = elapsed_time.days / 365.25 # Calculate the zero point correction. zerocorr = 1 + param1 * years_passed + param2 * years_passed ** 2 # Apply the correction to the data. new_data = data / zerocorr # Adapt the header. Write the corrected data to a new image. header["ZPCORR"] = zerocorr datacube = [new_data, coicorr, coicorr_rel] new_hdu = fits.PrimaryHDU(datacube, header) new_hdu.writeto(filename.replace(".img", "zp.img"), overwrite=True) print( os.path.basename(filename) + " has been corrected for sensitivity loss of the detector over time." ) if __name__ == "__main__": exit(main())
[ "numpy.abs", "numpy.full_like", "astropy.io.fits.PrimaryHDU", "argparse.ArgumentParser", "dresscode.utils.load_config", "numpy.where", "numpy.std", "numpy.nanmedian", "os.path.isfile", "numpy.sum", "datetime.date", "os.path.basename", "astropy.io.fits.open", "numpy.log1p" ]
[((371, 387), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (385, 387), False, 'from argparse import ArgumentParser\n'), ((542, 566), 'dresscode.utils.load_config', 'load_config', (['args.config'], {}), '(args.config)\n', (553, 566), False, 'from dresscode.utils import load_config\n'), ((2419, 2438), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (2428, 2438), False, 'from astropy.io import fits\n'), ((2514, 2558), 'numpy.full_like', 'np.full_like', (['data', 'np.nan'], {'dtype': 'np.float64'}), '(data, np.nan, dtype=np.float64)\n', (2526, 2558), True, 'import numpy as np\n'), ((2569, 2613), 'numpy.full_like', 'np.full_like', (['data', 'np.nan'], {'dtype': 'np.float64'}), '(data, np.nan, dtype=np.float64)\n', (2581, 2613), True, 'import numpy as np\n'), ((6540, 6573), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['datacube', 'header'], {}), '(datacube, header)\n', (6555, 6573), False, 'from astropy.io import fits\n'), ((7263, 7282), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (7272, 7282), False, 'from astropy.io import fits\n'), ((7723, 7760), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['new_datacube', 'header'], {}), '(new_datacube, header)\n', (7738, 7760), False, 'from astropy.io import fits\n'), ((8083, 8102), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (8092, 8102), False, 'from astropy.io import fits\n'), ((8615, 8631), 'datetime.date', 'date', (['(2005)', '(1)', '(1)'], {}), '(2005, 1, 1)\n', (8619, 8631), False, 'from datetime import date, datetime\n'), ((9065, 9098), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['datacube', 'header'], {}), '(datacube, header)\n', (9080, 9098), False, 'from astropy.io import fits\n'), ((935, 978), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_um2_nm.img')"], {}), "(yearpath + 'sum_um2_nm.img')\n", (949, 978), False, 'import os\n'), ((1040, 1083), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw2_nm.img')"], {}), "(yearpath + 'sum_uw2_nm.img')\n", (1054, 1083), False, 'import os\n'), ((1145, 1188), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw1_nm.img')"], {}), "(yearpath + 'sum_uw1_nm.img')\n", (1159, 1188), False, 'import os\n'), ((1379, 1426), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_um2_nm_coi.img')"], {}), "(yearpath + 'sum_um2_nm_coi.img')\n", (1393, 1426), False, 'import os\n'), ((1492, 1539), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw2_nm_coi.img')"], {}), "(yearpath + 'sum_uw2_nm_coi.img')\n", (1506, 1539), False, 'import os\n'), ((1605, 1652), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw1_nm_coi.img')"], {}), "(yearpath + 'sum_uw1_nm_coi.img')\n", (1619, 1652), False, 'import os\n'), ((1821, 1871), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_um2_nm_coilss.img')"], {}), "(yearpath + 'sum_um2_nm_coilss.img')\n", (1835, 1871), False, 'import os\n'), ((1964, 2014), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw2_nm_coilss.img')"], {}), "(yearpath + 'sum_uw2_nm_coilss.img')\n", (1978, 2014), False, 'import os\n'), ((2106, 2156), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw1_nm_coilss.img')"], {}), "(yearpath + 'sum_uw1_nm_coilss.img')\n", (2120, 2156), False, 'import os\n'), ((3972, 4011), 'numpy.sum', 'np.sum', (['(alpha * total_counts_max >= 1.0)'], {}), '(alpha * total_counts_max >= 1.0)\n', (3978, 4011), True, 'import numpy as np\n'), ((5681, 5712), 'numpy.abs', 'np.abs', (['(new_data - new_data_min)'], {}), 
'(new_data - new_data_min)\n', (5687, 5712), True, 'import numpy as np\n'), ((5714, 5745), 'numpy.abs', 'np.abs', (['(new_data_max - new_data)'], {}), '(new_data_max - new_data)\n', (5720, 5745), True, 'import numpy as np\n'), ((2934, 2972), 'numpy.sum', 'np.sum', (['data[y - 4:y + 5, x - 4:x + 5]'], {}), '(data[y - 4:y + 5, x - 4:x + 5])\n', (2940, 2972), True, 'import numpy as np\n'), ((3001, 3039), 'numpy.std', 'np.std', (['data[y - 4:y + 5, x - 4:x + 5]'], {}), '(data[y - 4:y + 5, x - 4:x + 5])\n', (3007, 3039), True, 'import numpy as np\n'), ((4208, 4249), 'numpy.where', 'np.where', (['(alpha * total_counts_max >= 1.0)'], {}), '(alpha * total_counts_max >= 1.0)\n', (4216, 4249), True, 'import numpy as np\n'), ((4587, 4618), 'numpy.log1p', 'np.log1p', (['(-alpha * total_counts)'], {}), '(-alpha * total_counts)\n', (4595, 4618), True, 'import numpy as np\n'), ((4653, 4688), 'numpy.log1p', 'np.log1p', (['(-alpha * total_counts_min)'], {}), '(-alpha * total_counts_min)\n', (4661, 4688), True, 'import numpy as np\n'), ((4723, 4758), 'numpy.log1p', 'np.log1p', (['(-alpha * total_counts_max)'], {}), '(-alpha * total_counts_max)\n', (4731, 4758), True, 'import numpy as np\n'), ((6659, 6685), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (6675, 6685), False, 'import os\n'), ((7854, 7880), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (7870, 7880), False, 'import os\n'), ((9191, 9217), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (9207, 9217), False, 'import os\n'), ((6097, 6122), 'numpy.nanmedian', 'np.nanmedian', (['coicorr_rel'], {}), '(coicorr_rel)\n', (6109, 6122), True, 'import numpy as np\n'), ((5982, 6006), 'numpy.nanmedian', 'np.nanmedian', (['corrfactor'], {}), '(corrfactor)\n', (5994, 6006), True, 'import numpy as np\n'), ((5924, 5950), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (5940, 5950), False, 'import os\n')]
import numpy as np import os from scipy.optimize import least_squares, minimize from scipy.special import fresnel def autophase(S): '''Optimize phase of complex data by maximizing the sum of imaginary over sum of real .. math:: \phi = \\arctan \left( \\frac{\sum_i^N \Im(s_i) }{ \sum_i^N \Re(s_i) } \\right) S_{\phi} = S e^{-i \phi} Args: S (numpy.ndarray): Complex data Returns: numpy.ndarray: Automatically phased complex data ''' phase = np.arctan(np.sum(np.imag(S))/np.sum(np.real(S))) S_phased = np.exp(-1j * phase) * S return S_phased def add_noise(S, sigma): '''Add noise to array Args: S (numpy.ndarray): Array to add noise to sigma (float): Standard deviation of noise Returns: numpy.ndarray: Array with noise added ''' S_noisy = S + sigma * np.random.randn(*np.shape(S)) return S_noisy def kernel(t, r, method = 'fresnel', angles = 5000): '''Return the Kernel Matrix. .. math:: K(r,t) = \int_{0}^{\pi/2} \cos(\\theta) \cos[(3 \cos(\\theta)^2 - 1)\omega_{ee} t] d\\theta \omega_{ee} = \\frac{\gamma_e^2\hbar}{r^3} +-------------------+----------------------+ |Method |Description | +===================+======================+ |'fresnel' |Fresnel Integral | +-------------------+----------------------+ |'brute force' |Brute Force Method | +-------------------+----------------------+ Args: t (numpy.ndarray): Array of time values in seconds r (numpy.ndarray): Array of radius (distance) values in meters method (str): Method for calculating the kernel. By default, uses the fresnel integral angles (int): For brute-force kernel, number of angles to average over Returns: numpy.ndarray: Numpy array of kernel. The first dimension is the time dimension. The second dimension is the distance dimension. .. note:: The distance array (r) must have all values greater than zero to generate a proper kernel. .. warning:: The number of angles must be carefully selected to ensure the Kernel matrix properly averages the angles for short distances. Example:: t = np.r_[-0.1e-6:10e-6:1000j] r = np.r_[1.5e-9:10e-9:100j] K = kernel(t,r,angles = 2000) ''' t = t.reshape(-1,1) r = r.reshape(1,-1) K = deer_trace(t,r,angles=angles) return K def load_kernel(filename = 'default_kernel.csv', directory = 'kernels'): '''Import Kernel Matrix ''' full_path = os.path.join(directory, filename) kernel_matrix = np.loadtxt(full_path,delimiter = ',') return kernel_matrix def save_kernel(k, filename, directory = 'kernels'): '''Save Kernel Matrix Args: filename (str): Kernel filename k (numpy.ndarray): Kernel Matrix directory (str): Path to Kernel filename ''' full_path = os.path.join(directory,filename) np.savetxt(full_path,k,delimiter = ',') def background_dist(t): '''Calculate the distance above which P(r) should be zero in background fit. Args: t (numpy.ndarray): Time axes Returns: r (float): Distance value for background fit ''' oscillations = 2. omega_ee = 2.*np.pi * oscillations / np.max(t) r = ((2. * np.pi * 5.204e-20)/omega_ee)**(1./3.) 
return r def deer_trace(t, r, method = 'fresnel', angles=1000): '''Calculate the DEER trace corresponding to a given time axes and distance value +-------------------+----------------------+ |Method |Description | +===================+======================+ |'fresnel' |Fresnel Integral | +-------------------+----------------------+ |'brute force' |Brute Force Method | +-------------------+----------------------+ Args: t (numpy.ndarray): Time axes of DEER trace r (float, int, numpy.ndarray): Distances value or values in meters method (str): Method for calculating deer trace, by default uses fresnel integral angles (int): For brute force method, number of angles to average when generating DEER trace Returns: numpy.ndarray: DEER trace Example:: import numpy as np from matplotlib.pylab import * r = 4e-9 t = np.r[0.:10e-6:1000j] trace = deer_trace(t,r) figure() plot(t,trace) show() ''' omega_ee = 2.*np.pi*(5.204e-20)/(r**3.) if method == 'brute force': theta_array = np.r_[0.:np.pi/2.:1j*angles] trace = np.zeros_like(t) for theta in theta_array: omega = (omega_ee)*(3.*(np.cos(theta)**2.)-1.) trace = trace + np.sin(theta)*np.cos(omega*t) # Normalize by number of angles and Fresnel Integral trace = trace / (angles * (np.sqrt(np.pi/8.))) elif method == 'fresnel': x = np.sqrt(6.*omega_ee*np.abs(t))/ np.sqrt(np.pi) S, C = fresnel(x) trace = np.cos(omega_ee*t)*(C/x) + np.sin(omega_ee*np.abs(t))*(S/x) return trace def background(t, tau, A, B, d = 3.): '''DEER Background function .. math:: A + B e^{- t^{d/3}/\\tau} Args: t (numpy.ndarray): Time axes for background function tau (float): Time constant A (float): Offset B (float): Scaling factor d (float): dimensionality of background function Returns: numpy.ndarray: Background signal ''' background_signal = A + B*np.exp(-1*(np.abs(t)**(d/3.))/tau) return background_signal def background_x0(t, data): '''Guess initial parameters for background function Args: data (numpy.ndarray): Array of data t (numpy.ndarray): Array of axes Returns: list: List of parameters for fit initial guess ''' A = data[-1] B = np.max(data) - A tau = 10e-6 d = 3. x0 = [tau, A, B] return x0 def tikhonov_background(t, r, K, data, background_function = background, r_background = None, lambda_ = 1., L = 'Identity', x0 = None): '''Fit DEER data to background function by forcing P(r) to be zero Args: t (numpy.ndarray): Array of time axis values r (numpy.ndarray): Array of distance values for Kernel K (numpy.ndarray): Kernel matrix data (numpy.ndarray): Array of data values background_function (func): Background function r_background (float): Distance above which P(r) is optimized to zero lambda_ (float): Regularization parameter L (str, numpy.ndarray): Regularization operator, by default Identity for background optimization x0 (list): Initial guess for background fit parameters Returns: numpy.ndarray: Background fit of data ''' # If None, determine r_background based on time trace if r_background == None: r_background = background_dist(t) # If None, initial guess for background function if x0 is None: x0 = background_x0(t, data) def res(x, data, t, r, K, r_background): P_tik = tikhonov(K, (data / background_function(t, *x)) - 1., lambda_ = lambda_, L = L) P_tik[r < r_background] = 0. 
residual = P_tik return residual out = least_squares(res, x0, verbose = 2, args = (data, t, r, K, r_background), method = 'lm') x = out['x'] fit = background_function(t, *x) return fit def exp_background(t, data, background_function = background, t_min = 0., x0 = None): '''Fit DEER data to background function Args: t (numpy.ndarray): Array of time axis values data (numpy.ndarray): Array of data values background_function (func): Background function t_min (float): Start time for fit x0 (list): Initial guess for background fit parameters Returns: numpy.ndarray: Fit of data ''' if x0 == None: x0 = background_x0(t, data) def res(x, t, data): residual = data - background_function(t, *x) return residual # select range of data for fit data_fit = data[t >= t_min] t_fit = t[t >= t_min] out = least_squares(res, x0, verbose = 2, args = (t_fit, data_fit), method = 'lm') x = out['x'] fit = background_function(t,*x) return fit def operator(n, L): '''Return operator for Regularization +-------------------+----------------------+ |Operator (L) |Description | +===================+======================+ |'Identity' |Identity Matrix | +-------------------+----------------------+ |'1st Derivative' |1st Derivative Matrix | +-------------------+----------------------+ |'2nd Derivative' |2nd Derivative Matrix | +-------------------+----------------------+ Args: n (int): Number of points in Kernal distance dimension L (str, numpy.ndarray): String identifying name of operator or numpy array for operator to pass through function Returns: numpy.ndarray: Regularization operator as numpy array ''' if L == 'Identity': L = np.eye(n) elif L == '1st Derivative': L = np.diag(-1.*np.ones(n),k = 0) L += np.diag(1.*np.ones(n-1),k = 1) L = L[:-1,:] elif (L == None) or (L == '2nd Derivative'): L = np.diag(1.*np.ones(n),k = 0) L += np.diag(-2.*np.ones(n-1),k = 1) L += np.diag(1.*np.ones(n-2),k = 2) L = L[:-2,:] elif isinstance(L, str): raise ValueError('Operator string not understood') return L def tikhonov(K, S, lambda_ = 1.0, L = None): '''Perform Tikhonov Regularization .. math:: P_\lambda = {(K^TK + \lambda^2 L^TL)}^{-1} K^TS Args: K (numpy.ndarray): Kernel Matrix S (numpy.ndarray): Experimental DEER trace lambda_ (float): Regularization parameter L (None, numpy.ndarray): Tikhonov regularization operator, uses 2nd derivative if argument is None Returns: numpy.ndarray: Distance distribution from Tikhonov regularization ''' # Select Real Part S = np.real(S) # Set Operator for Tikhonov Regularization n = np.shape(K)[1] # Determine Operator for Regularization L = operator(n, L) P_lambda = np.dot(np.linalg.inv(np.dot(K.T, K)+(lambda_**2.)*np.dot(L.T, L)),np.dot(K.T, S)) return P_lambda def L_curve(K, S, lambda_array, L = None): '''Generate Tikhonov L-curve Args: K (numpy.ndarray): Kernel Matrix S (numpy.ndarray): Experimental DEER trace lambda_ (numpy.ndarray): Array of Regularization parameters L (None, numpy.ndarray): Tikhonov regularization operator, uses 2nd derivative if argument is None Returns: tuple: tuple containing: rho_array (*numpy.ndarray*): Residual Norm eta_array (*numpy.ndarray*): Solution Norm ''' rho_list = [] eta_list = [] for lambda_ in lambda_array: P_lambda = tikhonov(K, S, lambda_, L = L) rho_list.append(np.linalg.norm(S - np.dot(K, P_lambda))) eta_list.append(np.linalg.norm(P_lambda)) rho_array = np.array(rho_list) eta_array = np.array(eta_list) return rho_array, eta_array def maximum_entropy(K, S, lambda_): '''Maximum Entropy method for determining P(r) .. 
math:: \Phi_{ME}[P] = \|K P(r) - S\|^2 + \lambda^2 \\times \int [P(r) \ln \\frac{P(r)}{P_0(r)} + \\frac{P_0(r)}{e}] dr \\Rightarrow \min Args: K (numpy.ndarray): Kernel Matrix S (numpy.ndarray): Data array lambda_ (float): Regularization parameter Returns: numpy.ndarray: Distance distribution minimized by maximum entropy method. ''' def min_func(P, K, S, lambda_): res = np.linalg.norm(np.dot(K, P) - S)**2. + (lambda_**2.)*np.sum((P*np.log((P/x0)) + x0/np.exp(1))) return res x0 = tikhonov(K, S, lambda_) x0[x0<=0.] = 1.e-5 n = np.shape(K)[1] bounds = tuple(zip(1e-15*np.ones(n),np.inf*np.ones(n))) output = minimize(min_func, x0, args = (K, S, lambda_), method = 'SLSQP', bounds = bounds, options = {'disp':True}) P_lambda = output['x'] return P_lambda def model_free(K, S, lambda_ = 1., L = None): '''Model Free P(r) with non-negative constraints .. math:: \Phi_{MF}[P(r)] = \|K P(r) - S\|^2 + \lambda^2 \| LP(r) \|^2 \\Rightarrow \min Args: K (numpy.ndarray): Kernel Matrix S (numpy.ndarray): Data array lambda_ (float): Regularization parameter L (str, numpy.ndarray): Operator for regularization Returns: numpy.ndarray: Distance distribution from model free fit ''' def min_func(P, K, S, lambda_, L): res = np.linalg.norm(np.dot(K, P) - S)**2. + (lambda_**2.) * (np.linalg.norm(np.dot(L, P))**2.) return res n = np.shape(K)[1] # Determine Operator for Regularization L = operator(n, L) x0 = tikhonov(K, S, lambda_) x0[x0<=0.] = 1.e-5 bounds = tuple(zip(np.zeros(len(x0)), np.inf*np.ones(len(x0)))) output = minimize(min_func, x0, args = (K, S, lambda_, L), bounds = bounds, options = {'disp':True}) P_lambda = output['x'] return P_lambda def gaussian(r, sigma, mu, Normalize = False): '''Return Gaussian Distribution from given distance array, standard deviation, and mean distance If Normalize = True: .. math:: \\frac{1}{\sqrt{2 \pi {\sigma}^2}} e^{-{(r-\mu)}^2/(2\sigma^2)} If Normalize = False: .. math:: e^{-{(r-\mu)}^2/(2\sigma^2)} Args: r (numpy.ndarray): Numpy array of distance values sigma (float): Standard deviation mu (float): Mean distance Normalize (bool): If True, the integral of Gaussian is normalized to 1 Returns: numpy.ndarray: Gaussian distribution ''' if Normalize: gaussian_dist = (1./(np.sqrt(2*np.pi*(sigma**2.)))) * np.exp(-1*(r-mu)**2./(2.*(sigma**2.))) else: gaussian_dist = np.exp(-1*(r-mu)**2./(2.*(sigma**2.))) return gaussian_dist def gaussians(r, x): '''Return sum of Gaussian distributions from given distance array and list of lists defining amplitude, standard deviation, and mean distance for each Gaussian .. math:: \sum_{i = 1}^{N} A_i e^{-{(r-\mu_i)}^2/(2\sigma_i^2)} Args: r (numpy.ndarray): Numpy array of distance values x (list): list of lists. Each gaussian is definied by a list of 3 parameters. The parameters are ordered: A - amplitude, sigma - standard deviation, mu - center of distribution. Returns: numpy.ndarray: Gaussian distribution ''' gaussian_dist = np.zeros(len(r)) for gaussian_parameters in x: A = gaussian_parameters[0] sigma = gaussian_parameters[1] mu = gaussian_parameters[2] gaussian_dist += (A * gaussian(r, sigma, mu)) return gaussian_dist def model_gaussian(K, S, r, x0 = None): '''Gaussian based fit for distance distribution Args: K (numpy.ndarray): Kernel Matrix S (numpy.ndarray): Data array r (numpy.ndarray): Array of distance values x0 (None, List): Initial guess. 
If None, the initial guess is automatically chosen based on Tikhonov regularization P(r) Returns: tuple: tuple containing: P_gauss (*numpy.ndarray*): distance distribution x_fit (*dict*): Dictionary of fitting parameters ''' def min_func(x, K, S, r): A = x[0] sigma = x[1] mu = x[2] P_fit = A*gaussian(r, sigma, mu) S_fit = np.dot(K, P_fit) res = sum((S_fit - S)**2.) return res bounds = tuple(zip(np.zeros(3), np.inf*np.ones(3))) if x0 == None: # Find initial guess based on Tikhonov Regularization P_lambda = tikhonov(K, S, lambda_ = 1.0, L = None) A_0 = np.max(P_lambda) # Amplitude is maximum value sigma_0 = 0.2e-9 # Sigma is this value mu_0 = r[np.argmax(P_lambda)] # center is maximum def guess_min_func(x, P_lambda, r): A = x[0] sigma = x[1] mu = x[2] res = sum((A * gaussian(r,sigma,mu) - P_lambda)**2.) return res x0 = [A_0, sigma_0, mu_0] guess_output = minimize(guess_min_func, x0, args = (P_lambda, r),method = 'Nelder-Mead', bounds = bounds, options = {'disp':True}) A_0 = guess_output['x'][0] sigma_0 = guess_output['x'][1] mu_0 = guess_output['x'][2] x0 = [A_0,sigma_0,mu_0] # output = minimize(min_func, x0, args = (K, S, r), bounds = bounds, options = {'disp':True}) output = minimize(min_func, x0, args = (K, S, r), method = 'Nelder-Mead', options = {'disp':True}) A = output['x'][0] sigma = output['x'][1] mu = output['x'][2] P_gauss = A * gaussian(r, sigma, mu) x_fit = {} x_fit['A'] = A x_fit['sigma'] = sigma x_fit['mu'] = mu return P_gauss, x_fit def svd(K, S, cutoff = None): '''Performs SVD on Kernel Matrix, singular values above cutoff index are set to zero, then calculates distance distribution with cutoff applied pseudo inverse. .. math:: S = K P S = U \Sigma V^T P P = V \Sigma^{-1} U^T S Args: K (numpy.ndarray): Kernel S (numpy.ndarray): Data array cutoff (int): Number of singular values to include. None correponds to including all singular values (no cutoff applied). 
Returns: P (*numpy.ndarray*): Distance distribution array ''' if cutoff is not None: cutoff = int(cutoff) # Perform SVD on Kernel U, singular_values, V = np.linalg.svd(K) # Apply Cutoff to singular values singular_values[cutoff:] = 0 # Construct matrix of singular values m, n = np.shape(K) sigma = np.zeros((m, n)) sigma[:int(min(m, n)),:int(min(m, n))] = np.diag(singular_values) # Inverse Matrix from SVD with cutoff applied A = np.dot(V.T, np.dot(np.linalg.pinv(sigma), U.T)) # Calculate P(r) P = np.dot(A, S) return P def zero_time(t, S, method = 'polyfit', **kwargs): '''Shift DEER data to correct zero time offset +-------------------+----------------------------------------------+ |Method |Description | +===================+==============================================+ |'max' |Set zero time to maximum of data | +-------------------+----------------------------------------------+ |'polyfit' |polynomial fit about time zero | +-------------------+----------------------------------------------+ Parameters for 'polyfit' Method: +-------------------+-------------------------------------------------------+------------+ |Argument |Description |Default | +===================+=======================================================+============+ |'time_width' |Time width about zero for polynomial fit (in seconds) | 100e-9 | +-------------------+-------------------------------------------------------+------------+ |'deg' |degree of polynomial fit | 3 | +-------------------+-------------------------------------------------------+------------+ Args: t (numpy.ndarray): Time axes S (numpy.ndarray): Data array method (str): Method to use for zero time correction Returns: tuple: tuple containing *numpy.ndarray*: Shifted time axes *numpy.ndarray*: Data array ''' if method == 'max': ix = np.argmax(S) t = t - t[ix] elif method == 'polyfit': if 'time_width' in kwargs: time_width = kwargs.pop('time_width') else: time_width = 100e-9 if 'deg' in kwargs: deg = kwargs.pop('deg') else: deg = 3. t_ix_min = np.argmin(np.abs(t + time_width/2.)) t_ix_max = np.argmin(np.abs(t - time_width/2.)) t_fit = t[t_ix_min:t_ix_max] S_fit = S[t_ix_min:t_ix_max] p = np.polyfit(t_fit, S_fit, deg) pder = np.polyder(p) near_zero_root = np.min(np.abs(np.roots(pder))) t = t - near_zero_root return t, S def truncate(t, S, t_truncate): '''Truncate time axes and data at given time Args: t (numpy.ndarray): Time axes S (numpy.ndarray): Data Axes t_truncate (float): time to trunctate data after Returns: tuple: tuple containing *numpy.ndarray*: Truncated time axes *numpy.ndarray*: Truncated data axes ''' ix = np.argmin(np.abs(t - t_truncate)) t = t[:ix] S = S[:ix] return t, S if __name__ == '__main__': pass
[ "numpy.sqrt", "numpy.linalg.pinv", "numpy.polyfit", "numpy.log", "numpy.roots", "numpy.array", "numpy.linalg.norm", "numpy.sin", "numpy.imag", "scipy.optimize.least_squares", "numpy.max", "numpy.exp", "numpy.real", "numpy.dot", "scipy.special.fresnel", "numpy.abs", "numpy.eye", "numpy.ones", "scipy.optimize.minimize", "numpy.argmax", "numpy.polyder", "numpy.cos", "numpy.savetxt", "numpy.linalg.svd", "numpy.shape", "os.path.join", "numpy.diag", "numpy.zeros", "numpy.loadtxt", "numpy.zeros_like" ]
[((2633, 2666), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (2645, 2666), False, 'import os\n'), ((2687, 2723), 'numpy.loadtxt', 'np.loadtxt', (['full_path'], {'delimiter': '""","""'}), "(full_path, delimiter=',')\n", (2697, 2723), True, 'import numpy as np\n'), ((2995, 3028), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (3007, 3028), False, 'import os\n'), ((3033, 3072), 'numpy.savetxt', 'np.savetxt', (['full_path', 'k'], {'delimiter': '""","""'}), "(full_path, k, delimiter=',')\n", (3043, 3072), True, 'import numpy as np\n'), ((7374, 7460), 'scipy.optimize.least_squares', 'least_squares', (['res', 'x0'], {'verbose': '(2)', 'args': '(data, t, r, K, r_background)', 'method': '"""lm"""'}), "(res, x0, verbose=2, args=(data, t, r, K, r_background),\n method='lm')\n", (7387, 7460), False, 'from scipy.optimize import least_squares, minimize\n'), ((8263, 8333), 'scipy.optimize.least_squares', 'least_squares', (['res', 'x0'], {'verbose': '(2)', 'args': '(t_fit, data_fit)', 'method': '"""lm"""'}), "(res, x0, verbose=2, args=(t_fit, data_fit), method='lm')\n", (8276, 8333), False, 'from scipy.optimize import least_squares, minimize\n'), ((10226, 10236), 'numpy.real', 'np.real', (['S'], {}), '(S)\n', (10233, 10236), True, 'import numpy as np\n'), ((11283, 11301), 'numpy.array', 'np.array', (['rho_list'], {}), '(rho_list)\n', (11291, 11301), True, 'import numpy as np\n'), ((11318, 11336), 'numpy.array', 'np.array', (['eta_list'], {}), '(eta_list)\n', (11326, 11336), True, 'import numpy as np\n'), ((12176, 12279), 'scipy.optimize.minimize', 'minimize', (['min_func', 'x0'], {'args': '(K, S, lambda_)', 'method': '"""SLSQP"""', 'bounds': 'bounds', 'options': "{'disp': True}"}), "(min_func, x0, args=(K, S, lambda_), method='SLSQP', bounds=bounds,\n options={'disp': True})\n", (12184, 12279), False, 'from scipy.optimize import least_squares, minimize\n'), ((13216, 13307), 'scipy.optimize.minimize', 'minimize', (['min_func', 'x0'], {'args': '(K, S, lambda_, L)', 'bounds': 'bounds', 'options': "{'disp': True}"}), "(min_func, x0, args=(K, S, lambda_, L), bounds=bounds, options={\n 'disp': True})\n", (13224, 13307), False, 'from scipy.optimize import least_squares, minimize\n'), ((16821, 16910), 'scipy.optimize.minimize', 'minimize', (['min_func', 'x0'], {'args': '(K, S, r)', 'method': '"""Nelder-Mead"""', 'options': "{'disp': True}"}), "(min_func, x0, args=(K, S, r), method='Nelder-Mead', options={\n 'disp': True})\n", (16829, 16910), False, 'from scipy.optimize import least_squares, minimize\n'), ((17831, 17847), 'numpy.linalg.svd', 'np.linalg.svd', (['K'], {}), '(K)\n', (17844, 17847), True, 'import numpy as np\n'), ((17974, 17985), 'numpy.shape', 'np.shape', (['K'], {}), '(K)\n', (17982, 17985), True, 'import numpy as np\n'), ((17998, 18014), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (18006, 18014), True, 'import numpy as np\n'), ((18060, 18084), 'numpy.diag', 'np.diag', (['singular_values'], {}), '(singular_values)\n', (18067, 18084), True, 'import numpy as np\n'), ((18222, 18234), 'numpy.dot', 'np.dot', (['A', 'S'], {}), '(A, S)\n', (18228, 18234), True, 'import numpy as np\n'), ((569, 590), 'numpy.exp', 'np.exp', (['(-1.0j * phase)'], {}), '(-1.0j * phase)\n', (575, 590), True, 'import numpy as np\n'), ((3367, 3376), 'numpy.max', 'np.max', (['t'], {}), '(t)\n', (3373, 3376), True, 'import numpy as np\n'), ((4681, 4697), 'numpy.zeros_like', 'np.zeros_like', (['t'], {}), '(t)\n', (4694, 4697), 
True, 'import numpy as np\n'), ((5963, 5975), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (5969, 5975), True, 'import numpy as np\n'), ((9230, 9239), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (9236, 9239), True, 'import numpy as np\n'), ((10293, 10304), 'numpy.shape', 'np.shape', (['K'], {}), '(K)\n', (10301, 10304), True, 'import numpy as np\n'), ((10458, 10472), 'numpy.dot', 'np.dot', (['K.T', 'S'], {}), '(K.T, S)\n', (10464, 10472), True, 'import numpy as np\n'), ((12086, 12097), 'numpy.shape', 'np.shape', (['K'], {}), '(K)\n', (12094, 12097), True, 'import numpy as np\n'), ((12993, 13004), 'numpy.shape', 'np.shape', (['K'], {}), '(K)\n', (13001, 13004), True, 'import numpy as np\n'), ((14147, 14198), 'numpy.exp', 'np.exp', (['(-1 * (r - mu) ** 2.0 / (2.0 * sigma ** 2.0))'], {}), '(-1 * (r - mu) ** 2.0 / (2.0 * sigma ** 2.0))\n', (14153, 14198), True, 'import numpy as np\n'), ((15765, 15781), 'numpy.dot', 'np.dot', (['K', 'P_fit'], {}), '(K, P_fit)\n', (15771, 15781), True, 'import numpy as np\n'), ((16041, 16057), 'numpy.max', 'np.max', (['P_lambda'], {}), '(P_lambda)\n', (16047, 16057), True, 'import numpy as np\n'), ((16450, 16563), 'scipy.optimize.minimize', 'minimize', (['guess_min_func', 'x0'], {'args': '(P_lambda, r)', 'method': '"""Nelder-Mead"""', 'bounds': 'bounds', 'options': "{'disp': True}"}), "(guess_min_func, x0, args=(P_lambda, r), method='Nelder-Mead',\n bounds=bounds, options={'disp': True})\n", (16458, 16563), False, 'from scipy.optimize import least_squares, minimize\n'), ((19912, 19924), 'numpy.argmax', 'np.argmax', (['S'], {}), '(S)\n', (19921, 19924), True, 'import numpy as np\n'), ((20976, 20998), 'numpy.abs', 'np.abs', (['(t - t_truncate)'], {}), '(t - t_truncate)\n', (20982, 20998), True, 'import numpy as np\n'), ((5071, 5081), 'scipy.special.fresnel', 'fresnel', (['x'], {}), '(x)\n', (5078, 5081), False, 'from scipy.special import fresnel\n'), ((11236, 11260), 'numpy.linalg.norm', 'np.linalg.norm', (['P_lambda'], {}), '(P_lambda)\n', (11250, 11260), True, 'import numpy as np\n'), ((14074, 14125), 'numpy.exp', 'np.exp', (['(-1 * (r - mu) ** 2.0 / (2.0 * sigma ** 2.0))'], {}), '(-1 * (r - mu) ** 2.0 / (2.0 * sigma ** 2.0))\n', (14080, 14125), True, 'import numpy as np\n'), ((15861, 15872), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (15869, 15872), True, 'import numpy as np\n'), ((16151, 16170), 'numpy.argmax', 'np.argmax', (['P_lambda'], {}), '(P_lambda)\n', (16160, 16170), True, 'import numpy as np\n'), ((18163, 18184), 'numpy.linalg.pinv', 'np.linalg.pinv', (['sigma'], {}), '(sigma)\n', (18177, 18184), True, 'import numpy as np\n'), ((20412, 20441), 'numpy.polyfit', 'np.polyfit', (['t_fit', 'S_fit', 'deg'], {}), '(t_fit, S_fit, deg)\n', (20422, 20441), True, 'import numpy as np\n'), ((20458, 20471), 'numpy.polyder', 'np.polyder', (['p'], {}), '(p)\n', (20468, 20471), True, 'import numpy as np\n'), ((521, 531), 'numpy.imag', 'np.imag', (['S'], {}), '(S)\n', (528, 531), True, 'import numpy as np\n'), ((540, 550), 'numpy.real', 'np.real', (['S'], {}), '(S)\n', (547, 550), True, 'import numpy as np\n'), ((4946, 4966), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 8.0)'], {}), '(np.pi / 8.0)\n', (4953, 4966), True, 'import numpy as np\n'), ((5040, 5054), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (5047, 5054), True, 'import numpy as np\n'), ((10413, 10427), 'numpy.dot', 'np.dot', (['K.T', 'K'], {}), '(K.T, K)\n', (10419, 10427), True, 'import numpy as np\n'), ((12131, 12141), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (12138, 12141), True, 
'import numpy as np\n'), ((12149, 12159), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (12156, 12159), True, 'import numpy as np\n'), ((14041, 14074), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma ** 2.0)'], {}), '(2 * np.pi * sigma ** 2.0)\n', (14048, 14074), True, 'import numpy as np\n'), ((15881, 15891), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (15888, 15891), True, 'import numpy as np\n'), ((20241, 20269), 'numpy.abs', 'np.abs', (['(t + time_width / 2.0)'], {}), '(t + time_width / 2.0)\n', (20247, 20269), True, 'import numpy as np\n'), ((20297, 20325), 'numpy.abs', 'np.abs', (['(t - time_width / 2.0)'], {}), '(t - time_width / 2.0)\n', (20303, 20325), True, 'import numpy as np\n'), ((901, 912), 'numpy.shape', 'np.shape', (['S'], {}), '(S)\n', (909, 912), True, 'import numpy as np\n'), ((4819, 4832), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4825, 4832), True, 'import numpy as np\n'), ((4833, 4850), 'numpy.cos', 'np.cos', (['(omega * t)'], {}), '(omega * t)\n', (4839, 4850), True, 'import numpy as np\n'), ((5099, 5119), 'numpy.cos', 'np.cos', (['(omega_ee * t)'], {}), '(omega_ee * t)\n', (5105, 5119), True, 'import numpy as np\n'), ((9296, 9306), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (9303, 9306), True, 'import numpy as np\n'), ((9338, 9352), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (9345, 9352), True, 'import numpy as np\n'), ((10442, 10456), 'numpy.dot', 'np.dot', (['L.T', 'L'], {}), '(L.T, L)\n', (10448, 10456), True, 'import numpy as np\n'), ((11190, 11209), 'numpy.dot', 'np.dot', (['K', 'P_lambda'], {}), '(K, P_lambda)\n', (11196, 11209), True, 'import numpy as np\n'), ((20512, 20526), 'numpy.roots', 'np.roots', (['pder'], {}), '(pder)\n', (20520, 20526), True, 'import numpy as np\n'), ((5028, 5037), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (5034, 5037), True, 'import numpy as np\n'), ((9451, 9461), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (9458, 9461), True, 'import numpy as np\n'), ((9494, 9508), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (9501, 9508), True, 'import numpy as np\n'), ((9538, 9552), 'numpy.ones', 'np.ones', (['(n - 2)'], {}), '(n - 2)\n', (9545, 9552), True, 'import numpy as np\n'), ((11921, 11933), 'numpy.dot', 'np.dot', (['K', 'P'], {}), '(K, P)\n', (11927, 11933), True, 'import numpy as np\n'), ((12890, 12902), 'numpy.dot', 'np.dot', (['K', 'P'], {}), '(K, P)\n', (12896, 12902), True, 'import numpy as np\n'), ((12946, 12958), 'numpy.dot', 'np.dot', (['L', 'P'], {}), '(L, P)\n', (12952, 12958), True, 'import numpy as np\n'), ((4768, 4781), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4774, 4781), True, 'import numpy as np\n'), ((5142, 5151), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (5148, 5151), True, 'import numpy as np\n'), ((5626, 5635), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (5632, 5635), True, 'import numpy as np\n'), ((11969, 11983), 'numpy.log', 'np.log', (['(P / x0)'], {}), '(P / x0)\n', (11975, 11983), True, 'import numpy as np\n'), ((11989, 11998), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (11995, 11998), True, 'import numpy as np\n')]
import os import cv2 import numpy as np from PIL import Image from IPython.display import Video from IPython.display import display as ds DESTINATION_FOLDER = "results" def check_folder(folder): if not os.path.exists(folder): os.makedirs(folder) def display(images_array, save=False): for im in images_array: nparr = np.fromstring(im, np.uint8) image = cv2.imdecode(nparr, cv2.IMREAD_COLOR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) ds(Image.fromarray(image)) if save: check_folder(DESTINATION_FOLDER) counter = 0 for im in images_array: img_file = DESTINATION_FOLDER + '/res_' + str(counter) + '.jpg' counter += 1 fd = open(img_file, 'wb') fd.write(im) fd.close() def draw_bboxes(image, boxes=[], tags=[], save=False): nparr = np.fromstring(image, np.uint8) cv_image = cv2.imdecode(nparr, cv2.IMREAD_COLOR) # Draw a rectangle around the faces counter = 0 for coords in boxes: left = coords["x"] top = coords["y"] right = coords["x"] + coords["width"] bottom = coords["y"] + coords["height"] cv2.rectangle(cv_image, (left, top), (right, bottom), (0, 255, 0), 2) y = top - 15 if top - 15 > 15 else top + 15 cv2.putText(cv_image, tags[counter], (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2) counter += 1 cv_image_rgb = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB) ds(Image.fromarray(cv_image_rgb)) if save: check_folder(DESTINATION_FOLDER) img_file = DESTINATION_FOLDER + '/res_bboxes.jpg' cv2.imwrite(img_file, cv_image) def display_video_mp4(blob): check_folder(DESTINATION_FOLDER) name = DESTINATION_FOLDER + "/" + "video_tmp.mp4" fd = open(name, 'wb') fd.write(blob) fd.close() ds(Video(name, embed=True))
[ "cv2.rectangle", "os.path.exists", "PIL.Image.fromarray", "cv2.imwrite", "os.makedirs", "IPython.display.Video", "cv2.putText", "cv2.imdecode", "cv2.cvtColor", "numpy.fromstring" ]
[((888, 918), 'numpy.fromstring', 'np.fromstring', (['image', 'np.uint8'], {}), '(image, np.uint8)\n', (901, 918), True, 'import numpy as np\n'), ((934, 971), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_COLOR'], {}), '(nparr, cv2.IMREAD_COLOR)\n', (946, 971), False, 'import cv2\n'), ((1482, 1523), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_image', 'cv2.COLOR_BGR2RGB'], {}), '(cv_image, cv2.COLOR_BGR2RGB)\n', (1494, 1523), False, 'import cv2\n'), ((209, 231), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (223, 231), False, 'import os\n'), ((245, 264), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (256, 264), False, 'import os\n'), ((350, 377), 'numpy.fromstring', 'np.fromstring', (['im', 'np.uint8'], {}), '(im, np.uint8)\n', (363, 377), True, 'import numpy as np\n'), ((394, 431), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_COLOR'], {}), '(nparr, cv2.IMREAD_COLOR)\n', (406, 431), False, 'import cv2\n'), ((448, 486), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (460, 486), False, 'import cv2\n'), ((1215, 1284), 'cv2.rectangle', 'cv2.rectangle', (['cv_image', '(left, top)', '(right, bottom)', '(0, 255, 0)', '(2)'], {}), '(cv_image, (left, top), (right, bottom), (0, 255, 0), 2)\n', (1228, 1284), False, 'import cv2\n'), ((1345, 1445), 'cv2.putText', 'cv2.putText', (['cv_image', 'tags[counter]', '(left, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.75)', '(0, 255, 0)', '(2)'], {}), '(cv_image, tags[counter], (left, y), cv2.FONT_HERSHEY_SIMPLEX, \n 0.75, (0, 255, 0), 2)\n', (1356, 1445), False, 'import cv2\n'), ((1531, 1560), 'PIL.Image.fromarray', 'Image.fromarray', (['cv_image_rgb'], {}), '(cv_image_rgb)\n', (1546, 1560), False, 'from PIL import Image\n'), ((1683, 1714), 'cv2.imwrite', 'cv2.imwrite', (['img_file', 'cv_image'], {}), '(img_file, cv_image)\n', (1694, 1714), False, 'import cv2\n'), ((1906, 1929), 'IPython.display.Video', 'Video', (['name'], {'embed': '(True)'}), '(name, embed=True)\n', (1911, 1929), False, 'from IPython.display import Video\n'), ((498, 520), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (513, 520), False, 'from PIL import Image\n')]
import os
import numpy as np
import random
import numbers
import skimage
from skimage import io, color
import torch


# read uint8 image from path
def imread_uint8(imgpath, mode='RGB'):
    '''
    mode: 'RGB', 'gray', 'Y', 'L'.
    'Y' and 'L' mean the Y channel of YCbCr.
    '''
    if mode == 'RGB':
        img = io.imread(imgpath)
    elif mode == 'gray':
        img = io.imread(imgpath, as_gray=True)
        img = skimage.img_as_ubyte(img)
    elif mode in ['Y', 'L']:
        # Y channel of YCbCr
        # Note: The skimage.color.rgb2ycbcr() function is the same with that of matlab,
        # PIL.Image.convert('YCbCr') is not.
        img = io.imread(imgpath)
        if img.ndim == 3:
            img = color.rgb2ycbcr(img)[:, :, 0]
        img = img.round().astype(np.uint8)
    return img


def augment_img(img, mode='8'):
    '''flip and/or rotate the image randomly'''
    if mode == '2':
        mode = random.randint(0, 1)
    elif mode == '4':
        mode = random.randint(0, 3)
    elif mode == '8':
        mode = random.randint(0, 7)
    else:
        mode = 0
    if mode == 0:
        return img
    elif mode == 1:
        return np.fliplr(img)
    elif mode == 2:
        return np.rot90(img, k=2)
    elif mode == 3:
        return np.fliplr(np.rot90(img, k=2))
    elif mode == 4:
        return np.rot90(img, k=1)
    elif mode == 5:
        return np.fliplr(np.rot90(img, k=1))
    elif mode == 6:
        return np.rot90(img, k=3)
    elif mode == 7:
        return np.fliplr(np.rot90(img, k=3))


def random_crop(img, size):
    '''crop image patch randomly'''
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    h, w = img.shape[0:2]
    ph, pw = size
    rnd_h = random.randint(0, h - ph)
    rnd_w = random.randint(0, w - pw)
    img_patch = img[rnd_h:rnd_h + ph, rnd_w:rnd_w + pw, ...]
    return img_patch


def uint2tensor(img, normalized=True):
    if img.ndim == 2:
        img = img[:, :, np.newaxis]
    img = skimage.img_as_float32(img)
    if normalized:
        img = (img - 0.5) / 0.5
    img = torch.from_numpy(np.ascontiguousarray(img.transpose(2, 0, 1))).float()
    return img


def tensor2uint(img, normalized=True):
    img = img.data.squeeze().cpu().numpy().astype(np.float32)
    if img.ndim == 3:
        img = img.transpose(1, 2, 0)
    elif img.ndim == 4:
        img = img.transpose(0, 2, 3, 1)
    if normalized:
        img = img * 0.5 + 0.5
    img = img.clip(0, 1) * 255
    img = img.round().astype(np.uint8)
    return img


def tensor3to4(tensor):
    return tensor.unsqueeze(0)


def mkdir(path):
    if not os.path.exists(path):
        os.makedirs(path)
[ "os.path.exists", "os.makedirs", "skimage.color.rgb2ycbcr", "numpy.fliplr", "skimage.img_as_float32", "skimage.io.imread", "numpy.rot90", "skimage.img_as_ubyte", "random.randint" ]
[((1733, 1758), 'random.randint', 'random.randint', (['(0)', '(h - ph)'], {}), '(0, h - ph)\n', (1747, 1758), False, 'import random\n'), ((1771, 1796), 'random.randint', 'random.randint', (['(0)', '(w - pw)'], {}), '(0, w - pw)\n', (1785, 1796), False, 'import random\n'), ((1988, 2015), 'skimage.img_as_float32', 'skimage.img_as_float32', (['img'], {}), '(img)\n', (2010, 2015), False, 'import skimage\n'), ((317, 335), 'skimage.io.imread', 'io.imread', (['imgpath'], {}), '(imgpath)\n', (326, 335), False, 'from skimage import io, color\n'), ((922, 942), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (936, 942), False, 'import random\n'), ((2610, 2630), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2624, 2630), False, 'import os\n'), ((2640, 2657), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2651, 2657), False, 'import os\n'), ((375, 407), 'skimage.io.imread', 'io.imread', (['imgpath'], {'as_gray': '(True)'}), '(imgpath, as_gray=True)\n', (384, 407), False, 'from skimage import io, color\n'), ((422, 447), 'skimage.img_as_ubyte', 'skimage.img_as_ubyte', (['img'], {}), '(img)\n', (442, 447), False, 'import skimage\n'), ((980, 1000), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (994, 1000), False, 'import random\n'), ((1159, 1173), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (1168, 1173), True, 'import numpy as np\n'), ((653, 671), 'skimage.io.imread', 'io.imread', (['imgpath'], {}), '(imgpath)\n', (662, 671), False, 'from skimage import io, color\n'), ((1038, 1058), 'random.randint', 'random.randint', (['(0)', '(7)'], {}), '(0, 7)\n', (1052, 1058), False, 'import random\n'), ((1209, 1227), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(2)'}), '(img, k=2)\n', (1217, 1227), True, 'import numpy as np\n'), ((716, 736), 'skimage.color.rgb2ycbcr', 'color.rgb2ycbcr', (['img'], {}), '(img)\n', (731, 736), False, 'from skimage import io, color\n'), ((1273, 1291), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(2)'}), '(img, k=2)\n', (1281, 1291), True, 'import numpy as np\n'), ((1328, 1346), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(1)'}), '(img, k=1)\n', (1336, 1346), True, 'import numpy as np\n'), ((1392, 1410), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(1)'}), '(img, k=1)\n', (1400, 1410), True, 'import numpy as np\n'), ((1447, 1465), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(3)'}), '(img, k=3)\n', (1455, 1465), True, 'import numpy as np\n'), ((1511, 1529), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(3)'}), '(img, k=3)\n', (1519, 1529), True, 'import numpy as np\n')]
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Routines related to the canonical Chandra ACA dark current model.

The model is based on smoothed twice-broken power-law fits of dark current
histograms from Jan-2007 through Aug-2017. This analysis was done entirely
with dark current maps scaled to -14 C.

See: /proj/sot/ska/analysis/dark_current_model/dark_model.ipynb
and other files in that directory.

Alternatively:
http://nbviewer.ipython.org/url/asc.harvard.edu/mta/ASPECT/analysis/dark_current_model/dark_model.ipynb
"""

import numpy as np
import warnings

from Chandra.Time import DateTime

# Define a common fixed binning of dark current distribution
from . import darkbins

# Global cache (e.g. for initial dark current in synthetic_dark_image)
CACHE = {}

# Some constants and globals. Done this way to support sherpa fitting.
# Needs to be re-worked to be nicer.

# Fixed gaussian for smoothing the broken power law
dx = 0.1
sigma = 0.30  # Gaussian sigma in log space
xg = np.arange(-2.5 * sigma, 2.5 * sigma, dx, dtype=float)
yg = np.exp(-0.5 * (xg / sigma) ** 2)
yg /= np.sum(yg)

NPIX = 1024 ** 2

# Fixed
xbins = darkbins.bins
xall = darkbins.bin_centers
imin = 0
imax = len(xall)

# Warm threshold used in fitting acq prob model. This constant is
# not used in any configured code, but leave here just in case.
warm_threshold = 100.

# Increase in dark current per 4 degC increase in T_ccd
DARK_SCALE_4C = 1.0 / 0.70


def dark_temp_scale(t_ccd, t_ccd_ref=-19.0, scale_4c=None):
    """Return the multiplicative scale factor to convert a CCD dark map
    or dark current value from temperature ``t_ccd`` to temperature
    ``t_ccd_ref``::

      scale = scale_4c ** ((t_ccd_ref - t_ccd) / 4.0)

    In other words, if you have a dark current value that corresponds to ``t_ccd``
    and need the value at a different temperature ``t_ccd_ref`` then use the
    following. Do not be misled by the misleading parameter names.

      >>> from chandra_aca.dark_scale import dark_temp_scale
      >>> scale = dark_temp_scale(t_ccd, t_ccd_ref, scale_4c)
      >>> dark_curr_at_t_ccd_ref = scale * dark_curr_at_t_ccd

    The default value for ``scale_4c`` is 1.0 / 0.7. It is written this way
    because the equation was previously expressed using 1 / scale_4c with a
    value of 0.7. This value is based on best global fit for dark current
    model in `plot_predicted_warmpix.py`. This represents the multiplicative
    change in dark current for each 4 degC increase::

      >>> dark_temp_scale(t_ccd=-18, t_ccd_ref=-10, scale_4c=2.0)
      4.0

    :param t_ccd: actual temperature (degC)
    :param t_ccd_ref: reference temperature (degC, default=-19.0)
    :param scale_4c: increase in dark current per 4 degC increase (default=1.0 / 0.7)

    :returns: scale factor
    """
    if scale_4c is None:
        scale_4c = DARK_SCALE_4C

    return scale_4c ** ((t_ccd_ref - t_ccd) / 4.0)


def get_dark_hist(date, t_ccd):
    """
    Return the dark current histogram corresponding to ``date`` and ``t_ccd``.

    :param date: date in any DateTime format
    :param t_ccd: CCD temperature (deg C)

    :returns: bin_centers, bins, darkhist
    """
    pars = get_sbp_pars(date)
    x = darkbins.bin_centers
    y = smooth_twice_broken_pow(pars, x)

    # Model params are calibrated using reference temp. -14 C
    scale = dark_temp_scale(-14, t_ccd)
    xbins = darkbins.bins * scale
    x = x * scale
    return x, xbins, y


def smooth_broken_pow(pars, x):
    """
    Smoothed broken power-law. Pars are same as bpl1d (NOT + gaussian sigma):
       1: gamma1
       2: gamma2
       3: x_b (break point)
       4: x_r (normalization reference point)
       5: ampl1
    """
    (gamma1, gamma2, x_b, x_r, ampl1) = pars
    ampl2 = ampl1 * (x_b / x_r) ** (gamma2 - gamma1)
    ok = xall > x_b
    y = ampl1 * (xall / x_r) ** (-gamma1)
    y[ok] = ampl2 * (xall[ok] / x_r) ** (-gamma2)
    imin = np.searchsorted(xall, x[0] - 1e-3)
    imax = np.searchsorted(xall, x[-1] + 1e-3)
    return np.convolve(y, yg, mode='same')[imin:imax]


def smooth_twice_broken_pow(pars, x):
    """
    Smoothed broken power-law. Pars are same as bpl1d (NOT + gaussian sigma):
       1: gamma1
       2: gamma2
       3: gamma3
       4: x_b (break point)
       5: ampl1
    """
    x_b2 = 1000
    x_r = 50
    (gamma1, gamma2, gamma3, x_b, ampl1) = pars

    y = ampl1 * (xall / x_r) ** (-gamma1)

    i0, i1 = np.searchsorted(xall, [x_b, x_b2])
    ampl2 = ampl1 * (x_b / x_r) ** (gamma2 - gamma1)
    y[i0:i1] = ampl2 * (xall[i0:i1] / x_r) ** (-gamma2)

    i1 = np.searchsorted(xall, x_b2)
    ampl3 = ampl2 * (x_b2 / x_r) ** (gamma3 - gamma2)
    y[i1:] = ampl3 * (xall[i1:] / x_r) ** (-gamma3)

    imin = np.searchsorted(xall, x[0] - 1e-3)
    imax = np.searchsorted(xall, x[-1] + 1e-3)
    return np.convolve(y, yg, mode='same')[imin:imax]


def temp_scalefac(T_ccd):
    """
    Return the multiplicative scale factor to convert a CCD dark map from
    the nominal -19C temperature to the temperature T. Based on best global fit
    for dark current model in plot_predicted_warmpix.py. Previous value was
    0.62 instead of 0.70.

    If attempting to reproduce previous analysis, be aware that this is now
    calling chandra_aca.dark_model.dark_temp_scale and the value will be
    determined using the module DARK_SCALE_4C value which may differ from
    previous values of 1.0/0.70 or 1.0/0.62.
    """
    warnings.warn("temp_scalefac is deprecated. See chandra_aca.dark_model.dark_temp_scale.")
    return dark_temp_scale(-19, T_ccd)


def as_array(vals):
    if np.array(vals).ndim == 0:
        is_scalar = True
        vals = np.array([vals])
    else:
        is_scalar = False
        vals = np.array(vals)
    return vals, is_scalar


def get_sbp_pars(dates):
    """
    Return smooth broken powerlaw parameters set(s) at ``dates``.

    This is based on the sbp fits for the darkhist_zodi_m14 histograms in
    /proj/sot/ska/analysis/dark_current_model/dark_model.ipynb. The actual
    bi-linear fits (as a function of year) to the g1, g2, g3, x_b, and ampl
    parameters are derived from fits and by-hand inspection of fit trending.

    This is only accurate for dates > 2007.0.

    :param dates: one or a list of date(s) in DateTime compatible format
    :returns: one or a list of parameter lists [g1, g2, g3, x_b, ampl]
    """
    dates, is_scalar = as_array(dates)

    mid_year = 2012.0  # Fixed in dark_model.ipynb notebook
    years = DateTime(dates).frac_year
    dyears = years - mid_year

    # Poly fit parameter for pre-2012 and post-2012. Vals here are:
    # y_mid, slope_pre, slope_post
    par_fits = ((0.075, -0.00692, -0.0207),  # g1
                (3.32, 0.0203, 0 * 0.0047),  # g2
                (2.40, 0.061, 0.061),  # g3
                (192, 0.1, 0.1),  # x_b
                (18400, 1.45e3, 742),  # ampl
                )

    pars_list = []
    for dyear in dyears:
        pars = []
        for y_mid, slope_pre, slope_post in par_fits:
            slope = slope_pre if dyear < 0 else slope_post
            pars.append(y_mid + slope * dyear)
        pars_list.append(pars)

    if is_scalar:
        pars_list = pars_list[0]

    return pars_list


def get_warm_fracs(warm_threshold, date='2013:001:12:00:00', T_ccd=-19.0):
    """
    Calculate fraction of pixels in modeled dark current distribution
    above warm threshold(s).

    :param warm_threshold: scalar or list of threshold(s) in e-/sec
    :param date: date to use for modeled dark current distribution/histogram
    :param T_ccd: temperature (C) of modeled dark current distribution
    :returns: list or scalar of warm fractions (depends on warm_threshold type)
    """
    x, xbins, y = get_dark_hist(date, T_ccd)
    warm_thresholds, is_scalar = as_array(warm_threshold)

    warmpixes = []
    for warm_threshold in warm_thresholds:
        # First get the full bins to right of warm_threshold
        ii = np.searchsorted(xbins, warm_threshold)
        warmpix = np.sum(y[ii:])
        lx = np.log(warm_threshold)
        lx0 = np.log(xbins[ii - 1])
        lx1 = np.log(xbins[ii])
        ly0 = np.log(y[ii - 1])
        ly1 = np.log(y[ii])
        m = (ly1 - ly0) / (lx1 - lx0)
        partial_bin = y[ii] * (lx1 ** m - lx ** m) / (lx1 ** m - lx0 ** m)
        warmpix += partial_bin
        warmpixes.append(warmpix)

    if is_scalar:
        out = warmpixes[0]
    else:
        out = np.array(warmpixes)

    return out / (1024.0 ** 2)


def synthetic_dark_image(date, t_ccd_ref=None):
    """
    Generate a synthetic dark current image corresponding to the specified
    ``date`` and ``t_ccd``.

    :param date: (DateTime compatible)
    :param t_ccd_ref: ACA CCD temperature
    """
    from mica.archive.aca_dark import get_dark_cal_image

    if 'dark_1999223' not in CACHE:
        dark = get_dark_cal_image('1999:223:12:00:00', select='nearest',
                                  t_ccd_ref=-14).ravel()
        CACHE['dark_1999223'] = dark.copy()
    else:
        dark = CACHE['dark_1999223'].copy()

    # Fill any pixels above 40 e-/sec with a random sampling from a cool
    # pixel below 40 e-/sec
    warm = dark > 40
    warm_idx = np.flatnonzero(warm)
    not_warm_idx = np.flatnonzero(~warm)
    fill_idx = np.random.randint(0, len(not_warm_idx), len(warm_idx))
    dark[warm_idx] = dark[fill_idx]

    darkmodel = smooth_twice_broken_pow(get_sbp_pars(date), xall)

    darkran = np.random.poisson(darkmodel)
    nn = 0
    for ii, npix in enumerate(darkran):
        # Generate n log-uniform variates within bin
        if npix > 0:
            logdark = np.random.uniform(np.log(xbins[ii]), np.log(xbins[ii + 1]), npix)
            dark[nn:nn + npix] += np.exp(logdark)
        nn += npix

    np.random.shuffle(dark)
    dark.shape = (1024, 1024)

    if t_ccd_ref is not None:
        dark *= dark_temp_scale(-14, t_ccd_ref)

    return dark
[ "Chandra.Time.DateTime", "numpy.convolve", "numpy.random.poisson", "numpy.searchsorted", "numpy.flatnonzero", "numpy.log", "numpy.exp", "numpy.sum", "numpy.array", "warnings.warn", "mica.archive.aca_dark.get_dark_cal_image", "numpy.arange", "numpy.random.shuffle" ]
[((1034, 1087), 'numpy.arange', 'np.arange', (['(-2.5 * sigma)', '(2.5 * sigma)', 'dx'], {'dtype': 'float'}), '(-2.5 * sigma, 2.5 * sigma, dx, dtype=float)\n', (1043, 1087), True, 'import numpy as np\n'), ((1093, 1125), 'numpy.exp', 'np.exp', (['(-0.5 * (xg / sigma) ** 2)'], {}), '(-0.5 * (xg / sigma) ** 2)\n', (1099, 1125), True, 'import numpy as np\n'), ((1132, 1142), 'numpy.sum', 'np.sum', (['yg'], {}), '(yg)\n', (1138, 1142), True, 'import numpy as np\n'), ((3957, 3992), 'numpy.searchsorted', 'np.searchsorted', (['xall', '(x[0] - 0.001)'], {}), '(xall, x[0] - 0.001)\n', (3972, 3992), True, 'import numpy as np\n'), ((4003, 4039), 'numpy.searchsorted', 'np.searchsorted', (['xall', '(x[-1] + 0.001)'], {}), '(xall, x[-1] + 0.001)\n', (4018, 4039), True, 'import numpy as np\n'), ((4442, 4476), 'numpy.searchsorted', 'np.searchsorted', (['xall', '[x_b, x_b2]'], {}), '(xall, [x_b, x_b2])\n', (4457, 4476), True, 'import numpy as np\n'), ((4596, 4623), 'numpy.searchsorted', 'np.searchsorted', (['xall', 'x_b2'], {}), '(xall, x_b2)\n', (4611, 4623), True, 'import numpy as np\n'), ((4742, 4777), 'numpy.searchsorted', 'np.searchsorted', (['xall', '(x[0] - 0.001)'], {}), '(xall, x[0] - 0.001)\n', (4757, 4777), True, 'import numpy as np\n'), ((4788, 4824), 'numpy.searchsorted', 'np.searchsorted', (['xall', '(x[-1] + 0.001)'], {}), '(xall, x[-1] + 0.001)\n', (4803, 4824), True, 'import numpy as np\n'), ((5449, 5549), 'warnings.warn', 'warnings.warn', (['"""temp_scalefac is deprecated. See chandra_aca.dark_model.dark_temp_scale."""'], {}), "(\n 'temp_scalefac is deprecated. See chandra_aca.dark_model.dark_temp_scale.'\n )\n", (5462, 5549), False, 'import warnings\n'), ((5739, 5753), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (5747, 5753), True, 'import numpy as np\n'), ((9179, 9199), 'numpy.flatnonzero', 'np.flatnonzero', (['warm'], {}), '(warm)\n', (9193, 9199), True, 'import numpy as np\n'), ((9219, 9240), 'numpy.flatnonzero', 'np.flatnonzero', (['(~warm)'], {}), '(~warm)\n', (9233, 9240), True, 'import numpy as np\n'), ((9429, 9457), 'numpy.random.poisson', 'np.random.poisson', (['darkmodel'], {}), '(darkmodel)\n', (9446, 9457), True, 'import numpy as np\n'), ((9750, 9773), 'numpy.random.shuffle', 'np.random.shuffle', (['dark'], {}), '(dark)\n', (9767, 9773), True, 'import numpy as np\n'), ((4050, 4081), 'numpy.convolve', 'np.convolve', (['y', 'yg'], {'mode': '"""same"""'}), "(y, yg, mode='same')\n", (4061, 4081), True, 'import numpy as np\n'), ((4835, 4866), 'numpy.convolve', 'np.convolve', (['y', 'yg'], {'mode': '"""same"""'}), "(y, yg, mode='same')\n", (4846, 4866), True, 'import numpy as np\n'), ((5674, 5690), 'numpy.array', 'np.array', (['[vals]'], {}), '([vals])\n', (5682, 5690), True, 'import numpy as np\n'), ((6498, 6513), 'Chandra.Time.DateTime', 'DateTime', (['dates'], {}), '(dates)\n', (6506, 6513), False, 'from Chandra.Time import DateTime\n'), ((7965, 8003), 'numpy.searchsorted', 'np.searchsorted', (['xbins', 'warm_threshold'], {}), '(xbins, warm_threshold)\n', (7980, 8003), True, 'import numpy as np\n'), ((8022, 8036), 'numpy.sum', 'np.sum', (['y[ii:]'], {}), '(y[ii:])\n', (8028, 8036), True, 'import numpy as np\n'), ((8050, 8072), 'numpy.log', 'np.log', (['warm_threshold'], {}), '(warm_threshold)\n', (8056, 8072), True, 'import numpy as np\n'), ((8087, 8108), 'numpy.log', 'np.log', (['xbins[ii - 1]'], {}), '(xbins[ii - 1])\n', (8093, 8108), True, 'import numpy as np\n'), ((8123, 8140), 'numpy.log', 'np.log', (['xbins[ii]'], {}), '(xbins[ii])\n', (8129, 8140), True, 'import 
numpy as np\n'), ((8155, 8172), 'numpy.log', 'np.log', (['y[ii - 1]'], {}), '(y[ii - 1])\n', (8161, 8172), True, 'import numpy as np\n'), ((8187, 8200), 'numpy.log', 'np.log', (['y[ii]'], {}), '(y[ii])\n', (8193, 8200), True, 'import numpy as np\n'), ((8449, 8468), 'numpy.array', 'np.array', (['warmpixes'], {}), '(warmpixes)\n', (8457, 8468), True, 'import numpy as np\n'), ((5608, 5622), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (5616, 5622), True, 'import numpy as np\n'), ((9706, 9721), 'numpy.exp', 'np.exp', (['logdark'], {}), '(logdark)\n', (9712, 9721), True, 'import numpy as np\n'), ((8862, 8934), 'mica.archive.aca_dark.get_dark_cal_image', 'get_dark_cal_image', (['"""1999:223:12:00:00"""'], {'select': '"""nearest"""', 't_ccd_ref': '(-14)'}), "('1999:223:12:00:00', select='nearest', t_ccd_ref=-14)\n", (8880, 8934), False, 'from mica.archive.aca_dark import get_dark_cal_image\n'), ((9624, 9641), 'numpy.log', 'np.log', (['xbins[ii]'], {}), '(xbins[ii])\n', (9630, 9641), True, 'import numpy as np\n'), ((9643, 9664), 'numpy.log', 'np.log', (['xbins[ii + 1]'], {}), '(xbins[ii + 1])\n', (9649, 9664), True, 'import numpy as np\n')]
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause

import random

import numpy as np
import pytest
from sklearn import exceptions
from sklearn.base import clone
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

from mlxtend.classifier import EnsembleVoteClassifier
from mlxtend.data import iris_data
from mlxtend.utils import assert_raises

X, y = iris_data()
X = X[:, 1:3]


def test_EnsembleVoteClassifier():
    np.random.seed(123)
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='hard')

    scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
    scores_mean = (round(scores.mean(), 2))
    assert(scores_mean == 0.94)


def test_fit_base_estimators_false():
    np.random.seed(123)
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()

    clf1.fit(X, y)
    clf2.fit(X, y)
    clf3.fit(X, y)

    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='hard',
                                   fit_base_estimators=False)
    eclf.fit(X, y)

    assert round(eclf.score(X, y), 2) == 0.97


def test_use_clones():
    np.random.seed(123)
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()

    EnsembleVoteClassifier(clfs=[clf1, clf2, clf3],
                           use_clones=True).fit(X, y)

    assert_raises(exceptions.NotFittedError,
                  "This RandomForestClassifier instance is not fitted yet."
                  " Call 'fit' with appropriate arguments"
                  " before using this estimator.",
                  clf2.predict,
                  X)

    EnsembleVoteClassifier(clfs=[clf1, clf2, clf3],
                           use_clones=False).fit(X, y)

    clf2.predict(X)


def test_sample_weight():
    # with no weight
    np.random.seed(123)
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='hard')
    prob1 = eclf.fit(X, y).predict_proba(X)

    # with weight = 1
    w = np.ones(len(y))
    np.random.seed(123)
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='hard')
    prob2 = eclf.fit(X, y, sample_weight=w).predict_proba(X)

    # with random weight
    random.seed(87)
    w = np.array([random.random() for _ in range(len(y))])
    np.random.seed(123)
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='hard')
    prob3 = eclf.fit(X, y, sample_weight=w).predict_proba(X)

    diff12 = np.max(np.abs(prob1 - prob2))
    diff23 = np.max(np.abs(prob2 - prob3))
    assert diff12 < 1e-3, "max diff is %.4f" % diff12
    assert diff23 > 1e-3, "max diff is %.4f" % diff23


def test_no_weight_support():
    random.seed(87)
    w = np.array([random.random() for _ in range(len(y))])
    logi = LogisticRegression(solver='liblinear', multi_class='ovr')
    rf = RandomForestClassifier(n_estimators=10)
    gnb = GaussianNB()
    knn = KNeighborsClassifier()
    eclf = EnsembleVoteClassifier(clfs=[logi, rf, gnb, knn], voting='hard')
    with pytest.raises(TypeError):
        eclf.fit(X, y, sample_weight=w)


def test_no_weight_support_with_no_weight():
    logi = LogisticRegression(solver='liblinear', multi_class='ovr')
    rf = RandomForestClassifier(n_estimators=10)
    gnb = GaussianNB()
    knn = KNeighborsClassifier()
    eclf = EnsembleVoteClassifier(clfs=[logi, rf, gnb, knn], voting='hard')
    eclf.fit(X, y)


def test_1model_labels():
    clf = LogisticRegression(multi_class='multinomial',
                             solver='newton-cg', random_state=123)
    ens_clf_1 = EnsembleVoteClassifier(clfs=[clf], voting='soft', weights=None)
    ens_clf_2 = EnsembleVoteClassifier(clfs=[clf], voting='soft', weights=[1.])

    pred_e1 = ens_clf_1.fit(X, y).predict(X)
    pred_e2 = ens_clf_2.fit(X, y).predict(X)
    pred_e3 = clf.fit(X, y).predict(X)

    np.testing.assert_equal(pred_e1, pred_e2)
    np.testing.assert_equal(pred_e1, pred_e3)


def test_1model_probas():
    clf = LogisticRegression(multi_class='multinomial',
                             solver='newton-cg', random_state=123)
    ens_clf_1 = EnsembleVoteClassifier(clfs=[clf], voting='soft', weights=None)
    ens_clf_2 = EnsembleVoteClassifier(clfs=[clf], voting='soft', weights=[1.])

    pred_e1 = ens_clf_1.fit(X, y).predict_proba(X)
    pred_e2 = ens_clf_2.fit(X, y).predict_proba(X)
    pred_e3 = clf.fit(X, y).predict_proba(X)

    np.testing.assert_almost_equal(pred_e1, pred_e2, decimal=8)
    np.testing.assert_almost_equal(pred_e1, pred_e3, decimal=8)


def test_EnsembleVoteClassifier_weights():
    np.random.seed(123)
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3],
                                  voting='soft',
                                  weights=[1, 2, 10])

    scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
    scores_mean = (round(scores.mean(), 2))
    assert(scores_mean == 0.93)


def test_EnsembleVoteClassifier_gridsearch():
    clf1 = LogisticRegression(solver='liblinear',
                              multi_class='ovr',
                              random_state=1)
    clf2 = RandomForestClassifier(random_state=1)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft')

    params = {'logisticregression__C': [1.0, 100.0],
              'randomforestclassifier__n_estimators': [20, 200]}

    grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5, iid=False)

    X, y = iris_data()
    grid.fit(X, y)

    mean_scores = [round(s, 2) for s in grid.cv_results_['mean_test_score']]

    assert mean_scores == [0.95, 0.96, 0.96, 0.95]


def test_EnsembleVoteClassifier_gridsearch_enumerate_names():
    clf1 = LogisticRegression(solver='liblinear',
                              multi_class='ovr',
                              random_state=1)
    clf2 = RandomForestClassifier(random_state=1)
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf1, clf2])

    params = {'logisticregression-1__C': [1.0, 100.0],
              'logisticregression-2__C': [1.0, 100.0],
              'randomforestclassifier__n_estimators': [5, 20],
              'voting': ['hard', 'soft']}

    grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5, iid=False)

    X, y = iris_data()
    grid = grid.fit(X, y)


def test_get_params():
    clf1 = KNeighborsClassifier(n_neighbors=1)
    clf2 = RandomForestClassifier(random_state=1, n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3])

    got = sorted(list({s.split('__')[0] for s in eclf.get_params().keys()}))
    expect = ['clfs',
              'fit_base_estimators',
              'gaussiannb',
              'kneighborsclassifier',
              'randomforestclassifier',
              'use_clones',
              'verbose',
              'voting',
              'weights']
    assert got == expect, got


def test_classifier_gridsearch():
    clf1 = KNeighborsClassifier(n_neighbors=1)
    clf2 = RandomForestClassifier(random_state=1, n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1])

    params = {'clfs': [[clf1, clf1, clf1], [clf2, clf3]]}

    grid = GridSearchCV(estimator=eclf,
                        param_grid=params,
                        iid=False,
                        cv=5,
                        refit=True)
    grid.fit(X, y)

    assert len(grid.best_params_['clfs']) == 2


def test_string_labels_numpy_array():
    np.random.seed(123)
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='hard')

    y_str = y.copy()
    y_str = y_str.astype(str)
    y_str[:50] = 'a'
    y_str[50:100] = 'b'
    y_str[100:150] = 'c'

    scores = cross_val_score(eclf, X, y_str, cv=5, scoring='accuracy')
    scores_mean = (round(scores.mean(), 2))
    assert(scores_mean == 0.94)


def test_string_labels_python_list():
    np.random.seed(123)
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='hard')

    y_str = (['a' for a in range(50)] +
             ['b' for a in range(50)] +
             ['c' for a in range(50)])

    scores = cross_val_score(eclf, X, y_str, cv=5, scoring='accuracy')
    scores_mean = (round(scores.mean(), 2))
    assert(scores_mean == 0.94)


def test_clone():
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = RandomForestClassifier(n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3],
                                  voting='hard',
                                  fit_base_estimators=False)
    clone(eclf)
[ "mlxtend.data.iris_data", "sklearn.model_selection.GridSearchCV", "numpy.abs", "numpy.testing.assert_equal", "sklearn.base.clone", "sklearn.neighbors.KNeighborsClassifier", "sklearn.ensemble.RandomForestClassifier", "random.seed", "sklearn.linear_model.LogisticRegression", "mlxtend.classifier.EnsembleVoteClassifier", "numpy.testing.assert_almost_equal", "pytest.raises", "numpy.random.seed", "mlxtend.utils.assert_raises", "sklearn.naive_bayes.GaussianNB", "random.random", "sklearn.model_selection.cross_val_score" ]
[((631, 642), 'mlxtend.data.iris_data', 'iris_data', ([], {}), '()\n', (640, 642), False, 'from mlxtend.data import iris_data\n'), ((699, 718), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (713, 718), True, 'import numpy as np\n'), ((730, 787), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (748, 787), False, 'from sklearn.linear_model import LogisticRegression\n'), ((799, 838), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (821, 838), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((850, 862), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (860, 862), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((874, 936), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (896, 936), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((951, 1004), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['eclf', 'X', 'y'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(eclf, X, y, cv=5, scoring='accuracy')\n", (966, 1004), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((1241, 1260), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (1255, 1260), True, 'import numpy as np\n'), ((1272, 1329), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (1290, 1329), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1341, 1380), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (1363, 1380), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1392, 1404), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1402, 1404), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((1475, 1568), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""', 'fit_base_estimators': '(False)'}), "(clfs=[clf1, clf2, clf3], voting='hard',\n fit_base_estimators=False)\n", (1497, 1568), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((1728, 1747), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (1742, 1747), True, 'import numpy as np\n'), ((1759, 1816), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (1777, 1816), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1858, 1897), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (1880, 1897), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1909, 1921), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1919, 1921), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((2033, 2225), 'mlxtend.utils.assert_raises', 'assert_raises', (['exceptions.NotFittedError', '"""This RandomForestClassifier instance is not fitted yet. 
Call \'fit\' with appropriate arguments before using this estimator."""', 'clf2.predict', 'X'], {}), '(exceptions.NotFittedError,\n "This RandomForestClassifier instance is not fitted yet. Call \'fit\' with appropriate arguments before using this estimator."\n , clf2.predict, X)\n', (2046, 2225), False, 'from mlxtend.utils import assert_raises\n'), ((2495, 2514), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (2509, 2514), True, 'import numpy as np\n'), ((2526, 2583), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (2544, 2583), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2595, 2634), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (2617, 2634), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2646, 2658), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (2656, 2658), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((2670, 2732), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (2692, 2732), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((2828, 2847), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (2842, 2847), True, 'import numpy as np\n'), ((2859, 2916), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (2877, 2916), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2928, 2967), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (2950, 2967), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2979, 2991), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (2989, 2991), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3003, 3065), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (3025, 3065), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((3157, 3172), 'random.seed', 'random.seed', (['(87)'], {}), '(87)\n', (3168, 3172), False, 'import random\n'), ((3236, 3255), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (3250, 3255), True, 'import numpy as np\n'), ((3267, 3324), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (3285, 3324), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3336, 3375), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (3358, 3375), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3387, 3399), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (3397, 3399), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3411, 3473), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (3433, 3473), False, 'from mlxtend.classifier import 
EnsembleVoteClassifier\n'), ((3766, 3781), 'random.seed', 'random.seed', (['(87)'], {}), '(87)\n', (3777, 3781), False, 'import random\n'), ((3852, 3909), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (3870, 3909), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3919, 3958), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (3941, 3958), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3969, 3981), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (3979, 3981), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3992, 4014), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (4012, 4014), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4026, 4090), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[logi, rf, gnb, knn]', 'voting': '"""hard"""'}), "(clfs=[logi, rf, gnb, knn], voting='hard')\n", (4048, 4090), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((4224, 4281), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (4242, 4281), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4291, 4330), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (4313, 4330), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4341, 4353), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (4351, 4353), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((4364, 4386), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (4384, 4386), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4398, 4462), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[logi, rf, gnb, knn]', 'voting': '"""hard"""'}), "(clfs=[logi, rf, gnb, knn], voting='hard')\n", (4420, 4462), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((4520, 4607), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'multi_class': '"""multinomial"""', 'solver': '"""newton-cg"""', 'random_state': '(123)'}), "(multi_class='multinomial', solver='newton-cg',\n random_state=123)\n", (4538, 4607), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4649, 4712), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf]', 'voting': '"""soft"""', 'weights': 'None'}), "(clfs=[clf], voting='soft', weights=None)\n", (4671, 4712), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((4729, 4793), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf]', 'voting': '"""soft"""', 'weights': '[1.0]'}), "(clfs=[clf], voting='soft', weights=[1.0])\n", (4751, 4793), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((4928, 4969), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['pred_e1', 'pred_e2'], {}), '(pred_e1, pred_e2)\n', (4951, 4969), True, 'import numpy as np\n'), ((4974, 5015), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['pred_e1', 'pred_e3'], {}), '(pred_e1, pred_e3)\n', (4997, 5015), True, 'import numpy 
as np\n'), ((5054, 5141), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'multi_class': '"""multinomial"""', 'solver': '"""newton-cg"""', 'random_state': '(123)'}), "(multi_class='multinomial', solver='newton-cg',\n random_state=123)\n", (5072, 5141), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5183, 5246), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf]', 'voting': '"""soft"""', 'weights': 'None'}), "(clfs=[clf], voting='soft', weights=None)\n", (5205, 5246), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((5263, 5327), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf]', 'voting': '"""soft"""', 'weights': '[1.0]'}), "(clfs=[clf], voting='soft', weights=[1.0])\n", (5285, 5327), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((5480, 5539), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['pred_e1', 'pred_e2'], {'decimal': '(8)'}), '(pred_e1, pred_e2, decimal=8)\n', (5510, 5539), True, 'import numpy as np\n'), ((5544, 5603), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['pred_e1', 'pred_e3'], {'decimal': '(8)'}), '(pred_e1, pred_e3, decimal=8)\n', (5574, 5603), True, 'import numpy as np\n'), ((5654, 5673), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (5668, 5673), True, 'import numpy as np\n'), ((5685, 5742), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (5703, 5742), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5754, 5793), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (5776, 5793), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5805, 5817), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (5815, 5817), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((5829, 5916), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""soft"""', 'weights': '[1, 2, 10]'}), "(clfs=[clf1, clf2, clf3], voting='soft', weights=[1, \n 2, 10])\n", (5851, 5916), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((5994, 6047), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['eclf', 'X', 'y'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(eclf, X, y, cv=5, scoring='accuracy')\n", (6009, 6047), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((6300, 6373), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""', 'random_state': '(1)'}), "(solver='liblinear', multi_class='ovr', random_state=1)\n", (6318, 6373), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6445, 6483), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (6467, 6483), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((6495, 6507), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (6505, 6507), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((6519, 6581), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""soft"""'}), "(clfs=[clf1, clf2, clf3], 
voting='soft')\n", (6541, 6581), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((6713, 6777), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'eclf', 'param_grid': 'params', 'cv': '(5)', 'iid': '(False)'}), '(estimator=eclf, param_grid=params, cv=5, iid=False)\n', (6725, 6777), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((6790, 6801), 'mlxtend.data.iris_data', 'iris_data', ([], {}), '()\n', (6799, 6801), False, 'from mlxtend.data import iris_data\n'), ((7046, 7119), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""', 'random_state': '(1)'}), "(solver='liblinear', multi_class='ovr', random_state=1)\n", (7064, 7119), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7191, 7229), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (7213, 7229), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7241, 7288), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf1, clf2]'}), '(clfs=[clf1, clf1, clf2])\n', (7263, 7288), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((7517, 7581), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'eclf', 'param_grid': 'params', 'cv': '(5)', 'iid': '(False)'}), '(estimator=eclf, param_grid=params, cv=5, iid=False)\n', (7529, 7581), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((7594, 7605), 'mlxtend.data.iris_data', 'iris_data', ([], {}), '()\n', (7603, 7605), False, 'from mlxtend.data import iris_data\n'), ((7668, 7703), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (7688, 7703), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((7715, 7770), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(1)', 'n_estimators': '(10)'}), '(random_state=1, n_estimators=10)\n', (7737, 7770), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7782, 7794), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (7792, 7794), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((7806, 7853), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]'}), '(clfs=[clf1, clf2, clf3])\n', (7828, 7853), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((8276, 8311), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (8296, 8311), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((8323, 8378), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(1)', 'n_estimators': '(10)'}), '(random_state=1, n_estimators=10)\n', (8345, 8378), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((8390, 8402), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (8400, 8402), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((8414, 8449), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1]'}), '(clfs=[clf1])\n', (8436, 8449), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((8521, 8597), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'eclf', 'param_grid': 'params', 'iid': 
'(False)', 'cv': '(5)', 'refit': '(True)'}), '(estimator=eclf, param_grid=params, iid=False, cv=5, refit=True)\n', (8533, 8597), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((8805, 8824), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (8819, 8824), True, 'import numpy as np\n'), ((8836, 8893), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (8854, 8893), False, 'from sklearn.linear_model import LogisticRegression\n'), ((8905, 8944), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (8927, 8944), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((8956, 8968), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (8966, 8968), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((8980, 9042), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (9002, 9042), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((9179, 9236), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['eclf', 'X', 'y_str'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(eclf, X, y_str, cv=5, scoring='accuracy')\n", (9194, 9236), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((9473, 9492), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (9487, 9492), True, 'import numpy as np\n'), ((9504, 9561), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (9522, 9561), False, 'from sklearn.linear_model import LogisticRegression\n'), ((9573, 9612), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (9595, 9612), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((9624, 9636), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (9634, 9636), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((9648, 9710), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (9670, 9710), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((9845, 9902), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['eclf', 'X', 'y_str'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(eclf, X, y_str, cv=5, scoring='accuracy')\n", (9860, 9902), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((10127, 10184), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (10145, 10184), False, 'from sklearn.linear_model import LogisticRegression\n'), ((10196, 10235), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (10218, 10235), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((10247, 10259), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (10257, 10259), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((10271, 10364), 
'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""', 'fit_base_estimators': '(False)'}), "(clfs=[clf1, clf2, clf3], voting='hard',\n fit_base_estimators=False)\n", (10293, 10364), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((10433, 10444), 'sklearn.base.clone', 'clone', (['eclf'], {}), '(eclf)\n', (10438, 10444), False, 'from sklearn.base import clone\n'), ((3556, 3577), 'numpy.abs', 'np.abs', (['(prob1 - prob2)'], {}), '(prob1 - prob2)\n', (3562, 3577), True, 'import numpy as np\n'), ((3599, 3620), 'numpy.abs', 'np.abs', (['(prob2 - prob3)'], {}), '(prob2 - prob3)\n', (3605, 3620), True, 'import numpy as np\n'), ((4100, 4124), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4113, 4124), False, 'import pytest\n'), ((1926, 1990), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'use_clones': '(True)'}), '(clfs=[clf1, clf2, clf3], use_clones=True)\n', (1948, 1990), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((2318, 2383), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'use_clones': '(False)'}), '(clfs=[clf1, clf2, clf3], use_clones=False)\n', (2340, 2383), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((3191, 3206), 'random.random', 'random.random', ([], {}), '()\n', (3204, 3206), False, 'import random\n'), ((3800, 3815), 'random.random', 'random.random', ([], {}), '()\n', (3813, 3815), False, 'import random\n')]
from altair.vegalite.v4 import schema
from altair.vegalite.v4.schema.channels import Tooltip
import pandas as pd
import altair as alt
import numpy as np

from queries import Pomodoro

THEME = 'magma'

# TO DO: Add docstrings where needed


def get_current_date():
    """
    Gets the current date to perform default charts.

    returns:
        date: tuple. (year, month, day)
    """
    date = pd.to_datetime('now')
    year = date.year
    month = date.month
    day = date.day
    return (year, month, day)


# POMODORO CHARTS
def monthly_chart(year, month, df):
    """Build daily pomodoro bar and scatter charts for the given month."""
    # Filter
    df_copy = df.copy()
    filtered = df_copy.loc[f'{year}/{month}']
    month_name = filtered.full_date.dt.month_name()
    month_name = month_name.iloc[0]

    base = alt.Chart(
        filtered, title=f'Productivity in {month_name}').mark_circle().encode(
            x=alt.X('monthdate(full_date):O',
                  title='Days',
                  axis=alt.Axis(labelAngle=-90)),
            y=alt.Y('hoursminutes(full_date)', title='Daily hours'),
        ).properties(width=400, height=200)

    stack = base.mark_bar().encode(
        y=alt.Y('count()', title='Daily pomodoros'),
        color=alt.Color('project', title='Project names'),
        tooltip=[
            alt.Tooltip('category', title='Category'),
            alt.Tooltip('project', title='Project name'),
            alt.Tooltip('count()', title='Pomodoros'),
            alt.Tooltip('sum(pomodoro_length)',
                        title='Minutes invested this day')
        ])

    scatter = base.encode(
        color=alt.Color('project', title='Project names'),
        tooltip=[
            alt.Tooltip('category', title='Category'),
            alt.Tooltip('project', title='Project name'),
            alt.Tooltip('yearmonthdate(full_date)', title='Date'),
            alt.Tooltip('pomodoro_calification', title='Satisfaction'),
            alt.Tooltip('hoursminutes(full_date)', title='Start')
        ],
        size=alt.Size('pomodoro_calification',
                      sort='descending',
                      title='Calification'))

    chart = alt.hconcat(stack, scatter)
    return chart


def hourly_chart(df):
    """Chart good/bad pomodoros by hour plus a heatmap of bad pomodoros by day and hour."""
    df_copy = df.copy()

    # Get only the bad pomodoros
    bad_condition = df_copy.pomodoro_calification == 'Bad'
    bad_df = df_copy[bad_condition]

    # Filtered pomodoros without calification
    condition = df_copy.pomodoro_calification != 0
    new_df = df_copy[condition]

    grouped_chart = alt.Chart(new_df).mark_bar().encode(
        alt.X('pomodoro_calification:N', title="", axis=None),
        alt.Y('count():Q', title='Pomodoro count'),
        alt.Column('hours(full_date):O', title='Good and Bad pomodoros by hour'),
        alt.Color('pomodoro_calification:N', title='Calification'),
        tooltip=[alt.Tooltip('hours(full_date)'),
                 alt.Tooltip('count()')]).properties(width=20, height=200)

    heatmap = alt.Chart(
        bad_df, title='Bad pomodoros by day and hour').mark_rect().encode(
            alt.X('hours(full_date)', title='Hours', axis=alt.Axis(labelAngle=-90)),
            alt.Y('day(full_date):O', title='Day of the week'),
            alt.Color('count():Q',
                      title='Pomodoro count',
                      scale=alt.Scale(domain=(10, 1), scheme=THEME)),
            tooltip=[
                alt.Tooltip('count()', title='Bad pomodoros'),
                alt.Tooltip('sum(pomodoro_length)', title='Minutes wasted'),
                alt.Tooltip('hours(full_date)', title='Hour')
            ]).properties(width=400, height=200)

    return grouped_chart & heatmap


## PROJECT CHARTS
def create_projects_df(df):
    """Aggregate the pomodoro log into one row per project with status, days and hours."""
    df_copy = df.copy()
    date_format = '%Y-%m-%d'

    tmp_projects = df_copy.groupby('project').agg({
        'category': 'first',
        'project_start': 'first',
        'project_end': 'first',
        'project_cancel': 'first',
        'pomodoro_date': 'nunique',
        'pomodoro_length': 'sum',
        'pomodoro_calification': 'count'
    })

    # Rename the columns resulting from the groupby
    project_columns = {
        'project_start': 'start',
        'project_end': 'end',
        'project_cancel': 'cancel',
        'pomodoro_date': 'working_days',
        'pomodoro_length': 'minutes',
        'pomodoro_calification': 'total_pomodoros'
    }
    tmp_projects.rename(columns=project_columns, inplace=True)

    # Create separate columns for the pomodoro califications
    tmp_projects_2 = df_copy.groupby(
        'project')['pomodoro_calification'].value_counts().unstack().fillna(0)

    # Merge the two resulting groupby dataframes
    projects = pd.merge(tmp_projects,
                        tmp_projects_2,
                        left_index=True,
                        right_index=True)

    # Create the project status column.
    conditions = [projects.end.notnull(), projects.cancel.notnull()]
    choices = ['Ended', 'Canceled']
    projects['status'] = np.select(conditions, choices, default='On')

    # Create the days column. It counts the amount of days since its
    # start until its end/cancel date or current day if still on.
    today = pd.to_datetime("today", format=date_format)
    end_mask = (projects.status == "Ended")
    cancel_mask = (projects.status == 'Canceled')
    on_mask = (projects.status == 'On')

    projects['days'] = 0
    projects.loc[end_mask, 'days'] = (projects.end - projects.start).dt.days
    projects.loc[cancel_mask, 'days'] = (projects.cancel - projects.start).dt.days
    projects.loc[on_mask, 'days'] = (today - projects.start).dt.days

    # Convert the minutes count into hours
    projects['hours'] = pd.to_datetime(projects.minutes, unit='m').dt.strftime('%H:%M')

    # Convert the minutes column to amount of pomodoros
    projects['pomodoros'] = projects.minutes / 25

    projects.reset_index(inplace=True)

    return projects


def projects_hours_days(df):
    """Scatter plot of projects by starting date, elapsed days and invested hours."""
    df_copy = df.copy()
    single = alt.selection_single()

    chart = alt.Chart(
        df_copy, title='Projects').mark_point(filled=True).encode(
            alt.X('yearmonthdate(start)', title="Project starting date"),
            alt.Y('days', title='Days since the start'),
            color=alt.Color(
                'status:N',
                title='Project current status',
                sort='descending',
            ),
            size=alt.Size('hours', title='Total hours invested in the project'),
            tooltip=[
                alt.Tooltip('category', title='Category'),
                alt.Tooltip('project', title='Project'),
                alt.Tooltip('start', title='Project starting date'),
                alt.Tooltip('status', title='Status'),
                alt.Tooltip('days', title='Days since the start'),
                alt.Tooltip('working_days', title='Days with at least 1 pomodoro'),
                alt.Tooltip('hours', title='Total hours invested'),
                alt.Tooltip('pomodoros', title='Amount of pomodoros made')
            ]).add_selection(single).properties(width=800).interactive()

    return chart


# Make possible to show various projects
def plot_project(project, df):
    """Bar chart of daily pomodoro counts for a single project."""
    df_copy = df.copy()

    # Filter the project
    filtered = df_copy[df_copy.project == project]

    # Get start and end dates
    row = filtered.iloc[0]
    start = row.project_start
    end = row.project_end
    cancel = row.project_cancel

    start = start.date()

    if end:
        last = end.date()
    elif cancel:
        last = cancel.date()
    else:
        today = pd.to_datetime("today")
        last = today.date()

    line = alt.Chart(filtered).mark_bar().encode(
        alt.X(
            'yearmonthdate(full_date):O',
            # scale=alt.Scale(
            #     domain=[start.isoformat(), last.isoformat()]),
            axis=alt.Axis(labelAngle=-90)),
        alt.Y('count()')).configure_range(category={'scheme': 'dark2'})

    return line


def my_theme():
    return {
        'config': {
            'view': {
                'continuousHeight': 300,
                'continuousWidth': 400
            },  # from the default theme
            'range': {
                'category': {
                    'scheme': THEME
                }
            }
        }
    }


# Altair theme
alt.themes.register('my_theme', my_theme)
alt.themes.enable('my_theme')

if __name__ == "__main__":
    pomodoro = Pomodoro()
    df = pomodoro.create_df(pomodoro.QUERY)
    project = 'El asesinato de <NAME> - <NAME>'
    filtered = plot_project(project, df)
[ "altair.selection_single", "queries.Pomodoro", "numpy.select", "altair.Chart", "altair.Axis", "pandas.merge", "altair.Scale", "altair.themes.register", "altair.Y", "altair.X", "altair.themes.enable", "altair.Tooltip", "altair.Column", "altair.hconcat", "altair.Size", "altair.Color", "pandas.to_datetime" ]
[((9347, 9388), 'altair.themes.register', 'alt.themes.register', (['"""my_theme"""', 'my_theme'], {}), "('my_theme', my_theme)\n", (9366, 9388), True, 'import altair as alt\n'), ((9389, 9418), 'altair.themes.enable', 'alt.themes.enable', (['"""my_theme"""'], {}), "('my_theme')\n", (9406, 9418), True, 'import altair as alt\n'), ((406, 427), 'pandas.to_datetime', 'pd.to_datetime', (['"""now"""'], {}), "('now')\n", (420, 427), True, 'import pandas as pd\n'), ((2896, 2923), 'altair.hconcat', 'alt.hconcat', (['stack', 'scatter'], {}), '(stack, scatter)\n', (2907, 2923), True, 'import altair as alt\n'), ((5571, 5644), 'pandas.merge', 'pd.merge', (['tmp_projects', 'tmp_projects_2'], {'left_index': '(True)', 'right_index': '(True)'}), '(tmp_projects, tmp_projects_2, left_index=True, right_index=True)\n', (5579, 5644), True, 'import pandas as pd\n'), ((5888, 5932), 'numpy.select', 'np.select', (['conditions', 'choices'], {'default': '"""On"""'}), "(conditions, choices, default='On')\n", (5897, 5932), True, 'import numpy as np\n'), ((6081, 6124), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {'format': 'date_format'}), "('today', format=date_format)\n", (6095, 6124), True, 'import pandas as pd\n'), ((6953, 6975), 'altair.selection_single', 'alt.selection_single', ([], {}), '()\n', (6973, 6975), True, 'import altair as alt\n'), ((9463, 9473), 'queries.Pomodoro', 'Pomodoro', ([], {}), '()\n', (9471, 9473), False, 'from queries import Pomodoro\n'), ((1157, 1198), 'altair.Y', 'alt.Y', (['"""count()"""'], {'title': '"""Daily pomodoros"""'}), "('count()', title='Daily pomodoros')\n", (1162, 1198), True, 'import altair as alt\n'), ((1241, 1284), 'altair.Color', 'alt.Color', (['"""project"""'], {'title': '"""Project names"""'}), "('project', title='Project names')\n", (1250, 1284), True, 'import altair as alt\n'), ((2052, 2095), 'altair.Color', 'alt.Color', (['"""project"""'], {'title': '"""Project names"""'}), "('project', title='Project names')\n", (2061, 2095), True, 'import altair as alt\n'), ((2727, 2801), 'altair.Size', 'alt.Size', (['"""pomodoro_calification"""'], {'sort': '"""descending"""', 'title': '"""Calification"""'}), "('pomodoro_calification', sort='descending', title='Calification')\n", (2735, 2801), True, 'import altair as alt\n'), ((8610, 8633), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {}), "('today')\n", (8624, 8633), True, 'import pandas as pd\n'), ((1421, 1462), 'altair.Tooltip', 'alt.Tooltip', (['"""category"""'], {'title': '"""Category"""'}), "('category', title='Category')\n", (1432, 1462), True, 'import altair as alt\n'), ((1554, 1598), 'altair.Tooltip', 'alt.Tooltip', (['"""project"""'], {'title': '"""Project name"""'}), "('project', title='Project name')\n", (1565, 1598), True, 'import altair as alt\n'), ((1690, 1731), 'altair.Tooltip', 'alt.Tooltip', (['"""count()"""'], {'title': '"""Pomodoros"""'}), "('count()', title='Pomodoros')\n", (1701, 1731), True, 'import altair as alt\n'), ((1823, 1893), 'altair.Tooltip', 'alt.Tooltip', (['"""sum(pomodoro_length)"""'], {'title': '"""Minutes invested this day"""'}), "('sum(pomodoro_length)', title='Minutes invested this day')\n", (1834, 1893), True, 'import altair as alt\n'), ((2163, 2204), 'altair.Tooltip', 'alt.Tooltip', (['"""category"""'], {'title': '"""Category"""'}), "('category', title='Category')\n", (2174, 2204), True, 'import altair as alt\n'), ((2236, 2280), 'altair.Tooltip', 'alt.Tooltip', (['"""project"""'], {'title': '"""Project name"""'}), "('project', title='Project name')\n", (2247, 2280), 
True, 'import altair as alt\n'), ((2312, 2365), 'altair.Tooltip', 'alt.Tooltip', (['"""yearmonthdate(full_date)"""'], {'title': '"""Date"""'}), "('yearmonthdate(full_date)', title='Date')\n", (2323, 2365), True, 'import altair as alt\n'), ((2439, 2497), 'altair.Tooltip', 'alt.Tooltip', (['"""pomodoro_calification"""'], {'title': '"""Satisfaction"""'}), "('pomodoro_calification', title='Satisfaction')\n", (2450, 2497), True, 'import altair as alt\n'), ((2571, 2624), 'altair.Tooltip', 'alt.Tooltip', (['"""hoursminutes(full_date)"""'], {'title': '"""Start"""'}), "('hoursminutes(full_date)', title='Start')\n", (2582, 2624), True, 'import altair as alt\n'), ((3330, 3383), 'altair.X', 'alt.X', (['"""pomodoro_calification:N"""'], {'title': '""""""', 'axis': 'None'}), "('pomodoro_calification:N', title='', axis=None)\n", (3335, 3383), True, 'import altair as alt\n'), ((3393, 3435), 'altair.Y', 'alt.Y', (['"""count():Q"""'], {'title': '"""Pomodoro count"""'}), "('count():Q', title='Pomodoro count')\n", (3398, 3435), True, 'import altair as alt\n'), ((3445, 3517), 'altair.Column', 'alt.Column', (['"""hours(full_date):O"""'], {'title': '"""Good and Bad pomodoros by hour"""'}), "('hours(full_date):O', title='Good and Bad pomodoros by hour')\n", (3455, 3517), True, 'import altair as alt\n'), ((3546, 3604), 'altair.Color', 'alt.Color', (['"""pomodoro_calification:N"""'], {'title': '"""Calification"""'}), "('pomodoro_calification:N', title='Calification')\n", (3555, 3604), True, 'import altair as alt\n'), ((3965, 4015), 'altair.Y', 'alt.Y', (['"""day(full_date):O"""'], {'title': '"""Day of the week"""'}), "('day(full_date):O', title='Day of the week')\n", (3970, 4015), True, 'import altair as alt\n'), ((6598, 6640), 'pandas.to_datetime', 'pd.to_datetime', (['projects.minutes'], {'unit': '"""m"""'}), "(projects.minutes, unit='m')\n", (6612, 6640), True, 'import pandas as pd\n'), ((8918, 8934), 'altair.Y', 'alt.Y', (['"""count()"""'], {}), "('count()')\n", (8923, 8934), True, 'import altair as alt\n'), ((1020, 1073), 'altair.Y', 'alt.Y', (['"""hoursminutes(full_date)"""'], {'title': '"""Daily hours"""'}), "('hoursminutes(full_date)', title='Daily hours')\n", (1025, 1073), True, 'import altair as alt\n'), ((3623, 3654), 'altair.Tooltip', 'alt.Tooltip', (['"""hours(full_date)"""'], {}), "('hours(full_date)')\n", (3634, 3654), True, 'import altair as alt\n'), ((3673, 3695), 'altair.Tooltip', 'alt.Tooltip', (['"""count()"""'], {}), "('count()')\n", (3684, 3695), True, 'import altair as alt\n'), ((3926, 3950), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(-90)'}), '(labelAngle=-90)\n', (3934, 3950), True, 'import altair as alt\n'), ((4126, 4165), 'altair.Scale', 'alt.Scale', ([], {'domain': '(10, 1)', 'scheme': 'THEME'}), '(domain=(10, 1), scheme=THEME)\n', (4135, 4165), True, 'import altair as alt\n'), ((4206, 4251), 'altair.Tooltip', 'alt.Tooltip', (['"""count()"""'], {'title': '"""Bad pomodoros"""'}), "('count()', title='Bad pomodoros')\n", (4217, 4251), True, 'import altair as alt\n'), ((4269, 4328), 'altair.Tooltip', 'alt.Tooltip', (['"""sum(pomodoro_length)"""'], {'title': '"""Minutes wasted"""'}), "('sum(pomodoro_length)', title='Minutes wasted')\n", (4280, 4328), True, 'import altair as alt\n'), ((4346, 4391), 'altair.Tooltip', 'alt.Tooltip', (['"""hours(full_date)"""'], {'title': '"""Hour"""'}), "('hours(full_date)', title='Hour')\n", (4357, 4391), True, 'import altair as alt\n'), ((8883, 8907), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(-90)'}), '(labelAngle=-90)\n', (8891, 8907), True, 
'import altair as alt\n'), ((784, 842), 'altair.Chart', 'alt.Chart', (['filtered'], {'title': 'f"""Productivity in {month_name}"""'}), "(filtered, title=f'Productivity in {month_name}')\n", (793, 842), True, 'import altair as alt\n'), ((979, 1003), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(-90)'}), '(labelAngle=-90)\n', (987, 1003), True, 'import altair as alt\n'), ((3285, 3302), 'altair.Chart', 'alt.Chart', (['new_df'], {}), '(new_df)\n', (3294, 3302), True, 'import altair as alt\n'), ((3746, 3802), 'altair.Chart', 'alt.Chart', (['bad_df'], {'title': '"""Bad pomodoros by day and hour"""'}), "(bad_df, title='Bad pomodoros by day and hour')\n", (3755, 3802), True, 'import altair as alt\n'), ((8674, 8693), 'altair.Chart', 'alt.Chart', (['filtered'], {}), '(filtered)\n', (8683, 8693), True, 'import altair as alt\n'), ((7079, 7139), 'altair.X', 'alt.X', (['"""yearmonthdate(start)"""'], {'title': '"""Project starting date"""'}), "('yearmonthdate(start)', title='Project starting date')\n", (7084, 7139), True, 'import altair as alt\n'), ((7153, 7196), 'altair.Y', 'alt.Y', (['"""days"""'], {'title': '"""Days since the start"""'}), "('days', title='Days since the start')\n", (7158, 7196), True, 'import altair as alt\n'), ((7216, 7288), 'altair.Color', 'alt.Color', (['"""status:N"""'], {'title': '"""Project current status"""', 'sort': '"""descending"""'}), "('status:N', title='Project current status', sort='descending')\n", (7225, 7288), True, 'import altair as alt\n'), ((7370, 7432), 'altair.Size', 'alt.Size', (['"""hours"""'], {'title': '"""Total hours invested in the project"""'}), "('hours', title='Total hours invested in the project')\n", (7378, 7432), True, 'import altair as alt\n'), ((7498, 7539), 'altair.Tooltip', 'alt.Tooltip', (['"""category"""'], {'title': '"""Category"""'}), "('category', title='Category')\n", (7509, 7539), True, 'import altair as alt\n'), ((7557, 7596), 'altair.Tooltip', 'alt.Tooltip', (['"""project"""'], {'title': '"""Project"""'}), "('project', title='Project')\n", (7568, 7596), True, 'import altair as alt\n'), ((7614, 7665), 'altair.Tooltip', 'alt.Tooltip', (['"""start"""'], {'title': '"""Project starting date"""'}), "('start', title='Project starting date')\n", (7625, 7665), True, 'import altair as alt\n'), ((7683, 7720), 'altair.Tooltip', 'alt.Tooltip', (['"""status"""'], {'title': '"""Status"""'}), "('status', title='Status')\n", (7694, 7720), True, 'import altair as alt\n'), ((7738, 7787), 'altair.Tooltip', 'alt.Tooltip', (['"""days"""'], {'title': '"""Days since the start"""'}), "('days', title='Days since the start')\n", (7749, 7787), True, 'import altair as alt\n'), ((7805, 7871), 'altair.Tooltip', 'alt.Tooltip', (['"""working_days"""'], {'title': '"""Days with at least 1 pomodoro"""'}), "('working_days', title='Days with at least 1 pomodoro')\n", (7816, 7871), True, 'import altair as alt\n'), ((7917, 7967), 'altair.Tooltip', 'alt.Tooltip', (['"""hours"""'], {'title': '"""Total hours invested"""'}), "('hours', title='Total hours invested')\n", (7928, 7967), True, 'import altair as alt\n'), ((7985, 8043), 'altair.Tooltip', 'alt.Tooltip', (['"""pomodoros"""'], {'title': '"""Amount of pomodoros made"""'}), "('pomodoros', title='Amount of pomodoros made')\n", (7996, 8043), True, 'import altair as alt\n'), ((6989, 7025), 'altair.Chart', 'alt.Chart', (['df_copy'], {'title': '"""Projects"""'}), "(df_copy, title='Projects')\n", (6998, 7025), True, 'import altair as alt\n')]
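A minimal usage sketch for the pomodoro chart helpers in the module above; the DataFrame contents, column values, and output file name are invented for illustration and assume pandas and altair are installed and the module's functions are importable:

import pandas as pd

# Illustrative data only: a tiny pomodoro log shaped like the DataFrame these charts expect
dates = pd.to_datetime(["2021-03-01 09:00", "2021-03-01 10:00", "2021-03-02 15:30"])
df = pd.DataFrame({
    "full_date": dates,
    "category": ["Work", "Work", "Study"],
    "project": ["Bot", "Bot", "Thesis"],
    "pomodoro_length": [25, 25, 25],
    "pomodoro_calification": ["Good", "Bad", "Good"],
}, index=dates)

chart = monthly_chart(2021, 3, df)  # helper defined in the module above
chart.save("march_pomodoros.html")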
import numpy as np
import os
import sklearn.metrics
from scipy.optimize import curve_fit


def slice_lat(ds):
    return ds.sel(lat=slice(-25, 25))


def ensure_dir(file_path):
    """Check if a directory exists and create it if needed"""
    if not os.path.exists(file_path):
        os.makedirs(file_path)


def days_per_month(month, year):
    """Return the number of days in any month and year"""
    days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]  # Jan-Dec; January has 31 days
    d = days[int(month)-1]
    if d==28 and int(year)%4==0:
        if int(year)%100==0 and int(year)%400!=0:
            pass
        else:
            d = 29
    return d


def precip_to_mm(ds):
    """Convert precip to mm"""
    if ds.pr.attrs['units']=='kg m-2 s-1':
        ds['pr'] = ds.pr * 24*60**2
        ds.pr.attrs['units']='mm day-1'
    elif ds.pr.attrs['units']=='mm day-1':
        pass
    else:
        raise ValueError('Unrecognised units')
    return ds


def gaus(x,a,x0,sigma):
    """Simple normal distribution function"""
    return a*np.exp(-(x-x0)**2/(2*sigma**2))


def fit_gaussian(y, x):
    """Fit a normal gaussian distribution curve to the data.
    Returns
        [amplitude, mean, width, r^2 statistic]
        4x4 covariance matrix for above values
    """
    popt_f, pcov_f = np.full(4, np.nan, dtype=np.float64), np.full((4,4), np.nan, dtype=np.float64)
    bounds = (np.array([0, -30, 0]), np.array([25, 20, 25]))
    try:
        popt, pcov = curve_fit(gaus,x,y,p0=[8,-5,10], maxfev=8000, bounds=bounds)
        a,x0,sigma = popt
        y_pred = gaus(x,a,x0,sigma)
        r = sklearn.metrics.r2_score(y, y_pred)
        popt_f[:3] = popt
        popt_f[3] = r
        pcov_f[:3, :3] = pcov
    except RuntimeError:
        pass
    return popt_f, pcov_f
[ "scipy.optimize.curve_fit", "os.path.exists", "os.makedirs", "numpy.exp", "numpy.array", "numpy.full" ]
[((250, 275), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (264, 275), False, 'import os\n'), ((285, 307), 'os.makedirs', 'os.makedirs', (['file_path'], {}), '(file_path)\n', (296, 307), False, 'import os\n'), ((1020, 1061), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2 * sigma ** 2))\n', (1026, 1061), True, 'import numpy as np\n'), ((1282, 1318), 'numpy.full', 'np.full', (['(4)', 'np.nan'], {'dtype': 'np.float64'}), '(4, np.nan, dtype=np.float64)\n', (1289, 1318), True, 'import numpy as np\n'), ((1320, 1361), 'numpy.full', 'np.full', (['(4, 4)', 'np.nan'], {'dtype': 'np.float64'}), '((4, 4), np.nan, dtype=np.float64)\n', (1327, 1361), True, 'import numpy as np\n'), ((1375, 1396), 'numpy.array', 'np.array', (['[0, -30, 0]'], {}), '([0, -30, 0])\n', (1383, 1396), True, 'import numpy as np\n'), ((1398, 1420), 'numpy.array', 'np.array', (['[25, 20, 25]'], {}), '([25, 20, 25])\n', (1406, 1420), True, 'import numpy as np\n'), ((1452, 1517), 'scipy.optimize.curve_fit', 'curve_fit', (['gaus', 'x', 'y'], {'p0': '[8, -5, 10]', 'maxfev': '(8000)', 'bounds': 'bounds'}), '(gaus, x, y, p0=[8, -5, 10], maxfev=8000, bounds=bounds)\n', (1461, 1517), False, 'from scipy.optimize import curve_fit\n')]
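A minimal usage sketch for the fit_gaussian helper above; the sample curve, noise level, and sample positions are made up for illustration and assume numpy, scipy, and scikit-learn are installed:

import numpy as np

# Illustrative data only: a noisy Gaussian bump over latitude-like positions
x = np.linspace(-30, 20, 51)
rng = np.random.default_rng(0)
y = 8.0 * np.exp(-(x + 5.0) ** 2 / (2 * 10.0 ** 2)) + rng.normal(0.0, 0.2, x.size)

popt, pcov = fit_gaussian(y, x)
amplitude, mean, width, r2 = popt  # fourth slot holds the r^2 goodness-of-fit
print(amplitude, mean, width, r2)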
""" Library Features: Name: lib_dryes_downloader_geo Author(s): <NAME> (<EMAIL>), <NAME> (<EMAIL>) Date: '20210929' Version: '1.0.0' """ ################################################################################# # Library import os import logging from osgeo import gdal, gdalconst import numpy as np import rasterio import matplotlib.pylab as plt from lib_dryes_downloader_hsaf_generic import create_darray_2d ################################################################################# logging.getLogger("rasterio").setLevel(logging.WARNING) # ------------------------------------------------------------------------------------- # Method to read tiff file def reproject_file_tiff(file_name_in, file_name_out, file_wide_out, file_high_out, file_geotrans_out, file_proj_out): dset_tiff_out = gdal.GetDriverByName('GTiff').Create( file_name_out, file_wide_out, file_high_out, 1, gdalconst.GDT_Float32) dset_tiff_out.SetGeoTransform(file_geotrans_out) dset_tiff_out.SetProjection(file_proj_out) dset_tiff_in = gdal.Open(file_name_in, gdalconst.GA_ReadOnly) dset_proj_in = dset_tiff_in.GetProjection() dset_geotrans_in = dset_tiff_in.GetGeoTransform() dset_data_in = dset_tiff_in.ReadAsArray() dset_band_in = dset_tiff_in.GetRasterBand(1) # Reproject from input file to output file set with out information gdal.ReprojectImage(dset_tiff_in, dset_tiff_out, dset_proj_in, file_proj_out, gdalconst.GRA_NearestNeighbour) return dset_tiff_out # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to get a raster file def read_file_raster(file_name, file_proj='epsg:4326', var_name='land', coord_name_x='Longitude', coord_name_y='Latitude', dim_name_x='Longitude', dim_name_y='Latitude', no_data_default=-9999.0): if os.path.exists(file_name): if (file_name.endswith('.txt') or file_name.endswith('.asc')) or file_name.endswith('.tif'): crs = rasterio.crs.CRS({"init": file_proj}) with rasterio.open(file_name, mode='r+') as dset: dset.crs = crs bounds = dset.bounds no_data = dset.nodata res = dset.res transform = dset.transform data = dset.read() proj = dset.crs.wkt values = data[0, :, :] if (no_data is None) or (np.isnan(no_data)): no_data = no_data_default decimal_round = 7 center_right = bounds.right - (res[0] / 2) center_left = bounds.left + (res[0] / 2) center_top = bounds.top - (res[1] / 2) center_bottom = bounds.bottom + (res[1] / 2) lon = np.arange(center_left, center_right + np.abs(res[0] / 2), np.abs(res[0]), float) lat = np.flip(np.arange(center_bottom, center_top + np.abs(res[0] / 2), np.abs(res[1]), float), axis=0) lons, lats = np.meshgrid(lon, lat) if center_bottom > center_top: center_bottom_tmp = center_top center_top_tmp = center_bottom center_bottom = center_bottom_tmp center_top = center_top_tmp values = np.flipud(values) lats = np.flipud(lats) # # Debug # plt.figure() # plt.imshow(lats) # plt.colorbar() # # # Debug # plt.figure() # plt.imshow(values) # plt.colorbar() # plt.show() min_lon_round = round(np.min(lons), decimal_round) max_lon_round = round(np.max(lons), decimal_round) min_lat_round = round(np.min(lats), decimal_round) max_lat_round = round(np.max(lats), decimal_round) center_right_round = round(center_right, decimal_round) center_left_round = round(center_left, decimal_round) center_bottom_round = round(center_bottom, decimal_round) center_top_round = round(center_top, decimal_round) assert min_lon_round == center_left_round assert max_lon_round == 
center_right_round assert min_lat_round == center_bottom_round assert max_lat_round == center_top_round dims = values.shape high = dims[0] # nrows wide = dims[1] # cols bounding_box = [min_lon_round, max_lat_round, max_lon_round, min_lat_round] da = create_darray_2d(values, lons, lats, coord_name_x=coord_name_x, coord_name_y=coord_name_y, dim_name_x=dim_name_x, dim_name_y=dim_name_y, name=var_name) else: logging.error(' ===> Geographical file ' + file_name + ' format unknown') raise NotImplementedError('File type reader not implemented yet') else: logging.error(' ===> Geographical file ' + file_name + ' not found') raise IOError('Geographical file location or name is wrong') return da, wide, high, proj, transform, bounding_box, no_data, dim_name_x, dim_name_y # # -------------------------------------------------------------------------------------
[ "osgeo.gdal.Open", "os.path.exists", "logging.getLogger", "rasterio.crs.CRS", "osgeo.gdal.GetDriverByName", "lib_dryes_downloader_hsaf_generic.create_darray_2d", "osgeo.gdal.ReprojectImage", "numpy.abs", "numpy.flipud", "rasterio.open", "numpy.max", "numpy.isnan", "numpy.min", "numpy.meshgrid", "logging.error" ]
[((1101, 1147), 'osgeo.gdal.Open', 'gdal.Open', (['file_name_in', 'gdalconst.GA_ReadOnly'], {}), '(file_name_in, gdalconst.GA_ReadOnly)\n', (1110, 1147), False, 'from osgeo import gdal, gdalconst\n'), ((1422, 1535), 'osgeo.gdal.ReprojectImage', 'gdal.ReprojectImage', (['dset_tiff_in', 'dset_tiff_out', 'dset_proj_in', 'file_proj_out', 'gdalconst.GRA_NearestNeighbour'], {}), '(dset_tiff_in, dset_tiff_out, dset_proj_in,\n file_proj_out, gdalconst.GRA_NearestNeighbour)\n', (1441, 1535), False, 'from osgeo import gdal, gdalconst\n'), ((2034, 2059), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (2048, 2059), False, 'import os\n'), ((530, 559), 'logging.getLogger', 'logging.getLogger', (['"""rasterio"""'], {}), "('rasterio')\n", (547, 559), False, 'import logging\n'), ((5089, 5157), 'logging.error', 'logging.error', (["(' ===> Geographical file ' + file_name + ' not found')"], {}), "(' ===> Geographical file ' + file_name + ' not found')\n", (5102, 5157), False, 'import logging\n'), ((864, 893), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (884, 893), False, 'from osgeo import gdal, gdalconst\n'), ((2181, 2218), 'rasterio.crs.CRS', 'rasterio.crs.CRS', (["{'init': file_proj}"], {}), "({'init': file_proj})\n", (2197, 2218), False, 'import rasterio\n'), ((3160, 3181), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (3171, 3181), True, 'import numpy as np\n'), ((4706, 4865), 'lib_dryes_downloader_hsaf_generic.create_darray_2d', 'create_darray_2d', (['values', 'lons', 'lats'], {'coord_name_x': 'coord_name_x', 'coord_name_y': 'coord_name_y', 'dim_name_x': 'dim_name_x', 'dim_name_y': 'dim_name_y', 'name': 'var_name'}), '(values, lons, lats, coord_name_x=coord_name_x,\n coord_name_y=coord_name_y, dim_name_x=dim_name_x, dim_name_y=dim_name_y,\n name=var_name)\n', (4722, 4865), False, 'from lib_dryes_downloader_hsaf_generic import create_darray_2d\n'), ((4919, 4992), 'logging.error', 'logging.error', (["(' ===> Geographical file ' + file_name + ' format unknown')"], {}), "(' ===> Geographical file ' + file_name + ' format unknown')\n", (4932, 4992), False, 'import logging\n'), ((2236, 2271), 'rasterio.open', 'rasterio.open', (['file_name'], {'mode': '"""r+"""'}), "(file_name, mode='r+')\n", (2249, 2271), False, 'import rasterio\n'), ((2609, 2626), 'numpy.isnan', 'np.isnan', (['no_data'], {}), '(no_data)\n', (2617, 2626), True, 'import numpy as np\n'), ((2996, 3010), 'numpy.abs', 'np.abs', (['res[0]'], {}), '(res[0])\n', (3002, 3010), True, 'import numpy as np\n'), ((3439, 3456), 'numpy.flipud', 'np.flipud', (['values'], {}), '(values)\n', (3448, 3456), True, 'import numpy as np\n'), ((3480, 3495), 'numpy.flipud', 'np.flipud', (['lats'], {}), '(lats)\n', (3489, 3495), True, 'import numpy as np\n'), ((3791, 3803), 'numpy.min', 'np.min', (['lons'], {}), '(lons)\n', (3797, 3803), True, 'import numpy as np\n'), ((3854, 3866), 'numpy.max', 'np.max', (['lons'], {}), '(lons)\n', (3860, 3866), True, 'import numpy as np\n'), ((3917, 3929), 'numpy.min', 'np.min', (['lats'], {}), '(lats)\n', (3923, 3929), True, 'import numpy as np\n'), ((3980, 3992), 'numpy.max', 'np.max', (['lats'], {}), '(lats)\n', (3986, 3992), True, 'import numpy as np\n'), ((2976, 2994), 'numpy.abs', 'np.abs', (['(res[0] / 2)'], {}), '(res[0] / 2)\n', (2982, 2994), True, 'import numpy as np\n'), ((3103, 3117), 'numpy.abs', 'np.abs', (['res[1]'], {}), '(res[1])\n', (3109, 3117), True, 'import numpy as np\n'), ((3083, 3101), 'numpy.abs', 'np.abs', 
(['(res[0] / 2)'], {}), '(res[0] / 2)\n', (3089, 3101), True, 'import numpy as np\n')]
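A hedged usage sketch for read_file_raster above; the GeoTIFF path is hypothetical, and the call assumes the file exists and is writable since the function opens it with mode='r+':

# Illustrative call only: load a land-mask GeoTIFF into an xarray DataArray plus its grid metadata
da, wide, high, proj, transform, bbox, no_data, dim_x, dim_y = read_file_raster(
    "land_mask.tif", file_proj="epsg:4326", var_name="land")
print(wide, high, bbox)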
import numpy as np
import cv2


def ransac_align_points(
    pA, pB,
    threshold,
    diagonal_constraint=0.75,
    default=np.eye(4)[:3],
):
    """Estimate a 3x4 affine transform aligning point set pA onto pB with RANSAC
    (cv2.estimateAffine3D); falls back to the given default when there are too
    few matched points or the fitted affine is degenerate."""

    # sensible requirement of 51 or more spots to compute ransac affine
    if len(pA) <= 50 or len(pB) <= 50:
        if default is not None:
            print("Insufficient spot matches for ransac, returning default identity")
            return default
        else:
            raise ValueError("Insufficient spot matches for ransac, need more than 50")

    # compute the affine
    r, Aff, inline = cv2.estimateAffine3D(pA, pB, ransacThreshold=threshold, confidence=0.999)

    # rarely ransac just doesn't work (depends on data and parameters)
    # sensible choices for hard constraints on the affine matrix
    if np.any( np.diag(Aff) < diagonal_constraint ):
        if default is not None:
            print("Degenerate affine produced, returning default identity")
            return default
        else:
            raise ValueError("Degenerate affine produced, ransac failed")

    return Aff
[ "numpy.eye", "cv2.estimateAffine3D", "numpy.diag" ]
[((554, 627), 'cv2.estimateAffine3D', 'cv2.estimateAffine3D', (['pA', 'pB'], {'ransacThreshold': 'threshold', 'confidence': '(0.999)'}), '(pA, pB, ransacThreshold=threshold, confidence=0.999)\n', (574, 627), False, 'import cv2\n'), ((114, 123), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (120, 123), True, 'import numpy as np\n'), ((780, 792), 'numpy.diag', 'np.diag', (['Aff'], {}), '(Aff)\n', (787, 792), True, 'import numpy as np\n')]
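A hedged usage sketch for ransac_align_points above; the matched point sets, translation, noise level, and threshold are invented for illustration and assume numpy and opencv-python are installed:

import numpy as np

# Illustrative data only: 60 matched 3D points related by a small translation plus noise
rng = np.random.default_rng(1)
pA = rng.uniform(0.0, 100.0, size=(60, 3)).astype(np.float32)
pB = pA + np.array([2.0, -1.5, 0.5], dtype=np.float32) + rng.normal(0.0, 0.05, pA.shape).astype(np.float32)

affine = ransac_align_points(pA, pB, threshold=1.0)
print(affine)  # 3x4 affine mapping pA onto pB, or the default identity on failure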
# Credit - https://github.com/balajisrinivas/Detect-Face-and-Blur-OpenCV # For blurface from asyncio import TimeoutError, sleep from calendar import timegm from datetime import datetime, timedelta from json import JSONDecodeError, dumps from platform import system from random import choice from statistics import mean, median, mode, pstdev, stdev from string import punctuation from subprocess import PIPE, Popen, STDOUT from time import gmtime, mktime, time from urllib import parse from asyncpraw import Reddit from cv2 import GaussianBlur, dnn, imread, imwrite from deep_translator import GoogleTranslator, constants from discord import Embed, File, channel from discord.ext import commands, tasks from lyricsgenius import Genius from mendeleev import element from numpy import array, max, min, sqrt, squeeze, sum from plotly import graph_objects as go from PyPDF2 import PdfFileReader from qrcode import QRCode import config from src.utils import funcs from src.utils.base_cog import BaseCog from src.utils.page_buttons import PageButtons HCF_LIMIT = 1000000 class Utility(BaseCog, name="Utility", description="Some useful commands for getting data or calculating things."): def __init__(self, botInstance, *args, **kwargs): super().__init__(botInstance, *args, **kwargs) self.reminderIDsToDelete = set() self.remindersToAdd = [] self.client.loop.create_task(self.__generateFiles()) async def __generateFiles(self): await funcs.generateJson("reminders", {"list": []}) self.reminderLoop.start() def blurFace(self, filename: str): imgName = f"{time()}.png" prototxtPath = funcs.PATH + funcs.getResource(self.name, "deploy.prototxt") modelPath = funcs.PATH + funcs.getResource(self.name, "model.caffemodel") model = dnn.readNetFromCaffe(prototxtPath, modelPath) image = imread(filename) h, w = image.shape[:2] kernelW = (w // 7) | 1 kernelH = (h // 7) | 1 blob = dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0)) model.setInput(blob) output = squeeze(model.forward()) for i in range(0, output.shape[0]): confidence = output[i, 2] if confidence > 0.4: box = output[i, 3:7] * array([w, h, w, h]) startX, startY, endX, endY = box.astype(int) face = image[startY:endY, startX:endX] face = GaussianBlur(face, (kernelW, kernelH), 0) image[startY:endY, startX:endX] = face imwrite(f"{funcs.PATH}/temp/{imgName}", image) return imgName @tasks.loop(seconds=2.0) async def reminderLoop(self): update = False reminders = await funcs.readJson("data/reminders.json") for reminder in reminders["list"]: rtime = reminder["data"]["time"] if rtime <= int(time()) and await funcs.userIDNotBlacklisted(reminder["data"]["userID"]): try: user = self.client.get_user(reminder["data"]["userID"]) e = Embed(title="⚠️ Reminder", description=reminder["data"]["reminder"]) e.set_footer(text=f"Remind time: {str(datetime.utcfromtimestamp(rtime)).split('.')[0]} UTC") await user.send(embed=e) except: pass self.reminderIDsToDelete.add(reminder["ID"]) if reminder["ID"] in self.reminderIDsToDelete: self.reminderIDsToDelete.remove(reminder["ID"]) reminders["list"].remove(reminder) update = True for reminder in self.remindersToAdd: reminders["list"].append(reminder) self.remindersToAdd.remove(reminder) update = True if update: await funcs.dumpJson("data/reminders.json", reminders) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="reminderdel", description="Removes a reminder.", usage="<reminder ID>", aliases=["reminderdelete", "reminderemove", "removereminder", "deletereminder", "delreminder", "delremind"]) async def 
reminderdel(self, ctx, reminderID=None): if not reminderID: return await ctx.send( embed=funcs.errorEmbed( None, f"You must specify a reminder ID! See `{self.client.command_prefix}reminders` for a list of your reminders." ) ) reminders = await funcs.readJson("data/reminders.json") toremove = None for reminder in reminders["list"]: if reminder["ID"] == reminderID.casefold() and reminder["data"]["userID"] == ctx.author.id: toremove = reminder["ID"] break if toremove: self.reminderIDsToDelete.add(toremove) await ctx.reply(f"Removed reminder with ID: `{toremove}`") else: await ctx.reply( embed=funcs.errorEmbed( None, f"Unknown reminder ID. See `{self.client.command_prefix}reminders` for a list of your reminders." ) ) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="reminder", description="Creates a reminder or shows a list of your reminders.", aliases=["remind", "remindme", "reminders"], usage="[Xm/h/d (replace X with number of minutes/hours/days)] <message>") async def reminder(self, ctx, minutes=None, *, r=None): if minutes and not r: return await ctx.send(embed=funcs.errorEmbed(None, "Please leave a message!")) reminders = await funcs.readJson("data/reminders.json") now = int(time()) if not minutes and not r: yourreminders = [] for reminder in reminders["list"]: rtime = reminder["data"]["time"] if reminder["data"]["userID"] == ctx.author.id and rtime > now: e = Embed(title="Your Reminders", description=reminder["data"]["reminder"]) e.add_field(name="ID", value=f"`{reminder['ID']}`") e.add_field(name="Remind Date (UTC)", value=f'`{str(datetime.utcfromtimestamp(rtime)).split(".")[0]}`') e.add_field(name="Will Remind In", value=f'`{funcs.timeDifferenceStr(rtime, now)}`') yourreminders.append(e) if not yourreminders: yourreminders.append(Embed(title="Your Reminders", description="None")) else: for i, e in enumerate(yourreminders): e.set_footer(text="Page {:,} of {:,}".format(i + 1, len(yourreminders))) m = await ctx.reply(embed=yourreminders[0]) if len(yourreminders) > 1: await m.edit(view=PageButtons(ctx, self.client, m, yourreminders)) else: try: minutes = float(minutes) except: try: if minutes.casefold().endswith("h"): minutes = float(minutes[:-1]) * 60 elif minutes.casefold().endswith("d"): minutes = float(minutes[:-1]) * 1440 elif minutes.casefold().endswith("m"): minutes = float(minutes[:-1]) else: raise Exception except: return await ctx.reply(embed=funcs.errorEmbed(None, f"Invalid input: `{minutes}`")) if minutes > 100000000 or len(r) > 500: return await ctx.reply(embed=funcs.errorEmbed(None, "That value is too big or your input is too long.")) reminder = { "ID": funcs.randomHex(16), "data": { "userID": ctx.author.id, "time": int(minutes * 60 + now), "reminder": r } } self.remindersToAdd.append(reminder) await ctx.reply("Added reminder: {}\n\nID: `{}`\n\nI will remind you in {} ({}). Be sure to have DMs on!".format( reminder["data"]["reminder"], reminder["ID"], funcs.timeDifferenceStr(reminder["data"]["time"], now), str(datetime.utcfromtimestamp(reminder["data"]["time"])).split(".")[0] )) async def gatherLabelsAndValues(self, ctx): labels, values = [], [] while len(labels) < 25: try: await ctx.send( f"Enter name for label **{len(labels) + 1}**, `!undo` to delete previous entry," + " `!done` to move on to values, or `!cancel` to cancel." 
) entry = await self.client.wait_for( "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel and len(m.content) <= 100, timeout=60 ) except TimeoutError: break content = entry.content if content.casefold() == "!undo": try: labels.pop(-1) except: await ctx.send(embed=funcs.errorEmbed(None, "No entries.")) elif content.casefold() == "!done": break elif content.casefold() == "!cancel": return 0, 0 else: labels.append(content) if len(labels) < 2: raise Exception("Not enough labels.") while len(values) != len(labels): try: await ctx.send( f'Enter value (NOT percentage) for label **{labels[len(values)]}**, ' + '`!undo` to delete previous entry, or `!cancel` to cancel.' ) entry = await self.client.wait_for( "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel and len(m.content) <= 100, timeout=60 ) except TimeoutError: raise Exception("Not enough values.") content = entry.content if content.casefold() == "!undo": try: values.pop(-1) except: await ctx.send(embed=funcs.errorEmbed(None, "No entries.")) elif content.casefold() == "!cancel": return 0, 0 else: try: values.append(float(content)) except: await ctx.send(embed=funcs.errorEmbed(None, "Invalid value.")) return labels, values async def gatherXtitleAndYtitle(self, ctx): xtitle, ytitle = "", "" try: await ctx.send('Enter your desired x-axis title, or `!na` if you wish to leave it blank.') entry = await self.client.wait_for( "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel and len(m.content) <= 100, timeout=60 ) if entry.content.casefold() != "!na": xtitle = entry.content except TimeoutError: pass try: await ctx.send('Enter your desired y-axis title, or `!na` if you wish to leave it blank.') entry = await self.client.wait_for( "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel and len(m.content) <= 100, timeout=60 ) if entry.content.casefold() != "!na": ytitle = entry.content except TimeoutError: pass return xtitle, ytitle @staticmethod async def makeChartEmbed(ctx, fig, labels, values, imgName, title): e = Embed(title=title, description=f"Requested by: {ctx.author.mention}") for i, c in enumerate(labels): e.add_field(name=c, value=f"`{funcs.removeDotZero(values[i])}`") await funcs.funcToCoro(fig.write_image, f"{funcs.PATH}/temp/{imgName}") image = File(f"{funcs.PATH}/temp/{imgName}") e.set_image(url=f"attachment://{imgName}") return e, image @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="piechart", description="Generates a pie chart.", aliases=["pie", "piegraph"], usage="[title]") async def piechart(self, ctx, *, title: str=""): if len(title) > 100: return await ctx.reply(embed=funcs.errorEmbed(None, "Title must be 100 characters or less.")) imgName = f"{time()}.png" image = None try: labels, values = await self.gatherLabelsAndValues(ctx) if labels == 0 and values == 0: return await ctx.send("Cancelled chart generation.") except Exception as ex: return await ctx.send(embed=funcs.errorEmbed(None, str(ex))) try: fig = go.Figure(data=[go.Pie(labels=labels, values=values)]) fig.update_layout(title=title) e, image = await self.makeChartEmbed(ctx, fig, labels, values, imgName, title if title else "Pie Chart") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "An error occurred, please try again later.") await ctx.reply(embed=e, file=image) await funcs.deleteTempFile(imgName) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="linechart", description="Generates a line 
chart.", aliases=["line", "linegraph"], usage="[title]") async def linechart(self, ctx, *, title: str=""): if len(title) > 100: return await ctx.reply(embed=funcs.errorEmbed(None, "Title must be 100 characters or less.")) imgName = f"{time()}.png" image = None try: labels, values = await self.gatherLabelsAndValues(ctx) if labels == 0 and values == 0: return await ctx.send("Cancelled chart generation.") except Exception as ex: return await ctx.send(embed=funcs.errorEmbed(None, str(ex))) try: fig = go.Figure(data=[go.Scatter(x=labels, y=values)]) xtitle, ytitle = await self.gatherXtitleAndYtitle(ctx) fig.update_layout(title=title, xaxis_title=xtitle, yaxis_title=ytitle) e, image = await self.makeChartEmbed(ctx, fig, labels, values, imgName, title if title else "Line Chart") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "An error occurred, please try again later.") await ctx.reply(embed=e, file=image) await funcs.deleteTempFile(imgName) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="barchart", description="Generates a bar chart.", aliases=["bar", "bargraph"], usage="[title]") async def barchart(self, ctx, *, title: str=""): if len(title) > 100: return await ctx.reply(embed=funcs.errorEmbed(None, "Title must be 100 characters or less.")) imgName = f"{time()}.png" image = None try: labels, values = await self.gatherLabelsAndValues(ctx) if labels == 0 and values == 0: return await ctx.send("Cancelled chart generation.") except Exception as ex: return await ctx.send(embed=funcs.errorEmbed(None, str(ex))) try: fig = go.Figure(data=[go.Bar(x=labels, y=values)]) xtitle, ytitle = await self.gatherXtitleAndYtitle(ctx) fig.update_layout(title=title, xaxis_title=xtitle, yaxis_title=ytitle) e, image = await self.makeChartEmbed(ctx, fig, labels, values, imgName, title if title else "Bar Chart") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "An error occurred, please try again later.") await ctx.reply(embed=e, file=image) await funcs.deleteTempFile(imgName) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="github", description="Returns statistics about a GitHub repository.", usage='[username/repository]', aliases=["loc", "code", "linesofcode", "repository", "repo", "git", "source", "sourcecode"]) async def repository(self, ctx, *, repo: str=""): await ctx.send("Getting repository statistics. 
Please wait...") try: repo = repo.casefold().replace(" ", "") or config.githubRepo while repo.endswith("/"): repo = repo[:-1] repo = repo.split("github.com/")[1] if "github.com/" in repo else repo res = await funcs.getRequest("https://api.codetabs.com/v1/loc/?github=" + repo) e = Embed(description=f"https://github.com/{repo}") e.set_author(name=repo, icon_url="https://media.discordapp.net/attachments/771698457391136798/927918869702647808/github.png") for i in sorted(res.json(), reverse=True, key=lambda x: x["linesOfCode"])[:25]: e.add_field(name=f"{i['language']} Lines (Files)", value="`{:,} ({:,})`".format(i["linesOfCode"], i["files"])) e.set_footer(text="Note: Lines of code do not include comment or blank lines.") e.set_image(url=funcs.githubRepoPic(repo)) except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Unknown repository or server error.") await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="covid", description="Gets COVID-19 data.", aliases=["coronavirus", "corona", "covid19", "cv", "c19", "cv19"], usage="[location]") async def covid(self, ctx, *, searchtype: str=""): headers = { "x-rapidapi-host": "corona-virus-world-and-india-data.p.rapidapi.com", "x-rapidapi-key": config.rapidApiKey } try: res = await funcs.getRequest("https://corona-virus-world-and-india-data.p.rapidapi.com/api", headers=headers) data = res.json() total = data["countries_stat"] found = False if searchtype == "": total = data["world_total"] else: if searchtype.casefold() == "us" or searchtype.casefold().startswith(("united states", "america")): searchtype = "usa" elif searchtype.casefold().startswith(("united kingdom", "great britain", "britain", "england")) \ or searchtype.casefold() == "gb": searchtype = "uk" elif searchtype.casefold().startswith("hk"): searchtype = "hong kong" if searchtype.casefold().startswith(("korea", "south korea", "sk")): searchtype = "S. 
Korea" for i in total: if i["country_name"].casefold().replace(".", "") == searchtype.casefold().replace(".", ""): found = True total = i break if not found: total = data["world_total"] e = Embed(description="Statistics taken at: `" + data["statistic_taken_at"] + " UTC`") e.set_author(name=f"COVID-19 Statistics ({total['country_name'] if found else 'Global'})", icon_url="https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/" + "SARS-CoV-2_without_background.png/220px-SARS-CoV-2_without_background.png") if found: e.add_field(name="Country", value=f"`{total['country_name']}`") e.add_field(name="Total Cases", value=f"`{total['cases']}`") e.add_field(name="Total Deaths", value=f"`{total['deaths']}" + "\n({}%)`".format(round(int(total['deaths'] .replace(',', '').replace('N/A', '0')) / int(total['cases'] .replace(',', '').replace('N/A', '0')) * 100, 2))) e.add_field(name="Total Recovered", value=f"`{total['total_recovered']}" + "\n({}%)`".format(round(int(total['total_recovered'] .replace(',', '').replace('N/A', '0')) / int(total['cases'] .replace(',', '').replace('N/A', '0')) * 100, 2))) e.add_field(name="Active Cases", value=f"`{total['active_cases']}" + "\n({}%)`".format(round(int(total['active_cases'] .replace(',', '').replace('N/A', '0')) / int(total['cases'] .replace(',', '').replace('N/A', '0')) * 100, 2))) e.add_field(name="Critical Cases", value=f"`{total['serious_critical']}" + "\n({}%)`".format(round(int(total['serious_critical'] .replace(',', '').replace('N/A', '0')) / int(total['active_cases'] .replace(',', '').replace('N/A', '0')) * 100, 2))) e.add_field(name="Total Tests", value=f"`{total['total_tests']}`") else: e.add_field(name="Total Cases", value=f"`{total['total_cases']}`") e.add_field(name="Total Deaths", value=f"`{total['total_deaths']}" + "\n({}%)`".format(round(int(total['total_deaths'] .replace(',', '').replace('N/A', '0')) / int(total['total_cases'] .replace(',', '').replace('N/A', '0')) * 100, 2))) e.add_field(name="Total Recovered", value=f"`{total['total_recovered']}" + "\n({}%)`".format(round(int(total['total_recovered'] .replace(',', '').replace('N/A', '0')) / int(total['total_cases'] .replace(',', '').replace('N/A', '0')) * 100, 2))) e.add_field(name="Active Cases", value=f"`{total['active_cases']}" + "\n({}%)`".format(round(int(total['active_cases'] .replace(',', '').replace('N/A', '0')) / int(total['total_cases'] .replace(',', '').replace('N/A', '0')) * 100, 2))) e.add_field(name="Critical Cases", value=f"`{total['serious_critical']}`") e.add_field(name="New Cases Today", value=f"`{total['new_cases']}`") e.add_field(name="New Deaths Today", value=f"`{total['new_deaths']}`") e.set_footer(text="Note: The data provided may not be 100% accurate.") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Invalid input or server error.") await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="flightinfo", description="Gets information about a flight.", aliases=["flight", "flightradar"], usage="<flight number>") async def flightinfo(self, ctx, *, flightstr: str=""): if flightstr == "": e = funcs.errorEmbed(None, "Empty input.") else: ph = "Unknown" flightstr = flightstr.upper().replace(" ", "") url = "https://api.flightradar24.com/common/v1/flight/list.json?" 
params = {"fetchBy": "flight", "page": "1", "limit": "25", "query": flightstr} try: res = await funcs.getRequest(url, headers={"User-agent": "*"}, params=params) allflights = res.json() fdd = allflights["result"]["response"]["data"] dago, eta = "", "" reg, data, arrive, realarrive, depart, realdepart = ph, ph, ph, ph, ph, ph ft, duration, originname, originicao, originiata, destname, desticao, destiata = ph, ph, ph, ph, ph, ph, ph, ph flighturl = f"https://www.flightradar24.com/data/flights/{flightstr.casefold()}" status, callsign, aircraft, flightdate, airline = ph, ph, ph, ph, ph for data in fdd: callsign = data["identification"]["callsign"] if callsign is None: callsign = "None" status = str(data["status"]["text"]) aircraft = f"{str(data['aircraft']['model']['text'])} ({str(data['aircraft']['model']['code'])})" reg = data["aircraft"]["registration"] airline = data["airline"]["name"] originname = data["airport"]["origin"]["name"] originiata = data["airport"]["origin"]["code"]["iata"] originicao = data["airport"]["origin"]["code"]["icao"] destname = data["airport"]["destination"]["name"] if not originname or not destname: continue destiata = data["airport"]["destination"]["code"]["iata"] desticao = data["airport"]["destination"]["code"]["icao"] realdepart = data["time"]["real"]["departure"] depart = "Local Departure Time" realarrive = data["time"]["real"]["arrival"] arrive = "Local Arrival Time" if realarrive is None: realarrive = data["time"]["estimated"]["arrival"] if realarrive is None: continue arrive = "Estimated Local Arrival Time" duration = str(datetime.fromtimestamp(realarrive) - datetime.utcnow())[:5] if duration[1:2] == ":": duration = "0" + (duration[:4]) eta = "Estimated Flight Time Remaining" else: duration = str(datetime.fromtimestamp(realarrive) - datetime.fromtimestamp(realdepart))[:5] if duration[1:2] == ":": duration = "0" + (duration[:4]) eta = "Total Flight Duration" if eta.startswith("\nEstimated"): ft = str(datetime.utcnow() - datetime.fromtimestamp(realdepart))[:5] if ft[1:2] == ":": ft = "0" + (ft[:4]) dago = "Current Flight Time" realdepart = datetime.fromtimestamp(realdepart + data["airport"]["origin"]["timezone"]["offset"]) realarrive = datetime.fromtimestamp(realarrive + data["airport"]["destination"]["timezone"]["offset"]) flightdate = funcs.dateBirthday(realdepart.day, realdepart.month, realdepart.year, noBD=True) break imgl = res.json()["result"]["response"]["aircraftImages"] thumbnail = "https://images.flightradar24.com/opengraph/fr24_logo_twitter.png" for image in imgl: if image["registration"] != reg: continue thumbnail = list( image["images"]["thumbnails"] )[0]["src"][:-4].replace("_tb", "").replace("com/200/", "com/full/") e = Embed(title=f"Flight {flightstr}", description=flighturl) e.set_image(url=thumbnail) e.add_field(name="Date", value=f"`{flightdate}`") e.add_field(name="Callsign", value=f"`{callsign}`") e.add_field(name="Status", value=f"`{status}`") e.add_field(name="Aircraft", value=f"`{aircraft}`") e.add_field(name="Registration", value=f"`{reg} ({data['aircraft']['country']['name']})`") e.add_field(name="Airline", value=f"`{airline} ({data['airline']['code']['iata']}/{data['airline']['code']['icao']})`") e.add_field(name="Origin", value=f"`{originname} ({originiata}/{originicao})`") e.add_field(name="Destination", value=f"`{destname} ({destiata}/{desticao})`") e.add_field(name=depart, value=f"`{str(realdepart)}`") if dago: e.add_field(name=dago, value=f"`{ft}`") e.add_field(name=arrive, value=f"`{str(realarrive)}`") if eta: 
e.add_field(name=eta, value=f"`{duration}`") e.set_footer(text="Note: Flight data provided by Flightradar24 may not be 100% accurate.", icon_url="https://i.pinimg.com/564x/8c/90/8f/8c908ff985364bdba5514129d3d4e799.jpg") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Unknown flight or server error.") await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="weather", description="Finds the current weather of a location.", aliases=["w"], usage="<location>") async def weather(self, ctx, *, location: str=""): zero = -funcs.KELVIN url = f"http://api.openweathermap.org/data/2.5/weather?q={location.casefold().replace(' ', '%20')}" + \ f"&APPID={config.owmKey}" try: r = await funcs.getRequest(url) data = r.json() country = data["sys"]["country"] temp = data["main"]["temp"] + zero lastupdate = str(datetime.fromtimestamp(int(data["dt"]) + (int(data["timezone"])))) timenow = str(datetime.fromtimestamp(int(time()) + int(data["timezone"]))) temp2 = funcs.celsiusToFahrenheit(temp) high = data["main"]["temp_max"] + zero low = data["main"]["temp_min"] + zero high2 = funcs.celsiusToFahrenheit(high) low2 = funcs.celsiusToFahrenheit(low) winddegrees = float(data["wind"]["deg"]) e = Embed(title=f"{data['name']}, {country}", description=f"**{data['weather'][0]['description'].title()}**") e.add_field(name="Temperature", value="`{}°F / {}°C`".format(round(temp2, 1), round(temp, 1))) e.add_field(name="Temp Range", value="`{}°F - {}°F\n".format(round(low2, 1), round(high2, 1)) + "{}°C - {}°C`".format(round(low, 1), round(high, 1))) e.add_field(name="Humidity", value="`{}%`".format(data["main"]["humidity"])) e.add_field(name="Wind Speed", value="`{} m/s`".format(data["wind"]["speed"])) e.add_field(name="Wind Direction", value="`{}° ({})`".format(int(winddegrees), funcs.degreesToDirection(winddegrees))) e.add_field(name="Local Time", value=f"`{timenow}`") e.add_field(name="Last Updated (Local Time)", value=f"`{lastupdate}`") e.set_footer(text="Note: Weather data provided by OpenWeatherMap may not be 100% accurate.", icon_url="https://cdn.discordapp.com/attachments/771404776410972161/931460099296358470/unknown.png") e.set_thumbnail(url=f"http://openweathermap.org/img/wn/{data['weather'][0]['icon']}@2x.png") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Unknown location or server error.") await ctx.reply(embed=e) @commands.cooldown(1, 15, commands.BucketType.user) @commands.command(name="translate", description="Translates text to a different language. " + "Translation may sometimes fail due to rate limit.", aliases=["t", "translator", "trans", "tr", "translation"], usage="<language code to translate to> <input>") async def translate(self, ctx, dest=None, *, text): try: dest = dest.casefold() if dest == "zh-tw": dest = "zh-TW" elif dest == "zh-cn": dest = "zh-CN" if dest not in constants.GOOGLE_CODES_TO_LANGUAGES.keys(): e = funcs.errorEmbed( "Invalid language code!", f"Valid options:\n\n{', '.join(f'`{i}`' for i in sorted(constants.GOOGLE_CODES_TO_LANGUAGES.keys()))}" ) else: g = GoogleTranslator(source="auto", target=dest) output = await funcs.funcToCoro(g.translate, text) e = Embed(title="Translation", description=funcs.formatting(output)) except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "An error occurred. 
Invalid input?") await ctx.reply(embed=e) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="currency", description="Converts the price of one currency to another.", aliases=["fiat", "cc", "convertcurrency", "currencyconvert"], usage="<from currency> <to currency> [amount]") async def currency(self, ctx, fromC, toC, *, amount: str="1"): try: output = [fromC.upper(), toC.upper(), amount] res = await funcs.getRequest("http://api.exchangeratesapi.io/v1/latest", params={"access_key": config.exchangeratesapiKey}) data = res.json() amount = float(output[2].replace(",", "").replace(" ", "")) initialamount = amount fromCurrency = output[0] toCurrency = output[1] coingecko = "https://api.coingecko.com/api/v3/coins/markets" if fromCurrency != "EUR": try: amount /= data["rates"][fromCurrency] except: res = await funcs.getRequest( coingecko, params={"ids": self.client.tickers[fromCurrency.casefold()], "vs_currency": "EUR"} ) cgData = res.json() amount *= cgData[0]["current_price"] if toCurrency != "EUR": try: amount *= data["rates"][toCurrency] except: res = await funcs.getRequest( coingecko, params={"ids": self.client.tickers[toCurrency.casefold()], "vs_currency": "EUR"} ) cgData = res.json() if fromCurrency.upper() == toCurrency.upper(): amount = float(initialamount) else: amount /= cgData[0]["current_price"] await ctx.reply( f"The current price of **{funcs.removeDotZero(initialamount)} {fromCurrency}** in **{toCurrency}**: " + f"`{funcs.removeDotZero(amount)}`" ) except Exception as ex: funcs.printError(ctx, ex) await ctx.reply(embed=funcs.errorEmbed(None, "Invalid input or unknown currency.")) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="wiki", description="Returns a Wikipedia article.", aliases=["wikipedia"], usage="<article title (case-sensitive)>") async def wiki(self, ctx, *, page: str=""): if page == "": e = funcs.errorEmbed(None, "Cannot process empty input.") else: wikiurl = "https://en.wikipedia.org/w/api.php?format=json&action=query" + \ "&prop=extracts&exintro&explaintext&redirects=1&titles=" try: res = await funcs.getRequest(f"{wikiurl}{page.replace(' ', '_')}") data = res.json() wikipage = data["query"] if list(wikipage["pages"])[0] == "-1": res = await funcs.getRequest(f"{wikiurl}{page.replace(' ', '_').title()}") data = res.json() wikipage = data["query"] if list(wikipage["pages"])[0] == "-1": return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid article.")) if wikipage["pages"][list(wikipage["pages"])[0]]["extract"].casefold().startswith(f"{page} may refer to:\n\n"): try: splitthing = f"may refer to:\n\n" page = wikipage["pages"][list(wikipage["pages"])[0]]["extract"].split( splitthing, 1 )[1].split("\n", 1)[1].split(",", 1)[0] res = await funcs.getRequest(f"{wikiurl}{page.replace(' ', '_')}") data = res.json() wikipage = data["query"] if wikipage["pages"][list(wikipage["pages"])[0]] == "-1": return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid article.")) except IndexError: pass summary = wikipage["pages"][list(wikipage["pages"])[0]]["extract"] if len(summary) != len(wikipage["pages"][list(wikipage["pages"])[0]]["extract"][:1000]): summary = wikipage["pages"][list(wikipage["pages"])[0]]["extract"][:1000] + "..." 
e = Embed(description="https://en.wikipedia.org/wiki/" + f"{wikipage['pages'][list(wikipage['pages'])[0]]['title'].replace(' ', '_')}" ) e.set_author(name=wikipage["pages"][list(wikipage["pages"])[0]]["title"], icon_url="https://cdn.discordapp.com/attachments/659771291858894849/" + "677853982718165001/1122px-Wikipedia-logo-v2.png") e.add_field(name="Extract", value=f"```{summary}```") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Invalid input or server error.") await ctx.reply(embed=e) @commands.cooldown(1, 45, commands.BucketType.user) @commands.command(name="srctop10", aliases=["top10", "src", "speedruncom", "leaderboard", "lb", "sr"], hidden=True, description="Shows the top 10 leaderboard for speedrun.com games.", usage="[speedrun.com game abbreviation]") async def srctop10(self, ctx, *, game: str="mc"): await ctx.send("Getting speedrun.com data. Please wait...") try: gameres = await funcs.getRequest(f"https://www.speedrun.com/api/v1/games/{game.casefold().replace(' ', '')}") game = gameres.json()["data"] gameName = game["names"]["international"] categories = None for i in game["links"]: if i["rel"] == "categories": categories = i["uri"] break if not categories: raise Exception catres = await funcs.getRequest(categories) cat = catres.json()["data"] lb = None catID, catName, catURL = None, None, None for i in cat: catName = i["name"] catURL = i["weblink"] for j in i["links"]: if j["rel"] == "leaderboard": lb = j["uri"] break if lb: break if not lb: raise Exception output = f"{catURL}\n" catres = await funcs.getRequest(lb) runs = catres.json()["data"]["runs"][:10] count = 0 for i in runs: run = i["run"] count += 1 d, h, m, s, ms = funcs.timeDifferenceStr(run["times"]["primary_t"], 0, noStr=True) names = "" for p in run["players"]: try: names += p["name"] except: pres = await funcs.getRequest(p["uri"]) player = pres.json()["data"] names += player["names"]["international"] names += ", " names = names.replace("_", "\_") output += f"{count}. `{funcs.timeStr(d, h, m, s, ms)}` by [{names[:-2]}]({run['weblink']})\n" if not count: output += "No runs found." top = f"Top {count} - " if count else "" e = Embed(description=output) e.set_author(name=f"{top}{gameName} - {catName}", icon_url="https://cdn.discordapp.com/attachments/771698457391136798/842103813585240124/src.png") if count: e.set_footer(text="Please use the link above to view the full leaderboards as well as other categories.") await ctx.reply(embed=e) except Exception as ex: funcs.printError(ctx, ex) await ctx.reply(embed=funcs.errorEmbed(None, "Server error or unknown game.")) @commands.cooldown(1, 45, commands.BucketType.user) @commands.command(name="srcqueue", aliases=["queue", "speedrunqueue", "srqueue"], hidden=True, description="Shows the run queue for speedrun.com games.", usage="[speedrun.com game abbreviation]") async def srcqueue(self, ctx, *, game: str="mc"): await ctx.send("Getting speedrun.com data. 
Please wait...") try: gameres = await funcs.getRequest(f"https://www.speedrun.com/api/v1/games/{game.casefold().replace(' ', '')}") game = gameres.json()["data"] gameID = game["id"] gameName = game["names"]["international"] queue = [] categories = {} queueres = await funcs.getRequest( f"https://www.speedrun.com/api/v1/runs?game={gameID}&status=new&embed=players&max=200" ) queuedata = queueres.json() for i in queuedata["data"]: queue.append(i) cat = i["category"] if cat not in categories: catres = await funcs.getRequest(f"https://www.speedrun.com/api/v1/categories/{cat}") categories[cat] = catres.json()["data"]["name"] if queuedata["pagination"]["links"]: while queuedata["pagination"]["links"][-1]["rel"] == "next": queueres = await funcs.getRequest(queuedata["pagination"]["links"][-1]["uri"]) queuedata = queueres.json() for i in queuedata["data"]: queue.append(i) cat = i["category"] if cat not in categories: catres = await funcs.getRequest(f"https://www.speedrun.com/api/v1/categories/{cat}") categories[cat] = catres.json()["data"]["name"] if queue: output = "" outputlist = [] pagecount, count, run = 0, 0, 0 total = len(queue) / 15 for i in queue: run += 1 d, h, m, s, ms = funcs.timeDifferenceStr(i["times"]["primary_t"], 0, noStr=True) names = "" for player in i["players"]["data"]: try: names += player["names"]["international"] except: names += player["name"] names += ", " names = names.replace("_", "\_") output += f"{'{:,}'.format(run)}. [{categories[i['category']]}]({i['weblink']}) " + \ f"in `{funcs.timeStr(d, h, m, s, ms)}` by {names[:-2]}\n" count += 1 if count == 15 or run == len(queue): pagecount += 1 e = Embed(description=output) e.set_author( name=f"Unverified Runs ({'{:,}'.format(len(queue))}) - {gameName}", icon_url="https://cdn.discordapp.com/attachments/771698457391136798/842103813585240124/src.png" ) e.set_footer(text="Page {:,} of {:,}".format(pagecount, funcs.strictRounding(total))) outputlist.append(e) output = "" count = 0 m = await ctx.reply(embed=outputlist[0]) await m.edit(view=PageButtons(ctx, self.client, m, outputlist)) else: e = Embed(description="No runs found.") e.set_author(name=f"Unverified Runs ({'{:,}'.format(len(queue))}) - {gameName}", icon_url="https://cdn.discordapp.com/attachments/771698457391136798/842103813585240124/src.png") await ctx.reply(embed=e) except Exception as ex: funcs.printError(ctx, ex) await ctx.reply(embed=funcs.errorEmbed(None, "Server error or unknown game.")) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="urban", description="Looks up a term on Urban Dictionary.", aliases=["ud", "urbandictionary"], usage="<term>") async def urban(self, ctx, *, term=""): if term == "": return await ctx.reply(embed=funcs.errorEmbed(None, "Empty input.")) else: try: res = await funcs.getRequest("http://api.urbandictionary.com/v0/define", params={"term": term}) data = res.json() terms = data["list"] if not terms: return await ctx.reply(embed=funcs.errorEmbed(None, "Unknown term.")) embeds = [] pagecount = 0 for i, c in enumerate(terms): pagecount += 1 example = c["example"].replace("[", "").replace("]", "") definition = c["definition"].replace("[", "").replace("]", "") permalink = c["permalink"] word = c["word"] author = c["author"] writtenon = funcs.timeStrToDatetime(c["written_on"]) e = Embed(description=permalink) e.set_author(name=f'"{word}"', icon_url="https://cdn.discordapp.com/attachments/659771291858894849/" + "669142387330777115/urban-dictionary-android.png") e.add_field(name="Definition", 
value=funcs.formatting(definition, limit=1000)) if example: e.add_field(name="Example", value=funcs.formatting(example, limit=1000)) if author: e.add_field(name="Author", value=f"`{author}`") e.add_field(name="Submission Time (UTC)", value=f"`{writtenon}`") try: ar = round(c['thumbs_up'] / (c['thumbs_up'] + c['thumbs_down']) * 100, 2) e.set_footer( text="Approval rate: {}% ({:,} 👍 - ".format(ar, c['thumbs_up']) + "{:,} 👎)\n".format(c['thumbs_down']) + "Page {:,} of {:,}".format(i + 1, len(terms)) ) except ZeroDivisionError: e.set_footer(text="Approval rate: n/a (0 👍 - 0 👎)\nPage {:,} of {:,}".format(i + 1, len(terms))) embeds.append(e) m = await ctx.reply(embed=embeds[0]) await m.edit(view=PageButtons(ctx, self.client, m, embeds)) except Exception as ex: funcs.printError(ctx, ex) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="lyrics", description="Gets the lyrics of a song from Genius.", aliases=["lyric", "song", "genius"], usage="<song keywords>") async def lyrics(self, ctx, *, keywords): try: await ctx.send("Getting lyrics. Please wait...") try: res = await funcs.getRequest("https://api.genius.com/search", params={"q": keywords, "access_token": config.geniusToken}) data2 = res.json()["response"]["hits"][0]["result"] except: return await ctx.send(embed=funcs.errorEmbed(None, "Unknown song.")) author = data2["artist_names"] title = data2["title_with_featured"] link = data2["url"] thumbnail = data2["song_art_image_thumbnail_url"] song = await funcs.funcToCoro(Genius(config.geniusToken).search_song, author, title) originallyric = funcs.multiString(song.lyrics.replace("EmbedShare URLCopyEmbedCopy", ""), limit=2048) embeds = [] pagecount = 0 for p in originallyric: pagecount += 1 e = Embed(description=p, title=f"{author} - {title}"[:256]) e.set_thumbnail(url=thumbnail) e.add_field(name="Genius Link", value=link) e.set_footer(text="Page {:,} of {:,}".format(pagecount, len(originallyric))) embeds.append(e) m = await ctx.reply(embed=embeds[0]) await m.edit(view=PageButtons(ctx, self.client, m, embeds)) except Exception as ex: funcs.printError(ctx, ex) await ctx.reply(embed=funcs.errorEmbed(None, "Server error or song doesn't have lyrics.")) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="qrgen", description="Generates a QR code.", aliases=["qrg", "genqr", "qr", "qrc"], usage='<input> ["QRcolour=black"]\n\nNote: Add "QRcolour=black" at the end to make the QR code black.') async def qrgen(self, ctx, *, text): black = text.split(" ")[-1] == "QRcolour=black" if black: text = text[:-14] while text.endswith(" "): text = text[:-1] imgName = f"{time()}.png" image = None try: e = Embed(title="QR Code") qr = QRCode() qr.add_data(text) qr.make(fit=True) if black: img = qr.make_image(fill_color="white", back_color="black") else: img = qr.make_image(fill_color="black", back_color="white") img.save(f"{funcs.PATH}/temp/{imgName}") image = File(f"{funcs.PATH}/temp/{imgName}") e.set_image(url=f"attachment://{imgName}") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Invalid input.") await ctx.reply(embed=e, file=image) await funcs.deleteTempFile(imgName) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="qrread", description="Reads a QR code.", aliases=["qrscan", "qrr", "readqr"], usage="<image URL OR image attachment>") async def qrread(self, ctx): await ctx.send("Reading image. Please wait... 
" + "(URL embeds take longer to process than image attachments)") if not ctx.message.attachments: await sleep(3) if ctx.message.attachments or ctx.message.embeds: try: qrlink = ctx.message.attachments[0].url if ctx.message.attachments else ctx.message.embeds[0].thumbnail.url qr = await funcs.decodeQR(qrlink) e = Embed(title="QR Code Message", description=funcs.formatting(qr)) if qr \ else funcs.errorEmbed(None, "Cannot detect QR code. Maybe try making the image clearer?") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, str(ex)) else: e = funcs.errorEmbed(None, "No attachment or URL detected, please try again.") await ctx.reply(embed=e) @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="compile", description="Compiles code.", aliases=["comp"]) async def compile(self, ctx): try: res = await funcs.getRequest("https://run.glot.io/languages", verify=False) data = res.json() languages = [i["name"] for i in data] output = ", ".join(f'`{j}`' for j in languages) language = "" option = None await ctx.reply(embed=Embed(title="Please select a language below or input `quit` to quit...", description=output)) while language not in languages and language != "quit": try: option = await self.client.wait_for( "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=120 ) language = option.content.casefold().replace(" ", "").replace("#", "sharp") \ .replace("♯", "sharp").replace("++", "pp") language = "javascript" if language == "js" else language if language not in languages and language != "quit": await option.reply(embed=funcs.errorEmbed(None, "Invalid language.")) except TimeoutError: return await ctx.send("Cancelling compilation...") if language == "quit": return await option.reply("Cancelling compilation...") versionurl = f"https://run.glot.io/languages/{language}" res = await funcs.getRequest(versionurl, verify=False) data = res.json() url = data["url"] await option.reply("**You have 15 minutes to type out your code. Input `quit` to quit.**") code = None try: option = await self.client.wait_for( "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=900 ) content = option.content try: if option.attachments: content = await funcs.readTxtAttachment(option) code = content.replace("```", "").replace('“', '"').replace('”', '"').replace("‘", "'").replace("’", "'") if code == "quit": return await option.reply("Cancelling compilation...") except: pass except TimeoutError: return await ctx.send("Cancelling compilation...") await option.reply("**Please enter your desired file name including the extension.** (e.g. 
`main.py`)") try: option = await self.client.wait_for( "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=120 ) filename = option.content except TimeoutError: return await ctx.send("Cancelling compilation...") data = {"files": [{"name": filename, "content": code}]} headers = { "Authorization": f"Token {config.glotIoKey}", "Content-type": "application/json" } res = await funcs.postRequest(url=url, data=dumps(data), headers=headers, verify=False) try: data = res.json() stderr = data["stderr"] if stderr == "": await option.reply(embed=Embed(title="Compilation", description=funcs.formatting(data["stdout"] or "None"))) else: await option.reply(embed=funcs.errorEmbed(data["error"].title(), funcs.formatting(stderr))) except AttributeError: await option.reply(embed=funcs.errorEmbed(None, "Code exceeded the maximum allowed running time.")) except Exception as ex: funcs.printError(ctx, ex) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="unix", description="Converts a unix timestamp to a proper date format in GMT.", aliases=["time", "timestamp", "epoch", "gmt", "utc", "timezone"], usage="[time zone (-12-14)] [timestamp value]") async def unix(self, ctx, tz=None, timestamp=None): mins = 0 if not tz: tz = 0 else: try: tz = float(tz) if not -12 <= tz <= 14: raise Exception if tz != int(tz): mins = int((tz - int(tz)) * 60) except: return await ctx.reply(embed=funcs.errorEmbed(None, "Time zone must be -12-14 inclusive.")) td = timedelta(hours=int(tz), minutes=mins) if not timestamp: timestamp = mktime(gmtime()) dt = datetime.fromtimestamp(timestamp) + td timestamp = timegm((dt - td).timetuple()) else: try: timestamp = int(float(timestamp)) dt = datetime.utcfromtimestamp(timestamp) + td except: return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid timestamp.")) timezone = "" if not tz and not mins else f"{'+' if tz > 0 else ''}{int(tz)}{f':{abs(mins)}' if mins else ''}" await ctx.reply(funcs.formatting(str(dt) + f" (GMT{timezone})\n\nTimestamp: {int(timestamp)}")) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="scisum", aliases=["science", "sci"], hidden=True, description="Shows the science summary for the last month.") async def scisum(self, ctx): await ctx.reply("https://tiny.cc/sci-sum") @commands.cooldown(1, 5, commands.BucketType.user) @commands.command(name="dict", description="Returns the definition(s) of a word.", aliases=["dictionary", "def", "definition", "meaning", "define"], usage="<language code> <word>") async def dict(self, ctx, langcode, *, word): codes = ["en", "hi", "es", "fr", "ja", "ru", "de", "it", "ko", "pt-BR", "ar", "tr"] languages = [ "English", "Hindi", "Spanish", "French", "Japanese", "Russian", "German", "Italian", "Korean", "Brazilian Portuguese", "Arabic", "Turkish" ] langcode = langcode.casefold() if langcode != "pt-BR" else langcode if langcode not in codes: codesList = ", ".join(f"`{code}` ({languages[codes.index(code)]})" for code in codes) e = funcs.errorEmbed("Invalid language code!", f"Valid options:\n\n{codesList}") else: try: res = await funcs.getRequest(f"https://api.dictionaryapi.dev/api/v2/entries/{langcode}/{word}") data = res.json() word = data[0]["word"].title() output = "" for i in data: meanings = i["meanings"] for j in meanings: try: partOfSpeech = f' [{j["partOfSpeech"]}]' except: partOfSpeech = "" definitions = j["definitions"] for k in definitions: definition = k["definition"] output += f"- {definition}{partOfSpeech}\n" e = 
Embed(title=f'"{word}"').add_field(name="Definition(s)", value=funcs.formatting(output[:-1], limit=1000)) except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Unknown word.") await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="reddit", description="Looks up a community or user on Reddit.", aliases=["subreddit", "r", "redditor"], usage="<r/subreddit OR u/redditor>") async def reddit(self, ctx, *, inp=""): redditclient = Reddit(client_id=config.redditClientID, client_secret=config.redditClientSecret, user_agent="*") inp = inp.casefold().replace(" ", "/") inp = inp.split("reddit.com/")[1] if "reddit.com/" in inp else inp while inp.startswith("/"): inp = inp[1:] while inp.endswith("/"): inp = inp[:-1] try: icon_url = "https://www.redditinc.com/assets/images/site/reddit-logo.png" if inp.startswith("r") and "/" in inp: subreddit = await redditclient.subreddit(inp.split("/")[-1], fetch=True) if subreddit.over18 and not isinstance(ctx.channel, channel.DMChannel) and not ctx.channel.is_nsfw(): e = funcs.errorEmbed("NSFW/Over 18!", "Please view this community in an NSFW channel.") else: tags = [ i for i in [ "Link Flairs" if subreddit.can_assign_link_flair else 0, "User Flairs" if subreddit.can_assign_user_flair else 0, "Spoilers Enabled" if subreddit.spoilers_enabled else 0, "NSFW" if subreddit.over18 else 0 ] if i ] e = Embed(description=f"https://www.reddit.com/r/{subreddit.display_name}" + " ([Old Reddit](" + f"https://old.reddit.com/r/{subreddit.display_name}))") e.set_author(icon_url=icon_url, name="r/" + subreddit.display_name) if tags: e.add_field(name="Tags", value=", ".join(f"`{i}`" for i in tags)) e.set_footer(text=subreddit.public_description) dt = datetime.utcfromtimestamp(subreddit.created_utc) e.add_field(name="Creation Date", value=funcs.dateBirthday(dt.day, dt.month, dt.year)) e.add_field(name="Subscribers", value="`{:,}`".format(subreddit.subscribers)) async for submission in subreddit.new(limit=1): sauthor = submission.author or "[deleted]" if sauthor != "[deleted]": sauthor = sauthor.name e.add_field( name="Latest Post ({:,} point{}; from u/{})".format( submission.score, "" if submission.score == 1 else "s", sauthor ), value=f"https://www.reddit.com{submission.permalink}" + " ([Old Reddit](" + f"https://old.reddit.com{submission.permalink}))", inline=False ) elif inp.startswith("u") and "/" in inp: redditor = await redditclient.redditor(inp.split("/")[-1], fetch=True) try: suspended = redditor.is_suspended tags = ["Suspended"] nickname = "" except: suspended = False tags = [ i for i in [ "Verified" if redditor.has_verified_email else 0, "Reddit Employee" if redditor.is_employee else 0, "Moderator" if redditor.is_mod else 0, "Gold" if redditor.is_gold else 0, "NSFW" if redditor.subreddit["over_18"] else 0 ] if i ] nickname = redditor.subreddit["title"] if "NSFW" in tags and not isinstance(ctx.channel, channel.DMChannel) and not ctx.channel.is_nsfw(): e = funcs.errorEmbed("NSFW/Over 18!", "Please view this profile in an NSFW channel.") else: e = Embed(description=f"https://www.reddit.com/user/{redditor.name}" + " ([Old Reddit](" + f"https://old.reddit.com/user/{redditor.name}))") e.set_author(icon_url=icon_url, name="u/" + redditor.name + (f" ({nickname})" if nickname else "")) if tags: e.add_field(name="Tags", value=", ".join(f"`{i}`" for i in tags)) if not suspended: lkarma = redditor.link_karma ckarma = redditor.comment_karma trophies = await redditor.trophies() e.set_thumbnail(url=redditor.icon_img) dt = 
datetime.utcfromtimestamp(redditor.created_utc) e.add_field(name="Join Date", value=funcs.dateBirthday(dt.day, dt.month, dt.year)) e.add_field(name="Total Karma", value="`{:,}`".format(lkarma + ckarma)) e.add_field(name="Post Karma", value="`{:,}`".format(lkarma)) e.add_field(name="Comment Karma", value="`{:,}`".format(ckarma)) if trophies: e.add_field( name="Trophies ({:,})".format(len(trophies)), value=", ".join(f"`{trophy.name}`" for trophy in trophies[:50]) + ("..." if len(trophies) > 50 else ""), inline=False ) async for submission in redditor.submissions.new(limit=1): e.add_field( name=f"Latest Post (on r/{submission.subreddit.display_name}; " + f"{'{:,}'.format(submission.score)} point{'' if submission.score == 1 else 's'})", value=f"https://www.reddit.com{submission.permalink}" + " ([Old Reddit](" + f"https://old.reddit.com{submission.permalink}))", inline=False ) async for comment in redditor.comments.new(limit=1): e.add_field( name=f"Latest Comment (on r/{comment.subreddit.display_name}; " + f"{'{:,}'.format(comment.score)} point{'' if comment.score == 1 else 's'})", value=funcs.formatting(comment.body, limit=1000), inline=False ) e.set_footer(text=redditor.subreddit["public_description"]) e.set_image(url=redditor.subreddit["banner_img"]) else: e = funcs.errorEmbed("Invalid input!", 'Please use `r/"subreddit name"` or `u/"username"`.') except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Invalid search.") await ctx.reply(embed=e) @commands.cooldown(1, 1, commands.BucketType.user) @commands.command(name="calc", description="Does simple math.", aliases=["calculate", "calculator", "cal", "math", "maths", "safeeval"], usage="<input>") async def calc(self, ctx, *, inp): try: e = Embed(description=funcs.formatting(funcs.removeDotZero(funcs.evalMath(inp)))) except ZeroDivisionError: answer = [ "Stop right there, that's illegal!", "Wait hol up...", "FBI OPEN UP!!!", "LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO" + "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO" + "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOL", "You madlad...", "God damn you.", "......................................................", "Why the hell do you exist?", "Mate I think you've got issues.", "Are you okay?", "You tell me the answer.", "What is wrong with you?", "Disgraceful.", "Don't you dare.", "HOW DARE YOU?!?", "You bloody bastard...", "Do that again and I will stick that zero down your throat. Egg for breakfast, anyone?", "Get a life.", "Dio taxista Ronnosessuale dio animale porca di quella madonna vacca in calore rotta in settecento mila pezzi", "Naughty naughty naughty, you filthy old soomaka!", "Hey that's my yarbles! Give it back!", "*magic portal opens...*", "[magic humming]", "Go to the den.", "EXXXXXCCCCCCUUUUUSEEEE MEEE", "what", "wat", "wut", "Negative nothing", "屌", "No.", "no", "Der Mann sprach für seine Rechte\ner ist verstört, er ist ein egoistischer Gör!", "ENOUGH! Because of you, I almost lost my way! But everycreature here has reminded me of " + "the true power of friendship! There will always be darkness in the world, but there will " + "also always be those who find the light!", "Focusing on our differences keeps us divided! Villains and creatures use that division against us!", "SSSSHHHHHAAAAAAAAAAAHHDAAAHHHPPP", "YOU! YOU TRIPLE GREASY WALKING SECOND DINING COURSE, YOU'RE JUST A PHONY! 
YOU'RE A GIANT, MORALIST" + " PHONY WHO CAN'T TAKE CARE OF ANYONE, ESPECIALLY HIMSELF! YOU HAVE YOUR OWN DISCIPLINE UP YOUR OWN" + " ARSE AND YOU DON'T EVEN SEE IT!" ] try: answer.append( (await funcs.readTxt(funcs.getResource(self.name, "copypasta.txt"))).replace("\*", "*")[:1994] ) except Exception as ex: funcs.printError(ctx, ex) pass e = Embed(description=f"```{choice(answer)}```") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, str(ex)) await ctx.reply(embed=e) @commands.cooldown(1, 1, commands.BucketType.user) @commands.command(name="sqrt", usage="<input>", hidden=True, aliases=["square", "root"], description="Calculates the square root of a given value or math expession.") async def sqrt(self, ctx, *, val): try: e = Embed(description=funcs.formatting(funcs.removeDotZero(sqrt([funcs.evalMath(val)])[0]))) except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, str(ex)) await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="wordcount", description="Counts the number of words and characters in an input.", aliases=["lettercount", "countletter", "countchar", "countletters", "char", "chars", "letters", "charcount", "wc", "countword", "word", "words", "countwords", "letter"], usage="<input OR text attachment>") async def wordcount(self, ctx, *, inp=""): filename = f"{time()}" if ctx.message.attachments: try: inp = await funcs.readTxtAttachment(ctx.message) if not inp: raise except: try: attach = ctx.message.attachments[0] filename += f"-{attach.filename}" filepath = f"{funcs.PATH}/temp/{filename}" await attach.save(filepath) pdf = await funcs.funcToCoro(open, filepath, "rb") reader = PdfFileReader(pdf) inp = "" for page in range(reader.numPages): pageobj = await funcs.funcToCoro(reader.getPage, page - 1) inp += (await funcs.funcToCoro(pageobj.extractText)) await funcs.funcToCoro(pdf.close) except Exception as ex: funcs.printError(ctx, ex) inp = inp if not inp: return await ctx.reply(embed=funcs.errorEmbed(None, "Cannot process empty input.")) splt = funcs.replaceCharacters(inp, punctuation).split() e = Embed(title="Word Count") e.add_field(name="Characters", value="`{:,}`".format(len(inp.strip()))) e.add_field(name="Words", value="`{:,}`".format(len(splt))) e.add_field(name="Unique Words", value="`{:,}`".format(len(set(splt)))) e.set_footer(text="Note: This may not be 100% accurate.") await ctx.reply(embed=e) await funcs.deleteTempFile(filename) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="country", description="Shows information about a country.", aliases=["location", "countries", "place", "nation"], usage="<country name OR code>") async def country(self, ctx, *, country): msg = ctx.message try: try: res = await funcs.getRequest( "https://restcountries.com/v2/name/" + country.casefold().replace("_", ""), verify=False ) data = res.json() if len(data) > 1: await ctx.reply( "`Please select a number: " + f"{', '.join(str(i) + ' (' + c['name'] + ')' for i, c in enumerate(data))}`" ) try: pchoice = await self.client.wait_for( "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=20 ) msg = pchoice pchoice = int(pchoice.content) if -1 < int(pchoice.content) < len(data) else 0 except (TimeoutError, ValueError): pchoice = 0 else: pchoice = 0 data = data[pchoice] except Exception: res = await funcs.getRequest( "https://restcountries.com/v2/alpha/" + country.casefold().replace("_", ""), verify=False ) data = res.json() lat = data['latlng'][0] 
long = data['latlng'][1] e = Embed(title=f"{data['name']} ({data['alpha3Code']})") e.set_thumbnail(url=data["flags"]["png"]) e.add_field(name="Native Name", value=f"`{data['nativeName']}`") e.add_field(name="Population", value="`{:,}`".format(data["population"])) e.add_field(name="Demonym", value=f"`{data['demonym']}`") e.add_field( name="Local Currency", value=", ".join(f"`{c['name']} ({c['code']} {c['symbol']})`" for c in data["currencies"]) ) try: if data["gini"]: e.add_field(name="Gini Coefficient", value=f"`{round(data['gini'] / 100, 3)}`") except: pass try: if data["capital"]: e.add_field(name="Capital", value=f"`{data['capital']}`") except: pass e.add_field( name="Coordinates", value=f"`{str(round(lat, 2)).replace('-', '')}°{'N' if lat > 0 else 'S'}, " + f"{str(round(long, 2)).replace('-', '')}°{'E' if long > 0 else 'W'}`" ) e.add_field(name="Region", value=f"`{data['region']} ({data['subregion']})`") e.add_field(name="Land Area", value="`{:,} km² / {:,} mi²`".format(int(data["area"]), int(data["area"] * 0.386102159))) e.add_field(name="Calling Code", value=", ".join(f"`+{code}`" for code in data["callingCodes"])) e.add_field(name="Top Level Domain", value=", ".join(f"`{dom}`" for dom in data["topLevelDomain"])) e.add_field(name="Time Zones", value=", ".join(f"`{tz}`" for tz in data["timezones"])) e.set_footer(text="Note: The data provided may not be 100% accurate.") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Invalid input or server error.") await msg.reply(embed=e) @commands.cooldown(1, 1, commands.BucketType.user) @commands.command(name="ip", description="Shows information about an IP address.", aliases=["ipaddress"], hidden=True, usage="<IP address>") async def ip(self, ctx, ip): try: res = await funcs.getRequest(f"http://ip-api.com/json/{ip}") data = res.json() e = Embed(title=data["query"]) e.add_field(name="City", value=f"`{data['city']}`") e.add_field(name="Region", value=f"`{data['regionName']}`") e.add_field(name="Country", value=f"`{data['country']} ({data['countryCode']})`") e.add_field(name="Location", value=f"`{data['lat']}, {data['lon']}`") if data['zip']: e.add_field(name="Zip", value=f"`{data['zip']}`") e.add_field(name="Time Zone", value=f"`{data['timezone']}`") e.add_field(name="ISP", value=f"`{data['isp']}`") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Invalid input or server error.") await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="element", description="Shows information about a chemical element.", aliases=["elem", "chem", "chemical"], hidden=True, usage="<element symbol or name>") async def chemical(self, ctx, elementname): try: elementobj = element(elementname) except: try: elementobj = element(elementname.title()) except: return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid element.")) try: name = elementobj.name group = elementobj.group mp = elementobj.melting_point bp = elementobj.boiling_point desc = elementobj.description ar = elementobj.atomic_radius en = elementobj.electronegativity() try: fi = elementobj.ionenergies[1] except: fi = None roomtemp = funcs.KELVIN + 25 if not mp or not bp: state = "Artificial" elif mp > roomtemp: state = "Solid" elif mp < roomtemp < bp: state = "Liquid" else: state = "Gas" e = Embed(title=f"{name} ({elementobj.symbol})", description=desc if desc else "") e.set_thumbnail(url=f"https://images-of-elements.com/t/{name.casefold()}.png") e.add_field(name="Protons", value=f"`{elementobj.protons}`") 
e.add_field(name="Neutrons", value=f"`{elementobj.neutrons}`") e.add_field(name="Electrons", value=f"`{elementobj.electrons}`") e.add_field(name="Atomic Mass", value=f"`{funcs.removeDotZero(elementobj.atomic_weight)}`") e.add_field(name="Period", value=f"`{elementobj.period}`") try: gn = group.name e.add_field(name="Group", value=f"`{group.symbol}{(' - ' + gn) if gn else ''}`") except: pass if ar: e.add_field(name="Atomic Radius", value=f"`{funcs.removeDotZero(ar)}`") if en: e.add_field(name="Electronegativity", value=f"`{funcs.removeDotZero(en)}`") if fi: e.add_field(name="First Ionisation", value=f"`{funcs.removeDotZero(fi)}`") if mp: e.add_field(name="Melting Point", value=f"`{funcs.removeDotZero(mp)}`") if bp: e.add_field(name="Boiling Point", value=f"`{funcs.removeDotZero(bp)}`") e.add_field(name="State", value=f"`{state}`") e.add_field(name="Config", value=f"`{elementobj.econf}`") e.add_field(name="Discoverer", value=f"`{elementobj.discoverers}`") discoveryear = elementobj.discovery_year discoverlocation = elementobj.discovery_location if discoveryear or discoverlocation: both = bool(discoveryear and discoverlocation) e.add_field(name="Discovered In", value=f"`{discoveryear if discoveryear else ''}{' in ' if both else ''}" + f"{discoverlocation if discoverlocation else ''}`") await ctx.reply(embed=e) except Exception as ex: funcs.printError(ctx, ex) await ctx.reply(embed=funcs.errorEmbed(None, "Invalid element.")) @commands.cooldown(1, 1, commands.BucketType.user) @commands.command(name="periodic", description="Shows the periodic table.", aliases=["periotictable", "elements"], hidden=True) async def periodic(self, ctx): await funcs.sendImage(ctx, "https://media.discordapp.net/attachments/871621453521485864/882103596563431424/table.jpg") @commands.cooldown(1, 1, commands.BucketType.user) @commands.command(name="sohcahtoa", description="SOH CAH TOA.", aliases=["trigonometry", "triggernometry", "sincostan", "sinecostan", "sine", "cos", "tan"], hidden=True) async def sohcahtoa(self, ctx): await funcs.sendImage(ctx, "https://media.discordapp.net/attachments/771404776410972161/954017475668885534/unknown.png") @commands.cooldown(1, 1, commands.BucketType.user) @commands.command(name="osi", description="Shows the OSI Model.", aliases=["osimodel", "7layers"], hidden=True) async def osi(self, ctx): await funcs.sendImage(ctx, "https://cdn.discordapp.com/attachments/771404776410972161/950404988369240104/unknown.png") @commands.cooldown(1, 1, commands.BucketType.user) @commands.command(name="normalbodytemp", description="Shows the normal body temperature range chart.", aliases=["bodytemp", "nbt"], hidden=True) async def normalbodytemp(self, ctx): await funcs.sendImage(ctx, "https://cdn.discordapp.com/attachments/771404776410972161/851367517241999380/image0.jpg") @commands.cooldown(1, 2, commands.BucketType.user) @commands.command(description="Gets information and generates a citation for an article via DOI number.", aliases=["reference", "ref", "citation", "doi", "cit", "altmetric", "altmetrics", "cite", "art"], usage="<DOI number> [citation style]", name="article") async def article(self, ctx, doi, style="apa"): await ctx.send("Getting article data. 
Please wait...") doi = f'https://doi.org/{funcs.replaceCharacters(doi, ["https://doi.org/", "doi:", "doi.org/"])}'.casefold() while doi.endswith("."): doi = doi[:-1] style = style.casefold() style = "chicago-author-date" if style.startswith("chig") or style.startswith("chic") else style style = "multidisciplinary-digital-publishing-institute" if style.startswith("mdpi") else style cmd = f'curl -LH "Accept: text/x-bibliography; style={style}" "{doi}"' try: obj = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=False if system() == "Windows" else True) res = obj.stdout.read().decode("utf-8").split("\n") if res[-1]: res.append("") res = "".join(i.replace("\n", "") for i in res[4:-1]) if res.startswith(("<", " ")) or '{"status"' in res or not res: raise Exception("Invalid DOI number or server error.") while " " in res: res = res.replace(" ", " ") if "java.lang.Thread.run" in res: res = "Invalid citation style!" doi = doi.replace('"', "") desc = doi + "\nhttps://sci-hub.mksa.top/" + doi.replace("https://doi.org/", "") + "\n" e = Embed(title="Article", description=desc + funcs.formatting(res)) obj.kill() doi = doi.split("doi.org/")[1] try: altmetricdata = await funcs.getRequest("https://api.altmetric.com/v1/doi/" + doi, verify=False) altmetric = altmetricdata.json() desc += altmetric["details_url"] + "\n" e.description = desc + funcs.formatting(res) if len(altmetric["title"]) < 257: e.title = altmetric["title"] e.set_thumbnail(url=altmetric["images"]["large"]) try: e.add_field(name='Authors ({:,})'.format(len(altmetric["authors"])), value=", ".join(f"`{author}`" for author in altmetric["authors"][:10]) + ("..." if len(altmetric["authors"]) > 10 else "")) except: pass try: e.add_field(name="Journal", value=f"`{altmetric['journal']} (ISSN: {'/'.join(issn for issn in altmetric['issns'])})`") except: pass if altmetric["published_on"] < 0: pub = (datetime(1970, 1, 1) + timedelta(seconds=altmetric["published_on"])).date() else: pub = datetime.utcfromtimestamp(int(altmetric["published_on"])).date() e.add_field(name="Publish Date", value="`%s %s %s`" % (pub.day, funcs.monthNumberToName(pub.month), pub.year)) try: e.add_field(name="PMID", value=f"`{altmetric['pmid']}`") except: pass citations = [ {"field": "cited_by_msm_count", "name": "News Outlet"}, {"field": "cited_by_tweeters_count", "name": "Twitter"}, {"field": "cited_by_feeds_count", "name": "Blog"}, {"field": "cited_by_wikipedia_count", "name": "Wikipedia"}, {"field": "cited_by_videos_count", "name": "Video"}, {"field": "cited_by_rdts_count", "name": "Reddit"}, {"field": "cited_by_fbwalls_count", "name": "Facebook"}, {"field": "cited_by_gplus_count", "name": "Google+"}, {"field": "cited_by_qna_count", "name": "Q&A Thread"}, {"field": "cited_by_rh_count", "name": "Research Highlight"}, {"field": "cited_by_policies_count", "name": "Policy Source"}, {"field": "cited_by_book_reviews_count", "name": "Book Review"} ] for i in citations: try: if altmetric[i["field"]]: e.add_field(name=f"{i['name']} Mentions", value="`{:,}`".format(altmetric[i["field"]])) except: pass e.set_footer(text="Last updated: {} UTC".format(str(datetime.utcfromtimestamp(int(altmetric["last_updated"])))), icon_url="https://secure.gravatar.com/avatar/97869aff9f24c5d0e1e44b55a274631a") except JSONDecodeError: e.set_footer(text="Note: No Altmetric data available for this article.") try: dimensionsdata = await funcs.getRequest("https://metrics-api.dimensions.ai/doi/" + doi, verify=False) dimensions = dimensionsdata.json() if dimensions["times_cited"]: 
e.add_field(name="Citations", value="`{:,}`".format(dimensions["times_cited"])) if dimensions["recent_citations"]: e.add_field(name="Citations (2y)", value="`{:,}`".format(dimensions["recent_citations"])) if dimensions["times_cited"] or dimensions["recent_citations"]: e.description = f"{desc}https://badge.dimensions.ai/details/doi/{doi}\n{funcs.formatting(res)}" except: pass except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, str(ex)) await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="quartile", usage='<numbers separated with ;> ["all" to show all points]', aliases=["avg", "average", "mean", "median", "mode", "q1", "q2", "q3", "range", "sd", "iqr", "quartiles", "boxplot", "box", "qir"], description="Computes statistical data from a set of numerical values.") async def quartile(self, ctx, *, items): imgName = f"{time()}.png" image = None try: if ";" not in items: items = items.replace(",", ";") else: items = items.replace(",", "") if items.casefold().endswith("all"): boxpoints = "all" items = items[:-3] while items.endswith(" "): items = items[:-1] else: boxpoints = False while items.startswith(";"): items = items[1:] while items.endswith(";"): items = items[:-1] while " " in items: items = items.replace(" ", " ") while "; ;" in items: items = items.replace("; ;", ";") while ";;" in items: items = items.replace(";;", ";") itemslist = items.split(";") if "" in itemslist: raise Exception("Invalid input. Please separate the items with `;`.") while " " in itemslist: itemslist.remove(" ") data = array(list(map(float, [i.strip() for i in itemslist]))) data.sort() halflist = int(len(data) // 2) q3 = median(data[-halflist:]) q1 = median(data[:halflist]) e = Embed(title="Quartile Calculator", description=f'Requested by: {ctx.author.mention}\n' + f'{funcs.formatting("; ".join(funcs.removeDotZero(float(i)) for i in data))}') e.add_field(name="Total Values", value="`{:,}`".format(len(data))) e.add_field(name="Mean", value=f'`{funcs.removeDotZero(mean(data))}`') try: e.add_field(name="Mode", value=f'`{funcs.removeDotZero(mode(data))}`') except: e.add_field(name="Mode", value="`None`") e.add_field(name="Q1", value=f'`{funcs.removeDotZero(q1)}`') e.add_field(name="Median (Q2)", value=f'`{funcs.removeDotZero(median(data))}`') e.add_field(name="Q3", value=f'`{funcs.removeDotZero(q3)}`') e.add_field(name="Interquartile Range", value=f'`{funcs.removeDotZero(q3 - q1)}`') e.add_field(name="Range", value=f'`{funcs.removeDotZero(max(data) - min(data))}`') e.add_field(name="Population SD", value=f'`{funcs.removeDotZero(pstdev(data))}`') e.add_field(name="Sample SD", value=f'`{funcs.removeDotZero(stdev(data))}`') e.add_field(name="Minimum Value", value=f'`{funcs.removeDotZero(min(data))}`') e.add_field(name="Maximum Value", value=f'`{funcs.removeDotZero(max(data))}`') e.add_field(name="Sum", value=f'`{funcs.removeDotZero(sum(data))}`') fig = go.Figure() fig.add_trace(go.Box(y=data, quartilemethod="linear", name="Linear Quartile")) fig.add_trace(go.Box(y=data, quartilemethod="inclusive", name="Inclusive Quartile")) fig.add_trace(go.Box(y=data, quartilemethod="exclusive", name="Exclusive Quartile")) fig.update_traces(boxpoints=boxpoints, jitter=0.3) await funcs.funcToCoro(fig.write_image, f"{funcs.PATH}/temp/{imgName}") image = File(f"{funcs.PATH}/temp/{imgName}") e.set_image(url=f"attachment://{imgName}") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, str(ex)) await ctx.reply(embed=e, file=image) await 
funcs.deleteTempFile(imgName) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="hcf", usage="<value #1 up to {:,}> <value #2 up to {:,}>".format(HCF_LIMIT, HCF_LIMIT), aliases=["lcm", "gcf", "gcd", "hcd", "lcf", "hcm"], description="Calculates the highest common factor and lowest common multiple of two values.") async def hcf(self, ctx, number1, number2): try: a = int(float(number1)) b = int(float(number2)) if a > HCF_LIMIT or b > HCF_LIMIT: raise ValueError lst = sorted([a, b]) a, b = lst[0], lst[1] hcf = 1 for i in range(2, a + 1): if not a % i and not b % i: hcf = i lcm = int((a * b) / hcf) await ctx.reply(f'The HCF of {funcs.removeDotZero(a)} and ' + f'{funcs.removeDotZero(b)} is: **{funcs.removeDotZero(hcf)}' + f'**\nThe LCM of {funcs.removeDotZero(a)} and ' + f'{funcs.removeDotZero(b)} is: **{funcs.removeDotZero(lcm)}**') except ValueError: await ctx.reply(embed=funcs.errorEmbed(None, "Invalid input. Values must be {:,} or below.".format(HCF_LIMIT))) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="zodiac", description="Converts a date to its zodiac sign.", hidden=True, aliases=["starsign", "horoscope", "zs"], usage="[month] [day]\n\nAlternative usage(s):\n\n- <zodiac sign>") async def zodiac(self, ctx, month: str="", day: str=""): try: if month and not day: try: z = funcs.getZodiacInfo(month) e = Embed(title=z[2] + f" :{z[2].casefold().replace('scorpio', 'scorpius')}:") e.add_field(name="Dates", value=f"`{z[1]}`") e.set_image(url=z[0]) except Exception as ex: e = funcs.errorEmbed("Invalid zodiac!", str(ex)) else: if not month: month = month or datetime.now().month if not day: day = day or datetime.now().day try: month = funcs.monthNumberToName(int(month)) except: month = funcs.monthNumberToName(funcs.monthNameToNumber(month)) monthint = int(funcs.monthNameToNumber(month)) try: day = int(day) except: day = int(day[:-2]) date = f"{month} {funcs.valueToOrdinal(day)}" if day < 1 or day > 31 and monthint in [1, 3, 5, 7, 8, 10, 12] \ or day > 30 and monthint in [4, 6, 9, 11] \ or day > 29 and monthint == 2: raise Exception z = funcs.dateToZodiac(date) e = Embed(title=f"{date} Zodiac Sign :{z.casefold().replace('scorpio', 'scorpius')}:") e.set_image(url=funcs.getZodiacInfo(z)[0]) e.set_footer(text=z) except Exception: e = funcs.errorEmbed(None, "Invalid input.") await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="chinesezodiac", description="Converts a year to its Chinese zodiac sign.", usage="[year]", aliases=["cz", "zodiacchinese", "year", "yearofthe", "ly", "leap", "leapyear"], hidden=True) async def chinesezodiac(self, ctx, year: str=""): year = year or datetime.now().year try: year = int(year) e = Embed( title=f"{str(year) if year > 1 else str(year * -1 + 1) + ' B.C.'} Chinese Zodiac Sign", description=funcs.formatting(funcs.yearToChineseZodiac(year)) ) ly = str(funcs.leapYear(year)) e.add_field(name="Leap Year", value=f"`{ly if ly != 'None' else 'Unknown'}`") except Exception: e = funcs.errorEmbed(None, "Invalid input.") await ctx.reply(embed=e) @commands.cooldown(1, 2, commands.BucketType.user) @commands.command(description="Shows how far apart two dates are.", aliases=["weekday", "day", "days", "dates", "age", "today"], usage="[date #1 day] [date #1 month] [date #1 year] [date #2 day] [date #2 month] [date #2 year]\n\n" + "Alternative usage(s):\n\n- <days (+/-) from today OR weeks (+/- ending with w) from today>\n\n" + "- <date day> <date month> <date year> <days (+/-) from date OR 
weeks (+/- ending with w) from date>", name="date") async def date(self, ctx, day: str="", month: str="", year: str="", day2: str="", month2: str="", year2: str=""): today = datetime.today() try: if day and not month and not year and not day2 and not month2 and not year2: try: day1int = int(day) except ValueError: day1int = int(day[:-1]) * 7 neg1 = day1int < 0 dateobj = datetime.today() + timedelta(days=day1int) month2 = month2 or datetime.now().month day2 = day2 or datetime.now().day year2 = year2 or datetime.now().year try: month2 = funcs.monthNumberToName(int(month2)) except: month2 = funcs.monthNumberToName(funcs.monthNameToNumber(month2)) dateobj2 = datetime(int(year2), int(funcs.monthNameToNumber(month2)), int(day2)) else: neg1 = False month = month or datetime.now().month day = day or datetime.now().day year = year or datetime.now().year try: month = funcs.monthNumberToName(int(month)) except: month = funcs.monthNumberToName(funcs.monthNameToNumber(month)) dateobj = datetime(int(year), int(funcs.monthNameToNumber(month)), int(day)) if day2 and not month2 and not year2: try: day2int = int(day2) except ValueError: day2int = int(day2[:-1]) * 7 dateobj2 = dateobj + timedelta(days=day2int) else: if not month2: month2 = month2 or datetime.now().month if not day2: day2 = day2 or datetime.now().day if not year2: year2 = year2 or datetime.now().year try: month2 = funcs.monthNumberToName(int(month2)) except: month2 = funcs.monthNumberToName(funcs.monthNameToNumber(month2)) dateobj2 = datetime(int(year2), int(funcs.monthNameToNumber(month2)), int(day2)) dateobjs = sorted([dateobj, dateobj2]) delta = dateobjs[1] - dateobjs[0] daysint = delta.days + (1 if neg1 else 0) if dateobj.date() != today.date() and dateobj2.date() != today.date(): e = Embed(title="Two Dates") e.add_field( name="Date #1", value="`%s, %s %s %s`" % ( funcs.weekdayNumberToName(dateobjs[0].weekday()), dateobjs[0].day, funcs.monthNumberToName(dateobjs[0].month), dateobjs[0].year ) ) e.add_field( name="Date #2", value="`%s, %s %s %s`" % ( funcs.weekdayNumberToName(dateobjs[1].weekday()), dateobjs[1].day, funcs.monthNumberToName(dateobjs[1].month), dateobjs[1].year ) ) hastoday = False else: hastoday = True if today.date() == dateobj.date(): e = Embed( title=f"{funcs.weekdayNumberToName(dateobj2.weekday())}, " + f"{dateobj2.day} {funcs.monthNumberToName(dateobj2.month)} {dateobj2.year}" ) else: e = Embed( title=f"{funcs.weekdayNumberToName(dateobj.weekday())}, " + f"{dateobj.day} {funcs.monthNumberToName(dateobj.month)} {dateobj.year}" ) if daysint: years, months, daysfinal, monthsfinal, daysint = funcs.dateDifference(dateobjs[0].date(), dateobjs[1].date()) res = f"== {'Difference From Today' if hastoday else 'Time Difference'} ==\n\n" if years: res += "{:,} year{}, {} month{}, and {} day{}\nor ".format( years, "" if years == 1 else "s", months, "" if months == 1 else "s", daysfinal, "" if daysfinal == 1 else "s" ) if monthsfinal: res += "{:,} month{} and {} day{}\nor ".format( monthsfinal, "" if monthsfinal == 1 else "s", daysfinal, "" if daysfinal == 1 else "s" ) res += "{:,} day{}".format(daysint, "" if daysint == 1 else "s") e.description = funcs.formatting(res) else: e.description = funcs.formatting("Today") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Invalid input.") await ctx.reply(embed=e) @commands.cooldown(1, 1, commands.BucketType.user) @commands.command(name="iss", description="Gets information about the International Space Station and all humans in space.", aliases=["space"], 
hidden=True) async def iss(self, ctx): try: issdata = await funcs.getRequest("http://api.open-notify.org/iss-now.json", verify=False) iss = issdata.json()["iss_position"] hisdata = await funcs.getRequest("http://api.open-notify.org/astros.json", verify=False) his = hisdata.json()["people"] dt = datetime(1998, 11, 20).date() e = Embed(description="https://en.wikipedia.org/wiki/International_Space_Station") e.set_author(name="The International Space Station", icon_url="https://upload.wikimedia.org/wikipedia/commons/thumb/1/15/ISS_emblem.png/195px-ISS_emblem.png") e.add_field(name="Location", value=f"`{iss['latitude']}, {iss['longitude']}`") e.add_field(name="Launch Date", value=funcs.dateBirthday(dt.day, dt.month, dt.year)) e.add_field(name="Speed", value="`7.66 km/s (27,600 km/h or 17,100 mph)`") if his: e.add_field(name="Humans in Space ({:,})".format(len(his)), inline=False, value=", ".join( f"`{i['name']} ({i['craft']})`" for i in sorted(his, key=lambda x: x["craft"]) )[:800].rsplit("`, ", 1)[0] + "`") e.set_image(url="https://cdn.discordapp.com/attachments/771698457391136798/926876797759537192/unknown.png") except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Server error.") await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(usage="<note #1 with octave (e.g. F#2)> <note #2 with octave (e.g. G5)>", hidden=True, aliases=["octave", "note", "notes", "semitone", "semitones", "vocalrange", "octaves", "notesrange"], name="noterange", description="Shows the range in octaves and semitones between two given musical notes.") async def noterange(self, ctx, *, noterange): try: while " " in noterange: noterange = noterange.replace(" ", " ") note1, note2 = funcs.replaceCharacters(noterange.strip().replace(",", ""), [" - ", " — ", "—"], " ").split(" ") notes = sorted([funcs.noteFinder(note1), funcs.noteFinder(note2)], key=lambda x: x[1]) diff = notes[1][1] - notes[0][1] if not diff: raise Exception else: octaves = diff // 12 semitones = diff % 12 andsemitones = f" and {semitones} semitone{'' if semitones == 1 else 's'}" octavestr = f"{'{:,}'.format(octaves)} octave{'' if octaves == 1 else 's'}{andsemitones if semitones else ''}\nor " e = Embed(title=f"{notes[0][0]} — {notes[1][0]}", description=funcs.formatting( f"== Note Range ==\n\n{octavestr if octaves else ''}{'{:,}'.format(diff)} semitone" + f"{'' if diff == 1 else 's'}" )) except Exception as ex: funcs.printError(ctx, ex) e = funcs.errorEmbed(None, "Invalid input.") e.set_footer(text="Notes: " + ", ".join(i for i in funcs.MUSICAL_NOTES)) await ctx.reply(embed=e) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(description="Adds a timestamp to a YouTube video link, " + "useful for mobile users who cannot copy links with timestamps.", hidden=True, aliases=["yt", "ytts", "ytt"], usage="<YouTube video link> <timestamp>", name="yttimestamp") async def yttimestamp(self, ctx, link, timestamp): if "youtu" not in link.casefold(): return await ctx.reply(embed=funcs.errorEmbed(None, "Not a YouTube link.")) s = 0 try: for i in range(timestamp.count(":") + 1): try: spl = timestamp.rsplit(":", 1) val = int(spl[1]) timestamp = spl[0] except IndexError: val = int(timestamp) s += val * 60 ** i except: return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid input.")) if "youtu.be" in link.casefold(): link = link.split('?')[0] + "?" 
else: link = link.split('&')[0] + "&" await ctx.reply(f"<{link}t={s}>") @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(description="Shows the age timeline of a hypothetical person born in a certain year up until adulthood.", usage="[year (1500-2500)]\n\nAlternative usage(s):\n\n- <age (0-100)>", name="agelist", hidden=True) async def agelist(self, ctx, year=""): nowyear = datetime.today().year if not year: year = str(nowyear - 18) try: year = funcs.evalMath(year.replace(",", "")) isage = 0 <= year <= 100 if not 1500 <= year <= 2500 and not isage: return await ctx.reply( embed=funcs.errorEmbed(None, "Year must be 1500-2500 inclusive, and age must be 0-100 inclusive.") ) if isage: year = nowyear - year except: return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid year.")) notableyears = {1: "infant", 2: "toddler", 4: "young child", 7: "child", 10: "older child", 13: "teenager", 18: "adult"} res = f"Born in {year}:\n" isfuture = False for age in range(0, 26): currentyear = year + age if currentyear > nowyear and not isfuture: res += "\n" if age else "" res += "\n== Future ==\n" isfuture = True res += f"\n- {currentyear}: {'baby' if not age else f'{age - 1}-{age}'}" if age in notableyears: res += f" ({notableyears[age]})" await ctx.reply(funcs.formatting(res)) @commands.cooldown(1, 3, commands.BucketType.user) @commands.command(name="google", description="Generates search URLs for Google, Bing, and DuckDuckGo.", aliases=["search", "ddg", "duckduckgo", "lookup", "bing"], usage="<keywords>") async def google(self, ctx, *, inp: str=""): if not inp: return await ctx.reply(embed=funcs.errorEmbed(None, "Empty input.")) param = parse.urlencode({"q": inp}) view = funcs.newButtonView( 2, label="Google", url=f"https://www.google.com/search?{param}", emoji=self.client.emoji["google"] ) view = funcs.newButtonView( 2, label="Bing", url=f"https://www.bing.com/search?{param}", emoji=self.client.emoji["bing"], view=view ) await ctx.reply( f"Use the buttons below to search for `{inp}`.", view=funcs.newButtonView( 2, label="DuckDuckGo", url=f"https://www.duckduckgo.com/?{param}", emoji=self.client.emoji["ddg"], view=view ) ) @commands.cooldown(1, 15, commands.BucketType.user) @commands.command(name="wolfram", description="Queries things using the Wolfram|Alpha API.", aliases=["wolf", "wa", "wolframalpha", "query"], usage="<input>") async def wolfram(self, ctx, *, inp: str=""): if not inp: return await ctx.reply(embed=funcs.errorEmbed(None, "Empty input.")) else: await ctx.send("Querying. 
Please wait...") try: params = {"appid": config.wolframID, "output": "json", "lang": "en", "input": inp} res = await funcs.getRequest("http://api.wolframalpha.com/v2/query", params=params) data = res.json()["queryresult"] e = Embed() e.set_author(icon_url="https://media.discordapp.net/attachments/771404776410972161/929386312765669376/wolfram.png", name="Wolfram|Alpha Query") if data["success"]: imgs = [] for i, c in enumerate(data["pods"]): if c["subpods"][0]["plaintext"] and i < 25: e.add_field(name=c["title"], value=funcs.formatting(c["subpods"][0]["plaintext"], limit=200), inline=False) try: imgs.append((c["subpods"][0]["img"]["src"], c["title"])) except: pass embeds = [] for i, c in enumerate(imgs): emb = e.copy() emb.set_image(url=c[0]) emb.set_footer(text="{}\nPage {:,} of {:,}".format(c[1], i + 1, len(imgs))) embeds.append(emb) m = await ctx.reply(embed=embeds[0]) return await m.edit(view=PageButtons(ctx, self.client, m, embeds)) else: try: e.add_field(name="Did You Mean", value=", ".join(f"`{i['val']}`" for i in data["didyoumeans"][:20])) except: e.add_field(name="Tips", value=funcs.formatting("Check your spelling, and use English")) return await ctx.reply(embed=e) except Exception as ex: funcs.printError(ctx, ex) return await ctx.reply(embed=funcs.errorEmbed(None, "Server error or query limit reached.")) @commands.cooldown(1, 10, commands.BucketType.user) @commands.command(name="blurface", description="Detects faces in an image and blurs them.", hidden=True, aliases=["faceblur", "blurfaces", "anonymize", "anonymise", "blur"], usage="<image attachment>") async def blurface(self, ctx): if not ctx.message.attachments: return await ctx.reply(embed=funcs.errorEmbed(None, "No attachment detected.")) await ctx.send("Blurring faces. Please wait...") await funcs.useImageFunc(ctx, self.blurFace) setup = Utility.setup
[ "urllib.parse.urlencode", "src.utils.funcs.dateToZodiac", "cv2.dnn.readNetFromCaffe", "src.utils.funcs.errorEmbed", "platform.system", "asyncio.sleep", "src.utils.funcs.monthNameToNumber", "src.utils.funcs.leapYear", "src.utils.funcs.printError", "cv2.imread", "time.gmtime", "src.utils.funcs.valueToOrdinal", "plotly.graph_objects.Figure", "numpy.sum", "src.utils.funcs.timeDifferenceStr", "datetime.datetime.utcfromtimestamp", "src.utils.funcs.timeStrToDatetime", "src.utils.funcs.generateJson", "src.utils.funcs.dateBirthday", "src.utils.funcs.noteFinder", "src.utils.funcs.funcToCoro", "plotly.graph_objects.Pie", "src.utils.funcs.githubRepoPic", "numpy.max", "src.utils.funcs.timeStr", "PyPDF2.PdfFileReader", "discord.File", "src.utils.funcs.getRequest", "src.utils.funcs.newButtonView", "src.utils.funcs.readTxtAttachment", "src.utils.funcs.monthNumberToName", "cv2.GaussianBlur", "datetime.datetime.fromtimestamp", "src.utils.funcs.getZodiacInfo", "deep_translator.GoogleTranslator", "src.utils.funcs.formatting", "datetime.datetime.today", "discord.ext.commands.command", "datetime.datetime", "plotly.graph_objects.Bar", "src.utils.funcs.decodeQR", "src.utils.funcs.dumpJson", "json.dumps", "numpy.min", "src.utils.funcs.yearToChineseZodiac", "discord.ext.tasks.loop", "src.utils.funcs.replaceCharacters", "src.utils.funcs.useImageFunc", "asyncpraw.Reddit", "deep_translator.constants.GOOGLE_CODES_TO_LANGUAGES.keys", "mendeleev.element", "time.time", "src.utils.funcs.degreesToDirection", "statistics.mean", "cv2.imwrite", "qrcode.QRCode", "src.utils.funcs.userIDNotBlacklisted", "plotly.graph_objects.Box", "datetime.datetime.utcnow", "lyricsgenius.Genius", "statistics.stdev", "src.utils.funcs.getResource", "src.utils.page_buttons.PageButtons", "src.utils.funcs.strictRounding", "numpy.array", "src.utils.funcs.deleteTempFile", "datetime.timedelta", "src.utils.funcs.evalMath", "src.utils.funcs.celsiusToFahrenheit", "plotly.graph_objects.Scatter", "statistics.pstdev", "discord.Embed", "cv2.dnn.blobFromImage", "random.choice", "src.utils.funcs.randomHex", "src.utils.funcs.sendImage", "src.utils.funcs.removeDotZero", "statistics.median", "datetime.datetime.now", "statistics.mode", "discord.ext.commands.cooldown", "src.utils.funcs.readJson" ]
[((2643, 2666), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'seconds': '(2.0)'}), '(seconds=2.0)\n', (2653, 2666), False, 'from discord.ext import commands, tasks\n'), ((3899, 3948), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (3916, 3948), False, 'from discord.ext import commands, tasks\n'), ((3954, 4165), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""reminderdel"""', 'description': '"""Removes a reminder."""', 'usage': '"""<reminder ID>"""', 'aliases': "['reminderdelete', 'reminderemove', 'removereminder', 'deletereminder',\n 'delreminder', 'delremind']"}), "(name='reminderdel', description='Removes a reminder.',\n usage='<reminder ID>', aliases=['reminderdelete', 'reminderemove',\n 'removereminder', 'deletereminder', 'delreminder', 'delremind'])\n", (3970, 4165), False, 'from discord.ext import commands, tasks\n'), ((5231, 5280), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (5248, 5280), False, 'from discord.ext import commands, tasks\n'), ((5286, 5522), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""reminder"""', 'description': '"""Creates a reminder or shows a list of your reminders."""', 'aliases': "['remind', 'remindme', 'reminders']", 'usage': '"""[Xm/h/d (replace X with number of minutes/hours/days)] <message>"""'}), "(name='reminder', description=\n 'Creates a reminder or shows a list of your reminders.', aliases=[\n 'remind', 'remindme', 'reminders'], usage=\n '[Xm/h/d (replace X with number of minutes/hours/days)] <message>')\n", (5302, 5522), False, 'from discord.ext import commands, tasks\n'), ((12434, 12483), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (12451, 12483), False, 'from discord.ext import commands, tasks\n'), ((12489, 12610), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""piechart"""', 'description': '"""Generates a pie chart."""', 'aliases': "['pie', 'piegraph']", 'usage': '"""[title]"""'}), "(name='piechart', description='Generates a pie chart.',\n aliases=['pie', 'piegraph'], usage='[title]')\n", (12505, 12610), False, 'from discord.ext import commands, tasks\n'), ((13644, 13693), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (13661, 13693), False, 'from discord.ext import commands, tasks\n'), ((13699, 13824), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""linechart"""', 'description': '"""Generates a line chart."""', 'aliases': "['line', 'linegraph']", 'usage': '"""[title]"""'}), "(name='linechart', description='Generates a line chart.',\n aliases=['line', 'linegraph'], usage='[title]')\n", (13715, 13824), False, 'from discord.ext import commands, tasks\n'), ((14961, 15010), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (14978, 15010), False, 'from discord.ext import commands, tasks\n'), ((15016, 15137), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""barchart"""', 'description': '"""Generates a bar chart."""', 'aliases': "['bar', 'bargraph']", 'usage': '"""[title]"""'}), "(name='barchart', description='Generates a bar chart.',\n aliases=['bar', 'bargraph'], 
usage='[title]')\n", (15032, 15137), False, 'from discord.ext import commands, tasks\n'), ((16268, 16317), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (16285, 16317), False, 'from discord.ext import commands, tasks\n'), ((16323, 16553), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""github"""', 'description': '"""Returns statistics about a GitHub repository."""', 'usage': '"""[username/repository]"""', 'aliases': "['loc', 'code', 'linesofcode', 'repository', 'repo', 'git', 'source',\n 'sourcecode']"}), "(name='github', description=\n 'Returns statistics about a GitHub repository.', usage=\n '[username/repository]', aliases=['loc', 'code', 'linesofcode',\n 'repository', 'repo', 'git', 'source', 'sourcecode'])\n", (16339, 16553), False, 'from discord.ext import commands, tasks\n'), ((17800, 17849), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (17817, 17849), False, 'from discord.ext import commands, tasks\n'), ((17855, 18017), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""covid"""', 'description': '"""Gets COVID-19 data."""', 'aliases': "['coronavirus', 'corona', 'covid19', 'cv', 'c19', 'cv19']", 'usage': '"""[location]"""'}), "(name='covid', description='Gets COVID-19 data.', aliases=[\n 'coronavirus', 'corona', 'covid19', 'cv', 'c19', 'cv19'], usage=\n '[location]')\n", (17871, 18017), False, 'from discord.ext import commands, tasks\n'), ((23785, 23834), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (23802, 23834), False, 'from discord.ext import commands, tasks\n'), ((23840, 23992), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""flightinfo"""', 'description': '"""Gets information about a flight."""', 'aliases': "['flight', 'flightradar']", 'usage': '"""<flight number>"""'}), "(name='flightinfo', description=\n 'Gets information about a flight.', aliases=['flight', 'flightradar'],\n usage='<flight number>')\n", (23856, 23992), False, 'from discord.ext import commands, tasks\n'), ((29867, 29916), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (29884, 29916), False, 'from discord.ext import commands, tasks\n'), ((29922, 30055), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""weather"""', 'description': '"""Finds the current weather of a location."""', 'aliases': "['w']", 'usage': '"""<location>"""'}), "(name='weather', description=\n 'Finds the current weather of a location.', aliases=['w'], usage=\n '<location>')\n", (29938, 30055), False, 'from discord.ext import commands, tasks\n'), ((32416, 32466), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(15)', 'commands.BucketType.user'], {}), '(1, 15, commands.BucketType.user)\n', (32433, 32466), False, 'from discord.ext import commands, tasks\n'), ((32472, 32743), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""translate"""', 'description': "('Translates text to a different language. 
' +\n 'Translation may sometimes fail due to rate limit.')", 'aliases': "['t', 'translator', 'trans', 'tr', 'translation']", 'usage': '"""<language code to translate to> <input>"""'}), "(name='translate', description=\n 'Translates text to a different language. ' +\n 'Translation may sometimes fail due to rate limit.', aliases=['t',\n 'translator', 'trans', 'tr', 'translation'], usage=\n '<language code to translate to> <input>')\n", (32488, 32743), False, 'from discord.ext import commands, tasks\n'), ((33748, 33797), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (33765, 33797), False, 'from discord.ext import commands, tasks\n'), ((33803, 34022), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""currency"""', 'description': '"""Converts the price of one currency to another."""', 'aliases': "['fiat', 'cc', 'convertcurrency', 'currencyconvert']", 'usage': '"""<from currency> <to currency> [amount]"""'}), "(name='currency', description=\n 'Converts the price of one currency to another.', aliases=['fiat', 'cc',\n 'convertcurrency', 'currencyconvert'], usage=\n '<from currency> <to currency> [amount]')\n", (33819, 34022), False, 'from discord.ext import commands, tasks\n'), ((36015, 36064), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (36032, 36064), False, 'from discord.ext import commands, tasks\n'), ((36070, 36212), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""wiki"""', 'description': '"""Returns a Wikipedia article."""', 'aliases': "['wikipedia']", 'usage': '"""<article title (case-sensitive)>"""'}), "(name='wiki', description='Returns a Wikipedia article.',\n aliases=['wikipedia'], usage='<article title (case-sensitive)>')\n", (36086, 36212), False, 'from discord.ext import commands, tasks\n'), ((39003, 39053), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(45)', 'commands.BucketType.user'], {}), '(1, 45, commands.BucketType.user)\n', (39020, 39053), False, 'from discord.ext import commands, tasks\n'), ((39059, 39297), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""srctop10"""', 'aliases': "['top10', 'src', 'speedruncom', 'leaderboard', 'lb', 'sr']", 'hidden': '(True)', 'description': '"""Shows the top 10 leaderboard for speedrun.com games."""', 'usage': '"""[speedrun.com game abbreviation]"""'}), "(name='srctop10', aliases=['top10', 'src', 'speedruncom',\n 'leaderboard', 'lb', 'sr'], hidden=True, description=\n 'Shows the top 10 leaderboard for speedrun.com games.', usage=\n '[speedrun.com game abbreviation]')\n", (39075, 39297), False, 'from discord.ext import commands, tasks\n'), ((42011, 42061), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(45)', 'commands.BucketType.user'], {}), '(1, 45, commands.BucketType.user)\n', (42028, 42061), False, 'from discord.ext import commands, tasks\n'), ((42067, 42275), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""srcqueue"""', 'aliases': "['queue', 'speedrunqueue', 'srqueue']", 'hidden': '(True)', 'description': '"""Shows the run queue for speedrun.com games."""', 'usage': '"""[speedrun.com game abbreviation]"""'}), "(name='srcqueue', aliases=['queue', 'speedrunqueue',\n 'srqueue'], hidden=True, description=\n 'Shows the run queue for speedrun.com games.', usage=\n '[speedrun.com game abbreviation]')\n", (42083, 42275), False, 'from 
discord.ext import commands, tasks\n'), ((46110, 46159), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (46127, 46159), False, 'from discord.ext import commands, tasks\n'), ((46165, 46307), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""urban"""', 'description': '"""Looks up a term on Urban Dictionary."""', 'aliases': "['ud', 'urbandictionary']", 'usage': '"""<term>"""'}), "(name='urban', description=\n 'Looks up a term on Urban Dictionary.', aliases=['ud',\n 'urbandictionary'], usage='<term>')\n", (46181, 46307), False, 'from discord.ext import commands, tasks\n'), ((48853, 48902), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (48870, 48902), False, 'from discord.ext import commands, tasks\n'), ((48908, 49064), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""lyrics"""', 'description': '"""Gets the lyrics of a song from Genius."""', 'aliases': "['lyric', 'song', 'genius']", 'usage': '"""<song keywords>"""'}), "(name='lyrics', description=\n 'Gets the lyrics of a song from Genius.', aliases=['lyric', 'song',\n 'genius'], usage='<song keywords>')\n", (48924, 49064), False, 'from discord.ext import commands, tasks\n'), ((50694, 50743), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (50711, 50743), False, 'from discord.ext import commands, tasks\n'), ((50749, 50975), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""qrgen"""', 'description': '"""Generates a QR code."""', 'aliases': "['qrg', 'genqr', 'qr', 'qrc']", 'usage': '"""<input> ["QRcolour=black"]\n\nNote: Add "QRcolour=black" at the end to make the QR code black."""'}), '(name=\'qrgen\', description=\'Generates a QR code.\', aliases=\n [\'qrg\', \'genqr\', \'qr\', \'qrc\'], usage=\n """<input> ["QRcolour=black"]\n\nNote: Add "QRcolour=black" at the end to make the QR code black."""\n )\n', (50765, 50975), False, 'from discord.ext import commands, tasks\n'), ((51969, 52018), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (51986, 52018), False, 'from discord.ext import commands, tasks\n'), ((52024, 52170), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""qrread"""', 'description': '"""Reads a QR code."""', 'aliases': "['qrscan', 'qrr', 'readqr']", 'usage': '"""<image URL OR image attachment>"""'}), "(name='qrread', description='Reads a QR code.', aliases=[\n 'qrscan', 'qrr', 'readqr'], usage='<image URL OR image attachment>')\n", (52040, 52170), False, 'from discord.ext import commands, tasks\n'), ((53157, 53206), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (53174, 53206), False, 'from discord.ext import commands, tasks\n'), ((53212, 53297), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""compile"""', 'description': '"""Compiles code."""', 'aliases': "['comp']"}), "(name='compile', description='Compiles code.', aliases=['comp']\n )\n", (53228, 53297), False, 'from discord.ext import commands, tasks\n'), ((57180, 57229), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', 
(57197, 57229), False, 'from discord.ext import commands, tasks\n'), ((57235, 57466), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""unix"""', 'description': '"""Converts a unix timestamp to a proper date format in GMT."""', 'aliases': "['time', 'timestamp', 'epoch', 'gmt', 'utc', 'timezone']", 'usage': '"""[time zone (-12-14)] [timestamp value]"""'}), "(name='unix', description=\n 'Converts a unix timestamp to a proper date format in GMT.', aliases=[\n 'time', 'timestamp', 'epoch', 'gmt', 'utc', 'timezone'], usage=\n '[time zone (-12-14)] [timestamp value]')\n", (57251, 57466), False, 'from discord.ext import commands, tasks\n'), ((58672, 58721), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (58689, 58721), False, 'from discord.ext import commands, tasks\n'), ((58727, 58864), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""scisum"""', 'aliases': "['science', 'sci']", 'hidden': '(True)', 'description': '"""Shows the science summary for the last month."""'}), "(name='scisum', aliases=['science', 'sci'], hidden=True,\n description='Shows the science summary for the last month.')\n", (58743, 58864), False, 'from discord.ext import commands, tasks\n'), ((58973, 59022), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (58990, 59022), False, 'from discord.ext import commands, tasks\n'), ((59028, 59216), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""dict"""', 'description': '"""Returns the definition(s) of a word."""', 'aliases': "['dictionary', 'def', 'definition', 'meaning', 'define']", 'usage': '"""<language code> <word>"""'}), "(name='dict', description=\n 'Returns the definition(s) of a word.', aliases=['dictionary', 'def',\n 'definition', 'meaning', 'define'], usage='<language code> <word>')\n", (59044, 59216), False, 'from discord.ext import commands, tasks\n'), ((60966, 61015), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (60983, 61015), False, 'from discord.ext import commands, tasks\n'), ((61021, 61193), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""reddit"""', 'description': '"""Looks up a community or user on Reddit."""', 'aliases': "['subreddit', 'r', 'redditor']", 'usage': '"""<r/subreddit OR u/redditor>"""'}), "(name='reddit', description=\n 'Looks up a community or user on Reddit.', aliases=['subreddit', 'r',\n 'redditor'], usage='<r/subreddit OR u/redditor>')\n", (61037, 61193), False, 'from discord.ext import commands, tasks\n'), ((68306, 68355), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (68323, 68355), False, 'from discord.ext import commands, tasks\n'), ((68361, 68523), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""calc"""', 'description': '"""Does simple math."""', 'aliases': "['calculate', 'calculator', 'cal', 'math', 'maths', 'safeeval']", 'usage': '"""<input>"""'}), "(name='calc', description='Does simple math.', aliases=[\n 'calculate', 'calculator', 'cal', 'math', 'maths', 'safeeval'], usage=\n '<input>')\n", (68377, 68523), False, 'from discord.ext import commands, tasks\n'), ((71611, 71660), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 
'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (71628, 71660), False, 'from discord.ext import commands, tasks\n'), ((71666, 71841), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""sqrt"""', 'usage': '"""<input>"""', 'hidden': '(True)', 'aliases': "['square', 'root']", 'description': '"""Calculates the square root of a given value or math expession."""'}), "(name='sqrt', usage='<input>', hidden=True, aliases=[\n 'square', 'root'], description=\n 'Calculates the square root of a given value or math expession.')\n", (71682, 71841), False, 'from discord.ext import commands, tasks\n'), ((72168, 72217), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (72185, 72217), False, 'from discord.ext import commands, tasks\n'), ((72223, 72551), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""wordcount"""', 'description': '"""Counts the number of words and characters in an input."""', 'aliases': "['lettercount', 'countletter', 'countchar', 'countletters', 'char', 'chars',\n 'letters', 'charcount', 'wc', 'countword', 'word', 'words',\n 'countwords', 'letter']", 'usage': '"""<input OR text attachment>"""'}), "(name='wordcount', description=\n 'Counts the number of words and characters in an input.', aliases=[\n 'lettercount', 'countletter', 'countchar', 'countletters', 'char',\n 'chars', 'letters', 'charcount', 'wc', 'countword', 'word', 'words',\n 'countwords', 'letter'], usage='<input OR text attachment>')\n", (72239, 72551), False, 'from discord.ext import commands, tasks\n'), ((74252, 74301), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (74269, 74301), False, 'from discord.ext import commands, tasks\n'), ((74307, 74484), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""country"""', 'description': '"""Shows information about a country."""', 'aliases': "['location', 'countries', 'place', 'nation']", 'usage': '"""<country name OR code>"""'}), "(name='country', description=\n 'Shows information about a country.', aliases=['location', 'countries',\n 'place', 'nation'], usage='<country name OR code>')\n", (74323, 74484), False, 'from discord.ext import commands, tasks\n'), ((77877, 77926), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (77894, 77926), False, 'from discord.ext import commands, tasks\n'), ((77932, 78081), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""ip"""', 'description': '"""Shows information about an IP address."""', 'aliases': "['ipaddress']", 'hidden': '(True)', 'usage': '"""<IP address>"""'}), "(name='ip', description=\n 'Shows information about an IP address.', aliases=['ipaddress'], hidden\n =True, usage='<IP address>')\n", (77948, 78081), False, 'from discord.ext import commands, tasks\n'), ((79009, 79058), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (79026, 79058), False, 'from discord.ext import commands, tasks\n'), ((79064, 79249), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""element"""', 'description': '"""Shows information about a chemical element."""', 'aliases': "['elem', 'chem', 'chemical']", 'hidden': '(True)', 'usage': '"""<element symbol or name>"""'}), 
"(name='element', description=\n 'Shows information about a chemical element.', aliases=['elem', 'chem',\n 'chemical'], hidden=True, usage='<element symbol or name>')\n", (79080, 79249), False, 'from discord.ext import commands, tasks\n'), ((82453, 82502), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (82470, 82502), False, 'from discord.ext import commands, tasks\n'), ((82508, 82638), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""periodic"""', 'description': '"""Shows the periodic table."""', 'aliases': "['periotictable', 'elements']", 'hidden': '(True)'}), "(name='periodic', description='Shows the periodic table.',\n aliases=['periotictable', 'elements'], hidden=True)\n", (82524, 82638), False, 'from discord.ext import commands, tasks\n'), ((82825, 82874), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (82842, 82874), False, 'from discord.ext import commands, tasks\n'), ((82880, 83057), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""sohcahtoa"""', 'description': '"""SOH CAH TOA."""', 'aliases': "['trigonometry', 'triggernometry', 'sincostan', 'sinecostan', 'sine', 'cos',\n 'tan']", 'hidden': '(True)'}), "(name='sohcahtoa', description='SOH CAH TOA.', aliases=[\n 'trigonometry', 'triggernometry', 'sincostan', 'sinecostan', 'sine',\n 'cos', 'tan'], hidden=True)\n", (82896, 83057), False, 'from discord.ext import commands, tasks\n'), ((83242, 83291), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (83259, 83291), False, 'from discord.ext import commands, tasks\n'), ((83297, 83412), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""osi"""', 'description': '"""Shows the OSI Model."""', 'aliases': "['osimodel', '7layers']", 'hidden': '(True)'}), "(name='osi', description='Shows the OSI Model.', aliases=[\n 'osimodel', '7layers'], hidden=True)\n", (83313, 83412), False, 'from discord.ext import commands, tasks\n'), ((83593, 83642), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (83610, 83642), False, 'from discord.ext import commands, tasks\n'), ((83648, 83800), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""normalbodytemp"""', 'description': '"""Shows the normal body temperature range chart."""', 'aliases': "['bodytemp', 'nbt']", 'hidden': '(True)'}), "(name='normalbodytemp', description=\n 'Shows the normal body temperature range chart.', aliases=['bodytemp',\n 'nbt'], hidden=True)\n", (83664, 83800), False, 'from discord.ext import commands, tasks\n'), ((83987, 84036), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(2)', 'commands.BucketType.user'], {}), '(1, 2, commands.BucketType.user)\n', (84004, 84036), False, 'from discord.ext import commands, tasks\n'), ((84042, 84316), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Gets information and generates a citation for an article via DOI number."""', 'aliases': "['reference', 'ref', 'citation', 'doi', 'cit', 'altmetric', 'altmetrics',\n 'cite', 'art']", 'usage': '"""<DOI number> [citation style]"""', 'name': '"""article"""'}), "(description=\n 'Gets information and generates a citation for an article via DOI number.',\n 
aliases=['reference', 'ref', 'citation', 'doi', 'cit', 'altmetric',\n 'altmetrics', 'cite', 'art'], usage='<DOI number> [citation style]',\n name='article')\n", (84058, 84316), False, 'from discord.ext import commands, tasks\n'), ((89991, 90040), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (90008, 90040), False, 'from discord.ext import commands, tasks\n'), ((90046, 90365), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""quartile"""', 'usage': '"""<numbers separated with ;> ["all" to show all points]"""', 'aliases': "['avg', 'average', 'mean', 'median', 'mode', 'q1', 'q2', 'q3', 'range',\n 'sd', 'iqr', 'quartiles', 'boxplot', 'box', 'qir']", 'description': '"""Computes statistical data from a set of numerical values."""'}), '(name=\'quartile\', usage=\n \'<numbers separated with ;> ["all" to show all points]\', aliases=[\'avg\',\n \'average\', \'mean\', \'median\', \'mode\', \'q1\', \'q2\', \'q3\', \'range\', \'sd\',\n \'iqr\', \'quartiles\', \'boxplot\', \'box\', \'qir\'], description=\n \'Computes statistical data from a set of numerical values.\')\n', (90062, 90365), False, 'from discord.ext import commands, tasks\n'), ((94033, 94082), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (94050, 94082), False, 'from discord.ext import commands, tasks\n'), ((95328, 95377), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (95345, 95377), False, 'from discord.ext import commands, tasks\n'), ((95383, 95600), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""zodiac"""', 'description': '"""Converts a date to its zodiac sign."""', 'hidden': '(True)', 'aliases': "['starsign', 'horoscope', 'zs']", 'usage': '"""[month] [day]\n\nAlternative usage(s):\n\n- <zodiac sign>"""'}), '(name=\'zodiac\', description=\n \'Converts a date to its zodiac sign.\', hidden=True, aliases=[\'starsign\',\n \'horoscope\', \'zs\'], usage=\n """[month] [day]\n\nAlternative usage(s):\n\n- <zodiac sign>""")\n', (95399, 95600), False, 'from discord.ext import commands, tasks\n'), ((97334, 97383), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (97351, 97383), False, 'from discord.ext import commands, tasks\n'), ((97389, 97609), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""chinesezodiac"""', 'description': '"""Converts a year to its Chinese zodiac sign."""', 'usage': '"""[year]"""', 'aliases': "['cz', 'zodiacchinese', 'year', 'yearofthe', 'ly', 'leap', 'leapyear']", 'hidden': '(True)'}), "(name='chinesezodiac', description=\n 'Converts a year to its Chinese zodiac sign.', usage='[year]', aliases=\n ['cz', 'zodiacchinese', 'year', 'yearofthe', 'ly', 'leap', 'leapyear'],\n hidden=True)\n", (97405, 97609), False, 'from discord.ext import commands, tasks\n'), ((98231, 98280), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(2)', 'commands.BucketType.user'], {}), '(1, 2, commands.BucketType.user)\n', (98248, 98280), False, 'from discord.ext import commands, tasks\n'), ((98286, 98767), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Shows how far apart two dates are."""', 'aliases': "['weekday', 'day', 'days', 'dates', 'age', 'today']", 'usage': '(\n 
\'[date #1 day] [date #1 month] [date #1 year] [date #2 day] [date #2 month] [date #2 year]\\n\\n\'\n +\n """Alternative usage(s):\n\n- <days (+/-) from today OR weeks (+/- ending with w) from today>\n\n"""\n +\n \'- <date day> <date month> <date year> <days (+/-) from date OR weeks (+/- ending with w) from date>\'\n )', 'name': '"""date"""'}), '(description=\'Shows how far apart two dates are.\', aliases=\n [\'weekday\', \'day\', \'days\', \'dates\', \'age\', \'today\'], usage=\n """[date #1 day] [date #1 month] [date #1 year] [date #2 day] [date #2 month] [date #2 year]\n\n"""\n +\n """Alternative usage(s):\n\n- <days (+/-) from today OR weeks (+/- ending with w) from today>\n\n"""\n +\n \'- <date day> <date month> <date year> <days (+/-) from date OR weeks (+/- ending with w) from date>\'\n , name=\'date\')\n', (98302, 98767), False, 'from discord.ext import commands, tasks\n'), ((103963, 104012), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (103980, 104012), False, 'from discord.ext import commands, tasks\n'), ((104018, 104183), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""iss"""', 'description': '"""Gets information about the International Space Station and all humans in space."""', 'aliases': "['space']", 'hidden': '(True)'}), "(name='iss', description=\n 'Gets information about the International Space Station and all humans in space.'\n , aliases=['space'], hidden=True)\n", (104034, 104183), False, 'from discord.ext import commands, tasks\n'), ((105761, 105810), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (105778, 105810), False, 'from discord.ext import commands, tasks\n'), ((105816, 106154), 'discord.ext.commands.command', 'commands.command', ([], {'usage': '"""<note #1 with octave (e.g. F#2)> <note #2 with octave (e.g. G5)>"""', 'hidden': '(True)', 'aliases': "['octave', 'note', 'notes', 'semitone', 'semitones', 'vocalrange',\n 'octaves', 'notesrange']", 'name': '"""noterange"""', 'description': '"""Shows the range in octaves and semitones between two given musical notes."""'}), "(usage=\n '<note #1 with octave (e.g. F#2)> <note #2 with octave (e.g. 
G5)>',\n hidden=True, aliases=['octave', 'note', 'notes', 'semitone',\n 'semitones', 'vocalrange', 'octaves', 'notesrange'], name='noterange',\n description=\n 'Shows the range in octaves and semitones between two given musical notes.'\n )\n", (105832, 106154), False, 'from discord.ext import commands, tasks\n'), ((107548, 107597), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (107565, 107597), False, 'from discord.ext import commands, tasks\n'), ((107603, 107863), 'discord.ext.commands.command', 'commands.command', ([], {'description': "('Adds a timestamp to a YouTube video link, ' +\n 'useful for mobile users who cannot copy links with timestamps.')", 'hidden': '(True)', 'aliases': "['yt', 'ytts', 'ytt']", 'usage': '"""<YouTube video link> <timestamp>"""', 'name': '"""yttimestamp"""'}), "(description='Adds a timestamp to a YouTube video link, ' +\n 'useful for mobile users who cannot copy links with timestamps.',\n hidden=True, aliases=['yt', 'ytts', 'ytt'], usage=\n '<YouTube video link> <timestamp>', name='yttimestamp')\n", (107619, 107863), False, 'from discord.ext import commands, tasks\n'), ((108725, 108774), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (108742, 108774), False, 'from discord.ext import commands, tasks\n'), ((108780, 109023), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Shows the age timeline of a hypothetical person born in a certain year up until adulthood."""', 'usage': '"""[year (1500-2500)]\n\nAlternative usage(s):\n\n- <age (0-100)>"""', 'name': '"""agelist"""', 'hidden': '(True)'}), '(description=\n \'Shows the age timeline of a hypothetical person born in a certain year up until adulthood.\'\n , usage=\n """[year (1500-2500)]\n\nAlternative usage(s):\n\n- <age (0-100)>""", name=\n \'agelist\', hidden=True)\n', (108796, 109023), False, 'from discord.ext import commands, tasks\n'), ((110318, 110367), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (110335, 110367), False, 'from discord.ext import commands, tasks\n'), ((110373, 110564), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""google"""', 'description': '"""Generates search URLs for Google, Bing, and DuckDuckGo."""', 'aliases': "['search', 'ddg', 'duckduckgo', 'lookup', 'bing']", 'usage': '"""<keywords>"""'}), "(name='google', description=\n 'Generates search URLs for Google, Bing, and DuckDuckGo.', aliases=[\n 'search', 'ddg', 'duckduckgo', 'lookup', 'bing'], usage='<keywords>')\n", (110389, 110564), False, 'from discord.ext import commands, tasks\n'), ((111369, 111419), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(15)', 'commands.BucketType.user'], {}), '(1, 15, commands.BucketType.user)\n', (111386, 111419), False, 'from discord.ext import commands, tasks\n'), ((111425, 111591), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""wolfram"""', 'description': '"""Queries things using the Wolfram|Alpha API."""', 'aliases': "['wolf', 'wa', 'wolframalpha', 'query']", 'usage': '"""<input>"""'}), "(name='wolfram', description=\n 'Queries things using the Wolfram|Alpha API.', aliases=['wolf', 'wa',\n 'wolframalpha', 'query'], usage='<input>')\n", (111441, 111591), False, 'from discord.ext import commands, tasks\n'), ((113906, 113956), 
'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (113923, 113956), False, 'from discord.ext import commands, tasks\n'), ((113962, 114177), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""blurface"""', 'description': '"""Detects faces in an image and blurs them."""', 'hidden': '(True)', 'aliases': "['faceblur', 'blurfaces', 'anonymize', 'anonymise', 'blur']", 'usage': '"""<image attachment>"""'}), "(name='blurface', description=\n 'Detects faces in an image and blurs them.', hidden=True, aliases=[\n 'faceblur', 'blurfaces', 'anonymize', 'anonymise', 'blur'], usage=\n '<image attachment>')\n", (113978, 114177), False, 'from discord.ext import commands, tasks\n'), ((1818, 1863), 'cv2.dnn.readNetFromCaffe', 'dnn.readNetFromCaffe', (['prototxtPath', 'modelPath'], {}), '(prototxtPath, modelPath)\n', (1838, 1863), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((1880, 1896), 'cv2.imread', 'imread', (['filename'], {}), '(filename)\n', (1886, 1896), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((2005, 2069), 'cv2.dnn.blobFromImage', 'dnn.blobFromImage', (['image', '(1.0)', '(300, 300)', '(104.0, 177.0, 123.0)'], {}), '(image, 1.0, (300, 300), (104.0, 177.0, 123.0))\n', (2022, 2069), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((12034, 12103), 'discord.Embed', 'Embed', ([], {'title': 'title', 'description': 'f"""Requested by: {ctx.author.mention}"""'}), "(title=title, description=f'Requested by: {ctx.author.mention}')\n", (12039, 12103), False, 'from discord import Embed, File, channel\n'), ((12316, 12352), 'discord.File', 'File', (['f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(f'{funcs.PATH}/temp/{imgName}')\n", (12320, 12352), False, 'from discord import Embed, File, channel\n'), ((61274, 61375), 'asyncpraw.Reddit', 'Reddit', ([], {'client_id': 'config.redditClientID', 'client_secret': 'config.redditClientSecret', 'user_agent': '"""*"""'}), "(client_id=config.redditClientID, client_secret=config.\n redditClientSecret, user_agent='*')\n", (61280, 61375), False, 'from asyncpraw import Reddit\n'), ((73848, 73873), 'discord.Embed', 'Embed', ([], {'title': '"""Word Count"""'}), "(title='Word Count')\n", (73853, 73873), False, 'from discord import Embed, File, channel\n'), ((98967, 98983), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (98981, 98983), False, 'from datetime import datetime, timedelta\n'), ((110743, 110770), 'urllib.parse.urlencode', 'parse.urlencode', (["{'q': inp}"], {}), "({'q': inp})\n", (110758, 110770), False, 'from urllib import parse\n'), ((110786, 110915), 'src.utils.funcs.newButtonView', 'funcs.newButtonView', (['(2)'], {'label': '"""Google"""', 'url': 'f"""https://www.google.com/search?{param}"""', 'emoji': "self.client.emoji['google']"}), "(2, label='Google', url=\n f'https://www.google.com/search?{param}', emoji=self.client.emoji['google']\n )\n", (110805, 110915), False, 'from src.utils import funcs\n'), ((110943, 111076), 'src.utils.funcs.newButtonView', 'funcs.newButtonView', (['(2)'], {'label': '"""Bing"""', 'url': 'f"""https://www.bing.com/search?{param}"""', 'emoji': "self.client.emoji['bing']", 'view': 'view'}), "(2, label='Bing', url=\n f'https://www.bing.com/search?{param}', emoji=self.client.emoji['bing'],\n view=view)\n", (110962, 111076), False, 'from src.utils import funcs\n'), ((1482, 1527), 'src.utils.funcs.generateJson', 'funcs.generateJson', (['"""reminders"""', "{'list': 
[]}"], {}), "('reminders', {'list': []})\n", (1500, 1527), False, 'from src.utils import funcs\n'), ((1672, 1719), 'src.utils.funcs.getResource', 'funcs.getResource', (['self.name', '"""deploy.prototxt"""'], {}), "(self.name, 'deploy.prototxt')\n", (1689, 1719), False, 'from src.utils import funcs\n'), ((1753, 1801), 'src.utils.funcs.getResource', 'funcs.getResource', (['self.name', '"""model.caffemodel"""'], {}), "(self.name, 'model.caffemodel')\n", (1770, 1801), False, 'from src.utils import funcs\n'), ((2750, 2787), 'src.utils.funcs.readJson', 'funcs.readJson', (['"""data/reminders.json"""'], {}), "('data/reminders.json')\n", (2764, 2787), False, 'from src.utils import funcs\n'), ((4550, 4587), 'src.utils.funcs.readJson', 'funcs.readJson', (['"""data/reminders.json"""'], {}), "('data/reminders.json')\n", (4564, 4587), False, 'from src.utils import funcs\n'), ((5759, 5796), 'src.utils.funcs.readJson', 'funcs.readJson', (['"""data/reminders.json"""'], {}), "('data/reminders.json')\n", (5773, 5796), False, 'from src.utils import funcs\n'), ((5815, 5821), 'time.time', 'time', ([], {}), '()\n', (5819, 5821), False, 'from time import gmtime, mktime, time\n'), ((12234, 12299), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['fig.write_image', 'f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(fig.write_image, f'{funcs.PATH}/temp/{imgName}')\n", (12250, 12299), False, 'from src.utils import funcs\n'), ((13608, 13637), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (13628, 13637), False, 'from src.utils import funcs\n'), ((14925, 14954), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (14945, 14954), False, 'from src.utils import funcs\n'), ((16232, 16261), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (16252, 16261), False, 'from src.utils import funcs\n'), ((17036, 17083), 'discord.Embed', 'Embed', ([], {'description': 'f"""https://github.com/{repo}"""'}), "(description=f'https://github.com/{repo}')\n", (17041, 17083), False, 'from discord import Embed, File, channel\n'), ((19516, 19602), 'discord.Embed', 'Embed', ([], {'description': "('Statistics taken at: `' + data['statistic_taken_at'] + ' UTC`')"}), "(description='Statistics taken at: `' + data['statistic_taken_at'] +\n ' UTC`')\n", (19521, 19602), False, 'from discord import Embed, File, channel\n'), ((24109, 24147), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Empty input."""'], {}), "(None, 'Empty input.')\n", (24125, 24147), False, 'from src.utils import funcs\n'), ((30684, 30715), 'src.utils.funcs.celsiusToFahrenheit', 'funcs.celsiusToFahrenheit', (['temp'], {}), '(temp)\n', (30709, 30715), False, 'from src.utils import funcs\n'), ((30837, 30868), 'src.utils.funcs.celsiusToFahrenheit', 'funcs.celsiusToFahrenheit', (['high'], {}), '(high)\n', (30862, 30868), False, 'from src.utils import funcs\n'), ((30888, 30918), 'src.utils.funcs.celsiusToFahrenheit', 'funcs.celsiusToFahrenheit', (['low'], {}), '(low)\n', (30913, 30918), False, 'from src.utils import funcs\n'), ((36318, 36371), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Cannot process empty input."""'], {}), "(None, 'Cannot process empty input.')\n", (36334, 36371), False, 'from src.utils import funcs\n'), ((41453, 41478), 'discord.Embed', 'Embed', ([], {'description': 'output'}), '(description=output)\n', (41458, 41478), False, 'from discord import Embed, File, channel\n'), ((51281, 51303), 'discord.Embed', 
'Embed', ([], {'title': '"""QR Code"""'}), "(title='QR Code')\n", (51286, 51303), False, 'from discord import Embed, File, channel\n'), ((51321, 51329), 'qrcode.QRCode', 'QRCode', ([], {}), '()\n', (51327, 51329), False, 'from qrcode import QRCode\n'), ((51655, 51691), 'discord.File', 'File', (['f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(f'{funcs.PATH}/temp/{imgName}')\n", (51659, 51691), False, 'from discord import Embed, File, channel\n'), ((51933, 51962), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (51953, 51962), False, 'from src.utils import funcs\n'), ((53043, 53117), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""No attachment or URL detected, please try again."""'], {}), "(None, 'No attachment or URL detected, please try again.')\n", (53059, 53117), False, 'from src.utils import funcs\n'), ((59813, 59891), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['"""Invalid language code!"""', 'f"""Valid options:\n\n{codesList}"""'], {}), '(\'Invalid language code!\', f"""Valid options:\n\n{codesList}""")\n', (59829, 59891), False, 'from src.utils import funcs\n'), ((74215, 74245), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['filename'], {}), '(filename)\n', (74235, 74245), False, 'from src.utils import funcs\n'), ((75937, 75990), 'discord.Embed', 'Embed', ([], {'title': 'f"""{data[\'name\']} ({data[\'alpha3Code\']})"""'}), '(title=f"{data[\'name\']} ({data[\'alpha3Code\']})")\n', (75942, 75990), False, 'from discord import Embed, File, channel\n'), ((78259, 78285), 'discord.Embed', 'Embed', ([], {'title': "data['query']"}), "(title=data['query'])\n", (78264, 78285), False, 'from discord import Embed, File, channel\n'), ((79349, 79369), 'mendeleev.element', 'element', (['elementname'], {}), '(elementname)\n', (79356, 79369), False, 'from mendeleev import element\n'), ((80290, 80368), 'discord.Embed', 'Embed', ([], {'title': 'f"""{name} ({elementobj.symbol})"""', 'description': "(desc if desc else '')"}), "(title=f'{name} ({elementobj.symbol})', description=desc if desc else '')\n", (80295, 80368), False, 'from discord import Embed, File, channel\n'), ((82706, 82827), 'src.utils.funcs.sendImage', 'funcs.sendImage', (['ctx', '"""https://media.discordapp.net/attachments/871621453521485864/882103596563431424/table.jpg"""'], {}), "(ctx,\n 'https://media.discordapp.net/attachments/871621453521485864/882103596563431424/table.jpg'\n )\n", (82721, 82827), False, 'from src.utils import funcs\n'), ((83121, 83244), 'src.utils.funcs.sendImage', 'funcs.sendImage', (['ctx', '"""https://media.discordapp.net/attachments/771404776410972161/954017475668885534/unknown.png"""'], {}), "(ctx,\n 'https://media.discordapp.net/attachments/771404776410972161/954017475668885534/unknown.png'\n )\n", (83136, 83244), False, 'from src.utils import funcs\n'), ((83474, 83595), 'src.utils.funcs.sendImage', 'funcs.sendImage', (['ctx', '"""https://cdn.discordapp.com/attachments/771404776410972161/950404988369240104/unknown.png"""'], {}), "(ctx,\n 'https://cdn.discordapp.com/attachments/771404776410972161/950404988369240104/unknown.png'\n )\n", (83489, 83595), False, 'from src.utils import funcs\n'), ((83869, 83989), 'src.utils.funcs.sendImage', 'funcs.sendImage', (['ctx', '"""https://cdn.discordapp.com/attachments/771404776410972161/851367517241999380/image0.jpg"""'], {}), "(ctx,\n 'https://cdn.discordapp.com/attachments/771404776410972161/851367517241999380/image0.jpg'\n )\n", (83884, 83989), False, 'from src.utils import funcs\n'), 
((91723, 91747), 'statistics.median', 'median', (['data[-halflist:]'], {}), '(data[-halflist:])\n', (91729, 91747), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((91765, 91788), 'statistics.median', 'median', (['data[:halflist]'], {}), '(data[:halflist])\n', (91771, 91788), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((93264, 93275), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (93273, 93275), True, 'from plotly import graph_objects as go\n'), ((93728, 93764), 'discord.File', 'File', (['f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(f'{funcs.PATH}/temp/{imgName}')\n", (93732, 93764), False, 'from discord import Embed, File, channel\n'), ((93997, 94026), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (94017, 94026), False, 'from src.utils import funcs\n'), ((104597, 104675), 'discord.Embed', 'Embed', ([], {'description': '"""https://en.wikipedia.org/wiki/International_Space_Station"""'}), "(description='https://en.wikipedia.org/wiki/International_Space_Station')\n", (104602, 104675), False, 'from discord import Embed, File, channel\n'), ((109087, 109103), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (109101, 109103), False, 'from datetime import datetime, timedelta\n'), ((114423, 114461), 'src.utils.funcs.useImageFunc', 'funcs.useImageFunc', (['ctx', 'self.blurFace'], {}), '(ctx, self.blurFace)\n', (114441, 114461), False, 'from src.utils import funcs\n'), ((1623, 1629), 'time.time', 'time', ([], {}), '()\n', (1627, 1629), False, 'from time import gmtime, mktime, time\n'), ((2454, 2495), 'cv2.GaussianBlur', 'GaussianBlur', (['face', '(kernelW, kernelH)', '(0)'], {}), '(face, (kernelW, kernelH), 0)\n', (2466, 2495), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((2567, 2613), 'cv2.imwrite', 'imwrite', (['f"""{funcs.PATH}/temp/{imgName}"""', 'image'], {}), "(f'{funcs.PATH}/temp/{imgName}', image)\n", (2574, 2613), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((3844, 3892), 'src.utils.funcs.dumpJson', 'funcs.dumpJson', (['"""data/reminders.json"""', 'reminders'], {}), "('data/reminders.json', reminders)\n", (3858, 3892), False, 'from src.utils import funcs\n'), ((7878, 7897), 'src.utils.funcs.randomHex', 'funcs.randomHex', (['(16)'], {}), '(16)\n', (7893, 7897), False, 'from src.utils import funcs\n'), ((12816, 12822), 'time.time', 'time', ([], {}), '()\n', (12820, 12822), False, 'from time import gmtime, mktime, time\n'), ((13438, 13463), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (13454, 13463), False, 'from src.utils import funcs\n'), ((13480, 13548), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""An error occurred, please try again later."""'], {}), "(None, 'An error occurred, please try again later.')\n", (13496, 13548), False, 'from src.utils import funcs\n'), ((14031, 14037), 'time.time', 'time', ([], {}), '()\n', (14035, 14037), False, 'from time import gmtime, mktime, time\n'), ((14755, 14780), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (14771, 14780), False, 'from src.utils import funcs\n'), ((14797, 14865), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""An error occurred, please try again later."""'], {}), "(None, 'An error occurred, please try again later.')\n", (14813, 14865), False, 'from src.utils import funcs\n'), ((15343, 15349), 'time.time', 'time', ([], {}), '()\n', (15347, 15349), False, 'from 
time import gmtime, mktime, time\n'), ((16062, 16087), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (16078, 16087), False, 'from src.utils import funcs\n'), ((16104, 16172), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""An error occurred, please try again later."""'], {}), "(None, 'An error occurred, please try again later.')\n", (16120, 16172), False, 'from src.utils import funcs\n'), ((16952, 17019), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["('https://api.codetabs.com/v1/loc/?github=' + repo)"], {}), "('https://api.codetabs.com/v1/loc/?github=' + repo)\n", (16968, 17019), False, 'from src.utils import funcs\n'), ((17657, 17682), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (17673, 17682), False, 'from src.utils import funcs\n'), ((17699, 17760), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown repository or server error."""'], {}), "(None, 'Unknown repository or server error.')\n", (17715, 17760), False, 'from src.utils import funcs\n'), ((18284, 18386), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""https://corona-virus-world-and-india-data.p.rapidapi.com/api"""'], {'headers': 'headers'}), "('https://corona-virus-world-and-india-data.p.rapidapi.com/api'\n , headers=headers)\n", (18300, 18386), False, 'from src.utils import funcs\n'), ((23647, 23672), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (23663, 23672), False, 'from src.utils import funcs\n'), ((23689, 23745), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or server error."""'], {}), "(None, 'Invalid input or server error.')\n", (23705, 23745), False, 'from src.utils import funcs\n'), ((28307, 28364), 'discord.Embed', 'Embed', ([], {'title': 'f"""Flight {flightstr}"""', 'description': 'flighturl'}), "(title=f'Flight {flightstr}', description=flighturl)\n", (28312, 28364), False, 'from discord import Embed, File, channel\n'), ((30339, 30360), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['url'], {}), '(url)\n', (30355, 30360), False, 'from src.utils import funcs\n'), ((32275, 32300), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (32291, 32300), False, 'from src.utils import funcs\n'), ((32317, 32376), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown location or server error."""'], {}), "(None, 'Unknown location or server error.')\n", (32333, 32376), False, 'from src.utils import funcs\n'), ((33059, 33101), 'deep_translator.constants.GOOGLE_CODES_TO_LANGUAGES.keys', 'constants.GOOGLE_CODES_TO_LANGUAGES.keys', ([], {}), '()\n', (33099, 33101), False, 'from deep_translator import GoogleTranslator, constants\n'), ((33366, 33410), 'deep_translator.GoogleTranslator', 'GoogleTranslator', ([], {'source': '"""auto"""', 'target': 'dest'}), "(source='auto', target=dest)\n", (33382, 33410), False, 'from deep_translator import GoogleTranslator, constants\n'), ((33607, 33632), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (33623, 33632), False, 'from src.utils import funcs\n'), ((33649, 33708), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""An error occurred. Invalid input?"""'], {}), "(None, 'An error occurred. 
Invalid input?')\n", (33665, 33708), False, 'from src.utils import funcs\n'), ((34193, 34309), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.exchangeratesapi.io/v1/latest"""'], {'params': "{'access_key': config.exchangeratesapiKey}"}), "('http://api.exchangeratesapi.io/v1/latest', params={\n 'access_key': config.exchangeratesapiKey})\n", (34209, 34309), False, 'from src.utils import funcs\n'), ((35887, 35912), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (35903, 35912), False, 'from src.utils import funcs\n'), ((39928, 39956), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['categories'], {}), '(categories)\n', (39944, 39956), False, 'from src.utils import funcs\n'), ((40494, 40514), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['lb'], {}), '(lb)\n', (40510, 40514), False, 'from src.utils import funcs\n'), ((40709, 40774), 'src.utils.funcs.timeDifferenceStr', 'funcs.timeDifferenceStr', (["run['times']['primary_t']", '(0)'], {'noStr': '(True)'}), "(run['times']['primary_t'], 0, noStr=True)\n", (40732, 40774), False, 'from src.utils import funcs\n'), ((41888, 41913), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (41904, 41913), False, 'from src.utils import funcs\n'), ((42749, 42863), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""https://www.speedrun.com/api/v1/runs?game={gameID}&status=new&embed=players&max=200"""'], {}), "(\n f'https://www.speedrun.com/api/v1/runs?game={gameID}&status=new&embed=players&max=200'\n )\n", (42765, 42863), False, 'from src.utils import funcs\n'), ((45643, 45678), 'discord.Embed', 'Embed', ([], {'description': '"""No runs found."""'}), "(description='No runs found.')\n", (45648, 45678), False, 'from discord import Embed, File, channel\n'), ((45987, 46012), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (46003, 46012), False, 'from src.utils import funcs\n'), ((50105, 50160), 'discord.Embed', 'Embed', ([], {'description': 'p', 'title': 'f"""{author} - {title}"""[:256]'}), "(description=p, title=f'{author} - {title}'[:256])\n", (50110, 50160), False, 'from discord import Embed, File, channel\n'), ((50559, 50584), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (50575, 50584), False, 'from src.utils import funcs\n'), ((51218, 51224), 'time.time', 'time', ([], {}), '()\n', (51222, 51224), False, 'from time import gmtime, mktime, time\n'), ((51791, 51816), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (51807, 51816), False, 'from src.utils import funcs\n'), ((51833, 51873), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (51849, 51873), False, 'from src.utils import funcs\n'), ((52422, 52430), 'asyncio.sleep', 'sleep', (['(3)'], {}), '(3)\n', (52427, 52430), False, 'from asyncio import TimeoutError, sleep\n'), ((53364, 53427), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""https://run.glot.io/languages"""'], {'verify': '(False)'}), "('https://run.glot.io/languages', verify=False)\n", (53380, 53427), False, 'from src.utils import funcs\n'), ((54789, 54831), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['versionurl'], {'verify': '(False)'}), '(versionurl, verify=False)\n', (54805, 54831), False, 'from src.utils import funcs\n'), ((57148, 57173), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (57164, 57173), 
False, 'from src.utils import funcs\n'), ((58068, 58076), 'time.gmtime', 'gmtime', ([], {}), '()\n', (58074, 58076), False, 'from time import gmtime, mktime, time\n'), ((58095, 58128), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (58117, 58128), False, 'from datetime import datetime, timedelta\n'), ((68183, 68208), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (68199, 68208), False, 'from src.utils import funcs\n'), ((68225, 68266), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid search."""'], {}), "(None, 'Invalid search.')\n", (68241, 68266), False, 'from src.utils import funcs\n'), ((71498, 71523), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (71514, 71523), False, 'from src.utils import funcs\n'), ((72055, 72080), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (72071, 72080), False, 'from src.utils import funcs\n'), ((72678, 72684), 'time.time', 'time', ([], {}), '()\n', (72682, 72684), False, 'from time import gmtime, mktime, time\n'), ((73786, 73827), 'src.utils.funcs.replaceCharacters', 'funcs.replaceCharacters', (['inp', 'punctuation'], {}), '(inp, punctuation)\n', (73809, 73827), False, 'from src.utils import funcs\n'), ((77739, 77764), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (77755, 77764), False, 'from src.utils import funcs\n'), ((77781, 77837), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or server error."""'], {}), "(None, 'Invalid input or server error.')\n", (77797, 77837), False, 'from src.utils import funcs\n'), ((78164, 78212), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""http://ip-api.com/json/{ip}"""'], {}), "(f'http://ip-api.com/json/{ip}')\n", (78180, 78212), False, 'from src.utils import funcs\n'), ((78871, 78896), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (78887, 78896), False, 'from src.utils import funcs\n'), ((78913, 78969), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or server error."""'], {}), "(None, 'Invalid input or server error.')\n", (78929, 78969), False, 'from src.utils import funcs\n'), ((82343, 82368), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (82359, 82368), False, 'from src.utils import funcs\n'), ((89878, 89903), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (89894, 89903), False, 'from src.utils import funcs\n'), ((90489, 90495), 'time.time', 'time', ([], {}), '()\n', (90493, 90495), False, 'from time import gmtime, mktime, time\n'), ((93302, 93365), 'plotly.graph_objects.Box', 'go.Box', ([], {'y': 'data', 'quartilemethod': '"""linear"""', 'name': '"""Linear Quartile"""'}), "(y=data, quartilemethod='linear', name='Linear Quartile')\n", (93308, 93365), True, 'from plotly import graph_objects as go\n'), ((93393, 93462), 'plotly.graph_objects.Box', 'go.Box', ([], {'y': 'data', 'quartilemethod': '"""inclusive"""', 'name': '"""Inclusive Quartile"""'}), "(y=data, quartilemethod='inclusive', name='Inclusive Quartile')\n", (93399, 93462), True, 'from plotly import graph_objects as go\n'), ((93490, 93559), 'plotly.graph_objects.Box', 'go.Box', ([], {'y': 'data', 'quartilemethod': '"""exclusive"""', 'name': '"""Exclusive Quartile"""'}), "(y=data, quartilemethod='exclusive', name='Exclusive Quartile')\n", (93496, 
93559), True, 'from plotly import graph_objects as go\n'), ((93642, 93707), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['fig.write_image', 'f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(fig.write_image, f'{funcs.PATH}/temp/{imgName}')\n", (93658, 93707), False, 'from src.utils import funcs\n'), ((93864, 93889), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (93880, 93889), False, 'from src.utils import funcs\n'), ((96988, 97012), 'src.utils.funcs.dateToZodiac', 'funcs.dateToZodiac', (['date'], {}), '(date)\n', (97006, 97012), False, 'from src.utils import funcs\n'), ((97254, 97294), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (97270, 97294), False, 'from src.utils import funcs\n'), ((97695, 97709), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (97707, 97709), False, 'from datetime import datetime, timedelta\n'), ((97997, 98017), 'src.utils.funcs.leapYear', 'funcs.leapYear', (['year'], {}), '(year)\n', (98011, 98017), False, 'from src.utils import funcs\n'), ((98151, 98191), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (98167, 98191), False, 'from src.utils import funcs\n'), ((101427, 101451), 'discord.Embed', 'Embed', ([], {'title': '"""Two Dates"""'}), "(title='Two Dates')\n", (101432, 101451), False, 'from discord import Embed, File, channel\n'), ((103699, 103720), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (103715, 103720), False, 'from src.utils import funcs\n'), ((103771, 103796), 'src.utils.funcs.formatting', 'funcs.formatting', (['"""Today"""'], {}), "('Today')\n", (103787, 103796), False, 'from src.utils import funcs\n'), ((103841, 103866), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (103857, 103866), False, 'from src.utils import funcs\n'), ((103883, 103923), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (103899, 103923), False, 'from src.utils import funcs\n'), ((104267, 104340), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.open-notify.org/iss-now.json"""'], {'verify': '(False)'}), "('http://api.open-notify.org/iss-now.json', verify=False)\n", (104283, 104340), False, 'from src.utils import funcs\n'), ((104418, 104490), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.open-notify.org/astros.json"""'], {'verify': '(False)'}), "('http://api.open-notify.org/astros.json', verify=False)\n", (104434, 104490), False, 'from src.utils import funcs\n'), ((105640, 105665), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (105656, 105665), False, 'from src.utils import funcs\n'), ((105682, 105721), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error."""'], {}), "(None, 'Server error.')\n", (105698, 105721), False, 'from src.utils import funcs\n'), ((107341, 107366), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (107357, 107366), False, 'from src.utils import funcs\n'), ((107383, 107423), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (107399, 107423), False, 'from src.utils import funcs\n'), ((110289, 110310), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (110305, 110310), False, 'from src.utils 
import funcs\n'), ((112110, 112117), 'discord.Embed', 'Embed', ([], {}), '()\n', (112115, 112117), False, 'from discord import Embed, File, channel\n'), ((2295, 2314), 'numpy.array', 'array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (2300, 2314), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((2922, 2976), 'src.utils.funcs.userIDNotBlacklisted', 'funcs.userIDNotBlacklisted', (["reminder['data']['userID']"], {}), "(reminder['data']['userID'])\n", (2948, 2976), False, 'from src.utils import funcs\n'), ((3099, 3167), 'discord.Embed', 'Embed', ([], {'title': '"""⚠️ Reminder"""', 'description': "reminder['data']['reminder']"}), "(title='⚠️ Reminder', description=reminder['data']['reminder'])\n", (3104, 3167), False, 'from discord import Embed, File, channel\n'), ((6088, 6159), 'discord.Embed', 'Embed', ([], {'title': '"""Your Reminders"""', 'description': "reminder['data']['reminder']"}), "(title='Your Reminders', description=reminder['data']['reminder'])\n", (6093, 6159), False, 'from discord import Embed, File, channel\n'), ((6608, 6657), 'discord.Embed', 'Embed', ([], {'title': '"""Your Reminders"""', 'description': '"""None"""'}), "(title='Your Reminders', description='None')\n", (6613, 6657), False, 'from discord import Embed, File, channel\n'), ((17586, 17611), 'src.utils.funcs.githubRepoPic', 'funcs.githubRepoPic', (['repo'], {}), '(repo)\n', (17605, 17611), False, 'from src.utils import funcs\n'), ((24462, 24527), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['url'], {'headers': "{'User-agent': '*'}", 'params': 'params'}), "(url, headers={'User-agent': '*'}, params=params)\n", (24478, 24527), False, 'from src.utils import funcs\n'), ((27468, 27557), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["(realdepart + data['airport']['origin']['timezone']['offset'])"], {}), "(realdepart + data['airport']['origin']['timezone'][\n 'offset'])\n", (27490, 27557), False, 'from datetime import datetime, timedelta\n'), ((27586, 27680), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["(realarrive + data['airport']['destination']['timezone']['offset'])"], {}), "(realarrive + data['airport']['destination'][\n 'timezone']['offset'])\n", (27608, 27680), False, 'from datetime import datetime, timedelta\n'), ((27709, 27794), 'src.utils.funcs.dateBirthday', 'funcs.dateBirthday', (['realdepart.day', 'realdepart.month', 'realdepart.year'], {'noBD': '(True)'}), '(realdepart.day, realdepart.month, realdepart.year, noBD=True\n )\n', (27727, 27794), False, 'from src.utils import funcs\n'), ((29724, 29749), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (29740, 29749), False, 'from src.utils import funcs\n'), ((29770, 29827), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown flight or server error."""'], {}), "(None, 'Unknown flight or server error.')\n", (29786, 29827), False, 'from src.utils import funcs\n'), ((33442, 33477), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['g.translate', 'text'], {}), '(g.translate, text)\n', (33458, 33477), False, 'from src.utils import funcs\n'), ((38861, 38886), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (38877, 38886), False, 'from src.utils import funcs\n'), ((38907, 38963), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or server error."""'], {}), "(None, 'Invalid input or server error.')\n", (38923, 38963), False, 'from src.utils import funcs\n'), ((44159, 44222), 
'src.utils.funcs.timeDifferenceStr', 'funcs.timeDifferenceStr', (["i['times']['primary_t']", '(0)'], {'noStr': '(True)'}), "(i['times']['primary_t'], 0, noStr=True)\n", (44182, 44222), False, 'from src.utils import funcs\n'), ((46528, 46615), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.urbandictionary.com/v0/define"""'], {'params': "{'term': term}"}), "('http://api.urbandictionary.com/v0/define', params={'term':\n term})\n", (46544, 46615), False, 'from src.utils import funcs\n'), ((47259, 47299), 'src.utils.funcs.timeStrToDatetime', 'funcs.timeStrToDatetime', (["c['written_on']"], {}), "(c['written_on'])\n", (47282, 47299), False, 'from src.utils import funcs\n'), ((47324, 47352), 'discord.Embed', 'Embed', ([], {'description': 'permalink'}), '(description=permalink)\n', (47329, 47352), False, 'from discord import Embed, File, channel\n'), ((48821, 48846), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (48837, 48846), False, 'from src.utils import funcs\n'), ((49243, 49356), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""https://api.genius.com/search"""'], {'params': "{'q': keywords, 'access_token': config.geniusToken}"}), "('https://api.genius.com/search', params={'q': keywords,\n 'access_token': config.geniusToken})\n", (49259, 49356), False, 'from src.utils import funcs\n'), ((52657, 52679), 'src.utils.funcs.decodeQR', 'funcs.decodeQR', (['qrlink'], {}), '(qrlink)\n', (52671, 52679), False, 'from src.utils import funcs\n'), ((52798, 52886), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Cannot detect QR code. Maybe try making the image clearer?"""'], {}), "(None,\n 'Cannot detect QR code. Maybe try making the image clearer?')\n", (52814, 52886), False, 'from src.utils import funcs\n'), ((52935, 52960), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (52951, 52960), False, 'from src.utils import funcs\n'), ((58290, 58326), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['timestamp'], {}), '(timestamp)\n', (58315, 58326), False, 'from datetime import datetime, timedelta\n'), ((59949, 60037), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""https://api.dictionaryapi.dev/api/v2/entries/{langcode}/{word}"""'], {}), "(\n f'https://api.dictionaryapi.dev/api/v2/entries/{langcode}/{word}')\n", (59965, 60037), False, 'from src.utils import funcs\n'), ((60841, 60866), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (60857, 60866), False, 'from src.utils import funcs\n'), ((60887, 60926), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown word."""'], {}), "(None, 'Unknown word.')\n", (60903, 60926), False, 'from src.utils import funcs\n'), ((61995, 62082), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['"""NSFW/Over 18!"""', '"""Please view this community in an NSFW channel."""'], {}), "('NSFW/Over 18!',\n 'Please view this community in an NSFW channel.')\n", (62011, 62082), False, 'from src.utils import funcs\n'), ((62561, 62713), 'discord.Embed', 'Embed', ([], {'description': "(f'https://www.reddit.com/r/{subreddit.display_name}' + ' ([Old Reddit](' +\n f'https://old.reddit.com/r/{subreddit.display_name}))')"}), "(description=f'https://www.reddit.com/r/{subreddit.display_name}' +\n ' ([Old Reddit](' + f'https://old.reddit.com/r/{subreddit.display_name}))')\n", (62566, 62713), False, 'from discord import Embed, File, channel\n'), ((63052, 63100), 
'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['subreddit.created_utc'], {}), '(subreddit.created_utc)\n', (63077, 63100), False, 'from datetime import datetime, timedelta\n'), ((68050, 68142), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['"""Invalid input!"""', '"""Please use `r/"subreddit name"` or `u/"username"`."""'], {}), '(\'Invalid input!\',\n \'Please use `r/"subreddit name"` or `u/"username"`.\')\n', (68066, 68142), False, 'from src.utils import funcs\n'), ((72768, 72804), 'src.utils.funcs.readTxtAttachment', 'funcs.readTxtAttachment', (['ctx.message'], {}), '(ctx.message)\n', (72791, 72804), False, 'from src.utils import funcs\n'), ((85946, 86019), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["('https://api.altmetric.com/v1/doi/' + doi)"], {'verify': '(False)'}), "('https://api.altmetric.com/v1/doi/' + doi, verify=False)\n", (85962, 86019), False, 'from src.utils import funcs\n'), ((86164, 86185), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (86180, 86185), False, 'from src.utils import funcs\n'), ((89160, 89238), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["('https://metrics-api.dimensions.ai/doi/' + doi)"], {'verify': '(False)'}), "('https://metrics-api.dimensions.ai/doi/' + doi, verify=False)\n", (89176, 89238), False, 'from src.utils import funcs\n'), ((95762, 95788), 'src.utils.funcs.getZodiacInfo', 'funcs.getZodiacInfo', (['month'], {}), '(month)\n', (95781, 95788), False, 'from src.utils import funcs\n'), ((96514, 96544), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month'], {}), '(month)\n', (96537, 96544), False, 'from src.utils import funcs\n'), ((99290, 99306), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (99304, 99306), False, 'from datetime import datetime, timedelta\n'), ((99309, 99332), 'datetime.timedelta', 'timedelta', ([], {'days': 'day1int'}), '(days=day1int)\n', (99318, 99332), False, 'from datetime import datetime, timedelta\n'), ((104551, 104573), 'datetime.datetime', 'datetime', (['(1998)', '(11)', '(20)'], {}), '(1998, 11, 20)\n', (104559, 104573), False, 'from datetime import datetime, timedelta\n'), ((105013, 105058), 'src.utils.funcs.dateBirthday', 'funcs.dateBirthday', (['dt.day', 'dt.month', 'dt.year'], {}), '(dt.day, dt.month, dt.year)\n', (105031, 105058), False, 'from src.utils import funcs\n'), ((106481, 106504), 'src.utils.funcs.noteFinder', 'funcs.noteFinder', (['note1'], {}), '(note1)\n', (106497, 106504), False, 'from src.utils import funcs\n'), ((106506, 106529), 'src.utils.funcs.noteFinder', 'funcs.noteFinder', (['note2'], {}), '(note2)\n', (106522, 106529), False, 'from src.utils import funcs\n'), ((111193, 111331), 'src.utils.funcs.newButtonView', 'funcs.newButtonView', (['(2)'], {'label': '"""DuckDuckGo"""', 'url': 'f"""https://www.duckduckgo.com/?{param}"""', 'emoji': "self.client.emoji['ddg']", 'view': 'view'}), "(2, label='DuckDuckGo', url=\n f'https://www.duckduckgo.com/?{param}', emoji=self.client.emoji['ddg'],\n view=view)\n", (111212, 111331), False, 'from src.utils import funcs\n'), ((111969, 112040), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.wolframalpha.com/v2/query"""'], {'params': 'params'}), "('http://api.wolframalpha.com/v2/query', params=params)\n", (111985, 112040), False, 'from src.utils import funcs\n'), ((113765, 113790), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (113781, 113790), False, 'from src.utils import funcs\n'), ((2904, 2910), 
'time.time', 'time', ([], {}), '()\n', (2908, 2910), False, 'from time import gmtime, mktime, time\n'), ((4319, 4460), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', 'f"""You must specify a reminder ID! See `{self.client.command_prefix}reminders` for a list of your reminders."""'], {}), "(None,\n f'You must specify a reminder ID! See `{self.client.command_prefix}reminders` for a list of your reminders.'\n )\n", (4335, 4460), False, 'from src.utils import funcs\n'), ((5031, 5161), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', 'f"""Unknown reminder ID. See `{self.client.command_prefix}reminders` for a list of your reminders."""'], {}), "(None,\n f'Unknown reminder ID. See `{self.client.command_prefix}reminders` for a list of your reminders.'\n )\n", (5047, 5161), False, 'from src.utils import funcs\n'), ((5682, 5731), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Please leave a message!"""'], {}), "(None, 'Please leave a message!')\n", (5698, 5731), False, 'from src.utils import funcs\n'), ((8358, 8412), 'src.utils.funcs.timeDifferenceStr', 'funcs.timeDifferenceStr', (["reminder['data']['time']", 'now'], {}), "(reminder['data']['time'], now)\n", (8381, 8412), False, 'from src.utils import funcs\n'), ((12730, 12793), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Title must be 100 characters or less."""'], {}), "(None, 'Title must be 100 characters or less.')\n", (12746, 12793), False, 'from src.utils import funcs\n'), ((13195, 13231), 'plotly.graph_objects.Pie', 'go.Pie', ([], {'labels': 'labels', 'values': 'values'}), '(labels=labels, values=values)\n', (13201, 13231), True, 'from plotly import graph_objects as go\n'), ((13945, 14008), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Title must be 100 characters or less."""'], {}), "(None, 'Title must be 100 characters or less.')\n", (13961, 14008), False, 'from src.utils import funcs\n'), ((14410, 14440), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'labels', 'y': 'values'}), '(x=labels, y=values)\n', (14420, 14440), True, 'from plotly import graph_objects as go\n'), ((15257, 15320), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Title must be 100 characters or less."""'], {}), "(None, 'Title must be 100 characters or less.')\n", (15273, 15320), False, 'from src.utils import funcs\n'), ((15722, 15748), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'labels', 'y': 'values'}), '(x=labels, y=values)\n', (15728, 15748), True, 'from plotly import graph_objects as go\n'), ((31708, 31745), 'src.utils.funcs.degreesToDirection', 'funcs.degreesToDirection', (['winddegrees'], {}), '(winddegrees)\n', (31732, 31745), False, 'from src.utils import funcs\n'), ((33537, 33561), 'src.utils.funcs.formatting', 'funcs.formatting', (['output'], {}), '(output)\n', (33553, 33561), False, 'from src.utils import funcs\n'), ((41244, 41273), 'src.utils.funcs.timeStr', 'funcs.timeStr', (['d', 'h', 'm', 's', 'ms'], {}), '(d, h, m, s, ms)\n', (41257, 41273), False, 'from src.utils import funcs\n'), ((43109, 43178), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""https://www.speedrun.com/api/v1/categories/{cat}"""'], {}), "(f'https://www.speedrun.com/api/v1/categories/{cat}')\n", (43125, 43178), False, 'from src.utils import funcs\n'), ((43410, 43471), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["queuedata['pagination']['links'][-1]['uri']"], {}), "(queuedata['pagination']['links'][-1]['uri'])\n", (43426, 43471), False, 'from src.utils import 
funcs\n'), ((44933, 44958), 'discord.Embed', 'Embed', ([], {'description': 'output'}), '(description=output)\n', (44938, 44958), False, 'from discord import Embed, File, channel\n'), ((46429, 46467), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Empty input."""'], {}), "(None, 'Empty input.')\n", (46445, 46467), False, 'from src.utils import funcs\n'), ((49799, 49825), 'lyricsgenius.Genius', 'Genius', (['config.geniusToken'], {}), '(config.geniusToken)\n', (49805, 49825), False, 'from lyricsgenius import Genius\n'), ((50473, 50513), 'src.utils.page_buttons.PageButtons', 'PageButtons', (['ctx', 'self.client', 'm', 'embeds'], {}), '(ctx, self.client, m, embeds)\n', (50484, 50513), False, 'from src.utils.page_buttons import PageButtons\n'), ((53654, 53750), 'discord.Embed', 'Embed', ([], {'title': '"""Please select a language below or input `quit` to quit..."""', 'description': 'output'}), "(title='Please select a language below or input `quit` to quit...',\n description=output)\n", (53659, 53750), False, 'from discord import Embed, File, channel\n'), ((56522, 56533), 'json.dumps', 'dumps', (['data'], {}), '(data)\n', (56527, 56533), False, 'from json import JSONDecodeError, dumps\n'), ((60683, 60707), 'discord.Embed', 'Embed', ([], {'title': 'f""""{word}\\""""'}), '(title=f\'"{word}"\')\n', (60688, 60707), False, 'from discord import Embed, File, channel\n'), ((60746, 60787), 'src.utils.funcs.formatting', 'funcs.formatting', (['output[:-1]'], {'limit': '(1000)'}), '(output[:-1], limit=1000)\n', (60762, 60787), False, 'from src.utils import funcs\n'), ((65075, 65160), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['"""NSFW/Over 18!"""', '"""Please view this profile in an NSFW channel."""'], {}), "('NSFW/Over 18!',\n 'Please view this profile in an NSFW channel.')\n", (65091, 65160), False, 'from src.utils import funcs\n'), ((65203, 65343), 'discord.Embed', 'Embed', ([], {'description': "(f'https://www.reddit.com/user/{redditor.name}' + ' ([Old Reddit](' +\n f'https://old.reddit.com/user/{redditor.name}))')"}), "(description=f'https://www.reddit.com/user/{redditor.name}' +\n ' ([Old Reddit](' + f'https://old.reddit.com/user/{redditor.name}))')\n", (65208, 65343), False, 'from discord import Embed, File, channel\n'), ((71346, 71371), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (71362, 71371), False, 'from src.utils import funcs\n'), ((73221, 73239), 'PyPDF2.PdfFileReader', 'PdfFileReader', (['pdf'], {}), '(pdf)\n', (73234, 73239), False, 'from PyPDF2 import PdfFileReader\n'), ((73716, 73769), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Cannot process empty input."""'], {}), "(None, 'Cannot process empty input.')\n", (73732, 73769), False, 'from src.utils import funcs\n'), ((84492, 84562), 'src.utils.funcs.replaceCharacters', 'funcs.replaceCharacters', (['doi', "['https://doi.org/', 'doi:', 'doi.org/']"], {}), "(doi, ['https://doi.org/', 'doi:', 'doi.org/'])\n", (84515, 84562), False, 'from src.utils import funcs\n'), ((85802, 85823), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (85818, 85823), False, 'from src.utils import funcs\n'), ((96700, 96725), 'src.utils.funcs.valueToOrdinal', 'funcs.valueToOrdinal', (['day'], {}), '(day)\n', (96720, 96725), False, 'from src.utils import funcs\n'), ((97929, 97960), 'src.utils.funcs.yearToChineseZodiac', 'funcs.yearToChineseZodiac', (['year'], {}), '(year)\n', (97954, 97960), False, 'from src.utils import funcs\n'), ((99368, 99382), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99380, 99382), False, 'from datetime import datetime, timedelta\n'), ((99420, 99434), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99432, 99434), False, 'from datetime import datetime, timedelta\n'), ((99472, 99486), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99484, 99486), False, 'from datetime import datetime, timedelta\n'), ((99741, 99772), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month2'], {}), '(month2)\n', (99764, 99772), False, 'from src.utils import funcs\n'), ((99866, 99880), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99878, 99880), False, 'from datetime import datetime, timedelta\n'), ((99916, 99930), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99928, 99930), False, 'from datetime import datetime, timedelta\n'), ((99966, 99980), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99978, 99980), False, 'from datetime import datetime, timedelta\n'), ((100229, 100259), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month'], {}), '(month)\n', (100252, 100259), False, 'from src.utils import funcs\n'), ((100528, 100551), 'datetime.timedelta', 'timedelta', ([], {'days': 'day2int'}), '(days=day2int)\n', (100537, 100551), False, 'from datetime import datetime, timedelta\n'), ((108046, 108091), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Not a YouTube link."""'], {}), "(None, 'Not a YouTube link.')\n", (108062, 108091), False, 'from src.utils import funcs\n'), ((110687, 110725), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Empty input."""'], {}), "(None, 'Empty input.')\n", (110703, 110725), False, 'from src.utils import funcs\n'), ((111716, 111754), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Empty input."""'], {}), "(None, 'Empty input.')\n", (111732, 111754), False, 'from src.utils import funcs\n'), ((114301, 114350), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""No attachment detected."""'], {}), "(None, 'No attachment detected.')\n", (114317, 114350), False, 'from src.utils import funcs\n'), ((6953, 7000), 'src.utils.page_buttons.PageButtons', 'PageButtons', (['ctx', 'self.client', 'm', 'yourreminders'], {}), '(ctx, self.client, m, yourreminders)\n', (6964, 7000), False, 'from src.utils.page_buttons import PageButtons\n'), ((7755, 7829), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""That value is too big or your input is too long."""'], {}), "(None, 'That value is too big or your input is too long.')\n", (7771, 7829), False, 'from src.utils import funcs\n'), ((12185, 12215), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['values[i]'], {}), '(values[i])\n', (12204, 12215), False, 'from src.utils import funcs\n'), ((30630, 30636), 'time.time', 'time', ([], {}), '()\n', (30634, 30636), False, 'from time import gmtime, mktime, time\n'), ((35947, 36007), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or unknown currency."""'], {}), "(None, 'Invalid input or unknown currency.')\n", (35963, 36007), False, 'from src.utils import funcs\n'), ((41948, 42003), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error or unknown game."""'], {}), "(None, 'Server error or unknown game.')\n", (41964, 42003), False, 'from src.utils import funcs\n'), ((45559, 45603), 'src.utils.page_buttons.PageButtons', 'PageButtons', (['ctx', 'self.client', 'm', 'outputlist'], {}), '(ctx, 
self.client, m, outputlist)\n', (45570, 45603), False, 'from src.utils.page_buttons import PageButtons\n'), ((46047, 46102), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error or unknown game."""'], {}), "(None, 'Server error or unknown game.')\n", (46063, 46102), False, 'from src.utils import funcs\n'), ((47644, 47684), 'src.utils.funcs.formatting', 'funcs.formatting', (['definition'], {'limit': '(1000)'}), '(definition, limit=1000)\n', (47660, 47684), False, 'from src.utils import funcs\n'), ((48727, 48767), 'src.utils.page_buttons.PageButtons', 'PageButtons', (['ctx', 'self.client', 'm', 'embeds'], {}), '(ctx, self.client, m, embeds)\n', (48738, 48767), False, 'from src.utils.page_buttons import PageButtons\n'), ((50619, 50686), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error or song doesn\'t have lyrics."""'], {}), '(None, "Server error or song doesn\'t have lyrics.")\n', (50635, 50686), False, 'from src.utils import funcs\n'), ((52743, 52763), 'src.utils.funcs.formatting', 'funcs.formatting', (['qr'], {}), '(qr)\n', (52759, 52763), False, 'from src.utils import funcs\n'), ((55364, 55395), 'src.utils.funcs.readTxtAttachment', 'funcs.readTxtAttachment', (['option'], {}), '(option)\n', (55387, 55395), False, 'from src.utils import funcs\n'), ((63161, 63206), 'src.utils.funcs.dateBirthday', 'funcs.dateBirthday', (['dt.day', 'dt.month', 'dt.year'], {}), '(dt.day, dt.month, dt.year)\n', (63179, 63206), False, 'from src.utils import funcs\n'), ((65921, 65968), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['redditor.created_utc'], {}), '(redditor.created_utc)\n', (65946, 65968), False, 'from datetime import datetime, timedelta\n'), ((68659, 68678), 'src.utils.funcs.evalMath', 'funcs.evalMath', (['inp'], {}), '(inp)\n', (68673, 68678), False, 'from src.utils import funcs\n'), ((73153, 73191), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['open', 'filepath', '"""rb"""'], {}), "(open, filepath, 'rb')\n", (73169, 73191), False, 'from src.utils import funcs\n'), ((73511, 73538), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['pdf.close'], {}), '(pdf.close)\n', (73527, 73538), False, 'from src.utils import funcs\n'), ((73599, 73624), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (73615, 73624), False, 'from src.utils import funcs\n'), ((80739, 80784), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['elementobj.atomic_weight'], {}), '(elementobj.atomic_weight)\n', (80758, 80784), False, 'from src.utils import funcs\n'), ((82403, 82445), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid element."""'], {}), "(None, 'Invalid element.')\n", (82419, 82445), False, 'from src.utils import funcs\n'), ((85070, 85078), 'platform.system', 'system', ([], {}), '()\n', (85076, 85078), False, 'from platform import system\n'), ((89769, 89790), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (89785, 89790), False, 'from src.utils import funcs\n'), ((92417, 92440), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['q1'], {}), '(q1)\n', (92436, 92440), False, 'from src.utils import funcs\n'), ((92582, 92605), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['q3'], {}), '(q3)\n', (92601, 92605), False, 'from src.utils import funcs\n'), ((92672, 92700), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['(q3 - q1)'], {}), '(q3 - q1)\n', (92691, 92700), False, 'from src.utils import funcs\n'), ((96189, 96203), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (96201, 96203), False, 'from datetime import datetime, timedelta\n'), ((96271, 96285), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (96283, 96285), False, 'from datetime import datetime, timedelta\n'), ((96451, 96481), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month'], {}), '(month)\n', (96474, 96481), False, 'from src.utils import funcs\n'), ((97148, 97170), 'src.utils.funcs.getZodiacInfo', 'funcs.getZodiacInfo', (['z'], {}), '(z)\n', (97167, 97170), False, 'from src.utils import funcs\n'), ((99656, 99687), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month2'], {}), '(month2)\n', (99679, 99687), False, 'from src.utils import funcs\n'), ((100147, 100177), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month'], {}), '(month)\n', (100170, 100177), False, 'from src.utils import funcs\n'), ((101128, 101159), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month2'], {}), '(month2)\n', (101151, 101159), False, 'from src.utils import funcs\n'), ((108491, 108531), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (108507, 108531), False, 'from src.utils import funcs\n'), ((109395, 109491), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Year must be 1500-2500 inclusive, and age must be 0-100 inclusive."""'], {}), "(None,\n 'Year must be 1500-2500 inclusive, and age must be 0-100 inclusive.')\n", (109411, 109491), False, 'from src.utils import funcs\n'), ((109623, 109662), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid year."""'], {}), "(None, 'Invalid year.')\n", (109639, 109662), False, 'from src.utils import funcs\n'), ((35700, 35734), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['initialamount'], {}), '(initialamount)\n', (35719, 35734), False, 'from src.utils import funcs\n'), ((35798, 35825), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['amount'], {}), '(amount)\n', (35817, 35825), False, 'from src.utils import funcs\n'), ((40976, 41002), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["p['uri']"], {}), "(p['uri'])\n", (40992, 41002), False, 'from src.utils import funcs\n'), ((43745, 43814), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""https://www.speedrun.com/api/v1/categories/{cat}"""'], {}), "(f'https://www.speedrun.com/api/v1/categories/{cat}')\n", (43761, 43814), False, 'from src.utils import funcs\n'), ((44727, 44756), 'src.utils.funcs.timeStr', 'funcs.timeStr', (['d', 'h', 'm', 's', 'ms'], {}), '(d, h, m, s, ms)\n', (44740, 44756), False, 'from src.utils import funcs\n'), ((46762, 46801), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown term."""'], {}), "(None, 'Unknown term.')\n", (46778, 46801), False, 'from src.utils import funcs\n'), ((47776, 47813), 'src.utils.funcs.formatting', 'funcs.formatting', (['example'], {'limit': '(1000)'}), '(example, limit=1000)\n', (47792, 47813), False, 'from src.utils import funcs\n'), ((49530, 49569), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown song."""'], {}), "(None, 'Unknown song.')\n", (49546, 49569), False, 'from src.utils import funcs\n'), ((57029, 57102), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Code exceeded the maximum allowed running time."""'], {}), "(None, 'Code exceeded the maximum allowed running time.')\n", (57045, 57102), False, 'from src.utils import 
funcs\n'), ((57896, 57957), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Time zone must be -12-14 inclusive."""'], {}), "(None, 'Time zone must be -12-14 inclusive.')\n", (57912, 57957), False, 'from src.utils import funcs\n'), ((58397, 58441), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid timestamp."""'], {}), "(None, 'Invalid timestamp.')\n", (58413, 58441), False, 'from src.utils import funcs\n'), ((71433, 71447), 'random.choice', 'choice', (['answer'], {}), '(answer)\n', (71439, 71447), False, 'from random import choice\n'), ((73365, 73407), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['reader.getPage', '(page - 1)'], {}), '(reader.getPage, page - 1)\n', (73381, 73407), False, 'from src.utils import funcs\n'), ((73446, 73483), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['pageobj.extractText'], {}), '(pageobj.extractText)\n', (73462, 73483), False, 'from src.utils import funcs\n'), ((81126, 81149), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['ar'], {}), '(ar)\n', (81145, 81149), False, 'from src.utils import funcs\n'), ((81237, 81260), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['en'], {}), '(en)\n', (81256, 81260), False, 'from src.utils import funcs\n'), ((81347, 81370), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['fi'], {}), '(fi)\n', (81366, 81370), False, 'from src.utils import funcs\n'), ((81454, 81477), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['mp'], {}), '(mp)\n', (81473, 81477), False, 'from src.utils import funcs\n'), ((81561, 81584), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['bp'], {}), '(bp)\n', (81580, 81584), False, 'from src.utils import funcs\n'), ((87022, 87042), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (87030, 87042), False, 'from datetime import datetime, timedelta\n'), ((87045, 87089), 'datetime.timedelta', 'timedelta', ([], {'seconds': "altmetric['published_on']"}), "(seconds=altmetric['published_on'])\n", (87054, 87089), False, 'from datetime import datetime, timedelta\n'), ((87291, 87325), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['pub.month'], {}), '(pub.month)\n', (87314, 87325), False, 'from src.utils import funcs\n'), ((92175, 92185), 'statistics.mean', 'mean', (['data'], {}), '(data)\n', (92179, 92185), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((92519, 92531), 'statistics.median', 'median', (['data'], {}), '(data)\n', (92525, 92531), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((92876, 92888), 'statistics.pstdev', 'pstdev', (['data'], {}), '(data)\n', (92882, 92888), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((92966, 92977), 'statistics.stdev', 'stdev', (['data'], {}), '(data)\n', (92971, 92977), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((93059, 93068), 'numpy.min', 'min', (['data'], {}), '(data)\n', (93062, 93068), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((93150, 93159), 'numpy.max', 'max', (['data'], {}), '(data)\n', (93153, 93159), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((93231, 93240), 'numpy.sum', 'sum', (['data'], {}), '(data)\n', (93234, 93240), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((95110, 95132), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['b'], {}), '(b)\n', (95129, 95132), False, 'from src.utils import funcs\n'), ((95141, 95165), 
'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['lcm'], {}), '(lcm)\n', (95160, 95165), False, 'from src.utils import funcs\n'), ((100652, 100666), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (100664, 100666), False, 'from datetime import datetime, timedelta\n'), ((100745, 100759), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (100757, 100759), False, 'from datetime import datetime, timedelta\n'), ((100839, 100853), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (100851, 100853), False, 'from datetime import datetime, timedelta\n'), ((101039, 101070), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month2'], {}), '(month2)\n', (101062, 101070), False, 'from src.utils import funcs\n'), ((101703, 101745), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['dateobjs[0].month'], {}), '(dateobjs[0].month)\n', (101726, 101745), False, 'from src.utils import funcs\n'), ((102079, 102121), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['dateobjs[1].month'], {}), '(dateobjs[1].month)\n', (102102, 102121), False, 'from src.utils import funcs\n'), ((113306, 113346), 'src.utils.page_buttons.PageButtons', 'PageButtons', (['ctx', 'self.client', 'm', 'embeds'], {}), '(ctx, self.client, m, embeds)\n', (113317, 113346), False, 'from src.utils.page_buttons import PageButtons\n'), ((113836, 113898), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error or query limit reached."""'], {}), "(None, 'Server error or query limit reached.')\n", (113852, 113898), False, 'from src.utils import funcs\n'), ((6453, 6488), 'src.utils.funcs.timeDifferenceStr', 'funcs.timeDifferenceStr', (['rtime', 'now'], {}), '(rtime, now)\n', (6476, 6488), False, 'from src.utils import funcs\n'), ((9381, 9418), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""No entries."""'], {}), "(None, 'No entries.')\n", (9397, 9418), False, 'from src.utils import funcs\n'), ((10521, 10558), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""No entries."""'], {}), "(None, 'No entries.')\n", (10537, 10558), False, 'from src.utils import funcs\n'), ((26606, 26640), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['realarrive'], {}), '(realarrive)\n', (26628, 26640), False, 'from datetime import datetime, timedelta\n'), ((26643, 26660), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (26658, 26660), False, 'from datetime import datetime, timedelta\n'), ((26904, 26938), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['realarrive'], {}), '(realarrive)\n', (26926, 26938), False, 'from datetime import datetime, timedelta\n'), ((26941, 26975), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['realdepart'], {}), '(realdepart)\n', (26963, 26975), False, 'from datetime import datetime, timedelta\n'), ((27231, 27248), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (27246, 27248), False, 'from datetime import datetime, timedelta\n'), ((27251, 27285), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['realdepart'], {}), '(realdepart)\n', (27273, 27285), False, 'from datetime import datetime, timedelta\n'), ((37073, 37115), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid article."""'], {}), "(None, 'Invalid article.')\n", (37089, 37115), False, 'from src.utils import funcs\n'), ((45323, 45350), 'src.utils.funcs.strictRounding', 'funcs.strictRounding', (['total'], {}), '(total)\n', (45343, 45350), 
False, 'from src.utils import funcs\n'), ((54437, 54480), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid language."""'], {}), "(None, 'Invalid language.')\n", (54453, 54480), False, 'from src.utils import funcs\n'), ((56926, 56950), 'src.utils.funcs.formatting', 'funcs.formatting', (['stderr'], {}), '(stderr)\n', (56942, 56950), False, 'from src.utils import funcs\n'), ((66029, 66074), 'src.utils.funcs.dateBirthday', 'funcs.dateBirthday', (['dt.day', 'dt.month', 'dt.year'], {}), '(dt.day, dt.month, dt.year)\n', (66047, 66074), False, 'from src.utils import funcs\n'), ((79526, 79568), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid element."""'], {}), "(None, 'Invalid element.')\n", (79542, 79568), False, 'from src.utils import funcs\n'), ((92279, 92289), 'statistics.mode', 'mode', (['data'], {}), '(data)\n', (92283, 92289), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((92773, 92782), 'numpy.max', 'max', (['data'], {}), '(data)\n', (92776, 92782), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((92785, 92794), 'numpy.min', 'min', (['data'], {}), '(data)\n', (92788, 92794), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((95047, 95069), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['a'], {}), '(a)\n', (95066, 95069), False, 'from src.utils import funcs\n'), ((112601, 112658), 'src.utils.funcs.formatting', 'funcs.formatting', (["c['subpods'][0]['plaintext']"], {'limit': '(200)'}), "(c['subpods'][0]['plaintext'], limit=200)\n", (112617, 112658), False, 'from src.utils import funcs\n'), ((113603, 113659), 'src.utils.funcs.formatting', 'funcs.formatting', (['"""Check your spelling, and use English"""'], {}), "('Check your spelling, and use English')\n", (113619, 113659), False, 'from src.utils import funcs\n'), ((7603, 7656), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', 'f"""Invalid input: `{minutes}`"""'], {}), "(None, f'Invalid input: `{minutes}`')\n", (7619, 7656), False, 'from src.utils import funcs\n'), ((8434, 8485), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (["reminder['data']['time']"], {}), "(reminder['data']['time'])\n", (8459, 8485), False, 'from datetime import datetime, timedelta\n'), ((10792, 10832), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid value."""'], {}), "(None, 'Invalid value.')\n", (10808, 10832), False, 'from src.utils import funcs\n'), ((37850, 37892), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid article."""'], {}), "(None, 'Invalid article.')\n", (37866, 37892), False, 'from src.utils import funcs\n'), ((56774, 56816), 'src.utils.funcs.formatting', 'funcs.formatting', (["(data['stdout'] or 'None')"], {}), "(data['stdout'] or 'None')\n", (56790, 56816), False, 'from src.utils import funcs\n'), ((67735, 67777), 'src.utils.funcs.formatting', 'funcs.formatting', (['comment.body'], {'limit': '(1000)'}), '(comment.body, limit=1000)\n', (67751, 67777), False, 'from src.utils import funcs\n'), ((71983, 72002), 'src.utils.funcs.evalMath', 'funcs.evalMath', (['val'], {}), '(val)\n', (71997, 72002), False, 'from src.utils import funcs\n'), ((94878, 94900), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['a'], {}), '(a)\n', (94897, 94900), False, 'from src.utils import funcs\n'), ((94941, 94963), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['b'], {}), '(b)\n', (94960, 94963), False, 'from src.utils import funcs\n'), ((94972, 94996), 
'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['hcf'], {}), '(hcf)\n', (94991, 94996), False, 'from src.utils import funcs\n'), ((102502, 102541), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['dateobj2.month'], {}), '(dateobj2.month)\n', (102525, 102541), False, 'from src.utils import funcs\n'), ((102766, 102804), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['dateobj.month'], {}), '(dateobj.month)\n', (102789, 102804), False, 'from src.utils import funcs\n'), ((33263, 33305), 'deep_translator.constants.GOOGLE_CODES_TO_LANGUAGES.keys', 'constants.GOOGLE_CODES_TO_LANGUAGES.keys', ([], {}), '()\n', (33303, 33305), False, 'from deep_translator import GoogleTranslator, constants\n'), ((71202, 71247), 'src.utils.funcs.getResource', 'funcs.getResource', (['self.name', '"""copypasta.txt"""'], {}), "(self.name, 'copypasta.txt')\n", (71219, 71247), False, 'from src.utils import funcs\n'), ((3226, 3258), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['rtime'], {}), '(rtime)\n', (3251, 3258), False, 'from datetime import datetime, timedelta\n'), ((6336, 6368), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['rtime'], {}), '(rtime)\n', (6361, 6368), False, 'from datetime import datetime, timedelta\n')]
import argparse import multiprocessing import os import pickle import subprocess import sys from random import randint from time import sleep import georasters as gr import numpy as np from osgeo import gdal, osr def save_img(data, geotransform, proj, outPath, noDataValue=np.nan, split=False): # Start the gdal driver for GeoTIFF if outPath == "MEM": driver = gdal.GetDriverByName("MEM") else: driver = gdal.GetDriverByName("GTiff") paths = [] shape = data.shape if len(shape) > 2: if split: for i in range(shape[2]): path = os.path.join( os.path.dirname(outPath), str(i + 1) + "_" + os.path.basename(outPath), ) ds = driver.Create( path, shape[1], shape[0], 1, gdal.GDT_Float32, ["COMPRESS=LZW", "NUM_THREADS=ALL_CPUS", "BIGTIFF=YES"], ) ds.SetProjection(proj) ds.SetGeoTransform(geotransform) ds.GetRasterBand(1).WriteArray(data[:, :, i]) ds.GetRasterBand(1).SetNoDataValue(noDataValue) ds.FlushCache() ds = None paths.append(path) else: ds = driver.Create( outPath, shape[1], shape[0], shape[2], gdal.GDT_Float32, ["COMPRESS=LZW", "NUM_THREADS=ALL_CPUS", "BIGTIFF=YES"], ) ds.SetProjection(proj) ds.SetGeoTransform(geotransform) for i in range(shape[2]): ds.GetRasterBand(i + 1).WriteArray(data[:, :, i]) ds.GetRasterBand(i + 1).SetNoDataValue(noDataValue) ds.FlushCache() ds = None paths.append(outPath) else: ds = driver.Create( outPath, shape[1], shape[0], 1, gdal.GDT_Float32, ["COMPRESS=LZW", "NUM_THREADS=ALL_CPUS", "BIGTIFF=YES"], ) ds.SetProjection(proj) ds.SetGeoTransform(geotransform) ds.GetRasterBand(1).WriteArray(data) ds.GetRasterBand(1).SetNoDataValue(noDataValue) ds.FlushCache() ds = None paths.append(outPath) return paths def get_raster_info(raster_path): ret = gdal.Info(raster_path, options="-json") num_bands = len(ret["bands"]) prj = ret["coordinateSystem"]["wkt"] geoinformation = ret["geoTransform"] try: nodata = ret["bands"][0]["noDataValue"] except KeyError: nodata = -9999 return prj, geoinformation, nodata, num_bands def apply_band_to_filepath(path, band_num): band_path = os.path.join( os.path.dirname(path), str(band_num) + "_" + os.path.basename(path) ) return band_path def get_band_from_filepath(path): num = os.path.basename(path).split("_")[0] return num def get_band_raster(input_raster, split_file, current_band): sleep(randint(1, 20)) split_outPath = apply_band_to_filepath(split_file, current_band) subprocess.run( [ "gdal_translate", "-b", str(current_band), "-co", "COMPRESS=LZW", "-co", "NUM_THREADS=ALL_CPUS", input_raster, split_outPath, ] ) print("band {} is split".format(current_band)) return split_outPath def warp_raster_func(warped_file, split_file_path, kwargs, dest_prj): sleep(randint(1, 20)) srs = osr.SpatialReference(wkt=dest_prj) band_n = get_band_from_filepath(split_file_path) warp_outPath = apply_band_to_filepath(warped_file, band_n) kwargs["dstSRS"] = srs gdal.Warp(warp_outPath, split_file_path, **kwargs) print("band {} is warped".format(band_n)) return warp_outPath def align_raster_func(path, alignraster, pickle_filename): sleep(randint(1, 20)) band = get_band_from_filepath(path) out_filename = apply_band_to_filepath(pickle_filename, band) (alignedraster_o, alignedraster_a, GeoT_a) = gr.align_rasters( path, alignraster, how=np.mean ) array = np.array(alignedraster_o) with open(out_filename, "wb") as f: pickle.dump([array, path, GeoT_a], f) print("band {} is aligned".format(out_filename)) return out_filename def save_raster_func( alignraster_prj, dst_filename, alignraster_nodata, pickle_filename ): sleep(randint(1, 20)) with open(pickle_filename, "rb") as f: line = 
pickle.load(f) n = get_band_from_filepath(pickle_filename) dest_name = apply_band_to_filepath(dst_filename, n) save_img( line[0], list(line[2]), alignraster_prj, dest_name, noDataValue=alignraster_nodata, split=False, ) print("band {} is saved".format(dest_name)) return dest_name def align_raster_full(alignraster, input_raster, dst_filename, temp_dir): # NOTE: rasterband is kept track of with integers before an underscore in the file names # this is used for split and warp, it is not used for align or save because of ram limitations cpu = multiprocessing.cpu_count() - 1 # get raster data ( alignraster_prj, alignraster_geoinformation, alignraster_nodata, alignraster_num_bands, ) = get_raster_info(alignraster) ( input_raster_prj, input_raster_geoinformation, input_raster_nodata, input_raster_num_bands, ) = get_raster_info(input_raster) xRes = alignraster_geoinformation[1] yRes = alignraster_geoinformation[5] # split input raster into bands split_file = os.path.join(temp_dir, "split_raster.tif") input = [(input_raster, split_file, n + 1) for n in range(input_raster_num_bands)] with multiprocessing.Pool(processes=cpu) as p: split_file_paths = p.starmap_async(get_band_raster, input).get() p.close() p.join() print("input file is split") print(split_file_paths) sleep(randint(1, 20)) # warp each split file kwargs = { "format": "GTiff", "xRes": xRes, "yRes": yRes, "resampleAlg": "lanczos", "srcNodata": input_raster_nodata, "dstNodata": alignraster_nodata, "creationOptions": ["COMPRESS=LZW", "NUM_THREADS=ALL_CPUS"], } warped_file = os.path.join(temp_dir, "warped_raster.tif") input = [ (warped_file, split_file_path, kwargs, alignraster_prj) for split_file_path in split_file_paths ] with multiprocessing.Pool(processes=cpu) as p: warped_paths = p.starmap_async(warp_raster_func, input).get() p.close() p.join() print("gdal warp has finished") print(warped_paths) sleep(randint(1, 20)) # align each warped file aligned_file = os.path.join(temp_dir, "aligned_pickle.pkl") input = [(warped_path, alignraster, aligned_file) for warped_path in warped_paths] with multiprocessing.Pool(processes=3) as p: pickle_paths = p.starmap_async(align_raster_func, input).get() p.close() p.join() print("align raster has finished") print(pickle_paths) sleep(randint(1, 20)) # save the pkl to raster input = [ (alignraster_prj, dst_filename, alignraster_nodata, pickle_filename) for pickle_filename in pickle_paths ] with multiprocessing.Pool(processes=4) as p: result = p.starmap_async(save_raster_func, input).get() p.close() p.join() print("saving rasters has finished") print(result) def get_parser(): desc = "make the transforms for the icp json in a directory" parser = argparse.ArgumentParser(description=desc) parser.add_argument("--dst_filename", "-d", help="Path to save band rasters") parser.add_argument("--raster", "-r", help="Path to input raster") parser.add_argument("--alignraster", "-a", help="path to raster to align to") parser.add_argument("--temp_dir", "-t", help="directory to use for temp files") return parser def main(rawargs): """ align raster to another raster Parameters ---------- dst_filename: str Path to save band rasters raster: str Path to input raster alignraster: str path to raster to align to temp_dir: str directory to use for temp files Returns ------- None Examples -------- python align_raster.py \ -d "/media/desktop-linux/my_book/testdata/2013_sat.tif" \ -r "/home/desktop-linux/2013_imagery/clipped_data/clipped_2013_sat.tif" \ -a "/home/desktop-linux/2013_imagery/clipped_data/clipped_2013_nlcd.tif" \ -t 
"/media/desktop-linux/my_book/testdata/working" """ args = get_parser().parse_args(rawargs) dst_filename = args.dst_filename raster = args.raster if not os.path.exists(raster): print("Unable to find path to raster: %s" % args.raster) sys.exit(1) alignraster = args.alignraster if not os.path.exists(alignraster): print("Unable to find path to alignraster: %s" % args.alignraster) sys.exit(1) temp_dir = args.temp_dir if not os.path.exists(temp_dir): print("Unable to find path to temp_dir: %s" % args.temp_dir) sys.exit(1) align_raster_full(alignraster, raster, dst_filename, temp_dir) if __name__ == "__main__": main(sys.argv[1:])
[ "os.path.exists", "pickle.dump", "argparse.ArgumentParser", "osgeo.gdal.Warp", "osgeo.osr.SpatialReference", "os.path.join", "pickle.load", "multiprocessing.cpu_count", "numpy.array", "os.path.dirname", "osgeo.gdal.Info", "multiprocessing.Pool", "os.path.basename", "sys.exit", "osgeo.gdal.GetDriverByName", "random.randint", "georasters.align_rasters" ]
[((2477, 2516), 'osgeo.gdal.Info', 'gdal.Info', (['raster_path'], {'options': '"""-json"""'}), "(raster_path, options='-json')\n", (2486, 2516), False, 'from osgeo import gdal, osr\n'), ((3684, 3718), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {'wkt': 'dest_prj'}), '(wkt=dest_prj)\n', (3704, 3718), False, 'from osgeo import gdal, osr\n'), ((3866, 3916), 'osgeo.gdal.Warp', 'gdal.Warp', (['warp_outPath', 'split_file_path'], {}), '(warp_outPath, split_file_path, **kwargs)\n', (3875, 3916), False, 'from osgeo import gdal, osr\n'), ((4228, 4276), 'georasters.align_rasters', 'gr.align_rasters', (['path', 'alignraster'], {'how': 'np.mean'}), '(path, alignraster, how=np.mean)\n', (4244, 4276), True, 'import georasters as gr\n'), ((4303, 4328), 'numpy.array', 'np.array', (['alignedraster_o'], {}), '(alignedraster_o)\n', (4311, 4328), True, 'import numpy as np\n'), ((5828, 5870), 'os.path.join', 'os.path.join', (['temp_dir', '"""split_raster.tif"""'], {}), "(temp_dir, 'split_raster.tif')\n", (5840, 5870), False, 'import os\n'), ((6529, 6572), 'os.path.join', 'os.path.join', (['temp_dir', '"""warped_raster.tif"""'], {}), "(temp_dir, 'warped_raster.tif')\n", (6541, 6572), False, 'import os\n'), ((6997, 7041), 'os.path.join', 'os.path.join', (['temp_dir', '"""aligned_pickle.pkl"""'], {}), "(temp_dir, 'aligned_pickle.pkl')\n", (7009, 7041), False, 'import os\n'), ((7850, 7891), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (7873, 7891), False, 'import argparse\n'), ((380, 407), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""MEM"""'], {}), "('MEM')\n", (400, 407), False, 'from osgeo import gdal, osr\n'), ((435, 464), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (455, 464), False, 'from osgeo import gdal, osr\n'), ((2869, 2890), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2884, 2890), False, 'import os\n'), ((3135, 3149), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (3142, 3149), False, 'from random import randint\n'), ((3658, 3672), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (3665, 3672), False, 'from random import randint\n'), ((4058, 4072), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (4065, 4072), False, 'from random import randint\n'), ((4377, 4414), 'pickle.dump', 'pickle.dump', (['[array, path, GeoT_a]', 'f'], {}), '([array, path, GeoT_a], f)\n', (4388, 4414), False, 'import pickle\n'), ((4600, 4614), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (4607, 4614), False, 'from random import randint\n'), ((4674, 4688), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4685, 4688), False, 'import pickle\n'), ((5306, 5333), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (5331, 5333), False, 'import multiprocessing\n'), ((5967, 6002), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'cpu'}), '(processes=cpu)\n', (5987, 6002), False, 'import multiprocessing\n'), ((6189, 6203), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (6196, 6203), False, 'from random import randint\n'), ((6714, 6749), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'cpu'}), '(processes=cpu)\n', (6734, 6749), False, 'import multiprocessing\n'), ((6932, 6946), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (6939, 6946), False, 'from random import randint\n'), ((7138, 7171), 
'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(3)'}), '(processes=3)\n', (7158, 7171), False, 'import multiprocessing\n'), ((7358, 7372), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (7365, 7372), False, 'from random import randint\n'), ((7554, 7587), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (7574, 7587), False, 'import multiprocessing\n'), ((9030, 9052), 'os.path.exists', 'os.path.exists', (['raster'], {}), '(raster)\n', (9044, 9052), False, 'import os\n'), ((9127, 9138), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9135, 9138), False, 'import sys\n'), ((9186, 9213), 'os.path.exists', 'os.path.exists', (['alignraster'], {}), '(alignraster)\n', (9200, 9213), False, 'import os\n'), ((9298, 9309), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9306, 9309), False, 'import sys\n'), ((9351, 9375), 'os.path.exists', 'os.path.exists', (['temp_dir'], {}), '(temp_dir)\n', (9365, 9375), False, 'import os\n'), ((9454, 9465), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9462, 9465), False, 'import sys\n'), ((2914, 2936), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2930, 2936), False, 'import os\n'), ((3010, 3032), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3026, 3032), False, 'import os\n'), ((640, 664), 'os.path.dirname', 'os.path.dirname', (['outPath'], {}), '(outPath)\n', (655, 664), False, 'import os\n'), ((705, 730), 'os.path.basename', 'os.path.basename', (['outPath'], {}), '(outPath)\n', (721, 730), False, 'import os\n')]
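The NOTE inside align_raster_full above explains that band numbers are tracked as integer prefixes (before an underscore) in the temporary file names. A minimal round-trip sketch of that convention, using the apply_band_to_filepath and get_band_from_filepath helpers defined in the script; the paths here are hypothetical placeholders:

# Sketch only: hypothetical paths, relying on the helpers defined in the script above.
split_file = "/tmp/working/split_raster.tif"         # hypothetical temp file
band_path = apply_band_to_filepath(split_file, 3)    # -> "/tmp/working/3_split_raster.tif"
band_num = get_band_from_filepath(band_path)         # -> "3" (note: returned as a string)
assert band_num == "3"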
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import math
import xml.etree.ElementTree as ET
from PIL import Image


def resize_img(img):
    """
    Resize the image so that its longer side is at most 580 pixels.
    Args:
        img: image data (ndarray)
    """
    h, w = img.shape[:2]
    min_size = 580
    if w >= h and w > min_size:
        new_w = min_size
        new_h = new_w * h / w
    elif h >= w and h > min_size:
        new_h = min_size
        new_w = new_h * w / h
    else:
        new_h = h
        new_w = w
    new_img = cv2.resize(
        img, (int(new_w), int(new_h)), interpolation=cv2.INTER_CUBIC)
    scale_value = new_w / w
    return new_img, scale_value


def plot_det_label(image, anno, labels):
    """
    Generate an annotated image for an object-detection dataset.
    Args:
        image: path to the image
        anno: path to the annotation file (VOC XML)
        labels: category names of the dataset the image belongs to
    """
    catid2color = {}
    img = cv2.imread(image)
    img, scale_value = resize_img(img)
    tree = ET.parse(anno)
    objs = tree.findall('object')
    color_map = get_color_map_list(len(labels) + 1)
    for i, obj in enumerate(objs):
        cname = obj.find('name').text
        # Skip categories that are not part of the dataset before looking up their index
        if cname not in labels:
            continue
        catid = labels.index(cname)
        xmin = int(float(obj.find('bndbox').find('xmin').text) * scale_value)
        ymin = int(float(obj.find('bndbox').find('ymin').text) * scale_value)
        xmax = int(float(obj.find('bndbox').find('xmax').text) * scale_value)
        ymax = int(float(obj.find('bndbox').find('ymax').text) * scale_value)
        if catid not in catid2color:
            catid2color[catid] = color_map[catid + 1]
        color = tuple(catid2color[catid])
        img = draw_rectangle_and_cname(img, xmin, ymin, xmax, ymax, cname,
                                       color)
    return img


def plot_seg_label(anno):
    """
    Generate a pseudo-color annotated image for a semantic-segmentation dataset.
    Args:
        anno: path to the annotation (label) image
    """
    label = pil_imread(anno)
    pse_label = gray2pseudo(label)
    return pse_label


def plot_insseg_label(image, anno, labels, alpha=0.7):
    """
    Generate an annotated image for an instance-segmentation dataset.
    Args:
        image: path to the image
        anno: path to the annotation file (.npy)
        labels: category names of the dataset the image belongs to
    """
    # Deferred import so pycocotools is only required for instance segmentation
    import pycocotools.mask as mask_util

    anno = np.load(anno, allow_pickle=True).tolist()
    catid2color = dict()
    img = cv2.imread(image)
    img, scale_value = resize_img(img)
    color_map = get_color_map_list(len(labels) + 1)

    img_h = anno['h']
    img_w = anno['w']
    gt_class = anno['gt_class']
    gt_bbox = anno['gt_bbox']
    gt_poly = anno['gt_poly']
    num_bbox = gt_bbox.shape[0]
    num_mask = len(gt_poly)

    # Draw the mask information
    img_array = np.array(img).astype('float32')
    for i in range(num_mask):
        cname = gt_class[i]
        if cname not in labels:
            continue
        catid = labels.index(cname)
        if catid not in catid2color:
            catid2color[catid] = color_map[catid + 1]
        color = np.array(catid2color[catid]).astype('float32')
        for x in range(len(gt_poly[i])):
            for y in range(len(gt_poly[i][x])):
                gt_poly[i][x][y] = int(float(gt_poly[i][x][y]) * scale_value)
        poly = gt_poly[i]
        rles = mask_util.frPyObjects(poly, int(float(img_h) * scale_value),
                                      int(float(img_w) * scale_value))
        rle = mask_util.merge(rles)
        mask = mask_util.decode(rle) * 255
        idx = np.nonzero(mask)
        img_array[idx[0], idx[1], :] *= 1.0 - alpha
        img_array[idx[0], idx[1], :] += alpha * color
    img = img_array.astype('uint8')

    # Draw the bounding boxes
    for i in range(num_bbox):
        cname = gt_class[i]
        if cname not in labels:
            continue
        catid = labels.index(cname)
        if catid not in catid2color:
            catid2color[catid] = color_map[catid]
        color = tuple(catid2color[catid])
        xmin, ymin, xmax, ymax = gt_bbox[i]
        img = draw_rectangle_and_cname(img,
                                       int(float(xmin) * scale_value),
                                       int(float(ymin) * scale_value),
                                       int(float(xmax) * scale_value),
                                       int(float(ymax) * scale_value), cname,
                                       color)
    return img


def draw_rectangle_and_cname(img, xmin, ymin, xmax, ymax, cname, color):
    """
    Draw the bounding box and the category name on the image according to
    the given annotation.
    Args:
        img: image to draw on (ndarray)
        xmin: minimum x coordinate of the detection box
        ymin: minimum y coordinate of the detection box
        xmax: maximum x coordinate of the detection box
        ymax: maximum y coordinate of the detection box
        cname: category name
        color: color associated with the category
    """
    # Draw the detection box
    line_width = math.ceil(2 * max(img.shape[0:2]) / 600)
    cv2.rectangle(
        img,
        pt1=(xmin, ymin),
        pt2=(xmax, ymax),
        color=color,
        thickness=line_width)

    # Compute the text size and draw the category label
    text_thickness = math.ceil(2 * max(img.shape[0:2]) / 1200)
    fontscale = math.ceil(0.5 * max(img.shape[0:2]) / 600)
    tw, th = cv2.getTextSize(
        cname, 0, fontScale=fontscale, thickness=text_thickness)[0]
    cv2.rectangle(
        img,
        pt1=(xmin + 1, ymin - th),
        pt2=(xmin + int(0.7 * tw) + 1, ymin),
        color=color,
        thickness=-1)
    cv2.putText(
        img,
        cname, (int(xmin) + 3, int(ymin) - 5),
        0,
        0.6 * fontscale, (255, 255, 255),
        lineType=cv2.LINE_AA,
        thickness=text_thickness)
    return img


def pil_imread(file_path):
    """
    Read an image into a numpy array.
    Args:
        file_path: path to the image
    """
    img = Image.open(file_path)
    return np.asarray(img)


def get_color_map_list(num_classes):
    """
    Generate a color list for the categories.
    Args:
        num_classes: number of categories
    """
    color_map = num_classes * [0, 0, 0]
    for i in range(0, num_classes):
        j = 0
        lab = i
        while lab:
            color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
            color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
            color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
            j += 1
            lab >>= 3
    color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
    return color_map


def gray2pseudo(gray_image):
    """
    Map the grayscale segmentation label to a pseudo-color image.
    Args:
        gray_image: grayscale label image
    """
    color_map = get_color_map_list(256)
    color_map = np.array(color_map).astype("uint8")
    # Apply the color map channel by channel with OpenCV
    c1 = cv2.LUT(gray_image, color_map[:, 0])
    c2 = cv2.LUT(gray_image, color_map[:, 1])
    c3 = cv2.LUT(gray_image, color_map[:, 2])
    pseudo_img = np.dstack((c1, c2, c3))
    return pseudo_img
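A minimal usage sketch for the plotting helpers above; the image/annotation paths and the label list are hypothetical placeholders:

# Sketch only: hypothetical paths and labels.
labels = ["person", "dog"]
det_vis = plot_det_label("images/0001.jpg", "annotations/0001.xml", labels)
seg_vis = plot_seg_label("annotations/0001.png")
cv2.imwrite("det_vis.jpg", det_vis)   # annotated detection image (ndarray)
cv2.imwrite("seg_vis.png", seg_vis)   # pseudo-color segmentation label (ndarray)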
[ "cv2.rectangle", "numpy.dstack", "PIL.Image.open", "xml.etree.ElementTree.parse", "pycocotools.mask.decode", "numpy.asarray", "cv2.LUT", "numpy.array", "pycocotools.mask.merge", "numpy.nonzero", "cv2.getTextSize", "numpy.load", "cv2.imread" ]
[((1435, 1452), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (1445, 1452), False, 'import cv2\n'), ((1503, 1517), 'xml.etree.ElementTree.parse', 'ET.parse', (['anno'], {}), '(anno)\n', (1511, 1517), True, 'import xml.etree.ElementTree as ET\n'), ((2771, 2788), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (2781, 2788), False, 'import cv2\n'), ((5192, 5285), 'cv2.rectangle', 'cv2.rectangle', (['img'], {'pt1': '(xmin, ymin)', 'pt2': '(xmax, ymax)', 'color': 'color', 'thickness': 'line_width'}), '(img, pt1=(xmin, ymin), pt2=(xmax, ymax), color=color,\n thickness=line_width)\n', (5205, 5285), False, 'import cv2\n'), ((6027, 6048), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (6037, 6048), False, 'from PIL import Image\n'), ((6060, 6075), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (6070, 6075), True, 'import numpy as np\n'), ((6853, 6889), 'cv2.LUT', 'cv2.LUT', (['gray_image', 'color_map[:, 0]'], {}), '(gray_image, color_map[:, 0])\n', (6860, 6889), False, 'import cv2\n'), ((6899, 6935), 'cv2.LUT', 'cv2.LUT', (['gray_image', 'color_map[:, 1]'], {}), '(gray_image, color_map[:, 1])\n', (6906, 6935), False, 'import cv2\n'), ((6945, 6981), 'cv2.LUT', 'cv2.LUT', (['gray_image', 'color_map[:, 2]'], {}), '(gray_image, color_map[:, 2])\n', (6952, 6981), False, 'import cv2\n'), ((6999, 7022), 'numpy.dstack', 'np.dstack', (['(c1, c2, c3)'], {}), '((c1, c2, c3))\n', (7008, 7022), True, 'import numpy as np\n'), ((3876, 3897), 'pycocotools.mask.merge', 'mask_util.merge', (['rles'], {}), '(rles)\n', (3891, 3897), True, 'import pycocotools.mask as mask_util\n'), ((3955, 3971), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (3965, 3971), True, 'import numpy as np\n'), ((5475, 5547), 'cv2.getTextSize', 'cv2.getTextSize', (['cname', '(0)'], {'fontScale': 'fontscale', 'thickness': 'text_thickness'}), '(cname, 0, fontScale=fontscale, thickness=text_thickness)\n', (5490, 5547), False, 'import cv2\n'), ((2694, 2726), 'numpy.load', 'np.load', (['anno'], {'allow_pickle': '(True)'}), '(anno, allow_pickle=True)\n', (2701, 2726), True, 'import numpy as np\n'), ((3107, 3120), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3115, 3120), True, 'import numpy as np\n'), ((3913, 3934), 'pycocotools.mask.decode', 'mask_util.decode', (['rle'], {}), '(rle)\n', (3929, 3934), True, 'import pycocotools.mask as mask_util\n'), ((6788, 6807), 'numpy.array', 'np.array', (['color_map'], {}), '(color_map)\n', (6796, 6807), True, 'import numpy as np\n'), ((3393, 3421), 'numpy.array', 'np.array', (['catid2color[catid]'], {}), '(catid2color[catid])\n', (3401, 3421), True, 'import numpy as np\n')]
""" @author <NAME> """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) sys.path.append(PROJECT_ROOT) import numpy as np import tensorflow as tf # TF 1.x class MatrixEstimator(object): """ TensorFlow 1.x implementation of estimator proposed in: <NAME>., <NAME>., & <NAME>. (2015). Measures of entropy from data using infinitely divisible kernels. IEEE Transactions on Information Theory, 61(1), 535-548. with modifications made by the authors to attenuate scale and dimension related artifacts in the gaussian kernel. The originally proposed kernel width formula is: sigma = gamma * n ^ (-1 / (4+d)) with gamma being some empirical constant. If normalize_scale is set to True, then the variable is first normalized to zero mean and unit variance, as: x -> (x - mean) / sqrt(var + epsilon) which is equivalent to add the standard deviation as a multiplicative dependence in sigma, as done in the classical Silverman rule. This is done to achieve invariance to changes of scale during the mutual information estimation process. Epsilon is a small number to avoid division by zero. If normalize_dimension is set to True, then sigma is computed as: sigma = gamma * sqrt(d) * n ^ (-1 / (4+d)) This is done to center the distribution of pair-wise distances to the same mean across variables with different dimensions, and as a consequence to attenuate dimension related artifacts. Note that normalize_scale=False and normalize_dimension=False will give you the original version of the estimator. The estimator with those modifications was used in: <NAME>. & <NAME>., "On the Information Plane of Autoencoders," in 2020 International Joint Conference on Neural Networks (IJCNN). Full text available at: https://arxiv.org/abs/2005.07783 If you find this software useful, please consider citing our work. """ def __init__( self, gamma=1.0, alpha=1.01, epsilon=1e-8, normalize_scale=True, normalize_dimension=True, log_base=2, use_memory_efficient_gram=False, ): self.gamma = gamma self.alpha = alpha self.epsilon = epsilon self.normalize_scale = normalize_scale self.normalize_dimension = normalize_dimension self.log_base = log_base self.use_memory_efficient_gram = use_memory_efficient_gram def _compute_sigma(self, x): x_dims = tf.shape(x) n = tf.cast(x_dims[0], tf.float32) d = tf.cast(x_dims[1], tf.float32) sigma = self.gamma * n ** (-1 / (4 + d)) if self.normalize_dimension: sigma = sigma * tf.sqrt(d) return sigma def _normalize_variable(self, x, x_is_image): if x_is_image: mean_x = tf.reduce_mean(x) var_x = tf.reduce_mean(tf.square(x - mean_x)) else: mean_x, var_x = tf.nn.moments(x, [0]) std_x = tf.sqrt(var_x + self.epsilon) x = (x - mean_x) / std_x return x def normalized_gram(self, x, sigma_x=None, x_is_image=False): """If sigma_x is provided, then that value will be used. Otherwise, it will be automatically computed using the formula. If x_is_image is True, then the normalization of scale (if applicable) is done aggregating all dimensions. If false, each dimension is normalized independently. 
""" if sigma_x is None: sigma_x = self._compute_sigma(x) if self.normalize_scale: x = self._normalize_variable(x, x_is_image) # Compute pairwise distances (distance matrix) if self.use_memory_efficient_gram: # This option stores a smaller tensor in memory, which might be more convenient for you # when the dimensionality of the variable is too large, at the cost of introducing some # rounding errors due to the intermediate steps # (although I expect them to be insignificant in most cases), # because it performs # (N, Dim) matmul (Dim, N) = (N, N) # thanks to an equivalent formulation of the pairwise distances pairwise_dot = tf.matmul(x, tf.transpose(x)) # (N, N) = (N, Dim) matmul (Dim, N) norms = tf.diag_part(pairwise_dot) # (N,) norms = tf.reshape(norms, [-1, 1]) # (N, 1) pairwise_distance = norms - 2 * pairwise_dot + tf.transpose(norms) # (N, N) = (N, 1) - (N, N) + (1, N) # Avoids small negatives due to possible rounding errors pairwise_distance = tf.nn.relu(pairwise_distance) # (N, N) else: # This option is more robust to rounding errors at the cost of storing a larger tensor # in memory, because it performs # (N, 1, Dim) - (1, N, Dim) = (N, N, Dim) # which is the straightforward difference matrix that is then squared and reduced to (N, N) pairwise_difference = x[:, tf.newaxis, :] - x[tf.newaxis, :, :] # (N, N, Dim) = (N, 1, Dim) - (1, N, Dim) pairwise_squared_difference = tf.square(pairwise_difference) # (N, N, Dim) pairwise_distance = tf.reduce_sum(pairwise_squared_difference, axis=2) # (N, N) # We don't bother with the normalization constant of the gaussian kernel # since it is canceled out during normalization of the Gram matrix den = 2 * (sigma_x ** 2) gram = tf.exp(-pairwise_distance / den) # Normalize gram x_dims = tf.shape(x) n = tf.cast(x_dims[0], tf.float32) norm_gram = gram / n return norm_gram def entropy( self, x, sigma_x=None, x_is_image=False): """See 'normalized_gram' doc.""" norm_gram = self.normalized_gram(x, sigma_x, x_is_image) entropy = self.entropy_with_gram(norm_gram) return entropy def joint_entropy( self, x, y, sigma_x=None, sigma_y=None, x_is_image=False, y_is_image=False): """See 'normalized_gram' doc.""" norm_gram_a = self.normalized_gram(x, sigma_x, x_is_image) norm_gram_b = self.normalized_gram(y, sigma_y, y_is_image) joint_entropy = self.joint_entropy_with_gram(norm_gram_a, norm_gram_b) return joint_entropy def mutual_information( self, x, y, sigma_x=None, sigma_y=None, x_is_image=False, y_is_image=False): """See 'normalized_gram' doc.""" norm_gram_a = self.normalized_gram(x, sigma_x, x_is_image) norm_gram_b = self.normalized_gram(y, sigma_y, y_is_image) mi_xy = self.mutual_information_with_gram(norm_gram_a, norm_gram_b) return mi_xy def entropy_with_gram(self, norm_gram): with tf.device('/cpu:0'): eigvals, _ = tf.self_adjoint_eig(norm_gram) # Fix possible numerical instabilities: # Remove small negatives eigvals = tf.nn.relu(eigvals) # Ensure eigenvalues sum 1 eigvals = eigvals / tf.reduce_sum(eigvals) # Compute entropy in the specified base sum_term = tf.reduce_sum(eigvals ** self.alpha) entropy = tf.log(sum_term) / (1.0 - self.alpha) entropy = entropy / np.log(self.log_base) return entropy def joint_entropy_with_gram(self, norm_gram_a, norm_gram_b): n = tf.cast(tf.shape(norm_gram_a)[0], tf.float32) norm_gram = n * tf.multiply(norm_gram_a, norm_gram_b) joint_entropy = self.entropy_with_gram(norm_gram) return joint_entropy def mutual_information_with_gram(self, norm_gram_a, norm_gram_b): h_x = self.entropy_with_gram(norm_gram_a) h_y = 
self.entropy_with_gram(norm_gram_b) h_xy = self.joint_entropy_with_gram(norm_gram_a, norm_gram_b) mi_xy = h_x + h_y - h_xy return mi_xy
[ "tensorflow.shape", "tensorflow.self_adjoint_eig", "tensorflow.transpose", "tensorflow.reduce_sum", "tensorflow.nn.moments", "numpy.log", "tensorflow.multiply", "tensorflow.reduce_mean", "tensorflow.cast", "sys.path.append", "tensorflow.log", "tensorflow.square", "tensorflow.device", "os.path.dirname", "tensorflow.sqrt", "tensorflow.reshape", "tensorflow.nn.relu", "tensorflow.diag_part", "tensorflow.exp" ]
[((233, 262), 'sys.path.append', 'sys.path.append', (['PROJECT_ROOT'], {}), '(PROJECT_ROOT)\n', (248, 262), False, 'import sys\n'), ((199, 224), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (214, 224), False, 'import os\n'), ((2690, 2701), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2698, 2701), True, 'import tensorflow as tf\n'), ((2714, 2744), 'tensorflow.cast', 'tf.cast', (['x_dims[0]', 'tf.float32'], {}), '(x_dims[0], tf.float32)\n', (2721, 2744), True, 'import tensorflow as tf\n'), ((2757, 2787), 'tensorflow.cast', 'tf.cast', (['x_dims[1]', 'tf.float32'], {}), '(x_dims[1], tf.float32)\n', (2764, 2787), True, 'import tensorflow as tf\n'), ((3185, 3214), 'tensorflow.sqrt', 'tf.sqrt', (['(var_x + self.epsilon)'], {}), '(var_x + self.epsilon)\n', (3192, 3214), True, 'import tensorflow as tf\n'), ((5703, 5735), 'tensorflow.exp', 'tf.exp', (['(-pairwise_distance / den)'], {}), '(-pairwise_distance / den)\n', (5709, 5735), True, 'import tensorflow as tf\n'), ((5778, 5789), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (5786, 5789), True, 'import tensorflow as tf\n'), ((5802, 5832), 'tensorflow.cast', 'tf.cast', (['x_dims[0]', 'tf.float32'], {}), '(x_dims[0], tf.float32)\n', (5809, 5832), True, 'import tensorflow as tf\n'), ((3029, 3046), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {}), '(x)\n', (3043, 3046), True, 'import tensorflow as tf\n'), ((3147, 3168), 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0]'], {}), '(x, [0])\n', (3160, 3168), True, 'import tensorflow as tf\n'), ((4533, 4559), 'tensorflow.diag_part', 'tf.diag_part', (['pairwise_dot'], {}), '(pairwise_dot)\n', (4545, 4559), True, 'import tensorflow as tf\n'), ((4588, 4614), 'tensorflow.reshape', 'tf.reshape', (['norms', '[-1, 1]'], {}), '(norms, [-1, 1])\n', (4598, 4614), True, 'import tensorflow as tf\n'), ((4842, 4871), 'tensorflow.nn.relu', 'tf.nn.relu', (['pairwise_distance'], {}), '(pairwise_distance)\n', (4852, 4871), True, 'import tensorflow as tf\n'), ((5359, 5389), 'tensorflow.square', 'tf.square', (['pairwise_difference'], {}), '(pairwise_difference)\n', (5368, 5389), True, 'import tensorflow as tf\n'), ((5437, 5487), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['pairwise_squared_difference'], {'axis': '(2)'}), '(pairwise_squared_difference, axis=2)\n', (5450, 5487), True, 'import tensorflow as tf\n'), ((7008, 7027), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (7017, 7027), True, 'import tensorflow as tf\n'), ((7054, 7084), 'tensorflow.self_adjoint_eig', 'tf.self_adjoint_eig', (['norm_gram'], {}), '(norm_gram)\n', (7073, 7084), True, 'import tensorflow as tf\n'), ((7196, 7215), 'tensorflow.nn.relu', 'tf.nn.relu', (['eigvals'], {}), '(eigvals)\n', (7206, 7215), True, 'import tensorflow as tf\n'), ((7385, 7421), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(eigvals ** self.alpha)'], {}), '(eigvals ** self.alpha)\n', (7398, 7421), True, 'import tensorflow as tf\n'), ((7707, 7744), 'tensorflow.multiply', 'tf.multiply', (['norm_gram_a', 'norm_gram_b'], {}), '(norm_gram_a, norm_gram_b)\n', (7718, 7744), True, 'import tensorflow as tf\n'), ((2902, 2912), 'tensorflow.sqrt', 'tf.sqrt', (['d'], {}), '(d)\n', (2909, 2912), True, 'import tensorflow as tf\n'), ((3082, 3103), 'tensorflow.square', 'tf.square', (['(x - mean_x)'], {}), '(x - mean_x)\n', (3091, 3103), True, 'import tensorflow as tf\n'), ((4459, 4474), 'tensorflow.transpose', 'tf.transpose', (['x'], {}), '(x)\n', (4471, 4474), True, 'import tensorflow as tf\n'), ((4684, 4703), 
'tensorflow.transpose', 'tf.transpose', (['norms'], {}), '(norms)\n', (4696, 4703), True, 'import tensorflow as tf\n'), ((7287, 7309), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['eigvals'], {}), '(eigvals)\n', (7300, 7309), True, 'import tensorflow as tf\n'), ((7444, 7460), 'tensorflow.log', 'tf.log', (['sum_term'], {}), '(sum_term)\n', (7450, 7460), True, 'import tensorflow as tf\n'), ((7514, 7535), 'numpy.log', 'np.log', (['self.log_base'], {}), '(self.log_base)\n', (7520, 7535), True, 'import numpy as np\n'), ((7645, 7666), 'tensorflow.shape', 'tf.shape', (['norm_gram_a'], {}), '(norm_gram_a)\n', (7653, 7666), True, 'import tensorflow as tf\n')]
import os import numpy as np from pwtools.common import is_seq, file_write from .testenv import testdir def test_is_seq(): fn = os.path.join(testdir, 'is_seq_test_file') file_write(fn, 'lala') fd = open(fn , 'r') for xx in ([1,2,3], (1,2,3), np.array([1,2,3])): print(type(xx)) assert is_seq(xx) is True for xx in ('aaa', fd): print(type(xx)) assert is_seq(xx) is False fd.close()
[ "pwtools.common.is_seq", "numpy.array", "os.path.join", "pwtools.common.file_write" ]
[((133, 174), 'os.path.join', 'os.path.join', (['testdir', '"""is_seq_test_file"""'], {}), "(testdir, 'is_seq_test_file')\n", (145, 174), False, 'import os\n'), ((179, 201), 'pwtools.common.file_write', 'file_write', (['fn', '"""lala"""'], {}), "(fn, 'lala')\n", (189, 201), False, 'from pwtools.common import is_seq, file_write\n'), ((259, 278), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (267, 278), True, 'import numpy as np\n'), ((318, 328), 'pwtools.common.is_seq', 'is_seq', (['xx'], {}), '(xx)\n', (324, 328), False, 'from pwtools.common import is_seq, file_write\n'), ((403, 413), 'pwtools.common.is_seq', 'is_seq', (['xx'], {}), '(xx)\n', (409, 413), False, 'from pwtools.common import is_seq, file_write\n')]
# Save to HDF because cPickle fails with very large arrays # https://github.com/numpy/numpy/issues/2396 import h5py import numpy as np import tempfile import unittest def dict_to_hdf(fname, d): """ Save a dict-of-dict datastructure where values are numpy arrays to a .hdf5 file """ with h5py.File(fname, 'w') as f: def _dict_to_group(root, d): for key, val in d.iteritems(): if isinstance(val, dict): grp = root.create_group(key) _dict_to_group(grp, val) else: root.create_dataset(key, data=val) _dict_to_group(f, d) def hdf_to_dict(fname): """ Loads a dataset saved using dict_to_hdf """ with h5py.File(fname, 'r') as f: def _load_to_dict(root): d = {} for key, val in root.iteritems(): if isinstance(val, h5py.Group): d[key] = _load_to_dict(val) else: d[key] = val.value return d return _load_to_dict(f) def load(exp_name, ret_d=False, data_fname='data.hdf5'): d = hdf_to_dict('../%s' % data_fname) mosaic = d['mosaic'] id2label = d['id2label'] train_ij = d['experiments'][exp_name]['train_ij'] test_ij = d['experiments'][exp_name]['test_ij'] y_train = d['experiments'][exp_name]['y_train'] y_test = d['experiments'][exp_name]['y_test'] if ret_d: return mosaic, id2label, train_ij, test_ij, y_train, y_test, d else: return mosaic, id2label, train_ij, test_ij, y_train, y_test # -- Unit tests class HDFIOTest(unittest.TestCase): def test_hdfio(self): d = { 'a' : np.random.rand(5, 3), 'b' : { 'c' : np.random.randn(1, 2), 'd' : { 'e' : np.random.randn(10, 5), 'f' : np.random.randn(10, 5), } } } with tempfile.NamedTemporaryFile() as f: dict_to_hdf(f.name, d) d2 = hdf_to_dict(f.name) self.assertItemsEqual(d, d2) if __name__ == '__main__': unittest.main()
[ "numpy.random.rand", "h5py.File", "tempfile.NamedTemporaryFile", "unittest.main", "numpy.random.randn" ]
[((2172, 2187), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2185, 2187), False, 'import unittest\n'), ((308, 329), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (317, 329), False, 'import h5py\n'), ((753, 774), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (762, 774), False, 'import h5py\n'), ((1725, 1745), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)'], {}), '(5, 3)\n', (1739, 1745), True, 'import numpy as np\n'), ((1991, 2020), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2018, 2020), False, 'import tempfile\n'), ((1789, 1810), 'numpy.random.randn', 'np.random.randn', (['(1)', '(2)'], {}), '(1, 2)\n', (1804, 1810), True, 'import numpy as np\n'), ((1862, 1884), 'numpy.random.randn', 'np.random.randn', (['(10)', '(5)'], {}), '(10, 5)\n', (1877, 1884), True, 'import numpy as np\n'), ((1912, 1934), 'numpy.random.randn', 'np.random.randn', (['(10)', '(5)'], {}), '(10, 5)\n', (1927, 1934), True, 'import numpy as np\n')]
import argparse from datetime import datetime as dt from lightgbm import LGBMRegressor import numpy as np import pandas as pd from sklearn.metrics import r2_score from sklearn.model_selection import KFold import yaml # from models import lgbm as my_lgbm from cv import r2_cv from preprocessing import load_x, load_y from utils import print_exit, print_float # Don't define any function in this file, # thus don't define main function. # use var `now` in config file and submit file. now = dt.now().strftime('%Y-%m-%d-%H-%M-%S') parser = argparse.ArgumentParser() parser.add_argument('--config', default='./configs/default.yml') options = parser.parse_args() with open(options.config, 'r') as file: config = yaml.safe_load(file) features = config['extracted_features'] col_id_name = config['col_id_name'] col_target_name = config['col_target_name'] dropped_ids = config['dropped_ids'] lgbm_params = config['lgbm_params'] n_folds = config['cv']['n_folds'] Xs = load_x(features, dropped_ids) X_train_all = Xs['train'] X_test = Xs['test'] y_train_all = load_y(col_id_name, col_target_name, dropped_ids) reg_params = lgbm_params['instance'] regressor = LGBMRegressor( boosting_type=reg_params['boosting_type'], learning_rate=reg_params['learning_rate'], reg_alpha=reg_params['reg_alpha'], reg_lambda=reg_params['reg_lambda'], random_state=reg_params['random_state'], silent=reg_params['silent'], ) # cv_scores = r2_cv(regressor, X_train_all, y_train_all, n_folds) # cv_score = cv_scores.mean() # Train regressor.fit(X_train_all, y_train_all) # Predict y_pred_logarithmic = regressor.predict(X_test) y_pred = np.exp(y_pred_logarithmic) # Evaluate y_pred_from_train = regressor.predict(X_train_all) score = r2_score(y_train_all, y_pred_from_train) sub_df = pd.DataFrame( pd.read_feather('data/input/test.feather')[col_id_name] ) sub_df[col_target_name] = y_pred sub_df.to_csv( './data/output/sub_{time}_{score:.5f}.csv'.format( time=now, score=score, ), index=False ) config_file_name = './configs/{time}_{score:.5f}.yml'.format( time=now, score=score, ) with open(config_file_name, 'w') as file: yaml.dump(config, file)
[ "pandas.read_feather", "preprocessing.load_x", "argparse.ArgumentParser", "yaml.dump", "lightgbm.LGBMRegressor", "preprocessing.load_y", "numpy.exp", "yaml.safe_load", "datetime.datetime.now", "sklearn.metrics.r2_score" ]
[((543, 568), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (566, 568), False, 'import argparse\n'), ((972, 1001), 'preprocessing.load_x', 'load_x', (['features', 'dropped_ids'], {}), '(features, dropped_ids)\n', (978, 1001), False, 'from preprocessing import load_x, load_y\n'), ((1062, 1111), 'preprocessing.load_y', 'load_y', (['col_id_name', 'col_target_name', 'dropped_ids'], {}), '(col_id_name, col_target_name, dropped_ids)\n', (1068, 1111), False, 'from preprocessing import load_x, load_y\n'), ((1162, 1417), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {'boosting_type': "reg_params['boosting_type']", 'learning_rate': "reg_params['learning_rate']", 'reg_alpha': "reg_params['reg_alpha']", 'reg_lambda': "reg_params['reg_lambda']", 'random_state': "reg_params['random_state']", 'silent': "reg_params['silent']"}), "(boosting_type=reg_params['boosting_type'], learning_rate=\n reg_params['learning_rate'], reg_alpha=reg_params['reg_alpha'],\n reg_lambda=reg_params['reg_lambda'], random_state=reg_params[\n 'random_state'], silent=reg_params['silent'])\n", (1175, 1417), False, 'from lightgbm import LGBMRegressor\n'), ((1643, 1669), 'numpy.exp', 'np.exp', (['y_pred_logarithmic'], {}), '(y_pred_logarithmic)\n', (1649, 1669), True, 'import numpy as np\n'), ((1741, 1781), 'sklearn.metrics.r2_score', 'r2_score', (['y_train_all', 'y_pred_from_train'], {}), '(y_train_all, y_pred_from_train)\n', (1749, 1781), False, 'from sklearn.metrics import r2_score\n'), ((718, 738), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (732, 738), False, 'import yaml\n'), ((2177, 2200), 'yaml.dump', 'yaml.dump', (['config', 'file'], {}), '(config, file)\n', (2186, 2200), False, 'import yaml\n'), ((494, 502), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (500, 502), True, 'from datetime import datetime as dt\n'), ((1810, 1852), 'pandas.read_feather', 'pd.read_feather', (['"""data/input/test.feather"""'], {}), "('data/input/test.feather')\n", (1825, 1852), True, 'import pandas as pd\n')]
# <NAME> 2014-2020 # mlxtend Machine Learning Library Extensions # Author: <NAME> <<EMAIL>> # # License: BSD 3 clause from mlxtend.utils import assert_raises from mlxtend.utils import check_Xy, format_kwarg_dictionaries import numpy as np import sys import os y = np.array([1, 2, 3, 4]) X = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]]) d_default = {'key1': 1, 'key2': 2} d_user = {'key3': 3, 'key4': 4} protected_keys = ['key1', 'key4'] def test_check_Xy_ok(): check_Xy(X, y) def test_check_Xy_invalid_type_X(): expect = "X must be a NumPy array. Found <class 'list'>" if (sys.version_info < (3, 0)): expect = expect.replace('class', 'type') assert_raises(ValueError, expect, check_Xy, [1, 2, 3, 4], y) def test_check_Xy_float16_X(): check_Xy(X.astype(np.float16), y) def test_check_Xy_float16_y(): check_Xy(X, y.astype(np.int16)) def test_check_Xy_invalid_type_y(): expect = "y must be a NumPy array. Found <class 'list'>" if (sys.version_info < (3, 0)): expect = expect.replace('class', 'type') assert_raises(ValueError, expect, check_Xy, X, [1, 2, 3, 4]) def test_check_Xy_invalid_dtype_X(): assert_raises(ValueError, 'X must be an integer or float array. Found object.', check_Xy, X.astype('object'), y) def test_check_Xy_invalid_dtype_y(): if (sys.version_info > (3, 0)): expect = ('y must be an integer array. Found <U1. ' 'Try passing the array as y.astype(np.integer)') else: expect = ('y must be an integer array. Found |S1. ' 'Try passing the array as y.astype(np.integer)') assert_raises(ValueError, expect, check_Xy, X, np.array(['a', 'b', 'c', 'd'])) def test_check_Xy_invalid_dim_y(): if sys.version_info[:2] == (2, 7) and os.name == 'nt': s = 'y must be a 1D array. Found (4L, 2L)' else: s = 'y must be a 1D array. Found (4, 2)' assert_raises(ValueError, s, check_Xy, X, X.astype(np.integer)) def test_check_Xy_invalid_dim_X(): if sys.version_info[:2] == (2, 7) and os.name == 'nt': s = 'X must be a 2D array. Found (4L,)' else: s = 'X must be a 2D array. Found (4,)' assert_raises(ValueError, s, check_Xy, y, y) def test_check_Xy_unequal_length_X(): assert_raises(ValueError, ('y and X must contain the same number of samples. ' 'Got y: 4, X: 3'), check_Xy, X[1:], y) def test_check_Xy_unequal_length_y(): assert_raises(ValueError, ('y and X must contain the same number of samples. 
' 'Got y: 3, X: 4'), check_Xy, X, y[1:]) def test_format_kwarg_dictionaries_defaults_empty(): empty = format_kwarg_dictionaries() assert isinstance(empty, dict) assert len(empty) == 0 def test_format_kwarg_dictionaries_protected_keys(): formatted_kwargs = format_kwarg_dictionaries( default_kwargs=d_default, user_kwargs=d_user, protected_keys=protected_keys) for key in protected_keys: assert key not in formatted_kwargs def test_format_kwarg_dictionaries_no_default_kwargs(): formatted_kwargs = format_kwarg_dictionaries(user_kwargs=d_user) assert formatted_kwargs == d_user def test_format_kwarg_dictionaries_no_user_kwargs(): formatted_kwargs = format_kwarg_dictionaries(default_kwargs=d_default) assert formatted_kwargs == d_default def test_format_kwarg_dictionaries_default_kwargs_invalid_type(): invalid_kwargs = 'not a dictionary' message = ('d must be of type dict or None, but got ' '{} instead'.format(type(invalid_kwargs))) assert_raises(TypeError, message, format_kwarg_dictionaries, default_kwargs=invalid_kwargs) def test_format_kwarg_dictionaries_user_kwargs_invalid_type(): invalid_kwargs = 'not a dictionary' message = ('d must be of type dict or None, but got ' '{} instead'.format(type(invalid_kwargs))) assert_raises(TypeError, message, format_kwarg_dictionaries, user_kwargs=invalid_kwargs)
[ "numpy.array", "mlxtend.utils.format_kwarg_dictionaries", "mlxtend.utils.assert_raises", "mlxtend.utils.check_Xy" ]
[((266, 288), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (274, 288), True, 'import numpy as np\n'), ((293, 351), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]'], {}), '([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])\n', (301, 351), True, 'import numpy as np\n'), ((476, 490), 'mlxtend.utils.check_Xy', 'check_Xy', (['X', 'y'], {}), '(X, y)\n', (484, 490), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((679, 739), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', 'expect', 'check_Xy', '[1, 2, 3, 4]', 'y'], {}), '(ValueError, expect, check_Xy, [1, 2, 3, 4], y)\n', (692, 739), False, 'from mlxtend.utils import assert_raises\n'), ((1140, 1200), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', 'expect', 'check_Xy', 'X', '[1, 2, 3, 4]'], {}), '(ValueError, expect, check_Xy, X, [1, 2, 3, 4])\n', (1153, 1200), False, 'from mlxtend.utils import assert_raises\n'), ((2547, 2591), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', 's', 'check_Xy', 'y', 'y'], {}), '(ValueError, s, check_Xy, y, y)\n', (2560, 2591), False, 'from mlxtend.utils import assert_raises\n'), ((2708, 2828), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', '"""y and X must contain the same number of samples. Got y: 4, X: 3"""', 'check_Xy', 'X[1:]', 'y'], {}), "(ValueError,\n 'y and X must contain the same number of samples. Got y: 4, X: 3',\n check_Xy, X[1:], y)\n", (2721, 2828), False, 'from mlxtend.utils import assert_raises\n'), ((2961, 3081), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', '"""y and X must contain the same number of samples. Got y: 3, X: 4"""', 'check_Xy', 'X', 'y[1:]'], {}), "(ValueError,\n 'y and X must contain the same number of samples. 
Got y: 3, X: 4',\n check_Xy, X, y[1:])\n", (2974, 3081), False, 'from mlxtend.utils import assert_raises\n'), ((3237, 3264), 'mlxtend.utils.format_kwarg_dictionaries', 'format_kwarg_dictionaries', ([], {}), '()\n', (3262, 3264), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((3405, 3511), 'mlxtend.utils.format_kwarg_dictionaries', 'format_kwarg_dictionaries', ([], {'default_kwargs': 'd_default', 'user_kwargs': 'd_user', 'protected_keys': 'protected_keys'}), '(default_kwargs=d_default, user_kwargs=d_user,\n protected_keys=protected_keys)\n', (3430, 3511), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((3749, 3794), 'mlxtend.utils.format_kwarg_dictionaries', 'format_kwarg_dictionaries', ([], {'user_kwargs': 'd_user'}), '(user_kwargs=d_user)\n', (3774, 3794), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((3911, 3962), 'mlxtend.utils.format_kwarg_dictionaries', 'format_kwarg_dictionaries', ([], {'default_kwargs': 'd_default'}), '(default_kwargs=d_default)\n', (3936, 3962), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((4232, 4328), 'mlxtend.utils.assert_raises', 'assert_raises', (['TypeError', 'message', 'format_kwarg_dictionaries'], {'default_kwargs': 'invalid_kwargs'}), '(TypeError, message, format_kwarg_dictionaries, default_kwargs\n =invalid_kwargs)\n', (4245, 4328), False, 'from mlxtend.utils import assert_raises\n'), ((4603, 4696), 'mlxtend.utils.assert_raises', 'assert_raises', (['TypeError', 'message', 'format_kwarg_dictionaries'], {'user_kwargs': 'invalid_kwargs'}), '(TypeError, message, format_kwarg_dictionaries, user_kwargs=\n invalid_kwargs)\n', (4616, 4696), False, 'from mlxtend.utils import assert_raises\n'), ((1964, 1994), 'numpy.array', 'np.array', (["['a', 'b', 'c', 'd']"], {}), "(['a', 'b', 'c', 'd'])\n", (1972, 1994), True, 'import numpy as np\n')]
from __future__ import absolute_import from __future__ import print_function import numpy as np import re from scipy import linalg import scipy.ndimage as ndi from six.moves import range import os import sys import threading import copy import inspect import types from keras import backend as K from keras.utils.generic_utils import Progbar import tensorflow as tf import cv2 class ImageDataGenerator(object): '''Generate minibatches with real-time data augmentation. # Arguments featurewise_center: set input mean to 0 over the dataset. samplewise_center: set each sample mean to 0. featurewise_std_normalization: divide inputs by std of the dataset. samplewise_std_normalization: divide each input by its std. featurewise_standardize_axis: axis along which to perform feature-wise center and std normalization. samplewise_standardize_axis: axis along which to to perform sample-wise center and std normalization. zca_whitening: apply ZCA whitening. rotation_range: degrees (0 to 180). width_shift_range: fraction of total width. height_shift_range: fraction of total height. shear_range: shear intensity (shear angle in radians). zoom_range: amount of zoom. if scalar z, zoom will be randomly picked in the range [1-z, 1+z]. A sequence of two can be passed instead to select this range. channel_shift_range: shift range for each channels. fill_mode: points outside the boundaries are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is 'nearest'. cval: value used for points outside the boundaries when fill_mode is 'constant'. Default is 0. horizontal_flip: whether to randomly flip images horizontally. vertical_flip: whether to randomly flip images vertically. rescale: rescaling factor. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation). dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension (the depth) is at index 1, in 'tf' mode it is at index 3. It defaults to the `image_dim_ordering` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "th". seed: random seed for reproducible pipeline processing. If not None, it will also be used by `flow` or `flow_from_directory` to generate the shuffle index in case of no seed is set. ''' def __init__(self, featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, featurewise_standardize_axis=None, samplewise_standardize_axis=None, zca_whitening=False, rotation_range=0., width_shift_range=0., height_shift_range=0., shear_range=0., zoom_range=0., channel_shift_range=0., fill_mode='nearest', cval=0., horizontal_flip=False, vertical_flip=False, rescale=None, dim_ordering=K.image_dim_ordering(), seed=None, verbose=1): self.config = copy.deepcopy(locals()) self.config['config'] = self.config self.config['mean'] = None self.config['std'] = None self.config['principal_components'] = None self.config['rescale'] = rescale if dim_ordering not in {'tf', 'th'}: raise Exception('dim_ordering should be "tf" (channel after row and ' 'column) or "th" (channel before row and column). 
' 'Received arg: ', dim_ordering) self.__sync_seed = self.config['seed'] or np.random.randint(0, 4294967295) self.default_pipeline = [] self.default_pipeline.append(random_transform) self.default_pipeline.append(standardize) self.set_pipeline(self.default_pipeline) self.__fitting = False self.fit_lock = threading.Lock() @property def sync_seed(self): return self.__sync_seed @property def fitting(self): return self.__fitting @property def pipeline(self): return self.__pipeline def sync(self, image_data_generator): self.__sync_seed = image_data_generator.sync_seed return (self, image_data_generator) def set_pipeline(self, p): if p is None: self.__pipeline = self.default_pipeline elif type(p) is list: self.__pipeline = p else: raise Exception('invalid pipeline.') def flow(self, X, y=None, batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_mode=None, save_format='jpeg'): return NumpyArrayIterator( X, y, self, batch_size=batch_size, shuffle=shuffle, seed=seed, dim_ordering=self.config['dim_ordering'], save_to_dir=save_to_dir, save_prefix=save_prefix, save_mode=save_mode, save_format=save_format) def flow_from_list(self, X, y=None, batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_mode=None, save_format='jpeg'): return ListArrayIterator( X, y, self, batch_size=batch_size, shuffle=shuffle, seed=seed, dim_ordering=self.config['dim_ordering'], save_to_dir=save_to_dir, save_prefix=save_prefix, save_mode=save_mode, save_format=save_format) # def flow_with_mask(self, X, y=None, batch_size=32, shuffle=True, seed=None, # save_to_dir=None, save_prefix='', save_mode=None, save_format='jpeg'): # return ListArrayIteratorWithMask( # X, y, self, # batch_size=batch_size, shuffle=shuffle, seed=seed, # dim_ordering=self.config['dim_ordering'], # save_to_dir=save_to_dir, save_prefix=save_prefix, # save_mode=save_mode, save_format=save_format) def flow_from_directory(self, directory, color_mode=None, target_size=None, image_reader='pil', reader_config=None, read_formats=None, classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_mode=None, save_format='jpeg'): if reader_config is None: reader_config = {'target_mode': 'RGB', 'target_size': (256, 256)} if read_formats is None: read_formats = {'png', 'jpg', 'jpeg', 'bmp'} return DirectoryIterator( directory, self, color_mode=color_mode, target_size=target_size, image_reader=image_reader, reader_config=reader_config, read_formats=read_formats, classes=classes, class_mode=class_mode, dim_ordering=self.config['dim_ordering'], batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_mode=save_mode, save_format=save_format) def process(self, x): # get next sync_seed np.random.seed(self.__sync_seed) self.__sync_seed = np.random.randint(0, 4294967295) self.config['fitting'] = self.__fitting self.config['sync_seed'] = self.__sync_seed for p in self.__pipeline: x = p(x, **self.config) return x def fit_generator(self, generator, nb_iter): '''Fit a generator # Arguments generator: Iterator, generate data for fitting. nb_iter: Int, number of iteration to fit. ''' with self.fit_lock: try: self.__fitting = nb_iter*generator.batch_size for i in range(nb_iter): next(generator) finally: self.__fitting = False def fit(self, X, rounds=1): '''Fit the pipeline on a numpy array # Arguments X: Numpy array, the data to fit on. 
rounds: how many rounds of fit to do over the data ''' # X = np.copy(X) with self.fit_lock: try: # self.__fitting = rounds*X.shape[0] self.__fitting = rounds * len(X) for r in range(rounds): # for i in range(X.shape[0]): for i in range(len(X)): self.process(X[i]) finally: self.__fitting = False if __name__ == '__main__': pass
[ "six.moves.range", "keras.backend.image_dim_ordering", "threading.Lock", "numpy.random.randint", "numpy.random.seed" ]
[((3432, 3454), 'keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (3452, 3454), True, 'from keras import backend as K\n'), ((4362, 4378), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4376, 4378), False, 'import threading\n'), ((7642, 7674), 'numpy.random.seed', 'np.random.seed', (['self.__sync_seed'], {}), '(self.__sync_seed)\n', (7656, 7674), True, 'import numpy as np\n'), ((7702, 7734), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4294967295)'], {}), '(0, 4294967295)\n', (7719, 7734), True, 'import numpy as np\n'), ((4083, 4115), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4294967295)'], {}), '(0, 4294967295)\n', (4100, 4115), True, 'import numpy as np\n'), ((8278, 8292), 'six.moves.range', 'range', (['nb_iter'], {}), '(nb_iter)\n', (8283, 8292), False, 'from six.moves import range\n'), ((8809, 8822), 'six.moves.range', 'range', (['rounds'], {}), '(rounds)\n', (8814, 8822), False, 'from six.moves import range\n')]
import pytest import numpy as np from sklearn.ensemble import RandomForestClassifier from ..sequential import sequential import pkg_resources PATH = pkg_resources.resource_filename(__name__, 'test_data/') def test_sequential(): "Test sequential feature selection" # load data X = np.load(PATH+'features_largeN.npy') X = X[:,:20] y = np.load(PATH+'features_largeN_labels.npy') # perform SFS clf = RandomForestClassifier(n_estimators=100) X_fwd = sequential(X, y, estimator=clf) X_bwd = sequential(X, y, estimator=clf, direction='backward') # test shapes X_fwd.shape == (700, 10) X_bwd.shape == (700, 10)
[ "numpy.load", "sklearn.ensemble.RandomForestClassifier", "pkg_resources.resource_filename" ]
[((151, 206), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""test_data/"""'], {}), "(__name__, 'test_data/')\n", (182, 206), False, 'import pkg_resources\n'), ((300, 337), 'numpy.load', 'np.load', (["(PATH + 'features_largeN.npy')"], {}), "(PATH + 'features_largeN.npy')\n", (307, 337), True, 'import numpy as np\n'), ((361, 405), 'numpy.load', 'np.load', (["(PATH + 'features_largeN_labels.npy')"], {}), "(PATH + 'features_largeN_labels.npy')\n", (368, 405), True, 'import numpy as np\n'), ((433, 473), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (455, 473), False, 'from sklearn.ensemble import RandomForestClassifier\n')]
import numpy as np from keras.models import Model from keras.layers import Input, Conv2D, DepthwiseConv2D # model inputs = Input(shape=(4, 4, 3)) x = DepthwiseConv2D((3, 3), strides=( 1, 1), depth_multiplier=1, padding='same')(inputs) model = Model(inputs, x) model.load_weights('model.h5') print(model.summary()) # data input_x = np.load('input_x.npy') output_x = np.load('output_x.npy') o = model.predict(input_x) print(np.allclose(output_x, o))
[ "numpy.allclose", "keras.layers.DepthwiseConv2D", "keras.layers.Input", "keras.models.Model", "numpy.load" ]
[((124, 146), 'keras.layers.Input', 'Input', ([], {'shape': '(4, 4, 3)'}), '(shape=(4, 4, 3))\n', (129, 146), False, 'from keras.layers import Input, Conv2D, DepthwiseConv2D\n'), ((248, 264), 'keras.models.Model', 'Model', (['inputs', 'x'], {}), '(inputs, x)\n', (253, 264), False, 'from keras.models import Model\n'), ((337, 359), 'numpy.load', 'np.load', (['"""input_x.npy"""'], {}), "('input_x.npy')\n", (344, 359), True, 'import numpy as np\n'), ((371, 394), 'numpy.load', 'np.load', (['"""output_x.npy"""'], {}), "('output_x.npy')\n", (378, 394), True, 'import numpy as np\n'), ((151, 226), 'keras.layers.DepthwiseConv2D', 'DepthwiseConv2D', (['(3, 3)'], {'strides': '(1, 1)', 'depth_multiplier': '(1)', 'padding': '"""same"""'}), "((3, 3), strides=(1, 1), depth_multiplier=1, padding='same')\n", (166, 226), False, 'from keras.layers import Input, Conv2D, DepthwiseConv2D\n'), ((429, 453), 'numpy.allclose', 'np.allclose', (['output_x', 'o'], {}), '(output_x, o)\n', (440, 453), True, 'import numpy as np\n')]
from pymesh.TestCase import TestCase from pymesh import distance_to_mesh, BVH from pymesh.meshutils import generate_box_mesh import numpy as np class DistanceToMeshTest(TestCase): def test_boundary_pts_cgal(self): mesh = generate_box_mesh( np.array([0, 0, 0]), np.array([1, 1, 1])) pts = np.array([ [0.0, 0.0, 0.0], [1.0, 1.0, 1.0] ]) sq_dist, face_idx, closest_pts = distance_to_mesh(mesh, pts, "cgal") self.assert_array_equal(sq_dist, np.zeros(2)) def test_boundary_pts_geogram(self): mesh = generate_box_mesh( np.array([0, 0, 0]), np.array([1, 1, 1])) pts = np.array([ [0.0, 0.0, 0.0], [1.0, 1.0, 1.0] ]) if "geogram" in BVH.available_engines: sq_dist, face_idx, closest_pts = distance_to_mesh(mesh, pts, "geogram") self.assert_array_equal(sq_dist, np.zeros(2))
[ "numpy.array", "numpy.zeros", "pymesh.distance_to_mesh" ]
[((336, 380), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])\n', (344, 380), True, 'import numpy as np\n'), ((453, 488), 'pymesh.distance_to_mesh', 'distance_to_mesh', (['mesh', 'pts', '"""cgal"""'], {}), "(mesh, pts, 'cgal')\n", (469, 488), False, 'from pymesh import distance_to_mesh, BVH\n'), ((697, 741), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])\n', (705, 741), True, 'import numpy as np\n'), ((279, 298), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (287, 298), True, 'import numpy as np\n'), ((300, 319), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (308, 319), True, 'import numpy as np\n'), ((531, 542), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (539, 542), True, 'import numpy as np\n'), ((640, 659), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (648, 659), True, 'import numpy as np\n'), ((661, 680), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (669, 680), True, 'import numpy as np\n'), ((866, 904), 'pymesh.distance_to_mesh', 'distance_to_mesh', (['mesh', 'pts', '"""geogram"""'], {}), "(mesh, pts, 'geogram')\n", (882, 904), False, 'from pymesh import distance_to_mesh, BVH\n'), ((951, 962), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (959, 962), True, 'import numpy as np\n')]