repo_name: string (6-130 chars)
hexsha: list
file_path: list
code: list
apis: list
loveprolife/srcnn-tensorflow2
[ "163c90b33af22b460173376d27a1714025056de7" ]
[ "train.py" ]
[ "\r\nfrom utils import (\r\n read_data,\r\n input_setup,\r\n imsave,\r\n merge,\r\n get_last_weights\r\n)\r\nimport numpy as np\r\nimport datetime\r\nimport tensorflow as tf\r\nimport time\r\nimport pprint\r\nimport os\r\nimport argparse\r\nfrom tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\r\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\r\n\r\n\r\ngpus = tf.config.experimental.list_physical_devices('GPU')\r\nif gpus:\r\n try:\r\n for gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\r\n except RuntimeError as e:\r\n print(e)\r\n\r\n\r\nparser = argparse.ArgumentParser(description='SRCNN Training')\r\nparser.add_argument(\"--epoch\", default=150, type=int, help=\"Number of epoch [15000]\")\r\nparser.add_argument(\"--batch_size\", default=16, type=int, help=\"The size of batch images [128]\")\r\nparser.add_argument(\"--image_size\", default=33, type=int, help=\"The size of image to use [33]\")\r\nparser.add_argument(\"--label_size\", default=21, type=int, help=\"The size of label to produce [21]\")\r\nparser.add_argument(\"--learning_rate\", default=1e-4, type=int,\r\n help=\"The learning rate of gradient descent algorithm [1e-4]\")\r\nparser.add_argument(\"--c_dim\", default=1, type=int, help=\"Dimension of image color. [1]\")\r\nparser.add_argument(\"--scale\", default=3, type=int, help=\"The size of scale factor for preprocessing input image [3]\")\r\nparser.add_argument(\"--stride\", default=14, type=int, help=\"The size of stride to apply input image [14]\")\r\nparser.add_argument(\"--checkpoint_dir\", default=\"checkpoint/\", type=str, help=\"Name of checkpoint directory [checkpoint]\")\r\nparser.add_argument(\"--sample_dir\", default=\"sample\", type=str, help=\"Name of sample directory [sample]\")\r\nparser.add_argument(\"-w\", \"--load_weights\", default=None, type=str, help=\"whether to load weights from a checkpoint, set None to initialize, set \\'last\\' to load last checkpoint\")\r\nparser.add_argument(\"--save_path\", default='checkpoint/models/', type=str)\r\nparser.add_argument(\"--is_train\", default=True, type=bool, help=\"True for training, False for testing [True]\")\r\n# parser.add_argument(\"--is_train\", default=False, type=bool, help=\"True for training, False for testing [True]\")\r\nargs, unknown = parser.parse_known_args()\r\n\r\npp = pprint.PrettyPrinter()\r\ndef plot_graphs(history, string):\r\n plt.plot(history.history[string])\r\n plt.plot(history.history['val_'+string])\r\n plt.xlabel(\"Epochs\")\r\n plt.ylabel(string)\r\n plt.legend([string, 'val_'+string])\r\n plt.show()\r\n\r\ndef createmodel(args):\r\n model = tf.keras.Sequential()\r\n model.add(tf.keras.layers.Conv2D(64, (9, 9), kernel_initializer='normal', strides=1, padding='VALID',\r\n activation='relu', input_shape=[args.image_size, args.image_size, args.c_dim],\r\n name='conv1'))\r\n model.add(tf.keras.layers.Conv2D(32, (1, 1), kernel_initializer='normal', strides=1, padding='VALID',\r\n activation='relu', name='conv2'))\r\n model.add(tf.keras.layers.Conv2D(1, (5, 5), kernel_initializer='normal', strides=1, padding='VALID',\r\n activation='relu', name='conv3'))\r\n model.compile(optimizer=tf.keras.optimizers.Adam(lr=args.learning_rate),\r\n loss=tf.losses.MSE)\r\n return model\r\n\r\npp.pprint(args)\r\nos.makedirs(args.checkpoint_dir, 
exist_ok=True)\r\nos.makedirs(args.save_path, exist_ok=True)\r\nos.makedirs(args.sample_dir, exist_ok=True)\r\nif args.is_train:\r\n input_setup(args)\r\n data_dir = 'checkpoint/train.h5'\r\n train_data, train_label = read_data(data_dir)\r\n srcnn = createmodel(args)\r\n # load last weights\r\n if args.load_weights is not None:\r\n if args.load_weights.endswith('.h5'):\r\n weights_path = args.load_weights\r\n else:\r\n weights_path = get_last_weights(args.save_path)\r\n try:\r\n last_step = int(os.path.basename(weights_path).split('_')[-1].split('.')[0])\r\n except:\r\n last_step = 0\r\n\r\n try:\r\n ret = srcnn.load_weights(weights_path)\r\n except RuntimeError as e:\r\n print(f'[Warning] Ignoring {e}')\r\n print(\r\n '[Warning] Don\\'t panic if you see this, this might be because you load a pretrained weights with different number of classes. The rest of the weights should be loaded already.')\r\n\r\n print(f'[Info] loaded weights: {os.path.basename(weights_path)}, resuming checkpoint from step: {last_step}')\r\n else:\r\n last_step = 0\r\n current_time = datetime.datetime.now().strftime(('%Y%m%d-%H%M%S'))\r\n log_dir = 'logs/' + current_time\r\n summary_writer = tf.summary.create_file_writer(log_dir)\r\n model_path = 'SRCNN.h5'\r\n saved_model = tf.keras.callbacks.ModelCheckpoint(args.save_path + 'ep_{epoch:03d}.h5', monitor='loss',\r\n save_weights_only=True, save_best_only=True, period=5)\r\n tensorboard = tf.keras.callbacks.TensorBoard(log_dir='log')\r\n start_time = time.time()\r\n history = srcnn.fit(train_data, train_label, batch_size=args.batch_size, validation_split=0.2,\r\n epochs=args.epoch, initial_epoch=last_step, callbacks=[saved_model, tensorboard], verbose=2)\r\n print('spending time:' + str(time.time() - start_time))\r\n # plot_graphs(history, \"val_loss\")\r\n plot_graphs(history, \"loss\")\r\nelse:\r\n nx, ny = input_setup(args)\r\n data_dir = 'checkpoint/test.h5'\r\n weights_path = 'checkpoint/ep150-loss0.005.h5'\r\n test_data, test_label = read_data(data_dir)\r\n print(test_data.shape)\r\n srcnn = createmodel(args)\r\n srcnn.load_weights(weights_path)\r\n result = srcnn.predict(test_data)\r\n print(result.shape)\r\n # result = srcnn.evaluate(test_data, test_label)\r\n\r\n result = merge(result, [nx, ny])\r\n print(result.shape)\r\n image_path = os.path.join(os.getcwd(), args.sample_dir)\r\n image_path = os.path.join(image_path, \"test_image.png\")\r\n imsave(result, image_path)\r\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.config.experimental.list_logical_devices", "tensorflow.config.experimental.set_memory_growth", "tensorflow.config.experimental.list_physical_devices", "tensorflow.summary.create_file_writer", "tensorflow.keras.Sequential", "tensorflow.keras.layers.Conv2D", "matplotlib.pyplot.plot", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.TensorBoard", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
obrienam/BeeFanningDetector
[ "51a8d7d036d58398d997dcebb7fb495040a0539d" ]
[ "Files/fanning_counter.py" ]
[ "import cv2\nimport numpy as np\nfrom collections import defaultdict\n\n'''\nVariables that are important for\nthis program.\n'''\n\ntimes=0\nnumfan=0\n#Video stream used for processing\nvs=cv2.VideoCapture(\"/Users/aidanobrien/Documents/GitHub.nosync/BeeFanningDetector/Assets/test_img&videos/test_vid4.mp4\")\n#Background image used for initial background subtraction and binary and operations.\nbk=cv2.imread('/Users/aidanobrien/Documents/GitHub.nosync/BeeFanningDetector/Assets/test_img&videos/testbkgrd1.jpg')\n#Background image used for secont background subtraction and binary and operations. This is used to detect the wings.\nbk2=cv2.imread('/Users/aidanobrien/Documents/GitHub.nosync/BeeFanningDetector/Assets/test_img&videos/black.png')\nframes=defaultdict(dict) #Dict for holding the video frames of potentially fanning bees\nfoundbee=defaultdict(dict) #Dict that holds flags cooresponding to wether or not a fanning bee was found at a particular spot\nfanframe=defaultdict(dict) #Dict that holds the most recent frame number when a particular bee was detected.\nfound=False\nsframes=0\n#Only use these cropping bounds in 30 fps vids. This eliminates \n#areas at top/bottom of frame that never contain fanning.\nbk=bk[100:100+240,0:0+640]\nbk2=bk2[100:100+240,0:0+640]\n\n'''\nThis function is given a wing contour and\nframe image, and determines wether or not \nthe cooresponding bee is fanning.\n'''\ndef checkWings(c,img):\n global numfan\n global frames\n global foundbee\n global sframes\n global fanframe\n i=0\n (x,y),(ma,Ma),angle=cv2.fitEllipse(c)\n x2,y2,w,h=cv2.boundingRect(c)\n ell=cv2.fitEllipse(c)\n found=False\n mom=cv2.moments(c)\n hy=0\n xw=0\n cx = int(mom[\"m10\"] / mom[\"m00\"])\n cy = int(mom[\"m01\"] / mom[\"m00\"])\n if(cy>100):\n hy=100\n else:\n hy=50\n if(cx>100):\n xw=100\n else:\n xw=50\n #height, width, layers = img.shape\n if(frames.get(tuple([cx,cy])) is not None and i in frames.get(tuple([cx,cy]))):\n framediff=0\n if(fanframe.get(tuple([cx,cy])) is not None):\n while framediff<100:\n if(i in fanframe.get(tuple([cx,cy]))):\n framediff=sframes-fanframe.get(tuple([cx,cy]))[i]\n if(cx == 428 and cy == 140):\n print(\"Diff: {}\".format(framediff))\n else:\n break\n if framediff>=100:\n \n i+=1\n else:\n break\n if(i in fanframe.get(tuple([cx,cy]))):\n #hy, xw, layers = frames[cx,cy][i][0].shape\n fanframe[cx,cy][i]=sframes\n frames[cx,cy][i].append(img[cy-hy:cy+hy,cx-xw:cx+xw])\n else:\n for cX in range (cx-55,cx+55):\n for cY in range (cy-10,cy+10):\n \n framediff=0\n if(fanframe.get(tuple([cX,cY])) is not None):\n while framediff<100:\n if(i in fanframe.get(tuple([cX,cY]))):\n if(cX == 428 and cY == 140):\n print(\"Diff: {}\".format(framediff))\n framediff=sframes-fanframe.get(tuple([cX,cY]))[i]\n else:\n break\n if framediff>=100:\n \n i+=1\n else:\n break \n \n \n if(frames.get(tuple([cX,cY])) is not None and i in frames.get(tuple([cX,cY]))):\n #print(\"{},{}\".format(cx,cy))\n #hy, xw, layers = frames[cX,cY][i][0].shape\n if(cY>100):\n hy=100\n else:\n hy=50\n if(cX>100):\n xw=100\n else:\n xw=50\n frames[cX,cY][i].append(img[cY-hy:cY+hy,cX-xw:cX+xw])\n fanframe[cX,cY][i]=sframes\n found=True\n if(len(frames.get(tuple([cX,cY]))[i])>=20 and foundbee.get(tuple([cX,cY]))[i]==False):\n print(\"Fanning Detected\")\n #print(\"{}, {}\".format(cX,cY))\n cv2.ellipse(img,ell,(0,255,0),2)\n foundbee[cX,cY][i]=True\n numfan+=1\n return\n \n if(found==False and cy < 189):\n fanframe[cx,cy][i]=sframes\n frames[cx,cy][i]=[img[cy-hy:cy+hy,cx-xw:cx+xw]]\n 
foundbee[cx,cy][i]=False\n\n\n'''\nThis function iterates through every entry\nin the frames dictionary and exports \nvideos frames for entries with atleast \n20 frames. Videos can be found in the fanning_exports\ndirectory.\n'''\ndef make_vids():\n i = 0\n global frames\n for key in frames:\n for key2 in frames[key]:\n f=frames[key][key2]\n height, width, layers = f[0].shape\n print(len(f))\n if(len(f)>=20 and (width is not 0 and height is not 0)):\n \n size = (width,height)\n out = cv2.VideoWriter()\n out.open('/Users/aidanobrien/Documents/GitHub.nosync/BeeFanningDetector/Assets/fanning_exports/wings_'+str(key)+\", \"+str(fanframe[key][key2])+'.mov',cv2.VideoWriter_fourcc(*'mp4v'), 10, (size),True)\n for fr in f: \n out.write(fr)\n out.release()\n i=i+1\n\n\n'''\nThis is the main driver loop\nthat iterates through the provided\nvideo and calls the appropriate functions\nto detect fanning bees.\n'''\nwhile True:\n sframes+=1\n hasframes,img=vs.read()\n if(hasframes == False):\n break\n #Again, in 30 fps top and bottom areas of the frame are removed.\n img=img[100:100+240,0:0+640]\n subImage=(bk.astype('int32')-img.astype('int32')).clip(0).astype('uint8')\n grey=cv2.cvtColor(subImage,cv2.COLOR_BGR2GRAY)\n retval,thresh=cv2.threshold(grey,35,255,cv2.THRESH_BINARY)\n kernel=np.ones((5,5),np.uint8)\n thresh=cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel)\n noback = cv2.bitwise_and(img, img, mask= thresh)\n #Color Bounds for 60 fps vids\n #upper = np.array([220,220,220]) \n #lower = np.array([160,160,160]) \n cv2.imshow('noback',noback)\n #Color Bounds for 30 fps vids\n upper = np.array([255,255,255]) \n lower = np.array([128,128,128]) \n mask = cv2.inRange(noback, lower, upper)\n\n wings = cv2.bitwise_and(noback, noback, mask=mask)\n \n cv2.imshow('Just_Wings/Shadows',wings)\n\n subImage2=(wings.astype('int32')-bk2.astype('int32')).clip(0).astype('uint8')\n grey2=cv2.cvtColor(subImage2,cv2.COLOR_BGR2GRAY)\n retval2,thresh2=cv2.threshold(grey2,35,255,cv2.THRESH_BINARY)\n kernel2=np.ones((5,5),np.uint8)\n thresh2=cv2.morphologyEx(thresh2,cv2.MORPH_OPEN,kernel)\n im2, contours1, hierarchy1 = cv2.findContours(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n cnt2=[]\n for c in contours1:\n x,y,w,h=cv2.boundingRect(c)\n r=w/h\n #wing ellipse bounds for 30 fps video \n if(w*h>150 and w*h < 200 and w > h):\n #wing ellipse bounds for 60 fps video (still refining these)\n #if(w*h>300 and w > h and w > 25 and w < 53 and h > 10 and h < 30 and r > 1.44 and r < 3.9):\n ell=cv2.fitEllipse(c)\n checkWings(c,img)\n cv2.ellipse(img,ell,(0,255,0),2)\n \n #print(w*h,w,h)\n \n else:\n cnt2.append(c)\n \n \n cv2.putText(img, \"Fanning Bees: {}\".format(numfan), (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4) \n \n cv2.imshow('Result',img)\n\n cv2.imshow('Thresh',thresh2)\n if times > 0:\n key=cv2.waitKey(1) & 0xFF\n #if q is pressed, stop loop\n if key == ord(\"c\"):\n continue\n if key == ord(\"q\"):\n break\n times = times + 1\nvs.release()\ncv2.destroyAllWindows()\nmake_vids()\n " ]
[ [ "numpy.array", "numpy.ones" ] ]
nicktheway/Pagerank
[ "ff57072deabca020548cd7fcc9f3c5a857f438ed" ]
[ "scripts/visualize_pr.py" ]
[ "import numpy as np \nimport os\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\n\n#import helper functions\nimport helpers as hp\n\ndataset = \"web-Google-diades\"\n\npagerankFilePath = \"./results/pageranks/\" + dataset + \".data\"\nserialPagerankFilePath = \"./results/serial/pageranks/\" + dataset + \".data\"\nlogFilePath = \"./results/logs/\" + dataset + \".txt\"\nserialLogFilePath = \"./results/serial/logs/\" + dataset + \".txt\"\n\nplt.style.use('seaborn-deep')\n\ndataPath, loadToCrsTime, makeStochasticTime, colorTime, colorGroups, iterationTimes, errorProgression = hp.loadParallelLogData(logFilePath)\nSdataPath, SloadToCrsTime, SmakeStochasticTime, SiterationTimes, SerrorProgression = hp.loadSerialLogData(serialLogFilePath)\nx = np.arange(1,iterationTimes.size+1,1)\n\n# Time plot\nfig1 = plt.figure(1)\nax1 = fig1.subplots()\nax1.tick_params(axis='y')\nplt.bar(x, np.cumsum(iterationTimes), alpha=0.6, label='Parallel (ms)')\nax1.bar(x, np.cumsum(SiterationTimes), alpha=0.4, label='Serial (ms)')\nax1.set_ylabel('Cumulative Time (ms)')\nax1.set_xlabel('Iteration #')\nax1.grid(True, linestyle='--', linewidth=0.2)\nlegend1 = ax1.legend(loc=0, shadow=True, title=\"Iteration cumulative Time\")\nplt.xlim([0, iterationTimes.size + 1])\nplt.title('Pagerank calculation\\'s times.')\nplt.suptitle(dataPath, fontweight='bold')\n\nplt.show()\n\n# Speed up-convergence delta plot\nfig2 = plt.figure(2)\nax2_1 = fig2.subplots()\n\n## data\nspeed_ups = SiterationTimes / iterationTimes\nmeanSpeedUp = np.cumsum(speed_ups) / np.arange(1, speed_ups.size+1)\n\nax2_1.plot(x, meanSpeedUp, label='mean speed up', linestyle='--')\nax2_1.set_xlabel('Iteration #')\nax2_1.set_ylabel('Mean speed up')\nlegend2_1 = ax2_1.legend(loc=0, shadow=True)\n\nax2_2 = ax2_1.twinx()\nax2_2.semilogy()\nax2_2.tick_params(axis='y', colors='C2')\nax2_2.plot(x, errorProgression, color='C2', label='Parallel')\nax2_2.plot(x, SerrorProgression, color='C2', ls='', marker='*', label='Serial')\nax2_2.set_ylabel('convergence delta')\nax2_2.grid(True, color='C2', linestyle='--', linewidth=0.1)\nlegend2_2 = ax2_2.legend(loc=0, shadow=True, bbox_to_anchor=(1,0.9), title=\"Convergence delta\")\n\nplt.xlim([0, iterationTimes.size+1])\nplt.suptitle(dataPath, fontweight='bold')\nplt.title('How convergence_delta, speed_up and iteration_times relate.')\nplt.show()\n\n'''\n'''\n# Difference plot\n## data\npr = np.fromfile(pagerankFilePath, dtype=float)\npr_cor = np.fromfile(serialPagerankFilePath, dtype=float)\n\nfig3 = plt.figure(3)\nax3 = fig3.subplots()\nax3.plot(pr-pr_cor)\n\nplt.xlim([0, pr.size])\nplt.ylim(bottom=-1e-12, top=1e-12)\nplt.title(\"Pagerank's vector difference between\\nthe two implementations.\")\nplt.suptitle(dataPath, fontweight='bold')\nplt.xlabel('index')\nplt.ylabel('difference')\n\nplt.show()\n\nfig4 = plt.figure(4)\nax4 = fig4.subplots()\nax4.plot(pr)\nplt.suptitle(dataPath, fontweight='bold')\nplt.title(\"Pagerank vector.\")\nplt.xlabel('index')\nplt.ylabel('pagerank')\nplt.show()\n'''\n'''\n\n# Console messages\nprCalcTime = sum(iterationTimes)\nprint(\"Data preperation time:\\n\\tLoad to memory = %0.2fms\\n\\tColor data in %d groups = %0.2fms\\n\\tMake matrix stochastic = %0.2fms vs %0.2fms\" % (loadToCrsTime, colorGroups, colorTime, makeStochasticTime, SmakeStochasticTime))\nprint(\"Pagerank calculation time: %0.2fms\" % prCalcTime)\nprint(\"Total Time = %0.2fms\" % (prCalcTime+loadToCrsTime+makeStochasticTime+colorTime))\nprint(\"Pagerank vector sum = %0.2f\" % (sum(pr)))" ]
[ [ "numpy.fromfile", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.ylim", "numpy.cumsum", "matplotlib.pyplot.xlim", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
privateos/xigmoid
[ "3d01c65a7f82ce0d851a42d7e38f084eae2b1622" ]
[ "mnist/get_mnist.py" ]
[ "import os\nimport numpy as np\n\ndef get():\n current_file_name = os.path.realpath(__file__)\n current_file_path = os.path.split(current_file_name)[0]\n mnist_file_name = os.path.join(current_file_path, 'mnist.npz')\n mnist_npz = np.load(mnist_file_name)\n x = mnist_npz['x']\n y = mnist_npz['y']\n #x = np.reshape(x, (x.shape[0], -1, 1))\n x = np.transpose(x, (0, 3, 1, 2))\n y = np.argmax(y, 1)\n return x, y\n\nif __name__ == '__main__':\n x, y = get()\n print(x.shape, y.shape)" ]
[ [ "numpy.load", "numpy.argmax", "numpy.transpose" ] ]
PTRRupprecht/Spikefinder-Elephant
[ "be2330a4cc523bc415ba14ef45c20e078eda1cc6" ]
[ "elephant/utils.py" ]
[ "\nimport numpy as np\n\n\ndef norm(x):\n return (x - np.mean(x)) / (np.std(x) + 1e-7)\n\ndef smooth(x,window_len=11,window='hanning'):\n \"\"\"smooth the data using a window with requested size.\n \n This method is based on the convolution of a scaled window with the signal.\n The signal is prepared by introducing reflected copies of the signal \n (with the window size) in both ends so that transient parts are minimized\n in the begining and end part of the output signal.\n \n input:\n x: the input signal \n window_len: the dimension of the smoothing window; should be an odd integer\n window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n flat window will produce a moving average smoothing.\n\n output:\n the smoothed signal\n \n example:\n\n t=linspace(-2,2,0.1)\n x=sin(t)+randn(len(t))*0.1\n y=smooth(x)\n \n see also: \n \n numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n scipy.signal.lfilter\n \n TODO: the window parameter could be the window itself if an array instead of a string\n NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n \"\"\"\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('numpy.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n return y[(window_len/2-1):-(window_len/2)]\n # return y\n" ]
[ [ "numpy.std", "numpy.mean", "numpy.ones" ] ]
jafluri/pygsp
[ "ff8c0023c25df4cbcfd88a7ef63c6223f3030a51", "ff8c0023c25df4cbcfd88a7ef63c6223f3030a51" ]
[ "pygsp/graphs/graph.py", "pygsp/graphs/randomring.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom __future__ import division\n\nimport os\nfrom collections import Counter\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom pygsp import utils\nfrom .fourier import FourierMixIn\nfrom .difference import DifferenceMixIn\nfrom ._io import IOMixIn\nfrom ._layout import LayoutMixIn\n\n\nclass Graph(FourierMixIn, DifferenceMixIn, IOMixIn, LayoutMixIn):\n r\"\"\"Base graph class.\n\n * Instantiate it to construct a graph from a (weighted) adjacency matrix.\n * Provide a common interface (and implementation) for graph objects.\n * Initialize attributes for derived classes.\n\n Parameters\n ----------\n adjacency : sparse matrix or array_like\n The (weighted) adjacency matrix of size n_vertices by n_vertices that\n encodes the graph.\n The data is copied except if it is a sparse matrix in CSR format.\n lap_type : {'combinatorial', 'normalized'}\n The kind of Laplacian to be computed by :meth:`compute_laplacian`.\n coords : array_like\n A matrix of size n_vertices by d that represents the coordinates of the\n vertices in a d-dimensional embedding space.\n plotting : dict\n Plotting parameters.\n\n Attributes\n ----------\n n_vertices or N : int\n The number of vertices (nodes) in the graph.\n n_edges or Ne : int\n The number of edges (links) in the graph.\n W : :class:`scipy.sparse.csr_matrix`\n The adjacency matrix that contains the weights of the edges.\n It is represented as an n_vertices by n_vertices matrix, where\n :math:`W_{i,j}` is the weight of the edge :math:`(v_i, v_j)` from\n vertex :math:`v_i` to vertex :math:`v_j`. :math:`W_{i,j} = 0` means\n that there is no direct connection.\n L : :class:`scipy.sparse.csr_matrix`\n The graph Laplacian, an N-by-N matrix computed from W.\n lap_type : 'normalized', 'combinatorial'\n The kind of Laplacian that was computed by :func:`compute_laplacian`.\n signals : dict (string -> :class:`numpy.ndarray`)\n Signals attached to the graph.\n coords : :class:`numpy.ndarray`\n Vertices coordinates in 2D or 3D space. Used for plotting only.\n plotting : dict\n Plotting parameters.\n\n Examples\n --------\n\n Define a simple graph.\n\n >>> graph = graphs.Graph([\n ... [0., 2., 0.],\n ... [2., 0., 5.],\n ... [0., 5., 0.],\n ... ])\n >>> graph\n Graph(n_vertices=3, n_edges=2)\n >>> graph.n_vertices, graph.n_edges\n (3, 2)\n >>> graph.W.toarray()\n array([[0., 2., 0.],\n [2., 0., 5.],\n [0., 5., 0.]])\n >>> graph.d\n array([1, 2, 1], dtype=int32)\n >>> graph.dw\n array([2., 7., 5.])\n >>> graph.L.toarray()\n array([[ 2., -2., 0.],\n [-2., 7., -5.],\n [ 0., -5., 5.]])\n\n Add some coordinates to plot it.\n\n >>> import matplotlib.pyplot as plt\n >>> graph.set_coordinates([\n ... [0, 0],\n ... [0, 1],\n ... [1, 0],\n ... 
])\n >>> fig, ax = graph.plot()\n\n \"\"\"\n\n def __init__(self, adjacency, lap_type='combinatorial', coords=None,\n plotting={}):\n\n self.logger = utils.build_logger(__name__)\n\n if not sparse.isspmatrix(adjacency):\n adjacency = np.asanyarray(adjacency)\n\n if (adjacency.ndim != 2) or (adjacency.shape[0] != adjacency.shape[1]):\n raise ValueError('Adjacency: must be a square matrix.')\n\n # CSR sparse matrices are the most efficient for matrix multiplication.\n # They are the sole sparse matrix type to support eliminate_zeros().\n self._adjacency = sparse.csr_matrix(adjacency, copy=False)\n\n if np.isnan(self._adjacency.sum()):\n raise ValueError('Adjacency: there is a Not a Number (NaN).')\n if np.isinf(self._adjacency.sum()):\n raise ValueError('Adjacency: there is an infinite value.')\n if self.has_loops():\n self.logger.warning('Adjacency: there are self-loops '\n '(non-zeros on the diagonal). '\n 'The Laplacian will not see them.')\n if (self._adjacency < 0).nnz != 0:\n self.logger.warning('Adjacency: there are negative edge weights.')\n\n self.n_vertices = self._adjacency.shape[0]\n\n # Don't keep edges of 0 weight. Otherwise n_edges will not correspond\n # to the real number of edges. Problematic when plotting.\n self._adjacency.eliminate_zeros()\n\n self._directed = None\n self._connected = None\n\n # Don't count edges two times if undirected.\n # Be consistent with the size of the differential operator.\n if self.is_directed():\n self.n_edges = self._adjacency.nnz\n else:\n diagonal = np.count_nonzero(self._adjacency.diagonal())\n off_diagonal = self._adjacency.nnz - diagonal\n self.n_edges = off_diagonal // 2 + diagonal\n\n if coords is not None:\n # TODO: self.coords should be None if unset.\n self.coords = np.asanyarray(coords)\n\n self.plotting = {'vertex_size': 100,\n 'vertex_color': (0.12, 0.47, 0.71, 0.5),\n 'edge_color': (0.5, 0.5, 0.5, 0.5),\n 'edge_width': 2,\n 'edge_style': '-',\n 'highlight_color': 'C1',\n 'normalize_intercept': .25}\n self.plotting.update(plotting)\n self.signals = dict()\n\n # Attributes that are lazily computed.\n self._A = None\n self._d = None\n self._dw = None\n self._lmax = None\n self._lmax_method = None\n self._U = None\n self._e = None\n self._coherence = None\n self._D = None\n # self._L = None\n\n # TODO: what about Laplacian? 
Lazy as Fourier, or disallow change?\n self.lap_type = lap_type\n self.compute_laplacian(lap_type)\n\n # TODO: kept for backward compatibility.\n self.Ne = self.n_edges\n self.N = self.n_vertices\n\n def _get_extra_repr(self):\n return dict()\n\n def __repr__(self, limit=None):\n s = ''\n for attr in ['n_vertices', 'n_edges']:\n s += '{}={}, '.format(attr, getattr(self, attr))\n for i, (key, value) in enumerate(self._get_extra_repr().items()):\n if (limit is not None) and (i == limit - 2):\n s += '..., '\n break\n s += '{}={}, '.format(key, value)\n return '{}({})'.format(self.__class__.__name__, s[:-2])\n\n def set_signal(self, signal, name):\n r\"\"\"Attach a signal to the graph.\n\n Attached signals can be accessed (and modified or deleted) through the\n :attr:`signals` dictionary.\n\n Parameters\n ----------\n signal : array_like\n A sequence that assigns a value to each vertex.\n The value of the signal at vertex `i` is ``signal[i]``.\n name : String\n Name of the signal used as a key in the :attr:`signals` dictionary.\n\n Examples\n --------\n >>> graph = graphs.Sensor(10)\n >>> signal = np.arange(graph.n_vertices)\n >>> graph.set_signal(signal, 'mysignal')\n >>> graph.signals\n {'mysignal': array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}\n\n \"\"\"\n signal = self._check_signal(signal)\n self.signals[name] = signal\n\n def subgraph(self, vertices):\n r\"\"\"Create a subgraph from a list of vertices.\n\n Parameters\n ----------\n vertices : list\n Vertices to keep.\n Either a list of indices or an indicator function.\n\n Returns\n -------\n subgraph : :class:`Graph`\n Subgraph.\n\n Examples\n --------\n >>> graph = graphs.Graph([\n ... [0., 3., 0., 0.],\n ... [3., 0., 4., 0.],\n ... [0., 4., 0., 2.],\n ... [0., 0., 2., 0.],\n ... ])\n >>> graph = graph.subgraph([0, 2, 1])\n >>> graph.W.toarray()\n array([[0., 0., 3.],\n [0., 0., 4.],\n [3., 4., 0.]])\n\n \"\"\"\n adjacency = self.W[vertices, :][:, vertices]\n try:\n coords = self.coords[vertices]\n except AttributeError:\n coords = None\n graph = Graph(adjacency, self.lap_type, coords, self.plotting)\n for name, signal in self.signals.items():\n graph.set_signal(signal[vertices], name)\n return graph\n\n def is_weighted(self):\n r\"\"\"Check if the graph is weighted.\n\n A graph is unweighted (binary) if and only if all the entries in the\n adjacency matrix are either zero or one.\n\n Returns\n -------\n weighted : bool\n True if the graph is weighted, False otherwise.\n\n Examples\n --------\n\n Unweighted (binary) graph:\n\n >>> graph = graphs.Graph([\n ... [0, 1, 0],\n ... [1, 0, 1],\n ... [0, 1, 0],\n ... ])\n >>> graph.is_weighted()\n False\n\n Weighted graph:\n\n >>> graph = graphs.Graph([\n ... [0, 2, 0],\n ... [2, 0, 1],\n ... [0, 1, 0],\n ... ])\n >>> graph.is_weighted()\n True\n\n \"\"\"\n return not np.all(self.W.data == 1)\n\n def is_connected(self):\n r\"\"\"Check if the graph is connected (cached).\n\n A graph is connected if and only if there exists a (directed) path\n between any two vertices.\n\n Returns\n -------\n connected : bool\n True if the graph is connected, False otherwise.\n\n Notes\n -----\n\n For undirected graphs, starting at a vertex and trying to visit all the\n others is enough.\n For directed graphs, one needs to check that a vertex can both be\n visited by all the others and visit all the others.\n\n Examples\n --------\n\n Connected graph:\n\n >>> graph = graphs.Graph([\n ... [0, 3, 0, 0],\n ... [3, 0, 4, 0],\n ... [0, 4, 0, 2],\n ... [0, 0, 2, 0],\n ... 
])\n >>> graph.is_connected()\n True\n\n Disconnected graph:\n\n >>> graph = graphs.Graph([\n ... [0, 3, 0, 0],\n ... [3, 0, 4, 0],\n ... [0, 0, 0, 2],\n ... [0, 0, 2, 0],\n ... ])\n >>> graph.is_connected()\n False\n\n\n \"\"\"\n if self._connected is not None:\n return self._connected\n\n adjacencies = [self.W]\n if self.is_directed():\n adjacencies.append(self.W.T)\n\n for adjacency in adjacencies:\n visited = np.zeros(self.n_vertices, dtype=np.bool)\n stack = set([0])\n\n while stack:\n vertex = stack.pop()\n\n if visited[vertex]:\n continue\n visited[vertex] = True\n\n neighbors = adjacency[vertex].nonzero()[1]\n stack.update(neighbors)\n\n if not np.all(visited):\n self._connected = False\n return self._connected\n\n self._connected = True\n return self._connected\n\n def is_directed(self):\n r\"\"\"Check if the graph has directed edges (cached).\n\n In this framework, we consider that a graph is directed if and\n only if its weight matrix is not symmetric.\n\n Returns\n -------\n directed : bool\n True if the graph is directed, False otherwise.\n\n Examples\n --------\n\n Directed graph:\n\n >>> graph = graphs.Graph([\n ... [0, 3, 0],\n ... [3, 0, 4],\n ... [0, 0, 0],\n ... ])\n >>> graph.is_directed()\n True\n\n Undirected graph:\n\n >>> graph = graphs.Graph([\n ... [0, 3, 0],\n ... [3, 0, 4],\n ... [0, 4, 0],\n ... ])\n >>> graph.is_directed()\n False\n\n \"\"\"\n if self._directed is None:\n self._directed = (self.W != self.W.T).nnz != 0\n return self._directed\n\n def has_loops(self):\n r\"\"\"Check if any vertex is connected to itself.\n\n A graph has self-loops if and only if the diagonal entries of its\n adjacency matrix are not all zero.\n\n Returns\n -------\n loops : bool\n True if the graph has self-loops, False otherwise.\n\n Examples\n --------\n\n Without self-loops:\n\n >>> graph = graphs.Graph([\n ... [0, 3, 0],\n ... [3, 0, 4],\n ... [0, 0, 0],\n ... ])\n >>> graph.has_loops()\n False\n\n With a self-loop:\n\n >>> graph = graphs.Graph([\n ... [1, 3, 0],\n ... [3, 0, 4],\n ... [0, 0, 0],\n ... ])\n >>> graph.has_loops()\n True\n\n \"\"\"\n return np.any(self.W.diagonal() != 0)\n\n def extract_components(self):\n r\"\"\"Split the graph into connected components.\n\n See :func:`is_connected` for the method used to determine\n connectedness.\n\n Returns\n -------\n graphs : list\n A list of graph structures. Each having its own node list and\n weight matrix. If the graph is directed, add into the info\n parameter the information about the source nodes and the sink\n nodes.\n\n Examples\n --------\n >>> from scipy import sparse\n >>> W = sparse.rand(10, 10, 0.2)\n >>> W = utils.symmetrize(W)\n >>> G = graphs.Graph(W)\n >>> components = G.extract_components()\n >>> has_sinks = 'sink' in components[0].info\n >>> sinks_0 = components[0].info['sink'] if has_sinks else []\n\n \"\"\"\n if self.A.shape[0] != self.A.shape[1]:\n self.logger.error('Inconsistent shape to extract components. 
'\n 'Square matrix required.')\n return None\n\n if self.is_directed():\n raise NotImplementedError('Directed graphs not supported yet.')\n\n graphs = []\n\n visited = np.zeros(self.A.shape[0], dtype=bool)\n # indices = [] # Assigned but never used\n\n while not visited.all():\n # pick a node not visted yet\n stack = set(np.nonzero(~visited)[0][[0]])\n comp = []\n\n while len(stack):\n v = stack.pop()\n if not visited[v]:\n comp.append(v)\n visited[v] = True\n\n # Add indices of nodes not visited yet and accessible from\n # v\n stack.update(set([idx for idx in self.A[v, :].nonzero()[1]\n if not visited[idx]]))\n\n comp = sorted(comp)\n self.logger.info(('Constructing subgraph for component of '\n 'size {}.').format(len(comp)))\n G = self.subgraph(comp)\n G.info = {'orig_idx': comp}\n graphs.append(G)\n\n return graphs\n\n def compute_laplacian(self, lap_type='combinatorial'):\n r\"\"\"Compute a graph Laplacian.\n\n For undirected graphs, the combinatorial Laplacian is defined as\n\n .. math:: L = D - W,\n\n where :math:`W` is the weighted adjacency matrix and :math:`D` the\n weighted degree matrix. The normalized Laplacian is defined as\n\n .. math:: L = I - D^{-1/2} W D^{-1/2},\n\n where :math:`I` is the identity matrix.\n\n For directed graphs, the Laplacians are built from a symmetrized\n version of the weighted adjacency matrix that is the average of the\n weighted adjacency matrix and its transpose. As the Laplacian is\n defined as the divergence of the gradient, it is not affected by the\n orientation of the edges.\n\n For both Laplacians, the diagonal entries corresponding to disconnected\n nodes (i.e., nodes with degree zero) are set to zero.\n\n Once computed, the Laplacian is accessible by the attribute :attr:`L`.\n\n Parameters\n ----------\n lap_type : {'combinatorial', 'normalized'}\n The kind of Laplacian to compute. Default is combinatorial.\n\n Examples\n --------\n\n Combinatorial and normalized Laplacians of an undirected graph.\n\n >>> graph = graphs.Graph([\n ... [0, 2, 0],\n ... [2, 0, 1],\n ... [0, 1, 0],\n ... ])\n >>> graph.compute_laplacian('combinatorial')\n >>> graph.L.toarray()\n array([[ 2., -2., 0.],\n [-2., 3., -1.],\n [ 0., -1., 1.]])\n >>> graph.compute_laplacian('normalized')\n >>> graph.L.toarray()\n array([[ 1. , -0.81649658, 0. ],\n [-0.81649658, 1. , -0.57735027],\n [ 0. , -0.57735027, 1. ]])\n\n Combinatorial and normalized Laplacians of a directed graph.\n\n >>> graph = graphs.Graph([\n ... [0, 2, 0],\n ... [2, 0, 1],\n ... [0, 0, 0],\n ... ])\n >>> graph.compute_laplacian('combinatorial')\n >>> graph.L.toarray()\n array([[ 2. , -2. , 0. ],\n [-2. , 2.5, -0.5],\n [ 0. , -0.5, 0.5]])\n >>> graph.compute_laplacian('normalized')\n >>> graph.L.toarray()\n array([[ 1. , -0.89442719, 0. ],\n [-0.89442719, 1. , -0.4472136 ],\n [ 0. , -0.4472136 , 1. 
]])\n\n The Laplacian is defined as the divergence of the gradient.\n See :meth:`compute_differential_operator` for details.\n\n >>> graph = graphs.Path(20)\n >>> graph.compute_differential_operator()\n >>> L = graph.D.dot(graph.D.T)\n >>> np.all(L.toarray() == graph.L.toarray())\n True\n\n The Laplacians have a bounded spectrum.\n\n >>> G = graphs.Sensor(50)\n >>> G.compute_laplacian('combinatorial')\n >>> G.compute_fourier_basis()\n >>> -1e-10 < G.e[0] < 1e-10 < G.e[-1] < 2*np.max(G.dw)\n True\n >>> G.compute_laplacian('normalized')\n >>> G.compute_fourier_basis()\n >>> -1e-10 < G.e[0] < 1e-10 < G.e[-1] < 2\n True\n\n \"\"\"\n\n if lap_type != self.lap_type:\n # Those attributes are invalidated when the Laplacian is changed.\n # Alternative: don't allow the user to change the Laplacian.\n self._lmax = None\n self._U = None\n self._e = None\n self._coherence = None\n self._D = None\n\n self.lap_type = lap_type\n\n if not self.is_directed():\n W = self.W\n else:\n W = utils.symmetrize(self.W, method='average')\n\n if lap_type == 'combinatorial':\n D = sparse.diags(self.dw)\n self.L = D - W\n elif lap_type == 'normalized':\n d = np.zeros(self.n_vertices)\n disconnected = (self.dw == 0)\n np.power(self.dw, -0.5, where=~disconnected, out=d)\n D = sparse.diags(d)\n self.L = sparse.identity(self.n_vertices) - D * W * D\n self.L[disconnected, disconnected] = 0\n self.L.eliminate_zeros()\n else:\n raise ValueError('Unknown Laplacian type {}'.format(lap_type))\n\n def _check_signal(self, s):\n r\"\"\"Check if signal is valid.\"\"\"\n s = np.asanyarray(s)\n if s.shape[0] != self.n_vertices:\n raise ValueError('First dimension must be the number of vertices '\n 'G.N = {}, got {}.'.format(self.N, s.shape))\n return s\n\n def dirichlet_energy(self, x):\n r\"\"\"Compute the Dirichlet energy of a signal defined on the vertices.\n\n The Dirichlet energy of a signal :math:`x` is defined as\n\n .. math:: x^\\top L x = \\| \\nabla_\\mathcal{G} x \\|_2^2\n = \\frac12 \\sum_{i,j} W[i, j] (x[j] - x[i])^2\n\n for the combinatorial Laplacian, and\n\n .. math:: x^\\top L x = \\| \\nabla_\\mathcal{G} x \\|_2^2\n = \\frac12 \\sum_{i,j} W[i, j]\n \\left( \\frac{x[j]}{d[j]} - \\frac{x[i]}{d[i]} \\right)^2\n\n for the normalized Laplacian, where :math:`d` is the weighted degree\n :attr:`dw`, :math:`\\nabla_\\mathcal{G} x = D^\\top x` and :math:`D` is\n the differential operator :attr:`D`. See :meth:`grad` for the\n definition of the gradient :math:`\\nabla_\\mathcal{G}`.\n\n Parameters\n ----------\n x : array_like\n Signal of length :attr:`n_vertices` living on the vertices.\n\n Returns\n -------\n energy : float\n The Dirichlet energy of the graph signal.\n\n See Also\n --------\n grad : compute the gradient of a vertex signal\n\n Examples\n --------\n\n Non-directed graph:\n\n >>> graph = graphs.Path(5, directed=False)\n >>> signal = [0, 2, 2, 4, 4]\n >>> graph.dirichlet_energy(signal)\n 8.0\n >>> # The Dirichlet energy is indeed the squared norm of the gradient.\n >>> graph.compute_differential_operator()\n >>> graph.grad(signal)\n array([2., 0., 2., 0.])\n\n Directed graph:\n\n >>> graph = graphs.Path(5, directed=True)\n >>> signal = [0, 2, 2, 4, 4]\n >>> graph.dirichlet_energy(signal)\n 4.0\n >>> # The Dirichlet energy is indeed the squared norm of the gradient.\n >>> graph.compute_differential_operator()\n >>> graph.grad(signal)\n array([1.41421356, 0. , 1.41421356, 0. 
])\n\n \"\"\"\n x = self._check_signal(x)\n return x.T.dot(self.L.dot(x))\n\n @property\n def W(self):\n r\"\"\"Weighted adjacency matrix of the graph.\"\"\"\n return self._adjacency\n\n @W.setter\n def W(self, value):\n # TODO: user can still do G.W[0, 0] = 1, or modify the passed W.\n raise AttributeError('In-place modification of the graph is not '\n 'supported. Create another Graph object.')\n\n @property\n def A(self):\n r\"\"\"Graph adjacency matrix (the binary version of W).\n\n The adjacency matrix defines which edges exist on the graph.\n It is represented as an N-by-N matrix of booleans.\n :math:`A_{i,j}` is True if :math:`W_{i,j} > 0`.\n \"\"\"\n if self._A is None:\n self._A = self.W > 0\n return self._A\n\n @property\n def d(self):\n r\"\"\"The degree (number of neighbors) of vertices.\n\n For undirected graphs, the degree of a vertex is the number of vertices\n it is connected to.\n For directed graphs, the degree is the average of the in and out\n degrees, where the in degree is the number of incoming edges, and the\n out degree the number of outgoing edges.\n\n In both cases, the degree of the vertex :math:`v_i` is the average\n between the number of non-zero values in the :math:`i`-th column (the\n in degree) and the :math:`i`-th row (the out degree) of the weighted\n adjacency matrix :attr:`W`.\n\n Examples\n --------\n\n Undirected graph:\n\n >>> graph = graphs.Graph([\n ... [0, 1, 0],\n ... [1, 0, 2],\n ... [0, 2, 0],\n ... ])\n >>> print(graph.d) # Number of neighbors.\n [1 2 1]\n >>> print(graph.dw) # Weighted degree.\n [1 3 2]\n\n Directed graph:\n\n >>> graph = graphs.Graph([\n ... [0, 1, 0],\n ... [0, 0, 2],\n ... [0, 2, 0],\n ... ])\n >>> print(graph.d) # Number of neighbors.\n [0.5 1.5 1. ]\n >>> print(graph.dw) # Weighted degree.\n [0.5 2.5 2. ]\n\n \"\"\"\n if self._d is None:\n if not self.is_directed():\n # Shortcut for undirected graphs.\n self._d = self.W.getnnz(axis=1)\n # axis=1 faster for CSR (https://stackoverflow.com/a/16391764)\n else:\n degree_in = self.W.getnnz(axis=0)\n degree_out = self.W.getnnz(axis=1)\n self._d = (degree_in + degree_out) / 2\n return self._d\n\n @property\n def dw(self):\n r\"\"\"The weighted degree of vertices.\n\n For undirected graphs, the weighted degree of the vertex :math:`v_i` is\n defined as\n\n .. math:: d[i] = \\sum_j W[j, i] = \\sum_j W[i, j],\n\n where :math:`W` is the weighted adjacency matrix :attr:`W`.\n\n For directed graphs, the weighted degree of the vertex :math:`v_i` is\n defined as\n\n .. math:: d[i] = \\frac12 (d^\\text{in}[i] + d^\\text{out}[i])\n = \\frac12 (\\sum_j W[j, i] + \\sum_j W[i, j]),\n\n i.e., as the average of the in and out degrees.\n\n Examples\n --------\n\n Undirected graph:\n\n >>> graph = graphs.Graph([\n ... [0, 1, 0],\n ... [1, 0, 2],\n ... [0, 2, 0],\n ... ])\n >>> print(graph.d) # Number of neighbors.\n [1 2 1]\n >>> print(graph.dw) # Weighted degree.\n [1 3 2]\n\n Directed graph:\n\n >>> graph = graphs.Graph([\n ... [0, 1, 0],\n ... [0, 0, 2],\n ... [0, 2, 0],\n ... ])\n >>> print(graph.d) # Number of neighbors.\n [0.5 1.5 1. ]\n >>> print(graph.dw) # Weighted degree.\n [0.5 2.5 2. 
]\n\n \"\"\"\n if self._dw is None:\n if not self.is_directed():\n # Shortcut for undirected graphs.\n self._dw = np.ravel(self.W.sum(axis=0))\n else:\n degree_in = np.ravel(self.W.sum(axis=0))\n degree_out = np.ravel(self.W.sum(axis=1))\n self._dw = (degree_in + degree_out) / 2\n return self._dw\n\n @property\n def lmax(self):\n r\"\"\"Largest eigenvalue of the graph Laplacian.\n\n Can be exactly computed by :func:`compute_fourier_basis` or\n approximated by :func:`estimate_lmax`.\n \"\"\"\n if self._lmax is None:\n self.logger.warning('The largest eigenvalue G.lmax is not '\n 'available, we need to estimate it. '\n 'Explicitly call G.estimate_lmax() or '\n 'G.compute_fourier_basis() '\n 'once beforehand to suppress the warning.')\n self.estimate_lmax()\n return self._lmax\n\n def estimate_lmax(self, method='lanczos'):\n r\"\"\"Estimate the Laplacian's largest eigenvalue (cached).\n\n The result is cached and accessible by the :attr:`lmax` property.\n\n Exact value given by the eigendecomposition of the Laplacian, see\n :func:`compute_fourier_basis`. That estimation is much faster than the\n eigendecomposition.\n\n Parameters\n ----------\n method : {'lanczos', 'bounds'}\n Whether to estimate the largest eigenvalue with the implicitly\n restarted Lanczos method, or to return an upper bound on the\n spectrum of the Laplacian.\n\n Notes\n -----\n Runs the implicitly restarted Lanczos method (as implemented in\n :func:`scipy.sparse.linalg.eigsh`) with a large tolerance, then\n increases the calculated largest eigenvalue by 1 percent. For much of\n the PyGSP machinery, we need to approximate filter kernels on an\n interval that contains the spectrum of L. The only cost of using a\n larger interval is that the polynomial approximation over the larger\n interval may be a slightly worse approximation on the actual spectrum.\n As this is a very mild effect, it is not necessary to obtain very tight\n bounds on the spectrum of L.\n\n A faster but less tight alternative is to use known algebraic bounds on\n the graph Laplacian.\n\n Examples\n --------\n >>> G = graphs.Logo()\n >>> G.compute_fourier_basis() # True value.\n >>> print('{:.2f}'.format(G.lmax))\n 13.78\n >>> G.estimate_lmax(method='lanczos') # Estimate.\n >>> print('{:.2f}'.format(G.lmax))\n 13.92\n >>> G.estimate_lmax(method='bounds') # Upper bound.\n >>> print('{:.2f}'.format(G.lmax))\n 18.58\n\n \"\"\"\n if method == self._lmax_method:\n return\n self._lmax_method = method\n\n if method == 'lanczos':\n try:\n # We need to cast the matrix L to a supported type.\n # TODO: not good for memory. Cast earlier?\n lmax = sparse.linalg.eigsh(self.L.asfptype(), k=1, tol=5e-3,\n ncv=min(self.N, 10),\n return_eigenvectors=False)\n lmax = lmax[0]\n assert lmax <= self._get_upper_bound() + 1e-12\n lmax *= 1.01 # Increase by 1% to be robust to errors.\n self._lmax = lmax\n except sparse.linalg.ArpackNoConvergence:\n raise ValueError('The Lanczos method did not converge. '\n 'Try to use bounds.')\n\n elif method == 'bounds':\n self._lmax = self._get_upper_bound()\n\n else:\n raise ValueError('Unknown method {}'.format(method))\n\n def _get_upper_bound(self):\n r\"\"\"Return an upper bound on the eigenvalues of the Laplacian.\"\"\"\n\n if self.lap_type == 'normalized':\n return 2 # Equal iff the graph is bipartite.\n elif self.lap_type == 'combinatorial':\n bounds = []\n # Equal for full graphs.\n bounds += [self.n_vertices * np.max(self.W)]\n # Gershgorin circle theorem. 
Equal for regular bipartite graphs.\n # Special case of the below bound.\n bounds += [2 * np.max(self.dw)]\n # Anderson, Morley, Eigenvalues of the Laplacian of a graph.\n # Equal for regular bipartite graphs.\n if self.n_edges > 0:\n sources, targets, _ = self.get_edge_list()\n bounds += [np.max(self.dw[sources] + self.dw[targets])]\n # Merris, A note on Laplacian graph eigenvalues.\n if not self.is_directed():\n W = self.W\n else:\n W = utils.symmetrize(self.W, method='average')\n m = W.dot(self.dw) / self.dw # Mean degree of adjacent vertices.\n bounds += [np.max(self.dw + m)]\n # Good review: On upper bounds for Laplacian graph eigenvalues.\n return min(bounds)\n else:\n raise ValueError('Unknown Laplacian type '\n '{}'.format(self.lap_type))\n\n def get_edge_list(self):\n r\"\"\"Return an edge list, an alternative representation of the graph.\n\n Each edge :math:`e_k = (v_i, v_j) \\in \\mathcal{E}` from :math:`v_i` to\n :math:`v_j` is associated with the weight :math:`W[i, j]`. For each\n edge :math:`e_k`, the method returns :math:`(i, j, W[i, j])` as\n `(sources[k], targets[k], weights[k])`, with :math:`i \\in [0,\n |\\mathcal{V}|-1], j \\in [0, |\\mathcal{V}|-1], k \\in [0,\n |\\mathcal{E}|-1]`.\n\n Returns\n -------\n sources : vector of int\n Source node indices.\n targets : vector of int\n Target node indices.\n weights : vector of float\n Edge weights.\n\n Notes\n -----\n The weighted adjacency matrix is the canonical form used in this\n package to represent a graph as it is the easiest to work with when\n considering spectral methods.\n\n Edge orientation (i.e., which node is the source or the target) is\n arbitrary for undirected graphs.\n The implementation uses the upper triangular part of the adjacency\n matrix, hence :math:`i \\leq j \\ \\forall k`.\n\n Examples\n --------\n\n Edge list of a directed graph.\n\n >>> graph = graphs.Graph([\n ... [0, 3, 0],\n ... [3, 0, 4],\n ... [0, 0, 0],\n ... ])\n >>> sources, targets, weights = graph.get_edge_list()\n >>> list(sources), list(targets), list(weights)\n ([0, 1, 1], [1, 0, 2], [3, 3, 4])\n\n Edge list of an undirected graph.\n\n >>> graph = graphs.Graph([\n ... [0, 3, 0],\n ... [3, 0, 4],\n ... [0, 4, 0],\n ... 
])\n >>> sources, targets, weights = graph.get_edge_list()\n >>> list(sources), list(targets), list(weights)\n ([0, 1], [1, 2], [3, 4])\n\n \"\"\"\n\n if self.is_directed():\n W = self.W.tocoo()\n else:\n W = sparse.triu(self.W, format='coo')\n\n sources = W.row\n targets = W.col\n weights = W.data\n\n assert self.n_edges == sources.size == targets.size == weights.size\n return sources, targets, weights\n\n def plot(self, vertex_color=None, vertex_size=None, highlight=[],\n edges=None, edge_color=None, edge_width=None,\n indices=False, colorbar=True, limits=None, ax=None,\n title=None, backend=None):\n r\"\"\"Docstring overloaded at import time.\"\"\"\n from pygsp.plotting import _plot_graph\n return _plot_graph(self, vertex_color=vertex_color,\n vertex_size=vertex_size, highlight=highlight,\n edges=edges, indices=indices, colorbar=colorbar,\n edge_color=edge_color, edge_width=edge_width,\n limits=limits, ax=ax, title=title, backend=backend)\n\n def plot_signal(self, *args, **kwargs):\n r\"\"\"Deprecated, use plot() instead.\"\"\"\n return self.plot(*args, **kwargs)\n\n def plot_spectrogram(self, node_idx=None):\n r\"\"\"Docstring overloaded at import time.\"\"\"\n from pygsp.plotting import _plot_spectrogram\n _plot_spectrogram(self, node_idx=node_idx)\n", "# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom pygsp import utils\nfrom . import Graph # prevent circular import in Python < 3.5\n\n\nclass RandomRing(Graph):\n r\"\"\"Ring graph with randomly sampled vertices.\n\n Parameters\n ----------\n N : int\n Number of vertices.\n angles : array_like, optional\n The angular coordinate, in :math:`[0, 2\\pi]`, of the vertices.\n seed : int\n Seed for the random number generator (for reproducible graphs).\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> G = graphs.RandomRing(N=10, seed=42)\n >>> fig, axes = plt.subplots(1, 2)\n >>> _ = axes[0].spy(G.W)\n >>> _ = G.plot(ax=axes[1])\n >>> _ = axes[1].set_xlim(-1.1, 1.1)\n >>> _ = axes[1].set_ylim(-1.1, 1.1)\n\n \"\"\"\n\n def __init__(self, N=64, angles=None, seed=None, **kwargs):\n\n self.seed = seed\n\n if angles is None:\n rs = np.random.RandomState(seed)\n angles = np.sort(rs.uniform(0, 2*np.pi, size=N), axis=0)\n else:\n angles = np.asanyarray(angles)\n angles.sort() # Need to be sorted to take the difference.\n N = len(angles)\n if np.any(angles < 0) or np.any(angles >= 2*np.pi):\n raise ValueError('Angles should be in [0, 2 pi]')\n self.angles = angles\n\n if N < 3:\n # Asymmetric graph needed for 2 as 2 distances connect them.\n raise ValueError('There should be at least 3 vertices.')\n\n rows = range(0, N-1)\n cols = range(1, N)\n weights = np.diff(angles)\n\n # Close the loop.\n rows = np.concatenate((rows, [0]))\n cols = np.concatenate((cols, [N-1]))\n weights = np.concatenate((weights, [2*np.pi + angles[0] - angles[-1]]))\n\n W = sparse.coo_matrix((weights, (rows, cols)), shape=(N, N))\n W = utils.symmetrize(W, method='triu')\n\n # Width as the expected angle. All angles are equal to that value when\n # the ring is uniformly sampled.\n width = 2 * np.pi / N\n assert (W.data.mean() - width) < 1e-10\n # TODO: why this kernel ? 
It empirically produces eigenvectors closer\n # to the sines and cosines.\n W.data = width / W.data\n\n coords = np.stack([np.cos(angles), np.sin(angles)], axis=1)\n plotting = {'limits': np.array([-1, 1, -1, 1])}\n\n # TODO: save angle and 2D position as graph signals\n super(RandomRing, self).__init__(W, coords=coords, plotting=plotting,\n **kwargs)\n\n def _get_extra_repr(self):\n return dict(seed=self.seed)\n" ]
[ [ "scipy.sparse.isspmatrix", "numpy.nonzero", "numpy.power", "scipy.sparse.diags", "scipy.sparse.csr_matrix", "numpy.all", "numpy.max", "numpy.asanyarray", "scipy.sparse.identity", "scipy.sparse.triu", "numpy.zeros" ], [ "scipy.sparse.coo_matrix", "numpy.cos", "numpy.sin", "numpy.concatenate", "numpy.asanyarray", "numpy.diff", "numpy.any", "numpy.array", "numpy.random.RandomState" ] ]
idc9/andersoncd
[ "af2123b241e5f82f7c51b2bbf5196fb02723b582" ]
[ "examples/expe_eigvals/expe_rayleigh.py" ]
[ "import pandas\nimport matplotlib.pyplot as plt\n\nfrom andersoncd.plot_utils import configure_plt\n\n\nconfigure_plt()\n\ndatasets = [\n \"gina_agnostic\", \"rcv1.binary\", \"real-sim\", \"news20.binary\"]\n\n\ndataset_title = {}\ndataset_title[\"leukemia\"] = \"leukemia\"\ndataset_title[\"gina_agnostic\"] = \"gina agnostic\"\ndataset_title[\"hiva_agnostic\"] = \"hiva agnostic\"\ndataset_title[\"upselling\"] = \"upselling\"\ndataset_title[\"rcv1.binary\"] = \"rcv1\"\ndataset_title[\"news20.binary\"] = \"news20\"\ndataset_title[\"kdda_train\"] = \"kdd\"\ndataset_title[\"real-sim\"] = \"real-sim\"\ndataset_title[\"finance\"] = \"finance\"\n\nlist_k = [0, 7, 8, 9]\n\nfontsize = 50\n\nfig, axarr = plt.subplots(\n 2, 2, sharex=False, sharey=False, figsize=[10, 8], constrained_layout=True)\n\nfor i, dataset in enumerate(datasets):\n for idxk, k in enumerate(list_k):\n color = plt.cm.viridis(idxk / len(list_k))\n df = pandas.read_pickle(\"results/%s_%i.pkl\" % (dataset, k))\n rayleighs = df['rayleighs'].to_numpy()[0]\n label = r\"$W(T^{%d})$\" % 2 ** k\n axarr.flat[i].plot(\n rayleighs.real, rayleighs.imag, color=color, label=label)\n axarr.flat[i].plot(1, 0, marker=\"P\", c='k')\n axarr.flat[i].set_title(dataset_title[dataset])\n axarr.flat[i].axis(\"equal\")\n axarr.flat[i].set_xticks((-2, -1, 0, 1, 2))\n axarr.flat[i].set_yticks((-2, -1, 0, 1, 2))\n\n\nfig.show()\n" ]
[ [ "pandas.read_pickle", "matplotlib.pyplot.subplots" ] ]
PlanetG3/ArtificialIntelligenceEngines
[ "77667d499ffd57a745e8eb58877c557d807c95c3" ]
[ "DeepLearningEnginesCode/Python/Ch02_LinearNetwork/main.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLinear associative net.\n\"\"\"\n\nimport numpy as np\n# use pylab for ploting.\nimport pylab as pl\n\n########## Define class MLP ##########\nclass LinearNetwork():\n \n ########## Define Methods for LinearNetwork class ##########\n # Initialize network topology\n def __init__(self,nInput,nOutput):\n \n # set number of units in each layer\n self.nInput = nInput\n self.nOutput = nOutput\n \n # define weights between input and output layers\n # use of +1 input units defines bias unit in input layer\n # example in book does not have bias term, so this is an extra in code.\n self.W = np.random.rand(self.nOutput,self.nInput+1)\n \n # space to store change in weights\n self.dw = np.zeros([self.nOutput,self.nInput+1])\n \n # define vector to store state of output layer\n self.state = np.zeros([self.nOutput])\n \n # define vector to store delta terms of output layer\n self.deltaO = np.zeros([self.nOutput])\n\n # Iterate the network by one step \n def step(self,inp,tar,alpha):\n # get input vector with bias unit by appending 1 to end of input vector using \n # example in book does not have bias term, so this is an extra in code.\n input = np.append([inp], [1.0])\n \n # use input layer state to get output layer state\n for k in range(self.nOutput):\n # get total input u to output unit\n u = np.dot(self.W[k,:],input)\n # use u to find state of output unit\n self.state[k] = self.activation(u)\n\n # Learning algorithm\n if (alpha>0.):\n # get delta terms of output layer units\n for k in range(self.nOutput):\n self.deltaO[k] = (self.state[k] - tar[k])\n\n for k in range(self.nOutput):\n self.dw[k,:] -= alpha * self.deltaO[k] * input\n\n # Define linear unit activation function\n def activation(self,x):\n return x\n\n\n########## set parameters ##########\n# set random seed so get same sequence of random numbers each time prog is run.\nnp.random.seed(1)\nnip = 2 # num input units\nnop = 1 # num output units\n# Initialize network\nM = LinearNetwork(nip,nop) \n\n# Input vectors \ninputvectors = np.array([[0,0],[0,1],[1,0],[1,1]])\n\n# targets for XOR, not used here, but left here for comparison\n# target = np.array([[0],[1],[1],[0]])\n\n# targets for simple linearly separable problem\ntargets = np.array([[0],[1],[0],[1]])\n\n# Timesteps = number of learning trials\nnumiter = 20\n\n# num training vectors\nT = targets.shape[0]\n\n# set learning rate alpha\nalpha = 0.2\n\n# Store cost function values E\nE = np.zeros(numiter)\n\n########## Run learning ##########\nfor iter in range(numiter):\n\n # reset weight changes to zero\n M.dw = M.dw*0\n \n Et = 0.\n \n for t in range(T):\n \n # find weight change for one association for one step\n inputvector = inputvectors[t]\n target = targets[t]\n \n # get network output and delta term at output \n M.step(inputvector,target,alpha)\n \n # Compute the error\n dif =(target - M.state)\n Et += np.sum(dif*dif)\n \n E[iter] = Et\n \n # update weights\n for k in range(M.nOutput):\n M.W[k,:] += M.dw[k,:]\n\n# Print comparison of target and output\nfor k in range(T):\n inputvector = inputvectors[k,:]\n target = targets[t]\n M.step(inputvector,target,0.)\n print('input vector:' + str(inputvector))\n print( 'target: ' + str(target) + ', output: ' + (\"%.2f\" % M.state))\n\n########## Plot ##########\nF = pl.figure(0,figsize=(4,4))\nf = F.add_subplot(111)\nf.plot(E)\nf.set_aspect(np.diff(f.get_xlim())/np.diff(f.get_ylim()))\nf.set_xlabel('Training epoch')\nf.set_ylabel('Error')\nf.set_title('Error during 
training')\npl.show()\n\n########## The End ##########\n" ]
[ [ "numpy.dot", "numpy.random.seed", "numpy.append", "numpy.random.rand", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
msnitish/posthog
[ "cb86113f568e72eedcb64b5fd00c313d21e72f90" ]
[ "ee/clickhouse/queries/experiments/funnel_experiment_result.py" ]
[ "from dataclasses import asdict, dataclass\nfrom datetime import datetime\nfrom typing import List, Optional, Tuple, Type\n\nfrom numpy.random import default_rng\nfrom rest_framework.exceptions import ValidationError\n\nfrom ee.clickhouse.queries.experiments import (\n CONTROL_VARIANT_KEY,\n FF_DISTRIBUTION_THRESHOLD,\n MIN_PROBABILITY_FOR_SIGNIFICANCE,\n)\nfrom ee.clickhouse.queries.funnels import ClickhouseFunnel\nfrom posthog.constants import ExperimentSignificanceCode\nfrom posthog.models.feature_flag import FeatureFlag\nfrom posthog.models.filters.filter import Filter\nfrom posthog.models.team import Team\n\nProbability = float\n\n\n@dataclass(frozen=True)\nclass Variant:\n key: str\n success_count: int\n failure_count: int\n\n\nEXPECTED_LOSS_SIGNIFICANCE_LEVEL = 0.01\n\n\nclass ClickhouseFunnelExperimentResult:\n \"\"\"\n This class calculates Experiment Results.\n It returns two things:\n 1. A Funnel Breakdown based on Feature Flag values\n 2. Probability that Feature Flag value 1 has better conversion rate then FeatureFlag value 2\n\n Currently, we support a maximum of 4 feature flag values: control and 3 test variants\n\n The passed in Filter determines which funnel to create, along with the experiment start & end date values\n\n Calculating (2) uses sampling from a Beta distribution. If `control` value for the feature flag has 10 successes and 12 conversion failures,\n we assume the conversion rate follows a Beta(10, 12) distribution. Same for `test` variant.\n\n Then, we calculcate how many times a sample from `test` variant is higher than a sample from the `control` variant. This becomes the\n probability.\n \"\"\"\n\n def __init__(\n self,\n filter: Filter,\n team: Team,\n feature_flag: FeatureFlag,\n experiment_start_date: datetime,\n experiment_end_date: Optional[datetime] = None,\n funnel_class: Type[ClickhouseFunnel] = ClickhouseFunnel,\n ):\n\n breakdown_key = f\"$feature/{feature_flag.key}\"\n variants = [variant[\"key\"] for variant in feature_flag.variants]\n\n query_filter = filter.with_data(\n {\n \"date_from\": experiment_start_date,\n \"date_to\": experiment_end_date,\n \"breakdown\": breakdown_key,\n \"breakdown_type\": \"event\",\n \"properties\": [{\"key\": breakdown_key, \"value\": variants, \"operator\": \"exact\", \"type\": \"event\"}],\n # :TRICKY: We don't use properties set on filters, instead using experiment variant options\n }\n )\n self.funnel = funnel_class(query_filter, team)\n\n def get_results(self):\n funnel_results = self.funnel.run()\n control_variant, test_variants = self.get_variants(funnel_results)\n\n probabilities = self.calculate_results(control_variant, test_variants)\n\n mapping = {\n variant.key: probability for variant, probability in zip([control_variant, *test_variants], probabilities)\n }\n\n significance_code, loss = self.are_results_significant(control_variant, test_variants, probabilities)\n\n return {\n \"insight\": funnel_results,\n \"probability\": mapping,\n \"significant\": significance_code == ExperimentSignificanceCode.SIGNIFICANT,\n \"filters\": self.funnel._filter.to_dict(),\n \"significance_code\": significance_code,\n \"expected_loss\": loss,\n \"variants\": [asdict(variant) for variant in [control_variant, *test_variants]],\n }\n\n def get_variants(self, funnel_results):\n control_variant = None\n test_variants = []\n for result in funnel_results:\n total = result[0][\"count\"]\n success = result[-1][\"count\"]\n failure = total - success\n breakdown_value = result[0][\"breakdown_value\"][0]\n if breakdown_value == 
CONTROL_VARIANT_KEY:\n control_variant = Variant(key=breakdown_value, success_count=int(success), failure_count=int(failure))\n else:\n test_variants.append(Variant(breakdown_value, int(success), int(failure)))\n\n return control_variant, test_variants\n\n @staticmethod\n def calculate_results(\n control_variant: Variant, test_variants: List[Variant], priors: Tuple[int, int] = (1, 1)\n ) -> List[Probability]:\n \"\"\"\n Calculates probability that A is better than B. First variant is control, rest are test variants.\n\n Supports maximum 4 variants today\n\n For each variant, we create a Beta distribution of conversion rates,\n where alpha (successes) = success count of variant + prior success\n beta (failures) = failure count + variant + prior failures\n\n The prior is information about the world we already know. For example, a stronger prior for failures implies\n you'd need extra evidence of successes to confirm that the variant is indeed better.\n\n By default, we choose a non-informative prior. That is, both success & failure are equally likely.\n \"\"\"\n\n if not control_variant:\n raise ValidationError(\"No control variant data found\", code=\"no_data\")\n\n if len(test_variants) > 3:\n raise ValidationError(\"Can't calculate A/B test results for more than 4 variants\", code=\"too_much_data\")\n\n if len(test_variants) < 1:\n raise ValidationError(\"Can't calculate A/B test results for less than 2 variants\", code=\"no_data\")\n\n return calculate_probability_of_winning_for_each([control_variant, *test_variants])\n\n @staticmethod\n def are_results_significant(\n control_variant: Variant, test_variants: List[Variant], probabilities: List[Probability]\n ) -> Tuple[ExperimentSignificanceCode, Probability]:\n control_sample_size = control_variant.success_count + control_variant.failure_count\n\n for variant in test_variants:\n # We need a feature flag distribution threshold because distribution of people\n # can skew wildly when there are few people in the experiment\n if variant.success_count + variant.failure_count < FF_DISTRIBUTION_THRESHOLD:\n return ExperimentSignificanceCode.NOT_ENOUGH_EXPOSURE, 1\n\n if control_sample_size < FF_DISTRIBUTION_THRESHOLD:\n return ExperimentSignificanceCode.NOT_ENOUGH_EXPOSURE, 1\n\n if (\n probabilities[0] < MIN_PROBABILITY_FOR_SIGNIFICANCE\n and sum(probabilities[1:]) < MIN_PROBABILITY_FOR_SIGNIFICANCE\n ):\n # Sum of probability of winning for all variants except control is less than 90%\n return ExperimentSignificanceCode.LOW_WIN_PROBABILITY, 1\n\n best_test_variant = max(\n test_variants, key=lambda variant: variant.success_count / (variant.success_count + variant.failure_count)\n )\n\n expected_loss = calculate_expected_loss(best_test_variant, [control_variant])\n\n if expected_loss >= EXPECTED_LOSS_SIGNIFICANCE_LEVEL:\n return ExperimentSignificanceCode.HIGH_LOSS, expected_loss\n\n return ExperimentSignificanceCode.SIGNIFICANT, expected_loss\n\n\ndef calculate_expected_loss(target_variant: Variant, variants: List[Variant]) -> float:\n \"\"\"\n Calculates expected loss in conversion rate for a given variant.\n Loss calculation comes from VWO's SmartStats technical paper:\n https://cdn2.hubspot.net/hubfs/310840/VWO_SmartStats_technical_whitepaper.pdf (pg 12)\n\n > The loss function is the amount of uplift that one can expect to\n be lost by choosing a given variant, given particular values of λA and λB\n\n The unit of the return value is conversion rate values\n\n \"\"\"\n random_sampler = default_rng()\n prior_success = 1\n prior_failure = 1\n 
simulations_count = 100_000\n\n variant_samples = []\n for variant in variants:\n # Get `N=simulations` samples from a Beta distribution with alpha = prior_success + variant_sucess,\n # and beta = prior_failure + variant_failure\n samples = random_sampler.beta(\n variant.success_count + prior_success, variant.failure_count + prior_failure, simulations_count\n )\n variant_samples.append(samples)\n\n target_variant_samples = random_sampler.beta(\n target_variant.success_count + prior_success, target_variant.failure_count + prior_failure, simulations_count\n )\n\n loss = 0\n variant_conversions = list(zip(*variant_samples))\n for i in range(simulations_count):\n loss += max(0, max(variant_conversions[i]) - target_variant_samples[i])\n\n return loss / simulations_count\n\n\ndef simulate_winning_variant_for_conversion(target_variant: Variant, variants: List[Variant]) -> Probability:\n random_sampler = default_rng()\n prior_success = 1\n prior_failure = 1\n simulations_count = 100_000\n\n variant_samples = []\n for variant in variants:\n # Get `N=simulations` samples from a Beta distribution with alpha = prior_success + variant_sucess,\n # and beta = prior_failure + variant_failure\n samples = random_sampler.beta(\n variant.success_count + prior_success, variant.failure_count + prior_failure, simulations_count\n )\n variant_samples.append(samples)\n\n target_variant_samples = random_sampler.beta(\n target_variant.success_count + prior_success, target_variant.failure_count + prior_failure, simulations_count\n )\n\n winnings = 0\n variant_conversions = list(zip(*variant_samples))\n for i in range(simulations_count):\n if target_variant_samples[i] > max(variant_conversions[i]):\n winnings += 1\n\n return winnings / simulations_count\n\n\ndef calculate_probability_of_winning_for_each(variants: List[Variant]) -> List[Probability]:\n \"\"\"\n Calculates the probability of winning for each variant.\n \"\"\"\n if len(variants) == 2:\n # simple case\n probability = simulate_winning_variant_for_conversion(variants[1], [variants[0]])\n return [max(0, 1 - probability), probability]\n\n elif len(variants) == 3:\n probability_third_wins = simulate_winning_variant_for_conversion(variants[2], [variants[0], variants[1]])\n probability_second_wins = simulate_winning_variant_for_conversion(variants[1], [variants[0], variants[2]])\n return [\n max(0, 1 - probability_third_wins - probability_second_wins),\n probability_second_wins,\n probability_third_wins,\n ]\n\n elif len(variants) == 4:\n probability_second_wins = simulate_winning_variant_for_conversion(\n variants[1], [variants[0], variants[2], variants[3]]\n )\n probability_third_wins = simulate_winning_variant_for_conversion(\n variants[2], [variants[0], variants[1], variants[3]]\n )\n probability_fourth_wins = simulate_winning_variant_for_conversion(\n variants[3], [variants[0], variants[1], variants[2]]\n )\n return [\n max(0, 1 - probability_second_wins - probability_third_wins - probability_fourth_wins),\n probability_second_wins,\n probability_third_wins,\n probability_fourth_wins,\n ]\n else:\n raise ValidationError(\"Can't calculate A/B test results for more than 4 variants\", code=\"too_much_data\")\n" ]
[ [ "numpy.random.default_rng" ] ]
robertbindar/TileDB-Py
[ "cc2d92c45fc12d72a0259301561e3649b38e726c" ]
[ "tiledb/tests/test_core.py" ]
[ "import copy\nimport random\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nimport tiledb\nfrom tiledb import TileDBError\nimport tiledb.main as core\nfrom tiledb.tests.common import DiskTestCase, rand_ascii\n\n\nclass CoreCCTest(DiskTestCase):\n def test_pyquery_basic(self):\n ctx = tiledb.Ctx()\n uri = self.path(\"test_pyquery_basic\")\n with tiledb.from_numpy(uri, np.random.rand(4)) as A:\n pass\n\n with tiledb.open(uri) as a:\n with tiledb.scope_ctx({\"py.init_buffer_bytes\": \"abcd\"}) as testctx:\n with self.assertRaises(ValueError):\n core.PyQuery(testctx, a, (\"\",), (), 0, False)\n\n q = core.PyQuery(ctx, a, (\"\",), (), 0, False)\n\n try:\n q._test_err(\"bad foo happened\")\n except Exception as exc:\n assert isinstance(exc, tiledb.TileDBError)\n assert exc.message == \"bad foo happened\"\n\n q.set_ranges([[(0, 3)]])\n\n with self.assertRaises(TileDBError):\n q.set_ranges([[(0, 3.0)]])\n\n q.set_ranges([[(0, np.int32(3))]])\n\n with self.assertRaises(TileDBError):\n q.set_ranges([[(3, \"a\")]])\n\n with self.assertRaisesRegex(\n TileDBError,\n \"Failed to cast dim range '\\\\(1.2344, 5.6789\\\\)' to dim type UINT64.*$\",\n ):\n q.set_ranges([[(1.2344, 5.6789)]])\n\n with self.assertRaisesRegex(\n TileDBError,\n \"Failed to cast dim range '\\\\('aa', 'bbbb'\\\\)' to dim type UINT64.*$\",\n ):\n q.set_ranges([[(\"aa\", \"bbbb\")]])\n\n with tiledb.open(uri) as a:\n q2 = core.PyQuery(ctx, a, (\"\",), (), 0, False)\n q2.set_ranges([[(0, 3)]])\n q2.submit()\n res = q2.results()[\"\"][0]\n res.dtype = np.double\n assert_array_equal(res, a[:])\n\n def test_pyquery_init(self):\n uri = self.path(\"test_pyquery_init\")\n intmax = np.iinfo(np.int64).max\n config_dict = {\n \"sm.tile_cache_size\": \"100\",\n \"py.init_buffer_bytes\": str(intmax),\n \"py.alloc_max_bytes\": str(intmax),\n }\n with tiledb.scope_ctx(config_dict) as ctx:\n with tiledb.from_numpy(uri, np.random.rand(4)) as A:\n pass\n\n with tiledb.open(uri) as a:\n q = core.PyQuery(ctx, a, (\"\",), (), 0, False)\n self.assertEqual(q._test_init_buffer_bytes, intmax)\n self.assertEqual(q._test_alloc_max_bytes, intmax)\n\n with self.assertRaisesRegex(\n ValueError,\n \"Invalid parameter: 'py.alloc_max_bytes' must be >= 1 MB \",\n ), tiledb.scope_ctx({\"py.alloc_max_bytes\": 10}) as ctx2:\n q = core.PyQuery(ctx2, a, (\"\",), (), 0, False)\n\n def test_import_buffer(self):\n uri = self.path(\"test_import_buffer\")\n\n def_tile = 1\n if tiledb.libtiledb.version() < (2, 2):\n def_tile = 2\n\n dom = tiledb.Domain(\n tiledb.Dim(domain=(0, 3), tile=def_tile, dtype=np.int64),\n tiledb.Dim(domain=(0, 3), tile=def_tile, dtype=np.int64),\n )\n attrs = [\n tiledb.Attr(name=\"\", dtype=np.float64),\n tiledb.Attr(name=\"foo\", dtype=np.int32),\n tiledb.Attr(name=\"str\", dtype=str),\n ]\n schema = tiledb.ArraySchema(domain=dom, attrs=attrs, sparse=False)\n tiledb.DenseArray.create(uri, schema)\n\n data_orig = {\n \"\": 2.5 * np.identity(4, dtype=np.float64),\n \"foo\": 8 * np.identity(4, dtype=np.int32),\n \"str\": np.array(\n [rand_ascii(random.randint(0, 5)) for _ in range(16)], dtype=\"U0\"\n ).reshape(4, 4),\n }\n\n with tiledb.open(uri, \"w\") as A:\n A[:] = data_orig\n\n with tiledb.open(uri) as B:\n assert_array_equal(B[:][\"\"], data_orig[\"\"]),\n assert_array_equal(B[:][\"foo\"], data_orig[\"foo\"])\n\n data_mod = {\n \"\": 5 * np.identity(4, dtype=np.float64),\n \"foo\": 32 * np.identity(4, dtype=np.int32),\n \"str\": np.array(\n [rand_ascii(random.randint(1, 7)) for _ in range(16)], dtype=\"U0\"\n ).reshape(4, 
4),\n }\n\n str_offsets = np.array(\n [0] + [len(x) for x in data_mod[\"str\"].flatten()[:-1]], dtype=np.uint64\n )\n str_offsets = np.cumsum(str_offsets)\n\n str_raw = np.array(\n [ord(c) for c in \"\".join([x for x in data_mod[\"str\"].flatten()])],\n dtype=np.uint8,\n )\n\n data_mod_bfr = {\n \"\": (data_mod[\"\"].flatten().view(np.uint8), np.array([], dtype=np.uint64)),\n \"foo\": (\n data_mod[\"foo\"].flatten().view(np.uint8),\n np.array([], dtype=np.uint64),\n ),\n \"str\": (str_raw.flatten().view(np.uint8), str_offsets),\n }\n\n with tiledb.open(uri) as C:\n res = C.multi_index[0:3, 0:3]\n assert_array_equal(res[\"\"], data_orig[\"\"])\n assert_array_equal(res[\"foo\"], data_orig[\"foo\"])\n assert_array_equal(res[\"str\"], data_orig[\"str\"])\n\n C._set_buffers(copy.deepcopy(data_mod_bfr))\n res = C.multi_index[0:3, 0:3]\n assert_array_equal(res[\"\"], data_mod[\"\"])\n assert_array_equal(res[\"foo\"], data_mod[\"foo\"])\n assert_array_equal(res[\"str\"], data_mod[\"str\"])\n\n with tiledb.open(uri) as D:\n D._set_buffers(copy.deepcopy(data_mod_bfr))\n res = D[:, :]\n assert_array_equal(res[\"\"], data_mod[\"\"])\n assert_array_equal(res[\"foo\"], data_mod[\"foo\"])\n assert_array_equal(res[\"str\"], data_mod[\"str\"])\n\n with tiledb.DenseArray(uri, mode=\"r\") as E, tiledb.scope_ctx() as ctx:\n # Ensure that query only returns specified attributes\n q = core.PyQuery(ctx, E, (\"foo\",), (), 0, False)\n q.set_ranges([[(0, 1)]])\n q.submit()\n r = q.results()\n self.assertTrue(\"foo\" in r)\n self.assertTrue(\"str\" not in r)\n del q\n" ]
[ [ "numpy.int32", "numpy.cumsum", "numpy.testing.assert_array_equal", "numpy.identity", "numpy.random.rand", "numpy.iinfo", "numpy.array" ] ]
pvgladkov/tweet-sentiment-extraction
[ "4950be56de062fe55b5810c92e655c5c52c9fbcb" ]
[ "tse/models.py" ]
[ "import torch\nimport transformers\nfrom torch import nn as nn\nimport numpy as np\n\n\ndef load_model(train_config, device):\n model_config = transformers.RobertaConfig.from_pretrained(train_config.BERT_PATH)\n model_config.output_hidden_states = True\n model = TweetModel(train_config, conf=model_config)\n model.to(device)\n return model\n\n\nclass GELU(nn.Module):\n def forward(self, x):\n return x * torch.sigmoid(1.702 * x)\n\n\nclass TweetModel(transformers.BertPreTrainedModel):\n\n prefix = 'roberta'\n\n def __init__(self, train_config, conf):\n super(TweetModel, self).__init__(conf)\n self.bert = transformers.RobertaModel.from_pretrained(train_config.BERT_PATH, config=conf)\n dropout_p = 0.4\n self.l0 = nn.Linear(768 * 2, 256)\n self.l1 = nn.Linear(256, 2)\n self.gelu = GELU()\n self.dropouts = nn.ModuleList([nn.Dropout(dropout_p) for _ in range(4)])\n torch.nn.init.xavier_normal_(self.l0.weight)\n\n @staticmethod\n def loss_fn(start_logits, end_logits, start_positions, end_positions):\n loss_fct = nn.CrossEntropyLoss()\n\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss)\n return total_loss\n\n def forward(self, ids, mask, token_type_ids, start=None, end=None):\n _, _, out = self.bert(\n ids,\n attention_mask=mask,\n token_type_ids=token_type_ids\n )\n\n out = torch.cat((out[-1], out[-2]), dim=-1)\n\n out = self.l0(out)\n out = self.gelu(out)\n\n start_logits = None\n end_logits = None\n loss = None\n for i, dropout in enumerate(self.dropouts):\n if i == 0:\n tmp_out = dropout(out)\n logits = self.l1(tmp_out)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n if start is not None:\n loss = self.loss_fn(start_logits, end_logits, start, end)\n else:\n tmp_out = dropout(out)\n logits = self.l1(tmp_out)\n tmp_start_logits, tmp_end_logits = logits.split(1, dim=-1)\n\n tmp_start_logits = tmp_start_logits.squeeze(-1)\n tmp_end_logits = tmp_end_logits.squeeze(-1)\n\n start_logits = start_logits + tmp_start_logits\n end_logits = end_logits + tmp_end_logits\n if start is not None:\n loss += self.loss_fn(tmp_start_logits, tmp_end_logits, start, end)\n\n start_logits = start_logits / len(self.dropouts)\n end_logits = end_logits / len(self.dropouts)\n\n if start is not None:\n loss = loss / len(self.dropouts)\n\n return start_logits, end_logits, loss\n\n @staticmethod\n def softmax(start_logits, end_logits):\n outputs_start_ = torch.softmax(start_logits, dim=1).cpu().detach().numpy()\n outputs_end_ = torch.softmax(end_logits, dim=1).cpu().detach().numpy()\n return outputs_start_, outputs_end_\n\n @staticmethod\n def probs_to_positions(start_probs, end_probs):\n start_positions = np.argmax(start_probs, axis=1)\n end_positions = np.argmax(end_probs, axis=1)\n return start_positions, end_positions\n\n @classmethod\n def to_positions(cls, start_logits, end_logits):\n start_probs, end_probs = cls.softmax(start_logits, end_logits)\n start_positions, end_positions = cls.probs_to_positions(start_probs, end_probs)\n return start_positions, end_positions\n\n\nclass Head(nn.Module):\n def __init__(self):\n super(Head, self).__init__()\n self.l0 = nn.Linear(768 * 2, 256)\n self.l1 = nn.Linear(256, 1)\n self.gelu = GELU()\n self.dropout = nn.Dropout(0.1)\n\n def forward(self, x):\n out = self.l0(x)\n out = self.gelu(out)\n out = self.dropout(out)\n out = self.l1(out)\n out = out.squeeze(-1)\n return out\n\n\nclass 
TweetModelTwoHead(TweetModel):\n\n prefix = 'roberta2h'\n\n def __init__(self, train_config, conf):\n super(TweetModelTwoHead, self).__init__(train_config, conf)\n self.start_head = Head()\n self.end_head = Head()\n\n def forward(self, ids, mask, token_type_ids, start=None, end=None):\n _, _, out = self.bert(\n ids,\n attention_mask=mask,\n token_type_ids=token_type_ids\n )\n\n out = torch.cat((out[-1], out[-2]), dim=-1)\n\n start_out = self.start_head(out)\n end_out = self.end_head(out)\n loss = None\n if start is not None:\n loss = self.loss_fn(start_out, end_out, start, end)\n\n return start_out, end_out, loss\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.sigmoid", "torch.softmax", "torch.cat", "torch.nn.init.xavier_normal_", "torch.nn.Linear", "numpy.argmax" ] ]
profefonso/MachineLearnigClasification
[ "12d309a3cb088a3b1597fb899066f8d1541e05c8" ]
[ "models/main.py" ]
[ "# Cargo las librerías necesarias\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import cross_val_score, train_test_split\r\nfrom sklearn import metrics\r\nfrom sklearn import model_selection\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\r\nfrom load_images import LecturaImagenes\r\n\r\nlectura_imagenes = LecturaImagenes()\r\nmatriz_imagenes, etiquetas = lectura_imagenes.cargaImagenes(hogd=True)\r\nprint(etiquetas)\r\n\r\n# Division del Dataset en 80% train y 20% prueba\r\nx_train, x_test, y_train, y_test = train_test_split(matriz_imagenes, etiquetas, \r\ntest_size=0.20, random_state=0)\r\n#print(x_test)\r\n#print(y_test)\r\n\r\nprint('::::: CLASIFICADOR CON REGRESION LOGISTICA :::::')\r\n# Entrenando el modelo\r\nlogisticRegr = LogisticRegression(random_state=0, max_iter=1000)\r\nhistory = logisticRegr.fit(x_train, y_train)\r\n\r\n# Realizamos prediccion con la informacion de test\r\n#logisticRegr.predict(x_test[0].reshape(1,-1))\r\npredictions = logisticRegr.predict(x_test)\r\n\r\n# Usamos el Score para obtener el Accuracy del modelo\r\nprint('::::: ACCURACY :::::')\r\nscore = cross_val_score(logisticRegr, matriz_imagenes, etiquetas, cv=5)\r\ntext_score = \"Accuracy Score = {:f} (+/- {:f})\".format(-score.mean(), score.std())\r\nprint(text_score)\r\n\r\n# Generamos la matriz de confusion\r\nprint('::::: MATRIZ CONFUSION :::::')\r\ncm = metrics.confusion_matrix(y_test, predictions)\r\nprint(cm)\r\nplt.figure(figsize=(9,9))\r\nsns.heatmap(cm, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r')\r\nplt.title('CONFUSION MATRIX - LOGISTIC REGRESSION')\r\nplt.ylabel('Actual label')\r\nplt.xlabel('Predicted label')\r\nplt.suptitle(text_score)\r\nplt.show()\r\nprint(metrics.classification_report(y_test, predictions))\r\n\r\ntrain_sizes = [10, 20, 30]\r\ntrain_sizes, train_scores, valid_scores = model_selection.learning_curve(logisticRegr, matriz_imagenes, etiquetas, train_sizes = train_sizes, cv=5,scoring = 'neg_mean_squared_error')\r\nprint(train_sizes)\r\nprint(train_scores)\r\nprint(valid_scores)\r\n\r\nprint(\" \")\r\n\r\n\r\n# CLASIFICADOR CON SVM\r\nprint('::::: CLASIFICADOR CON SVM :::::')\r\n# Entrenando el modelo\r\nsvm = SVC(kernel='linear', max_iter=1000)\r\nhistory = svm.fit(x_train, y_train)\r\n\r\n# Realizamos prediccion con la informacion de test\r\n#logisticRegr.predict(x_test[0].reshape(1,-1))\r\npredictions = svm.predict(x_test)\r\n\r\n# Usamos el Score para obtener el Accuracy del modelo\r\nprint('::::: ACCURACY :::::')\r\nscore = cross_val_score(svm, matriz_imagenes, etiquetas, cv=5)\r\ntext_score = \"Accuracy Score = {:f} (+/- {:f})\".format(-score.mean(), score.std())\r\nprint(text_score)\r\n\r\n# Generamos la matriz de confusion\r\nprint('::::: MATRIZ CONFUSION :::::')\r\ncm = metrics.confusion_matrix(y_test, predictions)\r\nprint(cm)\r\nplt.figure(figsize=(9,9))\r\nsns.heatmap(cm, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Greens_r')\r\nplt.title('CONFUSION MATRIX - SVM')\r\nplt.ylabel('Actual label')\r\nplt.xlabel('Predicted label')\r\nplt.suptitle(text_score)\r\nplt.show()\r\nprint(metrics.classification_report(y_test, predictions))\r\n\r\n\r\n" ]
[ [ "sklearn.model_selection.cross_val_score", "sklearn.linear_model.LogisticRegression", "matplotlib.pyplot.title", "sklearn.model_selection.learning_curve", "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.ylabel", "sklearn.svm.SVC", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.suptitle", "sklearn.metrics.classification_report", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
sjurug/SVDB
[ "1427e964f88815f9b1fd180af6cdeb1ff29b2eaa" ]
[ "svdb/query_module.py" ]
[ "from __future__ import absolute_import\nimport sys, os, glob\nfrom . import readVCF\nfrom . import overlap_module\nfrom operator import itemgetter\nfrom . import merge_vcf_module_cython\nimport sqlite3\nimport gzip\n\nimport numpy as np\n\ndef main(args):\n #start by loading the variations\n queries = []\n if args.prefix:\n f=open(args.prefix+\"_query.vcf\",\"w\")\n noOCCTag=1;\n infoFound=0;\n\n if args.query_vcf.endswith(\".gz\"):\n f=gzip.open(args.query_vcf,\"rt\")\n else:\n f=open(args.query_vcf,\"rt\")\n\n for line in f:\n if line[0] == \"#\":\n meta_line=line.replace(\"#\",\"\");\n content=meta_line.split(\"=\");\n\n lookForFilter=meta_line.split(\"=\");\n #the last infotag will be the Feature tag\n if(lookForFilter[0] != \"INFO\" and noOCCTag and infoFound==1):\n if not args.prefix:\n sys.stdout.write(\"##INFO=<ID={},Number=1,Type=Integer,Description=\\\"The number of occurances of the event in the database\\\">\\n\".format(args.out_occ));\n sys.stdout.write(\"##INFO=<ID={},Number=1,Type=Float,Description=\\\"The frequency of the event in the database\\\">\\n\".format(args.out_frq));\n sys.stdout.write(line);\n else:\n f.write(\"##INFO=<ID={},Number=1,Type=Integer,Description=\\\"The number of occurances of the event in the database\\\">\\n\".format(args.out_occ))\n f.write(\"##INFO=<ID={},Number=1,Type=Float,Description=\\\"The frequency of the event in the database\\\">\\n\".format(args.out_frq))\n f.write(line)\n\n infoFound=0;noFeatureTag=0;\n elif(lookForFilter[0] == \"INFO\"):\n if not args.prefix:\n sys.stdout.write(line);\n else:\n f.write(line)\n\n infoFound=1;\n #there should only be one feature tag per vf file\n if(line == \"INFO=<ID={},Number=1,Type=Integer,Description=\\\"The number of occurances of the event in the database\\\">\".format(args.out_occ)):\n noOCCTag=0\n\n\n else:\n if line[1] != \"#\":\n if not args.prefix:\n print (\"##SVDB_version={} cmd=\\\"{}\\\"\".format(args.version,\" \".join(sys.argv)))\n else:\n f.write( \"##SVDB_version={} cmd=\\\"{}\\\"\".format(args.version,\" \".join(sys.argv)) )\n if not args.prefix:\n sys.stdout.write(line)\n else:\n f.write(line)\n continue\n\n #in this case I need to store a query\n chrA,posA,chrB,posB,event_type,INFO,FORMAT =readVCF.readVCFLine(line);\n current_variation = [chrA, int(posA), chrB, int(posB),event_type, FORMAT, line] # plus a counter and the variation\n queries.append(current_variation)\n \n # at this point queries contains an entry for each variation\n #now query each sample.db present in the given folder and store the occurences\n \n if args.bedpedb or args.db:\n if args.bedpedb:\n args.db=args.bedpedb\n db_file=args.db\n DBvariants={}\n db_size=1\n Use_OCC_tag=False\n if args.in_occ:\n OCC_tag=args.in_occ\n Use_OCC_tag=True \n\n if args.in_frq:\n FRQ_tag=args.in_frq\n\n\n if db_file.endswith(\".gz\"):\n f=gzip.open(db_file,\"rt\")\n else:\n f=open(db_file,\"rt\")\n\n #print FRQ_tag\n for line in f:\n if line[0] == \"#\":\n continue\n \n if args.bedpedb:\n content=line.strip().split()\n\n if ( content[0] == content[2] and (int(content[1]) < int(content[3])) ) or (content[0] < content[2]):\n chrA=content[0]\n posA=int(content[1])\n chrB=content[2]\n posB=int(content[3])\n else:\n chrA=content[2]\n posA=int(content[3])\n chrB=content[0]\n posB=int(content[1])\n\n event_type=content[4]\n hits=int(content[5])\n frequency=float(content[6])\n FORMAT=[False]\n\n else:\n chrA,posA,chrB,posB,event_type,INFO,FORMAT = readVCF.readVCFLine(line)\n\n if not chrA in DBvariants:\n DBvariants[chrA]={}\n if not chrB 
in DBvariants[chrA]:\n DBvariants[chrA][chrB]={}\n if not event_type in DBvariants[chrA][chrB]:\n DBvariants[chrA][chrB][event_type]={}\n DBvariants[chrA][chrB][event_type][\"samples\"]=[]\n DBvariants[chrA][chrB][event_type][\"coordinates\"]=[]\n\n DBvariants[chrA][chrB][event_type][\"coordinates\"].append(np.array([int(posA),int(posB)]))\n if \"GT\" in FORMAT and not Use_OCC_tag:\n DBvariants[chrA][chrB][event_type][\"samples\"].append(np.array(FORMAT[\"GT\"]))\n db_size=len(FORMAT[\"GT\"])\n elif args.bedpedb:\n DBvariants[chrA][chrB][event_type][\"samples\"].append([hits,frequency])\n Use_OCC_tag=True\n \n else:\n try:\n OCC=INFO[OCC_tag]\n FRQ=INFO[FRQ_tag]\n DBvariants[chrA][chrB][event_type][\"samples\"].append([OCC,FRQ])\n Use_OCC_tag=True\n except:\n print(\"Error: frequency or hit tag not found! Make sure to set the --in_occ AND --in_frq to the number and frequency of alleles/individuals as presented in the INFO column of the input db\\n\" )\n print(\"database variants not having the --in_occ or --in_frq tag must be removed\")\n print(\"you may also skip these parameters and cluster based on the GT entry of the format column (if such exists)\" )\n quit()\n\n for chrA in DBvariants:\n for chrB in DBvariants[chrA]:\n for var in DBvariants[chrA][chrB]:\n DBvariants[chrA][chrB][var][\"coordinates\"]=np.array(DBvariants[chrA][chrB][var][\"coordinates\"]) \n DBvariants[chrA][chrB][var][\"samples\"]=np.array(DBvariants[chrA][chrB][var][\"samples\"])\n \n for query in queries:\n hits = queryVCFDB(DBvariants, query,args,Use_OCC_tag)\n query[5] = hits\n\n for query in queries:\n vcf_entry = query[6].strip()\n content=vcf_entry.split(\"\\t\")\n if not Use_OCC_tag:\n if query[5]:\n content[7]=\"{};{}={};{}={}\".format(content[7],args.out_occ, query[5],args.out_frq,(query[5]/float(db_size ) ))\n else:\n if query[5][0]:\n content[7]=\"{};{}={};{}={}\".format(content[7],args.out_occ, int(query[5][0]),args.out_frq,query[5][1]) \n \n if not args.prefix:\n print((\"\\t\").join(content))\n else:\n f.write((\"\\t\").join(content)+\"\\n\")\n return()\n \n \n elif args.sqdb:\n db_file=args.sqdb\n conn = sqlite3.connect(args.sqdb)\n \n if args.memory:\n memory_db=sqlite3.connect(':memory:')\n db_dump=\"\".join(line for line in conn.iterdump())\n memory_db.executescript(db_dump)\n conn.close()\n c = memory_db.cursor()\n else:\n c=conn.cursor()\n \n db_size=0\n A='SELECT DISTINCT sample FROM SVDB'\n for sample in c.execute(A):\n db_size +=1\n if not db_size:\n print (\"error: no samples in the db\")\n quit()\n\n for query in queries:\n hits = SQDB(query,args,c)\n query[5] = hits \n\n \n \n \n for query in queries:\n vcf_entry = query[6].strip()\n content=vcf_entry.split(\"\\t\")\n frq = query[5] / float(db_size)\n if frq > args.max_frq:\n continue\n if query[5]:\n content[7] = \"{};{}={};{}={}\".format(content[7], args.out_occ, query[5], args.out_frq, frq)\n if not args.prefix:\n print((\"\\t\").join(content))\n else:\n f.write((\"\\t\").join(content)+\"\\n\")\n\n\n\ndef queryVCFDB(DBvariants, Query_variant,args,Use_OCC_tag):\n chrA = Query_variant[0]\n chrApos = Query_variant[1]\n chrB =Query_variant[2]\n chrBpos = Query_variant[3]\n variation_type=Query_variant[4]\n samples=set([])\n frequency=[]\n occ=[]\n similarity=[]\n if not chrA in DBvariants:\n if Use_OCC_tag:\n return([0,0])\n else:\n return 0\n if not chrB in DBvariants[chrA]:\n if Use_OCC_tag:\n return([0,0])\n else:\n return 0\n for var in DBvariants[chrA][chrB]:\n if not args.no_var and variation_type != var:\n continue\n\n 
#candidates=DBvariants[chrA][chrB][var][\"coordinates\"][ ( args.bnd_distance >= abs(DBvariants[chrA][chrB][var][\"coordinates\"][:,0] - chrApos) ) & ( args.bnd_distance >= abs(DBvariants[chrA][chrB][var][\"coordinates\"][:,1] - chrBpos) ) ]\n candidates=np.where( ( args.bnd_distance >= abs(DBvariants[chrA][chrB][var][\"coordinates\"][:,0] - chrApos) ) & ( args.bnd_distance >= abs(DBvariants[chrA][chrB][var][\"coordinates\"][:,1] - chrBpos) ) ) \n if not len(candidates[0]) and not args.no_var:\n if Use_OCC_tag:\n return([0,0])\n else:\n return 0\n # check if this variation is already present\n for candidate in candidates[0]:\n event=DBvariants[chrA][chrB][var][\"coordinates\"][candidate]\n sample_list=DBvariants[chrA][chrB][var][\"samples\"][candidate]\n #check if the variant type of the events is the same\n hit_tmp = None\n match=False\n\n if not (chrA == chrB):\n hit_tmp,match=overlap_module.precise_overlap(chrApos,chrBpos,event[0],event[1],args.bnd_distance)\n else:\n hit_tmp,match = overlap_module.isSameVariation(chrApos,chrBpos,event[0],event[1],args.overlap,args.bnd_distance)\n\n if match:\n similarity.append(hit_tmp)\n if Use_OCC_tag:\n occ.append(sample_list[0])\n frequency.append(sample_list[1])\n else:\n for i in range(0,len(sample_list)):\n GT=sample_list[i]\n if not GT == \"0|0\" and not GT == \"0/0\": \n samples = samples | set([i])\n if Use_OCC_tag:\n if occ:\n if not (chrA == chrB):\n idx=similarity.index(min(similarity))\n else:\n idx=similarity.index(max(similarity))\n hits=[ occ[idx],frequency[idx] ]\n else:\n hits=[0,0]\n else:\n hits=len(samples)\n \n return hits\n \n\ndef SQDB(Query_variant,args,c):\n distance = args.bnd_distance\n overlap = args.overlap\n variant={}\n variant[\"type\"]=Query_variant[4]\n variant[\"chrA\"]=Query_variant[0]\n variant[\"chrB\"]=Query_variant[2]\n variant[\"posA\"]=Query_variant[1]\n variant[\"ci_A_start\"]= 0\n variant[\"ci_A_end\"]= 0\n variant[\"posB\"]=Query_variant[3]\n variant[\"ci_B_start\"]= 0\n variant[\"ci_B_end\"]= 0\n \n selection =\"sample\"\n if variant[\"chrA\"] == variant[\"chrB\"]:\n selection = \"posA, posB, sample\"\n\n A='SELECT {} FROM SVDB WHERE var == \\'{}\\' AND chrA == \\'{}\\' AND chrB == \\'{}\\' AND posA <= {} AND posA >= {} AND posB <= {} AND posB >= {}'.format(selection,variant[\"type\"],variant[\"chrA\"],variant[\"chrB\"],variant[\"posA\"]+distance, variant[\"posA\"] -distance,variant[\"posB\"] + distance, variant[\"posB\"]-distance)\n hits = c.execute(A)\n\n match=set([])\n occurances=0\n for hit in hits:\n if variant[\"chrA\"] == variant[\"chrB\"]:\n var={}\n var[\"posA\"]=int( hit[0] )\n var[\"posB\"]=int( hit[1] )\n var[\"index\"]=hit[2]\n similar,tmp=overlap_module.isSameVariation(variant[\"posA\"],variant[\"posB\"],var[\"posA\"],var[\"posB\"],overlap,distance)\n if similar:\n match.add( var[\"index\"] )\n \n else:\n match.add(hit[0])\n\n occurances=len(match)\n return occurances \n" ]
[ [ "numpy.array" ] ]
foamliu/3D-Object-Detection
[ "1c5ae0b470f08abe652f38817393ebd0a8cf3ea9" ]
[ "data_generator_depth.py" ]
[ "import os\nimport random\nfrom random import shuffle\n\nimport cv2 as cv\nimport numpy as np\n\nfrom config import img_cols\nfrom config import img_rows\n\ntrain_folder = 'data/rgb'\ndepth_folder = 'data/depth'\nsemantic_folder = 'data/semantic'\n\n\ndef get_depth(name):\n tokens = name.split('_')\n tokens[-1] = 'depth.png'\n name = '_'.join(tokens)\n filename = os.path.join(depth_folder, name)\n label = cv.imread(filename, 0)\n return label\n\n\ndef random_choice(image_size, crop_size):\n height, width = image_size\n crop_height, crop_width = crop_size\n x = random.randint(0, width - crop_width)\n y = random.randint(0, height - crop_height)\n return x, y\n\n\ndef safe_crop(mat, x, y, crop_size):\n crop_height, crop_width = crop_size\n if len(mat.shape) == 2:\n ret = np.zeros((crop_height, crop_width), np.float32)\n else:\n ret = np.zeros((crop_height, crop_width, 3), np.float32)\n crop = mat[y:y + crop_height, x:x + crop_width]\n h, w = crop.shape[:2]\n ret[0:h, 0:w] = crop\n if crop_size != (320, 320):\n ret = cv.resize(ret, dsize=(img_rows, img_cols), interpolation=cv.INTER_CUBIC)\n return ret\n\n\ndef data_gen(usage, batch_size):\n filename = '{}_names.txt'.format(usage)\n with open(filename, 'r') as f:\n names = f.read().splitlines()\n i = 0\n np.random.shuffle(names)\n while True:\n batch_x = np.empty((batch_size, img_rows, img_cols, 3), dtype=np.float32)\n batch_y = np.empty((batch_size, img_rows, img_cols, 1), dtype=np.float32)\n\n for i_batch in range(batch_size):\n name = names[i]\n filename = os.path.join(train_folder, name)\n image = cv.imread(filename)\n image_size = image.shape[:2]\n depth = get_depth(name)\n\n different_sizes = [(320, 320), (480, 480), (640, 640)]\n crop_size = random.choice(different_sizes)\n\n x, y = random_choice(image_size, crop_size)\n image = safe_crop(image, x, y, crop_size)\n depth = safe_crop(depth, x, y, crop_size)\n\n if np.random.random_sample() > 0.5:\n image = np.fliplr(image)\n depth = np.fliplr(depth)\n\n batch_x[i_batch, :, :, 0:3] = image / 255.\n batch_y[i_batch, :, :, 0] = depth / 255.\n\n i += 1\n if i >= len(names):\n i = 0\n np.random.shuffle(names)\n\n yield batch_x, batch_y\n\n\ndef train_gen(batch_size):\n return data_gen('train', batch_size)\n\n\ndef valid_gen(batch_size):\n return data_gen('valid', batch_size)\n\n\ndef split_data():\n train_folder = 'data/rgb'\n names = [f for f in os.listdir(train_folder) if f.endswith('.png')]\n num_samples = len(names)\n print('num_samples: ' + str(num_samples))\n num_train_samples = int(num_samples * 0.8)\n print('num_train_samples: ' + str(num_train_samples))\n num_valid_samples = num_samples - num_train_samples\n print('num_valid_samples: ' + str(num_valid_samples))\n valid_names = random.sample(names, num_valid_samples)\n train_names = [n for n in names if n not in valid_names]\n shuffle(valid_names)\n shuffle(train_names)\n\n with open('valid_names.txt', 'w') as file:\n file.write('\\n'.join(valid_names))\n\n with open('train_names.txt', 'w') as file:\n file.write('\\n'.join(train_names))\n\n\nif __name__ == '__main__':\n split_data()\n" ]
[ [ "numpy.fliplr", "numpy.random.random_sample", "numpy.random.shuffle", "numpy.zeros", "numpy.empty" ] ]
ZhouSky/tianshou
[ "253c5805def07355e10345222613a815c7eea823" ]
[ "mytest/testutil.py" ]
[ "import numpy as np\nimport torch\n\n\ndef generate_task_index(task_interval):\n task_ind = np.zeros((task_interval[-1]), dtype=np.int32)\n for i in range(task_interval.size - 1):\n task_ind[task_interval[i]: task_interval[i + 1]] = i\n return task_ind\n\n\ndef compute_errors(output, task_ind, label, num_task):\n num_ins = np.zeros([num_task])\n errors = np.zeros([num_task + 1])\n for i in range(output.shape[0]):\n num_ins[task_ind[i]] += 1\n if np.argmax(output[i, :]) != label[i]:\n errors[task_ind[i]] += 1\n for i in range(num_task):\n errors[i] = errors[i] / num_ins[i]\n errors[-1] = np.mean(errors[0:num_task])\n return errors\n\n\ndef test_net(net, testdata, testlabel, test_task_interval, device=torch.device('cpu')):\n task_ind = generate_task_index(test_task_interval)\n outputs = []\n with torch.no_grad():\n for a in range(testdata.shape[0]):\n outputs.append(\n net(torch.tensor(testdata[a], dtype=torch.float, device=device), task_ind[a])\n )\n output = torch.stack(outputs, 0)\n test_error = compute_errors(\n output.cpu().numpy(), task_ind, testlabel, test_task_interval.size - 1\n )\n\n return test_error\n" ]
[ [ "torch.tensor", "numpy.argmax", "torch.no_grad", "numpy.mean", "torch.device", "numpy.zeros", "torch.stack" ] ]
gohyojun15/CSRNet-pytorch
[ "f52777657dd0136f056550465ce9b3b5c8ec416f" ]
[ "test.py" ]
[ "import argparse\n\nimport torch\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as CM\nfrom tqdm import tqdm\n\nfrom config import Config\nfrom model import CSRNet\nfrom dataset import create_train_dataloader, create_test_dataloader, CrowdDataset\nfrom utils import denormalize\n\nparser = argparse.ArgumentParser(description=\"generate density map for crane\")\n\n\n#train datasets\nparser.add_argument(\"--train_image_root\",type=str,help=\"image data root\")\nparser.add_argument(\"--train_image_gt_root\",type=str,help=\"ground truth root\")\nparser.add_argument(\"--train_image_density_root\",type=str,help=\"density map root.\")\n# test datasets\nparser.add_argument(\"--test_image_root\",type=str,help=\"image data root\")\nparser.add_argument(\"--test_image_gt_root\",type=str,help=\"ground truth root\")\nparser.add_argument(\"--test_image_density_root\",type=str,help=\"density map root.\")\n\n\n\ndef cal_mae(img_root,model_param_path):\n '''\n Calculate the MAE of the test data.\n img_root: the root of test image data.\n gt_dmap_root: the root of test ground truth density-map data.\n model_param_path: the path of specific mcnn parameters.\n '''\n device=torch.device(\"cuda\")\n model=CSRNet()\n model.load_state_dict(torch.load(model_param_path))\n model.to(device)\n dataset=create_test_dataloader(img_root)\n dataloader=torch.utils.data.DataLoader(dataset,batch_size=1,shuffle=False)\n model.eval()\n mae=0\n with torch.no_grad():\n for i, data in enumerate(tqdm(dataloader)):\n image = data['image'].cuda()\n gt_densitymap = data['densitymap'].cuda()\n # forward propagation\n et_dmap=model(image)\n mae+=abs(et_dmap.data.sum()-gt_densitymap.data.sum()).item()\n del image,gt_densitymap,et_dmap\n print(\"model_param_path:\"+model_param_path+\" mae:\"+str(mae/len(dataloader)))\n\ndef estimate_density_map(img_root,model_param_path,index):\n '''\n Show one estimated density-map.\n img_root: the root of test image data.\n gt_dmap_root: the root of test ground truth density-map data.\n model_param_path: the path of specific mcnn parameters.\n index: the order of the test image in test dataset.\n '''\n device=torch.device(\"cuda\")\n model=CSRNet().to(device)\n model.load_state_dict(torch.load(model_param_path))\n dataset=CrowdDataset(img_root)\n dataloader=torch.utils.data.DataLoader(dataset,batch_size=1,shuffle=False)\n model.eval()\n for i,(img,gt_dmap) in enumerate(dataloader):\n if i==index:\n img=img.to(device)\n gt_dmap=gt_dmap.to(device)\n # forward propagation\n et_dmap=model(img).detach()\n et_dmap=et_dmap.squeeze(0).squeeze(0).cpu().numpy()\n print(et_dmap.shape)\n plt.imshow(et_dmap,cmap=CM.jet)\n break\n\n\nif __name__==\"__main__\":\n args = parser.parse_args()\n\n torch.backends.cudnn.enabled=False\n\n test_dataset_root = [\n args.test_image_root,\n args.test_image_gt_root,\n args.test_image_density_root\n ]\n\n model_param_path='./checkpoints/346.pth'\n cal_mae(test_dataset_root,model_param_path)\n # estimate_density_map(img_root,gt_dmap_root,model_param_path,3) " ]
[ [ "matplotlib.pyplot.imshow", "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.device" ] ]
voxel-scape/voxel-scape-api
[ "d04fd9bd530ec3be7a3372562aea824fb2dcacc9" ]
[ "tasks/semantic/modules/user.py" ]
[ "#!/usr/bin/env python3\n# This file is covered by the LICENSE file in the root of this project.\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transforms\nimport imp\nimport yaml\nimport time\nfrom PIL import Image\nimport __init__ as booger\nimport collections\nimport copy\nimport cv2\nimport os\nimport numpy as np\n\nfrom tasks.semantic.modules.segmentator import *\nfrom tasks.semantic.postproc.KNN import KNN\n\n\nclass User():\n def __init__(self, ARCH, DATA, datadir, logdir, modeldir):\n # parameters\n self.ARCH = ARCH\n self.DATA = DATA\n self.datadir = datadir\n self.logdir = logdir\n self.modeldir = modeldir\n\n # get the data\n parserModule = imp.load_source(\"parserModule\",\n booger.TRAIN_PATH + '/tasks/semantic/dataset/' +\n self.DATA[\"name\"] + '/parser.py')\n self.parser = parserModule.Parser(root=self.datadir,\n train_sequences=self.DATA[\"split\"][\"train\"],\n valid_sequences=self.DATA[\"split\"][\"valid\"],\n test_sequences=self.DATA[\"split\"][\"test\"],\n labels=self.DATA[\"labels\"],\n color_map=self.DATA[\"color_map\"],\n learning_map=self.DATA[\"learning_map\"],\n learning_map_inv=self.DATA[\"learning_map_inv\"],\n sensor=self.ARCH[\"dataset\"][\"sensor\"],\n max_points=self.ARCH[\"dataset\"][\"max_points\"],\n batch_size=1,\n workers=self.ARCH[\"train\"][\"workers\"],\n gt=True,\n shuffle_train=False)\n\n # concatenate the encoder and the head\n with torch.no_grad():\n self.model = Segmentator(self.ARCH,\n self.parser.get_n_classes(),\n self.modeldir)\n\n # use knn post processing?\n self.post = None\n if self.ARCH[\"post\"][\"KNN\"][\"use\"]:\n self.post = KNN(self.ARCH[\"post\"][\"KNN\"][\"params\"],\n self.parser.get_n_classes())\n\n # GPU?\n self.gpu = False\n self.model_single = self.model\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Infering in device: \", self.device)\n if torch.cuda.is_available() and torch.cuda.device_count() > 0:\n cudnn.benchmark = True\n cudnn.fastest = True\n self.gpu = True\n self.model.cuda()\n\n def infer(self):\n # do train set\n # self.infer_subset(loader=self.parser.get_train_set(),\n # to_orig_fn=self.parser.to_original)\n\n # do valid set\n self.infer_subset(loader=self.parser.get_valid_set(),\n to_orig_fn=self.parser.to_original)\n # do test set\n # self.infer_subset(loader=self.parser.get_test_set(),\n # to_orig_fn=self.parser.to_original)\n\n print('Finished Infering')\n\n return\n\n def infer_subset(self, loader, to_orig_fn):\n # switch to evaluate mode\n self.model.eval()\n\n # empty the cache to infer in high res\n if self.gpu:\n torch.cuda.empty_cache()\n\n with torch.no_grad():\n end = time.time()\n\n for i, (proj_in, proj_mask, _, _, path_seq, path_name, p_x, p_y, proj_range, unproj_range, _, _, _, _, npoints) in enumerate(loader):\n # first cut to rela size (batch size one allows it)\n p_x = p_x[0, :npoints]\n p_y = p_y[0, :npoints]\n proj_range = proj_range[0, :npoints]\n unproj_range = unproj_range[0, :npoints]\n path_seq = path_seq[0]\n path_name = path_name[0]\n\n if self.gpu:\n proj_in = proj_in.cuda()\n proj_mask = proj_mask.cuda()\n p_x = p_x.cuda()\n p_y = p_y.cuda()\n if self.post:\n proj_range = proj_range.cuda()\n unproj_range = unproj_range.cuda()\n\n # compute output\n proj_output = self.model(proj_in, proj_mask)\n proj_argmax = proj_output[0].argmax(dim=0)\n\n if self.post:\n # knn postproc\n unproj_argmax = self.post(proj_range,\n unproj_range,\n proj_argmax,\n 
p_x,\n p_y)\n else:\n # put in original pointcloud using indexes\n unproj_argmax = proj_argmax[p_y, p_x]\n\n # measure elapsed time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n\n print(\"Infered seq\", path_seq, \"scan\", path_name,\n \"in\", time.time() - end, \"sec\")\n end = time.time()\n\n # save scan\n # get the first scan in batch and project scan\n pred_np = unproj_argmax.cpu().numpy()\n pred_np = pred_np.reshape((-1)).astype(np.int32)\n\n # map to original label\n pred_np = to_orig_fn(pred_np)\n\n # save scan\n path = os.path.join(self.logdir, \"sequences\",\n path_seq, \"predictions\", path_name)\n pred_np.tofile(path)\n" ]
[ [ "torch.cuda.synchronize", "torch.cuda.empty_cache", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.device_count" ] ]
hiyouga/SAGAN-PyTorch
[ "e469cfa47e4317dce910c54687e9d24f571d6f2b" ]
[ "data_utils.py" ]
[ "from torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\n\n\ndef load_data(im_size, batch_size, workers, dataset, data_path):\n transform = transforms.Compose([\n transforms.Resize((im_size, im_size)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n if dataset == 'cifar10':\n dataset = datasets.CIFAR10(data_path, train=True, transform=transform, download=True)\n else:\n assert False, f\"Unknwn dataset: {dataset}\"\n dataloader = DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=workers,\n drop_last=True,\n pin_memory=True)\n return dataloader\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
AdrKacz/IA-Negotiation
[ "d5f9544d2212919b9f5feb5429db4aeacb06add3" ]
[ "rl-python/environment.py" ]
[ "from agent import Buyer, Seller\nfrom copy import deepcopy\nfrom math import exp\nimport matplotlib.pyplot as plt\n\nclass Environment():\n def __init__(self):\n # Training Information\n self.num_cycles = int(5e1)\n self.num_episodes = int(1e2)\n self.num_episodes_testing = int(1e3)\n\n\n # Complexity : price_space_size = P , time_space_size = T\n self.price_space_size = 5\n self.time_space_size = 5\n\n # Actions (P + 2)\n # 1 <= i <= n : offer ; 0 : accept : -1 : reject\n self.action_space = [i + 1 for i in range(self.price_space_size)] + [0, -1]\n\n self.action_space_size = len(self.action_space)\n\n # States (P * T + 2)\n # price.time (for each price and for each time)\n # Start state s\n offer_states = [f'{price + 1}.{time}' for time in range(self.time_space_size) for price in range(self.price_space_size)]\n self.state_space = ['s'] + offer_states + ['d']\n\n self.state_space_size = len(self.state_space)\n\n self.last_offer = None\n self.time_step = 0\n\n # Buyer and Seller\n self.buyer, self.seller = Buyer(), Seller()\n\n def train(self, verbose=False, display_normalized=False, display_plot=False, train_both=True, train_agent=None):\n # Train Seller then Buyer alternatively\n if not train_both and not train_agent:\n raise ValueError('Specified a agent to train if you don\\'t train both')\n elif not train_both and train_agent not in ['Seller', 'Buyer']:\n raise ValueError('Train Agent must be either Seller or Buyer')\n\n # Initialise Q-Tables\n self.seller.initialise_q_table(self.action_space_size, self.state_space_size)\n self.buyer.initialise_q_table(self.action_space_size, self.state_space_size)\n\n # Statistics\n transactions_validated, transactions_rejected = 0, 0\n seller_wallets, buyer_wallets = list(), list()\n # Q-Learning Algorithm\n trainee, trainer = self.seller, self.buyer\n trainee_wallets, trainer_wallets = seller_wallets, buyer_wallets\n trainee_q_table, trainer_q_table = deepcopy(trainee.q_table), deepcopy(trainer.q_table)\n\n if train_agent == 'Buyer':\n trainee, trainer = trainer, trainee\n trainee_wallets, trainer_wallets = trainer_wallets, trainee_wallets\n trainee_q_table, trainer_q_table = trainer_q_table, trainee_q_table\n\n def trainer_first():\n # Trainer Start Negociation\n action_index = trainer.exploit(overwrite_q_table=trainer_q_table)\n step_return = self.step(self.action_space[action_index], trainee)\n trainee.state = step_return['new_state']\n\n if step_return['done']:\n raise ValueError('Must do an offer at first')\n\n for cycle in range(self.num_cycles):\n trainee.reset_wallet(), trainer.reset_wallet()\n for episode in range(self.num_episodes):\n self.reset()\n is_trainer_first = episode % 2 == 0\n if is_trainer_first:\n trainer_first()\n for step in range(self.time_space_size):\n action_index = trainee.act()\n step_return = self.step(self.action_space[action_index], trainee)\n\n if step_return['done']:\n if step_return['info']['type'] == 'validated':\n transactions_validated += 1\n trainee.update_wallet(step_return['info']['offer']), trainer.update_wallet(step_return['info']['offer'])\n elif step_return['info']['type'] == 'rejected':\n transactions_rejected += 1\n trainee.update_state(step_return)\n break\n\n if is_trainer_first:\n self.time_step += 1\n\n trainer.state = step_return['new_state']\n action_index = trainer.exploit(overwrite_q_table=trainer_q_table)\n step_return = self.step(self.action_space[action_index], trainee)\n trainee.update_state(step_return)\n\n if step_return['done']:\n if step_return['info']['type'] == 'validated':\n 
transactions_validated += 1\n trainee.update_wallet(step_return['info']['offer']), trainer.update_wallet(step_return['info']['offer'])\n elif step_return['info']['type'] == 'rejected':\n transactions_rejected += 1\n break\n\n if not is_trainer_first:\n self.time_step += 1\n\n if self.time_step >= self.time_space_size:\n raise ValueError('No one close the transaction before end')\n\n trainee.exploration_decay(episode)\n # Update Statistics\n trainee_wallets.append(trainee.wallet), trainer_wallets.append(trainer.wallet)\n trainee.save_statistics_delta(), trainer.save_statistics_delta()\n # Switch Trainee and Trainer (cache trainer copy from previous trainee training)\n if train_both:\n trainer_q_table = deepcopy(trainer.q_table)\n trainee, trainer = trainer, trainee\n trainee_wallets, trainer_wallets = trainer_wallets, trainee_wallets\n trainee_q_table, trainer_q_table = trainer_q_table, trainee_q_table\n\n assert transactions_validated + transactions_rejected == self.num_cycles * self.num_episodes\n\n if not verbose:\n return\n\n # Display result\n self.display_statistics(self.num_cycles * self.num_episodes, transactions_validated, transactions_rejected)\n\n # Raw Q-Tables\n self.display_q_tables()\n\n # Prob. Q-Tables\n if display_normalized:\n self.display_prob_q_tables()\n\n # Plot Result\n if display_plot:\n self.display_plot(seller_wallets, buyer_wallets)\n\n def test(self):\n # Statistics\n self.seller.reset_statistics(), self.buyer.reset_statistics()\n transactions_validated, transactions_rejected = 0, 0\n\n # Test Seller versus Buyer\n first, second = self.seller, self.buyer\n for episode in range(self.num_episodes_testing):\n self.reset()\n for step in range(self.time_space_size):\n action_index = first.exploit()\n step_return = self.step(self.action_space[action_index])\n\n if step_return['done']:\n if step_return['info']['type'] == 'validated':\n transactions_validated += 1\n first.update_wallet(step_return['info']['offer']), second.update_wallet(step_return['info']['offer'])\n elif step_return['info']['type'] == 'rejected':\n transactions_rejected += 1\n break\n\n second.state = step_return['new_state']\n action_index = second.exploit()\n step_return = self.step(self.action_space[action_index])\n\n if step_return['done']:\n if step_return['info']['type'] == 'validated':\n transactions_validated += 1\n first.update_wallet(step_return['info']['offer']), second.update_wallet(step_return['info']['offer'])\n elif step_return['info']['type'] == 'rejected':\n transactions_rejected += 1\n break\n\n self.time_step += 1\n\n if self.time_step >= self.time_space_size:\n raise ValueError('No one close the transaction before end')\n\n # Switch first and second\n first, second = second, first\n\n # Update Statistics\n assert transactions_validated + transactions_rejected == self.num_episodes_testing\n\n # Display result\n self.display_statistics(self.num_episodes_testing, transactions_validated, transactions_rejected)\n\n print(f'Seller Wallet: {self.seller.wallet:4>}')\n print(f'Buyer Wallet: {self.buyer.wallet:4>}')\n\n def display_statistics(self, transactions_total, transactions_validated, transactions_rejected):\n print('\\n', '===== ' * 5)\n print(f'Transactions Total\\t: {transactions_total:6d}')\n print(f'Transactions Validated\\t: {transactions_validated:6d}')\n print(f'Transactions Rejected\\t: {transactions_rejected:6d}')\n print('\\t\\tSeller\\t Buyer\\t Total')\n 
print(f'Validated:\\t{self.seller.transactions_validated:6d}\\t{self.buyer.transactions_validated:6d}\\t{self.seller.transactions_validated + self.buyer.transactions_validated:6d}')\n print(f'Rejected:\\t{self.seller.transactions_rejected:6d}\\t{self.buyer.transactions_rejected:6d}\\t{self.seller.transactions_rejected + self.buyer.transactions_rejected:6d}')\n\n def display_q_tables(self):\n print('\\n', '===== ' * 5)\n print(' ' * 10, 'Seller Raw Q-Table\\t\\t\\t\\t \\t Buyer Raw Q-Table ')\n # Print action space\n to_string = lambda row : ' '.join(map(lambda x:f'{x:>5}', row))\n print(' ' * 10, to_string(self.action_space), '\\t|\\t', to_string(self.action_space))\n for i in range(len(self.seller.q_table)):\n state = self.state_space[i]\n seller_row = self.seller.q_table[i]\n buyer_row = self.buyer.q_table[i]\n to_string = lambda row : ' '.join(map(lambda x:f'{x:.0e}' if x != 0 else ' ' * 5, row))\n\n print(f'[{i:2d}] {state:>3}', '>', to_string(seller_row), '\\t|\\t', to_string(buyer_row))\n\n def display_prob_q_tables(self):\n print('\\n', '===== ' * 5)\n print(' ' * 10, 'Seller Prob. Q-Table\\t\\t \\t Buyer Prob. Q-Table ')\n # Print action space\n to_string = lambda row : ' '.join(map(lambda x:f'{x:>3}', row))\n print(' ' * 10, to_string(self.action_space), '\\t|\\t', to_string(self.action_space))\n\n normalized = lambda row : [v / sum(row) if sum(row) > 0 else 0 for v in row]\n to_string_hidden = lambda row : [f'{v:.2f}' if v > 0 else ' ' for v in normalized(row)]\n to_string = lambda row : ' '.join(map(lambda x:f'{x[1:]:>3}', to_string_hidden(row)))\n for i in range(len(self.seller.q_table)):\n state = self.state_space[i]\n seller_row = self.seller.q_table[i]\n buyer_row = self.buyer.q_table[i]\n print(f'[{i:2d}] {state:>3}', '>', to_string(seller_row), '\\t|\\t', to_string(buyer_row))\n\n def display_plot(self, seller_wallets, buyer_wallets):\n cycles = list(range(self.num_cycles))\n plt.figure()\n plt.subplot(311)\n plt.title('Seller')\n plt.plot(cycles, self.seller.transactions_validated_list, 'o-', label='Validated', color='g')\n plt.plot(cycles, self.seller.transactions_rejected_list, 'o-', label='Rejected', color='r')\n plt.legend()\n\n plt.subplot(312)\n plt.title('Buyer')\n plt.plot(cycles, self.buyer.transactions_validated_list, 'o-', label='Validated', color='g')\n plt.plot(cycles, self.buyer.transactions_rejected_list, 'o-', label='Rejected', color='r')\n plt.legend()\n\n plt.subplot(313)\n plt.title('Wallet')\n plt.plot(cycles, seller_wallets, 'o-', label='Seller', color='b')\n plt.plot(cycles, buyer_wallets, 'o-', label='Buyer', color='m')\n plt.legend()\n\n plt.show()\n\n def reset(self):\n self.time_step = 0\n\n self.seller.reset()\n self.seller.state = self.state_space.index('s')\n self.seller.current_reward = 0\n\n self.buyer.reset()\n self.buyer.state = self.state_space.index('s')\n self.buyer.current_reward = 0\n\n def step(self, action, agent=None):\n # TODO: Dissociate Reward for Seller and Buyer\n # NOTE: Reward based on previous state to maximise profit\n\n new_state_string, done, info = None, False, {}\n\n new_state_string = f'{self.time_step}'\n if action == -1:\n new_state_string = 'd'\n done = True\n info = {'type': 'rejected'}\n elif action == 0:\n new_state_string = 'd'\n done = True\n info = {'type': 'validated', 'offer': self.last_offer}\n else:\n self.last_offer = action\n new_state_string = f'{action}.{new_state_string}'\n\n step_return_without_reward = {\n 'new_state': self.state_space.index(new_state_string),\n 'done': done,\n 'info': info,\n }\n 
return {\n **step_return_without_reward,\n 'reward': agent.get_reward(step_return_without_reward) if agent else 0,\n }\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
sasha-scale/vision
[ "2ab08289cfaca8c006ade977deeefa53cc231890" ]
[ "torchvision/transforms/transforms.py" ]
[ "import math\nimport numbers\nimport random\nimport warnings\nfrom collections.abc import Sequence\nfrom typing import Tuple, List, Optional\n\nimport torch\nfrom PIL import Image\nfrom torch import Tensor\n\ntry:\n import accimage\nexcept ImportError:\n accimage = None\n\nfrom . import functional as F\n\n\n__all__ = [\"Compose\", \"ToTensor\", \"PILToTensor\", \"ConvertImageDtype\", \"ToPILImage\", \"Normalize\", \"Resize\", \"Scale\",\n \"CenterCrop\", \"Pad\", \"Lambda\", \"RandomApply\", \"RandomChoice\", \"RandomOrder\", \"RandomCrop\",\n \"RandomHorizontalFlip\", \"RandomVerticalFlip\", \"RandomResizedCrop\", \"RandomSizedCrop\", \"FiveCrop\", \"TenCrop\",\n \"LinearTransformation\", \"ColorJitter\", \"RandomRotation\", \"RandomAffine\", \"Grayscale\", \"RandomGrayscale\",\n \"RandomPerspective\", \"RandomErasing\"]\n\n_pil_interpolation_to_str = {\n Image.NEAREST: 'PIL.Image.NEAREST',\n Image.BILINEAR: 'PIL.Image.BILINEAR',\n Image.BICUBIC: 'PIL.Image.BICUBIC',\n Image.LANCZOS: 'PIL.Image.LANCZOS',\n Image.HAMMING: 'PIL.Image.HAMMING',\n Image.BOX: 'PIL.Image.BOX',\n}\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass ToTensor(object):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]\n if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)\n or if the numpy.ndarray has dtype = np.uint8\n\n In the other cases, tensors are returned without scaling.\n\n .. note::\n Because the input image is scaled to [0.0, 1.0], this transformation should not be used when\n transforming target image masks. See the `references`_ for implementing the transforms for image masks.\n\n .. _references: https://github.com/pytorch/vision/tree/master/references/segmentation\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass PILToTensor(object):\n \"\"\"Convert a ``PIL Image`` to a tensor of the same type.\n\n Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.pil_to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass ConvertImageDtype(object):\n \"\"\"Convert a tensor image to the given ``dtype`` and scale the values accordingly\n\n Args:\n dtype (torch.dtype): Desired data type of the output\n\n .. 
note::\n\n When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.\n If converted back and forth, this mismatch has no effect.\n\n Raises:\n RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as\n well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to\n overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range\n of the integer ``dtype``.\n \"\"\"\n\n def __init__(self, dtype: torch.dtype) -> None:\n self.dtype = dtype\n\n def __call__(self, image: torch.Tensor) -> torch.Tensor:\n return F.convert_image_dtype(image, self.dtype)\n\n\nclass ToPILImage(object):\n \"\"\"Convert a tensor or an ndarray to PIL Image.\n\n Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape\n H x W x C to a PIL Image while preserving the value range.\n\n Args:\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n If ``mode`` is ``None`` (default) there are some assumptions made about the input data:\n - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.\n - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.\n - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.\n - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,\n ``short``).\n\n .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n \"\"\"\n def __init__(self, mode=None):\n self.mode = mode\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n\n Returns:\n PIL Image: Image converted to PIL Image.\n\n \"\"\"\n return F.to_pil_image(pic, self.mode)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n if self.mode is not None:\n format_string += 'mode={0}'.format(self.mode)\n format_string += ')'\n return format_string\n\n\nclass Normalize(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``\n channels, this transform will normalize each channel of the input\n ``torch.*Tensor`` i.e.,\n ``output[channel] = (input[channel] - mean[channel]) / std[channel]``\n\n .. note::\n This transform acts out of place, i.e., it does not mutate the input tensor.\n\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n inplace(bool,optional): Bool to make this operation in-place.\n\n \"\"\"\n\n def __init__(self, mean, std, inplace=False):\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n return F.normalize(tensor, self.mean, self.std, self.inplace)\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\nclass Resize(torch.nn.Module):\n \"\"\"Resize the input image to the given size.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. 
If size is an int,\n smaller edge of the image will be matched to this number.\n i.e, if height > width, then image will be rescaled to\n (size * height / width, size).\n In torchscript mode padding as single int is not supported, use a tuple or\n list of length 1: ``[size, ]``.\n interpolation (int, optional): Desired interpolation enum defined by `filters`_.\n Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``\n and ``PIL.Image.BICUBIC`` are supported.\n \"\"\"\n\n def __init__(self, size, interpolation=Image.BILINEAR):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"If size is a sequence, it should have 2 values\")\n self.interpolation = interpolation\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be scaled.\n\n Returns:\n PIL Image or Tensor: Rescaled image.\n \"\"\"\n return F.resize(img, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)\n\n\nclass Scale(Resize):\n \"\"\"\n Note: This transform is deprecated in favor of Resize.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.Scale transform is deprecated, \" +\n \"please use transforms.Resize instead.\")\n super(Scale, self).__init__(*args, **kwargs)\n\n\nclass CenterCrop(torch.nn.Module):\n \"\"\"Crops the given image at the center.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n return F.center_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass Pad(torch.nn.Module):\n \"\"\"Pad the given image on all sides with the given \"pad\" value.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n padding (int or tuple or list): Padding on each border. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a tuple or\n list of length 1: ``[padding, ]``.\n fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.\n Default is constant. 
Mode symmetric is not yet supported for Tensor inputs.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value at the edge of the image\n\n - reflect: pads with reflection of image without repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def __init__(self, padding, fill=0, padding_mode=\"constant\"):\n super().__init__()\n if not isinstance(padding, (numbers.Number, tuple, list)):\n raise TypeError(\"Got inappropriate padding arg\")\n\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError(\"Got inappropriate fill arg\")\n\n if padding_mode not in [\"constant\", \"edge\", \"reflect\", \"symmetric\"]:\n raise ValueError(\"Padding mode should be either constant, edge, reflect or symmetric\")\n\n if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:\n raise ValueError(\"Padding must be an int or a 1, 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n self.padding = padding\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be padded.\n\n Returns:\n PIL Image or Tensor: Padded image.\n \"\"\"\n return F.pad(img, self.padding, self.fill, self.padding_mode)\n\n def __repr__(self):\n return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\\\n format(self.padding, self.fill, self.padding_mode)\n\n\nclass Lambda(object):\n \"\"\"Apply a user-defined lambda as a transform.\n\n Args:\n lambd (function): Lambda/function to be used for transform.\n \"\"\"\n\n def __init__(self, lambd):\n assert callable(lambd), repr(type(lambd).__name__) + \" object is not callable\"\n self.lambd = lambd\n\n def __call__(self, img):\n return self.lambd(img)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass RandomTransforms(object):\n \"\"\"Base class for a list of transformations with randomness\n\n Args:\n transforms (list or tuple): list of transformations\n \"\"\"\n\n def __init__(self, transforms):\n assert isinstance(transforms, (list, tuple))\n self.transforms = transforms\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError()\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomApply(RandomTransforms):\n \"\"\"Apply randomly a list of transformations with a given probability\n\n Args:\n transforms (list or tuple): list of transformations\n p (float): probability\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n super(RandomApply, self).__init__(transforms)\n self.p = p\n\n def __call__(self, img):\n if self.p < random.random():\n return img\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += '\\n p={}'.format(self.p)\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomOrder(RandomTransforms):\n \"\"\"Apply a list of transformations in a random 
order\n \"\"\"\n def __call__(self, img):\n order = list(range(len(self.transforms)))\n random.shuffle(order)\n for i in order:\n img = self.transforms[i](img)\n return img\n\n\nclass RandomChoice(RandomTransforms):\n \"\"\"Apply single transformation randomly picked from a list\n \"\"\"\n def __call__(self, img):\n t = random.choice(self.transforms)\n return t(img)\n\n\nclass RandomCrop(torch.nn.Module):\n \"\"\"Crop the given image at a random location.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n padding (int or sequence, optional): Optional padding on each border\n of the image. Default is None. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a tuple or\n list of length 1: ``[padding, ]``.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception. Since cropping is done\n after padding, the padding seems to be done at a random offset.\n fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. 
Default is constant.\n Mode symmetric is not yet supported for Tensor inputs.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n \"\"\"\n\n @staticmethod\n def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.\n \"\"\"\n w, h = F._get_image_size(img)\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = torch.randint(0, h - th + 1, size=(1, )).item()\n j = torch.randint(0, w - tw + 1, size=(1, )).item()\n return i, j, th, tw\n\n def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode=\"constant\"):\n super().__init__()\n\n self.size = tuple(_setup_size(\n size, error_msg=\"Please provide only two dimensions (h, w) for size.\"\n ))\n\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n if self.padding is not None:\n img = F.pad(img, self.padding, self.fill, self.padding_mode)\n\n width, height = F._get_image_size(img)\n # pad the width if needed\n if self.pad_if_needed and width < self.size[1]:\n padding = [self.size[1] - width, 0]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n # pad the height if needed\n if self.pad_if_needed and height < self.size[0]:\n padding = [0, self.size[0] - height]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n\n i, j, h, w = self.get_params(img, self.size)\n\n return F.crop(img, i, j, h, w)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(size={0}, padding={1})\".format(self.size, self.padding)\n\n\nclass RandomHorizontalFlip(torch.nn.Module):\n \"\"\"Horizontally flip the given image randomly with a given probability.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.hflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomVerticalFlip(torch.nn.Module):\n \"\"\"Vertically flip the given image randomly with a given probability.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... 
means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.vflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomPerspective(torch.nn.Module):\n \"\"\"Performs a random perspective transformation of the given image with a given probability.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.\n Default is 0.5.\n p (float): probability of the image being transformed. Default is 0.5.\n interpolation (int): Interpolation type. If input is Tensor, only ``PIL.Image.NEAREST`` and\n ``PIL.Image.BILINEAR`` are supported. Default, ``PIL.Image.BILINEAR`` for PIL images and Tensors.\n fill (n-tuple or int or float): Pixel fill value for area outside the rotated\n image. If int or float, the value is used for all bands respectively. Default is 0.\n This option is only available for ``pillow>=5.0.0``. This option is not supported for Tensor\n input. Fill value for the area outside the transform in the output image is always 0.\n\n \"\"\"\n\n def __init__(self, distortion_scale=0.5, p=0.5, interpolation=Image.BILINEAR, fill=0):\n super().__init__()\n self.p = p\n self.interpolation = interpolation\n self.distortion_scale = distortion_scale\n self.fill = fill\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be Perspectively transformed.\n\n Returns:\n PIL Image or Tensor: Randomly transformed image.\n \"\"\"\n if torch.rand(1) < self.p:\n width, height = F._get_image_size(img)\n startpoints, endpoints = self.get_params(width, height, self.distortion_scale)\n return F.perspective(img, startpoints, endpoints, self.interpolation, self.fill)\n return img\n\n @staticmethod\n def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]:\n \"\"\"Get parameters for ``perspective`` for a random perspective transform.\n\n Args:\n width (int): width of the image.\n height (int): height of the image.\n distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.\n\n Returns:\n List containing [top-left, top-right, bottom-right, bottom-left] of the original image,\n List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.\n \"\"\"\n half_height = height // 2\n half_width = width // 2\n topleft = [\n int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),\n int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())\n ]\n topright = [\n int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),\n int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())\n ]\n botright = [\n int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),\n int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())\n ]\n botleft = [\n int(torch.randint(0, int(distortion_scale * half_width) + 1, 
size=(1, )).item()),\n int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())\n ]\n startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]\n endpoints = [topleft, topright, botright, botleft]\n return startpoints, endpoints\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomResizedCrop(torch.nn.Module):\n \"\"\"Crop the given image to random size and aspect ratio.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a random\n aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n\n Args:\n size (int or sequence): expected output size of each edge. If size is an\n int instead of sequence like (h, w), a square output size ``(size, size)`` is\n made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n scale (tuple of float): range of size of the origin size cropped\n ratio (tuple of float): range of aspect ratio of the origin aspect ratio cropped.\n interpolation (int): Desired interpolation enum defined by `filters`_.\n Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``\n and ``PIL.Image.BICUBIC`` are supported.\n \"\"\"\n\n def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n if not isinstance(scale, Sequence):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, Sequence):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n\n self.interpolation = interpolation\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(\n img: Tensor, scale: List[float], ratio: List[float]\n ) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image or Tensor): Input image.\n scale (list): range of scale of the origin size cropped\n ratio (list): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n width, height = F._get_image_size(img)\n area = height * width\n\n for _ in range(10):\n target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n log_ratio = torch.log(torch.tensor(ratio))\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = torch.randint(0, height - h + 1, size=(1,)).item()\n j = torch.randint(0, width - w + 1, size=(1,)).item()\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = float(width) / float(height)\n if in_ratio < min(ratio):\n w = width\n h = int(round(w / min(ratio)))\n elif in_ratio > max(ratio):\n h = height\n w = int(round(h * max(ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n return i, j, h, 
w\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped and resized.\n\n Returns:\n PIL Image or Tensor: Randomly cropped and resized image.\n \"\"\"\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n format_string = self.__class__.__name__ + '(size={0}'.format(self.size)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0})'.format(interpolate_str)\n return format_string\n\n\nclass RandomSizedCrop(RandomResizedCrop):\n \"\"\"\n Note: This transform is deprecated in favor of RandomResizedCrop.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.RandomSizedCrop transform is deprecated, \" +\n \"please use transforms.RandomResizedCrop instead.\")\n super(RandomSizedCrop, self).__init__(*args, **kwargs)\n\n\nclass FiveCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an ``int``\n instead of sequence like (h, w), a square crop of size (size, size) is made.\n If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n\n Example:\n >>> transform = Compose([\n >>> FiveCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 5 images. Image can be PIL Image or Tensor\n \"\"\"\n return F.five_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass TenCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop plus the flipped version of\n these (horizontal flipping is used by default).\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. 
If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n vertical_flip (bool): Use vertical flipping instead of horizontal\n\n Example:\n >>> transform = Compose([\n >>> TenCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size, vertical_flip=False):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n self.vertical_flip = vertical_flip\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 10 images. Image can be PIL Image or Tensor\n \"\"\"\n return F.ten_crop(img, self.size, self.vertical_flip)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)\n\n\nclass LinearTransformation(object):\n \"\"\"Transform a tensor image with a square transformation matrix and a mean_vector computed\n offline.\n Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and\n subtract mean_vector from it which is then followed by computing the dot\n product with the transformation matrix and then reshaping the tensor to its\n original shape.\n\n Applications:\n whitening transformation: Suppose X is a column vector zero-centered data.\n Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),\n perform SVD on this matrix and pass it as transformation_matrix.\n\n Args:\n transformation_matrix (Tensor): tensor [D x D], D = C x H x W\n mean_vector (Tensor): tensor [D], D = C x H x W\n \"\"\"\n\n def __init__(self, transformation_matrix, mean_vector):\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\"transformation_matrix should be square. 
Got \" +\n                             \"[{} x {}] rectangular matrix.\".format(*transformation_matrix.size()))\n\n        if mean_vector.size(0) != transformation_matrix.size(0):\n            raise ValueError(\"mean_vector should have the same length {}\".format(mean_vector.size(0)) +\n                             \" as any one of the dimensions of the transformation_matrix [{}]\"\n                             .format(tuple(transformation_matrix.size())))\n\n        self.transformation_matrix = transformation_matrix\n        self.mean_vector = mean_vector\n\n    def __call__(self, tensor):\n        \"\"\"\n        Args:\n            tensor (Tensor): Tensor image of size (C, H, W) to be whitened.\n\n        Returns:\n            Tensor: Transformed image.\n        \"\"\"\n        if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):\n            raise ValueError(\"tensor and transformation matrix have incompatible shape.\" +\n                             \"[{} x {} x {}] != \".format(*tensor.size()) +\n                             \"{}\".format(self.transformation_matrix.size(0)))\n        flat_tensor = tensor.view(1, -1) - self.mean_vector\n        transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n        tensor = transformed_tensor.view(tensor.size())\n        return tensor\n\n    def __repr__(self):\n        format_string = self.__class__.__name__ + '(transformation_matrix='\n        format_string += (str(self.transformation_matrix.tolist()) + ')')\n        format_string += (\", (mean_vector=\" + str(self.mean_vector.tolist()) + ')')\n        return format_string\n\n\nclass ColorJitter(torch.nn.Module):\n    \"\"\"Randomly change the brightness, contrast and saturation of an image.\n\n    Args:\n        brightness (float or tuple of float (min, max)): How much to jitter brightness.\n            brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]\n            or the given [min, max]. Should be non negative numbers.\n        contrast (float or tuple of float (min, max)): How much to jitter contrast.\n            contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]\n            or the given [min, max]. Should be non negative numbers.\n        saturation (float or tuple of float (min, max)): How much to jitter saturation.\n            saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]\n            or the given [min, max]. Should be non negative numbers.\n        hue (float or tuple of float (min, max)): How much to jitter hue.\n            hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].\n            Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.\n    \"\"\"\n\n    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n        super().__init__()\n        self.brightness = self._check_input(brightness, 'brightness')\n        self.contrast = self._check_input(contrast, 'contrast')\n        self.saturation = self._check_input(saturation, 'saturation')\n        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),\n                                     clip_first_on_zero=False)\n\n    @torch.jit.unused\n    def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):\n        if isinstance(value, numbers.Number):\n            if value < 0:\n                raise ValueError(\"If {} is a single number, it must be non negative.\".format(name))\n            value = [center - float(value), center + float(value)]\n            if clip_first_on_zero:\n                value[0] = max(value[0], 0.0)\n        elif isinstance(value, (tuple, list)) and len(value) == 2:\n            if not bound[0] <= value[0] <= value[1] <= bound[1]:\n                raise ValueError(\"{} values should be between {}\".format(name, bound))\n        else:\n            raise TypeError(\"{} should be a single number or a list/tuple with length 2.\".format(name))\n\n        # if value is 0 or (1., 1.) for brightness/contrast/saturation\n        # or (0., 0.) 
for hue, do nothing\n if value[0] == value[1] == center:\n value = None\n return value\n\n @staticmethod\n @torch.jit.unused\n def get_params(brightness, contrast, saturation, hue):\n \"\"\"Get a randomized transform to be applied on image.\n\n Arguments are same as that of __init__.\n\n Returns:\n Transform which randomly adjusts brightness, contrast and\n saturation in a random order.\n \"\"\"\n transforms = []\n\n if brightness is not None:\n brightness_factor = random.uniform(brightness[0], brightness[1])\n transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))\n\n if contrast is not None:\n contrast_factor = random.uniform(contrast[0], contrast[1])\n transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))\n\n if saturation is not None:\n saturation_factor = random.uniform(saturation[0], saturation[1])\n transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))\n\n if hue is not None:\n hue_factor = random.uniform(hue[0], hue[1])\n transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))\n\n random.shuffle(transforms)\n transform = Compose(transforms)\n\n return transform\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Input image.\n\n Returns:\n PIL Image or Tensor: Color jittered image.\n \"\"\"\n fn_idx = torch.randperm(4)\n for fn_id in fn_idx:\n if fn_id == 0 and self.brightness is not None:\n brightness = self.brightness\n brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()\n img = F.adjust_brightness(img, brightness_factor)\n\n if fn_id == 1 and self.contrast is not None:\n contrast = self.contrast\n contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()\n img = F.adjust_contrast(img, contrast_factor)\n\n if fn_id == 2 and self.saturation is not None:\n saturation = self.saturation\n saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()\n img = F.adjust_saturation(img, saturation_factor)\n\n if fn_id == 3 and self.hue is not None:\n hue = self.hue\n hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()\n img = F.adjust_hue(img, hue_factor)\n\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += 'brightness={0}'.format(self.brightness)\n format_string += ', contrast={0}'.format(self.contrast)\n format_string += ', saturation={0}'.format(self.saturation)\n format_string += ', hue={0})'.format(self.hue)\n return format_string\n\n\nclass RandomRotation(torch.nn.Module):\n \"\"\"Rotate the image by angle.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees).\n resample (int, optional): An optional resampling filter. 
See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to PIL.Image.NEAREST.\n If input is Tensor, only ``PIL.Image.NEAREST`` and ``PIL.Image.BILINEAR`` are supported.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (list or tuple, optional): Optional center of rotation, (x, y). Origin is the upper left corner.\n Default is the center of the image.\n fill (n-tuple or int or float): Pixel fill value for area outside the rotated\n image. If int or float, the value is used for all bands respectively.\n Defaults to 0 for all bands. This option is only available for Pillow>=5.2.0.\n This option is not supported for Tensor input. Fill value for the area outside the transform in the output\n image is always 0.\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(self, degrees, resample=False, expand=False, center=None, fill=None):\n super().__init__()\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2, ))\n\n if center is not None:\n _check_sequence_input(center, \"center\", req_sizes=(2, ))\n\n self.center = center\n\n self.resample = resample\n self.expand = expand\n self.fill = fill\n\n @staticmethod\n def get_params(degrees: List[float]) -> float:\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n float: angle parameter to be passed to ``rotate`` for random rotation.\n \"\"\"\n angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())\n return angle\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be rotated.\n\n Returns:\n PIL Image or Tensor: Rotated image.\n \"\"\"\n angle = self.get_params(self.degrees)\n return F.rotate(img, angle, self.resample, self.expand, self.center, self.fill)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)\n format_string += ', resample={0}'.format(self.resample)\n format_string += ', expand={0}'.format(self.expand)\n if self.center is not None:\n format_string += ', center={0}'.format(self.center)\n if self.fill is not None:\n format_string += ', fill={0}'.format(self.fill)\n format_string += ')'\n return format_string\n\n\nclass RandomAffine(torch.nn.Module):\n \"\"\"Random affine transformation of the image keeping center invariant.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Set to 0 to deactivate rotations.\n translate (tuple, optional): tuple of maximum absolute fraction for horizontal\n and vertical translations. For example translate=(a, b), then horizontal shift\n is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is\n randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.\n scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is\n randomly sampled from the range a <= scale <= b. 
Will keep original scale by default.\n shear (sequence or float or int, optional): Range of degrees to select from.\n If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)\n will be applied. Else if shear is a tuple or list of 2 values a shear parallel to the x axis in the\n range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values,\n a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.\n Will not apply shear by default.\n resample (int, optional): An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to ``PIL.Image.NEAREST``.\n If input is Tensor, only ``PIL.Image.NEAREST`` and ``PIL.Image.BILINEAR`` are supported.\n fillcolor (tuple or int): Optional fill color (Tuple for RGB Image and int for grayscale) for the area\n outside the transform in the output image (Pillow>=5.0.0). This option is not supported for Tensor\n input. Fill value for the area outside the transform in the output image is always 0.\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(self, degrees, translate=None, scale=None, shear=None, resample=0, fillcolor=0):\n super().__init__()\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2, ))\n\n if translate is not None:\n _check_sequence_input(translate, \"translate\", req_sizes=(2, ))\n for t in translate:\n if not (0.0 <= t <= 1.0):\n raise ValueError(\"translation values should be between 0 and 1\")\n self.translate = translate\n\n if scale is not None:\n _check_sequence_input(scale, \"scale\", req_sizes=(2, ))\n for s in scale:\n if s <= 0:\n raise ValueError(\"scale values should be positive\")\n self.scale = scale\n\n if shear is not None:\n self.shear = _setup_angle(shear, name=\"shear\", req_sizes=(2, 4))\n else:\n self.shear = shear\n\n self.resample = resample\n self.fillcolor = fillcolor\n\n @staticmethod\n def get_params(\n degrees: List[float],\n translate: Optional[List[float]],\n scale_ranges: Optional[List[float]],\n shears: Optional[List[float]],\n img_size: List[int]\n ) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]:\n \"\"\"Get parameters for affine transformation\n\n Returns:\n params to be passed to the affine transformation\n \"\"\"\n angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())\n if translate is not None:\n max_dx = float(translate[0] * img_size[0])\n max_dy = float(translate[1] * img_size[1])\n tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))\n ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))\n translations = (tx, ty)\n else:\n translations = (0, 0)\n\n if scale_ranges is not None:\n scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item())\n else:\n scale = 1.0\n\n shear_x = shear_y = 0.0\n if shears is not None:\n shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item())\n if len(shears) == 4:\n shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item())\n\n shear = (shear_x, shear_y)\n\n return angle, translations, scale, shear\n\n def forward(self, img):\n \"\"\"\n img (PIL Image or Tensor): Image to be transformed.\n\n Returns:\n PIL Image or Tensor: Affine transformed image.\n \"\"\"\n\n img_size = F._get_image_size(img)\n\n ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)\n return F.affine(img, *ret, 
resample=self.resample, fillcolor=self.fillcolor)\n\n def __repr__(self):\n s = '{name}(degrees={degrees}'\n if self.translate is not None:\n s += ', translate={translate}'\n if self.scale is not None:\n s += ', scale={scale}'\n if self.shear is not None:\n s += ', shear={shear}'\n if self.resample > 0:\n s += ', resample={resample}'\n if self.fillcolor != 0:\n s += ', fillcolor={fillcolor}'\n s += ')'\n d = dict(self.__dict__)\n d['resample'] = _pil_interpolation_to_str[d['resample']]\n return s.format(name=self.__class__.__name__, **d)\n\n\nclass Grayscale(torch.nn.Module):\n \"\"\"Convert image to grayscale.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., 3, H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n num_output_channels (int): (1 or 3) number of channels desired for output image\n\n Returns:\n PIL Image: Grayscale version of the input.\n - If ``num_output_channels == 1`` : returned image is single channel\n - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, num_output_channels=1):\n super().__init__()\n self.num_output_channels = num_output_channels\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Grayscaled image.\n \"\"\"\n return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)\n\n def __repr__(self):\n return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)\n\n\nclass RandomGrayscale(torch.nn.Module):\n \"\"\"Randomly convert image to grayscale with a probability of p (default 0.1).\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., 3, H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability that image should be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged\n with probability (1-p).\n - If input image is 1 channel: grayscale version is 1 channel\n - If input image is 3 channel: grayscale version is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, p=0.1):\n super().__init__()\n self.p = p\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Randomly grayscaled image.\n \"\"\"\n num_output_channels = F._get_image_num_channels(img)\n if torch.rand(1) < self.p:\n return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={0})'.format(self.p)\n\n\nclass RandomErasing(torch.nn.Module):\n \"\"\" Randomly selects a rectangle region in an image and erases its pixels.\n 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/pdf/1708.04896.pdf\n\n Args:\n p: probability that the random erasing operation will be performed.\n scale: range of proportion of erased area against input image.\n ratio: range of aspect ratio of erased area.\n value: erasing value. Default is 0. If a single int, it is used to\n erase all pixels. If a tuple of length 3, it is used to erase\n R, G, B channels respectively.\n If a str of 'random', erasing each pixel with random values.\n inplace: boolean to make this transform inplace. 
Default set to False.\n\n Returns:\n Erased Image.\n\n # Examples:\n >>> transform = transforms.Compose([\n >>> transforms.RandomHorizontalFlip(),\n >>> transforms.ToTensor(),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> transforms.RandomErasing(),\n >>> ])\n \"\"\"\n\n def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):\n super().__init__()\n if not isinstance(value, (numbers.Number, str, tuple, list)):\n raise TypeError(\"Argument value should be either a number or str or a sequence\")\n if isinstance(value, str) and value != \"random\":\n raise ValueError(\"If value is str, it should be 'random'\")\n if not isinstance(scale, (tuple, list)):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, (tuple, list)):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n if scale[0] < 0 or scale[1] > 1:\n raise ValueError(\"Scale should be between 0 and 1\")\n if p < 0 or p > 1:\n raise ValueError(\"Random erasing probability should be between 0 and 1\")\n\n self.p = p\n self.scale = scale\n self.ratio = ratio\n self.value = value\n self.inplace = inplace\n\n @staticmethod\n def get_params(\n img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None\n ) -> Tuple[int, int, int, int, Tensor]:\n \"\"\"Get parameters for ``erase`` for a random erasing.\n\n Args:\n img (Tensor): Tensor image of size (C, H, W) to be erased.\n scale (tuple or list): range of proportion of erased area against input image.\n ratio (tuple or list): range of aspect ratio of erased area.\n value (list, optional): erasing value. If None, it is interpreted as \"random\"\n (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,\n i.e. 
``value[0]``.\n\n Returns:\n tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.\n \"\"\"\n img_c, img_h, img_w = img.shape\n area = img_h * img_w\n\n for _ in range(10):\n erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.empty(1).uniform_(ratio[0], ratio[1]).item()\n\n h = int(round(math.sqrt(erase_area * aspect_ratio)))\n w = int(round(math.sqrt(erase_area / aspect_ratio)))\n if not (h < img_h and w < img_w):\n continue\n\n if value is None:\n v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()\n else:\n v = torch.tensor(value)[:, None, None]\n\n i = torch.randint(0, img_h - h + 1, size=(1, )).item()\n j = torch.randint(0, img_w - w + 1, size=(1, )).item()\n return i, j, h, w, v\n\n # Return original image\n return 0, 0, img_h, img_w, img\n\n def forward(self, img):\n \"\"\"\n Args:\n img (Tensor): Tensor image of size (C, H, W) to be erased.\n\n Returns:\n img (Tensor): Erased Tensor image.\n \"\"\"\n if torch.rand(1) < self.p:\n\n # cast self.value to script acceptable type\n if isinstance(self.value, (int, float)):\n value = [self.value, ]\n elif isinstance(self.value, str):\n value = None\n elif isinstance(self.value, tuple):\n value = list(self.value)\n else:\n value = self.value\n\n if value is not None and not (len(value) in (1, img.shape[-3])):\n raise ValueError(\n \"If value is a sequence, it should have either a single value or \"\n \"{} (number of input channels)\".format(img.shape[-3])\n )\n\n x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)\n return F.erase(img, x, y, h, w, v, self.inplace)\n return img\n\n\ndef _setup_size(size, error_msg):\n if isinstance(size, numbers.Number):\n return int(size), int(size)\n\n if isinstance(size, Sequence) and len(size) == 1:\n return size[0], size[0]\n\n if len(size) != 2:\n raise ValueError(error_msg)\n\n return size\n\n\ndef _check_sequence_input(x, name, req_sizes):\n msg = req_sizes[0] if len(req_sizes) < 2 else \" or \".join([str(s) for s in req_sizes])\n if not isinstance(x, Sequence):\n raise TypeError(\"{} should be a sequence of length {}.\".format(name, msg))\n if len(x) not in req_sizes:\n raise ValueError(\"{} should be sequence of length {}.\".format(name, msg))\n\n\ndef _setup_angle(x, name, req_sizes=(2, )):\n if isinstance(x, numbers.Number):\n if x < 0:\n raise ValueError(\"If {} is a single number, it must be positive.\".format(name))\n x = [-x, x]\n else:\n _check_sequence_input(x, name, req_sizes)\n\n return [float(d) for d in x]\n" ]
[ [ "torch.mm", "torch.randint", "torch.empty", "torch.randperm", "torch.tensor", "torch.rand" ] ]
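A minimal usage sketch for the transforms module recorded above. This assumes a torchvision installation matching the listed file; the 256x256 placeholder image and the ImageNet-style normalization constants are illustrative choices, not values taken from the record.

from PIL import Image
import torchvision.transforms as T

# Placeholder input; a real pipeline would load an image from disk.
img = Image.new("RGB", (256, 256))

pipeline = T.Compose([
    T.Resize(224),                  # match the smaller edge to 224, keep aspect ratio
    T.RandomCrop(192),              # take a random 192x192 crop
    T.RandomHorizontalFlip(p=0.5),  # flip left-right with probability 0.5
    T.ToTensor(),                   # PIL [0, 255] -> float tensor in [0, 1], C x H x W
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

out = pipeline(img)
print(out.shape)  # torch.Size([3, 192, 192])

Compose simply chains the callables in order, so the same pipeline applies unchanged to any PIL input at least 192x192 in size.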
Luger-Lab/Q-FADD
[ "5407178513837fd1804397bba0708e6300c21626" ]
[ "src/qfadd_distribution.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nimport argparse\nimport matplotlib\nmatplotlib.use('PDF')\nimport matplotlib.pyplot as plt\nfrom scipy.stats import t\n\n#define p-value function\ndef ttest(avg1,sem1,n1,avg2,sem2,n2):\n    sed = np.sqrt(np.power(sem1,2) + np.power(sem2,2))\n    t_stat = np.divide(np.subtract(avg1,avg2),sed)\n    df = n1+n2 - 2\n    p = (1.0 - t.cdf(np.abs(t_stat),df))*2.0\n    return p\n\n#Build input parser\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-flist\",help='Space delimited list of text files, each listing files to combine per population',type=str,default='')\nparser.add_argument(\"-dlist\",help='Space delimited list of distribution files for comparing different populations of Q-FADD results (Ex: \"-dlist PARP1.txt PARP2.txt HPF1.txt\")',type=str,default='')\nparser.add_argument(\"-labels\",help='Labels for violin plot populations, space delimited. (Ex: \"-labels PARP1 PARP2 HPF1\")',type=str,default='')\nparser.add_argument(\"-o\",help='Output prefix for violin plot and text file generation',type=str,default='qfadd_distribution')\nargs = parser.parse_args()\n\n#Determine if analyzing list of QFADD outputs or list of QFADD distributions\nif args.flist!='':\n    inlist = args.flist.split(\" \")\n    if args.labels != '':\n        labels = args.labels.split(\" \")\n    else:\n        labels = inlist\nelse:\n    print('ERROR: You must provide at least one filelist!')\n    quit()\n\n#If list of qFADD results is provided, build a distribution of D and F values\n#(store one array per population so the statistics loop below can index them)\nall_d = []\nall_f = []\nfor idx in range(len(inlist)):\n    filelist = np.genfromtxt(inlist[idx],dtype=str)\n    d_list = np.array([],dtype=float)\n    f_list = np.array([],dtype=float)\n    if args.labels != '':\n        write_files = True\n        ofile = open(args.o+\"_\"+labels[idx]+\"_best_fit_models.dat\",'w')\n        ofile.write(\"#model file, D (um^2/s), Mobile Fraction (ppt),r^2\\n\")\n    else:\n        write_files = False\n    for FILE in filelist:\n        d,dconv,f,r2,rmsd,dum = np.genfromtxt(FILE,delimiter=',',dtype=float,unpack=True)\n        d_list = np.append(d_list,dconv[0])\n        f_list = np.append(f_list,f[0])\n        if write_files:\n            ofile.write(FILE+\",\"+str(dconv[0])+\",\"+str(f[0])+\",\"+str(r2[0])+\"\\n\")\n    if write_files:\n        ofile.close()\n        print(\"Cumulative Q-FADD results stored in \"+args.o+\"_\"+labels[idx]+\"_best_fit_models.dat\")\n    all_d.append(d_list)\n    all_f.append(f_list)\n\n    plt.figure(figsize=(3.,3.))\n    ax1 = plt.subplot(111)\n    ax1.violinplot(d_list,[0],showmeans=True)\n    plt.ylabel(r'$\\\rm{D_{eff} (\\\mu m^{2}/s)}$')\n    plt.xticks([],[])\n    plt.tight_layout()\n    plt.savefig(args.o+\"_Deff_Violin.pdf\",format='pdf')\n    plt.close('all')\n\n    plt.figure(figsize=(3.,3.))\n    ax1 = plt.subplot(111)\n    ax1.violinplot(f_list,[0],showmeans=True)\n    plt.ylabel('Mobile Fraction (ppt)')\n    plt.xticks([],[])\n    plt.tight_layout()\n    plt.savefig(args.o+\"_MobileFraction_Violin.pdf\",format='pdf')\n    plt.close('all')\n\n\n#Calculate statistics\nfor idx2 in range(len(labels)):\n    d1 = all_d[idx2]\n    f1 = all_f[idx2]\n    n1 = len(d1)\n    d1avg = np.average(d1)\n    d1std = np.std(d1)\n    d1sem = d1std/np.sqrt(n1)\n\n    f1avg = np.average(f1)\n    f1std = np.std(f1)\n    f1sem = f1std/np.sqrt(n1)\n\n    for idx3 in range(idx2+1,len(labels)):\n        d2 = all_d[idx3]\n        f2 = all_f[idx3]\n        n2 = len(d2)\n        d2avg = np.average(d2)\n        d2std = np.std(d2)\n        d2sem = d2std/np.sqrt(n2)\n\n        f2avg = np.average(f2)\n        f2std = np.std(f2)\n        f2sem = f2std/np.sqrt(n2)\n\n        pstatd= ttest(d1avg,d1sem,n1,d2avg,d2sem,n2)\n        pstatf= ttest(f1avg,f1sem,n1,f2avg,f2sem,n2)\n        print(\"Comparing \"+labels[idx2]+\" with \"+labels[idx3]+\":\")\n        print(\"\\t\"+labels[idx2]+\", Deff: \"+str(np.around(d1avg,decimals=3))+\" +/- \"+str(np.around(d1sem,decimals=3)))\n        print(\"\\t\"+labels[idx3]+\", Deff: \"+str(np.around(d2avg,decimals=3))+\" +/- \"+str(np.around(d2sem,decimals=3)))\n        print(\"\\tp-value: \"+str(np.around(pstatd,decimals=5))+\"\\n\")\n        print(\"\\t\"+labels[idx2]+\", Mob. Frac.: \"+str(np.around(f1avg,decimals=3))+\" +/- \"+str(np.around(f1sem,decimals=3)))\n        print(\"\\t\"+labels[idx3]+\", Mob. Frac.: \"+str(np.around(f2avg,decimals=3))+\" +/- \"+str(np.around(f2sem,decimals=3)))\n        print(\"\\tp-value: \"+str(np.around(pstatf,decimals=5))+\"\\n\")\n" ]
[ [ "matplotlib.pyplot.tight_layout", "numpy.sqrt", "numpy.abs", "numpy.power", "matplotlib.use", "numpy.around", "numpy.subtract", "matplotlib.pyplot.savefig", "numpy.genfromtxt", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "numpy.std", "numpy.append", "matplotlib.pyplot.close", "numpy.average", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.figure" ] ]
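The ttest helper in qfadd_distribution.py above derives a two-tailed p-value from two population means and their standard errors. A standalone sanity check of the same arithmetic, with made-up summary numbers (the inputs below are illustrative, not Q-FADD data):

import numpy as np
from scipy.stats import t

def ttest(avg1, sem1, n1, avg2, sem2, n2):
    sed = np.sqrt(sem1**2 + sem2**2)          # standard error of the difference
    t_stat = (avg1 - avg2) / sed              # t statistic for the mean difference
    df = n1 + n2 - 2                          # pooled degrees of freedom
    return (1.0 - t.cdf(np.abs(t_stat), df)) * 2.0  # two-tailed p-value

# Hypothetical D_eff summaries (um^2/s) for two populations of cells.
print(ttest(1.20, 0.05, 30, 1.05, 0.04, 28))  # ~0.02 for these inputs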
dj-khandelwal/lux
[ "9ade6dde756e55a1f281a82f8091b71ee051f8af" ]
[ "tests/test_config.py" ]
[ "# Copyright 2019-2020 The Lux Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .context import lux\nimport pytest\nimport pandas as pd\nimport time\nfrom lux.vis.VisList import VisList\nimport lux\n\n\ndef register_new_action(validator: bool = True):\n df = pd.read_csv(\"lux/data/car.csv\")\n\n def random_categorical(ldf):\n intent = [lux.Clause(\"?\", data_type=\"nominal\")]\n vlist = VisList(intent, ldf)\n for vis in vlist:\n vis.score = 10\n vlist.sort()\n vlist = vlist.showK()\n return {\n \"action\": \"bars\",\n \"description\": \"Random list of Bar charts\",\n \"collection\": vlist,\n }\n\n def contain_horsepower(df):\n for clause in df.intent:\n if clause.get_attr() == \"Horsepower\":\n return True\n return False\n\n if validator:\n lux.config.register_action(\"bars\", random_categorical, contain_horsepower)\n else:\n lux.config.register_action(\"bars\", random_categorical)\n return df\n\n\ndef test_default_actions_registered(global_var):\n df = pytest.car_df\n df._repr_html_()\n assert \"Distribution\" in df.recommendation\n assert len(df.recommendation[\"Distribution\"]) > 0\n\n assert \"Occurrence\" in df.recommendation\n assert len(df.recommendation[\"Occurrence\"]) > 0\n\n assert \"Temporal\" in df.recommendation\n assert len(df.recommendation[\"Temporal\"]) > 0\n\n assert \"Correlation\" in df.recommendation\n assert len(df.recommendation[\"Correlation\"]) > 0\n\n\ndef test_fail_validator():\n df = register_new_action()\n df._repr_html_()\n assert (\n \"bars\" not in df.recommendation,\n \"Bars should not be rendered when there is no intent 'horsepower' specified.\",\n )\n\n\ndef test_pass_validator():\n df = register_new_action()\n df.set_intent([\"Acceleration\", \"Horsepower\"])\n df._repr_html_()\n assert len(df.recommendation[\"bars\"]) > 0\n assert (\n \"bars\" in df.recommendation,\n \"Bars should be rendered when intent 'horsepower' is specified.\",\n )\n\n\ndef test_no_validator():\n df = register_new_action(False)\n df._repr_html_()\n assert len(df.recommendation[\"bars\"]) > 0\n assert \"bars\" in df.recommendation\n\n\ndef test_invalid_function(global_var):\n df = pd.read_csv(\"lux/data/car.csv\")\n with pytest.raises(ValueError, match=\"Action must be a callable\"):\n lux.config.register_action(\"bars\", \"not a Callable\")\n\n\ndef test_invalid_validator(global_var):\n df = pd.read_csv(\"lux/data/car.csv\")\n\n def random_categorical(ldf):\n intent = [lux.Clause(\"?\", data_type=\"nominal\")]\n vlist = VisList(intent, ldf)\n for vis in vlist:\n vis.score = 10\n vlist.sort()\n vlist = vlist.showK()\n return {\n \"action\": \"bars\",\n \"description\": \"Random list of Bar charts\",\n \"collection\": vlist,\n }\n\n with pytest.raises(ValueError, match=\"Display condition must be a callable\"):\n lux.config.register_action(\"bars\", random_categorical, \"not a Callable\")\n\n\ndef test_remove_action():\n df = register_new_action()\n df.set_intent([\"Acceleration\", \"Horsepower\"])\n df._repr_html_()\n assert (\n \"bars\" in df.recommendation,\n 
\"Bars should be rendered after it has been registered with correct intent.\",\n )\n assert (\n len(df.recommendation[\"bars\"]) > 0,\n \"Bars should be rendered after it has been registered with correct intent.\",\n )\n lux.config.remove_action(\"bars\")\n df._repr_html_()\n assert (\n \"bars\" not in df.recommendation,\n \"Bars should not be rendered after it has been removed.\",\n )\n df.clear_intent()\n\n\ndef test_remove_invalid_action(global_var):\n df = pd.read_csv(\"lux/data/car.csv\")\n with pytest.raises(ValueError, match=\"Option 'bars' has not been registered\"):\n lux.config.remove_action(\"bars\")\n\n\n# TODO: This test does not pass in pytest but is working in Jupyter notebook.\ndef test_remove_default_actions(global_var):\n df = pytest.car_df\n df._repr_html_()\n\n lux.config.remove_action(\"distribution\")\n df._repr_html_()\n assert \"Distribution\" not in df.recommendation\n\n lux.config.remove_action(\"occurrence\")\n df._repr_html_()\n assert \"Occurrence\" not in df.recommendation\n\n lux.config.remove_action(\"temporal\")\n df._repr_html_()\n assert \"Temporal\" not in df.recommendation\n\n lux.config.remove_action(\"correlation\")\n df._repr_html_()\n assert \"Correlation\" not in df.recommendation\n\n assert (\n len(df.recommendation) == 0,\n \"Default actions should not be rendered after it has been removed.\",\n )\n\n df = register_new_action()\n df.set_intent([\"Acceleration\", \"Horsepower\"])\n df._repr_html_()\n assert (\n \"bars\" in df.recommendation,\n \"Bars should be rendered after it has been registered with correct intent.\",\n )\n assert len(df.recommendation[\"bars\"]) > 0\n df.clear_intent()\n\n from lux.action.default import register_default_actions\n\n register_default_actions()\n\n\ndef test_matplotlib_set_default_plot_config():\n lux.config.plotting_backend = \"matplotlib\"\n\n def add_title(fig, ax):\n ax.set_title(\"Test Title\")\n return fig, ax\n\n df = pd.read_csv(\"lux/data/car.csv\")\n lux.config.plot_config = add_title\n df._repr_html_()\n title_addition = 'ax.set_title(\"Test Title\")'\n exported_code_str = df.recommendation[\"Correlation\"][0].to_Altair()\n assert title_addition in exported_code_str\n\n\ndef test_set_default_plot_config():\n lux.config.plotting_backend = \"vegalite\"\n\n def change_color_make_transparent_add_title(chart):\n chart = chart.configure_mark(color=\"green\", opacity=0.2)\n chart.title = \"Test Title\"\n return chart\n\n df = pd.read_csv(\"lux/data/car.csv\")\n lux.config.plot_config = change_color_make_transparent_add_title\n df._repr_html_()\n config_mark_addition = 'chart = chart.configure_mark(color=\"green\", opacity=0.2)'\n title_addition = 'chart.title = \"Test Title\"'\n exported_code_str = df.recommendation[\"Correlation\"][0].to_Altair()\n assert config_mark_addition in exported_code_str\n assert title_addition in exported_code_str\n\n\ndef test_sampling_flag_config():\n df = pd.read_csv(\"https://raw.githubusercontent.com/lux-org/lux-datasets/master/data/airbnb_nyc.csv\")\n df._repr_html_()\n assert df.recommendation[\"Correlation\"][0].data.shape[0] == 30000\n lux.config.sampling = False\n df = df.copy()\n df._repr_html_()\n assert df.recommendation[\"Correlation\"][0].data.shape[0] == 48895\n lux.config.sampling = True\n\n\ndef test_sampling_parameters_config():\n df = pd.read_csv(\"lux/data/car.csv\")\n df._repr_html_()\n assert df.recommendation[\"Correlation\"][0].data.shape[0] == 392\n lux.config.sampling_start = 50\n lux.config.sampling_cap = 100\n df = pd.read_csv(\"lux/data/car.csv\")\n 
df._repr_html_()\n assert df.recommendation[\"Correlation\"][0].data.shape[0] == 100\n lux.config.sampling_cap = 30000\n lux.config.sampling_start = 10000\n\n\ndef test_heatmap_flag_config():\n df = pd.read_csv(\"https://raw.githubusercontent.com/lux-org/lux-datasets/master/data/airbnb_nyc.csv\")\n df._repr_html_()\n assert df.recommendation[\"Correlation\"][0]._postbin\n lux.config.heatmap = False\n df = pd.read_csv(\"https://raw.githubusercontent.com/lux-org/lux-datasets/master/data/airbnb_nyc.csv\")\n df._repr_html_()\n assert not df.recommendation[\"Correlation\"][0]._postbin\n lux.config.heatmap = True\n\n\ndef test_topk(global_var):\n df = pd.read_csv(\"lux/data/college.csv\")\n lux.config.topk = False\n df._repr_html_()\n assert len(df.recommendation[\"Correlation\"]) == 45, \"Turn off top K\"\n lux.config.topk = 20\n df = pd.read_csv(\"lux/data/college.csv\")\n df._repr_html_()\n assert len(df.recommendation[\"Correlation\"]) == 20, \"Show top 20\"\n for vis in df.recommendation[\"Correlation\"]:\n assert vis.score > 0.2\n\n\ndef test_sort(global_var):\n df = pd.read_csv(\"lux/data/college.csv\")\n lux.config.topk = 15\n df._repr_html_()\n assert len(df.recommendation[\"Correlation\"]) == 15, \"Show top 15\"\n for vis in df.recommendation[\"Correlation\"]:\n assert vis.score > 0.5\n df = pd.read_csv(\"lux/data/college.csv\")\n lux.config.sort = \"ascending\"\n df._repr_html_()\n assert len(df.recommendation[\"Correlation\"]) == 15, \"Show bottom 15\"\n for vis in df.recommendation[\"Correlation\"]:\n assert vis.score < 0.35\n\n lux.config.sort = \"none\"\n df = pd.read_csv(\"lux/data/college.csv\")\n df._repr_html_()\n scorelst = [x.score for x in df.recommendation[\"Distribution\"]]\n assert sorted(scorelst) != scorelst, \"unsorted setting\"\n lux.config.sort = \"descending\"\n\n\n# TODO: This test does not pass in pytest but is working in Jupyter notebook.\n# def test_plot_setting(global_var):\n# \tdf = pytest.car_df\n# \tdf[\"Year\"] = pd.to_datetime(df[\"Year\"], format='%Y')\n# \tdef change_color_add_title(chart):\n# \t\tchart = chart.configure_mark(color=\"green\") # change mark color to green\n# \t\tchart.title = \"Custom Title\" # add title to chart\n# \t\treturn chart\n\n# \tdf.plot_config = change_color_add_title\n\n# \tdf._repr_html_()\n\n# \tvis_code = df.recommendation[\"Correlation\"][0].to_Altair()\n# \tprint (vis_code)\n# \tassert 'chart = chart.configure_mark(color=\"green\")' in vis_code, \"Exported chart does not have additional plot style setting.\"\n" ]
[ [ "pandas.read_csv" ] ]
philippjfr/datashader
[ "eb9218cb810297aea2ae1030349cef6a6f3ab3cb" ]
[ "datashader/tests/benchmarks/test_extend_line.py" ]
[ "import pytest\n\nimport numpy as np\n\nfrom datashader.glyphs import _build_draw_line, _build_extend_line, _build_map_onto_pixel\nfrom datashader.utils import ngjit\n\n\[email protected]\ndef extend_line():\n @ngjit\n def append(i, x, y, agg):\n agg[y, x] += 1\n\n mapper = ngjit(lambda x: x)\n map_onto_pixel = _build_map_onto_pixel(mapper, mapper)\n draw_line = _build_draw_line(append)\n return _build_extend_line(draw_line, map_onto_pixel)\n\n\[email protected]('high', [0, 10**5])\[email protected]('low', [0, -10**5])\[email protected](group=\"extend_line\")\ndef test_extend_line_uniform(benchmark, extend_line, low, high):\n n = 10**6\n vt = (1, 0, 1, 0)\n bounds = (0, 0, 10**4, 10**4)\n\n xs = np.random.uniform(bounds[0] + low, bounds[2] + high, n)\n ys = np.random.uniform(bounds[1] + low, bounds[3] + high, n)\n\n agg = np.zeros((bounds[2], bounds[3]), dtype='i4')\n benchmark(extend_line, vt, bounds, xs, ys, True, agg)\n\n\[email protected](group=\"extend_line\")\ndef test_extend_line_normal(benchmark, extend_line):\n n = 10**6\n vt = (1, 0, 1, 0)\n bounds = (0, 0, 10**4, 10**4)\n\n start = 1456297053\n end = start + 60 * 60 * 24\n xs = np.linspace(start, end, n)\n\n signal = np.random.normal(0, 0.3, size=n).cumsum() + 50\n noise = lambda var, bias, n: np.random.normal(bias, var, n)\n ys = signal + noise(1, 10*(np.random.random() - 0.5), n)\n\n agg = np.zeros((bounds[2], bounds[3]), dtype='i4')\n benchmark(extend_line, vt, bounds, xs, ys, True, agg)\n" ]
[ [ "numpy.random.random", "numpy.linspace", "numpy.random.normal", "numpy.random.uniform", "numpy.zeros" ] ]
diegoinacio/computer-vision-notebooks
[ "89040e9aaa5a3382173d10efcce11068d5a82d71" ]
[ "Computer-Vision-Fundamentals/color_model_plot.py" ]
[ "import numpy as np\n\nfrom plotly import __version__, tools\nfrom plotly.offline import init_notebook_mode, iplot\n\nfrom plotly.graph_objs import *\ninit_notebook_mode(connected=True)\n\n# ! Identity Base\niBASE = np.array([\n [0, 1, 0, 1, 0, 1, 0, 1],\n [0, 0, 1, 1, 0, 0, 1, 1],\n [0, 0, 0, 0, 1, 1, 1, 1]\n])\nbase_pairs = [\n (0, 1), (1, 3), (3, 2), (2, 0), (4, 5), (5, 7), (7, 6), (6, 4), (0, 4), (1, 5), (2, 6), (3, 7)\n]\n\ndef plotRGB(points):\n ###############################\n #Identity model visualization #\n ###############################\n # Identity point color\n ipColor = [f'rgb({int(r*255)}, {int(g*255)}, {int(b*255)})' for r, g, b in zip(*iBASE)]\n # Identity point scatter\n ipMODEL = Scatter3d(\n x=iBASE[0],\n y=iBASE[1],\n z=iBASE[2],\n mode='markers',\n hovertext=[f'iPoint {e}' for e in range(8)],\n marker={\n 'size': 8,\n 'color': ipColor\n }\n )\n # Create identity edges\n x_lines, y_lines, z_lines, c_lines = [], [], [], []\n for p in base_pairs:\n for i in range(2):\n x_lines.append(iBASE[0][p[i]])\n y_lines.append(iBASE[1][p[i]])\n z_lines.append(iBASE[2][p[i]])\n c_lines.append(ipColor[p[i]])\n x_lines.append(None)\n y_lines.append(None)\n z_lines.append(None)\n c_lines.append('rgb(1, 0, 0)')\n # Identity edge scatter\n ieMODEL = Scatter3d(\n x=x_lines,\n y=y_lines,\n z=z_lines,\n mode='lines',\n line={'color': c_lines, 'width': 4}\n )\n #############################\n # Point cloud visualization #\n #############################\n # Split channels\n R, G, B = points.T\n # Define point colors\n color = [f'rgb({int(r*255)}, {int(g*255)}, {int(b*255)})' for r, g, b in points]\n MODEL = Scatter3d(\n x=R, y=G, z=B,\n name='RGB color model',\n mode='markers',\n line={'dash': 'dot'},\n marker={\n 'size': 1,\n 'color': color\n }\n )\n ##########################\n # Visualization settings #\n ##########################\n DATA = [ipMODEL, ieMODEL, MODEL]\n # Layout\n layout = Layout(\n title='RGB color model',\n margin={'l': 0, 'r': 0, 'b': 0, 't':0},\n scene={\n 'xaxis': {'title': 'R'},\n 'yaxis': {'title': 'G'},\n 'zaxis': {'title': 'B'}\n },\n showlegend=False\n )\n # Figure\n fig = Figure(data=DATA, layout=layout)\n # Camera\n CAMERA = {\n 'up': {'x': 0, 'y': 1, 'z': 0},\n 'eye': {'x': 1.2, 'y': 1,'z': 1.5}\n }\n # Update visualization\n fig['layout'].update(scene={'camera': CAMERA})\n # Plot\n iplot(fig, filename='RGB_color', show_link=False)\n\n\ndef plotXYZ(points, colors, m):\n ###############################\n # Identity base visualization #\n ###############################\n # Create identity edges\n x_lines, y_lines, z_lines, c_lines = [], [], [], []\n for p in base_pairs:\n for i in range(2):\n x_lines.append(iBASE[0][p[i]])\n y_lines.append(iBASE[1][p[i]])\n z_lines.append(iBASE[2][p[i]])\n c_lines.append(\"gray\")\n x_lines.append(None)\n y_lines.append(None)\n z_lines.append(None)\n c_lines.append('rgb(1, 0, 0)')\n # Identity edge scatter\n ieBASE = Scatter3d(\n x=x_lines,\n y=y_lines,\n z=z_lines,\n mode='lines',\n line={'color': c_lines, 'dash': 'dot', 'width': 2}\n )\n ################################\n # Identity model visualization #\n ################################\n # Create identity points\n iRGB = np.array([\n [0, 1, 0, 1, 0, 1, 0, 1],\n [0, 0, 1, 1, 0, 0, 1, 1],\n [0, 0, 0, 0, 1, 1, 1, 1]\n ])\n # Identity point color\n ipColor = [f'rgb({int(r*255)}, {int(g*255)}, {int(b*255)})' for r, g, b in zip(*iRGB)]\n iRGB = np.dot(m, iRGB)\n # Identity point scatter\n ipMODEL = Scatter3d(\n x=iRGB[0],\n y=iRGB[1],\n z=iRGB[2],\n mode='markers',\n 
hovertext=[f'iPoint {e}' for e in range(8)],\n        marker={\n            'size': 8,\n            'color': ipColor\n        }\n    )\n    # Create identity edges\n    edge_pairs = [(0, 1), (1, 3), (3, 2), (2, 0), (4, 5), (5, 7), (7, 6), (6, 4), (0, 4), (1, 5), (2, 6), (3, 7)]\n    x_lines, y_lines, z_lines, c_lines = [], [], [], []\n    for p in edge_pairs:\n        for i in range(2):\n            x_lines.append(iRGB[0][p[i]])\n            y_lines.append(iRGB[1][p[i]])\n            z_lines.append(iRGB[2][p[i]])\n            c_lines.append(ipColor[p[i]])\n        x_lines.append(None)\n        y_lines.append(None)\n        z_lines.append(None)\n        c_lines.append('rgb(1, 0, 0)')\n    # Identity edge scatter\n    ieMODEL = Scatter3d(\n        x=x_lines,\n        y=y_lines,\n        z=z_lines,\n        mode='lines',\n        line={'color': c_lines, 'width': 4}\n    )\n    #############################\n    # Point cloud visualization #\n    #############################\n    # Split channels\n    X, Y, Z = points.T\n    # Define point colors\n    color = [f'rgb({int(r*255)}, {int(g*255)}, {int(b*255)})' for r, g, b in colors]\n    MODEL = Scatter3d(\n        x=X, y=Y, z=Z,\n        name='XYZ color model',\n        mode='markers',\n        line={'dash': 'dot'},\n        marker={\n            'size': 1,\n            'color': color\n        }\n    )\n    ##########################\n    # Visualization settings #\n    ##########################\n    DATA = [ipMODEL, ieMODEL, MODEL, ieBASE]\n    # Layout\n    layout = Layout(\n        title='XYZ color model',\n        margin={'l': 0, 'r': 0, 'b': 0, 't': 0},\n        scene={\n            'xaxis': {'title': 'X'},\n            'yaxis': {'title': 'Y'},\n            'zaxis': {'title': 'Z'}\n        },\n        showlegend=False\n    )\n    # Figure\n    fig = Figure(data=DATA, layout=layout)\n    # Camera\n    CAMERA = {\n        'up': {'x': 0, 'y': 1, 'z': 0},\n        'eye': {'x': 1.2, 'y': 1, 'z': 1.5}\n    }\n    # Update visualization\n    fig['layout'].update(scene={'camera': CAMERA})\n    # Plot\n    iplot(fig, filename='XYZ_color', show_link=False)\n\n\ndef plotHSV(points, colors):\n    ##########################\n    # Identity visualization #\n    ##########################\n    # Create identity points\n    h = np.array([0, 60, 120, 180, 240, 300, 0, 0])\n    X = np.sin(2*np.pi*h/360); X[-1] = 0; X[-2] = 0\n    Z = np.cos(2*np.pi*h/360); Z[-1] = 0; Z[-2] = 0\n    Y = np.array([1, 1, 1, 1, 1, 1, 1, 0])\n    iRGB = [[1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 0, 0]]\n    # Identity point color\n    ipColor = [f'rgb({int(r*255)}, {int(g*255)}, {int(b*255)})' for r, g, b in iRGB]\n    # Identity point scatter\n    ipMODEL = Scatter3d(\n        x=X,\n        y=Y,\n        z=Z,\n        mode='markers',\n        hovertext=[f'iPoint {e}' for e in range(8)],\n        marker={\n            'size': 8,\n            'color': ipColor\n        }\n    )\n    # Create identity edges\n    edge_pairs = []\n    for i in range(5):\n        edge_pairs += [i, i+1], [i, 6], [i, 7]\n    else:\n        edge_pairs += [5, 0], [5, 6], [5, 7]\n    x_lines, y_lines, z_lines, c_lines = [], [], [], []\n    for p in edge_pairs:\n        for i in range(2):\n            x_lines.append(X[p[i]])\n            y_lines.append(Y[p[i]])\n            z_lines.append(Z[p[i]])\n            c_lines.append(ipColor[p[i]])\n        x_lines.append(None)\n        y_lines.append(None)\n        z_lines.append(None)\n        c_lines.append('rgb(1, 0, 0)')\n    # Identity edge scatter\n    ieMODEL = Scatter3d(\n        x=x_lines,\n        y=y_lines,\n        z=z_lines,\n        mode='lines',\n        line={'color': c_lines, 'width': 4}\n    )\n    #############################\n    # Point cloud visualization #\n    #############################\n    # Split channels\n    H, S, V = points.T\n    X = S*V*np.sin(2*np.pi*H/360)\n    Z = S*V*np.cos(2*np.pi*H/360)\n    # Define point colors\n    color = [f'rgb({int(r*255)}, {int(g*255)}, {int(b*255)})' for r, g, b in colors]\n    MODEL = Scatter3d(\n        x=X, y=V, z=Z,\n        name='HSV color model',\n        mode='markers',\n        line={'dash': 'dot'},\n        marker={\n            'size': 1,\n            'color': color\n        }\n    )\n    
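# sanity check: a pure red sample (H, S, V) = (0, 1, 1) lands at\n    # (X, Y, Z) = (sin 0, 1, cos 0) = (0, 1, 1), matching the red identity point above\n    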
##########################\n    # Visualization settings #\n    ##########################\n    DATA = [ipMODEL, ieMODEL, MODEL]\n    # Layout\n    layout = Layout(\n        title='HSV color model',\n        margin={'l': 0, 'r': 0, 'b': 0, 't': 0},\n        scene={\n            'xaxis': {'title': 'X'},\n            'yaxis': {'title': 'Y'},\n            'zaxis': {'title': 'Z'}\n        },\n        showlegend=False\n    )\n    # Figure\n    fig = Figure(data=DATA, layout=layout)\n    # Camera\n    CAMERA = {\n        'up': {'x': 0, 'y': 1, 'z': 0},\n        'eye': {'x': 1.2, 'y': 1, 'z': 1.5}\n    }\n    # Update visualization\n    fig['layout'].update(scene={'camera': CAMERA})\n    # Plot\n    iplot(fig, filename='HSV_color', show_link=False)\n\n\ndef plotHSL(points, colors):\n    ##########################\n    # Identity visualization #\n    ##########################\n    # Create identity points\n    h = np.array([0, 60, 120, 180, 240, 300, 0, 0])\n    X = np.sin(2*np.pi*h/360); X[-1] = 0; X[-2] = 0\n    Z = np.cos(2*np.pi*h/360); Z[-1] = 0; Z[-2] = 0\n    Y = np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 0])\n    iRGB = [[1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 0, 0]]\n    # Identity point color\n    ipColor = [f'rgb({int(r*255)}, {int(g*255)}, {int(b*255)})' for r, g, b in iRGB]\n    # Identity point scatter\n    ipMODEL = Scatter3d(\n        x=X,\n        y=Y,\n        z=Z,\n        mode='markers',\n        hovertext=[f'iPoint {e}' for e in range(8)],\n        marker={\n            'size': 8,\n            'color': ipColor\n        }\n    )\n    # Create identity edges\n    edge_pairs = []\n    for i in range(5):\n        edge_pairs += [i, i+1], [i, 6], [i, 7]\n    else:\n        edge_pairs += [5, 0], [5, 6], [5, 7]\n    x_lines, y_lines, z_lines, c_lines = [], [], [], []\n    for p in edge_pairs:\n        for i in range(2):\n            x_lines.append(X[p[i]])\n            y_lines.append(Y[p[i]])\n            z_lines.append(Z[p[i]])\n            c_lines.append(ipColor[p[i]])\n        x_lines.append(None)\n        y_lines.append(None)\n        z_lines.append(None)\n        c_lines.append('rgb(1, 0, 0)')\n    # Identity edge scatter\n    ieMODEL = Scatter3d(\n        x=x_lines,\n        y=y_lines,\n        z=z_lines,\n        mode='lines',\n        line={'color': c_lines, 'width': 4}\n    )\n    #############################\n    # Point cloud visualization #\n    #############################\n    # Split channels\n    H, S, V = points.T\n    V2 = np.where(V <= 0.5, V, 1 - V)*2\n    X = S*V2*np.sin(2*np.pi*H/360)\n    Z = S*V2*np.cos(2*np.pi*H/360)\n    # Define point colors\n    color = [f'rgb({int(r*255)}, {int(g*255)}, {int(b*255)})' for r, g, b in colors]\n    MODEL = Scatter3d(\n        x=X, y=V, z=Z,\n        name='HSL color model',\n        mode='markers',\n        line={'dash': 'dot'},\n        marker={\n            'size': 1,\n            'color': color\n        }\n    )\n    ##########################\n    # Visualization settings #\n    ##########################\n    DATA = [ipMODEL, ieMODEL, MODEL]\n    # Layout\n    layout = Layout(\n        title='HSL color model',\n        margin={'l': 0, 'r': 0, 'b': 0, 't': 0},\n        scene={\n            'xaxis': {'title': 'X'},\n            'yaxis': {'title': 'Y'},\n            'zaxis': {'title': 'Z'}\n        },\n        showlegend=False\n    )\n    # Figure\n    fig = Figure(data=DATA, layout=layout)\n    # Camera\n    CAMERA = {\n        'up': {'x': 0, 'y': 1, 'z': 0},\n        'eye': {'x': 1.2, 'y': 1, 'z': 1.5}\n    }\n    # Update visualization\n    fig['layout'].update(scene={'camera': CAMERA})\n    # Plot\n    iplot(fig, filename='HSL_color', show_link=False)" ]
[ [ "numpy.dot", "numpy.cos", "numpy.sin", "numpy.array", "numpy.where" ] ]
ruijieren98/SfmLearner-Pytorch
[ "893743f33c31a687a6bb4a447af01f9abf47b85f" ]
[ "train_flexible_shifts.py" ]
[ "import time\nimport csv\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport custom_transforms\nimport models\nfrom utils import save_checkpoint,save_path_formatter\nfrom logger import TermLogger, AverageMeter\nfrom itertools import chain\nfrom tensorboardX import SummaryWriter\nfrom datasets.shifted_sequence_folders import ShiftedSequenceFolder\nfrom datasets.sequence_folders import SequenceFolder\nfrom train import train, validate_with_gt, validate_without_gt, parser\n\nparser.add_argument('-d', '--target-displacement', type=float, help='displacement to aim at when adjustting shifts, regarding posenet output',\n metavar='D', default=0.05)\n\nbest_error = -1\nn_iter = 0\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n\ndef main():\n global args, best_error, n_iter, device\n args = parser.parse_args()\n save_path = save_path_formatter(args, parser)\n args.save_path = 'checkpoints_shifted'/save_path\n print('=> will save everything to {}'.format(args.save_path))\n args.save_path.makedirs_p()\n torch.manual_seed(args.seed)\n\n training_writer = SummaryWriter(args.save_path)\n output_writers = []\n if args.log_output:\n for i in range(3):\n output_writers.append(SummaryWriter(args.save_path/'valid'/str(i)))\n\n # Data loading code\n normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n train_transform = custom_transforms.Compose([\n custom_transforms.RandomHorizontalFlip(),\n custom_transforms.RandomScaleCrop(),\n custom_transforms.ArrayToTensor(),\n normalize\n ])\n\n valid_transform = custom_transforms.Compose([custom_transforms.ArrayToTensor(), normalize])\n\n print(\"=> fetching scenes in '{}'\".format(args.data))\n train_set = ShiftedSequenceFolder(\n args.data,\n transform=train_transform,\n seed=args.seed,\n train=True,\n sequence_length=args.sequence_length,\n target_displacement=args.target_displacement\n )\n\n # if no Groundtruth is avalaible, Validation set is the same type as training set to measure photometric loss from warping\n if args.with_gt:\n from datasets.validation_folders import ValidationSet\n val_set = ValidationSet(\n args.data,\n transform=valid_transform\n )\n else:\n val_set = SequenceFolder(\n args.data,\n transform=valid_transform,\n seed=args.seed,\n train=False,\n sequence_length=args.sequence_length,\n )\n print('{} samples found in {} train scenes'.format(len(train_set), len(train_set.scenes)))\n print('{} samples found in {} valid scenes'.format(len(val_set), len(val_set.scenes)))\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n adjust_loader = torch.utils.data.DataLoader(\n train_set, batch_size=args.batch_size, shuffle=False,\n num_workers=0, pin_memory=True) # workers is set to 0 to avoid multiple instances to be modified at the same time\n val_loader = torch.utils.data.DataLoader(\n val_set, batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.epoch_size == 0:\n args.epoch_size = len(train_loader)\n\n train.args = args\n # create model\n print(\"=> creating model\")\n\n disp_net = models.DispNetS().cuda()\n output_exp = args.mask_loss_weight > 0\n if not output_exp:\n print(\"=> no mask loss, PoseExpnet will only output pose\")\n pose_exp_net = models.PoseExpNet(nb_ref_imgs=args.sequence_length - 1, 
output_exp=args.mask_loss_weight > 0).to(device)\n\n if args.pretrained_exp_pose:\n print(\"=> using pre-trained weights for explainabilty and pose net\")\n weights = torch.load(args.pretrained_exp_pose)\n pose_exp_net.load_state_dict(weights['state_dict'], strict=False)\n else:\n pose_exp_net.init_weights()\n\n if args.pretrained_disp:\n print(\"=> using pre-trained weights for Dispnet\")\n weights = torch.load(args.pretrained_disp)\n disp_net.load_state_dict(weights['state_dict'])\n else:\n disp_net.init_weights()\n\n cudnn.benchmark = True\n disp_net = torch.nn.DataParallel(disp_net)\n pose_exp_net = torch.nn.DataParallel(pose_exp_net)\n\n print('=> setting adam solver')\n\n parameters = chain(disp_net.parameters(), pose_exp_net.parameters())\n optimizer = torch.optim.Adam(parameters, args.lr,\n betas=(args.momentum, args.beta),\n weight_decay=args.weight_decay)\n\n with open(args.save_path/args.log_summary, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t')\n writer.writerow(['train_loss', 'validation_loss'])\n\n with open(args.save_path/args.log_full, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t')\n writer.writerow(['train_loss', 'photo_loss', 'explainability_loss', 'smooth_loss'])\n\n logger = TermLogger(n_epochs=args.epochs, train_size=min(len(train_loader), args.epoch_size), valid_size=len(val_loader))\n logger.epoch_bar.start()\n\n for epoch in range(args.epochs):\n logger.epoch_bar.update(epoch)\n\n # train for one epoch\n logger.reset_train_bar()\n train_loss = train(args, train_loader, disp_net, pose_exp_net, optimizer, args.epoch_size, logger, training_writer)\n logger.train_writer.write(' * Avg Loss : {:.3f}'.format(train_loss))\n\n if (epoch + 1) % 5 == 0:\n train_set.adjust = True\n logger.reset_train_bar(len(adjust_loader))\n average_shifts = adjust_shifts(args, train_set, adjust_loader, pose_exp_net, epoch, logger, training_writer)\n shifts_string = ' '.join(['{:.3f}'.format(s) for s in average_shifts])\n logger.train_writer.write(' * adjusted shifts, average shifts are now : {}'.format(shifts_string))\n for i, shift in enumerate(average_shifts):\n training_writer.add_scalar('shifts{}'.format(i), shift, epoch)\n train_set.adjust = False\n\n # evaluate on validation set\n logger.reset_valid_bar()\n if args.with_gt:\n errors, error_names = validate_with_gt(args, val_loader, disp_net, epoch, logger, output_writers)\n else:\n errors, error_names = validate_without_gt(args, val_loader, disp_net, pose_exp_net, epoch, logger, output_writers)\n error_string = ', '.join('{} : {:.3f}'.format(name, error) for name, error in zip(error_names, errors))\n logger.valid_writer.write(' * Avg {}'.format(error_string))\n\n for error, name in zip(errors, error_names):\n training_writer.add_scalar(name, error, epoch)\n\n # Up to you to chose the most relevant error to measure your model's performance, careful some measures are to maximize (such as a1,a2,a3)\n decisive_error = errors[0]\n if best_error < 0:\n best_error = decisive_error\n\n # remember lowest error and save checkpoint\n is_best = decisive_error < best_error\n best_error = min(best_error, decisive_error)\n save_checkpoint(\n args.save_path, {\n 'epoch': epoch + 1,\n 'state_dict': disp_net.module.state_dict()\n }, {\n 'epoch': epoch + 1,\n 'state_dict': pose_exp_net.module.state_dict()\n },\n is_best)\n\n with open(args.save_path/args.log_summary, 'a') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t')\n writer.writerow([train_loss, decisive_error])\n logger.epoch_bar.finish()\n\n\[email 
protected]_grad()\ndef adjust_shifts(args, train_set, adjust_loader, pose_exp_net, epoch, logger, train_writer):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n new_shifts = AverageMeter(args.sequence_length-1)\n pose_exp_net.train()\n poses = np.zeros(((len(adjust_loader)-1) * args.batch_size * (args.sequence_length-1),6))\n\n end = time.time()\n\n for i, (indices, tgt_img, ref_imgs, intrinsics, intrinsics_inv) in enumerate(adjust_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n tgt_img = tgt_img.to(device)\n ref_imgs = [img.to(device) for img in ref_imgs]\n\n # compute output\n explainability_mask, pose_batch = pose_exp_net(tgt_img, ref_imgs)\n\n if i < len(adjust_loader)-1:\n step = args.batch_size*(args.sequence_length-1)\n poses[i * step:(i+1) * step] = pose_batch.cpu().reshape(-1,6).numpy()\n\n for index, pose in zip(indices, pose_batch):\n displacements = pose[:,:3].norm(p=2, dim=1).cpu().numpy()\n train_set.reset_shifts(index, displacements)\n new_shifts.update(train_set.samples[index]['ref_imgs'])\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n logger.train_bar.update(i)\n if i % args.print_freq == 0:\n logger.train_writer.write('Adjustement:'\n 'Time {} Data {} shifts {}'.format(batch_time, data_time, new_shifts))\n\n prefix = 'train poses'\n coeffs_names = ['tx', 'ty', 'tz']\n if args.rotation_mode == 'euler':\n coeffs_names.extend(['rx', 'ry', 'rz'])\n elif args.rotation_mode == 'quat':\n coeffs_names.extend(['qx', 'qy', 'qz'])\n for i in range(poses.shape[1]):\n train_writer.add_histogram('{} {}'.format(prefix, coeffs_names[i]), poses[:,i], epoch)\n\n return new_shifts.avg\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.optim.Adam", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.nn.DataParallel" ] ]
johntzwei/wilds
[ "cb373f729826fbdd737664f06bb91b92c6bf5395" ]
[ "examples/models/initializer.py" ]
[ "import torch.nn as nn\nimport torchvision\nfrom models.bert.bert import BertClassifier, BertFeaturizer\nfrom models.bert.roberta import RobertaClassifier, RobertaFeaturizer\nfrom models.bert.distilbert import DistilBertClassifier, DistilBertFeaturizer\nfrom models.resnet_multispectral import ResNet18\nfrom models.layers import Identity\nfrom models.gnn import GINVirtual\nfrom models.code_gpt import GPT2LMHeadLogit, GPT2FeaturizerLMHeadLogit\nfrom transformers import GPT2Tokenizer\n\ndef initialize_model(config, d_out, is_featurizer=False):\n \"\"\"\n Initializes models according to the config\n Args:\n - config (dictionary): config dictionary\n - d_out (int): the dimensionality of the model output\n - is_featurizer (bool): whether to return a model or a (featurizer, classifier) pair that constitutes a model.\n Output:\n If is_featurizer=True:\n - featurizer: a model that outputs feature Tensors of shape (batch_size, ..., feature dimensionality)\n - classifier: a model that takes in feature Tensors and outputs predictions. In most cases, this is a linear layer.\n\n If is_featurizer=False:\n - model: a model that is equivalent to nn.Sequential(featurizer, classifier)\n \"\"\"\n if config.model in ('resnet50', 'resnet34', 'wideresnet50', 'densenet121'):\n if is_featurizer:\n featurizer = initialize_torchvision_model(\n name=config.model,\n d_out=None,\n **config.model_kwargs)\n classifier = nn.Linear(featurizer.d_out, d_out)\n model = (featurizer, classifier)\n else:\n model = initialize_torchvision_model(\n name=config.model,\n d_out=d_out,\n **config.model_kwargs)\n elif 'bert' in config.model:\n if is_featurizer:\n featurizer = initialize_bert_based_model(config, d_out, is_featurizer)\n classifier = nn.Linear(featurizer.d_out, d_out)\n model = (featurizer, classifier)\n else:\n model = initialize_bert_based_model(config, d_out)\n elif config.model == 'resnet18_ms': # multispectral resnet 18\n if is_featurizer:\n featurizer = ResNet18(num_classes=None, **config.model_kwargs)\n classifier = nn.Linear(featurizer.d_out, d_out)\n model = (featurizer, classifier)\n else:\n model = ResNet18(num_classes=d_out, **config.model_kwargs)\n elif config.model == 'gin-virtual':\n if is_featurizer:\n featurizer = GINVirtual(num_tasks=None, **config.model_kwargs)\n classifier = nn.Linear(featurizer.d_out, d_out)\n model = (featurizer, classifier)\n else:\n model = GINVirtual(num_tasks=d_out, **config.model_kwargs)\n elif config.model == 'code-gpt-py':\n name = 'microsoft/CodeGPT-small-py'\n tokenizer = GPT2Tokenizer.from_pretrained(name)\n if is_featurizer:\n model = GPT2FeaturizerLMHeadLogit.from_pretrained(name)\n model.resize_token_embeddings(len(tokenizer))\n featurizer = model.transformer\n classifier = model.lm_head\n model = (featurizer, classifier)\n else:\n model = GPT2LMHeadLogit.from_pretrained(name)\n model.resize_token_embeddings(len(tokenizer))\n elif config.model == 'logistic_regression':\n assert not is_featurizer, \"Featurizer not supported for logistic regression\"\n model = nn.Linear(out_features=d_out, **config.model_kwargs)\n else:\n raise ValueError(f'Model: {config.model} not recognized.')\n return model\n\ndef initialize_bert_based_model(config, d_out, is_featurizer=False):\n if config.model == 'bert-base-uncased':\n if is_featurizer:\n model = BertFeaturizer.from_pretrained(config.model, **config.model_kwargs)\n else:\n model = BertClassifier.from_pretrained(\n config.model,\n num_labels=d_out,\n **config.model_kwargs)\n elif config.model == 'roberta-base':\n if is_featurizer:\n 
model = RobertaFeaturizer.from_pretrained(config.model, **config.model_kwargs)\n else:\n model = RobertaClassifier.from_pretrained(\n config.model,\n num_labels=d_out,\n **config.model_kwargs)\n\n elif config.model == 'distilbert-base-uncased':\n if is_featurizer:\n model = DistilBertFeaturizer.from_pretrained(config.model, **config.model_kwargs)\n else:\n model = DistilBertClassifier.from_pretrained(\n config.model,\n num_labels=d_out,\n **config.model_kwargs)\n else:\n raise ValueError(f'Model: {config.model} not recognized.')\n return model\n\ndef initialize_torchvision_model(name, d_out, **kwargs):\n # get constructor and last layer names\n if name == 'wideresnet50':\n constructor_name = 'wide_resnet50_2'\n last_layer_name = 'fc'\n elif name == 'densenet121':\n constructor_name = name\n last_layer_name = 'classifier'\n elif name in ('resnet50', 'resnet34'):\n constructor_name = name\n last_layer_name = 'fc'\n else:\n raise ValueError(f'Torchvision model {name} not recognized')\n # construct the default model, which has the default last layer\n constructor = getattr(torchvision.models, constructor_name)\n model = constructor(**kwargs)\n # adjust the last layer\n d_features = getattr(model, last_layer_name).in_features\n if d_out is None: # want to initialize a featurizer model\n last_layer = Identity(d_features)\n model.d_out = d_features\n else: # want to initialize a classifier for a particular num_classes\n last_layer = nn.Linear(d_features, d_out)\n model.d_out = d_out\n setattr(model, last_layer_name, last_layer)\n return model\n" ]
[ [ "torch.nn.Linear" ] ]
sudhu26/data-science-portfolio
[ "88f7a350cbd9245e4f92ff1829e49c5d378c609d" ]
[ "academics/SparkML/Assignment03.py" ]
[ "# imports and tools\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import IntegerType\nimport pyspark.sql.functions as f\n\nfrom pyspark.ml.feature import CountVectorizer\n\nimport re\nimport os\nimport numpy as np\nimport operator\n\nimport matplotlib.pyplot as plt\n\n# create SparkContext object\nspark = SparkSession.builder.appName(\"Assignment_03\").getOrCreate()\nspark.sparkContext.setLogLevel(\"ERROR\")\nsc = spark.sparkContext\n\n####################################################################################\n## part 1\n# load data to dataframe\nprint('*' * 100)\nprint('Part 1 - load data into dataframe\\n')\npath = 's3://tdp-ml-datasets/misc/Amazon.csv'\ndataAmazon = spark.read.load(path , format = 'csv', header = 'true', inferschema = 'true', sep = \",\")\n\ndataAmazonRDD = dataAmazon.rdd\ndataAmazonRDD = dataAmazonRDD.map(lambda x: (x[0], list(x[1:])))\n\nprint('*' * 25)\nprint('\\nAmazon\\n')\nfor x in dataAmazonRDD.take(5):\n print(x)\n\npath = 's3://tdp-ml-datasets/misc/Google.csv'\ndataGoogle = spark.read.load(path , format = 'csv', header = 'true', inferschema = 'true', sep = \",\")\n\ndataGoogleRDD = dataGoogle.rdd\ndataGoogleRDD = dataGoogleRDD.map(lambda x: (x[0], list(x[1:])))\n\nprint('*' * 25)\nprint('\\nGoogle\\n')\nfor x in dataGoogleRDD.take(5):\n print(x)\n\n####################################################################################\n## part 2\n# \nprint('*' * 100)\nprint('Part 2 - Bag-of-words\\n')\n\nprint('*' * 50)\nprint('Part 2a & 2b- implement function to return non-empty string and remove stopwords\\n')\n\npath = 's3://tdp-ml-datasets/misc/stopwords.txt' \nstopwords = sc.textFile(path).collect()\n\nquickbrownfox = 'A quick brown fox jumps over the lazy dog.'\nregexRule = '\\W+'\ndef tokenizer(string):\n # parse string and lower case\n string = re.split(regexRule, string)\n string = filter(None, string)\n string = [wrd.lower() for wrd in string]\n \n # remove stopwords\n cleanString = []\n for wrd in string:\n if not (wrd in stopwords):\n cleanString.append(wrd)\n return cleanString\n return string\n\nsent = 'the skyscraper is very tall and slightly gray'\nprint('original sentence:\\n{}'.format(sent))\nprint('processed sentence:\\n{}'.format(tokenizer(sent)))\n\nprint('*' * 50)\nprint('Part 2c- tokenize Amazon and Google datasets\\n')\n\nprint('*' * 25)\nprint('\\nAmazon\\n')\n\n# tokenize product title\ndataAmazonTokens = dataAmazonRDD.map(lambda x: (x[0], tokenizer(x[1][0])))\nfor x in dataAmazonTokens.take(5):\n print(x)\n\nprint('*' * 25)\nprint('\\nGoogle\\n')\n\n# tokenize product title\ndataGoogleTokens = dataGoogleRDD.map(lambda x: (x[0], tokenizer(x[1][0])))\nfor x in dataGoogleTokens.take(5):\n print(x)\n\n####################################################################################\n## part 3\n# \nprint('*' * 100)\nprint('Part 3 - Return term frequency of a list of tokens\\n')\n\ndef termFreq(tokens, normalize = False):\n # capture term frequency for input token list\n termFreqDict = {}\n for token in tokens:\n if token in termFreqDict:\n termFreqDict[token] += 1\n else:\n termFreqDict[token] = 1\n\n # normalize counts based on number of tokens\n if normalize:\n for token in termFreqDict:\n termFreqDict[token] /= float(len(tokens))\n return termFreqDict\n\n\n\nprint('Google term frequency sample')\ngoogleTermFreq = dataGoogleTokens.map(lambda x: (x[0], termFreq(x[1], normalize = False)))\ngoogleTermFreqNorm = dataGoogleTokens.map(lambda x: (x[0], termFreq(x[1], normalize = True)))\n\nprint('\\nNot 
normalized')\nfor x in googleTermFreq.take(5):\n print(x)\nprint('\\nNormalized')\nfor x in googleTermFreqNorm.take(5):\n print(x)\n\n\nprint('\\nAmazon term frequency sample')\namazonTermFreq = dataAmazonTokens.map(lambda x: (x[0], termFreq(x[1], normalize = False)))\namazonTermFreqNorm = dataAmazonTokens.map(lambda x: (x[0], termFreq(x[1], normalize = True)))\n\nprint('\\nNot normalized')\nfor x in amazonTermFreq.take(5):\n print(x)\nprint('\\nNormalized')\nfor x in amazonTermFreqNorm.take(5):\n print(x)\n\n\n####################################################################################\n## part 4\n# \nprint('*' * 100)\nprint('Part 4 - Combine Amazon and Google RDDs\\n')\n\ncorpus = dataAmazonTokens.union(dataGoogleTokens)\n\nprint('Print every 100th key/value pair in corpus')\nfor x in corpus.collect()[::100]:\n print(x)\n\n\ncorpusDf = corpus.toDF(['id','tokens'])\n\n####################################################################################\n## part 5\n# \nprint('*' * 100)\nprint('Part 5 - Calculate IDFs and visualize lowest values \\n')\n\n\ndef calcIdf(corpus):\n # get document count\n docCount = corpusDf.agg(f.countDistinct('id'))\n docCount = docCount.collect()[0][0]\n\n # explode token vector\n corpusDfExplode = (corpusDf.select('id','tokens',(f.explode('tokens').alias('indvToken'))))\n\n # count number of IDs that include each word to get document frequency\n docFreqs = corpusDfExplode.groupBy('indvToken').agg(f.countDistinct('id').alias('df')) \n docFreqs = docFreqs.sort(f.desc('df'))\n \n idfDf = docFreqs.withColumn('idf', docFreqs.df / docCount)\n idfDf = docFreqs.withColumn('idf', f.log((docCount + 1) / (docFreqs.df + 1)))\n\n idfRdd = idfDf.select('indvToken','idf').rdd \n return idfRdd \n\nidfRdd = calcIdf(corpus = corpusDf)\nprint('Five lowest IDF values')\nprint(idfRdd.take(5))\n\nidfSubset = idfRdd.takeOrdered(10, key = lambda x: x[1]) \nidfSubset = [x[1] for x in idfSubset]\nplt.hist(idfSubset, bins = 5) \nplt.xlabel('token')\nplt.ylabel('IDF')\nplt.savefig('LowestIDF.png')\n\n####################################################################################\n## part 6\n# \nprint('*' * 100)\nprint('Part 6 - Calculate term frequencies and TF-IDF \\n')\n\n# \nprint('Part A - Calculate term frequencies')\ncombinedTermFreq = amazonTermFreq.union(googleTermFreq)\n\ndef tfidfFunc(corpus, idfs):\n \n # transform input corpus and IDFs into dictionaries\n idfs = idfs.collectAsMap()\n keyTFs = corpus.collectAsMap() \n \n # for each document in the corpus\n for key in keyTFs.keys():\n \n # for each token in the document\n for token in keyTFs[key].keys():\n\n # multiply the token's frequency by that term's IDF value\n keyTFs[key][token] = keyTFs[key][token] * idfs[token]\n \n return keyTFs\n \nidfDict = tfidfFunc(combinedTermFreq, idfRdd)\n\nprint('Print first five items in idfDict, which is a dictionary where the key is the document ID and the value is an embedded dictionary containing the tokens and the corresponding TF-IDF values')\nidfDictFirstFive = {k: idfDict[k] for k in list(idfDict)[:5]} \n\nfor k in idfDictFirstFive.keys():\n print(idfDictFirstFive[k])\n print()\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.hist", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel" ] ]
sai-krishna-ghanta/Lane_detection_CV
[ "0b02afe2a484c98568aca1b7cdc29d2b7e90dc0f" ]
[ "threshold.py" ]
[ "import numpy as np\nimport cv2\n\ndef abs_sobel_thresh(img, orient='x', thresh=(20, 100)):\n \"\"\"\n #--------------------- \n # This function applies Sobel x or y, and then \n # takes an absolute value and applies a threshold.\n #\n \"\"\"\n # Take the derivative in x or y given orient = 'x' or 'y'\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1))\n\n # Scale to 8-bit (0 - 255) then convert to type = np.uint8 \n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n \n # Create a binary mask where mag thresholds are met \n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 255\n\n # Return the result\n return binary_output\n\n\ndef mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):\n \"\"\"\n #---------------------\n # This function takes in an image and optional Sobel kernel size, \n # as well as thresholds for gradient magnitude. And computes the gradient magnitude, \n # applies a threshold, and creates a binary output image showing where thresholds were met.\n #\n \"\"\"\n # Take the gradient in x and y separately\n sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n \n # Calculate the gradient magnitude\n gradmag = np.sqrt(sobelx**2 + sobely**2)\n \n # Scale to 8-bit (0 - 255) and convert to type = np.uint8\n scale_factor = np.max(gradmag)/255\n gradmag = (gradmag/scale_factor).astype(np.uint8)\n\n # Create a binary mask where mag thresholds are met \n binary_output = np.zeros_like(gradmag)\n binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 255\n\n # Return the binary image\n return binary_output\n\n\ndef dir_thresh(img, sobel_kernel=3, thresh=(0.7, 1.3)):\n \"\"\"\n #---------------------\n # This function applies Sobel x and y, \n # then computes the direction of the gradient,\n # and then applies a threshold.\n #\n \"\"\"\n # Take the gradient in x and y separately\n sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n \n # Take the absolute value of the x and y gradients \n # and calculate the direction of the gradient\n absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n \n # Create a binary mask where direction thresholds are met \n binary_output = np.zeros_like(absgraddir)\n binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 255\n \n # Return the binary image\n return binary_output.astype(np.uint8)\n\n\ndef get_combined_gradients(img, thresh_x, thresh_y, thresh_mag, thresh_dir):\n \"\"\"\n #---------------------\n # This function isolates lane line pixels, by focusing on pixels\n # that are likely to be part of lane lines.\n # I am using Red Channel, since it detects white pixels very well. 
\n #\n \"\"\"\n rows, cols = img.shape[:2]\n \n # save cropped image for documentation\n temp = np.copy(img)\n temp = temp[220:rows-12, 0:cols, 2]\n cv2.imwrite(\"./output_images/02_cropped.png\", temp)\n\n R_channel = img[220:rows-12, 0:cols, 2] # focusing only on regions where lane lines are likely present\n\n sobelx = abs_sobel_thresh(R_channel, 'x', thresh_x)\n sobely = abs_sobel_thresh(R_channel, 'y', thresh_y)\n mag_binary = mag_thresh(R_channel, 3, thresh_mag)\n dir_binary = dir_thresh(R_channel, 15, thresh_dir)\n \n # debug\n #cv2.imshow('sobelx', sobelx)\n\n # combine sobelx, sobely, magnitude & direction measurements\n gradient_combined = np.zeros_like(dir_binary).astype(np.uint8)\n gradient_combined[((sobelx > 1) & (mag_binary > 1) & (dir_binary > 1)) | ((sobelx > 1) & (sobely > 1))] = 255 # | (R > 1)] = 255\n\n return gradient_combined\n\n\ndef channel_thresh(channel, thresh=(80, 255)):\n \"\"\"\n #---------------------\n # This function takes in a channel of an image and\n # returns thresholded binary image\n # \n \"\"\"\n binary = np.zeros_like(channel)\n binary[(channel > thresh[0]) & (channel <= thresh[1])] = 255\n return binary\n\n\ndef get_combined_hls(img, th_h, th_l, th_s):\n \"\"\"\n #---------------------\n # This function takes in an image, converts it to HLS colorspace, \n # extracts individual channels, applies thresholding on them\n #\n \"\"\"\n\n # convert to hls color space\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n\n rows, cols = img.shape[:2]\n \n # trying to use Red channel info to improve results\n #R = img[220:rows - 12, 0:cols, 2]\n #_, R = cv2.threshold(R, 180, 255, cv2.THRESH_BINARY)\n \n H = hls[220:rows - 12, 0:cols, 0]\n L = hls[220:rows - 12, 0:cols, 1]\n S = hls[220:rows - 12, 0:cols, 2]\n\n h_channel = channel_thresh(H, th_h)\n l_channel = channel_thresh(L, th_l)\n s_channel = channel_thresh(S, th_s)\n \n # debug\n #cv2.imshow('Thresholded S channel', s_channel)\n\n # Trying to use Red channel, it works even better than S channel sometimes, \n # but in cases where there is shadow on road and road color is different, \n # S channel works better. \n hls_comb = np.zeros_like(s_channel).astype(np.uint8)\n hls_comb[((s_channel > 1) & (l_channel == 0)) | ((s_channel == 0) & (h_channel > 1) & (l_channel > 1))] = 255 \n # trying to use both S channel and R channel\n #hls_comb[((s_channel > 1) & (h_channel > 1)) | (R > 1)] = 255\n \n # return combined hls image \n return hls_comb\n\n\ndef combine_grad_hls(grad, hls):\n \"\"\" \n #---------------------\n # This function combines gradient and hls images into one.\n # For binary gradient image, if pixel is bright, set that pixel value in reulting image to 255\n # For binary hls image, if pixel is bright, set that pixel value in resulting image to 255 \n # Edit: Assign different values to distinguish them\n # \n \"\"\"\n result = np.zeros_like(hls).astype(np.uint8)\n #result[((grad > 1) | (hls > 1))] = 255\n result[(grad > 1)] = 100\n result[(hls > 1)] = 255\n\n return result\n" ]
[ [ "numpy.absolute", "numpy.sqrt", "numpy.max", "numpy.copy", "numpy.zeros_like" ] ]
yangzhaonan18/trafficSignDetect
[ "a84bfe4d0b74593b1671361d2aef547785ea4958" ]
[ "expand_rect.py" ]
[ "# -*- coding=utf-8 -*-\n# py37\n\n\nimport numpy as np\n\n\ndef expand_yellow(list):\n list = np.array(list)\n print(list.shape)\n unite_list = []\n for i in range(len(list)):\n if list[i][2] > 10 and list[i][3] > 10:\n if list[i][2] < list[i][3]: # 黄色区域的宽度 小于 高度的时候,将不变看成是 黄色施工标志进行尺寸的调整\n list[i][0] = int(list[i][0] - list[i][2] * 0.2)\n # list[i][1] = int(list[i][1] - list[i][3] * 0.2)\n list[i][2] = int(list[i][2] * 1.8)\n # list[i][3] = int(list[i][3] * 1.3)\n unite_list.append(list[i])\n return unite_list\n\n\ndef expand_blue(list):\n list = np.array(list)\n print(list.shape)\n unite_list = []\n for i in range(len(list)):\n if list[i][2] > 5 and list[i][3] > 5:\n if list[i][2] < list[i][3]: # 黄色区域的宽度 小于 高度的时候,将不变看成是 黄色施工标志进行尺寸的调整\n list[i][0] = int(list[i][0] - list[i][2] * 0.2)\n list[i][1] = int(list[i][1] - list[i][3] * 0.2)\n list[i][2] = int(list[i][2] * 1.5)\n list[i][3] = int(list[i][3] * 1.5)\n unite_list.append(list[i])\n return unite_list\n" ]
[ [ "numpy.array" ] ]
opensourceai/yolov3-tensorflow-cn
[ "2e41fb40cd2a2c8f60152cf51be6b657b9aadb1a" ]
[ "core/utils.py" ]
[ "import colorsys\nimport numpy as np\nimport tensorflow as tf\nfrom collections import Counter\nfrom PIL import ImageFont, ImageDraw\n\n\n# Discard all boxes with low scores and high IOU 丢弃所有低分和高IOU的盒子,和自身iou高的boxe\ndef gpu_nms(boxes, scores, num_classes, max_boxes=50, score_thresh=0.3, iou_thresh=0.5):\n \"\"\"\n /*----------------------------------- NMS(非最大抑制) on gpu ---------------------------------------*/\n\n Arguments:\n boxes -- tensor of shape [1, 10647, 4] # 10647 boxes\n scores -- tensor of shape [1, 10647, num_classes], scores of boxes\n classes -- the return value of function `read_coco_names`\n Note:Applies Non-max suppression (NMS) to set of boxes. Prunes away boxes that have high\n intersection-over-union (IOU) overlap with previously selected boxes.\n\n max_boxes -- integer, maximum number of predicted boxes you'd like, default is 20 你想要的最大预测宽数\n score_thresh -- real value, if [ highest class probability score < score_threshold]\n then get rid of the corresponding box # 舍弃相应的box\n iou_thresh -- real value, \"intersection over union\" threshold used for NMS filtering\n \"\"\"\n\n boxes_list, label_list, score_list = [], [], []\n max_boxes = tf.constant(max_boxes, dtype='int32')\n\n # since we do nms for single image, then reshape it\n boxes = tf.reshape(boxes, [-1, 4]) # '-1' means we don't konw the exact number of boxes\n # confs = tf.reshape(confs, [-1,1])\n score = tf.reshape(scores, [-1, num_classes]) # 10647x80\n # print(score)\n\n # Step 1: Create a filtering mask based on \"box_class_scores\" by using \"threshold\".\n mask = tf.greater_equal(score, tf.constant(score_thresh)) # score大于等于0.3\n # print(\"mask==> : \", mask)\n # Step 2: Do non_max_suppression for each class\n for i in range(num_classes):\n # Step 3: Apply the mask to scores, boxes and pick them out\n filter_boxes = tf.boolean_mask(boxes, mask[:, i]) # 选出有第i类的boxes的张量信息\n # print(boxes, mask)\n # exit()\n filter_score = tf.boolean_mask(score[:, i], mask[:, i]) # 选出有第i类的分数的张量信息\n # 这是个超级赞的方法, 进过non_max_suppression挑选索引\n nms_indices = tf.image.non_max_suppression(boxes=filter_boxes,\n scores=filter_score,\n max_output_size=max_boxes,\n iou_threshold=iou_thresh, name='nms_indices')\n # 转换为标签\n label_list.append(tf.ones_like(tf.gather(filter_score, nms_indices), 'int32') * i)\n boxes_list.append(tf.gather(filter_boxes, nms_indices)) # 第几个c(c∈10647)中含有第i类box(4维张量)\n score_list.append(tf.gather(filter_score, nms_indices)) # 第几个c(c∈10647)中含有第i类预测列表(80维张量,包含所有的种类的预测的各类概率)\n # print(len(label_list))\n boxes = tf.concat(boxes_list, axis=0)\n score = tf.concat(score_list, axis=0)\n label = tf.concat(label_list, axis=0)\n\n return boxes, score, label\n\n\ndef py_nms(boxes, scores, max_boxes=50, iou_thresh=0.5):\n \"\"\"\n 按照分数排序,选出最多50个,大于0.5阈值的方框\n Pure Python NMS baseline.\n\n Arguments: boxes => shape of [-1, 4], the value of '-1' means that dont know the\n exact number of boxes\n scores => shape of [-1,]\n max_boxes => representing the maximum of boxes to be selected by non_max_suppression 最大框数\n iou_thresh => representing iou_threshold for deciding to keep boxes\n \"\"\"\n assert boxes.shape[1] == 4 and len(scores.shape) == 1\n\n # 左下角坐标,右上角坐标\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1] # 从大到小排序, order保存排序好的原位置的索引\n\n keep = [] # 保存index\n while order.size > 0: # 检测order中是否还有元素\n i = order[0] # 获取最高分\n keep.append(i) # 保存最高分数index\n xx1 = np.maximum(x1[i], x1[order[1:]]) # 除本身外的其他,x坐标,计算最大值 ,还能带广播\n yy1 = 
np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter) # 6得一匹\n # 挑选除 iou小于阈值的方框 ---> 找出不是同一个目标的anchor\n inds = np.where(ovr <= iou_thresh)[0] # np.where 返回元组,元组中保存一个列表,列表中保存排序后的值的索引\n order = order[inds + 1]\n\n return keep[:max_boxes]\n\n\ndef cpu_nms(boxes, scores, num_classes, max_boxes=50, score_thresh=0.3, iou_thresh=0.5):\n \"\"\"\n /*----------------------------------- NMS on cpu ---------------------------------------*/\n Arguments:\n boxes ==> shape [1, 10647, 4]\n scores ==> shape [1, 10647, num_classes] prods * confs\n \"\"\"\n\n # 删去第一维度\n boxes = boxes.reshape(-1, 4)\n scores = scores.reshape(-1, num_classes) # [10647,num_class]\n # Picked bounding boxes\n picked_boxes, picked_score, picked_label = [], [], []\n\n for i in range(num_classes):\n # 条件判断\n indices = np.where(scores[:, i] >= score_thresh) # 第几个anchor的第i类的分数是否大于阈值\n filter_boxes = boxes[indices] # 根据index找到该boxes\n filter_scores = scores[:, i][indices]\n if len(filter_boxes) == 0: continue # 如果没有该boxes跳过\n # do non_max_suppression on the cpu 挑选出进过非最大抑制的方框\n indices = py_nms(filter_boxes, filter_scores, # 返回index\n max_boxes=max_boxes, iou_thresh=iou_thresh)\n picked_boxes.append(filter_boxes[indices])\n picked_score.append(filter_scores[indices])\n picked_label.append(np.ones(len(indices), dtype='int32') * i) # 类别index\n if len(picked_boxes) == 0: return None, None, None\n\n # (num,4), (num,1),(num,)\n boxes = np.concatenate(picked_boxes, axis=0)\n score = np.concatenate(picked_score, axis=0)\n label = np.concatenate(picked_label, axis=0)\n\n return boxes, score, label\n\n\ndef resize_image_correct_bbox(image, boxes, image_h, image_w):\n origin_image_size = tf.to_float(tf.shape(image)[0:2])\n image = tf.image.resize_images(image, size=[image_h, image_w]) # 图片缩放\n\n # correct bbox 边框修正\n xx1 = boxes[:, 0] * image_w / origin_image_size[1]\n yy1 = boxes[:, 1] * image_h / origin_image_size[0]\n xx2 = boxes[:, 2] * image_w / origin_image_size[1]\n yy2 = boxes[:, 3] * image_h / origin_image_size[0]\n idx = boxes[:, 4]\n\n boxes = tf.stack([xx1, yy1, xx2, yy2, idx], axis=1)\n return image, boxes\n\n\ndef draw_boxes(image, boxes, scores, labels, classes, detection_size,\n font='data/font/HuaWenXinWei-1.ttf', show=True):\n \"\"\"\n :param boxes, shape of [num, 4]\n :param scores, shape of [num, ]\n :param labels, shape of [num, ]\n :param image,\n :param classes, the return list from the function `read_coco_names`\n \"\"\"\n if boxes is None: return image\n draw = ImageDraw.Draw(image)\n # draw settings\n font = ImageFont.truetype(font=font, size=np.floor(2e-2 * image.size[1]).astype('int32'))\n hsv_tuples = [(x / len(classes), 0.9, 1.0) for x in range(len(classes))]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))\n for i in range(len(labels)): # for each bounding box, do:\n bbox, score, label = boxes[i], scores[i], classes[labels[i]]\n bbox_text = \"%s %.2f\" % (label, score)\n text_size = draw.textsize(bbox_text, font)\n # convert_to_original_size\n detection_size, original_size = np.array(detection_size), np.array(image.size)\n ratio = original_size / detection_size\n bbox = list((bbox.reshape(2, 2) * ratio).reshape(-1))\n # 画框(bbox左上角的点,右上角的点)\n draw.rectangle(bbox, outline=colors[labels[i]], width=3)\n # 
计算文本框的坐标左上角的点\n text_origin = bbox[:2] - np.array([0, text_size[1]])\n # 画出文本框的\n draw.rectangle([tuple(text_origin), tuple(text_origin + text_size)], fill=colors[labels[i]])\n # 在文本框中填入文字\n draw.text(tuple(text_origin), bbox_text, fill=(0, 0, 0), font=font)\n\n image.show() if show else None\n return image\n\n\ndef draw_Chinese(image, txt, coordinate, font='data/font/HuaWenXinWei-1.ttf'):\n draw = ImageDraw.Draw(image)\n font = ImageFont.truetype(font=font, size=np.floor(3e-2 * image.size[1]).astype('int32'))\n draw.text(coordinate, txt, fill=(255, 255, 0), font=font)\n return image\n\n\ndef read_coco_names(class_file_name):\n names = {}\n with open(class_file_name, 'r') as data: # 直接读取所有文件内容\n for ID, name in enumerate(data): # 按列进行读取\n names[ID] = name.strip('\\n') # 去除换行符号\n return names\n\n\n# 讲模型转换为一个模型文件\ndef freeze_graph(sess, output_file, output_node_names):\n output_graph_def = tf.graph_util.convert_variables_to_constants( # 讲变量转化为常量\n sess,\n sess.graph.as_graph_def(),\n output_node_names,\n )\n\n with tf.gfile.GFile(output_file, \"wb\") as f:\n f.write(output_graph_def.SerializeToString())\n # .output_graph_def.node 图中所有的节点\n print(\"=> {} ops written to {}.\".format(len(output_graph_def.node), output_file))\n\n\n# 读取模型\ndef read_pb_return_tensors(graph, pb_file, return_elements):\n with tf.gfile.FastGFile(pb_file, 'rb') as f:\n frozen_graph_def = tf.GraphDef()\n frozen_graph_def.ParseFromString(f.read())\n\n with graph.as_default():\n return_elements = tf.import_graph_def(frozen_graph_def,\n return_elements=return_elements)\n input_tensor, output_tensors = return_elements[0], return_elements[1:]\n\n return input_tensor, output_tensors\n\n\ndef load_weights(var_list, weights_file):\n \"\"\"\n Loads and converts pre-trained weights.\n :param var_list: list of network variables.\n :param weights_file: name of the binary file.\n :return: list of assign ops\n \"\"\"\n with open(weights_file, \"rb\") as fp:\n np.fromfile(fp, dtype=np.int32, count=5) # 读取前5个,(跳过前5个)\n # print(np.fromfile(fp, dtype=np.int32, count=-1))\n # print(fp)\n # exit()\n weights = np.fromfile(fp, dtype=np.float32) # 读取所有\n\n ptr = 0\n i = 0\n assign_ops = []\n # for var in var_list:\n # print(var)\n # exit()\n while i < len(var_list) - 1:\n var1 = var_list[i]\n print(\"=> loading \", var1.name)\n var2 = var_list[i + 1]\n print(\"=> loading \", var2.name)\n # do something only if we process conv layer\n if 'Conv' in var1.name.split('/')[-2]:\n # check type of next layer\n if 'BatchNorm' in var2.name.split('/')[-2]:\n # load batch norm params\n gamma, beta, mean, var = var_list[i + 1:i + 5]\n batch_norm_vars = [beta, gamma, mean, var]\n for var in batch_norm_vars:\n shape = var.shape.as_list()\n num_params = np.prod(shape) # 计算BN层的参数量\n # 读取相对应的参数量\n var_weights = weights[ptr:ptr + num_params].reshape(shape) # 恢复shape\n ptr += num_params\n assign_ops.append(tf.assign(var, var_weights, validate_shape=True))\n # we move the pointer by 4, because we loaded 4 variables\n i += 4\n elif 'Conv' in var2.name.split('/')[-2]:\n # load biases\n bias = var2\n bias_shape = bias.shape.as_list()\n bias_params = np.prod(bias_shape)\n bias_weights = weights[ptr:ptr + bias_params].reshape(bias_shape)\n ptr += bias_params\n assign_ops.append(tf.assign(bias, bias_weights, validate_shape=True))\n # we loaded 1 variable\n i += 1\n # we can load weights of conv layer\n shape = var1.shape.as_list()\n num_params = np.prod(shape)\n\n # 这是什么沙雕模型文件需要这种加载方式\n var_weights = weights[ptr:ptr + num_params].reshape(\n (shape[3], shape[2], 
shape[0], shape[1])) # transposed layout in the weight file\n # remember to transpose to column-major (swap dimensions)\n var_weights = np.transpose(var_weights, (2, 3, 1, 0))\n ptr += num_params\n assign_ops.append(\n tf.assign(var1, var_weights, validate_shape=True))\n i += 1\n\n return assign_ops\n\n\ndef get_anchors(anchors_path, image_h, image_w):\n '''loads the anchors from a file'''\n with open(anchors_path) as f:\n anchors = f.readline()\n # print(anchors)\n anchors = np.array(anchors.split(), dtype=np.float32)\n anchors = anchors.reshape(-1, 2)\n # print(anchors)\n '''\n [[108 152]\n [146 174]\n [157 240]\n [192 342]\n [240 357]\n [307 286]\n [283 402]\n [397 348]\n [357 394]]\n '''\n anchors[:, 1] = anchors[:, 1] * image_h\n anchors[:, 0] = anchors[:, 0] * image_w\n return anchors.astype(np.int32)\n\n\ndef bbox_iou(A, B):\n intersect_mins = np.maximum(A[:, 0:2], B[:, 0:2])\n intersect_maxs = np.minimum(A[:, 2:4], B[:, 2:4])\n intersect_wh = np.maximum(intersect_maxs - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n\n # product along the given axis\n A_area = np.prod(A[:, 2:4] - A[:, 0:2], axis=1)\n B_area = np.prod(B[:, 2:4] - B[:, 0:2], axis=1)\n\n iou = intersect_area / (A_area + B_area - intersect_area)\n\n return iou\n\n\ndef evaluate(y_pred, y_true, iou_thresh=0.5, score_thresh=0.3):\n num_images = y_true[0].shape[0] # number of images to evaluate, Batch_size(8)\n num_classes = y_true[0][0][..., 5:].shape[-1]\n # initialize the dicts keyed by class_id\n true_labels_dict = {i: 0 for i in range(num_classes)} # {class: count}\n pred_labels_dict = {i: 0 for i in range(num_classes)}\n true_positive_dict = {i: 0 for i in range(num_classes)}\n\n # loop over each image\n for i in range(num_images):\n true_labels_list, true_boxes_list = [], []\n for j in range(3): # three feature maps\n # y_true : [feature_map_1(Batch_size,....), .....]\n true_probs_temp = y_true[j][i][..., 5:] # per-class probabilities\n true_boxes_temp = y_true[j][i][..., 0:4] # box coordinates\n\n # drop anchors in y_true that contain no object\n object_mask = true_probs_temp.sum(axis=-1) > 0\n\n # keep only the feature-map cells that contain an object\n true_probs_temp = true_probs_temp[object_mask] # shape(13x13x3,class_id)\n true_boxes_temp = true_boxes_temp[object_mask] # shape(13x13x3,boxes)\n\n true_labels_list += np.argmax(true_probs_temp, axis=-1).tolist() # class ids of cells that contain an object\n true_boxes_list += true_boxes_temp.tolist()\n\n # count the ground-truth instances of each class in this image\n if len(true_labels_list) != 0:\n # tally per-class ground-truth counts\n for cls, count in Counter(true_labels_list).items(): true_labels_dict[cls] += count\n\n # y_pred : [boxes , confs , probs ]\n pred_boxes = y_pred[0][i:i + 1] # [Batch_size,10647,4]\n pred_confs = y_pred[1][i:i + 1] # [Batch_size,10647,1]\n pred_probs = y_pred[2][i:i + 1] # [Batch_size,10647,class_num]\n\n # final detections after non-max suppression\n pred_boxes, pred_scores, pred_labels = cpu_nms(pred_boxes, pred_confs * pred_probs, num_classes,\n score_thresh=score_thresh, iou_thresh=iou_thresh)\n\n # all valid ground-truth boxes\n true_boxes = np.array(true_boxes_list)\n box_centers, box_sizes = true_boxes[:, 0:2], true_boxes[:, 2:4]\n\n # convert from center/size to corner coordinates\n true_boxes[:, 0:2] = box_centers - box_sizes / 2. 
# top-left corner\n true_boxes[:, 2:4] = true_boxes[:, 0:2] + box_sizes # bottom-right corner\n pred_labels_list = [] if pred_labels is None else pred_labels.tolist()\n\n # count how often each class appears in the predictions\n if len(pred_labels_list) != 0:\n for cls, count in Counter(pred_labels_list).items(): pred_labels_dict[cls] += count\n else:\n continue\n\n detected = []\n for k in range(len(pred_labels_list)):\n # IoU of predicted box k against all true boxes; prediction k matches true box m below\n iou = bbox_iou(pred_boxes[k:k + 1], true_boxes)\n # index of the largest IoU\n m = np.argmax(iou) # Extract index of largest overlap\n # IoU above threshold, predicted class k equals true class m, and m not matched yet\n if iou[m] >= iou_thresh and pred_labels_list[k] == true_labels_list[m] and m not in detected:\n true_positive_dict[true_labels_list[m]] += 1\n detected.append(m)\n\n # recall\n recall = sum(true_positive_dict.values()) / (sum(true_labels_dict.values()) + 1e-6)\n # precision\n precision = sum(true_positive_dict.values()) / (sum(pred_labels_dict.values()) + 1e-6)\n\n return recall, precision\n\n\ndef compute_ap(recall, precision):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n # Arguments\n recall: The recall curve (list).\n precision: The precision curve (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.0], recall, [1.0]))\n mpre = np.concatenate(([0.0], precision, [0.0]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n" ]
[ [ "tensorflow.concat", "numpy.minimum", "tensorflow.stack", "tensorflow.image.non_max_suppression", "tensorflow.gfile.GFile", "numpy.concatenate", "numpy.where", "tensorflow.boolean_mask", "tensorflow.import_graph_def", "tensorflow.gather", "numpy.argmax", "tensorflow.shape", "tensorflow.image.resize_images", "numpy.floor", "numpy.transpose", "numpy.array", "numpy.sum", "numpy.fromfile", "tensorflow.constant", "numpy.maximum", "tensorflow.reshape", "tensorflow.assign", "numpy.prod", "tensorflow.GraphDef", "tensorflow.gfile.FastGFile" ] ]
rishabh1694/dspn-mod
[ "94794bc64785a63d5a957214ae29b830c2a4bc3a" ]
[ "dspn/Object-Detection-Metrics-State/lib/Evaluator.py" ]
[ "###########################################################################################\n# #\n# Evaluator class: Implements the most popular metrics for object detection #\n# #\n# Developed by: Rafael Padilla ([email protected]) #\n# SMT - Signal Multimedia and Telecommunications Lab #\n# COPPE - Universidade Federal do Rio de Janeiro #\n# Last modification: Oct 9th 2018 #\n###########################################################################################\n\nimport os\nimport sys\nfrom collections import Counter, defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom BoundingBox import *\nfrom BoundingBoxes import *\nfrom utils import *\n\n\nclass Evaluator:\n def GetPascalVOCMetrics(\n self,\n boundingboxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation,\n ):\n \"\"\"Get the metrics used by the VOC Pascal 2012 challenge.\n Get\n Args:\n boundingboxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold: IOU threshold indicating which detections will be considered TP or FP\n (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolatio as described in the paper \"The PASCAL Visual Object Classes(VOC) Challenge\"\n or EveryPointInterpolation\" (ElevenPointInterpolation);\n Returns:\n A list of dictionaries. Each dictionary contains information and metrics of each class.\n The keys of each dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Negative detections;\n \"\"\"\n ret = (\n []\n ) # list containing metrics (precision, recall, average precision) of each class\n # List with all ground truths (Ex: [imageName,class,confidence=1, (bb coordinates XYX2Y2)])\n groundTruths = []\n # List with all detections (Ex: [imageName,class,confidence,(bb coordinates XYX2Y2)])\n detections = []\n # Get all classes\n classes = []\n # Loop through all bounding boxes and separate them into GTs and detections\n for bb in boundingboxes.getBoundingBoxes():\n # [imageName, class, confidence, (bb coordinates XYX2Y2)]\n if bb.getBBType() == BBType.GroundTruth:\n groundTruths.append(\n [\n bb.getImageName(),\n bb.getClassId(),\n 1,\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2),\n ]\n )\n else:\n detections.append(\n [\n bb.getImageName(),\n bb.getClassId(),\n bb.getConfidence(),\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2),\n ]\n )\n # get class\n if bb.getClassId() not in classes:\n classes.append(bb.getClassId())\n classes = sorted(classes)\n # Precision x Recall is obtained individually by each class\n # Loop through by classes\n for c in classes:\n # Get only detection of class c\n dects = []\n [dects.append(d) for d in detections if d[1] == c]\n # Get only ground truths of class c\n gts = []\n [gts.append(g) for g in groundTruths if g[1] == c]\n npos = len(gts)\n # sort detections by decreasing confidence\n dects = sorted(dects, key=lambda conf: conf[2], reverse=True)\n TP = 
np.zeros(len(dects))\n FP = np.zeros(len(dects))\n # create dictionary with amount of gts for each image\n det = Counter([cc[0] for cc in gts])\n for key, val in det.items():\n det[key] = np.zeros(val)\n print(\"Evaluating class: %s (%d detections)\" % (str(c), len(dects)))\n d_to_gt = defaultdict(list)\n for gt in gts:\n d_to_gt[gt[0]].append(gt)\n # Loop through detections\n for d in range(len(dects)):\n # print('dect %s => %s' % (dects[d][0], dects[d][3],))\n # Find ground truth image\n gt = d_to_gt[dects[d][0]]\n iouMax = float(\"inf\")\n jmax = None\n for j in range(len(gt)):\n # print('Ground truth gt => %s' % (gt[j][3],))\n gt_coord, gt_type = gt[j][3]\n det_coord, det_type = dects[d][3]\n correct_type = det_type == gt_type\n if correct_type:\n distance = np.linalg.norm(3 * (gt_coord - det_coord))\n if distance < iouMax:\n iouMax = distance\n jmax = j\n # Assign detection as true positive/don't care/false positive\n if iouMax <= IOUThreshold and jmax is not None:\n if det[dects[d][0]][jmax] == 0:\n TP[d] = 1 # count as true positive\n det[dects[d][0]][jmax] = 1 # flag as already 'seen'\n # print(\"TP\")\n else:\n FP[d] = 1 # count as false positive\n # print(\"FP\")\n # - A detected \"cat\" is overlapped with a GT \"cat\" with IOU >= IOUThreshold.\n else:\n FP[d] = 1 # count as false positive\n # print(\"FP\")\n # compute precision, recall and average precision\n acc_FP = np.cumsum(FP)\n acc_TP = np.cumsum(TP)\n rec = acc_TP / npos\n prec = np.divide(acc_TP, (acc_FP + acc_TP))\n # Depending on the method, call the right implementation\n if method == MethodAveragePrecision.EveryPointInterpolation:\n [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)\n else:\n [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)\n # add class result in the dictionary to be returned\n r = {\n \"class\": c,\n \"precision\": prec,\n \"recall\": rec,\n \"AP\": ap,\n \"interpolated precision\": mpre,\n \"interpolated recall\": mrec,\n \"total positives\": npos,\n \"total TP\": np.sum(TP),\n \"total FP\": np.sum(FP),\n }\n ret.append(r)\n return ret\n\n def PlotPrecisionRecallCurve(\n self,\n boundingBoxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation,\n showAP=False,\n showInterpolatedPrecision=False,\n savePath=None,\n showGraphic=True,\n ):\n \"\"\"PlotPrecisionRecallCurve\n Plot the Precision x Recall curve for a given class.\n Args:\n boundingBoxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold (optional): IOU threshold indicating which detections will be considered\n TP or FP (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolation as described in the paper \"The PASCAL Visual Object Classes (VOC) Challenge\"\n (ElevenPointInterpolation).\n showAP (optional): if True, the average precision value will be shown in the title of\n the graph (default = False);\n showInterpolatedPrecision (optional): if True, it will show in the plot the interpolated\n precision (default = False);\n savePath (optional): if informed, the plot will be saved as an image in this path\n (ex: /home/mywork/ap.png) (default = None);\n showGraphic (optional): if True, the plot will be shown (default = True)\n Returns:\n A list of dictionaries. 
Each dictionary contains information and metrics of each class.\n The keys of each dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Positive detections;\n \"\"\"\n results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold, method)\n result = None\n # Each result represents a class\n for result in results:\n if result is None:\n raise IOError(\"Error: A class could not be found.\")\n\n classId = result[\"class\"]\n precision = result[\"precision\"]\n recall = result[\"recall\"]\n average_precision = result[\"AP\"]\n mpre = result[\"interpolated precision\"]\n mrec = result[\"interpolated recall\"]\n npos = result[\"total positives\"]\n total_tp = result[\"total TP\"]\n total_fp = result[\"total FP\"]\n\n if showGraphic is False:\n continue\n\n plt.close()\n if showInterpolatedPrecision:\n if method == MethodAveragePrecision.EveryPointInterpolation:\n plt.plot(\n mrec, mpre, \"--r\", label=\"Interpolated precision (every point)\"\n )\n elif method == MethodAveragePrecision.ElevenPointInterpolation:\n # Uncomment the line below if you want to plot the area\n # plt.plot(mrec, mpre, 'or', label='11-point interpolated precision')\n # Remove duplicates, getting only the highest precision of each recall value\n nrec = []\n nprec = []\n for idx in range(len(mrec)):\n r = mrec[idx]\n if r not in nrec:\n idxEq = np.argwhere(mrec == r)\n nrec.append(r)\n nprec.append(max([mpre[int(id)] for id in idxEq]))\n plt.plot(nrec, nprec, \"or\", label=\"11-point interpolated precision\")\n plt.plot(recall, precision, label=\"Precision\")\n plt.xlabel(\"recall\")\n plt.ylabel(\"precision\")\n if showAP:\n ap_str = \"{0:.2f}%\".format(average_precision * 100)\n # ap_str = \"{0:.4f}%\".format(average_precision * 100)\n plt.title(\n \"Precision x Recall curve \\nClass: %s, AP: %s\"\n % (str(classId), ap_str)\n )\n else:\n plt.title(\"Precision x Recall curve \\nClass: %s\" % str(classId))\n plt.legend(shadow=True)\n plt.grid()\n ############################################################\n # Uncomment the following block to create plot with points #\n ############################################################\n # plt.plot(recall, precision, 'bo')\n # labels = ['R', 'Y', 'J', 'A', 'U', 'C', 'M', 'F', 'D', 'B', 'H', 'P', 'E', 'X', 'N', 'T',\n # 'K', 'Q', 'V', 'I', 'L', 'S', 'G', 'O']\n # dicPosition = {}\n # dicPosition['left_zero'] = (-30,0)\n # dicPosition['left_zero_slight'] = (-30,-10)\n # dicPosition['right_zero'] = (30,0)\n # dicPosition['left_up'] = (-30,20)\n # dicPosition['left_down'] = (-30,-25)\n # dicPosition['right_up'] = (20,20)\n # dicPosition['right_down'] = (20,-20)\n # dicPosition['up_zero'] = (0,30)\n # dicPosition['up_right'] = (0,30)\n # dicPosition['left_zero_long'] = (-60,-2)\n # dicPosition['down_zero'] = (-2,-30)\n # vecPositions = [\n # dicPosition['left_down'],\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['right_zero'], #'R', 'Y', 'J', 'A',\n # dicPosition['left_up'],\n # dicPosition['left_up'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'U', 'C', 'M', 'F',\n # 
dicPosition['left_zero'],\n # dicPosition['right_up'],\n # dicPosition['right_down'],\n # dicPosition['down_zero'], #'D', 'B', 'H', 'P'\n # dicPosition['left_up'],\n # dicPosition['up_zero'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'E', 'X', 'N', 'T',\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['left_zero_long'],\n # dicPosition['left_zero_slight'], # 'K', 'Q', 'V', 'I',\n # dicPosition['right_down'],\n # dicPosition['left_down'],\n # dicPosition['right_up'],\n # dicPosition['down_zero']\n # ] # 'L', 'S', 'G', 'O'\n # for idx in range(len(labels)):\n # box = dict(boxstyle='round,pad=.5',facecolor='yellow',alpha=0.5)\n # plt.annotate(labels[idx],\n # xy=(recall[idx],precision[idx]), xycoords='data',\n # xytext=vecPositions[idx], textcoords='offset points',\n # arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"),\n # bbox=box)\n if savePath is not None:\n plt.savefig(os.path.join(savePath, classId + \".png\"))\n if showGraphic is True:\n plt.show()\n # plt.waitforbuttonpress()\n plt.pause(0.05)\n return results\n\n @staticmethod\n def CalculateAveragePrecision(rec, prec):\n mrec = []\n mrec.append(0)\n [mrec.append(e) for e in rec]\n mrec.append(1)\n mpre = []\n mpre.append(0)\n [mpre.append(e) for e in prec]\n mpre.append(0)\n for i in range(len(mpre) - 1, 0, -1):\n mpre[i - 1] = max(mpre[i - 1], mpre[i])\n ii = []\n for i in range(len(mrec) - 1):\n if mrec[1:][i] != mrec[0:-1][i]:\n ii.append(i + 1)\n ap = 0\n for i in ii:\n ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i])\n # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]\n return [ap, mpre[0 : len(mpre) - 1], mrec[0 : len(mpre) - 1], ii]\n\n @staticmethod\n # 11-point interpolated average precision\n def ElevenPointInterpolatedAP(rec, prec):\n # def CalculateAveragePrecision2(rec, prec):\n mrec = []\n # mrec.append(0)\n [mrec.append(e) for e in rec]\n # mrec.append(1)\n mpre = []\n # mpre.append(0)\n [mpre.append(e) for e in prec]\n # mpre.append(0)\n recallValues = np.linspace(0, 1, 11)\n recallValues = list(recallValues[::-1])\n rhoInterp = []\n recallValid = []\n # For each recallValues (0, 0.1, 0.2, ... 
, 1)\n for r in recallValues:\n # Obtain all recall values higher or equal than r\n argGreaterRecalls = np.argwhere(mrec[:] >= r)\n pmax = 0\n # If there are recalls above r\n if argGreaterRecalls.size != 0:\n pmax = max(mpre[argGreaterRecalls.min() :])\n recallValid.append(r)\n rhoInterp.append(pmax)\n # By definition AP = sum(max(precision whose recall is above r))/11\n ap = sum(rhoInterp) / 11\n # Generating values for the plot\n rvals = []\n rvals.append(recallValid[0])\n [rvals.append(e) for e in recallValid]\n rvals.append(0)\n pvals = []\n pvals.append(0)\n [pvals.append(e) for e in rhoInterp]\n pvals.append(0)\n # rhoInterp = rhoInterp[::-1]\n cc = []\n for i in range(len(rvals)):\n p = (rvals[i], pvals[i - 1])\n if p not in cc:\n cc.append(p)\n p = (rvals[i], pvals[i])\n if p not in cc:\n cc.append(p)\n recallValues = [i[0] for i in cc]\n rhoInterp = [i[1] for i in cc]\n return [ap, rhoInterp, recallValues, None]\n\n # For each detections, calculate IOU with reference\n @staticmethod\n def _getAllIOUs(reference, detections):\n ret = []\n bbReference = reference.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n # img = np.zeros((200,200,3), np.uint8)\n for d in detections:\n bb = d.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n iou = Evaluator.iou(bbReference, bb)\n # Show blank image with the bounding boxes\n # img = add_bb_into_image(img, d, color=(255,0,0), thickness=2, label=None)\n # img = add_bb_into_image(img, reference, color=(0,255,0), thickness=2, label=None)\n ret.append((iou, reference, d)) # iou, reference, detection\n # cv2.imshow(\"comparing\",img)\n # cv2.waitKey(0)\n # cv2.destroyWindow(\"comparing\")\n return sorted(\n ret, key=lambda i: i[0], reverse=True\n ) # sort by iou (from highest to lowest)\n\n @staticmethod\n def iou(boxA, boxB):\n # if boxes dont intersect\n if Evaluator._boxesIntersect(boxA, boxB) is False:\n return 0\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n union = Evaluator._getUnionAreas(boxA, boxB, interArea=interArea)\n # intersection over union\n iou = interArea / union\n assert iou >= 0\n return iou\n\n # boxA = (Ax1,Ay1,Ax2,Ay2)\n # boxB = (Bx1,By1,Bx2,By2)\n @staticmethod\n def _boxesIntersect(boxA, boxB):\n if boxA[0] > boxB[2]:\n return False # boxA is right of boxB\n if boxB[0] > boxA[2]:\n return False # boxA is left of boxB\n if boxA[3] < boxB[1]:\n return False # boxA is above boxB\n if boxA[1] > boxB[3]:\n return False # boxA is below boxB\n return True\n\n @staticmethod\n def _getIntersectionArea(boxA, boxB):\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n # intersection area\n return (xB - xA + 1) * (yB - yA + 1)\n\n @staticmethod\n def _getUnionAreas(boxA, boxB, interArea=None):\n area_A = Evaluator._getArea(boxA)\n area_B = Evaluator._getArea(boxB)\n if interArea is None:\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n return float(area_A + area_B - interArea)\n\n @staticmethod\n def _getArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.pause", "numpy.linspace", "numpy.cumsum", "numpy.linalg.norm", "numpy.argwhere", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "numpy.sum", "numpy.divide", "matplotlib.pyplot.ylabel" ] ]
cmpute/pytorch-lightning
[ "695e0514f8e60a88f49786c33311f223be2e7357" ]
[ "tests/models/test_amp.py" ]
[ "import os\n\nimport pytest\nimport torch\n\nimport tests.base.develop_pipelines as tpipes\nimport tests.base.develop_utils as tutils\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.base import EvalModelTemplate\n\n\[email protected]\[email protected](\"backend\", ['dp', 'ddp'])\[email protected](not torch.cuda.is_available(), reason=\"test requires GPU machine\")\ndef test_amp_single_gpu(tmpdir, backend):\n \"\"\"Make sure DP/DDP + AMP work.\"\"\"\n tutils.reset_seed()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n gpus=1,\n distributed_backend=backend,\n precision=16,\n )\n\n model = EvalModelTemplate()\n # tutils.run_model_test(trainer_options, model)\n result = trainer.fit(model)\n\n assert result == 1\n\n\[email protected]\[email protected](\"backend\", ['dp', 'ddp'])\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_amp_multi_gpu(tmpdir, backend):\n \"\"\"Make sure DP/DDP + AMP work.\"\"\"\n tutils.set_random_master_port()\n\n model = EvalModelTemplate()\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n # gpus=2,\n gpus='0, 1', # test init with gpu string\n distributed_backend=backend,\n precision=16,\n )\n\n # tutils.run_model_test(trainer_options, model)\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n assert result\n\n\[email protected]\[email protected](\"backend\", ['dp', 'ddp'])\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_wandb(tmpdir, backend):\n \"\"\"Make sure DP/DDP + AMP work.\"\"\"\n from pytorch_lightning.loggers import WandbLogger\n tutils.set_random_master_port()\n\n model = EvalModelTemplate()\n logger = WandbLogger(name='utest')\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n gpus=2,\n distributed_backend=backend,\n precision=16,\n logger=logger,\n\n )\n # tutils.run_model_test(trainer_options, model)\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n assert result\n trainer.test(model)\n\n\[email protected]\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_amp_gpu_ddp_slurm_managed(tmpdir):\n \"\"\"Make sure DDP + AMP work.\"\"\"\n # simulate setting slurm flags\n tutils.set_random_master_port()\n os.environ['SLURM_LOCALID'] = str(0)\n\n model = EvalModelTemplate()\n\n # exp file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # exp file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n gpus=[0],\n distributed_backend='ddp',\n precision=16,\n checkpoint_callback=checkpoint,\n logger=logger,\n )\n trainer.is_slurm_managing_tasks = True\n result = trainer.fit(model)\n\n # correct result and ok accuracy\n assert result == 1, 'amp + ddp model failed to complete'\n\n # test root model address\n assert trainer.resolve_root_node_address('abc') == 'abc'\n assert trainer.resolve_root_node_address('abc[23]') == 'abc23'\n assert trainer.resolve_root_node_address('abc[23-24]') == 'abc23'\n assert trainer.resolve_root_node_address('abc[23-24, 45-40, 40]') == 'abc23'\n\n\ndef test_cpu_model_with_amp(tmpdir):\n \"\"\"Make sure model trains on CPU.\"\"\"\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n limit_train_batches=0.4,\n limit_val_batches=0.4,\n 
precision=16\n )\n\n model = EvalModelTemplate()\n\n with pytest.raises((MisconfigurationException, ModuleNotFoundError)):\n tpipes.run_model_test(trainer_options, model, on_gpu=False)\n" ]
[ [ "torch.cuda.device_count", "torch.cuda.is_available" ] ]
siposcsaba89/tensorrtx
[ "aad17004e0f2420babf3ea245ee2bab03e05baad" ]
[ "tsm/gen_wts.py" ]
[ "import argparse\nimport struct\n\nimport torch\nimport numpy as np\n\n\ndef write_one_weight(writer, name, weight):\n assert isinstance(weight, np.ndarray)\n values = weight.reshape(-1)\n writer.write('{} {}'.format(name, len(values)))\n for value in values:\n writer.write(' ')\n # float to bytes to hex_string\n writer.write(struct.pack('>f', float(value)).hex())\n writer.write('\\n')\n\n\ndef convert_name(name):\n return name.replace(\"module.\", \"\").replace(\"base_model.\", \"\").\\\n replace(\"net.\", \"\").replace(\"new_fc\", \"fc\").replace(\"backbone.\", \"\").\\\n replace(\"cls_head.fc_cls\", \"fc\").replace(\".conv.\", \".\").\\\n replace(\"conv1.bn\", \"bn1\").replace(\"conv2.bn\", \"bn2\").\\\n replace(\"conv3.bn\", \"bn3\").replace(\"downsample.bn\", \"downsample.1\").\\\n replace(\"downsample.weight\", \"downsample.0.weight\")\n\n\ndef main(args):\n ckpt = torch.load(args.checkpoint)['state_dict']\n ckpt = {k: v for k, v in ckpt.items() if 'num_batches_tracked' not in k}\n with open(args.out_filename, \"w\") as f:\n f.write(f\"{len(ckpt)}\\n\")\n for k, v in ckpt.items():\n key = convert_name(k)\n write_one_weight(f, key, v.cpu().numpy())\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"checkpoint\", type=str, help=\"Path to checkpoint file\")\n parser.add_argument(\"--out-filename\",\n type=str,\n default=\"tsm_r50.wts\",\n help=\"Path to converted wegiths file\")\n args = parser.parse_args()\n main(args)\n" ]
[ [ "torch.load" ] ]
robintwhite/scholar-scrape
[ "c5862c2233e0564d83e142f475b0cb6466b7eb5e" ]
[ "main.py" ]
[ "# %% Imports\nimport numpy as np \nimport pandas as pd \nfrom scholarly import scholarly\nfrom utils import scholar\nimport os\nimport time\nimport random\nimport requests, bs4\nfrom tqdm import tqdm\n\n# %% Get search terms from file\nsearch_terms = []\nwith open(\"search_terms.txt\") as f:\n lines = f.readlines()\n for line in lines:\n search_terms.append(line.strip())\n\n# %% Get search terms\nsq = scholar.ScholarQuery()\nphrase = sq._parenthesize_phrases(\",\".join(search_terms)) #\",\".join(search_terms)\nprint(phrase)\n\n# %% Create url using scholar utils with more advanced search options\nssq = scholar.SearchScholarQuery()\nssq.set_words(phrase) #can set more than one and with quotations for exact phrase\n#ssq.set_phrase('Xradia Versa') #specific phrase\nssq.set_timeframe(start=None, end=None)\nurl = ssq.get_url()\nprint(url)\n\n# %% Get total number of articles for query\n# NOTE: This only works for more than 10 results as the text changes from 'About XX results' to 'XX results'\n# Need to fix this to be more robust\ndef get_num_results(url):\n ''' Return the total number of results from the search query url. Taken from scholar.py'''\n res = requests.get(url)\n res.raise_for_status()\n soup = bs4.BeautifulSoup(res.text, features=\"lxml\")\n tag = soup.find(name='div', attrs={'id': 'gs_ab_md'})\n if tag is not None:\n raw_text = tag.findAll(text=True)\n # raw text is a list because the body contains <b> etc\n if raw_text is not None and len(raw_text) > 0:\n try:\n num_results = raw_text[0].split()[1] # Only for 'About XX results' text\n # num_results may now contain commas to separate\n # thousands, strip:\n num_results = num_results.replace(',', '')\n return int(num_results)\n except (IndexError, ValueError):\n print('Error: Possibly fewer than 1 page of results') \n pass\n\n# %% \nnum_results = get_num_results(url)\nprint(num_results)\n\n# %%\nurl_scholarly = url.replace('http://scholar.google.com','') # call to url in scholarly 'https://scholar.google.com{0}'\n\nif num_results:\n article_list = []\n search_query = scholarly.search_pubs_custom_url(url_scholarly)\n for _ in tqdm(range(num_results)):\n try:\n article = next(search_query)\n # scholarly.pprint(article)\n article_list.append(article)\n time.sleep(random.uniform(0,1)) # random sleep for google\n except StopIteration:\n print('No more results to show')\n continue\nelse:\n print(f'No results to show: {num_results}')\n\n# %% Pandas database - get values\ncol_titles = ['authors', 'author_ids', 'title', 'abstract', 'pub_year', 'journal',\n 'citations', 'eprint_url', 'pub_url', 'citedby_url', 'scilab_url', \n 'related_articles_url', 'scholarbib_url']\n\nauthors = []\nauthor_ids = []\npaper_titles = []\nabstracts = []\nyear = []\njournals = []\ncitations = []\neprint_urls = []\npub_urls = []\ncitedby_urls = []\nscilab_urls = []\nrelated_articles_urls = []\nscholarbib_urls = []\n\nfor article in tqdm(article_list):\n authors.append(article['bib'].get('author', ''))\n\n author_ids.append(article.get('author_id', ''))\n\n paper_titles.append(article['bib'].get('title', ''))\n\n abstracts.append(article['bib'].get('abstract', ''))\n\n year.append(article['bib'].get('pub_year', ''))\n\n journals.append(article['bib'].get('venue', ''))\n\n citations.append(article.get('num_citations', ''))\n\n eprint_urls.append(article.get('eprint_url', ''))\n\n pub_urls.append(article.get('pub_url', ''))\n\n citedby_urls.append(article.get('citedby_url', ''))\n\n scilab_urls.append(article.get('url_add_sclib', ''))\n\n 
related_articles_urls.append(article.get('url_related_articles', ''))\n\n scholarbib_urls.append(article.get('url_scholarbib',''))\n\n\n# %% Pandas database - create dataframe\ndf = pd.DataFrame({\n 'authors': authors,\n 'author_ids': author_ids,\n 'title': paper_titles,\n 'abstract': abstracts,\n 'pub_year': year,\n 'journal': journals,\n 'citations': citations,\n 'eprint_url': eprint_urls,\n 'pub_url': pub_urls,\n 'citedby_url': citedby_urls,\n 'scilab_url': scilab_urls,\n 'related_articles_url': related_articles_urls,\n 'scholarbib_url': scholarbib_urls})\n\n# %%\ndf.to_csv('scholar_scrape.csv')\n# %%\n# Scrape citation url for exact author list, and journal \n\n# %%\n# import pickle\n# pickle.dump(article_list, open( \"test_article_list.p\", \"wb\" ) )\n\n# %%\n# Can create separate query for specific authors with more information and articles " ]
[ [ "pandas.DataFrame" ] ]
dibyanshushekhardey/Machine-Learning
[ "42047bc418096086a0c133dcd04c94f88342472b" ]
[ "Machine Learning Fundamentals with Python Datacamp/Unsupervised Learning in Python/Visualization with hierarchical clustering and t-SNE/hierarchies of stocks.py" ]
[ "# Import normalize\nfrom sklearn.preprocessing import normalize\n\n# Normalize the movements: normalized_movements\nnormalized_movements = normalize(movements)\n\n# Calculate the linkage: mergings\nmergings = linkage(normalized_movements, method='complete')\n\n# Plot the dendrogram\ndendrogram(mergings,\n labels=companies,\n leaf_rotation=90,\n leaf_font_size=6)\nplt.show()\n" ]
[ [ "sklearn.preprocessing.normalize" ] ]
lukebfunk/OpticalPooledScreens
[ "0f6a46c0f66d038d737e92f22adafb54a175005e" ]
[ "ops/in_situ.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom ops.constants import *\nimport ops.utils\n\n\ndef extract_base_intensity(maxed, peaks, cells, threshold_peaks):\n\n # reads outside of cells get label 0\n read_mask = (peaks > threshold_peaks)\n values = maxed[:, :, read_mask].transpose([2, 0, 1])\n labels = cells[read_mask]\n positions = np.array(np.where(read_mask)).T\n\n return values, labels, positions\n\n\ndef format_bases(values, labels, positions, cycles, bases): \n index = (CYCLE, cycles), (CHANNEL, bases)\n try:\n df = ops.utils.ndarray_to_dataframe(values, index)\n except ValueError:\n print('failed to reshape extracted pixels to sequencing bases, writing empty table')\n return pd.DataFrame()\n\n df_positions = pd.DataFrame(positions, columns=[POSITION_I, POSITION_J])\n df = (df.stack([CYCLE, CHANNEL])\n .reset_index()\n .rename(columns={0: INTENSITY, 'level_0': READ})\n .join(pd.Series(labels, name=CELL), on=READ)\n .join(df_positions, on=READ)\n .sort_values([CELL, READ, CYCLE])\n )\n\n return df\n\ndef do_median_call(df_bases, cycles=12, channels=4, correction_quartile=0, correction_only_in_cells=False, correction_by_cycle=False):\n \"\"\"Call reads from raw base signal using median correction. Use the \n `correction_only_in_cells` flag to specify if correction is based on reads within \n cells, or all reads.\n \"\"\"\n def correction(df,channels,correction_quartile,correction_only_in_cells):\n if correction_only_in_cells:\n # first obtain transformation matrix W\n X_ = dataframe_to_values(df.query('cell > 0'))\n _, W = transform_medians(X_.reshape(-1, channels),correction_quartile=correction_quartile)\n\n # then apply to all data\n X = dataframe_to_values(df)\n Y = W.dot(X.reshape(-1, channels).T).T.astype(int)\n else:\n X = dataframe_to_values(df)\n Y, W = transform_medians(X.reshape(-1, channels),correction_quartile=correction_quartile)\n return Y,W\n\n if correction_by_cycle:\n # this hasn't worked in practice very well, unclear why\n Y = np.empty(df_bases.pipe(len),dtype=df_bases.dtypes['intensity']).reshape(-1,channels)\n for cycle, (_, df_cycle) in enumerate(df_bases.groupby('cycle')):\n Y[cycle::cycles,:],_ = correction(df_cycle,channels,correction_quartile,correction_only_in_cells)\n else:\n Y,W = correction(df_bases,channels,correction_quartile,correction_only_in_cells)\n\n df_reads = call_barcodes(df_bases, Y, cycles=cycles, channels=channels)\n\n return df_reads\n\n\ndef clean_up_bases(df_bases):\n \"\"\"Sort. 
Pre-processing for `dataframe_to_values`.\n \"\"\"\n return df_bases.sort_values([WELL, TILE, CELL, READ, CYCLE, CHANNEL])\n\n\ndef call_cells(df_reads):\n \"\"\"Determine count of top barcodes \n \"\"\"\n cols = [WELL, TILE, CELL]\n s = (df_reads\n .drop_duplicates([WELL, TILE, READ])\n .groupby(cols)[BARCODE]\n .value_counts()\n .rename('count')\n .sort_values(ascending=False)\n .reset_index()\n .groupby(cols)\n )\n\n return (df_reads\n .join(s.nth(0)[BARCODE].rename(BARCODE_0), on=cols)\n .join(s.nth(0)['count'].rename(BARCODE_COUNT_0), on=cols)\n .join(s.nth(1)[BARCODE].rename(BARCODE_1), on=cols)\n .join(s.nth(1)['count'].rename(BARCODE_COUNT_1), on=cols)\n .join(s['count'].sum() .rename(BARCODE_COUNT), on=cols)\n .assign(**{BARCODE_COUNT_0: lambda x: x[BARCODE_COUNT_0].fillna(0),\n BARCODE_COUNT_1: lambda x: x[BARCODE_COUNT_1].fillna(0)})\n .drop_duplicates(cols)\n .drop([READ, BARCODE], axis=1) # drop the read\n .drop([POSITION_I, POSITION_J], axis=1) # drop the read coordinates\n .filter(regex='^(?!Q_)') # remove read quality scores\n .query('cell > 0') # remove reads not in a cell\n )\n\ndef call_cells_mapping(df_reads,df_pool):\n \"\"\"Determine count of top barcodes, barcodes prioritized if barcode maps to given pool design\n \"\"\"\n guide_info_cols = [SGRNA,GENE_SYMBOL,GROUP]\n\n # map reads\n df_mapped = (pd.merge(df_reads,df_pool[[PREFIX]],how='left',left_on=BARCODE,right_on=PREFIX)\n .assign(mapped = lambda x: pd.notnull(x[PREFIX]))\n .drop(PREFIX,axis=1)\n )\n\n # choose top 2 barcodes, priority given by (mapped,count)\n cols = [WELL, TILE, CELL]\n s = (df_mapped\n .drop_duplicates([WELL, TILE, READ])\n .groupby(cols+['mapped'])[BARCODE]\n .value_counts()\n .rename('count')\n .reset_index()\n .sort_values(['mapped','count'],ascending=False)\n .groupby(cols)\n )\n\n df_cells = (df_reads\n .join(s.nth(0)[BARCODE].rename(BARCODE_0), on=cols)\n .join(s.nth(0)['count'].rename(BARCODE_COUNT_0), on=cols)\n .join(s.nth(1)[BARCODE].rename(BARCODE_1), on=cols)\n .join(s.nth(1)['count'].rename(BARCODE_COUNT_1), on=cols)\n .join(s['count'].sum() .rename(BARCODE_COUNT), on=cols)\n .assign(**{BARCODE_COUNT_0: lambda x: x[BARCODE_COUNT_0].fillna(0),\n BARCODE_COUNT_1: lambda x: x[BARCODE_COUNT_1].fillna(0)})\n .drop_duplicates(cols)\n .drop([READ, BARCODE], axis=1) # drop the read\n .drop([POSITION_I, POSITION_J], axis=1) # drop the read coordinates\n .query('cell > 0') # remove reads not in a cell\n )\n\n # merge guide info; done here for speed over earlier\n df_cells = (pd.merge(df_cells,df_pool[[PREFIX] + guide_info_cols],how='left',left_on=BARCODE_0,right_on=PREFIX)\n .rename({col:col+'_0' for col in guide_info_cols},axis=1)\n .drop(PREFIX,axis=1)\n )\n df_cells = (pd.merge(df_cells,df_pool[[PREFIX]+ guide_info_cols],how='left',left_on=BARCODE_1,right_on=PREFIX)\n .rename({col:col+'_1' for col in guide_info_cols},axis=1)\n .drop(PREFIX,axis=1)\n )\n\n return df_cells\n\n\ndef dataframe_to_values(df, value='intensity'):\n \"\"\"Dataframe must be sorted on [cycle, channel]. \n Returns N x cycles x channels.\n \"\"\"\n cycles = df[CYCLE].value_counts()\n assert len(set(cycles)) == 1\n n_cycles = len(cycles)\n n_channels = len(df[CHANNEL].value_counts())\n x = np.array(df[value]).reshape(-1, n_cycles, n_channels)\n return x\n\n\ndef transform_medians(X,correction_quartile=0):\n \"\"\"For each dimension, find points where that dimension is max. Use median of those points to define new axes. 
\n Describe with linear transformation W so that W * X = Y.\n \"\"\"\n def get_medians(X,correction_quartile):\n arr = []\n for i in range(X.shape[1]):\n max_spots = X[X.argmax(axis=1) == i]\n try:\n arr += [np.median(max_spots[max_spots[:,i] >= np.quantile(max_spots,axis=0,q=correction_quartile)[i]],axis=0)]\n except:\n arr += [np.median(max_spots,axis=0)]\n M = np.array(arr)\n return M\n\n # def get_medians(X):\n # arr = []\n # for i in range(X.shape[1]):\n # arr += [np.median(X[X.argmax(axis=1) == i], axis=0)]\n # M = np.array(arr)\n # return M\n\n M = get_medians(X,correction_quartile).T\n M = M / M.sum(axis=0)\n W = np.linalg.inv(M)\n Y = W.dot(X.T).T.astype(int)\n return Y, W\n\n\ndef call_barcodes(df_bases, Y, cycles=12, channels=4):\n bases = sorted(set(df_bases[CHANNEL]))\n if any(len(x) != 1 for x in bases):\n raise ValueError('supplied weird bases: {0}'.format(bases))\n df_reads = df_bases.drop_duplicates([WELL, TILE, READ]).copy()\n df_reads[BARCODE] = call_bases_fast(Y.reshape(-1, cycles, channels), bases)\n Q = quality(Y.reshape(-1, cycles, channels))\n # needed for performance later\n for i in range(len(Q[0])):\n df_reads['Q_%d' % i] = Q[:,i]\n \n return (df_reads\n .assign(Q_min=lambda x: x.filter(regex='Q_\\d+').min(axis=1))\n .drop([CYCLE, CHANNEL, INTENSITY], axis=1)\n )\n\n\ndef call_bases_fast(values, bases):\n \"\"\"4-color: bases='ACGT'\n \"\"\"\n assert values.ndim == 3\n assert values.shape[2] == len(bases)\n calls = values.argmax(axis=2)\n calls = np.array(list(bases))[calls]\n return [''.join(x) for x in calls]\n\n\ndef quality(X):\n X = np.abs(np.sort(X, axis=-1).astype(float))\n Q = 1 - np.log(2 + X[..., -2]) / np.log(2 + X[..., -1])\n Q = (Q * 2).clip(0, 1)\n return Q\n\n\ndef reads_to_fastq(df, microscope='MN', dataset='DS', flowcell='FC'):\n\n wrap = lambda x: '{' + x + '}'\n join_fields = lambda xs: ':'.join(map(wrap, xs))\n\n a = '@{m}:{d}:{f}'.format(m=microscope, d=dataset, f=flowcell)\n b = join_fields([WELL, CELL, 'well_tile', READ, POSITION_I, POSITION_J])\n c = '\\n{b}\\n+\\n{{phred}}'.format(b=wrap(BARCODE))\n fmt = a + b + c \n \n well_tiles = sorted(set(df[WELL] + '_' + df[TILE]))\n fields = [WELL, TILE, CELL, READ, POSITION_I, POSITION_J, BARCODE]\n \n Q = df.filter(like='Q_').values\n \n reads = []\n for i, row in enumerate(df[fields].values):\n d = dict(zip(fields, row))\n d['phred'] = ''.join(phred(q) for q in Q[i])\n d['well_tile'] = well_tiles.index(d[WELL] + '_' + d[TILE])\n reads.append(fmt.format(**d))\n \n return reads\n \n\ndef dataframe_to_fastq(df, file, dataset):\n s = '\\n'.join(reads_to_fastq(df, dataset))\n with open(file, 'w') as fh:\n fh.write(s)\n fh.write('\\n')\n\n\ndef phred(q):\n \"\"\"Convert 0...1 to 0...30\n No \":\".\n No \"@\".\n No \"+\".\n \"\"\"\n n = int(q * 30 + 33)\n if n == 43:\n n += 1\n if n == 58:\n n += 1\n return chr(n)\n\n\ndef add_clusters(df_cells, barcode_col=BARCODE_0, radius=50,\n verbose=True, ij=(POSITION_I, POSITION_J)):\n \"\"\"Assigns -1 to clusters with only one cell.\n \"\"\"\n from scipy.spatial.kdtree import KDTree\n import networkx as nx\n\n I, J = ij\n x = df_cells[GLOBAL_X] + df_cells[J]\n y = df_cells[GLOBAL_Y] + df_cells[I]\n barcodes = df_cells[barcode_col]\n barcodes = np.array(barcodes)\n\n kdt = KDTree(np.array([x, y]).T)\n num_cells = len(df_cells)\n\n if verbose:\n message = 'searching for clusters among {} {} objects'\n print(message.format(num_cells, barcode_col))\n pairs = kdt.query_pairs(radius)\n pairs = np.array(list(pairs))\n\n x = barcodes[pairs]\n y = x[:, 0] == x[:, 
1]\n\n G = nx.Graph()\n G.add_edges_from(pairs[y])\n\n clusters = list(nx.connected_components(G))\n\n cluster_index = np.zeros(num_cells, dtype=int) - 1\n for i, c in enumerate(clusters):\n cluster_index[list(c)] = i\n\n df_cells = df_cells.copy()\n df_cells[CLUSTER] = cluster_index\n df_cells[CLUSTER_SIZE] = (df_cells\n .groupby(CLUSTER)[barcode_col].transform('size'))\n df_cells.loc[df_cells[CLUSTER] == -1, CLUSTER_SIZE] = 1\n return df_cells\n\n\ndef index_singleton_clusters(clusters):\n clusters = clusters.copy()\n filt = clusters == -1\n n = clusters.max()\n clusters[filt] = range(n, n + len(filt))\n return clusters\n\n\ndef join_by_cell_location(df_cells, df_ph, max_distance=4):\n \"\"\"Can speed up over independent fields of view with \n `ops.utils.groupby_apply2`.\n \"\"\"\n from scipy.spatial.kdtree import KDTree\n # df_cells = df_cells.sort_values(['well', 'tile', 'cell'])\n # df_ph = df_ph.sort_values(['well', 'tile', 'cell'])\n i_tree = df_ph['global_y']\n j_tree = df_ph['global_x']\n i_query = df_cells['global_y']\n j_query = df_cells['global_x']\n \n kdt = KDTree(list(zip(i_tree, j_tree)))\n distance, index = kdt.query(list(zip(i_query, j_query)))\n cell_ph = df_ph.iloc[index]['cell'].pipe(list)\n cols_left = ['well', 'tile', 'cell_ph']\n cols_right = ['well', 'tile', 'cell']\n cols_ph = [c for c in df_ph.columns if c not in df_cells.columns]\n return (df_cells\n .assign(cell_ph=cell_ph, distance=distance)\n .query('distance < @max_distance')\n .join(df_ph.set_index(cols_right)[cols_ph], on=cols_left)\n # .drop(['cell_ph'], axis=1)\n )\n\n" ]
[ [ "numpy.log", "pandas.merge", "pandas.notnull", "pandas.Series", "numpy.linalg.inv", "numpy.median", "numpy.quantile", "pandas.DataFrame", "numpy.sort", "numpy.array", "numpy.zeros", "numpy.where" ] ]
fciannel/tensor2tensor
[ "44f669058390bec03024baa04c1a33e91cc0909d" ]
[ "tensor2tensor/models/video/nfg_conv3d_test.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test when the latent-network encoder is a conv3d net.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nfrom tensor2tensor.models.video import nfg_test_utils\nimport tensorflow as tf\n\nconv3d_net_hparams = (\n (\"conv3d_net\", 2, 2, \"conv3d_net\", \"conditional\", -1, 3),\n (\"conv3d_dil\", 2, 2, \"conv3d_net\", \"conditional\", -1, -1, False, True),)\n\n\nclass NextFrameGlowConv3DTest(nfg_test_utils.NextFrameGlowTest,\n parameterized.TestCase):\n\n @parameterized.named_parameters(*conv3d_net_hparams)\n def testGlowTrainAndDecode(self, in_frames=1, out_frames=1,\n latent_dist_encoder=\"pointwise\",\n gen_mode=\"conditional\", pretrain_steps=-1,\n num_train_frames=-1, cond_first_frame=False,\n apply_dilations=False):\n self.GlowTrainAndDecode(\n in_frames=in_frames, out_frames=out_frames,\n latent_dist_encoder=latent_dist_encoder, gen_mode=gen_mode,\n pretrain_steps=pretrain_steps, num_train_frames=num_train_frames,\n cond_first_frame=cond_first_frame, apply_dilations=apply_dilations)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
alessandrocuda/ISANet
[ "e33a5a408554c900a7d282baa500bd8023a2cd2f" ]
[ "isanet/optimizer/utils.py" ]
[ "import numpy as np\n\ndef l_norm(l_v):\n \"\"\"Computes the norm of a list of numpy array.\n\n Parameters\n ----------\n l_v : array-like\n\n Returns\n -------\n float\n The norm of l_v.\n \"\"\"\n return np.sqrt(np.sum([np.sum(np.square(l_v[i])) for i in range(0, len(l_v))]))\n\ndef l_scalar_product(l_v, l_w):\n \"\"\"Computes the scalar product between two list of numpy array.\n\n Parameters\n ----------\n l_v : array-like\n l_w : array-like\n\n Returns\n -------\n float\n The scalar product between l_v and l_w.\n\n \"\"\"\n\n return np.sum([np.sum(np.multiply(l_v[i], l_w[i])) for i in range(0, len(l_v))])\n\ndef make_vector(l_v):\n \"\"\"Takes a list of numpy array and returns a column vector.\n\n Parameters\n ----------\n l_v : array-like\n\n Returns\n -------\n array\n Array with dimensions (n, 1).\n \"\"\"\n\n row_vector = [l_v[l].flatten() for l in range(len(l_v))]\n return np.concatenate(row_vector).reshape(-1, 1)\n\ndef restore_w_to_model(model, w):\n \"\"\"Takes an array of weights and transforms it into a list \n of matrices with dimensions taken from the model passed.\n\n Parameters\n ----------\n model : isanet.model.MLP\n The Multilayer Perceptron object.\n w : array\n\n Returns\n -------\n array-like\n \"\"\"\n start = 0\n weights = [0]*model.n_layers\n for i in range(model.n_layers):\n n_rows = model.weights[i].shape[0]\n n_cols = model.weights[i].shape[1]\n end = n_rows*n_cols\n weights[i] = w[start:start + end].reshape(n_rows,n_cols)\n start = start + end \n return weights\n" ]
[ [ "numpy.concatenate", "numpy.square", "numpy.multiply" ] ]
noahchalifour/memn2n
[ "0eb341c45c1259b5dbb041cfa264251dc7b9383c", "0eb341c45c1259b5dbb041cfa264251dc7b9383c" ]
[ "utils/preprocessing.py", "utils/data/babi_dialog.py" ]
[ "import tensorflow as tf\n# import tensorflow_text as tf_text\n\nfrom .hparams import *\n\n\ndef get_tokenizer_fn(hparams):\n\n # tf_word_tokenizer = tf_text.WhitespaceTokenizer()\n\n def word_tokenize(text):\n # return tf_word_tokenizer.tokenize(text)\n return tf.strings.split(text, sep=' ')\n\n def character_tokenize(text):\n return tf.strings.bytes_split(text)\n\n if hparams[HP_TOKEN_TYPE.name] == 'word':\n return word_tokenize\n elif hparams[HP_TOKEN_TYPE.name] == 'character':\n return character_tokenize\n\n return None\n\n\ndef build_lookup_table(keys, values=None, default_value=-1):\n\n if values is None:\n values = tf.range(len(keys))\n\n kv_init = tf.lookup.KeyValueTensorInitializer(\n keys=keys, values=values)\n\n return tf.lookup.StaticHashTable(kv_init,\n default_value=default_value)\n\n\ndef preprocess_input(inputs, \n tokenizer_fn,\n vocab_table,\n candidates_table):\n\n num_memories = tf.shape(inputs['memories'])[0]\n memories = tf.strings.split(inputs['memories'], \n sep=' ', maxsplit=2).to_tensor()\n\n if tf.shape(memories)[1] > 1:\n mem_tok = tf.concat([memories[:, :2],\n tokenizer_fn(memories[:, 2])], axis=1)\n else:\n padding = tf.fill([num_memories, 2], '')\n mem_tok = tf.concat([padding,\n tokenizer_fn(inputs['memories'])], axis=1)\n\n mem_tok = mem_tok.to_tensor()\n mem_enc = vocab_table.lookup(mem_tok)\n\n inp_tok = tokenizer_fn(inputs['inputs'])\n inp_enc = vocab_table.lookup(inp_tok)\n\n out_enc = [candidates_table.lookup(inputs['outputs'])]\n\n return ({\n 'memories': mem_enc,\n 'inputs': inp_enc\n }, out_enc)\n\n\ndef preprocess_dataset(dataset,\n tokenizer_fn,\n vocab_table, \n candidates_table,\n hparams):\n\n dataset = dataset.shuffle(5000)\n\n dataset = dataset.map(lambda inputs: (\n preprocess_input(inputs, \n tokenizer_fn=tokenizer_fn,\n vocab_table=vocab_table,\n candidates_table=candidates_table)),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n dataset = dataset.padded_batch(hparams[HP_BATCH_SIZE.name], \n padded_shapes=({\n 'memories': [-1, -1],\n 'inputs': [-1]\n }, [-1]))\n\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n dataset = dataset.repeat()\n\n return dataset", "import os\nimport glob\nimport re\nimport tensorflow as tf\n\nfrom ..hparams import *\nfrom ..preprocessing import preprocess_dataset, get_tokenizer_fn\n\n\ndef get_candidates(base_path, task):\n\n def clean_candidate(cand):\n _cand = re.sub(r'^\\d+\\s+', '', cand)\n _cand = _cand.strip('\\n')\n _cand = _cand.strip()\n return _cand\n\n if task == 6:\n candidates_fn = 'dialog-babi-task6-dstc2-candidates.txt'\n else:\n candidates_fn = 'dialog-babi-candidates.txt'\n\n candidates_fp = os.path.join(base_path, candidates_fn)\n\n with open(candidates_fp, 'r') as f:\n lines = f.readlines()\n\n all_candidates = [clean_candidate(c) for c in lines]\n\n return all_candidates\n\n\ndef load_all_texts(base_path, task):\n\n if task == 6:\n filepaths = glob.glob(os.path.join(base_path,\n 'dialog-babi-task6*.txt'))\n else:\n filepaths = glob.glob(os.path.join(base_path,\n 'dialog-babi-task[1-5]-*[s|l|v|n|t].txt'))\n\n texts = []\n\n for filepath in filepaths:\n with open(filepath, 'r') as f:\n for line in f:\n _line = line.strip('\\n')\n if _line == '':\n continue\n _line = re.sub(r'^\\d+\\s+', '', _line)\n line_texts = [t.strip() for t in _line.split('\\t')]\n if len(line_texts) > 1:\n texts += line_texts\n\n return texts\n\n\ndef load_kb(base_path, task):\n\n if task == 6:\n kb_path = os.path.join(base_path, 'dialog-babi-task6-dstc2-kb.txt')\n else:\n kb_path = 
os.path.join(base_path, 'dialog-babi-kb-all.txt')\n\n kb = []\n\n with open(kb_path, 'r') as f:\n for line in f:\n _line = line.strip('\\n')\n if _line == '':\n continue\n _line = re.sub(r'^\\d+\\s+', '', _line)\n if task == 6:\n result, _type, word = _line.split(' ')\n else:\n result, word = _line.split('\\t')\n result, _type = result.split(' ')\n kb.append([result, _type, word])\n\n return kb\n\n\ndef load_dataset(suffix, \n base_path,\n hparams,\n task=None):\n\n initial_memory = ['' for _ in range(hparams[HP_MEMORY_SIZE.name])]\n \n memories = []\n inputs = []\n outputs = []\n \n if task is None:\n glob_pattern = os.path.join(base_path, \n 'dialog-babi-task*-{}.txt'.format(suffix))\n else:\n glob_pattern = os.path.join(base_path, \n 'dialog-babi-task{}-*-{}.txt'.format(task, suffix))\n\n is_new_dialog = True\n is_knowledge = False\n conv_index = 0\n\n for filepath in glob.glob(glob_pattern):\n\n with open(filepath, 'r') as f:\n \n for line in f:\n\n _line = line.strip('\\n')\n\n if _line == '':\n conv_index = 0\n is_new_dialog = True\n continue\n\n _line = re.sub(r'^\\d+\\s+', '', _line)\n\n texts = _line.split('\\t')\n\n if len(texts) == 1:\n\n inp = '#{} <u> {}'.format(conv_index, texts[0])\n\n if len(memories) > 0 and conv_index != 0:\n new_memory = memories[-1][1:] + [inp]\n else:\n new_memory = initial_memory[1:] + [inp]\n\n if is_knowledge and len(memories) > 0:\n memories[-1] = new_memory\n else:\n is_knowledge = True\n memories.append(new_memory)\n\n conv_index += 1\n\n else:\n \n inp_text, out_text = texts\n \n inp_text = inp_text.strip()\n out_text = out_text.strip()\n\n if is_knowledge:\n\n is_knowledge = False\n inputs.append(inp_text)\n outputs.append(out_text)\n\n else:\n\n if is_new_dialog:\n new_memory = initial_memory\n is_new_dialog = False\n else:\n last_memory = memories[-1]\n mem_inp = '#{} <u> {}'.format(conv_index, inputs[-1])\n mem_out = '#{} <r> {}'.format(conv_index + 1, outputs[-1])\n new_memory = last_memory[2:] + [mem_inp, mem_out]\n conv_index += 2\n\n memories.append(new_memory)\n inputs.append(inp_text)\n outputs.append(out_text)\n\n is_new_dialog = False\n\n dataset = tf.data.Dataset.from_tensor_slices({\n 'memories': memories,\n 'inputs': inputs, \n 'outputs': outputs\n })\n\n size = len(inputs)\n\n return dataset, size" ]
[ [ "tensorflow.fill", "tensorflow.shape", "tensorflow.strings.split", "tensorflow.lookup.KeyValueTensorInitializer", "tensorflow.strings.bytes_split", "tensorflow.lookup.StaticHashTable" ], [ "tensorflow.data.Dataset.from_tensor_slices" ] ]
VemburajYadav/PhiFlow
[ "842c113d1850569b97e30ab0632866bb5bc4b300" ]
[ "phi/physics/pressuresolver/multiscale.py" ]
[ "import logging\nimport numpy as np\n\nfrom phi import math\nfrom .solver_api import PressureSolver, FluidDomain\n\n\nclass MultiscaleSolver(PressureSolver):\n\n def __init__(self, solvers, autodiff=False):\n \"\"\"\n A multigrid solver first solves the pressure on a lower-resolution grid and successively upsamples and refines it.\n On each grid, i, the pressure is calculated using the i-th provided PressureSolver.\n The resulting pressure is then upsampled and given as initial guess to the next level.\n\n This approach reduces the number of high-resolution iterations required, especially if the previous solver had a higher accuracy.\n\n :param solvers: tuple or list of PressureSolvers with length equal to number of grids\n :param autodiff: if True, use autodiff, else use multigrid forward solver for backprop\n \"\"\"\n if isinstance(solvers, PressureSolver):\n solvers = [solvers] * 2\n PressureSolver.__init__(self, 'MultiscaleSolver',\n supported_devices=solvers[0].supported_devices,\n supports_guess=solvers[0].supports_guess,\n supports_loop_counter=np.all([s.supports_loop_counter for s in solvers]),\n supports_continuous_masks=True)\n assert np.all([s.supports_guess for s in solvers[1:]]), 'solvers must support initial guess'\n self.solvers = solvers\n self.autodiff = autodiff\n\n def solve(self, divergence, domain, pressure_guess):\n assert isinstance(domain, FluidDomain)\n\n if self.autodiff:\n return _mg_solve_forward(divergence, domain, pressure_guess, self.solvers)\n\n def pressure_gradient(op, grad):\n return _mg_solve_forward(grad, domain, None, self.solvers)[0]\n\n return math.with_custom_gradient(_mg_solve_forward,\n [divergence, domain, pressure_guess, self.solvers],\n pressure_gradient,\n input_index=0, output_index=0,\n name_base='multiscale_solve')\n\n\ndef _mg_solve_forward(divergence, domain, pressure_guess, solvers):\n fluid_mask = domain.accessible_tensor(extend=1)\n active_mask = domain.active_tensor(extend=1)\n if active_mask is not None or fluid_mask is not None:\n if not np.all([s.supports_continuous_masks for s in solvers[:-1]]):\n logging.warning(\n \"MultiscaleSolver solver: There are boundary conditions inside the domain but \"\n \"not all intermediate solvers support continuous masks\")\n div_lvls = [divergence]\n act_lvls = [active_mask]\n fld_lvls = [fluid_mask]\n for grid_i in range(len(solvers) - 1):\n div_lvls.insert(0, math.downsample2x(div_lvls[0]))\n act_lvls.insert(0, math.downsample2x(act_lvls[0]) if act_lvls[0] is not None else None)\n fld_lvls.insert(0, math.downsample2x(fld_lvls[0]) if fld_lvls[0] is not None else None)\n if pressure_guess is not None:\n pressure_guess = math.downsample2x(pressure_guess)\n\n iter_list = []\n for i, div in enumerate(div_lvls):\n pressure_guess, iteration = solvers[i].solve(div, FluidDomain(act_lvls[i], fld_lvls[i], boundaries), pressure_guess)\n iter_list.append(iteration)\n if pressure_guess.shape[1] < divergence.shape[1]:\n pressure_guess = math.upsample2x(pressure_guess) * 2 ** math.spatial_rank(divergence)\n\n return pressure_guess, iter_list\n" ]
[ [ "numpy.all" ] ]
HuthLab/multi-timescale-LSTM-LMs
[ "d27de333b11f29d898069e852eaf45ca39d903a8" ]
[ "splitcross.py" ]
[ "from collections import defaultdict\n\nimport torch\nimport torch.nn as nn\n\nimport numpy as np\n\n\nclass SplitCrossEntropyLoss(nn.Module):\n r'''SplitCrossEntropyLoss calculates an approximate softmax'''\n def __init__(self, hidden_size, splits, verbose=False):\n # We assume splits is [0, split1, split2, N] where N >= |V|\n # For example, a vocab of 1000 words may have splits [0] + [100, 500] + [inf]\n super(SplitCrossEntropyLoss, self).__init__()\n self.hidden_size = hidden_size\n self.splits = [0] + splits + [100 * 1000000]\n self.nsplits = len(self.splits) - 1\n self.stats = defaultdict(list)\n self.verbose = verbose\n # Each of the splits that aren't in the head require a pretend token, we'll call them tombstones\n # The probability given to this tombstone is the probability of selecting an item from the represented split\n if self.nsplits > 1:\n self.tail_vectors = nn.Parameter(torch.zeros(self.nsplits - 1, hidden_size))\n self.tail_bias = nn.Parameter(torch.zeros(self.nsplits - 1))\n\n def logprob(self, weight, bias, hiddens, splits=None, softmaxed_head_res=None, verbose=False):\n # First we perform the first softmax on the head vocabulary and the tombstones\n if softmaxed_head_res is None:\n start, end = self.splits[0], self.splits[1]\n head_weight = None if end - start == 0 else weight[start:end]\n head_bias = None if end - start == 0 else bias[start:end]\n # We only add the tombstones if we have more than one split\n if self.nsplits > 1:\n head_weight = self.tail_vectors if head_weight is None else torch.cat([head_weight, self.tail_vectors])\n head_bias = self.tail_bias if head_bias is None else torch.cat([head_bias, self.tail_bias])\n\n # Perform the softmax calculation for the word vectors in the head for all splits\n # We need to guard against empty splits as torch.cat does not like random lists\n head_res = torch.nn.functional.linear(hiddens, head_weight, bias=head_bias)\n softmaxed_head_res = torch.nn.functional.log_softmax(head_res, dim=-1)\n\n if splits is None:\n splits = list(range(self.nsplits))\n\n results = []\n running_offset = 0\n for idx in splits:\n\n # For those targets in the head (idx == 0) we only need to return their loss\n if idx == 0:\n results.append(softmaxed_head_res[:, :-(self.nsplits - 1)])\n\n # If the target is in one of the splits, the probability is the p(tombstone) * p(word within tombstone)\n else:\n start, end = self.splits[idx], self.splits[idx + 1]\n tail_weight = weight[start:end]\n tail_bias = bias[start:end]\n\n # Calculate the softmax for the words in the tombstone\n tail_res = torch.nn.functional.linear(hiddens, tail_weight, bias=tail_bias)\n\n # Then we calculate p(tombstone) * p(word in tombstone)\n # Adding is equivalent to multiplication in log space\n head_entropy = (softmaxed_head_res[:, -idx]).contiguous()\n tail_entropy = torch.nn.functional.log_softmax(tail_res, dim=-1)\n results.append(head_entropy.view(-1, 1) + tail_entropy)\n\n if len(results) > 1:\n return torch.cat(results, dim=1)\n return results[0]\n\n def split_on_targets(self, hiddens, targets):\n # Split the targets into those in the head and in the tail\n split_targets = []\n split_hiddens = []\n\n # Determine to which split each element belongs (for each start split value, add 1 if equal or greater)\n # This method appears slower at least for WT-103 values for approx softmax\n #masks = [(targets >= self.splits[idx]).view(1, -1) for idx in range(1, self.nsplits)]\n #mask = torch.sum(torch.cat(masks, dim=0), dim=0)\n ###\n # This is equally fast for smaller splits 
as method below but scales linearly\n mask = None\n for idx in range(1, self.nsplits):\n partial_mask = targets >= self.splits[idx]\n mask = mask + partial_mask if mask is not None else partial_mask\n ###\n #masks = torch.stack([targets] * (self.nsplits - 1))\n #mask = torch.sum(masks >= self.split_starts, dim=0)\n for idx in range(self.nsplits):\n # If there are no splits, avoid costly masked select\n if self.nsplits == 1:\n split_targets, split_hiddens = [targets], [hiddens]\n continue\n # If all the words are covered by earlier targets, we have empties so later stages don't freak out\n if sum(len(t) for t in split_targets) == len(targets):\n split_targets.append([])\n split_hiddens.append([])\n continue\n # Are you in our split?\n tmp_mask = mask == idx\n split_targets.append(torch.masked_select(targets, tmp_mask))\n split_hiddens.append(hiddens.masked_select(tmp_mask.unsqueeze(1).expand_as(hiddens)).view(-1, hiddens.size(1)))\n return split_targets, split_hiddens\n\n def forward(self, weight, bias, hiddens, targets, verbose=False, return_avg=True):\n if self.verbose or verbose:\n for idx in sorted(self.stats):\n print('{}: {}'.format(idx, int(np.mean(self.stats[idx]))), end=', ')\n print()\n\n total_loss = None\n if len(hiddens.size()) > 2: hiddens = hiddens.view(-1, hiddens.size(2))\n\n split_targets, split_hiddens = self.split_on_targets(hiddens, targets)\n\n # First we perform the first softmax on the head vocabulary and the tombstones\n start, end = self.splits[0], self.splits[1]\n head_weight = None if end - start == 0 else weight[start:end]\n head_bias = None if end - start == 0 else bias[start:end]\n\n # We only add the tombstones if we have more than one split\n if self.nsplits > 1:\n head_weight = self.tail_vectors if head_weight is None else torch.cat([head_weight, self.tail_vectors])\n head_bias = self.tail_bias if head_bias is None else torch.cat([head_bias, self.tail_bias])\n\n # Perform the softmax calculation for the word vectors in the head for all splits\n # We need to guard against empty splits as torch.cat does not like random lists\n combo = torch.cat([split_hiddens[i] for i in range(self.nsplits) if len(split_hiddens[i])])\n ###\n all_head_res = torch.nn.functional.linear(combo, head_weight, bias=head_bias)\n softmaxed_all_head_res = torch.nn.functional.log_softmax(all_head_res, dim=-1)\n if self.verbose or verbose:\n self.stats[0].append(combo.size()[0] * head_weight.size()[0])\n\n running_offset = 0\n for idx in range(self.nsplits):\n # If there are no targets for this split, continue\n if len(split_targets[idx]) == 0: continue\n\n # For those targets in the head (idx == 0) we only need to return their loss\n if idx == 0:\n softmaxed_head_res = softmaxed_all_head_res[running_offset:running_offset + len(split_hiddens[idx])]\n entropy = -torch.gather(softmaxed_head_res, dim=1, index=split_targets[idx].view(-1, 1))\n # If the target is in one of the splits, the probability is the p(tombstone) * p(word within tombstone)\n else:\n softmaxed_head_res = softmaxed_all_head_res[running_offset:running_offset + len(split_hiddens[idx])]\n\n if self.verbose or verbose:\n start, end = self.splits[idx], self.splits[idx + 1]\n tail_weight = weight[start:end]\n self.stats[idx].append(split_hiddens[idx].size()[0] * tail_weight.size()[0])\n\n # Calculate the softmax for the words in the tombstone\n tail_res = self.logprob(weight, bias, split_hiddens[idx], splits=[idx], softmaxed_head_res=softmaxed_head_res)\n\n # Then we calculate p(tombstone) * p(word in tombstone)\n # Adding is 
equivalent to multiplication in log space\n head_entropy = softmaxed_head_res[:, -idx]\n # All indices are shifted - if the first split handles [0,...,499] then the 500th in the second split will be 0 indexed\n indices = (split_targets[idx] - self.splits[idx]).view(-1, 1)\n # Warning: if you don't squeeze, you get an N x 1 return, which acts oddly with broadcasting\n tail_entropy = torch.gather(torch.nn.functional.log_softmax(tail_res, dim=-1), dim=1, index=indices).squeeze()\n entropy = -(head_entropy + tail_entropy)\n ###\n running_offset += len(split_hiddens[idx])\n if return_avg:\n total_loss = entropy.float().sum() if total_loss is None else total_loss + entropy.float().sum()\n else:\n total_loss = entropy.float() if total_loss is None else total_loss + entropy.float()\n\n return (total_loss / len(targets)).type_as(weight)\n\n\nif __name__ == '__main__':\n np.random.seed(42)\n torch.manual_seed(42)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(42)\n\n V = 8\n H = 10\n N = 100\n E = 10\n\n embed = torch.nn.Embedding(V, H)\n crit = SplitCrossEntropyLoss(hidden_size=H, splits=[V // 2])\n bias = torch.nn.Parameter(torch.ones(V))\n optimizer = torch.optim.SGD(list(embed.parameters()) + list(crit.parameters()), lr=1)\n\n for _ in range(E):\n prev = torch.autograd.Variable((torch.rand(N, 1) * 0.999 * V).int().long())\n x = torch.autograd.Variable((torch.rand(N, 1) * 0.999 * V).int().long())\n y = embed(prev).squeeze()\n c = crit(embed.weight, bias, y, x.view(N))\n print('Crit', c.exp().data[0])\n\n logprobs = crit.logprob(embed.weight, bias, y[:2]).exp()\n print(logprobs)\n print(logprobs.sum(dim=1))\n\n optimizer.zero_grad()\n c.backward()\n optimizer.step()\n" ]
[ [ "torch.ones", "numpy.random.seed", "torch.nn.functional.log_softmax", "torch.cuda.manual_seed", "torch.manual_seed", "torch.cat", "torch.zeros", "torch.nn.Embedding", "numpy.mean", "torch.rand", "torch.cuda.is_available", "torch.masked_select", "torch.nn.functional.linear" ] ]
stephenroller/naacl2016
[ "486ded494fe5d115a426efea66897632d3e33253" ]
[ "ctxpredict/ioutil.py" ]
[ "#!/usr/bin/env python\nimport os\nimport os.path\nimport logging\nimport itertools\nimport numpy as np\n\n# before anything else, configure the logger\nlogger = logging.getLogger()\nhandler = logging.StreamHandler()\nformatter = logging.Formatter(\"%(asctime)s %(levelname)-8s %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\")\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.DEBUG)\n\nN = 2\n\n\nclass CorpusBatchReader(object):\n def __init__(self, folder, dist_space, batch_size=256):\n self.folder = folder\n self.batch_size = batch_size\n self.dist_space = dist_space\n\n self.files = os.listdir(folder)\n if not self.files:\n raise ValueError(\"%s doesn't have any files in it!\" % folder)\n self.fileno = 0\n\n self._read_file()\n\n def __iter__(self):\n return self\n\n def _read_file(self):\n logger.debug(\"Reading file '%s'\" % self.files[self.fileno])\n file = self.files[self.fileno]\n # todo use path.join\n compressed = np.load(os.path.join(self.folder, file))\n self._targetids = compressed['targets']\n self._contexts = compressed['contexts'].item(0)\n self._file_idx = 0\n\n def next(self):\n \"\"\"\n Returns the next mini-batch.\n \"\"\"\n # first check if this is the end of the file\n if self._file_idx >= len(self._targetids):\n # go to next file\n self.fileno += 1\n if self.fileno >= len(self.files):\n # out of everything!\n raise StopIteration\n self._read_file()\n\n\n batch_size = self.batch_size\n\n idx = self._file_idx\n self._file_idx += batch_size\n\n Yids = self._targetids[idx:idx+batch_size]\n #Y = self.dist_space.matrix[Yids]\n X = self._contexts[idx:idx+batch_size,:256].todense()\n\n return X, np.array([Yids]).T\n\n def rewind(self):\n self.fileno = 0\n self._read_file()\n\n def progress(self):\n nof = float(len(self.files))\n floor = self.fileno / nof\n maxchunk = 1./nof\n chunk = self._file_idx / float(len(self._targetids))\n return floor + chunk * maxchunk\n\n\nclass DataIterator(object):\n def __init__(self, corpus_batch_reader, epochs=1, maxbatches=0):\n self.epoch = 0\n self.max_epochs = epochs\n self.cbr = corpus_batch_reader\n self.maxbatches = maxbatches\n\n self.test = self.cbr.next()\n self.val = self.cbr.next()\n self.train = None\n\n self.batch = -1\n\n def __iter__(self):\n return self\n\n def next(self):\n if self.maxbatches and self.batch >= self.maxbatches:\n raise StopIteration\n self.train = self.val\n try:\n self.val= self.cbr.next()\n except StopIteration:\n self.epoch += 1\n if self.epoch >= self.max_epochs:\n raise\n self.cbr.rewind()\n # don't train on the \"test\" item; skip one item off!\n self.cbr.next()\n self.val = self.cbr.next()\n\n self.batch += 1\n return self.train\n\n def progress(self):\n if self.maxbatches:\n return float(self.batch) / self.maxbatches\n me = float(self.max_epochs)\n floor = self.epoch / me\n maxchunk = 1. / me\n prog = self.cbr.progress()\n chunk = self.cbr.progress() * maxchunk\n return floor + chunk\n\nclass CSVLogger(object):\n def __init__(self, filename):\n self.filename = filename\n self.count = 0\n if self.filename:\n self._handle = open(self.filename, 'w')\n\n def append(self, record):\n if self.filename:\n if not self.count:\n self.keys = sorted(record.keys())\n self._handle.write(\",\".join(self.keys) + \"\\n\")\n self._handle.write(\",\".join(str(record.get(k, '')) for k in self.keys))\n self._handle.write(\"\\n\")\n self._handle.flush()\n self.count += 1\n\n def __del__(self):\n if self.filename:\n self._handle.close()\n\n" ]
[ [ "numpy.array" ] ]
DenisDiachkov/AttnGAN
[ "f8800539de20e77a08ec3ae371a29ce94769be40" ]
[ "code/miscc/losses.py" ]
[ "import torch\nimport torch.nn as nn\n\nimport numpy as np\nfrom miscc.config import cfg\n\nfrom GlobalAttention import func_attention\n\n\n# ##################Loss for matching text-image###################\ndef cosine_similarity(x1, x2, dim=1, eps=1e-8):\n \"\"\"Returns cosine similarity between x1 and x2, computed along dim.\n \"\"\"\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()\n\n\ndef sent_loss(cnn_code, rnn_code, labels, class_ids,\n batch_size, eps=1e-8):\n # ### Mask mis-match samples ###\n # that come from the same class as the real sample ###\n masks = []\n if class_ids is not None:\n for i in range(batch_size):\n mask = (class_ids == class_ids[i]).astype(np.uint8)\n mask[i] = 0\n masks.append(mask.reshape((1, -1)))\n masks = np.concatenate(masks, 0)\n # masks: batch_size x batch_size\n masks = torch.ByteTensor(masks)\n if cfg.CUDA:\n masks = masks.cuda()\n\n # --> seq_len x batch_size x nef\n if cnn_code.dim() == 2:\n cnn_code = cnn_code.unsqueeze(0)\n rnn_code = rnn_code.unsqueeze(0)\n\n # cnn_code_norm / rnn_code_norm: seq_len x batch_size x 1\n cnn_code_norm = torch.norm(cnn_code, 2, dim=2, keepdim=True)\n rnn_code_norm = torch.norm(rnn_code, 2, dim=2, keepdim=True)\n # scores* / norm*: seq_len x batch_size x batch_size\n scores0 = torch.bmm(cnn_code, rnn_code.transpose(1, 2))\n norm0 = torch.bmm(cnn_code_norm, rnn_code_norm.transpose(1, 2))\n scores0 = scores0 / norm0.clamp(min=eps) * cfg.TRAIN.SMOOTH.GAMMA3\n\n # --> batch_size x batch_size\n scores0 = scores0.squeeze()\n if class_ids is not None:\n scores0.data.masked_fill_(masks.type(torch.bool), -float('inf'))\n scores1 = scores0.transpose(0, 1)\n if labels is not None:\n loss0 = nn.CrossEntropyLoss()(scores0, labels)\n loss1 = nn.CrossEntropyLoss()(scores1, labels)\n else:\n loss0, loss1 = None, None\n return loss0, loss1\n\n\ndef words_loss(img_features, words_emb, labels,\n cap_lens, class_ids, batch_size):\n \"\"\"\n words_emb(query): batch x nef x seq_len\n img_features(context): batch x nef x 17 x 17\n \"\"\"\n masks = []\n att_maps = []\n similarities = []\n cap_lens = cap_lens.data.tolist()\n for i in range(batch_size):\n if class_ids is not None:\n mask = (class_ids == class_ids[i]).astype(np.uint8)\n mask[i] = 0\n masks.append(mask.reshape((1, -1)))\n # Get the i-th text description\n words_num = cap_lens[i]\n # -> 1 x nef x words_num\n word = words_emb[i, :, :words_num].unsqueeze(0).contiguous()\n # -> batch_size x nef x words_num\n word = word.repeat(batch_size, 1, 1)\n # batch x nef x 17*17\n context = img_features\n \"\"\"\n word(query): batch x nef x words_num\n context: batch x nef x 17 x 17\n weiContext: batch x nef x words_num\n attn: batch x words_num x 17 x 17\n \"\"\"\n weiContext, attn = func_attention(word, context, cfg.TRAIN.SMOOTH.GAMMA1)\n att_maps.append(attn[i].unsqueeze(0).contiguous())\n # --> batch_size x words_num x nef\n word = word.transpose(1, 2).contiguous()\n weiContext = weiContext.transpose(1, 2).contiguous()\n # --> batch_size*words_num x nef\n word = word.view(batch_size * words_num, -1)\n weiContext = weiContext.view(batch_size * words_num, -1)\n #\n # -->batch_size*words_num\n row_sim = cosine_similarity(word, weiContext)\n # --> batch_size x words_num\n row_sim = row_sim.view(batch_size, words_num)\n\n # Eq. 
(10)\n row_sim.mul_(cfg.TRAIN.SMOOTH.GAMMA2).exp_()\n row_sim = row_sim.sum(dim=1, keepdim=True)\n row_sim = torch.log(row_sim)\n\n # --> 1 x batch_size\n # similarities(i, j): the similarity between the i-th image and the j-th text description\n similarities.append(row_sim)\n\n # batch_size x batch_size\n similarities = torch.cat(similarities, 1)\n if class_ids is not None:\n masks = np.concatenate(masks, 0)\n # masks: batch_size x batch_size\n masks = torch.ByteTensor(masks)\n if cfg.CUDA:\n masks = masks.cuda()\n\n similarities = similarities * cfg.TRAIN.SMOOTH.GAMMA3\n if class_ids is not None:\n similarities.data.masked_fill_(masks.type(torch.bool), -float('inf'))\n similarities1 = similarities.transpose(0, 1)\n if labels is not None:\n loss0 = nn.CrossEntropyLoss()(similarities, labels)\n loss1 = nn.CrossEntropyLoss()(similarities1, labels)\n else:\n loss0, loss1 = None, None\n return loss0, loss1, att_maps\n\n\n# ##################Loss for G and Ds##############################\ndef discriminator_loss(netD, real_imgs, fake_imgs, conditions,\n real_labels, fake_labels):\n # Forward\n real_features = netD(real_imgs)\n fake_features = netD(fake_imgs.detach())\n # loss\n #\n cond_real_logits = netD.COND_DNET(real_features, conditions)\n cond_real_errD = nn.BCELoss()(cond_real_logits, real_labels)\n cond_fake_logits = netD.COND_DNET(fake_features, conditions)\n cond_fake_errD = nn.BCELoss()(cond_fake_logits, fake_labels)\n #\n batch_size = real_features.size(0)\n cond_wrong_logits = netD.COND_DNET(real_features[:(batch_size - 1)], conditions[1:batch_size])\n cond_wrong_errD = nn.BCELoss()(cond_wrong_logits, fake_labels[1:batch_size])\n\n if netD.UNCOND_DNET is not None:\n real_logits = netD.UNCOND_DNET(real_features)\n fake_logits = netD.UNCOND_DNET(fake_features)\n real_errD = nn.BCELoss()(real_logits, real_labels)\n fake_errD = nn.BCELoss()(fake_logits, fake_labels)\n errD = ((real_errD + cond_real_errD) / 2. 
+\n (fake_errD + cond_fake_errD + cond_wrong_errD) / 3.)\n else:\n errD = cond_real_errD + (cond_fake_errD + cond_wrong_errD) / 2.\n return errD\n\n\ndef generator_loss(netsD, image_encoder, fake_imgs, real_labels,\n words_embs, sent_emb, match_labels,\n cap_lens, class_ids):\n numDs = len(netsD)\n batch_size = real_labels.size(0)\n logs = ''\n # Forward\n errG_total = 0\n for i in range(numDs):\n features = netsD[i](fake_imgs[i])\n cond_logits = netsD[i].COND_DNET(features, sent_emb)\n cond_errG = nn.BCELoss()(cond_logits, real_labels)\n if netsD[i].UNCOND_DNET is not None:\n logits = netsD[i].UNCOND_DNET(features)\n errG = nn.BCELoss()(logits, real_labels)\n g_loss = errG + cond_errG\n else:\n g_loss = cond_errG\n errG_total += g_loss\n # err_img = errG_total.data[0]\n logs += 'g_loss%d: %.2f ' % (i, g_loss.data.item())\n\n # Ranking loss\n if i == (numDs - 1):\n # words_features: batch_size x nef x 17 x 17\n # sent_code: batch_size x nef\n region_features, cnn_code = image_encoder(fake_imgs[i])\n w_loss0, w_loss1, _ = words_loss(region_features, words_embs,\n match_labels, cap_lens,\n class_ids, batch_size)\n w_loss = (w_loss0 + w_loss1) * \\\n cfg.TRAIN.SMOOTH.LAMBDA\n # err_words = err_words + w_loss.data[0]\n\n s_loss0, s_loss1 = sent_loss(cnn_code, sent_emb,\n match_labels, class_ids, batch_size)\n s_loss = (s_loss0 + s_loss1) * \\\n cfg.TRAIN.SMOOTH.LAMBDA\n # err_sent = err_sent + s_loss.data[0]\n\n errG_total += w_loss + s_loss\n logs += 'w_loss: %.2f s_loss: %.2f ' % (w_loss.data.item(), s_loss.data.item())\n return errG_total, logs\n\n\n##################################################################\ndef KL_loss(mu, logvar):\n # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)\n KLD = torch.mean(KLD_element).mul_(-0.5)\n return KLD\n" ]
[ [ "torch.ByteTensor", "torch.nn.CrossEntropyLoss", "torch.norm", "torch.mean", "torch.cat", "torch.sum", "torch.nn.BCELoss", "numpy.concatenate", "torch.log" ] ]
quamernasim/PETAI-master
[ "d76f37bea1e313246a556871c6304370e9a5616b" ]
[ "core/models/Redundant/SeismicNet6LayerEncoder.py" ]
[ "\"\"\"\n This is our proposed Seismic Net for semantic segmentation of facies from seismic images. This Network has\n residual connections in both Encoder and Decoder. It has also long residual skip connections to retain the\n spatial locations. Primary investigations shows promising results. Need to play with the architecture and\n hyper-parameters to obtain optimal results.\n\"\"\"\n\nimport torch.nn as nn\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_channels=64, out_channels=64, kernel_size=3):\n super(ResidualBlock, self).__init__()\n padding = kernel_size // 2\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=2,\n padding=padding),\n nn.BatchNorm2d(out_channels),\n nn.PReLU(),\n nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding),\n nn.BatchNorm2d(out_channels)\n )\n\n self.downsample = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=2),\n )\n self.act = nn.PReLU()\n\n def forward(self, x):\n residual = self.block(x)\n x = self.downsample(x)\n return self.act(x + residual)\n\n\nclass TransposeResidualBlock(nn.Module):\n def __init__(self, in_channels=64, out_channels=64, kernel_size=3):\n super(TransposeResidualBlock, self).__init__()\n padding = kernel_size // 2\n\n self.block1 = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,\n stride=2, padding=padding, output_padding=1)\n self.block2 = nn.Sequential(\n nn.BatchNorm2d(out_channels),\n nn.PReLU(),\n nn.ConvTranspose2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size,\n padding=padding),\n nn.BatchNorm2d(out_channels)\n )\n\n self.upsample = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=2,\n output_padding=1)\n self.act = nn.PReLU()\n\n def forward(self, x, output_size):\n residual = self.block2(self.block1(x, output_size=output_size))\n x = self.upsample(x, output_size=output_size)\n\n return self.act(x + residual)\n\n\nclass SeismicNet(nn.Module):\n def __init__(self, in_channels=1, n_classes=6):\n super(SeismicNet, self).__init__()\n\n self.start = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64),\n nn.PReLU()\n )\n\n self.encode1 = ResidualBlock(64, 64)\n self.encode2 = ResidualBlock(64, 128)\n self.encode3 = ResidualBlock(128, 256)\n self.encode4 = ResidualBlock(256, 512)\n self.encode5 = ResidualBlock(512, 1024)\n self.encode6 = ResidualBlock(1024, 2048)\n\n self.middle = nn.Sequential(\n nn.Conv2d(2048, 2048, 1),\n nn.BatchNorm2d(2048),\n nn.PReLU()\n )\n \n self.dencode6 = TransposeResidualBlock(2048, 1024)\n self.dencode5 = TransposeResidualBlock(1024, 512)\n self.dencode4 = TransposeResidualBlock(512, 256)\n self.dencode3 = TransposeResidualBlock(256, 128)\n self.dencode2 = TransposeResidualBlock(128, 64)\n self.dencode1 = TransposeResidualBlock(64, 64)\n\n self.end = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=n_classes, kernel_size=1)\n )\n\n def forward(self, x):\n x = self.start(x)\n\n # Encoder\n x1 = self.encode1(x)\n x2 = self.encode2(x1)\n x3 = self.encode3(x2)\n x4 = self.encode4(x3)\n x5 = self.encode5(x4)\n x6 = self.encode6(x5)\n\n out_middle = self.middle(x6)\n\n # Decoder\n out6 = self.dencode6(out_middle, x5.size()) + x5\n out5 = self.dencode5(out6, x4.size()) + x4\n out4 = self.dencode4(out5, x3.size()) + x3\n 
out3 = self.dencode3(out4, x2.size()) + x2\n out2 = self.dencode2(out3, x1.size()) + x1\n out1 = self.dencode1(out2, x.size()) + x\n\n out = self.end(out1)\n\n return out\n\ndef SeismicNet6LayerEncoder():\n \n model = SeismicNet()\n \n return model" ]
[ [ "torch.nn.PReLU", "torch.nn.Conv2d", "torch.nn.ConvTranspose2d", "torch.nn.BatchNorm2d" ] ]
Tarpelite/title_generation
[ "8b9a9878e42ef1217b346daf492450c3d004dd21" ]
[ "examples/language-modeling/run_language_modeling_gen.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\nusing a masked language modeling (MLM) loss.\n\"\"\"\n\n\nimport logging\nimport math\nimport os\nfrom dataclasses import dataclass, field\nfrom typing import Optional\nfrom tqdm import tqdm\nfrom transformers import (\n CONFIG_MAPPING,\n MODEL_WITH_LM_HEAD_MAPPING,\n AutoConfig,\n AutoModelWithLMHead,\n AutoModelForTokenClassification,\n AutoModelForSequenceClassification,\n AutoTokenizer,\n DataCollatorForLanguageModeling,\n DataCollatorForWeightedLanguageModeling,\n DataCollatorForSelectLM,\n DataCollatorForMaskGen,\n DataCollatorForCheckMaskGen,\n HfArgumentParser,\n LineByLineTextDataset,\n FullyLineByLineTextDataset,\n PreTrainedTokenizer,\n TextDataset,\n Trainer,\n TrainingArguments,\n MaskSelector,\n MaskGenerator,\n set_seed,\n)\n# from pudb import set_trace\n# set_trace()\n\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler\nimport torch\n\nlogger = logging.getLogger(__name__)\n\n\nMODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.\n \"\"\"\n\n model_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.\"\n },\n )\n model_type: Optional[str] = field(\n default=None,\n metadata={\"help\": \"If training from scratch, pass a model type from the list: \" + \", \".join(MODEL_TYPES)},\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n gen_model_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.\"\n },\n )\n cls_model_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The model checkpoint for weights initialization. 
Leave None if you want to train a model from scratch.\"\n },\n )\n \n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n train_data_file: Optional[str] = field(\n default=None, metadata={\"help\": \"The input training data file (a text file).\"}\n )\n eval_data_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate the perplexity on (a text file).\"},\n )\n line_by_line: bool = field(\n default=False,\n metadata={\"help\": \"Whether distinct lines of text in the dataset are to be handled as distinct sequences.\"},\n )\n\n mlm: bool = field(\n default=False, metadata={\"help\": \"Train with masked-language modeling loss instead of language modeling.\"}\n )\n mlm_probability: float = field(\n default=0.15, metadata={\"help\": \"Ratio of tokens to mask for masked language modeling loss\"}\n )\n\n mlm_sample_times: int = field(\n default = 1,\n metadata={\n \"help\": \"determine the sample times of mlm if 1 is reduced to the normal mlm\"\n }\n )\n\n block_size: int = field(\n default=-1,\n metadata={\n \"help\": \"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n weighted_vocab: str = field(\n default=\"\", metadata={\"help\": \"weighted vocab for target language masking\"}\n )\n\n\ndef get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer,model_args:ModelArguments, evaluate=False):\n file_path = args.eval_data_file if evaluate else args.train_data_file\n if args.line_by_line:\n return FullyLineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, cache_dir=model_args.cache_dir)\n \n else:\n return TextDataset(\n tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache\n )\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n if data_args.eval_data_file is None and training_args.do_eval:\n raise ValueError(\n \"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file \"\n \"or remove the --do_eval argument.\"\n )\n\n if (\n os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir)\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n training_args.local_rank,\n training_args.device,\n training_args.n_gpu,\n bool(training_args.local_rank != -1),\n training_args.fp16,\n )\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed\n set_seed(training_args.seed)\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n\n if model_args.config_name:\n config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)\n elif model_args.model_name_or_path:\n config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)\n else:\n config = CONFIG_MAPPING[model_args.model_type]()\n logger.warning(\"You are instantiating a new config instance from scratch.\")\n\n if model_args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)\n elif model_args.model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)\n else:\n raise ValueError(\n \"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --tokenizer_name\"\n )\n\n if model_args.model_name_or_path:\n model = AutoModelWithLMHead.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n )\n\n else:\n logger.info(\"Training new model from scratch\")\n model = AutoModelWithLMHead.from_config(config)\n \n if model_args.gen_model_name_or_path:\n gen_config = AutoConfig.from_pretrained(\n model_args.gen_model_name_or_path,\n num_labels=2,\n id2label = {0:\"0\", 1:\"1\"},\n label2id={\"0\":0, \"1\":1 },\n cache_dir=model_args.cache_dir,\n )\n gen_model = AutoModelForTokenClassification.from_pretrained(\n model_args.gen_model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.gen_model_name_or_path),\n config=gen_config,\n cache_dir=model_args.cache_dir,\n )\n gen_model.resize_token_embeddings(len(tokenizer))\n mask_generator = MaskGenerator(gen_model,training_args)\n \n if model_args.cls_model_name_or_path:\n cls_config = AutoConfig.from_pretrained(\n model_args.cls_model_name_or_path,\n num_labels=2,\n finetuning_task=\"cola\",\n cache_dir=model_args.cache_dir,\n )\n cls_model = AutoModelForSequenceClassification.from_pretrained(\n model_args.cls_model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.cls_model_name_or_path),\n config=cls_config,\n cache_dir=model_args.cache_dir,\n )\n cls_model.resize_token_embeddings(len(tokenizer))\n mask_selector = MaskSelector(cls_model,training_args)\n\n model.resize_token_embeddings(len(tokenizer))\n \n\n if config.model_type in [\"bert\", \"roberta\", \"distilbert\", \"camembert\"] and not data_args.mlm:\n raise ValueError(\n \"BERT and RoBERTa-like models do not have LM heads but masked LM heads. 
They must be run using the --mlm \"\n \"flag (masked language modeling).\"\n )\n\n if data_args.block_size <= 0:\n data_args.block_size = tokenizer.max_len\n # Our input block size will be the max possible for the model\n else:\n data_args.block_size = min(data_args.block_size, tokenizer.max_len)\n\n # Get datasets\n\n train_dataset = get_dataset(data_args, tokenizer=tokenizer, model_args=model_args) if training_args.do_train else None\n eval_dataset = get_dataset(data_args, model_args=None, tokenizer=tokenizer, evaluate=True) if training_args.do_eval else None\n\n if model_args.cls_model_name_or_path:\n data_collator = DataCollatorForCheckMaskGen(\n tokenizer=tokenizer,\n mlm=data_args.mlm,\n mlm_probability=data_args.mlm_probability,\n generator = mask_generator,\n selector=mask_selector\n\n )\n train_sampler = (\n RandomSampler(train_dataset)\n if training_args.local_rank == -1\n else DistributedSampler(train_dataset)\n )\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=128,\n sampler = train_sampler,\n collate_fn = data_collator.collate_batch,\n drop_last = False,\n )\n all_scores = []\n\n epoch_iterator = tqdm(train_dataloader)\n for score in epoch_iterator:\n # print(score)\n all_scores.extend(score)\n \n print(\"avg tgt score:\")\n print(sum(all_scores)/ len(all_scores))\n exit()\n \n\n data_collator = DataCollatorForMaskGen(\n tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability, generator=mask_generator\n )\n\n \n\n # Initialize our Trainer\n trainer = Trainer(\n model=model,\n args=training_args,\n data_collator=data_collator,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n prediction_loss_only=True,\n )\n\n # Training\n if training_args.do_train:\n model_path = (\n model_args.model_name_or_path\n if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)\n else None\n )\n trainer.train(model_path=model_path)\n trainer.save_model()\n # For convenience, we also re-save the tokenizer to the same directory,\n # so that you can share your model easily on huggingface.co/models =)\n if trainer.is_world_master():\n tokenizer.save_pretrained(training_args.output_dir)\n\n # Evaluation\n results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n eval_output = trainer.evaluate()\n\n perplexity = math.exp(eval_output[\"eval_loss\"])\n result = {\"perplexity\": perplexity}\n\n output_eval_file = os.path.join(training_args.output_dir, \"eval_results_lm.txt\")\n if trainer.is_world_master():\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n results.update(result)\n\n return results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.dataloader.DataLoader", "torch.utils.data.sampler.RandomSampler", "torch.utils.data.distributed.DistributedSampler" ] ]
smoorjani/LA-Crime-Analysis
[ "26dfeae3a1b7e885a9bd1f78af2c5c5df5b7d071" ]
[ "geographic_mapping.py" ]
[ "import plotly\r\nimport plotly.plotly as py\r\nimport plotly.graph_objs as go\r\nimport pandas as pd\r\n\r\n# Plots each crime on a map of Los Angeles\r\n\r\nplotly.tools.set_credentials_file(username='smoorjani', api_key='ZNLECT9fuPKYvIqUfd1M')\r\n\r\nmapbox_access_token = 'pk.eyJ1Ijoic2FtcmFqbSIsImEiOiJjanFsNnpnMWEwaW83NDJxajE5a3VwYWk1In0.5ieoEoWVw4B_xcB8Mi68RA'\r\n\r\ndf = pd.read_csv('minimized.csv')\r\n\r\n#lat_lon = list(df['Location '])\r\n#df.dropna(subset=['Location '])\r\nimport random\r\n\r\nlat_lon = list(map(lambda x: x[1:-1].replace(',',''),list(map(str,list(df['Location '])))))\r\nlat_lon = list(filter(None, lat_lon))\r\nlat_lon = random.sample(lat_lon, 40000)\r\n#print(lat_lon[0])\r\nlats = [float(str(a)[:a.find(' ')]) for a in lat_lon]\r\n'''\r\ncounter = 0\r\nfor a in lat_lon:\r\n #print (counter)\r\n a = float(str(a)[:a.find(' ')])\r\n counter += 1\r\n'''\r\nlons = [float(str(a)[a.find(' ')+1:]) for a in lat_lon]\r\ncodes = list(df['Crime Code Description'])\r\n\r\n\r\ndata = [\r\n go.Scattermapbox(\r\n lat=lats,\r\n lon=lons,\r\n mode='markers',\r\n marker=dict(\r\n size=9\r\n ),\r\n text=codes,\r\n )\r\n]\r\n\r\nlayout = go.Layout(\r\n autosize=True,\r\n hovermode='closest',\r\n mapbox=dict(\r\n accesstoken=mapbox_access_token,\r\n bearing=0,\r\n center=dict(\r\n\r\n lat=33.9829,\r\n lon=-118.3338\r\n ),\r\n pitch=0,\r\n zoom=10\r\n ),\r\n)\r\n\r\nfig = dict(data=data, layout=layout)\r\npy.iplot(fig, filename='Multiple Mapbox')\r\n\r\n" ]
[ [ "pandas.read_csv" ] ]
khalillakhdhar/recommander_python
[ "1a29131dd91dc68b271882124abf271177921641" ]
[ "caserec/recommenders/item_recommendation/item_attribute_knn.py" ]
[ "# coding=utf-8\n\"\"\"\"\n Item Based Collaborative Filtering Recommender with Attributes (Item Attribute KNN)\n [Item Recommendation (Ranking)]\n\n Its philosophy is as follows: in order to determine the rating of User u on item m, we can find other movies that\n are similar to item m, and based on User u’s ratings on those similar movies we infer his rating on item m.\n However, instead of traditional ItemKNN, this approach uses a metadata or pre-computed similarity matrix.\n\n\"\"\"\n\n# © 2019. Case Recommender (MIT License)\n\nfrom collections import defaultdict\nimport numpy as np\n\nfrom caserec.recommenders.item_recommendation.itemknn import ItemKNN\nfrom caserec.utils.process_data import ReadFile\n\n__author__ = 'Arthur Fortes <[email protected]>'\n\n\nclass ItemAttributeKNN(ItemKNN):\n def __init__(self, train_file=None, test_file=None, output_file=None, metadata_file=None, similarity_file=None,\n k_neighbors=30, rank_length=10, as_binary=False, as_similar_first=True, metadata_as_binary=False,\n metadata_similarity_sep='\\t', similarity_metric=\"cosine\", sep='\\t', output_sep='\\t'):\n \"\"\"\n Item Attribute KNN for Item Recommendation\n\n This algorithm predicts a rank for each user based on the similar items that he/her consumed,\n using a metadata or similarity pre-computed file\n\n Usage::\n\n >> ItemAttributeKNN(train, test, similarity_file=sim_matrix, as_similar_first=True).compute()\n >> ItemAttributeKNN(train, test, metadata_file=metadata, as_similar_first=True).compute()\n\n :param train_file: File which contains the train set. This file needs to have at least 3 columns\n (user item feedback_value).\n :type train_file: str\n\n :param test_file: File which contains the test set. This file needs to have at least 3 columns\n (user item feedback_value).\n :type test_file: str, default None\n\n :param output_file: File with dir to write the final predictions\n :type output_file: str, default None\n\n :param metadata_file: File which contains the metadata set. This file needs to have at least 2 columns\n (item metadata).\n :type metadata_file: str, default None\n\n :param similarity_file: File which contains the similarity set. This file needs to have at least 3 columns\n (item item similarity).\n :type similarity_file: str, default None\n\n :param k_neighbors: Number of neighbors to use. If None, k_neighbor = int(sqrt(n_users))\n :type k_neighbors: int, default None\n\n :param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm\n :type rank_length: int, default 10\n\n :param as_binary: If True, the explicit feedback will be transform to binary\n :type as_binary: bool, default False\n\n :param as_similar_first: If True, for each unknown item, which will be predicted, we first look for its k\n most similar users and then take the intersection with the users that\n seen that item.\n :type as_similar_first: bool, default True\n\n :param metadata_as_binary: f True, the explicit value will be transform to binary\n :type metadata_as_binary: bool, default False\n\n :param metadata_similarity_sep: Delimiter for similarity or metadata file\n :type metadata_similarity_sep: str, default '\\t'\n\n :param similarity_metric: Pairwise metric to compute the similarity between the items. 
Reference about\n distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html\n :type similarity_metric: str, default cosine\n\n :param sep: Delimiter for input files file\n :type sep: str, default '\\t'\n\n :param output_sep: Delimiter for output file\n :type output_sep: str, default '\\t'\n \"\"\"\n super(ItemAttributeKNN, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,\n k_neighbors=k_neighbors, rank_length=rank_length, as_binary=as_binary,\n as_similar_first=as_similar_first, similarity_metric=similarity_metric,\n sep=sep, output_sep=output_sep)\n\n self.recommender_name = 'Item Attribute KNN Algorithm'\n\n self.metadata_file = metadata_file\n self.similarity_file = similarity_file\n self.metadata_as_binary = metadata_as_binary\n self.metadata_similarity_sep = metadata_similarity_sep\n\n def init_model(self):\n \"\"\"\n Method to fit the model. Create and calculate a similarity matrix by metadata file or a pre-computed similarity\n matrix\n\n \"\"\"\n\n self.similar_items = defaultdict(list)\n\n # Set the value for k\n if self.k_neighbors is None:\n self.k_neighbors = int(np.sqrt(len(self.items)))\n\n if self.metadata_file is not None:\n metadata = ReadFile(self.metadata_file, sep=self.metadata_similarity_sep, as_binary=self.metadata_as_binary\n ).read_metadata_or_similarity()\n\n self.matrix = np.zeros((len(self.items), len(metadata['col_2'])))\n\n meta_to_meta_id = {}\n for m, data in enumerate(metadata['col_2']):\n meta_to_meta_id[data] = m\n\n for item in metadata['col_1']:\n for m in metadata['dict'][item]:\n self.matrix[self.item_to_item_id[item], meta_to_meta_id[m]] = metadata['dict'][item][m]\n\n # create header info for metadata\n sparsity = (1 - (metadata['number_interactions'] / (len(metadata['col_1']) * len(metadata['col_2'])))) * 100\n\n self.extra_info_header = \">> metadata:: %d items and %d metadata (%d interactions) | sparsity:: %.2f%%\" % \\\n (len(metadata['col_1']), len(metadata['col_2']), metadata['number_interactions'],\n sparsity)\n\n # Create similarity matrix based on metadata or similarity file. Transpose=False, because it is an\n # item x metadata matrix\n self.si_matrix = self.compute_similarity(transpose=False)\n\n elif self.similarity_file is not None:\n similarity = ReadFile(self.similarity_file, sep=self.metadata_similarity_sep, as_binary=False\n ).read_metadata_or_similarity()\n\n self.si_matrix = np.zeros((len(self.items), len(self.items)))\n\n # Fill similarity matrix\n for i in similarity['col_1']:\n for i_j in similarity['dict'][i]:\n self.si_matrix[self.item_to_item_id[i], self.item_to_item_id[int(i_j)]] = similarity['dict'][i][i_j]\n\n # Remove NaNs\n self.si_matrix[np.isnan(self.si_matrix)] = 0.0\n\n else:\n raise ValueError(\"This algorithm needs a similarity matrix or a metadata file!\")\n\n # Create original matrix user x item for prediction process\n self.create_matrix()\n\n for i_id, item in enumerate(self.items):\n self.similar_items[i_id] = sorted(range(len(self.si_matrix[i_id])),\n key=lambda k: -self.si_matrix[i_id][k])[1:self.k_neighbors + 1]\n" ]
[ [ "numpy.isnan" ] ]
bartongroup/Simpson_Barton_Nanopore_1
[ "1b509454a9e25a8c81be5092f8e525ca00e7b5a5" ]
[ "pipeline/chimera_pipeline/scripts/chimerID/chimerID/logodds.py" ]
[ "import itertools as it\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom statsmodels.stats.multitest import multipletests\n\n\ndef read_all_chimeric_counts(h5_fns, sample_names, normalise=True):\n chimeric_counts = {}\n all_gene_non_chimeric_counts = {}\n for sample, h5_fn in zip(sample_names, h5_fns):\n chimeric_counts[sample] = pd.read_hdf(h5_fn, key='chimera_counts')\n all_gene_non_chimeric_counts[sample] = pd.read_hdf(h5_fn, key='non_chimeric_counts')\n norm_factors = pd.read_hdf(h5_fn, key='norm_factors')\n if normalise:\n chimeric_counts[sample] /= norm_factors\n all_gene_non_chimeric_counts[sample] /= norm_factors\n chimeric_counts = pd.concat(\n chimeric_counts, axis=1,\n sort=True, names=['sample', 'boot'])\n all_gene_non_chimeric_counts = pd.concat(\n all_gene_non_chimeric_counts, axis=1,\n sort=True, names=['sample', 'boot'])\n downstream_genes = {(chimera, strand): downstream for\n chimera, downstream, strand\n in chimeric_counts.index}\n chimeric_counts = chimeric_counts.groupby(level=(0, 2), axis=0).sum()\n non_chimeric_counts = all_gene_non_chimeric_counts.loc[chimeric_counts.index].copy()\n counts = pd.concat(\n {'chimeric': chimeric_counts,\n 'nonchimeric': non_chimeric_counts},\n axis=1, sort=True, names=['readtype', 'sample', 'boot'],\n ).reorder_levels(['sample', 'readtype', 'boot'], axis=1).fillna(0)\n return counts, downstream_genes\n\n\ndef get_bootstrap_stats(bootstraps, cond_a, cond_b):\n bootstraps = bootstraps.copy() + 0.5\n cond_a_ratio = (\n bootstraps.loc[:, (cond_a, 'chimeric', pd.IndexSlice[:])].values /\n bootstraps.loc[:, (cond_a, 'nonchimeric', pd.IndexSlice[:])].values\n )\n cond_b_ratio = (\n bootstraps.loc[:, (cond_b, 'chimeric', pd.IndexSlice[:])].values /\n bootstraps.loc[:, (cond_b, 'nonchimeric', pd.IndexSlice[:])].values\n )\n ks_stat = []\n ks_p_val = []\n for i in range(len(bootstraps)):\n ks, p_val = stats.ks_2samp(cond_a_ratio[i], cond_b_ratio[i])\n ks_stat.append(ks)\n ks_p_val.append(p_val)\n ks_stat = np.array(ks_stat)\n ks_p_val = np.array(ks_p_val)\n n_boots = len(bootstraps.columns.unique(level=2))\n boot_lr = {}\n for n, (i, j) in enumerate(it.product(range(n_boots), repeat=2)):\n cond_a_data = bootstraps.loc[:, (cond_a, pd.IndexSlice[:], i)].copy()\n cond_a_data.columns = cond_a_data.columns.droplevel(0)\n cond_b_data = bootstraps.loc[:, (cond_b, pd.IndexSlice[:], j)].copy()\n cond_b_data.columns = cond_b_data.columns.droplevel(0)\n r = ((cond_a_data['chimeric'].values / cond_a_data['nonchimeric'].values) /\n (cond_b_data['chimeric'].values / cond_b_data['nonchimeric'].values))\n boot_lr[n] = np.log2(r).ravel()\n boot_lr = pd.DataFrame.from_dict(boot_lr)\n boot_lr.index = bootstraps.index\n boot_lr_res = boot_lr.quantile([0.5, 0.025, 0.975], axis=1).T\n boot_lr_res.columns = ['logodds_median', 'logodds_lower_ci95', 'logodds_upper_ci95']\n boot_lr_res['logodds_mean'] = boot_lr.mean(axis=1)\n boot_lr_res['ks_stat'] = ks_stat\n boot_lr_res['ks_p_val'] = ks_p_val\n _, boot_lr_res['ks_fdr'], *_ = multipletests(boot_lr_res.ks_p_val, method='bonferroni')\n return boot_lr_res\n\n\ndef generate_bootstrapped_logodds(h5_fns, cond_a_sample_name, cond_b_sample_name):\n counts, downstream_genes = read_all_chimeric_counts(\n h5_fns, [cond_a_sample_name, cond_b_sample_name], normalise=False)\n median_counts = counts.groupby(level=['sample', 'readtype'], axis=1).median()\n median_counts = counts.groupby(level=['sample', 'readtype'], axis=1).median()\n median_counts.columns = (median_counts.columns.get_level_values(0) + '_' +\n 
median_counts.columns.get_level_values(1))\n logodds_ratios = get_bootstrap_stats(\n counts, cond_a_sample_name, cond_b_sample_name)\n logodds_ratios['downstream_genes'] = pd.Series(downstream_genes)\n logodds_ratios = logodds_ratios.join(median_counts)\n return logodds_ratios" ]
[ [ "pandas.read_hdf", "pandas.concat", "scipy.stats.ks_2samp", "pandas.Series", "numpy.log2", "pandas.DataFrame.from_dict", "numpy.array" ] ]
10imaging/Open3D
[ "057c1e1c74f58042e6c0da3368eddc6701c53613" ]
[ "examples/Python/Basic/transformation.py" ]
[ "# Open3D: www.open3d.org\n# The MIT License (MIT)\n# See license file or visit www.open3d.org for details\n\n# examples/Python/Utility/transformation.py\n\nimport numpy as np\nimport open3d as o3d\nimport time\n\n\ndef geometry_generator():\n mesh = o3d.geometry.TriangleMesh.create_sphere()\n verts = np.asarray(mesh.vertices)\n colors = np.random.uniform(0, 1, size=verts.shape)\n mesh.vertex_colors = o3d.utility.Vector3dVector(colors)\n mesh.compute_vertex_normals()\n\n pcl = o3d.geometry.PointCloud()\n pcl.points = mesh.vertices\n pcl.colors = mesh.vertex_colors\n pcl.normals = mesh.vertex_normals\n yield pcl\n\n yield o3d.geometry.LineSet.create_from_triangle_mesh(mesh)\n\n yield mesh\n\n\ndef animate(geom):\n vis = o3d.visualization.Visualizer()\n vis.create_window()\n\n geom.rotate(geom.get_rotation_matrix_from_xyz((0.75, 0.5, 0)))\n vis.add_geometry(geom)\n\n scales = [0.9 for _ in range(30)] + [1 / 0.9 for _ in range(30)]\n axisangles = [(0.2 / np.sqrt(2), 0.2 / np.sqrt(2), 0) for _ in range(60)]\n ts = [(0.1, 0.1, -0.1) for _ in range(30)\n ] + [(-0.1, -0.1, 0.1) for _ in range(30)]\n\n for scale, aa in zip(scales, axisangles):\n R = geom.get_rotation_matrix_from_axis_angle(aa)\n geom.scale(scale).rotate(R, center=False)\n vis.update_geometry()\n vis.poll_events()\n vis.update_renderer()\n time.sleep(0.05)\n\n for t in ts:\n geom.translate(t)\n vis.update_geometry()\n vis.poll_events()\n vis.update_renderer()\n time.sleep(0.05)\n\n for scale, aa, t in zip(scales, axisangles, ts):\n R = geom.get_rotation_matrix_from_axis_angle(aa)\n geom.scale(scale).translate(t).rotate(R, center=True)\n vis.update_geometry()\n vis.poll_events()\n vis.update_renderer()\n time.sleep(0.05)\n\n\nif __name__ == \"__main__\":\n for geom in geometry_generator():\n animate(geom)\n" ]
[ [ "numpy.asarray", "numpy.random.uniform", "numpy.sqrt" ] ]
jorgesantos/Detectron2
[ "3bdf3ab4a4626985b3581da0a5b9e8c534b56980" ]
[ "detectron2/utils/visualizer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport colorsys\nimport logging\nimport math\nimport numpy as np\nfrom enum import Enum, unique\nimport cv2\nimport matplotlib as mpl\nimport matplotlib.colors as mplc\nimport matplotlib.figure as mplfigure\nimport pycocotools.mask as mask_util\nimport torch\nfrom fvcore.common.file_io import PathManager\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom PIL import Image\n\nfrom detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes\n\nfrom .colormap import random_color\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\"ColorMode\", \"VisImage\", \"Visualizer\"]\n\n\n_SMALL_OBJECT_AREA_THRESH = 1000\n_LARGE_MASK_AREA_THRESH = 120000\n_OFF_WHITE = (1.0, 1.0, 240.0 / 255)\n_BLACK = (0, 0, 0)\n_RED = (1.0, 0, 0)\n\n_KEYPOINT_THRESHOLD = 0.05\n\n\n@unique\nclass ColorMode(Enum):\n \"\"\"\n Enum of different color modes to use for instance visualizations.\n \"\"\"\n\n IMAGE = 0\n \"\"\"\n Picks a random color for every instance and overlay segmentations with low opacity.\n \"\"\"\n SEGMENTATION = 1\n \"\"\"\n Let instances of the same category have similar colors\n (from metadata.thing_colors), and overlay them with\n high opacity. This provides more attention on the quality of segmentation.\n \"\"\"\n IMAGE_BW = 2\n \"\"\"\n Same as IMAGE, but convert all areas without masks to gray-scale.\n Only available for drawing per-instance mask predictions.\n \"\"\"\n\n\nclass GenericMask:\n \"\"\"\n Attribute:\n polygons (list[ndarray]): list[ndarray]: polygons for this mask.\n Each ndarray has format [x, y, x, y, ...]\n mask (ndarray): a binary mask\n \"\"\"\n\n def __init__(self, mask_or_polygons, height, width):\n self._mask = self._polygons = self._has_holes = None\n self.height = height\n self.width = width\n\n m = mask_or_polygons\n if isinstance(m, dict):\n # RLEs\n assert \"counts\" in m and \"size\" in m\n if isinstance(m[\"counts\"], list): # uncompressed RLEs\n h, w = m[\"size\"]\n assert h == height and w == width\n m = mask_util.frPyObjects(m, h, w)\n self._mask = mask_util.decode(m)[:, :]\n return\n\n if isinstance(m, list): # list[ndarray]\n self._polygons = [np.asarray(x).reshape(-1) for x in m]\n return\n\n if isinstance(m, np.ndarray): # assumed to be a binary mask\n assert m.shape[1] != 2, m.shape\n assert m.shape == (height, width), m.shape\n self._mask = m.astype(\"uint8\")\n return\n\n raise ValueError(\"GenericMask cannot handle object {} of type '{}'\".format(m, type(m)))\n\n @property\n def mask(self):\n if self._mask is None:\n self._mask = self.polygons_to_mask(self._polygons)\n return self._mask\n\n @property\n def polygons(self):\n if self._polygons is None:\n self._polygons, self._has_holes = self.mask_to_polygons(self._mask)\n return self._polygons\n\n @property\n def has_holes(self):\n if self._has_holes is None:\n if self._mask is not None:\n self._polygons, self._has_holes = self.mask_to_polygons(self._mask)\n else:\n self._has_holes = False # if original format is polygon, does not have holes\n return self._has_holes\n\n def mask_to_polygons(self, mask):\n # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level\n # hierarchy. 
External contours (boundary) of the object are placed in hierarchy-1.\n # Internal contours (holes) are placed in hierarchy-2.\n # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.\n mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr\n res = cv2.findContours(mask.astype(\"uint8\"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n hierarchy = res[-1]\n if hierarchy is None: # empty mask\n return [], False\n has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0\n res = res[-2]\n res = [x.flatten() for x in res]\n res = [x for x in res if len(x) >= 6]\n return res, has_holes\n\n def polygons_to_mask(self, polygons):\n rle = mask_util.frPyObjects(polygons, self.height, self.width)\n rle = mask_util.merge(rle)\n return mask_util.decode(rle)[:, :]\n\n def area(self):\n return self.mask.sum()\n\n def bbox(self):\n p = mask_util.frPyObjects(self.polygons, self.height, self.width)\n p = mask_util.merge(p)\n bbox = mask_util.toBbox(p)\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n return bbox\n\n\nclass _PanopticPrediction:\n def __init__(self, panoptic_seg, segments_info):\n self._seg = panoptic_seg\n\n self._sinfo = {s[\"id\"]: s for s in segments_info} # seg id -> seg info\n segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)\n areas = areas.numpy()\n sorted_idxs = np.argsort(-areas)\n self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]\n self._seg_ids = self._seg_ids.tolist()\n for sid, area in zip(self._seg_ids, self._seg_areas):\n if sid in self._sinfo:\n self._sinfo[sid][\"area\"] = float(area)\n\n def non_empty_mask(self):\n \"\"\"\n Returns:\n (H, W) array, a mask for all pixels that have a prediction\n \"\"\"\n empty_ids = []\n for id in self._seg_ids:\n if id not in self._sinfo:\n empty_ids.append(id)\n if len(empty_ids) == 0:\n return np.zeros(self._seg.shape, dtype=np.uint8)\n assert (\n len(empty_ids) == 1\n ), \">1 ids corresponds to no labels. This is currently not supported\"\n return (self._seg != empty_ids[0]).numpy().astype(np.bool)\n\n def semantic_masks(self):\n for sid in self._seg_ids:\n sinfo = self._sinfo.get(sid)\n if sinfo is None or sinfo[\"isthing\"]:\n # Some pixels (e.g. 
id 0 in PanopticFPN) have no instance or semantic predictions.\n continue\n yield (self._seg == sid).numpy().astype(np.bool), sinfo\n\n def instance_masks(self):\n for sid in self._seg_ids:\n sinfo = self._sinfo.get(sid)\n if sinfo is None or not sinfo[\"isthing\"]:\n continue\n mask = (self._seg == sid).numpy().astype(np.bool)\n if mask.sum() > 0:\n yield mask, sinfo\n\n\ndef _create_text_labels(classes, scores, class_names):\n \"\"\"\n Args:\n classes (list[int] or None):\n scores (list[float] or None):\n class_names (list[str] or None):\n\n Returns:\n list[str] or None\n \"\"\"\n labels = None\n if classes is not None and class_names is not None and len(class_names) > 1:\n labels = [class_names[i] for i in classes]\n if scores is not None:\n if labels is None:\n labels = [\"{:.0f}%\".format(s * 100) for s in scores]\n else:\n labels = [\"{} {:.0f}%\".format(l, s * 100) for l, s in zip(labels, scores)]\n return labels\n\n\nclass VisImage:\n def __init__(self, img, scale=1.0):\n \"\"\"\n Args:\n img (ndarray): an RGB image of shape (H, W, 3).\n scale (float): scale the input image\n \"\"\"\n self.img = img\n self.scale = scale\n self.width, self.height = img.shape[1], img.shape[0]\n self._setup_figure(img)\n\n def _setup_figure(self, img):\n \"\"\"\n Args:\n Same as in :meth:`__init__()`.\n\n Returns:\n fig (matplotlib.pyplot.figure): top level container for all the image plot elements.\n ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.\n \"\"\"\n fig = mplfigure.Figure(frameon=False)\n self.dpi = fig.get_dpi()\n # add a small 1e-2 to avoid precision lost due to matplotlib's truncation\n # (https://github.com/matplotlib/matplotlib/issues/15363)\n fig.set_size_inches(\n (self.width * self.scale + 1e-2) / self.dpi,\n (self.height * self.scale + 1e-2) / self.dpi,\n )\n self.canvas = FigureCanvasAgg(fig)\n # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)\n ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])\n ax.axis(\"off\")\n ax.set_xlim(0.0, self.width)\n ax.set_ylim(self.height)\n\n self.fig = fig\n self.ax = ax\n\n def save(self, filepath):\n \"\"\"\n Args:\n filepath (str): a string that contains the absolute path, including the file name, where\n the visualized image will be saved.\n \"\"\"\n if filepath.lower().endswith(\".jpg\") or filepath.lower().endswith(\".png\"):\n # faster than matplotlib's imshow\n cv2.imwrite(filepath, self.get_image()[:, :, ::-1])\n else:\n # support general formats (e.g. pdf)\n self.ax.imshow(self.img, interpolation=\"nearest\")\n self.fig.savefig(filepath)\n\n def get_image(self):\n \"\"\"\n Returns:\n ndarray:\n the visualized image of shape (H, W, 3) (RGB) in uint8 type.\n The shape is scaled w.r.t the input image using the given `scale` argument.\n \"\"\"\n canvas = self.canvas\n s, (width, height) = canvas.print_to_buffer()\n if (self.width, self.height) != (width, height):\n img = cv2.resize(self.img, (width, height))\n else:\n img = self.img\n\n # buf = io.BytesIO() # works for cairo backend\n # canvas.print_rgba(buf)\n # width, height = self.width, self.height\n # s = buf.getvalue()\n\n buffer = np.frombuffer(s, dtype=\"uint8\")\n\n # imshow is slow. 
blend manually (still quite slow)\n img_rgba = buffer.reshape(height, width, 4)\n rgb, alpha = np.split(img_rgba, [3], axis=2)\n\n try:\n import numexpr as ne # fuse them with numexpr\n\n visualized_image = ne.evaluate(\"img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)\")\n except ImportError:\n alpha = alpha.astype(\"float32\") / 255.0\n visualized_image = img * (1 - alpha) + rgb * alpha\n\n visualized_image = visualized_image.astype(\"uint8\")\n\n return visualized_image\n\n\nclass Visualizer:\n def __init__(self, img_rgb, metadata, scale=1.0, instance_mode=ColorMode.IMAGE):\n \"\"\"\n Args:\n img_rgb: a numpy array of shape (H, W, C), where H and W correspond to\n the height and width of the image respectively. C is the number of\n color channels. The image is required to be in RGB format since that\n is a requirement of the Matplotlib library. The image is also expected\n to be in the range [0, 255].\n metadata (MetadataCatalog): image metadata.\n \"\"\"\n self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)\n self.metadata = metadata\n self.output = VisImage(self.img, scale=scale)\n self.cpu_device = torch.device(\"cpu\")\n\n # too small texts are useless, therefore clamp to 9\n self._default_font_size = max(\n np.sqrt(self.output.height * self.output.width) // 90, 10 // scale\n )\n self._instance_mode = instance_mode\n\n def draw_instance_predictions(self, predictions):\n \"\"\"\n Draw instance-level prediction results on an image.\n\n Args:\n predictions (Instances): the output of an instance detection/segmentation\n model. Following fields will be used to draw:\n \"pred_boxes\", \"pred_classes\", \"scores\", \"pred_masks\" (or \"pred_masks_rle\").\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n boxes = predictions.pred_boxes if predictions.has(\"pred_boxes\") else None\n scores = predictions.scores if predictions.has(\"scores\") else None\n classes = predictions.pred_classes if predictions.has(\"pred_classes\") else None\n labels = _create_text_labels(classes, scores, self.metadata.get(\"thing_classes\", None))\n keypoints = predictions.pred_keypoints if predictions.has(\"pred_keypoints\") else None\n\n if predictions.has(\"pred_masks\"):\n masks = np.asarray(predictions.pred_masks)\n masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]\n else:\n masks = None\n\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes\n ]\n alpha = 0.8\n else:\n colors = None\n alpha = 0.5\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.img = self._create_grayscale_image(\n (predictions.pred_masks.any(dim=0) > 0).numpy()\n )\n alpha = 0.3\n\n self.overlay_instances(\n masks=masks,\n boxes=boxes,\n labels=labels,\n keypoints=keypoints,\n assigned_colors=colors,\n alpha=alpha,\n )\n return self.output\n\n def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):\n \"\"\"\n Draw semantic segmentation predictions/labels.\n\n Args:\n sem_seg (Tensor or ndarray): the segmentation of shape (H, W).\n Each value is the integer label of the pixel.\n area_threshold (int): segments with less than `area_threshold` are not drawn.\n alpha (float): the larger it is, the more opaque the segmentations are.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n if isinstance(sem_seg, torch.Tensor):\n sem_seg = sem_seg.numpy()\n labels, areas = np.unique(sem_seg, 
return_counts=True)\n sorted_idxs = np.argsort(-areas).tolist()\n labels = labels[sorted_idxs]\n for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]\n except (AttributeError, IndexError):\n mask_color = None\n\n binary_mask = (sem_seg == label).astype(np.uint8)\n text = self.metadata.stuff_classes[label]\n self.draw_binary_mask(\n binary_mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n return self.output\n\n def draw_panoptic_seg_predictions(\n self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7\n ):\n \"\"\"\n Draw panoptic prediction results on an image.\n\n Args:\n panoptic_seg (Tensor): of shape (height, width) where the values are ids for each\n segment.\n segments_info (list[dict]): Describe each segment in `panoptic_seg`.\n Each dict contains keys \"id\", \"category_id\", \"isthing\".\n area_threshold (int): stuff segments with less than `area_threshold` are not drawn.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n pred = _PanopticPrediction(panoptic_seg, segments_info)\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.img = self._create_grayscale_image(pred.non_empty_mask())\n\n # draw mask for all semantic segments first i.e. \"stuff\"\n for mask, sinfo in pred.semantic_masks():\n category_idx = sinfo[\"category_id\"]\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]\n except AttributeError:\n mask_color = None\n\n text = self.metadata.stuff_classes[category_idx]\n self.draw_binary_mask(\n mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n\n # draw mask for all instances second\n all_instances = list(pred.instance_masks())\n if len(all_instances) == 0:\n return self.output\n masks, sinfo = list(zip(*all_instances))\n category_ids = [x[\"category_id\"] for x in sinfo]\n\n try:\n scores = [x[\"score\"] for x in sinfo]\n except KeyError:\n scores = None\n labels = _create_text_labels(category_ids, scores, self.metadata.thing_classes)\n\n try:\n colors = [random_color(rgb=True, maximum=1) for k in category_ids]\n except AttributeError:\n colors = None\n self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)\n\n return self.output\n\n def draw_dataset_dict(self, dic):\n \"\"\"\n Draw annotations/segmentaions in Detectron2 Dataset format.\n\n Args:\n dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n annos = dic.get(\"annotations\", None)\n if annos:\n if \"segmentation\" in annos[0]:\n masks = [x[\"segmentation\"] for x in annos]\n else:\n masks = None\n if \"keypoints\" in annos[0]:\n keypts = [x[\"keypoints\"] for x in annos]\n keypts = np.array(keypts).reshape(len(annos), -1, 3)\n else:\n keypts = None\n\n boxes = [BoxMode.convert(x[\"bbox\"], x[\"bbox_mode\"], BoxMode.XYXY_ABS) for x in annos]\n\n labels = [x[\"category_id\"] for x in annos]\n colors = None\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in labels\n ]\n names = self.metadata.get(\"thing_classes\", None)\n if names:\n labels = [names[i] for i in labels]\n labels = [\n \"{}\".format(i) + (\"|crowd\" if a.get(\"iscrowd\", 0) 
else \"\")\n for i, a in zip(labels, annos)\n ]\n self.overlay_instances(\n labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors\n )\n\n sem_seg = dic.get(\"sem_seg\", None)\n if sem_seg is None and \"sem_seg_file_name\" in dic:\n with PathManager.open(dic[\"sem_seg_file_name\"], \"rb\") as f:\n sem_seg = Image.open(f)\n sem_seg = np.asarray(sem_seg, dtype=\"uint8\")\n if sem_seg is not None:\n self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)\n return self.output\n\n def overlay_instances(\n self,\n *,\n boxes=None,\n labels=None,\n masks=None,\n keypoints=None,\n assigned_colors=None,\n alpha=0.5\n ):\n \"\"\"\n Args:\n boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,\n or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,\n or a :class:`RotatedBoxes`,\n or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image,\n labels (list[str]): the text to be displayed for each instance.\n masks (masks-like object): Supported types are:\n\n * :class:`detectron2.structures.PolygonMasks`,\n :class:`detectron2.structures.BitMasks`.\n * list[list[ndarray]]: contains the segmentation masks for all objects in one image.\n The first level of the list corresponds to individual instances. The second\n level to all the polygon that compose the instance, and the third level\n to the polygon coordinates. The third level should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n * list[ndarray]: each ndarray is a binary mask of shape (H, W).\n * list[dict]: each dict is a COCO-style RLE.\n keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),\n where the N is the number of instances and K is the number of keypoints.\n The last dimension corresponds to (x, y, visibility or score).\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. 
Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = None\n if boxes is not None:\n boxes = self._convert_boxes(boxes)\n num_instances = len(boxes)\n if masks is not None:\n masks = self._convert_masks(masks)\n if num_instances:\n assert len(masks) == num_instances\n else:\n num_instances = len(masks)\n if keypoints is not None:\n if num_instances:\n assert len(keypoints) == num_instances\n else:\n num_instances = len(keypoints)\n keypoints = self._convert_keypoints(keypoints)\n if labels is not None:\n assert len(labels) == num_instances\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n if boxes is not None and boxes.shape[1] == 5:\n return self.overlay_rotated_instances(\n boxes=boxes, labels=labels, assigned_colors=assigned_colors\n )\n\n # Display in largest to smallest order to reduce occlusion.\n areas = None\n if boxes is not None:\n areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)\n elif masks is not None:\n areas = np.asarray([x.area() for x in masks])\n\n if areas is not None:\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs] if boxes is not None else None\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None\n assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]\n keypoints = keypoints[sorted_idxs] if keypoints is not None else None\n\n for i in range(num_instances):\n color = assigned_colors[i]\n if boxes is not None:\n self.draw_box(boxes[i], edge_color=color)\n\n if masks is not None:\n for segment in masks[i].polygons:\n self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)\n\n if labels is not None:\n # first get a box\n if boxes is not None:\n x0, y0, x1, y1 = boxes[i]\n text_pos = (x0, y0) # if drawing boxes, put text on the box corner.\n horiz_align = \"left\"\n elif masks is not None:\n x0, y0, x1, y1 = masks[i].bbox()\n\n # draw text in the center (defined by median) when box is not drawn\n # median is less sensitive to outliers.\n text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]\n horiz_align = \"center\"\n else:\n continue # drawing the box confidence for keypoints isn't very useful.\n # for small objects, draw text at the side to avoid occlusion\n instance_area = (y1 - y0) * (x1 - x0)\n if (\n instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale\n or y1 - y0 < 40 * self.output.scale\n ):\n if y1 >= self.output.height - 5:\n text_pos = (x1, y0)\n else:\n text_pos = (x0, y1)\n\n height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)\n * 0.5\n * self._default_font_size\n )\n self.draw_text(\n labels[i],\n text_pos,\n color=lighter_color,\n horizontal_alignment=horiz_align,\n font_size=font_size,\n )\n\n # draw keypoints\n if keypoints is not None:\n for keypoints_per_instance in keypoints:\n self.draw_and_connect_keypoints(keypoints_per_instance)\n\n return self.output\n\n def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):\n \"\"\"\n Args:\n boxes (ndarray): an Nx5 numpy array of\n (x_center, y_center, width, height, 
angle_degrees) format\n for the N objects in a single image.\n labels (list[str]): the text to be displayed for each instance.\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n\n num_instances = len(boxes)\n\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n\n # Display in largest to smallest order to reduce occlusion.\n if boxes is not None:\n areas = boxes[:, 2] * boxes[:, 3]\n\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs]\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n colors = [assigned_colors[idx] for idx in sorted_idxs]\n\n for i in range(num_instances):\n self.draw_rotated_box_with_label(\n boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None\n )\n\n return self.output\n\n def draw_and_connect_keypoints(self, keypoints):\n \"\"\"\n Draws keypoints of an instance and follows the rules for keypoint connections\n to draw lines between appropriate keypoints. This follows color heuristics for\n line color.\n\n Args:\n keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints\n and the last dimension corresponds to (x, y, probability).\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n visible = {}\n keypoint_names = self.metadata.get(\"keypoint_names\")\n for idx, keypoint in enumerate(keypoints):\n # draw keypoint\n x, y, prob = keypoint\n if prob > _KEYPOINT_THRESHOLD:\n self.draw_circle((x, y), color=_RED)\n if keypoint_names:\n keypoint_name = keypoint_names[idx]\n visible[keypoint_name] = (x, y)\n\n if self.metadata.get(\"keypoint_connection_rules\"):\n for kp0, kp1, color in self.metadata.keypoint_connection_rules:\n if kp0 in visible and kp1 in visible:\n x0, y0 = visible[kp0]\n x1, y1 = visible[kp1]\n color = tuple(x / 255.0 for x in color)\n self.draw_line([x0, x1], [y0, y1], color=color)\n\n # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip\n # Note that this strategy is specific to person keypoints.\n # For other keypoints, it should just do nothing\n try:\n ls_x, ls_y = visible[\"left_shoulder\"]\n rs_x, rs_y = visible[\"right_shoulder\"]\n mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2\n except KeyError:\n pass\n else:\n # draw line from nose to mid-shoulder\n nose_x, nose_y = visible.get(\"nose\", (None, None))\n if nose_x is not None:\n self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)\n\n try:\n # draw line from mid-shoulder to mid-hip\n lh_x, lh_y = visible[\"left_hip\"]\n rh_x, rh_y = visible[\"right_hip\"]\n except KeyError:\n pass\n else:\n mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2\n self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)\n return self.output\n\n \"\"\"\n Primitive drawing functions:\n \"\"\"\n\n def draw_text(\n self,\n text,\n position,\n *,\n font_size=None,\n color=\"g\",\n horizontal_alignment=\"center\",\n rotation=0\n ):\n \"\"\"\n Args:\n text (str): class label\n position (tuple): a tuple of the x and y coordinates to place text on image.\n font_size (int, optional): font of the text. 
If not provided, a font size\n proportional to the image width is calculated and used.\n color: color of the text. Refer to `matplotlib.colors` for full list\n of formats that are accepted.\n horizontal_alignment (str): see `matplotlib.text.Text`\n rotation: rotation angle in degrees CCW\n\n Returns:\n output (VisImage): image object with text drawn.\n \"\"\"\n if not font_size:\n font_size = self._default_font_size\n\n # since the text background is dark, we don't want the text to be dark\n color = np.maximum(list(mplc.to_rgb(color)), 0.2)\n color[np.argmax(color)] = max(0.8, np.max(color))\n\n x, y = position\n self.output.ax.text(\n x,\n y,\n text,\n size=font_size * self.output.scale,\n family=\"sans-serif\",\n bbox={\"facecolor\": \"black\", \"alpha\": 0.8, \"pad\": 0.7, \"edgecolor\": \"none\"},\n verticalalignment=\"top\",\n horizontalalignment=horizontal_alignment,\n color=color,\n zorder=10,\n rotation=rotation,\n )\n return self.output\n\n def draw_box(self, box_coord, alpha=0.5, edge_color=\"g\", line_style=\"-\"):\n \"\"\"\n Args:\n box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0\n are the coordinates of the image's top left corner. x1 and y1 are the\n coordinates of the image's bottom right corner.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x0, y0, x1, y1 = box_coord\n width = x1 - x0\n height = y1 - y0\n\n linewidth = max(self._default_font_size / 4, 1)\n\n self.output.ax.add_patch(\n mpl.patches.Rectangle(\n (x0, y0),\n width,\n height,\n fill=False,\n edgecolor=edge_color,\n linewidth=linewidth * self.output.scale,\n alpha=alpha,\n linestyle=line_style,\n )\n )\n return self.output\n\n def draw_rotated_box_with_label(\n self, rotated_box, alpha=0.5, edge_color=\"g\", line_style=\"-\", label=None\n ):\n \"\"\"\n Args:\n rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),\n where cnt_x and cnt_y are the center coordinates of the box.\n w and h are the width and height of the box. angle represents how\n many degrees the box is rotated CCW with regard to the 0-degree box.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n label (string): label for rotated box. 
It will not be rendered when set to None.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n cnt_x, cnt_y, w, h, angle = rotated_box\n area = w * h\n # use thinner lines when the box is small\n linewidth = self._default_font_size / (\n 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3\n )\n\n theta = angle * math.pi / 180.0\n c = math.cos(theta)\n s = math.sin(theta)\n rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]\n # x: left->right ; y: top->down\n rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]\n for k in range(4):\n j = (k + 1) % 4\n self.draw_line(\n [rotated_rect[k][0], rotated_rect[j][0]],\n [rotated_rect[k][1], rotated_rect[j][1]],\n color=edge_color,\n linestyle=\"--\" if k == 1 else line_style,\n linewidth=linewidth,\n )\n\n if label is not None:\n text_pos = rotated_rect[1] # topleft corner\n\n height_ratio = h / np.sqrt(self.output.height * self.output.width)\n label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size\n )\n self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)\n\n return self.output\n\n def draw_circle(self, circle_coord, color, radius=3):\n \"\"\"\n Args:\n circle_coord (list(int) or tuple(int)): contains the x and y coordinates\n of the center of the circle.\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n radius (int): radius of the circle.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x, y = circle_coord\n self.output.ax.add_patch(\n mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)\n )\n return self.output\n\n def draw_line(self, x_data, y_data, color, linestyle=\"-\", linewidth=None):\n \"\"\"\n Args:\n x_data (list[int]): a list containing x values of all the points being drawn.\n Length of list should match the length of y_data.\n y_data (list[int]): a list containing y values of all the points being drawn.\n Length of list should match the length of x_data.\n color: color of the line. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n linestyle: style of the line. Refer to `matplotlib.lines.Line2D`\n for a full list of formats that are accepted.\n linewidth (float or None): width of the line. When it's None,\n a default value will be computed and used.\n\n Returns:\n output (VisImage): image object with line drawn.\n \"\"\"\n if linewidth is None:\n linewidth = self._default_font_size / 3\n linewidth = max(linewidth, 1)\n self.output.ax.add_line(\n mpl.lines.Line2D(\n x_data,\n y_data,\n linewidth=linewidth * self.output.scale,\n color=color,\n linestyle=linestyle,\n )\n )\n return self.output\n\n def draw_binary_mask(\n self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=4096\n ):\n \"\"\"\n Args:\n binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and\n W is the image width. Each value in the array is either a 0 or 1 value of uint8\n type.\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n edge_color: color of the polygon edges. 
Refer to `matplotlib.colors` for a\n full list of formats that are accepted.\n text (str): if None, will be drawn in the object's center of mass.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n area_threshold (float): a connected component small than this will not be shown.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n color = random_color(rgb=True, maximum=1)\n color = mplc.to_rgb(color)\n if area_threshold is None:\n area_threshold = 4096\n\n has_valid_segment = False\n binary_mask = binary_mask.astype(\"uint8\") # opencv needs uint8\n mask = GenericMask(binary_mask, self.output.height, self.output.width)\n shape2d = (binary_mask.shape[0], binary_mask.shape[1])\n\n if not mask.has_holes:\n # draw polygons for regular masks\n for segment in mask.polygons:\n area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))\n if area < area_threshold:\n continue\n has_valid_segment = True\n segment = segment.reshape(-1, 2)\n self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)\n else:\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = (mask.mask == 1).astype(\"float32\") * alpha\n has_valid_segment = True\n self.output.ax.imshow(rgba)\n\n if text is not None and has_valid_segment:\n # TODO sometimes drawn on wrong objects. the heuristics here can improve.\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)\n largest_component_id = np.argmax(stats[1:, -1]) + 1\n\n # draw text on the largest component, as well as other very large components.\n for cid in range(1, _num_cc):\n if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:\n # median is more stable than centroid\n # center = centroids[largest_component_id]\n center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]\n self.draw_text(text, center, color=lighter_color)\n return self.output\n\n def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):\n \"\"\"\n Args:\n segment: numpy array of shape Nx2, containing all the points in the polygon.\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted. If not provided, a darker shade\n of the polygon color will be used instead.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n\n Returns:\n output (VisImage): image object with polygon drawn.\n \"\"\"\n if edge_color is None:\n # make edge color darker than the polygon color\n if alpha > 0.8:\n edge_color = self._change_color_brightness(color, brightness_factor=-0.7)\n else:\n edge_color = color\n edge_color = mplc.to_rgb(edge_color) + (1,)\n\n polygon = mpl.patches.Polygon(\n segment,\n fill=True,\n facecolor=mplc.to_rgb(color) + (alpha,),\n edgecolor=edge_color,\n linewidth=max(self._default_font_size // 15 * self.output.scale, 1),\n )\n self.output.ax.add_patch(polygon)\n return self.output\n\n \"\"\"\n Internal methods:\n \"\"\"\n\n def _jitter(self, color):\n \"\"\"\n Randomly modifies given color to produce a slightly different color than the color given.\n\n Args:\n color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color\n picked. 
The values in the list are in the [0.0, 1.0] range.\n\n Returns:\n jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the\n color after being jittered. The values in the list are in the [0.0, 1.0] range.\n \"\"\"\n color = mplc.to_rgb(color)\n vec = np.random.rand(3)\n # better to do it in another color space\n vec = vec / np.linalg.norm(vec) * 0.5\n res = np.clip(vec + color, 0, 1)\n return tuple(res)\n\n def _create_grayscale_image(self, mask=None):\n \"\"\"\n Create a grayscale version of the original image.\n The colors in masked area, if given, will be kept.\n \"\"\"\n img_bw = self.img.astype(\"f4\").mean(axis=2)\n img_bw = np.stack([img_bw] * 3, axis=2)\n if mask is not None:\n img_bw[mask] = self.img[mask]\n return img_bw\n\n def _change_color_brightness(self, color, brightness_factor):\n \"\"\"\n Depending on the brightness_factor, gives a lighter or darker color i.e. a color with\n less or more saturation than the original color.\n\n Args:\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of\n 0 will correspond to no change, a factor in [-1.0, 0) range will result in\n a darker color and a factor in (0, 1.0] range will result in a lighter color.\n\n Returns:\n modified_color (tuple[double]): a tuple containing the RGB values of the\n modified color. Each value in the tuple is in the [0.0, 1.0] range.\n \"\"\"\n assert brightness_factor >= -1.0 and brightness_factor <= 1.0\n color = mplc.to_rgb(color)\n polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))\n modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])\n modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness\n modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness\n modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])\n return modified_color\n\n def _convert_boxes(self, boxes):\n \"\"\"\n Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.\n \"\"\"\n if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):\n return boxes.tensor.numpy()\n else:\n return np.asarray(boxes)\n\n def _convert_masks(self, masks_or_polygons):\n \"\"\"\n Convert different format of masks or polygons to a tuple of masks and polygons.\n\n Returns:\n list[GenericMask]:\n \"\"\"\n\n m = masks_or_polygons\n if isinstance(m, PolygonMasks):\n m = m.polygons\n if isinstance(m, BitMasks):\n m = m.tensor.numpy()\n if isinstance(m, torch.Tensor):\n m = m.numpy()\n ret = []\n for x in m:\n if isinstance(x, GenericMask):\n ret.append(x)\n else:\n ret.append(GenericMask(x, self.output.height, self.output.width))\n return ret\n\n def _convert_keypoints(self, keypoints):\n if isinstance(keypoints, Keypoints):\n keypoints = keypoints.tensor\n keypoints = np.asarray(keypoints)\n return keypoints\n\n def get_output(self):\n \"\"\"\n Returns:\n output (VisImage): the image output containing the visualizations added\n to the image.\n \"\"\"\n return self.output\n" ]
[ [ "numpy.split", "numpy.sqrt", "numpy.asarray", "numpy.max", "torch.unique", "torch.device", "matplotlib.colors.to_rgb", "numpy.unique", "matplotlib.backends.backend_agg.FigureCanvasAgg", "numpy.clip", "numpy.stack", "numpy.frombuffer", "numpy.argmax", "numpy.zeros", "numpy.ascontiguousarray", "matplotlib.patches.Rectangle", "matplotlib.patches.Circle", "numpy.random.rand", "numpy.argsort", "numpy.array", "matplotlib.figure.Figure", "matplotlib.lines.Line2D", "numpy.linalg.norm", "numpy.prod" ] ]
Musyue/mobile_robot
[ "ed8a75c41e8ccaf1b48639239e4119bf2d1d5f65" ]
[ "src/archtive_code/mobile_4wd_driver_hoffman_test5.py" ]
[ "#! /usr/bin/env python\n# coding=utf-8\nimport rospy\nimport sys\nfrom std_msgs.msg import String,Float64,Bool,Int64MultiArray\nfrom nav_msgs.msg import Path\nfrom geometry_msgs.msg import Quaternion,Point,Pose\nfrom sensor_msgs.msg import Imu\nimport time \nfrom math import *\nimport numpy as np\nfrom mobile_control.mobileplatform_driver_steptech import *\nfrom geometry_msgs.msg import Twist\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\nimport tf2_ros\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nimport logging\nlogging.basicConfig()\nclass AGV4WDICONTROLLER():\n def __init__(self):\n self.mpfh=MobilePlatformDriver()\n self.wheel_R=0.15/2#m\n self.car_length=0.5\n self.car_width=0.395\n rospy.init_node(\"imu_data_for_mobileplatform\")\n self.imu_sub=rospy.Subscriber('/imu_data',Imu,self.Imu_callback)\n self.path_sub=rospy.Subscriber('/path_target',Path,self.PathTarget_callback)\n self.cmd_vel_sub=rospy.Subscriber('/cmd_vel',Twist,self.CmdVel_callback)\n self.ImuOrientation=()\n self.ImuAngularvelocity=()\n self.ImuLinearAcceleration=()\n self.ImuOrientationCovariance=[]\n self.ImuAngularvelocityCovariance=[]\n self.ImuLinearAccelerationCovariance=[]\n self.linear_x=0.00001\n self.linear_y=0\n self.linear_z=0\n self.angular_x=0\n self.angular_y=0\n self.angular_z=0.00001\n self.pose_x=0.0\n self.pose_y=0.\n self.pose_z=0.\n self.pose_quaternion_x=0.\n self.pose_quaternion_y=0.\n self.pose_quaternion_z=0.0\n self.pose_quaternion_w=0.0\n self.roll=0\n self.pitch=0\n self.yaw=0\n self.speed_rotation=[]\n self.odemetry_x=0.0\n self.odemetry_y=0.0\n self.odemetry_pha=0.0#3.14\n self.odemetry_beta=0.0\n self.vel_reference=1.0#0.5\n self.reference_x=0\n self.reference_y=0\n self.reference_pha=0\n self.reference_beta=0\n self.k1=4\n self.k2=20\n self.k3=0.5\n self.k4=1\n ####hoffamn\n self.st=tan(0)\n self.phi=0#gama\n self.index_ref=0\n self.phaRdot=0.08\n self.betaRdot=0\n self.tfBuffer = tf2_ros.Buffer()\n self.listener = tf2_ros.TransformListener(self.tfBuffer)\n self.tranformtfs=\"map\"\n self.tranformtft='base_link'\n self.trans = self.tfBuffer.lookup_transform(self.tranformtfs, self.tranformtft, rospy.Time())\n #trans.transform.translation.y, trans.transform.translation.x\n self.read_path=loadmat('/data/ros/yue_wk_2019/src/mobile_robot/src/mobile_control/test_path.mat')\n # self.pub_vstar=rospy.Publisher(\"/vstar\",Float64,queue_size=10)\n # self.pub_x=rospy.Publisher(\"/x\",Float64,queue_size=10)\n # self.pub_y=rospy.Publisher(\"/y\",Float64,queue_size=10)\n # self.pub_=rospy.Publisher(\"/theta\",Float64,queue_size=10)\n self.target_path=[]\n self.homing_original_position=[self.mpfh.Driver_steer_encode_fl_original,self.mpfh.Driver_steer_encode_fr_original,self.mpfh.Driver_steer_encode_rl_original,self.mpfh.Driver_steer_encode_rr_original]\n def CmdVel_callback(self,msg):\n # print \"msg\",msg.linear.x\n self.linear_x=msg.linear.x\n self.linear_y=msg.linear.y\n self.linear_z=msg.linear.z\n self.angular_x=msg.angular.x\n self.angular_y=msg.angular.y\n self.angular_z=msg.angular.z\n def PathTarget_callback(self,msg):\n self.pose_x=msg.pose.position.x\n self.pose_y=msg.pose.position.y\n self.pose_z=msg.pose.position.z\n self.pose_quaternion_x=msg.pose.orientation.x\n self.pose_quaternion_y=msg.pose.orientation.y\n self.pose_quaternion_z=msg.pose.orientation.z\n self.pose_quaternion_w=msg.pose.orientation.w\n orientation_list = [msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w]\n 
(self.roll,self.pitch,self.yaw) = euler_from_quaternion (orientation_list)\n def Avage_list(self,listdata,appendata):\n if len(listdata)>10:\n listdata=listdata[1:]\n listdata.append(appendata)\n else:\n listdata.append(appendata)\n return listdata\n def Init_Node(self):\n \n rospy.init_node(\"imu_data_for_mobileplatform\")\n # self.mpfh.Init_can()\n # self.mpfh.Open_driver_can_Node(0x00000000,1)\n # self.mpfh.Enable_Motor_Controller_All()\n # self.mpfh.Send_trapezoid_Velocity(2500)\n def Imu_callback(self,msg):\n self.ImuOrientation=(msg.orientation.x,msg.orientation.y,msg.orientation.z,msg.orientation.w)\n self.ImuAngularvelocity=(msg.angular_velocity.x,msg.angular_velocity.y,msg.angular_velocity.z)\n self.ImuLinearAcceleration=(msg.linear_acceleration.x,msg.linear_acceleration.y,msg.linear_acceleration.z)\n self.ImuOrientationCovariance=msg.orientation_covariance\n self.ImuAngularvelocityCovariance=msg.angular_velocity_covariance\n self.ImuLinearAccelerationCovariance=msg.linear_acceleration_covariance\n def set_pdemetry_vel(self,vel):\n self.odemetry_vel=vel\n def set_pdemetry_x(self,x):\n self.odemetry_x=x\n def set_pdemetry_y(self,y):\n self.odemetry_y=y\n\n\n\n def Caculate_velocity_from_RPM(self):\n # Velocity=[]\n\n RPM_fl=self.mpfh.Dec_to_RPM(self.mpfh.Driver_walk_velocity_encode_fl)\n RPM_fr=self.mpfh.Dec_to_RPM(self.mpfh.Driver_walk_velocity_encode_fr)\n RPM_rl=self.mpfh.Dec_to_RPM(self.mpfh.Driver_walk_velocity_encode_rl)\n RPM_rr=self.mpfh.Dec_to_RPM(self.mpfh.Driver_walk_velocity_encode_rr)\n print(\"RPM_fl\",RPM_fl,RPM_fr,RPM_rl,RPM_rr)\n if 0 not in [RPM_fl,RPM_fr,RPM_rl,RPM_rr]:\n Velocity=[(RPM_fl*2*pi*self.wheel_R)/60.0,(RPM_fr*2*pi*self.wheel_R)/60.0,(RPM_rl*2*pi*self.wheel_R)/60.0,(RPM_rr*2*pi*self.wheel_R)/60.0]\n print(\"------Velocity---------\",Velocity)#self.mpfh.Driver_walk_velocity_encode_fl\n return Velocity\n else:\n Velocity=[(RPM_fl*2*pi*self.wheel_R)/60.0,(RPM_fr*2*pi*self.wheel_R)/60.0,(RPM_rl*2*pi*self.wheel_R)/60.0,(RPM_rr*2*pi*self.wheel_R)/60.0]\n print(\"----some zero in list for velocity---\",Velocity)\n return [-1.0*self.vel_reference,self.vel_reference,-1.0*self.vel_reference,self.vel_reference]\n # print \"there are velocity error in encode\"\n\n def Caculate_rad_from_position_data(self):\n detafi=self.mpfh.Pos_to_rad(self.mpfh.Driver_steer_encode_fl-self.mpfh.Driver_steer_encode_fl_original)\n detafo=self.mpfh.Pos_to_rad(self.mpfh.Driver_steer_encode_fr-self.mpfh.Driver_steer_encode_fr_original)\n detari=self.mpfh.Pos_to_rad(self.mpfh.Driver_steer_encode_rl-self.mpfh.Driver_steer_encode_rl_original)\n detaro=self.mpfh.Pos_to_rad(self.mpfh.Driver_steer_encode_rr-self.mpfh.Driver_steer_encode_rr_original)\n print(\"self.mpfh.Driver_steer_encode_fl\",self.mpfh.Driver_steer_encode_fl,self.mpfh.Driver_steer_encode_fr,self.mpfh.Driver_steer_encode_rl,self.mpfh.Driver_steer_encode_rr)\n return [detafi,detafo,detari,detaro]\n\n\n def my_arccot(self,x):\n return pi/2-atan(x)\n # if x>0:\n # return atan(1/x)+pi-3328842.5883102473,\n # elif x<0:\n # return atan(1/x)\n # else:\n # return 0.0\n def caculate_four_steer_degree_theta(self,temp_fr_re):\n \"\"\"\n arccot(x)=\n {\n arctan(1/x)+π(x>0)\n arctan(1/x)(x<0)\n }\n \"\"\"\n # temp_fr_re=self.caculate_bicycle_model_thetafr_re()\n \n temp_theta_fo_fr=(1/tan(temp_fr_re[0]))*(1+(self.car_width/self.car_length)*(tan(temp_fr_re[0])-tan(temp_fr_re[1])))\n temp_theta_fi_fl=(1/tan(temp_fr_re[0]))*(1-(self.car_width/self.car_length)*(tan(temp_fr_re[0])-tan(temp_fr_re[1])))\n 
temp_theta_ro_rr=(1/tan(temp_fr_re[1]))*(1+(self.car_width/self.car_length)*(tan(temp_fr_re[0])-tan(temp_fr_re[1])))\n temp_theta_ri_rl=(1/tan(temp_fr_re[1]))*(1-(self.car_width/self.car_length)*(tan(temp_fr_re[0])-tan(temp_fr_re[1])))\n theta_fo_fr=self.my_arccot(temp_theta_fo_fr)\n theta_fi_fl=self.my_arccot(temp_theta_fi_fl)\n theta_ro_rr=self.my_arccot(temp_theta_ro_rr)\n theta_ri_rl=self.my_arccot(temp_theta_ri_rl)\n return [self.Set_rad_in_halfpi(theta_fi_fl),self.Set_rad_in_halfpi(theta_fo_fr),self.Set_rad_in_halfpi(theta_ri_rl),self.Set_rad_in_halfpi(theta_ro_rr)]\n def Set_rad_in_halfpi(self,rad):\n if rad<pi/2.0:\n return rad\n else:\n return rad-pi\n # return (rad+pi/2.0)%(pi/2.0) - pi/2.0\n def Set_rad_in_pi(self,rad):\n return (rad+pi)%(pi*2.0) - pi\n def caculate_four_walk_motor_velocity(self,NewVA,temp_theta_fl_fr_rl_rr,temp_fr_re):\n # temp_theta_fl_fr_rl_rr=self.caculate_four_steer_degree_theta()\n # temp_fr_re=self.caculate_bicycle_model_thetafr_re()\n\n v1_fr_fo=(NewVA*tan(temp_fr_re[0])*(1/sin(temp_theta_fl_fr_rl_rr[1])))/sqrt(1+(1/4)*(tan(temp_fr_re[0])+tan(temp_fr_re[1]))**2)\n v2_fl_fi=(NewVA*tan(temp_fr_re[0])*(1/sin(temp_theta_fl_fr_rl_rr[0])))/sqrt(1+(1/4)*(tan(temp_fr_re[0])+tan(temp_fr_re[1]))**2)\n v3_rr_ro=(NewVA*tan(temp_fr_re[1])*(1/sin(temp_theta_fl_fr_rl_rr[3])))/sqrt(1+(1/4)*(tan(temp_fr_re[0])+tan(temp_fr_re[1]))**2)\n v4_rl_ri=(NewVA*tan(temp_fr_re[1])*(1/sin(temp_theta_fl_fr_rl_rr[2])))/sqrt(1+(1/4)*(tan(temp_fr_re[0])+tan(temp_fr_re[1]))**2)\n print(\"v2_fl_fi,v1_fr_fo,v4_rl_ri,v3_rr_ro\",v2_fl_fi,v1_fr_fo,v4_rl_ri,v3_rr_ro)\n if abs(v2_fl_fi)>=1.0 and v2_fl_fi>0.0 :\n v2_fl_fi=1.0\n elif abs(v2_fl_fi)>=1.0 and v2_fl_fi<0.0:\n v2_fl_fi=-1.0\n elif abs(v2_fl_fi)<=1.0:\n v2_fl_fi=v2_fl_fi\n else:\n v2_fl_fi=0.0\n\n if abs(v1_fr_fo)>=1.0 and v1_fr_fo>0.0 :\n v1_fr_fo=1.0\n elif abs(v1_fr_fo)>=1.0 and v1_fr_fo<0.0:\n v1_fr_fo=-1.0\n elif abs(v1_fr_fo)<=1.0:\n v1_fr_fo=v1_fr_fo\n else:\n v1_fr_fo=0.0\n\n if abs(v4_rl_ri)>=1.0 and v4_rl_ri>0.0 :\n v4_rl_ri=1.0\n elif abs(v4_rl_ri)>=1.0 and v4_rl_ri<0.0:\n v4_rl_ri=-1.0\n elif abs(v4_rl_ri)<=1.0:\n v4_rl_ri=v4_rl_ri\n else:\n v4_rl_ri=0.0\n\n if abs(v3_rr_ro)>=1.0 and v3_rr_ro>0.0 :\n v3_rr_ro=1.0\n elif abs(v3_rr_ro)>=1.0 and v3_rr_ro<0.0:\n v3_rr_ro=-1.0\n elif abs(v3_rr_ro)<=1.0:\n v3_rr_ro=v3_rr_ro\n else:\n v3_rr_ro=0.0\n\n return [v2_fl_fi,v1_fr_fo,v4_rl_ri,v3_rr_ro]\n # def find_closest_point(self,x,y):\n # e=sqrt((x-self.target_path[0][0])**2+(y-self.target_path[0][1])**2)\n # index=0\n # if e!=0:\n # for i in range(len(self.read_path['path'])):\n # e_temp=sqrt((x-self.target_path[i][0])**2+(y-self.target_path[i][1])**2)\n # if e_temp<e:\n # e=e_temp\n # index=i\n # return index\n # else:\n # return 1\n\n # def add_target(self,):\n # for i in range(len(self.read_path['path'])):\n # self.target_path.append(list(self.read_path['path'][i]))\n def sign(self,x):\n if x!=0.0:\n return x/abs(x)\n else:\n return 0.0\n def hoffman_control(self,point_ref_all,dt):\n self.odemetry_x=self.odemetry_x+self.vel_reference*cos(self.odemetry_pha)*dt\n self.odemetry_y=self.odemetry_y+self.vel_reference*sin(self.odemetry_pha)*dt\n self.odemetry_pha=self.odemetry_pha+self.vel_reference*(1.0/self.car_length)*self.st*dt\n self.odemetry_pha=self.Set_rad_in_pi(self.odemetry_pha)\n e=sqrt((self.odemetry_x-point_ref_all[0][0])**2+(self.odemetry_y-point_ref_all[0][1])**2)\n for i in range(2,len(point_ref_all)):\n etmp=sqrt((self.odemetry_x-point_ref_all[i][0])**2+(self.odemetry_y-point_ref_all[i][1])**2)\n if etmp<e:\n e=etmp\n 
self.index_ref=i\n print(\"self.index_ref\",self.index_ref)\n point_ref=point_ref_all[self.index_ref]\n ex1=point_ref[0]-self.odemetry_x\n ey1=point_ref[1]-self.odemetry_y\n ex2=cos(point_ref[2])\n ey2=sin(point_ref[2])\n sinnn=ex1*ey2-ey1*ex2\n e=self.sign(sinnn)*e\n self.phi=point_ref[2]- self.odemetry_pha+atan(e)\n self.st=tan(self.phi)\n if abs(self.st)>=1.0:\n self.st=1.0*self.sign(self.st)\n else:\n self.st=self.st\n def set_array_to_list(self,array_num):\n newtemp=[]\n for i in range(len(array_num)):\n newtemp.append(list(array_num[i]))\n return newtemp\n def hoffman_kinematic_model(self,VC,phi_ref):\n Vfl=VC*sqrt(self.car_length**2+(self.car_length/tan(phi_ref)+self.car_width/2.0)**2)/(self.car_length/tan(phi_ref))\n Vfr=VC*sqrt(self.car_length**2+(self.car_length/tan(phi_ref)-self.car_width/2.0)**2)/(self.car_length/tan(phi_ref))\n Vrl=VC*(self.car_length/tan(phi_ref)+self.car_width/2.0)/(self.car_length/tan(phi_ref))\n Vrr=VC*(self.car_length/tan(phi_ref)-self.car_width/2.0)/(self.car_length/tan(phi_ref))\n detarl=0.0\n detarr=0.0\n detafl=atan(self.car_length/(self.car_length/tan(phi_ref)-self.car_width/2.0))\n detafr=atan(self.car_length/(self.car_length/tan(phi_ref)+self.car_width/2.0))\n return [Vfl,Vfr,Vrl,Vrr,detafl,detafr,detarl,detarr]\ndef main():\n agvobj=AGV4WDICONTROLLER()\n# agvobj.Init_Node()\n# rospy.init_node(\"imu_data_for_mobileplatform\")\n# time.sleep(3)\n ratet=1\n rate=rospy.Rate(ratet)\n zerotime=time.time()\n dt=0\n\n flg=0\n count=1\n xr=[]\n yr=[]\n x=[]\n y=[]\n plt.ion() #开启interactive mode 成功的关键函数\n plt.figure(1)\n# agvobj.add_target()\n# plt.show()\n# postion_list\n speedfl=0\n speedfr=0\n speedrl=0\n speedrr=0\n flagg=1\n flaggg=1\n pathfilename='pathsmallCirclexythera'\n while not rospy.is_shutdown():\n # print \"haha\"\n rospy.logerr(agvobj.trans.transform.translation.y)\n # recevenum=agvobj.mpfh.CanAnalysis.Can_GetReceiveNum(0)\n # starttime=time.time()\n # print(\"recevenum----\",recevenum)\n # if recevenum!=None:\n # if flg==0:\n # agvobj.mpfh.Send_same_velocity_to_four_walking_wheel([-1.0,1.0,-1.0,1.0],1,agvobj.vel_reference)\n # # time.sleep(0.5)\n # # agvobj.mpfh.Send_position_to_four_steering_wheel(agvobj.homing_original_position)\n # flg=1\n # agvobj.mpfh.Read_sensor_data_from_driver()\n\n # pathreference=agvobj.set_array_to_list(agvobj.read_path[pathfilename])\n # # print \"pathreference\",pathreference,len(pathreference)\n # agvobj.hoffman_control(pathreference,dt)\n\n # velocity_real_time=agvobj.Caculate_velocity_from_RPM()\n # if len(velocity_real_time)!=0:\n # rad_real_time=agvobj.Caculate_rad_from_position_data()\n # print(\"velocity_real_time\",velocity_real_time)\n # print(\"rad_real_time\",rad_real_time)\n # Vfi=velocity_real_time[0]\n # Vfo=velocity_real_time[1]\n # Vri=velocity_real_time[2]\n # Vro=velocity_real_time[3]\n # detafi=rad_real_time[0]\n # detafo=rad_real_time[1]\n # detari=rad_real_time[2]\n # detaro=rad_real_time[3]\n # VC=(agvobj.sign(Vri)*Vri+agvobj.sign(Vro)*Vro)/2.0\n # print(\"VC-------\",VC)\n # v_deta=agvobj.hoffman_kinematic_model(VC,agvobj.phi)\n # print(\"four velocity and four steer---\",v_deta)\n # wheel_diretion_flg=[-1.0,1.0,-1.0,1.0]\n # wheel_diretion_flg1=[1.0,1.0,1.0,1.0]\n # speed_flag=[1.0,-1.0,-1.0,-1.0]\n # speedfl=wheel_diretion_flg1[0]*abs(v_deta[0])\n # speedfr=wheel_diretion_flg1[1]*abs(v_deta[1])\n # speedrl=wheel_diretion_flg1[2]*abs(v_deta[2])\n # speedrr=wheel_diretion_flg1[3]*abs(v_deta[3])\n # print(\"speedfl,speedfr,speedrl,speedrr\",speedfl,speedfr,speedrl,speedrr)\n # 
agvobj.mpfh.Send_diff_velocity_to_four_walking_wheel(wheel_diretion_flg,speed_flag,speedfl,speedfr,speedrl,speedrr)\n # ratation_flag=[-1.0,-1.0,-1.0,-1.0]\n # four_steer_degree_theta=v_deta[4:]\n # agvobj.mpfh.Send_diff_degree_position_to_four_steering_wheel(ratation_flag,four_steer_degree_theta)\n\n # xr.append(pathreference[agvobj.index_ref][0])\n # yr.append(pathreference[agvobj.index_ref][1])\n # x.append(agvobj.odemetry_x)\n # y.append(agvobj.odemetry_y)\n # # print \"x,y\",x,y\n # # plt.xlim(0., 10.5)\n # # plt.ylim(0., 10.0)\n # plt.plot(xr,yr,'ro',x,y,'bs')\n # plt.draw()\n # plt.pause(0.01)\n # count+=1\n # print(\"count\",count)\n # else:\n # agvobj.mpfh.Send_Control_Command(agvobj.mpfh.CanAnalysis.yamlDic['sync_data_ID'], agvobj.mpfh.MobileDriver_Command.ZERO_COMMAND)\n # print(\"---------read data----\")\n # endtime=time.time()\n # dt=endtime-starttime\n\n rate.sleep() \nif __name__==\"__main__\":\n main()" ]
[ [ "matplotlib.pyplot.ion", "scipy.io.loadmat", "matplotlib.pyplot.figure" ] ]
mollerhoj/pyro
[ "20b476442c8f7a415407ef1770e9abda9df4420d" ]
[ "pyro/distributions/transforms/__init__.py" ]
[ "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom torch.distributions import biject_to, transform_to\nfrom torch.distributions.transforms import * # noqa F403\nfrom torch.distributions.transforms import __all__ as torch_transforms\n\nfrom pyro.distributions.torch_transform import ComposeTransformModule\nfrom pyro.distributions.transforms.affine_autoregressive import AffineAutoregressive, affine_autoregressive\nfrom pyro.distributions.transforms.affine_coupling import AffineCoupling, affine_coupling\nfrom pyro.distributions.transforms.batchnorm import BatchNorm, batchnorm\nfrom pyro.distributions.transforms.block_autoregressive import BlockAutoregressive, block_autoregressive\nfrom pyro.distributions.transforms.discrete_cosine import DiscreteCosineTransform\nfrom pyro.distributions.transforms.generalized_channel_permute import (GeneralizedChannelPermute,\n generalized_channel_permute)\nfrom pyro.distributions.transforms.householder import Householder, householder\nfrom pyro.distributions.transforms.lower_cholesky_affine import LowerCholeskyAffine\nfrom pyro.distributions.transforms.neural_autoregressive import (ELUTransform, LeakyReLUTransform,\n TanhTransform, NeuralAutoregressive,\n neural_autoregressive,\n elu, leaky_relu, tanh)\nfrom pyro.distributions.transforms.permute import Permute, permute\nfrom pyro.distributions.transforms.polynomial import Polynomial, polynomial\nfrom pyro.distributions.transforms.planar import Planar, ConditionalPlanar, planar, conditional_planar\nfrom pyro.distributions.transforms.radial import Radial, ConditionalRadial, radial, conditional_radial\nfrom pyro.distributions.transforms.sylvester import Sylvester, sylvester\nfrom pyro.distributions.constraints import IndependentConstraint, corr_cholesky_constraint\nfrom pyro.distributions.transforms.cholesky import CorrLCholeskyTransform\n\n########################################\n# register transforms\n\nbiject_to.register(IndependentConstraint, lambda c: biject_to(c.base_constraint))\ntransform_to.register(IndependentConstraint, lambda c: transform_to(c.base_constraint))\n\n\n@biject_to.register(corr_cholesky_constraint)\n@transform_to.register(corr_cholesky_constraint)\ndef _transform_to_corr_cholesky(constraint):\n return CorrLCholeskyTransform()\n\n\ndef iterated(repeats, base_fn, *args, **kwargs):\n \"\"\"\n Helper function to compose a sequence of bijective transforms with potentially\n learnable parameters using :class:`~pyro.distributions.ComposeTransformModule`.\n\n :param repeats: number of repeated transforms.\n :param base_fn: function to construct the bijective transform.\n :param args: arguments taken by `base_fn`.\n :param kwargs: keyword arguments taken by `base_fn`.\n :return: instance of :class:`~pyro.distributions.TransformModule`.\n \"\"\"\n assert isinstance(repeats, int) and repeats >= 1\n return ComposeTransformModule([base_fn(*args, **kwargs) for _ in range(repeats)])\n\n\n__all__ = [\n 'iterated',\n 'AffineAutoregressive',\n 'AffineCoupling',\n 'BatchNorm',\n 'BlockAutoregressive',\n 'ComposeTransformModule',\n 'ConditionalPlanar',\n 'ConditionalRadial',\n 'CorrLCholeskyTransform',\n 'DiscreteCosineTransform',\n 'ELUTransform',\n 'GeneralizedChannelPermute',\n 'Householder',\n 'LeakyReLUTransform',\n 'LowerCholeskyAffine',\n 'NeuralAutoregressive',\n 'Permute',\n 'Planar',\n 'Polynomial',\n 'Radial',\n 'Sylvester',\n 'TanhTransform',\n 'affine_autoregressive',\n 'affine_coupling',\n 'batchnorm',\n 'block_autoregressive',\n 
'conditional_planar',\n 'conditional_radial',\n 'elu',\n 'generalized_channel_permute',\n 'householder',\n 'leaky_relu',\n 'neural_autoregressive',\n 'permute',\n 'planar',\n 'polynomial',\n 'radial',\n 'sylvester',\n 'tanh',\n]\n\n__all__.extend(torch_transforms)\ndel torch_transforms\n" ]
[ [ "torch.distributions.biject_to", "torch.distributions.transform_to", "torch.distributions.biject_to.register", "torch.distributions.transform_to.register" ] ]
mathieu-sibue/preparing-interviews-with-nlp-audio
[ "aac0115d8fd476f7c0a7d9ca500706e839f6382c" ]
[ "back/src/features/topic_rank/topicrank.py" ]
[ "\n\"\"\"TopicRank keyphrase extraction model.\n\nGraph-based ranking approach to keyphrase extraction described in:\nhttps://aclanthology.org/I13-1062.pdf\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport string\nfrom itertools import combinations\n\nimport networkx as nx\nimport numpy as np\nfrom scipy.cluster.hierarchy import linkage, fcluster\nfrom scipy.spatial.distance import pdist\n\nfrom .base import LoadFile\n\n\nFRENCH_STOP_WORDS = {\n \"alors\",\n \"au\",\n \"aucuns\",\n \"aussi\",\n \"autre\",\n \"avant\",\n \"avec\",\n \"avoir\",\n \"bon\",\n \"car\",\n \"ce\",\n \"cela\",\n \"ces\",\n \"ceux\",\n \"chaque\",\n \"ci\",\n \"comme\",\n \"comment\",\n \"dans\",\n \"des\",\n \"du\",\n \"dedans\",\n \"dehors\",\n \"depuis\",\n \"devrait\",\n \"doit\",\n \"donc\",\n \"dos\",\n \"début\",\n \"elle\",\n \"elles\",\n \"en\",\n \"encore\",\n \"essai\",\n \"est\",\n \"et\",\n \"eu\",\n \"fait\",\n \"faites\",\n \"fois\",\n \"font\",\n \"hors\",\n \"ici\",\n \"il\",\n \"ils\",\n \"je\",\n \"juste\",\n \"la\",\n \"le\",\n \"les\",\n \"leur\",\n \"là\",\n \"ma\",\n \"maintenant\",\n \"mais\",\n \"mes\",\n \"mien\",\n \"moins\",\n \"mon\",\n \"mot\",\n \"même\",\n \"ni\",\n \"nommés\",\n \"notre\",\n \"nous\",\n \"ou\",\n \"où\",\n \"par\",\n \"parce\",\n \"pas\",\n \"peut\",\n \"peu\",\n \"plupart\",\n \"pour\",\n \"pourquoi\",\n \"quand\",\n \"que\",\n \"quel\",\n \"quelle\",\n \"quelles\",\n \"quels\",\n \"qui\",\n \"sa\",\n \"sans\",\n \"ses\",\n \"seulement\",\n \"si\",\n \"sien\",\n \"son\",\n \"sont\",\n \"sous\",\n \"soyez\",\n \"sujet\",\n \"sur\",\n \"ta\",\n \"tandis\",\n \"tellement\",\n \"tels\",\n \"tes\",\n \"ton\",\n \"tous\",\n \"tout\",\n \"toute\",\n \"trop\",\n \"très\",\n \"tu\",\n \"voient\",\n \"vont\",\n \"votre\",\n \"vous\",\n \"vu\",\n \"ça\",\n \"étaient\",\n \"état\",\n \"étions\",\n \"été\",\n \"être\",\n \"plus\",\n \"celui\",\n \"entre\",\n \"vers\",\n \"dont\",\n \"divers\",\n \"pendant\",\n \"non\",\n \"certain\",\n \"chose\",\n}\n\n\nclass TopicRank(LoadFile):\n\n \"\"\"TopicRank keyphrase extraction model.\n\n Parameterized example::\n\n import pke\n import string\n from nltk.corpus import stopwords\n\n # 1. create a TopicRank extractor.\n extractor = pke.unsupervised.TopicRank()\n\n # 2. load the content of the document.\n extractor.load_document(input='path/to/input.xml')\n\n # 3. select the longest sequences of nouns and adjectives, that do\n # not contain punctuation marks or stopwords as candidates.\n pos = {'NOUN', 'PROPN', 'ADJ'}\n stoplist = list(string.punctuation)\n stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-']\n stoplist += stopwords.words('english')\n extractor.candidate_selection(pos=pos, stoplist=stoplist)\n\n # 4. build topics by grouping candidates with HAC (average linkage,\n # threshold of 1/4 of shared stems). Weight the topics using random\n # walk, and select the first occuring candidate from each topic.\n extractor.candidate_weighting(threshold=0.74, method='average')\n\n # 5. get the 10-highest scored candidates as keyphrases\n keyphrases = extractor.get_n_best(n=10)\n\n \"\"\"\n\n def __init__(self):\n \"\"\"Redefining initializer for TopicRank.\n \"\"\"\n\n super(TopicRank, self).__init__()\n\n self.graph = nx.Graph()\n \"\"\" The topic graph. \"\"\"\n\n self.topics = []\n \"\"\" The topic container. 
\"\"\"\n\n def candidate_selection(self, pos=None):\n \"\"\"Selects longest sequences of nouns and adjectives as keyphrase\n candidates.\n\n Args:\n pos (set): the set of valid POS tags, defaults to ('NOUN',\n 'PROPN', 'ADJ').\n\n \"\"\"\n\n # define default pos tags set\n if pos is None:\n pos = {'NOUN', 'PROPN', 'ADJ'}\n\n # select sequence of adjectives and nouns\n # print(self.sentences[0].stems)\n self.longest_pos_sequence_selection(valid_pos=pos)\n\n\n # initialize stoplist list if not provided\n\n stoplist = set(self.stoplist).union(FRENCH_STOP_WORDS)\n\n # filter candidates containing stopwords or punctuation marks\n self.candidate_filtering(stoplist=set(string.punctuation).union({'-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-'}.union(stoplist)))\n\n\n def vectorize_candidates(self):\n \"\"\"Vectorize the keyphrase candidates.\n\n Returns:\n C (list): the list of candidates.\n X (matrix): vectorized representation of the candidates.\n\n \"\"\"\n\n # build the vocabulary, i.e. setting the vector dimensions\n dim = set([])\n #print(\"candiate items:\",self.candidates.items())\n # for k, v in self.candidates.iteritems():\n # iterate Python 2/3 compatible\n for (k, v) in self.candidates.items():\n for w in v.lexical_form:\n dim.add(w)\n dim = list(dim)\n\n # vectorize the candidates Python 2/3 + sort for random issues\n C = list(self.candidates) # .keys()\n C.sort()\n\n X = np.zeros((len(C), len(dim)))\n for i, k in enumerate(C):\n for w in self.candidates[k].lexical_form:\n X[i, dim.index(w)] += 1\n\n #print(C,X)\n return C, X\n\n def topic_clustering(self, threshold=0.74, method='single'):\n \"\"\"Clustering candidates into topics.\n\n Args:\n threshold (float): the minimum similarity for clustering, defaults\n to 0.74, i.e. more than 1/4 of stem overlap similarity.\n method (str): the linkage method, defaults to average.\n\n \"\"\"\n\n # handle document with only one candidate\n if len(self.candidates) == 1:\n self.topics.append([list(self.candidates)[0]])\n return\n\n if len(self.candidates) == 0:\n self.topics = []\n return\n\n # vectorize the candidates\n candidates, X = self.vectorize_candidates()\n\n # compute the distance matrix\n Y = pdist(X)\n\n # compute the clusters\n Z = linkage(Y, method=method)\n\n # form flat clusters\n clusters = fcluster(Z, t=threshold, criterion='distance')\n\n # for each topic identifier\n for cluster_id in range(1, max(clusters) + 1):\n self.topics.append([candidates[j] for j in range(len(clusters))\n if clusters[j] == cluster_id])\n\n def build_topic_graph(self):\n \"\"\"Build topic graph.\"\"\"\n\n # adding the nodes to the graph\n # print(self.topics)\n self.graph.add_nodes_from(range(len(self.topics)))\n\n # loop through the topics to connect the nodes\n for i, j in combinations(range(len(self.topics)), 2):\n self.graph.add_edge(i, j, weight=0.0)\n for c_i in self.topics[i]:\n for c_j in self.topics[j]:\n for p_i in self.candidates[c_i].offsets:\n for p_j in self.candidates[c_j].offsets:\n gap = abs(p_i - p_j)\n if p_i < p_j:\n gap -= len(self.candidates[c_i].lexical_form) - 1\n if p_j < p_i:\n gap -= len(self.candidates[c_j].lexical_form) - 1\n self.graph[i][j]['weight'] += 1.0 / gap\n\n mapping = {i: self.topics[i][0] for i in range(len(self.topics))}\n self.graph = nx.relabel_nodes(self.graph, mapping)\n\n def candidate_weighting(self,\n threshold=0.74,\n method='average',\n heuristic=None):\n \"\"\"Candidate ranking using random walk.\n\n Args:\n threshold (float): the minimum similarity for clustering, defaults\n to 0.74.\n method 
(str): the linkage method, defaults to average.\n heuristic (str): the heuristic for selecting the best candidate for\n each topic, defaults to first occurring candidate. Other options\n are 'frequent' (most frequent candidate, position is used for\n ties).\n\n \"\"\"\n\n # cluster the candidates\n self.topic_clustering(threshold=threshold, method=method)\n\n # build the topic graph\n self.build_topic_graph()\n\n\n # compute the word scores using random walk\n w = nx.pagerank_scipy(self.graph, alpha=0.85, weight='weight')\n\n # loop through the topics\n for i, topic in enumerate(self.topics):\n\n # get the offsets of the topic candidates\n offsets = [self.candidates[t].offsets[0] for t in topic]\n\n # get first candidate from topic\n if heuristic == 'frequent':\n\n # get frequencies for each candidate within the topic\n freq = [len(self.candidates[t].surface_forms) for t in topic]\n\n # get the indexes of the most frequent candidates\n indexes = [j for j, f in enumerate(freq) if f == max(freq)]\n\n # offsets of the indexes\n indexes_offsets = [offsets[j] for j in indexes]\n most_frequent = indexes_offsets.index(min(indexes_offsets))\n self.weights[topic[most_frequent]] = w[self.topics[i][0]]\n\n else:\n first = offsets.index(min(offsets))\n # print(w)\n self.weights[topic[first]] = w[self.topics[i][0]]\n\n" ]
[ [ "scipy.cluster.hierarchy.linkage", "scipy.spatial.distance.pdist", "scipy.cluster.hierarchy.fcluster" ] ]
chengfenggui/attention-is-all-you-need-pytorch
[ "368b7c1a48b3c0a495f97de185e336d546aef2cc" ]
[ "conv_transformer/UNet/Model.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom .Parts import DoubleConv, Down, Up, OutConv\r\n\r\n\r\nclass UNet(nn.Module):\r\n def __init__(self, in_channels, out_channels, depth=2, bilinear=True):\r\n super(UNet, self).__init__()\r\n\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.bilinear = bilinear\r\n self.depth = depth\r\n\r\n init_channels = 64 if in_channels <= 32 else in_channels * 2\r\n channels = [init_channels * 2 ** i for i in range(depth + 1)]\r\n\r\n self.inc = DoubleConv(in_channels=in_channels, out_channels=init_channels)\r\n self.downs = nn.ModuleList(\r\n [Down(channels[i], channels[i + 1]) for i in range(depth)]\r\n )\r\n\r\n factor = 2 if bilinear else 1\r\n\r\n out_modules = [Up(channels[depth - i], channels[depth - i - 1] // factor, bilinear) for i in range(depth - 1)]\r\n out_modules.append(Up(channels[1], channels[0], bilinear))\r\n\r\n self.ups = nn.ModuleList(out_modules)\r\n self.outc = OutConv(init_channels, out_channels)\r\n\r\n def forward(self, x):\r\n depth = self.depth\r\n xs = []\r\n xs.append(self.inc(x))\r\n\r\n for i in range(depth):\r\n xs.append(self.downs[i](xs[i]))\r\n\r\n x = self.ups[0](xs[-1], xs[-2])\r\n for i in range(depth - 1):\r\n x = self.ups[i + 1](x, xs[-i - 3])\r\n\r\n x = self.outc(x)\r\n\r\n return x\r\n" ]
[ [ "torch.nn.ModuleList" ] ]
JINJINT/MABtest
[ "e20764baeddf0f32d058c17852eb6981145c15b9" ]
[ "MABtest/generatemu.py" ]
[ "# scrip fot generate the true mean of the arms according to reward type and time variation\nfrom numpy import sqrt, log, exp, mean, cumsum, zeros, argsort, argmin, argmax, array, random, argwhere, sin, mod, power\nfrom numpy.random import rand\nfrom .toimport import *\nfrom .plotting import *\nimport dill\nimport functools\n\ndef generate_mu_timelist(record_max = 10**4, no_arms = 5, mu_type = 'biggap', is_timevar = False, timevar_type = 'General', reward_type = 'Bernoulli', mu_list = None, direc = None, plot = True, ):\n ''' \n generate the value of each arm mean as a function of time\n\n ''' \n # if not provided the list of mean of arms to start with, then generate it\n if mu_list is None: \n mu_list = generate_mu_list(no_arms, mu_type, timevar_type, reward_type)\n \n # make sure the senatity\n no_arms = len(mu_list)\n \n if not is_timevar: \n # no changes \n mu_time_list = lambda t, i: mu_list[i]\n \n else: \n if timevar_type == 'Abrupt':\n \n # continuous changes \n if mu_type == \"biggap\":\n f_t = lambda t: 0.1*pow(-1,np.floor(t/50000))*(t>=50000*np.floor(t/50000) and t<50000*(np.floor(t/50000)+1)) \n elif mu_type == \"smallgap\": \n f_t = lambda t: 0.01*pow(-1,np.floor(t/50000))*(t>=50000*np.floor(t/50000) and t<50000*(np.floor(t/50000)+1)) \n else: \n f_t = lambda t: rand()*pow(-1,np.floor(t/50000))*(t>=50000*np.floor(t/50000) and t<50000*(np.floor(t/50000)+1)) \n \n mu_time_list = lambda t, i: mu_list[mod(i+ np.int(np.divide(t,50000)), no_arms)] + f_t(t)\n\n elif timevar_type == 'General':\n # continuous changes \n f_t = lambda t: sin(np.pi*t/50000)+1\n mu_time_list = lambda t, i: mu_list[i]*f_t(t+50000*i) \n\n elif timevar_type =='RealAbrupt':\n # Example from the Yahoo! dataset, from article \"Nearly Optimal Adaptive Procedure with Change Detection for Piecewise-Stationary Bandit\" (M-UCB) https://arxiv.org/abs/1802.03692\n # 6 arms, 9 discrete change\n mu_list = [[0.071, 0.041, 0.032, 0.030, 0.020, 0.011], \n [0.055, 0.053, 0.032, 0.030, 0.008, 0.011], \n [0.040, 0.063, 0.032, 0.030, 0.008, 0.011], \n [0.040, 0.042, 0.043, 0.030, 0.008, 0.011], \n [0.030, 0.032, 0.055, 0.030, 0.008, 0.011], \n [0.030, 0.032, 0.020, 0.030, 0.008, 0.021], \n [0.020, 0.022, 0.020, 0.045, 0.008, 0.021], \n [0.020, 0.022, 0.020, 0.057, 0.008, 0.011], \n [0.020, 0.022, 0.034, 0.057, 0.022, 0.011]] \n\n mu_time_list = lambda t, i: mu_list[int(np.floor(t/50000))][i]\n \n # save the data to direc\n if direc is not None: \n filename = 'noarms%d_mu_type%s_timevar_type%s_reward_type%s' % (no_arms, mu_type, timevar_type, reward_type)\n if not os.path.exists(\"%s/%s.pkl\"%(direc, filename)):\n savelambda(mu_time_list, direc, filename) \n else:\n with open('%s/%s.pkl'%(direc, filename), 'rb') as input:\n mu_dat = dill.load(input) \n mu_time_list = mu_dat\n \n plotfilename = 'mu_time_list%s'%(filename)\n plotdirec = direc + '/plots'\n\n if not os.path.exists(\"%s/%s.pdf\"%(plotdirec, plotfilename)) and plot:\n plot_mu(plotdirec, plotfilename, no_arms, record_max, mu_time_list, mu_type) \n \n # return the list, which is a list of funtion, each represent the mean of arms as a function of time\n return mu_time_list \n \n \n\ndef generate_mu_list(no_arms, mu_type, timevar_type, reward_type):\n ''' \n generate the value of each arm mean\n\n '''\n if 'sparse' in mu_type:\n signal = np.int(np.ceil(np.power(no_arms, 0.5)))\n elif 'dense' in mu_type:\n signal = np.int(np.ceil(0.5*no_arms))\n if 'strong' in mu_type: \n sig_a = 10\n sig_mu = 2\n elif 'weak' in mu_type:\n sig_a = 3\n sig_mu = 0.5 \n\n if mu_type == 
\"biggap\":\n mu_list = np.arange(0.1, 0.6, 0.1)\n elif mu_type == 'smallgap':\n mu_list = np.arange(0.1, 0.2, 0.01)\n elif mu_type == 'rand':\n if reward_type == 'Bernoulli':\n mu_list = rand(no_arms)\n elif reward_type == 'Gaussian': \n mu_list = abs(normal(loc = 0, scale = 1, size = no_arms))\n else:\n if reward_type == 'Bernoulli':\n mu_list = [beta(a = sig_a, b = 1) for i in range(signal)]\n mu_list.extend([beta(a = 1, b = 1) for i in range(no_arms - signal)])\n elif reward_type == 'Gaussian':\n mu_list = [normal(loc = sig_mu, scale = 1) for i in range(signal)]\n mu_list.extend([normal(loc = 0, scale = 1) for i in range(no_arms - signal)]) \n \n mu_list = list(mu_list)\n return mu_list\n\n\ndef plot_mu(plotdirec, plotfilename, no_arms, record_max, mu_time_list, mu_type):\n \n fig, ax = plt.subplots(figsize = (5,5)) \n\n # set up label colors\n if len(color_list) < no_arms:\n plot_col = randcolor(no_arms)\n else:\n plot_col = color_list\n \n # set up labels\n labels = ['arm %d' % (i) for i in range(no_arms)] \n\n for i in range(no_arms):\n ax.plot(range(record_max), list(map(functools.partial(mu_time_list, i = i), range(record_max))), color = plot_col[i], label = labels[i], lw = 2) \n\n ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) \n ax.set_xlabel('Time', labelpad = 20)\n ax.set_ylabel('True mean', labelpad = 20)\n ax.set_ylim((0,1))\n\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.7, box.height])\n\n ax.legend(ncol = 1, prop={'size': 10})\n\n saveplot(plotdirec, plotfilename, ax) \n return None \n \n\n\n" ]
[ [ "numpy.random.rand", "numpy.sin" ] ]
jkwang1992/sbp-env
[ "929a88c30e0056cce55ef22f74bfa218c1e78cc8" ]
[ "samplers/randomPolicySampler.py" ]
[ "import random\n\nimport numpy as np\nfrom overrides import overrides\n\nfrom randomness import SUPPORTED_RANDOM_METHODS, RandomnessManager\nfrom samplers.baseSampler import Sampler\nfrom utils import planner_registry\n\n\nclass RandomPolicySampler(Sampler):\n r\"\"\"Uniformly and randomly samples configurations across :math:`d` where\n :math:`d` is\n the dimensionality of the *C-Space*.\n :class:`~samplers.randomPolicySampler.RandomPolicySampler` samples configuration\n :math:`q \\in \\mathbb{R}^d` across each dimension uniformly with an\n :math:`0 \\le \\epsilon < 1` bias towds the goal configuration.\n\n A random number :math:`p \\sim \\mathcal{U}(0,1)` is first drawn, then the\n configuration :math:`q_\\text{new}` that this function returns is given by\n\n .. math::\n q_\\text{new} =\n \\begin{cases}\n q \\sim \\mathcal{U}(0,1)^d & \\text{if } p < \\epsilon\\\\\n q_\\text{target} & \\text{otherwise.}\n \\end{cases}\n\n :py:const:`CONSTANT`\n \"\"\"\n\n @overrides\n def __init__(self, random_method: str = \"pseudo_random\", **kwargs):\n \"\"\"\n :param random_method: the kind of random method to use. Must be a choice from\n :data:`randomness.SUPPORTED_RANDOM_METHODS`.\n :param kwargs: pass through to super class\n \"\"\"\n super().__init__(**kwargs)\n if random_method not in SUPPORTED_RANDOM_METHODS:\n raise ValueError(\n \"Given random_method is not valid! Valid options includes:\\n\"\n \"{}\".format(\n \"\\n\".join((\" - {}\".format(m) for m in SUPPORTED_RANDOM_METHODS))\n )\n )\n\n self.random_method = random_method\n self.random = None\n\n @overrides\n def init(self, **kwargs):\n \"\"\"The delayed **initialisation** method\n\n :param num_dim: the number of dimensions\n\n \"\"\"\n super().init(**kwargs)\n self.random = RandomnessManager(num_dim=kwargs[\"num_dim\"])\n\n self.use_original_method = False\n\n if self.args.engine == \"klampt\":\n self.low, self.high = (\n [-np.pi] * kwargs[\"num_dim\"],\n [np.pi] * kwargs[\"num_dim\"],\n )\n elif self.args.engine == \"4d\":\n self.low, self.high = [\n [0, 0, -np.pi, -np.pi],\n [self.args.env.dim[0], self.args.env.dim[1], np.pi, np.pi],\n ]\n else:\n self.use_original_method = True\n\n @overrides\n def get_next_pos(self) -> Sampler.GetNextPosReturnType:\n # Random path\n if random.random() < self.args.goalBias:\n # goal bias\n p = self.goal_pos\n else:\n if self.use_original_method:\n p = self.random.get_random(self.random_method)\n p *= self.args.env.dim\n else:\n p = np.random.uniform(self.low, self.high)\n\n return p, self.report_success, self.report_fail\n\n\n# start register\nsampler_id = \"random\"\n\nplanner_registry.register_sampler(sampler_id, sampler_class=RandomPolicySampler)\n# finish register\n" ]
[ [ "numpy.random.uniform" ] ]
swt2c/pandas
[ "70773d952bf52229d7214707ce6d66cee70607cb" ]
[ "pandas/tests/groupby/test_categorical.py" ]
[ "# -*- coding: utf-8 -*-\nfrom datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import PY37\n\nimport pandas as pd\nfrom pandas import (\n Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut)\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (\n assert_equal, assert_frame_equal, assert_series_equal)\n\n\ndef cartesian_product_for_groupers(result, args, names):\n \"\"\" Reindex to a cartesian production for the groupers,\n preserving the nature (Categorical) of each grouper \"\"\"\n\n def f(a):\n if isinstance(a, (CategoricalIndex, Categorical)):\n categories = a.categories\n a = Categorical.from_codes(np.arange(len(categories)),\n categories=categories,\n ordered=a.ordered)\n return a\n\n index = pd.MultiIndex.from_product(map(f, args), names=names)\n return result.reindex(index).sort_index()\n\n\ndef test_apply_use_categorical_name(df):\n cats = qcut(df.C, 4)\n\n def get_stats(group):\n return {'min': group.min(),\n 'max': group.max(),\n 'count': group.count(),\n 'mean': group.mean()}\n\n result = df.groupby(cats, observed=False).D.apply(get_stats)\n assert result.index.names[0] == 'C'\n\n\ndef test_basic():\n\n cats = Categorical([\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n categories=[\"a\", \"b\", \"c\", \"d\"], ordered=True)\n data = DataFrame({\"a\": [1, 1, 1, 2, 2, 2, 3, 4, 5], \"b\": cats})\n\n exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True)\n expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index)\n result = data.groupby(\"b\", observed=False).mean()\n tm.assert_frame_equal(result, expected)\n\n cat1 = Categorical([\"a\", \"a\", \"b\", \"b\"],\n categories=[\"a\", \"b\", \"z\"], ordered=True)\n cat2 = Categorical([\"c\", \"d\", \"c\", \"d\"],\n categories=[\"c\", \"d\", \"y\"], ordered=True)\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n\n # single grouper\n gb = df.groupby(\"A\", observed=False)\n exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True)\n expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)})\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n # GH 8623\n x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],\n [1, 'John P. 
Doe']],\n columns=['person_id', 'person_name'])\n x['person_name'] = Categorical(x.person_name)\n\n g = x.groupby(['person_id'], observed=False)\n result = g.transform(lambda x: x)\n tm.assert_frame_equal(result, x[['person_name']])\n\n result = x.drop_duplicates('person_name')\n expected = x.iloc[[0, 1]]\n tm.assert_frame_equal(result, expected)\n\n def f(x):\n return x.drop_duplicates('person_name').iloc[0]\n\n result = g.apply(f)\n expected = x.iloc[[0, 1]].copy()\n expected.index = Index([1, 2], name='person_id')\n expected['person_name'] = expected['person_name'].astype('object')\n tm.assert_frame_equal(result, expected)\n\n # GH 9921\n # Monotonic\n df = DataFrame({\"a\": [5, 15, 25]})\n c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])\n\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df['a'])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)),\n df['a'])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(sum),\n df[['a']])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(lambda xs: np.max(xs)),\n df[['a']])\n\n # Filter\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).filter(np.all),\n df['a'])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).filter(np.all),\n df)\n\n # Non-monotonic\n df = DataFrame({\"a\": [5, 15, 25, -5]})\n c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])\n\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df['a'])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)),\n df['a'])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(sum),\n df[['a']])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)),\n df[['a']])\n\n # GH 9603\n df = DataFrame({'a': [1, 0, 0, 0]})\n c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd')))\n result = df.groupby(c, observed=False).apply(len)\n\n exp_index = CategoricalIndex(\n c.values.categories, ordered=c.values.ordered)\n expected = Series([1, 0, 0, 0], index=exp_index)\n expected.index.name = 'a'\n tm.assert_series_equal(result, expected)\n\n # more basic\n levels = ['foo', 'bar', 'baz', 'qux']\n codes = np.random.randint(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.randn(100, 4))\n\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n exp_idx = CategoricalIndex(levels, categories=cats.categories,\n ordered=True)\n expected = expected.reindex(exp_idx)\n\n assert_frame_equal(result, expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = np.asarray(cats).take(idx)\n ord_data = data.take(idx)\n\n exp_cats = Categorical(ord_labels, ordered=True,\n categories=['foo', 'bar', 'baz', 'qux'])\n expected = ord_data.groupby(\n exp_cats, sort=False, observed=False).describe()\n assert_frame_equal(desc_result, expected)\n\n # GH 10460\n expc = Categorical.from_codes(np.arange(4).repeat(8),\n levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal((desc_result.stack().index\n .get_level_values(0)), exp)\n exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',\n '75%', 'max'] * 4)\n tm.assert_index_equal((desc_result.stack().index\n .get_level_values(1)), exp)\n\n\ndef test_level_get_group(observed):\n # GH15155\n df = 
DataFrame(data=np.arange(2, 22, 2),\n index=MultiIndex(\n levels=[pd.CategoricalIndex([\"a\", \"b\"]), range(10)],\n codes=[[0] * 5 + [1] * 5, range(10)],\n names=[\"Index1\", \"Index2\"]))\n g = df.groupby(level=[\"Index1\"], observed=observed)\n\n # expected should equal test.loc[[\"a\"]]\n # GH15166\n expected = DataFrame(data=np.arange(2, 12, 2),\n index=pd.MultiIndex(levels=[pd.CategoricalIndex(\n [\"a\", \"b\"]), range(5)],\n codes=[[0] * 5, range(5)],\n names=[\"Index1\", \"Index2\"]))\n result = g.get_group('a')\n\n assert_frame_equal(result, expected)\n\n\[email protected](PY37, reason=\"flaky on 3.7, xref gh-21636\", strict=False)\[email protected]('ordered', [True, False])\ndef test_apply(ordered):\n # GH 10138\n\n dense = Categorical(list('abc'), ordered=ordered)\n\n # 'b' is in the categories but not in the list\n missing = Categorical(\n list('aaa'), categories=['a', 'b'], ordered=ordered)\n values = np.arange(len(dense))\n df = DataFrame({'missing': missing,\n 'dense': dense,\n 'values': values})\n grouped = df.groupby(['missing', 'dense'], observed=True)\n\n # missing category 'b' should still exist in the output index\n idx = MultiIndex.from_arrays(\n [missing, dense], names=['missing', 'dense'])\n expected = DataFrame([0, 1, 2.],\n index=idx,\n columns=['values'])\n\n result = grouped.apply(lambda x: np.mean(x))\n assert_frame_equal(result, expected)\n\n # we coerce back to ints\n expected = expected.astype('int')\n result = grouped.mean()\n assert_frame_equal(result, expected)\n\n result = grouped.agg(np.mean)\n assert_frame_equal(result, expected)\n\n # but for transform we should still get back the original index\n idx = MultiIndex.from_arrays([missing, dense],\n names=['missing', 'dense'])\n expected = Series(1, index=idx)\n result = grouped.apply(lambda x: 1)\n assert_series_equal(result, expected)\n\n\ndef test_observed(observed):\n # multiple groupers, don't re-expand the output space\n # of the grouper\n # gh-14942 (implement)\n # gh-10132 (back-compat)\n # gh-8138 (back-compat)\n # gh-8869\n\n cat1 = Categorical([\"a\", \"a\", \"b\", \"b\"],\n categories=[\"a\", \"b\", \"z\"], ordered=True)\n cat2 = Categorical([\"c\", \"d\", \"c\", \"d\"],\n categories=[\"c\", \"d\", \"y\"], ordered=True)\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n df['C'] = ['foo', 'bar'] * 2\n\n # multiple groupers with a non-cat\n gb = df.groupby(['A', 'B', 'C'], observed=observed)\n exp_index = pd.MultiIndex.from_arrays(\n [cat1, cat2, ['foo', 'bar'] * 2],\n names=['A', 'B', 'C'])\n expected = DataFrame({'values': Series(\n [1, 2, 3, 4], index=exp_index)}).sort_index()\n result = gb.sum()\n if not observed:\n expected = cartesian_product_for_groupers(\n expected,\n [cat1, cat2, ['foo', 'bar']],\n list('ABC'))\n\n tm.assert_frame_equal(result, expected)\n\n gb = df.groupby(['A', 'B'], observed=observed)\n exp_index = pd.MultiIndex.from_arrays(\n [cat1, cat2],\n names=['A', 'B'])\n expected = DataFrame({'values': [1, 2, 3, 4]},\n index=exp_index)\n result = gb.sum()\n if not observed:\n expected = cartesian_product_for_groupers(\n expected,\n [cat1, cat2],\n list('AB'))\n\n tm.assert_frame_equal(result, expected)\n\n # https://github.com/pandas-dev/pandas/issues/8138\n d = {'cat':\n pd.Categorical([\"a\", \"b\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"],\n ordered=True),\n 'ints': [1, 1, 2, 2],\n 'val': [10, 20, 30, 40]}\n df = pd.DataFrame(d)\n\n # Grouping on a single column\n groups_single_key = df.groupby(\"cat\", observed=observed)\n result = 
groups_single_key.mean()\n\n exp_index = pd.CategoricalIndex(list('ab'), name=\"cat\",\n categories=list('abc'),\n ordered=True)\n expected = DataFrame({\"ints\": [1.5, 1.5], \"val\": [20., 30]},\n index=exp_index)\n if not observed:\n index = pd.CategoricalIndex(list('abc'), name=\"cat\",\n categories=list('abc'),\n ordered=True)\n expected = expected.reindex(index)\n\n tm.assert_frame_equal(result, expected)\n\n # Grouping on two columns\n groups_double_key = df.groupby([\"cat\", \"ints\"], observed=observed)\n result = groups_double_key.agg('mean')\n expected = DataFrame(\n {\"val\": [10, 30, 20, 40],\n \"cat\": pd.Categorical(['a', 'a', 'b', 'b'],\n categories=['a', 'b', 'c'],\n ordered=True),\n \"ints\": [1, 2, 1, 2]}).set_index([\"cat\", \"ints\"])\n if not observed:\n expected = cartesian_product_for_groupers(\n expected,\n [df.cat.values, [1, 2]],\n ['cat', 'ints'])\n\n tm.assert_frame_equal(result, expected)\n\n # GH 10132\n for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]:\n c, i = key\n result = groups_double_key.get_group(key)\n expected = df[(df.cat == c) & (df.ints == i)]\n assert_frame_equal(result, expected)\n\n # gh-8869\n # with as_index\n d = {'foo': [10, 8, 4, 8, 4, 1, 1], 'bar': [10, 20, 30, 40, 50, 60, 70],\n 'baz': ['d', 'c', 'e', 'a', 'a', 'd', 'c']}\n df = pd.DataFrame(d)\n cat = pd.cut(df['foo'], np.linspace(0, 10, 3))\n df['range'] = cat\n groups = df.groupby(['range', 'baz'], as_index=False, observed=observed)\n result = groups.agg('mean')\n\n groups2 = df.groupby(['range', 'baz'], as_index=True, observed=observed)\n expected = groups2.agg('mean').reset_index()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_codes_remap(observed):\n d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}\n df = pd.DataFrame(d)\n values = pd.cut(df['C1'], [1, 2, 3, 6])\n values.name = \"cat\"\n groups_double_key = df.groupby([values, 'C2'], observed=observed)\n\n idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]],\n names=[\"cat\", \"C2\"])\n expected = DataFrame({\"C1\": [3, 3, 4, 5],\n \"C3\": [10, 100, 200, 34]}, index=idx)\n if not observed:\n expected = cartesian_product_for_groupers(\n expected,\n [values.values, [1, 2, 3, 4]],\n ['cat', 'C2'])\n\n result = groups_double_key.agg('mean')\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_perf():\n # we create a cartesian product, so this is\n # non-performant if we don't use observed values\n # gh-14942\n df = DataFrame({\n 'cat': np.random.randint(0, 255, size=30000),\n 'int_id': np.random.randint(0, 255, size=30000),\n 'other_id': np.random.randint(0, 10000, size=30000),\n 'foo': 0})\n df['cat'] = df.cat.astype(str).astype('category')\n\n grouped = df.groupby(['cat', 'int_id', 'other_id'], observed=True)\n result = grouped.count()\n assert result.index.levels[0].nunique() == df.cat.nunique()\n assert result.index.levels[1].nunique() == df.int_id.nunique()\n assert result.index.levels[2].nunique() == df.other_id.nunique()\n\n\ndef test_observed_groups(observed):\n # gh-20583\n # test that we have the appropriate groups\n\n cat = pd.Categorical(['a', 'c', 'a'], categories=['a', 'b', 'c'])\n df = pd.DataFrame({'cat': cat, 'vals': [1, 2, 3]})\n g = df.groupby('cat', observed=observed)\n\n result = g.groups\n if observed:\n expected = {'a': Index([0, 2], dtype='int64'),\n 'c': Index([1], dtype='int64')}\n else:\n expected = {'a': Index([0, 2], dtype='int64'),\n 'b': Index([], dtype='int64'),\n 'c': Index([1], dtype='int64')}\n\n tm.assert_dict_equal(result, expected)\n\n\ndef 
test_observed_groups_with_nan(observed):\n # GH 24740\n df = pd.DataFrame({'cat': pd.Categorical(['a', np.nan, 'a'],\n categories=['a', 'b', 'd']),\n 'vals': [1, 2, 3]})\n g = df.groupby('cat', observed=observed)\n result = g.groups\n if observed:\n expected = {'a': Index([0, 2], dtype='int64')}\n else:\n expected = {'a': Index([0, 2], dtype='int64'),\n 'b': Index([], dtype='int64'),\n 'd': Index([], dtype='int64')}\n tm.assert_dict_equal(result, expected)\n\n\ndef test_dataframe_categorical_with_nan(observed):\n # GH 21151\n s1 = pd.Categorical([np.nan, 'a', np.nan, 'a'],\n categories=['a', 'b', 'c'])\n s2 = pd.Series([1, 2, 3, 4])\n df = pd.DataFrame({'s1': s1, 's2': s2})\n result = df.groupby('s1', observed=observed).first().reset_index()\n if observed:\n expected = DataFrame({'s1': pd.Categorical(['a'],\n categories=['a', 'b', 'c']), 's2': [2]})\n else:\n expected = DataFrame({'s1': pd.Categorical(['a', 'b', 'c'],\n categories=['a', 'b', 'c']),\n 's2': [2, np.nan, np.nan]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_datetime():\n # GH9049: ensure backward compatibility\n levels = pd.date_range('2014-01-01', periods=4)\n codes = np.random.randint(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.randn(100, 4))\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n expected = expected.reindex(levels)\n expected.index = CategoricalIndex(expected.index,\n categories=expected.index,\n ordered=True)\n\n assert_frame_equal(result, expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = cats.take_nd(idx)\n ord_data = data.take(idx)\n expected = ord_data.groupby(ord_labels, observed=False).describe()\n assert_frame_equal(desc_result, expected)\n tm.assert_index_equal(desc_result.index, expected.index)\n tm.assert_index_equal(\n desc_result.index.get_level_values(0),\n expected.index.get_level_values(0))\n\n # GH 10460\n expc = Categorical.from_codes(\n np.arange(4).repeat(8), levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal((desc_result.stack().index\n .get_level_values(0)), exp)\n exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',\n '75%', 'max'] * 4)\n tm.assert_index_equal((desc_result.stack().index\n .get_level_values(1)), exp)\n\n\ndef test_categorical_index():\n\n s = np.random.RandomState(12345)\n levels = ['foo', 'bar', 'baz', 'qux']\n codes = s.randint(0, 4, size=20)\n cats = Categorical.from_codes(codes, levels, ordered=True)\n df = DataFrame(\n np.repeat(\n np.arange(20), 4).reshape(-1, 4), columns=list('abcd'))\n df['cats'] = cats\n\n # with a cat index\n result = df.set_index('cats').groupby(level=0, observed=False).sum()\n expected = df[list('abcd')].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes(\n [0, 1, 2, 3], levels, ordered=True), name='cats')\n assert_frame_equal(result, expected)\n\n # with a cat column, should produce a cat index\n result = df.groupby('cats', observed=False).sum()\n expected = df[list('abcd')].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes(\n [0, 1, 2, 3], levels, ordered=True), name='cats')\n assert_frame_equal(result, expected)\n\n\ndef test_describe_categorical_columns():\n # GH 11558\n cats = pd.CategoricalIndex(['qux', 'foo', 'baz', 'bar'],\n categories=['foo', 'bar', 'baz', 
'qux'],\n ordered=True)\n df = DataFrame(np.random.randn(20, 4), columns=cats)\n result = df.groupby([1, 2, 3, 4] * 5).describe()\n\n tm.assert_index_equal(result.stack().columns, cats)\n tm.assert_categorical_equal(result.stack().columns.values, cats.values)\n\n\ndef test_unstack_categorical():\n # GH11558 (example is taken from the original issue)\n df = pd.DataFrame({'a': range(10),\n 'medium': ['A', 'B'] * 5,\n 'artist': list('XYXXY') * 2})\n df['medium'] = df['medium'].astype('category')\n\n gcat = df.groupby(\n ['artist', 'medium'], observed=False)['a'].count().unstack()\n result = gcat.describe()\n\n exp_columns = pd.CategoricalIndex(['A', 'B'], ordered=False,\n name='medium')\n tm.assert_index_equal(result.columns, exp_columns)\n tm.assert_categorical_equal(result.columns.values, exp_columns.values)\n\n result = gcat['A'] + gcat['B']\n expected = pd.Series([6, 4], index=pd.Index(['X', 'Y'], name='artist'))\n tm.assert_series_equal(result, expected)\n\n\ndef test_bins_unequal_len():\n # GH3011\n series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])\n bins = pd.cut(series.dropna().values, 4)\n\n # len(bins) != len(series) here\n with pytest.raises(ValueError):\n series.groupby(bins).mean()\n\n\ndef test_as_index():\n # GH13204\n df = DataFrame({'cat': Categorical([1, 2, 2], [1, 2, 3]),\n 'A': [10, 11, 11],\n 'B': [101, 102, 103]})\n result = df.groupby(['cat', 'A'], as_index=False, observed=True).sum()\n expected = DataFrame(\n {'cat': Categorical([1, 2], categories=df.cat.cat.categories),\n 'A': [10, 11],\n 'B': [101, 205]},\n columns=['cat', 'A', 'B'])\n tm.assert_frame_equal(result, expected)\n\n # function grouper\n f = lambda r: df.loc[r, 'A']\n result = df.groupby(['cat', f], as_index=False, observed=True).sum()\n expected = DataFrame(\n {'cat': Categorical([1, 2], categories=df.cat.cat.categories),\n 'A': [10, 22],\n 'B': [101, 205]},\n columns=['cat', 'A', 'B'])\n tm.assert_frame_equal(result, expected)\n\n # another not in-axis grouper (conflicting names in index)\n s = Series(['a', 'b', 'b'], name='cat')\n result = df.groupby(['cat', s], as_index=False, observed=True).sum()\n tm.assert_frame_equal(result, expected)\n\n # is original index dropped?\n group_columns = ['cat', 'A']\n expected = DataFrame(\n {'cat': Categorical([1, 2], categories=df.cat.cat.categories),\n 'A': [10, 11],\n 'B': [101, 205]},\n columns=['cat', 'A', 'B'])\n\n for name in [None, 'X', 'B']:\n df.index = Index(list(\"abc\"), name=name)\n result = df.groupby(group_columns, as_index=False, observed=True).sum()\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_preserve_categories():\n # GH-13179\n categories = list('abc')\n\n # ordered=True\n df = DataFrame({'A': pd.Categorical(list('ba'),\n categories=categories,\n ordered=True)})\n index = pd.CategoricalIndex(categories, categories, ordered=True)\n tm.assert_index_equal(\n df.groupby('A', sort=True, observed=False).first().index, index)\n tm.assert_index_equal(\n df.groupby('A', sort=False, observed=False).first().index, index)\n\n # ordered=False\n df = DataFrame({'A': pd.Categorical(list('ba'),\n categories=categories,\n ordered=False)})\n sort_index = pd.CategoricalIndex(categories, categories, ordered=False)\n nosort_index = pd.CategoricalIndex(list('bac'), list('bac'),\n ordered=False)\n tm.assert_index_equal(\n df.groupby('A', sort=True, observed=False).first().index,\n sort_index)\n tm.assert_index_equal(\n df.groupby('A', sort=False, observed=False).first().index,\n nosort_index)\n\n\ndef test_preserve_categorical_dtype():\n # 
GH13743, GH13854\n df = DataFrame({'A': [1, 2, 1, 1, 2],\n 'B': [10, 16, 22, 28, 34],\n 'C1': Categorical(list(\"abaab\"),\n categories=list(\"bac\"),\n ordered=False),\n 'C2': Categorical(list(\"abaab\"),\n categories=list(\"bac\"),\n ordered=True)})\n # single grouper\n exp_full = DataFrame({'A': [2.0, 1.0, np.nan],\n 'B': [25.0, 20.0, np.nan],\n 'C1': Categorical(list(\"bac\"),\n categories=list(\"bac\"),\n ordered=False),\n 'C2': Categorical(list(\"bac\"),\n categories=list(\"bac\"),\n ordered=True)})\n for col in ['C1', 'C2']:\n result1 = df.groupby(by=col, as_index=False, observed=False).mean()\n result2 = df.groupby(\n by=col, as_index=True, observed=False).mean().reset_index()\n expected = exp_full.reindex(columns=result1.columns)\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, expected)\n\n\ndef test_categorical_no_compress():\n data = Series(np.random.randn(9))\n\n codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean()\n\n exp.index = CategoricalIndex(exp.index, categories=cats.categories,\n ordered=cats.ordered)\n assert_series_equal(result, exp)\n\n codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])\n cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)\n exp.index = CategoricalIndex(exp.index, categories=cats.categories,\n ordered=cats.ordered)\n assert_series_equal(result, exp)\n\n cats = Categorical([\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n categories=[\"a\", \"b\", \"c\", \"d\"], ordered=True)\n data = DataFrame({\"a\": [1, 1, 1, 2, 2, 2, 3, 4, 5], \"b\": cats})\n\n result = data.groupby(\"b\", observed=False).mean()\n result = result[\"a\"].values\n exp = np.array([1, 2, 4, np.nan])\n tm.assert_numpy_array_equal(result, exp)\n\n\ndef test_sort():\n\n # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8\n # This should result in a properly sorted Series so that the plot\n # has a sorted x axis\n # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')\n\n df = DataFrame({'value': np.random.randint(0, 10000, 100)})\n labels = [\"{0} - {1}\".format(i, i + 499) for i in range(0, 10000, 500)]\n cat_labels = Categorical(labels, labels)\n\n df = df.sort_values(by=['value'], ascending=True)\n df['value_group'] = pd.cut(df.value, range(0, 10500, 500),\n right=False, labels=cat_labels)\n\n res = df.groupby(['value_group'], observed=False)['value_group'].count()\n exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]\n exp.index = CategoricalIndex(exp.index, name=exp.index.name)\n tm.assert_series_equal(res, exp)\n\n\ndef test_sort2():\n # dataframe groupby sort was being ignored # GH 8868\n df = DataFrame([['(7.5, 10]', 10, 10],\n ['(7.5, 10]', 8, 20],\n ['(2.5, 5]', 5, 30],\n ['(5, 7.5]', 6, 40],\n ['(2.5, 5]', 4, 50],\n ['(0, 2.5]', 1, 60],\n ['(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])\n df['range'] = Categorical(df['range'], ordered=True)\n index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',\n '(7.5, 10]'], name='range', ordered=True)\n expected_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],\n columns=['foo', 'bar'], index=index)\n\n col = 'range'\n result_sort = df.groupby(col, sort=True, observed=False).first()\n 
assert_frame_equal(result_sort, expected_sort)\n\n # when categories is ordered, group is ordered by category's order\n expected_sort = result_sort\n result_sort = df.groupby(col, sort=False, observed=False).first()\n assert_frame_equal(result_sort, expected_sort)\n\n df['range'] = Categorical(df['range'], ordered=False)\n index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',\n '(7.5, 10]'], name='range')\n expected_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],\n columns=['foo', 'bar'], index=index)\n\n index = CategoricalIndex(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]',\n '(0, 2.5]'],\n categories=['(7.5, 10]', '(2.5, 5]',\n '(5, 7.5]', '(0, 2.5]'],\n name='range')\n expected_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],\n index=index, columns=['foo', 'bar'])\n\n col = 'range'\n\n # this is an unordered categorical, but we allow this ####\n result_sort = df.groupby(col, sort=True, observed=False).first()\n assert_frame_equal(result_sort, expected_sort)\n\n result_nosort = df.groupby(col, sort=False, observed=False).first()\n assert_frame_equal(result_nosort, expected_nosort)\n\n\ndef test_sort_datetimelike():\n # GH10505\n\n # use same data as test_groupby_sort_categorical, which category is\n # corresponding to datetime.month\n df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),\n datetime(2011, 2, 1), datetime(2011, 5, 1),\n datetime(2011, 2, 1), datetime(2011, 1, 1),\n datetime(2011, 5, 1)],\n 'foo': [10, 8, 5, 6, 4, 1, 7],\n 'bar': [10, 20, 30, 40, 50, 60, 70]},\n columns=['dt', 'foo', 'bar'])\n\n # ordered=True\n df['dt'] = Categorical(df['dt'], ordered=True)\n index = [datetime(2011, 1, 1), datetime(2011, 2, 1),\n datetime(2011, 5, 1), datetime(2011, 7, 1)]\n result_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])\n result_sort.index = CategoricalIndex(index, name='dt', ordered=True)\n\n index = [datetime(2011, 7, 1), datetime(2011, 2, 1),\n datetime(2011, 5, 1), datetime(2011, 1, 1)]\n result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],\n columns=['foo', 'bar'])\n result_nosort.index = CategoricalIndex(index, categories=index,\n name='dt', ordered=True)\n\n col = 'dt'\n assert_frame_equal(\n result_sort, df.groupby(col, sort=True, observed=False).first())\n\n # when categories is ordered, group is ordered by category's order\n assert_frame_equal(\n result_sort, df.groupby(col, sort=False, observed=False).first())\n\n # ordered = False\n df['dt'] = Categorical(df['dt'], ordered=False)\n index = [datetime(2011, 1, 1), datetime(2011, 2, 1),\n datetime(2011, 5, 1), datetime(2011, 7, 1)]\n result_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])\n result_sort.index = CategoricalIndex(index, name='dt')\n\n index = [datetime(2011, 7, 1), datetime(2011, 2, 1),\n datetime(2011, 5, 1), datetime(2011, 1, 1)]\n result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],\n columns=['foo', 'bar'])\n result_nosort.index = CategoricalIndex(index, categories=index,\n name='dt')\n\n col = 'dt'\n assert_frame_equal(\n result_sort, df.groupby(col, sort=True, observed=False).first())\n assert_frame_equal(\n result_nosort, df.groupby(col, sort=False, observed=False).first())\n\n\ndef test_empty_sum():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = pd.DataFrame({\"A\": pd.Categorical(['a', 'a', 'b'],\n categories=['a', 'b', 'c']),\n 'B': [1, 2, 1]})\n expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')\n\n # 0 by default\n result = df.groupby(\"A\", 
observed=False).B.sum()\n expected = pd.Series([3, 1, 0], expected_idx, name='B')\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby(\"A\", observed=False).B.sum(min_count=0)\n expected = pd.Series([3, 1, 0], expected_idx, name='B')\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby(\"A\", observed=False).B.sum(min_count=1)\n expected = pd.Series([3, 1, np.nan], expected_idx, name='B')\n tm.assert_series_equal(result, expected)\n\n # min_count>1\n result = df.groupby(\"A\", observed=False).B.sum(min_count=2)\n expected = pd.Series([3, np.nan, np.nan], expected_idx, name='B')\n tm.assert_series_equal(result, expected)\n\n\ndef test_empty_prod():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = pd.DataFrame({\"A\": pd.Categorical(['a', 'a', 'b'],\n categories=['a', 'b', 'c']),\n 'B': [1, 2, 1]})\n\n expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')\n\n # 1 by default\n result = df.groupby(\"A\", observed=False).B.prod()\n expected = pd.Series([2, 1, 1], expected_idx, name='B')\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby(\"A\", observed=False).B.prod(min_count=0)\n expected = pd.Series([2, 1, 1], expected_idx, name='B')\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby(\"A\", observed=False).B.prod(min_count=1)\n expected = pd.Series([2, 1, np.nan], expected_idx, name='B')\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_multiindex_categorical_datetime():\n # https://github.com/pandas-dev/pandas/issues/21390\n\n df = pd.DataFrame({\n 'key1': pd.Categorical(list('abcbabcba')),\n 'key2': pd.Categorical(\n list(pd.date_range('2018-06-01 00', freq='1T', periods=3)) * 3),\n 'values': np.arange(9),\n })\n result = df.groupby(['key1', 'key2']).mean()\n\n idx = pd.MultiIndex.from_product(\n [pd.Categorical(['a', 'b', 'c']),\n pd.Categorical(pd.date_range('2018-06-01 00', freq='1T', periods=3))],\n names=['key1', 'key2'])\n expected = pd.DataFrame(\n {'values': [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)\n assert_frame_equal(result, expected)\n\n\[email protected](\"as_index, expected\", [\n (True, pd.Series(\n index=pd.MultiIndex.from_arrays(\n [pd.Series([1, 1, 2], dtype='category'),\n [1, 2, 2]], names=['a', 'b']\n ),\n data=[1, 2, 3], name='x'\n )),\n (False, pd.DataFrame({\n 'a': pd.Series([1, 1, 2], dtype='category'),\n 'b': [1, 2, 2],\n 'x': [1, 2, 3]\n }))\n])\ndef test_groupby_agg_observed_true_single_column(as_index, expected):\n # GH-23970\n df = pd.DataFrame({\n 'a': pd.Series([1, 1, 2], dtype='category'),\n 'b': [1, 2, 2],\n 'x': [1, 2, 3]\n })\n\n result = df.groupby(\n ['a', 'b'], as_index=as_index, observed=True)['x'].sum()\n\n assert_equal(result, expected)\n\n\[email protected]('fill_value', [None, np.nan, pd.NaT])\ndef test_shift(fill_value):\n ct = pd.Categorical(['a', 'b', 'c', 'd'],\n categories=['a', 'b', 'c', 'd'], ordered=False)\n expected = pd.Categorical([None, 'a', 'b', 'c'],\n categories=['a', 'b', 'c', 'd'], ordered=False)\n res = ct.shift(1, fill_value=fill_value)\n assert_equal(res, expected)\n" ]
[ [ "pandas.Series", "numpy.linspace", "numpy.asarray", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "numpy.max", "pandas.util.testing.assert_index_equal", "numpy.random.randn", "numpy.mean", "numpy.random.randint", "pandas.util.testing.assert_numpy_array_equal", "pandas.util.testing.assert_categorical_equal", "numpy.arange", "pandas.Index", "pandas.util.testing.assert_series_equal", "pandas.cut", "pandas.Categorical.from_codes", "pandas.Categorical", "pandas.util.testing.assert_equal", "pandas.date_range", "numpy.array", "numpy.random.RandomState", "numpy.sum", "pandas.CategoricalIndex", "pandas.MultiIndex.from_arrays", "pandas.qcut", "pandas.util.testing.assert_dict_equal" ] ]
wolfd/jpeg-svg
[ "42370757ec98642b57486c7d2fd3fae7df9bc271" ]
[ "jpeg_parser.py" ]
[ "import typing as T\nimport struct\nimport enum\nimport numpy as np\n\nfrom zigzag import fill_zigzag\nfrom huffman import Huffman\n\nBLOCK_SIDE_PX = 8\nBLOCK_SIZE_BYTE = BLOCK_SIDE_PX * BLOCK_SIDE_PX\n\nSOI_EXPECTED = (0xFF, 0xD8)\nJFIF_APP0_EXPECTED = (0xFF, 0xE0)\n\n\ndef unpack_from_file(format: str, file_: T.BinaryIO):\n return struct.unpack(format, file_.read(struct.calcsize(format)))\n\n\ndef one_from_file(format: str, file_: T.BinaryIO):\n return unpack_from_file(format, file_)[0]\n\n\ndef get_zero_block(dtype=np.uint8):\n return np.zeros(shape=(BLOCK_SIDE_PX, BLOCK_SIDE_PX), dtype=dtype)\n\n\nclass Jpeg(object):\n def __init__(self, file_: T.BinaryIO):\n self.jfif_header: T.Optional[JfifHeader] = None\n self.huffman_tables: T.List[Huffman] = []\n self.start_of_frame: T.Optional[StartOfFrame] = None\n\n\nclass DensityUnits(enum.Enum):\n NONE = 0\n DOTS_PER_INCH = 1\n DOTS_PER_CM = 2\n\n\nJfifHeader = T.NamedTuple(\n \"JfifHeader\", [(\"version\", int), (\"density_units\", DensityUnits)]\n)\n\n\ndef read_jfif_header(jpeg: Jpeg, file_: T.BinaryIO):\n \"\"\"\n typedef struct _JFIFHeader\n {\n BYTE SOI[2]; /* 00h Start of Image Marker */\n BYTE APP0[2]; /* 02h Application Use Marker */\n BYTE Length[2]; /* 04h Length of APP0 Field */\n BYTE Identifier[5]; /* 06h \"JFIF\" (zero terminated) Id String */\n BYTE Version[2]; /* 07h JFIF Format Revision */\n BYTE Units; /* 09h Units used for Resolution */\n BYTE Xdensity[2]; /* 0Ah Horizontal Resolution */\n BYTE Ydensity[2]; /* 0Ch Vertical Resolution */\n BYTE XThumbnail; /* 0Eh Horizontal Pixel Count */\n BYTE YThumbnail; /* 0Fh Vertical Pixel Count */\n } JFIFHEAD;\n \"\"\"\n # SOI is the start of image marker and always contains the marker code\n # values FFh D8h.\n assert unpack_from_file(\"2B\", file_) == SOI_EXPECTED\n\n # APP0 is the Application marker and always contains the marker code values\n # FFh E0h.\n assert unpack_from_file(\"2B\", file_) == JFIF_APP0_EXPECTED # APP0\n\n # Length is the size of the JFIF (APP0) marker segment, including the size\n # of the Length field itself and any thumbnail data contained in the APP0\n # segment. Because of this, the value of Length equals\n # 16 + 3 * XThumbnail * YThumbnail.\n length = unpack_from_file(\"2B\", file_)\n # TODO: check the value\n\n # Identifier contains the values 4Ah 46h 49h 46h 00h (JFIF) and is used to\n # identify the code stream as conforming to the JFIF specification.\n assert one_from_file(\"5s\", file_) == b\"JFIF\\x00\"\n version = unpack_from_file(\"2B\", file_)\n\n # Units, Xdensity, and Ydensity identify the unit of measurement used to\n # describe the image resolution.\n # Units may be:\n # - 01h for dots per inch\n # - 02h for dots per centimeter\n # - 00h for none (use measurement as pixel aspect ratio).\n units = one_from_file(\"B\", file_)\n\n # Xdensity and Ydensity are the horizontal and vertical resolution of the\n # image data, respectively. 
If the Units field value is 00h, the Xdensity\n # and Ydensity fields will contain the pixel aspect ratio\n # (Xdensity : Ydensity) rather than the image resolution.\n # Because non-square pixels are discouraged for portability reasons, the\n # Xdensity and Ydensity values normally equal 1 when the Units value is 0.\n x_density = one_from_file(\">H\", file_)\n y_density = one_from_file(\">H\", file_)\n print(\"x_density: {}, y_density: {}\".format(x_density, y_density))\n\n x_thumbnail = one_from_file(\"B\", file_)\n y_thumbnail = one_from_file(\"B\", file_)\n print(\"x_thumbnail: {}, y_thumbnail: {}\".format(x_thumbnail, y_thumbnail))\n\n thumbnail_data: T.Optional[T.Iterable[int]] = None\n if x_thumbnail * y_thumbnail > 0:\n # (RGB) * k (3 * k bytes) Packed (byte-interleaved) 24-bit RGB values\n # (8 bits per colour channel) for the thumbnail pixels, in the order\n # R0, G0, B0, ... Rk,\n # Gk, Bk, with k = HthumbnailA * VthumbnailA.\n print(\"Reading {} bytes of thumbnail data\".format(len(thumbnail_data)))\n thumbnail_data = file_.read(3 * x_thumbnail * y_thumbnail)\n\n jpeg.jfif_header = JfifHeader(\n version=version,\n density_units=DensityUnits(units),\n x_density=x_density,\n y_density=y_density,\n x_thumbnail=x_thumbnail,\n y_thumbnail=y_thumbnail,\n thumbnail_data=thumbnail_data,\n )\n\n\n# http://vip.sugovica.hu/Sardi/kepnezo/JPEG%20File%20Layout%20and%20Format.htm\ndef read_dht_header(jpeg: Jpeg, file_: T.BinaryIO):\n \"\"\"\n Read Huffman Table\n \"\"\"\n start_seek_position = file_.tell()\n\n # JPEGs are network byte order e.g. *big* endian\n length = one_from_file(\">H\", file_)\n\n ht_information = one_from_file(\"B\", file_)\n # bit 0..3 : number of HT (0..3, otherwise error)\n # bit 4 : type of HT, 0 = DC table, 1 = AC table\n # bit 5..7 : not used, must be 0\n print(\"ht_information\", bin(ht_information))\n\n # number as in \"index\" not \"count\"\n ht_num = ht_information & 0b00001111\n print(f\"huffman table number: {ht_num}\")\n ht_type = (ht_information & 0b00010000) >> 4\n\n ht_type_str = \"AC\" if bool(ht_type) else \"DC\"\n print(f\"type of huffman table: {ht_type_str}\")\n unused_ht = (ht_information & 0b11100000) >> 5\n assert unused_ht == 0\n\n # Number of symbols with codes of length 1..16,\n # the sum(n) of these bytes is the total number of codes,\n # which must be <= 256\n num_symbols_per_bit_length = unpack_from_file(\"16B\", file_)\n print(num_symbols_per_bit_length)\n\n num_symbols = sum(num_symbols_per_bit_length)\n assert num_symbols <= 256 # per comment above\n\n # Table containing the symbols in order of increasing\n # code length ( n = total number of codes ).\n # symbols = unpack_from_file(\"{}B\".format(num_symbols), file_)\n\n symbols: T.List[T.List[int]] = []\n for symbol_count in num_symbols_per_bit_length:\n symbols.append(unpack_from_file(f\"{symbol_count}B\", file_))\n\n print(symbols)\n\n huff = Huffman(num_symbols_per_bit_length, symbols)\n\n if (file_.tell() - start_seek_position) != length:\n raise NotImplementedError(\n \"Sorry, we don't handle this yet {} != {}\".format(\n (file_.tell() - start_seek_position), length\n )\n )\n # one dht segment can contain another table\n # check if the next byte is a 0xFF\n # TODO: that\n\n\ndef read_dqt_header(jpeg: Jpeg, file_: T.BinaryIO):\n start_seek_position = file_.tell()\n\n length = one_from_file(\">H\", file_)\n\n qt_information = one_from_file(\"B\", file_)\n # bit 0..3: precision of QT, 0 = 8 bit, otherwise 16 bit\n # bit 4..7: QT id (0..3, otherwise error)\n qt_num = ((1 << 4) - 1) & 
qt_information\n precision = ((1 << 4) - 1) & (qt_information >> 4)\n\n # read rest of the bytes\n bytes_left = length - 3\n\n assert bytes_left / (precision + 1) == 64\n print(\"bytes left: {}\".format(bytes_left))\n\n print(\"precision: {} qt_num: {}\".format(precision, qt_num))\n\n # This gives QT values, n = 64*(precision+1)\n if precision == 0:\n qt_table = unpack_from_file(\"{}B\".format(64), file_)\n block = get_zero_block()\n else: # precision > 0\n qt_table = unpack_from_file(\">{}H\".format(64), file_)\n block = get_zero_block(dtype=np.uint16)\n print(qt_table)\n\n fill_zigzag(qt_table, block)\n print(block)\n\n\nclass ComponentType(enum.Enum):\n Y = 1\n Cb = 2\n Cr = 3\n I = 4\n Q = 5\n\n\nSamplingFactors = T.NamedTuple(\n \"SamplingFactors\", [(\"vertical\", int), (\"horizontal\", int)]\n)\n\n\ndef parse_sampling_factors(value: int) -> SamplingFactors:\n return SamplingFactors(\n # anding redundant if byte\n vertical=value >> 4 & 0xF,\n horizontal=value & 0xF,\n )\n\n\nComponentInfo = T.NamedTuple(\n \"ComponentInfo\",\n [\n (\"component_id\", ComponentType),\n (\"sampling_factors\", SamplingFactors),\n (\"quantization_table_id\", int),\n ],\n)\n\n\nStartOfFrame = T.NamedTuple(\n \"StartOfFrame\",\n [\n (\"data_precision\", int),\n (\"image_height\", int),\n (\"image_width\", int),\n (\"components\", T.List[ComponentInfo]),\n ],\n)\n\n\ndef read_sof0_header(jpeg: Jpeg, file_: T.BinaryIO):\n start_seek_position = file_.tell()\n\n length = one_from_file(\">H\", file_)\n data_precision = one_from_file(\"B\", file_) # bit depth of image (8)\n assert data_precision == 8\n\n image_height = one_from_file(\">H\", file_)\n image_width = one_from_file(\">H\", file_)\n\n num_components = one_from_file(\"B\", file_) # 1 = gray, 3 = color\n components = []\n for _ in range(num_components):\n components.append(\n ComponentInfo(\n component_id=ComponentType(one_from_file(\"B\", file_)),\n sampling_factors=parse_sampling_factors(one_from_file(\"B\", file_)),\n quantization_table_id=one_from_file(\"B\", file_),\n )\n )\n\n jpeg.start_of_frame = StartOfFrame(\n data_precision=data_precision,\n image_height=image_height,\n image_width=image_width,\n components=components,\n )\n\n\n# https://en.wikipedia.org/wiki/JPEG#Syntax_and_structure\nMarker = T.NamedTuple(\n \"Marker\",\n [\n (\"short\", str),\n (\"name\", str),\n (\"decoder\", T.Callable[[T.Any, T.BinaryIO], T.Any]),\n ],\n)\n\nMARKER_LOOKUP = {\n 0xD8: Marker(short=\"SOI\", name=\"Start Of Image\", decoder=None),\n 0xC0: Marker(\n short=\"SOF0\", name=\"Start Of Frame (baseline DCT)\", decoder=read_sof0_header\n ),\n 0xC2: Marker(short=\"SOF2\", name=\"Start Of Frame (progressive DCT)\", decoder=None),\n 0xC4: Marker(short=\"DHT\", name=\"Define Huffman Table(s)\", decoder=read_dht_header),\n 0xDB: Marker(\n short=\"DQT\", name=\"Define Quantization Table(s)\", decoder=read_dqt_header\n ),\n 0xDD: Marker(short=\"DRI\", name=\"Define Restart Interval\", decoder=None),\n 0xDA: Marker(short=\"SOS\", name=\"Start Of Scan\", decoder=None),\n # Restart defined in loop\n # Application-specific defined in loop\n 0xFE: Marker(short=\"COM\", name=\"Comment\", decoder=None),\n 0xD9: Marker(short=\"EOI\", name=\"End Of Image\", decoder=None),\n}\n\n# insert Restart markers\nfor n in range(8):\n # make 0xDn (n=0..7) for RSTn\n restart_marker_byte = int(\"D{}\".format(n), 16)\n MARKER_LOOKUP[restart_marker_byte] = Marker(\n short=\"RST{}\".format(n), name=\"Restart\", decoder=None # ?\n )\n\n# insert App markers\nfor n in range(8):\n # make 0xEn 
(n=0..7) for APPn\n    app_marker_byte = int(\"E{}\".format(n), 16)\n    MARKER_LOOKUP[app_marker_byte] = Marker(\n        short=\"APP{}\".format(n), name=\"Application-specific\", decoder=None  # ?\n    )\n\n\ndef add_app14_marker():\n    # special A D O B E tmtm header that adds RGB and other colorspace support to JPG\n    # APP14 is marker byte 0xEE; the original computed int(\"E14\", 16) == 0xE14,\n    # which is not a byte, and reused the leaked loop variable n for the name\n    app_marker_byte = 0xEE\n    MARKER_LOOKUP[app_marker_byte] = Marker(\n        short=\"APP14\",\n        name=\"Application-specific\",\n        decoder=None,  # yet, or probably forever\n    )\n\n\ndef get_next_marker(jpeg: Jpeg, file_: T.BinaryIO):\n    \"\"\"Returns the next marker, with its index.\"\"\"\n    seek_position = file_.tell()\n    # ignore byte-stuffed FFs (0xFF, 0x00)\n    def find_next_ff():\n        byte = file_.read(1)\n        while byte != b\"\\xFF\":\n            byte = file_.read(1)\n            if byte == b\"\":\n                return None  # EOF\n        return file_.read(1)  # read marker identifier (or 0x00)\n\n    while True:\n        marker_identifier = find_next_ff()\n        if marker_identifier is None:\n            return None  # EOF\n        elif marker_identifier != b\"\\x00\":\n            break  # not a byte stuffed thing!\n\n    int_marker_id = struct.unpack(\"B\", marker_identifier)[0]\n\n    if int_marker_id in MARKER_LOOKUP:\n        found_marker = MARKER_LOOKUP[int_marker_id]\n        print(\n            \"Found marker {}, {}, {}\".format(\n                hex(int_marker_id), found_marker.short, found_marker.name\n            )\n        )\n\n        if found_marker.decoder is not None:\n            found_marker.decoder(jpeg, file_)\n    else:\n        print(\"Unknown marker {}\".format(hex(int_marker_id)))\n\n    return file_.tell() - 2  # right before the marker byte\n" ]
[ [ "numpy.zeros" ] ]
girving/lucid
[ "19f171cd73f763f3143b74f7d886bcb499d0e058" ]
[ "lucid/optvis/param/images.py" ]
[ "\n# Copyright 2018 The Lucid Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"High-level wrapper for paramaterizing images.\"\"\"\n\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom lucid.optvis.param.color import to_valid_rgb\nfrom lucid.optvis.param.spatial import naive, fft_image\n\ndef image(w, h=None, batch=None, sd=None, decorrelate=True, fft=True, alpha=False):\n h = h or w\n batch = batch or 1\n channels = 4 if alpha else 3\n shape = [batch, w, h, channels]\n param_f = fft_image if fft else naive\n t = param_f(shape, sd=sd)\n rgb = to_valid_rgb(t[..., :3], decorrelate=decorrelate, sigmoid=True)\n if alpha:\n a = tf.nn.sigmoid(t[..., 3:])\n return tf.concat([rgb, a], -1)\n return rgb\n" ]
[ [ "tensorflow.concat", "tensorflow.nn.sigmoid" ] ]
Wanjpeng/nvrjevnkbklremvklmkekrbemk
[ "ab0ef61a04a7a46021fd546a6112b11780849d82" ]
[ "main.py" ]
[ "import time\r\n#from multiprocessing import Pool\r\n#from multiprocessing import freeze_support\r\nfrom threading import Thread\r\nimport numpy as np\r\nimport random\r\nimport win32gui, win32con\r\n\r\nCONFIGFILE = '.\lwp.conf'\r\nclass DriftWords:\r\n def __init__(self):\r\n self.Words = []\r\n self.Coors = []\r\n self.Velocities = []\r\n self.Accelerations = []\r\n self.__XMINC = 0\r\n self.__XMAXC = 3560#1850\r\n self.__YMINC = 0\r\n self.__YMAXC = 1400#1000\r\n \r\n self.__XINIMINV = 2\r\n self.__YINIMINV = 2\r\n self.__XMINV = -3\r\n self.__XMAXV = 3\r\n self.__YMINV = -3\r\n self.__YMAXV = 3\r\n \r\n def Initalize(self):\r\n for word in self.Words:\r\n self.Coors.append([np.random.randint(self.__XMINC,self.__XMAXC,1)[0], self.__YMINC])\r\n self.Velocities.append([np.random.randint(self.__XINIMINV, self.__XMAXV,1)[0],\r\n np.random.randint(self.__YINIMINV, self.__YMAXV,1)[0]])\r\n self.Accelerations.append([np.random.randn(1)[0], np.random.randn(1)[0]])\r\n print('DriftWords Initialize Completed...')\r\n \r\n def OnChange(self,mode,para=''):\r\n if mode == 'RandomMove':\r\n #print('Random Move starting...')\r\n for i in range(len(self.Words)):\r\n ## update the coordinate of Words[i]\r\n # Update the coordinate's 'x' of word\r\n #print('words:',self.Words)\r\n #print('coors:',self.Coors)\r\n if self.Coors[i][0] <= self.__XMINC :\r\n if self.Velocities[i][0] < 0:\r\n self.Velocities[i][0] = -self.Velocities[i][0]\r\n self.Coors[i][0] = self.Coors[i][0] + int(self.Velocities[i][0])\r\n else:\r\n self.Coors[i][0] = self.Coors[i][0] + int(self.Velocities[i][0])\r\n elif self.Coors[i][0] >= self.__XMAXC :\r\n if self.Velocities[i][0] >0:\r\n self.Velocities[i][0] = -self.Velocities[i][0]\r\n self.Coors[i][0] = self.Coors[i][0] + int(self.Velocities[i][0])\r\n else:\r\n self.Coors[i][0] = self.Coors[i][0] + int(self.Velocities[i][0])\r\n else:\r\n self.Coors[i][0] = self.Coors[i][0] + int(self.Velocities[i][0])\r\n # Update the coordinate's 'y' of word\r\n if self.Coors[i][1] <= self.__YMINC :\r\n if self.Velocities[i][1] < 1:\r\n self.Velocities[i][1] = -self.Velocities[i][1]\r\n self.Coors[i][1] = self.Coors[i][1] + int(self.Velocities[i][1])\r\n else:\r\n self.Coors[i][1] = self.Coors[i][1] + int(self.Velocities[i][1])\r\n elif self.Coors[i][1] >= self.__YMAXC :\r\n if self.Velocities[i][1] >1:\r\n self.Velocities[i][1] = -self.Velocities[i][1]\r\n self.Coors[i][1] = self.Coors[i][1] + int(self.Velocities[i][1])\r\n else:\r\n self.Coors[i][1] = self.Coors[i][1] + int(self.Velocities[i][1])\r\n else:\r\n self.Coors[i][1] = self.Coors[i][1] + int(self.Velocities[i][1])\r\n ## update the Velocity of Words[i]\r\n if self.Velocities[i][0] <= self.__XMINV and self.Accelerations[i][0]<0:\r\n self.Accelerations[i][0] = -self.Accelerations[i][0]\r\n self.Velocities[i][0] = self.Velocities[i][0] + int(self.Accelerations[i][0])\r\n elif self.Velocities[i][0] >= self.__XMAXV and self.Accelerations[i][0]>0:\r\n self.Accelerations[i][0] = -self.Accelerations[i][0]\r\n self.Velocities[i][0] = self.Velocities[i][0] + int(self.Accelerations[i][0])\r\n else:\r\n self.Velocities[i][0] = self.Velocities[i][0] + int(self.Accelerations[i][0])\r\n if self.Velocities[i][1] <= self.__YMINV and self.Accelerations[i][1]<1:\r\n self.Accelerations[i][1] = -self.Accelerations[i][1]\r\n self.Velocities[i][1] = self.Velocities[i][1] + int(self.Accelerations[i][1])\r\n elif self.Velocities[i][1] >= self.__YMAXV and self.Accelerations[i][1]>1:\r\n self.Accelerations[i][1] = -self.Accelerations[i][1]\r\n self.Velocities[i][1] = self.Velocities[i][1] + int(self.Accelerations[i][1])\r\n else:\r\n self.Velocities[i][1] = self.Velocities[i][1] + int(self.Accelerations[i][1])\r\n ## update the Accelerations of Words[i]\r\n self.Accelerations[i][0] = np.random.randn(1)[0]/2\r\n self.Accelerations[i][1] = np.random.randn(1)[0]/2\r\n #print('word:%13s'%self.Words[i],'cor:',self.Coors[i],\r\n # 'V:',self.Velocities[i],'A:',self.Accelerations[i])\r\n #print('Random Move Completed...')\r\n elif mode == 'DelWord':\r\n word_id = int(para)\r\n if word_id != -1:\r\n print('Delete Word:\'%s\'! ID:%d'%(self.Words[word_id],word_id))\r\n del self.Words[word_id]\r\n del self.Coors[word_id]\r\n del self.Velocities[word_id]\r\n del self.Accelerations[word_id]\r\n return 0\r\n else:\r\n return 1 # cannot delete, return 1\r\n elif mode == 'AddWord':\r\n print('prepare add word')\r\n if para == '':\r\n return 2\r\n if para in self.Words:\r\n #win32gui.MessageBox(0,'123','456',win32con.MB_ABORTRETRYIGNORE)\r\n #print('Word:\'%s\' already exists, so it can\'t be added to show!'%para)\r\n return 2 # word already exists, cannot add, return 2\r\n print(self.Words)\r\n print(self.Coors)\r\n print(self.Velocities)\r\n print(self.Accelerations)\r\n self.Words.append(para)\r\n self.Coors.append([np.random.randint(self.__XMINC,self.__XMAXC,1)[0], self.__YMINC])\r\n print('Coors appended..')\r\n self.Velocities.append([np.random.randint(self.__XINIMINV, self.__XMAXV,1)[0], np.random.randint(self.__YINIMINV, self.__YMAXV,1)[0]])\r\n print('Velocities appended..')\r\n self.Accelerations.append([np.random.randn(1)[0], np.random.randn(1)[0]])\r\n print('Accelerations appended..')\r\n print('Add word:\'%s\' accomplished!'%para)\r\n return 0\r\n def ShowWords(self):\r\n print('Drift Words list:',end='')\r\n for word in self.Words:\r\n print(word,end='')\r\n print('\\nDrift Words print finished!')\r\n\r\ndef GetWorkerWhdwnd():\r\n # Get the handle of program manager\r\n hProgMan = win32gui.FindWindow(\"Progman\", None)\r\n # print(\"hProgMan:\",hProgMan)\r\n \r\n # Send a message to program manager to split 'FolderView' and 'SysListView' into WorkerW;\r\n # this does not work when the parameter is 0\r\n rc,result = win32gui.SendMessageTimeout(hProgMan, 0x052C, 0x0000000D, 1, win32con.SMTO_NORMAL, 1000)\r\n# rc,result = win32gui.SendMessageTimeout(hProgMan, 0x052C, 0x0000000D, 0, win32con.SMTO_NORMAL, 1000)\r\n# print('rc,result:',rc,result)\r\n# print(\"win32 error:\",win32api.GetLastError())\r\n arhWorkerW = []\r\n def GetWorkerWs(hwnd,mouse):\r\n if win32gui.GetClassName(hwnd) == 'WorkerW' :\r\n arhWorkerW.append(hwnd)\r\n #print('hwnd:',hex(hwnd),\"wdnm:%15s\"%win32gui.GetWindowText(hwnd),\r\n # 'class:%15s'%win32gui.GetClassName(hwnd))\r\n win32gui.EnumWindows(GetWorkerWs, 0)\r\n return arhWorkerW[-1]\r\n\r\ndef GethChildwindow(hParendWND,childWNDclass,childWNDcaption):\r\n matched_childhWND = win32gui.FindWindowEx(hParendWND,0,childWNDclass,childWNDcaption)\r\n return matched_childhWND\r\n \r\ndef SendTToBG():\r\n cnt = 0\r\n hWorkerW = GetWorkerWhdwnd()\r\n hBGWND = GethChildwindow(hWorkerW,'Python DeskTop BG On Windows',\r\n 'my_Python_windows_desktop')\r\n global MoveFlg\r\n while 1:\r\n time.sleep(0.014) # 0.016667 is 60 Hz\r\n if MoveFlg ==0 :\r\n #print('send message')\r\n win32gui.SendMessage(hBGWND, win32con.WM_TIMER, cnt, None)\r\n elif MoveFlg ==1 :\r\n pass\r\n elif MoveFlg ==2 :\r\n #print('send message quit')\r\n break\r\n return 0\r\n\r\nclass BGWND:\r\n hdcbuffer = 0\r\n global DFW\r\n def BGWndProc(self,hwnd,msg,wParam,lParam):\r\n if msg == win32con.WM_PAINT:\r\n #print('BG painting...%d...'%wParam)\r\n hdc,ps = win32gui.BeginPaint(hwnd)\r\n rect = win32gui.GetClientRect(hwnd)\r\n # win32gui.Rectangle(hdc,min(xc),min(yc),max(xc),max(yc))\r\n # win32gui.Ellipse(hdc,300+wParam,550,400+wParam,650)\r\n DFW.OnChange(mode='RandomMove')\r\n \r\n #self.hdcbuffer = win32gui.CreateCompatibleDC(hdc)\r\n #hBitMap = win32gui.CreateCompatibleBitmap(hdc, 1920, 1080)\r\n ###win32gui.ReleaseDC(hwnd, hdc)\r\n #win32gui.SelectObject(self.hdcbuffer, hBitMap)\r\n #win32gui.PatBlt(self.hdcbuffer, 0, 0, 1920, 1080, win32con.WHITENESS)\r\n \r\n #win32gui.SetBkMode(hdc,win32con.TRANSPARENT)\r\n for index in range(len(DFW.Words)):\r\n rect = (int(DFW.Coors[index][0]),int(DFW.Coors[index][1]),1920,1080)\r\n win32gui.DrawText(hdc,'%s,%d'%(DFW.Words[index],wParam),\r\n len('%s'%(DFW.Words[index])),rect,win32con.DT_SINGLELINE|win32con.DT_TOP|win32con.DT_LEFT)\r\n #win32gui.BitBlt(hdc, 0, 0, 1920, 1080, self.hdcbuffer, 0, 0, win32con.SRCCOPY )\r\n win32gui.EndPaint(hwnd,ps)\r\n elif msg == win32con.WM_DESTROY: \r\n win32gui.PostQuitMessage(0)\r\n elif msg == win32con.WM_TIMER:\r\n #print(\"BG TIMER TRIGGERED\",wParam)\r\n win32gui.InvalidateRect(hwnd,None,True)\r\n win32gui.UpdateWindow(hwnd)\r\n else:\r\n pass\r\n #print('BG: other message:',msg)\r\n return win32gui.DefWindowProc(hwnd,msg,wParam,lParam) \r\n \r\n def BGWNDRegister(self,parenthwnd):\r\n def BGWNDREG():\r\n wc = win32gui.WNDCLASS() \r\n wc.hbrBackground = win32con.COLOR_DESKTOP +1\r\n wc.hCursor = win32gui.LoadCursor(0,win32con.IDI_APPLICATION) \r\n wc.lpszClassName = \"Python DeskTop BG On Windows\" \r\n wc.lpfnWndProc = self.BGWndProc \r\n reg = win32gui.RegisterClass(wc)\r\n return reg\r\n \r\n hwnd = win32gui.CreateWindowEx(\r\n win32con.WS_EX_NOACTIVATE, BGWNDREG(),\r\n 'my_Python_windows_desktop',\r\n win32con.WS_MAXIMIZE|win32con.WS_CHILD|win32con.WS_VISIBLE,#WS_OVERLAPPEDWINDOW\r\n win32con.CW_USEDEFAULT,win32con.CW_USEDEFAULT,\r\n win32con.CW_USEDEFAULT,win32con.CW_USEDEFAULT,\r\n parenthwnd,#WorkerW\r\n 0,0,None)\r\n win32gui.ShowWindow(hwnd,win32con.SW_SHOW) \r\n win32gui.UpdateWindow(hwnd) \r\n win32gui.PumpMessages()\r\n \r\n def StartBGWND(self):\r\n print('starting Background Display Window...')\r\n hWorkerW = GetWorkerWhdwnd()\r\n if not hWorkerW:\r\n print('ERROR OCCURRED: Can\'t get handle of WorkerW!')\r\n return 0\r\n if not GethChildwindow(hWorkerW,'Python DeskTop BG On Windows','my_Python_windows_desktop'):\r\n self.BGWNDRegister(hWorkerW) # Background display\r\n else:\r\n print('Background Display Window already exists!')\r\n\r\nclass FGWND:\r\n ID_BUTTON1 = 0\r\n ID_BUTTON2 = 0\r\n ID_BUTTON3 = 0\r\n ID_LISTBOX1 = 0\r\n ID_TEXT1 = 0\r\n def FGWndProc(self,hwnd,msg,wParam,lParam): \r\n global MoveFlg\r\n #global DFW\r\n if msg == win32con.WM_PAINT: \r\n hdc,ps = win32gui.BeginPaint(hwnd) \r\n rect = win32gui.GetClientRect(hwnd) \r\n #win32gui.DrawText(hdc,'control panel',len('control panel'),\r\n # rect,win32con.DT_SINGLELINE|win32con.DT_CENTER|win32con.DT_VCENTER) \r\n win32gui.EndPaint(hwnd,ps) \r\n if msg == win32con.WM_DESTROY:\r\n MoveFlg = 2\r\n with open(CONFIGFILE, 'w+', encoding='utf-8')as fo:\r\n for word in DFW.Words:\r\n fo.write(word+'\\n')\r\n print('closing BG window...')\r\n hWorkerW = GetWorkerWhdwnd()\r\n if not hWorkerW:\r\n print('ERROR OCCURRED: Can\'t get handle of WorkerW!')\r\n else:\r\n hBGWND = GethChildwindow(hWorkerW,'Python DeskTop BG On Windows','my_Python_windows_desktop')\r\n if hBGWND:\r\n win32gui.SendMessageTimeout(hBGWND, win32con.WM_CLOSE, 0, 0, win32con.SMTO_NORMAL, 1000)\r\n else:\r\n print('can\'t find BG window!')\r\n print('closing FG window...')\r\n win32gui.PostQuitMessage(0)\r\n elif msg == win32con.WM_COMMAND:\r\n if lParam == self.ID_BUTTON1:\r\n tempFlg = MoveFlg\r\n MoveFlg = 1\r\n # first, get the length of the text in the edit box\r\n length = win32gui.SendMessage(self.ID_TEXT1, win32con.WM_GETTEXTLENGTH)+1\r\n # allocate a buffer to hold the string (of size length)\r\n buffer = win32gui.PyMakeBuffer(length)\r\n # send a message to the edit box to fetch its content\r\n win32gui.SendMessage(self.ID_TEXT1, win32con.WM_GETTEXT, length, buffer)\r\n # get the string's address and length\r\n address, length = win32gui.PyGetBufferAddressAndLen(buffer[:-1])\r\n # read the string the pointer refers to\r\n text = win32gui.PyGetString(address, length)\r\n \r\n #print('Len:%d,'%length,'Text:\'%s\''%text)\r\n if not DFW.OnChange('AddWord',text):\r\n win32gui.SendMessage(self.ID_LISTBOX1, win32con.LB_ADDSTRING,0,text)\r\n #print('list box entry added')\r\n MoveFlg = tempFlg\r\n elif lParam == self.ID_BUTTON2:\r\n tempFlg = MoveFlg\r\n MoveFlg = 1\r\n sel_id = win32gui.SendMessage(self.ID_LISTBOX1, win32con.LB_GETCURSEL,0,0)\r\n # win32con.LB_GETCOUNT returns the total number of entries in the listbox\r\n if sel_id != -1:\r\n DFW.OnChange('DelWord',str(sel_id))\r\n # delete the entry whose id is sel_id\r\n win32gui.SendMessage(self.ID_LISTBOX1, win32con.LB_DELETESTRING,sel_id,0)\r\n MoveFlg = tempFlg\r\n elif lParam == self.ID_BUTTON3:\r\n #print('button 3 clicked: switch the state of words\'s moving')\r\n if MoveFlg == 0:\r\n MoveFlg = 1\r\n elif MoveFlg ==1:\r\n MoveFlg = 0\r\n else:\r\n pass\r\n #print(wParam,lParam)\r\n else:\r\n pass\r\n #print('FG: other message',hex(msg),wParam,lParam,'IDs:',self.ID_BUTTON1,self.ID_BUTTON2,self.ID_LISTBOX1,self.ID_TEXT1)\r\n \r\n return win32gui.DefWindowProc(hwnd,msg,wParam,lParam) \r\n def FGWNDRegister(self):\r\n global DFW\r\n def FGWNDREG():\r\n wc = win32gui.WNDCLASS() \r\n wc.hbrBackground = win32con.COLOR_BTNFACE + 1 \r\n wc.hCursor = win32gui.LoadCursor(0,win32con.IDI_APPLICATION) \r\n wc.lpszClassName = \"Python DeskTop FG On Windows\" \r\n wc.lpfnWndProc = self.FGWndProc \r\n reg = win32gui.RegisterClass(wc)\r\n return reg\r\n reg = FGWNDREG()\r\n hwnd = win32gui.CreateWindowEx(0, reg,\r\n 'Live Wallpaper Control Panel',\r\n win32con.WS_DLGFRAME|win32con.WS_SYSMENU|win32con.WS_MINIMIZEBOX,\r\n win32con.CW_USEDEFAULT,win32con.CW_USEDEFAULT,\r\n 450,430,\r\n 0,#Parent hWnd\r\n 0,0,None)\r\n self.ID_BUTTON1 = win32gui.CreateWindow(\"button\" ,'Add Word',win32con.WS_CHILD|win32con.WS_VISIBLE|win32con.BS_PUSHBUTTON,\r\n 330, 330, 85, 30, hwnd, 1, None,None)\r\n self.ID_BUTTON2 = win32gui.CreateWindow(\"button\" ,'Delete Word',win32con.WS_CHILD|win32con.WS_VISIBLE|win32con.BS_PUSHBUTTON,\r\n 330, 20, 85, 30, hwnd, 2, None,None)\r\n self.ID_BUTTON3 = win32gui.CreateWindow(\"button\" ,'Start/Stop',win32con.WS_CHILD|win32con.WS_VISIBLE|win32con.BS_PUSHBUTTON,\r\n 330, 150, 85, 30, hwnd, 2, None,None)\r\n self.ID_LISTBOX1=win32gui.CreateWindow( \"listbox\", \"list box 1\",\r\n win32con.WS_CHILD|win32con.WS_VSCROLL | win32con.WS_TABSTOP | win32con.LBS_HASSTRINGS|win32con.LBS_NOTIFY,\r\n 20, 20, 280, 300, hwnd, 1, None, None )\r\n self.ID_TEXT1 = win32gui.CreateWindow(\"edit\" ,'',win32con.WS_CHILD|win32con.WS_VISIBLE|win32con.WS_BORDER|win32con.ES_AUTOVSCROLL|win32con.ES_AUTOHSCROLL,\r\n 20, 330, 280, 30, hwnd, 1, None,None)\r\n win32gui.ShowWindow(hwnd,win32con.SW_SHOW)\r\n win32gui.ShowWindow(self.ID_LISTBOX1,win32con.SW_SHOW)\r\n for word_id in range(len(DFW.Words)):\r\n win32gui.SendMessage(self.ID_LISTBOX1, win32con.LB_ADDSTRING,0,DFW.Words[word_id])\r\n #print('L1-2:',win32gui.SendMessage(self.ID_LISTBOX1, win32con.WM_SETREDRAW, 0,None))\r\n\r\n win32gui.UpdateWindow(hwnd)\r\n # print('FG Timer',win32api.SetTimer(hwnd,1,1000,None))\r\n win32gui.PumpMessages()\r\n def StartFGWND(self):\r\n print('starting Foreground Control Panel Window...')\r\n if not win32gui.FindWindow('Python DeskTop FG On Windows','Live Wallpaper Control Panel'):\r\n self.FGWNDRegister() # Foreground display\r\n else:\r\n print('Foreground Control Panel Window already exists!')\r\n\r\n\r\nif __name__ == '__main__':\r\n# freeze_support()\r\n\r\n DFW = DriftWords()\r\n\r\n #try:\r\n fi = open(CONFIGFILE,'r+',encoding='utf-8')\r\n i = 0\r\n while 1:\r\n line = fi.readline()\r\n line = line.strip()\r\n #print('--------------line',line)\r\n if line != '':\r\n DFW.Words.append(line)\r\n i = i+1\r\n #print('line[%d]:\'%s\''%(i,line))\r\n else:\r\n break\r\n #print('Read lines finished.')\r\n DFW.Initalize()\r\n fi.close()\r\n #except:\r\n # print('can\'t find the configuration file!\\nCreate a new one(%s).'%CONFIGFILE)\r\n # fi = open(CONFIGFILE,'w',encoding='utf-8')\r\n # fi.close()\r\n \r\n MoveFlg = 1\r\n \r\n BG = BGWND()\r\n FG = FGWND()\r\n \r\n t1 = Thread(target=BG.StartBGWND,args=() )\r\n t2 = Thread(target=FG.StartFGWND,args=() )\r\n t1.start()\r\n t2.start()\r\n time.sleep(1)\r\n if 1:#input('Enter \'y\' to refresh the window; any other key skips refreshing:') == 'y':\r\n t3 = Thread(target=SendTToBG,args=() )\r\n t3.start()\r\n t3.join()\r\n t1.join() \r\n t2.join() \r\n" ]
[ [ "numpy.random.randn", "numpy.random.randint" ] ]
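A minimal, GUI-free sketch of the drift physics in the main.py entry above, assuming nothing beyond numpy: each word's position integrates its velocity, the velocity integrates a fresh random acceleration, and both are reflected at the screen and speed bounds. This is a simplification (the original compares the y-axis thresholds against 1 rather than 0), and the names step and v_bounds are illustrative, not from the repo.

import numpy as np

def step(pos, vel, bounds, v_bounds, rng):
    """Advance one word by one frame (hypothetical helper, not in the repo)."""
    acc = rng.standard_normal(2) / 2                  # random acceleration, as in OnChange
    for axis in range(2):
        lo, hi = bounds[axis]
        if (pos[axis] <= lo and vel[axis] < 0) or (pos[axis] >= hi and vel[axis] > 0):
            vel[axis] = -vel[axis]                    # bounce off the screen edge
        pos[axis] += int(vel[axis])
        vlo, vhi = v_bounds[axis]
        if (vel[axis] <= vlo and acc[axis] < 0) or (vel[axis] >= vhi and acc[axis] > 0):
            acc[axis] = -acc[axis]                    # keep the speed inside its band
        vel[axis] += int(acc[axis])
    return pos, vel

rng = np.random.default_rng(0)
pos, vel = [100, 0], [2, 2]
for _ in range(5):
    pos, vel = step(pos, vel, bounds=[(0, 3560), (0, 1400)],
                    v_bounds=[(-3, 3), (-3, 3)], rng=rng)
print(pos, vel)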
dlgeorge/geoclaw
[ "3fa4dcea359e6439d1fbb79f0d478e65ff2ea38f" ]
[ "examples/multi-layer/plane_wave/setplot.py" ]
[ "\n\"\"\"\nSet up the plot figures, axes, and items to be done for each frame.\n\nThis module is imported by the plotting routines and then the\nfunction setplot is called to set the plot parameters.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom clawpack.visclaw import geoplot, gaugetools\n\nimport clawpack.clawutil.data as clawutil\nimport clawpack.amrclaw.data as amrclaw\nimport clawpack.geoclaw.data\n\nimport clawpack.geoclaw.multilayer.plot as ml_plot\n\n\ndef setplot(plotdata=None, bathy_location=0.15, bathy_angle=0.0,\n bathy_left=-1.0, bathy_right=-0.2):\n \"\"\"Setup the plotting data objects.\n\n Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.\n Output: a modified version of plotdata.\n\n returns plotdata object\n\n \"\"\"\n\n if plotdata is None:\n from clawpack.visclaw.data import ClawPlotData\n plotdata = ClawPlotData()\n\n # Load data from output\n clawdata = clawutil.ClawInputData(2)\n clawdata.read(os.path.join(plotdata.outdir, 'claw.data'))\n multilayer_data = clawpack.geoclaw.data.MultilayerData()\n multilayer_data.read(os.path.join(plotdata.outdir, 'multilayer.data'))\n\n def transform_c2p(x, y, x0, y0, theta):\n return ((x+x0)*np.cos(theta) - (y+y0)*np.sin(theta),\n (x+x0)*np.sin(theta) + (y+y0)*np.cos(theta))\n\n def transform_p2c(x, y, x0, y0, theta):\n return (x*np.cos(theta) + y*np.sin(theta) - x0,\n -x*np.sin(theta) + y*np.cos(theta) - y0)\n\n # Setup bathymetry reference lines\n with open(os.path.join(plotdata.outdir, \"bathy_geometry.data\"), 'r') \\\n as bathy_geometry_file:\n bathy_location = float(bathy_geometry_file.readline())\n bathy_angle = float(bathy_geometry_file.readline())\n x = [0.0, 0.0]\n y = [0.0, 1.0]\n x1, y1 = transform_c2p(x[0], y[0], bathy_location, 0.0, bathy_angle)\n x2, y2 = transform_c2p(x[1], y[1], bathy_location, 0.0, bathy_angle)\n\n if abs(x1 - x2) < 10**-3:\n x = [x1, x1]\n y = [clawdata.lower[1], clawdata.upper[1]]\n else:\n m = (y1 - y2) / (x1 - x2)\n x[0] = (clawdata.lower[1] - y1) / m + x1\n y[0] = clawdata.lower[1]\n x[1] = (clawdata.upper[1] - y1) / m + x1\n y[1] = clawdata.upper[1]\n ref_lines = [((x[0], y[0]), (x[1], y[1]))]\n\n plotdata.clearfigures()\n plotdata.save_frames = False\n\n # ========================================================================\n # Generic helper functions\n def pcolor_afteraxes(current_data):\n bathy_ref_lines(current_data)\n\n def contour_afteraxes(current_data):\n axes = plt.gca()\n pos = -80.0 * (23e3 / 180) + 500e3 - 5e3\n axes.plot([pos, pos], [-300e3, 300e3], 'b',\n [pos-5e3, pos-5e3], [-300e3, 300e3], 'y')\n wind_contours(current_data)\n bathy_ref_lines(current_data)\n\n def profile_afteraxes(current_data):\n pass\n\n def bathy_ref_lines(current_data):\n axes = plt.gca()\n for ref_line in ref_lines:\n x1 = ref_line[0][0]\n y1 = ref_line[0][1]\n x2 = ref_line[1][0]\n y2 = ref_line[1][1]\n axes.plot([x1, x2], [y1, y2], 'y--', linewidth=1)\n\n # ========================================================================\n # Axis limits\n\n xlimits = [-0.5, 0.5]\n ylimits = [-0.5, 0.5]\n eta = [multilayer_data.eta[0], multilayer_data.eta[1]]\n top_surface_limits = [eta[0] - 0.03, eta[0] + 0.03]\n internal_surface_limits = [eta[1] - 0.015, eta[1] + 0.015]\n top_speed_limits = [0.0, 0.1]\n internal_speed_limits = [0.0, 0.03]\n\n # ========================================================================\n # Surface Elevations\n plotfigure = 
plotdata.new_plotfigure(name='Surface')\n plotfigure.show = True\n plotfigure.kwargs = {'figsize': (14, 4)}\n\n # Top surface\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Top Surface'\n plotaxes.axescmd = 'subplot(1, 2, 1)'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = pcolor_afteraxes\n ml_plot.add_surface_elevation(plotaxes,1,bounds=top_surface_limits)\n # ml_plot.add_surface_elevation(plotaxes,1,bounds=[-0.06,0.06])\n # ml_plot.add_surface_elevation(plotaxes,1)\n ml_plot.add_land(plotaxes, 1)\n \n # Bottom surface\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Internal Surface'\n plotaxes.axescmd = 'subplot(1,2,2)'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = pcolor_afteraxes\n # ml_plot.add_surface_elevation(plotaxes,2,bounds=[-300-0.5,-300+0.5])\n ml_plot.add_surface_elevation(plotaxes,2,bounds=internal_surface_limits)\n # ml_plot.add_surface_elevation(plotaxes,2)\n ml_plot.add_land(plotaxes, 2)\n \n # ========================================================================\n # Depths\n # ========================================================================\n plotfigure = plotdata.new_plotfigure(name='Depths', figno=42)\n plotfigure.show = False\n plotfigure.kwargs = {'figsize':(14,4)}\n \n # Top surface\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Top Layer Depth'\n plotaxes.axescmd = 'subplot(1,2,1)'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = pcolor_afteraxes\n ml_plot.add_layer_depth(plotaxes,1,bounds=[-0.1,1.1])\n ml_plot.add_land(plotaxes, 1)\n \n # Bottom surface\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Bottom Layer Depth'\n plotaxes.axescmd = 'subplot(1,2,2)'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = pcolor_afteraxes\n ml_plot.add_layer_depth(plotaxes,2,bounds=[-0.1,0.7])\n ml_plot.add_land(plotaxes, 2)\n \n # ========================================================================\n # Water Speed\n plotfigure = plotdata.new_plotfigure(name='speed')\n plotfigure.show = True\n plotfigure.kwargs = {'figsize': (14, 4)}\n\n # Top layer speed\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Currents - Top Layer'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.axescmd = 'subplot(1, 2, 1)'\n plotaxes.afteraxes = pcolor_afteraxes\n ml_plot.add_speed(plotaxes, 1, bounds=top_speed_limits)\n ml_plot.add_land(plotaxes, 1)\n\n # Bottom layer speed\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Currents - Bottom Layer'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.axescmd = 'subplot(1,2,2)'\n plotaxes.afteraxes = pcolor_afteraxes\n # add_speed(plotaxes,2,bounds=[0.0,1e-10])\n ml_plot.add_speed(plotaxes,2,bounds=internal_speed_limits)\n # add_speed(plotaxes,2)\n ml_plot.add_land(plotaxes, 2)\n \n # Individual components\n plotfigure = plotdata.new_plotfigure(name='speed_components',figno=401)\n plotfigure.show = False\n plotfigure.kwargs = {'figsize':(14,14)}\n \n # Top layer\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = \"X-Velocity - Top Layer\"\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.axescmd = 'subplot(2,2,1)'\n plotaxes.afteraxes = pcolor_afteraxes\n # add_x_velocity(plotaxes,1,bounds=[-1e-10,1e-10])\n 
ml_plot.add_x_velocity(plotaxes,1)\n    ml_plot.add_land(plotaxes, 1)\n    \n    plotaxes = plotfigure.new_plotaxes()\n    plotaxes.title = \"Y-Velocity - Top Layer\"\n    plotaxes.scaled = True\n    plotaxes.xlimits = xlimits\n    plotaxes.ylimits = ylimits\n    plotaxes.axescmd = 'subplot(2,2,2)'\n    plotaxes.afteraxes = pcolor_afteraxes\n    # add_y_velocity(plotaxes,1,bounds=[-0.000125,0.000125])\n    ml_plot.add_y_velocity(plotaxes,1)\n    ml_plot.add_land(plotaxes, 1)\n    \n    # Bottom layer\n    plotaxes = plotfigure.new_plotaxes()\n    plotaxes.title = \"X-Velocity - Bottom Layer\"\n    plotaxes.scaled = True\n    plotaxes.xlimits = xlimits\n    plotaxes.ylimits = ylimits\n    plotaxes.axescmd = 'subplot(2,2,3)'\n    plotaxes.afteraxes = pcolor_afteraxes\n    # add_x_velocity(plotaxes,2,bounds=[-1e-10,1e-10])\n    ml_plot.add_x_velocity(plotaxes,2)\n    ml_plot.add_land(plotaxes, 2)\n    \n    plotaxes = plotfigure.new_plotaxes()\n    plotaxes.title = \"Y-Velocity - Bottom Layer\"\n    plotaxes.scaled = True\n    plotaxes.xlimits = xlimits\n    plotaxes.ylimits = ylimits\n    plotaxes.axescmd = 'subplot(2,2,4)'\n    plotaxes.afteraxes = pcolor_afteraxes\n    # add_y_velocity(plotaxes,2,bounds=[-0.8e-6,.8e-6])\n    ml_plot.add_y_velocity(plotaxes,2)\n\n    ml_plot.add_land(plotaxes, 2)\n    # ========================================================================\n    # Profile Plots\n    # Note that these are not currently plotted by default - set\n    # `plotfigure.show = True` if you want this to be plotted\n    plotfigure = plotdata.new_plotfigure(name='profile')\n    plotfigure.show = False\n\n    # Top surface\n    plotaxes = plotfigure.new_plotaxes()\n    plotaxes.xlimits = xlimits\n    plotaxes.ylimits = [-1.1, 0.1]\n    plotaxes.title = \"Profile of depth\"\n    plotaxes.afteraxes = profile_afteraxes\n\n    slice_index = 30\n\n    # Internal surface\n    def bathy_profile(current_data):\n        return current_data.x[:, slice_index], b(current_data)[:, slice_index]\n\n    def lower_surface(current_data):\n        if multilayer_data.init_type == 2:\n            return current_data.x[:, slice_index], \\\n                   eta2(current_data)[:, slice_index]\n        elif multilayer_data.init_type == 6:\n            return current_data.y[slice_index, :], \\\n                   eta2(current_data)[slice_index, :]\n\n    def upper_surface(current_data):\n        if multilayer_data.init_type == 2:\n            return current_data.x[:, slice_index], \\\n                   eta1(current_data)[:, slice_index]\n        elif multilayer_data.init_type == 6:\n            return current_data.y[slice_index, :], \\\n                   eta1(current_data)[slice_index, :]\n\n    def top_speed(current_data):\n        if multilayer_data.init_type == 2:\n            return current_data.x[:, slice_index], \\\n                   water_u1(current_data)[:, slice_index]\n        elif multilayer_data.init_type == 6:\n            return current_data.y[slice_index, :], \\\n                   water_u1(current_data)[slice_index, :]\n\n    def bottom_speed(current_data):\n        if multilayer_data.init_type == 2:\n            return current_data.x[:, slice_index], \\\n                   water_u2(current_data)[:, slice_index]\n        elif multilayer_data.init_type == 6:\n            return current_data.y[slice_index, :], \\\n                   water_u2(current_data)[slice_index, :]\n\n    # Bathy\n    plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')\n    plotitem.map_2d_to_1d = bathy_profile\n    plotitem.plot_var = 0\n    plotitem.amr_plotstyle = ['-', '+', 'x']\n    plotitem.color = 'k'\n    plotitem.show = True\n\n    # Internal Interface\n    plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')\n    plotitem.map_2d_to_1d = lower_surface\n    plotitem.plot_var = 7\n    plotitem.amr_plotstyle = ['-', '+', 'x']\n    plotitem.color = 'b'\n    plotitem.show = True\n\n    # Upper Interface\n    plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')\n    plotitem.map_2d_to_1d 
= upper_surface\n plotitem.plot_var = 6\n plotitem.amr_plotstyle = ['-', '+', 'x']\n plotitem.color = (0.2, 0.8, 1.0)\n plotitem.show = True\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Y-Velocity'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = pcolor_afteraxes\n \n # Water\n # plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n # # plotitem.plot_var = geoplot.surface\n # plotitem.plot_var = water_v\n # plotitem.pcolor_cmap = colormaps.make_colormap({1.0:'r',0.5:'w',0.0:'b'})\n # # plotitem.pcolor_cmin = -1.e-10\n # # plotitem.pcolor_cmax = 1.e-10\n # # plotitem.pcolor_cmin = -2.5 # -3.0\n # # plotitem.pcolor_cmax = 2.5 # 3.0\n # plotitem.add_colorbar = True\n # plotitem.amr_celledges_show = [0,0,0]\n # plotitem.amr_patchedges_show = [1,1,1]\n\n # Land\n ml_plot.add_land(plotaxes, 1)\n \n # ========================================================================\n # Contour plot for surface\n # ========================================================================\n plotfigure = plotdata.new_plotfigure(name='contour_surface',figno=15)\n plotfigure.show = False\n plotfigure.kwargs = {'figsize':(14,4)}\n \n # Set up for axes in this figure:\n \n # Top Surface\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Top Surface'\n plotaxes.axescmd = 'subplot(1,2,1)'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = contour_afteraxes\n ml_plot.add_surface_elevation(plotaxes,plot_type='contour',surface=1,bounds=[-2.5,-1.5,-0.5,0.5,1.5,2.5])\n ml_plot.add_land(plotaxes, 1, plot_type='contour')\n \n # Internal Surface\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Internal Surface'\n plotaxes.axescmd = 'subplot(1,2,2)'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = contour_afteraxes\n ml_plot.add_surface_elevation(plotaxes,plot_type='contour',surface=2,bounds=[-2.5,-1.5,-0.5,0.5,1.5,2.5])\n ml_plot.add_land(plotaxes, 2, plot_type='contour')\n \n # ========================================================================\n # Contour plot for speed\n # ========================================================================\n plotfigure = plotdata.new_plotfigure(name='contour_speed',figno=16)\n plotfigure.show = False\n plotfigure.kwargs = {'figsize':(14,4)}\n \n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Current'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = contour_afteraxes\n \n # Surface\n plotitem = plotaxes.new_plotitem(plot_type='2d_contour')\n plotitem.plot_var = ml_plot.water_speed_depth_ave\n plotitem.kwargs = {'linewidths':1}\n # plotitem.contour_levels = [1.0,2.0,3.0,4.0,5.0,6.0]\n plotitem.contour_levels = [0.5,1.5,3,4.5,6.0]\n plotitem.amr_contour_show = [1,1,1]\n plotitem.amr_celledges_show = [0,0,0]\n plotitem.amr_patchedges_show = [1,1,1]\n plotitem.amr_contour_colors = 'k'\n # plotitem.amr_contour_colors = ['r','k','b'] # color on each level\n # plotitem.amr_grid_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']\n plotitem.show = True \n \n # Land\n plotitem = plotaxes.new_plotitem(plot_type='2d_contour')\n plotitem.plot_var = geoplot.land\n plotitem.contour_nlevels = 40\n plotitem.contour_min = 0.0\n plotitem.contour_max = 100.0\n plotitem.amr_contour_colors = ['g'] # color on each level\n plotitem.amr_patch_bgcolor = ['#ffeeee', 
'#eeeeff', '#eeffee']\n plotitem.amr_celledges_show = 0\n plotitem.amr_patchedges_show = 0\n plotitem.show = True\n\n # ========================================================================\n # Grid Cells\n # ========================================================================\n \n # Figure for grid cells\n plotfigure = plotdata.new_plotfigure(name='cells', figno=2)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.title = 'Grid patches'\n plotaxes.scaled = True\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='2d_patch')\n plotitem.amr_patch_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']\n plotitem.amr_celledges_show = [0,0,0]\n plotitem.amr_patchedges_show = [1,1,1]\n \n # ========================================================================\n # Vorticity Plot\n # ========================================================================\n # plotfigure = plotdata.new_plotfigure(name='vorticity',figno=17)\n # plotfigure.show = False\n # plotaxes = plotfigure.new_plotaxes()\n # plotaxes.title = \"Vorticity\"\n # plotaxes.scaled = True\n # plotaxes.xlimits = xlimits\n # plotaxes.ylimits = ylimits\n # plotaxes.afteraxes = pcolor_afteraxes\n # \n # # Vorticity\n # plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')\n # plotitem.plot_var = 9\n # plotitem.imshow_cmap = plt.get_cmap('PRGn')\n # # plotitem.pcolor_cmap = plt.get_cmap('PuBu')\n # # plotitem.pcolor_cmin = 0.0\n # # plotitem.pcolor_cmax = 6.0\n # plotitem.imshow_cmin = -1.e-2\n # plotitem.imshow_cmax = 1.e-2\n # plotitem.add_colorbar = True\n # plotitem.amr_celledges_show = [0,0,0]\n # plotitem.amr_patchedges_show = [1]\n # \n # # Land\n # plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n # plotitem.plot_var = geoplot.land\n # plotitem.pcolor_cmap = geoplot.land_colors\n # plotitem.pcolor_cmin = 0.0\n # plotitem.pcolor_cmax = 80.0\n # plotitem.add_colorbar = False\n # plotitem.amr_celledges_show = [0,0,0]\n\n # ========================================================================\n # Figures for gauges\n\n # Top\n plotfigure = plotdata.new_plotfigure(name='Surface & topo',\n type='each_gauge',\n figno=301)\n plotfigure.show = True\n plotfigure.clf_each_gauge = True\n plotfigure.kwargs = {'figsize': (14, 4)}\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 'subplot(1, 2, 1)'\n plotaxes.xlimits = [0.0, 1.0]\n plotaxes.ylimits = top_surface_limits\n plotaxes.title = 'Top Surface'\n\n # Plot surface as blue curve:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 6\n plotitem.plotstyle = 'b-'\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 'subplot(1, 2, 2)'\n plotaxes.xlimits = [0.0, 1.0]\n plotaxes.ylimits = internal_surface_limits\n plotaxes.title = 'Bottom Surface'\n\n # Plot surface as blue curve:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 7\n plotitem.plotstyle = 'b-'\n\n # =========================================================================\n # Other plots\n\n # Gauge Locations - Enable to see where gauges are located\n def locations_afteraxes(current_data, gaugenos='all'):\n gaugetools.plot_gauge_locations(current_data.plotdata,\n gaugenos=gaugenos,\n format_string='kx',\n add_labels=True)\n pcolor_afteraxes(current_data)\n\n plotfigure = plotdata.new_plotfigure(name='Gauge Locations')\n plotfigure.show = False\n 
plotfigure.kwargs = {'figsize': (14, 4)}\n\n # Top surface\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Top Surface'\n plotaxes.axescmd = 'subplot(1, 2, 1)'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = locations_afteraxes\n ml_plot.add_surface_elevation(plotaxes, 1, bounds=top_surface_limits)\n ml_plot.add_land(plotaxes, 1)\n\n # Bottom surface\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.title = 'Internal Surface'\n plotaxes.axescmd = 'subplot(1, 2, 2)'\n plotaxes.scaled = True\n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ylimits\n plotaxes.afteraxes = locations_afteraxes\n ml_plot.add_surface_elevation(plotaxes, 2, bounds=internal_surface_limits)\n ml_plot.add_land(plotaxes, 2)\n\n # -----------------------------------------\n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via pyclaw.plotters.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.latex = False # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n plotdata.parallel = True # make multiple frame png's at once\n\n return plotdata\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.cos", "numpy.sin" ] ]
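The transform_c2p/transform_p2c pair in the setplot entry above places the bathymetry jump line: c2p translates a computational point by (x0, y0) and then rotates it by theta, and p2c applies the inverse rotation followed by the inverse translation. A standalone round-trip check (the two functions are copied from the entry; the test values are illustrative):

import numpy as np

def transform_c2p(x, y, x0, y0, theta):
    return ((x + x0) * np.cos(theta) - (y + y0) * np.sin(theta),
            (x + x0) * np.sin(theta) + (y + y0) * np.cos(theta))

def transform_p2c(x, y, x0, y0, theta):
    return (x * np.cos(theta) + y * np.sin(theta) - x0,
            -x * np.sin(theta) + y * np.cos(theta) - y0)

x0, y0, theta = 0.15, 0.0, np.pi / 8
xp, yp = transform_c2p(0.3, -0.2, x0, y0, theta)
xc, yc = transform_p2c(xp, yp, x0, y0, theta)
assert np.allclose([xc, yc], [0.3, -0.2])  # p2c undoes c2p exactly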
astro-projects/astro
[ "7fa0404fc690569ff85e379ecca54778f09a9333" ]
[ "src/astro/files/types/csv.py" ]
[ "import io\n\nimport pandas as pd\n\nfrom astro.constants import FileType as FileTypeConstants\nfrom astro.files.types.base import FileType\n\n\nclass CSVFileType(FileType):\n \"\"\"Concrete implementation to handle CSV file type\"\"\"\n\n def export_to_dataframe(self, stream, **kwargs) -> pd.DataFrame:\n \"\"\"read csv file from one of the supported locations and return dataframe\n\n :param stream: file stream object\n \"\"\"\n return pd.read_csv(stream, **kwargs)\n\n def create_from_dataframe(self, df: pd.DataFrame, stream: io.TextIOWrapper) -> None:\n \"\"\"Write csv file to one of the supported locations\n\n :param df: pandas dataframe\n :param stream: file stream object\n \"\"\"\n df.to_csv(stream, index=False)\n\n @property\n def name(self):\n return FileTypeConstants.CSV\n" ]
[ [ "pandas.read_csv" ] ]
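A hedged, standalone round trip of what CSVFileType in the entry above delegates to pandas (the in-memory io.StringIO stream is an assumption for the demo; in the package itself the streams come from the supported file locations):

import io

import pandas as pd

df = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
buf = io.StringIO()
df.to_csv(buf, index=False)   # the create_from_dataframe path
buf.seek(0)
df2 = pd.read_csv(buf)        # the export_to_dataframe path
pd.testing.assert_frame_equal(df, df2)  # the frame survives the round trip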
jenish-cj/botnlufoodrest
[ "b41aa2c7a1f6e492e10f07e67562b612b5b13a53" ]
[ "rasa_core/policies/trainer.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nimport numpy as np\nfrom builtins import object\n\nfrom rasa_core.domain import check_domain_sanity\nfrom rasa_core.interpreter import RegexInterpreter\n\nlogger = logging.getLogger(__name__)\n\n\nclass PolicyTrainer(object):\n def __init__(self, ensemble, domain, featurizer):\n self.domain = domain\n self.ensemble = ensemble\n self.featurizer = featurizer\n\n def train(self, filename=None, max_history=3,\n augmentation_factor=20, max_training_samples=None,\n max_number_of_trackers=2000, remove_duplicates=True, **kwargs):\n \"\"\"Trains a policy on a domain using training data from a file.\n\n :param augmentation_factor: how many stories should be created by\n randomly concatenating stories\n :param filename: story file containing the training conversations\n :param max_history: number of past actions to consider for the\n prediction of the next action\n :param max_training_samples: specifies how many training samples to\n train on - `None` to use all examples\n :param max_number_of_trackers: limits the tracker generation during\n story file parsing - `None` for unlimited\n :param remove_duplicates: remove duplicates from the training set before\n training\n :param kwargs: additional arguments passed to the underlying ML trainer\n (e.g. keras parameters)\n :return: trained policy\n \"\"\"\n\n logger.debug(\"Policy trainer got kwargs: {}\".format(kwargs))\n check_domain_sanity(self.domain)\n\n X, y = self._prepare_training_data(filename, max_history,\n augmentation_factor,\n max_training_samples,\n max_number_of_trackers,\n remove_duplicates)\n\n self.ensemble.train(X, y, self.domain, self.featurizer, **kwargs)\n\n def _prepare_training_data(self, filename, max_history, augmentation_factor,\n max_training_samples=None,\n max_number_of_trackers=2000,\n remove_duplicates=True):\n \"\"\"Reads training data from file and prepares it for the training.\"\"\"\n\n from rasa_core.training_utils import extract_training_data_from_file\n\n if filename:\n X, y = extract_training_data_from_file(\n filename,\n augmentation_factor=augmentation_factor,\n max_history=max_history,\n remove_duplicates=remove_duplicates,\n domain=self.domain,\n featurizer=self.featurizer,\n interpreter=RegexInterpreter(),\n max_number_of_trackers=max_number_of_trackers)\n if max_training_samples is not None:\n X = X[:max_training_samples, :]\n y = y[:max_training_samples]\n else:\n X = np.zeros((0, self.domain.num_features))\n y = np.zeros(self.domain.num_actions)\n return X, y\n" ]
[ [ "numpy.zeros" ] ]
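A small standalone illustration (dummy arrays, not the rasa_core story reader) of the max_training_samples cap in _prepare_training_data above: X and y are truncated to the same leading rows, so every retained feature row keeps its action label.

import numpy as np

X = np.arange(12).reshape(6, 2)   # 6 training samples, 2 features each
y = np.arange(6)                  # one action label per sample
max_training_samples = 4
X = X[:max_training_samples, :]
y = y[:max_training_samples]
assert X.shape[0] == y.shape[0] == 4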
erikvdp/boruta_py
[ "e04d1a17de142679eabebebd8cedde202587fbf1" ]
[ "boruta/boruta_py.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: Daniel Homola <[email protected]>\n\nOriginal code and method by: Miron B Kursa, https://m2.icm.edu.pl/boruta/\n\nLicense: BSD 3 clause\n\"\"\"\n\nfrom __future__ import print_function, division\nimport numpy as np\nimport scipy as sp\nfrom sklearn.utils import check_random_state, check_X_y\nfrom sklearn.base import TransformerMixin, BaseEstimator\nimport warnings\n\n\nclass BorutaPy(BaseEstimator, TransformerMixin):\n    \"\"\"\n    Improved Python implementation of the Boruta R package.\n\n    The improvements of this implementation include:\n    - Faster run times:\n        Thanks to scikit-learn's fast implementation of the ensemble methods.\n    - Scikit-learn like interface:\n        Use BorutaPy just like any other scikit learner: fit, fit_transform and\n        transform are all implemented in a similar fashion.\n    - Modularity:\n        Any ensemble method could be used: random forest, extra trees\n        classifier, even gradient boosted trees.\n    - Two step correction:\n        The original Boruta code corrects for multiple testing in an overly\n        conservative way. In this implementation, the Benjamini Hochberg FDR is\n        used to correct in each iteration across active features. This means\n        only those features are included in the correction which are still in\n        the selection process. Following this, each feature that passed goes\n        through a regular Bonferroni correction to check for the repeated\n        testing over the iterations.\n    - Percentile:\n        Instead of using the max values of the shadow features the user can\n        specify which percentile to use. This gives a finer control over this\n        crucial parameter. For more info, please read about the perc parameter.\n    - Automatic tree number:\n        Setting the n_estimators to 'auto' will calculate the number of trees\n        in each iteration based on the number of features under investigation.\n        This way more trees are used when the training data has many features\n        and fewer when most of the features have been rejected.\n    - Ranking of features:\n        After fitting BorutaPy it provides the user with ranking of features.\n        Confirmed ones are 1, Tentatives are 2, and the rejected are ranked\n        starting from 3, based on their feature importance history through\n        the iterations.\n\n    We highly recommend using pruned trees with a depth between 3-7.\n\n    For more, see the docs of these functions, and the examples below.\n\n    Original code and method by: Miron B Kursa, https://m2.icm.edu.pl/boruta/\n\n    Boruta is an all relevant feature selection method, while most others are\n    minimal optimal; this means it tries to find all features carrying\n    information usable for prediction, rather than finding a possibly compact\n    subset of features on which some classifier has a minimal error.\n\n    Why bother with all relevant feature selection?\n    When you try to understand the phenomenon that made your data, you should\n    care about all factors that contribute to it, not just the bluntest signs\n    of it in context of your methodology (yes, minimal optimal set of features\n    by definition depends on your classifier choice).\n\n    Parameters\n    ----------\n\n    estimator : object\n        A supervised learning estimator, with a 'fit' method that returns the\n        feature_importances_ attribute. Important features must correspond to\n        high absolute values in the feature_importances_.\n\n    n_estimators : int or string, default = 1000\n        If int, sets the number of estimators in the chosen ensemble method.\n        If 'auto', this is determined automatically based on the size of the\n        dataset. The other parameters of the used estimators need to be set\n        with initialisation.\n\n    perc : int, default = 100\n        Instead of the max we use the percentile defined by the user, to pick\n        our threshold for comparison between shadow and real features. The max\n        tends to be too stringent. This provides a finer control over this. The\n        lower perc is the more false positives will be picked as relevant but\n        also the less relevant features will be left out. The usual trade-off.\n        The default is essentially the vanilla Boruta corresponding to the max.\n\n    alpha : float, default = 0.05\n        Level at which the corrected p-values will get rejected in both\n        correction steps.\n\n    two_step : Boolean, default = True\n        If you want to use the original implementation of Boruta with Bonferroni\n        correction only set this to False.\n\n    max_iter : int, default = 100\n        The number of maximum iterations to perform.\n\n    random_state : int, RandomState instance or None; default=None\n        If int, random_state is the seed used by the random number generator;\n        If RandomState instance, random_state is the random number generator;\n        If None, the random number generator is the RandomState instance used\n        by `np.random`.\n\n    verbose : int, default=0\n        Controls verbosity of output:\n        - 0: no output\n        - 1: displays iteration number\n        - 2: which features have been selected already\n\n    early_stopping : bool, default = False\n        Whether to use early stopping to terminate the selection process\n        before reaching `max_iter` iterations if the algorithm cannot\n        confirm a tentative feature for `n_iter_no_change` iterations.\n        This speeds up the process at the cost of a possibly worse result.\n\n    n_iter_no_change : int, default = 20\n        Ignored if `early_stopping` is False. The maximum amount of\n        iterations without confirming a tentative feature.\n\n    Attributes\n    ----------\n\n    n_features_ : int\n        The number of selected features.\n\n    support_ : array of shape [n_features]\n\n        The mask of selected features - only confirmed ones are True.\n\n    support_weak_ : array of shape [n_features]\n\n        The mask of selected tentative features, which haven't gained enough\n        support during the max_iter number of iterations.\n\n    ranking_ : array of shape [n_features]\n\n        The feature ranking, such that ``ranking_[i]`` corresponds to the\n        ranking position of the i-th feature. Selected (i.e., estimated\n        best) features are assigned rank 1 and tentative features are assigned\n        rank 2.\n\n    importance_history_ : array-like, shape [n_features, n_iters]\n\n        The calculated importance values for each feature across all iterations.\n\n    Examples\n    --------\n    \n    import pandas as pd\n    from sklearn.ensemble import RandomForestClassifier\n    from boruta import BorutaPy\n    \n    # load X and y\n    # NOTE BorutaPy accepts numpy arrays only, hence the .values attribute\n    X = pd.read_csv('examples/test_X.csv', index_col=0).values\n    y = pd.read_csv('examples/test_y.csv', header=None, index_col=0).values\n    y = y.ravel()\n    \n    # define random forest classifier, with utilising all cores and\n    # sampling in proportion to y labels\n    rf = RandomForestClassifier(n_jobs=-1, class_weight='balanced', max_depth=5)\n    \n    # define Boruta feature selection method\n    feat_selector = BorutaPy(rf, n_estimators='auto', verbose=2, random_state=1)\n    \n    # find all relevant features - 5 features should be selected\n    feat_selector.fit(X, y)\n    \n    # check selected features - first 5 features are selected\n    feat_selector.support_\n    \n    # check ranking of features\n    feat_selector.ranking_\n    \n    # call transform() on X to filter it down to selected features\n    X_filtered = feat_selector.transform(X)\n\n    References\n    ----------\n\n    [1] Kursa M., Rudnicki W., \"Feature Selection with the Boruta Package\"\n        Journal of Statistical Software, Vol. 36, Issue 11, Sep 2010\n    \"\"\"\n\n    def __init__(self, estimator, n_estimators=1000, perc=100, alpha=0.05,\n                 two_step=True, max_iter=100, random_state=None, verbose=0,\n                 early_stopping=False, n_iter_no_change=20):\n        self.estimator = estimator\n        self.n_estimators = n_estimators\n        self.perc = perc\n        self.alpha = alpha\n        self.two_step = two_step\n        self.max_iter = max_iter\n        self.random_state = random_state\n        self.verbose = verbose\n        self.early_stopping = early_stopping\n        self.n_iter_no_change = n_iter_no_change\n        self.__version__ = '0.3'\n        self._is_lightgbm = 'lightgbm' in str(type(self.estimator))\n\n    def fit(self, X, y):\n        \"\"\"\n        Fits the Boruta feature selection with the provided estimator.\n\n        Parameters\n        ----------\n        X : array-like, shape = [n_samples, n_features]\n            The training input samples.\n\n        y : array-like, shape = [n_samples]\n            The target values.\n        \"\"\"\n\n        return self._fit(X, y)\n\n    def transform(self, X, weak=False, return_df=False):\n        \"\"\"\n        Reduces the input X to the features selected by Boruta.\n\n        Parameters\n        ----------\n        X : array-like, shape = [n_samples, n_features]\n            The training input samples.\n\n        weak: boolean, default = False\n            If set to true, the tentative features are also used to reduce X.\n\n        return_df : boolean, default = False\n            If ``X`` is a pandas dataframe and this parameter is set to True,\n            the transformed data will also be a dataframe.\n\n        Returns\n        -------\n        X : array-like, shape = [n_samples, n_features_]\n            The input matrix X's columns are reduced to the features which were\n            selected by Boruta.\n        \"\"\"\n\n        return self._transform(X, weak, return_df)\n\n    def fit_transform(self, X, y, weak=False, return_df=False):\n        \"\"\"\n        Fits Boruta, then reduces the input X to the selected features.\n\n        Parameters\n        ----------\n        X : array-like, shape = [n_samples, n_features]\n            The training input samples.\n\n        y : array-like, shape = [n_samples]\n            The target values.\n\n        weak: boolean, default = False\n            If set to true, the tentative features are also used to reduce X.\n\n        return_df : boolean, default = False\n            If ``X`` is a pandas dataframe and this parameter is set to True,\n            the transformed data will also be a dataframe.\n\n        Returns\n        -------\n        X : array-like, shape = [n_samples, n_features_]\n            The input matrix X's columns are reduced to the features which were\n            selected by Boruta.\n        \"\"\"\n\n        self._fit(X, y)\n        return self._transform(X, weak, return_df)\n\n    def _validate_pandas_input(self, arg):\n        try:\n            return arg.values\n        except AttributeError:\n            raise ValueError(\n                \"input needs to be a numpy array or pandas data frame.\"\n            )\n\n    def _fit(self, X, y):\n        # check input params\n        self._check_params(X, y)\n\n        if not isinstance(X, np.ndarray):\n            X = self._validate_pandas_input(X)\n        if not isinstance(y, np.ndarray):\n            y = self._validate_pandas_input(y)\n\n        self.random_state = check_random_state(self.random_state)\n\n        early_stopping = False\n        if self.early_stopping:\n            if self.n_iter_no_change >= self.max_iter:\n                if self.verbose > 0:\n                    print(\n                        f\"n_iter_no_change is greater than or equal to max_iter \"\n                        f\"({self.n_iter_no_change} >= {self.max_iter}), \"\n                        f\"early stopping will not be used.\"\n                    )\n            else:\n                early_stopping = True\n\n        # setup variables for Boruta\n        n_sample, n_feat = X.shape\n        _iter = 1\n        # early stopping vars\n        _same_iters = 1\n        _last_dec_reg = None\n        # holds the decision about each feature:\n        # 0  - default state = tentative in original code\n        # 1  - accepted in original code\n        # -1 - rejected in original code\n        dec_reg = np.zeros(n_feat, dtype=int)\n        # counts how many times a given feature was more important than\n        # the best of the shadow features\n        hit_reg = np.zeros(n_feat, dtype=int)\n        # these record the history of the iterations\n        imp_history = np.zeros(n_feat, dtype=float)\n        sha_max_history = []\n\n        # set n_estimators\n        if self.n_estimators != 'auto':\n            self.estimator.set_params(n_estimators=self.n_estimators)\n\n        # main feature selection loop\n        while np.any(dec_reg == 0) and _iter < self.max_iter:\n            # find optimal number of trees and depth\n            if self.n_estimators == 'auto':\n                # number of features that aren't rejected\n                not_rejected = np.where(dec_reg >= 0)[0].shape[0]\n                n_tree = self._get_tree_num(not_rejected)\n                self.estimator.set_params(n_estimators=n_tree)\n\n            # make sure we start with a new tree in each iteration\n            if self._is_lightgbm:\n                self.estimator.set_params(random_state=self.random_state.randint(0, 10000))\n            else:\n                self.estimator.set_params(random_state=self.random_state)\n\n            # add shadow attributes, shuffle them and train estimator, get imps\n            cur_imp = self._add_shadows_get_imps(X, y, dec_reg)\n\n            # get the threshold of shadow importances we will use for rejection\n            imp_sha_max = np.percentile(cur_imp[1], self.perc)\n\n            # record importance history\n            sha_max_history.append(imp_sha_max)\n            imp_history = np.vstack((imp_history, cur_imp[0]))\n\n            # register which feature is more imp than the max of shadows\n            hit_reg = self._assign_hits(hit_reg, cur_imp, imp_sha_max)\n\n            # based on hit_reg we check if a feature is doing better than\n            # expected by chance\n            dec_reg = self._do_tests(dec_reg, hit_reg, _iter)\n\n            # print out confirmed features\n            if self.verbose > 0 and _iter < self.max_iter:\n                self._print_results(dec_reg, _iter, 0)\n            if _iter < self.max_iter:\n                _iter += 1\n\n            # early stopping\n            if early_stopping:\n                if _last_dec_reg is not None and (_last_dec_reg == dec_reg).all():\n                    _same_iters += 1\n                    if self.verbose > 0:\n                        print(\n                            f\"Early stopping: {_same_iters} out \"\n                            f\"of {self.n_iter_no_change}\"\n                        )\n                else:\n                    _same_iters = 1\n                    _last_dec_reg = dec_reg.copy()\n                if _same_iters > self.n_iter_no_change:\n                    break\n\n        # we automatically apply R package's rough fix for tentative ones\n        confirmed = np.where(dec_reg == 1)[0]\n        tentative = np.where(dec_reg == 0)[0]\n        # ignore the first row of zeros\n        tentative_median = np.median(imp_history[1:, tentative], axis=0)\n        # which tentative to keep\n        tentative_confirmed = np.where(tentative_median\n                                       > np.median(sha_max_history))[0]\n        tentative = tentative[tentative_confirmed]\n\n        # basic result variables\n        self.n_features_ = confirmed.shape[0]\n        self.support_ = np.zeros(n_feat, dtype=bool)\n        self.support_[confirmed] = 1\n        self.support_weak_ = np.zeros(n_feat, dtype=bool)\n        self.support_weak_[tentative] = 1\n\n        # ranking, confirmed variables are rank 1\n        self.ranking_ = np.ones(n_feat, dtype=int)\n        # tentative variables are rank 2\n        self.ranking_[tentative] = 2\n        # selected = confirmed and tentative\n        selected = np.hstack((confirmed, tentative))\n        # all rejected features are sorted by importance history\n        not_selected = np.setdiff1d(np.arange(n_feat), selected)\n        # large importance values should rank higher = lower ranks -> *(-1)\n        imp_history_rejected = imp_history[1:, not_selected] * -1\n\n        # update rank for not_selected features\n        if not_selected.shape[0] > 0:\n            # calculate ranks in each iteration, then median of ranks across feats\n            iter_ranks = self._nanrankdata(imp_history_rejected, axis=1)\n            rank_medians = np.nanmedian(iter_ranks, axis=0)\n            ranks = self._nanrankdata(rank_medians, axis=0)\n\n            # set smallest rank to 3 if there are tentative feats\n            if tentative.shape[0] > 0:\n                ranks = ranks - np.min(ranks) + 3\n            else:\n                # and 2 otherwise\n                ranks = ranks - np.min(ranks) + 2\n            self.ranking_[not_selected] = ranks\n        else:\n            # all are selected, thus we set feature supports to True\n            self.support_ = np.ones(n_feat, dtype=bool)\n\n        self.importance_history_ = imp_history\n\n        # notify user\n        if self.verbose > 0:\n            self._print_results(dec_reg, _iter, 1)\n        return self\n\n    def _transform(self, X, weak=False, return_df=False):\n        # sanity check\n        try:\n            self.ranking_\n        except AttributeError:\n            raise ValueError('You need to call the fit(X, y) method first.')\n\n        if weak:\n            indices = self.support_ + self.support_weak_\n        else:\n            indices = self.support_\n\n        if return_df:\n            X = X.iloc[:, indices]\n        else:\n            X = X[:, indices]\n        return X\n\n    def _get_tree_num(self, n_feat):\n        depth = None\n        try:\n            depth = self.estimator.get_params()['max_depth']\n        except KeyError:\n            warnings.warn(\n                \"The estimator does not have a max_depth property, as a result \"\n                \"the number of trees to use cannot be estimated automatically.\"\n            )\n        if depth is None:\n            depth = 10\n        # how many times a feature should be considered on average\n        f_repr = 100\n        # n_feat * 2 because the training matrix is extended with n shadow features\n        multi = ((n_feat * 2) / (np.sqrt(n_feat * 2) * depth))\n        n_estimators = int(multi * f_repr)\n        return n_estimators\n\n    def _get_imp(self, X, y):\n        try:\n            self.estimator.fit(X, y)\n        except Exception as e:\n            raise ValueError('Please check your X and y variable. The provided '\n                             'estimator cannot be fitted to your data.\\n' + str(e))\n        try:\n            imp = self.estimator.feature_importances_\n        except Exception:\n            raise ValueError('Only methods with feature_importance_ attribute '\n                             'are currently supported in BorutaPy.')\n        return imp\n\n    def _get_shuffle(self, seq):\n        self.random_state.shuffle(seq)\n        return seq\n\n    def _add_shadows_get_imps(self, X, y, dec_reg):\n        # find features that are tentative still\n        x_cur_ind = np.where(dec_reg >= 0)[0]\n        x_cur = np.copy(X[:, x_cur_ind])\n        x_cur_w = x_cur.shape[1]\n        # deep copy the matrix for the shadow matrix\n        x_sha = np.copy(x_cur)\n        # make sure there are at least 5 columns in the shadow matrix\n        while (x_sha.shape[1] < 5):\n            x_sha = np.hstack((x_sha, x_sha))\n        # shuffle xSha\n        x_sha = np.apply_along_axis(self._get_shuffle, 0, x_sha)\n        # get importance of the merged matrix\n        imp = self._get_imp(np.hstack((x_cur, x_sha)), y)\n        # separate importances of real and shadow features\n        imp_sha = imp[x_cur_w:]\n        imp_real = np.zeros(X.shape[1])\n        imp_real[:] = np.nan\n        imp_real[x_cur_ind] = imp[:x_cur_w]\n        return imp_real, imp_sha\n\n    def _assign_hits(self, hit_reg, cur_imp, imp_sha_max):\n        # register hits for features that did better than the best of shadows\n        cur_imp_no_nan = cur_imp[0]\n        cur_imp_no_nan[np.isnan(cur_imp_no_nan)] = 0\n        hits = np.where(cur_imp_no_nan > imp_sha_max)[0]\n        hit_reg[hits] += 1\n        return hit_reg\n\n    def _do_tests(self, dec_reg, hit_reg, _iter):\n        active_features = np.where(dec_reg >= 0)[0]\n        hits = hit_reg[active_features]\n        # get uncorrected p values based on hit_reg\n        to_accept_ps = sp.stats.binom.sf(hits - 1, _iter, .5).flatten()\n        to_reject_ps = sp.stats.binom.cdf(hits, _iter, .5).flatten()\n\n        if self.two_step:\n            # two-step multiple testing correction process\n            # first we correct for testing several features in each round using FDR\n            to_accept = self._fdrcorrection(to_accept_ps, alpha=self.alpha)[0]\n            to_reject = self._fdrcorrection(to_reject_ps, alpha=self.alpha)[0]\n\n            # second we correct for testing the same feature over and over again\n            # using bonferroni\n            to_accept2 = to_accept_ps <= self.alpha / float(_iter)\n            to_reject2 = to_reject_ps <= self.alpha / float(_iter)\n\n            # combine the two multi corrections, and get indexes\n            to_accept *= to_accept2\n            to_reject *= to_reject2\n        else:\n            # as in the original Boruta, we simply do bonferroni correction\n            # with the total n_feat in each iteration\n            to_accept = to_accept_ps <= self.alpha / float(len(dec_reg))\n            to_reject = to_reject_ps <= self.alpha / float(len(dec_reg))\n\n        # find features which are 0 and have been rejected or accepted\n        to_accept = np.where((dec_reg[active_features] == 0) * to_accept)[0]\n        to_reject = np.where((dec_reg[active_features] == 0) * to_reject)[0]\n\n        # updating dec_reg\n        dec_reg[active_features[to_accept]] = 1\n        dec_reg[active_features[to_reject]] = -1\n        return dec_reg\n\n    def _fdrcorrection(self, pvals, alpha=0.05):\n        \"\"\"\n        Benjamini/Hochberg p-value correction for false discovery rate, from\n        statsmodels package. Included here for decoupling dependency on statsmodels.\n\n        Parameters\n        ----------\n        pvals : array_like\n            set of p-values of the individual tests.\n        alpha : float\n            error rate\n\n        Returns\n        -------\n        rejected : array, bool\n            True if a hypothesis is rejected, False if not\n        pvalue-corrected : array\n            pvalues adjusted for multiple hypothesis testing to limit FDR\n        \"\"\"\n        pvals = np.asarray(pvals)\n        pvals_sortind = np.argsort(pvals)\n        pvals_sorted = np.take(pvals, pvals_sortind)\n        nobs = len(pvals_sorted)\n        ecdffactor = np.arange(1, nobs + 1) / float(nobs)\n\n        reject = pvals_sorted <= ecdffactor * alpha\n        if reject.any():\n            rejectmax = max(np.nonzero(reject)[0])\n            reject[:rejectmax] = True\n\n        pvals_corrected_raw = pvals_sorted / ecdffactor\n        pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n        pvals_corrected[pvals_corrected > 1] = 1\n        # reorder p-values and rejection mask to original order of pvals\n        pvals_corrected_ = np.empty_like(pvals_corrected)\n        pvals_corrected_[pvals_sortind] = pvals_corrected\n        reject_ = np.empty_like(reject)\n        reject_[pvals_sortind] = reject\n        return reject_, pvals_corrected_\n\n    def _nanrankdata(self, X, axis=1):\n        \"\"\"\n        Replaces bottleneck's nanrankdata with scipy and numpy alternative.\n        \"\"\"\n        ranks = sp.stats.mstats.rankdata(X, axis=axis)\n        ranks[np.isnan(X)] = np.nan\n        return ranks\n\n    def _check_params(self, X, y):\n        \"\"\"\n        Check hyperparameters as well as X and y before proceeding with fit.\n        \"\"\"\n        # check X and y are consistent len, X is Array and y is column\n        X, y = check_X_y(X, y)\n        if self.perc <= 0 or self.perc > 100:\n            raise ValueError('The percentile should be between 0 and 100.')\n\n        if self.alpha <= 0 or self.alpha > 1:\n            raise ValueError('Alpha should be between 0 and 1.')\n\n    def _print_results(self, dec_reg, _iter, flag):\n        n_iter = str(_iter) + ' / ' + str(self.max_iter)\n        n_confirmed = np.where(dec_reg == 1)[0].shape[0]\n        n_rejected = np.where(dec_reg == -1)[0].shape[0]\n        cols = ['Iteration: ', 'Confirmed: ', 'Tentative: ', 'Rejected: ']\n\n        # still in feature selection\n        if flag == 0:\n            n_tentative = np.where(dec_reg == 0)[0].shape[0]\n            content = map(str, [n_iter, n_confirmed, n_tentative, n_rejected])\n            if self.verbose == 1:\n                output = cols[0] + n_iter\n            elif self.verbose > 1:\n                output = '\\n'.join([x[0] + '\\t' + x[1] for x in zip(cols, content)])\n\n        # Boruta finished running and tentatives have been filtered\n        else:\n            n_tentative = np.sum(self.support_weak_)\n            n_rejected = np.sum(~(self.support_|self.support_weak_))\n            content = map(str, [n_iter, n_confirmed, n_tentative, n_rejected])\n            result = '\\n'.join([x[0] + '\\t' + x[1] for x in zip(cols, content)])\n            output = \"\\n\\nBorutaPy finished running.\\n\\n\" + result\n        print(output)\n" ]
[ [ "numpy.nanmedian", "numpy.take", "numpy.sqrt", "numpy.asarray", "numpy.minimum.accumulate", "numpy.any", "numpy.where", "numpy.hstack", "sklearn.utils.check_X_y", "numpy.empty_like", "numpy.arange", "numpy.copy", "numpy.apply_along_axis", "scipy.stats.mstats.rankdata", "numpy.zeros", "scipy.stats.binom.sf", "numpy.nonzero", "numpy.min", "numpy.isnan", "numpy.median", "scipy.stats.binom.cdf", "numpy.argsort", "numpy.sum", "numpy.percentile", "numpy.ones", "sklearn.utils.check_random_state", "numpy.vstack" ] ]
AdityaMate/collapsing_bandits
[ "2aecccc6fd986f869088438ea5eba7bbfd5c1e91" ]
[ "code/compute_exact_yundi_infinite.py" ]
[ "from __future__ import print_function\nimport numpy as np \nimport argparse\nfrom itertools import product, combinations\nfrom functools import reduce\nfrom tqdm import tqdm\nimport subprocess\nimport os\nimport multiprocessing\nimport platform\n# import special_pomdp\n\nimport traceback, functools, multiprocessing\n \ndef trace_unhandled_exceptions(func):\n @functools.wraps(func)\n def wrapped_func(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except:\n print ('Exception in '+func.__name__)\n traceback.print_exc()\n return wrapped_func\n\n\n# path constants\npomdp_file_folder = 'pomdp_files'\noutput_folder = 'pg'\n\n# Defaults, probably won't work though.\ndespot_solver_path = 'pomdpx'\npomdp_solve_path = 'pomdp-solve'\n# CPLEX_PATH = '/Applications/CPLEX_Studio128/cplex/bin/x86-64_osx'\n\nif platform.release() == '3.10.0-957.12.1.el7.x86_64': # cannon\n\tdespot_solver_path = '/n/home10/jkillian/.local/bin/despot'\n\tpomdp_solve_path = '/n/home10/jkillian/.local/bin/pomdp-solve'\n\nelif platform.release() == '4.15.0-64-generic': # deathstar\n\tdespot_solver_path = '/home/jkillian/.local/bin/despot'\n\tpomdp_solve_path = '/home/jkillian/.local/bin/pomdp-solve'\n\t\nelif platform.release() == '18.6.0': # Jack Macbook 2019\n\tdespot_solver_path = 'pomdpx'\n\tpomdp_solve_path = 'pomdp-solve'\n\t# CPLEX_PATH = '/Applications/CPLEX_Studio128/cplex/bin/x86-64_osx'\n\nif not os.path.exists(pomdp_file_folder):\n os.makedirs(pomdp_file_folder)\n\nif not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n\n# @trace_unhandled_exceptions\ndef yundi_special_worker(args):\n \"\"\"This function will be called by each thread.\n This function can not be a class method.\n \"\"\"\n # Expand list of args into named args.\n mid, candidate, T, belief, ind, days_remaining = args\n del args\n\n if candidate:\n # fname, a, obs_space = make_pomdp_given_cost(mid, T, ind, discount=1.)\n # print(T)\n initial_belief = np.array([1-belief, belief])\n # solverPath = CPLEX_PATH\n # optimal_action = optimal_strategy(mid, days_remaining, solverPath, T, initial_belief)\n optimal_action = optimal_strategy(mid, days_remaining, T, initial_belief)\n optimal_results[ind] = optimal_action\n\n if (optimal_action == 1):\n # Serial-only Portion\n with trueCount.get_lock():\n trueCount.value += 1\n \n\ndef optimal_strategy(subsidy, roundLeft, T, belief):\n\n\tnum_states = T.shape[0]\n\tbestAction = POMDP_solve(subsidy, roundLeft, T)\n\n\tresult = -1;\n\tmaxSum = -(2**31)\n\tfor temp in bestAction.keys():\n\t\tsum = 0\n\t\tfor i in range(num_states):\n\t\t\tsum += temp[i] * belief[i];\n\t\t\n\t\tif (sum > maxSum):\n\t\t\tmaxSum = sum\n\t\t\tresult = bestAction[temp]\n\t\t\n\t\n\n\tif (result == 1):\n\t\treturn 1;\n\telif result == 0:\n\t\treturn 0;\n\telse:\n\t\tprint('error!')\n\t\treturn 0\n\t\n\t\n\ndef POMDP_solve(subsidy, roundLeft, T, beta=0.9):\n\n\tn = 1\n\n\tnum_states = 2\n\tnum_observations = 2\n\tnum_actions = 2\n\n\n\tO1 = [\n\t\t[1.0, 0.0],\n\t\t[0.0, 1.0]\n\t]\n\n\n\tPomdpO = np.zeros((num_states, num_actions, num_observations + 1));\n\tPomdpR = np.zeros((num_states, num_actions));\n\n\tfor state in range(num_states):\n\t\tPomdpO[state][0][num_observations] = 1;\n\t\tPomdpO[state][1][num_observations] = 0;\n\t\tfor observation in range(num_observations):\n\t\t\tPomdpO[state][0][observation] = 0;\n\t\t\tPomdpO[state][1][observation] = O1[state][observation];\n\t\t\n\t\n\n\tfor state in range(num_states):\n\t\tPomdpR[state][0] = subsidy+state;\n\t\tPomdpR[state][1] = state;\n\t# 
print('pomdpr',PomdpR)\n\t\t\n\t\n\tepsilon = 0.2;\n\t# print('hey')\n\tpomdp_object = special_pomdp.pomdpSolver(num_states, num_observations + 1, num_actions,\n\t\t\tbeta, T, PomdpO, PomdpR, roundLeft, epsilon);\n\tpomdp_object.solve();\n\t# // pomdp.solverSolve(solverPath);\n\treturn pomdp_object.bestAction;\n\n\ndef tup_print(tups, joiner='', f=None):\n tup_strs = [joiner.join(list(map(make_char, tup))) for tup in tups]\n # print(' '.join(tup_strs),file=f)\n return tup_strs\n\ndef make_char(x):\n return chr(x+65)\n\ndef yundi_whittle_exact(T, belief, beta, solver='normal'):\n\n upper = 2\n lower = 0\n\n optimal_action = 0\n\n gap = 10000\n gap_tol = 1e-4 \n while gap > gap_tol:\n\n mid = (upper + lower) / 2.0;\n #print('mid',mid)\n trueCount = 0\n # Spawn up to 9999999 jobs, I think this is the maximum possible.\n # I do not know what happens if you exceed this.\n if solver == 'normal':\n\n fname, a, obs_space = make_pomdp_given_cost(mid, T, 0, discount=beta)\n # print(T)\n initial_belief = belief\n if True:\n initial_belief = np.array([1-belief, belief])\n\n optimal_action = solve_given_cost(fname, initial_belief, a)\n \n if optimal_action == 0: \n upper = mid;\n\n else:\n lower = mid\n gap = upper - lower\n\n \n return (upper + lower) / 2.0\n\n\n\n\ndef print_POMDP_given_cost(combined_action_space, combined_state_space, combined_observation_space,\n\t\tT_matrices, O_matrices, R, C,\n\t\t pomdp_filename, discount):\n\t\n\t# fname = os.path.join(root,pomdp_file_folder)\n\t# fname = os.path.join(fname, pomdp_filename)\n\tfname = os.path.join(pomdp_file_folder, pomdp_filename)\n\tfout = open(fname, 'w')\n\n\tprint('discount: %.2f'%discount,file=fout)\n\tprint('values: reward',file=fout)\n\tprint('actions: ',end='', file=fout)\n\taction_space_strs = tup_print(combined_action_space)\n\tprint(' '.join(action_space_strs),file=fout)\n\tprint('states: ', end='', file=fout); \n\tstate_space_strs = tup_print(combined_state_space)\n\tprint(' '.join(state_space_strs),file=fout)\n\tprint('observations: ', end='', file=fout); \n\tobservation_space_strs = tup_print(combined_observation_space)\n\tprint(' '.join(observation_space_strs),file=fout)\n\tprint(file=fout)\n\n\tfor i, action in enumerate(action_space_strs):\n\t\tprint('T:%s'%action,file=fout)\n\t\tfor row in T_matrices[i]:\n\t\t\tprint(' '.join(list(map(str, row))), file=fout)\n\t\tprint(file=fout)\n\n\tfor i, action in enumerate(action_space_strs):\n\t\tprint('O:%s'%action,file=fout)\n\t\tfor row in O_matrices[i]:\n\t\t\tprint(' '.join(list(map(str, row))), file=fout)\n\t\tprint(file=fout)\n\n\tfor i,state in enumerate(state_space_strs):\n\t\tfor j, action in enumerate(action_space_strs):\n\t\t# print('R:* : * : %s : * %i' % (state, R[i]), file=fout)\n\t\t\tr = R[i]\n\t\t\t# If we don't call, we get C subsidy\n\t\t\tif j == 0:\n\t\t\t\tr = R[i] + C\n\t\t\tprint('R:%s : %s : * : * %.4f' % (action, state, r), file=fout)\n\t\t\t# R: <action> : <start-state> : <end-state> : <observation> %f\n\n\tfout.close()\n\n\ndef make_pomdp_given_cost(C, T, ind, discount=0.95):\n\n\tn = 1\n\n\tnum_states = 2\n\tnum_observations = 3\n\n\n\tstates_per_patient = np.arange(num_states)\n\tcombined_state_space = list(product(states_per_patient, repeat=n))\n\t# print('State Space')\n\t# print(combined_state_space)\n\t# print()\n\n\tpatient_indices = np.arange(n)\n\tcombined_action_space = list(combinations(patient_indices, 1))\n\tcombined_action_space = [(0,),(1,)]\n\n\t# print('Action Space')\n\t# print(combined_action_space)\n\t# 
print()\n\n\tobservations_per_patient = np.arange(num_observations)\n\tcombined_observation_space = list(product(observations_per_patient, repeat=n))\n\t\n\n\tT_matrices = T # first one should be no action, second is action\n\n\n\tO0 = [\n\t\t[1.0, 0.0, 0.0],\n\t\t[1.0, 0.0, 0.0]\n\t]\n\n\tO1 = [\n\t\t[0.0, 1.0, 0.0],\n\t\t[0.0, 0.0, 1.0]\n\t]\n\n\td = {\n\t\t0: O0,\n\t\t1: O1\n\t}\n\n\tO_matrices = [O0, O1]\n\n\n\n\n\t# R: <action> : <start-state> : <end-state> : <observation> %f\n\t# so do \n\t# and compute for all end states such that it is the sum of the 1's in the end state\n\t# R: * : * : <end-state> : * %f\n\tR = [sum(x) for x in combined_state_space]\n\n\n\tpomdp_filename = 'single_patient_pomdp_patient=%s_c=%s.POMDP' %(ind,C)\n\tprint_POMDP_given_cost(combined_action_space, combined_state_space, combined_observation_space,\n\t\tT_matrices, O_matrices, R, C, pomdp_filename = pomdp_filename, \n\t\tdiscount=discount)\n\treturn pomdp_filename, combined_action_space, combined_observation_space\n\n\ndef solve_given_cost(fname, initial_belief, action_space):\n\n\toutname = os.path.join(output_folder, fname)\n\t# outname = os.path.join(root, outname)\n\n\tfname = os.path.join(pomdp_file_folder, fname)\n\t# fname = os.path.join(root, fname)\n\n\tsubprocess.check_output([pomdp_solve_path, '-pomdp', fname, '-o', outname])\n\n\talpha_fname = outname+'.alpha'\n\tpg_fname = outname+'.pg'\n\n\tpg_d = {}\n\tpg_f = open(pg_fname,'r')\n\tfor line in pg_f:\n\t\tline = line.strip().split()\n\t\t# print(line)\n\t\t# ['0', '1', '-', '19', '19', '-', '-', '-', '-', '-', '-']\n\t\tnode_num = int(line[0])\n\n\t\taction = int(line[1])\n\t\t\n\t\tobs_list = line[2:]\n\t\tobs_list = [int(x) if x!='-' else -1 for x in obs_list]\n\t\tpg_d[node_num] = (action, obs_list)\n\n\tpg_f.close()\n\n\talpha_list = []\n\talpha_f = open(alpha_fname, 'r')\n\tfor i,line in enumerate(alpha_f):\n\t\tif i%3 == 1:\n\t\t\tline = line.strip().split()\n\t\t\t# print(line)\n\t\t\tweights = np.array(list(map(float, line)))\n\t\t\talpha_list.append(weights)\n\talphas = np.array(alpha_list)\n\tstart_node = np.argmax(alphas.dot(initial_belief))\n\n\treturn pg_d[start_node][0]\nif __name__==\"__main__\":\n T = [[[0.8,.2],[0.15,0.85]],\n [[0.7,.3],[.10,0.9]]]\n T = np.array(T)\n belief = 0.5\n w = yundi_whittle_exact(T, belief, 140)\n\n\n\n\n\n\n\n" ]
[ [ "numpy.arange", "numpy.array", "numpy.zeros" ] ]
stonebig/holoviews
[ "d5270c30dd1af38a785452aeac2fbabbe528e892" ]
[ "holoviews/plotting/mpl/__init__.py" ]
[ "import os\nfrom distutils.version import LooseVersion\n\nfrom matplotlib import rc_params_from_file\nfrom matplotlib.colors import ListedColormap\n\nfrom ...core import Layout, Collator, GridMatrix, config\nfrom ...core.options import Cycle, Palette, Options\nfrom ...core.overlay import NdOverlay, Overlay\nfrom ...element import * # noqa (API import)\nfrom ..plot import PlotSelector\nfrom .annotation import * # noqa (API import)\nfrom .chart import * # noqa (API import)\nfrom .chart3d import * # noqa (API import)\nfrom .path import * # noqa (API import)\nfrom .plot import * # noqa (API import)\nfrom .raster import * # noqa (API import)\nfrom .tabular import * # noqa (API import)\nfrom . import pandas # noqa (API import)\nfrom . import seaborn # noqa (API import)\n\nfrom .renderer import MPLRenderer\n\n\nmpl_ge_150 = LooseVersion(mpl.__version__) >= '1.5.0'\n\n\ndef set_style(key):\n    \"\"\"\n    Select a style by name, e.g. set_style('default'). To revert to the\n    previous style use the key 'unset' or False.\n    \"\"\"\n    if key is None:\n        return\n    elif not key or key in ['unset', 'backup']:\n        if 'backup' in styles:\n            plt.rcParams.update(styles['backup'])\n        else:\n            raise Exception('No style backed up to restore')\n    elif key not in styles:\n        raise KeyError('%r not in available styles.' % key)\n    else:\n        path = os.path.join(os.path.dirname(__file__), styles[key])\n        new_style = rc_params_from_file(path, use_default_template=False)\n        styles['backup'] = dict(plt.rcParams)\n\n        plt.rcParams.update(new_style)\n\n\n# Define matplotlib based style cycles and Palettes\ndef get_color_cycle():\n    if mpl_ge_150:\n        cyl = mpl.rcParams['axes.prop_cycle']\n        # matplotlib 1.5 verifies that axes.prop_cycle *is* a cycler\n        # but no guarantee that there's a `color` key.\n        # so users could have a custom rcParams w/ no color...\n        try:\n            return [x['color'] for x in cyl]\n        except KeyError:\n            pass  # just return axes.color style below\n    return mpl.rcParams['axes.color_cycle']\n\n\nstyles = {'default': './default.mplstyle',\n          'default>1.5': './default1.5.mplstyle'}\n\nif config.style_17:\n    if mpl_ge_150:\n        set_style('default>1.5')\n    else:\n        set_style('default')\n    Cycle.default_cycles.update({'default_colors': get_color_cycle()})\nelse:\n    Cycle.default_cycles['default_colors'] = ['#30a2da', '#fc4f30', '#e5ae38',\n                                              '#6d904f', '#8b8b8b']\n\n# Define Palettes and cycles from matplotlib colormaps\nPalette.colormaps.update({cm: plt.get_cmap(cm) for cm in plt.cm.datad\n                          if 'spectral' not in cm and 'Vega' not in cm})\nlisted_cmaps = [cm for cm in Palette.colormaps.values() if isinstance(cm, ListedColormap)]\nCycle.default_cycles.update({cm.name: list(cm.colors) for cm in listed_cmaps})\n\nstyle_aliases = {'edgecolor': ['ec', 'ecolor'], 'facecolor': ['fc'],\n                 'linewidth': ['lw'], 'edgecolors': ['ec', 'edgecolor'],\n                 'size': ['s'], 'color': ['c'], 'markeredgecolor': ['mec'],\n                 'markeredgewidth': ['mew'], 'markerfacecolor': ['mfc'],\n                 'markersize': ['ms']}\n\nStore.renderers['matplotlib'] = MPLRenderer.instance()\n\nif len(Store.renderers) == 1:\n    Store.current_backend = 'matplotlib'\n\n# Defines a wrapper around GridPlot and RasterGridPlot\n# switching to RasterGridPlot if the plot only contains\n# Raster Elements\nBasicGridPlot = GridPlot\ndef grid_selector(grid):\n    raster_fn = lambda x: True if isinstance(x, Raster) else False\n    all_raster = all(grid.traverse(raster_fn, [Element]))\n    return 'RasterGridPlot' if all_raster else 'GridPlot'\n\nGridPlot = PlotSelector(grid_selector,\n                        plot_classes=[('GridPlot', BasicGridPlot),\n                                      
('RasterGridPlot', RasterGridPlot)])\n\n# Register default Elements\nStore.register({Curve: CurvePlot,\n Scatter: PointPlot,\n Bars: BarPlot,\n Histogram: HistogramPlot,\n Points: PointPlot,\n VectorField: VectorFieldPlot,\n ErrorBars: ErrorPlot,\n Spread: SpreadPlot,\n Spikes: SpikesPlot,\n BoxWhisker: BoxPlot,\n Area: AreaPlot,\n\n # General plots\n GridSpace: GridPlot,\n GridMatrix: GridPlot,\n NdLayout: LayoutPlot,\n Layout: LayoutPlot,\n AdjointLayout: AdjointLayoutPlot,\n\n # Element plots\n NdOverlay: OverlayPlot,\n Overlay: OverlayPlot,\n\n # Chart 3D\n Surface: SurfacePlot,\n Trisurface: TrisurfacePlot,\n Scatter3D: Scatter3DPlot,\n\n # Tabular plots\n ItemTable: TablePlot,\n Table: TablePlot,\n Collator: TablePlot,\n\n # Raster plots\n QuadMesh: QuadMeshPlot,\n Raster: RasterPlot,\n HeatMap: HeatMapPlot,\n Image: RasterPlot,\n RGB: RasterPlot,\n HSV: RasterPlot,\n\n # Annotation plots\n VLine: VLinePlot,\n HLine: HLinePlot,\n Arrow: ArrowPlot,\n Spline: SplinePlot,\n Text: TextPlot,\n\n # Path plots\n Contours: ContourPlot,\n Path: PathPlot,\n Box: PathPlot,\n Bounds: PathPlot,\n Ellipse: PathPlot,\n Polygons: PolygonPlot}, 'matplotlib', style_aliases=style_aliases)\n\n\nMPLPlot.sideplots.update({Histogram: SideHistogramPlot,\n GridSpace: GridPlot,\n Spikes: SideSpikesPlot,\n BoxWhisker: SideBoxPlot})\n\nif config.style_17:\n CurvePlot.show_grid = True\n SideHistogramPlot.show_grid = True\n PointPlot.show_grid = True\n\n MPLPlot.show_frame = True\n for framelesscls in [RasterGridPlot, GridPlot,\n AdjoinedPlot, Plot3D, CurvePlot, HistogramPlot]:\n framelesscls.show_frame = False\nelse:\n # Raster types, Path types and VectorField should have frames\n for framedcls in [VectorFieldPlot, ContourPlot, PathPlot, RasterPlot,\n QuadMeshPlot, HeatMapPlot, PolygonPlot]:\n framedcls.show_frame = True\n\n\noptions = Store.options(backend='matplotlib')\ndflt_cmap = 'hot' if config.style_17 else 'fire'\n# Default option definitions\n# Note: *No*short aliases here! 
e.g use 'facecolor' instead of 'fc'\n\n# Charts\noptions.Curve = Options('style', color=Cycle(), linewidth=2)\noptions.Scatter = Options('style', color=Cycle(), marker='o', cmap=dflt_cmap)\n\nif not config.style_17:\n options.Points = Options('plot', show_frame=True)\n\noptions.ErrorBars = Options('style', ecolor='k')\noptions.Spread = Options('style', facecolor=Cycle(), alpha=0.6, edgecolor='k', linewidth=0.5)\noptions.Bars = Options('style', ec='k', color=Cycle())\noptions.Histogram = Options('style', ec='k', facecolor=Cycle())\noptions.Points = Options('style', color=Cycle(), marker='o', cmap=dflt_cmap)\noptions.Scatter3D = Options('style', c=Cycle(), marker='o')\noptions.Scatter3D = Options('plot', fig_size=150)\noptions.Surface = Options('plot', fig_size=150)\noptions.Spikes = Options('style', color='black', cmap='fire')\noptions.Area = Options('style', facecolor=Cycle(), edgecolor='black')\noptions.BoxWhisker = Options('style', boxprops=dict(color='k', linewidth=1.5),\n whiskerprops=dict(color='k', linewidth=1.5))\n\n# Rasters\noptions.Image = Options('style', cmap=dflt_cmap, interpolation='nearest')\noptions.GridImage = Options('style', cmap=dflt_cmap, interpolation='nearest')\noptions.Raster = Options('style', cmap=dflt_cmap, interpolation='nearest')\noptions.QuadMesh = Options('style', cmap=dflt_cmap)\noptions.HeatMap = Options('style', cmap='RdYlBu_r', interpolation='nearest')\noptions.HeatMap = Options('plot', show_values=True, xticks=20, yticks=20)\noptions.RGB = Options('style', interpolation='nearest')\n# Composites\noptions.Layout = Options('plot', sublabel_format='{Alpha}')\noptions.GridMatrix = Options('plot', fig_size=160, shared_xaxis=True,\n shared_yaxis=True, xaxis=None, yaxis=None)\n\n# Annotations\noptions.VLine = Options('style', color=Cycle())\noptions.HLine = Options('style', color=Cycle())\nif config.style_17:\n options.Spline = Options('style', linewidth=2, edgecolor='r')\nelse:\n options.Spline = Options('style', edgecolor=Cycle())\n\noptions.Text = Options('style', fontsize=13)\noptions.Arrow = Options('style', color='k', linewidth=2, fontsize=13)\n# Paths\noptions.Contours = Options('style', color=Cycle())\noptions.Contours = Options('plot', show_legend=True)\noptions.Path = Options('style', color=Cycle())\n\nif config.style_17:\n options.Box = Options('style', color=Cycle())\n options.Bounds = Options('style', color=Cycle())\n options.Ellipse = Options('style', color=Cycle())\nelse:\n options.Box = Options('style', color='black')\n options.Bounds = Options('style', color='black')\n options.Ellipse = Options('style', color='black')\n options.Polygons = Options('style', facecolor=Cycle(), edgecolor='black')\n\n# Interface\noptions.TimeSeries = Options('style', color=Cycle())\n" ]
[ [ "matplotlib.rc_params_from_file" ] ]
TheFloe1995/correct-pose
[ "90c3c46ca50b07526a4df36645fa71572255a950" ]
[ "unit_tests/helpers.py" ]
[ "import torch.nn as nn\n\n\nclass DummyLoss(nn.Module):\n def forward(self, predictions, labels):\n return predictions.sum()\n\n\nclass DummyModel(nn.Module):\n def __init__(self):\n super(DummyModel, self).__init__()\n self.net = nn.Linear(63, 63)\n\n def forward(self, pose_batch):\n return self.net(pose_batch.reshape(-1, 63)).reshape(-1, 21, 3)\n\n def test(self, pose_batch):\n return pose_batch\n\n @property\n def device(self):\n return next(self.parameters()).device\n" ]
[ [ "torch.nn.Linear" ] ]
boldyshev/sutton
[ "6155f13491a859283ebb2154c978b2727a4c27af" ]
[ "chapter6/exercise6_9.py" ]
[ "#!/usr/bin/env python3\n\"\"\"Exercise 6.9, page 130\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom example6_5 import WindyGridworld, sarsa_windy\n\n\ndef plot_time_episodes(world_type, actions):\n dim = 10, 7\n start = 0, 3\n goal = 7, 3\n state_action_dim = *dim, len(actions)\n\n world = world_type(dim, actions, start, goal)\n q = np.zeros(state_action_dim)\n x, y = sarsa_windy(world, q)\n\n plt.plot(x, y, label=f'# actions {len(actions)}')\n\n\nif __name__ == '__main__':\n actions = [(0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (1, -1), (-1, -1), (-1, 1)]\n plot_time_episodes(WindyGridworld, actions)\n\n actions_halt = actions + [(0, 0)]\n plot_time_episodes(WindyGridworld, actions_halt)\n\n plt.xlabel('Time steps')\n plt.ylabel('Episodes')\n plt.legend()\n plt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
MarShin/pymarl2
[ "b6598cbd3ca044564270b6cdb1bb495173af7f4c" ]
[ "src/main.py" ]
[ "import numpy as np\nimport os\nimport collections\nfrom os.path import dirname, abspath, join\nfrom copy import deepcopy\nfrom sacred import Experiment, SETTINGS\nfrom sacred.observers import FileStorageObserver\nfrom sacred.utils import apply_backspaces_and_linefeeds\nimport sys\nimport torch as th\nfrom utils.logging import get_logger\nimport yaml\n\nfrom run import REGISTRY as run_REGISTRY\n\nSETTINGS['CAPTURE_MODE'] = \"fd\" # set to \"no\" if you want to see stdout/stderr in console\nlogger = get_logger()\n\nex = Experiment(\"pymarl\")\nex.logger = logger\nex.captured_out_filter = apply_backspaces_and_linefeeds\n\nresults_path = join(dirname(dirname(abspath(__file__))), \"results\")\n\n\[email protected]\ndef my_main(_run, _config, _log):\n # Setting the random seed throughout the modules\n config = config_copy(_config)\n np.random.seed(config[\"seed\"])\n th.manual_seed(config[\"seed\"])\n config['env_args']['seed'] = config[\"seed\"]\n \n # run\n run_REGISTRY[_config['run']](_run, config, _log)\n\ndef _get_config(params, arg_name, subfolder):\n config_name = None\n for _i, _v in enumerate(params):\n if _v.split(\"=\")[0] == arg_name:\n config_name = _v.split(\"=\")[1]\n del params[_i]\n break\n\n if config_name is not None:\n with open(os.path.join(os.path.dirname(__file__), \"config\", subfolder, \"{}.yaml\".format(config_name)), \"r\") as f:\n try:\n config_dict = yaml.load(f)\n except yaml.YAMLError as exc:\n assert False, \"{}.yaml error: {}\".format(config_name, exc)\n return config_dict\n\n\ndef recursive_dict_update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = recursive_dict_update(d.get(k, {}), v)\n else:\n d[k] = v\n return d\n\n\ndef config_copy(config):\n if isinstance(config, dict):\n return {k: config_copy(v) for k, v in config.items()}\n elif isinstance(config, list):\n return [config_copy(v) for v in config]\n else:\n return deepcopy(config)\n\n\ndef parse_command(params, key, default):\n result = default\n for _i, _v in enumerate(params):\n if _v.split(\"=\")[0].strip() == key:\n result = _v[_v.index('=')+1:].strip()\n break\n return result\n\n\nif __name__ == '__main__':\n params = deepcopy(sys.argv)\n\n # Get the defaults from default.yaml\n with open(os.path.join(os.path.dirname(__file__), \"config\", \"default.yaml\"), \"r\") as f:\n try:\n config_dict = yaml.load(f)\n except yaml.YAMLError as exc:\n assert False, \"default.yaml error: {}\".format(exc)\n\n # Load algorithm and env base configs\n env_config = _get_config(params, \"--env-config\", \"envs\")\n alg_config = _get_config(params, \"--config\", \"algs\")\n # config_dict = {**config_dict, **env_config, **alg_config}\n config_dict = recursive_dict_update(config_dict, env_config)\n config_dict = recursive_dict_update(config_dict, alg_config)\n\n # now add all the config to sacred\n ex.add_config(config_dict)\n\n # Save to disk by default for sacred\n map_name = parse_command(params, \"env_args.map_name\", config_dict['env_args']['map_name'])\n algo_name = parse_command(params, \"name\", config_dict['name']) \n file_obs_path = join(results_path, \"sacred\", map_name, algo_name)\n \n logger.info(\"Saving to FileStorageObserver in {}.\".format(file_obs_path))\n ex.observers.append(FileStorageObserver.create(file_obs_path))\n\n ex.run_commandline(params)\n\n # flush\n sys.stdout.flush()\n" ]
[ [ "torch.manual_seed", "numpy.random.seed" ] ]
baranshad/models
[ "aaf008855e9764f32d974e86f8e1f9cfddfafd9a" ]
[ "research/object_detection/core/freezable_batch_norm_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.core.freezable_batch_norm.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom object_detection.core import freezable_batch_norm\n\n\nclass FreezableBatchNormTest(tf.test.TestCase):\n \"\"\"Tests for FreezableBatchNorm operations.\"\"\"\n\n def _build_model(self, training=None):\n model = tf.keras.models.Sequential()\n norm = freezable_batch_norm.FreezableBatchNorm(training=training,\n input_shape=(10,),\n momentum=0.8)\n model.add(norm)\n return model, norm\n\n def _train_freezable_batch_norm(self, training_mean, training_var):\n model, _ = self._build_model()\n model.compile(loss='mse', optimizer='sgd')\n\n # centered on training_mean, variance training_var\n train_data = np.random.normal(\n loc=training_mean,\n scale=training_var,\n size=(1000, 10))\n model.fit(train_data, train_data, epochs=4, verbose=0)\n return model.weights\n\n def _test_batchnorm_layer(\n self, norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg, training_mean, training_var):\n out_tensor = norm(tf.convert_to_tensor(test_data, dtype=tf.float32),\n training=training_arg)\n out = tf.keras.backend.eval(out_tensor)\n out -= tf.keras.backend.eval(norm.beta)\n out /= tf.keras.backend.eval(norm.gamma)\n\n if not should_be_training:\n out *= training_var\n out += (training_mean - testing_mean)\n out /= testing_var\n\n np.testing.assert_allclose(out.mean(), 0.0, atol=1.5e-1)\n np.testing.assert_allclose(out.std(), 1.0, atol=1.5e-1)\n\n def test_batchnorm_freezing_training_none(self):\n with self.test_session():\n training_mean = 5.0\n training_var = 10.0\n\n testing_mean = -10.0\n testing_var = 5.0\n\n # Initially train the batch norm, and save the weights\n trained_weights = self._train_freezable_batch_norm(training_mean,\n training_var)\n\n # Load the batch norm weights, freezing training to True.\n # Apply the batch norm layer to testing data and ensure it is normalized\n # according to the batch statistics.\n model, norm = self._build_model(training=True)\n for trained_weight, blank_weight in zip(trained_weights, model.weights):\n weight_copy = blank_weight.assign(tf.keras.backend.eval(trained_weight))\n tf.keras.backend.eval(weight_copy)\n\n # centered on testing_mean, variance testing_var\n test_data = np.random.normal(\n loc=testing_mean,\n scale=testing_var,\n size=(1000, 10))\n\n # Test with training=True passed to the call method:\n training_arg = True\n should_be_training = True\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, training_var)\n\n # Test with training=False passed to the call method:\n training_arg = False\n should_be_training = False\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, 
training_var)\n\n # Test the layer in various Keras learning phase scopes:\n training_arg = None\n should_be_training = False\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, training_var)\n\n tf.keras.backend.set_learning_phase(True)\n should_be_training = True\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, training_var)\n\n tf.keras.backend.set_learning_phase(False)\n should_be_training = False\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, training_var)\n\n def test_batchnorm_freezing_training_false(self):\n with self.test_session():\n training_mean = 5.0\n training_var = 10.0\n\n testing_mean = -10.0\n testing_var = 5.0\n\n # Initially train the batch norm, and save the weights\n trained_weights = self._train_freezable_batch_norm(training_mean,\n training_var)\n\n # Load the batch norm back up, freezing training to False.\n # Apply the batch norm layer to testing data and ensure it is normalized\n # according to the training data's statistics.\n model, norm = self._build_model(training=False)\n for trained_weight, blank_weight in zip(trained_weights, model.weights):\n weight_copy = blank_weight.assign(tf.keras.backend.eval(trained_weight))\n tf.keras.backend.eval(weight_copy)\n\n # centered on testing_mean, variance testing_var\n test_data = np.random.normal(\n loc=testing_mean,\n scale=testing_var,\n size=(1000, 10))\n\n # Make sure that the layer is never training\n # Test with training=True passed to the call method:\n training_arg = True\n should_be_training = False\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, training_var)\n\n # Test with training=False passed to the call method:\n training_arg = False\n should_be_training = False\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, training_var)\n\n # Test the layer in various Keras learning phase scopes:\n training_arg = None\n should_be_training = False\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, training_var)\n\n tf.keras.backend.set_learning_phase(True)\n should_be_training = False\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, training_var)\n\n tf.keras.backend.set_learning_phase(False)\n should_be_training = False\n self._test_batchnorm_layer(norm, should_be_training, test_data,\n testing_mean, testing_var, training_arg,\n training_mean, training_var)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.test.main", "tensorflow.keras.backend.eval", "numpy.random.normal", "tensorflow.keras.backend.set_learning_phase", "tensorflow.keras.models.Sequential" ] ]
nikoladze/pyhf
[ "03b227373b3f893eb501178c3613e0919d5198d8" ]
[ "tests/test_scripts.py" ]
[ "import json\nimport shlex\nimport pyhf\nimport time\nimport pytest\n\n\ndef test_version(script_runner):\n command = 'pyhf --version'\n start = time.time()\n ret = script_runner.run(*shlex.split(command))\n end = time.time()\n elapsed = end - start\n assert ret.success\n assert pyhf.__version__ in ret.stdout\n assert ret.stderr == ''\n # make sure it took less than a second\n assert elapsed < 1.0\n\n\n# see test_import.py for the same (detailed) test\ndef test_import_prepHistFactory(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n assert ret.stdout == ''\n assert ret.stderr == ''\n\n parsed_xml = json.loads(temp.read())\n spec = {'channels': parsed_xml['channels']}\n pyhf.utils.validate(spec, 'model.json')\n\n\ndef test_import_prepHistFactory_withProgress(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n assert ret.stdout == ''\n assert ret.stderr != ''\n\n\ndef test_import_prepHistFactory_stdout(tmpdir, script_runner):\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/'\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n assert ret.stdout != ''\n assert ret.stderr != ''\n d = json.loads(ret.stdout)\n assert d\n\n\ndef test_import_prepHistFactory_and_cls(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf cls {0:s}'.format(temp.strpath)\n ret = script_runner.run(*shlex.split(command))\n\n assert ret.success\n d = json.loads(ret.stdout)\n assert d\n assert 'CLs_obs' in d\n assert 'CLs_exp' in d\n\n for measurement in [\n 'GaussExample',\n 'GammaExample',\n 'LogNormExample',\n 'ConstExample',\n ]:\n command = 'pyhf cls {0:s} --measurement {1:s}'.format(temp.strpath, measurement)\n ret = script_runner.run(*shlex.split(command))\n\n assert ret.success\n d = json.loads(ret.stdout)\n assert d\n assert 'CLs_obs' in d\n assert 'CLs_exp' in d\n\n tmp_out = tmpdir.join('{0:s}_output.json'.format(measurement))\n # make sure output file works too\n command += ' --output-file {0:s}'.format(tmp_out.strpath)\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n d = json.load(tmp_out)\n assert 'CLs_obs' in d\n assert 'CLs_exp' in d\n\n\[email protected](\n \"backend\", [\"numpy\", \"tensorflow\", \"pytorch\", \"jax\"],\n)\ndef test_cls_backend_option(tmpdir, script_runner, backend):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf cls --backend {0:s} {1:s}'.format(backend, temp.strpath)\n ret = script_runner.run(*shlex.split(command))\n\n assert ret.success\n d = json.loads(ret.stdout)\n assert d\n assert 'CLs_obs' in d\n assert 'CLs_exp' 
in d\n\n\ndef test_import_and_export(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf json2xml {0:s} --output-dir {1:s}'.format(\n temp.strpath, tmpdir.mkdir('output').strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n\ndef test_patch(tmpdir, script_runner):\n patch = tmpdir.join('patch.json')\n\n patch.write(\n u'''\n[{\"op\": \"replace\", \"path\": \"/channels/0/samples/0/data\", \"value\": [5,6]}]\n '''\n )\n\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf cls {0:s} --patch {1:s}'.format(temp.strpath, patch.strpath)\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n command = 'pyhf json2xml {0:s} --output-dir {1:s} --patch {2:s}'.format(\n temp.strpath, tmpdir.mkdir('output_1').strpath, patch.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n command = f'pyhf cls {temp.strpath:s} --patch -'\n\n ret = script_runner.run(*shlex.split(command), stdin=patch)\n assert ret.success\n\n command = f\"pyhf json2xml {temp.strpath:s} --output-dir {tmpdir.mkdir('output_2').strpath:s} --patch -\"\n ret = script_runner.run(*shlex.split(command), stdin=patch)\n assert ret.success\n\n\ndef test_patch_fail(tmpdir, script_runner):\n patch = tmpdir.join('patch.json')\n\n patch.write('''not,json''')\n\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf cls {0:s} --patch {1:s}'.format(temp.strpath, patch.strpath)\n ret = script_runner.run(*shlex.split(command))\n assert not ret.success\n\n command = 'pyhf json2xml {0:s} --output-dir {1:s} --patch {2:s}'.format(\n temp.strpath, tmpdir.mkdir('output').strpath, patch.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert not ret.success\n\n\ndef test_bad_measurement_name(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf cls {0:s} --measurement \"a-fake-measurement-name\"'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert not ret.success\n # assert 'no measurement by name' in ret.stderr # numpy swallows the log.error() here, dunno why\n\n\ndef test_testpoi(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n pois = [1.0, 0.5, 0.0]\n results_exp = []\n results_obs = []\n for testpoi in pois:\n command = 'pyhf cls {0:s} --testpoi {testpoi:f}'.format(\n temp.strpath, testpoi=testpoi\n )\n ret = script_runner.run(*shlex.split(command))\n\n assert ret.success\n d = 
json.loads(ret.stdout)\n assert d\n assert 'CLs_obs' in d\n assert 'CLs_exp' in d\n\n results_exp.append(d['CLs_exp'])\n results_obs.append(d['CLs_obs'])\n\n import numpy as np\n import itertools\n\n for pair in itertools.combinations(results_exp, r=2):\n assert not np.array_equal(*pair)\n\n assert len(list(set(results_obs))) == len(pois)\n\n\[email protected](\n 'opts,success',\n [(['maxiter=1000'], True), (['maxiter=100'], True), (['maxiter=10'], False)],\n)\ndef test_cls_optimizer(tmpdir, script_runner, opts, success):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf cls {0:s} --optimizer scipy_optimizer {1:s}'.format(\n temp.strpath, ' '.join('--optconf {0:s}'.format(opt) for opt in opts)\n )\n ret = script_runner.run(*shlex.split(command))\n\n assert ret.success == success\n\n\ndef test_inspect(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf inspect {0:s}'.format(temp.strpath)\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n\ndef test_inspect_outfile(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n tempout = tmpdir.join(\"inspect_output.json\")\n command = 'pyhf inspect {0:s} --output-file {1:s}'.format(\n temp.strpath, tempout.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n summary = json.loads(tempout.read())\n assert [\n 'channels',\n 'measurements',\n 'modifiers',\n 'parameters',\n 'samples',\n 'systematics',\n ] == sorted(summary.keys())\n assert len(summary['channels']) == 1\n assert len(summary['measurements']) == 4\n assert len(summary['modifiers']) == 6\n assert len(summary['parameters']) == 6\n assert len(summary['samples']) == 3\n assert len(summary['systematics']) == 6\n\n\ndef test_prune(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf prune -m staterror_channel1 --measurement GammaExample {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n\ndef test_prune_outfile(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n tempout = tmpdir.join(\"prune_output.json\")\n command = 'pyhf prune -m staterror_channel1 --measurement GammaExample {0:s} --output-file {1:s}'.format(\n temp.strpath, tempout.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n spec = json.loads(temp.read())\n ws = pyhf.Workspace(spec)\n 
assert 'GammaExample' in ws.measurement_names\n assert 'staterror_channel1' in ws.parameters\n pruned_spec = json.loads(tempout.read())\n pruned_ws = pyhf.Workspace(pruned_spec)\n assert 'GammaExample' not in pruned_ws.measurement_names\n assert 'staterror_channel1' not in pruned_ws.parameters\n\n\ndef test_rename(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf rename -m staterror_channel1 staterror_channelone --measurement GammaExample GamEx {0:s}'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n\ndef test_rename_outfile(tmpdir, script_runner):\n temp = tmpdir.join(\"parsed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(\n temp.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n tempout = tmpdir.join(\"rename_output.json\")\n command = 'pyhf rename -m staterror_channel1 staterror_channelone --measurement GammaExample GamEx {0:s} --output-file {1:s}'.format(\n temp.strpath, tempout.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n spec = json.loads(temp.read())\n ws = pyhf.Workspace(spec)\n assert 'GammaExample' in ws.measurement_names\n assert 'GamEx' not in ws.measurement_names\n assert 'staterror_channel1' in ws.parameters\n assert 'staterror_channelone' not in ws.parameters\n renamed_spec = json.loads(tempout.read())\n renamed_ws = pyhf.Workspace(renamed_spec)\n assert 'GammaExample' not in renamed_ws.measurement_names\n assert 'GamEx' in renamed_ws.measurement_names\n assert 'staterror_channel1' not in renamed_ws.parameters\n assert 'staterror_channelone' in renamed_ws.parameters\n\n\ndef test_combine(tmpdir, script_runner):\n temp_1 = tmpdir.join(\"parsed_output.json\")\n temp_2 = tmpdir.join(\"renamed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(\n temp_1.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n rename_channels = {'channel1': 'channel2'}\n rename_measurements = {\n 'ConstExample': 'OtherConstExample',\n 'LogNormExample': 'OtherLogNormExample',\n 'GaussExample': 'OtherGaussExample',\n 'GammaExample': 'OtherGammaExample',\n }\n\n command = 'pyhf rename {0:s} {1:s} {2:s} --output-file {3:s}'.format(\n temp_1.strpath,\n ''.join(' -c ' + ' '.join(item) for item in rename_channels.items()),\n ''.join(\n ' --measurement ' + ' '.join(item) for item in rename_measurements.items()\n ),\n temp_2.strpath,\n )\n ret = script_runner.run(*shlex.split(command))\n\n command = 'pyhf combine {0:s} {1:s}'.format(temp_1.strpath, temp_2.strpath)\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n\ndef test_combine_outfile(tmpdir, script_runner):\n temp_1 = tmpdir.join(\"parsed_output.json\")\n temp_2 = tmpdir.join(\"renamed_output.json\")\n command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(\n temp_1.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n\n rename_channels = {'channel1': 'channel2'}\n rename_measurements = {\n 'ConstExample': 
'OtherConstExample',\n 'LogNormExample': 'OtherLogNormExample',\n 'GaussExample': 'OtherGaussExample',\n 'GammaExample': 'OtherGammaExample',\n }\n\n command = 'pyhf rename {0:s} {1:s} {2:s} --output-file {3:s}'.format(\n temp_1.strpath,\n ''.join(' -c ' + ' '.join(item) for item in rename_channels.items()),\n ''.join(\n ' --measurement ' + ' '.join(item) for item in rename_measurements.items()\n ),\n temp_2.strpath,\n )\n ret = script_runner.run(*shlex.split(command))\n\n tempout = tmpdir.join(\"combined_output.json\")\n command = 'pyhf combine {0:s} {1:s} --output-file {2:s}'.format(\n temp_1.strpath, temp_2.strpath, tempout.strpath\n )\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n\n combined_spec = json.loads(tempout.read())\n combined_ws = pyhf.Workspace(combined_spec)\n assert combined_ws.channels == ['channel1', 'channel2']\n assert len(combined_ws.measurement_names) == 8\n\n\[email protected]('do_json', [False, True])\[email protected](\n 'algorithms', [['md5'], ['sha256'], ['sha256', 'md5'], ['sha256', 'md5']]\n)\ndef test_workspace_digest(tmpdir, script_runner, algorithms, do_json):\n results = {\n 'md5': '202eb7615102c35ba86be47eb6fa5e78',\n 'sha256': '7c32ca3b8db75cbafcf5cd7ed4672fa2b1fa69e391c9b89068dd947a521866ec',\n }\n\n temp = tmpdir.join(\"parsed_output.json\")\n command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath} --hide-progress'\n ret = script_runner.run(*shlex.split(command))\n\n command = f\"pyhf digest {temp.strpath} -a {' -a '.join(algorithms)}{' -j' if do_json else ''}\"\n ret = script_runner.run(*shlex.split(command))\n assert ret.success\n assert all(algorithm in ret.stdout for algorithm in algorithms)\n if do_json:\n expected_output = json.dumps(\n {algorithm: results[algorithm] for algorithm in algorithms},\n sort_keys=True,\n indent=4,\n )\n else:\n expected_output = '\\n'.join(\n f\"{algorithm}:{results[algorithm]}\" for algorithm in algorithms\n )\n\n assert ret.stdout == expected_output + '\\n'\n assert ret.stderr == ''\n\n if do_json:\n assert json.loads(ret.stdout) == {\n algorithm: results[algorithm] for algorithm in algorithms\n }\n\n\[email protected]('output_file', [False, True])\[email protected]('with_metadata', [False, True])\ndef test_patchset_extract(datadir, tmpdir, script_runner, output_file, with_metadata):\n temp = tmpdir.join(\"extracted_output.json\")\n command = f'pyhf patchset extract {datadir.join(\"example_patchset.json\").strpath} --name patch_channel1_signal_syst1'\n if output_file:\n command += f\" --output-file {temp.strpath}\"\n if with_metadata:\n command += \" --with-metadata\"\n\n ret = script_runner.run(*shlex.split(command))\n\n assert ret.success\n if output_file:\n extracted_output = json.loads(temp.read())\n else:\n extracted_output = json.loads(ret.stdout)\n if with_metadata:\n assert 'metadata' in extracted_output\n else:\n assert (\n extracted_output\n == json.load(datadir.join(\"example_patchset.json\"))['patches'][0]['patch']\n )\n\n\ndef test_patchset_verify(datadir, script_runner):\n command = f'pyhf patchset verify {datadir.join(\"example_bkgonly.json\").strpath} {datadir.join(\"example_patchset.json\").strpath}'\n ret = script_runner.run(*shlex.split(command))\n\n assert ret.success\n assert 'All good' in ret.stdout\n\n\[email protected]('output_file', [False, True])\ndef test_patchset_apply(datadir, tmpdir, script_runner, output_file):\n temp = tmpdir.join(\"patched_output.json\")\n command = f'pyhf 
patchset apply {datadir.join(\"example_bkgonly.json\").strpath} {datadir.join(\"example_patchset.json\").strpath} --name patch_channel1_signal_syst1'\n if output_file:\n command += f\" --output-file {temp.strpath}\"\n\n ret = script_runner.run(*shlex.split(command))\n\n assert ret.success\n if output_file:\n extracted_output = json.loads(temp.read())\n else:\n extracted_output = json.loads(ret.stdout)\n assert extracted_output['channels'][0]['samples'][0]['modifiers'][0]['data'] == {\n \"hi\": 1.2,\n \"lo\": 0.8,\n }\n" ]
[ [ "numpy.array_equal" ] ]
vohoaiviet/tensor2tensor
[ "c2bd023ab7fc8ca0f4c4138e955845537d10ca55" ]
[ "tensor2tensor/data_generators/audio_test.py" ]
[ "# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensor2tensor.data_generators.audio.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport os\n\n# Dependency imports\n\nfrom tensor2tensor.data_generators import audio\n\nimport tensorflow as tf\n\n\nclass AudioTest(tf.test.TestCase):\n\n def testDataCollection(self):\n # Generate a trivial source and target file.\n tmp_dir = self.get_temp_dir()\n test_files = [\n \"dir1/file1\",\n \"dir1/file2\",\n \"dir1/dir2/file3\",\n \"dir1/dir2/dir3/file4\",\n ]\n for filename in test_files:\n input_filename = os.path.join(tmp_dir, filename + \".WAV\")\n target_filename = os.path.join(tmp_dir, filename + \".WRD\")\n directories = os.path.dirname(input_filename)\n if not os.path.exists(directories):\n os.makedirs(directories)\n io.open(input_filename, \"wb\")\n io.open(target_filename, \"wb\")\n\n data_dict = audio._collect_data(tmp_dir, \".WAV\", \".WRD\")\n expected = [os.path.join(tmp_dir, filename) for filename in test_files]\n self.assertEqual(sorted(list(data_dict)), sorted(expected))\n\n # Clean up.\n for filename in test_files:\n os.remove(os.path.join(tmp_dir, \"%s.WAV\" % filename))\n os.remove(os.path.join(tmp_dir, \"%s.WRD\" % filename))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
aiporre/uBAM
[ "547b842b8dbaa221a2dcb93bf040f5decaacb517" ]
[ "behaviorAnalysis/magnification/run_magnification/compute_magnification_flow.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPython 2.7\n@author: Biagio Brattoli \[email protected]\nLast Update: 23.8.2018\n\nUse Generative Model for posture extrapolation\n\"\"\"\nfrom datetime import datetime\nimport os, sys, numpy as np, argparse\nfrom time import time\nfrom tqdm import tqdm, trange\nimport matplotlib.pyplot as plt\n\nfrom skimage.transform import resize\nfrom scipy.misc import imread, imsave\n\n#import config_pytorch as cfg\nimport config_pytorch_human as cfg\nimport cv2\n\noptical_flow = cv2.createOptFlow_DualTVL1()\n\nsource_path = cfg.results_path+'/magnification/magnification_pervideo/'\ndest_path = cfg.results_path+'/magnification/magnification_pervideo_flow/'\nif not os.path.exists(dest_path):\n os.mkdir(dest_path)\n\nvideos = sorted(os.listdir(source_path))\nfor video in tqdm(videos, desc=\"Compute Flow - videos\"):\n files = sorted(os.listdir(source_path+'/'+video+'/impaired/'))\n files = [f for f in files if '.png' in f]\n flows = []\n for frame in tqdm(files, desc=\"Frames\"):\n original = imread(source_path+'/'+video+'/impaired/'+frame)\n magnified= imread(source_path+'/'+video+'/magnified/'+frame)\n original = cv2.cvtColor(original,cv2.COLOR_RGB2GRAY)\n magnified= cv2.cvtColor(magnified,cv2.COLOR_RGB2GRAY)\n flow = optical_flow.calc(original, magnified, None)\n flows.append(flow)\n #mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\n flows = np.stack(flows, axis=0)\n np.savez(dest_path+'/'+video, flow=flows)\n" ]
[ [ "numpy.savez", "scipy.misc.imread", "numpy.stack" ] ]
PURE-melo/GRS-Det
[ "fcc16a48ba628e7bca83795120108ae8a4fa3027" ]
[ "gaussian_mask.py" ]
[ "import numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef gaussian(kernel, w):\r\n sigma = ((kernel-1) * 0.5 - 1) * 0.3 + 0.8\r\n s = 2*(sigma**2)\r\n dx = np.exp(-1*w*np.square(np.arange(kernel) - int(kernel / 2)) / s)\r\n return np.reshape(dx,(-1,1))\r\n\r\ndef gaussian_mask(bbox,imgshape):\r\n segmap = np.zeros((imgshape[0],imgshape[1]), dtype=np.float)\r\n for i, box in enumerate(bbox):\r\n rect = cv2.minAreaRect(box)\r\n angle = rect[-1]\r\n x1,y1,x2,y2 = np.min(box[:,0]), np.min(box[:,1]), np.max(box[:,0]), np.max(box[:,1])\r\n w = x2 - x1\r\n h = y2 - y1\r\n if w<h:\r\n flag=1\r\n max_w_h = h\r\n min_w_h = w\r\n else:\r\n flag=0\r\n max_w_h = w\r\n min_w_h = h\r\n if rect[1][0]>rect[1][1]:\r\n longside = rect[1][0]\r\n shortside = rect[1][1]\r\n angle = -angle\r\n else:\r\n longside = rect[1][1]\r\n shortside = rect[1][0]\r\n angle=-angle\r\n angle = angle+90\r\n\r\n centerx = w/2\r\n centery = h/2 \r\n value= 2.95*np.exp(-0.35*(max_w_h/min_w_h))*max_w_h/min_w_h\r\n if flag:\r\n dx = gaussian(w, value) \r\n dy = gaussian(h, 0.1*min_w_h/max_w_h)\r\n else:\r\n dx = gaussian(w, 0.1*min_w_h/max_w_h)\r\n dy = gaussian(h, value)\r\n gau_map = np.multiply(dy,np.transpose(dx))\r\n rot_mat = cv2.getRotationMatrix2D((w/2,h/2), angle+flag*90, 1)\r\n gau_map = cv2.warpAffine(gau_map, rot_mat, (w,h))\r\n gau_map = (gau_map - np.min(gau_map))/(np.max(gau_map)-np.min(gau_map))\r\n segmap[y1:y2, x1:x2] = np.maximum(segmap[y1:y2, x1:x2],gau_map)\r\n\r\n return segmap\r\n\r\nif __name__ == \"__main__\":\r\n #box = [np.array([[134,52],[417,481],[335,535],[52,106]]),np.array([[298,22],[480,316],[439,341],[257,47]])]\r\n #box = [np.array([[420,8],[787,692],[621,781],[254,97]])]\r\n box = [np.array([[41,372],[349,452],[340,487],[32,407]]),np.array([[686,539],[1000,622],[993,656],[677,573]]),np.array([[381,465],[678,537],[670,570],[373,498]]),np.array([[526,97],[834,118],[831,154],[523,133]]),np.array([[192,70],[505,95],[503,131],[190,106]])] \r\n segmap = gaussian_mask(box, (1000,1000))\r\n cmap='jet'\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.axis('off')\r\n plt.imshow(segmap,cmap)\r\n plt.show()\r\n" ]
[ [ "numpy.array", "matplotlib.pyplot.yticks", "matplotlib.pyplot.imshow", "numpy.maximum", "numpy.min", "numpy.reshape", "numpy.arange", "numpy.transpose", "numpy.max", "matplotlib.pyplot.axis", "numpy.exp", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "numpy.zeros" ] ]
varunagrawal/GTDynamics
[ "1f48112a5ffd273c74c5ce2ddda4166e2fae7dff" ]
[ "scripts/nithya00_constrained_opt_benchmark.py" ]
[ "\"\"\"\n * GTDynamics Copyright 2020, Georgia Tech Research Corporation,\n * Atlanta, Georgia 30332-0415\n * All Rights Reserved\n * See LICENSE for the license information\n *\n * @file nithya00_constrained_opt_benchmark.py\n * @brief Plot intermediate results for the penalty method optimizer vs. the augmented Lagrangian optimizer.\n * @author Nithya Jayakumar\n * @author Yetong Zhang\n\"\"\"\n\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# load optimization result data from file\ndef load_data(filename):\n    data = []\n    with open(filename) as data_file:\n        lines = data_file.readlines()\n        for line in lines:\n            line = line.split()\n            line = [float(data) for data in line]\n            data.append(line)\n    data = np.array(data)\n    data_arranged = {}\n    data_arranged[\"num_iters\"] = data[:, 0]\n    data_arranged[\"cumulative_iters\"] = np.cumsum(data[:, 0])\n    data_arranged[\"mu\"] = data[:, 1]\n    data_arranged[\"feasibility\"] = data[:, 2]\n    data_arranged[\"optimality\"] = data[:, 3]\n    return data_arranged\n\n# create 2 plots:\n# plot1: penalty parameter vs. cumulative LM iterations\n# plot2: feasibility, optimality vs. cumulative LM iterations\ndef make_plot(data):\n    # fig = plt.figure(figsize=(10, 10), dpi=160)\n    fig, axes = plt.subplots(1, 2, figsize=(12, 5))\n\n    axes[0].set_yscale('log')\n    axes[0].plot(data[\"cumulative_iters\"], data[\"mu\"])\n    axes[0].set_xlabel(\"Cumulative L-M Iterations\")\n    axes[0].set_ylabel(\"Penalty Parameter mu\")\n\n    axes[1].set_xlabel(\"Cumulative L-M Iterations\")\n    axes[1].set_ylabel(\"Residual\")\n    axes[1].plot(data[\"cumulative_iters\"], data[\"feasibility\"], label=\"Feasibility\", color=\"b\")\n    axes[1].plot(data[\"cumulative_iters\"], data[\"optimality\"], label=\"Optimality\", color=\"r\")\n    axes[1].legend()\n\n# Load data for the penalty method and augmented Lagrangian optimizers.\ndata_penalty = load_data('build/scripts/penalty_data.txt')\ndata_augl = load_data('build/scripts/augl_data.txt')\n\nmake_plot(data_penalty)\nmake_plot(data_augl)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.subplots", "numpy.array", "matplotlib.pyplot.show", "numpy.cumsum" ] ]
diddytpq/Predict-Tennisball-LandingPoint
[ "0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a" ]
[ "src/predict_ball_pos/src/kalman_utils/utils.py" ]
[ "import cv2\nimport numpy as np\nimport os\nimport math\nfrom scipy.spatial import distance as dist\nfrom collections import OrderedDict\n\nlower_green = np.array([89, 250, 250])\nupper_green = np.array([90, 255, 255])\n\nlower_rgb = np.array([10,180,120])\nupper_rgb = np.array([40,255,250])\n\n\nclass Bouncing_point():\n\n def __init__(self):\n\n self.ball_left_center_list = []\n self.ball_right_center_list = []\n\n self.left_gradient_list = []\n self.right_gradient_list = []\n\n self.bouncing_point = []\n\n def gradient_check(self,num):\n\n if num >= 0:\n return True\n \n elif num < 0:\n return False\n \n\n def gradient_cal(self,ball_center_list):\n\n x_pre,y_pre = ball_center_list[0][0], ball_center_list[0][1]\n x,y = ball_center_list[1][0], ball_center_list[1][1]\n \n\n delta_x = x-x_pre\n delta_y = y-y_pre\n \n #if (abs(delta_x) or abs(delta_y))> 50 or delta_x == 0:\n if (abs(delta_x) or abs(delta_y))> 200 or delta_x == 0:\n #if delta_x == 0:\n #self.ball_center_list = []\n #self.ball_state_list = []\n #self.gradient_list = []\n\n return False\n\n gradient = delta_y/delta_x\n\n if ((x_pre + x)/2) < 640 and x_pre > x:\n self.left_gradient_list.append(gradient)\n\n elif ((x_pre + x)/2) > 640 and x_pre < x :\n self.right_gradient_list.append(gradient)\n \n if len(self.left_gradient_list) == 2: #left view\n if self.gradient_check(self.left_gradient_list[0]) == False and self.gradient_check(self.left_gradient_list[1]) == True:\n self.bouncing_point.append([(x_pre + x)/2, (y_pre + y)/2])\n \n del self.left_gradient_list[0]\n\n if len(self.right_gradient_list) == 2: #right view\n if self.gradient_check(self.right_gradient_list[0]) == True and self.gradient_check(self.right_gradient_list[1]) == False:\n self.bouncing_point.append([(x_pre + x)/2, (y_pre + y)/2])\n\n del self.right_gradient_list[0]\n \n \n\n\n def append_ball(self,ball_center_list,):\n\n if ball_center_list[0] < 640:\n self.ball_left_center_list.append(ball_center_list)\n\n if len(self.ball_left_center_list) > 2:\n del self.ball_left_center_list[0]\n\n if len(self.ball_left_center_list) == 2:\n self.gradient_cal(self.ball_left_center_list)\n else :\n self.ball_right_center_list.append(ball_center_list)\n\n if len(self.ball_right_center_list) > 2:\n del self.ball_right_center_list[0]\n\n if len(self.ball_right_center_list) == 2:\n self.gradient_cal(self.ball_right_center_list)\n\n\nclass Trajectory:\n def __init__(self, maxDisappeared = 10):\n\n self.nextObjectID = 0\n self.point_list = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxDisappeared = maxDisappeared\n\n def register(self, centroid):\n self.point_list[self.nextObjectID] = [centroid]\n self.disappeared[self.nextObjectID] = 0\n self.nextObjectID += 1\n\n def deregister(self, objectID):\n del self.point_list[objectID]\n del self.disappeared[objectID]\n\n def update(self, next_centroid_list):\n \n if len(next_centroid_list) == 0:\n \n for ID in list(self.disappeared.keys()):\n self.disappeared[ID] += 1\n \n if self.disappeared[ID] >= self.maxDisappeared:\n self.deregister(ID)\n\n return self.point_list\n \n if len(self.point_list) == 0:\n for i in range(len(next_centroid_list)):\n self.register(next_centroid_list[i])\n \n else:\n objectIDs = list(self.point_list.keys()) \n pre_point = list()\n \n for ID in list(self.point_list.keys()):\n \n pre_point.append(((self.point_list[ID])[-1]))\n\n \n distan = dist.cdist(np.array(pre_point), next_centroid_list)\n rows = distan.min(axis=1).argsort()\n cols = distan[rows].argmin(axis=1)\n \n usedRows = set()\n usedCols 
= set()\n\n            for (row, col) in zip(rows, cols):\n\n                if row in usedRows or col in usedCols: continue\n\n                objectID = objectIDs[row]\n                self.point_list[objectID].append(next_centroid_list[col])\n                self.disappeared[objectID] = 0\n\n                usedRows.add(row)\n                usedCols.add(col)\n\n            unusedRows = set(range(0, distan.shape[0])).difference(usedRows)\n            unusedCols = set(range(0, distan.shape[1])).difference(usedCols)\n\n            if distan.shape[0] >= distan.shape[1]:\n\n                for row in unusedRows:\n                    objectID = objectIDs[row]\n                    self.disappeared[objectID] += 1\n\n                    if self.disappeared[objectID] > self.maxDisappeared:\n                        self.deregister(objectID)\n            else:\n                for col in unusedCols:\n                    self.register(next_centroid_list[col])\n\n        return self.point_list\n\n\ndef img2court(M,point):\n\n    div = M[2][0]*point[0] + M[2][1]*point[1] + 1\n    new_x = (M[0][0]*point[0] + M[0][1]*point[1] + M[0][2]) / div\n    new_y = (M[1][0]*point[0] + M[1][1]*point[1] + M[1][2]) / div\n\n    return new_x, new_y\n\n\ndef sum_box(center_list,stats_list): # compute the distance between two boxes and merge them when it is below the thresholds\n\n    if len(center_list) < 2:\n        return center_list, stats_list\n\n    center_points = center_list\n    stats = stats_list\n    i , j = 0 , 1\n\n    while True:\n\n        if i + 1 == len(center_points):\n            break\n\n        #score = int(math.sqrt((center_points[i][0] - center_points[j][0])**2 + (center_points[i][1] - center_points[j][1])**2))\n\n        x_length = center_points[i][0] - center_points[j][0]\n        y_length = center_points[i][1] - center_points[j][1]\n        #print(i,j,score)\n\n        #if score < 130:\n        if abs(x_length) < 30 and abs(y_length) < 130:\n\n            # update stats_list: stats_points([x, y, width, height, area])\n            min_x = int(min(stats[i][0],stats[j][0]))\n            min_y = int(min(stats[i][1],stats[j][1]))\n            new_width = int(max(stats[i][0] + stats[i][2],stats[j][0] + stats[j][2])) - min_x\n            new_height = int((max(stats[i][1] + stats[i][3],stats[j][1] + stats[j][3]))) - min_y\n            new_area = new_width * new_height\n            stats.append([min_x, min_y, new_width, new_height, new_area])\n\n            # centre of the merged box (the original added the full width/height to min_x/min_y,\n            # which is the far corner; the halved form below is the assumed intent)\n            new_cen_x = int(min_x + new_width / 2)\n            new_cen_y = int(min_y + new_height / 2)\n            center_points.append([new_cen_x, new_cen_y])\n\n            del center_points[j]\n            del center_points[i]\n            del stats[j]\n            del stats[i]\n\n            j = i + 1\n\n        else:\n            j += 1\n\n            if j == len(center_points):\n                i += 1\n                j = i+1\n\n    return center_points, stats\n\n\ndef img_view(img_name):\n\n    img = cv2.imread(img_name)\n\n    cv2.imshow('img', img)\n\n    k = cv2.waitKey(0)\n    if k == 27:\n        cv2.destroyAllWindows()\n\n\ndef bounce_point2top_view(point,M_letf,M_right):\n\n    x, y = point\n\n    if x < 640:\n        new_x , new_y = img2court(M_letf,[x,y])\n\n    else:\n        new_x , new_y = img2court(M_right,[x,y])\n\n    return int(new_x), int(new_y)\n\n\ndef check_player_box(center_points_list, stats_points_list, stats_points_open_list, pre_stats_points_open_L,pre_stats_points_open_R):\n\n    i = 0\n\n    player_stats_points_list = []\n\n    stats_points_open_L, stats_points_open_R = stats_points_open_list[0], stats_points_open_list[1]\n\n    if len(center_points_list) == 0:\n        return center_points_list, stats_points_list, player_stats_points_list\n\n    if len(stats_points_open_L) == 0:\n        stats_points_open_L = pre_stats_points_open_L\n\n    if len(stats_points_open_R) == 0:\n        stats_points_open_R = pre_stats_points_open_R\n\n    while i < len(center_points_list):\n\n        x_center, y_center = center_points_list[i]\n        x, y, width, height, area = stats_points_list[i]\n\n        if x_center < 640:\n            if len(stats_points_open_L) == 0:\n                i += 1\n                continue\n\n            for j in range(len(stats_points_open_L)):\n                #x_open, y_open, width_open, 
height_open, area_open = stats_points_open_L[j]\n\n #if x_open < x_center < (width_open + x_open):\n if nms(stats_points_list[i], stats_points_open_L[j]):\n\n player_stats_points_list.append(stats_points_list[i])\n del center_points_list[i]\n del stats_points_list[i]\n break\n\n if j == (len(stats_points_open_L)-1):\n i += 1\n\n else:\n if len(stats_points_open_R) == 0 : \n i += 1 \n continue\n\n for j in range(len(stats_points_open_R)):\n #x_open, y_open, width_open, height_open, area_open = stats_points_open_R[j]\n\n if nms(stats_points_list[i], stats_points_open_R[j]):\n\n player_stats_points_list.append(stats_points_list[i])\n del center_points_list[i]\n del stats_points_list[i]\n break\n\n if j == (len(stats_points_open_R)-1):\n i += 1\n\n return center_points_list, stats_points_list, player_stats_points_list\n\ndef get_center_point(centroids_list, stats_list):\n\n center_points = []\n stats_points = []\n stats = stats_list\n\n for index, centroid in enumerate(centroids_list):\n if stats[index][0] == 0 and stats[index][1] == 0:\n continue\n if np.any(np.isnan(centroid)):\n continue\n\n x, y, width, height, area = stats[index]\n centerX, centerY = int(centroid[0]), int(centroid[1])\n\n center_points.append([centerX,centerY])\n stats_points.append([x, y, width, height, area])\n\n return center_points, stats_points\n\ndef split_stats_point(stats_list):\n\n stats_points_open_L = []\n stats_points_open_R = []\n\n for index, stats in enumerate(stats_list):\n\n if (stats[0] <= 0 or stats[1] <= 0) or stats[0] > 2000:\n continue\n if np.any(np.isnan(stats)):\n continue\n\n x, y, width, height, area = stats\n\n if x < 640 :\n stats_points_open_L.append([x, y, width, height, area])\n else: \n stats_points_open_R.append([x, y, width, height, area])\n\n return stats_points_open_L, stats_points_open_R \n\n\ndef draw_rectangle(image, points, color = (0,255,0), info = False):\n\n if len(points) > 0:\n for i in range(len(points)):\n\n x, y, width, height, area = points[i]\n\n if x < 0: continue\n\n if info:\n cv2.putText(image, str(width/(height+1)), (x - 1, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA) \n cv2.rectangle(image, (x, y), (x + width, y + height), color,3)\n\n\ndef check_ball(stats_points):\n\n x, y, width, height, area = stats_points\n aspect_ratio = width / (height+1)\n\n if 0.94 < aspect_ratio < 1.11 and area < 2500:\n return True\n\n else:\n return False\n\ndef nms(box_1, box_2):\n\n x_1, y_1, width_1, height_1, are_1 = box_1 \n x_2, y_2, width_2, height_2, are_2 = box_2\n\n max_x = np.argmax([x_1,x_2])\n\n if max_x == 0:\n if (x_2 <= x_1 <= (x_2 + width_2)) or (x_2 <= (x_1 + width_1) <= (x_2 + width_2)):\n return True\n\n else:\n return False\n \n else:\n if (x_1 <= x_2 <= (x_1 + width_1)) or (x_1 <= (x_2 + width_2) <= (x_1 + width_1)):\n return True\n \n else:\n return False\n\n\ndef match_player_box(pre_stats_points_open,stats_points):\n\n for j in range(len(pre_stats_points_open)):\n x_open, y_open, width_open, height_open, area_open = pre_stats_points_open[j]\n \n for i in range(len(stats_points)):\n x, y, width, height, area = stats_points[i]\n\n if nms(pre_stats_points_open[j], stats_points[i]):\n pre_stats_points_open[j] = stats_points[i]\n break\n\n return pre_stats_points_open\n\ndef get_ball_point(ball_candidate_list,box_list):\n\n if len(ball_candidate_list) == 0 or len(box_list) == 0:\n return ball_candidate_list\n \n i = 0\n while i < len(ball_candidate_list):\n ball_x, ball_y = ball_candidate_list[i]\n #print(i, len(ball_candidate_list))\n\n for j in 
range(len(box_list)):\n            x, y, width, height, area = box_list[j]\n\n            if (x < ball_x < (x + width)) and (y < ball_y < (y + height)):\n                del ball_candidate_list[i]\n                break\n\n            if j == (len(box_list)-1):\n                i += 1\n\n    return ball_candidate_list\n\n\ndef get_person_postion(stats_points, M_letf, M_right):\n    # NOTE: the body only uses the player boxes and the two view homographies, so those\n    # are taken as parameters here; the original signature listed YOLO-style outputs\n    # (boxes, objectness, classes, nums, class_names) that were never read, while\n    # stats_points, M_letf and M_right came from undefined globals.\n    player_postion_list = []\n\n    for i in range(len(stats_points)):\n        x, y, width, height, area = stats_points[i]\n\n        # project the foot point (bottom-centre of the box) onto the top view\n        player_x, player_y = bounce_point2top_view([x+(width/2), y+height-(height/5)], M_letf, M_right)\n\n        # clamp to the drawable court area (the original assigned 957 when player_x > 937;\n        # clamping to the tested bound is the assumed intent)\n        if player_x < 25:\n            player_x = 25\n        elif player_x > 937:\n            player_x = 937\n\n        player_postion_list.append([player_x, player_y])\n\n    return player_postion_list\n" ]
[ [ "numpy.isnan", "numpy.array", "numpy.argmax" ] ]
APrioriInvestments/typed_python
[ "a3191e5d30333eba156c2a910abc78f7813dcaa3" ]
[ "typed_python/compiler/tests/compiler_typed_python_comparison_test.py" ]
[ "# Copyright 2017-2019 typed_python Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport typed_python\nimport numpy\nimport unittest\nimport traceback\n\nfrom typed_python import TupleOf, Float32, Int32, ListOf, Function\n\n\nclass Operation:\n \"\"\"Base class for all operations we want to test.\n\n This class models executing the operation in both the\n interpreter and the compiler, and describing which deviations\n between the interpreter and the compiler we'd expect.\n \"\"\"\n def __init__(self):\n self._compiledFunctionCache = {}\n\n def arity(self):\n \"\"\"Return the number of arguments we expect.\"\"\"\n raise NotImplementedError(self)\n\n def name(self):\n \"\"\"return our name.\"\"\"\n raise NotImplementedError(self)\n\n def getLambda(self):\n \"\"\"Return a lambda function representing the operation.\n\n The arguments must have names a1, a2, ...\n \"\"\"\n raise NotImplementedError(self)\n\n def getCompiledLambda(self, types):\n if types not in self._compiledFunctionCache:\n self._compiledFunctionCache[types] = Function(self.getLambda())\n self._compiledFunctionCache[types].resultTypeFor(*types)\n\n return self._compiledFunctionCache[types]\n\n def subsetOfTypesWorthTesting(self, argTypesSoFar, typeList):\n \"\"\"Return the subset of TypeModel instances worth testing in the given argument.\"\"\"\n return typeList\n\n def expectInterpreterDeviation(self, values):\n \"\"\"Would we expect the interpreter and typed_python to be different?\"\"\"\n return False\n\n def expectCompilerDeviation(self, values, compilerTypes):\n \"\"\"Would we expect typed_python and compiler to produce a deviation?\"\"\"\n return False\n\n\nclass TypeModel:\n \"\"\"Base class for all types we want to test.\n\n This class models the process of producing instances of the type, understanding which\n other types we can use to represent this type in a valid form, and understanding\n what more basic instances of a type we'd expect to behave like.\n \"\"\"\n\n def name(self):\n \"\"\"Return the name\"\"\"\n return self.pytype().__name__\n\n def pytype(self):\n \"\"\"Return the actual type object we represent.\"\"\"\n raise NotImplementedError(self)\n\n def instances(self):\n \"\"\"Produce some instances of this type.\"\"\"\n raise NotImplementedError(self)\n\n def isOutOfBounds(self, instance):\n return False\n\n def equivalentOwnInstance(self, interpInstance):\n \"\"\"Given an interpreter instance, represent it as ourself.\n\n Returns (True, obj) or (False, None)\n \"\"\"\n return (False, None)\n\n def containingTypes(self):\n \"\"\"Produce a list of total types that the compiler should be willing to cast this to.\"\"\"\n return [self.pytype(), object]\n\n def equivalentInterpreterInstance(self, instance):\n \"\"\"Return a representation of the object as an interpreter object.\n\n If all values in an operation have an equivalent interpreter instance, then it should\n be the case that the operation produces the same result on the interpreter values.\n\n Returns:\n (True, obj) if 
there is an equivalent object or (False, None) if not.\n \"\"\"\n return (False, None)\n\n def areEquivalent(self, instanceA, instanceB):\n return instanceA == instanceB\n\n def wantsToTestOperationOn(self, op, otherType, reversed):\n return True\n\n\ndef isnan(x):\n if isinstance(x, float):\n return numpy.isnan(x)\n\n if isinstance(x, Float32):\n return isnan(float(x))\n\n return False\n\n\ndef isinf(x):\n if isinstance(x, float):\n return numpy.isinf(x)\n\n if isinstance(x, Float32):\n return isinf(float(x))\n\n return False\n\n\ndef isfinite(x):\n if isinstance(x, float):\n return numpy.isfinite(x)\n\n if isinstance(x, Float32):\n return isfinite(float(x))\n\n return True\n\n\ndef isneginf(x):\n if isinstance(x, float):\n return numpy.isneginf(x)\n\n if isinstance(x, Float32):\n return isneginf(float(x))\n\n return False\n\n\ndef isint(x):\n try:\n int(x)\n return True\n except Exception:\n return False\n\n\ndef isfloat(x):\n try:\n float(x)\n return True\n except Exception:\n return False\n\n\nclass ArithmeticOperation(Operation):\n def __init__(self, name):\n super().__init__()\n self._name = name\n\n def arity(self):\n return 2\n\n def name(self):\n return self._name\n\n def getLambda(self):\n name = self._name\n\n if name == \"add\":\n return lambda a1, a2: a1 + a2\n if name == \"sub\":\n return lambda a1, a2: a1 - a2\n if name == \"mul\":\n return lambda a1, a2: a1 * a2\n if name == \"truediv\":\n return lambda a1, a2: a1 / a2\n if name == \"floordiv\":\n return lambda a1, a2: a1 // a2\n if name == \"and\":\n return lambda a1, a2: a1 & a2\n if name == \"or\":\n return lambda a1, a2: a1 | a2\n if name == \"xor\":\n return lambda a1, a2: a1 ^ a2\n if name == \"lshift\":\n return lambda a1, a2: a1 << a2\n if name == \"rshift\":\n return lambda a1, a2: a1 >> a2\n if name == \"mod\":\n return lambda a1, a2: a1 % a2\n if name == \"pow\":\n return lambda a1, a2: a1 ** a2\n\n raise Exception(f\"unknown op {self._name}\")\n\n def expectInterpreterDeviation(self, values):\n if self._name in (\"truediv\", \"floordiv\"):\n if isinf(values[1]):\n return True\n\n if self._name in (\"floordiv\", 'mod', \"pow\", \"rshift\"):\n for v in values:\n if getattr(type(v), \"IsUnsignedInt\", False):\n # unsigned integer values at the top of the range don't behave correctly\n # because we cast them to signed values which wraps them around to negative\n # numbers\n if int(type(v)(v * 2)) != v:\n return True\n\n if self._name == \"mod\" and type(values[0]) == Int32 and type(values[1]) == Float32:\n # int32 % float32 with integers close to the float cutoff can have odd roundoff errors\n if Int32(Float32(v)) != values[0]:\n return True\n\n if self._name in (\"lshift\", \"pow\"):\n if isint(values[0]) and isint(values[1]) and abs(int(values[1])) > 63:\n return True\n\n if self._name == \"pow\" and (isint(values[0]) or isfloat(values[0])) and values[0] < 0:\n return True\n\n if self._name == \"pow\" and isinf(values[1]):\n return True\n\n if self._name == \"add\" and isinstance(values[0], (TupleOf, ListOf)) and isinstance(values[1], (TupleOf, ListOf)):\n # adding tuples whose values can't be coerced will work when the object is a 'tuple' but not when\n # it's a TupleOf\n try:\n values[0] + values[1]\n except Exception:\n return True\n return False\n\n return False\n\n def expectCompilerDeviation(self, values, compilerTypes):\n if self.expectInterpreterDeviation(values):\n return True\n\n return False\n\n def subsetOfTypesWorthTesting(self, argTypesSoFar, typeList):\n \"\"\"Return the subset of TypeModel 
instances worth testing in the given argument.\"\"\"\n if argTypesSoFar:\n return [\n t for t in typeList\n if argTypesSoFar[0].wantsToTestOperationOn(self, t, reversed=False) or\n t.wantsToTestOperationOn(self, argTypesSoFar[0], reversed=True)\n ]\n\n return typeList\n\n\nclass RegisterTypeModel(TypeModel):\n def wantsToTestOperationOn(self, op, otherType, reversed):\n return isinstance(otherType, RegisterTypeModel)\n\n\nclass FloatTypeModel(RegisterTypeModel):\n def __str__(self):\n return \"FloatTypeModel()\"\n\n def pytype(self):\n return float\n\n def instances(self):\n return [-1e100, -1e10, -2.0, -1.0, -.5, 0.0, .5, 1.0, 2.0, 1e10, 1e100, numpy.nan, numpy.inf, -numpy.inf]\n\n def equivalentOwnInstance(self, interpInstance):\n return (True, interpInstance)\n\n def equivalentInterpreterInstance(self, instance):\n return (True, instance)\n\n def areEquivalent(self, instanceA, instanceB):\n if isfinite(instanceA) and isfinite(instanceB):\n if abs(instanceA) > 1.0:\n return (abs(instanceA) - abs(instanceB)) / (abs(instanceA) + abs(instanceB)) < 1e-10\n\n return abs(instanceA - instanceB) < 1e-15\n\n if isnan(instanceA) and isnan(instanceB):\n return True\n\n if isinf(instanceA) and isinf(instanceB):\n return True\n\n if isneginf(instanceA) and isneginf(instanceB):\n return True\n\n return False\n\n\nclass Float32TypeModel(RegisterTypeModel):\n def pytype(self):\n return Float32\n\n def __str__(self):\n return \"Float32TypeModel()\"\n\n def isOutOfBounds(self, instance):\n if isfinite(instance) and Float32(instance + 1) == instance:\n return True\n return False\n\n def instances(self):\n return [Float32(f) for f in FloatTypeModel().instances()]\n\n def equivalentOwnInstance(self, interpInstance):\n return (True, Float32(interpInstance))\n\n def equivalentInterpreterInstance(self, instance):\n return (True, float(instance))\n\n def areEquivalent(self, instanceA, instanceB):\n return FloatTypeModel().areEquivalent(float(instanceA), float(instanceB))\n\n\nclass IntTypeModel(RegisterTypeModel):\n def pytype(self):\n return int\n\n def __str__(self):\n return \"IntTypeModel()\"\n\n def instances(self):\n return [\n -(1 << 63) + 1, -(1 << 31) + 1, -(1 << 15) + 1, -(1 << 8) + 1, -10, -1, -0,\n 1, 10, 127, 255, (1 << 15) - 1, (1 << 31) - 1, (1 << 63) - 1\n ]\n\n def isOutOfBounds(self, instance):\n return (instance >= (1 << 63)) or (instance <= -(1 << 63))\n\n def equivalentOwnInstance(self, interpInstance):\n return (True, interpInstance)\n\n def equivalentInterpreterInstance(self, instance):\n return (True, instance)\n\n\nclass SmallIntTypeModel(RegisterTypeModel):\n def __init__(self, tpType):\n self.tpType = tpType\n super().__init__()\n\n def __str__(self):\n return f\"SmallIntTypeModel({self.tpType})\"\n\n def pytype(self):\n return self.tpType\n\n def instances(self):\n return [self.tpType(x) for x in IntTypeModel().instances()]\n\n def isOutOfBounds(self, instance):\n maxSize = 1 << (self.tpType.Bits + (1 if self.tpType.IsUnsignedInt else 0))\n return (instance >= maxSize) or (instance <= -maxSize)\n\n def equivalentOwnInstance(self, interpInstance):\n return (True, self.tpType(interpInstance))\n\n def equivalentInterpreterInstance(self, instance):\n return (True, int(instance))\n\n\nclass BoolTypeModel(RegisterTypeModel):\n def __str__(self):\n return \"BoolTypeModel()\"\n\n def pytype(self):\n return bool\n\n def instances(self):\n return [False, True]\n\n def equivalentOwnInstance(self, interpInstance):\n return (True, interpInstance)\n\n def equivalentInterpreterInstance(self, 
instance):\n return (True, instance)\n\n\nclass TupleOfTypeModel(TypeModel):\n def __init__(self, subtypeModel):\n assert isinstance(subtypeModel, TypeModel), subtypeModel\n self.subtypeModel = subtypeModel\n\n def __str__(self):\n return f\"TupleOfTypeModel({self.subtypeModel})\"\n\n def pytype(self):\n return TupleOf(self.subtypeModel.pytype())\n\n def instances(self):\n subInstances = self.subtypeModel.instances()\n\n T = self.pytype()\n\n res = []\n\n res.append(T())\n\n for i in range(len(subInstances)):\n res.append(T(subInstances[:i]))\n\n return res\n\n def areEquivalent(self, i1, i2):\n if len(i1) != len(i2):\n return False\n\n for i in range(len(i1)):\n if not self.subtypeModel.areEquivalent(i1[i], i2[i]):\n return False\n\n return True\n\n def equivalentOwnInstance(self, interpInstance):\n try:\n return True, self.pytype()(interpInstance)\n except Exception:\n return False, None\n\n def equivalentInterpreterInstance(self, ownInstance):\n res = []\n\n for subElt in ownInstance:\n isEquiv, equivVal = self.subtypeModel.equivalentInterpreterInstance(subElt)\n\n if not isEquiv:\n return False, None\n\n res.append(equivVal)\n\n return True, tuple(res)\n\n def isOutOfBounds(self, value):\n for i in value:\n if self.subtypeModel.isOutOfBounds(i):\n return True\n return False\n\n def wantsToTestOperationOn(self, op, otherType, reversed):\n return isinstance(otherType, TupleOfTypeModel)\n\n\nallTypes = [\n FloatTypeModel(),\n Float32TypeModel(),\n IntTypeModel(),\n BoolTypeModel(),\n SmallIntTypeModel(typed_python.Int32),\n SmallIntTypeModel(typed_python.Int16),\n SmallIntTypeModel(typed_python.Int8),\n SmallIntTypeModel(typed_python.UInt64),\n SmallIntTypeModel(typed_python.UInt32),\n SmallIntTypeModel(typed_python.UInt16),\n SmallIntTypeModel(typed_python.UInt8),\n TupleOfTypeModel(FloatTypeModel()),\n TupleOfTypeModel(IntTypeModel()),\n TupleOfTypeModel(TupleOfTypeModel(IntTypeModel()))\n]\n\n\ndef typeModelForValue(val):\n return typeModelForType(type(val))\n\n\ndef typeModelForType(typ):\n if issubclass(typ, TupleOf):\n return TupleOfTypeModel(typeModelForType(typ.ElementType))\n\n if typ is float or typ is float:\n return FloatTypeModel()\n\n if typ is Float32:\n return Float32TypeModel()\n\n if typ is int or typ is int:\n return IntTypeModel()\n\n if typ is bool:\n return BoolTypeModel()\n\n if getattr(typ, \"Bits\", None) is not None:\n return SmallIntTypeModel(typ)\n\n assert False, f\"dont know how to produce a type model for {typ}\"\n\n\nclass Scenario:\n def __init__(self, op, argTypes, values, compileTypes):\n self.op = op\n self.argTypes = argTypes\n self.compileTypes = compileTypes\n self.values = values\n self.failureDesc = \"didn't fail?\"\n\n def check(self):\n # check interpreter against typed_python\n if not self.checkAgainstInterpreter():\n return False\n\n if not self.checkAgainstCompiler():\n return False\n\n return True\n\n def checkAgainstCompiler(self):\n if self.op.expectCompilerDeviation(self.values, self.compileTypes):\n return True\n\n compiledForm = self.op.getCompiledLambda(self.compileTypes)\n\n try:\n typedPythonVal = self.op.getLambda()(*self.values)\n typedPythonException = False\n except Exception:\n typedPythonException = traceback.format_exc()\n\n try:\n compiledVal = compiledForm(*self.values)\n compiledException = False\n except Exception:\n compiledException = traceback.format_exc()\n\n if compiledException and typedPythonException:\n return True\n\n if typedPythonException:\n if self.op.expectInterpreterDeviation(self.values):\n return True\n\n 
self.failureDesc = \"typed_python produced an exception but interpreter didn't:\\n\" + typedPythonException\n return False\n\n if compiledException:\n self.failureDesc = \"compiler produced an exception but typed_python didn't:\\n\" + compiledException\n return False\n\n if type(typedPythonVal) != type(compiledVal):\n self.failureDesc = (\n f\"Compiler produced {compiledVal} of type {type(compiledVal)}.\\n\"\n f\"TypedPython produced {typedPythonVal} of type {type(typedPythonVal)}.\\n\"\n f\"The types are not the same.\"\n )\n return False\n\n typedPythonValType = typeModelForValue(typedPythonVal)\n\n if typedPythonValType is None:\n self.failureDesc = (\n f\"TypedPython produced {typedPythonVal} of type {type(typedPythonVal)}, and we don't have a type model for it.\"\n )\n return False\n\n if typedPythonValType.isOutOfBounds(typedPythonVal) or typedPythonValType.isOutOfBounds(compiledVal):\n # the interpreter's value is out of bounds for this value, so we can ignore it\n return True\n\n if not typedPythonValType.areEquivalent(typedPythonVal, compiledVal):\n self.failureDesc = (\n f\"Compiler produced {compiledVal} of type {type(compiledVal)}.\\n\"\n f\"TypedPython produced {typedPythonVal} of type {type(typedPythonVal)}.\\n\"\n f\"These are not equivalent according to type model '{typedPythonValType.name()}'\\n\"\n )\n return False\n\n return True\n\n def checkAgainstInterpreter(self):\n \"\"\"If all of the arguments here have a pure python form, check that against the typed-python form.\"\"\"\n\n # don't generate an interpreter check for all the different variations of compile types.\n if not all(self.compileTypes[i] == self.argTypes[i].pytype() for i in range(len(self.compileTypes))):\n return True\n\n if self.op.expectInterpreterDeviation(self.values):\n return True\n\n # get a list of interpreter values that would be equivalent\n interpreterValues = []\n for i in range(len(self.argTypes)):\n hasInterpreterEquivalent, interpVal = self.argTypes[i].equivalentInterpreterInstance(self.values[i])\n if hasInterpreterEquivalent:\n interpreterValues.append(interpVal)\n\n if len(interpreterValues) != len(self.values):\n # we can't run this test because we don't have an equivalent for all values\n return True\n\n # check if the typedPython value is really the same as the interpreter value.\n try:\n interpVal = self.op.getLambda()(*interpreterValues)\n interpException = False\n except Exception:\n interpException = True\n\n try:\n typedPythonVal = self.op.getLambda()(*self.values)\n typedPythonException = False\n except Exception:\n typedPythonException = True\n\n if typedPythonException and interpException:\n return True\n\n if typedPythonException:\n self.failureDesc = \"typed_python produced an exception but interpreter didn't.\"\n return False\n\n if interpException:\n self.failureDesc = \"interpreter produced an exception but typed_python didn't.\"\n return False\n\n typedPythonValType = typeModelForValue(typedPythonVal)\n\n if typedPythonValType is None:\n self.failureDesc = (\n f\"TypedPython produced {typedPythonVal} of type {type(typedPythonVal)}, and we don't have a type model for it.\"\n )\n return False\n\n hasInterpValRep, interpValAsTpVal = typedPythonValType.equivalentOwnInstance(interpVal)\n if not hasInterpValRep:\n self.failureDesc = (\n f\"TypedPython produced {typedPythonVal} of type {type(typedPythonVal)}, which cannot represent the \"\n f\"interpreter's values of {interpVal} of type {type(interpVal)}.\"\n )\n return False\n\n if 
typedPythonValType.equivalentInterpreterInstance(interpValAsTpVal)[1] != interpVal:\n # the interpreter's value is out of bounds for this value, so we can ignore it\n return True\n\n if typedPythonValType.isOutOfBounds(interpValAsTpVal):\n return True\n\n if not typedPythonValType.areEquivalent(typedPythonVal, interpValAsTpVal):\n self.failureDesc = (\n f\"Interpreter produced {interpVal} of type {type(interpVal)} using vals {interpreterValues}.\\n\"\n f\"TypedPython produced {typedPythonVal} of type {type(typedPythonVal)}.\\n\"\n f\"We cast the interpreter val to {interpValAsTpVal} of type {type(interpValAsTpVal)}.\\n\"\n f\"These are not equivalent according to type model '{typedPythonValType.name()}'\\n\"\n )\n return False\n\n return True\n\n def failureDescription(self):\n res = f\"Op '{self.op.name()}' failed on arguments:\\n\"\n for i in range(len(self.argTypes)):\n res += (\n f\" {str(self.values[i]):40s} of type {str(self.argTypes[i].name()):40s} \"\n f\"compiled as {str(self.compileTypes[i]):40s}\\n\"\n )\n res += \"Failure:\" + (\"\\n\" + self.failureDesc).replace(\"\\n\", \"\\n \") + \"\\n\"\n\n return res\n\n\nclass TestTypedPythonAgainstCompiler(unittest.TestCase):\n \"\"\"Systematically compare typed_python, the interpreter, and the compiler.\n\n We rely on two main invariants. First, the compiler should cause\n us to produce the same outputs as we would get when we run typed_python code\n in the interpreter. Second, typed_python types are intended to work like their\n untyped counterparts: as long as all the datatypes are representable in equivalent\n forms between the untyped and typed versions, adding the typing shouldn't change\n the outcome.\n\n This test suite attempts to systematically verify that that is true. Because\n some functionality is not implemented, we provide functions to suppress errors\n when something doesn't work yet.\n \"\"\"\n\n def callOrException(self, f, *args):\n try:\n return f(*args)\n except Exception:\n return 'Exception'\n\n def checkOperation(self, op):\n scenarios = self.scenariosWithArgs(op, ())\n\n print(f\"Checking {op.name()} with {len(scenarios)} scenarios over {len(set(s.argTypes for s in scenarios))} signatures.\")\n\n failed = []\n for s in scenarios:\n if not s.check():\n failed.append(s)\n\n if failed:\n print(f\"Total failures: {len(failed)}\")\n for f in failed:\n print(f.failureDescription())\n\n if failed:\n self.assertFalse(True, \"Failures exist.\")\n\n def scenariosWithArgs(self, op, argTypes):\n if len(argTypes) < op.arity():\n scenarios = []\n\n for possibleArgType in op.subsetOfTypesWorthTesting(argTypes, allTypes):\n scenarios.extend(self.scenariosWithArgs(op, argTypes + (possibleArgType,)))\n\n return scenarios\n\n return self.scenariosWithArgsAndValues(op, argTypes, ())\n\n def scenariosWithArgsAndValues(self, op, argTypes, values):\n if len(values) < op.arity():\n scenarios = []\n\n for possibleValue in argTypes[len(values)].instances():\n scenarios.extend(self.scenariosWithArgsAndValues(op, argTypes, values + (possibleValue,)))\n\n return scenarios\n\n return self.scenariosWithArgsAndValuesAndUptypes(op, argTypes, values, ())\n\n def scenariosWithArgsAndValuesAndUptypes(self, op, argTypes, values, actualTypes):\n if len(actualTypes) < op.arity():\n scenarios = []\n\n for uptype in argTypes[len(actualTypes)].containingTypes():\n scenarios.extend(self.scenariosWithArgsAndValuesAndUptypes(op, argTypes, values, actualTypes + (uptype,)))\n\n return scenarios\n\n return [Scenario(op, argTypes, values, actualTypes)]\n\n 
def test_add(self):\n self.checkOperation(ArithmeticOperation(\"add\"))\n\n def test_mul(self):\n self.checkOperation(ArithmeticOperation(\"mul\"))\n\n def test_sub(self):\n self.checkOperation(ArithmeticOperation(\"sub\"))\n\n def test_truediv(self):\n self.checkOperation(ArithmeticOperation(\"truediv\"))\n\n def test_floordiv(self):\n self.checkOperation(ArithmeticOperation(\"floordiv\"))\n\n def test_mod(self):\n self.checkOperation(ArithmeticOperation(\"mod\"))\n\n # we are not currently getting the types of 'pow' right. int ** int should be int.\n @pytest.mark.skip\n def test_pow(self):\n self.checkOperation(ArithmeticOperation(\"pow\"))\n\n def test_lshift(self):\n self.checkOperation(ArithmeticOperation(\"lshift\"))\n\n # we have numerous failures here\n @pytest.mark.skip\n def test_rshift(self):\n self.checkOperation(ArithmeticOperation(\"rshift\"))\n\n def test_and(self):\n self.checkOperation(ArithmeticOperation(\"and\"))\n\n def test_or(self):\n self.checkOperation(ArithmeticOperation(\"or\"))\n\n def test_xor(self):\n self.checkOperation(ArithmeticOperation(\"xor\"))\n" ]
[ [ "numpy.isnan", "numpy.isneginf", "numpy.isinf", "numpy.isfinite" ] ]
epfl-lasa/rds
[ "574b3881dbaf4fdcd785dd96ba4c451928454b40" ]
[ "rds/script/trajectory_plot.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib.colors import LinearSegmentedColormap\n\nimport capsule_distance\n\ncapsule = capsule_distance.Capsule(0.18, -0.5, 0.45)\ncapsule_larger = capsule_distance.Capsule(0.18, -0.5, 0.5)\n\ndef cmap_map(function, cmap):\n \"\"\" Applies function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.\n This routine will break any discontinuous points in a colormap.\n \"\"\"\n cdict = cmap._segmentdata\n step_dict = {}\n # Firt get the list of points where the segments start or end\n for key in ('red', 'green', 'blue'):\n step_dict[key] = list(map(lambda x: x[0], cdict[key]))\n step_list = sum(step_dict.values(), [])\n step_list = np.array(list(set(step_list)))\n # Then compute the LUT, and apply the function to the LUT\n reduced_cmap = lambda step : np.array(cmap(step)[0:3])\n old_LUT = np.array(list(map(reduced_cmap, step_list)))\n new_LUT = np.array(list(map(function, old_LUT)))\n # Now try to make a minimal segment definition of the new LUT\n cdict = {}\n for i, key in enumerate(['red','green','blue']):\n this_cdict = {}\n for j, step in enumerate(step_list):\n if step in step_dict[key]:\n this_cdict[step] = new_LUT[j, i]\n elif new_LUT[j,i] != old_LUT[j, i]:\n this_cdict[step] = new_LUT[j, i]\n colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))\n colorvector.sort()\n cdict[key] = colorvector\n\n return LinearSegmentedColormap('colormap',cdict,1024)\n\ndef add_circles(X, Y, r, ax):\n\tfor i in range(X.shape[0]):\n\t\tcircle = plt.Circle((X[i], Y[i]), r, fill=False, color=\"k\")#[0.8,0.8,0.0])\n\t\tax.add_artist(circle)\n\ndef plot_center_index_range(xy_robot, orientation_robot, xy_reference, X_crowd, Y_crowd, X_crowd_ref, Y_crowd_ref,\n\tindex_range_bounds, distance_cutoff, ax, cm_past_future, baseline_method=False):\n\tsub_range = np.arange(index_range_bounds[0], index_range_bounds[1], 1)\n\txy_robot = xy_robot[sub_range, :]\n\torientation_robot = orientation_robot[sub_range]\n\txy_reference = xy_reference[sub_range, :]\n\tX_crowd = X_crowd[sub_range, :]\n\tY_crowd = Y_crowd[sub_range, :]\n\tX_crowd_ref = X_crowd_ref[sub_range, :]\n\tY_crowd_ref = Y_crowd_ref[sub_range, :]\n\tif True: #distance cutoff\n\t\tx_max = np.max(xy_robot[:, 0]) + distance_cutoff\n\t\tx_min = np.min(xy_robot[:, 0]) - distance_cutoff\n\t\ty_max = np.max(xy_robot[:, 1]) + distance_cutoff\n\t\ty_min = np.min(xy_robot[:, 1]) - distance_cutoff\n\t\tax.plot([x_max, x_max, x_min, x_min, x_max], [y_max, y_min, y_min, y_max, y_max], color=[0.8,0.8,0.8], linestyle='--')\n\t\tindices_close = np.logical_and(\n\t\t\tnp.logical_and((x_max > X_crowd), (x_min < X_crowd)),\n\t\t\tnp.logical_and((y_max > Y_crowd), (y_min < Y_crowd)))\n\t\tX_crowd[np.logical_not(indices_close)] = np.nan\n\t\tY_crowd[np.logical_not(indices_close)] = np.nan\n\t\tindices_close = np.logical_and(\n\t\t\tnp.logical_and((x_max > X_crowd_ref), (x_min < X_crowd_ref)),\n\t\t\tnp.logical_and((y_max > Y_crowd_ref), (y_min < Y_crowd_ref)))\n\t\tX_crowd_ref[np.logical_not(indices_close)] = np.nan\n\t\tY_crowd_ref[np.logical_not(indices_close)] = np.nan\n\tt_normalized = np.linspace(0.0, 1.0, xy_robot.shape[0])\n\tax.scatter(X_crowd.flatten(), Y_crowd.flatten(), c=np.repeat(t_normalized, X_crowd.shape[1]),\n\t\tcmap = cm_past_future)\n\tlime_color = [0.25,1.0,0.4]\n\t#ax.scatter(xy_robot[:, 0], xy_robot[:, 1], marker='o', edgecolors=\"k\", facecolors=\"None\", lw=2)\n\tax.scatter(xy_robot[:, 0], xy_robot[:, 1], c=t_normalized, cmap = 
cm_past_future)\n\t\n\t#ax.scatter(xy_reference[:, 0], xy_reference[:, 1], c=t_normalized, cmap = cm_past_future, marker='s', edgecolors='k')\n\n\tsub_sampler_ref = np.arange(0, xy_reference.shape[0], 4)\n\tif True: #show robot references\n\t\tax.scatter(xy_reference[sub_sampler_ref, 0], xy_reference[sub_sampler_ref, 1],\n\t\t\tc=t_normalized[sub_sampler_ref], cmap=cm_past_future, marker=\"s\")\n\t\t\t\t#facecolors=\"None\", edgecolors=cm_past_future(t_normalized[sub_sampler_ref]), marker=\"x\", lw=1)\n\t\tax.scatter(xy_reference[sub_sampler_ref, 0], xy_reference[sub_sampler_ref, 1],\n\t\t\tmarker='s', edgecolors='k', facecolors=\"None\")\n\n\tif False: #show pedestrian references\n\t\tax.scatter(X_crowd_ref[sub_sampler_ref, :].flatten(), Y_crowd_ref[sub_sampler_ref, :].flatten(),\n\t\t\tc=np.repeat(t_normalized[sub_sampler_ref], X_crowd.shape[1]),\n\t\t\tcmap = cm_past_future, marker=\"s\")\n\n\tindex_middle = xy_robot.shape[0]/2\n\tx_robot_middle = xy_robot[index_middle, 0]\n\ty_robot_middle = xy_robot[index_middle, 1]\n\torientation_robot_middle = orientation_robot[index_middle]\n\tcapsule.plot_at_pose(x_robot_middle, y_robot_middle, orientation_robot_middle, ax, orca=baseline_method, color=\"g\")\n\n\tadd_circles(np.transpose(X_crowd[index_middle, :]), np.transpose(Y_crowd[index_middle, :]), 0.3, ax)\n\n\ndef plot_robot_trajectory(xy_robot, xy_reference, ax):\n\tax.plot(xy_robot[:, 0], xy_robot[:, 1], color=\"g\", zorder=0)#, \"k\"), zorder=0)\n\tax.plot(xy_reference[:, 0], xy_reference[:, 1], color=\"g\", linestyle=\"--\")#, \"k--\"), zorder=0)\n\ntrajectories_rds = np.genfromtxt('../trajectories_rds.csv', delimiter=';')\ntrajectories_baseline = np.genfromtxt('../trajectories_baseline.csv', delimiter=';')\n\ntrajectories_both_cases = [trajectories_rds, trajectories_baseline]\n\nfig, axes = plt.subplots(2, 1, sharex=True, subplot_kw={\"adjustable\":'box-forced'})\nfig.subplots_adjust(hspace=0.025)\n\nfor k in [0, 1]:\n\tax = axes[k]\n\ttrajectories = trajectories_both_cases[k]\n\n\tsub_sampler = np.arange(0, trajectories.shape[0], 1)\n\ttrajectories = trajectories[sub_sampler, :]\n\n\tindices_x = np.arange(0, trajectories.shape[1] - 5, 4) + 5\n\tindices_y = indices_x + 1\n\tindices_x_ref = indices_x + 2\n\tindices_y_ref = indices_x + 3\n\tX = trajectories[:, indices_x]\n\tY = trajectories[:, indices_y]\n\tX_ref = trajectories[:, indices_x_ref]\n\tY_ref = trajectories[:, indices_y_ref]\n\n\tax.set_xlim([-3.2, 9])\n\tax.set_ylim([-4.5, 1])\n\n\tplot_robot_trajectory(trajectories[:, 0:2], trajectories[:, 3:5], ax)\n\n\tindex_ranges = [\n\t\t[trajectories.shape[0]*5/40, trajectories.shape[0]*9/40],\n\t\t[150, 190]]#,\n\t\t#[trajectories.shape[0]*13/20 - 25, trajectories.shape[0]*15/20 - 25]]\n\n\tcolormaps = [\n\t\t#cmap_map(lambda x: 1-x, cm.seismic),\n\t\tcmap_map(lambda x: 1-x, cm.BrBG),\n\t\tcmap_map(lambda x: 1-x, cm.seismic)]\n\n\tfor i in range(len(index_ranges)):\n\t\tplot_center_index_range(trajectories[:, 0:2], trajectories[:, 2], trajectories[:, 3:5],\n\t\t\tX, Y, X_ref, Y_ref, index_ranges[i], 1.5, ax, colormaps[i], baseline_method=(k==1))\n\n\tif k == 0:\n\t\tshift_x = -9.5\n\t\tshift_y = -4.75\n\t\tax.text(2.9+shift_x, 2.6+shift_y, \"1 m\")\n\t\tax.plot([2.75+shift_x,3.75+shift_x], [2.5+shift_y, 2.5+shift_y],'k', linewidth=1)\n\t\tax.plot([2.75+shift_x,2.75+shift_x], [2.45+shift_y, 2.55+shift_y],'k', linewidth=1)\n\t\tax.plot([3.75+shift_x,3.75+shift_x], [2.45+shift_y, 2.55+shift_y],'k', 
linewidth=1)\n\n\tax.set_aspect(\"equal\")\n\tax.xaxis.set_ticks([])\n\tax.xaxis.set_ticklabels([])\n\tax.yaxis.set_ticks([])\n\tax.yaxis.set_ticklabels([])\nplt.show()\n\nquit()\n\ndef plot_index_range(xy_robot, xy_reference, X_crowd, Y_crowd, index_range_bounds, distance_cutoff, ax, x_lim, y_lim):\n\tax.set_aspect(\"equal\")\n\tax.set_xlim(x_lim)\n\tax.set_ylim(y_lim)\n\tM = ax.transData.get_matrix()\n\tmarkersize_arg_pedestrians = (0.3*M[0,0])**2\n\n\tsub_range = np.arange(index_range_bounds[0], index_range_bounds[1], 1)\n\txy_robot = xy_robot[sub_range, :]\n\txy_reference = xy_reference[sub_range, :]\n\tX_crowd = X_crowd[sub_range, :]\n\tY_crowd = Y_crowd[sub_range, :]\n\tx_max = np.max(xy_robot[:, 0]) + distance_cutoff\n\tx_min = np.min(xy_robot[:, 0]) - distance_cutoff\n\ty_max = np.max(xy_robot[:, 1]) + distance_cutoff\n\ty_min = np.min(xy_robot[:, 1]) - distance_cutoff\n\tindices_close = np.logical_and(\n\t\tnp.logical_and((x_max > X_crowd), (x_min < X_crowd)),\n\t\tnp.logical_and((y_max > Y_crowd), (y_min < Y_crowd)))\n\tX_crowd[np.logical_not(indices_close)] = np.nan\n\tY_crowd[np.logical_not(indices_close)] = np.nan\n\tt_normalized = np.linspace(0.0, 1.0, xy_robot.shape[0])\n\tax.scatter(X_crowd.flatten(), Y_crowd.flatten(), c=np.repeat(t_normalized, X_crowd.shape[1]),\n\t\tcmap = cm.hot_r, s=markersize_arg_pedestrians)\n\tlime_color = [0.25,1.0,0.4]\n\t#ax.scatter(xy_robot[:, 0], xy_robot[:, 1], s=300, facecolors=\"None\", edgecolors=lime_color, lw=1)\n\tax.scatter(xy_robot[:, 0], xy_robot[:, 1], c=t_normalized, cmap = cm.hot_r, s=markersize_arg_pedestrians)\n\tax.scatter(xy_robot[:, 0], xy_robot[:, 1], facecolors=\"None\", edgecolors=lime_color, lw=0.1, s=markersize_arg_pedestrians)\n\tsub_sampler_ref = np.arange(0, xy_reference.shape[0], 4)\n\tax.scatter(xy_reference[sub_sampler_ref, 0], xy_reference[sub_sampler_ref, 1], c=t_normalized[sub_sampler_ref], cmap = cm.hot_r, marker=\"s\", edgecolors=lime_color)\n\tax.plot([x_max, x_max, x_min, x_min, x_max], [y_max, y_min, y_min, y_max, y_max], 'b--')\n\ntrajectories_rds = np.genfromtxt('../trajectories_rds.csv', delimiter=';')\n#trajectories_baseline = np.genfromtxt('../trajectories_baseline.csv', delimiter=';')\nsub_sampler = np.arange(0, trajectories_rds.shape[0], 1)\ntrajectories_rds = trajectories_rds[sub_sampler, :]\n\nindices_x = np.arange(0, trajectories_rds.shape[1] - 5, 4) + 5\nindices_y = indices_x + 1\nX = trajectories_rds[:, indices_x]\nY = trajectories_rds[:, indices_y]\n\nfig, ax = plt.subplots(1, 1)\nplot_index_range(trajectories_rds[:, 0:2], trajectories_rds[:, 3:5], X, Y,\n\t[0, trajectories_rds.shape[0]/5], 1.5, ax, [3, 11], [-8, -1])\n\nax.set_aspect(\"equal\")\nplt.show()\n\nquit()\n\n\nsub_sampler = np.arange(0, trajectories_rds.shape[0]/5, 1)\ntrajectories_rds = trajectories_rds[sub_sampler, :]\n\nt_normalized = np.linspace(0.0, 1.0, trajectories_rds.shape[0])\n#plt.scatter(trajectories_rds[:, 3], trajectories_rds[:, 4], facecolors=\"None\", edgecolors=cm.hot_r(t_normalized), lw=1)\n\nif True:\n\n\tindices_x = np.arange(0, trajectories_rds.shape[1] - 3, 4) + 3\n\tindices_y = indices_x + 1\n\tX = trajectories_rds[:, indices_x]\n\tY = trajectories_rds[:, indices_y]\n\tplt.scatter(X.flatten(), Y.flatten(), c = np.repeat(t_normalized, indices_x.shape[0]), cmap = cm.hot_r)#, edgecolors='k')\n\tax = plt.gca()\n\tax.set_xlim([-10, 10])\n\tax.set_ylim([-10, 10])\n\tax.set_aspect(\"equal\")\n\nif True:\n\tlime_color = [0.25,1.0,0.4]\n\tplt.scatter(trajectories_rds[:, 3], trajectories_rds[:, 4], c = t_normalized, cmap 
= cm.hot_r, marker=\"s\", edgecolors=lime_color)#, lw=1)\n\tplt.scatter(trajectories_rds[:, 0], trajectories_rds[:, 1], c = t_normalized, cmap = cm.hot_r, edgecolors=lime_color)#, lw=1)\n\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.logical_not", "numpy.linspace", "matplotlib.pyplot.scatter", "numpy.min", "numpy.arange", "matplotlib.colors.LinearSegmentedColormap", "numpy.logical_and", "matplotlib.pyplot.subplots", "numpy.genfromtxt", "matplotlib.pyplot.Circle", "numpy.max", "numpy.transpose", "numpy.repeat", "matplotlib.pyplot.show" ] ]
SamBoutin/majorana-rgf-grape
[ "3233627125519a3cc36b1d167c00d39b54abc48d", "3233627125519a3cc36b1d167c00d39b54abc48d" ]
[ "setup.py", "rgf_grape/optimization/wireOptimizer.py" ]
[ "\"\"\"\nThis file is part of the rgf_grape python package.\nCopyright (C) 2017-2018 S. Boutin\nFor details of the rgf_grape algorithm and applications see:\nS. Boutin, J. Camirand Lemyre, and I. Garate, Majorana bound state engineering \nvia efficient real-space parameter optimization, ArXiv 1804.03170 (2018).\n\"\"\"\n\nimport numpy\nfrom distutils.core import setup\nfrom Cython.Build import cythonize\nfrom distutils.extension import Extension\n\noptim_path = 'rgf_grape/optimization/'\noptim_files = [optim_path + '_functionsDefinitions.pyx',\n optim_path + 'fctDefs.cpp']\n\nrgf_path = 'rgf_grape/rgf/'\nrgf_files = [rgf_path+'_rgf_solver.pyx', rgf_path+'rgf_utils.cpp']\n\nextensions = [Extension(\"rgf_grape.optimization._functionsDefinitions\",\n optim_files,\n include_dirs=['.', numpy.get_include()],\n language=\"c++\",\n extra_compile_args=[\"-O3\", \"-Wall\"],\n extra_link_args=['-g']\n ),\n Extension(\"rgf_grape.rgf._rgf_solver\", rgf_files,\n include_dirs=['.', numpy.get_include()],\n language=\"c++\",\n extra_compile_args=[\"-O3\", \"-Wall\"],\n extra_link_args=['-g']\n )]\n\nsetup(\n ext_modules=cythonize(extensions)\n)\n", "\"\"\"\nThis file is part of the rgf_grape python package.\nCopyright (C) 2017-2018 S. Boutin\nFor details of the rgf_grape algorithm and applications see:\nS. Boutin, J. Camirand Lemyre, and I. Garate, Majorana bound state engineering \nvia efficient real-space parameter optimization, ArXiv 1804.03170 (2018).\n\"\"\"\n\nimport numpy as np\nimport scipy.optimize\n\nclass StopOptimizingException(Exception):\n pass\n\nclass TakeStepBH(object):\n def __init__(self, stepsize=1, scaling=None, bounds=None, dim=1):\n self.stepsize = stepsize\n self.dim =dim\n if scaling is None:\n self.scaling = np.ones(dim)\n else:\n assert np.array(scaling).size == dim\n self.scaling = scaling\n if bounds is None:\n self.lb = -np.inf*np.ones(dims)\n self.ub = np.inf*np.ones(dims)\n else: \n assert len(bounds) == dim\n self.lb = np.array([(b[0] if b[0] is not None else -np.inf) for b in bounds])\n self.ub = np.array([(b[1] if b[1] is not None else np.inf) for b in bounds])\n\n def __call__(self, x):\n s = self.stepsize\n assert self.dim == x.size\n rnd = self.scaling*np.random.uniform(-s, s, self.dim)\n new_x = x+rnd\n mask_lb = new_x < self.lb\n new_x[mask_lb] = self.lb[mask_lb]*(1+np.sign(self.lb[mask_lb])*1e-6)\n mask_ub = new_x > self.ub\n new_x[mask_ub] = self.ub[mask_ub]*(1-np.sign(self.ub[mask_ub])*1e-6)\n return new_x\n\n\nclass WireOptimizer(object):\n def __init__(self, wire, cost, update, grad, filter, args={}):\n self.wire = wire\n self.cost = cost\n self.grad = grad\n if 'controls' not in args:\n args['controls'] = []\n self.update = update\n self.filter = filter\n self.args = args\n\n @classmethod\n def from_config(cls, params):\n return cls(\n params.wire,\n cost=params.cost_function,\n grad=params.grad_function,\n update=params.update_function,\n filter=params.filter_function,\n args=params.cost_function_args\n )\n\n def optFunc(self, x, args):\n args = self.updateArgs(x, args)\n val, detr = self.cost(args)\n args['results']['fun'] = val\n return val\n\n def optGrad(self, x, args):\n args = self.updateArgs(x, args)\n grad, val = self.grad(args)\n return self.filter(x, grad, args)\n\n def optFuncAndGrad(self, x, args):\n args = self.updateArgs(x, args)\n grad, val = self.grad(args)\n args['results']['fun'] = val\n gradFilter = self.filter(args['x'], grad, args)\n return val, gradFilter\n\n def updateArgs(self, x, args):\n args['x'] = x\n args['wOpt'] = self\n 
self.update(x, args)\n        args['sol'] = self.wire.make_RGF_solver()\n        return args\n\n    def minimize(\n        self, x0, bnd=None, args={}, opts={'iprint': 1},\n        callback=None, optMethod='l-bfgs-b',\n        basinhopping=False, cbBH=None, bh_kwargs={},\n        constraints=()\n    ):\n        args = self.updateArgs(x0, args)\n        kwargs = {\n            'args': args, 'method': optMethod,\n            'bounds': bnd, 'options': opts, 'callback': callback\n        }\n        if self.grad is None:\n            fun = self.optFunc\n        else:\n            kwargs['jac'] = True\n            fun = self.optFuncAndGrad\n        if len(constraints) > 0:\n            kwargs['method'] = 'SLSQP'\n            kwargs['constraints'] = constraints\n        if basinhopping:\n            step = bh_kwargs['stepsize']\n            takeStep = TakeStepBH(step, bounds=bnd, dim=x0.size)\n            try:\n                res = scipy.optimize.basinhopping(\n                    fun, x0, minimizer_kwargs=kwargs,\n                    take_step=takeStep,\n                    callback=cbBH, **bh_kwargs\n                )\n            except StopOptimizingException:\n                res = callback.args['fun']\n        else:\n            try:\n                res = scipy.optimize.minimize(fun, x0, **kwargs)\n            except StopOptimizingException:\n                res = callback.args['fun']\n        return res\n\n    def test_gradient(self, x0, filters=None, grads=None, args={}):\n        print('testing gradient calculation')\n        res = []\n        fb = self.filter\n        gb = self.grad\n        if filters is None:\n            filters = [lambda x, y, z: y, self.filter]\n        if grads is None:\n            grads = [finiteDiff, self.grad]\n        for f, g in zip(filters, grads):\n            print('In test grad loop')\n            self.grad = g\n            self.filter = f\n            res += [self.optGrad(x0, args)]\n        assert len(res[0]) == len(res[1])\n        for i, (g1, g2) in enumerate(zip(res[0], res[1])):\n            print(i, g1, g2, g1 / g2)\n        self.filter = fb\n        self.grad = gb\n\n\ndef finiteDiff(args, eps=1e-6):\n    # Central-difference gradient of the configured cost with respect to args['x'].\n    # The original module referenced finiteDiff in test_gradient without defining it\n    # anywhere; this implementation is an assumed reconstruction of that helper.\n    wOpt = args['wOpt']\n    x0 = np.array(args['x'], dtype=float)\n    val, _ = wOpt.cost(wOpt.updateArgs(x0, args))\n    grad = np.zeros_like(x0)\n    for k in range(x0.size):\n        xp = x0.copy()\n        xm = x0.copy()\n        xp[k] += eps\n        xm[k] -= eps\n        fp, _ = wOpt.cost(wOpt.updateArgs(xp, args))\n        fm, _ = wOpt.cost(wOpt.updateArgs(xm, args))\n        grad[k] = (fp - fm) / (2.0 * eps)\n    wOpt.updateArgs(x0, args)  # restore the original parameters\n    return grad, val\n" ]
[ [ "numpy.get_include" ], [ "numpy.sign", "numpy.random.uniform", "numpy.array", "numpy.ones" ] ]
AlexWang1996/classfication_utilities
[ "9b8fb19c99a1ba8503b6117401f1dce7840ce4a5" ]
[ "vgg16.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\n\ndef vgg16(images, classes):\n\n # conv1_1\n with tf.name_scope('conv1_1') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv1_1 = tf.nn.relu(out, name=scope)\n\n # conv1_2\n with tf.name_scope('conv1_2') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv1_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv1_2 = tf.nn.relu(out, name=scope)\n\n # pool1\n pool1 = tf.nn.max_pool(conv1_2,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME',name='pool1')\n\n # conv2_1\n with tf.name_scope('conv2_1') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv2_1 = tf.nn.relu(out, name=scope)\n\n # conv2_2\n with tf.name_scope('conv2_2') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv2_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv2_2 = tf.nn.relu(out, name=scope)\n\n # pool2\n pool2 = tf.nn.max_pool(conv2_2,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME',name='pool2')\n\n # conv3_1\n with tf.name_scope('conv3_1') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv3_1 = tf.nn.relu(out, name=scope)\n\n # conv3_2\n with tf.name_scope('conv3_2') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv3_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv3_2 = tf.nn.relu(out, name=scope)\n\n # conv3_3\n with tf.name_scope('conv3_3') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv3_2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv3_3 = tf.nn.relu(out, name=scope)\n\n # pool3\n pool3 = tf.nn.max_pool(conv3_3,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME',name='pool3')\n\n # conv4_1\n with tf.name_scope('conv4_1') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], 
padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv4_1 = tf.nn.relu(out, name=scope)\n\n # conv4_2\n with tf.name_scope('conv4_2') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv4_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv4_2 = tf.nn.relu(out, name=scope)\n\n # conv4_3\n with tf.name_scope('conv4_3') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv4_2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv4_3 = tf.nn.relu(out, name=scope)\n\n # pool4\n pool4 = tf.nn.max_pool(conv4_3,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME',name='pool4')\n\n # conv5_1\n with tf.name_scope('conv5_1') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool4, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv5_1 = tf.nn.relu(out, name=scope)\n\n # conv5_2\n with tf.name_scope('conv5_2') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv5_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv5_2 = tf.nn.relu(out, name=scope)\n\n # conv5_3\n with tf.name_scope('conv5_3') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv5_2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv5_3 = tf.nn.relu(out, name=scope)\n\n # pool5\n pool5 = tf.nn.max_pool(conv5_3,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME',name='pool4')\n\n # fc1\n with tf.name_scope('fc1') as scope:\n shape = int(np.prod(pool5.get_shape()[1:]))\n fc1w = tf.Variable(tf.truncated_normal([shape, 4096],dtype=tf.float32,stddev=1e-1), name='weights')\n fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),trainable=True, name='biases')\n pool5_flat = tf.reshape(pool5, [-1, shape])\n fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(fc1l)\n\n # fc2\n with tf.name_scope('fc2') as scope:\n fc2w = tf.Variable(tf.truncated_normal([4096, 4096],dtype=tf.float32,stddev=1e-1), name='weights')\n fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),trainable=True, name='biases')\n fc2l = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)\n fc2 = tf.nn.relu(fc2l)\n\n # fc3\n with tf.name_scope('fc3') as scope:\n fc3w = tf.Variable(tf.truncated_normal([4096, classes],dtype=tf.float32,stddev=1e-1), name='weights')\n fc3b = tf.Variable(tf.constant(1.0, shape=[classes], dtype=tf.float32),trainable=True, name='biases')\n fc3l = tf.nn.bias_add(tf.matmul(fc2, fc3w), fc3b, 
name='output')\n\n return fc3l" ]
[ [ "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.constant", "tensorflow.nn.max_pool", "tensorflow.reshape", "tensorflow.name_scope", "tensorflow.nn.conv2d" ] ]
phycept/relation-autoencoder
[ "7a43d4f1ea3a4155181712394b08a82c31c54db3", "7a43d4f1ea3a4155181712394b08a82c31c54db3" ]
[ "learning/models/decoders/SelectionalPreferences.py", "learning/NegativeExampleGenerator.py" ]
[ "__author__ = 'enfry'\n\nimport math\nimport theano\nfrom definitions import settings\nimport numpy as np\nimport theano.tensor as T\nimport cPickle as pickle\n\nclass SelectionalPreferences(object):\n\n def __init__(self, rng, embedSize, relationNum, argVocSize, data, ex_emb):\n\n self.k = embedSize\n self.r = relationNum\n self.a = argVocSize\n\n a = self.a\n k = self.k\n r = self.r\n\n\n # Selectional Preferences\n Ca1NP = np.asarray(rng.normal(0, math.sqrt(0.1), size=(k, r)), dtype=theano.config.floatX)\n Ca2NP = np.asarray(rng.normal(0, math.sqrt(0.1), size=(k, r)), dtype=theano.config.floatX)\n self.C1 = theano.shared(value=Ca1NP, name='C1')\n self.C2 = theano.shared(value=Ca2NP, name='C2')\n\n # argument embeddings\n ANP = np.asarray(rng.uniform(-0.01, 0.01, size=(a, k)), dtype=theano.config.floatX) # @UndefinedVariable\n\n if ex_emb:\n import gensim\n external_embeddings = gensim.models.Word2Vec.load(settings.external_embeddings_path)\n\n for idArg in xrange(self.a):\n arg = data.id2Arg[idArg].lower().split(' ')\n new = np.zeros(k, dtype=theano.config.floatX)\n size = 0\n for ar in arg:\n if ar in external_embeddings:\n new += external_embeddings[ar]\n size += 1\n if size > 0:\n ANP[idArg] = new/size\n\n self.A = theano.shared(value=ANP, name='A') # (a1, k)\n\n self.Ab = theano.shared(value=np.zeros(a, dtype=theano.config.floatX), # @UndefinedVariable\n name='Ab', borrow=True)\n\n self.params = [self.A, self.C1, self.C2, self.Ab]\n\n\n\n\n\n def leftMostFactorization(self, batchSize, args, wC1):\n l = batchSize\n k = self.k # embed size\n r = self.r # relation number\n argEmbeds = self.A[args.flatten()]\n Afirst = T.batched_dot(wC1, argEmbeds)\n return Afirst\n\n def rightMostFactorization(self, batchSize, args, wC2):\n l = batchSize\n k = self.k # embed size\n r = self.r # relation number\n argEmbeds2 = self.A[args.flatten()]\n Asecond = T.batched_dot(wC2, argEmbeds2)\n return Asecond\n\n\n\n def negLeftMostFactorization(self, batchSize, negEmbed, wC1):\n # l = batchSize\n # k = self.k # embed size\n # r = self.r # relation number\n Afirst = T.batched_tensordot(wC1, negEmbed.dimshuffle(1, 2, 0), axes=[[1], [1]]) # [l,k] [l,k,n] = [l,n]\n return Afirst\n\n def negRightMostFactorization(self, batchSize, negEmbed, wC2):\n # l = batchSize\n # k = self.k # embed size\n # r = self.r # relation number\n Asecond = T.batched_tensordot(wC2, negEmbed.dimshuffle(1, 2, 0), axes=[[1], [1]]) # [l,k] [l,k,n] = [l,n]\n return Asecond\n\n\n\n def getScores(self, args1, args2, l, n, relationProbs, neg1, neg2, entropy):\n weightedC1= T.dot(relationProbs, self.C1.dimshuffle(1, 0))\n weightedC2= T.dot(relationProbs, self.C2.dimshuffle(1, 0))\n\n left1 = self.leftMostFactorization(batchSize=l, args=args1, wC1=weightedC1)\n right1 = self.rightMostFactorization(batchSize=l, args=args2, wC2=weightedC2)\n one = left1 + right1\n\n u = T.concatenate([one + self.Ab[args1], one + self.Ab[args2]])\n logScoresP = T.log(T.nnet.sigmoid(u))\n allScores = logScoresP\n allScores = T.concatenate([allScores, entropy, entropy])\n\n negembed1 = self.A[neg1.flatten()].reshape((n, l, self.k))\n negembed2 = self.A[neg2.flatten()].reshape((n, l, self.k))\n negative1 = self.negLeftMostFactorization(batchSize=l,\n negEmbed=negembed1,\n wC1=weightedC1)\n negative2 = self.negRightMostFactorization(batchSize=l,\n negEmbed=negembed2,\n wC2=weightedC2)\n\n negOne = negative1.dimshuffle(1, 0) + right1\n negTwo = negative2.dimshuffle(1, 0) + left1\n g = T.concatenate([negOne + self.Ab[neg1], negTwo + self.Ab[neg2]])\n logScores = 
T.log(T.nnet.sigmoid(-g))\n allScores = T.concatenate([allScores, logScores.flatten()])\n\n return allScores\n\n\n", "__author__ = 'diego'\n\n\nimport numpy as np\n\n\nclass NegativeExampleGenerator(object):\n def __init__(self, rand, negSamplingCum):\n # negSamplingCum: list of float, for faster sample\n self._rand = rand\n self._negSamplingCum = negSamplingCum\n # self._neg2SamplingCum = neg2SamplingCum\n# self._negSamplingDistrPower = negSamplingDistrPower\n# self._compute_unigram_distribution()\n\n def _univariate_distr_sample(self, sampleSize=1):\n return [self._negSamplingCum.searchsorted(self._rand.uniform(0, self._negSamplingCum[-1]))\n for i in xrange(0, sampleSize)]\n\n def generate_random_negative_example(self, positiveArgs, negativeExampleNum):\n l = positiveArgs.shape[0] # number of positive instances\n n = negativeExampleNum # number of negative examples generated per instance\n\n negativeArgs = np.zeros((n, l), dtype=np.int32)\n # Todo: change to concatenate\n for instance_idx in xrange(l):\n samples = self._univariate_distr_sample(n)\n for negNum_idx in xrange(n):\n negativeArgs[negNum_idx, instance_idx] = samples[negNum_idx]\n return negativeArgs\n" ]
[ [ "numpy.zeros" ], [ "numpy.zeros" ] ]
kochanczyk/PyBNF
[ "d691d1a043b11b454f1317158067b65a47c6714b" ]
[ "tests/test_simplex.py" ]
[ "from .context import data, algorithms, pset, objective, config\nimport numpy as np\nfrom copy import deepcopy\n\nfrom shutil import rmtree\n\n\nclass TestSimplex:\n\n @classmethod\n def setup_class(cls):\n cls.data1s = [\n '# time v1_result v2_result v3_result\\n',\n ' 1 2.1 3.1 6.1\\n',\n ]\n cls.d1s = data.Data()\n cls.d1s.data = cls.d1s._read_file_lines(cls.data1s, '\\s+')\n\n cls.config = config.Configuration({\n 'population_size': 2, 'max_iterations': 20, 'fit_type': 'sim', 'simplex_start_step': 1.0,\n 'simplex_reflection': 1., 'simplex_expansion': 1., 'simplex_contraction': 0.5, 'simplex_shrink': 0.5,\n ('var', 'v1__FREE'): [2.], ('var', 'v2__FREE'): [3.], ('var', 'v3__FREE'): [4.],\n 'models': {'bngl_files/parabola.bngl'}, 'exp_data': {'bngl_files/par1.exp'}, 'initialization': 'lh',\n 'bngl_files/parabola.bngl': ['bngl_files/par1.exp']})\n\n cls.logconfig = config.Configuration({\n 'population_size': 2, 'max_iterations': 20, 'fit_type': 'sim', 'simplex_start_step': 1.0,\n 'simplex_reflection': 1., 'simplex_expansion': 1., 'simplex_contraction': 0.5, 'simplex_shrink': 0.5,\n ('logvar', 'v1__FREE'): [2.], ('logvar', 'v2__FREE'): [3.], ('logvar', 'v3__FREE'): [4.],\n 'models': {'bngl_files/parabola.bngl'}, 'exp_data': {'bngl_files/par1.exp'}, 'initialization': 'lh',\n 'bngl_files/parabola.bngl': ['bngl_files/par1.exp']})\n\n @classmethod\n def teardown_class(cls):\n rmtree('pybnf_output')\n\n def test_start(self):\n sim = algorithms.SimplexAlgorithm(deepcopy(self.config))\n sim.variables.sort() # Required for Python <= 3.5 to be sure we are checking the correct indices in the simplex\n first = sim.start_run()\n assert len(first) == 4\n assert first[0]['v1__FREE'] == 2.\n assert first[3]['v3__FREE'] == 5.\n\n def test_updates(self):\n sim = algorithms.SimplexAlgorithm(deepcopy(self.config))\n sim.variables.sort() # Required for Python <= 3.5 to be sure we are checking the correct indices in the simplex\n first = sim.start_run()\n next_params = []\n for p, score in zip(first, [5., 7., 8., 6.]):\n res = algorithms.Result(p, self.data1s, p.name)\n res.score = score\n next_params += sim.got_result(res)\n\n assert len(next_params) == 2\n # First point should be the reflection of the worst PSet, where v2 += 1\n # M = (2 1/3, 3, 4 1/3)\n np.testing.assert_almost_equal(next_params[0]['v1__FREE'], 8./3.)\n # Second point should be reflection of v1\n # M = (2, 3 1/3, 4 1/3)\n np.testing.assert_almost_equal(next_params[1]['v1__FREE'], 1.)\n\n res = algorithms.Result(next_params[1], self.data1s, next_params[1].name)\n res.score = 5.5\n # Case 2 - take it, do nothing else.\n p2_1 = sim.got_result(res)\n assert p2_1 == []\n res = algorithms.Result(next_params[0], self.data1s, next_params[0].name)\n res.score = 4.5\n # Case 1 - pick an expansion point\n p2_2 = sim.got_result(res)\n assert len(p2_2) == 1\n # print(sim.centroids)\n # print(p2_2)\n np.testing.assert_almost_equal(p2_2[0]['v1__FREE'], 3.)\n np.testing.assert_almost_equal(p2_2[0]['v2__FREE'], 1.)\n\n res = algorithms.Result(p2_2[0], self.data1s, p2_2[0].name)\n res.score = 4.75\n iter_2 = sim.got_result(res)\n assert len(iter_2) == 2\n assert sim.simplex == [(4.5, next_params[0]), (5., first[0]), (5.5, next_params[1]), (6., first[3])]\n # ((2 2/3, 2, 4 2/3), (2,3,4), (1, 3 2/3, 4 2/3), (2,3,5))\n\n # Next iteration. 
This time, we'll give non-improvements, forcing a simplex shrink.\n\n # iter2[0] - modifies (2,3,5)\n # M = (1 8/9, 2 8/9, 4 4/9)\n np.testing.assert_almost_equal(iter_2[0]['v1__FREE'], 1 + 7./9.)\n np.testing.assert_almost_equal(iter_2[0]['v2__FREE'], 2. + 7./9.)\n\n res = algorithms.Result(iter_2[0], self.data1s, iter_2[0].name)\n res.score = 5.9\n p3_1 = sim.got_result(res)\n # Should move half way back to the centroid\n np.testing.assert_almost_equal(p3_1[0]['v1__FREE'], 1 + 15./18.)\n np.testing.assert_almost_equal(p3_1[0]['v2__FREE'], 2. + 15./18.)\n res = algorithms.Result(p3_1[0], self.data1s, p3_1[0].name)\n res.score = 10.\n nothing = sim.got_result(res)\n\n # iter2[1] modifies (1, 3 2/3, 4 2/3)\n # M = (2 2/9, 2 2/3, 4 5/9)\n res = algorithms.Result(iter_2[1], self.data1s, iter_2[1].name)\n res.score = 5.9\n p3_2 = sim.got_result(res)\n # Worse than original, should move to halfway to the centroid.\n np.testing.assert_almost_equal(p3_2[0]['v1__FREE'], 1. + 11. / 18.)\n np.testing.assert_almost_equal(p3_2[0]['v2__FREE'], 3. + 1. / 6.)\n res = algorithms.Result(p3_2[0], self.data1s, p3_2[0].name)\n res.score = 10.\n final_ps = sim.got_result(res)\n\n # After replacement, we have\n # ((2 2/3, 2, 4 2/3), (2,3,4), (1, 3 2/3, 4 2/3), (1 7/9, 2 7/9, 3 8/9))\n\n # Iteration was unproductive, so expect to shrink, should have gotten 3 new psets to complete that shrink\n assert len(final_ps) == 3\n np.testing.assert_almost_equal(final_ps[0]['v1__FREE'], 7. / 3.)\n np.testing.assert_almost_equal(final_ps[1]['v2__FREE'], 17. / 6.)\n np.testing.assert_almost_equal(final_ps[2]['v3__FREE'], 77. / 18.)\n\n def test_start_log(self):\n sim = algorithms.SimplexAlgorithm(deepcopy(self.logconfig))\n sim.variables.sort() # Required for Python <= 3.5 to be sure we are checking the correct indices in the simplex\n first = sim.start_run()\n assert len(first) == 4\n np.testing.assert_almost_equal(first[0]['v1__FREE'], 10.**2.)\n np.testing.assert_almost_equal(first[3]['v3__FREE'], 10.**5.)\n\n def test_updates_log(self):\n \"\"\" The above test should also work identically in log space\"\"\"\n sim = algorithms.SimplexAlgorithm(deepcopy(self.logconfig))\n sim.variables.sort() # Required for Python <= 3.5 to be sure we are checking the correct indices in the simplex\n first = sim.start_run()\n next_params = []\n for p, score in zip(first, [5., 7., 8., 6.]):\n res = algorithms.Result(p, self.data1s, p.name)\n res.score = score\n next_params += sim.got_result(res)\n\n assert len(next_params) == 2\n # First point should be the reflection of the worst PSet, where v2 += 1\n # M = (2 1/3, 3, 4 1/3)\n np.testing.assert_almost_equal(next_params[0]['v1__FREE'], 10**(8. 
/ 3.))\n # Second point should be reflection of v1\n # M = (2, 3 1/3, 4 1/3)\n np.testing.assert_almost_equal(next_params[1]['v1__FREE'], 10**1.)\n\n res = algorithms.Result(next_params[1], self.data1s, next_params[1].name)\n res.score = 5.5\n # Case 2 - take it, do nothing else.\n p2_1 = sim.got_result(res)\n assert p2_1 == []\n res = algorithms.Result(next_params[0], self.data1s, next_params[0].name)\n res.score = 4.5\n # Case 1 - pick an expansion point\n p2_2 = sim.got_result(res)\n assert len(p2_2) == 1\n # print(sim.centroids)\n # print(p2_2)\n np.testing.assert_almost_equal(p2_2[0]['v1__FREE'], 10**3.)\n np.testing.assert_almost_equal(p2_2[0]['v2__FREE'], 10**1.)\n\n res = algorithms.Result(p2_2[0], self.data1s, p2_2[0].name)\n res.score = 4.75\n iter_2 = sim.got_result(res)\n assert len(iter_2) == 2\n assert sim.simplex == [(4.5, next_params[0]), (5., first[0]), (5.5, next_params[1]), (6., first[3])]\n # ((2 2/3, 2, 4 2/3), (2,3,4), (1, 3 2/3, 4 2/3), (2,3,5))\n\n # Next iteration. This time, we'll give non-improvements, forcing a simplex shrink.\n\n # iter2[0] - modifies (2,3,5)\n # M = (1 8/9, 2 8/9, 4 4/9)\n np.testing.assert_almost_equal(iter_2[0]['v1__FREE'], 10**(1 + 7. / 9.))\n np.testing.assert_almost_equal(iter_2[0]['v2__FREE'], 10**(2. + 7. / 9.))\n\n res = algorithms.Result(iter_2[0], self.data1s, iter_2[0].name)\n res.score = 5.9\n p3_1 = sim.got_result(res)\n # Should move half way back to the centroid\n np.testing.assert_almost_equal(p3_1[0]['v1__FREE'], 10**(1 + 15. / 18.))\n np.testing.assert_almost_equal(p3_1[0]['v2__FREE'], 10**(2. + 15. / 18.))\n res = algorithms.Result(p3_1[0], self.data1s, p3_1[0].name)\n res.score = 10.\n nothing = sim.got_result(res)\n\n # iter2[1] modifies (1, 3 2/3, 4 2/3)\n # M = (2 2/9, 2 2/3, 4 5/9)\n res = algorithms.Result(iter_2[1], self.data1s, iter_2[1].name)\n res.score = 5.9\n p3_2 = sim.got_result(res)\n # Worse than original, should move to halfway to the centroid.\n np.testing.assert_almost_equal(p3_2[0]['v1__FREE'], 10**(1. + 11. / 18.))\n np.testing.assert_almost_equal(p3_2[0]['v2__FREE'], 10**(3. + 1. / 6.))\n res = algorithms.Result(p3_2[0], self.data1s, p3_2[0].name)\n res.score = 10.\n final_ps = sim.got_result(res)\n\n # After replacement, we have\n # ((2 2/3, 2, 4 2/3), (2,3,4), (1, 3 2/3, 4 2/3), (1 7/9, 2 7/9, 3 8/9))\n\n # Iteration was unproductive, so expect to shrink, should have gotten 3 new psets to complete that shrink\n assert len(final_ps) == 3\n np.testing.assert_almost_equal(final_ps[0]['v1__FREE'], 10**(7. / 3.))\n np.testing.assert_almost_equal(final_ps[1]['v2__FREE'], 10**(17. / 6.))\n np.testing.assert_almost_equal(final_ps[2]['v3__FREE'], 10**(77. / 18.))\n" ]
[ [ "numpy.testing.assert_almost_equal" ] ]
guoyii/ECG
[ "ef96d5e3291fd6abd0d2c6a2f3d433d2d7336d2e" ]
[ "Frequency/main_mit.py" ]
[ "from sklearn.decomposition import PCA\nfrom sklearn.externals import joblib\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\nfrom utils import read_dataPath\nfrom utils import read_data\nfrom utils import myfft\nfrom utils import mydwt\nfrom utils import mywavedec\nimport numpy as np\nimport h5py\nimport time\nimport os\n\nresult_path = \"V:/users/gy/MyProject/ECG/result\" \ndata_path = \"V:/users/gy/data/ECG/MITBIH_ECG\"\ndata_folder = \"sample\" \nn_components = 500\nmodel_pca_name = \"model_pca_fft+dwt_\"\nmodel_Lsvc_name = \"model_lsvc_fft+dwt_\"\n\n\nif not os.path.isdir(result_path + \"/Data_H5\"): \n os.makedirs(result_path + \"/Data_H5\")\ndata_h5_path = result_path + \"/Data_H5\" \n\n\nif os.path.exists(data_h5_path + \"/data_src.h5\"): \n o = h5py.File(data_h5_path + \"/data_src.h5\", 'r')\n X_src = np.array(o['X'][:])\n labels = np.array(o['labels'][:])\n o.close()\nelse:\n X_src, labels = read_dataPath(data_path) \n X_src, labels = read_data(X_src, labels) \n f = h5py.File(data_h5_path + \"/data_src.h5\", 'w')\n f.create_dataset('X', data=X_src)\n f.create_dataset('labels', data=labels)\n f.close()\n\n\ndata_name = \"data_fft.h5\"\nif os.path.exists(data_h5_path + \"/\" +data_name): \n o = h5py.File(data_h5_path + \"/\" +data_name, 'r')\n X_fft = np.array(o['X_fft'][:])\n labels = np.array(o['labels'][:])\n o.close()\nelse:\n X_fft = myfft(X_src[:])\n f = h5py.File(data_h5_path + \"/\" + data_name, 'w')\n f.create_dataset('X_fft', data=X_fft)\n f.create_dataset('labels', data=labels)\n f.close()\n\n\ndata_name = \"data_dwt.h5\"\nif os.path.exists(data_h5_path + \"/\" +data_name): \n o = h5py.File(data_h5_path + \"/\" +data_name, 'r')\n X_dwt = np.array(o['X_dwt'][:])\n labels = np.array(o['labels'][:])\n o.close()\nelse:\n X_dwt = mydwt(X_src[:])\n f = h5py.File(data_h5_path + \"/\" + data_name, 'w')\n f.create_dataset('X_dwt', data=X_dwt)\n f.create_dataset('labels', data=labels)\n f.close()\n\n\ndata_name = \"data_wavedec.h5\"\nif os.path.exists(data_h5_path + \"/\" +data_name): \n o = h5py.File(data_h5_path + \"/\" +data_name, 'r')\n X_wavedec = np.array(o['X_wavedec'][:])\n labels = np.array(o['labels'][:])\n o.close()\nelse:\n X_wavedec = mywavedec(X_src[:])\n f = h5py.File(data_h5_path + \"/\" + data_name, 'w')\n f.create_dataset('X_wavedec', data=X_wavedec)\n f.create_dataset('labels', data=labels)\n f.close()\n\n##////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\nX = np.hstack((X_fft, X_dwt))\nprint(\"X.shape:\", X.shape)\n\n##////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\nif not os.path.isdir(result_path + \"/model_pca\"):\n os.makedirs(result_path + \"/model_pca\")\nmodel_pca_path = result_path + \"/model_pca\"\n\n\nif os.path.exists(model_pca_path + \"/\" + model_pca_name + str(n_components) +\".pkl\"): \n model_pca = joblib.load(model_pca_path + \"/\" + model_pca_name + str(n_components) +\".pkl\") \nelse:\n start = time.clock()\n model_pca =PCA(n_components=n_components) \n model_pca.fit(X) \n joblib.dump(model_pca, model_pca_path + \"/\" + model_pca_name + str(n_components) +\".pkl\")\n print(\"Save model:{}\".format(model_pca_name + str(n_components) +\".pkl\"))\n print(\"Time: %g s <==> %g min\"%(time.clock() - start, (time.clock() - start)/60)) 
\nprint(\"Success:{}\".format(model_pca_name + str(n_components) +\".pkl\")) \n\nX_pca = model_pca.transform(X) \n\n\nss = StandardScaler()\nX = ss.fit_transform(X_pca)\nprint(\"Done!\")\nprint(\"Dataset shape: \", X.shape) \n\nX_train, X_test, label_train, label_test = train_test_split(X, labels, test_size = 0.2, random_state = 0)\nprint(\"Train: data.shape:{} label.shape:{}\".format(X_train.shape, label_train.shape))\nprint(\"Test: data.shape:{} label.shape:{}\".format(X_test.shape, label_test.shape))\n\n\nif not os.path.isdir(result_path + \"/model_LinearSVC\"): \n os.makedirs(result_path + \"/model_LinearSVC\")\nmodel_Lsvc_path = result_path + \"/model_LinearSVC\"\n\n\nif os.path.exists(model_Lsvc_path + \"/\" +model_Lsvc_name + str(n_components) +\".pkl\"): \n model_lsvc = joblib.load(model_Lsvc_path + \"/\" +model_Lsvc_name + str(n_components) +\".pkl\") \nelse:\n t0 = time.clock()\n model_lsvc = LinearSVC() \n model_lsvc.fit(X_train, label_train) \n joblib.dump(model_lsvc, model_Lsvc_path + \"/\" + model_Lsvc_name + str(n_components) +\".pkl\")\n print(\"Save model:{}\".format(model_Lsvc_name + str(n_components) +\".pkl\"))\n print(\"Time for training the LinearSVC: %g s <==> %g min\"%(time.clock() - t0, (time.clock() - t0)/60)) \nprint(\"Success:{}\".format(model_Lsvc_name + str(n_components) +\".pkl\")) \n\n\ny_predict = model_lsvc.predict(X_test)\nprint('The Accuracy of LinearSVC is:', model_lsvc.score(X_test, label_test))\n" ]
[ [ "numpy.hstack", "sklearn.model_selection.train_test_split", "sklearn.svm.LinearSVC", "sklearn.preprocessing.StandardScaler", "numpy.array", "sklearn.decomposition.PCA" ] ]
sankalpdayal/ProgrammingCarla
[ "4cbb1ba2a9f861f544fa71f9d0db2beff5cdd3a0" ]
[ "ros/src/tl_detector/light_classification/tl_classifier.py" ]
[ "from styx_msgs.msg import TrafficLight\nfrom sklearn.linear_model import LogisticRegression\nimport pickle\nimport tensorflow as tf\nfrom keras.models import load_model\nimport numpy as np\nimport cv2\n\nclass TLClassifier(object):\n def __init__(self):\n #TODO load classifier\n detect_model_name = 'ssd_mobilenet_v1_coco_2018_01_28'\n PATH_TO_CKPT = '/home/workspace/ProgrammingCarla/tl/' + detect_model_name + '/frozen_inference_graph.pb'\n # setup tensorflow graph\n self.detection_graph = tf.Graph()\n\n # configuration for possible GPU use\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n # load frozen tensorflow detection model and initialize \n # the tensorflow graph\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n self.sess = tf.Session(graph=self.detection_graph, config=config)\n self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n self.boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n self.scores =self.detection_graph.get_tensor_by_name('detection_scores:0')\n self.classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n self.num_detections =self.detection_graph.get_tensor_by_name('num_detections:0')\n #setup light state detector\n self.hist_bin_size = 2\n self.tl_state_clf = pickle.load(open('/home/workspace/ProgrammingCarla/tl/classifier.p', 'rb'))\n TrafficLight.RED\n \n def get_localization(self, image): \n with self.detection_graph.as_default():\n image_expanded = np.expand_dims(cv2.cvtColor(image, cv2.COLOR_BGR2RGB), axis=0)\n (boxes, scores, classes, num_detections) = self.sess.run([self.boxes, self.scores, self.classes, self.num_detections],\n feed_dict={self.image_tensor: image_expanded})\n boxes=np.squeeze(boxes)\n classes =np.squeeze(classes)\n scores = np.squeeze(scores)\n return boxes, classes, scores\n \n def box_normal_to_pixel(self, box, dim):\n height, width = dim[0], dim[1]\n box_pixel = [int(box[0]*height), int(box[1]*width), int(box[2]*height), int(box[3]*width)]\n return np.array(box_pixel)\n \n def get_histogram(self,img, hist_bin_size):\n gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n data_index = 0\n X = np.zeros((1,(hist_bin_size)*12+1))\n index = 0\n for chnl in range(4):\n if chnl < 3:\n for y_pos_ind in range(3): \n if y_pos_ind == 0:\n y1 = 0\n y2 = 10\n if y_pos_ind == 1:\n y1 = 10\n y2 = 20\n if y_pos_ind == 2:\n y1 = 20\n y2 = 32\n hist, bin_edges = np.histogram(np.ravel(img[y1:y2,:,chnl]),hist_bin_size)\n hist = hist/np.sum(hist)\n x_strt = index*hist_bin_size\n x_stp = (index+1)*hist_bin_size\n X[data_index,x_strt:x_stp] = hist.reshape(1,-1)\n index += 1\n else:\n \n for y_pos_ind in range(3): \n if y_pos_ind == 0:\n y1 = 0\n y2 = 10\n if y_pos_ind == 1:\n y1 = 10\n y2 = 20\n if y_pos_ind == 2:\n y1 = 20\n y2 = 32\n hist, bin_edges = np.histogram(np.ravel(gray_image[y1:y2,:]),hist_bin_size)\n hist = hist/np.sum(hist)\n x_strt = index*hist_bin_size\n x_stp = (index+1)*hist_bin_size\n X[data_index,x_strt:x_stp] = hist.reshape(1,-1)\n index += 1\n top_int = np.mean(gray_image[:10,:])\n cen_int = np.mean(gray_image[10:20,:])\n bot_int = 
np.mean(gray_image[20:32,:])\n if top_int > cen_int:\n if top_int > bot_int:\n X[0,-1] = 0.0\n else:\n X[0,-1] = 1.0\n else:\n if cen_int > bot_int:\n X[0,-1] = 0.5\n else:\n X[0,-1] = 1.0\n return X\n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n #TODO implement light color prediction\n boxes, classes, scores = self.get_localization(image)\n traffic_boxes = []\n dim = image.shape[0:2]\n light_found = False\n for i, cls in enumerate(classes):\n if cls == 10:\n box = boxes[i]\n box_height = box[2] - box[0]\n box_width = box[3] - box[1]\n if scores[i]>0.25 and box_height/box_width>1.5 and box_height > 0.1:\n traffic_boxes.append(self.box_normal_to_pixel(boxes[i], dim))\n light_found = True\n\n if light_found:\n red_votes = 0\n green_votes = 0\n \n for box in traffic_boxes:\n x = self.get_histogram(cv2.resize(image[box[0]:box[2],box[1]:box[3],:],(32,32)), self.hist_bin_size)\n pr = self.tl_state_clf.predict(x)\n if pr == 0:\n red_votes += 1\n else:\n green_votes += 1\n if red_votes > green_votes:\n print('RED')\n return TrafficLight.RED\n else:\n print('GREEN')\n return TrafficLight.GREEN\n print('UNKNOWN')\n return TrafficLight.UNKNOWN\n" ]
[ [ "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.ConfigProto", "numpy.mean", "tensorflow.Session", "numpy.ravel", "tensorflow.GraphDef", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
Jin-Tao-208/web_science_coursework
[ "bb4ab2226b70e7b0f7bbef40ceb002900e757a31" ]
[ "LSH.py" ]
[ "from pymongo import MongoClient\r\nfrom nltk.tokenize import RegexpTokenizer\r\nfrom stop_words import get_stop_words\r\nimport sys\r\nimport random\r\nimport time\r\nimport binascii\r\nimport numpy as np\r\nimport re\r\nimport json\r\n\r\n# client = MongoClient('127.0.0.1', 27017) # is assigned local port\r\n# dbName = \"TwitterDump\" # set-up a MongoDatabase\r\n# db = client[dbName]\r\n# collName = 'colTest3' # here we create a collection\r\n# collection = db[collName] # This is for the Collection put in the DB\r\n\r\nnumHashes = 300 # The length of the signature matrix\r\n\r\nprocessStart = time.time()\r\ndoc_set = []\r\nbol = False\r\ncount3 = 0\r\n# result = collection.find({\"retweet\": bol}, {\"text\": 1, \"uid\": 1, \"_id\": 1})\r\nid_set = []\r\n\r\nwith open('./LSHdata/LSHdata.json', 'r', encoding='utf-8') as f:\r\n data1 = json.load(f)\r\n\r\nfor i in range(0, len(data1)):\r\n if not data1[i]['retweet']:\r\n doc_set.append(data1[i]['text'])\r\n id_set.append(data1[i]['_id'])\r\n count3 += 1\r\n\r\n# for x in result:\r\n# doc_set.append(x['text'])\r\n# id_set.append(x['_id'])\r\n# count3 += 1\r\n\r\n# print(doc_set)\r\nnumDocs = len(doc_set)\r\n\r\ncurShingleID = 0\r\n\r\ndocsAsShingleSets = {}\r\n\r\nt0 = time.time()\r\n\r\ntotalShingles = 0\r\ndocNames = []\r\ntext = []\r\n\r\ntokenizer = RegexpTokenizer(r'\\w+')\r\nen_stop = get_stop_words('en')\r\n\r\nfor i in doc_set:\r\n raw = i.lower()\r\n raw = re.sub(r'(https|http)?:\\/\\/(\\w|\\.|\\/|\\?|\\=|\\&|\\%)*\\b', '', raw, flags=re.MULTILINE)\r\n tokens = tokenizer.tokenize(raw)\r\n stopped_tokens = [i for i in tokens if not i in en_stop]\r\n text.append(stopped_tokens)\r\n\r\nfor i in range(0, numDocs):\r\n words = text[i]\r\n shinglesInDoc = set()\r\n docNames.append(id_set[i])\r\n for index in range(0, len(words) - 2):\r\n shingle = words[index] + \" \" + words[index + 1] + \" \" + words[index + 2]\r\n\r\n crc = binascii.crc32(shingle.encode())\r\n shinglesInDoc.add(crc)\r\n\r\n docsAsShingleSets[id_set[i]] = shinglesInDoc\r\n totalShingles = totalShingles + (len(words) - 2)\r\n\r\nprint(\"total tweets:\", count3)\r\n\r\nprint('\\nRemoving retweets and Shingling ' + str(numDocs) + ' docs took %.2f sec.' 
% (time.time() - t0))\r\n\r\nprint('\\nAverage shingles per doc: %.2f' % (totalShingles / numDocs))\r\n\r\n\r\n# numElems = int(numDocs * (numDocs - 1) / 2)\r\n\r\n# estJSim = [0 for x in range(numElems)]\r\n\r\ndef getTriangleIndex(i, j):\r\n # If i == j that's an error.\r\n if i == j:\r\n sys.stderr.write(\"Can't access triangle matrix with i == j\")\r\n sys.exit(1)\r\n # If j < i just swap the values.\r\n if j < i:\r\n temp = i\r\n i = j\r\n j = temp\r\n k = int(i * (1000 - (i + 1) / 2.0) + j - i) - 1\r\n\r\n return k\r\n\r\n\r\nt0 = time.time()\r\n\r\nprint('\\nGenerating random hash functions...')\r\n\r\nmaxShingleID = 2 ** 32 - 1\r\nnextPrime = 4294967311\r\n\r\n\r\ndef pickRandomCoeffs(k):\r\n # Create a list of 'k' random values.\r\n randList = []\r\n\r\n while k > 0:\r\n # Get a random shingle ID.\r\n randIndex = random.randint(0, maxShingleID)\r\n\r\n # Ensure that each random number is unique.\r\n while randIndex in randList:\r\n randIndex = random.randint(0, maxShingleID)\r\n\r\n # Add the random number to the list.\r\n randList.append(randIndex)\r\n k = k - 1\r\n\r\n return randList\r\n\r\n\r\ncoeffA = pickRandomCoeffs(numHashes)\r\ncoeffB = pickRandomCoeffs(numHashes)\r\n\r\nprint('\\nGenerating MinHash signatures for all documents...')\r\n\r\nsignatures = []\r\n\r\nfor docID in docNames:\r\n shingleIDSet = docsAsShingleSets[docID]\r\n signature = []\r\n for i in range(0, numHashes):\r\n\r\n # For each of the shingles actually in the document, calculate its hash code\r\n # using hash function 'i'.\r\n\r\n # Track the lowest hash ID seen. Initialize 'minHashCode' to be greater than\r\n # the maximum possible value output by the hash.\r\n minHashCode = nextPrime + 1\r\n\r\n # For each shingle in the document...\r\n for shingleID in shingleIDSet:\r\n # Evaluate the hash function.\r\n hashCode = (coeffA[i] * shingleID + coeffB[i]) % nextPrime\r\n\r\n # Track the lowest hash code seen.\r\n if hashCode < minHashCode:\r\n minHashCode = hashCode\r\n\r\n # Add the smallest hash code value as component number 'i' of the signature.\r\n signature.append(minHashCode)\r\n\r\n # Store the MinHash signature for this document.\r\n signatures.append(signature)\r\n\r\nelapsed = (time.time() - t0)\r\n\r\nprint(\"\\nGenerating MinHash signatures took %.2fsec\" % elapsed)\r\n\r\nprint('\\nComparing all signatures...')\r\n\r\nt0 = time.time()\r\n\r\nsignaturesT = [[row[col] for row in signatures] for col in range(len(signatures[0]))]\r\n\r\n# For each of the test documents...\r\nbuckt = set()\r\ncount1 = 0\r\nb = 50 # each band b\r\nr = 2000 # each doc r\r\nthreshold = 0.7 # limit the compare num\r\n\r\nmark = 0\r\nmark1 = 0\r\n\r\ngroup = np.array(signatures)\r\n\r\nnumElems = int(r * (r - 1) / 2)\r\n\r\nclusterList = []\r\n\r\n\r\n# The signature matrix is divided into N buckets according to a band for each b and a doc for each r\r\ndef Matrix_block(matrix, Row, Col):\r\n a = matrix.shape[0]\r\n print(a)\r\n b = matrix.shape[1]\r\n count4 = 0\r\n mark = 0\r\n count6 = 0\r\n for row in range(int(a / Row)):\r\n for col in range(int(b / Col)):\r\n group2 = matrix[row * Row:(row + 1) * Row, col * Col:(col + 1) * Col]\r\n count4 += 1\r\n # Each bucket is considered to be equal if there are hash values that are equal over a certain threshold 0.4\r\n for i in range(0, len(group2[0])):\r\n signature1 = group2[i]\r\n for j in range(i + 1, len(group2[0])):\r\n signature2 = group2[j]\r\n count5 = 0\r\n for k in range(0, Col):\r\n count5 += (signature1[k] == signature2[k])\r\n if count5 / Col > 
threshold:\r\n buckt.add(docNames[count6 + i] + \" \" + docNames[count6 + j])\r\n count6 += Row\r\n\r\n\r\nt1 = time.time()\r\n\r\nx = Matrix_block(group, r, b)\r\n\r\nprint(\"bucket:\", buckt)\r\nprint(\"there are \", len(buckt), \"pair\")\r\nprint(\"Comparative similar time: %.2fsec\" % (time.time() - t1))\r\n\r\narrayResult = []\r\nfor x in buckt:\r\n k = x.split(' ')\r\n numInfo = {k[0], k[1]}\r\n arrayResult.append(numInfo)\r\n\r\nlenth = len(arrayResult)\r\n\r\nfor i in range(1, lenth):\r\n for j in range(i):\r\n if arrayResult[i] == {0} or arrayResult[j] == {0}:\r\n continue\r\n x = arrayResult[i].union(arrayResult[j])\r\n y = len(arrayResult[i]) + len(arrayResult[j])\r\n if len(x) < y:\r\n arrayResult[i] = x\r\n arrayResult[j] = {0}\r\nclusters = [i for i in arrayResult if i != {0}]\r\nminC = len(clusters)\r\nmaxC = 0\r\navgC = 0\r\nfor x in clusters:\r\n if len(x) < minC:\r\n minC = len(x)\r\n if len(x) > maxC:\r\n maxC = len(x)\r\n avgC += len(x)\r\nprint(\"Total have \", len(clusters), \"clusters\")\r\nprint(\"average cluster:\", avgC / len(clusters))\r\nprint(\"minimum cluster:\", minC)\r\nprint(\"maximum cluster:\", maxC)\r\nprint(\"total time spent: %.2f sec\" % (time.time() - processStart))\r\n" ]
[ [ "numpy.array" ] ]
HighDeFing/thesis_v4
[ "2dc9288af75a8b51fe54ed66f520e8aa8a0ab3c7" ]
[ "scripts/haystack_files/haystack_upload_files.py" ]
[ "from haystack.nodes import PDFToTextConverter\nfrom haystack.document_stores import ElasticsearchDocumentStore\nfrom haystack.nodes import DensePassageRetriever\nfrom haystack.nodes import FARMReader, ElasticsearchRetriever\nfrom haystack.pipelines import ExtractiveQAPipeline, DocumentSearchPipeline\nfrom haystack.nodes import PreProcessor\nfrom haystack.nodes import ElasticsearchRetriever, BM25Retriever\nfrom haystack import Document\nimport json\nimport pdfplumber\n#import spacy\nimport pandas as pd\nimport numpy as np\nimport os\nimport unidecode\nfrom scripts.progress_bar.progress_bar import printProgressBar\n\nmodel_path = './notebooks/models/model_big'\nmodel_9 = './notebooks/models/model_9'\nmodel_10 = './notebooks/models/model_10'\n\nmodel_4 = './notebooks/models/model_4'\nmodel_6 = './notebooks/models/model_6'\n\nclass Haystack_module():\n def __init__(self, option = \"ES\", pipe_line_op = \"document\", dense_model_path = \"\"):\n self.document_store = ElasticsearchDocumentStore(similarity=\"dot_product\")\n\n #select an option for retriever\n if option == \"Dense\":\n self.init_Dense_retriever(document_store=self.document_store, save_dir=dense_model_path)\n retriever = self.get_Dense_retriever()\n if option == \"ES\":\n self.init_ES_retriever(self.document_store)\n retriever = self.get_ES_retriever()\n \n # get the reader\n #self.init_FARMReader()\n #reader = self.get_FARMReader()\n \n #set preprocess files\n self.init_preProcessor()\n\n #Establish pipeline\n if pipe_line_op == \"document\":\n self.init_DocumentSearchPipeline(retriever)\n if pipe_line_op == \"qa\":\n # get the reader\n self.init_FARMReader()\n reader = self.get_FARMReader()\n self.init_QAPipeline(retriever = retriever, reader = reader)\n\n #self.qa_pipe = ExtractiveQAPipeline(reader=reader, retriever=retriever)\n\n # Document Store\n def get_document_store(self):\n return self.document_store\n\n #Retrivers\n #Elastic Search retriever\n def init_ES_retriever(self, document_store):\n self.es_retriever = BM25Retriever(document_store=document_store)\n\n def get_ES_retriever(self):\n return self.es_retriever\n\n #Dense retriever\n def init_Dense_retriever(self, document_store, save_dir=\"\"):\n\n if not save_dir:\n self.dp_retriever = DensePassageRetriever(\n document_store=document_store,\n query_embedding_model=\"IIC/dpr-spanish-question_encoder-allqa-base\", #IIC/dpr-spanish-question_encoder-allqa-base #voidful/dpr-question_encoder-bert-base-multilingual\n passage_embedding_model=\"IIC/dpr-spanish-passage_encoder-allqa-base\", #IIC/dpr-spanish-passage_encoder-allqa-base #voidful/dpr-ctx_encoder-bert-base-multilingual\n use_gpu=True,\n batch_size = 64\n )\n else:\n #load fine tuned model\n #print(\"THIS IS THE MODEL\")\n print(save_dir)\n self.dp_retriever = DensePassageRetriever.load(load_dir=save_dir, document_store=document_store, use_gpu=True)\n\n\n def get_Dense_retriever(self):\n return self.dp_retriever\n\n #Readers\n def init_FARMReader(self):\n self.FARM_reader = FARMReader(\"mrm8488/distill-bert-base-spanish-wwm-cased-finetuned-spa-squad2-es\", use_gpu=True)\n\n def get_FARMReader(self):\n return self.FARM_reader\n\n\n def write_file_in_elastic(self, document_store, retriever, option, file_source, school, title, author, year, size, path):\n meta_data = { \"school\": str(school), \"title\": str(title), \"author\": str(author), \"year\": str(year), \"size\": str(size), \"path\": str(path) }\n # print(meta_data)\n converter = PDFToTextConverter(remove_numeric_tables=True, valid_languages=[\"es\"])\n docs = 
converter.convert(file_path=file_source, meta=meta_data)\n\n\n # text = \"\"\"d A cu m u lad a (% )\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nV alor m xim o de tensin fase-tierra (en p.u)\\n\\nValor m xim o de tensin fas e-tierra (e n p.u)\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nP ro b ab ilid ad A cu m u lad a ( % )\\n\\n\\x0cProbabilida d Acum ulada\\n(Ba rra 115kV fa se \"C\" - S/E Te m bla dor)\\n\\nProbabilidad Acumulada\\n(Barra 34,5kV fase \"C\" - S/E Tucupita)\\n\\n\\n\\nPro b ab ilid ad Acu m u lad a (% )\\n\\nProbabilidad Acumulada (%)\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nProba bilidad Acumulada\\n(Barra 13,8kV fase \"C\" - S/E Barra ncas)\\n\\nProba bilidad Acumulada\\n(Ba rra 13,8kV fa se \"B\" - S/E Tucupita)\\n\\n\\nProbabilidad Acumulada (%)\\n\\nProbabilidad Acumulada (%)\\n\\n\\nV alor m xim o de te ns in fase -tier ra (en p.u)\\n\\nValor m xim o de te ns in fas e -tie rra (e n p.u)\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nV alor m xim o de te ns in fas e -tie rra (e n p.u)\\n\\n\\n\\nValor m xim o de te ns in fas e -tie rra (e n p.u)\\n\\nA.26.3 Distribucin estadstica de probabilidad de ocurrencia de sobretensiones para\\nenergizacin de lneas de transmisin (CASO 1)\\n\\nHistogram a S/E Ba rranca s Barra 115kV\\n\\n\\n\\nProbabilidad Ocurrencia (%)\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nProbabilidad Ocurrencia (%)\\n\\nHistogra ma S/E Palital Ba rra 115kV\\n\\nInte rvalo de s obre te ns in fas e -tie rr a \"Fas e A\" (p.u)\\n\\nInte r valo de s obr e te ns in fas e -tie rr a \"Fas e B\" (p.u)\\n\\n\\x0cHistograma S/E Temblador Barra 115kV\\n\\n\\n\\nP ro b a b ilida d O c ur re n c ia (% )\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nProbabilidad Ocurrencia (% )\\n\\nHistograma S/E Tucupita Barra 115kV\\n\\n\\nIntervalo de s obretensin fas e-tie rra \"Fas e C\" (p.u)\\n\\nHistogra ma S/E Tucupita Ba rra 34,5kV\\n\\nProbabilidad Ocurrencia (%)\\n\\n\\nProbabilidad Ocurrencia (%)\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nInte r valo de s obr e te ns in fas e -tie r ra \"Fas e C\" (p.u)\\n\\nHistogra ma S/E Ba rra nca s Ba rra 13,8kV\\n\\n\\n\\nInte rvalo de s obre te ns in fas e -tie r ra \"Fas e B\" (p.u)\\n\\n\\nProbabilidad Ocurrencia (%)\\n\\n\\nHistogra ma S/E Tucupita Ba rra 13,8kV\\n\\n\\n\\nInte rvalo de s obre\"\"\"\n # print(text.replace('\\n',\"\"))\n\n #print(docs[0].meta)\n #new_docs = self.clean_break_line(docs)\n preProcessor = self.get_preProcessor()\n new_docs = preProcessor.process(docs)\n #print(new_docs)\n #print(new_docs[1][\"meta\"])\n new_docs = self.clean_break_line(new_docs)\n pre_docs = new_docs\n\n #print(new_docs)\n\n #file_text = pre_docs.strip()\n #print(file_text)\n #self.init_preProcessor()\n #processor = self.get_preProcessor()\n #pre_docs = processor.process(docs)\n\n document_store.write_documents(pre_docs)\n if option == \"dense\":\n document_store.update_embeddings(retriever)\n \n\n #PipeLines\n #QA Pipeline\n def init_QAPipeline(self, reader, retriever):\n self.qa_pipe = ExtractiveQAPipeline(reader=reader, retriever=retriever)\n\n def get_QAPipeline(self):\n return self.qa_pipe\n\n #Document PipeLine\n def init_DocumentSearchPipeline(self, retriever):\n self.document_search_pipe = 
DocumentSearchPipeline(retriever=retriever)\n\n def get_DocumentSearchPipeline(self):\n return self.document_search_pipe\n\n def clean_break_line(self, docs):\n aux_content = []\n new_docs = []\n for pre_docs in docs:\n #print(pre_docs[\"content\"].replace('\\n',\"\"))\n aux_meta = pre_docs.meta\n aux_content = pre_docs.content.replace('\\n',\"\")\n new_docs.append(Document(content=aux_content, meta=aux_meta))\n #print(new_docs)\n return new_docs\n\n def init_preProcessor(self):\n self.processor = PreProcessor(\n clean_empty_lines=True,\n clean_whitespace=True,\n split_by=\"word\",\n split_length=200,\n split_respect_sentence_boundary=True,\n split_overlap=0,\n language=\"es\"\n )\n\n def get_preProcessor(self):\n return self.processor\n\n def write_files_from_csv_Dense(self, csv_source):\n df = pd.read_csv(csv_source)\n df_head = df.copy()\n\n write_vec = np.vectorize(self.write_file_in_elastic)\n\n document_store = self.get_document_store()\n #self.init_Dense_retriever(document_store)\n retriever = self.get_Dense_retriever()\n option = \"dense\" \n \n write_vec(document_store = document_store, retriever = retriever, option = option, \n file_source = df_head['path'], school = df_head[\"school_complex\"], title = df_head[\"thesis_title\"],\n author = df_head[\"thesis_author\"], year = df_head[\"thesis_year\"], size = df_head[\"size\"], path = df_head[\"path\"])\n\n def write_files_from_csv_Sparse(self, csv_source):\n df = pd.read_csv(csv_source)\n df_head = df.copy()\n\n write_vec = np.vectorize(self.write_file_in_elastic)\n\n document_store = self.get_document_store()\n retriever = self.get_ES_retriever()\n option = \"sparse\" \n\n write_vec(document_store = document_store, retriever = retriever, option = option, \n file_source = df_head['path'], school = df_head[\"school_complex\"], title = df_head[\"thesis_title\"],\n author = df_head[\"thesis_author\"], year = df_head[\"thesis_year\"], size = df_head[\"size\"], path = df_head[\"path\"])\n\n\nif __name__ == \"__main__\":\n\n csv_source = \"scripts/haystack_files/data/thesis_comp_ingelec_quimica.csv\"\n\n ## BM25 \n # elastic = Haystack_module(option=\"ES\")\n # elastic.write_files_from_csv_Sparse(csv_source)\n\n ## Bare bones\n elastic = Haystack_module(option=\"Dense\")\n elastic.write_files_from_csv_Dense(csv_source)\n\n ## model_4\n #elastic = Haystack_module(option=\"Dense\", dense_model_path=model_4)\n #elastic.write_files_from_csv_Dense(csv_source)\n\n ## model_6\n #elastic = Haystack_module(option=\"Dense\", dense_model_path=model_6)\n #elastic.write_files_from_csv_Dense(csv_source)\n\n ## model_9\n # elastic = Haystack_module(option=\"Dense\", dense_model_path=model_9)\n # elastic.write_files_from_csv_Dense(csv_source)\n\n ## model_10\n #elastic = Haystack_module(option=\"Dense\", dense_model_path=model_10)\n #elastic.write_files_from_csv_Dense(csv_source)\n\n ## Old Bare Bones\n # elastic = Haystack_module(option=\"Dense\")\n # elastic.write_files_from_csv_Dense(csv_source)\n\n\n #model_9\n # model_path = model_9\n # elastic = Haystack_module(option=\"Dense\", dense_model_path=model_path)\n # elastic = Haystack_module(option=\"Dense\")\n # #csv_source = \"scripts/haystack_files/data/thesis_30_computacion.csv\"\n # csv_source = \"scripts/haystack_files/data/thesis_comp_ingelec_quimica.csv\"\n # elastic.write_files_from_csv_Dense(csv_source)\n\n # df = pd.read_csv(csv_source)\n # df_head = df.copy()\n # df_head = df_head.head(2)\n\n #print(df_head['path'].values[1])\n\n # write_vec = 
np.vectorize(elastic.write_file_in_elastic)\n\n # write_vec(df_head['path'], df_head[\"school_complex\"],\n # df_head[\"thesis_title\"], df_head[\"thesis_author\"], df_head[\"thesis_year\"],\n # df_head[\"size\"], df_head[\"path\"], df_head[\"resumen\"])\n\n # elastic.write_file_in_elastic(df_head['path'].values[0], df_head[\"school_complex\"].values[0],\n # df_head[\"thesis_title\"].values[0], df_head[\"thesis_author\"].values[0], df_head[\"thesis_year\"]\n # .values[0], df_head[\"size\"].values[0], df_head[\"path\"].values[0], df_head[\"resumen\"].values[0])\n\n # elastic.init_QAPipeline()\n # elastic.init_DocumentSearchPipeline()\n\n # elastic_pipe = elastic.get_QAPipeline()\n # elastic_search_pipe = elastic.get_DocumentSearchPipeline()\n\n # query = '¿Qué es un adolescente?'\n # result = elastic_pipe.run(query=query, params={\"Retriever\": {\"top_k\": 10}, \"Reader\": {\"top_k\": 3}})\n # ##result_2 = elastic_search_pipe.run(query=query)\n # #elastic_pipe.draw()\n # print(result)\n ##print(result_2)\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "numpy.vectorize" ] ]
harmonic-minimization/harmoni_manuscript_codes
[ "6cecd98208b3955d0a20b1d258cc06477e615edf" ]
[ "simulations_toys_sc1_grad_grid.py" ]
[ "\"\"\"\n-----------------------------------------------------------------------\nHarmoni: a Novel Method for Eliminating Spurious Neuronal Interactions due to the Harmonic Components in Neuronal Data\nMina Jamshidi Idaji, Juanli Zhang, Tilman Stephani, Guido Nolte, Klaus-Robert Mueller, Arno Villringer, Vadim V. Nikulin\nhttps://doi.org/10.1101/2021.10.06.463319\n-----------------------------------------------------------------------\nscript for:\n** simulation of toy examples**\n\nIn the manuscript figure:\npanel A: scenario 1\npanel B: scenario 3\npanel C: scenario 4\npanel D: scenario 5\n\nIn this script more other scenarios are included than the 4 that are presented in teh ms.\n-----------------------------------------------------------------------\n\n(c) Mina Jamshidi ([email protected]) @ Neurolgy Dept, MPI CBS, 2021\nhttps://github.com/minajamshidi\n(c) please cite the above paper in case of using this code for your research\n\nLicense: MIT License\n-----------------------------------------------------------------------\n\nlast modified: 20210929 \\Mina\n\n-----------------------------------------------------------------------\n-----------------------------------------------------------------------\n\nthe two-signal scenario without noise\n±±±±±±±±±±± ±±±±±±±±±±± ±±±±±±±±±±±\n\nScenario 1:\n------------\nx1 -R- x3\n| |\ny1 -S- y3\n\n\n\nscenario 2:\n--------------\nx1 -R- x3\n| |\ny1 -S- y3\n\ny2 y4\n\n\nscenario 3:\n--------------\nx1 -R- x3\n| |\ny1 -S- y3\n\ny2 -R- y4\n\n\nscenario 4:\n--------------\nx1 x3\n| |\ny1 y3\n\ny2 -R- y4\n\nscenario 5:\n--------------\n\nx1 -R- y4\n|\ny1 x3\n |\ny2 y3\n\nscenario 6:\n--------------\nx1 <--> x3\nx1 <--> y4\n\nx1 -R- y4\n| ..\ny1 ...x3\n |\ny2 y3\n\n±±±±±±±±±±± ±±±±±±±±±±± ±±±±±±±±±±±\n\n\"\"\"\n\n\nimport numpy as np\nfrom numpy import pi\nimport os.path as op\nfrom matplotlib import pyplot as plt\nfrom scipy.signal import filtfilt, butter\n\nfrom tools_signal import *\nfrom tools_simulations import data_fun_pink_noise, filtered_randn, produce_nm_phase_locked_sig, adjust_snr\nfrom tools_general import *\nfrom tools_connectivity import *\nfrom scipy import stats\n# from harmoni.harmonitools import harmonic_removal_simple\nfrom harmoni.extratools import *\nfrom tools_harmonic_removal import harmonic_removal_simple\n\n# --------------------\n# Scenario\n# --------------------\nscenario = 1 # the scenario to be simulated - pls check the header for the scenario descriptions\n\n# in the following we encode the scenario in the parameters identifying which components exist in the signals\nif scenario == 1:\n x1_x3_coupling = 1\n y2_y4_exist = 0\nelif scenario == 2:\n x1_x3_coupling = 1\n y2_y4_exist = 1\n y2_y4_coupling = 0\nelif scenario == 3:\n x1_x3_coupling = 1\n y2_y4_exist = 1\n y2_y4_coupling = 1\nelif scenario == 4:\n x1_x3_coupling = 0\n y2_y4_exist = 1\n y2_y4_coupling = 1\nelif scenario == 5:\n x1_x3_coupling = 0\n y2_y4_exist = 1\n y2_y4_coupling = 0\nelif scenario == 6:\n x1_x3_coupling = 1\n y2_y4_exist = 1\n y2_y4_coupling = 0\nelif scenario == 7:\n x1_x3_coupling = 1\n y2_y4_exist = 1\n y2_y4_coupling = 0\n\n\n# --------------------\n# general settings\n# --------------------\n\npath_save_results = '/data/p_02076/CODES/Codes_CurrentlyWorking/EEG_Networks/build_nets_python36/harmoni/harmoni-supplementary-data/' # fill this in, if you wanna save the results. Otherwise leave it as ''\npath_save_fig = '' # fill this in, if you wanna save the figures. 
Otherwise leave it as ''\n\n# in case you have the seeds for the simulations, fill this in. Otherwise leave it as ''\n# path_seeds = ''\npath_seeds = ''\n# --------------------\n# parameters\n# --------------------\nfs = 256 # sampling frequency\nduration = 60 # seconds\nn_samples = int(duration * fs) # number of time samples\ntimes = np.arange(0, n_samples)/fs # the time points - used for plotting purpose\nmax_iter = 50 # number of interactions\nc_y2 = 1 # the weight of y2 in the signal\nc_y4 = 1 # the weight of y4\nnoisy = 1 # if the additive noise should be added to the signals. noisy = 1 --> noisy signals\nSNR_alpha = dBinv(5) # SNR of the alpha band\nSNR_beta = dBinv(-5) # SNR of the beta band\ncoh = True # to use coherence or PLV as the connectivity measure\n\n# the filter coefficients\nb10, a10 = butter(N=2, Wn=np.array([8, 12])/fs*2, btype='bandpass')\nb20, a20 = butter(N=2, Wn=np.array([18, 22])/fs*2, btype='bandpass')\n\n# the containers for the optimum values of c and phi\nc_abs_opt_1 = np.empty((max_iter,))\nc_phi_opt_1 = np.empty((max_iter,))\nc_abs_opt_2 = np.empty((max_iter,))\nc_phi_opt_2 = np.empty((max_iter,))\n\n\n# the containers for the synchronization values\nsynch_sig1x_sig1y = np.empty((max_iter,))\nsynch_sig1x_yres1_grid = np.empty((max_iter,))\nsynch_sig1x_yres1_grad = np.empty((max_iter,))\nsynch_sig2x_sig2y = np.empty((max_iter,))\nsynch_sig2x_yres2_grid = np.empty((max_iter,))\nsynch_sig2x_yres2_grad = np.empty((max_iter,))\nsynch_sig1x_sig2y = np.empty((max_iter,))\nsynch_sig1x_yres2_grid = np.empty((max_iter,))\nsynch_sig1x_yres2_grad = np.empty((max_iter,))\nsynch_sig2x_sig1y = np.empty((max_iter,))\nsynch_sig2x_yres1_grid = np.empty((max_iter,))\nsynch_sig2x_yres1_grad = np.empty((max_iter,))\nsynch_sig1y_sig2y = np.empty((max_iter,))\nsynch_yres1_yres2_grid = np.empty((max_iter,))\nsynch_yres1_yres2_grad = np.empty((max_iter,))\n\nif path_seeds == '':\n seed = np.random.randint(low=0, high=2 ** 32, size=(max_iter,))\nelse:\n seed = load_pickle(path_seeds)\n\nfor n_iter in range(max_iter):\n # n_iter = 0\n print(n_iter)\n np.random.seed(int(seed[n_iter]))\n \"\"\" \n dphi_y1 = 0\n dphi_y3 = 0\n dphi_x3 = 0\n dphi_y4 = 0\n \"\"\"\n dphi_y1 = pi / 2 * np.random.random(1) + pi / 4 # phase-shift of y1 comparing to the phase warped x1\n dphi_y3 = pi / 2 * np.random.random(1) + pi / 4 # phase-shift of y3 comparing to the phase of warped x3\n dphi_x3 = pi / 2 * np.random.random(1) + pi / 4 # phase-shift of x3 comparing to x1(in case of coupling of x1 & x3)\n dphi_y4 = pi / 2 * np.random.random(1) + pi / 4 # phase-shift of y4 comparing to y2\n\n # --------------------------------------------------------------\n # generate narrow-band components of sig1 and sig2\n # --------------------------------------------------------------\n\n # x1 is the alpha component of sig1 - produced by band-pass filtering random noise\n x1 = filtered_randn(8, 12, fs, n_samples)\n if x1_x3_coupling: # if sig1 and sig2 are coupled, generate x3 by shifting the phase of x1\n x3 = produce_nm_phase_locked_sig(x1, dphi_x3, 1, 1, [8, 12], fs, nonsin_mode=1)\n else: # otherwise, also generate x3 by band-pass filtering random noise\n x3 = filtered_randn(8, 12, fs, n_samples)\n\n # generate y1 and y3 by phase-warping of x1 and x3, and then adding a phase-shift\n y1 = produce_nm_phase_locked_sig(sig=x1, phase_lag=dphi_y1, n=1, m=2, wn_base=[8, 12], sfreq=fs)\n y3 = produce_nm_phase_locked_sig(x3, dphi_y3, 1, 2, [8, 12], fs)\n\n # generate a band-pass filtering random noise, it will be saved as y2\n 
y2 = filtered_randn(16, 24, fs, n_samples)\n if y2_y4_exist: # if y2 and y4 are contained in sig1 and sig2:\n if y2_y4_coupling: # if y2 and y4 are coupled, generate y4 by phase-shifting y2\n y4 = produce_nm_phase_locked_sig(y2, dphi_y4, 1, 1, [18, 22], fs, nonsin_mode=1)\n else: # otherwise, if y2 and y4 are not coupled:\n if scenario == 5 or scenario == 6 or scenario == 7: # if there is a geneuine CFS:\n # use phase warping on x1, to generate y4 cross-frequency coupled to x1\n y4 = produce_nm_phase_locked_sig(sig=x1, phase_lag=dphi_y4, n=1, m=2, wn_base=[8, 12], sfreq=fs, nonsin_mode=1)\n else: # if non of the above cases, generate y4 by band-pass filtering random noise\n y4 = filtered_randn(16, 24, fs, n_samples)\n\n # the alpha components of sig1 and sig2 ------------\n x_sig1 = x1\n x_sig2 = x3\n\n # the beta components of sig1 and sig2 ---------------\n if scenario == 7:\n y_sig1 = y1\n y_sig2 = 0\n else:\n y_sig1 = y1\n y_sig2 = y3\n\n if y2_y4_exist:\n if scenario == 7:\n y_sig2 = y_sig2 + c_y4 * y4\n else:\n y_sig1 = y_sig1 + c_y2 * y2\n y_sig2 = y_sig2 + c_y4 * y4\n\n # --------------------------------------------------------------\n # generate and add the pink noise - SNR is also tuned here\n # --------------------------------------------------------------\n\n if noisy:\n # generate the noise components ---------\n pink_noise_1 = data_fun_pink_noise(times)[np.newaxis, :]\n pink_noise_2 = data_fun_pink_noise(times)[np.newaxis, :]\n\n # SNR adjustment ------------\n factor_x_sig1 = adjust_snr(np.real(x_sig1), pink_noise_1, SNR_alpha, np.array([8, 12]) / fs * 2)\n x_sig1 = x_sig1 / factor_x_sig1\n\n factor_x_sig2 = adjust_snr(np.real(x_sig2), pink_noise_2, SNR_alpha, np.array([8, 12]) / fs * 2)\n x_sig2 = x_sig2 / factor_x_sig2\n\n factor_y_sig1 = adjust_snr(np.real(y_sig1), pink_noise_1, SNR_beta, np.array([16, 24]) / fs * 2)\n y_sig1 = y_sig1 / factor_y_sig1\n\n factor_y_sig2 = adjust_snr(np.real(y_sig2), pink_noise_2, SNR_beta, np.array([16, 24]) / fs * 2)\n y_sig2 = y_sig2 / factor_y_sig2\n\n # final sig1 and sig1 ---------------------------------------\n sig1 = np.real(x_sig1 + y_sig1)\n sig2 = np.real(x_sig2 + y_sig2)\n\n if noisy: # if noisy add teh pink noise\n sig1 += pink_noise_1\n sig2 += pink_noise_2\n\n \"\"\"\n from here on, we pretend that we have the noisy non-sin signal and we wanna use Harmoni to suppress the\n harmonic info\n \"\"\"\n # --------------------------------------------------------------\n # HARMONI\n # --------------------------------------------------------------\n\n # filter sig1 and sig2 in narrow-band\n sig1_x = filtfilt(b10, a10, sig1)\n sig1_y = filtfilt(b20, a20, sig1)\n\n sig2_x = filtfilt(b10, a10, sig2)\n sig2_y = filtfilt(b20, a20, sig2)\n\n # optimization for sig1 and sig2 -------------\n y_sig1_res_grad = harmonic_removal_simple(sig1_x, sig1_y, fs)\n y_sig1_res_grid = harmonic_removal_simple(sig1_x, sig1_y, fs, method='grid')\n\n y_sig2_res_grad = harmonic_removal_simple(sig2_x, sig2_y, fs)\n y_sig2_res_grid = harmonic_removal_simple(sig2_x, sig2_y, fs, method='grid')\n\n # compute the synchronization indices\n # we use the absolute coherency as the metric\n synch_sig1x_sig1y[n_iter] = compute_phaseconn_with_permtest(sig1_x, sig1_y, 1, 2, fs, plv_type='abs', coh=coh)\n synch_sig1x_yres1_grad[n_iter] = compute_phaseconn_with_permtest(sig1_x, y_sig1_res_grad, 1, 2, fs, plv_type='abs', coh=coh)\n synch_sig1x_yres1_grid[n_iter] = compute_phaseconn_with_permtest(sig1_x, y_sig1_res_grid, 1, 2, fs, plv_type='abs', coh=coh)\n\n 
synch_sig2x_sig2y[n_iter] = compute_phaseconn_with_permtest(sig2_x, sig2_y, 1, 2, fs, plv_type='abs', coh=coh)\n synch_sig2x_yres2_grad[n_iter] = compute_phaseconn_with_permtest(sig2_x, y_sig2_res_grad, 1, 2, fs, plv_type='abs', coh=coh)\n synch_sig2x_yres2_grid[n_iter] = compute_phaseconn_with_permtest(sig2_x, y_sig2_res_grid, 1, 2, fs, plv_type='abs', coh=coh)\n\n synch_sig1x_sig2y[n_iter] = compute_phaseconn_with_permtest(sig1_x, sig2_y, 1, 2, fs, plv_type='abs', coh=coh)\n synch_sig1x_yres2_grad[n_iter] = compute_phaseconn_with_permtest(sig1_x, y_sig2_res_grad, 1, 2, fs, plv_type='abs', coh=coh)\n synch_sig1x_yres2_grid[n_iter] = compute_phaseconn_with_permtest(sig1_x, y_sig2_res_grid, 1, 2, fs, plv_type='abs', coh=coh)\n\n synch_sig2x_sig1y[n_iter] = compute_phaseconn_with_permtest(sig2_x, sig1_y, 1, 2, fs, plv_type='abs', coh=coh)\n synch_sig2x_yres1_grad[n_iter] = compute_phaseconn_with_permtest(sig2_x, y_sig1_res_grad, 1, 2, fs, plv_type='abs', coh=coh)\n synch_sig2x_yres1_grid[n_iter] = compute_phaseconn_with_permtest(sig2_x, y_sig1_res_grid, 1, 2, fs, plv_type='abs', coh=coh)\n\n synch_sig1y_sig2y[n_iter] = compute_phaseconn_with_permtest(sig1_y, sig2_y, 1, 1, fs, plv_type='abs', coh=coh)\n synch_yres1_yres2_grad[n_iter] = compute_phaseconn_with_permtest(y_sig1_res_grad, y_sig2_res_grad, 1, 1, fs, plv_type='abs', coh=coh)\n synch_yres1_yres2_grid[n_iter] = compute_phaseconn_with_permtest(y_sig1_res_grid, y_sig2_res_grid, 1, 1, fs, plv_type='abs', coh=coh)\n\n\ndict1 = {'seed': seed,\n 'synch_sig1x_sig1y': synch_sig1x_sig1y, 'synch_sig1x_yres1_grid': synch_sig1x_yres1_grid,\n 'synch_sig1x_yres1_grad': synch_sig1x_yres1_grad,\n 'synch_sig2x_sig2y': synch_sig2x_sig2y, 'synch_sig2x_yres2_grad': synch_sig2x_yres2_grad,\n 'synch_sig2x_yres2_grid': synch_sig2x_yres2_grid,\n 'synch_sig1x_sig2y': synch_sig1x_sig2y, 'synch_sig1x_yres2_grad': synch_sig1x_yres2_grad,\n 'synch_sig1x_yres2_grid': synch_sig1x_yres2_grid,\n 'synch_sig2x_sig1y': synch_sig2x_sig1y, 'synch_sig2x_yres1_grad': synch_sig2x_yres1_grad,\n 'synch_sig2x_yres1_grid': synch_sig2x_yres1_grid,\n 'synch_sig1y_sig2y': synch_sig1y_sig2y, 'synch_yres1_yres2_grad': synch_yres1_yres2_grad,\n 'synch_yres1_yres2_grid': synch_yres1_yres2_grid}\nif len(path_save_results):\n save_pickle(path_save_results + '/toys_grad_grid_' + 'scenario' + str(scenario), dict1)\n\n#\n# # ------------------------------------\n# # plotting\n# # ------------------------------------\n#\n# # fig = plt.figure()\n# #\n# # ax = plt.subplot(231)\n# # plot_boxplot_paired(ax, dict1['plv_sig1x_sig1y'], dict1['plv_sig1x_yres1'], datapoints=True,\n# # labels=['plv(s1_x, s1_y)', 'plv(s1_x, s1_y_res)'])\n# #\n# # ax = plt.subplot(232)\n# # plot_boxplot_paired(ax, dict1['plv_sig2x_sig2y'], dict1['plv_sig2x_yres2'], datapoints=True,\n# # labels=['plv(s2_x, s2_y)', 'plv(s2_x, s2_y_res)'])\n# #\n# # ax = plt.subplot(233)\n# # plot_boxplot_paired(ax, dict1['plv_sig1x_sig2y'], dict1['plv_sig1x_yres2'], datapoints=True,\n# # labels=['plv(s1_x, s2_y)', 'plv(s1_x, s2_y_res)'])\n# #\n# # ax = plt.subplot(234)\n# # plot_boxplot_paired(ax, dict1['plv_sig2x_sig1y'], dict1['plv_sig2x_yres1'], datapoints=True,\n# # labels=['plv(s2_x, s1_y)', 'plv(s2_x, s1_y_res)'])\n# #\n# # ax = plt.subplot(235)\n# # plot_boxplot_paired(ax, dict1['plv_sig1y_sig2y'], dict1['plv_yres1_yres2'], datapoints=True,\n# # labels=['plv(s1_y, s2_y)', 'plv(s1_y_res, s2_y_res)'])\n# #\n# # fname_fig = op.join(path_save_fig, 'sc' + str(scenario) + '.eps')\n# # fig.savefig(fname_fig, facecolor='white')\n#\n#\n# # ------------------------------------\n# # plot by loading your saved results\n# # ------------------------------------\n# # if you want to load your saved results, uncomment the following line:\n# # dict1 = load_pickle(path_save_results + 'toys_scenario' + str(scenario))\n#\n# data = (dict1['synch_sig1x_sig1y'][:, np.newaxis], dict1['synch_sig1x_yres1'][:, np.newaxis],\n# dict1['synch_sig2x_sig2y'][:, np.newaxis], dict1['synch_sig2x_yres2'][:, np.newaxis],\n# dict1['synch_sig1x_sig2y'][:, np.newaxis], dict1['synch_sig1x_yres2'][:, np.newaxis],\n# dict1['synch_sig2x_sig1y'][:, np.newaxis], dict1['synch_sig2x_yres1'][:, np.newaxis],\n# dict1['synch_sig1y_sig2y'][:, np.newaxis], dict1['synch_yres1_yres2'][:, np.newaxis])\n#\n# random_coh = random_synchronization_dist(1, 2, duration, f0=10, fs=fs, maxiter=5000)\n# perc95, perc99 = np.percentile(random_coh, [95, 99])\n#\n# fig = plt.figure()\n# plt.hlines([perc95, perc99], 0, 11, linestyle='--', color='lightgray')\n#\n# plt.boxplot(np.concatenate(data, axis=1), notch=True)\n# violin_plot([random_coh], positions=[11])\n#\n# for k in range(max_iter):\n# for i1 in range(0, 9, 2):\n# plt.plot(np.ones((1, 1)) * (i1+1) + np.random.randn(1, 1) * 0.02, data[i1][k],\n# marker='.', color='lightskyblue', markersize=3)\n# plt.plot(np.ones((1, 1)) * (i1+2) + np.random.randn(1, 1) * 0.02, data[i1+1][k],\n# marker='.', color='lightskyblue', markersize=3)\n# x = np.array([i1+1, i1+2])\n# y = np.array([data[i1][k], data[i1+1][k]])\n# plt.plot(x, y, '-', linewidth=.05)\n#\n# if len(path_save_fig):\n# fname_fig = op.join(path_save_fig, 'sc' + str(scenario) + '.eps')\n# fig.savefig(fname_fig, facecolor='white')\n#\n# # do the stats\n# for ii in range(0, 9, 2):\n# res = stats.wilcoxon(data[ii].ravel(), data[ii+1].ravel())\n# print(res[1], res[1]*5)" ]
[ [ "scipy.signal.filtfilt", "numpy.random.random", "numpy.arange", "numpy.real", "numpy.array", "numpy.empty", "numpy.random.randint" ] ]
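The commented-out analysis in the record above ends with paired Wilcoxon signed-rank tests whose p-values are Bonferroni-corrected by a factor of 5 (one factor per comparison pair). A minimal, self-contained sketch of that statistical step, with synthetic stand-ins for two paired columns of `dict1` (the real arrays only exist after running the script):

```python
# Hypothetical paired synchronization estimates, e.g. synch(sig1_x, sig1_y)
# versus synch(sig1_x, y_res1); one value per simulation iteration.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
n_iter = 50
synch_full = rng.uniform(0.4, 0.8, size=n_iter)                   # coupling kept
synch_residual = synch_full - rng.uniform(0.0, 0.2, size=n_iter)  # coupling partly removed

res = stats.wilcoxon(synch_full, synch_residual)
# raw p-value, then the x5 Bonferroni correction used in the commented loop
print(res.pvalue, res.pvalue * 5)
```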
uditgupta002/RegularizationMethodsForNN
[ "2554e088bafa9932e6e4d7f5ba033a2b79afe29d" ]
[ "src/regularization_utils.py" ]
[ "'''\nCreated on Nov 7, 2017\n\n@author: udit.gupta\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nimport scipy.io\n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n s = 1/(1+np.exp(-x))\n return s\n\ndef relu(x):\n \"\"\"\n Compute the relu of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- relu(x)\n \"\"\"\n s = np.maximum(0,x)\n \n return s\n\ndef load_planar_dataset(seed):\n \n np.random.seed(seed)\n \n m = 400 # number of examples\n N = int(m/2) # number of points per class\n D = 2 # dimensionality\n X = np.zeros((m,D)) # data matrix where each row is a single example\n Y = np.zeros((m,1), dtype='uint8') # labels vector (0 for red, 1 for blue)\n a = 4 # maximum ray of the flower\n\n for j in range(2):\n ix = range(N*j,N*(j+1))\n t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta\n r = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius\n X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]\n Y[ix] = j\n \n X = X.T\n Y = Y.T\n\n return X, Y\n\ndef initialize_parameters(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n W1 -- weight matrix of shape (layer_dims[1], layer_dims[0])\n b1 -- bias vector of shape (layer_dims[1], 1)\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \n Tips:\n - For example: the layer_dims for the \"Planar Data classification model\" would have been [2,2,1]. \n This means W1's shape was (2,2), b1 was (2,1), W2 was (1,2) and b2 was (1,1). Now you have to generalize it!\n - In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1])\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n \n # note: the comparison must be against a shape tuple; asserting a non-empty\n # tuple (as in `assert(x == a, b)`) is always true and checks nothing\n assert parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])\n assert parameters['b' + str(l)].shape == (layer_dims[l], 1)\n\n \n return parameters\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Implements the forward propagation presented in Figure 2.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape ()\n b1 -- bias vector of shape ()\n W2 -- weight matrix of shape ()\n b2 -- bias vector of shape ()\n W3 -- weight matrix of shape ()\n b3 -- bias vector of shape ()\n \n Returns:\n A3 -- output of the last (sigmoid) activation\n cache -- tuple of intermediate values, needed by backward_propagation()\n \"\"\"\n \n # retrieve parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n \n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n \n cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)\n \n return A3, cache\n\ndef backward_propagation(X, Y, cache):\n \"\"\"\n Implement the backward propagation presented in figure 2.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat)\n cache -- cache output from forward_propagation()\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T)\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, X.T)\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\n \"dA2\": dA2, \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2,\n \"dA1\": dA1, \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(i)] = Wi\n parameters['b' + str(i)] = bi\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(i)] = dWi\n grads['db' + str(i)] = dbi\n learning_rate -- the learning rate, scalar.\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n \n n = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for k in range(n):\n parameters[\"W\" + str(k+1)] = parameters[\"W\" + str(k+1)] - learning_rate * grads[\"dW\" + str(k+1)]\n 
parameters[\"b\" + str(k+1)] = parameters[\"b\" + str(k+1)] - learning_rate * grads[\"db\" + str(k+1)]\n \n return parameters\n\ndef predict(X, y, parameters):\n \"\"\"\n This function is used to predict the results of a n-layer neural network.\n \n Arguments:\n X -- data set of examples you would like to label\n y -- true labels, only used to print the accuracy\n parameters -- parameters of the trained model\n \n Returns:\n p -- predictions for the given dataset X\n \"\"\"\n \n m = X.shape[1]\n p = np.zeros((1,m), dtype = int) # np.int was removed in NumPy 1.24; the builtin int is equivalent here\n \n # Forward propagation\n a3, caches = forward_propagation(X, parameters)\n \n # convert probas to 0/1 predictions\n for i in range(0, a3.shape[1]):\n if a3[0,i] > 0.5:\n p[0,i] = 1\n else:\n p[0,i] = 0\n\n # print results\n\n #print (\"predictions: \" + str(p[0,:]))\n #print (\"true labels: \" + str(y[0,:]))\n print(\"Accuracy: \" + str(np.mean((p[0,:] == y[0,:]))))\n \n return p\n\ndef compute_cost(a3, Y):\n \"\"\"\n Implement the cost function\n \n Arguments:\n a3 -- post-activation, output of forward propagation\n Y -- \"true\" labels vector, same shape as a3\n \n Returns:\n cost - value of the cost function\n \"\"\"\n m = Y.shape[1]\n \n logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)\n cost = 1./m * np.nansum(logprobs)\n \n return cost\n\ndef load_dataset():\n train_dataset = h5py.File('../datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('../datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n train_set_x_orig = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\n test_set_x_orig = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n \n train_set_x = train_set_x_orig/255\n test_set_x = test_set_x_orig/255\n\n return train_set_x, train_set_y, test_set_x, test_set_y, classes\n\n\ndef predict_dec(parameters, X):\n \"\"\"\n Used for plotting decision boundary.\n \n Arguments:\n parameters -- python dictionary containing your parameters \n X -- input data of size (m, K)\n \n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n \n # Predict using forward propagation and a classification threshold of 0.5\n a3, cache = forward_propagation(X, parameters)\n predictions = (a3>0.5)\n return predictions\n\ndef load_planar_dataset(randomness, seed):\n \n np.random.seed(seed)\n \n m = 50\n N = int(m/2) # number of points per class\n D = 2 # dimensionality\n X = np.zeros((m,D)) # data matrix where each row is a single example\n Y = np.zeros((m,1), dtype='uint8') # labels vector (0 for red, 1 for blue)\n a = 2 # maximum ray of the flower\n\n for j in range(2):\n \n ix = range(N*j,N*(j+1))\n if j == 0:\n t = np.linspace(j, 4*3.1415*(j+1),N) #+ np.random.randn(N)*randomness # theta\n r = 0.3*np.square(t) + np.random.randn(N)*randomness # radius\n if j == 1:\n t = np.linspace(j, 2*3.1415*(j+1),N) #+ np.random.randn(N)*randomness # theta\n r = 0.2*np.square(t) + np.random.randn(N)*randomness # radius\n \n X[ix] = np.c_[r*np.cos(t), r*np.sin(t)]\n Y[ix] = j\n \n X = X.T\n Y = 
Y.T\n\n return X, Y\n\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[0, :], X[1, :], c=y[0], cmap=plt.cm.Spectral)\n plt.show()\n \ndef load_2D_dataset():\n data = scipy.io.loadmat('../datasets/data.mat')\n train_X = data['X'].T\n train_Y = data['y'].T\n test_X = data['Xval'].T\n test_Y = data['yval'].T\n\n plt.scatter(train_X[0, :], train_X[1, :], c=train_Y[0], s=40, cmap=plt.cm.Spectral);\n \n return train_X, train_Y, test_X, test_Y\n" ]
[ [ "numpy.dot", "matplotlib.pyplot.contourf", "numpy.sqrt", "numpy.linspace", "numpy.random.randn", "numpy.mean", "numpy.exp", "numpy.square", "numpy.arange", "numpy.sin", "numpy.nansum", "numpy.zeros", "numpy.log", "numpy.int64", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.maximum", "numpy.random.seed", "matplotlib.pyplot.scatter", "numpy.cos", "matplotlib.pyplot.xlabel" ] ]
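The helpers above are building blocks rather than a complete training script. A minimal sketch (not part of the repo) of how they compose into plain gradient descent; note that the second definition of `load_planar_dataset(randomness, seed)` shadows the earlier one-argument version, and the layer sizes and learning rate here are illustrative:

```python
from regularization_utils import (load_planar_dataset, initialize_parameters,
                                  forward_propagation, backward_propagation,
                                  update_parameters, compute_cost, predict)

X, Y = load_planar_dataset(0.2, 1)   # two-argument variant; X: (2, 50), Y: (1, 50)
params = initialize_parameters([X.shape[0], 20, 3, 1])  # the W1..b3 layout forward_propagation expects

for i in range(10001):
    a3, cache = forward_propagation(X, params)
    grads = backward_propagation(X, Y, cache)
    params = update_parameters(params, grads, learning_rate=0.3)
    if i % 2000 == 0:
        print("iter %5d  cost %.4f" % (i, compute_cost(a3, Y)))

predict(X, Y, params)  # prints training accuracy
```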
g6ling/Pytorch-Cartpole
[ "ecb7b622cfefe825ac95388cceb6752413d90a2a", "ecb7b622cfefe825ac95388cceb6752413d90a2a" ]
[ "parallel/3-ACER/worker.py", "POMDP/3-DRQN-Store-State/model.py" ]
[ "import gym\nimport torch\nimport torch.multiprocessing as mp\nimport numpy as np\nfrom model import LocalModel\nfrom memory import Memory, Trajectory\nfrom config import env_name, max_episode, log_interval, replay_memory_capacity, replay_ratio\n\nclass Worker(mp.Process):\n def __init__(self, global_model, global_average_model, global_optimizer, global_ep, global_ep_r, res_queue, name):\n super(Worker, self).__init__()\n\n self.env = gym.make(env_name)\n self.env.seed(500)\n\n self.name = 'w%i' % name\n self.global_ep, self.global_ep_r, self.res_queue = global_ep, global_ep_r, res_queue\n self.global_model, self.global_average_model, self.global_optimizer = global_model, global_average_model, global_optimizer\n self.local_model = LocalModel(self.env.observation_space.shape[0], self.env.action_space.n)\n self.num_actions = self.env.action_space.n\n\n self.memory = Memory(replay_memory_capacity)\n\n def record(self, score, loss):\n with self.global_ep.get_lock():\n self.global_ep.value += 1\n with self.global_ep_r.get_lock():\n if self.global_ep_r.value == 0.:\n self.global_ep_r.value = score\n else:\n self.global_ep_r.value = 0.99 * self.global_ep_r.value + 0.01 * score\n if self.global_ep.value % log_interval == 0:\n print('{} , {} episode | score: {:.2f}'.format(\n self.name, self.global_ep.value, self.global_ep_r.value))\n\n self.res_queue.put([self.global_ep.value, self.global_ep_r.value, loss])\n\n def run(self):\n while self.global_ep.value < max_episode:\n self.algorithm(True)\n n = np.random.poisson(replay_ratio)\n for _ in range(n):\n self.algorithm(False)\n\n def algorithm(self, on_policy):\n self.local_model.pull_from_global_model(self.global_model)\n if not on_policy and len(self.memory) > 100:\n trajectory = self.memory.sample()\n else:\n trajectory, score = self.run_env()\n loss = self.local_model.train(on_policy, trajectory, self.global_average_model, self.global_optimizer, self.global_model, self.global_average_model)\n if on_policy:\n self.record(score, loss)\n\n\n def run_env(self):\n done = False\n score = 0\n steps = 0\n\n state = self.env.reset()\n state = torch.Tensor(state)\n state = state.unsqueeze(0)\n trajectory = Trajectory()\n\n while True:\n action, policy = self.local_model.get_action(state)\n policy = torch.Tensor(policy)\n\n next_state, reward, done, _ = self.env.step(action)\n next_state = torch.Tensor(next_state)\n next_state = next_state.unsqueeze(0)\n\n mask = 0 if done else 1\n reward = reward if not done or score == 499 else -1\n trajectory.push(state, next_state, action, reward, mask, policy)\n\n score += reward\n state = next_state\n\n if done:\n break\n\n self.memory.push(trajectory)\n trajectory = trajectory.sample()\n return trajectory, score\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom config import gamma, device, batch_size, sequence_length, burn_in_length\n\nclass DRQN(nn.Module):\n def __init__(self, num_inputs, num_outputs):\n super(DRQN, self).__init__()\n self.num_inputs = num_inputs\n self.num_outputs = num_outputs\n\n self.lstm = nn.LSTM(input_size=num_inputs, hidden_size=16, batch_first=True)\n self.fc1 = nn.Linear(16, 128)\n self.fc2 = nn.Linear(128, num_outputs)\n\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform(m.weight)\n\n def forward(self, x, hidden=None):\n # x [batch_size, sequence_length, num_inputs]\n out, hidden = self.lstm(x, hidden)\n\n out = F.relu(self.fc1(out))\n qvalue = self.fc2(out)\n\n return qvalue, hidden\n\n\n @classmethod\n def 
train_model(cls, online_net, target_net, optimizer, batch):\n def slice_burn_in(item):\n return item[:, burn_in_length:, :]\n states = torch.stack(batch.state).view(batch_size, sequence_length, online_net.num_inputs)\n next_states = torch.stack(batch.next_state).view(batch_size, sequence_length, online_net.num_inputs)\n actions = torch.stack(batch.action).view(batch_size, sequence_length, -1).long()\n rewards = torch.stack(batch.reward).view(batch_size, sequence_length, -1)\n masks = torch.stack(batch.mask).view(batch_size, sequence_length, -1)\n rnn_state = torch.stack(batch.rnn_state).view(batch_size, sequence_length, 2, -1)\n\n \n\n [h0, c0] = rnn_state[:, 0, :, :].transpose(0, 1)\n h0 = h0.unsqueeze(0).detach()\n c0 = c0.unsqueeze(0).detach()\n\n [h1, c1] = rnn_state[:, 1, :, :].transpose(0, 1)\n h1 = h1.unsqueeze(0).detach()\n c1 = c1.unsqueeze(0).detach()\n\n pred, _ = online_net(states, (h0, c0))\n next_pred, _ = target_net(next_states, (h1, c1))\n\n pred = slice_burn_in(pred)\n next_pred = slice_burn_in(next_pred)\n actions = slice_burn_in(actions)\n rewards = slice_burn_in(rewards)\n masks = slice_burn_in(masks)\n \n pred = pred.gather(2, actions)\n \n target = rewards + masks * gamma * next_pred.max(2, keepdim=True)[0]\n\n loss = F.mse_loss(pred, target.detach())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return loss\n\n def get_action(self, state, hidden):\n state = state.unsqueeze(0).unsqueeze(0)\n\n qvalue, hidden = self.forward(state, hidden)\n \n _, action = torch.max(qvalue, 2)\n return action.numpy()[0][0], hidden\n" ]
[ [ "numpy.random.poisson", "torch.Tensor" ], [ "torch.max", "torch.nn.LSTM", "torch.nn.Linear", "torch.stack", "torch.nn.init.xavier_uniform" ] ]
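For the DRQN above, the LSTM hidden state returned by `get_action` must be threaded back in on the next call; that is what lets the recurrent net integrate a partially observed state over time. A minimal inference sketch, assuming the repo's `config` module is importable (model.py reads it at import time) and CartPole-sized inputs:

```python
import torch
from model import DRQN

net = DRQN(num_inputs=4, num_outputs=2)  # CartPole: 4 observations, 2 actions

state = torch.zeros(4)   # stand-in for torch.Tensor(env.reset())
hidden = None            # nn.LSTM zero-initializes when hidden is None
for t in range(5):
    action, hidden = net.get_action(state, hidden)  # carry hidden across steps
    print("step", t, "action", action)
```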
simonjantsch/switss
[ "53865f218ac581b51e203becf6f26fedd0bc1326" ]
[ "switss/model/reachability_form.py" ]
[ "from . import AbstractMDP,MDP\nfrom ..utils import InvertibleDict, cast_dok_matrix, DTMCVisualizationConfig, VisualizationConfig\nfrom ..solver.milp import LP\n\nfrom collections import defaultdict\nfrom bidict import bidict\nimport copy as copy\nimport numpy as np\nfrom scipy.sparse import dok_matrix,hstack,vstack\n\nclass ReachabilityForm:\n \"\"\" \n A reachability form is a wrapper for special DTMCs/MDPs with dedicated initial and target states. \n In particular, the following properties are fulfilled:\n\n - exactly one fail, goal and initial state,\n - fail and goal have exactly one action, which maps only to themselves,\n - the fail state (goal state) has index :math:`N_{S_{\\\\text{all}}}-1` (:math:`N_{S_{\\\\text{all}}}-2`) and the corresponding state-action-pair index :math:`C_{S_{\\\\text{all}}}-1` (:math:`C_{S_{\\\\text{all}}}-2`),\n - every state is reachable from the initial state (fail doesn't need to be reachable) and\n - every state reaches the goal state (except the fail state)\n\n \"\"\"\n def __init__(self, system, initial_label, target_label=\"rf_target\", fail_label=\"rf_fail\", ignore_consistency_checks=False):\n \"\"\"Instantiates a RF.\n\n :param system: The MDP/DTMC that fulfills the specified properties.\n :type system: model.AbstractMDP\n :param initial_label: Label of initial state - there must be exactly one\n :type initial_label: str\n :param target_label: Label of target state - there must be exactly one, defaults to \"rf_target\"\n :type target_label: str, optional\n :param fail_label: Label of fail state - there must be exactly one, defaults to \"rf_fail\"\n :type fail_label: str, optional\n :param ignore_consistency_checks: If set to False, checks consistency of given model (i.e. if the properties are satisfied),\n defaults to False\n :type ignore_consistency_checks: bool, optional\n \"\"\" \n if not ignore_consistency_checks:\n ReachabilityForm.assert_consistency(system, initial_label, target_label, fail_label)\n \n self.__P = system.P[:system.C-2, :system.N-2]\n self.__system = system\n self.initial = next(iter(system.states_by_label[initial_label]))\n self.target_label = target_label\n self.fail_label = fail_label\n self.initial_label = initial_label\n self.__index_by_state_action = system.index_by_state_action.copy()\n del self.__index_by_state_action.inv[system.C-2]\n del self.__index_by_state_action.inv[system.C-1]\n \n self.__A = self._reach_form_id_matrix() - self.__P\n self.__to_target = system.P.getcol(system.N-2).todense()[:system.C-2]\n\n system_mecs, nr_of_system_mecs = system.maximal_end_components()\n self.__mec_states = system_mecs[:system.N-2]\n self.__nr_of_mecs = int(nr_of_system_mecs - 2)\n \n self.__target_visualization_style = None\n self.__fail_visualization_style = None\n self.set_target_visualization_style()\n self.set_fail_visualization_style()\n\n @property\n def target_sap_idx(self):\n return self.system.C-2\n\n @property\n def target_state_idx(self):\n return self.system.N-2\n\n @property\n def fail_sap_idx(self):\n return self.system.C-1\n\n @property\n def fail_state_idx(self):\n return self.system.N-1\n\n def set_target_visualization_style(self,style=None):\n assert style is None or type(style) == type(self.system.visualization)\n\n def _state_map(sourceidx,labels):\n return { \"color\" : \"green\", \"style\": \"filled\", \"label\" : self.target_label }\n def _action_map(sourceidx,action,labels):\n return { \"node\" : { \"shape\" : \"point\" }, \"edge\" : { \"dir\" : 
\"none\" } }\n \n if style is None:\n if type(self.system) is MDP: style = VisualizationConfig(state_map=_state_map,action_map=_action_map)\n else: style = DTMCVisualizationConfig(state_map=_state_map)\n \n self.__target_visualization_style = style\n\n def set_fail_visualization_style(self,style=None):\n assert style is None or type(style) == type(self.system.visualization)\n \n def _state_map(sourceidx,labels):\n return { \"color\" : \"red\", \"style\": \"filled\", \"label\" : self.fail_label }\n def _action_map(sourceidx,action,labels):\n return { \"node\" : { \"shape\" : \"point\" }, \"edge\" : { \"dir\" : \"none\" } }\n\n if style is None:\n if type(self.system) is MDP: style = VisualizationConfig(state_map=_state_map,action_map=_action_map)\n else: style = DTMCVisualizationConfig(state_map=_state_map)\n \n self.__fail_visualization_style = style\n\n @property\n def system(self):\n \"\"\"The underlying system (instance of model.AbstractMDP)\"\"\"\n return self.__system\n\n @property\n def A(self):\n \"\"\"\n Returns a :math:`C \\\\times N` matrix :math:`\\mathbf{A}` where \n\n .. math::\n \n \\\\textbf{A}((s,a), d) = \\\\begin{cases} 1 - \\\\textbf{P}((s,a), d) &\\\\text{ if } d = s \\\\\\\\\n - \\\\textbf{P}((s,a), d) &\\\\text{ if } d \\\\neq s \\end{cases}, \n\n for all :math:`(s,a),d \\in \\mathcal{M} \\\\times S`.\"\"\"\n return self.__A\n\n @property\n def to_target(self):\n \"\"\"\n Returns a vector of length :math:`C` :math:`\\\\textbf{b}` where\n\n .. math::\n\n \\\\textbf{b}((s,a)) = \\\\text{P}((s,a),\\\\text{goal}),\n\n for all :math:`(s,a) \\in \\mathcal{M}`. \n \"\"\"\n return self.__to_target\n\n @property\n def mec_states(self):\n \"\"\"\n Returns a vector of length :math:`N` which contains a non-zero value for each state that is contained in a proper end component.\n \"\"\"\n return self.__mec_states\n\n @property\n def nr_of_mecs(self):\n \"\"\"\n Returns the number of proper end components (excluding goal and fail).\n \"\"\"\n return self.__nr_of_mecs\n\n\n @staticmethod\n def assert_consistency(system, initial_label, target_label=\"rf_target\", fail_label=\"rf_fail\"):\n \"\"\"Checks whether a system fulfills the reachability form properties.\n\n :param system: The system\n :type system: model.AbstractMDP\n :param initial_label: Label of initial state\n :type initial_label: str\n :param target_label: Label of target state, defaults to \"rf_target\"\n :type target_label: str, optional\n :param fail_label: Label of fail state, defaults to \"rf_fail\"\n :type fail_label: str, optional\n \"\"\" \n assert isinstance(system, AbstractMDP)\n assert len({initial_label, target_label, fail_label}) == 3, \"Initial label, target label and fail label must be pairwise distinct\"\n\n # check that there is exactly 1 target,fail and initial state resp.\n states = []\n for statelabel, name in [(initial_label, \"initial\"), (target_label, \"target\"), (fail_label, \"fail\")]:\n labeledstates = system.states_by_label[statelabel].copy()\n count = len(labeledstates) \n assert count == 1, \\\n \"There must be exactly 1 %s state. 
There are %s states in system %s with label %s\" % (name, count, system, statelabel)\n states.append(labeledstates.pop())\n init,target,fail = states\n \n # check that fail and target state only map to themselves\n for state,name,rowidx,colidx in [(target,\"target\",system.C-2,system.N-2),(fail,\"fail\",system.C-1,system.N-1)]:\n successors = list(system.successors(state))\n assert len(successors) == 1 and successors[0][0] == state, \"State %s must have exactly one action and one successor: itself\" % name\n succst,succact,p = successors[0]\n saindex = system.index_by_state_action[(succst,succact)]\n assert saindex == rowidx, \"State-action of %s must be at index %s but is at %s\" % (name, rowidx, saindex)\n assert state == colidx, \"State %s must be at index %s but is at %s\" % (name, colidx, state)\n\n # fail_mask has a 1 only at the fail state and zeros otherwise\n fail_mask = np.zeros(system.N,dtype=bool)\n fail_mask[fail] = True \n\n # check that every state is reachable\n # the fail state doesn't need to be reachable\n fwd_mask = system.reachable_mask({init},\"forward\")\n assert (fwd_mask | fail_mask).all(), \"Not every state is reachable from %s in system %s\" % (initial_label, system)\n\n # check that every state except fail reaches goal\n # if bwd_mask[fail] == 1 then bwd_mask[fail] ^ fail_mask[fail] == 0\n bwd_mask = system.reachable_mask({target},\"backward\")\n assert (bwd_mask ^ fail_mask).all(), \"Not every state reaches %s in system %s\" % (target_label, system)\n\n @staticmethod\n def reduce(system, initial_label, target_label, new_target_label=\"rf_target\", new_fail_label=\"rf_fail\", debug=False):\n \"\"\"Reduces a system to a system in reachability form. \n The transformation does a forward search starting at the initial state, then a \n backwards search starting from the target states, and then removes all states \n that are not reachable in at least one of these searches. Transitions that \n lead to removed states are mapped to a dedicated new \"fail\"-state (default label is \"rf_fail\"). \n All old target states are remapped to a dedicated new \"target\"-state (default label is \"rf_target\"). \n \n :param system: The system that should be reduced.\n :type system: model.AbstractMDP\n :param initial_label: Label of initial state (there must be exactly one)\n :type initial_label: str\n :param target_label: Label of target state (there must be at least one)\n :type target_label: str\n :param new_target_label: Label of dedicated new target state, defaults to \"rf_target\"\n :type new_target_label: str, optional\n :param new_fail_label: Label of dedicated new fail state, defaults to \"rf_fail\"\n :type new_fail_label: str, optional\n :param debug: If True, additional diagnostic information is printed, defaults to False\n :type debug: bool, optional\n :return: A triple (RF, state_map, state_action_map) where state_map (state_action_map) is a mapping from system states\n (state-action pairs) to states (state-action pairs) of the reduced system. 
If a state (state-action pair) is not a \n key in the dictionary, it was removed.\n :rtype: Tuple[model.ReachabilityForm, Dict[int,int], Dict[Tuple[int,int],Tuple[int,int]]]\n \"\"\" \n assert isinstance(system, AbstractMDP)\n assert new_target_label not in system.states_by_label.keys(), \"Label '%s' for target state already exists in system %s\" % (new_target_label, system)\n assert new_fail_label not in system.states_by_label.keys(), \"Label '%s' for fail state already exists in system %s\" % (new_fail_label, system)\n target_states = system.states_by_label[target_label]\n assert len(target_states) > 0, \"There needs to be at least one target state.\"\n initial_state_count = len(system.states_by_label[initial_label])\n assert initial_state_count == 1, \"There are %d states with label '%s'. Must be 1.\" % (initial_state_count, initial_label)\n initial = list(system.states_by_label[initial_label])[0]\n \n if debug:\n print(\"calculating reachable mask (backward)...\")\n backward_reachable = system.reachable_mask(target_states, \"backward\")\n if debug:\n print(\"calculating reachable mask (forward)...\")\n forward_reachable = system.reachable_mask(set([initial]), \"forward\", blocklist=target_states)\n # states which are reachable from the initial state AND are able to reach target states\n reachable_mask = backward_reachable & forward_reachable\n # TODO: use reachable_mask instead of reachable everywhere\n # this is much better for performance since lookup of states is always O(1)\n reachable = [ idx for idx,x in enumerate(reachable_mask) if x ]\n reachable_index_mapping = { v : k for k,v in enumerate(reachable) }\n\n if debug:\n print(\"tested backward & forward reachability test\")\n \n # reduce states + new target and new fail state \n new_state_count = len(reachable) + 2\n target_idx, fail_idx = new_state_count - 2, new_state_count - 1\n \n if debug:\n print(\"new states: %s, target index: %s, fail index: %s\" % (new_state_count, target_idx, fail_idx))\n \n # create a mapping from system to reachability form\n to_rf_cols, to_rf_rows = bidict(), bidict()\n\n for sapidx in range(system.C):\n stateidx, actionidx = system.index_by_state_action.inv[sapidx]\n if reachable_mask[stateidx]:\n newidx = reachable_index_mapping[stateidx] # reachable.index(stateidx)\n to_rf_rows[(stateidx,actionidx)] = (newidx,actionidx)\n to_rf_cols[stateidx] = newidx\n\n if debug:\n print(\"computed state-action mapping\")\n\n # compute reduced transition matrix (without non-reachable states)\n # compute probability of reaching the target state in one step \n new_N = len(set(to_rf_cols.values()))\n new_C = len(set(to_rf_rows.values()))\n new_P = dok_matrix((new_C, new_N))\n new_index_by_state_action = bidict()\n \n if debug:\n print(\"shape of new_P (%s,%s)\" % (new_C,new_N))\n\n to_target = np.zeros(new_C)\n\n # mask for faster access\n target_mask = np.zeros(system.N, dtype=bool)\n for t in target_states:\n target_mask[t] = 1\n\n for newidx, ((source,action),(newsource,newaction)) in enumerate(to_rf_rows.items()):\n new_index_by_state_action[(newsource,newaction)] = newidx\n if target_mask[source]: # in target_states:\n to_target[newidx] = 1\n else:\n idx = system.index_by_state_action[(source,action)]\n for dest in [s for s,a,p in system.successors(source) if a == action]:\n if dest in to_rf_cols:\n newdest = to_rf_cols[dest]\n new_P[newidx, newdest] = system.P[idx, dest]\n if debug:\n print(\"computed transition matrix & to_target\")\n\n rf_system = ReachabilityForm.__initialize_system(\n new_P, \n 
new_index_by_state_action,\n to_target, \n to_rf_rows, \n system, \n new_target_label, \n new_fail_label)\n\n rf = ReachabilityForm(\n rf_system, \n initial_label, \n target_label=new_target_label, \n fail_label=new_fail_label,\n ignore_consistency_checks=True)\n\n rf.__adapt_style(to_rf_cols.inv, to_rf_rows.inv, system.visualization)\n\n return rf, to_rf_cols, to_rf_rows\n\n @staticmethod\n def __initialize_system(P, index_by_state_action, to_target, mapping, configuration, target_label, fail_label):\n C,N = P.shape\n P_compl = dok_matrix((C+2, N+2))\n target_state, fail_state = N, N+1\n\n index_by_state_action_compl = index_by_state_action.copy()\n index_by_state_action_compl[(target_state,0)] = C\n index_by_state_action_compl[(fail_state,0)] = C+1\n\n if configuration.reward_vector is not None:\n reward_vector = np.zeros(C+2)\n reward_vector[C] = 0\n reward_vector[C+1] = 0\n else:\n reward_vector = None\n\n \n # copy labels from configuration (i.e. a system)\n # mapping defines which state-action pairs in the system map to which state-action pairs in the r.f.\n label_to_actions = defaultdict(set)\n label_to_states = defaultdict(set)\n for idx in range(C):\n stateidx, actionidx = index_by_state_action.inv[idx]\n sys_stateidx, sys_actionidx = mapping.inv[(stateidx,actionidx)]\n labels = configuration.labels_by_state[sys_stateidx]\n for l in labels:\n label_to_states[l].add(stateidx)\n actionlabels = configuration.labels_by_action[(sys_stateidx,sys_actionidx)]\n for l in actionlabels:\n label_to_actions[l].add((stateidx,actionidx))\n\n if configuration.reward_vector is not None:\n # initialize new reward vector\n sys_idx = configuration.index_by_state_action[(sys_stateidx,sys_actionidx)]\n reward_vector[idx] = configuration.reward_vector[sys_idx]\n\n label_to_states[fail_label].add(fail_state)\n label_to_states[target_label].add(target_state)\n\n not_to_fail = np.zeros(C)\n for (idx, dest), p in P.items():\n not_to_fail[idx] += p\n P_compl[idx, dest] = p\n\n for idx, p_target in enumerate(to_target):\n if p_target > 0:\n P_compl[idx, target_state] = p_target\n p_fail = 1 - (p_target + not_to_fail[idx])\n if p_fail > 0:\n P_compl[idx, fail_state] = p_fail\n\n P_compl[C, target_state] = 1\n P_compl[C+1, fail_state] = 1\n\n return type(configuration)( \n P=P_compl, \n index_by_state_action=index_by_state_action_compl, \n label_to_states=label_to_states,\n label_to_actions=label_to_actions,\n reward_vector=reward_vector)\n \n def __adapt_style(self, state_map, state_action_map, viz_cfg):\n C,N = self.system.C,self.system.N\n\n def _state_style(sourceidx, labels):\n if sourceidx == N-2: # target state\n return self.__target_visualization_style.state_map(sourceidx,labels)\n elif sourceidx == N-1: \n return self.__fail_visualization_style.state_map(sourceidx,labels)\n else:\n return viz_cfg.state_map(state_map[sourceidx],labels)\n \n def _action_style(sourceidx,action,labels):\n if sourceidx == N-2:\n return self.__target_visualization_style.action_map(sourceidx,action,labels)\n elif sourceidx == N-1:\n return self.__fail_visualization_style.action_map(sourceidx,action,labels)\n else:\n _sourceidx,_action = state_action_map[(sourceidx,action)]\n return viz_cfg.action_map(_sourceidx,_action,labels)\n \n def _trans_style_dtmc(sourceidx,destidx,p):\n if destidx == N-2:\n return self.__target_visualization_style.trans_map(sourceidx,destidx,p)\n elif destidx == N-1:\n return self.__fail_visualization_style.trans_map(sourceidx,destidx,p)\n else:\n return 
viz_cfg.trans_map(state_map[sourceidx],state_map[destidx],p)\n \n def _trans_style_mdp(sourceidx,action,destidx,p):\n if destidx == N-2:\n return self.__target_visualization_style.trans_map(sourceidx,action,destidx,p)\n elif destidx == N-1:\n return self.__fail_visualization_style.trans_map(sourceidx,action,destidx,p)\n else:\n _sourceidx,_action = state_action_map[(sourceidx,action)]\n return viz_cfg.trans_map(_sourceidx,_action,state_map[destidx],p)\n \n if type(viz_cfg) == DTMCVisualizationConfig:\n self.system.visualization = DTMCVisualizationConfig(state_map=_state_style,trans_map=_trans_style_dtmc)\n else:\n self.system.visualization = VisualizationConfig(state_map=_state_style,trans_map=_trans_style_mdp,action_map=_action_style)\n\n def __repr__(self):\n return \"ReachabilityForm(initial=%s, target=%s, fail=%s, system=%s)\" % (self.initial_label, self.target_label, self.fail_label, self.system)\n\n def fark_constraints(self, threshold, mode):\n \"\"\"returns the right constraint set dependent on the given mode.\n\n :param threshold: the threshold\n :type threshold: float\n :param mode: either 'min' or 'max'\n :type mode: str\n :return: either :math:`(C+1) \\\\times N`-matrix :math:`M_z`, and vector of length :math:`C+1` :math:`rhs_z` or :math:`(N+1) \\\\times C`-matrix :math:`M_y`, and :math:`N+1`-vector :math:`rhs_y`.\n :rtype: Tuple[scipy.sparse.dok_matrix, np.ndarray[float]]\n \"\"\" \n assert mode in [\"min\", \"max\"]\n\n if mode == \"min\":\n return self.fark_z_constraints(threshold)\n else:\n return self.fark_y_constraints(threshold)\n\n def fark_z_constraints(self, threshold):\n \"\"\"\n Returns a matrix :math:`M_z` and a vector :math:`rhs_z` such that for a :math:`N` vector :math:`\\mathbf{z}`\n\n .. math::\n\n M_z\\, \\mathbf{z} \\leq rhs_z \\quad \\\\text{ iff } \\quad \n \\\\mathbf{A} \\, \\mathbf{z} \\leq \\mathbf{b} \\land \\mathbf{z}(\\\\texttt{init}) \\geq \\lambda\n \\quad \\\\text{ iff } \\quad\n \\mathbf{z} \\in \\mathcal{P}^{\\\\text{min}}(\\lambda)\n \n :param threshold: The threshold :math:`\\lambda` for which the Farkas z-constraints should be constructed\n :type threshold: Float\n :return: :math:`(C+1) \\\\times N`-matrix :math:`M_z`, and vector of length :math:`C+1` :math:`rhs_z`\n :rtype: Tuple[scipy.sparse.dok_matrix, np.ndarray[float]]\n \"\"\"\n C,N = self.__P.shape\n\n rhs = self.to_target.A1.copy()\n rhs.resize(C+1)\n rhs[C] = -threshold\n\n delta = np.zeros(N)\n delta[self.initial] = 1\n\n fark_z_matr = vstack((self.A,-delta))\n\n return fark_z_matr, rhs\n\n def fark_y_constraints(self, threshold):\n \"\"\" \n Returns a matrix :math:`M_y` and a vector :math:`rhs_y` such that for a :math:`C` vector :math:`\\mathbf{y}`\n\n .. math::\n\n M_y\\, \\mathbf{y} \\leq rhs_y \\quad \\\\text{ iff } \\quad\n \\mathbf{y} \\, \\mathbf{A} \\leq \\delta_{\\\\texttt{init}} \\land \\mathbf{b} \\, \\mathbf{y} \\geq \\lambda\n \\quad \\\\text{ iff } \\quad\n \\mathbf{y} \\in \\mathcal{P}^{\\\\text{max}}(\\lambda)\n\n where :math:`\\lambda` is the threshold, :math:`\\mathbf{A}` is the system matrix and :math:`\\mathbf{b}` is to_target. 
The vector :math:`\\delta_{\\\\texttt{init}}` is 1 for the initial state, and otherwise 0.\n\n :param threshold: The threshold :math:`\\lambda` for which the Farkas y-constraints should be constructed\n :type threshold: Float\n :return: :math:`(N+1) \\\\times C`-matrix :math:`M_y`, and :math:`N+1`-vector :math:`rhs_y` \n :rtype: Tuple[scipy.sparse.dok_matrix, np.ndarray[float]]\n \"\"\"\n C,N = self.__P.shape\n\n b = cast_dok_matrix(self.to_target)\n\n rhs = np.zeros(N+1)\n rhs[self.initial] = 1\n rhs[N] = -threshold\n\n fark_y_matr = hstack((self.A,-b)).T\n return fark_y_matr, rhs\n\n def _reach_form_id_matrix(self):\n \"\"\"Computes the matrix :math:`I` for a given reachability form that for every row (st,act) has an entry 1 at the column corresponding to st.\"\"\"\n C,N = self.__P.shape\n I = dok_matrix((C,N))\n\n for i in range(0,C):\n (state, _) = self.__index_by_state_action.inv[i]\n I[i,state] = 1\n\n return I\n\n def max_z_state(self,solver=\"cbc\"):\n \"\"\"\n Returns a solution to the LP \n\n .. math::\n\n \\max \\, \\sum_{s} \\mathbf{x}(s) \\quad \\\\text{ subject to } \\quad \\mathbf{x} \\in \\mathcal{P}^{\\\\text{min}}(0)\n \n The solution vector corresponds to the minimal reachability probability, i.e. \n :math:`\\mathbf{x}^*(s) = \\mathbf{Pr}^{\\\\text{min}}_s(\\diamond \\\\text{goal})` for all :math:`s \\in S`.\n\n :param solver: Solver that should be used, defaults to \"cbc\"\n :type solver: str, optional\n :return: Result vector\n :rtype: np.ndarray[float]\n \"\"\" \n C,N = self.__P.shape\n matr, rhs = self.fark_z_constraints(0)\n opt = np.ones(N)\n max_z_lp = LP.from_coefficients(\n matr,rhs,opt,sense=\"<=\",objective=\"max\")\n\n for st_idx in range(N):\n max_z_lp.add_constraint([(st_idx,1)],\">=\",0)\n max_z_lp.add_constraint([(st_idx,1)],\"<=\",1)\n\n result = max_z_lp.solve(solver=solver)\n return result.result_vector\n\n def max_z_state_action(self,solver=\"cbc\"):\n \"\"\"\n Let :math:`\\mathbf{x}` be a solution vector to `max_z_state`. This function then returns a \n :math:`C` vector :math:`\\mathbf{v}` such that\n\n .. math::\n\n \\mathbf{v}((s,a)) = \\mathbf{P}((s,a),\\\\text{goal}) + \\sum_{d \\in S } \\mathbf{P}((s,a),d) \\mathbf{x}(d)\n\n for all :math:`(s,a) \\in \\mathcal{M}`.\n\n :param solver: Solver that should be used, defaults to \"cbc\"\n :type solver: str, optional\n :return: Result vector\n :rtype: np.ndarray[float]\n \"\"\" \n\n max_z_vec = self.max_z_state(solver=solver)\n return self.__P.dot(max_z_vec) + self.to_target.A1\n\n def max_y_state_action(self,solver=\"cbc\"):\n \"\"\"\n Returns a solution to the LP \n\n .. math::\n\n \\max \\, \\mathbf{b} \\, \\mathbf{x} \\quad \\\\text{ subject to } \\quad \\mathbf{x} \\in \\mathcal{P}^{\\\\text{max}}(0)\n \n :param solver: Solver that should be used, defaults to \"cbc\"\n :type solver: str, optional\n :return: Result vector\n :rtype: np.ndarray[float]\n \"\"\"\n C,N = self.__P.shape\n\n matr, rhs = self.fark_y_constraints(0)\n max_y_lp = LP.from_coefficients(\n matr,rhs,self.to_target,sense=\"<=\",objective=\"max\")\n\n for sap_idx in range(C):\n max_y_lp.add_constraint([(sap_idx,1)],\">=\",0)\n\n result = max_y_lp.solve(solver=solver)\n return result.result_vector\n\n def max_y_state(self,solver=\"cbc\"):\n \"\"\"\n Let :math:`\\mathbf{x}` be a solution vector to `max_y_state_action`. This function then returns a \n :math:`N` vector :math:`\\mathbf{v}` such that\n\n ..
math::\n\n \\mathbf{v}(s) = \\sum_{a \\in \\\\text{Act}(s)} \\mathbf{x}((s,a))\n\n for all :math:`s \\in S`.\n\n :param solver: Solver that should be used, defaults to \"cbc\"\n :type solver: str, optional\n :return: Result vector\n :rtype: np.ndarray[float]\n \"\"\" \n C,N = self.__P.shape\n max_y_vec = self.max_y_state_action(solver=solver)\n max_y_states = np.zeros(N)\n max_y_states[self.initial] = 1\n for sap_idx in range(C):\n (st,act) = self.__index_by_state_action.inv[sap_idx]\n max_y_states[st] = max_y_states[st] + max_y_vec[sap_idx]\n return max_y_states\n\n def pr_min(self,solver=\"cbc\"):\n \"\"\"Computes an :math:`N` vector :math:`\\mathbf{x}` such that \n :math:`\\mathbf{x}(s) = \\mathbf{Pr}^{\\\\text{min}}_s(\\diamond \\\\text{goal})` for :math:`s \\in S`.\n\n :param solver: Solver that should be used, defaults to \"cbc\"\n :type solver: str, optional\n :return: Result vector\n :rtype: np.ndarray[float]\n \"\"\" \n return self.max_z_state(solver=solver)\n\n def pr_max(self,solver=\"cbc\"):\n \"\"\"Computes an :math:`N` vector :math:`\\mathbf{x}` such that :math:`\\mathbf{x}(s) = \n \\mathbf{Pr}^{\\\\text{max}}_s(\\diamond \\\\text{goal})` for :math:`s \\in S`.\n\n :param solver: Solver that should be used, defaults to \"cbc\"\n :type solver: str, optional\n :return: Result vector\n :rtype: np.ndarray[float]\n \"\"\"\n C,N = self.__P.shape\n\n matr, rhs = self.fark_z_constraints(1)\n opt = np.ones(N)\n pr_max_z_lp = LP.from_coefficients(\n matr,rhs,opt,sense=\">=\",objective=\"min\")\n\n for st_idx in range(N):\n pr_max_z_lp.add_constraint([(st_idx,1)],\">=\",0)\n pr_max_z_lp.add_constraint([(st_idx,1)],\"<=\",1)\n\n result = pr_max_z_lp.solve(solver=solver)\n return result.result_vector\n\n def _check_mec_freeness(self):\n\n # indices of old fail and target state\n target_state, target_action = self.system.N-2, self.system.C-2\n fail_state, fail_action = self.system.N-1, self.system.C-1\n\n if len(set(self.system.predecessors(fail_state))) == 1:\n # if that happens, then fail state has no predecessors but itself.\n # in that case, the fail state has no impact on the other states.\n assert (self.pr_min() == 1).all()\n return\n\n import copy\n new_label_to_states = copy.deepcopy(self.system.states_by_label)\n new_index_by_state_action = copy.deepcopy(self.system.index_by_state_action)\n\n # create a new transition matrix with 2 new entries for a new target and fail state\n P = dok_matrix((self.system.C+2,self.system.N+2))\n P[:self.system.C,:self.system.N] = self.system.P\n\n # indices of new target and new fail state according to RF\n new_target_state, new_target_action = self.system.N, self.system.C\n new_fail_state, new_fail_action = self.system.N+1, self.system.C+1\n # index state-action pairs\n new_index_by_state_action[(new_target_state,0)] = new_target_action\n new_index_by_state_action[(new_fail_state,0)] = new_fail_action\n \n # map new target and new fail state only to themselves\n P[new_target_action,new_target_state] = 1\n P[new_fail_action,new_fail_state] = 1\n\n # remap old fail & target state to new target state\n P[target_action,target_state] = 0\n P[target_action,new_target_state] = 1\n P[fail_action,fail_state] = 0\n P[fail_action,new_target_state] = 1\n \n # remove fail and target label from old target and fail state\n new_label_to_states.remove(self.target_label, target_state)\n new_label_to_states.remove(self.fail_label, fail_state)\n # add fail and target label to new target and new fail state\n new_label_to_states.add(self.target_label, 
new_target_state)\n new_label_to_states.add(self.fail_label, new_fail_state)\n\n # new system should already be in RF, so calling .reduce is not necessary\n target_or_fail_system = type(self.system)(\n P=P, \n index_by_state_action=new_index_by_state_action,\n label_to_actions={},\n label_to_states=new_label_to_states)\n \n target_or_fail_rf = ReachabilityForm(\n target_or_fail_system,\n self.initial_label,\n self.target_label,\n self.fail_label)\n\n assert (target_or_fail_rf.pr_min() == 1).all(), target_or_fail_rf.pr_min()\n\n" ]
[ [ "scipy.sparse.dok_matrix", "numpy.ones", "scipy.sparse.vstack", "scipy.sparse.hstack", "numpy.zeros" ] ]
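A minimal usage sketch for the class above, under the assumption that switss's `DTMC` constructor accepts a row-stochastic matrix plus a `label_to_states` mapping (mirroring the keyword call in `__initialize_system`); `reduce()` is documented above to return the reachability form together with the state and state-action index mappings:

```python
from switss.model import DTMC, ReachabilityForm

# hypothetical 3-state chain: init --0.7--> goal, init --0.3--> sink
P = [[0.0, 0.7, 0.3],
     [0.0, 1.0, 0.0],
     [0.0, 0.0, 1.0]]
mc = DTMC(P, label_to_states={"init": {0}, "goal": {1}})

rf, state_map, state_action_map = ReachabilityForm.reduce(mc, "init", "goal")
print(rf.pr_min())  # per-state probability of reaching goal; pr_min == pr_max for DTMCs
```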
ruivieira/workshop-recommendation-engines
[ "3efe678856dde1862cf6262ad1117aaa7b506265" ]
[ "utils.py" ]
[ "import csv\nfrom scipy import sparse\nimport numpy as np\n\ndef load_data(path):\n max_movie = 0\n max_user = 0\n data = []\n pairs = []\n with open(path, 'rt') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n next(reader, None)\n for row in reader:\n movie_id = int(row[0])\n user_id = int(row[1])\n rating = float(row[2])\n max_movie = max(movie_id, max_movie)\n max_user = max(user_id, max_user)\n data.append((movie_id, user_id, rating,))\n pairs.append((movie_id, user_id, ))\n \n ratings = sparse.lil_matrix((max_movie, max_user))\n for d in data:\n ratings[d[0]-1, d[1]-1] = d[2]\n\n return (ratings, np.array(pairs))" ]
[ [ "numpy.array", "scipy.sparse.lil_matrix" ] ]
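A minimal sketch of calling `load_data` above; the CSV path is hypothetical, and the file is assumed to have one header row followed by `(movieId, userId, rating)` columns, as the reader expects:

```python
from utils import load_data

ratings, pairs = load_data("data/ratings.csv")
print("ratings:", ratings.shape)       # sparse (movies x users) LIL matrix
print("observed pairs:", pairs.shape)  # one (movie_id, user_id) row per rating
print("first rating:", ratings[pairs[0][0] - 1, pairs[0][1] - 1])
```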
nguyentthai96/Mem2Seq
[ "7ad9fb8c2e70b39ebfcbea659d755d0ee9c2bbf5" ]
[ "models/enc_vanilla.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\nfrom utils.masked_cross_entropy import *\nfrom utils.config import *\nimport random\nimport numpy as np\nimport datetime\nfrom utils.measures import wer,moses_multi_bleu\nimport nltk\nfrom sklearn.metrics import f1_score\nfrom itertools import chain\n\nclass VanillaSeqToSeq(nn.Module):\n def __init__(self,hidden_size,max_len,max_r,lang,path,task,lr=0.01,n_layers=1, dropout=0.1):\n super(VanillaSeqToSeq, self).__init__()\n self.name = \"VanillaSeqToSeq\"\n self.task = task\n self.input_size = lang.n_words\n self.output_size = lang.n_words\n self.hidden_size = hidden_size\n self.max_len = max_len ## max input\n self.max_r = max_r ## max response len \n self.lang = lang\n self.lr = lr\n self.decoder_learning_ratio = 1.0\n self.n_layers = n_layers\n self.dropout = dropout\n if path:\n if USE_CUDA:\n logging.info(\"MODEL {} LOADED\".format(str(path)))\n self.encoder = torch.load(str(path)+'/enc.th')\n self.decoder = torch.load(str(path)+'/dec.th')\n else:\n logging.info(\"MODEL {} LOADED\".format(str(path)))\n self.encoder = torch.load(str(path)+'/enc.th',lambda storage, loc: storage)\n self.decoder = torch.load(str(path)+'/dec.th',lambda storage, loc: storage)\n self.decoder.viz_arr =[] \n else:\n self.encoder = EncoderRNN(lang.n_words, hidden_size, n_layers,dropout)\n self.decoder = VanillaDecoderRNN(hidden_size, lang.n_words, self.max_len, n_layers, dropout)\n\n # Initialize optimizers and criterion\n self.encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=lr)\n self.decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=lr * self.decoder_learning_ratio)\n \n self.loss = 0\n self.print_every = 1\n # Move models to GPU\n if USE_CUDA:\n self.encoder.cuda()\n self.decoder.cuda()\n\n def print_loss(self):\n print_loss_avg = self.loss / self.print_every\n self.print_every += 1\n return 'L:{:.2f}'.format(print_loss_avg)\n\n def save_model(self,dec_type):\n name_data = \"KVR/\" if self.task=='' else \"BABI/\"\n if USEKB:\n directory = 'save/vanilla_KB-'+name_data+str(self.task)+'HDD'+str(self.hidden_size)+'DR'+str(self.dropout)+'L'+str(self.n_layers)+'lr'+str(self.lr)+str(dec_type) \n else:\n directory = 'save/vanilla_noKB-'+name_data+str(self.task)+'HDD'+str(self.hidden_size)+'DR'+str(self.dropout)+'L'+str(self.n_layers)+'lr'+str(self.lr)+str(dec_type) \n if not os.path.exists(directory):\n os.makedirs(directory)\n torch.save(self.encoder, directory+'/enc.th')\n torch.save(self.decoder, directory+'/dec.th')\n \n def load_model(self,file_name_enc,file_name_dec):\n self.encoder = torch.load(file_name_enc)\n self.decoder = torch.load(file_name_dec)\n\n\n def train_batch(self, input_batches, input_lengths, target_batches, \n target_lengths, target_index, target_gate, batch_size, clip,\n teacher_forcing_ratio, reset): \n # Zero gradients of both optimizers\n if reset:\n self.loss = 0\n self.print_every = 1\n\n self.encoder_optimizer.zero_grad()\n self.decoder_optimizer.zero_grad()\n # self.opt.zero_grad()\n loss_Vocab,loss_Ptr,loss_Gate = 0,0,0\n # Run words through encoder\n encoder_outputs, encoder_hidden = self.encoder(input_batches, input_lengths)\n \n # Prepare input and output variables\n decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))\n decoder_hidden = (encoder_hidden[0][:self.decoder.n_layers],encoder_hidden[1][:self.decoder.n_layers])\n\n max_target_length = max(target_lengths)\n all_decoder_outputs_vocab = Variable(torch.zeros(max_target_length, batch_size, self.output_size))\n # Move new Variables to CUDA\n if USE_CUDA:\n all_decoder_outputs_vocab = all_decoder_outputs_vocab.cuda()\n decoder_input = decoder_input.cuda()\n\n # Choose whether to use teacher forcing\n use_teacher_forcing = random.random() < teacher_forcing_ratio\n \n if use_teacher_forcing: \n # Run through decoder one time step at a time\n for t in range(max_target_length):\n decoder_vocab, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)\n\n all_decoder_outputs_vocab[t] = decoder_vocab\n decoder_input = target_batches[t] # Next input is current target\n if USE_CUDA: decoder_input = decoder_input.cuda()\n \n else:\n for t in range(max_target_length):\n decoder_vocab,decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)\n all_decoder_outputs_vocab[t] = decoder_vocab\n topv, topi = decoder_vocab.data.topk(1)\n decoder_input = Variable(topi.view(-1)) # Chosen word is next input\n if USE_CUDA: decoder_input = decoder_input.cuda()\n \n #Loss calculation and backpropagation\n loss_Vocab = masked_cross_entropy(\n all_decoder_outputs_vocab.transpose(0, 1).contiguous(), # -> batch x seq\n target_batches.transpose(0, 1).contiguous(), # -> batch x seq\n target_lengths\n )\n\n loss = loss_Vocab\n loss.backward()\n \n # Clip gradient norms\n ec = torch.nn.utils.clip_grad_norm(self.encoder.parameters(), clip)\n dc = torch.nn.utils.clip_grad_norm(self.decoder.parameters(), clip)\n # Update parameters with optimizers\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n # self.opt.step()\n \n self.loss += loss.data[0]\n \n\n\n\n def evaluate_batch(self,batch_size,input_batches, input_lengths, target_batches): \n # Set to not-training mode to disable dropout\n self.encoder.train(False)\n self.decoder.train(False) \n # Run words through encoder\n encoder_outputs, encoder_hidden = self.encoder(input_batches, input_lengths, None)\n # Prepare input and output variables\n decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))\n decoder_hidden = (encoder_hidden[0][:self.decoder.n_layers],encoder_hidden[1][:self.decoder.n_layers])\n\n decoded_words = []\n all_decoder_outputs_vocab = Variable(torch.zeros(self.max_r, batch_size, self.decoder.output_size))\n # Move new Variables to CUDA\n\n if USE_CUDA:\n all_decoder_outputs_vocab = all_decoder_outputs_vocab.cuda()\n decoder_input = decoder_input.cuda()\n \n # Run through decoder one time step at a time\n for t in range(self.max_r):\n decoder_vocab,decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)\n\n all_decoder_outputs_vocab[t] = decoder_vocab\n topv, topi = decoder_vocab.data.topk(1)\n decoder_input = Variable(topi.view(-1))\n \n decoded_words.append(['<EOS>' if ni == EOS_token else self.lang.index2word[ni] for ni in topi.view(-1)])\n # Next input is chosen word\n if USE_CUDA: decoder_input = decoder_input.cuda()\n\n # Set back to training mode\n self.encoder.train(True)\n self.decoder.train(True)\n \n return decoded_words\n\n def evaluate(self,dev,avg_best,BLEU=False):\n logging.info(\"STARTING EVALUATION\")\n acc_avg = 0.0\n wer_avg = 0.0\n bleu_avg = 0.0\n acc_P = 0.0\n acc_V = 0.0\n ref = []\n hyp = []\n ref_s = \"\"\n hyp_s = \"\"\n pbar = tqdm(enumerate(dev),total=len(dev))\n for j, data_dev in pbar: \n words = self.evaluate_batch(len(data_dev[1]),data_dev[0],data_dev[1],data_dev[2]) \n acc=0\n w = 0\n temp_gen = []\n #print(words)\n for i, row in enumerate(np.transpose(words)):\n st = 
''\n for e in row:\n if e== '<EOS>':\n break\n else:\n st+= e + ' '\n temp_gen.append(st)\n correct = data_dev[7][i] \n\n if (correct.lstrip().rstrip() == st.lstrip().rstrip()):\n acc+=1\n #else:\n # print(\"Correct:\"+str(correct.lstrip().rstrip()))\n # print(\"\\tPredict:\"+str(st.lstrip().rstrip()))\n # print(\"\\tFrom:\"+str(self.from_whichs[:,i]))\n\n w += wer(correct.lstrip().rstrip(),st.lstrip().rstrip())\n ref.append(str(correct.lstrip().rstrip()))\n hyp.append(str(st.lstrip().rstrip()))\n ref_s+=str(correct.lstrip().rstrip())+ \"\\n\"\n hyp_s+=str(st.lstrip().rstrip()) + \"\\n\"\n\n acc_avg += acc/float(len(data_dev[1]))\n wer_avg += w/float(len(data_dev[1]))\n pbar.set_description(\"R:{:.4f},W:{:.4f}\".format(acc_avg/float(len(dev)),\n wer_avg/float(len(dev))))\n\n if (BLEU): \n bleu_score = moses_multi_bleu(np.array(hyp), np.array(ref), lowercase=True) \n logging.info(\"BLEU SCORE:\"+str(bleu_score)) \n \n if (bleu_score >= avg_best):\n self.save_model(str(self.name)+str(bleu_score))\n logging.info(\"MODEL SAVED\")\n return bleu_score\n else:\n acc_avg = acc_avg/float(len(dev))\n if (acc_avg >= avg_best):\n self.save_model(str(self.name)+str(acc_avg))\n logging.info(\"MODEL SAVED\")\n return acc_avg\n\n\nclass EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1):\n super(EncoderRNN, self).__init__() \n self.input_size = input_size\n self.hidden_size = hidden_size\n self.n_layers = n_layers\n self.dropout = dropout \n self.embedding_dropout = nn.Dropout(dropout) \n self.embedding = nn.Embedding(input_size, hidden_size)\n self.lstm = nn.LSTM(hidden_size, hidden_size, n_layers, dropout=self.dropout)\n if USE_CUDA:\n self.lstm = self.lstm.cuda() \n self.embedding_dropout = self.embedding_dropout.cuda()\n self.embedding = self.embedding.cuda() \n\n def get_state(self, input):\n \"\"\"Get cell states and hidden states.\"\"\"\n batch_size = input.size(1)\n h0_encoder = Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size )) ### * self.num_directions = 2 if bi\n c0_encoder = Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size )) \n if USE_CUDA:\n h0_encoder = h0_encoder.cuda()\n c0_encoder = c0_encoder.cuda() \n return h0_encoder, c0_encoder\n\n def forward(self, input_seqs, input_lengths, hidden=None):\n # Note: we run this all at once (over multiple batches of multiple sequences)\n embedded = self.embedding(input_seqs)\n embedded = self.embedding_dropout(embedded) \n h0_encoder, c0_encoder = self.get_state(input_seqs)\n if input_lengths:\n embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=False)\n outputs, (src_h_t, src_c_t) = self.lstm(embedded, (h0_encoder, c0_encoder))\n if input_lengths:\n outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=False)\n return outputs, (src_h_t, src_c_t)\n\nclass VanillaDecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size, max_len, n_layers=1, dropout=0.1):\n super(VanillaDecoderRNN, self).__init__()\n # Keep for reference\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.n_layers = n_layers\n self.dropout = dropout\n\n # Define layers\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.embedding_dropout = nn.Dropout(dropout)\n self.lstm = nn.LSTM(hidden_size, hidden_size, n_layers, dropout=dropout)\n self.out = nn.Linear(hidden_size, output_size)\n if USE_CUDA:\n self.embedding = self.embedding.cuda()\n self.embedding_dropout = self.embedding_dropout.cuda()\n self.lstm = 
self.lstm.cuda()\n self.out = self.out.cuda()\n\n def forward(self, input_seq, last_hidden, encoder_outputs):\n batch_size = input_seq.size(0)\n embedded = self.embedding(input_seq)\n embedded = self.embedding_dropout(embedded)\n embedded = embedded.view(1, batch_size, self.hidden_size) # S=1 x B x N\n rnn_output, hidden = self.lstm(embedded, last_hidden)\n output = self.out(rnn_output)\n\n return output.squeeze(0), hidden\n" ]
[ [ "torch.nn.Dropout", "torch.LongTensor", "torch.nn.LSTM", "torch.load", "torch.zeros", "torch.nn.Embedding", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "numpy.transpose", "numpy.array", "torch.save" ] ]
ecreager/models
[ "dafed92d823dee3ee8f176ee52288554df30220d" ]
[ "research/object_detection/dataset_tools/oid_geotagged_tfrecord_creation.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Utilities for creating TFRecords of TF examples for the Open Images dataset.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom object_detection.core import standard_fields\nfrom object_detection.utils import dataset_util\n\n\ndef tf_example_from_annotations_data_frame(annotations_data_frame, label_maps,\n encoded_image):\n \"\"\"Populates a TF Example message with image annotations from a data frame.\n\n Args:\n annotations_data_frame: Data frame containing the annotations for a single\n image.\n label_maps: list of label maps (first = labels, second = countries)\n encoded_image: The encoded image string\n\n Returns:\n The populated TF Example, if the label of at least one object is present in\n label_map. Otherwise, returns None.\n \"\"\"\n\n label_map, country_map = label_maps\n \n filtered_data_frame = annotations_data_frame[\n annotations_data_frame.LabelName.isin(label_map)]\n filtered_data_frame = filtered_data_frame[\n filtered_data_frame.country.isin(country_map)]\n filtered_data_frame_boxes = filtered_data_frame[\n ~filtered_data_frame.YMin.isnull()]\n filtered_data_frame_labels = filtered_data_frame[\n filtered_data_frame.YMin.isnull()]\n image_id = annotations_data_frame.ImageID.iloc[0]\n\n feature_map = {\n standard_fields.TfExampleFields.object_bbox_ymin:\n dataset_util.float_list_feature(\n filtered_data_frame_boxes.YMin.as_matrix()),\n standard_fields.TfExampleFields.object_bbox_xmin:\n dataset_util.float_list_feature(\n filtered_data_frame_boxes.XMin.as_matrix()),\n standard_fields.TfExampleFields.object_bbox_ymax:\n dataset_util.float_list_feature(\n filtered_data_frame_boxes.YMax.as_matrix()),\n standard_fields.TfExampleFields.object_bbox_xmax:\n dataset_util.float_list_feature(\n filtered_data_frame_boxes.XMax.as_matrix()),\n standard_fields.TfExampleFields.object_class_text:\n dataset_util.bytes_list_feature(\n filtered_data_frame_boxes.LabelName.as_matrix()),\n standard_fields.TfExampleFields.object_class_label:\n dataset_util.int64_list_feature(\n filtered_data_frame_boxes.LabelName.map(lambda x: label_map[x])\n .as_matrix()),\n standard_fields.TfExampleFields.country_class_text:\n dataset_util.bytes_list_feature(\n filtered_data_frame_boxes.country.as_matrix()),\n standard_fields.TfExampleFields.country_class_label:\n dataset_util.int64_list_feature(\n filtered_data_frame_boxes.country.map(lambda x: country_map[x])\n .as_matrix()),\n standard_fields.TfExampleFields.filename:\n dataset_util.bytes_feature('{}.jpg'.format(image_id)),\n standard_fields.TfExampleFields.source_id:\n dataset_util.bytes_feature(image_id),\n standard_fields.TfExampleFields.image_encoded:\n dataset_util.bytes_feature(encoded_image),\n }\n\n if 'IsGroupOf' in filtered_data_frame.columns:\n 
feature_map[standard_fields.TfExampleFields.\n object_group_of] = dataset_util.int64_list_feature(\n filtered_data_frame_boxes.IsGroupOf.as_matrix().astype(int))\n if 'IsOccluded' in filtered_data_frame.columns:\n feature_map[standard_fields.TfExampleFields.\n object_occluded] = dataset_util.int64_list_feature(\n filtered_data_frame_boxes.IsOccluded.as_matrix().astype(\n int))\n if 'IsTruncated' in filtered_data_frame.columns:\n feature_map[standard_fields.TfExampleFields.\n object_truncated] = dataset_util.int64_list_feature(\n filtered_data_frame_boxes.IsTruncated.as_matrix().astype(\n int))\n if 'IsDepiction' in filtered_data_frame.columns:\n feature_map[standard_fields.TfExampleFields.\n object_depiction] = dataset_util.int64_list_feature(\n filtered_data_frame_boxes.IsDepiction.as_matrix().astype(\n int))\n\n if 'ConfidenceImageLabel' in filtered_data_frame_labels.columns:\n feature_map[standard_fields.TfExampleFields.\n image_class_label] = dataset_util.int64_list_feature(\n filtered_data_frame_labels.LabelName.map(\n lambda x: label_map[x]).as_matrix())\n feature_map[standard_fields.TfExampleFields.\n image_class_text] = dataset_util.bytes_list_feature(\n filtered_data_frame_labels.LabelName.as_matrix())\n return tf.train.Example(features=tf.train.Features(feature=feature_map))\n" ]
[ [ "tensorflow.train.Features" ] ]
juiyuliao/GSEA
[ "edf0e7ace73791d425a1d5934834ac7bb8306c05" ]
[ "gseapy/algorithm.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport sys, logging\nimport numpy as np\n#from functools import reduce\n#from multiprocessing import Pool\nfrom math import ceil\nfrom gseapy.stats import multiple_testing_correction\nfrom joblib import delayed, Parallel\n\n\ndef enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1, \n nperm=1000, rs=None, single=False, scale=False):\n \"\"\"This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA.\n\n :param gene_list: The ordered gene list gene_name_list, rank_metric.index.values\n :param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.\n :param weighted_score_type: It's the same with gsea's weighted_score method. Weighting by the correlation\n is a very reasonable choice that allows significant gene sets with less than perfect coherence.\n options: 0(classic),1,1.5,2. default:1. if one is interested in penalizing sets for lack of\n coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1\n might be appropriate. On the other hand, if one uses sets with large number of genes and only\n a small subset of those is expected to be coherent, then one could consider using p > 1.\n Our recommendation is to use p = 1 and use other settings only if you are very experienced\n with the method and its behavior.\n\n :param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in\n the gene list. Or rankings, rank_metric.values\n :param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value\n equal to the permutation number.\n :param rs: Random state for initializing gene list shuffling. Default: seed=None\n\n :return:\n\n ES: Enrichment score (real number between -1 and +1)\n\n ESNULL: Enrichment score calculated from random permutations.\n\n Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.\n\n RES: Numerical vector containing the running enrichment score for all locations in the gene list .\n\n \"\"\"\n N = len(gene_list)\n # Test whether each element of a 1-D array is also present in a second array\n # It's more intuitive here than original enrichment_score source code.\n # use .astype to covert bool to integer\n tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int) # notice that the sign is 0 (no tag) or 1 (tag)\n\n if weighted_score_type == 0 :\n correl_vector = np.repeat(1, N)\n else:\n correl_vector = np.abs(correl_vector)**weighted_score_type\n\n # get indices of tag_indicator\n hit_ind = np.flatnonzero(tag_indicator).tolist()\n # if used for compute esnull, set esnull equal to permutation number, e.g. 
1000\n # else just compute enrichment scores\n # set axis to 1, because we have 2D array\n axis = 1\n tag_indicator = np.tile(tag_indicator, (nperm+1,1))\n correl_vector = np.tile(correl_vector,(nperm+1,1))\n # gene list permutation\n rs = np.random.RandomState(rs)\n for i in range(nperm): rs.shuffle(tag_indicator[i])\n # np.apply_along_axis(rs.shuffle, 1, tag_indicator)\n\n Nhint = tag_indicator.sum(axis=axis, keepdims=True)\n sum_correl_tag = np.sum(correl_vector*tag_indicator, axis=axis, keepdims=True)\n # compute ES score, the code below is identical to gsea enrichment_score method.\n no_tag_indicator = 1 - tag_indicator\n Nmiss = N - Nhint\n norm_tag = 1.0/sum_correl_tag\n norm_no_tag = 1.0/Nmiss\n\n RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis)\n\n if scale: RES = RES / N\n if single:\n es_vec = RES.sum(axis=axis)\n else:\n max_ES, min_ES = RES.max(axis=axis), RES.min(axis=axis)\n es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES)\n # extract values\n es, esnull, RES = es_vec[-1], es_vec[:-1], RES[-1,:]\n\n return es, esnull, hit_ind, RES\n\n\n\ndef enrichment_score_tensor(gene_mat, cor_mat, gene_sets, weighted_score_type, nperm=1000,\n rs=None, single=False, scale=False):\n \"\"\"Next generation algorithm of GSEA and ssGSEA.\n\n :param gene_mat: the ordered gene list(vector) with or without gene indices matrix.\n :param cor_mat: correlation vector or matrix (e.g. signal to noise scores)\n corresponding to the genes in the gene list or matrix.\n :param dict gene_sets: gmt file dict.\n :param float weighted_score_type: weighting by the correlation.\n options: 0(classic), 1, 1.5, 2. default:1 for GSEA and 0.25 for ssGSEA.\n :param int nperm: permutation times.\n :param bool scale: If True, normalize the scores by number of genes_mat.\n :param bool single: If True, use ssGSEA algorithm, otherwise use GSEA.\n :param rs: Random state for initialize gene list shuffling.\n Default: seed=None\n :return: a tuple contains::\n\n | ES: Enrichment score (real number between -1 and +1), for ssGSEA, set scale eq to True.\n | ESNULL: Enrichment score calculated from random permutation.\n | Hits_Indices: Indices of genes if genes are included in gene_set.\n | RES: The running enrichment score for all locations in the gene list.\n\n \"\"\"\n rs = np.random.RandomState(rs)\n # gene_mat -> 1d: prerank, ssSSEA or 2d: GSEA\n keys = sorted(gene_sets.keys())\n\n if weighted_score_type == 0:\n # don't bother doing calcuation, just set to 1\n cor_mat = np.ones(cor_mat.shape)\n elif weighted_score_type > 0:\n pass\n else:\n logging.error(\"Using negative values of weighted_score_type, not allowed\")\n sys.exit(0)\n\n cor_mat = np.abs(cor_mat)\n if cor_mat.ndim ==1:\n # ssGSEA or Prerank\n # genestes->M, genes->N, perm-> axis=2\n N, M = len(gene_mat), len(keys)\n # generate gene hits matrix\n # for 1d ndarray of gene_mat, set assume_unique=True,\n # means the input arrays are both assumed to be unique,\n # which can speed up the calculation.\n tag_indicator = np.vstack([np.in1d(gene_mat, gene_sets[key], assume_unique=True) for key in keys])\n tag_indicator = tag_indicator.astype(int)\n # index of hits\n hit_ind = [ np.flatnonzero(tag).tolist() for tag in tag_indicator ]\n # generate permutated hits matrix\n perm_tag_tensor = np.repeat(tag_indicator, nperm+1).reshape((M,N,nperm+1))\n # shuffle matrix, last matrix is not shuffled when nperm > 0\n if nperm: np.apply_along_axis(lambda x: np.apply_along_axis(rs.shuffle,0,x),1, 
perm_tag_tensor[:,:,:-1])\n # missing hits\n no_tag_tensor = 1 - perm_tag_tensor\n # calculate numerator, denominator of each gene hits\n rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,np.newaxis])** weighted_score_type\n\n elif cor_mat.ndim == 2:\n # GSEA\n # 2d ndarray, gene_mat and cor_mat are shuffled already\n # reshape matrix\n cor_mat = cor_mat.T\n # gene_mat is a tuple contains (gene_name, permuate_gene_name_indices)\n genes, genes_ind = gene_mat\n # genestes->M, genes->N, perm-> axis=2\n # don't use assume_unique=True in 2d array when use np.isin().\n # elements in gene_mat are not unique, or will cause unwanted results\n tag_indicator = np.vstack([np.in1d(genes, gene_sets[key], assume_unique=True) for key in keys])\n tag_indicator = tag_indicator.astype(int)\n perm_tag_tensor = np.stack([tag.take(genes_ind).T for tag in tag_indicator], axis=0)\n #index of hits\n hit_ind = [ np.flatnonzero(tag).tolist() for tag in perm_tag_tensor[:,:,-1] ]\n # nohits\n no_tag_tensor = 1 - perm_tag_tensor\n # calculate numerator, denominator of each gene hits\n rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,:])** weighted_score_type\n else:\n logging.error(\"Program die because of unsupported input\")\n sys.exit(0)\n\n # Nhint = tag_indicator.sum(1)\n # Nmiss = N - Nhint\n axis=1\n P_GW_denominator = np.sum(rank_alpha, axis=axis, keepdims=True)\n P_NG_denominator = np.sum(no_tag_tensor, axis=axis, keepdims=True)\n REStensor = np.cumsum(rank_alpha / P_GW_denominator - no_tag_tensor / P_NG_denominator, axis=axis)\n # ssGSEA: scale es by gene numbers ?\n # https://gist.github.com/gaoce/39e0907146c752c127728ad74e123b33\n if scale: REStensor = REStensor / len(gene_mat)\n if single:\n #ssGSEA\n esmatrix = REStensor.sum(axis=axis)\n else:\n #GSEA\n esmax, esmin = REStensor.max(axis=axis), REStensor.min(axis=axis)\n esmatrix = np.where(np.abs(esmax)>np.abs(esmin), esmax, esmin)\n\n es, esnull, RES = esmatrix[:,-1], esmatrix[:,:-1], REStensor[:,:,-1]\n\n return es, esnull, hit_ind, RES\n\n\ndef ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,\n ascending, rs=None):\n \"\"\"Build shuffled ranking matrix when permutation_type eq to phenotype.\n\n :param exprs: gene_expression DataFrame, gene_name indexed.\n :param str method: calculate correlation or ranking. methods including:\n 1. 'signal_to_noise' (s2n) or 'abs_signal_to_noise' (abs_s2n).\n 2. 't_test'.\n 3. 'ratio_of_classes' (also referred to as fold change).\n 4. 'diff_of_classes'.\n 5. 'log2_ratio_of_classes'.\n :param int permuation_num: how many times of classes is being shuffled\n :param str pos: one of labels of phenotype's names.\n :param str neg: one of labels of phenotype's names.\n :param list classes: a list of phenotype labels, to specify which column of\n dataframe belongs to what class of phenotype.\n :param bool ascending: bool. Sort ascending vs. 
descending.\n\n :return:\n returns two 2d ndarray with shape (nperm, gene_num).\n\n | cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.\n | cor_mat: sorted and permutated (exclude last row) ranking matrix.\n\n \"\"\"\n rs = np.random.RandomState(rs)\n # S: samples, G: gene number\n G, S = exprs.shape\n # genes = exprs.index.values\n expr_mat = exprs.values.T\n perm_cor_tensor = np.tile(expr_mat, (permutation_num+1,1,1))\n # random shuffle on the first dim, last matrix is not shuffled\n for arr in perm_cor_tensor[:-1]: rs.shuffle(arr)\n classes = np.array(classes)\n pos = classes == pos\n neg = classes == neg\n pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1)\n neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1)\n pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1)\n neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1)\n\n if method in ['signal_to_noise', 's2n']:\n cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std)\n elif method in ['abs_signal_to_noise', 'abs_s2n']:\n cor_mat = np.abs((pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std))\n elif method == 't_test':\n denom = 1.0/G\n cor_mat = (pos_cor_mean - neg_cor_mean)/ np.sqrt(denom*pos_cor_std**2 + denom*neg_cor_std**2)\n elif method == 'ratio_of_classes':\n cor_mat = pos_cor_mean / neg_cor_mean\n elif method == 'diff_of_classes':\n cor_mat = pos_cor_mean - neg_cor_mean\n elif method == 'log2_ratio_of_classes':\n cor_mat = np.log2(pos_cor_mean / neg_cor_mean)\n else:\n logging.error(\"Please provide correct method name!!!\")\n sys.exit(0)\n # return matix[nperm+1, perm_cors]\n cor_mat_ind = cor_mat.argsort()\n # ndarray: sort in place\n cor_mat.sort()\n # genes_mat = genes.take(cor_mat_ind)\n if ascending: return cor_mat_ind, cor_mat\n # descending order of ranking and genes\n # return genes_mat[:,::-1], cor_mat[:,::-1]\n return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]\n\ndef ranking_metric(df, method, pos, neg, classes, ascending):\n \"\"\"The main function to rank an expression table.\n\n :param df: gene_expression DataFrame.\n :param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.\n Others methods are:\n\n 1. 'signal_to_noise' (s2n) or 'abs_signal_to_noise' (abs_s2n)\n\n You must have at least three samples for each phenotype to use this metric.\n The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);\n that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”\n \n\n 2. 't_test'\n\n Uses the difference of means scaled by the standard deviation and number of samples.\n Note: You must have at least three samples for each phenotype to use this metric.\n The larger the tTest ratio, the more distinct the gene expression is in each phenotype\n and the more the gene acts as a “class marker.”\n\n 3. 'ratio_of_classes' (also referred to as fold change).\n\n Uses the ratio of class means to calculate fold change for natural scale data.\n\n 4. 'diff_of_classes'\n\n Uses the difference of class means to calculate fold change for natural scale data\n\n 5. 
'log2_ratio_of_classes'\n\n Uses the log2 ratio of class means to calculate fold change for natural scale data.\n This is the recommended statistic for calculating fold change for log scale data.\n\n\n :param str pos: one of labels of phenotype's names.\n :param str neg: one of labels of phenotype's names.\n :param dict classes: column id to group mapping.\n :param bool ascending: bool or list of bool. Sort ascending vs. descending.\n :return:\n\n returns a pd.Series of correlation to class of each variable. Gene_name is index, and value is rankings.\n\n visit here for more docs: http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html\n \"\"\"\n\n # exclude any zero stds.\n df_mean = df.groupby(by=classes, axis=1).mean()\n df_std = df.groupby(by=classes, axis=1).std()\n\n if method in ['signal_to_noise', 's2n']:\n ser = (df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])\n elif method in ['abs_signal_to_noise', 'abs_s2n']:\n ser = ((df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])).abs()\n elif method == 't_test':\n ser = (df_mean[pos] - df_mean[neg])/ np.sqrt(df_std[pos]**2/len(df_std)+df_std[neg]**2/len(df_std) )\n elif method == 'ratio_of_classes':\n ser = df_mean[pos] / df_mean[neg]\n elif method == 'diff_of_classes':\n ser = df_mean[pos] - df_mean[neg]\n elif method == 'log2_ratio_of_classes':\n ser = np.log2(df_mean[pos] / df_mean[neg])\n else:\n logging.error(\"Please provide correct method name!!!\")\n sys.exit(0)\n ser = ser.sort_values(ascending=ascending)\n\n return ser\n\n\ndef gsea_compute_tensor(data, gmt, n, weighted_score_type, permutation_type,\n method, pheno_pos, pheno_neg, classes, ascending,\n processes=1, seed=None, single=False, scale=False):\n \"\"\"compute enrichment scores and enrichment nulls.\n\n :param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.\n :param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.\n :param int n: permutation number. default: 1000.\n :param str method: ranking_metric method. see above.\n :param str pheno_pos: one of labels of phenotype's names.\n :param str pheno_neg: one of labels of phenotype's names.\n :param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.\n :param float weighted_score_type: default:1\n :param bool ascending: sorting order of rankings. Default: False.\n :param seed: random seed. 
Default: np.random.RandomState()\n :param bool scale: if true, scale es by gene number.\n\n :return: a tuple contains::\n\n | zipped results of es, nes, pval, fdr.\n | nested list of hit indices of input gene_list.\n | nested list of ranked enrichment score of each input gene_sets.\n | list of enriched terms\n\n \"\"\"\n w = weighted_score_type\n subsets = sorted(gmt.keys())\n rs = np.random.RandomState(seed)\n genes_mat, cor_mat = data.index.values, data.values\n base = 5 if data.shape[0] >= 5000 else 10\n block = ceil(len(subsets) / base)\n\n np.random.seed(seed)\n random_state = np.random.randint(np.iinfo(np.int32).max, size=block)\n\n if permutation_type == \"phenotype\":\n # shuffling classes and generate random correlation rankings\n logging.debug(\"Start to permutate classes..............................\")\n genes_ind = []\n cor_mat = []\n\n temp_rnk = Parallel(n_jobs=processes)(delayed(ranking_metric_tensor)(\n data, method, base, pheno_pos, pheno_neg, classes, ascending, rs) for rs in random_state)\n\n for k, temp in enumerate(temp_rnk):\n gi, cor = temp\n if k+1 == block:\n genes_ind.append(gi)\n cor_mat.append(cor)\n else:\n genes_ind.append(gi[:-1])\n cor_mat.append(cor[:-1])\n\n genes_ind, cor_mat = np.vstack(genes_ind), np.vstack(cor_mat)\n # convert to tuple\n genes_mat = (data.index.values, genes_ind)\n\n logging.debug(\"Start to compute es and esnulls........................\")\n # Prerank, ssGSEA, GSEA\n es = []\n RES = []\n hit_ind = []\n esnull = []\n temp_esnu = []\n #pool_esnu = Pool(processes=processes)\n # split large array into smaller blocks to avoid memory overflow\n i, m = 1, 0\n gmt_block = []\n while i <= block:\n # you have to reseed, or all your processes are sharing the same seed value\n rs = random_state[i-1]\n gmtrim = {k: gmt.get(k) for k in subsets[m:base * i]}\n gmt_block.append(gmtrim)\n # temp_esnu.append(pool_esnu.apply_async(enrichment_score_tensor,\n # args=(genes_mat, cor_mat,\n # gmtrim, w, n, rs,\n # single, scale)))\n m = base * i\n i += 1\n # use joblib\n temp_esnu = Parallel(n_jobs=processes)(delayed(enrichment_score_tensor)(\n genes_mat, cor_mat, gmtrim, w, n, rs, single, scale) \n for gmtrim, rs in zip(gmt_block, random_state))\n # pool_esnu.close()\n # pool_esnu.join()\n\n # esn is a list, don't need to use append method.\n for si, temp in enumerate(temp_esnu):\n # e, enu, hit, rune = temp.get()\n e, enu, hit, rune = temp\n esnull.append(enu)\n es.append(e)\n RES.append(rune)\n hit_ind += hit\n # concate results\n es, esnull, RES = np.hstack(es), np.vstack(esnull), np.vstack(RES)\n\n return gsea_significance(es, esnull), hit_ind, RES, subsets\n\n\n\ndef gsea_compute(data, gmt, n, weighted_score_type, permutation_type,\n method, pheno_pos, pheno_neg, classes, ascending,\n processes=1, seed=None, single=False, scale=False):\n \"\"\"compute enrichment scores and enrichment nulls.\n\n :param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.\n :param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.\n :param int n: permutation number. default: 1000.\n :param str method: ranking_metric method. see above.\n :param str pheno_pos: one of labels of phenotype's names.\n :param str pheno_neg: one of labels of phenotype's names.\n :param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.\n :param float weighted_score_type: default:1\n :param bool ascending: sorting order of rankings. Default: False.\n :param seed: random seed. 
Default: np.random.RandomState()\n :param bool scale: if true, scale es by gene number.\n\n :return: a tuple contains::\n\n | zipped results of es, nes, pval, fdr.\n | nested list of hit indices of input gene_list.\n | nested list of ranked enrichment score of each input gene_sets.\n | list of enriched terms\n\n \"\"\"\n \n w = weighted_score_type\n subsets = sorted(gmt.keys())\n es = []\n RES=[]\n hit_ind=[]\n esnull = [ [] for a in range(len(subsets)) ]\n\n logging.debug(\"Start to compute enrichment scores......................\")\n\n if permutation_type == \"phenotype\":\n logging.debug(\"Start to permutate classes..............................\")\n # shuffling classes and generate random correlation rankings\n rs = np.random.RandomState(seed)\n genes_mat, cor_mat = ranking_metric_tensor(exprs=data, method=method,\n permutation_num=n,\n pos=pheno_pos, neg=pheno_neg,\n classes=classes,\n ascending=ascending, rs=rs)\n\n # compute es, esnulls. hits, RES\n logging.debug(\"Start to compute enrichment nulls.......................\")\n es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=genes_mat,\n cor_mat=cor_mat,\n gene_sets=gmt,\n weighted_score_type=w,\n nperm=n, rs=rs,\n single=False, scale=False,)\n\n else:\n # Prerank, ssGSEA, GSEA with gene_set permutation\n gl, cor_vec = data.index.values, data.values\n logging.debug(\"Start to compute es and esnulls........................\")\n # es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=gl,\n # cor_mat=cor_mat,\n # gene_sets=gmt,\n # weighted_score_type=w,\n # nperm=n, rs=rs\n # single=single, scale=scale)\n\n # split large array into smaller blocks to avoid memory overflow\n temp_esnu=[]\n #pool_esnu = Pool(processes=processes)\n # you have to reseed, or all your processes are sharing the same seed value\n np.random.seed(seed)\n random_state = np.random.randint(np.iinfo(np.int32).max, size=len(subsets))\n # for subset, rs in zip(subsets, random_state):\n # temp_esnu.append(pool_esnu.apply_async(enrichment_score,\n # args=(gl, cor_vec, gmt.get(subset), w,\n # n, rs, single, scale)))\n\n # pool_esnu.close()\n # pool_esnu.join()\n\n temp_esnu = Parallel(n_jobs=processes)(delayed(enrichment_score)(\n gl, cor_vec, gmt.get(subset), w, n, \n rs, single, scale) \n for subset, rs in zip(subsets, random_state)) \n # esn is a list, don't need to use append method.\n for si, temp in enumerate(temp_esnu):\n #e, enu, hit, rune = temp.get()\n e, enu, hit, rune = temp\n esnull[si] = enu\n es.append(e)\n RES.append(rune)\n hit_ind.append(hit)\n\n return gsea_significance(es, esnull), hit_ind, RES, subsets\n\n\ndef normalize(es, esnull):\n \"\"\"normalize the ES(S,pi) and the observed ES(S), separately rescaling\n the positive and negative scores by dividing the mean of the ES(S,pi).\n \n return: NES, NESnull\n \"\"\"\n\n nEnrichmentScores =np.zeros(es.shape)\n nEnrichmentNulls=np.zeros(esnull.shape)\n\n esnull_pos = (esnull * (esnull >= 0)).mean(axis=1)\n esnull_neg = (esnull * (esnull < 0)).mean(axis=1)\n # calculate nESnulls\n for i in range(esnull.shape[0]):\n # NES\n if es[i] >= 0:\n nEnrichmentScores[i] = es[i] / esnull_pos[i]\n else:\n nEnrichmentScores[i] = - es[i] / esnull_neg[i]\n\n # NESnull\n for j in range(esnull.shape[1]):\n if esnull[i,j] >= 0:\n nEnrichmentNulls[i,j] = esnull[i,j] / esnull_pos[i]\n else:\n nEnrichmentNulls[i,j] = - esnull[i,j] / esnull_neg[i]\n\n return nEnrichmentScores, nEnrichmentNulls\n\ndef gsea_pval(es, esnull):\n \"\"\"Compute nominal p-value.\n\n From article (PNAS):\n estimate nominal p-value for 
S from esnull by using the positive\n or negative portion of the distribution corresponding to the sign\n of the observed ES(S).\n \"\"\"\n\n # to speed up, using numpy function to compute pval in parallel.\n condlist = [ es < 0, es >=0]\n choicelist = [(esnull < es.reshape(len(es),1)).sum(axis=1)/ (esnull < 0).sum(axis=1),\n (esnull >= es.reshape(len(es),1)).sum(axis=1)/ (esnull >= 0).sum(axis=1)]\n pvals = np.select(condlist, choicelist)\n\n return pvals\n\n\ndef gsea_fdr(nEnrichmentScores, nEnrichmentNulls):\n \"\"\"Create a histogram of all NES(S,pi) over all S and pi.\n Use this null distribution to compute an FDR q value.\n \n :param nEnrichmentScores: normalized ES\n :param nEnrichmentNulls: normalized ESnulls\n :return: FDR\n \"\"\"\n\n # FDR null distribution histogram\n # vals = reduce(lambda x,y: x+y, nEnrichmentNulls, [])\n # nvals = np.array(sorted(vals))\n # or\n nvals = np.sort(nEnrichmentNulls.flatten())\n nnes = np.sort(nEnrichmentScores)\n fdrs = []\n # FDR computation\n for i in range(len(nEnrichmentScores)):\n nes = nEnrichmentScores[i]\n # use the same pval method to calculate fdr\n if nes >= 0:\n allPos = int(len(nvals) - np.searchsorted(nvals, 0, side=\"left\"))\n allHigherAndPos = int(len(nvals) - np.searchsorted(nvals, nes, side=\"left\"))\n nesPos = len(nnes) - int(np.searchsorted(nnes, 0, side=\"left\"))\n nesHigherAndPos = len(nnes) - int(np.searchsorted(nnes, nes, side=\"left\"))\n # allPos = (nvals >= 0).sum()\n # allHigherAndPos = (nvals >= nes).sum()\n # nesPos = (nnes >=0).sum()\n # nesHigherAndPos = (nnes >= nes).sum()\n else:\n allPos = int(np.searchsorted(nvals, 0, side=\"left\"))\n allHigherAndPos = int(np.searchsorted(nvals, nes, side=\"right\"))\n nesPos = int(np.searchsorted(nnes, 0, side=\"left\"))\n nesHigherAndPos = int(np.searchsorted(nnes, nes, side=\"right\"))\n # allPos = (nvals < 0).sum()\n # allHigherAndPos = (nvals < nes).sum()\n # nesPos = (nnes < 0).sum()\n # nesHigherAndPos = (nnes < nes).sum()\n\n try:\n pi_norm = allHigherAndPos / float(allPos)\n pi_obs = nesHigherAndPos / float(nesPos)\n fdr = pi_norm / pi_obs\n fdrs.append(fdr if fdr < 1 else 1.0)\n except:\n fdrs.append(1000000000.0)\n\n logging.debug(\"Statistical testing finished.............................\")\n\n return fdrs\n\n\ndef gsea_significance(enrichment_scores, enrichment_nulls):\n \"\"\"Compute nominal pvals, normalized ES, and FDR q value.\n\n For a given NES(S) = NES* >= 0. 
The FDR is the ratio of the percentage of all (S,pi) with\n NES(S,pi) >= 0, whose NES(S,pi) >= NES*, divided by the percentage of\n observed S with NES(S) >= 0, whose NES(S) >= NES*, and similarly if NES(S) = NES* <= 0.\n \"\"\"\n # For a zero by zero division (undetermined, results in a NaN),\n np.seterr(divide='ignore', invalid='ignore')\n # import warnings\n # warnings.simplefilter(\"ignore\")\n es = np.array(enrichment_scores)\n esnull = np.array(enrichment_nulls)\n logging.debug(\"Start to compute pvals..................................\")\n # P-values.\n pvals = gsea_pval(es, esnull).tolist()\n\n logging.debug(\"Start to compute nes and nesnull........................\")\n # NES\n # nEnrichmentScores, nEnrichmentNulls = normalize(es, esnull)\n # new normalized enrichment score implementation.\n # this could speed up significantly.\n esnull_pos = (esnull*(esnull>=0)).mean(axis=1)\n esnull_neg = (esnull*(esnull<0)).mean(axis=1)\n nEnrichmentScores = np.where(es>=0, es/esnull_pos, -es/esnull_neg)\n nEnrichmentNulls = np.where(esnull>=0, esnull/esnull_pos[:,np.newaxis],\n -esnull/esnull_neg[:,np.newaxis])\n\n logging.debug(\"Start to compute fdrs..................................\")\n # FDR\n fdrs = gsea_fdr(nEnrichmentScores, nEnrichmentNulls)\n\n # TODO: use multiple testing correction for ssgsea? ssGSEA2.0 uses BH correction.\n # https://github.com/broadinstitute/ssGSEA2.0/blob/master/src/ssGSEA2.0.R\n # line 969\n # fdrs, _ = multiple_testing_correction(pvals, alpha=0.05)\n\n return zip(enrichment_scores, nEnrichmentScores, pvals, fdrs)\n\n\n\n\n" ]
[ [ "numpy.sqrt", "numpy.in1d", "numpy.cumsum", "numpy.seterr", "numpy.iinfo", "numpy.select", "numpy.searchsorted", "numpy.where", "numpy.hstack", "numpy.flatnonzero", "numpy.apply_along_axis", "numpy.repeat", "numpy.zeros", "numpy.array", "numpy.random.RandomState", "numpy.sum", "numpy.log2", "numpy.abs", "numpy.random.seed", "numpy.tile", "numpy.sort", "numpy.ones", "numpy.vstack" ] ]
flyliu2017/bert
[ "cc6e676ff8693a6cc31ade9d7a6cbb0789d7877c" ]
[ "run_token_level_classifier_multigpus.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport os\n\nfrom tensorflow.python.ops.losses.losses_impl import Reduction\n\nimport modeling\nimport optimization_multigpus\nimport tokenization\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"data_dir\", None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\")\n\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False,\n \"Whether to run the model in inference mode on the test set.\")\n\nflags.DEFINE_bool(\n \"data_converted\", True,\n \"Whether data had been converted to tfrecord.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\nflags.DEFINE_integer(\n \"num_gpus\", 1,\n \"number of GPU to use.\")\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass PaddingInputExample(object):\n \"\"\"Fake example so the num input examples is a multiple of the batch size.\n\n When running eval/predict on the TPU, we need to pad the number of examples\n to be a multiple of the batch size, because the TPU requires a fixed batch\n size. 
The alternative is to drop the last batch, which is bad because it means\n the entire output data won't be generated.\n\n We use this class instead of `None` because treating `None` as padding\n battches could cause silent errors.\n \"\"\"\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n input_ids,\n input_mask,\n segment_ids,\n label_id,\n is_real_example=True):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.is_real_example = is_real_example\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\n\nclass XnliProcessor(DataProcessor):\n \"\"\"Processor for the XNLI data set.\"\"\"\n\n def __init__(self):\n self.language = \"zh\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(\n os.path.join(data_dir, \"multinli\",\n \"multinli.train.%s.tsv\" % self.language))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"train-%d\" % (i)\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n if label == tokenization.convert_to_unicode(\"contradictory\"):\n label = tokenization.convert_to_unicode(\"contradiction\")\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\"))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"dev-%d\" % (i)\n language = tokenization.convert_to_unicode(line[0])\n if language != tokenization.convert_to_unicode(self.language):\n continue\n text_a = tokenization.convert_to_unicode(line[6])\n text_b = tokenization.convert_to_unicode(line[7])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n 
\"dev_matched\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\n text_a = tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n if set_type == \"test\":\n label = \"contradiction\"\n else:\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass CommentsTagsProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n data_dir, \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n data_dir, \"eval\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n data_dir, \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, data_dir, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n with open(os.path.join(data_dir, '{}_xs_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f:\n txts = f.read().splitlines()\n with open(os.path.join(data_dir, '{}_ys_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f:\n labels = f.read().splitlines()\n\n for (i, n) in enumerate(zip(txts, labels)):\n txt, label = n\n guid = \"%s-%s\" % (set_type, i)\n text_a,text_b=txt.split(' | ')\n label = label.split(' | ')[0]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n\n return examples\n\nclass SegmentedCommentsTagsProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n 
\"\"\"See base class.\"\"\"\n return self._create_examples(\n data_dir, \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n data_dir, \"eval\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n data_dir, \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, data_dir, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n with open(os.path.join(data_dir, '{}_xs_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f:\n txts = f.read().splitlines()\n with open(os.path.join(data_dir, '{}_ys_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f:\n labels = f.read().splitlines()\n\n for (i, n) in enumerate(zip(txts, labels)):\n txt, label = n\n guid = \"%s-%s\" % (set_type, i)\n text_a,text_b=txt.split(' | ')\n text_a=' '.join(list(text_a))\n label = label.split(' | ')[0]\n label=' '.join(list(label))\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n\n return examples\n\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n # Only the test set has a header\n if set_type == \"test\" and i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=[0]*max_seq_length,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack 
##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label=example.label\n words=tokenizer.tokenize(label)\n length=len(words)\n start=end=None\n for i in range(len(tokens)):\n if tokens[i:i+length]==words:\n start=i\n end=i+length\n break\n\n if start is None:\n # print(tokens)\n # print(words)\n # raise ValueError('can not find mark text in comment.')\n return None\n\n # start,end=label.split()\n # start=int(start)\n # end=int(end)\n\n label_id = [0]*max_seq_length\n label_id[start:end]=[1]*(end-start)\n\n if ex_index < 5:\n\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: {}\" .format(label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return feature\n\n\ndef file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n if feature:\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = 
create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature(feature.label_id)\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()\n\n\ndef file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([1], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n  # of tokens from each, since if one sequence is very short then each token\n  # that's truncated likely contains more information than a longer sequence.\n  while True:\n    total_length = len(tokens_a) + len(tokens_b)\n    if total_length <= max_length:\n      break\n    if len(tokens_a) > len(tokens_b):\n      tokens_a.pop()\n    else:\n      tokens_b.pop()\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n                 labels, num_labels, use_one_hot_embeddings):\n  \"\"\"Creates a token-level tagging model.\"\"\"\n  model = modeling.BertModel(\n      config=bert_config,\n      is_training=is_training,\n      input_ids=input_ids,\n      input_mask=input_mask,\n      token_type_ids=segment_ids,\n      use_one_hot_embeddings=use_one_hot_embeddings)\n\n  # Unlike the original classification demo, which works on the pooled [CLS]\n  # vector, this task scores every token, so we take the full sequence\n  # output: one hidden vector per input position.\n  output_layer = model.get_sequence_output()\n\n  hidden_size = output_layer.shape[-1].value\n\n  output_weights = tf.get_variable(\n      \"output_weights\", [hidden_size],\n      initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n  output_bias = tf.get_variable(\n      \"output_bias\", [], initializer=tf.zeros_initializer())\n\n  with tf.variable_scope(\"loss\"):\n    if is_training:\n      # I.e., 0.1 dropout\n      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n    # One logit per token: dot product of each hidden vector with a single\n    # weight vector, plus a scalar bias.\n    logits = tf.reduce_sum(tf.multiply(output_layer, output_weights), -1)\n    logits = tf.add(logits, output_bias)\n\n    probabilities = tf.sigmoid(logits)\n\n    # Element-wise sigmoid cross-entropy; the Reduction enum lives under\n    # tf.losses, so it must be referenced as tf.losses.Reduction.\n    per_example_loss = tf.losses.sigmoid_cross_entropy(\n        multi_class_labels=labels, logits=logits,\n        reduction=tf.losses.Reduction.NONE)\n    per_example_loss = tf.reduce_sum(per_example_loss, axis=-1)\n    loss = tf.reduce_mean(per_example_loss, name='train_loss')\n\n    return (loss, per_example_loss, logits, probabilities)\n\n\ndef model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n                     num_train_steps, num_warmup_steps, use_tpu,\n                     use_one_hot_embeddings):\n  \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument\n    \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n    tf.logging.info(\"*** Features ***\")\n    for name in sorted(features.keys()):\n      tf.logging.info(\"  name = %s, shape = %s\" % (name, features[name].shape))\n\n    input_ids = features[\"input_ids\"]\n    input_mask = features[\"input_mask\"]\n    segment_ids = features[\"segment_ids\"]\n    label_ids = features[\"label_ids\"]\n    is_real_example = None\n    if \"is_real_example\" in features:\n      is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n    else:\n      is_real_example = tf.ones(label_ids.shape[0], dtype=tf.float32)\n\n    is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n    (total_loss, per_example_loss, logits, probabilities) = create_model(\n        bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n        num_labels, use_one_hot_embeddings)\n\n    tvars = tf.trainable_variables()\n    initialized_variable_names = {}\n    scaffold_fn = None\n    if init_checkpoint:\n      (assignment_map, initialized_variable_names\n      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n      if use_tpu:\n\n        def tpu_scaffold():\n          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n          return tf.train.Scaffold()\n\n        scaffold_fn = tpu_scaffold\n      else:\n        # init_from_checkpoint patches the variable initializers in place and\n        # returns None, so a default Scaffold is all that is needed here.\n        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n        scaffold_fn = tf.train.Scaffold()\n\n    tf.logging.info(\"**** Trainable Variables ****\")\n    for var in tvars:\n      init_string = \"\"\n      if var.name in initialized_variable_names:\n        init_string = \", *INIT_FROM_CKPT*\"\n      tf.logging.info(\"  name = %s, shape = %s%s\", var.name, var.shape,\n                      init_string)\n\n    output_spec = None\n    if mode == tf.estimator.ModeKeys.TRAIN:\n\n      train_op = optimization_multigpus.create_optimizer(\n          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n      # On the multi-GPU path scaffold_fn above holds an actual\n      # tf.train.Scaffold, which is what tf.estimator.EstimatorSpec expects.\n      output_spec = tf.estimator.EstimatorSpec(\n          mode=mode,\n          loss=total_loss,\n          train_op=train_op,\n          scaffold=scaffold_fn\n      )\n    elif mode == tf.estimator.ModeKeys.EVAL:\n\n      def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n        # Threshold each per-token logit at 0, i.e. probability 0.5.\n        predictions = tf.where(logits >= 0, tf.ones(tf.shape(logits)),\n                               tf.zeros(tf.shape(logits)))\n        accuracy = tf.metrics.accuracy(\n            labels=label_ids, predictions=predictions, weights=is_real_example)\n        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n        return {\n            \"eval_accuracy\": accuracy,\n            \"eval_loss\": loss,\n        }\n\n      # tf.estimator.EstimatorSpec takes a dict of metric ops here; the\n      # (metric_fn, args) tuple form is only for TPUEstimatorSpec, so call\n      # metric_fn directly.\n      eval_metrics = metric_fn(per_example_loss, label_ids, logits,\n                               is_real_example)\n      output_spec = tf.estimator.EstimatorSpec(\n          mode=mode,\n          loss=total_loss,\n          eval_metric_ops=eval_metrics,\n          scaffold=scaffold_fn)\n    else:\n      output_spec = tf.estimator.EstimatorSpec(\n          mode=mode,\n          predictions={\"probabilities\": probabilities},\n          scaffold=scaffold_fn\n      )\n    return output_spec\n\n  return model_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef input_fn_builder(features, seq_length, is_training, drop_remainder):\n  \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n  all_input_ids = []\n  all_input_mask = []\n  all_segment_ids = []\n  all_label_ids = []\n\n  for feature in features:\n    all_input_ids.append(feature.input_ids)\n    all_input_mask.append(feature.input_mask)\n    all_segment_ids.append(feature.segment_ids)\n    all_label_ids.append(feature.label_id)\n\n  def input_fn(params):\n    \"\"\"The actual input function.\"\"\"\n    batch_size = params[\"batch_size\"]\n\n    num_examples = len(features)\n\n    # This is for demo purposes and does NOT scale to large data sets. We do\n    # not use Dataset.from_generator() because that uses tf.py_func which is\n    # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples,seq_length], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n processors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mrpc\": MrpcProcessor,\n \"xnli\": XnliProcessor,\n \"tag\": CommentsTagsProcessor,\n \"segtag\":SegmentedCommentsTagsProcessor\n }\n\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:\n raise ValueError(\n \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\")\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n task_name = FLAGS.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n\n label_list = processor.get_labels()\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples(FLAGS.data_dir)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n num_labels=len(label_list),\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # run_config = tf.contrib.tpu.RunConfig(\n # cluster=tpu_cluster_resolver,\n # master=FLAGS.master,\n # model_dir=FLAGS.output_dir,\n 
# save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n # tpu_config=tf.contrib.tpu.TPUConfig(\n # iterations_per_loop=FLAGS.iterations_per_loop,\n # num_shards=FLAGS.num_tpu_cores,\n # per_host_input_for_training=is_per_host))\n # # If TPU is not available, this will fall back to normal Estimator on CPU\n # # or GPU.\n # estimator = tf.contrib.tpu.TPUEstimator(\n # use_tpu=FLAGS.use_tpu,\n # model_fn=model_fn,\n # config=run_config,\n # train_batch_size=FLAGS.train_batch_size,\n # eval_batch_size=FLAGS.eval_batch_size,\n # predict_batch_size=FLAGS.predict_batch_size)\n\n strategy=tf.contrib.distribute.MirroredStrategy(num_gpus=FLAGS.num_gpus,\n cross_tower_ops=tf.contrib.distribute.AllReduceCrossTowerOps(\n 'nccl', num_packs=int(FLAGS.num_gpus))\n )\n run_config = tf.estimator.RunConfig(\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n train_distribute=strategy\n )\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n params={'batch_size':FLAGS.train_batch_size}\n )\n\n if FLAGS.do_train:\n train_file = os.path.join(FLAGS.output_dir, \"train.tf_record\")\n if not tf.gfile.Exists(train_file) or not FLAGS.data_converted:\n file_based_convert_examples_to_features(\n train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n train_input_fn = file_based_input_fn_builder(\n input_file=train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n train_hook=tf.train.LoggingTensorHook(['loss/train_loss'],every_n_iter=100)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps,hooks=[train_hook])\n\n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples(FLAGS.data_dir)\n num_actual_eval_examples = len(eval_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. So we pad with fake examples which are ignored\n # later on. 
These do NOT count towards the metric (all tf.metrics\n # support a per-instance weight, and these get a weight of 0.0).\n while len(eval_examples) % FLAGS.eval_batch_size != 0:\n eval_examples.append(PaddingInputExample())\n\n eval_file = os.path.join(FLAGS.output_dir, \"eval.tf_record\")\n if not tf.gfile.Exists(eval_file) or not FLAGS.data_converted:\n file_based_convert_examples_to_features(\n eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(eval_examples), num_actual_eval_examples,\n len(eval_examples) - num_actual_eval_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n # This tells the estimator to run through the entire set.\n eval_steps = None\n # However, if running eval on the TPU, you will need to specify the\n # number of steps.\n if FLAGS.use_tpu:\n assert len(eval_examples) % FLAGS.eval_batch_size == 0\n eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder)\n\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples(FLAGS.data_dir)\n num_actual_predict_examples = len(predict_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. 
So we pad with fake examples which are ignored\n      # later on.\n      while len(predict_examples) % FLAGS.predict_batch_size != 0:\n        predict_examples.append(PaddingInputExample())\n\n    predict_file = os.path.join(FLAGS.output_dir, \"predict.tf_record\")\n    if not tf.gfile.Exists(predict_file) or not FLAGS.data_converted:\n      file_based_convert_examples_to_features(predict_examples, label_list,\n                                              FLAGS.max_seq_length, tokenizer,\n                                              predict_file)\n\n    tf.logging.info(\"***** Running prediction *****\")\n    tf.logging.info(\"  Num examples = %d (%d actual, %d padding)\",\n                    len(predict_examples), num_actual_predict_examples,\n                    len(predict_examples) - num_actual_predict_examples)\n    tf.logging.info(\"  Batch size = %d\", FLAGS.predict_batch_size)\n\n    predict_drop_remainder = True if FLAGS.use_tpu else False\n    predict_input_fn = file_based_input_fn_builder(\n        input_file=predict_file,\n        seq_length=FLAGS.max_seq_length,\n        is_training=False,\n        drop_remainder=predict_drop_remainder)\n\n    result = estimator.predict(input_fn=predict_input_fn)\n\n    output_predict_file = os.path.join(FLAGS.output_dir, \"test_results.tsv\")\n\n    with tf.gfile.GFile(output_predict_file, \"w\") as writer:\n      num_written_lines = 0\n      tf.logging.info(\"***** Predict results *****\")\n      for (i, prediction) in enumerate(result):\n        if i >= num_actual_predict_examples:\n          break\n        probabilities = prediction[\"probabilities\"]\n        texta = predict_examples[i].text_a\n        texta = tokenizer.tokenize(texta)\n        # Keep a token when its sigmoid probability clears 0.5; cap at the\n        # sequence length the model was actually fed instead of a hard-coded\n        # 128.\n        phrase = [texta[j] if probabilities[j] >= 0.5 else ' '\n                  for j in range(min(len(texta), FLAGS.max_seq_length))]\n        phrase = ''.join(phrase).strip()\n        writer.write(phrase + '\\n')\n        num_written_lines += 1\n    assert num_written_lines == num_actual_predict_examples\n\n\nif __name__ == \"__main__\":\n  flags.mark_flag_as_required(\"data_dir\")\n  flags.mark_flag_as_required(\"task_name\")\n  flags.mark_flag_as_required(\"vocab_file\")\n  flags.mark_flag_as_required(\"bert_config_file\")\n  flags.mark_flag_as_required(\"output_dir\")\n  tf.app.run()\n" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.train.LoggingTensorHook", "tensorflow.metrics.accuracy", "tensorflow.FixedLenFeature", "tensorflow.gfile.Exists", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.cast", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.estimator.RunConfig", "tensorflow.to_int32", "tensorflow.data.TFRecordDataset", "tensorflow.truncated_normal_initializer", "tensorflow.python_io.TFRecordWriter", "tensorflow.add", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.metrics.mean", "tensorflow.estimator.Estimator", "tensorflow.gfile.Open", "tensorflow.shape", "tensorflow.zeros_initializer", "tensorflow.logging.info", "tensorflow.train.Features", "tensorflow.multiply", "tensorflow.train.Scaffold", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.losses.sigmoid_cross_entropy", "tensorflow.sigmoid", "tensorflow.ones", "tensorflow.estimator.EstimatorSpec", "tensorflow.variable_scope" ] ]
SergioSV96/fastML
[ "e9311f3e51cfd06a898d254b0456b2b31e191746" ]
[ "fastML.py" ]
[ "\r\ndef fastML(X, Y, size):\r\n from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\r\n from sklearn.model_selection import train_test_split\r\n from sklearn.ensemble import RandomForestClassifier\r\n from sklearn.tree import DecisionTreeClassifier\r\n from sklearn.neighbors import KNeighborsClassifier\r\n from sklearn.svm import SVC\r\n from sklearn import utils\r\n import pandas as pd\r\n\r\n SVC = SVC()\r\n KNN = KNeighborsClassifier()\r\n DTC = DecisionTreeClassifier()\r\n RF = RandomForestClassifier()\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=size)\r\n\r\n SVC.fit(X_train, y_train)\r\n KNN.fit(X_train, y_train)\r\n DTC.fit(X_train, y_train)\r\n RF.fit(X_train, y_train)\r\n\r\n DTC_prediction = DTC.predict(X_test)\r\n KNN_prediction = (KNN.predict(X_test))\r\n SVC_prediction = SVC.predict(X_test)\r\n RF_prediction = RF.predict(X_test)\r\n print(\r\n \"Accuracy score for Decision tree Classifier is \" + str(accuracy_score(y_test, DTC_prediction)))\r\n print(\r\n \"Confusion Matrix for Decision tree Classifier is \" + str(confusion_matrix(y_test, DTC_prediction)))\r\n print(\r\n \"Classification Report for Decision tree Classifier is \" + str(classification_report(y_test, DTC_prediction)))\r\n\r\n print(\r\n \"Accuracy score for K-Nearest Neighbors is \" + str(accuracy_score(y_test, KNN_prediction)))\r\n print(\r\n \"Confusion Matrix for K-Nearest Neighbors is \" + str(confusion_matrix(y_test, KNN_prediction)))\r\n print(\r\n \"Classification Report for K-Nearest Neighbors is \" + str(classification_report(y_test, KNN_prediction)))\r\n\r\n print(\r\n \"Accuracy score for Support Vector Machine is \" + str(accuracy_score(y_test, SVC_prediction)))\r\n print(\r\n \"Confusion Matrix for Support Vector Machine is \" + str(confusion_matrix(y_test, SVC_prediction)))\r\n print(\r\n \"Classification Report for Support Vector Machine is \" + str(classification_report(y_test, SVC_prediction)))\r\n\r\n print(\r\n \"Accuracy score for Random Forest Classifier is \" + str(accuracy_score(y_test, RF_prediction)))\r\n print(\r\n \"Confusion Matrix for Random Forest Classifier \" + str(confusion_matrix(y_test, RF_prediction)))\r\n print(\r\n \"Classification Report for Random Forest Classifier is \" + str(classification_report(y_test, RF_prediction)))\r\n DTC_accuracy = str(accuracy_score(y_test, DTC_prediction))\r\n KNN_accuracy = str(accuracy_score(y_test, KNN_prediction))\r\n SVC_accuracy = str(accuracy_score(y_test, SVC_prediction))\r\n RF_accuracy = str(accuracy_score(y_test, RF_prediction))\r\n\r\n acc_score = [['Decision tree', DTC_accuracy], ['K-Nearest Neighbors', KNN_accuracy], ['Support Vector Machine',\r\n SVC_accuracy], ['Random Forest', RF_accuracy]]\r\n df = pd.DataFrame(acc_score, columns=['Model', 'Accuracy'])\r\n print(df)\r\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.neighbors.KNeighborsClassifier", "sklearn.metrics.confusion_matrix", "sklearn.tree.DecisionTreeClassifier", "sklearn.metrics.classification_report", "sklearn.metrics.accuracy_score" ] ]
jinPrelude/eye_tracking
[ "2d9e14df274e4de5d26de4589e47756522662506" ]
[ "eye_tracking/eye_track_cnn.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport cv2\nimport os\n\nimg_width = 640\nimg_height = 480\n\nos.chdir('/home/leejin/git/image_processing/eye-tracking')\n\n\n\ndef next_batch(i, batch_size):\n os.chdir('/home/leejin/git/image_processing/eye-tracking')\n list = np.loadtxt('dataset/3/eyesPos.csv', dtype='int', delimiter=',')\n\n start = i * batch_size\n end = start + batch_size\n\n img_list = cv2.imread('dataset/3/%d.jpg' % start)\n\n list = list[start:end+1][1:]\n label = np.array([0, 0, 0, 0])\n img_list = img_list[np.newaxis, :, :, :]\n for j in range(start + 1, end):\n new_img = cv2.imread('dataset/3/%d.jpg' % j)\n\n new_img = new_img[np.newaxis, :, :, :]\n img_list = np.concatenate((img_list, new_img))\n\n for k in range(batch_size) :\n list_tmp = (list[k][0], list[k][1], list[k][2], list[k][3])\n label = np.vstack((label, list_tmp))\n\n label = label[1:][:]\n return (img_list, label)\n\n\n\n\nimg = cv2.imread('dataset/0/0.jpg')\n\nX = tf.placeholder(tf.float32, [None, img_height, img_width, 3])\nY = tf.placeholder(tf.float32, [None, 4])\n\nW1 = tf.get_variable('W1',[3, 3, 3, 32],dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\nL1 = tf.nn.conv2d(X, W1,strides=[1,1,1,1], padding='SAME')\nL1 = tf.nn.relu(L1)\nL1 = tf.nn.max_pool(L1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\nW2 = tf.get_variable('W2',[3, 3, 32, 64],dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\nL2 = tf.nn.conv2d(L1, W2,strides=[1,1,1,1], padding='SAME')\nL2 = tf.nn.relu(L2)\nL2 = tf.nn.max_pool(L2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\nW3 = tf.get_variable('W3',[3, 3, 64, 128],dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\nL3 = tf.nn.conv2d(L2, W3,strides=[1,1,1,1], padding='SAME')\nL3 = tf.nn.relu(L3)\nL3 = tf.nn.max_pool(L3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\nW4 = tf.get_variable('W4',[3, 3, 128, 256],dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\nL4 = tf.nn.conv2d(L3, W4,strides=[1,1,1,1], padding='SAME')\nL4 = tf.nn.relu(L4)\nL4 = tf.nn.max_pool(L4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\nL4 = tf.reshape(L4, [-1, 30*40*256])\n\nW5 = tf.get_variable('W5', [30*40*256, 2048], dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\nL5 = tf.matmul(L4, W5)\nL5 = tf.nn.relu(L5)\n\nW6 = tf.get_variable('W6', [2048, 512], dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\nL6 = tf.matmul(L5, W6)\nL6 = tf.nn.relu(L6)\n\nW7 = tf.get_variable('W7', [512, 64], dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\nL7 = tf.matmul(L6, W7)\nL7 = tf.nn.relu(L7)\n\nW8 = tf.get_variable('W8', [64, 8], dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\nL8 = tf.matmul(L7, W8)\nL8 = tf.nn.relu(L8)\n\nW9 = tf.get_variable('W9', [8, 8], dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\nL9 = tf.matmul(L8, W9)\nL9 = tf.nn.relu(L9)\n\nW10 = tf.get_variable('W10', [8, 4], dtype=tf.float32, initializer=tf.keras.initializers.he_normal())\n\nL10 = tf.matmul(L9, W10)\n\n\n\n\ncost = tf.reduce_mean(tf.square((L10 - Y)))\noptimizer = tf.train.AdadeltaOptimizer(0.05).minimize(cost)\n\n\nckpt = tf.train.get_checkpoint_state('./model_save_big')\n\ninit = tf.global_variables_initializer()\nsess = tf.Session()\n\nsaver = tf.train.Saver(tf.global_variables())\n\nif ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path) :\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"reload\")\nelse :\n sess.run(init)\n\n\nbatch_size = 
2\ntotal_batch = 22\n\nfor epoch in range(10) :\n total_cost = 0\n\n for i in range(total_batch) :\n batch_xs, batch_ys = next_batch(i, batch_size)\n _, cost_val = sess.run([optimizer, cost], feed_dict={X:batch_xs, Y:batch_ys})\n\n total_cost += cost_val\n\n print('Epoch : ', '%04d'%(epoch+1), 'avg. cost =','{:.4f}'.format(total_cost/total_batch))\n\n save_path = saver.save(sess, \"model_save_big/eye_track_model.ckpt\")\n print(\"Model saved in path: %s\" % save_path)\n\n\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.nn.relu", "tensorflow.matmul", "tensorflow.nn.max_pool", "numpy.vstack", "tensorflow.reshape", "tensorflow.global_variables", "tensorflow.placeholder", "tensorflow.train.AdadeltaOptimizer", "numpy.concatenate", "tensorflow.global_variables_initializer", "tensorflow.train.checkpoint_exists", "tensorflow.keras.initializers.he_normal", "tensorflow.Session", "tensorflow.square", "numpy.array", "numpy.loadtxt", "tensorflow.nn.conv2d" ] ]
jjkotni/faas-profiler
[ "3a7565981d7b6f7a03b1d7595e458ef1ff20e8b9" ]
[ "workload-analyzer/ContextSwitches.py" ]
[ "import pdb\nfrom PerfMonAnalyzer import ReadPerfMon\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pdb\n\nfrom WorkloadChecker import CheckWorkloadValidity\n\ndef main():\n axes =[]\n plt.figure()\n# for i in [20, 45, 60, 120]: #gap between expts\n# for j in [3,6, 9, 12, 15, 18, 21, 24, 27, 30, 40, 50, 75, 100, 125 ]: #rate of invocation\n for idx, i in enumerate([20, 45, 60, 120]): #gap between expts\n axes.append(None)\n for j in [6, 9, 18, 30, 75, 125]: #rate of invocation\n df_all_runs = None\n for k in [1, 2, 3, 4, 5]: #run number\n out_file = \"logs/cs_\" + str(k) + \"_\" + str(j) + \"_\" + str(i) + \"/perf-mon.out\"\n df = ReadPerfMon(out_file)\n if df_all_runs is None:\n df_all_runs = df\n else:\n df_all_runs = pd.concat((df_all_runs, df))\n by_row_index = df_all_runs.groupby(df_all_runs.index)\n df_mean = by_row_index.mean()\n\n label=str(j)\n if axes[idx] is None:\n axes[idx] = df_mean.plot(y='context-switches', label=label)\n else:\n df_mean.plot(y='context-switches', label=label, ax=axes[idx])\n\n img = \"plots/context-switches-5thread-\" + str(i) + \".png\"\n plt.ylabel(\"Context Switches\")\n plt.xlabel(\"Time Step\")\n plt.legend(title=\"Invocation Rate\")\n plt.savefig(img)\n plt.figure()\n\nif __name__== \"__main__\":\n pdb.set_trace()\n main()" ]
[ [ "matplotlib.pyplot.legend", "pandas.concat", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
Lucas-SEB/Sewer-Pipes-Measurement
[ "b449a8b5110cedcd93b9e7f3a63d044bd299ba9f" ]
[ "src/realsense-ransac/Python_scripts/point_cloud_848-480_resolution.py" ]
[ "import numpy as np # Fundamental package for scientific computing\nimport matplotlib.pyplot as plt # 2D plotting library producing publication quality figures\nimport pyrealsense2.pyrealsense2 as rs # Intel RealSense cross-platform open-source API\nimport open3d as o3d\nimport imageio\nimport cv2\nimport pyransac3d as pyrsc # open3d librairie to use RANSAC for different shapes\nfrom datetime import datetime as date # Library use to get the actual date and time\n\n\n# Configure depth and color streams\n# Change resolution here\npipe = rs.pipeline()\ncfg = rs.config()\ncfg.enable_stream(rs.stream.depth,848, 480)\ncfg.enable_stream(rs.stream.color, 848, 480)\n\n#Start streaming\npipe.start(cfg)\n\n# Filter generates color images based on input depth frame\ncolorizer = rs.colorizer()\n\n# Skip 5 first frames to give the Auto-Exposure time to adjust\nfor x in range(5):pipe.wait_for_frames()\n\n# Get intrinsic camera parameters\nprofile = pipe.get_active_profile()\n\n# Change the type of stereo vision\ndevice = profile.get_device()\ndepth_sensor = device.query_sensors()[0]\ndepth_sensor.set_option(rs.option.emitter_enabled, 0.0)\n\n# Store frameset\nframeset = pipe.wait_for_frames()\ndepth_frame = frameset.get_depth_frame()\n\n# Cleanup\npipe.stop()\nprint(\"Frames Captured\")\n\n# Convert images to numpy arrays\ndepth_image = np.asanyarray(depth_frame.get_data())\n\n# Collect the actual date and time\ntimestamp = date.now().strftime(\"%Y-%m-%d-%H-%M\")\n# save both images (the name is changed each time using the timestamp in order to save all the images)\nimageio.imwrite(\"depth\"+timestamp+\".png\", depth_image)\n\n#Start streaming\npipe.start(cfg)\n\n# Skip 5 first frames to give the Auto-Exposure time to adjust\nfor x in range(5):pipe.wait_for_frames()\n\n# Get intrinsic camera parameters\nprofile2 = pipe.get_active_profile()\n\n# Change the type of stereo vision\ndevice2 = profile2.get_device()\ndepth_sensor2 = device2.query_sensors()[0]\ndepth_sensor2.set_option(rs.option.emitter_enabled, 1.0)\n\n# Store frameset\nframeset2 = pipe.wait_for_frames()\ndepth_frame2 = frameset2.get_depth_frame()\n\n# Cleanup\npipe.stop()\n\n# Convert images to numpy arrays\ndepth_image2 = np.asanyarray(depth_frame2.get_data())\n\n# save both images (the name is changed each time using the timestamp in order to save all the images)\nimageio.imwrite(\"depth2\"+timestamp+\".png\", depth_image2)\n\n# Get back the images\ndepth_raw = o3d.io.read_image(\"depth\"+timestamp+\".png\")\n\n# Get back the images\ndepth_raw2 = o3d.io.read_image(\"depth2\"+timestamp+\".png\")\n\n# Draw the RGB and depth image for passive and active mode\nplt.subplot(1, 2, 1)\nplt.title('Depth image')\nplt.imshow(depth_raw)\nplt.subplot(1, 2, 2)\nplt.title('Depth image2')\nplt.imshow(depth_raw2)\nplt.show()\n\n# Get the default intrinsic parameters of the camera\np = o3d.camera.PinholeCameraIntrinsic(o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)\n# Change the intrinsic parameters of the camera to match the chosen resolution\np.intrinsic_matrix=[[421.139, 0.0, 426.176], [ 0.0, 421.139, 237.017], [ 0.0, 0.0, 1.0]] # 848*480 resolution\n# Create the point cloud from the rgbd image\npcd = o3d.geometry.PointCloud.create_from_depth_image(\n depth_raw,p)\npcd2 = o3d.geometry.PointCloud.create_from_depth_image(\n depth_raw2,p)\n\n# Flip itthe point cloud, otherwise it will be upside down\npcd2.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n# Save the point 
cloud\no3d.io.write_point_cloud(\"cloud2\"+timestamp+\".ply\", pcd2)\n# Get back the point cloud\npcd_load2 = o3d.io.read_point_cloud(\"cloud2\"+timestamp+\".ply\")\n\n# Visualize the merge point cloud\no3d.visualization.draw_geometries([pcd_load2])\n\n# open3d librairie to use RANSAC for a circular shape\ncirc = pyrsc.Circle()\n\n# convert Open3D.o3d.geometry.PointCloud to numpy array (RANSAC needs a numpy array to work)\nxyz_load = np.asarray(pcd_load2.points)\n# RANSAC implementation for circular shape detection in point clouds\ncenter, axis, radius, inliers = circ.fit(xyz_load, thresh=0.03, maxIteration=300)\nprint(radius*2000)\n\n# Select the inliers and the outliers points\ninlier_cloud = pcd_load2.select_by_index(inliers)\noutlier_cloud = pcd_load2.select_by_index(inliers, invert=True)\n\n# Merge the two point clouds\n# Save the data structure\ninlier_np = np.asarray(inlier_cloud.points)\noutlier_np = np.asarray(outlier_cloud.points)\n# Add the point clouds all together\nrans_np = np.concatenate((inlier_np,outlier_np), axis=0)\n# Get the individuals colors of each point cloud\ninlier_color = np.asarray(inlier_cloud.colors)\noutlier_color = np.asarray(outlier_cloud.colors)\nrans_color = np.concatenate((inlier_color, outlier_color), axis=0)\n# Create a new pointcloud instance and read the pointcloud, color together\nrans = o3d.geometry.PointCloud()\nrans.points = o3d.utility.Vector3dVector(rans_np)\nrans.colors = o3d.utility.Vector3dVector(rans_color)\n# Save the inlier and outlier point cloud\no3d.io.write_point_cloud(\"ransac_cloud\"+timestamp+\".ply\", rans)\n# Get back the inlier and outlier point cloud\nrans_load = o3d.io.read_point_cloud(\"ransac_cloud\"+timestamp+\".ply\")\n# Visualize the inlier and outlier point clouds\no3d.visualization.draw_geometries([rans_load])" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "numpy.asarray", "numpy.concatenate", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ] ]
EelcovdW/dists.pt
[ "994c698c846226ccf1905302f2bab860a2b24ca4" ]
[ "probabll/distributions/concrete.py" ]
[ "import torch\nfrom torch.distributions.uniform import Uniform\nfrom torch.distributions.kl import register_kl\n\nEPS = 1e-5\n\nclass BinaryConcrete(torch.distributions.relaxed_bernoulli.RelaxedBernoulli):\n \n def __init__(self, temperature, probs=None, logits=None, validate_args=None):\n super(BinaryConcrete, self).__init__(temperature, probs=probs, logits=logits, validate_args=validate_args)\n \n def cdf(self, value):\n return torch.sigmoid((torch.log(value + EPS) - torch.log(1. - value + EPS)) * self.temperature - self.logits)\n \n def icdf(self, value):\n return torch.sigmoid((torch.log(value + EPS) - torch.log(1. - value + EPS) + self.logits) / self.temperature)\n \n def rsample_truncated(self, k0, k1, sample_shape=torch.Size()): \n shape = self._extended_shape(sample_shape)\n probs = torch.distributions.utils.clamp_probs(self.probs.expand(shape))\n uniforms = Uniform(self.cdf(torch.full_like(self.logits, k0)), \n self.cdf(torch.full_like(self.logits, k1))).rsample(sample_shape)\n x = (uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()) / self.temperature\n return torch.sigmoid(x)\n\n\ndef kl_concrete_concrete(p, q, n_samples=1):\n \"\"\"\n KL is estimated for the logits of the concrete distribution to avoid underflow.\n \"\"\"\n x_logit = p.base_dist.rsample(torch.Size([n_samples]))\n return (p.base_dist.log_prob(x_logit) - q.base_dist.log_prob(x_logit)).mean(0)\n\n\n@register_kl(BinaryConcrete, BinaryConcrete)\ndef _kl_concrete_concrete(p, q):\n return kl_concrete_concrete(p, q, n_samples=1)\n" ]
[ [ "torch.sigmoid", "torch.Size", "torch.distributions.kl.register_kl", "torch.log", "torch.full_like" ] ]