repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
dvirg/auctions | [
"da706f3d11b9582c7f811de9f50b96b43ac8cbd0"
] | [
"old/SimultaneousAscendingPriceVector.py"
] | [
"#!python3\n\n\"\"\"\nDefines classes related to price-vectors.\n\nAuthor: Erel Segal-Halevi\nSince: 2019-12\n\"\"\"\n\nimport logging, sys\nfrom typing import *\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n# To enable tracing, set logger.setLevel(logging.INFO)\n\nfrom prices import *\n\ndef dot(xx:list,yy:list):\n \"\"\"\n Dot product of two lists.\n >>> dot([1,2,3],[4,5,6])\n 32\n \"\"\"\n return sum([x*y for (x,y) in zip(xx,yy)])\n\nfrom enum import Enum\nclass PriceStatus(Enum):\n STOPPED_AT_AGENT_VALUE = 1\n STOPPED_AT_ZERO_SUM = 2\n\n\nclass SimultaneousAscendingPriceVectors:\n \"\"\"\n Represents a collection of tied price-vectors, corresponding to different PS recipes in the same market.\n All price-vectors must have the same number of categories.\n During price-increases, all price-vectors retain the same sum.\n\n >>> pv = SimultaneousAscendingPriceVectors([[1, 1, 0, 0], [1, 0, 1, 1]], -10000)\n >>> str(pv)\n \"['[-5000.0, -5000.0, -2500.0, -2500.0]', '[-5000.0, -5000.0, -2500.0, -2500.0]'] None\"\n \"\"\"\n def __init__(self, ps_recipes: List[List[int]], initial_price_sum:float):\n if len(ps_recipes)==0:\n raise ValueError(\"Empty list of recipes\")\n\n num_categories = len(ps_recipes[0])\n for ps_recipe in ps_recipes:\n if len(ps_recipe) != num_categories:\n raise ValueError(\"Different category counts: {} vs {}\".format(num_categories, len(ps_recipe)))\n\n initial_prices = calculate_initial_prices(ps_recipes, initial_price_sum)\n vectors = []\n for ps_recipe in ps_recipes:\n vectors.append(AscendingPriceVector(ps_recipe, initial_prices))\n\n self.vectors = vectors\n self.num_categories = num_categories\n self.status = None # status of the latest price-increase operation\n\n def price_sum(self):\n return self.vectors[0].price_sum()\n\n def map_category_index_to_price(self):\n \"\"\"\n >>> pv = SimultaneousAscendingPriceVectors([[1, 1, 0, 0], [1, 0, 1, 1]], -10000)\n >>> pv.map_category_index_to_price()\n [-5000.0, -5000.0, -2500.0, -2500.0]\n >>> pv.increase_prices ([(1,10,\"seller\"), (2,10,\"half-seller-A\")])\n >>> pv.map_category_index_to_price()[2]\n 10.0\n >>> pv.increase_prices ([(1,10,\"seller\"), (3,10,\"half-seller-B\")])\n >>> pv.map_category_index_to_price()[1]\n 10.0\n \"\"\"\n result = [None]*self.num_categories\n for vector in self.vectors:\n for category_index in range(self.num_categories):\n if vector.ps_recipe[category_index] > 0:\n if result[category_index] is None:\n result[category_index] = vector.prices[category_index]\n elif result[category_index] != vector.prices[category_index]:\n raise ValueError(\"Inconsistent prices for category {}: {} vs {}\".format(category_index, result[category_index], vector.prices[category_index]))\n else:\n pass\n return result\n\n def __getitem__(self, vector_index:int):\n return self.vectors[vector_index]\n\n def increase_prices(self, increases:List[Tuple[int,float,str]]):\n \"\"\"\n Simultaneously increase the prices of all vectors, keeping their sum equal.\n :param increases: a list of tuples; each tuple contains arguments to the increase_prices method of AscendingPriceVector:\n (category_index, new_price, description)\n\n >>> pv = SimultaneousAscendingPriceVectors([[1, 1, 0, 0], [1, 0, 1, 1]], -10000)\n >>> pv.increase_prices ([(0,10,\"buyer\"), (0,10,\"buyer\")])\n >>> str(pv)\n \"['[10.0, -5000.0, -2500.0, -2500.0]', '[10.0, -5000.0, -2500.0, -2500.0]'] PriceStatus.STOPPED_AT_AGENT_VALUE\"\n >>> pv.increase_prices ([(1,-80, \"seller\"), (2,-80,\"halfseller-A\")])\n >>> 
str(pv)\n \"['[10.0, -2580.0, -80.0, -2500.0]', '[10.0, -2580.0, -80.0, -2500.0]'] PriceStatus.STOPPED_AT_AGENT_VALUE\"\n >>> pv.increase_prices ([(1,-80, \"seller\"), (3,-80,\"halfseller-B\")])\n >>> str(pv)\n \"['[10.0, -160.0, -80.0, -80.0]', '[10.0, -160.0, -80.0, -80.0]'] PriceStatus.STOPPED_AT_AGENT_VALUE\"\n \"\"\"\n if len(increases) != len(self.vectors):\n raise ValueError(\"There should be an increase-triplet per vector. increases={}, vectors={}\".format(increases, self.vectors))\n logger.info(\" Prices before increase: %s\", self.map_category_index_to_price())\n logger.info(\" Planned increase: %s\", increases)\n new_sums = [0]*len(self.vectors)\n for vector_index, vector in enumerate(self.vectors):\n (category_index, new_price, _) = increases[vector_index]\n new_sums[vector_index] = vector.price_sum_after_increase(category_index, new_price)\n min_new_sum = min(0, min(new_sums))\n if -0.00000000001 < min_new_sum and min_new_sum < 0.00000000001:\n min_new_sum = 0\n logger.info(\" Price-sums after increase: %s. Min sum: %f\", new_sums, min_new_sum)\n\n for vector_index, vector in enumerate(self.vectors):\n (category_index, new_price, description) = increases[vector_index]\n vector.increase_price_up_to_balance(category_index, new_price, description, sum_upper_bound=min_new_sum)\n\n self.status = PriceStatus.STOPPED_AT_ZERO_SUM if min_new_sum==0 else PriceStatus.STOPPED_AT_AGENT_VALUE\n\n\n def __str__(self):\n return str([str(v) for v in self.vectors]) + \" \" + str(self.status)\n\n\n\ndef calculate_initial_prices(ps_recipes:List[int], initial_price_sum:float)->List[float]:\n \"\"\"\n :param ps_recipes: A list of PS recipes.\n :param initial_price_sum: The sum that the price-vectors for all recipes should have.\n :return: A vector of initial prices.\n\n >>> p = calculate_initial_prices([[1,1,0,0],[1,0,1,1]], -10000)\n >>> p[0]\n -5000.0\n >>> p[1]\n -5000.0\n >>> p[2]\n -2500.0\n >>> p[3]\n -2500.0\n \"\"\"\n num_recipes = len(ps_recipes)\n num_categories = len(ps_recipes[0])\n max_price = initial_price_sum/num_categories\n\n from scipy.optimize import linprog\n result = linprog( # variables: price_heder, m, price_salon\n [-1]*num_categories, # Minimize the sum of prices\n A_eq=ps_recipes,\n b_eq=[initial_price_sum]*num_recipes, # The sum of every recipe must be the same\n bounds=[(None, max_price)]*num_categories,\n method=\"revised simplex\"\n )\n if result.status==0:\n return list(result.x)\n else:\n raise ValueError(\"Cannot determine initial prices: \"+result.message)\n\n\n\nif __name__ == \"__main__\":\n import doctest\n (failures,tests) = doctest.testmod(report=True)\n print (\"{} failures, {} tests\".format(failures,tests))\n # ps_recipes = [\n # [1, 1, 0, 0],\n # [1, 0, 1, 1]]\n # sum_prices = -10000\n # print(calculate_initial_prices(ps_recipes,sum_prices))\n"
] | [
[
"scipy.optimize.linprog"
]
] |
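
The `apis` entry for this row points at `scipy.optimize.linprog`, which `calculate_initial_prices` uses to find the highest price vector whose per-recipe sum equals `initial_price_sum`. Below is a minimal standalone sketch of that call pattern, added for illustration only and not part of the dataset; the recipe values come from the file's doctest, and `method="highs"` is substituted because `"revised simplex"` has been removed from recent SciPy releases.

```python
# Illustrative sketch (not a dataset row): the linprog call pattern from
# calculate_initial_prices, with example recipes taken from the file's doctest.
from scipy.optimize import linprog

ps_recipes = [[1, 1, 0, 0],
              [1, 0, 1, 1]]                      # one row per PS recipe
initial_price_sum = -10000
num_recipes = len(ps_recipes)
num_categories = len(ps_recipes[0])
max_price = initial_price_sum / num_categories   # upper bound on every single price

result = linprog(
    [-1] * num_categories,                       # minimize -sum(prices) == maximize the sum
    A_eq=ps_recipes,                             # each recipe's prices must sum to the same value
    b_eq=[initial_price_sum] * num_recipes,
    bounds=[(None, max_price)] * num_categories,
    method="highs",                              # assumption: modern SciPy; the file used "revised simplex"
)
if result.status == 0:
    print(list(result.x))                        # e.g. [-5000.0, -5000.0, -2500.0, -2500.0]
else:
    raise ValueError("Cannot determine initial prices: " + result.message)
```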
maxxiefjv/privpack | [
"b6bff78588362e57bc3f1268ab864026db4a1afa"
] | [
"privpack-app/__main__.py"
] | [
"from runners import GaussianNetworkRunner, BinaryNetworkRunner\nfrom experiments import BinaryExperiment, GaussianExperiment\nfrom priv_bmi import BMIExperiment\nfrom utils import get_binary_data, get_gaussian_data\nfrom utils import save_results\n\nfrom privpack import BinaryPrivacyPreservingAdversarialNetwork as BinaryGAN\nfrom privpack import GaussianPrivacyPreservingAdversarialNetwork as GaussGAN\n\nfrom privpack.core.criterion import PGANCriterion\nfrom privpack.core.criterion import BinaryMutualInformation, BinaryHammingDistance, NegativeBinaryMutualInformation, BinaryMaximalLeakage, NegativeBinaryMaximalLeakage\nfrom privpack.core.criterion import BinaryAlphaLeakage, NegativeBinaryAlphaLeakage\n\nfrom privpack.core.criterion import GaussianAlphaLeakage, NegativeGaussianAlphaLeakage\nfrom privpack.core.criterion import GaussianMaximalLeakage, NegativeGaussianMaximalLeakage\n\nfrom privpack.core.criterion import NegativeGaussianMutualInformation, GaussianMutualInformation, MeanSquaredError\n\nimport argparse\nimport torch\nimport numpy as np\nimport json\n\ndef run_bmi(args):\n BMIExperiment().run(args)\n\ndef run_gaussian_1(args):\n (privacy_size, public_size, noise_size, hidden_layers_width, release_size) = (1, 1, 1, 5, 1)\n run_gaussian(privacy_size, public_size, noise_size, hidden_layers_width, release_size, observation_model='full', args=args)\n\ndef run_gaussian_5(args):\n (privacy_size, public_size, noise_size, hidden_layers_width, release_size) = (5, 5, 8, 20, 5)\n run_gaussian(privacy_size, public_size, noise_size, hidden_layers_width, release_size, observation_model='public', args=args)\n\ndef run_gaussian(privacy_size, public_size, noise_size, hidden_layers_width, release_size, observation_model, args):\n (epochs, batch_size, lambd, delta, ks) = (args.epochs, args.batchsize, args.lambd, args.delta, args.sample)\n (train_data, test_data) = get_gaussian_data(privacy_size, public_size, args.train_input, print_metrics=True)\n\n if not ks:\n ks = [1]\n\n print(\"Training with lambdas: {}, deltas: {}, ks: {}\".format(lambd, delta, ks))\n lr = args.learning_rate\n \n results = {}\n if not args.validate:\n for d in delta:\n for l in lambd:\n for k in ks:\n for a in args.alpha:\n runner = GaussianNetworkRunner(privacy_size, public_size, noise_size, hidden_layers_width, release_size, observation_model, l, d, gan_criterion=gauss_criterion_switch[args.criterion](l, d, a), lr=lr)\n result = runner.run(train_data, test_data, epochs, batch_size, k, verbose=True)\n results.setdefault(a, {}).setdefault(d, {}).setdefault(l, result)\n save_results(results, args)\n\n else:\n runner = GaussianExperiment(privacy_size, public_size, noise_size, hidden_layers_width, release_size, observation_model)\n results = runner.run(train_data, epochs, batch_size, lambd, delta, k, verbose=True)\n\n print(json.dumps(results, sort_keys=True, indent=4))\n save_results(results, args)\n\ndef run_binary(args):\n (privacy_size, public_size, release_size) = (1, 1, 1)\n (epochs, batch_size, lambd, delta) = (args.epochs, args.batchsize, args.lambd, args.delta)\n (train_data, test_data) = get_binary_data(privacy_size, public_size, args.train_input, print_metrics=True)\n\n print(\"Training with lambdas: {}, deltas: {}\".format(lambd, delta))\n\n results = {}\n if not args.validate:\n for d in delta:\n for l in lambd:\n for a in args.alpha:\n runner = BinaryNetworkRunner(l, d, gan_criterion=binary_criterion_switch[args.criterion](l, d, a))\n result = runner.run(train_data, test_data, epochs, batch_size, verbose=True)\n 
results.setdefault(a, {}).setdefault(d, {}).setdefault(l, result)\n\n else:\n runner = BinaryExperiment()\n results = runner.run(train_data, epochs, batch_size, lambd, delta, args.criterion, args.alpha, verbose=True)\n\n print(json.dumps(results, indent=4))\n save_results(results, args)\n\nbinary_criterion_switch = {\n 'mi': lambda lambd, delta, x: PGANCriterion().add_privacy_criterion(BinaryMutualInformation()).add_privacy_criterion(BinaryHammingDistance(lambd, delta)).add_adversary_criterion(NegativeBinaryMutualInformation()),\n 'maxl': lambda lambd, delta, x: PGANCriterion().add_privacy_criterion(BinaryMaximalLeakage()).add_privacy_criterion(BinaryHammingDistance(lambd, delta)).add_adversary_criterion(NegativeBinaryMaximalLeakage()),\n 'alpha': lambda lambd, delta, x: PGANCriterion().add_privacy_criterion(BinaryAlphaLeakage(x)).add_privacy_criterion(BinaryHammingDistance(lambd, delta)).add_adversary_criterion(NegativeBinaryAlphaLeakage(x)),\n}\n\ngauss_criterion_switch = {\n 'mi': lambda lambd, delta, x: PGANCriterion().add_privacy_criterion(GaussianMutualInformation()).add_privacy_criterion(MeanSquaredError(lambd, delta)).add_adversary_criterion(NegativeGaussianMutualInformation()),\n 'maxl': lambda lambd, delta, x: PGANCriterion().add_privacy_criterion(GaussianMaximalLeakage()).add_privacy_criterion(MeanSquaredError(lambd, delta)).add_adversary_criterion(NegativeGaussianMaximalLeakage()),\n 'alpha': lambda lambd, delta, x: PGANCriterion().add_privacy_criterion(GaussianAlphaLeakage(x)).add_privacy_criterion(MeanSquaredError(lambd, delta)).add_adversary_criterion(NegativeGaussianAlphaLeakage(x)),\n}\n\nnetwork_arg_switcher = {\n 'binary': run_binary,\n 'gaussian1': run_gaussian_1,\n 'gaussian5': run_gaussian_5,\n 'bmi': run_bmi\n}\n\nap = argparse.ArgumentParser(description=\"\"\"\nThis is an (example) implementation of the privpack library. Please consult the privpack documenation for the exact use of the \narguments and options defined below.\n\"\"\")\n\nap.add_argument('network', help=\"Define which implementation of the GAN defined in the privpack library to run.\", \n metavar=list(network_arg_switcher.keys()),\n choices=network_arg_switcher.keys())\n\nap.add_argument('-l', '--lambd', help=\"Define the lambda to use in the loss function. Train a network instance per value specified.\",\n type=int,\n nargs='*',\n default=[500])\n\nap.add_argument('-d', '--delta', help=\"Define the delta to use in the loss function. Train a network instance per value specified.\",\n type=float,\n nargs='*',\n default=np.linspace(1, 0, 11))\n\nap.add_argument('-k', '--sample', help=\"Only valid for gaussian network.Define the number of samples to be drawn from the privatizer network during training. Train a network instance per value specified.\",\n type=int,\n nargs='*',\n metavar=\"NO. SAMPLES\",\n default=None)\n\nap.add_argument('-b', '--batchsize', help=\"Define the number of samples used per minibatch iteration.\",\n type=int,\n default=200)\n\nap.add_argument('-e', '--epochs', help=\"Define the number of epochs to run the training process.\",\n type=int,\n default=500)\n\nap.add_argument('-o', '--output', help=\"Store the results in a specified file to json format. 
Default output is no output.\",\n type=str,\n default=None)\n\nap.add_argument('-r', '--learning-rate', help=\"Specify the learning rate with which you want to train the release mechanism.\",\n type=float,\n default=1e-2)\n\nap.add_argument('-i', '--train-input', help=\"Specify the input to use in the training procedure.\",\n type=str,\n default='uncorrelated')\n\nap.add_argument('-c', '--criterion', help=\"(Finite variables assumed) Specify the criterion to use in the training procedure.\",\n type=str,\n metavar=list(binary_criterion_switch.keys()),\n default='mi')\n\nap.add_argument('-a', '--alpha', help=\"(Finite variables assumed) Specify the alpha in the criterion.\",\n type=float, nargs='*',\n default=[1])\n\nap.add_argument('--output-as-suffix', help='Use the output argument as suffix to the default generated outputname',\n default=False, dest='suffix',\n action='store_true')\n\nap.add_argument('--validate', help='Validate this run with kfold validation.',\n default=False,\n action='store_true')\n\ndef main():\n args = ap.parse_args()\n network_arg_switcher[args.network](args)\n\nif __name__ == \"__main__\":\n main()\nelse:\n raise EnvironmentError('This is an (example) implementation of the privpack library and should not be used as library.')\n\n"
] | [
[
"numpy.linspace"
]
] |
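
The only API flagged for this row is `numpy.linspace`, used as the default value of the `--delta` argparse option in `privpack-app/__main__.py`. The short sketch below, added for illustration and not part of the dataset, shows that usage in isolation.

```python
# Illustrative sketch (not a dataset row): numpy.linspace as an argparse default,
# mirroring the --delta option in privpack-app/__main__.py.
import argparse
import numpy as np

ap = argparse.ArgumentParser()
ap.add_argument('-d', '--delta',
                help="Define the delta to use in the loss function.",
                type=float,
                nargs='*',
                default=np.linspace(1, 0, 11))   # 11 evenly spaced deltas: 1.0, 0.9, ..., 0.0

args = ap.parse_args([])                         # no CLI args, so the linspace default is kept
print(list(args.delta))                          # approximately [1.0, 0.9, 0.8, ..., 0.0]
```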
KennyCandy/HAR | [
"739ede1907374215cfc1dd6bd525d8d5b5f4606e"
] | [
"module45/CCCPC_32_32.py"
] | [
"# Note that the dataset must be already downloaded for this script to work, do:\n# $ cd data/\n# $ python download_dataset.py\n# quoc_trinh\n\nimport tensorflow as tf\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\n\nimport os\nimport sys\nimport datetime\n\n# get current file_name as [0] of array\nfile_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]\nprint(\" File Name:\")\nprint(file_name)\nprint(\"\")\n# FLAG to know that whether this is traning process or not.\nFLAG = 'train'\nPOOL_X = 16\nPOOL_Y = 18\nN_HIDDEN_CONFIG = 32\n\nsave_path_name = file_name + \"/model.ckpt\"\n\nprint(datetime.datetime.now())\n# Write to file: time to start, type, time to end\nf = open(file_name + '/time.txt', 'a+')\nf.write(\"------------- \\n\")\nf.write(\"This is time \\n\")\nf.write(\"Started at \\n\")\nf.write(str(datetime.datetime.now())+'\\n')\n\nif __name__ == \"__main__\":\n\n # -----------------------------\n # step1: load and prepare data\n # -----------------------------\n # Those are separate normalised input features for the neural network\n INPUT_SIGNAL_TYPES = [\n \"body_acc_x_\",\n \"body_acc_y_\",\n \"body_acc_z_\",\n \"body_gyro_x_\",\n \"body_gyro_y_\",\n \"body_gyro_z_\",\n \"total_acc_x_\",\n \"total_acc_y_\",\n \"total_acc_z_\"\n ]\n\n # Output classes to learn how to classify\n LABELS = [\n \"WALKING\",\n \"WALKING_UPSTAIRS\",\n \"WALKING_DOWNSTAIRS\",\n \"SITTING\",\n \"STANDING\",\n \"LAYING\"\n ]\n\n DATA_PATH = \"../data/\"\n DATASET_PATH = DATA_PATH + \"UCI HAR Dataset/\"\n print(\"\\n\" + \"Dataset is now located at: \" + DATASET_PATH)\n # Preparing data set:\n TRAIN = \"train/\"\n TEST = \"test/\"\n\n # Load \"X\" (the neural network's training and testing inputs)\n def load_X(X_signals_paths):\n X_signals = []\n\n for signal_type_path in X_signals_paths:\n file = open(signal_type_path, 'rb')\n # Read dataset from disk, dealing with text files' syntax\n X_signals.append(\n [np.array(serie, dtype=np.float32) for serie in [\n row.replace(' ', ' ').strip().split(' ') for row in file\n ]]\n )\n file.close()\n\n \"\"\"Examples\n --------\n >> > x = np.arange(4).reshape((2, 2))\n >> > x\n array([[0, 1],\n [2, 3]])\n\n >> > np.transpose(x)\n array([[0, 2],\n [1, 3]])\n\n >> > x = np.ones((1, 2, 3))\n >> > np.transpose(x, (1, 0, 2)).shape\n (2, 1, 3)\n \"\"\"\n\n return np.transpose(np.array(X_signals), (1, 2, 0))\n\n X_train_signals_paths = [\n DATASET_PATH + TRAIN + \"Inertial Signals/\" + signal + \"train.txt\" for signal in INPUT_SIGNAL_TYPES\n ]\n X_test_signals_paths = [\n DATASET_PATH + TEST + \"Inertial Signals/\" + signal + \"test.txt\" for signal in INPUT_SIGNAL_TYPES\n ]\n X_train = load_X(X_train_signals_paths) # [7352, 128, 9]\n X_test = load_X(X_test_signals_paths) # [7352, 128, 9]\n\n # print(X_train)\n print(len(X_train)) # 7352\n print(len(X_train[0])) # 128\n print(len(X_train[0][0])) # 9\n\n print(type(X_train))\n\n X_train = np.reshape(X_train, [-1, 32, 36])\n X_test = np.reshape(X_test, [-1, 32, 36])\n\n print(\"-----------------X_train---------------\")\n # print(X_train)\n print(len(X_train)) # 7352\n print(len(X_train[0])) # 32\n print(len(X_train[0][0])) # 36\n\n print(type(X_train))\n # exit()\n\n y_train_path = DATASET_PATH + TRAIN + \"y_train.txt\"\n y_test_path = DATASET_PATH + TEST + \"y_test.txt\"\n\n def one_hot(label):\n \"\"\"convert label from dense to one hot\n argument:\n label: ndarray dense label ,shape: [sample_num,1]\n return:\n one_hot_label: ndarray one hot, shape: 
[sample_num,n_class]\n \"\"\"\n label_num = len(label)\n new_label = label.reshape(label_num) # shape : [sample_num]\n # because max is 5, and we will create 6 columns\n n_values = np.max(new_label) + 1\n return np.eye(n_values)[np.array(new_label, dtype=np.int32)]\n\n # Load \"y\" (the neural network's training and testing outputs)\n def load_y(y_path):\n file = open(y_path, 'rb')\n # Read dataset from disk, dealing with text file's syntax\n y_ = np.array(\n [elem for elem in [\n row.replace(' ', ' ').strip().split(' ') for row in file\n ]],\n dtype=np.int32\n )\n file.close()\n # Subtract 1 to each output class for friendly 0-based indexing\n return y_ - 1\n\n\n y_train = one_hot(load_y(y_train_path))\n y_test = one_hot(load_y(y_test_path))\n\n print(\"---------y_train----------\")\n # print(y_train)\n print(len(y_train)) # 7352\n print(len(y_train[0])) # 6\n\n # -----------------------------------\n # step2: define parameters for model\n # -----------------------------------\n class Config(object):\n \"\"\"\n define a class to store parameters,\n the input should be feature mat of training and testing\n \"\"\"\n\n def __init__(self, X_train, X_test):\n # Input data\n self.train_count = len(X_train) # 7352 training series\n self.test_data_count = len(X_test) # 2947 testing series\n self.n_steps = len(X_train[0]) # 128 time_steps per series\n\n # Training\n self.learning_rate = 0.0025\n self.lambda_loss_amount = 0.0015\n self.training_epochs = 300\n self.batch_size = 1000\n\n # LSTM structure\n self.n_inputs = len(X_train[0][0]) # Features count is of 9: three 3D sensors features over time\n self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network\n self.n_classes = 6 # Final output classes\n self.W = {\n 'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32]\n 'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6]\n }\n self.biases = {\n 'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32]\n 'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]\n }\n\n config = Config(X_train, X_test)\n # print(\"Some useful info to get an insight on dataset's shape and normalisation:\")\n # print(\"features shape, labels shape, each features mean, each features standard deviation\")\n # print(X_test.shape, y_test.shape,\n # np.mean(X_test), np.std(X_test))\n # print(\"the dataset is therefore properly normalised, as expected.\")\n #\n #\n # ------------------------------------------------------\n # step3: Let's get serious and build the neural network\n # ------------------------------------------------------\n # [none, 128, 9]\n X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])\n # [none, 6]\n Y = tf.placeholder(tf.float32, [None, config.n_classes])\n\n print(\"-------X Y----------\")\n print(X)\n X = tf.reshape(X, shape=[-1, 32, 36])\n print(X)\n\n print(Y)\n Y = tf.reshape(Y, shape=[-1, 6])\n print(Y)\n\n # Weight Initialization\n def weight_variable(shape):\n # tra ve 1 gia tri random theo thuat toan truncated_ normal\n initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)\n return tf.Variable(initial)\n\n def bias_varibale(shape):\n initial = tf.constant(0.1, shape=shape, name='Bias')\n return tf.Variable(initial)\n\n # Convolution and Pooling\n def conv2d(x, W):\n # Must have `strides[0] = strides[3] = 1 `.\n # For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1] `.\n return tf.nn.conv2d(input=x, 
filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')\n\n def max_pool_2x2(x):\n return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='max_pool')\n\n def LSTM_Network(feature_mat, config):\n \"\"\"model a LSTM Network,\n it stacks 2 LSTM layers, each layer has n_hidden=32 cells\n and 1 output layer, it is a full connet layer\n argument:\n feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]\n config: class containing config of network\n return:\n : matrix output shape [batch_size,n_classes]\n \"\"\"\n\n W_conv1 = weight_variable([3, 3, 1, 32])\n b_conv1 = bias_varibale([32])\n # x_image = tf.reshape(x, shape=[-1, 28, 28, 1])\n feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])\n print(\"----feature_mat_image-----\")\n print(feature_mat_image.get_shape())\n\n h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)\n h_pool1 = h_conv1\n\n # Second Convolutional Layer\n W_conv2 = weight_variable([3, 3, 32, 32])\n b_conv2 = weight_variable([32])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = h_conv2\n\n # Third Convolutional Layer\n W_conv3 = weight_variable([3, 3, 32, 32])\n b_conv3 = weight_variable([32])\n h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)\n h_pool3 = max_pool_2x2(h_conv3)\n\n # Forth Convolutional Layer\n W_conv4 = weight_variable([3, 3, 32, 128])\n b_conv4 = weight_variable([128])\n h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)\n h_pool4 = h_conv4\n\n # Fifth Convolutional Layer\n W_conv5 = weight_variable([3, 3, 128, 1])\n b_conv5 = weight_variable([1])\n h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)\n h_pool5 = h_conv5\n\n \n\n h_pool5 = tf.reshape(h_pool5, shape=[-1, POOL_X, POOL_Y])\n feature_mat = h_pool5\n print(\"----feature_mat-----\")\n print(feature_mat)\n # exit()\n\n # W_fc1 = weight_variable([8 * 9 * 1, 1024])\n # b_fc1 = bias_varibale([1024])\n # h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])\n # h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n # print(\"----h_fc1_drop-----\")\n # print(h_fc1)\n # exit()\n #\n # # keep_prob = tf.placeholder(tf.float32)\n # keep_prob = tf.placeholder(1.0)\n # h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)\n # print(\"----h_fc1_drop-----\")\n # print(h_fc1_drop)\n # exit()\n #\n # W_fc2 = weight_variable([1024, 10])\n # b_fc2 = bias_varibale([10])\n #\n # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n # print(\"----y_conv-----\")\n # print(y_conv)\n # exit()\n\n # Exchange dim 1 and dim 0\n # Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]\n feature_mat = tf.transpose(feature_mat, [1, 0, 2])\n # New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]\n print(\"----feature_mat-----\")\n print(feature_mat)\n # exit()\n\n # Temporarily crush the feature_mat's dimensions\n feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # 9\n # New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9\n\n # Linear activation, reshaping inputs to the LSTM's number of hidden:\n hidden = tf.nn.relu(tf.matmul(\n feature_mat, config.W['hidden']\n ) + config.biases['hidden'])\n # New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]\n\n print(\"--n_steps--\")\n print(config.n_steps)\n print(\"--hidden--\")\n print(hidden)\n\n # Split the series because the rnn cell needs time_steps features, each of shape:\n hidden = tf.split(0, config.n_steps/4, hidden) # (0, 128, 
[128*batch_size, 32])\n # New hidden's shape: a list of length \"time_step\" containing tensors of shape [batch_size, n_hidden]\n\n # Define LSTM cell of first hidden layer:\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)\n\n # Stack two LSTM layers, both layers has the same shape\n lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)\n\n # Get LSTM outputs, the states are internal to the LSTM cells,they are not our attention here\n outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)\n # outputs' shape: a list of lenght \"time_step\" containing tensors of shape [batch_size, n_hidden]\n\n print(\"------------------list-------------------\")\n print(outputs)\n # Get last time step's output feature for a \"many to one\" style classifier,\n # as in the image describing RNNs at the top of this page\n lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]\n\n print(\"------------------last outputs-------------------\")\n print (lstm_last_output)\n\n # Linear activation\n return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']\n\n pred_Y = LSTM_Network(X, config) # shape[?,6]\n print(\"------------------pred_Y-------------------\")\n print(pred_Y)\n\n # Loss,train_step,evaluation\n l2 = config.lambda_loss_amount * \\\n sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())\n # Softmax loss and L2\n cost = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2\n train_step = tf.train.AdamOptimizer(\n learning_rate=config.learning_rate).minimize(cost)\n\n correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))\n\n # --------------------------------------------\n # step4: Hooray, now train the neural network\n # --------------------------------------------\n # Note that log_device_placement can be turned ON but will cause console spam.\n\n # Initializing the variables\n init = tf.initialize_all_variables()\n # Add ops to save and restore all the variables.\n saver = tf.train.Saver()\n best_accuracy = 0.0\n\n # sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))\nif (FLAG == 'train') : # If it is the training mode\n with tf.Session() as sess:\n # tf.initialize_all_variables().run()\n sess.run(init) # .run()\n f.write(\"---Save model \\n\")\n\n # Start training for each batch and loop epochs\n for i in range(config.training_epochs):\n for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500)\n range(config.batch_size, config.train_count + 1,\n config.batch_size)): # (1500, 7353, 1500)\n print(start)\n print(end)\n\n sess.run(train_step, feed_dict={X: X_train[start:end],\n Y: y_train[start:end]})\n # Test completely at every epoch: calculate accuracy\n pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={\n X: X_test, Y: y_test})\n print(\"traing iter: {},\".format(i) + \\\n \" test accuracy : {},\".format(accuracy_out) + \\\n \" loss : {}\".format(loss_out))\n best_accuracy = max(best_accuracy, accuracy_out)\n \n # Save the model in this session\n save_path = saver.save(sess, file_name + \"/model.ckpt\")\n print(\"Model saved in file: %s\" % save_path)\n\n print(\"\")\n print(\"final loss: {}\").format(loss_out)\n print(\"final test accuracy: {}\".format(accuracy_out))\n print(\"best epoch's test accuracy: {}\".format(best_accuracy))\n print(\"\")\n # Write all output to file\n f.write(\"final loss:\" + 
str(format(loss_out)) +\" \\n\") \n f.write(\"final test accuracy:\" + str(format(accuracy_out)) +\" \\n\")\n f.write(\"best epoch's test accuracy:\" + str(format(best_accuracy)) + \" \\n\")\nelse :\n # Running a new session\n print(\"Starting 2nd session...\")\n with tf.Session() as sess:\n # Initialize variables\n sess.run(init)\n f.write(\"---Restore model \\n\")\n\n # Restore model weights from previously saved model\n saver.restore(sess, file_name+ \"/model.ckpt\")\n print(\"Model restored from file: %s\" % save_path_name)\n # Test completely at every epoch: calculate accuracy\n pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={\n X: X_test, Y: y_test})\n # print(\"traing iter: {},\" + \\\n # \" test accuracy : {},\".format(accuracy_out) + \\\n # \" loss : {}\".format(loss_out))\n best_accuracy = max(best_accuracy, accuracy_out)\n\n print(\"\")\n print(\"final loss: {}\").format(loss_out)\n print(\"final test accuracy: {}\".format(accuracy_out))\n print(\"best epoch's test accuracy: {}\".format(best_accuracy))\n print(\"\")\n # Write all output to file\n f.write(\"final loss:\" + str(format(loss_out)) +\" \\n\")\n f.write(\"final test accuracy:\" + str(format(accuracy_out)) +\" \\n\")\n f.write(\"best epoch's test accuracy:\" + str(format(best_accuracy)) + \" \\n\")\n\n #\n # #------------------------------------------------------------------\n # # step5: Training is good, but having visual insight is even better\n # #------------------------------------------------------------------\n # # The code is in the .ipynb\n #\n # #------------------------------------------------------------------\n # # step6: And finally, the multi-class confusion matrix and metrics!\n # #------------------------------------------------------------------\n # # The code is in the .ipynb\n\nf.write(\"Ended at \\n\")\nf.write(str(datetime.datetime.now())+'\\n')\nf.write(\"------------- \\n\")\nf.close()"
] | [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.nn.conv2d",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.random_normal",
"tensorflow.cast",
"numpy.max",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"tensorflow.train.Saver",
"tensorflow.Variable",
"tensorflow.transpose",
"numpy.eye",
"tensorflow.constant",
"tensorflow.nn.rnn_cell.MultiRNNCell",
"tensorflow.split",
"tensorflow.nn.max_pool",
"numpy.array",
"tensorflow.train.AdamOptimizer",
"tensorflow.initialize_all_variables",
"numpy.reshape",
"tensorflow.nn.rnn",
"tensorflow.Session",
"tensorflow.nn.l2_loss",
"tensorflow.truncated_normal",
"tensorflow.placeholder"
]
] |
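
This row's `apis` list is dominated by legacy TensorFlow 1.x graph-mode calls (`tf.placeholder`, `tf.Variable`, `tf.matmul`, `tf.nn.softmax_cross_entropy_with_logits`, `tf.train.AdamOptimizer`, `tf.Session`). The sketch below, added for illustration only, reduces that pattern to a toy softmax classifier; it assumes a TensorFlow 1.x install and deliberately skips the file's even older `tf.split(0, ...)` and `tf.nn.rnn` signatures.

```python
# Illustrative sketch (not a dataset row): the TF 1.x graph-mode training pattern
# used by CCCPC_32_32.py, reduced to a toy softmax classifier. Assumes TensorFlow 1.x.
import numpy as np
import tensorflow as tf

n_inputs, n_classes = 9, 6
X = tf.placeholder(tf.float32, [None, n_inputs])
Y = tf.placeholder(tf.float32, [None, n_classes])

W = tf.Variable(tf.random_normal([n_inputs, n_classes]))
b = tf.Variable(tf.random_normal([n_classes]))
logits = tf.matmul(X, W) + b

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))
train_step = tf.train.AdamOptimizer(learning_rate=0.0025).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x_batch = np.random.rand(4, n_inputs).astype(np.float32)
    y_batch = np.eye(n_classes)[np.random.randint(0, n_classes, 4)].astype(np.float32)
    _, loss = sess.run([train_step, cost], feed_dict={X: x_batch, Y: y_batch})
    print("loss:", loss)
```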
dychen24/magx | [
"3d72cfa447bcab050e97ee517b1688ef99dd480d"
] | [
"Codes/optimization/real_time_pos_face.py"
] | [
"from os import read\nimport queue\nfrom codetiming import Timer\nimport asyncio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport random\nfrom itertools import count\nimport time\nfrom matplotlib.animation import FuncAnimation\nfrom numpy.core.numeric import True_\nimport matplotlib\nimport queue\nimport asyncio\nimport struct\nimport sys\nimport time\nimport datetime\nimport atexit\nimport time\nimport numpy as np\nfrom bleak import BleakClient\nimport matplotlib.pyplot as plt\nfrom bleak import exc\nimport pandas as pd\nimport atexit\nfrom multiprocessing import Pool\nimport multiprocessing\nimport keyboard\n\nfrom src.solver import Solver_jac, Solver\nfrom src.filter import Magnet_KF, Magnet_UKF\nfrom src.preprocess import Calibrate_Data\nfrom config import pSensor_smt, pSensor_large_smt, pSensor_small_smt, pSensor_median_smt, pSensor_imu\nimport cppsolver as cs\n\n'''The parameter user should change accordingly'''\n# Change pSensor if a different sensor layout is used\npSensor = pSensor_small_smt\n\n# Change this parameter for different initial value for 1 magnet\nparams = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6,\n 0, np.log(3), 1e-2 * (-2), 1e-2 * (2), 1e-2 * (11), 0, 0])\n# Change this parameter for different initial value for 2 magnets\nparams2 = np.array([\n 40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(3),\n 1e-2 * 6, 1e-2 * 0, 1e-2 * (-1), 0, 0,\n 1e-2 * 5, 1e-2 * (4), 1e-2 * (-1), 0, 0,\n])\n\n# Your adafruit nrd52832 ble address\nble_address = \"2A59A2D4-BCD8-4AF7-B750-E51195C1CA13\"\n# Absolute or relative path to the calibration data, stored in CSV\ncali_path = 'Path to the calibration data, stored in CSV'\n\n\n'''The calculation and visualization process'''\n\n\nmatplotlib.use('Qt5Agg')\n\n\nt = 0\n# Nordic NUS characteristic for RX, which should be writable\nUART_RX_UUID = \"6e400002-b5a3-f393-e0a9-e50e24dcca9e\"\n# Nordic NUS characteristic for TX, which should be readable\nUART_TX_UUID = \"6e400003-b5a3-f393-e0a9-e50e24dcca9e\"\n\n\nresult = []\n\nworklist = multiprocessing.Manager().Queue()\n\nresults = multiprocessing.Manager().Queue()\nresults2 = multiprocessing.Manager().Queue()\n\n\ntrigger_calibration = multiprocessing.Manager().Queue()\n\n\ndef end():\n print('End of the program')\n sys.exit(0)\n\n\ndef classification(pos):\n # pos = data[:3] + 15 * data[3:]\n names = ['left cheek', 'left eye', 'mouth',\n 'nose', 'right cheek', 'right eye', 'no touch']\n\n # all boundaries are in the sensor's frame of reference\n # change according to user's face shape and sensor position\n left_boundary = -16 # y, the left most y coordinate of one's face\n right_boundary = 18 # y, the right most y coordinate of one's face\n front_boundary = 26 # z, the front most z coordinate of one's face\n low_boundary = 7 # x, the down most x coordinate of one's face\n\n # center coordinate of different part of the user's face\n centers = np.array([\n [5, -5, 14], # left cheek\n [11, -1, 17], # left eye\n [1.6, 2, 14], # mouth\n [7, 2.4, 18.5], # nose\n [8.8, 12.3, 12.4], # right cheek\n [10.8, 6, 18.2], # right eye\n ])\n\n if pos[2] > front_boundary or pos[1] < left_boundary or pos[1] > right_boundary or pos[0] < low_boundary:\n print(names[6])\n return\n dis = []\n for i in range(centers.shape[0]):\n dis.append(np.linalg.norm(pos - centers[i], ord=2))\n dis = np.stack(dis)\n pred_typ = np.argmin(dis)\n print(names[pred_typ])\n\n\ndef calculation_parallel(magcount=1, use_kf=0, use_wrist=False):\n global worklist\n global params\n global 
params2\n global results\n global results2\n global pSensor\n global trigger_calibration\n calibration = np.load('result/calibration.npz')\n offset = calibration['offset'].reshape(-1)\n scale = calibration['scale'].reshape(-1)\n local_trigger = 0\n calibration_offset = np.zeros_like(pSensor).reshape(-1)\n myparams1 = params\n myparams2 = params2\n while True:\n if not worklist.empty():\n raw_datai = worklist.get()\n if not trigger_calibration.empty():\n trigger_calibration.get()\n calibration_offset = raw_datai\n print('calibrated')\n continue\n\n datai = (raw_datai-calibration_offset)\n datai = datai.reshape(-1, 3)\n if magcount == 1:\n if np.max(np.abs(myparams1[4:7])) > 1:\n myparams1 = params\n left_boundary = -16 # y\n right_boundary = 18\n front_boundary = 26 # z\n low_boundary = 7 # x\n\n resulti = cs.solve_1mag(\n datai.reshape(-1), pSensor.reshape(-1), myparams1)\n myparams1 = resulti\n tmp = np.array([(resulti[4] + 0.175*np.sin(resulti[7])*np.cos(resulti[8])) * 1e2,\n (resulti[5] + 0.175*np.sin(resulti[7])*np.sin(resulti[8])) * 1e2, (resulti[6] + 0.175*np.cos(resulti[7])) * 1e2])\n # print(\"Real Pos: \", tmp)\n classification(tmp)\n if tmp[2] > front_boundary or tmp[1] < left_boundary or tmp[1] > right_boundary or tmp[0] < low_boundary:\n myparams1 = params\n elif magcount == 2:\n if np.max(\n np.abs(myparams2[4: 7])) > 1 or np.max(\n np.abs(myparams2[9: 12])) > 1:\n myparams2 = params2\n\n resulti = cs.solve_2mag(\n datai.reshape(-1), pSensor.reshape(-1), myparams2)\n myparams2 = resulti\n result = [resulti[4] * 1e2,\n resulti[5] * 1e2, resulti[6] * 1e2]\n\n if magcount == 2:\n result2 = [resulti[9] * 1e2,\n resulti[10] * 1e2, resulti[11] * 1e2]\n results2.put(result2)\n\n if magcount == 1:\n pass\n\n if magcount == 2:\n print(\n \"Mag 1 Position: {:.2f}, {:.2f}, {:.2f}, dis={:.2f} \\n Mag 2 Position: {:.2f}, {:.2f}, {:.2f}, dis={:.2f}\". 
format(\n result[0],\n result[1],\n result[2],\n np.sqrt(\n result[0] ** 2 +\n result[1] ** 2 +\n result[2] ** 2),\n result2[0],\n result2[1],\n result2[2],\n np.sqrt(\n result2[0] ** 2 +\n result2[1] ** 2 +\n result2[2] ** 2)))\n d1 = np.sqrt(result[0]**2 + result[1]**2 + result[2]**2)\n d2 = np.sqrt(result2[0]**2 + result2[1]**2 + result2[2]**2)\n if (d1 < 16 and d2 < 16):\n print(\"Two mags\")\n elif (d1 > 16 and d2 > 16):\n print(\"Zero mag\")\n else:\n print(\"One mag\")\n results.put(tmp)\n\n\nasync def show_mag(magcount=1):\n global t\n global pSensor\n global results\n global results2\n myresults = np.array([[0, 0, 10]])\n myresults2 = np.array([[0, 0, 10]])\n fig = plt.figure(figsize=(5, 5))\n ax = fig.gca(projection='3d')\n ax.view_init(70, -120)\n # TODO: add title\n ax.set_xlabel('x(cm)')\n ax.set_ylabel('y(cm)')\n ax.set_zlabel('z(cm)')\n ax.set_xlim([-20, 20])\n ax.set_ylim([-20, 20])\n ax.set_zlim([-10, 40])\n Xs = 1e2 * pSensor[:, 0]\n Ys = 1e2 * pSensor[:, 1]\n Zs = 1e2 * pSensor[:, 2]\n\n XXs = Xs\n YYs = Ys\n ZZs = Zs\n ax.scatter(XXs, YYs, ZZs, c='r', s=1, alpha=0.5)\n\n (magnet_pos,) = ax.plot(t / 100.0 * 5, t / 100.0 * 5, t /\n 100.0 * 5, linewidth=3, animated=True)\n if magcount == 2:\n (magnet_pos2,) = ax.plot(t / 100.0 * 5, t / 100.0 * 5, t /\n 100.0 * 5, linewidth=3, animated=True)\n plt.show(block=False)\n plt.pause(0.1)\n bg = fig.canvas.copy_from_bbox(fig.bbox)\n ax.draw_artist(magnet_pos)\n fig.canvas.blit(fig.bbox)\n # timer = Timer(text=f\"frame elapsed time: {{: .5f}}\")\n\n while True:\n # timer.start()\n fig.canvas.restore_region(bg)\n # update the artist, neither the canvas state nor the screen have\n # changed\n\n # update myresults\n if not results.empty():\n myresult = results.get()\n myresults = np.concatenate(\n [myresults, np.array(myresult).reshape(1, -1)])\n\n if myresults.shape[0] > 5:\n myresults = myresults[-5:]\n\n x = myresults[:, 0]\n y = myresults[:, 1]\n z = myresults[:, 2]\n\n xx = x\n yy = y\n zz = z\n\n magnet_pos.set_xdata(xx)\n magnet_pos.set_ydata(yy)\n magnet_pos.set_3d_properties(zz, zdir='z')\n # re-render the artist, updating the canvas state, but not the screen\n ax.draw_artist(magnet_pos)\n\n if magcount == 2:\n if not results2.empty():\n myresult2 = results2.get()\n myresults2 = np.concatenate(\n [myresults2, np.array(myresult2).reshape(1, -1)])\n\n if myresults2.shape[0] > 30:\n myresults2 = myresults2[-30:]\n x = myresults2[:, 0]\n y = myresults2[:, 1]\n z = myresults2[:, 2]\n\n xx = x\n yy = y\n zz = z\n\n magnet_pos2.set_xdata(xx)\n magnet_pos2.set_ydata(yy)\n magnet_pos2.set_3d_properties(zz, zdir='z')\n ax.draw_artist(magnet_pos2)\n\n # copy the image to the GUI state, but screen might not changed yet\n fig.canvas.blit(fig.bbox)\n # flush any pending GUI events, re-painting the screen if needed\n fig.canvas.flush_events()\n await asyncio.sleep(0)\n # timer.stop()\n\n\n@ atexit.register\ndef clean():\n print(\"Output csv\")\n # test = pd.DataFrame(columns=name, data=result)\n # test.to_csv(\"22.csv\")\n print(\"Exited\")\n\n\ndef notification_handler(sender, data):\n \"\"\"Simple notification handler which prints the data received.\"\"\"\n num = int(pSensor.size/3)\n\n global worklist\n all_data = []\n sensors = np.zeros((num, 3))\n current = [datetime.datetime.now()]\n calibration = np.load('result/calibration.npz')\n offset = calibration['offset'].reshape(-1)\n scale = calibration['scale'].reshape(-1)\n for i in range(num):\n sensors[i, 0] = struct.unpack('f', data[12 * i: 12 * i + 4])[0]\n sensors[i, 1] = 
struct.unpack('f', data[12 * i + 4: 12 * i + 8])[0]\n sensors[i, 2] = struct.unpack('f', data[12 * i + 8: 12 * i + 12])[0]\n # print(\"Sensor \" + str(i+1)+\": \"+str(sensors[i, 0]) + \", \" + str(sensors[i, 1]) + \", \" + str(sensors[i, 2]))\n current.append(\n \"(\" + str(sensors[i, 0]) + \", \" + str(sensors[i, 1]) + \", \" +\n str(sensors[i, 2]) + \")\")\n # battery_voltage = struct.unpack('f', data[12 * num: 12 * num + 4])[0]\n # print(\"Battery voltage: \" + str(battery_voltage))\n sensors = sensors.reshape(-1)\n sensors = (sensors - offset) / scale * np.mean(scale)\n\n if len(all_data) > 3:\n sensors = (sensors + all_data[-1] + all_data[-2]) / 3\n all_data.append(sensors)\n worklist.put(sensors)\n # print(\"############\")\n\n\nasync def run_ble(address, loop):\n async with BleakClient(address, loop=loop) as client:\n # wait for BLE client to be connected\n x = await client.is_connected()\n print(\"Connected: {0}\".format(x))\n print(\"Press Enter to quit...\")\n # wait for data to be sent from client\n await client.start_notify(UART_TX_UUID, notification_handler)\n while True:\n await asyncio.sleep(0.01)\n # data = await client.read_gatt_char(UART_TX_UUID)\n\n\nasync def main(magcount=1):\n \"\"\"\n This is the main entry point for the program\n \"\"\"\n # Address of the BLE device\n global ble_address\n address = (ble_address)\n\n # Run the tasks\n with Timer(text=\"\\nTotal elapsed time: {:.1f}\"):\n multiprocessing.Process(\n target=calculation_parallel, args=(magcount, 1, False)).start()\n await asyncio.gather(\n asyncio.create_task(run_ble(address, asyncio.get_event_loop())),\n asyncio.create_task(show_mag(magcount)),\n )\n\n\nif __name__ == '__main__':\n if True:\n calibration = Calibrate_Data(cali_path)\n [offset, scale] = calibration.cali_result()\n np.savez('result/calibration.npz', offset=offset, scale=scale)\n print(np.mean(scale))\n # sys.exit(0)\n\n def trigger(e):\n print(\"You triggered the calibration\")\n global trigger_calibration\n trigger_calibration.put(1)\n\n keyboard.on_press_key(\"r\", trigger)\n asyncio.run(main(1))\n"
] | [
[
"matplotlib.use",
"numpy.array",
"numpy.linalg.norm",
"numpy.zeros_like",
"numpy.sin",
"numpy.argmin",
"numpy.zeros",
"numpy.log",
"numpy.load",
"numpy.mean",
"matplotlib.pyplot.figure",
"numpy.stack",
"numpy.savez",
"matplotlib.pyplot.pause",
"numpy.sqrt",
"numpy.abs",
"numpy.cos",
"matplotlib.pyplot.show"
]
] |
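
Among the APIs listed for this row, `numpy.linalg.norm`, `numpy.stack`, and `numpy.argmin` carry the nearest-centroid logic of the `classification()` helper in `real_time_pos_face.py`. The sketch below, added for illustration only and not part of the dataset, replays that logic with the face-region centers from the file and a hypothetical magnet position.

```python
# Illustrative sketch (not a dataset row): nearest-centroid face-region lookup, following
# classification() in real_time_pos_face.py. The query position is a hypothetical value.
import numpy as np

names = ['left cheek', 'left eye', 'mouth', 'nose', 'right cheek', 'right eye']
centers = np.array([
    [5, -5, 14],        # left cheek
    [11, -1, 17],       # left eye
    [1.6, 2, 14],       # mouth
    [7, 2.4, 18.5],     # nose
    [8.8, 12.3, 12.4],  # right cheek
    [10.8, 6, 18.2],    # right eye
])

pos = np.array([6.5, 2.0, 18.0])   # hypothetical magnet position, in cm
dis = np.stack([np.linalg.norm(pos - c, ord=2) for c in centers])
print(names[int(np.argmin(dis))])  # -> 'nose'
```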
cloud-cds/cds-stack | [
"d68a1654d4f604369a071f784cdb5c42fc855d6e"
] | [
"etl/alerts/server.py"
] | [
"import pandas as pd\npd.set_option('display.width', 200)\nimport asyncio, asyncpg\nimport concurrent.futures\nimport logging\nimport json\nimport etl.io_config.server_protocol as protocol\nfrom etl.io_config.database import Database\nfrom etl.alerts.predictor_manager import PredictorManager\nimport datetime as dt\nfrom dateutil import parser\nimport pytz\nimport socket\nimport random, string\nimport functools\nimport os\nfrom etl.io_config.cloudwatch import Cloudwatch\n\ndef randomword(length):\n return ''.join(random.choice(string.ascii_uppercase) for i in range(length))\n\n\nstart_timeout = 15 #seconds\n\nHB_TIMEOUT = 5\n\nSRV_LOG_FMT = '%(asctime)s|%(name)s|%(process)s-%(thread)s|%(levelname)s|%(message)s'\nlogging.basicConfig(level=logging.INFO, format=SRV_LOG_FMT)\n\n\nclass AlertServer:\n def __init__(self, event_loop, alert_server_port=31000,\n alert_dns='0.0.0.0'):\n self.db = Database()\n self.loop = event_loop\n self.alert_message_queue = asyncio.Queue(loop =event_loop)\n self.predictor_manager = PredictorManager(self.alert_message_queue, self.loop)\n self.alert_server_port = alert_server_port\n self.alert_dns = alert_dns\n self.channel = os.getenv('etl_channel', 'on_opsdx_dev_etl')\n self.suppression_tasks = {}\n self.model = os.getenv('suppression_model', 'trews')\n self.TREWS_ETL_SUPPRESSION = int(os.getenv('TREWS_ETL_SUPPRESSION', 0))\n self.notify_web = int(os.getenv('notify_web', 0))\n self.lookbackhours = int(os.getenv('TREWS_ETL_HOURS', 24))\n self.nprocs = int(os.getenv('nprocs', 2))\n self.hospital_to_predict = os.getenv('hospital_to_predict', 'HCGH')\n self.push_based = bool(os.getenv('push_based', 0))\n self.workspace = os.getenv('workspace', 'workspace')\n self.cloudwatch_logger = Cloudwatch()\n self.job_status = {}\n\n async def async_init(self):\n self.db_pool = await self.db.get_connection_pool()\n\n\n\n async def convert_enc_ids_to_pat_ids(self, enc_ids):\n ''' Return a list of pat_ids from their corresponding enc_ids '''\n async with self.db_pool.acquire() as conn:\n sql = '''\n SELECT distinct pat_id FROM pat_enc where enc_id\n in ({})\n '''.format(','.join([str(i) for i in enc_ids]))\n pat_ids = await conn.fetch(sql)\n return pat_ids\n\n\n\n async def suppression(self, pat_id, tsp):\n ''' Alert suppression task for a single patient\n and notify frontend that the patient has updated'''\n\n async def criteria_ready(conn, pat_id, tsp):\n '''\n criteria is ready when\n 1. criteria is updated after tsp\n 2. 
no new data in criteria_meas within lookbackhours (ETL will not update criteria)\n '''\n sql = '''\n SELECT count(*) > 0\n or (select count(*) = 0 from criteria_meas m\n where m.pat_id = '{pat_id}' and now() - tsp < (select value::interval from parameters where name = 'lookbackhours')) ready\n FROM criteria where pat_id = '{pat_id}'\n and update_date > '{tsp}'::timestamptz\n '''.format(pat_id=pat_id, tsp=tsp)\n cnt = await conn.fetch(sql)\n\n return cnt[0]['ready']\n\n async with self.db_pool.acquire() as conn:\n n = 0\n N = 60\n\n logging.info(\"enter suppression task for {} - {}\".format(pat_id, tsp))\n while not await criteria_ready(conn, pat_id, tsp):\n await asyncio.sleep(10)\n n += 1\n logging.info(\"retry criteria_ready {} times for {}\".format(n, pat_id))\n if n >= 60:\n break\n if n < 60:\n logging.info(\"criteria is ready for {}\".format(pat_id))\n sql = '''\n select update_suppression_alert('{pat_id}', '{channel}', '{model}', '{notify}');\n '''.format(pat_id=pat_id, channel=self.channel, model=self.model, notify=self.notify_web)\n logging.info(\"suppression sql: {}\".format(sql))\n await conn.fetch(sql)\n logging.info(\"generate suppression alert for {}\".format(pat_id))\n else:\n logging.info(\"criteria is not ready for {}\".format(pat_id))\n\n\n\n def garbage_collect_suppression_tasks(self, hosp):\n for task in self.suppression_tasks.get(hosp, []):\n task.cancel()\n self.suppression_tasks[hosp] = []\n\n\n\n async def alert_queue_consumer(self):\n '''\n Check message queue and process messages\n '''\n logging.info(\"alert_queue_consumer started\")\n while True:\n msg = await self.alert_message_queue.get()\n logging.info(\"alert_message_queue recv msg: {}\".format(msg))\n # Predictor finished\n if msg.get('type') == 'FIN':\n if self.model == 'lmc' or self.model == 'trews-jit':\n if self.TREWS_ETL_SUPPRESSION == 1:\n suppression_future = asyncio.ensure_future(self.run_suppression(msg), loop=self.loop)\n elif self.TREWS_ETL_SUPPRESSION == 2:\n suppression_future = asyncio.ensure_future(self.run_suppression_mode_2(msg), loop=self.loop)\n else:\n logging.error(\"Unknown model: {}\".format(self.model))\n # self.suppression_tasks[msg['hosp']].append(suppression_future)\n # logging.info(\"create {model} suppression task for {}\".format(self.model,msg['hosp']))\n logging.info(\"alert_queue_consumer quit\")\n\n async def suppression(self, pat_id, tsp):\n ''' Alert suppression task for a single patient\n and notify frontend that the patient has updated'''\n\n async def run_suppression_mode_2(self, msg):\n t_fin = dt.datetime.now()\n # if msg['hosp']+msg['time'] in self.job_status:\n if msg['job_id'] in self.job_status:\n t_start = self.job_status[msg['job_id']]['t_start']\n self.cloudwatch_logger.push_many(\n dimension_name = 'AlertServer',\n metric_names = [\n 'prediction_time_{}{}'.format(msg['hosp'], '_push' if self.push_based else ''),\n 'prediction_enc_cnt_in_{}{}'.format(msg['hosp'], '_push' if self.push_based else ''),\n 'prediction_enc_cnt_out_{}{}'.format(msg['hosp'], '_push' if self.push_based else '')],\n metric_values = [\n (t_fin - t_start).total_seconds(),\n len(msg['enc_ids']),\n len(msg['predicted_enc_ids'])],\n metric_units = ['Seconds', 'Count', 'Count']\n )\n logging.info(\"start to run suppression mode 2 for msg {}\".format(msg))\n tsp = msg['time']\n enc_id_str = ','.join([str(i) for i in msg['enc_ids'] if i])\n hospital = msg['hosp']\n logging.info(\"received FIN for enc_ids: {}\".format(enc_id_str))\n # calculate criteria here\n # NOTE: I turst the enc_ids from FIN 
msg\n async with self.db_pool.acquire() as conn:\n if self.notify_web:\n if self.push_based:\n job_id = msg['job_id']\n await self.calculate_criteria_enc(conn, msg['enc_ids'])\n sql = '''\n with pats as (\n select p.enc_id, p.pat_id from pat_enc p\n where p.enc_id in ({enc_ids})\n ),\n refreshed as (\n insert into refreshed_pats (refreshed_tsp, pats)\n select now(), jsonb_agg(pat_id) from pats\n returning id\n )\n select pg_notify('{channel}', 'invalidate_cache_batch:' || id || ':' || '{model}') from refreshed;\n '''.format(channel=self.channel, model=self.model, enc_ids=enc_id_str)\n else:\n await self.calculate_criteria_hospital(conn, hospital)\n sql = '''\n with pats as (\n select p.enc_id, p.pat_id from pat_enc p\n inner join get_latest_enc_ids('{hosp}') e on p.enc_id = e.enc_id\n ),\n refreshed as (\n insert into refreshed_pats (refreshed_tsp, pats)\n select now(), jsonb_agg(pat_id) from pats\n returning id\n )\n select pg_notify('{channel}', 'invalidate_cache_batch:' || id || ':' || '{model}') from refreshed;\n '''.format(channel=self.channel, model=self.model, enc_id_str=enc_id_str, hosp=msg['hosp'])\n logging.info(\"trews alert sql: {}\".format(sql))\n await conn.fetch(sql)\n logging.info(\"generated trews alert for {}\".format(hospital))\n logging.info(\"complete to run suppression mode 2 for msg {}\".format(msg))\n t_end = dt.datetime.now()\n if msg['job_id'] in self.job_status:\n t_start = self.job_status[msg['job_id']]['t_start']\n self.cloudwatch_logger.push_many(\n dimension_name = 'AlertServer',\n metric_names = ['e2e_time_{}{}'.format(msg['hosp'], '_push' if self.push_based else ''),\n 'criteria_time_{}{}'.format(msg['hosp'], '_push' if self.push_based else ''),\n ],\n metric_values = [(t_end - t_start).total_seconds(),\n (t_end - t_fin).total_seconds(),\n ],\n metric_units = ['Seconds','Seconds']\n )\n self.job_status.pop(msg['job_id'], None)\n\n\n async def run_suppression(self, msg):\n # Wait for Advance Criteria Snapshot to finish and then start generating notifications\n logging.info(\"start to run suppression for msg {}\".format(msg))\n tsp = msg['time']\n pat_ids = await self.convert_enc_ids_to_pat_ids(msg['enc_ids'])\n pats_str = ','.join([str(i) for i in pat_ids])\n hospital = msg['hosp']\n logging.info(\"received FIN for enc_ids: {}\".format(pats_str))\n\n async def criteria_ready(conn, enc_ids, tsp):\n '''\n criteria is ready when\n 1. criteria is updated after tsp\n 2. 
no new data in criteria_meas within lookbackhours (ETL will not update criteria)\n '''\n sql = '''\n with pats as (\n select distinct enc_id from criteria where enc_id in ({enc_ids})\n ),\n updated_pats as (\n select distinct enc_id from criteria where enc_id in ({enc_ids}) and update_date >= '{tsp}'::timestamptz\n )\n SELECT * from pats except select * from updated_pats\n '''.format(enc_ids=enc_ids, tsp=tsp)\n cnt = await conn.fetch(sql)\n if cnt is None or len(cnt) == 0:\n logging.info(\"criteria is ready\")\n return True\n else:\n logging.info(\"criteria is not ready ({})\".format(len(cnt)))\n return False\n\n async with self.db_pool.acquire() as conn:\n n = 0\n N = 60\n\n logging.info(\"enter suppression task for {}\".format(msg))\n while not await criteria_ready(conn, pats_str, tsp):\n await asyncio.sleep(10)\n n += 1\n logging.info(\"retry criteria_ready {} times for {}\".format(n, pats_str))\n if n >= 60:\n break\n if n < 60:\n if self.notify_web:\n sql = '''\n with pats as (\n select enc_id, pat_id from pat_enc where enc_id in ({pats})\n ),\n alerts as (\n select update_suppression_alert(enc_id, '{channel}', '{model}', 'false') from pats),\n refreshed as (\n insert into refreshed_pats (refreshed_tsp, pats)\n select now(), jsonb_agg(pat_id) from pats\n returning id\n )\n select pg_notify('{channel}', 'invalidate_cache_batch:' || id || ':' || '{model}') from refreshed;\n '''.format(channel=self.channel, model=self.model, pats=pats_str)\n else:\n sql = '''\n with pats as (\n select enc_id from pat_enc where enc_id in ({pats})\n )\n select update_suppression_alert(enc_id, '{channel}', '{model}', 'false') from pats)\n '''.format(channel=self.channel, model=self.model, pats=pats_str)\n logging.info(\"lmc suppression sql: {}\".format(sql))\n await conn.fetch(sql)\n logging.info(\"generate suppression alert for {}\".format(hospital))\n else:\n logging.info(\"criteria is not ready for {}\".format(pats_str))\n\n async def distribute_calculate_criteria(self, conn, job_id):\n server = 'dev_db' if 'dev' in self.channel else 'prod_db'\n hospital = None\n if 'hcgh' in job_id:\n hospital = 'HCGH'\n elif 'bmc' in job_id:\n hospital = 'BMC'\n elif 'jhh' in job_id:\n hospital = 'JHH'\n else:\n logging.error(\"Invalid job id: {}\".format(job_id))\n if hospital:\n sql = \"select garbage_collection('{}');\".format(hospital)\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n sql = '''\n select distribute_advance_criteria_snapshot_for_job('{server}', {hours}, '{job_id}', {nprocs});\n '''.format(server=server,hours=self.lookbackhours,job_id=job_id,nprocs=self.nprocs)\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n\n async def distribute_calculate_criteria_hospital(self, conn, hospital):\n server = 'dev_db' if 'dev' in self.channel else 'prod_db'\n sql = \"select garbage_collection('{}');\".format(hospital)\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n sql = '''\n select distribute_advance_criteria_snapshot_for_online_hospital('{server}', '{hospital}', {nprocs});\n '''.format(server=server,hospital=hospital,nprocs=self.nprocs)\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n\n async def calculate_criteria_hospital(self, conn, hospital):\n sql = \"select garbage_collection('{}', '{}');\".format(hospital, self.workspace)\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n sql = '''\n select advance_criteria_snapshot(enc_id) from 
get_latest_enc_ids('{hospital}');\n '''.format(hospital=hospital)\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n\n async def calculate_criteria_enc(self, conn, enc_ids):\n sql = \"select garbage_collection_enc_ids(array[{}],'{}')\".format(','.join([str(enc_id) for enc_id in enc_ids]), self.workspace)\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n sql = 'select advance_criteria_snapshot_enc_ids(array[{}])'.format(','.join([str(enc_id) for enc_id in enc_ids]))\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n logging.info(\"complete calculate_criteria_enc\")\n\n async def calculate_criteria_push(self, conn, job_id, excluded=None):\n sql = '''\n select garbage_collection(enc_id, '{workspace}')\n from (select distinct enc_id from {workspace}.cdm_t\n where job_id = '{job_id}' {where}) e;\n '''.format(workspace=self.workspace, job_id=job_id, where=excluded)\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n sql = '''\n select advance_criteria_snapshot_batch(\n 'select distinct enc_id from {workspace}.cdm_t where job_id = ''{job_id}'' {where}'\n );\n '''.format(workspace=self.workspace, job_id=job_id, where=excluded)\n # sql = '''\n # select advance_criteria_snapshot(enc_id)\n # from (select distinct enc_id from {workspace}.cdm_t\n # where job_id = '{job_id}' {where}) e;\n # '''.format(workspace=self.workspace, job_id=job_id, where=excluded)\n logging.info(\"calculate_criteria sql: {}\".format(sql))\n await conn.fetch(sql)\n logging.info(\"complete calculate_criteria_enc\")\n\n async def get_enc_ids_to_predict(self, job_id):\n async with self.db_pool.acquire() as conn:\n # rule to select predictable enc_ids:\n # 1. has changes delta twf > 0\n # 2. adult\n # 3. 
HCGH patients only\n sql = '''\n select distinct t.enc_id\n from {workspace}.cdm_t t\n inner join cdm_twf twf on t.enc_id = twf.enc_id\n left join {workspace}.{job_id}_cdm_twf twf_delta on twf_delta.enc_id = t.enc_id\n left join {workspace}.{job_id}_cdm_twf_del twf_del on twf_del.enc_id = t.enc_id\n inner join cdm_s s on twf.enc_id = s.enc_id\n inner join cdm_s s2 on twf.enc_id = s2.enc_id\n where s.fid = 'age' and s.value::float >= 18.0\n and s2.fid = 'hospital' and s2.value = 'HCGH'\n and job_id = '{job_id}' and (twf_delta.enc_id is not null or twf_del.enc_id is not null)\n '''.format(workspace=self.workspace, job_id=job_id)\n res = await conn.fetch(sql)\n predict_enc_ids = [row[0] for row in res]\n return predict_enc_ids\n\n async def run_trews_alert(self, job_id, hospital, excluded_enc_ids=None):\n async with self.db_pool.acquire() as conn:\n if self.push_based and hospital == 'PUSH':\n # calculate criteria here\n excluded = ''\n if excluded_enc_ids:\n excluded = 'and enc_id not in ({})'.format(','.join([str(id) for id in excluded_enc_ids]))\n await self.calculate_criteria_push(conn, job_id, excluded=excluded)\n if self.notify_web:\n sql = '''\n with pats as (\n select e.enc_id, p.pat_id from (\n select distinct enc_id from {workspace}.cdm_t\n where job_id = '{job_id}'\n {where}\n ) e\n inner join pat_enc p on e.enc_id = p.enc_id\n ),\n refreshed as (\n insert into refreshed_pats (refreshed_tsp, pats)\n select now(), jsonb_agg(pat_id) from pats\n returning id\n )\n select pg_notify('{channel}', 'invalidate_cache_batch:' || id || ':' || '{model}') from refreshed;\n '''.format(channel=self.channel, model=self.model, where=excluded, workspace=self.workspace, job_id=job_id)\n logging.info(\"trews alert sql: {}\".format(sql))\n await conn.fetch(sql)\n logging.info(\"generated trews alert for {} without prediction\".format(hospital))\n elif self.TREWS_ETL_SUPPRESSION == 2:\n # calculate criteria here\n await self.calculate_criteria_hospital(conn, hospital)\n if self.notify_web:\n sql = '''\n with pats as (\n select e.enc_id, p.pat_id from get_latest_enc_ids('{hospital}') e inner join pat_enc p on e.enc_id = p.enc_id\n ),\n refreshed as (\n insert into refreshed_pats (refreshed_tsp, pats)\n select now(), jsonb_agg(pat_id) from pats\n returning id\n )\n select pg_notify('{channel}', 'invalidate_cache_batch:' || id || ':' || '{model}') from refreshed;\n '''.format(channel=self.channel, model=self.model, hospital=hospital)\n logging.info(\"trews alert sql: {}\".format(sql))\n await conn.fetch(sql)\n logging.info(\"generated trews alert for {}\".format(hospital))\n elif self.TREWS_ETL_SUPPRESSION == 1:\n if self.notify_web:\n sql = '''\n with pats as (\n select e.enc_id, p.pat_id from get_latest_enc_ids('{hospital}') e inner join pat_enc p on e.enc_id = p.enc_id\n ),\n alerts as (\n select update_suppression_alert(enc_id, '{channel}', '{model}', 'false') from pats),\n refreshed as (\n insert into refreshed_pats (refreshed_tsp, pats)\n select now(), jsonb_agg(pat_id) from pats\n returning id\n )\n select pg_notify('{channel}', 'invalidate_cache_batch:' || id || ':' || '{model}') from refreshed;\n '''.format(channel=self.channel, model=self.model, hospital=hospital)\n else:\n sql = '''\n select update_suppression_alert(enc_id, '{channel}', '{model}', 'false') from\n (select distinct t.enc_id from cdm_t t\n inner join get_latest_enc_ids('{hospital}') h on h.enc_id = t.enc_id\n where now() - tsp < (select value::interval from parameters where name = 'lookbackhours')) sub;\n 
'''.format(channel=self.channel, model=self.model, hospital=hospital)\n logging.info(\"trews suppression sql: {}\".format(sql))\n await conn.fetch(sql)\n logging.info(\"generate trews suppression alert for {}\".format(hospital))\n\n async def connection_handler(self, reader, writer):\n ''' Alert server connection handler '''\n addr = writer.transport.get_extra_info('peername')\n sock = writer.transport.get_extra_info('socket')\n\n if not addr:\n logging.error('Connection made without a valid remote address, (Timeout %s)' % str(sock.gettimeout()))\n return\n else:\n logging.debug('Connection from %s (Timeout %s)' % (str(addr), str(sock.gettimeout())))\n\n # Get the message that started this callback function\n message = await protocol.read_message(reader, writer)\n logging.info(\"connection_handler: recv msg from {} type {}\".format(message.get('from'), message.get('type')))\n if message.get('from') == 'predictor':\n return await self.predictor_manager.register(reader, writer, message)\n\n elif message.get('type') == 'ETL':\n self.cloudwatch_logger.push_many(\n dimension_name = 'AlertServer',\n metric_names = ['etl_done_{}'.format(message['hosp'])],\n metric_values = [1],\n metric_units = ['Count']\n )\n # self.job_status[message['hosp'] + message['time']] = {\n # 'msg': message, 't_start': dt.datetime.now()\n # }\n if self.model == 'lmc' or self.model == 'trews-jit':\n job_id_items = message['job_id'].split('_')\n t_start = parser.parse(job_id_items[-1] if len(job_id_items) == 4 else job_id_items[-2])\n if self.push_based:\n # create predict task for predictor\n predict_enc_ids = await self.get_enc_ids_to_predict(message['job_id'])\n if predict_enc_ids:\n self.job_status[message['job_id']] = {'t_start': t_start}\n self.predictor_manager.cancel_predict_tasks(job_id=message['job_id'])\n self.predictor_manager.create_predict_tasks(hosp=message['hosp'],\n time=message['time'],\n job_id=message['job_id'],\n active_encids=predict_enc_ids)\n else:\n logging.info(\"predict_enc_ids is None or empty\")\n # create criteria update task for patients who do not need to predict\n t_fin = dt.datetime.now()\n await self.run_trews_alert(message['job_id'],message['hosp'], excluded_enc_ids=predict_enc_ids)\n t_end = dt.datetime.now()\n self.cloudwatch_logger.push_many(\n dimension_name = 'AlertServer',\n metric_names = ['e2e_time_{}{}'.format(message['hosp'], '_short' if self.push_based else ''),\n 'criteria_time_{}{}'.format(message['hosp'], '_short' if self.push_based else ''),\n ],\n metric_values = [(t_end - t_start).total_seconds(),\n (t_end - t_fin).total_seconds(),\n ],\n metric_units = ['Seconds','Seconds']\n )\n elif message.get('hosp') in self.hospital_to_predict:\n if self.model == 'lmc':\n self.garbage_collect_suppression_tasks(message['hosp'])\n self.job_status[message['job_id']] = {'t_start': t_start}\n self.predictor_manager.cancel_predict_tasks(job_id=message['hosp'])\n self.predictor_manager.create_predict_tasks(hosp=message['hosp'],\n time=message['time'],\n job_id=message['job_id'])\n else:\n logging.info(\"skip prediction for msg: {}\".format(message))\n t_fin = dt.datetime.now()\n await self.run_trews_alert(message['job_id'],message['hosp'])\n t_end = dt.datetime.now()\n self.cloudwatch_logger.push_many(\n dimension_name = 'AlertServer',\n metric_names = ['e2e_time_{}'.format(message['hosp']),\n 'criteria_time_{}'.format(message['hosp']),\n ],\n metric_values = [(t_end - t_start).total_seconds(),\n (t_end - t_fin).total_seconds(),\n ],\n metric_units = ['Seconds','Seconds']\n )\n elif 
self.model == 'trews':\n await self.run_trews_alert(message['job_id'],message['hosp'])\n else:\n logging.error(\"Unknown suppression model {}\".format(self.model))\n else:\n logging.error(\"Don't know how to process this message\")\n\n\n\n def start(self):\n ''' Start the alert server and queue consumer '''\n self.loop.run_until_complete(self.async_init())\n consumer_future = asyncio.ensure_future(self.alert_queue_consumer())\n server_future = self.loop.run_until_complete(asyncio.start_server(\n self.connection_handler, self.alert_dns, self.alert_server_port, loop=self.loop\n ))\n logging.info('Serving on {}'.format(server_future.sockets[0].getsockname()))\n # Run server until Ctrl+C is pressed\n try:\n self.loop.run_forever()\n except KeyboardInterrupt:\n print(\"Exiting\")\n consumer_future.cancel()\n # Close the server\n logging.info('received stop signal, cancelling tasks...')\n for task in asyncio.Task.all_tasks():\n task.cancel()\n logging.info('bye, exiting in a minute...')\n server_future.close()\n self.loop.run_until_complete(server_future.wait_closed())\n self.loop.stop()\n finally:\n self.loop.close()\n\n\ndef main():\n loop = asyncio.get_event_loop()\n server = AlertServer(loop)\n server.start()\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.set_option"
]
] |
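The `apis` field of the row above lists `pandas.set_option`. A minimal sketch of that call, with illustrative option names rather than anything taken from the file itself:

```python
import pandas as pd

# Illustrative display options; any valid option key/value pair works the same way.
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 120)

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
print(df)
```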
LucaCappelletti94/EnsmallenGraph | [
"572532b6d3f4352bf58f9ccca955376acd95fd89"
] | [
"notebooks_and_scripts/graph_miner/repositories/monarch_initiative_graph_repository.py"
] | [
"\"\"\"Sub-module handling the retrieval and building of graphs from MonarchInitiative.\"\"\"\nfrom typing import List, Dict\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport compress_json\nfrom .graph_repository import GraphRepository\n\n\nclass MonarchInitiativeGraphRepository(GraphRepository):\n\n def __init__(self):\n \"\"\"Create new String Graph Repository object.\"\"\"\n super().__init__()\n # We load the data that cannot be automatically scraped\n self._data = compress_json.local_load(\"monarch_initiative.json\")\n # The arguments keys used to load this graph\n general_kwargs = {\n \"sources_column\": \"subject\",\n \"destinations_column\": \"object\",\n \"edge_list_edge_types_column\": \"predicate\",\n \"nodes_column\": \"id\",\n \"node_list_node_types_column\": \"category\",\n \"node_types_separator\": \"|\",\n \"name\": \"Monarch\"\n }\n # We extend the data through scraping the Google Bucket\n base_url = \"https://storage.googleapis.com/monarch-ingest/\"\n xml = pd.read_xml(base_url).fillna(\"NaN\")\n xml = xml[xml.Key.str.endswith(\"/monarch-kg.tar.gz\")]\n for path in xml.Key:\n version = path.split(\"/\")[0]\n self._data[\"Monarch\"][version] = {\n \"urls\": [\n base_url + path\n ],\n \"arguments\": {\n \"edge_path\": \"monarch-kg/monarch-kg_edges.tsv\",\n \"node_path\": \"monarch-kg/monarch-kg_nodes.tsv\",\n **general_kwargs\n }\n }\n\n def build_stored_graph_name(self, partial_graph_name: str) -> str:\n \"\"\"Return built graph name.\n\n Parameters\n -----------------------\n partial_graph_name: str,\n Partial graph name to be built.\n\n Returns\n -----------------------\n Complete name of the graph.\n \"\"\"\n return partial_graph_name\n\n def get_formatted_repository_name(self) -> str:\n \"\"\"Return formatted repository name.\"\"\"\n return \"MonarchInitiative\"\n\n def get_graph_arguments(\n self,\n graph_name: str,\n version: str\n ) -> List[str]:\n \"\"\"Return arguments for the given graph and version.\n\n Parameters\n -----------------------\n graph_name: str,\n Name of graph to retrievel arguments for.\n version: str,\n Version to retrieve this information for.\n\n Returns\n -----------------------\n The arguments list to use to build the graph.\n \"\"\"\n return self._data[graph_name][version][\"arguments\"]\n\n def get_graph_versions(\n self,\n graph_name: str,\n ) -> List[str]:\n \"\"\"Return list of versions of the given graph.\n\n Parameters\n -----------------------\n graph_name: str,\n Name of graph to retrieve versions for.\n\n Returns\n -----------------------\n List of versions for the given graph.\n \"\"\"\n return list(self._data[graph_name].keys())\n\n def get_graph_urls(\n self,\n graph_name: str,\n version: str\n ) -> List[str]:\n \"\"\"Return urls for the given graph and version.\n\n Parameters\n -----------------------\n graph_name: str,\n Name of graph to retrievel URLs for.\n version: str,\n Version to retrieve this information for.\n\n Returns\n -----------------------\n The urls list from where to download the graph data.\n \"\"\"\n return self._data[graph_name][version][\"urls\"]\n\n def get_graph_references(self, graph_name: str, version: str) -> List[str]:\n \"\"\"Return url for the given graph.\n\n Parameters\n -----------------------\n graph_name: str,\n Name of graph to retrievel URLs for.\n version: str,\n Version to retrieve this information for.\n\n Returns\n -----------------------\n Citations relative to the Kg graphs.\n \"\"\"\n return [\n open(\n \"{}/models/MonarchInitiative.bib\".format(\n 
os.path.dirname(os.path.abspath(__file__))\n ),\n \"r\"\n ).read()\n ]\n\n def get_graph_paths(self, graph_name: str, urls: List[str]) -> List[str]:\n \"\"\"Return url for the given graph.\n\n Parameters\n -----------------------\n graph_name: str,\n Name of graph to retrievel URLs for.\n urls: List[str],\n Urls from where to download the graphs.\n\n Returns\n -----------------------\n The paths where to store the downloaded graphs.\n\n Implementative details\n -----------------------\n It is returned None because the path that is automatically\n used by downloader is sufficiently precise.\n \"\"\"\n return None\n\n def get_graph_list(self) -> List[str]:\n \"\"\"Return list of graph names.\"\"\"\n return list(self._data.keys())\n"
] | [
[
"pandas.read_xml"
]
] |
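The repository file above discovers graph versions by calling `pandas.read_xml` on a Google Cloud Storage bucket index and filtering the resulting `Key` column. A minimal sketch of that pattern, using an inline XML listing and made-up key names instead of the real bucket URL (the stdlib `etree` parser is chosen here so no lxml dependency is assumed):

```python
import io

import pandas as pd

# Inline stand-in for the bucket's XML index (the real code reads it from a URL).
xml_listing = io.StringIO(
    "<ListBucketResult>"
    "<Contents><Key>2022-05-10/monarch-kg.tar.gz</Key></Contents>"
    "<Contents><Key>2022-05-10/other-file.txt</Key></Contents>"
    "</ListBucketResult>"
)

# read_xml turns each repeated <Contents> element into a row with a "Key" column.
xml = pd.read_xml(xml_listing, parser="etree").fillna("NaN")
xml = xml[xml.Key.str.endswith("/monarch-kg.tar.gz")]
print(xml.Key.tolist())  # ['2022-05-10/monarch-kg.tar.gz']
```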
NeilDG/SGID-PFF | [
"e027ac65e63f3c052665290cd0438bb7bdeabf9f"
] | [
"code/logger/logger.py"
] | [
"import torch\nimport imageio\nimport numpy as np\nimport os\nimport datetime\n\nimport matplotlib\n\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\n\nclass Logger:\n def __init__(self, args, init_loss_log):\n self.args = args\n self.psnr_log = torch.Tensor()\n self.loss_log = {}\n for key in init_loss_log.keys():\n self.loss_log[key] = torch.Tensor()\n\n if args.load == '.':\n if args.save == '.':\n args.save = datetime.datetime.now().strftime('%Y%m%d_%H:%M')\n self.dir = args.experiment_dir + args.save\n else:\n self.dir = args.experiment_dir + args.load\n if not os.path.exists(self.dir):\n args.load = '.'\n else:\n self.loss_log = torch.load(self.dir + '/loss_log.pt')\n self.psnr_log = torch.load(self.dir + '/psnr_log.pt')\n print('Continue from epoch {}...'.format(len(self.psnr_log)))\n\n if not os.path.exists(self.dir):\n os.makedirs(self.dir)\n if not os.path.exists(self.dir + '/model'):\n os.makedirs(self.dir + '/model')\n if not os.path.exists(self.dir + '/result/' + self.args.data_test):\n print(\"Creating dir for saving images...\", self.dir + '/result/' + self.args.data_test)\n os.makedirs(self.dir + '/result/' + self.args.data_test)\n\n print('Save Path : {}'.format(self.dir))\n\n open_type = 'a' if os.path.exists(self.dir + '/log.txt') else 'w'\n self.log_file = open(self.dir + '/log.txt', open_type)\n with open(self.dir + '/config.txt', open_type) as f:\n f.write('From epoch {}...'.format(len(self.psnr_log)) + '\\n\\n')\n for arg in vars(args):\n f.write('{}: {}\\n'.format(arg, getattr(args, arg)))\n f.write('\\n')\n\n def write_log(self, log):\n print(log)\n self.log_file.write(log + '\\n')\n\n def save(self, trainer, epoch, is_best):\n trainer.model.save(self.dir, epoch, is_best)\n torch.save(self.loss_log, os.path.join(self.dir, 'loss_log.pt'))\n torch.save(self.psnr_log, os.path.join(self.dir, 'psnr_log.pt'))\n torch.save(trainer.optimizer.state_dict(), os.path.join(self.dir, 'optimizer.pt'))\n self.plot_loss_log(epoch)\n self.plot_psnr_log(epoch)\n\n def save_images(self, filename, save_list):\n dirname = '{}/result/{}'.format(self.dir, self.args.data_test)\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n filename = '{}/{}'.format(dirname, filename)\n if self.args.task == '.':\n postfix = ['combine']\n else:\n postfix = ['combine']\n for img, post in zip(save_list, postfix):\n imageio.imwrite('{}_{}.png'.format(filename, post), img)\n\n def start_log(self, train=True):\n if train:\n for key in self.loss_log.keys():\n self.loss_log[key] = torch.cat((self.loss_log[key], torch.zeros(1)))\n else:\n self.psnr_log = torch.cat((self.psnr_log, torch.zeros(1)))\n\n def report_log(self, item, train=True):\n if train:\n for key in item.keys():\n self.loss_log[key][-1] += item[key]\n else:\n self.psnr_log[-1] += item\n\n def end_log(self, n_div, train=True):\n if train:\n for key in self.loss_log.keys():\n self.loss_log[key][-1].div_(n_div)\n else:\n self.psnr_log[-1].div_(n_div)\n\n def display_loss(self, batch):\n n_samples = batch + 1\n log = []\n for key in self.loss_log.keys():\n log.append('[{}: {:.4f}]'.format(key, self.loss_log[key][-1] / n_samples))\n return ''.join(log)\n\n def plot_loss_log(self, epoch):\n axis = np.linspace(1, epoch, epoch)\n for key in self.loss_log.keys():\n label = '{} Loss'.format(key)\n fig = plt.figure()\n plt.title(label)\n plt.plot(axis, self.loss_log[key].numpy())\n plt.legend()\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.grid(True)\n plt.savefig(os.path.join(self.dir, 'loss_{}.pdf'.format(key)))\n plt.close(fig)\n\n 
def plot_psnr_log(self, epoch):\n axis = np.linspace(1, epoch, epoch)\n fig = plt.figure()\n plt.title('PSNR Graph')\n plt.plot(axis, self.psnr_log.numpy())\n plt.legend()\n plt.xlabel('Epochs')\n plt.ylabel('PSNR')\n plt.grid(True)\n plt.savefig(os.path.join(self.dir, 'psnr.pdf'))\n plt.close(fig)\n\n def done(self):\n self.log_file.close()\n"
] | [
[
"matplotlib.use",
"torch.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"torch.load",
"numpy.linspace",
"torch.Tensor"
]
] |
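The logger above accumulates per-epoch losses in `torch.Tensor` objects and plots them headlessly via the Agg backend. A minimal sketch of that plotting loop, with toy loss values and an illustrative label:

```python
import matplotlib

matplotlib.use("Agg")  # headless backend, as in the logger above
from matplotlib import pyplot as plt
import numpy as np
import torch

# Toy per-epoch loss history in the same shape the logger accumulates.
loss_log = torch.Tensor([1.0, 0.7, 0.5, 0.4])
epoch = len(loss_log)
axis = np.linspace(1, epoch, epoch)

fig = plt.figure()
plt.title("L1 Loss")
plt.plot(axis, loss_log.numpy(), label="L1 Loss")
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.grid(True)
plt.savefig("loss_L1.pdf")
plt.close(fig)
```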
magic20191/spark | [
"74ebef243c18e7a8f32bf90ea75ab6afed9e3132"
] | [
"python/pyspark/ml/linalg/__init__.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nMLlib utilities for linear algebra. For dense vectors, MLlib\nuses the NumPy `array` type, so you can simply pass NumPy arrays\naround. For sparse vectors, users can construct a :class:`SparseVector`\nobject from MLlib or pass SciPy `scipy.sparse` column vectors if\nSciPy is available in their environment.\n\"\"\"\n\nimport sys\nimport array\nimport struct\n\nimport numpy as np\n\nfrom pyspark.sql.types import (\n UserDefinedType,\n StructField,\n StructType,\n ArrayType,\n DoubleType,\n IntegerType,\n ByteType,\n BooleanType,\n)\n\n\n__all__ = [\n \"Vector\",\n \"DenseVector\",\n \"SparseVector\",\n \"Vectors\",\n \"Matrix\",\n \"DenseMatrix\",\n \"SparseMatrix\",\n \"Matrices\",\n]\n\n\n# Check whether we have SciPy. MLlib works without it too, but if we have it, some methods,\n# such as _dot and _serialize_double_vector, start to support scipy.sparse matrices.\n\ntry:\n import scipy.sparse\n\n _have_scipy = True\nexcept BaseException:\n # No SciPy in environment, but that's okay\n _have_scipy = False\n\n\ndef _convert_to_vector(d):\n if isinstance(d, Vector):\n return d\n elif type(d) in (array.array, np.array, np.ndarray, list, tuple, range):\n return DenseVector(d)\n elif _have_scipy and scipy.sparse.issparse(d):\n assert d.shape[1] == 1, \"Expected column vector\"\n # Make sure the converted csc_matrix has sorted indices.\n csc = d.tocsc()\n if not csc.has_sorted_indices:\n csc.sort_indices()\n return SparseVector(d.shape[0], csc.indices, csc.data)\n else:\n raise TypeError(\"Cannot convert type %s into Vector\" % type(d))\n\n\ndef _vector_size(v):\n \"\"\"\n Returns the size of the vector.\n\n Examples\n --------\n >>> _vector_size([1., 2., 3.])\n 3\n >>> _vector_size((1., 2., 3.))\n 3\n >>> _vector_size(array.array('d', [1., 2., 3.]))\n 3\n >>> _vector_size(np.zeros(3))\n 3\n >>> _vector_size(np.zeros((3, 1)))\n 3\n >>> _vector_size(np.zeros((1, 3)))\n Traceback (most recent call last):\n ...\n ValueError: Cannot treat an ndarray of shape (1, 3) as a vector\n \"\"\"\n if isinstance(v, Vector):\n return len(v)\n elif type(v) in (array.array, list, tuple, range):\n return len(v)\n elif type(v) == np.ndarray:\n if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):\n return len(v)\n else:\n raise ValueError(\"Cannot treat an ndarray of shape %s as a vector\" % str(v.shape))\n elif _have_scipy and scipy.sparse.issparse(v):\n assert v.shape[1] == 1, \"Expected column vector\"\n return v.shape[0]\n else:\n raise TypeError(\"Cannot treat type %s as a vector\" % type(v))\n\n\ndef _format_float(f, digits=4):\n s = str(round(f, digits))\n if \".\" in s:\n s = s[: s.index(\".\") + 1 + digits]\n return s\n\n\ndef _format_float_list(xs):\n return [_format_float(x) for x in xs]\n\n\ndef 
_double_to_long_bits(value):\n if np.isnan(value):\n value = float(\"nan\")\n # pack double into 64 bits, then unpack as long int\n return struct.unpack(\"Q\", struct.pack(\"d\", value))[0]\n\n\nclass VectorUDT(UserDefinedType):\n \"\"\"\n SQL user-defined type (UDT) for Vector.\n \"\"\"\n\n @classmethod\n def sqlType(cls):\n return StructType(\n [\n StructField(\"type\", ByteType(), False),\n StructField(\"size\", IntegerType(), True),\n StructField(\"indices\", ArrayType(IntegerType(), False), True),\n StructField(\"values\", ArrayType(DoubleType(), False), True),\n ]\n )\n\n @classmethod\n def module(cls):\n return \"pyspark.ml.linalg\"\n\n @classmethod\n def scalaUDT(cls):\n return \"org.apache.spark.ml.linalg.VectorUDT\"\n\n def serialize(self, obj):\n if isinstance(obj, SparseVector):\n indices = [int(i) for i in obj.indices]\n values = [float(v) for v in obj.values]\n return (0, obj.size, indices, values)\n elif isinstance(obj, DenseVector):\n values = [float(v) for v in obj]\n return (1, None, None, values)\n else:\n raise TypeError(\"cannot serialize %r of type %r\" % (obj, type(obj)))\n\n def deserialize(self, datum):\n assert (\n len(datum) == 4\n ), \"VectorUDT.deserialize given row with length %d but requires 4\" % len(datum)\n tpe = datum[0]\n if tpe == 0:\n return SparseVector(datum[1], datum[2], datum[3])\n elif tpe == 1:\n return DenseVector(datum[3])\n else:\n raise ValueError(\"do not recognize type %r\" % tpe)\n\n def simpleString(self):\n return \"vector\"\n\n\nclass MatrixUDT(UserDefinedType):\n \"\"\"\n SQL user-defined type (UDT) for Matrix.\n \"\"\"\n\n @classmethod\n def sqlType(cls):\n return StructType(\n [\n StructField(\"type\", ByteType(), False),\n StructField(\"numRows\", IntegerType(), False),\n StructField(\"numCols\", IntegerType(), False),\n StructField(\"colPtrs\", ArrayType(IntegerType(), False), True),\n StructField(\"rowIndices\", ArrayType(IntegerType(), False), True),\n StructField(\"values\", ArrayType(DoubleType(), False), True),\n StructField(\"isTransposed\", BooleanType(), False),\n ]\n )\n\n @classmethod\n def module(cls):\n return \"pyspark.ml.linalg\"\n\n @classmethod\n def scalaUDT(cls):\n return \"org.apache.spark.ml.linalg.MatrixUDT\"\n\n def serialize(self, obj):\n if isinstance(obj, SparseMatrix):\n colPtrs = [int(i) for i in obj.colPtrs]\n rowIndices = [int(i) for i in obj.rowIndices]\n values = [float(v) for v in obj.values]\n return (\n 0,\n obj.numRows,\n obj.numCols,\n colPtrs,\n rowIndices,\n values,\n bool(obj.isTransposed),\n )\n elif isinstance(obj, DenseMatrix):\n values = [float(v) for v in obj.values]\n return (1, obj.numRows, obj.numCols, None, None, values, bool(obj.isTransposed))\n else:\n raise TypeError(\"cannot serialize type %r\" % (type(obj)))\n\n def deserialize(self, datum):\n assert (\n len(datum) == 7\n ), \"MatrixUDT.deserialize given row with length %d but requires 7\" % len(datum)\n tpe = datum[0]\n if tpe == 0:\n return SparseMatrix(*datum[1:])\n elif tpe == 1:\n return DenseMatrix(datum[1], datum[2], datum[5], datum[6])\n else:\n raise ValueError(\"do not recognize type %r\" % tpe)\n\n def simpleString(self):\n return \"matrix\"\n\n\nclass Vector:\n\n __UDT__ = VectorUDT()\n\n \"\"\"\n Abstract class for DenseVector and SparseVector\n \"\"\"\n\n def toArray(self):\n \"\"\"\n Convert the vector into an numpy.ndarray\n\n :return: numpy.ndarray\n \"\"\"\n raise NotImplementedError\n\n\nclass DenseVector(Vector):\n \"\"\"\n A dense vector represented by a value array. 
We use numpy array for\n storage and arithmetics will be delegated to the underlying numpy\n array.\n\n Examples\n --------\n >>> v = Vectors.dense([1.0, 2.0])\n >>> u = Vectors.dense([3.0, 4.0])\n >>> v + u\n DenseVector([4.0, 6.0])\n >>> 2 - v\n DenseVector([1.0, 0.0])\n >>> v / 2\n DenseVector([0.5, 1.0])\n >>> v * u\n DenseVector([3.0, 8.0])\n >>> u / v\n DenseVector([3.0, 2.0])\n >>> u % 2\n DenseVector([1.0, 0.0])\n >>> -v\n DenseVector([-1.0, -2.0])\n \"\"\"\n\n def __init__(self, ar):\n if isinstance(ar, bytes):\n ar = np.frombuffer(ar, dtype=np.float64)\n elif not isinstance(ar, np.ndarray):\n ar = np.array(ar, dtype=np.float64)\n if ar.dtype != np.float64:\n ar = ar.astype(np.float64)\n self.array = ar\n\n def __reduce__(self):\n return DenseVector, (self.array.tobytes(),)\n\n def numNonzeros(self):\n \"\"\"\n Number of nonzero elements. This scans all active values and count non zeros\n \"\"\"\n return np.count_nonzero(self.array)\n\n def norm(self, p):\n \"\"\"\n Calculates the norm of a DenseVector.\n\n Examples\n --------\n >>> a = DenseVector([0, -1, 2, -3])\n >>> a.norm(2)\n 3.7...\n >>> a.norm(1)\n 6.0\n \"\"\"\n return np.linalg.norm(self.array, p)\n\n def dot(self, other):\n \"\"\"\n Compute the dot product of two Vectors. We support\n (Numpy array, list, SparseVector, or SciPy sparse)\n and a target NumPy array that is either 1- or 2-dimensional.\n Equivalent to calling numpy.dot of the two vectors.\n\n Examples\n --------\n >>> dense = DenseVector(array.array('d', [1., 2.]))\n >>> dense.dot(dense)\n 5.0\n >>> dense.dot(SparseVector(2, [0, 1], [2., 1.]))\n 4.0\n >>> dense.dot(range(1, 3))\n 5.0\n >>> dense.dot(np.array(range(1, 3)))\n 5.0\n >>> dense.dot([1.,])\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F'))\n array([ 5., 11.])\n >>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F'))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n \"\"\"\n if type(other) == np.ndarray:\n if other.ndim > 1:\n assert len(self) == other.shape[0], \"dimension mismatch\"\n return np.dot(self.array, other)\n elif _have_scipy and scipy.sparse.issparse(other):\n assert len(self) == other.shape[0], \"dimension mismatch\"\n return other.transpose().dot(self.toArray())\n else:\n assert len(self) == _vector_size(other), \"dimension mismatch\"\n if isinstance(other, SparseVector):\n return other.dot(self)\n elif isinstance(other, Vector):\n return np.dot(self.toArray(), other.toArray())\n else:\n return np.dot(self.toArray(), other)\n\n def squared_distance(self, other):\n \"\"\"\n Squared distance of two Vectors.\n\n Examples\n --------\n >>> dense1 = DenseVector(array.array('d', [1., 2.]))\n >>> dense1.squared_distance(dense1)\n 0.0\n >>> dense2 = np.array([2., 1.])\n >>> dense1.squared_distance(dense2)\n 2.0\n >>> dense3 = [2., 1.]\n >>> dense1.squared_distance(dense3)\n 2.0\n >>> sparse1 = SparseVector(2, [0, 1], [2., 1.])\n >>> dense1.squared_distance(sparse1)\n 2.0\n >>> dense1.squared_distance([1.,])\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n \"\"\"\n assert len(self) == _vector_size(other), \"dimension mismatch\"\n if isinstance(other, SparseVector):\n return other.squared_distance(self)\n elif _have_scipy and scipy.sparse.issparse(other):\n return 
_convert_to_vector(other).squared_distance(self)\n\n if isinstance(other, Vector):\n other = other.toArray()\n elif not isinstance(other, np.ndarray):\n other = np.array(other)\n diff = self.toArray() - other\n return np.dot(diff, diff)\n\n def toArray(self):\n \"\"\"\n Returns the underlying numpy.ndarray\n \"\"\"\n return self.array\n\n @property\n def values(self):\n \"\"\"\n Returns the underlying numpy.ndarray\n \"\"\"\n return self.array\n\n def __getitem__(self, item):\n return self.array[item]\n\n def __len__(self):\n return len(self.array)\n\n def __str__(self):\n return \"[\" + \",\".join([str(v) for v in self.array]) + \"]\"\n\n def __repr__(self):\n return \"DenseVector([%s])\" % (\", \".join(_format_float(i) for i in self.array))\n\n def __eq__(self, other):\n if isinstance(other, DenseVector):\n return np.array_equal(self.array, other.array)\n elif isinstance(other, SparseVector):\n if len(self) != other.size:\n return False\n return Vectors._equals(list(range(len(self))), self.array, other.indices, other.values)\n return False\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n size = len(self)\n result = 31 + size\n nnz = 0\n i = 0\n while i < size and nnz < 128:\n if self.array[i] != 0:\n result = 31 * result + i\n bits = _double_to_long_bits(self.array[i])\n result = 31 * result + (bits ^ (bits >> 32))\n nnz += 1\n i += 1\n return result\n\n def __getattr__(self, item):\n return getattr(self.array, item)\n\n def __neg__(self):\n return DenseVector(-self.array)\n\n def _delegate(op):\n def func(self, other):\n if isinstance(other, DenseVector):\n other = other.array\n return DenseVector(getattr(self.array, op)(other))\n\n return func\n\n __add__ = _delegate(\"__add__\")\n __sub__ = _delegate(\"__sub__\")\n __mul__ = _delegate(\"__mul__\")\n __div__ = _delegate(\"__div__\")\n __truediv__ = _delegate(\"__truediv__\")\n __mod__ = _delegate(\"__mod__\")\n __radd__ = _delegate(\"__radd__\")\n __rsub__ = _delegate(\"__rsub__\")\n __rmul__ = _delegate(\"__rmul__\")\n __rdiv__ = _delegate(\"__rdiv__\")\n __rtruediv__ = _delegate(\"__rtruediv__\")\n __rmod__ = _delegate(\"__rmod__\")\n\n\nclass SparseVector(Vector):\n \"\"\"\n A simple sparse vector class for passing data to MLlib. Users may\n alternatively pass SciPy's {scipy.sparse} data types.\n \"\"\"\n\n def __init__(self, size, *args):\n \"\"\"\n Create a sparse vector, using either a dictionary, a list of\n (index, value) pairs, or two separate arrays of indices and\n values (sorted by index).\n\n Examples\n --------\n size : int\n Size of the vector.\n args\n Active entries, as a dictionary {index: value, ...},\n a list of tuples [(index, value), ...], or a list of strictly\n increasing indices and a list of corresponding values [index, ...],\n [value, ...]. Inactive entries are treated as zeros.\n\n Examples\n --------\n >>> SparseVector(4, {1: 1.0, 3: 5.5})\n SparseVector(4, {1: 1.0, 3: 5.5})\n >>> SparseVector(4, [(1, 1.0), (3, 5.5)])\n SparseVector(4, {1: 1.0, 3: 5.5})\n >>> SparseVector(4, [1, 3], [1.0, 5.5])\n SparseVector(4, {1: 1.0, 3: 5.5})\n >>> SparseVector(4, {1:1.0, 6:2.0})\n Traceback (most recent call last):\n ...\n AssertionError: Index 6 is out of the size of vector with size=4\n >>> SparseVector(4, {-1:1.0})\n Traceback (most recent call last):\n ...\n AssertionError: Contains negative index -1\n \"\"\"\n self.size = int(size)\n \"\"\" Size of the vector. 
\"\"\"\n assert 1 <= len(args) <= 2, \"must pass either 2 or 3 arguments\"\n if len(args) == 1:\n pairs = args[0]\n if type(pairs) == dict:\n pairs = pairs.items()\n pairs = sorted(pairs)\n self.indices = np.array([p[0] for p in pairs], dtype=np.int32)\n \"\"\" A list of indices corresponding to active entries. \"\"\"\n self.values = np.array([p[1] for p in pairs], dtype=np.float64)\n \"\"\" A list of values corresponding to active entries. \"\"\"\n else:\n if isinstance(args[0], bytes):\n assert isinstance(args[1], bytes), \"values should be string too\"\n if args[0]:\n self.indices = np.frombuffer(args[0], np.int32)\n self.values = np.frombuffer(args[1], np.float64)\n else:\n # np.frombuffer() doesn't work well with empty string in older version\n self.indices = np.array([], dtype=np.int32)\n self.values = np.array([], dtype=np.float64)\n else:\n self.indices = np.array(args[0], dtype=np.int32)\n self.values = np.array(args[1], dtype=np.float64)\n assert len(self.indices) == len(self.values), \"index and value arrays not same length\"\n for i in range(len(self.indices) - 1):\n if self.indices[i] >= self.indices[i + 1]:\n raise TypeError(\n \"Indices %s and %s are not strictly increasing\"\n % (self.indices[i], self.indices[i + 1])\n )\n\n if self.indices.size > 0:\n assert (\n np.max(self.indices) < self.size\n ), \"Index %d is out of the size of vector with size=%d\" % (\n np.max(self.indices),\n self.size,\n )\n assert np.min(self.indices) >= 0, \"Contains negative index %d\" % (np.min(self.indices))\n\n def numNonzeros(self):\n \"\"\"\n Number of nonzero elements. This scans all active values and count non zeros.\n \"\"\"\n return np.count_nonzero(self.values)\n\n def norm(self, p):\n \"\"\"\n Calculates the norm of a SparseVector.\n\n Examples\n --------\n >>> a = SparseVector(4, [0, 1], [3., -4.])\n >>> a.norm(1)\n 7.0\n >>> a.norm(2)\n 5.0\n \"\"\"\n return np.linalg.norm(self.values, p)\n\n def __reduce__(self):\n return (SparseVector, (self.size, self.indices.tobytes(), self.values.tobytes()))\n\n def dot(self, other):\n \"\"\"\n Dot product with a SparseVector or 1- or 2-dimensional Numpy array.\n\n Examples\n --------\n >>> a = SparseVector(4, [1, 3], [3.0, 4.0])\n >>> a.dot(a)\n 25.0\n >>> a.dot(array.array('d', [1., 2., 3., 4.]))\n 22.0\n >>> b = SparseVector(4, [2], [1.0])\n >>> a.dot(b)\n 0.0\n >>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))\n array([ 22., 22.])\n >>> a.dot([1., 2., 3.])\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> a.dot(np.array([1., 2.]))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> a.dot(DenseVector([1., 2.]))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> a.dot(np.zeros((3, 2)))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n \"\"\"\n\n if isinstance(other, np.ndarray):\n if other.ndim not in [2, 1]:\n raise ValueError(\"Cannot call dot with %d-dimensional array\" % other.ndim)\n assert len(self) == other.shape[0], \"dimension mismatch\"\n return np.dot(self.values, other[self.indices])\n\n assert len(self) == _vector_size(other), \"dimension mismatch\"\n\n if isinstance(other, DenseVector):\n return np.dot(other.array[self.indices], self.values)\n\n elif isinstance(other, SparseVector):\n # Find out common indices.\n self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)\n self_values = self.values[self_cmind]\n if self_values.size == 0:\n return 0.0\n else:\n other_cmind = 
np.in1d(other.indices, self.indices, assume_unique=True)\n return np.dot(self_values, other.values[other_cmind])\n\n else:\n return self.dot(_convert_to_vector(other))\n\n def squared_distance(self, other):\n \"\"\"\n Squared distance from a SparseVector or 1-dimensional NumPy array.\n\n Examples\n --------\n >>> a = SparseVector(4, [1, 3], [3.0, 4.0])\n >>> a.squared_distance(a)\n 0.0\n >>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))\n 11.0\n >>> a.squared_distance(np.array([1., 2., 3., 4.]))\n 11.0\n >>> b = SparseVector(4, [2], [1.0])\n >>> a.squared_distance(b)\n 26.0\n >>> b.squared_distance(a)\n 26.0\n >>> b.squared_distance([1., 2.])\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> b.squared_distance(SparseVector(3, [1,], [1.0,]))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n \"\"\"\n assert len(self) == _vector_size(other), \"dimension mismatch\"\n\n if isinstance(other, np.ndarray) or isinstance(other, DenseVector):\n if isinstance(other, np.ndarray) and other.ndim != 1:\n raise ValueError(\n \"Cannot call squared_distance with %d-dimensional array\" % other.ndim\n )\n if isinstance(other, DenseVector):\n other = other.array\n sparse_ind = np.zeros(other.size, dtype=bool)\n sparse_ind[self.indices] = True\n dist = other[sparse_ind] - self.values\n result = np.dot(dist, dist)\n\n other_ind = other[~sparse_ind]\n result += np.dot(other_ind, other_ind)\n return result\n\n elif isinstance(other, SparseVector):\n result = 0.0\n i, j = 0, 0\n while i < len(self.indices) and j < len(other.indices):\n if self.indices[i] == other.indices[j]:\n diff = self.values[i] - other.values[j]\n result += diff * diff\n i += 1\n j += 1\n elif self.indices[i] < other.indices[j]:\n result += self.values[i] * self.values[i]\n i += 1\n else:\n result += other.values[j] * other.values[j]\n j += 1\n while i < len(self.indices):\n result += self.values[i] * self.values[i]\n i += 1\n while j < len(other.indices):\n result += other.values[j] * other.values[j]\n j += 1\n return result\n else:\n return self.squared_distance(_convert_to_vector(other))\n\n def toArray(self):\n \"\"\"\n Returns a copy of this SparseVector as a 1-dimensional numpy.ndarray.\n \"\"\"\n arr = np.zeros((self.size,), dtype=np.float64)\n arr[self.indices] = self.values\n return arr\n\n def __len__(self):\n return self.size\n\n def __str__(self):\n inds = \"[\" + \",\".join([str(i) for i in self.indices]) + \"]\"\n vals = \"[\" + \",\".join([str(v) for v in self.values]) + \"]\"\n return \"(\" + \",\".join((str(self.size), inds, vals)) + \")\"\n\n def __repr__(self):\n inds = self.indices\n vals = self.values\n entries = \", \".join(\n [\"{0}: {1}\".format(inds[i], _format_float(vals[i])) for i in range(len(inds))]\n )\n return \"SparseVector({0}, {{{1}}})\".format(self.size, entries)\n\n def __eq__(self, other):\n if isinstance(other, SparseVector):\n return (\n other.size == self.size\n and np.array_equal(other.indices, self.indices)\n and np.array_equal(other.values, self.values)\n )\n elif isinstance(other, DenseVector):\n if self.size != len(other):\n return False\n return Vectors._equals(self.indices, self.values, list(range(len(other))), other.array)\n return False\n\n def __getitem__(self, index):\n inds = self.indices\n vals = self.values\n if not isinstance(index, int):\n raise TypeError(\"Indices must be of type integer, got type %s\" % type(index))\n\n if index >= self.size or index < -self.size:\n raise IndexError(\"Index %d out of bounds.\" % 
index)\n if index < 0:\n index += self.size\n\n if (inds.size == 0) or (index > inds.item(-1)):\n return 0.0\n\n insert_index = np.searchsorted(inds, index)\n row_ind = inds[insert_index]\n if row_ind == index:\n return vals[insert_index]\n return 0.0\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n result = 31 + self.size\n nnz = 0\n i = 0\n while i < len(self.values) and nnz < 128:\n if self.values[i] != 0:\n result = 31 * result + int(self.indices[i])\n bits = _double_to_long_bits(self.values[i])\n result = 31 * result + (bits ^ (bits >> 32))\n nnz += 1\n i += 1\n return result\n\n\nclass Vectors:\n\n \"\"\"\n Factory methods for working with vectors.\n\n Notes\n -----\n Dense vectors are simply represented as NumPy array objects,\n so there is no need to covert them for use in MLlib. For sparse vectors,\n the factory methods in this class create an MLlib-compatible type, or users\n can pass in SciPy's `scipy.sparse` column vectors.\n \"\"\"\n\n @staticmethod\n def sparse(size, *args):\n \"\"\"\n Create a sparse vector, using either a dictionary, a list of\n (index, value) pairs, or two separate arrays of indices and\n values (sorted by index).\n\n Parameters\n ----------\n size : int\n Size of the vector.\n args\n Non-zero entries, as a dictionary, list of tuples,\n or two sorted lists containing indices and values.\n\n Examples\n --------\n >>> Vectors.sparse(4, {1: 1.0, 3: 5.5})\n SparseVector(4, {1: 1.0, 3: 5.5})\n >>> Vectors.sparse(4, [(1, 1.0), (3, 5.5)])\n SparseVector(4, {1: 1.0, 3: 5.5})\n >>> Vectors.sparse(4, [1, 3], [1.0, 5.5])\n SparseVector(4, {1: 1.0, 3: 5.5})\n \"\"\"\n return SparseVector(size, *args)\n\n @staticmethod\n def dense(*elements):\n \"\"\"\n Create a dense vector of 64-bit floats from a Python list or numbers.\n\n Examples\n --------\n >>> Vectors.dense([1, 2, 3])\n DenseVector([1.0, 2.0, 3.0])\n >>> Vectors.dense(1.0, 2.0)\n DenseVector([1.0, 2.0])\n \"\"\"\n if len(elements) == 1 and not isinstance(elements[0], (float, int)):\n # it's list, numpy.array or other iterable object.\n elements = elements[0]\n return DenseVector(elements)\n\n @staticmethod\n def squared_distance(v1, v2):\n \"\"\"\n Squared distance between two vectors.\n a and b can be of type SparseVector, DenseVector, np.ndarray\n or array.array.\n\n Examples\n --------\n >>> a = Vectors.sparse(4, [(0, 1), (3, 4)])\n >>> b = Vectors.dense([2, 5, 4, 1])\n >>> a.squared_distance(b)\n 51.0\n \"\"\"\n v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2)\n return v1.squared_distance(v2)\n\n @staticmethod\n def norm(vector, p):\n \"\"\"\n Find norm of the given vector.\n \"\"\"\n return _convert_to_vector(vector).norm(p)\n\n @staticmethod\n def zeros(size):\n return DenseVector(np.zeros(size))\n\n @staticmethod\n def _equals(v1_indices, v1_values, v2_indices, v2_values):\n \"\"\"\n Check equality between sparse/dense vectors,\n v1_indices and v2_indices assume to be strictly increasing.\n \"\"\"\n v1_size = len(v1_values)\n v2_size = len(v2_values)\n k1 = 0\n k2 = 0\n all_equal = True\n while all_equal:\n while k1 < v1_size and v1_values[k1] == 0:\n k1 += 1\n while k2 < v2_size and v2_values[k2] == 0:\n k2 += 1\n\n if k1 >= v1_size or k2 >= v2_size:\n return k1 >= v1_size and k2 >= v2_size\n\n all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2]\n k1 += 1\n k2 += 1\n return all_equal\n\n\nclass Matrix:\n\n __UDT__ = MatrixUDT()\n\n \"\"\"\n Represents a local matrix.\n \"\"\"\n\n def __init__(self, numRows, numCols, 
isTransposed=False):\n self.numRows = numRows\n self.numCols = numCols\n self.isTransposed = isTransposed\n\n def toArray(self):\n \"\"\"\n Returns its elements in a numpy.ndarray.\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def _convert_to_array(array_like, dtype):\n \"\"\"\n Convert Matrix attributes which are array-like or buffer to array.\n \"\"\"\n if isinstance(array_like, bytes):\n return np.frombuffer(array_like, dtype=dtype)\n return np.asarray(array_like, dtype=dtype)\n\n\nclass DenseMatrix(Matrix):\n \"\"\"\n Column-major dense matrix.\n \"\"\"\n\n def __init__(self, numRows, numCols, values, isTransposed=False):\n Matrix.__init__(self, numRows, numCols, isTransposed)\n values = self._convert_to_array(values, np.float64)\n assert len(values) == numRows * numCols\n self.values = values\n\n def __reduce__(self):\n return DenseMatrix, (\n self.numRows,\n self.numCols,\n self.values.tobytes(),\n int(self.isTransposed),\n )\n\n def __str__(self):\n \"\"\"\n Pretty printing of a DenseMatrix\n\n Examples\n --------\n >>> dm = DenseMatrix(2, 2, range(4))\n >>> print(dm)\n DenseMatrix([[ 0., 2.],\n [ 1., 3.]])\n >>> dm = DenseMatrix(2, 2, range(4), isTransposed=True)\n >>> print(dm)\n DenseMatrix([[ 0., 1.],\n [ 2., 3.]])\n \"\"\"\n # Inspired by __repr__ in scipy matrices.\n array_lines = repr(self.toArray()).splitlines()\n\n # We need to adjust six spaces which is the difference in number\n # of letters between \"DenseMatrix\" and \"array\"\n x = \"\\n\".join([(\" \" * 6 + line) for line in array_lines[1:]])\n return array_lines[0].replace(\"array\", \"DenseMatrix\") + \"\\n\" + x\n\n def __repr__(self):\n \"\"\"\n Representation of a DenseMatrix\n\n Examples\n --------\n >>> dm = DenseMatrix(2, 2, range(4))\n >>> dm\n DenseMatrix(2, 2, [0.0, 1.0, 2.0, 3.0], False)\n \"\"\"\n # If the number of values are less than seventeen then return as it is.\n # Else return first eight values and last eight values.\n if len(self.values) < 17:\n entries = _format_float_list(self.values)\n else:\n entries = (\n _format_float_list(self.values[:8]) + [\"...\"] + _format_float_list(self.values[-8:])\n )\n\n entries = \", \".join(entries)\n return \"DenseMatrix({0}, {1}, [{2}], {3})\".format(\n self.numRows, self.numCols, entries, self.isTransposed\n )\n\n def toArray(self):\n \"\"\"\n Return a :py:class:`numpy.ndarray`\n\n Examples\n --------\n >>> m = DenseMatrix(2, 2, range(4))\n >>> m.toArray()\n array([[ 0., 2.],\n [ 1., 3.]])\n \"\"\"\n if self.isTransposed:\n return np.asfortranarray(self.values.reshape((self.numRows, self.numCols)))\n else:\n return self.values.reshape((self.numRows, self.numCols), order=\"F\")\n\n def toSparse(self):\n \"\"\"Convert to SparseMatrix\"\"\"\n if self.isTransposed:\n values = np.ravel(self.toArray(), order=\"F\")\n else:\n values = self.values\n indices = np.nonzero(values)[0]\n colCounts = np.bincount(indices // self.numRows)\n colPtrs = np.cumsum(np.hstack((0, colCounts, np.zeros(self.numCols - colCounts.size))))\n values = values[indices]\n rowIndices = indices % self.numRows\n\n return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values)\n\n def __getitem__(self, indices):\n i, j = indices\n if i < 0 or i >= self.numRows:\n raise IndexError(\"Row index %d is out of range [0, %d)\" % (i, self.numRows))\n if j >= self.numCols or j < 0:\n raise IndexError(\"Column index %d is out of range [0, %d)\" % (j, self.numCols))\n\n if self.isTransposed:\n return self.values[i * self.numCols + j]\n else:\n return self.values[i + j * 
self.numRows]\n\n def __eq__(self, other):\n if self.numRows != other.numRows or self.numCols != other.numCols:\n return False\n if isinstance(other, SparseMatrix):\n return np.all(self.toArray() == other.toArray())\n\n self_values = np.ravel(self.toArray(), order=\"F\")\n other_values = np.ravel(other.toArray(), order=\"F\")\n return np.all(self_values == other_values)\n\n\nclass SparseMatrix(Matrix):\n \"\"\"Sparse Matrix stored in CSC format.\"\"\"\n\n def __init__(self, numRows, numCols, colPtrs, rowIndices, values, isTransposed=False):\n Matrix.__init__(self, numRows, numCols, isTransposed)\n self.colPtrs = self._convert_to_array(colPtrs, np.int32)\n self.rowIndices = self._convert_to_array(rowIndices, np.int32)\n self.values = self._convert_to_array(values, np.float64)\n\n if self.isTransposed:\n if self.colPtrs.size != numRows + 1:\n raise ValueError(\n \"Expected colPtrs of size %d, got %d.\" % (numRows + 1, self.colPtrs.size)\n )\n else:\n if self.colPtrs.size != numCols + 1:\n raise ValueError(\n \"Expected colPtrs of size %d, got %d.\" % (numCols + 1, self.colPtrs.size)\n )\n if self.rowIndices.size != self.values.size:\n raise ValueError(\n \"Expected rowIndices of length %d, got %d.\"\n % (self.rowIndices.size, self.values.size)\n )\n\n def __str__(self):\n \"\"\"\n Pretty printing of a SparseMatrix\n\n Examples\n --------\n >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4])\n >>> print(sm1)\n 2 X 2 CSCMatrix\n (0,0) 2.0\n (1,0) 3.0\n (1,1) 4.0\n >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4], True)\n >>> print(sm1)\n 2 X 2 CSRMatrix\n (0,0) 2.0\n (0,1) 3.0\n (1,1) 4.0\n \"\"\"\n spstr = \"{0} X {1} \".format(self.numRows, self.numCols)\n if self.isTransposed:\n spstr += \"CSRMatrix\\n\"\n else:\n spstr += \"CSCMatrix\\n\"\n\n cur_col = 0\n smlist = []\n\n # Display first 16 values.\n if len(self.values) <= 16:\n zipindval = zip(self.rowIndices, self.values)\n else:\n zipindval = zip(self.rowIndices[:16], self.values[:16])\n for i, (rowInd, value) in enumerate(zipindval):\n if self.colPtrs[cur_col + 1] <= i:\n cur_col += 1\n if self.isTransposed:\n smlist.append(\"({0},{1}) {2}\".format(cur_col, rowInd, _format_float(value)))\n else:\n smlist.append(\"({0},{1}) {2}\".format(rowInd, cur_col, _format_float(value)))\n spstr += \"\\n\".join(smlist)\n\n if len(self.values) > 16:\n spstr += \"\\n..\" * 2\n return spstr\n\n def __repr__(self):\n \"\"\"\n Representation of a SparseMatrix\n\n Examples\n --------\n >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4])\n >>> sm1\n SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2.0, 3.0, 4.0], False)\n \"\"\"\n rowIndices = list(self.rowIndices)\n colPtrs = list(self.colPtrs)\n\n if len(self.values) <= 16:\n values = _format_float_list(self.values)\n\n else:\n values = (\n _format_float_list(self.values[:8]) + [\"...\"] + _format_float_list(self.values[-8:])\n )\n rowIndices = rowIndices[:8] + [\"...\"] + rowIndices[-8:]\n\n if len(self.colPtrs) > 16:\n colPtrs = colPtrs[:8] + [\"...\"] + colPtrs[-8:]\n\n values = \", \".join(values)\n rowIndices = \", \".join([str(ind) for ind in rowIndices])\n colPtrs = \", \".join([str(ptr) for ptr in colPtrs])\n return \"SparseMatrix({0}, {1}, [{2}], [{3}], [{4}], {5})\".format(\n self.numRows, self.numCols, colPtrs, rowIndices, values, self.isTransposed\n )\n\n def __reduce__(self):\n return SparseMatrix, (\n self.numRows,\n self.numCols,\n self.colPtrs.tobytes(),\n self.rowIndices.tobytes(),\n self.values.tobytes(),\n int(self.isTransposed),\n )\n\n def __getitem__(self, 
indices):\n i, j = indices\n if i < 0 or i >= self.numRows:\n raise IndexError(\"Row index %d is out of range [0, %d)\" % (i, self.numRows))\n if j < 0 or j >= self.numCols:\n raise IndexError(\"Column index %d is out of range [0, %d)\" % (j, self.numCols))\n\n # If a CSR matrix is given, then the row index should be searched\n # for in ColPtrs, and the column index should be searched for in the\n # corresponding slice obtained from rowIndices.\n if self.isTransposed:\n j, i = i, j\n\n colStart = self.colPtrs[j]\n colEnd = self.colPtrs[j + 1]\n nz = self.rowIndices[colStart:colEnd]\n ind = np.searchsorted(nz, i) + colStart\n if ind < colEnd and self.rowIndices[ind] == i:\n return self.values[ind]\n else:\n return 0.0\n\n def toArray(self):\n \"\"\"\n Return a numpy.ndarray\n \"\"\"\n A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order=\"F\")\n for k in range(self.colPtrs.size - 1):\n startptr = self.colPtrs[k]\n endptr = self.colPtrs[k + 1]\n if self.isTransposed:\n A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr]\n else:\n A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr]\n return A\n\n def toDense(self):\n densevals = np.ravel(self.toArray(), order=\"F\")\n return DenseMatrix(self.numRows, self.numCols, densevals)\n\n # TODO: More efficient implementation:\n def __eq__(self, other):\n return np.all(self.toArray() == other.toArray())\n\n\nclass Matrices:\n @staticmethod\n def dense(numRows, numCols, values):\n \"\"\"\n Create a DenseMatrix\n \"\"\"\n return DenseMatrix(numRows, numCols, values)\n\n @staticmethod\n def sparse(numRows, numCols, colPtrs, rowIndices, values):\n \"\"\"\n Create a SparseMatrix\n \"\"\"\n return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values)\n\n\ndef _test():\n import doctest\n\n try:\n # Numpy 1.14+ changed it's string format.\n np.set_printoptions(legacy=\"1.13\")\n except TypeError:\n pass\n (failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n"
] | [
[
"numpy.bincount",
"numpy.max",
"numpy.count_nonzero",
"numpy.isnan",
"numpy.dot",
"numpy.linalg.norm",
"numpy.zeros",
"numpy.asarray",
"numpy.array_equal",
"numpy.set_printoptions",
"numpy.array",
"numpy.min",
"numpy.nonzero",
"numpy.frombuffer",
"numpy.all",
"numpy.searchsorted",
"numpy.in1d"
]
] |
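`SparseVector.dot` in the file above intersects the two sorted index arrays with `numpy.in1d` and then takes a dense dot product over the shared positions only. A small standalone sketch of that trick, with illustrative indices and values:

```python
import numpy as np

# Two sparse vectors stored as (sorted indices, values), as in SparseVector above.
a_idx, a_val = np.array([1, 3, 7]), np.array([3.0, 4.0, 2.0])
b_idx, b_val = np.array([0, 3, 7]), np.array([5.0, 1.0, 6.0])

# Keep only the entries whose indices appear in both vectors; because both index
# arrays are sorted, the surviving values line up pairwise and can be dotted directly.
a_mask = np.in1d(a_idx, b_idx, assume_unique=True)
b_mask = np.in1d(b_idx, a_idx, assume_unique=True)
print(np.dot(a_val[a_mask], b_val[b_mask]))  # 4*1 + 2*6 = 16.0
```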
wtjiang98/mmpose-gyl | [
"ed369ba4dfcd875ed7dba9263660f1f660658b0a"
] | [
"mmpose/models/detectors/top_down.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport mmcv\nimport numpy as np\nfrom mmcv.image import imwrite\nfrom mmcv.utils.misc import deprecated_api_warning\nfrom mmcv.visualization.image import imshow\n\nfrom mmpose.core import imshow_bboxes, imshow_keypoints\nfrom .. import builder\nfrom ..builder import POSENETS\nfrom .base import BasePose\n\ntry:\n from mmcv.runner import auto_fp16\nexcept ImportError:\n warnings.warn('auto_fp16 from mmpose will be deprecated from v0.15.0'\n 'Please install mmcv>=1.1.4')\n from mmpose.core import auto_fp16\n\n\[email protected]_module()\nclass TopDown(BasePose):\n \"\"\"Top-down pose detectors.\n\n Args:\n backbone (dict): Backbone modules to extract feature.\n keypoint_head (dict): Keypoint head to process feature.\n train_cfg (dict): Config for training. Default: None.\n test_cfg (dict): Config for testing. Default: None.\n pretrained (str): Path to the pretrained models.\n loss_pose (None): Deprecated arguments. Please use\n `loss_keypoint` for heads instead.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n keypoint_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n loss_pose=None,\n ):\n super().__init__()\n self.export = False\n print(\"model export:\", self.export)\n\n self.fp16_enabled = False\n\n self.backbone = builder.build_backbone(backbone)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n if neck is not None:\n self.neck = builder.build_neck(neck)\n\n if keypoint_head is not None:\n keypoint_head['train_cfg'] = train_cfg\n keypoint_head['test_cfg'] = test_cfg\n\n if 'loss_keypoint' not in keypoint_head and loss_pose is not None:\n warnings.warn(\n '`loss_pose` for TopDown is deprecated, '\n 'use `loss_keypoint` for heads instead. See '\n 'https://github.com/open-mmlab/mmpose/pull/382'\n ' for more information.', DeprecationWarning)\n keypoint_head['loss_keypoint'] = loss_pose\n\n self.keypoint_head = builder.build_head(keypoint_head)\n\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_neck(self):\n \"\"\"Check if has keypoint_head.\"\"\"\n return hasattr(self, 'neck')\n\n @property\n def with_keypoint(self):\n \"\"\"Check if has keypoint_head.\"\"\"\n return hasattr(self, 'keypoint_head')\n\n def init_weights(self, pretrained=None):\n \"\"\"Weight initialization for model.\"\"\"\n self.backbone.init_weights(pretrained)\n if self.with_neck:\n self.neck.init_weights()\n if self.with_keypoint:\n self.keypoint_head.init_weights()\n\n @auto_fp16(apply_to=('img', ))\n def forward(self,\n img,\n target=None,\n target_weight=None,\n img_metas=None,\n return_loss=True,\n return_heatmap=False,\n **kwargs):\n \"\"\"Calls either forward_train or forward_test depending on whether\n return_loss=True. Note this setting will change the expected inputs.\n When `return_loss=True`, img and img_meta are single-nested (i.e.\n Tensor and List[dict]), and when `resturn_loss=False`, img and img_meta\n should be double nested (i.e. 
List[Tensor], List[List[dict]]), with\n the outer list indicating test time augmentations.\n\n Note:\n batch_size: N\n num_keypoints: K\n num_img_channel: C (Default: 3)\n img height: imgH\n img width: imgW\n heatmaps height: H\n heatmaps weight: W\n\n Args:\n img (torch.Tensor[NxCximgHximgW]): Input images.\n target (torch.Tensor[NxKxHxW]): Target heatmaps.\n target_weight (torch.Tensor[NxKx1]): Weights across\n different joint types.\n img_metas (list(dict)): Information about data augmentation\n By default this includes:\n - \"image_file: path to the image file\n - \"center\": center of the bbox\n - \"scale\": scale of the bbox\n - \"rotation\": rotation of the bbox\n - \"bbox_score\": score of bbox\n return_loss (bool): Option to `return loss`. `return loss=True`\n for training, `return loss=False` for validation & test.\n return_heatmap (bool) : Option to return heatmap.\n\n Returns:\n dict|tuple: if `return loss` is true, then return losses.\n Otherwise, return predicted poses, boxes, image paths\n and heatmaps.\n \"\"\"\n if self.export is True:\n return self.forward_dummy(img)\n\n if return_loss:\n return self.forward_train(img, target, target_weight, img_metas,\n **kwargs)\n return self.forward_test(\n img, img_metas, return_heatmap=return_heatmap, **kwargs)\n\n def forward_train(self, img, target, target_weight, img_metas, **kwargs):\n \"\"\"Defines the computation performed at every call when training.\"\"\"\n output = self.backbone(img)\n if self.with_neck:\n output = self.neck(output)\n if self.with_keypoint:\n output = self.keypoint_head(output)\n\n # if return loss\n losses = dict()\n if self.with_keypoint:\n keypoint_losses = self.keypoint_head.get_loss(\n output, target, target_weight)\n losses.update(keypoint_losses)\n keypoint_accuracy = self.keypoint_head.get_accuracy(\n output, target, target_weight)\n losses.update(keypoint_accuracy)\n\n return losses\n\n def forward_test(self, img, img_metas, return_heatmap=False, **kwargs):\n \"\"\"Defines the computation performed at every call when testing.\"\"\"\n assert img.size(0) == len(img_metas)\n batch_size, _, img_height, img_width = img.shape\n if batch_size > 1:\n assert 'bbox_id' in img_metas[0]\n\n result = {}\n\n features = self.backbone(img)\n\n if self.with_neck:\n features = self.neck(features)\n if self.with_keypoint:\n output_heatmap = self.keypoint_head.inference_model(\n features, flip_pairs=None)\n\n if self.test_cfg.get('flip_test', True):\n img_flipped = img.flip(3)\n features_flipped = self.backbone(img_flipped)\n if self.with_neck:\n features_flipped = self.neck(features_flipped)\n if self.with_keypoint:\n output_flipped_heatmap = self.keypoint_head.inference_model(\n features_flipped, img_metas[0]['flip_pairs'])\n output_heatmap = (output_heatmap +\n output_flipped_heatmap) * 0.5\n\n if self.with_keypoint:\n keypoint_result = self.keypoint_head.decode(\n img_metas, output_heatmap, img_size=[img_width, img_height])\n result.update(keypoint_result)\n\n if not return_heatmap:\n output_heatmap = None\n\n result['output_heatmap'] = output_heatmap\n\n return result\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network FLOPs.\n\n See ``tools/get_flops.py``.\n\n Args:\n img (torch.Tensor): Input image.\n\n Returns:\n Tensor: Output heatmaps.\n \"\"\"\n output = self.backbone(img)\n if self.with_neck:\n output = self.neck(output)\n if self.with_keypoint:\n output = self.keypoint_head(output)\n return output\n\n @deprecated_api_warning({'pose_limb_color': 'pose_link_color'},\n cls_name='TopDown')\n 
def show_result(self,\n img,\n result,\n skeleton=None,\n kpt_score_thr=0.3,\n bbox_color='green',\n pose_kpt_color=None,\n pose_link_color=None,\n text_color='white',\n radius=4,\n thickness=1,\n font_scale=0.5,\n bbox_thickness=1,\n win_name='',\n show=False,\n show_keypoint_weight=False,\n wait_time=0,\n out_file=None):\n \"\"\"Draw `result` over `img`.\n\n Args:\n img (str or Tensor): The image to be displayed.\n result (list[dict]): The results to draw over `img`\n (bbox_result, pose_result).\n skeleton (list[list]): The connection of keypoints.\n skeleton is 0-based indexing.\n kpt_score_thr (float, optional): Minimum score of keypoints\n to be shown. Default: 0.3.\n bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.\n pose_kpt_color (np.array[Nx3]`): Color of N keypoints.\n If None, do not draw keypoints.\n pose_link_color (np.array[Mx3]): Color of M links.\n If None, do not draw links.\n text_color (str or tuple or :obj:`Color`): Color of texts.\n radius (int): Radius of circles.\n thickness (int): Thickness of lines.\n font_scale (float): Font scales of texts.\n win_name (str): The window name.\n show (bool): Whether to show the image. Default: False.\n show_keypoint_weight (bool): Whether to change the transparency\n using the predicted confidence scores of keypoints.\n wait_time (int): Value of waitKey param.\n Default: 0.\n out_file (str or None): The filename to write the image.\n Default: None.\n\n Returns:\n Tensor: Visualized img, only if not `show` or `out_file`.\n \"\"\"\n img = mmcv.imread(img)\n img = img.copy()\n\n bbox_result = []\n pose_result = []\n for res in result:\n if 'bbox' in res:\n bbox_result.append(res['bbox'])\n pose_result.append(res['keypoints'])\n\n if bbox_result:\n bboxes = np.vstack(bbox_result)\n labels = None\n if 'label' in result[0]:\n labels = [res['label'] for res in result]\n # draw bounding boxes\n imshow_bboxes(\n img,\n bboxes,\n labels=labels,\n colors=bbox_color,\n text_color=text_color,\n thickness=bbox_thickness,\n font_scale=font_scale,\n show=False)\n\n imshow_keypoints(img, pose_result, skeleton, kpt_score_thr,\n pose_kpt_color, pose_link_color, radius, thickness)\n\n if show:\n imshow(img, win_name, wait_time)\n\n if out_file is not None:\n imwrite(img, out_file)\n\n return img\n"
] | [
[
"numpy.vstack"
]
] |
Yoshi-0921/EXP | [
"8da7c2baa7fc956cbf91b20ff870208b134b99d6"
] | [
"experiments/exp5/exp5_agent.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom random import random\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nfrom torch import nn, optim\n\nfrom models.dqn_conv import DQN_Conv\nfrom utils.agent import Agent\nfrom utils.buffer import Experience\nfrom utils.tools import hard_update\n\n\nclass DQNAgent(Agent):\n def __init__(self, obs_size, act_size, config):\n super(DQNAgent, self).__init__()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # set neural networks\n self.dqn = DQN_Conv(obs_size, act_size, config.hidden1, config.hidden2, config.hidden3).to(self.device)\n self.dqn_target = DQN_Conv(obs_size, act_size, config.hidden1, config.hidden2, config.hidden3).to(self.device)\n self.criterion = nn.MSELoss()\n\n # configure optimizer\n if config.opt.name == 'adam':\n self.optimizer = optim.Adam(params=self.dqn.parameters(),\n lr=config.opt.learning_rate,\n betas=config.opt.betas,\n eps=config.opt.eps)\n elif config.opt.name == 'rmsprop':\n self.optimizer = optim.RMSprop(params=self.dqn.parameters(),\n lr=config.opt.learning_rate,\n alpha=config.opt.alpha,\n eps=config.opt.eps)\n\n # synch weight\n hard_update(self.dqn_target, self.dqn)\n\n self.gamma = config.gamma\n\n def get_action(self, state, epsilon):\n self.dqn.eval()\n if np.random.random() < epsilon:\n action = self.random_action()\n else:\n with torch.no_grad():\n state = state.unsqueeze(0).to(self.device)\n\n q_values = self.dqn(state)\n _, action = torch.max(q_values, dim=1)\n action = int(action.item())\n\n return action\n\n def update(self, state, action, reward, done, next_state):\n self.dqn.eval()\n self.dqn_target.eval()\n state_action_values = self.dqn(state).gather(1, action.unsqueeze(-1)).squeeze(-1)\n with torch.no_grad():\n next_state_values = self.dqn_target(next_state).max(1)[0]\n next_state_values[done] = 0.0\n next_state_values = next_state_values.detach()\n expected_state_action_values = reward + self.gamma * (1 - done) * next_state_values\n\n self.dqn.train()\n self.optimizer.zero_grad()\n loss = self.criterion(state_action_values, expected_state_action_values)\n loss.backward()\n nn.utils.clip_grad_norm_(self.dqn.parameters(), 0.1)\n self.optimizer.step()\n\n return loss"
] | [
[
"torch.nn.MSELoss",
"torch.max",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.random.random"
]
] |
AlexanderKalistratov/mlir-extensions | [
"7f6bee6b19a73b177eb7f3c2ac735657e60c98c6"
] | [
"numba_dpcomp/mlir/numpy/funcs.py"
] | [
"# Copyright 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ..linalg_builder import register_func, register_attr, is_literal, broadcast_type, eltwise, convert_array, asarray\n\nimport numpy\nimport math\nfrom numba import prange\n\ndef is_int(t, b):\n types = [\n b.int8,\n b.uint8,\n b.int16,\n b.uint16,\n b.int32,\n b.uint32,\n b.int64,\n b.uint64,\n ]\n return t in types\n\ndef is_float(t, b):\n return t == b.float16 or t == b.float32 or t == b.float64\n\ndef promote_int(t, b):\n if is_int(t, b):\n return b.int64\n return t\n\n@register_func('array.sum')\n@register_func('numpy.sum', numpy.sum)\ndef sum_impl(builder, arg, axis=None):\n if axis is None:\n shape = arg.shape\n num_dims = len(shape)\n iterators = ['reduction' for _ in range(num_dims)]\n dims = ','.join(['d%s' % i for i in range(num_dims)])\n expr1 = f'({dims}) -> ({dims})'\n expr2 = f'({dims}) -> (0)'\n maps = [expr1,expr2]\n init = builder.from_elements(0, promote_int(arg.dtype, builder))\n\n def body(a, b):\n return a + b\n\n res = builder.generic(arg, init, iterators, maps, body)\n return builder.extract(res, 0)\n elif isinstance(axis, int):\n shape = arg.shape\n num_dims = len(shape)\n iterators = [('reduction' if i == axis else 'parallel') for i in range(num_dims)]\n dims1 = ','.join(['d%s' % i for i in range(num_dims)])\n dims2 = ','.join(['d%s' % i for i in range(num_dims) if i != axis])\n expr1 = f'({dims1}) -> ({dims1})'\n expr2 = f'({dims1}) -> ({dims2})'\n maps = [expr1,expr2]\n res_shape = tuple(shape[i] for i in range(len(shape)) if i != axis)\n\n orig_type = arg.dtype\n if is_int(orig_type, builder):\n res_type = builder.int64\n else:\n res_type = orig_type\n init = builder.init_tensor(res_shape, res_type, 0)\n\n def body(a, b):\n return a + b\n\n return builder.generic(arg, init, iterators, maps, body)\n\n\n@register_func('numpy.mean', numpy.mean)\ndef mean_impl(builder, arg, axis=None):\n return sum_impl(builder, arg, axis) / size_impl(builder, arg)\n\n\ndef _gen_unary_ops():\n unary_ops = [\n (register_func('numpy.sqrt', numpy.sqrt), True, lambda a, b: math.sqrt(a)),\n (register_func('numpy.square', numpy.square), False, lambda a, b: a * a),\n (register_func('numpy.log', numpy.log), True, lambda a, b: math.log(a)),\n (register_func('numpy.sin', numpy.sin), True, lambda a, b: math.sin(a)),\n (register_func('numpy.cos', numpy.cos), True, lambda a, b: math.cos(a)),\n ]\n\n def make_func(f64, body):\n def func(builder, arg):\n return eltwise(builder, arg, body, builder.float64 if f64 else None)\n return func\n\n for reg, f64, body in unary_ops:\n reg(make_func(f64, body))\n\n_gen_unary_ops()\n\ndef _gen_binary_ops():\n binary_ops = [\n (register_func('numpy.add', numpy.add), False, lambda a, b, c: a + b),\n (register_func('operator.add'), False, lambda a, b, c: a + b),\n (register_func('numpy.subtract', numpy.subtract), False, lambda a, b, c: a - b),\n (register_func('operator.sub'), False, lambda a, b, c: a - b),\n (register_func('numpy.multiply', numpy.multiply), False, lambda a, 
b, c: a * b),\n (register_func('operator.mul'), False, lambda a, b, c: a * b),\n (register_func('numpy.true_divide', numpy.true_divide), True, lambda a, b, c: a / b),\n (register_func('operator.truediv'), True, lambda a, b, c: a / b),\n (register_func('numpy.power', numpy.power), False, lambda a, b, c: a ** b),\n (register_func('operator.pow'), False, lambda a, b, c: a ** b),\n ]\n\n def make_func(f64, body):\n def func(builder, arg1, arg2):\n return eltwise(builder, (arg1, arg2), body, builder.float64 if f64 else None)\n return func\n\n for reg, f64, body in binary_ops:\n reg(make_func(f64, body))\n\n_gen_binary_ops()\n\ndef _init_impl(builder, shape, dtype, init=None):\n if dtype is None:\n dtype = builder.float64\n\n if len(shape) == 0:\n shape = (shape,)\n\n if init is None:\n return builder.init_tensor(shape, dtype)\n else:\n init = builder.cast(init, dtype)\n return builder.init_tensor(shape, dtype, init)\n\n@register_func('numpy.empty', numpy.empty)\ndef empty_impl(builder, shape, dtype=None):\n return _init_impl(builder, shape, dtype)\n\n@register_func('numpy.zeros', numpy.zeros)\ndef zeros_impl(builder, shape, dtype=None):\n return _init_impl(builder, shape, dtype, 0)\n\n@register_func('numpy.dot', numpy.dot)\ndef dot_impl(builder, a, b):\n shape1 = a.shape\n shape2 = b.shape\n if len(shape1) == 1 and len(shape2) == 1:\n iterators = ['reduction']\n expr1 = '(d0) -> (d0)'\n expr2 = '(d0) -> (0)'\n maps = [expr1,expr1,expr2]\n init = builder.from_elements(0, a.dtype)\n\n def body(a, b, c):\n return a * b + c\n\n res = builder.generic((a,b), init, iterators, maps, body)\n return builder.extract(res, 0)\n if len(shape1) == 2 and len(shape2) == 2:\n iterators = ['parallel','parallel','reduction']\n expr1 = '(d0,d1,d2) -> (d0,d2)'\n expr2 = '(d0,d1,d2) -> (d2,d1)'\n expr3 = '(d0,d1,d2) -> (d0,d1)'\n maps = [expr1,expr2,expr3]\n res_shape = (shape1[0], shape2[1])\n init = builder.init_tensor(res_shape, a.dtype, 0)\n\n def body(a, b, c):\n return a * b + c\n\n return builder.generic((a,b), init, iterators, maps, body)\n\n@register_attr('array.size')\ndef size_impl(builder, arg):\n shape = arg.shape\n res = 1\n for i in range(len(shape)):\n res = res * shape[i]\n return builder.cast(res, builder.int64)\n\n@register_attr('array.T')\ndef transpose_impl(builder, arg):\n shape = arg.shape\n dims = len(shape)\n if dims == 1:\n return arg\n if dims == 2:\n iterators = ['parallel','parallel']\n expr1 = '(d0,d1) -> (d0,d1)'\n expr2 = '(d0,d1) -> (d1,d0)'\n maps = [expr1,expr2]\n res_shape = (shape[1], shape[0])\n init = builder.init_tensor(res_shape, arg.dtype)\n\n def body(a, b):\n return a\n\n return builder.generic(arg, init, iterators, maps, body)\n\n@register_attr('array.dtype')\ndef dtype_impl(builder, arg):\n return arg.dtype\n\n@register_func('array.reshape')\ndef reshape_impl(builder, arg, new_shape):\n return builder.reshape(arg, new_shape)\n\n# @register_attr('array.flat')\n@register_func('array.flatten')\ndef flatten_impl(builder, arg):\n size = size_impl(builder, arg)\n return builder.reshape(arg, size)\n\ndef dtype_str(builder, dtype):\n names = [\n (builder.int8, 'int8'),\n (builder.int16, 'int16'),\n (builder.int32, 'int32'),\n (builder.int64, 'int64'),\n (builder.float32, 'float32'),\n (builder.float64, 'float64'),\n ]\n for t, name in names:\n if t == dtype:\n return name\n assert(False)\n\n@register_func('numpy.linalg.eig', numpy.linalg.eig)\ndef eig_impl(builder, arg):\n shape = arg.shape\n if len(shape) == 2:\n dtype = arg.dtype\n func_name = 
f'dpcomp_linalg_eig_{dtype_str(builder, dtype)}'\n size = shape[0]\n vals = builder.init_tensor([size], dtype)\n vecs = builder.init_tensor([size,size], dtype)\n return builder.external_call(func_name, arg, (vals, vecs))\n\n@register_func('numpy.atleast_2d', numpy.atleast_2d)\ndef atleast2d_impl(builder, arr):\n shape = arr.shape\n dims = len(shape)\n if dims == 0:\n return builder.init_tensor([1,1], arr.dtype, arr)\n elif dims == 1:\n init = builder.init_tensor([1,shape[0]], arr.dtype)\n iterators = ['parallel', 'parallel']\n expr1 = '(d0,d1) -> (d1)'\n expr2 = '(d0,d1) -> (d0,d1)'\n maps = [expr1,expr2]\n return builder.generic(arr, init, iterators, maps, lambda a, b: a)\n else:\n return arr\n\n@register_func('numpy.concatenate', numpy.concatenate)\ndef concat_impl(builder, arrays, axis=0):\n if isinstance(axis, int):\n shapes = [a.shape for a in arrays]\n num_dims = len(shapes[0])\n dtype = broadcast_type(builder, arrays)\n new_len = sum((s[axis] for s in shapes), 0)\n new_shape = [new_len if i == axis else shapes[0][i] for i in range(len(shapes[0]))]\n res = builder.init_tensor(new_shape, dtype)\n offsets = [0]*num_dims\n strides = [1]*num_dims\n for sizes, array in zip(shapes, arrays):\n res = builder.insert(array, res, offsets, sizes, strides)\n offsets[axis] += sizes[axis]\n return res\n\ndef _cov_get_ddof_func(ddof_is_none):\n if ddof_is_none:\n def ddof_func(bias, ddof):\n if bias:\n return 0\n else:\n return 1\n else:\n def ddof_func(bias, ddof):\n return ddof\n return ddof_func\n\n\ndef _cov_impl_inner(X, ddof):\n # determine the normalization factor\n fact = X.shape[1] - ddof\n\n # numpy warns if less than 0 and floors at 0\n # fact = max(fact, 0.0)\n fact = fact if fact > 0.0 else 0.0\n\n # _row_wise_average\n m, n = X.shape\n R = numpy.empty((m, 1), dtype=X.dtype)\n\n for i in prange(m):\n R[i, 0] = numpy.sum(X[i, :]) / n\n\n # de-mean\n X = X - R\n\n c = numpy.dot(X, X.T)\n # c = numpy.dot(X, numpy.conj(X.T))\n c = c * numpy.true_divide(1, fact)\n return c\n\ndef _prepare_cov_input(builder, m, y, rowvar):\n def get_func():\n if y is None:\n dtype = m.dtype\n def _prepare_cov_input_impl(m, y, rowvar):\n m_arr = numpy.atleast_2d(m)\n\n if not rowvar:\n m_arr = m_arr.T\n\n return m_arr\n else:\n dtype = broadcast_type(builder, (m, y))\n def _prepare_cov_input_impl(m, y, rowvar):\n m_arr = numpy.atleast_2d(m)\n y_arr = numpy.atleast_2d(y)\n\n # transpose if asked to and not a (1, n) vector - this looks\n # wrong as you might end up transposing one and not the other,\n # but it's what numpy does\n if not rowvar:\n if m_arr.shape[0] != 1:\n m_arr = m_arr.T\n if y_arr.shape[0] != 1:\n y_arr = y_arr.T\n\n m_rows, m_cols = m_arr.shape\n y_rows, y_cols = y_arr.shape\n\n # if m_cols != y_cols:\n # raise ValueError(\"m and y have incompatible dimensions\")\n\n # allocate and fill output array\n return numpy.concatenate((m_arr, y_arr), axis=0)\n\n return dtype, _prepare_cov_input_impl\n dtype, func = get_func()\n if is_int(dtype, builder):\n dtype = builder.float64\n res = builder.inline_func(func, m, y, rowvar)\n return convert_array(builder, res, dtype)\n\ndef _cov_scalar_result_expected(mandatory_input, optional_input):\n opt_is_none = optional_input is None\n\n if len(mandatory_input.shape) == 1:\n return opt_is_none\n\n return False\n\n@register_func('numpy.cov', numpy.cov)\ndef cov_impl(builder, m, y=None, rowvar=True, bias=False, ddof=None):\n m = asarray(builder, m)\n if not y is None:\n y = asarray(builder, y)\n X = _prepare_cov_input(builder, m, y, rowvar)\n ddof = 
builder.inline_func(_cov_get_ddof_func(ddof is None), bias, ddof)\n res = builder.inline_func(_cov_impl_inner, X, ddof)\n if _cov_scalar_result_expected(m, y):\n res = res[0, 0]\n return res\n"
] | [
[
"numpy.concatenate",
"numpy.dot",
"numpy.empty",
"numpy.sum",
"numpy.true_divide",
"numpy.atleast_2d"
]
] |
FloList/GCE_NN | [
"3f6dd059fd366164a6fcda07643a9dae970a6aba"
] | [
"GCE/utils.py"
] | [
"import os\nimport sys\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom shutil import copyfile\nimport psutil\nimport gc\n\n\ndef import_from(module, name):\n \"\"\"\n from 'module' import 'name'\n :param module: module name\n :param name: name of function that shall be imported\n :return: imported function\n \"\"\"\n if module.endswith(\".py\"):\n module = module[:-3]\n if \"/\" in module:\n path_split = os.path.split(module)\n module = path_split[-1]\n sys.path.append(os.path.join(*path_split[:-1]))\n module = __import__(module, fromlist=[name])\n return getattr(module, name)\n\n\n# Customised dictionary class enabling dot access for convenience\nclass DotDict(dict):\n def __init__(self, *args, **kwargs):\n\n super(DotDict, self).__init__(*args, **kwargs)\n for arg in args:\n if isinstance(arg, dict):\n for k, v in arg.items():\n if isinstance(v, dict):\n v = DotDict(v)\n if isinstance(v, list):\n self.__convert(v)\n self[k] = v\n\n if kwargs:\n for k, v in kwargs.items():\n if isinstance(v, dict):\n v = DotDict(v)\n elif isinstance(v, list):\n self.__convert(v)\n self[k] = v\n\n def __convert(self, v):\n for elem in range(0, len(v)):\n if isinstance(v[elem], dict):\n v[elem] = DotDict(v[elem])\n elif isinstance(v[elem], list):\n self.__convert(v[elem])\n\n def __getattr__(self, attr):\n return self.get(attr)\n\n def __setattr__(self, key, value):\n self.__setitem__(key, value)\n\n def __setitem__(self, key, value):\n super(DotDict, self).__setitem__(key, value)\n self.__dict__.update({key: value})\n\n def __delattr__(self, item):\n self.__delitem__(item)\n\n def __delitem__(self, key):\n super(DotDict, self).__delitem__(key)\n del self.__dict__[key]\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n\n def convert_to_dict(self, delete_functions=False):\n out_dict = dict()\n for key in self.keys():\n if isinstance(self[key], DotDict):\n out_dict[key] = self[key].convert_to_dict(delete_functions=delete_functions)\n else:\n if not callable(self[key]) or not delete_functions:\n out_dict[key] = self[key]\n return out_dict\n\n\ndef backup_one_file(file_in, location):\n \"\"\"\n This function copies the file file_in to the specified location, with a timestamp added.\n :param file_in: filename\n :param location: backup location\n \"\"\"\n datetime = time.ctime().replace(\" \", \"_\").replace(\" \", \"_\").replace(\":\", \"-\")\n file_backup = os.path.join(location, os.path.split(file_in)[-1][:-3] + \"_\" + datetime + \".py\")\n copyfile(file_in, file_backup)\n\n\ndef multipage(filename, figs=None, dpi=360):\n pp = PdfPages(filename)\n if figs is None:\n figs = [plt.figure(n) for n in plt.get_fignums()]\n for fig in figs:\n # fig.set_rasterized(True) # rasterised?\n fig.savefig(pp, format='pdf', dpi=dpi)\n pp.close()\n\n\ndef auto_garbage_collect(pct=80.0):\n \"\"\"\n This function collects the garbage.\n :param pct: Collect garbage when the memory consumption is higher than pct percent.\n \"\"\"\n if psutil.virtual_memory().percent >= pct:\n gc.collect()\n"
] | [
[
"matplotlib.pyplot.get_fignums",
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.figure"
]
] |
kashiy/optimized_pyointillism | [
"61755ecb26951f0131c9a73524e44b0c302d46b8"
] | [
"pyointillism_pop50_selection__mutation_crossover_random - M0.4 X0.8.py"
] | [
"# -------------------------------------------------------------------------------------------------\r\n# MIT License\r\n# If you use this in something awesome, credit would be nice but isn't required\r\n# Written by Keilan\r\n# Website: www.scholtek.com\r\n# Github: https://github.com/Keilan\r\n# -------------------------------------------------------------------------------------------------\r\n\r\n# -------------------------------------------------------------------------------------------------\r\n# Imports\r\n# -------------------------------------------------------------------------------------------------\r\nimport os\r\nimport sys\r\nimport random\r\nfrom copy import deepcopy\r\nimport multiprocessing\r\nimport jsonpickle\r\n\r\nimport numpy\r\nfrom PIL import Image, ImageDraw\r\n\r\n# -------------------------------------------------------------------------------------------------\r\n# Knobs and Dials\r\n# -------------------------------------------------------------------------------------------------\r\n# from docutils.utils.math.math2html import file\r\n\r\nPOP_SIZE = 50\r\nPOP_PER_GENERATION = 50\r\nMUTATION_CHANCE = 0.4\r\nCROSSOVER_CHANCE = 0.8\r\nADD_GENE_CHANCE = 0.3\r\nREM_GENE_CHANCE = 0.2\r\nINITIAL_GENES = 50\r\nTOURNAMENT_SIZE = 5\r\n# todo - 200\r\n\r\n# How often to output images and save files\r\nGENERATIONS_PER_IMAGE = 1000\r\nGENERATIONS_PER_SAVE = 1000\r\n\r\nprint(\"XOVER_MUTATE_SELECT_RANDOM\")\r\ntry:\r\n globalTarget = Image.open(\"reference.png\")\r\nexcept IOError:\r\n print(\"File reference.png must be located in the same directory as poly_artist.py.\")\r\n exit()\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------\r\n# Helper Classes\r\n# -------------------------------------------------------------------------------------------------\r\nclass Point:\r\n \"\"\"\r\n A 2D point. You can add them together if you want.\r\n \"\"\"\r\n\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n\r\n def __add__(self, o):\r\n return Point(self.x + o.x, self.y + o.y)\r\n\r\n\r\nclass Color:\r\n \"\"\"\r\n A color. You can shift it by a given value.\r\n \"\"\"\r\n\r\n def __init__(self, r, g, b):\r\n self.r = r\r\n self.g = g\r\n self.b = b\r\n\r\n def shift(self, r, g, b):\r\n self.r = max(0, min(255, self.r + r))\r\n self.g = max(0, min(255, self.g + g))\r\n self.b = max(0, min(255, self.b + b))\r\n\r\n def __str__(self):\r\n return \"({},{},{})\".format(self.r, self.g, self.b)\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------\r\n# Genetic Classes\r\n# -------------------------------------------------------------------------------------------------\r\nclass Gene:\r\n \"\"\"\r\n A gene is the object that can be mutated. 
Genetic algorithms work by randomly mutating genes\r\n and then using some function to determine how \"ideal\" the resulting organism is.\r\n\r\n This one is basically a circle, with a size, color, and position on the canvas.\r\n \"\"\"\r\n\r\n def __init__(self, size):\r\n self.size = size # The canvas size so we know the maximum position value\r\n\r\n self.diameter = random.randint(1,10)\r\n self.pos = Point(random.randint(0, size[0]), random.randint(0, size[1]))\r\n self.color = Color(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\r\n self.params = [\"diameter\", \"pos\", \"color\"]\r\n\r\n def mutate(self):\r\n # Decide how big the mutation will be\r\n mutation_size = max(1, int(round(random.gauss(10, 4)))) / 100\r\n\r\n # Decide what will be mutated\r\n mutation_type = random.choice(self.params)\r\n\r\n # Mutate the thing\r\n if mutation_type == \"diameter\":\r\n self.diameter = max(1, random.randint(int(self.diameter * (1 - mutation_size)),\r\n int(self.diameter * (1 + mutation_size))))\r\n\r\n elif mutation_type == \"pos\":\r\n x = max(0, random.randint(int(self.pos.x * (1 - mutation_size)), int(self.pos.x * (1 + mutation_size))))\r\n y = max(0, random.randint(int(self.pos.y * (1 - mutation_size)), int(self.pos.y * (1 + mutation_size))))\r\n self.pos = Point(min(x, self.size[0]), min(y, self.size[1]))\r\n\r\n elif mutation_type == \"color\":\r\n r = min(max(0, random.randint(int(self.color.r * (1 - mutation_size)),\r\n int(self.color.r * (1 + mutation_size)))), 255)\r\n g = min(max(0, random.randint(int(self.color.g * (1 - mutation_size)),\r\n int(self.color.g * (1 + mutation_size)))), 255)\r\n b = min(max(0, random.randint(int(self.color.b * (1 - mutation_size)),\r\n int(self.color.b * (1 + mutation_size)))), 255)\r\n self.color = Color(r, g, b)\r\n\r\n def getSave(self):\r\n \"\"\"\r\n Allows us to save an individual gene in case the program is stopped.\r\n \"\"\"\r\n so = {}\r\n so[\"size\"] = self.size\r\n so[\"diameter\"] = self.diameter\r\n so[\"pos\"] = (self.pos.x, self.pos.y)\r\n so[\"color\"] = (self.color.r, self.color.g, self.color.b)\r\n return so\r\n\r\n def loadSave(self, so):\r\n \"\"\"\r\n Allows us to load an individual gene in case the program is stopped.\r\n \"\"\"\r\n self.size = so[\"size\"]\r\n self.diameter = so[\"diameter\"]\r\n self.pos = Point(so[\"pos\"][0], so[\"pos\"][1])\r\n self.color = Color(so[\"color\"][0], so[\"color\"][1], so[\"color\"][2])\r\n\r\n\r\nclass Organism:\r\n \"\"\"\r\n The organism consists of a variety of genes that work together for some sort of effect. 
The main\r\n effect of the genetic algorithm takes place here, as each step involves mutating some of the\r\n organisms genes to produce offspring, and the best performing of those offspring carries on.\r\n\r\n This organism contains a bunch of genes that draw circles, working together to draw a picture.\r\n \"\"\"\r\n\r\n def __init__(self, size, num):\r\n self.size = size\r\n\r\n # Create random genes up to the number given\r\n self.genes = [Gene(size) for _ in range(num)]\r\n\r\n def mutate(self):\r\n # For small numbers of genes, each one has a random chance of mutating\r\n if len(self.genes) < 200:\r\n for g in self.genes:\r\n if random.random() < MUTATION_CHANCE:\r\n g.mutate()\r\n\r\n # For large numbers of genes, pick a random sample, this is statistically equivalent and faster\r\n else:\r\n for g in random.sample(self.genes, int(len(self.genes) * MUTATION_CHANCE)):\r\n g.mutate()\r\n\r\n # We also have a chance to add or remove a gene\r\n if ADD_GENE_CHANCE < random.random():\r\n self.genes.append(Gene(self.size))\r\n if len(self.genes) > 0 and REM_GENE_CHANCE < random.random():\r\n self.genes.remove(random.choice(self.genes))\r\n\r\n def crossover(self, other):\r\n if CROSSOVER_CHANCE > random.random():\r\n index = random.randint(1, len(self.genes) - 1)\r\n o1 = deepcopy(self)\r\n o1.genes = self.genes[0:index] + other.genes[index:]\r\n o2 = deepcopy(self)\r\n o2.genes = other.genes[0:index] + self.genes[index:]\r\n return o1, o2\r\n\r\n def drawImage(self):\r\n \"\"\"\r\n Using the Image module, use the genes to draw the image.\r\n \"\"\"\r\n image = Image.new(\"RGB\", self.size, (255, 255, 255))\r\n canvas = ImageDraw.Draw(image)\r\n\r\n for g in self.genes:\r\n color = (g.color.r, g.color.g, g.color.b)\r\n canvas.ellipse([g.pos.x - g.diameter, g.pos.y - g.diameter, g.pos.x + g.diameter, g.pos.y + g.diameter],\r\n outline=color, fill=color)\r\n\r\n return image\r\n\r\n def getSave(self, generation):\r\n \"\"\"\r\n Allows us to save an individual organism in case the program is stopped.\r\n \"\"\"\r\n so = [generation]\r\n return so + [g.getSave() for g in self.genes]\r\n\r\n def loadSave(self, so):\r\n \"\"\"\r\n Allows us to load an individual organism in case the program is stopped.\r\n \"\"\"\r\n self.genes = []\r\n gen = so[0]\r\n so = so[1:]\r\n for g in so:\r\n newGene = Gene(self.size)\r\n newGene.loadSave(g)\r\n self.genes.append(newGene)\r\n return gen\r\n\r\n\r\ndef fitness(im1, im2):\r\n \"\"\"\r\n The fitness function is used by the genetic algorithm to determine how successful a given organism\r\n is. 
Usually a genetic algorithm is trying to either minimize or maximize this function.\r\n\r\n This one uses numpy to quickly compute the sum of the differences between the pixels.\r\n \"\"\"\r\n # Convert Image types to numpy arrays\r\n i1 = numpy.array(im1, numpy.int16)\r\n i2 = numpy.array(im2, numpy.int16)\r\n # todo - sum of squers\r\n dif = numpy.sum(numpy.abs(i1 - i2))\r\n return (dif / 255.0 * 100) / i1.size\r\n\r\n\r\ndef init_population(pop_size):\r\n parents = []\r\n target = globalTarget\r\n while pop_size > 0:\r\n parents.append(Organism(target.size, INITIAL_GENES))\r\n pop_size -= 1\r\n return parents\r\n\r\n\r\n# select one individual using tournament selection\r\ndef tournament_selection(parents):\r\n target = globalTarget\r\n tournament = [random.choice(parents) for j in range(TOURNAMENT_SIZE)]\r\n fitnesses = [fitness(tournament[l].drawImage(), target) for l in range(TOURNAMENT_SIZE)]\r\n # return the best individual from selected parents\r\n return tournament[fitnesses.index(max(fitnesses))]\r\n\r\n\r\n# return array of size 10 with selected parents after mutation(with prob.)\r\ndef select_and_mutate(parents):\r\n mutated_parents = []\r\n for num in range(int(POP_SIZE/2)):\r\n parent = tournament_selection(parents)\r\n mutated_parents.append(mutateAndTest(parent))\r\n # add to new generation\r\n return mutated_parents\r\n\r\n\r\ndef select_and_crossover(parents):\r\n crossover_parents = []\r\n # target = globalTarget\r\n\r\n for num in range(int(POP_SIZE / 4)):\r\n parent1 = tournament_selection(parents)\r\n parent2 = tournament_selection(parents)\r\n children = parent1.crossover(parent2)\r\n if children is not None:\r\n child1, child2 = children[0], children[1]\r\n crossover_parents.append(child1)\r\n crossover_parents.append(child2)\r\n # fitness1 = fitness(child1.drawImage(), target)\r\n # fitness2 = fitness(child2.drawImage(), target)\r\n # if fitness1 >= fitness2:\r\n # crossover_parents.append(fitness1)\r\n # else:\r\n # crossover_parents.append(fitness2)\r\n fitness_crossover = test_fitness(crossover_parents)\r\n return fitness_crossover\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------\r\n# Functions to Make Stuff Run\r\n# -------------------------------------------------------------------------------------------------\r\ndef run(cores, so=None):\r\n \"\"\"\r\n Contains the loop that creates and tests new generations.\r\n \"\"\"\r\n # Create storage directory in current directory\r\n if not os.path.exists(\"results\"):\r\n os.mkdir(\"results\")\r\n\r\n # Create output log file\r\n f = open(os.path.join(\"results\", \"log.txt\"), 'a')\r\n\r\n target = globalTarget\r\n\r\n # Create the parent organism (with random genes)\r\n generation = 1\r\n parents = init_population(POP_SIZE)\r\n\r\n # Setup the multiprocessing pool\r\n p = multiprocessing.Pool(cores)\r\n\r\n before_scores = [fitness(parent.drawImage(), target) for parent in parents]\r\n best = sorted(before_scores)[0]\r\n\r\n # Infinite loop (until the process is interrupted)\r\n while generation <= 40000:\r\n # Print the current score and write it to the log file\r\n print(\"Generation {} - {}\".format(generation, best))\r\n f.write(\"Generation {} - {}\\n\".format(generation, best))\r\n\r\n # Save an image of the current best organism to the results directory\r\n if (generation) % GENERATIONS_PER_IMAGE == 0:\r\n parents[0].drawImage().save(os.path.join(\"results\", \"{}.png\".format(generation)))\r\n\r\n generation += 1\r\n\r\n # Perform the mutations 
and add to the parent\r\n try:\r\n results_parents = test_fitness(parents)\r\n results_select = select_and_mutate(parents)\r\n results_crossover = select_and_crossover(parents)\r\n except KeyboardInterrupt:\r\n print('Bye!')\r\n p.close()\r\n return\r\n\r\n scores, parents = zip(*results_parents)\r\n mutate_scores, mutate_parents = zip(*results_select)\r\n crossover_scores, crossover_parents = zip(*results_crossover)\r\n\r\n # update parents and scores with all new operations results\r\n parents += mutate_parents + crossover_parents\r\n scores += mutate_scores + crossover_scores\r\n\r\n # Find the winners from sorted scores array\r\n winners = sorted(zip(parents, scores), key=lambda x: x[1])\r\n parents, scores = zip(*winners[:POP_SIZE])\r\n best = scores[0]\r\n\r\n # Store a backup to resume running if the program is interrupted\r\n if generation % 1000 == 0:\r\n sf = open(os.path.join(\"results\", \"{}.txt\".format(generation)), 'w')\r\n sf.write(jsonpickle.encode(parents[0].getSave(generation)))\r\n sf.close()\r\n\r\n\r\n\r\ndef test_fitness(parents):\r\n results = []\r\n target = globalTarget\r\n for parent in parents:\r\n fit = fitness(parent.drawImage(), target), parent\r\n results.append(fit)\r\n return results\r\n\r\n\r\ndef mutateAndTest(o):\r\n \"\"\"\r\n Given an organism, perform a random mutation on it, and then use the fitness function to\r\n determine how accurate of a result the mutated offspring draws.\r\n \"\"\"\r\n try:\r\n c = deepcopy(o)\r\n c.mutate()\r\n i1 = c.drawImage()\r\n i2 = globalTarget\r\n return fitness(i1, i2), c\r\n except KeyboardInterrupt as e:\r\n pass\r\n\r\n\r\ndef groupMutate(o, p):\r\n \"\"\"\r\n Mutates and tests a number of organisms using the multiprocessing module.\r\n \"\"\"\r\n results = p.map(mutateAndTest, o)\r\n # results = [mutateAndTest(i) for i in [o]*int(number)]\r\n return results\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------\r\n# Main Function\r\n# -------------------------------------------------------------------------------------------------\r\nif __name__ == \"__main__\":\r\n # Set defaults\r\n cores = max(1, multiprocessing.cpu_count() - 1)\r\n so = None\r\n\r\n # Check the arguments, options are currents -t (number of threads) and -s (save file)\r\n if len(sys.argv) > 1:\r\n args = sys.argv[1:]\r\n\r\n for i, a in enumerate(args):\r\n if a == \"-t\":\r\n cores = int(args[i + 1])\r\n elif a == \"-s\":\r\n with open(args[i + 1], 'r') as save:\r\n so = save.read()\r\n\r\n run(cores, so)\r\n"
] | [
[
"numpy.array",
"numpy.abs"
]
] |
eeishaan/ift6759-block2 | [
"be7c870746cbe73cdab6ee63def2263b745d4591"
] | [
"horoma/experiments/__init__.py"
] | [
"#!/usr/bin/env python3\n\nfrom types import SimpleNamespace\n\nimport torch\nimport numpy as np\n\nfrom horoma.cfg import DEVICE\nfrom horoma.constants import TrainMode\nfrom horoma.utils.score import compute_metrics\n\n\nclass HoromaExperiment(object):\n '''\n Base class for experiments\n '''\n\n def __init__(\n self,\n experiment_file,\n embedding_model,\n cluster_obj,\n summary_writer=None,\n embedding_optim=None,\n embedding_crit=None,\n patience=10,\n **params\n ):\n self._embedding_file = experiment_file\n self._cluster_file = \"{}_{}\".format(experiment_file, '.cluster')\n self._embedding_model = embedding_model\n self._cluster_obj = cluster_obj\n self._embedding_optim = embedding_optim\n self._embedding_crit = embedding_crit\n self._cluster_label_mapping = {}\n self._summary_writer = summary_writer\n self._patience = patience\n # send model to device\n self._embedding_model.to(DEVICE)\n if summary_writer is not None:\n self._summary_writer.add_text(\n 'embedding_model', repr(self._embedding_model), 0)\n # initialize epoch var correctly\n self._start_epoch = 0\n\n # intialize a schedule for lr decay\n self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n self._embedding_optim) \\\n if self._embedding_optim is not None else None\n\n for k, v in params.items():\n setattr(self, k, v)\n\n def _remap(self, x):\n return self._cluster_label_mapping[x]\n\n def after_forwardp(self, ctx, outputs, data):\n pass\n\n def after_minibatch_test(self, ctx, outputs):\n # map them to correct labels\n predictions = [self._remap(x) for x in outputs]\n ctx.predictions.extend(predictions)\n\n def after_test(self, ctx):\n return np.array(ctx.predictions)\n\n def after_train(self, ctx):\n # save embedding model after 10 epochs\n if ctx.epoch % 10 != 9:\n self.save_experiment(ctx, save_embedding=True, save_cluster=False)\n\n # print loss\n loss = ctx.running_loss.item()\n epoch = ctx.epoch\n message = \"Epoch: {} Train Loss: {}\".format(epoch, loss)\n print(message)\n self._summary_writer.add_scalar('train_train_loss', loss, epoch)\n # return True as we don't want to stop\n return True\n\n def before_backprop(self, ctx, outputs, data):\n pass\n\n def before_forwardp(self, ctx, data):\n return data\n\n def before_test(self, ctx):\n ctx.predictions = []\n\n def before_train(self, ctx, train_train_no_aug_loader):\n print(\"Starting epoch {}\".format(ctx.epoch))\n # train cluster on learnt or random embedding\n v_f1 = self._train_cluster(train_train_no_aug_loader,\n ctx.valid_train_loader, ctx.valid_valid_loader, ctx.epoch)\n self.lr_scheduler.step(v_f1)\n\n def compute_loss(self, ctx, outputs, labels):\n loss = self._embedding_crit(outputs, labels)\n return loss\n\n def load_experiment(self, load_embedding=True, load_cluster=True):\n if load_embedding:\n checkpoint = torch.load(self._embedding_file)\n self._embedding_model.load_state_dict(\n checkpoint['model_state_dict'])\n if self._embedding_optim is not None and \\\n 'optimizer_state_dict' in checkpoint \\\n and hasattr(self, '_embedding_optim'):\n self._embedding_optim.load_state_dict(\n checkpoint['optimizer_state_dict'])\n self._start_epoch = checkpoint.get('epoch', 0)\n if load_cluster:\n checkpoint = torch.load(self._cluster_file)\n self._cluster_obj = checkpoint['cluster_obj']\n self._cluster_label_mapping = checkpoint['cluster_label_mapping']\n\n def save_experiment(self, ctx, save_embedding=True, save_cluster=True):\n if save_embedding:\n save_dict = {\n 'epoch': ctx.epoch,\n 'model_state_dict': self._embedding_model.state_dict(),\n 
'optimizer_state_dict': self._embedding_optim.state_dict(),\n }\n torch.save(save_dict, self._embedding_file)\n if save_cluster:\n save_dict = {\n 'cluster_obj': self._cluster_obj,\n 'cluster_label_mapping': self._cluster_label_mapping,\n }\n torch.save(save_dict, self._cluster_file)\n\n def test(self, dataloader):\n self._embedding_model.eval()\n with torch.no_grad():\n ctx = SimpleNamespace()\n self.before_test(ctx)\n for _, data in enumerate(dataloader):\n if isinstance(data, list):\n data = data[0]\n data = data.to(DEVICE)\n embedding = self._embedding_model.embedding(data)\n predictions = self._cluster_obj.predict(embedding)\n self.after_minibatch_test(ctx, predictions)\n return self.after_test(ctx)\n\n def _train_embedding(self,\n train_train_loader,\n train_train_no_aug_loader,\n train_valid_loader,\n epochs,\n start_epoch,\n valid_train_loader,\n valid_valid_loader):\n for epoch in range(start_epoch, epochs):\n # first train embedding model\n self._embedding_model.train()\n\n # prepare context for hooks\n ctx = SimpleNamespace(\n epoch=epoch,\n batch=0,\n running_loss=0,\n train_valid_loader=train_valid_loader,\n valid_train_loader=valid_train_loader,\n valid_valid_loader=valid_valid_loader,\n )\n\n self.before_train(ctx, train_train_no_aug_loader)\n for batch, data in enumerate(train_train_loader):\n ctx.batch = batch\n data = data.to(DEVICE)\n\n # before_forwardp can add second layer of transformation\n data = self.before_forwardp(ctx, data)\n\n # zero out previous gradient\n self._embedding_model.zero_grad()\n\n outputs = self._embedding_model(data)\n self.before_backprop(ctx, outputs, data)\n loss = self.compute_loss(ctx, outputs, data)\n ctx.running_loss += loss\n loss.backward()\n self._embedding_optim.step()\n\n self.after_forwardp(ctx, outputs, data)\n\n # Divide the loss by the number of batches\n ctx.running_loss /= len(train_train_loader)\n is_continue = self.after_train(ctx)\n if not is_continue:\n break\n\n def _train_cluster(self,\n train_train_no_aug_loader,\n valid_train_loader,\n valid_valid_loader,\n epoch,\n no_save=False):\n '''\n Train cluster and evaluate performance metrics on validation data\n '''\n # fit the cluster on train_data\n embeddings = []\n self._embedding_model.eval()\n with torch.no_grad():\n for data in train_train_no_aug_loader:\n data = data.to(DEVICE)\n data_embedding = self._embedding_model.embedding(data)\n embeddings.extend(data_embedding.tolist())\n self._cluster_obj.fit(embeddings)\n\n # learn the cluster labels using valid_train data\n\n # get validation data embedding\n true_labels = []\n embeddings = []\n self._embedding_model.eval()\n with torch.no_grad():\n for data, labels in valid_train_loader:\n data = data.to(DEVICE)\n true_labels.extend(labels.int().view(-1).tolist())\n data_embedding = self._embedding_model.embedding(data)\n embeddings.extend(data_embedding.tolist())\n true_labels = np.array(true_labels)\n\n predicted_labels = self._cluster_obj.predict(embeddings)\n self._cluster_label_mapping = {}\n\n # get number of clusters for GMM or Kmeans\n n_clusters = getattr(self._cluster_obj, 'weights_', None)\n if n_clusters is None:\n n_clusters = getattr(self._cluster_obj, 'cluster_centers_', None)\n n_clusters = n_clusters.shape[0]\n\n # construct the cluster_label mapping\n for i in range(n_clusters):\n # filter data which was predicted to be in ith cluster and\n # get their true label\n idx = np.where(predicted_labels == i)[0]\n if len(idx) != 0:\n labels_freq = np.bincount(true_labels[idx])\n self._cluster_label_mapping[i] = 
np.argmax(labels_freq)\n else:\n # No validation point found on this cluster. We can't label it.\n self._cluster_label_mapping[i] = -1\n\n # coditionally save the model\n if not no_save:\n self.save_experiment(None, save_embedding=False)\n\n # evaluate cluster on valid_train set\n predicted_labels = [self._remap(x) for x in predicted_labels]\n acc, f1, ari, nmi = compute_metrics(true_labels, predicted_labels)\n print(\"Validation Train Acc: {:.4f} F1 score: {:.4f} ARI: {:.4f} NMI: {:.4f}\".format(\n acc, f1, ari, nmi))\n self._summary_writer.add_scalar('valid_train_acc', acc, epoch)\n self._summary_writer.add_scalar('valid_train_f1', f1, epoch)\n self._summary_writer.add_scalar('valid_train_ari', ari, epoch)\n self._summary_writer.add_scalar('valid_train_nmi', nmi, epoch)\n # evaluate performance on valid_valid set\n true_labels = []\n embeddings = []\n self._embedding_model.eval()\n with torch.no_grad():\n for data, labels in valid_valid_loader:\n data = data.to(DEVICE)\n true_labels.extend(labels.int().view(-1).tolist())\n data_embedding = self._embedding_model.embedding(data)\n embeddings.extend(data_embedding.tolist())\n true_labels = np.array(true_labels)\n predicted_labels = self._cluster_obj.predict(embeddings)\n predicted_labels = [self._remap(x) for x in predicted_labels]\n acc, f1, ari, nmi = compute_metrics(true_labels, predicted_labels)\n print(\"Validation Valid Acc: {:.4f} F1 score: {:.4f} ARI: {:.4f} NMI: {:.4f}\".format(\n acc, f1, ari, nmi))\n self._summary_writer.add_scalar('valid_valid_acc', acc, epoch)\n self._summary_writer.add_scalar('valid_valid_f1', f1, epoch)\n self._summary_writer.add_scalar('valid_valid_ari', ari, epoch)\n self._summary_writer.add_scalar('valid_valid_nmi', nmi, epoch)\n\n # return f1 for LR decay\n return f1\n\n def train(self, train_train_loader, train_train_no_aug_loader,\n train_valid_loader, epochs,\n valid_train_loader=None, valid_valid_loader=None,\n start_epoch=None, mode=TrainMode.TRAIN_ALL):\n # set start_epoch differently if you want to resume training from a\n # checkpoint.\n start_epoch = start_epoch \\\n if start_epoch is not None \\\n else self._start_epoch\n\n if mode == TrainMode.TRAIN_ONLY_CLUSTER:\n self.load_experiment(load_embedding=True, load_cluster=False)\n else:\n self._train_embedding(train_train_loader,\n train_train_no_aug_loader,\n train_valid_loader,\n epochs,\n start_epoch,\n valid_train_loader,\n valid_valid_loader)\n\n if mode == TrainMode.TRAIN_ONLY_EMBEDDING:\n return\n if valid_train_loader is None:\n err = 'ERROR: Validation dataset is required for' +\\\n ' training cluster model'\n print(err)\n return\n\n self._train_cluster(train_train_no_aug_loader,\n valid_train_loader, valid_valid_loader, epochs)\n"
] | [
[
"numpy.bincount",
"numpy.array",
"torch.no_grad",
"torch.save",
"numpy.where",
"numpy.argmax",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load"
]
] |
hasandiwan/pandas | [
"f6059224456f6e287ec43524c6a1286fe398c548"
] | [
"pandas/io/formats/style.py"
] | [
"\"\"\"\nModule for applying conditional formatting to DataFrames and Series.\n\"\"\"\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nimport copy\nfrom functools import partial\nfrom itertools import product\nfrom typing import (\n Any,\n Callable,\n DefaultDict,\n Dict,\n Hashable,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\nfrom uuid import uuid4\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib\nfrom pandas._typing import Axis, FrameOrSeries, FrameOrSeriesUnion, IndexLabel\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._decorators import doc\n\nfrom pandas.core.dtypes.common import is_float\n\nimport pandas as pd\nfrom pandas.api.types import is_dict_like, is_list_like\nfrom pandas.core import generic\nimport pandas.core.common as com\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.indexing import maybe_numeric_slice, non_reducing_slice\n\njinja2 = import_optional_dependency(\"jinja2\", extra=\"DataFrame.style requires jinja2.\")\n\n\ntry:\n from matplotlib import colors\n import matplotlib.pyplot as plt\n\n has_mpl = True\nexcept ImportError:\n has_mpl = False\n no_mpl_message = \"{0} requires matplotlib.\"\n\n\n@contextmanager\ndef _mpl(func: Callable):\n if has_mpl:\n yield plt, colors\n else:\n raise ImportError(no_mpl_message.format(func.__name__))\n\n\nclass Styler:\n \"\"\"\n Helps style a DataFrame or Series according to the data with HTML and CSS.\n\n Parameters\n ----------\n data : Series or DataFrame\n Data to be styled - either a Series or DataFrame.\n precision : int\n Precision to round floats to, defaults to pd.options.display.precision.\n table_styles : list-like, default None\n List of {selector: (attr, value)} dicts; see Notes.\n uuid : str, default None\n A unique identifier to avoid CSS collisions; generated automatically.\n caption : str, default None\n Caption to attach to the table.\n table_attributes : str, default None\n Items that show up in the opening ``<table>`` tag\n in addition to automatic (by default) id.\n cell_ids : bool, default True\n If True, each cell will have an ``id`` attribute in their HTML tag.\n The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``\n where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row\n number and ``<num_col>`` is the column number.\n na_rep : str, optional\n Representation for missing values.\n If ``na_rep`` is None, no special formatting is applied.\n\n .. versionadded:: 1.0.0\n\n uuid_len : int, default 5\n If ``uuid`` is not specified, the length of the ``uuid`` to randomly generate\n expressed in hex characters, in range [0, 32].\n\n .. versionadded:: 1.2.0\n\n Attributes\n ----------\n env : Jinja2 jinja2.Environment\n template : Jinja2 Template\n loader : Jinja2 Loader\n\n See Also\n --------\n DataFrame.style : Return a Styler object containing methods for building\n a styled HTML representation for the DataFrame.\n\n Notes\n -----\n Most styling will be done by passing style functions into\n ``Styler.apply`` or ``Styler.applymap``. Style functions should\n return values with strings containing CSS ``'attr: value'`` that will\n be applied to the indicated cells.\n\n If using in the Jupyter notebook, Styler has defined a ``_repr_html_``\n to automatically render itself. 
Otherwise call Styler.render to get\n the generated HTML.\n\n CSS classes are attached to the generated HTML\n\n * Index and Column names include ``index_name`` and ``level<k>``\n where `k` is its level in a MultiIndex\n * Index label cells include\n\n * ``row_heading``\n * ``row<n>`` where `n` is the numeric position of the row\n * ``level<k>`` where `k` is the level in a MultiIndex\n\n * Column label cells include\n * ``col_heading``\n * ``col<n>`` where `n` is the numeric position of the column\n * ``level<k>`` where `k` is the level in a MultiIndex\n\n * Blank cells include ``blank``\n * Data cells include ``data``\n \"\"\"\n\n loader = jinja2.PackageLoader(\"pandas\", \"io/formats/templates\")\n env = jinja2.Environment(loader=loader, trim_blocks=True)\n template = env.get_template(\"html.tpl\")\n\n def __init__(\n self,\n data: FrameOrSeriesUnion,\n precision: Optional[int] = None,\n table_styles: Optional[List[Dict[str, List[Tuple[str, str]]]]] = None,\n uuid: Optional[str] = None,\n caption: Optional[str] = None,\n table_attributes: Optional[str] = None,\n cell_ids: bool = True,\n na_rep: Optional[str] = None,\n uuid_len: int = 5,\n ):\n self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)\n self._todo: List[Tuple[Callable, Tuple, Dict]] = []\n\n if not isinstance(data, (pd.Series, pd.DataFrame)):\n raise TypeError(\"``data`` must be a Series or DataFrame\")\n if data.ndim == 1:\n data = data.to_frame()\n if not data.index.is_unique or not data.columns.is_unique:\n raise ValueError(\"style is not supported for non-unique indices.\")\n\n self.data = data\n self.index = data.index\n self.columns = data.columns\n\n if not isinstance(uuid_len, int) or not uuid_len >= 0:\n raise TypeError(\"``uuid_len`` must be an integer in range [0, 32].\")\n self.uuid_len = min(32, uuid_len)\n self.uuid = (uuid or uuid4().hex[: self.uuid_len]) + \"_\"\n self.table_styles = table_styles\n self.caption = caption\n if precision is None:\n precision = get_option(\"display.precision\")\n self.precision = precision\n self.table_attributes = table_attributes\n self.hidden_index = False\n self.hidden_columns: Sequence[int] = []\n self.cell_ids = cell_ids\n self.na_rep = na_rep\n\n self.tooltips: Optional[_Tooltips] = None\n\n self.cell_context: Dict[str, Any] = {}\n\n # display_funcs maps (row, col) -> formatting function\n\n def default_display_func(x):\n if self.na_rep is not None and pd.isna(x):\n return self.na_rep\n elif is_float(x):\n display_format = f\"{x:.{self.precision}f}\"\n return display_format\n else:\n return x\n\n self._display_funcs: DefaultDict[\n Tuple[int, int], Callable[[Any], str]\n ] = defaultdict(lambda: default_display_func)\n\n def _repr_html_(self) -> str:\n \"\"\"\n Hooks into Jupyter notebook rich display system.\n \"\"\"\n return self.render()\n\n def _init_tooltips(self):\n \"\"\"\n Checks parameters compatible with tooltips and creates instance if necessary\n \"\"\"\n if not self.cell_ids:\n # tooltips not optimised for individual cell check. requires reasonable\n # redesign and more extensive code for a feature that might be rarely used.\n raise NotImplementedError(\n \"Tooltips can only render with 'cell_ids' is True.\"\n )\n if self.tooltips is None:\n self.tooltips = _Tooltips()\n\n def set_tooltips(self, ttips: DataFrame) -> Styler:\n \"\"\"\n Add string based tooltips that will appear in the `Styler` HTML result. These\n tooltips are applicable only to`<td>` elements.\n\n .. 
versionadded:: 1.3.0\n\n Parameters\n ----------\n ttips : DataFrame\n DataFrame containing strings that will be translated to tooltips, mapped\n by identical column and index values that must exist on the underlying\n `Styler` data. None, NaN values, and empty strings will be ignored and\n not affect the rendered HTML.\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n Tooltips are created by adding `<span class=\"pd-t\"></span>` to each data cell\n and then manipulating the table level CSS to attach pseudo hover and pseudo\n after selectors to produce the required the results. For styling control\n see `:meth:Styler.set_tooltips_class`.\n Tooltips are not designed to be efficient, and can add large amounts of\n additional HTML for larger tables, since they also require that `cell_ids`\n is forced to `True`.\n\n Examples\n --------\n >>> df = pd.DataFrame(data=[[0, 1], [2, 3]])\n >>> ttips = pd.DataFrame(\n ... data=[[\"Min\", \"\"], [np.nan, \"Max\"]], columns=df.columns, index=df.index\n ... )\n >>> s = df.style.set_tooltips(ttips).render()\n \"\"\"\n self._init_tooltips()\n assert self.tooltips is not None # mypy requiremen\n self.tooltips.tt_data = ttips\n return self\n\n def set_tooltips_class(\n self,\n name: Optional[str] = None,\n properties: Optional[Sequence[Tuple[str, Union[str, int, float]]]] = None,\n ) -> Styler:\n \"\"\"\n Manually configure the name and/or properties of the class for\n creating tooltips on hover.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n name : str, default None\n Name of the tooltip class used in CSS, should conform to HTML standards.\n properties : list-like, default None\n List of (attr, value) tuples; see example.\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n If arguments are `None` will not make any changes to the underlying ``Tooltips``\n existing values.\n\n The default properties for the tooltip CSS class are:\n\n - visibility: hidden\n - position: absolute\n - z-index: 1\n - background-color: black\n - color: white\n - transform: translate(-20px, -20px)\n\n The property ('visibility', 'hidden') is a key prerequisite to the hover\n functionality, and should always be included in any manual properties\n specification.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_tooltips_class(name='tt-add', properties=[\n ... ('visibility', 'hidden'),\n ... ('position', 'absolute'),\n ... 
('z-index', 1)])\n \"\"\"\n self._init_tooltips()\n assert self.tooltips is not None # mypy requirement\n if properties:\n self.tooltips.class_properties = properties\n if name:\n self.tooltips.class_name = name\n return self\n\n @doc(\n NDFrame.to_excel,\n klass=\"Styler\",\n storage_options=generic._shared_docs[\"storage_options\"],\n )\n def to_excel(\n self,\n excel_writer,\n sheet_name: str = \"Sheet1\",\n na_rep: str = \"\",\n float_format: Optional[str] = None,\n columns: Optional[Sequence[Hashable]] = None,\n header: Union[Sequence[Hashable], bool] = True,\n index: bool = True,\n index_label: Optional[IndexLabel] = None,\n startrow: int = 0,\n startcol: int = 0,\n engine: Optional[str] = None,\n merge_cells: bool = True,\n encoding: Optional[str] = None,\n inf_rep: str = \"inf\",\n verbose: bool = True,\n freeze_panes: Optional[Tuple[int, int]] = None,\n ) -> None:\n\n from pandas.io.formats.excel import ExcelFormatter\n\n formatter = ExcelFormatter(\n self,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep,\n )\n formatter.write(\n excel_writer,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n freeze_panes=freeze_panes,\n engine=engine,\n )\n\n def _translate(self):\n \"\"\"\n Convert the DataFrame in `self.data` and the attrs from `_build_styles`\n into a dictionary of {head, body, uuid, cellstyle}.\n \"\"\"\n table_styles = self.table_styles or []\n caption = self.caption\n ctx = self.ctx\n precision = self.precision\n hidden_index = self.hidden_index\n hidden_columns = self.hidden_columns\n uuid = self.uuid\n ROW_HEADING_CLASS = \"row_heading\"\n COL_HEADING_CLASS = \"col_heading\"\n INDEX_NAME_CLASS = \"index_name\"\n\n DATA_CLASS = \"data\"\n BLANK_CLASS = \"blank\"\n BLANK_VALUE = \"\"\n\n def format_attr(pair):\n return f\"{pair['key']}={pair['value']}\"\n\n # for sparsifying a MultiIndex\n idx_lengths = _get_level_lengths(self.index)\n col_lengths = _get_level_lengths(self.columns, hidden_columns)\n\n cell_context = self.cell_context\n\n n_rlvls = self.data.index.nlevels\n n_clvls = self.data.columns.nlevels\n rlabels = self.data.index.tolist()\n clabels = self.data.columns.tolist()\n\n if n_rlvls == 1:\n rlabels = [[x] for x in rlabels]\n if n_clvls == 1:\n clabels = [[x] for x in clabels]\n clabels = list(zip(*clabels))\n\n cellstyle_map = defaultdict(list)\n head = []\n\n for r in range(n_clvls):\n # Blank for Index columns...\n row_es = [\n {\n \"type\": \"th\",\n \"value\": BLANK_VALUE,\n \"display_value\": BLANK_VALUE,\n \"is_visible\": not hidden_index,\n \"class\": \" \".join([BLANK_CLASS]),\n }\n ] * (n_rlvls - 1)\n\n # ... 
except maybe the last for columns.names\n name = self.data.columns.names[r]\n cs = [\n BLANK_CLASS if name is None else INDEX_NAME_CLASS,\n f\"level{r}\",\n ]\n name = BLANK_VALUE if name is None else name\n row_es.append(\n {\n \"type\": \"th\",\n \"value\": name,\n \"display_value\": name,\n \"class\": \" \".join(cs),\n \"is_visible\": not hidden_index,\n }\n )\n\n if clabels:\n for c, value in enumerate(clabels[r]):\n cs = [\n COL_HEADING_CLASS,\n f\"level{r}\",\n f\"col{c}\",\n ]\n cs.extend(\n cell_context.get(\"col_headings\", {}).get(r, {}).get(c, [])\n )\n es = {\n \"type\": \"th\",\n \"value\": value,\n \"display_value\": value,\n \"class\": \" \".join(cs),\n \"is_visible\": _is_visible(c, r, col_lengths),\n }\n colspan = col_lengths.get((r, c), 0)\n if colspan > 1:\n es[\"attributes\"] = [\n format_attr({\"key\": \"colspan\", \"value\": f'\"{colspan}\"'})\n ]\n row_es.append(es)\n head.append(row_es)\n\n if (\n self.data.index.names\n and com.any_not_none(*self.data.index.names)\n and not hidden_index\n ):\n index_header_row = []\n\n for c, name in enumerate(self.data.index.names):\n cs = [INDEX_NAME_CLASS, f\"level{c}\"]\n name = \"\" if name is None else name\n index_header_row.append(\n {\"type\": \"th\", \"value\": name, \"class\": \" \".join(cs)}\n )\n\n index_header_row.extend(\n [{\"type\": \"th\", \"value\": BLANK_VALUE, \"class\": \" \".join([BLANK_CLASS])}]\n * (len(clabels[0]) - len(hidden_columns))\n )\n\n head.append(index_header_row)\n\n body = []\n for r, idx in enumerate(self.data.index):\n row_es = []\n for c, value in enumerate(rlabels[r]):\n rid = [\n ROW_HEADING_CLASS,\n f\"level{c}\",\n f\"row{r}\",\n ]\n es = {\n \"type\": \"th\",\n \"is_visible\": (_is_visible(r, c, idx_lengths) and not hidden_index),\n \"value\": value,\n \"display_value\": value,\n \"id\": \"_\".join(rid[1:]),\n \"class\": \" \".join(rid),\n }\n rowspan = idx_lengths.get((c, r), 0)\n if rowspan > 1:\n es[\"attributes\"] = [\n format_attr({\"key\": \"rowspan\", \"value\": f'\"{rowspan}\"'})\n ]\n row_es.append(es)\n\n for c, col in enumerate(self.data.columns):\n cs = [DATA_CLASS, f\"row{r}\", f\"col{c}\"]\n cs.extend(cell_context.get(\"data\", {}).get(r, {}).get(c, []))\n formatter = self._display_funcs[(r, c)]\n value = self.data.iloc[r, c]\n row_dict = {\n \"type\": \"td\",\n \"value\": value,\n \"class\": \" \".join(cs),\n \"display_value\": formatter(value),\n \"is_visible\": (c not in hidden_columns),\n }\n # only add an id if the cell has a style\n props = []\n if self.cell_ids or (r, c) in ctx:\n row_dict[\"id\"] = \"_\".join(cs[1:])\n for x in ctx[r, c]:\n # have to handle empty styles like ['']\n if x.count(\":\"):\n props.append(tuple(x.split(\":\")))\n else:\n props.append((\"\", \"\"))\n row_es.append(row_dict)\n cellstyle_map[tuple(props)].append(f\"row{r}_col{c}\")\n body.append(row_es)\n\n cellstyle = [\n {\"props\": list(props), \"selectors\": selectors}\n for props, selectors in cellstyle_map.items()\n ]\n\n table_attr = self.table_attributes\n use_mathjax = get_option(\"display.html.use_mathjax\")\n if not use_mathjax:\n table_attr = table_attr or \"\"\n if 'class=\"' in table_attr:\n table_attr = table_attr.replace('class=\"', 'class=\"tex2jax_ignore ')\n else:\n table_attr += ' class=\"tex2jax_ignore\"'\n\n d = {\n \"head\": head,\n \"cellstyle\": cellstyle,\n \"body\": body,\n \"uuid\": uuid,\n \"precision\": precision,\n \"table_styles\": table_styles,\n \"caption\": caption,\n \"table_attributes\": table_attr,\n }\n if self.tooltips:\n d = 
self.tooltips._translate(self.data, self.uuid, d)\n\n return d\n\n def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> Styler:\n \"\"\"\n Format the text display value of cells.\n\n Parameters\n ----------\n formatter : str, callable, dict or None\n If ``formatter`` is None, the default formatter is used.\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that restricts which elements\n ``formatter`` is applied to.\n na_rep : str, optional\n Representation for missing values.\n If ``na_rep`` is None, no special formatting is applied.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where\n ``a`` is one of\n\n - str: this will be wrapped in: ``a.format(x)``\n - callable: called with the value of an individual cell\n\n The default display value for numeric values is the \"general\" (``g``)\n format with ``pd.options.display.precision`` precision.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])\n >>> df.style.format(\"{:.2%}\")\n >>> df['c'] = ['a', 'b', 'c', 'd']\n >>> df.style.format({'c': str.upper})\n \"\"\"\n if formatter is None:\n assert self._display_funcs.default_factory is not None\n formatter = self._display_funcs.default_factory()\n\n if subset is None:\n row_locs = range(len(self.data))\n col_locs = range(len(self.data.columns))\n else:\n subset = non_reducing_slice(subset)\n if len(subset) == 1:\n subset = subset, self.data.columns\n\n sub_df = self.data.loc[subset]\n row_locs = self.data.index.get_indexer_for(sub_df.index)\n col_locs = self.data.columns.get_indexer_for(sub_df.columns)\n\n if is_dict_like(formatter):\n for col, col_formatter in formatter.items():\n # formatter must be callable, so '{}' are converted to lambdas\n col_formatter = _maybe_wrap_formatter(col_formatter, na_rep)\n col_num = self.data.columns.get_indexer_for([col])[0]\n\n for row_num in row_locs:\n self._display_funcs[(row_num, col_num)] = col_formatter\n else:\n # single scalar to format all cells with\n formatter = _maybe_wrap_formatter(formatter, na_rep)\n locs = product(*(row_locs, col_locs))\n for i, j in locs:\n self._display_funcs[(i, j)] = formatter\n return self\n\n def set_td_classes(self, classes: DataFrame) -> Styler:\n \"\"\"\n Add string based CSS class names to data cells that will appear within the\n `Styler` HTML result. These classes are added within specified `<td>` elements.\n\n Parameters\n ----------\n classes : DataFrame\n DataFrame containing strings that will be translated to CSS classes,\n mapped by identical column and index values that must exist on the\n underlying `Styler` data. None, NaN values, and empty strings will\n be ignored and not affect the rendered HTML.\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=[\"A\", \"B\", \"C\"])\n >>> classes = pd.DataFrame([\n ... [\"min-val red\", \"\", \"blue\"],\n ... [\"red\", None, \"blue max-val\"]\n ... ], index=df.index, columns=df.columns)\n >>> df.style.set_td_classes(classes)\n\n Using `MultiIndex` columns and a `classes` `DataFrame` as a subset of the\n underlying,\n\n >>> df = pd.DataFrame([[1,2],[3,4]], index=[\"a\", \"b\"],\n ... columns=[[\"level0\", \"level0\"], [\"level1a\", \"level1b\"]])\n >>> classes = pd.DataFrame([\"min-val\"], index=[\"a\"],\n ... 
columns=[[\"level0\"],[\"level1a\"]])\n >>> df.style.set_td_classes(classes)\n\n Form of the output with new additional css classes,\n\n >>> df = pd.DataFrame([[1]])\n >>> css = pd.DataFrame([\"other-class\"])\n >>> s = Styler(df, uuid=\"_\", cell_ids=False).set_td_classes(css)\n >>> s.hide_index().render()\n '<style type=\"text/css\" ></style>'\n '<table id=\"T__\" >'\n ' <thead>'\n ' <tr><th class=\"col_heading level0 col0\" >0</th></tr>'\n ' </thead>'\n ' <tbody>'\n ' <tr><td class=\"data row0 col0 other-class\" >1</td></tr>'\n ' </tbody>'\n '</table>'\n \"\"\"\n classes = classes.reindex_like(self.data)\n\n mask = (classes.isna()) | (classes.eq(\"\"))\n self.cell_context[\"data\"] = {\n r: {c: [str(classes.iloc[r, c])]}\n for r, rn in enumerate(classes.index)\n for c, cn in enumerate(classes.columns)\n if not mask.iloc[r, c]\n }\n\n return self\n\n def render(self, **kwargs) -> str:\n \"\"\"\n Render the built up styles to HTML.\n\n Parameters\n ----------\n **kwargs\n Any additional keyword arguments are passed\n through to ``self.template.render``.\n This is useful when you need to provide\n additional variables for a custom template.\n\n Returns\n -------\n rendered : str\n The rendered HTML.\n\n Notes\n -----\n ``Styler`` objects have defined the ``_repr_html_`` method\n which automatically calls ``self.render()`` when it's the\n last item in a Notebook cell. When calling ``Styler.render()``\n directly, wrap the result in ``IPython.display.HTML`` to view\n the rendered HTML in the notebook.\n\n Pandas uses the following keys in render. Arguments passed\n in ``**kwargs`` take precedence, so think carefully if you want\n to override them:\n\n * head\n * cellstyle\n * body\n * uuid\n * precision\n * table_styles\n * caption\n * table_attributes\n \"\"\"\n self._compute()\n # TODO: namespace all the pandas keys\n d = self._translate()\n # filter out empty styles, every cell will have a class\n # but the list of props may just be [['', '']].\n # so we have the nested anys below\n trimmed = [x for x in d[\"cellstyle\"] if any(any(y) for y in x[\"props\"])]\n d[\"cellstyle\"] = trimmed\n d.update(kwargs)\n return self.template.render(**d)\n\n def _update_ctx(self, attrs: DataFrame) -> None:\n \"\"\"\n Update the state of the Styler.\n\n Collects a mapping of {index_label: ['<property>: <value>']}.\n\n Parameters\n ----------\n attrs : DataFrame\n should contain strings of '<property>: <value>;<prop2>: <val2>'\n Whitespace shouldn't matter and the final trailing ';' shouldn't\n matter.\n \"\"\"\n coli = {k: i for i, k in enumerate(self.columns)}\n rowi = {k: i for i, k in enumerate(self.index)}\n for jj in range(len(attrs.columns)):\n cn = attrs.columns[jj]\n j = coli[cn]\n for rn, c in attrs[[cn]].itertuples():\n if not c:\n continue\n c = c.rstrip(\";\")\n if not c:\n continue\n i = rowi[rn]\n for pair in c.split(\";\"):\n self.ctx[(i, j)].append(pair)\n\n def _copy(self, deepcopy: bool = False) -> Styler:\n styler = Styler(\n self.data,\n precision=self.precision,\n caption=self.caption,\n uuid=self.uuid,\n table_styles=self.table_styles,\n na_rep=self.na_rep,\n )\n if deepcopy:\n styler.ctx = copy.deepcopy(self.ctx)\n styler._todo = copy.deepcopy(self._todo)\n else:\n styler.ctx = self.ctx\n styler._todo = self._todo\n return styler\n\n def __copy__(self) -> Styler:\n \"\"\"\n Deep copy by default.\n \"\"\"\n return self._copy(deepcopy=False)\n\n def __deepcopy__(self, memo) -> Styler:\n return self._copy(deepcopy=True)\n\n def clear(self) -> None:\n \"\"\"\n Reset the styler, removing 
any previously applied styles.\n\n Returns None.\n \"\"\"\n self.ctx.clear()\n self.tooltips = None\n self.cell_context = {}\n self._todo = []\n\n def _compute(self):\n \"\"\"\n Execute the style functions built up in `self._todo`.\n\n Relies on the conventions that all style functions go through\n .apply or .applymap. The append styles to apply as tuples of\n\n (application method, *args, **kwargs)\n \"\"\"\n r = self\n for func, args, kwargs in self._todo:\n r = func(self)(*args, **kwargs)\n return r\n\n def _apply(\n self,\n func: Callable[..., Styler],\n axis: Optional[Axis] = 0,\n subset=None,\n **kwargs,\n ) -> Styler:\n subset = slice(None) if subset is None else subset\n subset = non_reducing_slice(subset)\n data = self.data.loc[subset]\n if axis is not None:\n result = data.apply(func, axis=axis, result_type=\"expand\", **kwargs)\n result.columns = data.columns\n else:\n result = func(data, **kwargs)\n if not isinstance(result, pd.DataFrame):\n raise TypeError(\n f\"Function {repr(func)} must return a DataFrame when \"\n f\"passed to `Styler.apply` with axis=None\"\n )\n if not (\n result.index.equals(data.index) and result.columns.equals(data.columns)\n ):\n raise ValueError(\n f\"Result of {repr(func)} must have identical \"\n f\"index and columns as the input\"\n )\n\n result_shape = result.shape\n expected_shape = self.data.loc[subset].shape\n if result_shape != expected_shape:\n raise ValueError(\n f\"Function {repr(func)} returned the wrong shape.\\n\"\n f\"Result has shape: {result.shape}\\n\"\n f\"Expected shape: {expected_shape}\"\n )\n self._update_ctx(result)\n return self\n\n def apply(\n self,\n func: Callable[..., Styler],\n axis: Optional[Axis] = 0,\n subset=None,\n **kwargs,\n ) -> Styler:\n \"\"\"\n Apply a function column-wise, row-wise, or table-wise.\n\n Updates the HTML representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a Series or DataFrame (depending\n on ``axis``), and return an object with the same shape.\n Must return a DataFrame with identical index and\n column labels when ``axis=None``.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n subset : IndexSlice\n A valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice.\n **kwargs : dict\n Pass along to ``func``.\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n The output shape of ``func`` should match the input, i.e. if\n ``x`` is the input row, column, or table (depending on ``axis``),\n then ``func(x).shape == x.shape`` should be true.\n\n This is similar to ``DataFrame.apply``, except that ``axis=None``\n applies the function to the entire DataFrame at once,\n rather than column-wise or row-wise.\n\n Examples\n --------\n >>> def highlight_max(x):\n ... 
return ['background-color: yellow' if v == x.max() else ''\n for v in x]\n ...\n >>> df = pd.DataFrame(np.random.randn(5, 2))\n >>> df.style.apply(highlight_max)\n \"\"\"\n self._todo.append(\n (lambda instance: getattr(instance, \"_apply\"), (func, axis, subset), kwargs)\n )\n return self\n\n def _applymap(self, func: Callable, subset=None, **kwargs) -> Styler:\n func = partial(func, **kwargs) # applymap doesn't take kwargs?\n if subset is None:\n subset = pd.IndexSlice[:]\n subset = non_reducing_slice(subset)\n result = self.data.loc[subset].applymap(func)\n self._update_ctx(result)\n return self\n\n def applymap(self, func: Callable, subset=None, **kwargs) -> Styler:\n \"\"\"\n Apply a function elementwise.\n\n Updates the HTML representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a scalar and return a scalar.\n subset : IndexSlice\n A valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice.\n **kwargs : dict\n Pass along to ``func``.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.where: Updates the HTML representation with a style which is\n selected in accordance with the return value of a function.\n \"\"\"\n self._todo.append(\n (lambda instance: getattr(instance, \"_applymap\"), (func, subset), kwargs)\n )\n return self\n\n def where(\n self,\n cond: Callable,\n value: str,\n other: Optional[str] = None,\n subset=None,\n **kwargs,\n ) -> Styler:\n \"\"\"\n Apply a function elementwise.\n\n Updates the HTML representation with a style which is\n selected in accordance with the return value of a function.\n\n Parameters\n ----------\n cond : callable\n ``cond`` should take a scalar and return a boolean.\n value : str\n Applied when ``cond`` returns true.\n other : str\n Applied when ``cond`` returns false.\n subset : IndexSlice\n A valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice.\n **kwargs : dict\n Pass along to ``cond``.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.applymap: Updates the HTML representation with the result.\n \"\"\"\n if other is None:\n other = \"\"\n\n return self.applymap(\n lambda val: value if cond(val) else other, subset=subset, **kwargs\n )\n\n def set_precision(self, precision: int) -> Styler:\n \"\"\"\n Set the precision used to render.\n\n Parameters\n ----------\n precision : int\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.precision = precision\n return self\n\n def set_table_attributes(self, attributes: str) -> Styler:\n \"\"\"\n Set the table attributes.\n\n These are the items that show up in the opening ``<table>`` tag\n in addition to automatic (by default) id.\n\n Parameters\n ----------\n attributes : str\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_table_attributes('class=\"pure-table\"')\n # ... 
<table class=\"pure-table\"> ...\n \"\"\"\n self.table_attributes = attributes\n return self\n\n def export(self) -> List[Tuple[Callable, Tuple, Dict]]:\n \"\"\"\n Export the styles to applied to the current Styler.\n\n Can be applied to a second style with ``Styler.use``.\n\n Returns\n -------\n styles : list\n\n See Also\n --------\n Styler.use: Set the styles on the current Styler.\n \"\"\"\n return self._todo\n\n def use(self, styles: List[Tuple[Callable, Tuple, Dict]]) -> Styler:\n \"\"\"\n Set the styles on the current Styler.\n\n Possibly uses styles from ``Styler.export``.\n\n Parameters\n ----------\n styles : list\n List of style functions.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.export : Export the styles to applied to the current Styler.\n \"\"\"\n self._todo.extend(styles)\n return self\n\n def set_uuid(self, uuid: str) -> Styler:\n \"\"\"\n Set the uuid for a Styler.\n\n Parameters\n ----------\n uuid : str\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.uuid = uuid\n return self\n\n def set_caption(self, caption: str) -> Styler:\n \"\"\"\n Set the caption on a Styler.\n\n Parameters\n ----------\n caption : str\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.caption = caption\n return self\n\n def set_table_styles(self, table_styles, axis=0, overwrite=True) -> Styler:\n \"\"\"\n Set the table styles on a Styler.\n\n These are placed in a ``<style>`` tag before the generated HTML table.\n\n This function can be used to style the entire table, columns, rows or\n specific HTML selectors.\n\n Parameters\n ----------\n table_styles : list or dict\n If supplying a list, each individual table_style should be a\n dictionary with ``selector`` and ``props`` keys. ``selector``\n should be a CSS selector that the style will be applied to\n (automatically prefixed by the table's UUID) and ``props``\n should be a list of tuples with ``(attribute, value)``.\n If supplying a dict, the dict keys should correspond to\n column names or index values, depending upon the specified\n `axis` argument. These will be mapped to row or col CSS\n selectors. MultiIndex values as dict keys should be\n in their respective tuple form. The dict values should be\n a list as specified in the form with CSS selectors and\n props that will be applied to the specified row or column.\n\n .. versionchanged:: 1.2.0\n\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``). Only used if `table_styles` is\n dict.\n\n .. versionadded:: 1.2.0\n\n overwrite : boolean, default True\n Styles are replaced if `True`, or extended if `False`. CSS\n rules are preserved so most recent styles set will dominate\n if selectors intersect.\n\n .. versionadded:: 1.2.0\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4),\n ... columns=['A', 'B', 'C', 'D'])\n >>> df.style.set_table_styles(\n ... [{'selector': 'tr:hover',\n ... 'props': [('background-color', 'yellow')]}]\n ... )\n\n Adding column styling by name\n\n >>> df.style.set_table_styles({\n ... 'A': [{'selector': '',\n ... 'props': [('color', 'red')]}],\n ... 'B': [{'selector': 'td',\n ... 'props': [('color', 'blue')]}]\n ... }, overwrite=False)\n\n Adding row styling\n\n >>> df.style.set_table_styles({\n ... 0: [{'selector': 'td:hover',\n ... 'props': [('font-size', '25px')]}]\n ... 
}, axis=1, overwrite=False)\n \"\"\"\n if is_dict_like(table_styles):\n if axis in [0, \"index\"]:\n obj, idf = self.data.columns, \".col\"\n else:\n obj, idf = self.data.index, \".row\"\n\n table_styles = [\n {\n \"selector\": s[\"selector\"] + idf + str(obj.get_loc(key)),\n \"props\": s[\"props\"],\n }\n for key, styles in table_styles.items()\n for s in styles\n ]\n\n if not overwrite and self.table_styles is not None:\n self.table_styles.extend(table_styles)\n else:\n self.table_styles = table_styles\n return self\n\n def set_na_rep(self, na_rep: str) -> Styler:\n \"\"\"\n Set the missing data representation on a Styler.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n na_rep : str\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.na_rep = na_rep\n return self\n\n def hide_index(self) -> Styler:\n \"\"\"\n Hide any indices from rendering.\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.hidden_index = True\n return self\n\n def hide_columns(self, subset) -> Styler:\n \"\"\"\n Hide columns from rendering.\n\n Parameters\n ----------\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that identifies which columns\n are hidden.\n\n Returns\n -------\n self : Styler\n \"\"\"\n subset = non_reducing_slice(subset)\n hidden_df = self.data.loc[subset]\n self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)\n return self\n\n # -----------------------------------------------------------------------\n # A collection of \"builtin\" styles\n # -----------------------------------------------------------------------\n\n @staticmethod\n def _highlight_null(v, null_color: str) -> str:\n return f\"background-color: {null_color}\" if pd.isna(v) else \"\"\n\n def highlight_null(\n self,\n null_color: str = \"red\",\n subset: Optional[IndexLabel] = None,\n ) -> Styler:\n \"\"\"\n Shade the background ``null_color`` for missing values.\n\n Parameters\n ----------\n null_color : str, default 'red'\n subset : label or list of labels, default None\n A valid slice for ``data`` to limit the style application to.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.applymap(self._highlight_null, null_color=null_color, subset=subset)\n return self\n\n def background_gradient(\n self,\n cmap=\"PuBu\",\n low: float = 0,\n high: float = 0,\n axis: Optional[Axis] = 0,\n subset=None,\n text_color_threshold: float = 0.408,\n vmin: Optional[float] = None,\n vmax: Optional[float] = None,\n ) -> Styler:\n \"\"\"\n Color the background in a gradient style.\n\n The background color is determined according\n to the data in each column (optionally row). Requires matplotlib.\n\n Parameters\n ----------\n cmap : str or colormap\n Matplotlib colormap.\n low : float\n Compress the range by the low.\n high : float\n Compress the range by the high.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n subset : IndexSlice\n A valid slice for ``data`` to limit the style application to.\n text_color_threshold : float or int\n Luminance threshold for determining text color. Facilitates text\n visibility across varying background colors. From 0 to 1.\n 0 = all text is dark colored, 1 = all text is light colored.\n\n .. versionadded:: 0.24.0\n\n vmin : float, optional\n Minimum data value that corresponds to colormap minimum value.\n When None (default): the minimum value of the data will be used.\n\n .. 
versionadded:: 1.0.0\n\n vmax : float, optional\n Maximum data value that corresponds to colormap maximum value.\n When None (default): the maximum value of the data will be used.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n self : Styler\n\n Raises\n ------\n ValueError\n If ``text_color_threshold`` is not a value from 0 to 1.\n\n Notes\n -----\n Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the\n text legible by not using the entire range of the color map. The range\n of the data is extended by ``low * (x.max() - x.min())`` and ``high *\n (x.max() - x.min())`` before normalizing.\n \"\"\"\n subset = maybe_numeric_slice(self.data, subset)\n subset = non_reducing_slice(subset)\n self.apply(\n self._background_gradient,\n cmap=cmap,\n subset=subset,\n axis=axis,\n low=low,\n high=high,\n text_color_threshold=text_color_threshold,\n vmin=vmin,\n vmax=vmax,\n )\n return self\n\n @staticmethod\n def _background_gradient(\n s,\n cmap=\"PuBu\",\n low: float = 0,\n high: float = 0,\n text_color_threshold: float = 0.408,\n vmin: Optional[float] = None,\n vmax: Optional[float] = None,\n ):\n \"\"\"\n Color background in a range according to the data.\n \"\"\"\n if (\n not isinstance(text_color_threshold, (float, int))\n or not 0 <= text_color_threshold <= 1\n ):\n msg = \"`text_color_threshold` must be a value from 0 to 1.\"\n raise ValueError(msg)\n\n with _mpl(Styler.background_gradient) as (plt, colors):\n smin = np.nanmin(s.to_numpy()) if vmin is None else vmin\n smax = np.nanmax(s.to_numpy()) if vmax is None else vmax\n rng = smax - smin\n # extend lower / upper bounds, compresses color range\n norm = colors.Normalize(smin - (rng * low), smax + (rng * high))\n # matplotlib colors.Normalize modifies inplace?\n # https://github.com/matplotlib/matplotlib/issues/5427\n rgbas = plt.cm.get_cmap(cmap)(norm(s.to_numpy(dtype=float)))\n\n def relative_luminance(rgba) -> float:\n \"\"\"\n Calculate relative luminance of a color.\n\n The calculation adheres to the W3C standards\n (https://www.w3.org/WAI/GL/wiki/Relative_luminance)\n\n Parameters\n ----------\n color : rgb or rgba tuple\n\n Returns\n -------\n float\n The relative luminance as a value from 0 to 1\n \"\"\"\n r, g, b = (\n x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4)\n for x in rgba[:3]\n )\n return 0.2126 * r + 0.7152 * g + 0.0722 * b\n\n def css(rgba) -> str:\n dark = relative_luminance(rgba) < text_color_threshold\n text_color = \"#f1f1f1\" if dark else \"#000000\"\n return f\"background-color: {colors.rgb2hex(rgba)};color: {text_color};\"\n\n if s.ndim == 1:\n return [css(rgba) for rgba in rgbas]\n else:\n return pd.DataFrame(\n [[css(rgba) for rgba in row] for row in rgbas],\n index=s.index,\n columns=s.columns,\n )\n\n def set_properties(self, subset=None, **kwargs) -> Styler:\n \"\"\"\n Method to set one or more non-data dependent properties or each cell.\n\n Parameters\n ----------\n subset : IndexSlice\n A valid slice for ``data`` to limit the style application to.\n **kwargs : dict\n A dictionary of property, value pairs to be set for each cell.\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_properties(color=\"white\", align=\"right\")\n >>> df.style.set_properties(**{'background-color': 'yellow'})\n \"\"\"\n values = \";\".join(f\"{p}: {v}\" for p, v in kwargs.items())\n f = lambda x: values\n return self.applymap(f, subset=subset)\n\n @staticmethod\n def _bar(\n s,\n align: str,\n colors: List[str],\n width: 
float = 100,\n vmin: Optional[float] = None,\n vmax: Optional[float] = None,\n ):\n \"\"\"\n Draw bar chart in dataframe cells.\n \"\"\"\n # Get input value range.\n smin = np.nanmin(s.to_numpy()) if vmin is None else vmin\n smax = np.nanmax(s.to_numpy()) if vmax is None else vmax\n if align == \"mid\":\n smin = min(0, smin)\n smax = max(0, smax)\n elif align == \"zero\":\n # For \"zero\" mode, we want the range to be symmetrical around zero.\n smax = max(abs(smin), abs(smax))\n smin = -smax\n # Transform to percent-range of linear-gradient\n normed = width * (s.to_numpy(dtype=float) - smin) / (smax - smin + 1e-12)\n zero = -width * smin / (smax - smin + 1e-12)\n\n def css_bar(start: float, end: float, color: str) -> str:\n \"\"\"\n Generate CSS code to draw a bar from start to end.\n \"\"\"\n css = \"width: 10em; height: 80%;\"\n if end > start:\n css += \"background: linear-gradient(90deg,\"\n if start > 0:\n css += f\" transparent {start:.1f}%, {color} {start:.1f}%, \"\n e = min(end, width)\n css += f\"{color} {e:.1f}%, transparent {e:.1f}%)\"\n return css\n\n def css(x):\n if pd.isna(x):\n return \"\"\n\n # avoid deprecated indexing `colors[x > zero]`\n color = colors[1] if x > zero else colors[0]\n\n if align == \"left\":\n return css_bar(0, x, color)\n else:\n return css_bar(min(x, zero), max(x, zero), color)\n\n if s.ndim == 1:\n return [css(x) for x in normed]\n else:\n return pd.DataFrame(\n [[css(x) for x in row] for row in normed],\n index=s.index,\n columns=s.columns,\n )\n\n def bar(\n self,\n subset=None,\n axis: Optional[Axis] = 0,\n color=\"#d65f5f\",\n width: float = 100,\n align: str = \"left\",\n vmin: Optional[float] = None,\n vmax: Optional[float] = None,\n ) -> Styler:\n \"\"\"\n Draw bar chart in the cell backgrounds.\n\n Parameters\n ----------\n subset : IndexSlice, optional\n A valid slice for `data` to limit the style application to.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n color : str or 2-tuple/list\n If a str is passed, the color is the same for both\n negative and positive numbers. If 2-tuple/list is used, the\n first element is the color_negative and the second is the\n color_positive (eg: ['#d65f5f', '#5fba7d']).\n width : float, default 100\n A number between 0 or 100. The largest value will cover `width`\n percent of the cell's width.\n align : {'left', 'zero',' mid'}, default 'left'\n How to align the bars with the cells.\n\n - 'left' : the min value starts at the left of the cell.\n - 'zero' : a value of zero is located at the center of the cell.\n - 'mid' : the center of the cell is at (max-min)/2, or\n if values are all negative (positive) the zero is aligned\n at the right (left) of the cell.\n vmin : float, optional\n Minimum bar value, defining the left hand limit\n of the bar drawing range, lower values are clipped to `vmin`.\n When None (default): the minimum value of the data will be used.\n\n .. versionadded:: 0.24.0\n\n vmax : float, optional\n Maximum bar value, defining the right hand limit\n of the bar drawing range, higher values are clipped to `vmax`.\n When None (default): the maximum value of the data will be used.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n self : Styler\n \"\"\"\n if align not in (\"left\", \"zero\", \"mid\"):\n raise ValueError(\"`align` must be one of {'left', 'zero',' mid'}\")\n\n if not (is_list_like(color)):\n color = [color, color]\n elif len(color) == 1:\n color = [color[0], color[0]]\n elif len(color) > 2:\n raise ValueError(\n \"`color` must be string or a list-like \"\n \"of length 2: [`color_neg`, `color_pos`] \"\n \"(eg: color=['#d65f5f', '#5fba7d'])\"\n )\n\n subset = maybe_numeric_slice(self.data, subset)\n subset = non_reducing_slice(subset)\n self.apply(\n self._bar,\n subset=subset,\n axis=axis,\n align=align,\n colors=color,\n width=width,\n vmin=vmin,\n vmax=vmax,\n )\n\n return self\n\n def highlight_max(\n self, subset=None, color: str = \"yellow\", axis: Optional[Axis] = 0\n ) -> Styler:\n \"\"\"\n Highlight the maximum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n A valid slice for ``data`` to limit the style application to.\n color : str, default 'yellow'\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n\n Returns\n -------\n self : Styler\n \"\"\"\n return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)\n\n def highlight_min(\n self, subset=None, color: str = \"yellow\", axis: Optional[Axis] = 0\n ) -> Styler:\n \"\"\"\n Highlight the minimum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n A valid slice for ``data`` to limit the style application to.\n color : str, default 'yellow'\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n\n Returns\n -------\n self : Styler\n \"\"\"\n return self._highlight_handler(\n subset=subset, color=color, axis=axis, max_=False\n )\n\n def _highlight_handler(\n self,\n subset=None,\n color: str = \"yellow\",\n axis: Optional[Axis] = None,\n max_: bool = True,\n ) -> Styler:\n subset = non_reducing_slice(maybe_numeric_slice(self.data, subset))\n self.apply(\n self._highlight_extrema, color=color, axis=axis, subset=subset, max_=max_\n )\n return self\n\n @staticmethod\n def _highlight_extrema(\n data: FrameOrSeries, color: str = \"yellow\", max_: bool = True\n ):\n \"\"\"\n Highlight the min or max in a Series or DataFrame.\n \"\"\"\n attr = f\"background-color: {color}\"\n\n if max_:\n extrema = data == np.nanmax(data.to_numpy())\n else:\n extrema = data == np.nanmin(data.to_numpy())\n\n if data.ndim == 1: # Series from .apply\n return [attr if v else \"\" for v in extrema]\n else: # DataFrame from .tee\n return pd.DataFrame(\n np.where(extrema, attr, \"\"), index=data.index, columns=data.columns\n )\n\n @classmethod\n def from_custom_template(cls, searchpath, name):\n \"\"\"\n Factory function for creating a subclass of ``Styler``.\n\n Uses a custom template and Jinja environment.\n\n Parameters\n ----------\n searchpath : str or list\n Path or paths of directories containing the templates.\n name : str\n Name of your custom template to use for rendering.\n\n Returns\n -------\n MyStyler : subclass of Styler\n Has the correct ``env`` and ``template`` class attributes set.\n \"\"\"\n loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(searchpath), cls.loader])\n\n # mypy doesnt like 
dynamically-defined class\n # error: Variable \"cls\" is not valid as a type [valid-type]\n # error: Invalid base class \"cls\" [misc]\n class MyStyler(cls): # type:ignore[valid-type,misc]\n env = jinja2.Environment(loader=loader)\n template = env.get_template(name)\n\n return MyStyler\n\n def pipe(self, func: Callable, *args, **kwargs):\n \"\"\"\n Apply ``func(self, *args, **kwargs)``, and return the result.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n func : function\n Function to apply to the Styler. Alternatively, a\n ``(callable, keyword)`` tuple where ``keyword`` is a string\n indicating the keyword of ``callable`` that expects the Styler.\n *args : optional\n Arguments passed to `func`.\n **kwargs : optional\n A dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object :\n The value returned by ``func``.\n\n See Also\n --------\n DataFrame.pipe : Analogous method for DataFrame.\n Styler.apply : Apply a function row-wise, column-wise, or table-wise to\n modify the dataframe's styling.\n\n Notes\n -----\n Like :meth:`DataFrame.pipe`, this method can simplify the\n application of several user-defined functions to a styler. Instead\n of writing:\n\n .. code-block:: python\n\n f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)\n\n users can write:\n\n .. code-block:: python\n\n (df.style.set_precision(3)\n .pipe(g, arg1=a)\n .pipe(f, arg2=b, arg3=c))\n\n In particular, this allows users to define functions that take a\n styler object, along with other parameters, and return the styler after\n making styling changes (such as calling :meth:`Styler.apply` or\n :meth:`Styler.set_properties`). Using ``.pipe``, these user-defined\n style \"transformations\" can be interleaved with calls to the built-in\n Styler interface.\n\n Examples\n --------\n >>> def format_conversion(styler):\n ... return (styler.set_properties(**{'text-align': 'right'})\n ... .format({'conversion': '{:.1%}'}))\n\n The user-defined ``format_conversion`` function above can be called\n within a sequence of other style modifications:\n\n >>> df = pd.DataFrame({'trial': list(range(5)),\n ... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})\n >>> (df.style\n ... .highlight_min(subset=['conversion'], color='yellow')\n ... .pipe(format_conversion)\n ... 
.set_caption(\"Results with minimum conversion highlighted.\"))\n \"\"\"\n return com.pipe(self, func, *args, **kwargs)\n\n\nclass _Tooltips:\n \"\"\"\n An extension to ``Styler`` that allows for and manipulates tooltips on hover\n of table data-cells in the HTML result.\n\n Parameters\n ----------\n css_name: str, default \"pd-t\"\n Name of the CSS class that controls visualisation of tooltips.\n css_props: list-like, default; see Notes\n List of (attr, value) tuples defining properties of the CSS class.\n tooltips: DataFrame, default empty\n DataFrame of strings aligned with underlying ``Styler`` data for tooltip\n display.\n\n Notes\n -----\n The default properties for the tooltip CSS class are:\n\n - visibility: hidden\n - position: absolute\n - z-index: 1\n - background-color: black\n - color: white\n - transform: translate(-20px, -20px)\n\n Hidden visibility is a key prerequisite to the hover functionality, and should\n always be included in any manual properties specification.\n \"\"\"\n\n def __init__(\n self,\n css_props: Sequence[Tuple[str, Union[str, int, float]]] = [\n (\"visibility\", \"hidden\"),\n (\"position\", \"absolute\"),\n (\"z-index\", 1),\n (\"background-color\", \"black\"),\n (\"color\", \"white\"),\n (\"transform\", \"translate(-20px, -20px)\"),\n ],\n css_name: str = \"pd-t\",\n tooltips: DataFrame = DataFrame(),\n ):\n self.class_name = css_name\n self.class_properties = css_props\n self.tt_data = tooltips\n self.table_styles: List[Dict[str, Union[str, List[Tuple[str, str]]]]] = []\n\n @property\n def _class_styles(self):\n \"\"\"\n Combine the ``_Tooltips`` CSS class name and CSS properties to the format\n required to extend the underlying ``Styler`` `table_styles` to allow\n tooltips to render in HTML.\n\n Returns\n -------\n styles : List\n \"\"\"\n return [{\"selector\": f\".{self.class_name}\", \"props\": self.class_properties}]\n\n def _pseudo_css(self, uuid: str, name: str, row: int, col: int, text: str):\n \"\"\"\n For every table data-cell that has a valid tooltip (not None, NaN or\n empty string) must create two pseudo CSS entries for the specific\n <td> element id which are added to overall table styles:\n an on hover visibility change and a content change\n dependent upon the user's chosen display string.\n\n For example:\n [{\"selector\": \"T__row1_col1:hover .pd-t\",\n \"props\": [(\"visibility\", \"visible\")]},\n {\"selector\": \"T__row1_col1 .pd-t::after\",\n \"props\": [(\"content\", \"Some Valid Text String\")]}]\n\n Parameters\n ----------\n uuid: str\n The uuid of the Styler instance\n name: str\n The css-name of the class used for styling tooltips\n row : int\n The row index of the specified tooltip string data\n col : int\n The col index of the specified tooltip string data\n text : str\n The textual content of the tooltip to be displayed in HTML.\n\n Returns\n -------\n pseudo_css : List\n \"\"\"\n return [\n {\n \"selector\": \"#T_\"\n + uuid\n + \"row\"\n + str(row)\n + \"_col\"\n + str(col)\n + f\":hover .{name}\",\n \"props\": [(\"visibility\", \"visible\")],\n },\n {\n \"selector\": \"#T_\"\n + uuid\n + \"row\"\n + str(row)\n + \"_col\"\n + str(col)\n + f\" .{name}::after\",\n \"props\": [(\"content\", f'\"{text}\"')],\n },\n ]\n\n def _translate(self, styler_data: FrameOrSeriesUnion, uuid: str, d: Dict):\n \"\"\"\n Mutate the render dictionary to allow for tooltips:\n\n - Add `<span>` HTML element to each data cells `display_value`. 
Ignores\n headers.\n - Add table level CSS styles to control pseudo classes.\n\n Parameters\n ----------\n styler_data : DataFrame\n Underlying ``Styler`` DataFrame used for reindexing.\n uuid : str\n The underlying ``Styler`` uuid for CSS id.\n d : dict\n The dictionary prior to final render\n\n Returns\n -------\n render_dict : Dict\n \"\"\"\n self.tt_data = (\n self.tt_data.reindex_like(styler_data)\n .dropna(how=\"all\", axis=0)\n .dropna(how=\"all\", axis=1)\n )\n if self.tt_data.empty:\n return d\n\n name = self.class_name\n\n mask = (self.tt_data.isna()) | (self.tt_data.eq(\"\")) # empty string = no ttip\n self.table_styles = [\n style\n for sublist in [\n self._pseudo_css(uuid, name, i, j, str(self.tt_data.iloc[i, j]))\n for i in range(len(self.tt_data.index))\n for j in range(len(self.tt_data.columns))\n if not mask.iloc[i, j]\n ]\n for style in sublist\n ]\n\n if self.table_styles:\n # add span class to every cell only if at least 1 non-empty tooltip\n for row in d[\"body\"]:\n for item in row:\n if item[\"type\"] == \"td\":\n item[\"display_value\"] = (\n str(item[\"display_value\"])\n + f'<span class=\"{self.class_name}\"></span>'\n )\n d[\"table_styles\"].extend(self._class_styles)\n d[\"table_styles\"].extend(self.table_styles)\n\n return d\n\n\ndef _is_visible(idx_row, idx_col, lengths) -> bool:\n \"\"\"\n Index -> {(idx_row, idx_col): bool}).\n \"\"\"\n return (idx_col, idx_row) in lengths\n\n\ndef _get_level_lengths(index, hidden_elements=None):\n \"\"\"\n Given an index, find the level length for each element.\n\n Optional argument is a list of index positions which\n should not be visible.\n\n Result is a dictionary of (level, initial_position): span\n \"\"\"\n if isinstance(index, pd.MultiIndex):\n levels = index.format(sparsify=lib.no_default, adjoin=False)\n else:\n levels = index.format()\n\n if hidden_elements is None:\n hidden_elements = []\n\n lengths = {}\n if index.nlevels == 1:\n for i, value in enumerate(levels):\n if i not in hidden_elements:\n lengths[(0, i)] = 1\n return lengths\n\n for i, lvl in enumerate(levels):\n for j, row in enumerate(lvl):\n if not get_option(\"display.multi_sparse\"):\n lengths[(i, j)] = 1\n elif (row is not lib.no_default) and (j not in hidden_elements):\n last_label = j\n lengths[(i, last_label)] = 1\n elif row is not lib.no_default:\n # even if its hidden, keep track of it in case\n # length >1 and later elements are visible\n last_label = j\n lengths[(i, last_label)] = 0\n elif j not in hidden_elements:\n lengths[(i, last_label)] += 1\n\n non_zero_lengths = {\n element: length for element, length in lengths.items() if length >= 1\n }\n\n return non_zero_lengths\n\n\ndef _maybe_wrap_formatter(\n formatter: Union[Callable, str], na_rep: Optional[str]\n) -> Callable:\n if isinstance(formatter, str):\n formatter_func = lambda x: formatter.format(x)\n elif callable(formatter):\n formatter_func = formatter\n else:\n msg = f\"Expected a template string or callable, got {formatter} instead\"\n raise TypeError(msg)\n\n if na_rep is None:\n return formatter_func\n elif isinstance(na_rep, str):\n return lambda x: na_rep if pd.isna(x) else formatter_func(x)\n else:\n msg = f\"Expected a string, got {na_rep} instead\"\n raise TypeError(msg)\n"
] | [
[
"pandas.isna",
"pandas.io.formats.excel.ExcelFormatter",
"pandas.api.types.is_list_like",
"pandas.core.indexing.maybe_numeric_slice",
"pandas.core.indexing.non_reducing_slice",
"pandas.util._decorators.doc",
"pandas.compat._optional.import_optional_dependency",
"pandas.core.common.any_not_none",
"matplotlib.colors.rgb2hex",
"pandas.core.frame.DataFrame",
"pandas._config.get_option",
"numpy.where",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.cm.get_cmap",
"pandas.api.types.is_dict_like",
"pandas.core.dtypes.common.is_float",
"pandas.core.common.pipe"
]
] |
nielsdrost/pymt | [
"ae39bf807428827a6904202bf4d3b927daa255ea"
] | [
"pymt/framework/bmi_plot.py"
] | [
"#! /usr/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef quick_plot(bmi, name, **kwds):\n gid = bmi.get_var_grid(name)\n gtype = bmi.get_grid_type(gid)\n grid = bmi.grid[gid]\n\n x, y = grid.node_x.values, grid.node_y.values\n z = bmi.get_value(name)\n\n x_label = \"{name} ({units})\".format(\n name=grid.node_x.standard_name, units=grid.node_x.units\n )\n y_label = \"{name} ({units})\".format(\n name=grid.node_y.standard_name, units=grid.node_y.units\n )\n\n if gtype in (\"unstructured_triangular\",):\n tris = bmi.get_grid_face_node_connectivity(gid).reshape((-1, 3))\n plt.tripcolor(x, y, tris, z, **kwds)\n elif gtype in (\"uniform_rectilinear\", \"structured_quad\"):\n shape = bmi.get_grid_shape(gid)\n spacing = bmi.get_grid_spacing(gid)\n origin = bmi.get_grid_origin(gid)\n x = np.arange(shape[-1]) * spacing[-1] + origin[-1]\n y = np.arange(shape[-2]) * spacing[-2] + origin[-2]\n plt.pcolormesh(x, y, z.reshape(shape), **kwds)\n else:\n raise ValueError(\"no plotter for {gtype}\".format(gtype=gtype))\n\n plt.axis(\"tight\")\n plt.gca().set_aspect(\"equal\")\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n cbar = plt.colorbar()\n cbar.ax.set_ylabel(\n \"{name} ({units})\".format(name=name, units=bmi.get_var_units(name))\n )\n"
] | [
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tripcolor",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axis"
]
] |
chusloj/taxdata | [
"52c02c5f457145413983762280f8c1552b5ac740"
] | [
"tests/test_data.py"
] | [
"\"\"\"\nTest CPS and PUF data file contents.\n\"\"\"\nimport os\nimport pytest\nimport numpy as np\n\n\ndef unique_recid(data, dataname):\n \"\"\"\n Test that RECID values are unique.\n \"\"\"\n recid = data[\"RECID\"]\n unique, counts = np.unique(recid, return_counts=True)\n recid_count = dict(zip(unique, counts))\n duplicates = False\n msg = \"\"\n for rid in sorted(recid_count.keys()):\n if recid_count[rid] > 1:\n duplicates = True\n msg += \"\\nRECID={} has COUNT={}\".format(rid, recid_count[rid])\n if duplicates:\n title = \"The following {} RECIDs have COUNTS greater than one:\"\n raise ValueError(title.format(dataname) + msg)\n\n\ndef min_max(data, meta, dataname):\n \"\"\"\n Test that variable variables are within their minimum/maximum range.\n \"\"\"\n for var in meta.keys():\n availability = meta[var][\"availability\"]\n min_value = meta[var][\"range\"][\"min\"]\n max_value = meta[var][\"range\"][\"max\"]\n in_data = True\n if dataname not in availability:\n in_data = False\n if in_data:\n m = \"{}-{} contains values less than min value\".format(dataname, var)\n assert np.all(data[var] >= min_value), m\n m = \"{}-{} contains values greater than max value\".format(dataname, var)\n assert np.all(data[var] <= max_value), m\n\n\ndef relationships(data, dataname):\n \"\"\"\n Test the relationships between variables.\n\n Note (1): we have weakened the XTOT == sum of nu18, n1820, n21 assertion\n for the PUF because in PUF data the value of XTOT is capped by IRS-SOI.\n\n Note (2): we have weakened the n24 <= nu18 assertion for the PUF because\n the only way to ensure it held true would be to create extreamly small\n bins during the tax unit matching process, which had the potential to\n reduce the overall match accuracy.\n \"\"\"\n eq_str = \"{}-{} not equal to {}\"\n less_than_str = \"{}-{} not less than or equal to {}\"\n tol = 0.020001\n\n eq_vars = [\n (\"e00200\", [\"e00200p\", \"e00200s\"]),\n (\"e00900\", [\"e00900p\", \"e00900s\"]),\n (\"e02100\", [\"e02100p\", \"e02100s\"]),\n ]\n for lhs, rhs in eq_vars:\n if not np.allclose(data[lhs], data[rhs].sum(axis=1), atol=tol):\n raise ValueError(eq_str.format(dataname, lhs, rhs))\n\n nsums = data[[\"nu18\", \"n1820\", \"n21\"]].sum(axis=1)\n if dataname == \"CPS\":\n m = eq_str.format(dataname, \"XTOT\", \"sum of nu18, n1820, n21\")\n assert np.all(data[\"XTOT\"] >= nsums), m\n else:\n # see Note (1) in docstring\n m = less_than_str.format(dataname, \"XTOT\", \"sum of nu18, n1820, n21\")\n assert np.all(data[\"XTOT\"] <= nsums), m\n\n m = less_than_str.format(dataname, \"n24\", \"nu18\")\n if dataname == \"CPS\":\n assert np.all(data[\"n24\"] <= data[\"nu18\"]), m\n else:\n # see Note (2) in docstring\n m = \"Number of records where n24 > nu18 has changed\"\n assert (data[\"n24\"] > data[\"nu18\"]).sum() == 14941, m\n subdata = data[data[\"n24\"] > data[\"nu18\"]]\n m = \"n24 > nu18 + 3\"\n assert np.all(subdata[\"n24\"] <= subdata[\"nu18\"] + 3), m\n\n m = less_than_str.format(dataname, \"e00650\", \"e00600\")\n assert np.all(data[\"e00600\"] >= data[\"e00650\"]), m\n\n m = less_than_str.format(dataname, \"e01700\", \"e01500\")\n assert np.all(data[\"e01500\"] >= data[\"e01700\"]), m\n\n m = less_than_str.format(dataname, \"pencon_p\", \"e00200p+pencon_p\")\n assert np.all((data[\"e00200p\"] + data[\"pencon_p\"]) >= data[\"pencon_p\"]), m\n\n m = less_than_str.format(dataname, \"pencon_s\", \"e00200s+pencon_s\")\n assert np.all((data[\"e00200s\"] + data[\"pencon_s\"]) >= data[\"pencon_s\"]), m\n\n # check that all non-married filers 
have zero spouse income\n nonmarried = data[data[\"MARS\"] != 2]\n zeros = np.zeros_like(len(nonmarried))\n msg = \"{} not always zero for non-married filing unit\"\n spouse_vars = [\"e00200s\", \"e00900s\", \"e02100s\"]\n for var in spouse_vars:\n if not np.allclose(nonmarried[var], zeros):\n raise ValueError(msg.format(var))\n\n\ndef variable_check(test_path, data, dataname):\n \"\"\"\n Test aggregate values in the data.\n \"\"\"\n expected_file_name = \"{}_agg_expected.txt\".format(dataname)\n efile_path = os.path.join(test_path, expected_file_name)\n with open(efile_path, \"r\") as efile:\n expected_txt = efile.readlines()\n expected_sum = dict()\n expected_min = dict()\n expected_max = dict()\n for line in expected_txt[1:]:\n txt = line.rstrip()\n split = txt.split()\n assert len(split) == 4\n var = split[0]\n expected_sum[var] = int(split[1])\n expected_min[var] = int(split[2])\n expected_max[var] = int(split[3])\n\n # loop through each column in the dataset and check sum, min, max\n actual_txt = \"{:20}{:>15}{:>15}{:>15}\\n\".format(\"VARIABLE\", \"SUM\", \"MIN\", \"MAX\")\n var_inform = \"{:20}{:15d}{:15d}{:15d}\\n\"\n diffs = False\n diff_list_str = \"\" # string to hold all of the variables with errors\n new_vars = False\n new_var_list_str = \"\" # srint to hold all of the unexpected variables\n for var in sorted(data.columns):\n sum = int(data[var].sum())\n min = int(data[var].min())\n max = int(data[var].max())\n actual_txt += var_inform.format(var, sum, min, max)\n try:\n var_diff = (\n sum != expected_sum[var]\n or min != expected_min[var]\n or max != expected_max[var]\n )\n if var_diff:\n diffs = True\n diff_list_str += var + \"\\n\"\n except KeyError:\n # if the variable is not expected, print a new message\n new_vars = True\n new_var_list_str += var + \"\\n\"\n\n # check for any missing variables\n missing_vars = False\n missing_vars_set = set(expected_sum.keys()) - set(data.columns)\n if missing_vars_set:\n missing_vars = True\n missing_vars_str = \"\\n\".join(v for v in missing_vars_set)\n\n # if there is an error, write the actual file\n if diffs or new_vars or missing_vars:\n msg = \"{}\\n\".format(dataname.upper)\n actual_file_name = \"{}_agg_actual.txt\".format(dataname)\n actual_file_path = os.path.join(test_path, actual_file_name)\n with open(actual_file_path, \"w\") as afile:\n afile.write(actual_txt)\n # modify error message based on which errors are raised\n if diffs:\n diff_msg = \"Aggregate results differ for following variables:\\n\"\n diff_msg += diff_list_str\n msg += diff_msg + \"\\n\"\n if new_vars:\n new_msg = \"The following unexpected variables were discoverd:\\n\"\n new_msg += new_var_list_str\n msg += new_msg + \"\\n\"\n if missing_vars:\n msg += \"The following expected variables are missing in the data:\"\n msg += \"\\n\" + missing_vars_str + \"\\n\\n\"\n msg += \"If new results OK, copy {} to {}\".format(\n actual_file_name, expected_file_name\n )\n raise ValueError(msg)\n\n\ndef check_cps_benefits(data, expect_ben_stat):\n \"\"\"\n Test benefit variables in CPS data.\n\n expect_ben_stat is a dictionary containing the expected minimum, maximum,\n and average value for each of the benefits in the CPS. 
That information\n can be found in cps_benefits_metadata.json\n \"\"\"\n BNAMES = [\"mcare\", \"mcaid\", \"ssi\", \"snap\", \"wic\", \"tanf\", \"housing\", \"vet\", \"other\"]\n # # compare actual and expected benefit statistics\n error_msg = \"\"\n wgt = data[\"s006\"] * 0.01\n for bname in BNAMES:\n col = \"{}_ben\".format(bname)\n assert col in data.columns\n ben = data[col]\n minben = ben.min()\n maxben = ben.max()\n pos = ben > 0\n minpben = ben[pos].min()\n avgben = (ben[pos] * wgt[pos]).sum() / wgt[pos].sum()\n if not np.allclose([minben], [0], rtol=0, atol=0.1):\n msg = \"\\nCPS {}_ben minben={} != 0\"\n error_msg += msg.format(bname, minben)\n exp_minpben = expect_ben_stat[bname][\"min\"]\n if not np.allclose([minpben], [exp_minpben], rtol=0, atol=0.1):\n msg = \"\\nCPS {}_ben minpben={} != {}\"\n error_msg += msg.format(bname, minpben, exp_minpben)\n exp_maxben = expect_ben_stat[bname][\"max\"]\n if not np.allclose([maxben], [exp_maxben], rtol=0, atol=0.1):\n msg = \"\\nCPS {}_ben maxben={} != {}\"\n error_msg += msg.format(bname, maxben, exp_maxben)\n expect_avgben = expect_ben_stat[bname][\"avg\"]\n if not np.allclose([avgben], [expect_avgben], rtol=0, atol=1.0):\n msg = \"\\nCPS {}_ben avgben={:.2f} != {:.2f}\"\n error_msg += msg.format(bname, avgben, expect_avgben)\n # check that mc??? benefits are actuarial values of health insurance\n if bname == \"mcare\" or bname == \"mcaid\":\n ratio = float(maxben) / minpben\n expect_ratio = round(ratio)\n if not np.allclose([ratio], [expect_ratio], rtol=0, atol=0.001):\n msg = \"\\nCPS {}_ben ratio={:.6f} != {:.0f}\"\n error_msg += msg.format(bname, ratio, expect_ratio)\n if error_msg:\n raise ValueError(error_msg)\n\n\[email protected]_pufcsv\ndef test_puf_unique_recid(puf, metadata, test_path):\n unique_recid(puf, \"PUF\")\n\n\[email protected]_pufcsv\ndef test_puf_min_max(puf, metadata):\n min_max(puf, metadata, \"puf\")\n\n\[email protected]_pufcsv\ndef test_puf_relationships(puf):\n relationships(puf, \"PUF\")\n\n\[email protected]_pufcsv\ndef test_puf_variables(puf, test_path):\n variable_check(test_path, puf, \"puf\")\n\n\ndef test_cps_unique_recid(cps):\n unique_recid(cps, \"CPS\")\n\n\ndef test_cps_min_max(cps, metadata):\n min_max(cps, metadata, \"cps\")\n\n\ndef test_cps_relationships(cps):\n relationships(cps, \"CPS\")\n\n\ndef test_cps_variables(cps, test_path):\n variable_check(test_path, cps, \"cps\")\n\n\ndef test_cps_benefits(cps, cps_benefit_metadata):\n check_cps_benefits(cps, cps_benefit_metadata)\n"
] | [
[
"numpy.all",
"numpy.allclose",
"numpy.unique"
]
] |
AaronRegan/ObjectTracker | [
"d834f67963e7659e693df0792aadcfeecb4a2b21"
] | [
"hog.py"
] | [
"# import the necessary packages\nfrom __future__ import print_function\nfrom non_max_suppression import non_max_suppression\nfrom myqueue import myqueue\nfrom frames import frames\nfrom object import Object\nimport numpy as np\nimport argparse\nimport datetime\nimport imutils\nimport cv2\nimport time\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-v\", \"--video\", help=\"path to the video file\")\nap.add_argument(\"-a\", \"--min-area\", type=int, default=500, help=\"minimum area size\")\nargs = vars(ap.parse_args())\n\nif args.get(\"video\", None) is None:\n camera = cv2.VideoCapture(0)\n# otherwise, we are reading from a video file\nelse:\n print(\"[INFO] starting video file thread...\")\n camera = myqueue(args[\"video\"]).start()\n time.sleep(1.0)\n\ni = 0\ncenterX = 0\ncenterY = 0\nobjList = []\nmeas = []\npred = []\nmp = np.array((2, 1), np.float32) # measurement\ntp = np.zeros((2, 1), np.float32) # tracked / prediction\n\nkalman = cv2.KalmanFilter(4, 2)\nkalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)\nkalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)\nkalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03\n\n\ndef onPed(x, y):\n global mp, meas\n mp = np.array([[np.float32(x)], [np.float32(y)]])\n meas.append((x, y))\n\n\ndef kalPredict(mp):\n global tp, pred\n kalman.correct(mp)\n tp = kalman.predict()\n pred.append((int(tp[0]), int(tp[1])))\n\n\ndef paint(tp, xA, yA, xB, yB):\n global frame, pred\n # cv2.circle(frame, ((tp[0]), (tp[1])), 3, (0, 0, 255), -1)\n cv2.rectangle(frame, ((tp[0]) - ((xB - xA) / 2), (tp[1]) + (yB - yA) / 2),\n (((tp[0]) + ((xB - xA) / 2)), ((tp[1]) - (yB - yA) / 2)), (0, 0, 255), 2)\n\nfps = frames().start()\n# initialize the HOG descriptor/person detector\nhog = cv2.HOGDescriptor()\nhog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n# loop over the image paths\nwhile camera.more():\n frame = camera.read()\n frame = imutils.resize(frame, width=600)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # start = datetime.datetime.now()\n # detect people in the image\n (rects, weights) = hog.detectMultiScale(frame, winStride=(8, 8),\n padding=(32, 32), scale=1.05)\n # print(\"[INFO] detection took: {}\".format(\n #(datetime.datetime.now() - start).total_seconds()))\n # apply non-maxima suppression to the bounding boxes using a\n # fairly large overlap threshold to try to maintain overlapping\n # boxes that are still people\n rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\n pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)\n\n # draw the final bounding boxes\n for (xA, yA, xB, yB) in pick:\n i = i+1\n cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)\n centerX = (xB + xA) / 2\n centerY = (yB + yA) / 2\n obj = Object(centerX, centerY, i)\n objList.append(obj)\n onPed(centerX, centerY)\n kalPredict(mp)\n paint(tp, xA, yA, xB, yB)\n\n cv2.putText(frame, \"Queue Size: {}\".format(camera.Q.qsize()),\n (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n\n peds_found = \"Found \" + str(len(pick)) + \" Pedestrians\"\n cv2.putText(frame, peds_found, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n\n # show the output images\n cv2.imshow(\"HOG\", frame)\n cv2.waitKey(1)\n fps.update()\n k = cv2.waitKey(27) & 0xff\n if k == 27:\n break\nfps.stop()\nfor objects in range(len(objList) - 1):\n print(str(objList[objects]))\nprint(\"[INFO] elapsed time: 
{:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\ncv2.destroyAllWindows()\ncamera.stop()\n"
] | [
[
"numpy.array",
"numpy.float32",
"numpy.zeros"
]
] |
Struth-Rourke/packagedata-struth-rourke | [
"b8a0d734b87f21047678a31b1461fba237d150f0"
] | [
"newpandaspackage/pandas_package.py"
] | [
"import pandas as pd\nfrom newpandaspackage.new_mod import train_val_test_func, date_splitting, add_state_names, list_series_df_col, StateWrangle\n\n\n## Function 1 (train_val_test_func)\n# Mock df\ndfx = pd.DataFrame({'a': [1,2,3,4,5,6,7,8,9,10], 'b': [1,2,3,4,5,6,7,8,9,10]})\n\n# Calling function\ntrain_val_test_func(dfx)\n\n\n\n## Function 2 (date_splitting)\n# Mock df\ndfy = pd.DataFrame({'a': ['01/01/2019', '01/02/2019', '01/03/2019']})\n\n# Calling function\ndate_splitting(dfy)\n\n\n\n## Function 3 (add_state_names)\n# Mock df\ndfz = ['CA', 'CO', 'CT', 'DC', 'TX']\nprint(dfz)\n\n# Calling and assigning function\ndfz2 = add_state_names(dfz)\nprint(dfz2)\n\n\n\n## Function 4 (list_series_df_col)\n# Mock df\ndfq = [1, 2, 3, 4, 5]\n# Mock header\nheader = ['Numbers']\n\n# Calling and printing the Function\nprint(list_series_df_col(dfq, header))\n\n\n\n## Class / Methods / Functions\n# Mock lists\nlst1 = ['NY', 'NJ', 'CT', 'RI', 'VT']\nlst2 = [1, 2, 3, 4, 5]\nheaders = ['Ab', 'Name', 'Number']\n\nx = StateWrangle(lst1, lst2, headers)\nprint(x.lst1)\nprint(x.add_state_names())\nprint(x.list_series_df_col())\n"
] | [
[
"pandas.DataFrame"
]
] |
nateanl/recipes-1 | [
"3b46a7479508608f73b6f24deffdc8fcffd25ee5"
] | [
"torchrecipes/vision/data/modules/tests/test_mnist_data_module.py"
] | [
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n#!/usr/bin/env python3\n\nimport unittest\nfrom tempfile import TemporaryDirectory\n\nimport torch\nfrom hydra.core.config_store import ConfigStore\nfrom hydra.experimental import compose, initialize\nfrom hydra.utils import instantiate\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torchrecipes.core.test_utils.conf_utils import conf_asdict\nfrom torchrecipes.vision.data.modules.mnist_data_module import (\n MNISTDataModule,\n MNISTDataModuleConf,\n)\nfrom torchrecipes.vision.data.transforms import build_transforms\nfrom torchvision.datasets import MNIST\n\n\nclass TestMNISTDataModule(unittest.TestCase):\n data_path: str\n\n @classmethod\n def setUpClass(cls) -> None:\n data_path_ctx = TemporaryDirectory()\n cls.addClassCleanup(data_path_ctx.cleanup)\n cls.data_path = data_path_ctx.name\n\n # download the dataset\n MNIST(cls.data_path, train=True, download=True)\n MNIST(cls.data_path, train=False, download=True)\n\n def test_misconfiguration(self) -> None:\n \"\"\"Tests init configuration validation.\"\"\"\n with self.assertRaises(MisconfigurationException):\n MNISTDataModule(val_split=-1)\n\n def test_dataloading(self) -> None:\n \"\"\"Tests loading batches from the dataset.\"\"\"\n module = MNISTDataModule(data_dir=self.data_path, batch_size=1)\n module.prepare_data()\n module.setup()\n dataloder = module.train_dataloader()\n batch = next(iter(dataloder))\n # batch contains images and labels\n self.assertEqual(len(batch), 2)\n self.assertEqual(len(batch[0]), 1)\n\n def test_split_dataset(self) -> None:\n \"\"\"Tests splitting the full dataset into train and validation set.\"\"\"\n module = MNISTDataModule(data_dir=self.data_path, val_split=100)\n module.prepare_data()\n module.setup()\n # pyre-ignore[6]: dataset has length\n self.assertEqual(len(module.datasets[\"train\"]), 59900)\n # pyre-ignore[6]: dataset has length\n self.assertEqual(len(module.datasets[\"val\"]), 100)\n\n def test_transforms(self) -> None:\n \"\"\"Tests images being transformed correctly.\"\"\"\n transform_config = [\n {\n \"_target_\": \"torchvision.transforms.Resize\",\n \"size\": 64,\n },\n {\n \"_target_\": \"torchvision.transforms.ToTensor\",\n },\n ]\n transforms = build_transforms(transform_config)\n module = MNISTDataModule(\n data_dir=self.data_path, batch_size=1, train_transforms=transforms\n )\n module.prepare_data()\n module.setup()\n dataloder = module.train_dataloader()\n image, _ = next(iter(dataloder))\n self.assertEqual(image.size(), torch.Size([1, 1, 64, 64]))\n\n def test_module_conf_dataclass(self) -> None:\n \"\"\"Tests creating module with dataclass.\"\"\"\n module = MNISTDataModule(**conf_asdict(MNISTDataModuleConf()))\n self.assertIsInstance(module, MNISTDataModule)\n\n def test_init_with_hydra(self) -> None:\n \"\"\"Tests creating module with Hydra.\"\"\"\n # Set up Hydra configs\n cs = ConfigStore.instance()\n cs.store(name=\"mnist_data_module\", node=MNISTDataModuleConf)\n with initialize():\n test_conf = compose(config_name=\"mnist_data_module\")\n mnist_data_module = instantiate(test_conf)\n self.assertIsInstance(mnist_data_module, MNISTDataModule)\n"
] | [
[
"torch.Size"
]
] |
Joreshic/python-for-android | [
"c60e02d2e32e31a3a754838c51e9242cbadcd9e8"
] | [
"testapps/testapp_keyboard/main.py"
] | [
"print('main.py was successfully called')\n\nimport os\nprint('imported os')\n\nfrom kivy import platform\n\nif platform == 'android':\n print('contents of ./lib/python2.7/site-packages/ etc.')\n print(os.listdir('./lib'))\n print(os.listdir('./lib/python2.7'))\n print(os.listdir('./lib/python2.7/site-packages'))\n\n print('this dir is', os.path.abspath(os.curdir))\n\n print('contents of this dir', os.listdir('./'))\n\n with open('./lib/python2.7/site-packages/kivy/app.pyo', 'rb') as fileh:\n print('app.pyo size is', len(fileh.read()))\n\nimport sys\nprint('pythonpath is', sys.path)\n\nimport kivy\nprint('imported kivy')\nprint('file is', kivy.__file__)\n\nfrom kivy.app import App\n\nfrom kivy.lang import Builder\nfrom kivy.properties import StringProperty\n\nfrom kivy.uix.popup import Popup\nfrom kivy.clock import Clock\n\nprint('Imported kivy')\nfrom kivy.utils import platform\nprint('platform is', platform)\n\n\nkv = '''\n#:import Metrics kivy.metrics.Metrics\n#:import Window kivy.core.window.Window\n\n<FixedSizeButton@Button>:\n size_hint_y: None\n height: dp(60)\n\n\nBoxLayout:\n orientation: 'vertical'\n BoxLayout:\n size_hint_y: None\n height: dp(50)\n orientation: 'horizontal'\n Button:\n text: 'None'\n on_press: Window.softinput_mode = ''\n Button:\n text: 'pan'\n on_press: Window.softinput_mode = 'pan'\n Button:\n text: 'below_target'\n on_press: Window.softinput_mode = 'below_target'\n Button:\n text: 'resize'\n on_press: Window.softinput_mode = 'resize'\n Widget:\n Scatter:\n id: scatter\n size_hint: None, None\n size: dp(300), dp(80)\n on_parent: self.pos = (300, 100)\n BoxLayout:\n size: scatter.size\n orientation: 'horizontal'\n canvas:\n Color:\n rgba: 1, 0, 0, 1\n Rectangle:\n pos: 0, 0\n size: self.size\n Widget:\n size_hint_x: None\n width: dp(30)\n TextInput:\n text: 'type in me'\n'''\n\n\nclass ErrorPopup(Popup):\n error_text = StringProperty('')\n\ndef raise_error(error):\n print('ERROR:', error)\n ErrorPopup(error_text=error).open()\n\nclass TestApp(App):\n def build(self):\n root = Builder.load_string(kv)\n Clock.schedule_interval(self.print_something, 2)\n # Clock.schedule_interval(self.test_pyjnius, 5)\n print('testing metrics')\n from kivy.metrics import Metrics\n print('dpi is', Metrics.dpi)\n print('density is', Metrics.density)\n print('fontscale is', Metrics.fontscale)\n return root\n\n def print_something(self, *args):\n print('App print tick', Clock.get_boottime())\n\n def on_pause(self):\n return True\n\n def test_pyjnius(self, *args):\n try:\n from jnius import autoclass\n except ImportError:\n raise_error('Could not import pyjnius')\n return\n \n print('Attempting to vibrate with pyjnius')\n # PythonActivity = autoclass('org.renpy.android.PythonActivity')\n # activity = PythonActivity.mActivity\n PythonActivity = autoclass('org.kivy.android.PythonActivity')\n activity = PythonActivity.mActivity\n Intent = autoclass('android.content.Intent')\n Context = autoclass('android.content.Context')\n vibrator = activity.getSystemService(Context.VIBRATOR_SERVICE)\n\n vibrator.vibrate(1000)\n\n def test_ctypes(self, *args):\n import ctypes\n \n def test_numpy(self, *args):\n import numpy\n\n print(numpy.zeros(5))\n print(numpy.arange(5))\n print(numpy.random.random((3, 3)))\n\nTestApp().run()\n"
] | [
[
"numpy.random.random",
"numpy.arange",
"numpy.zeros"
]
] |
zzdang/cascade_rcnn_gluon | [
"b4018001719ec56a688be26c2aab18be664e4bdd"
] | [
"scripts/detection/faster_rcnn/demo_faster_rcnn.py"
] | [
"\"\"\"Faster RCNN Demo script.\"\"\"\nimport os\nimport argparse\nimport mxnet as mx\nimport gluoncv as gcv\nfrom gluoncv.data.transforms import presets\nfrom matplotlib import pyplot as plt\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Test with Faster RCNN networks.')\n parser.add_argument('--network', type=str, default='faster_rcnn_resnet50_v1b_voc',\n help=\"Faster RCNN full network name\")\n parser.add_argument('--images', type=str, default='',\n help='Test images, use comma to split multiple.')\n parser.add_argument('--gpus', type=str, default='0',\n help='Training with GPUs, you can specify 1,3 for example.')\n parser.add_argument('--pretrained', type=str, default='True',\n help='Load weights from previously saved parameters. You can specify parameter file name.')\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n # context list\n ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]\n ctx = [mx.cpu()] if not ctx else ctx\n\n # grab some image if not specified\n if not args.images.strip():\n gcv.utils.download('https://github.com/dmlc/web-data/blob/master/' +\n 'gluoncv/detection/biking.jpg?raw=true', 'biking.jpg')\n image_list = ['biking.jpg']\n else:\n image_list = [x.strip() for x in args.images.split(',') if x.strip()]\n\n if args.pretrained.lower() in ['true', '1', 'yes', 't']:\n net = gcv.model_zoo.get_model(args.network, pretrained=True)\n else:\n net = gcv.model_zoo.get_model(args.network, pretrained=False)\n net.load_parameters(args.pretrained)\n net.set_nms(0.3, 200)\n\n ax = None\n for image in image_list:\n x, img = presets.rcnn.load_test(image, short=net.short, max_size=net.max_size)\n ids, scores, bboxes = [xx[0].asnumpy() for xx in net(x)]\n ax = gcv.utils.viz.plot_bbox(img, bboxes, scores, ids,\n class_names=net.classes, ax=ax)\n plt.show()"
] | [
[
"matplotlib.pyplot.show"
]
] |
PitPietro/pascal-triangle | [
"eb81e9fc4728f4e09a631922c470201a9f897195"
] | [
"charts/lines_bars_markers/filling/fill_area_between_lines.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nhttps://matplotlib.org/gallery/lines_bars_and_markers/fill_between_demo.html#sphx-glr-gallery-lines-bars-and-markers-fill-between-demo-py\n\"\"\"\n\n\ndef fill_area_1():\n x = np.arange(0.0, 2, 0.01)\n y1 = np.sin(2 * np.pi * x)\n y2 = 0.8 * np.sin(4 * np.pi * x)\n\n fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(6, 6))\n\n ax1.fill_between(x, y1)\n ax1.set_title('fill between y1 and 0')\n\n ax2.fill_between(x, y1, 1)\n ax2.set_title('fill between y1 and 1')\n\n ax3.fill_between(x, y1, y2)\n ax3.set_title('fill between y1 and y2')\n ax3.set_xlabel('x')\n fig.tight_layout()\n plt.show()\n\n\ndef confidence_bands():\n \"\"\"\n A common application for fill_between is the indication of confidence bands.\n fill_between uses the colors of the color cycle as the fill color. These may be a bit strong when\n applied to fill areas. It is therefore often a good practice to lighten the color by making the\n area semi-transparent using alpha.\n\n :return:\n \"\"\"\n n = 21\n x = np.linspace(0, 10, 11)\n y = [3.9, 4.4, 10.8, 10.3, 11.2, 13.1, 14.1, 9.9, 13.9, 15.1, 12.5]\n\n # fit a linear curve an estimate its y-values and their error.\n a, b = np.polyfit(x, y, deg=1)\n y_est = a * x + b\n y_err = x.std() * np.sqrt(1 / len(x) +\n (x - x.mean()) ** 2 / np.sum((x - x.mean()) ** 2))\n\n fig, ax = plt.subplots()\n ax.plot(x, y_est, '-')\n ax.fill_between(x, y_est - y_err, y_est + y_err, alpha=0.2)\n ax.plot(x, y, 'o', color='tab:brown')\n plt.show()\n\n\ndef fill_horizontal_region():\n \"\"\"\n > Selectively filling horizontal regions\n The parameter where allows to specify the x-ranges to fill. It's a boolean array with the same size as x.\n Only x-ranges of contiguous True sequences are filled. As a result the range between neighboring True and\n False values is never filled. This often undesired when the data points should represent a contiguous quantity.\n It is therefore recommended to set interpolate=True unless the x-distance of the data points is fine enough so\n that the above effect is not noticeable. Interpolation approximates the actual x position at which the where\n condition will change and extends the filling up to there.\n\n Note:\n Similar gaps will occur if y1 or y2 are masked arrays. Since missing values cannot be\n approximated, interpolate has no effect in this case. The gaps around masked values\n can only be reduced by adding more data points close to the masked values.\n :return:\n \"\"\"\n x = np.array([0, 1, 2, 3])\n y1 = np.array([0.8, 0.8, 0.2, 0.2])\n y2 = np.array([0, 0, 1, 1])\n\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\n ax1.set_title('interpolation=False')\n ax1.plot(x, y1, 'o--')\n ax1.plot(x, y2, 'o--')\n ax1.fill_between(x, y1, y2, where=(y1 > y2), color='C0', alpha=0.3)\n ax1.fill_between(x, y1, y2, where=(y1 < y2), color='C1', alpha=0.3)\n\n ax2.set_title('interpolation=True')\n ax2.plot(x, y1, 'o--')\n ax2.plot(x, y2, 'o--')\n ax2.fill_between(x, y1, y2, where=(y1 > y2), color='C0', alpha=0.3,\n interpolate=True)\n ax2.fill_between(x, y1, y2, where=(y1 <= y2), color='C1', alpha=0.3,\n interpolate=True)\n fig.tight_layout()\n plt.show()\n\n\ndef mark_horizontal_region():\n \"\"\"\n > Selectively marking horizontal regions across the whole Axes\n The same selection mechanism can be applied to fill the full vertical height of the axes. 
To be independent of\n y-limits, we add a transform that interprets the x-values in data coordinates and the y-values in axes coordinates.\n The following example marks the regions in which the y-data are above a given threshold.\n :return:\n \"\"\"\n fig, ax = plt.subplots()\n x = np.arange(0, 4 * np.pi, 0.01)\n y = np.sin(x)\n ax.plot(x, y, color='black')\n\n threshold = 0.75\n ax.axhline(threshold, color='green', lw=2, alpha=0.7)\n ax.fill_between(\n x,\n 0,\n 1,\n where=y > threshold,\n color='green',\n alpha=0.5,\n transform=ax.get_xaxis_transform()\n )\n plt.show()\n\n\nif __name__ == '__main__':\n mark_horizontal_region()\n"
] | [
[
"numpy.sin",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.polyfit",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
niddal-imam/End-2-End-image-spam-detector-pixellink | [
"a546b6d55ae611806eef7182c4648be6cce73580"
] | [
"pixel_crnn/tool/create_dataset.py"
] | [
"import os\nimport lmdb\nimport cv2\nimport numpy as np\nimport argparse\nimport shutil\nimport sys\n\ndef checkImageIsValid(imageBin):\n if imageBin is None:\n return False\n \n try:\n imageBuf = np.fromstring(imageBin, dtype=np.uint8)\n img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)\n imgH, imgW = img.shape[0], img.shape[1]\n except:\n return False\n else:\n if imgH * imgW == 0:\n return False\n \n return True\n\n\ndef writeCache(env, cache):\n with env.begin(write=True) as txn:\n for k, v in cache.items():\n if type(k) == str:\n k = k.encode()\n if type(v) == str:\n v = v.encode()\n txn.put(k,v)\n\ndef createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):\n \"\"\"\n Create LMDB dataset for CRNN training.\n\n ARGS:\n outputPath : LMDB output path\n imagePathList : list of image path\n labelList : list of corresponding groundtruth texts\n lexiconList : (optional) list of lexicon lists\n checkValid : if true, check the validity of every image\n \"\"\"\n # If lmdb file already exists, remove it. Or the new data will add to it.\n if os.path.exists(outputPath):\n shutil.rmtree(outputPath)\n os.makedirs(outputPath)\n else:\n os.makedirs(outputPath)\n\n assert (len(imagePathList) == len(labelList))\n nSamples = len(imagePathList)\n env = lmdb.open(outputPath, map_size=1099511627776)\n cache = {}\n cnt = 1\n for i in range(nSamples):\n imagePath = imagePathList[i]\n label = labelList[i]\n \n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n with open(imagePath, 'rb') as f:\n imageBin = f.read()\n if checkValid:\n if not checkImageIsValid(imageBin):\n print('%s is not a valid image' % imagePath)\n continue\n\n imageKey = 'image-%09d' % cnt\n labelKey = 'label-%09d' % cnt\n cache[imageKey] = imageBin\n cache[labelKey] = label\n if lexiconList:\n lexiconKey = 'lexicon-%09d' % cnt\n cache[lexiconKey] = ' '.join(lexiconList[i])\n if cnt % 1000 == 0:\n writeCache(env, cache)\n cache = {}\n print('Written %d / %d' % (cnt, nSamples))\n cnt += 1\n nSamples = cnt-1\n cache['num-samples'] = str(nSamples)\n writeCache(env, cache)\n env.close()\n print('Created dataset with %d samples' % nSamples)\n\ndef read_data_from_folder(folder_path):\n image_path_list = []\n label_list = []\n pics = os.listdir(folder_path)\n pics.sort(key = lambda i: len(i))\n for pic in pics:\n image_path_list.append(folder_path + '/' + pic)\n label_list.append(pic.split('_')[0])\n return image_path_list, label_list\n\ndef read_data_from_file(file_path):\n image_path_list = []\n label_list = []\n f = open(file_path)\n while True:\n line1 = f.readline()\n line2 = f.readline() \n if not line1 or not line2:\n break\n line1 = line1.replace('\\r', '').replace('\\n', '')\n line2 = line2.replace('\\r', '').replace('\\n', '')\n image_path_list.append(line1)\n label_list.append(line2)\n\n return image_path_list, label_list\n\ndef show_demo(demo_number, image_path_list, label_list):\n print ('\\nShow some demo to prevent creating wrong lmdb data')\n print ('The first line is the path to image and the second line is the image label')\n for i in range(demo_number):\n print ('image: %s\\nlabel: %s\\n' % (image_path_list[i], label_list[i]))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--out', type = str, required = True, help = 'lmdb data output path')\n parser.add_argument('--folder', type = str, help = 'path to folder which contains the images')\n parser.add_argument('--file', type = str, help = 'path to file which contains the 
image path and label')\n    args = parser.parse_args()\n    \n    if args.file is not None:\n        image_path_list, label_list = read_data_from_file(args.file)\n        createDataset(args.out, image_path_list, label_list)\n        show_demo(2, image_path_list, label_list)\n    elif args.folder is not None:\n        image_path_list, label_list = read_data_from_folder(args.folder)\n        createDataset(args.out, image_path_list, label_list)\n        show_demo(2, image_path_list, label_list)\n    else:\n        print ('Please use --folder or --file to assign the input. Use -h to see more.')\n        sys.exit()"
] | [
[
"numpy.fromstring"
]
] |
PraveenAdepu/gcp | [
"de80ff86102cfa2d58dca951cd5fc372ec043462"
] | [
"vertex-trainer/trainer/task.py"
] | [
"# https://codelabs.developers.google.com/codelabs/vertex-ai-custom-code-training#3\n\nimport numpy as np\nimport pandas as pd\nimport io\n\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score\nimport pickle\nfrom google.cloud import storage\n\n\"\"\"\nfrom src.feature_engineering import (\n dummy_dict,\n internet_dict,\n train_yesno_cols,\n internet_cols,\n data_transformations,\n)\n\"\"\"\n\ndummy_dict = {\"Yes\": 1, \"No\": 0}\ninternet_dict = {\"No\": 0, \"No internet service\": 1, \"Yes\": 2}\ntrain_yesno_cols = [\n \"Partner\",\n \"Dependents\",\n \"PhoneService\",\n \"PaperlessBilling\",\n \"Churn\",\n]\ninference_yesno_cols = [\"Partner\", \"Dependents\", \"PhoneService\", \"PaperlessBilling\"]\ninternet_cols = [\n \"OnlineSecurity\",\n \"OnlineBackup\",\n \"DeviceProtection\",\n \"TechSupport\",\n \"StreamingTV\",\n \"StreamingMovies\",\n]\n\n# preprocessing categorical features\ndef data_transformations(data, dummy_dict, internet_dict, yesno_cols, internet_cols):\n\n data[yesno_cols] = data[yesno_cols].apply(lambda x: x.map(dummy_dict))\n data[internet_cols] = data[internet_cols].apply(lambda x: x.map(internet_dict))\n\n # manual map\n data[\"gender\"] = data[\"gender\"].map({\"Female\": 0, \"Male\": 1})\n data[\"MultipleLines\"] = data[\"MultipleLines\"].map(\n {\"No\": 0, \"No phone service\": 1, \"Yes\": 2}\n )\n data[\"InternetService\"] = data[\"InternetService\"].map(\n {\"DSL\": 0, \"Fiber optic\": 1, \"No\": 2}\n )\n data[\"Contract\"] = data[\"Contract\"].map(\n {\"Month-to-month\": 0, \"One year\": 1, \"Two year\": 2}\n )\n data[\"PaymentMethod\"] = data[\"PaymentMethod\"].map(\n {\n \"Bank transfer (automatic)\": 0,\n \"Credit card (automatic)\": 1,\n \"Electronic check\": 2,\n \"Mailed check\": 3,\n }\n )\n return data\n\n\n# data = pd.read_csv(\"gs://prav_timeseries_features/data/trainingSet.csv\")\n\n# storage_client = storage.Client.from_service_account_json(\"statscope-1c023b909ea7.json\")\nstorage_client = storage.Client()\nbucket_name = \"prav_timeseries_features\"\nblob_name = \"data/trainingSet.csv\"\nsource_file_name = \"data/trainingSet.csv\"\n\n\ndef gcp_csv_to_df(storage_client, bucket_name, blob_name, source_file_name):\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(blob_name)\n data = blob.download_as_string()\n df = pd.read_csv(io.BytesIO(data))\n print(f\"Pulled down file from bucket {bucket_name}, file name: {source_file_name}\")\n return df\n\n\ndef df_to_gcp_csv(storage_client, df, bucket, blob_name, source_file_name):\n bucket = storage_client.bucket(bucket)\n blob = bucket.blob(blob_name)\n blob.upload_from_string(df.to_csv(), \"text/csv\")\n print(f\"DataFrame uploaded to bucket {bucket}, file name: {source_file_name}\")\n\n\ndata = gcp_csv_to_df(storage_client, bucket_name, blob_name, source_file_name=blob_name)\n\ndata.head()\ndata.shape\ndata[\"TotalCharges\"] = pd.to_numeric(data[\"TotalCharges\"], errors=\"coerce\")\ndata.isnull().sum()\n\ndata.dropna(inplace=True)\n\ndata.shape\n\ndata = data_transformations(\n data, dummy_dict, internet_dict, train_yesno_cols, internet_cols\n)\n\ndata.head()\n\n# modeling\ny = data[\"Churn\"].values\nX = data.drop(columns=[\"customerID\", \"Churn\"])\n\n# aim is not to build world class model, rather a simple model for pipeline build/testing\nX_train, 
X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=101\n)\nmodel_rf = RandomForestClassifier(\n n_estimators=1000,\n oob_score=True,\n n_jobs=-1,\n random_state=50,\n max_features=\"auto\",\n max_leaf_nodes=30,\n)\nmodel_rf.fit(X_train, y_train)\n\n# Make predictions\nprediction_test = model_rf.predict(X_test)\n\nX_test[\"predictions\"] = prediction_test\n\nbucket_name = \"prav_timeseries_features\"\nblob_name = \"data/predictionSet.csv\"\nsource_file_name = \"data/predictionSet.csv\"\n\ndf_to_gcp_csv(storage_client, X_test, bucket_name, blob_name, source_file_name)\n\n\n\"\"\"\npython setup.py sdist\n\ngsutil cp dist/trainer-0.1.tar.gz \"gs://prav_timeseries_features/data/trainer-0.1.tar.gz\"\n\"\"\"\n"
] | [
[
"sklearn.model_selection.train_test_split",
"pandas.to_numeric",
"pandas.set_option",
"sklearn.ensemble.RandomForestClassifier"
]
] |
ferdyandannes/Monocular-3D-Object-Detection | [
"85c424ce0ab386da9b30629819d63f7ec888c9c1"
] | [
"model/vgg16_1010.py"
] | [
"'''\nRefs:\n Very Deep Convolutional Networks for Large-Scale Image Recognition -- https://arxiv.org/abs/1409.1556\n'''\n\nimport tensorflow as tf\nlayers = tf.keras.layers\nreg = tf.keras.regularizers\n\nfrom config import config as cfg\n#from tensorflow.python.keras.layers import Lambda;\n#from tensorflow.python.keras.layers import Multiply\n\nfrom keras.layers import Multiply\n\nfrom keras.applications.vgg16 import VGG16\nfrom keras.models import Model\nfrom keras.layers import Flatten, Dense, Dropout, Reshape\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU\n\nfrom keras.layers import Input, Dense\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers.core import Flatten, Dropout, Reshape, Lambda\nfrom keras.models import Model\nfrom keras.applications.vgg16 import VGG16\nimport tensorflow as tf\n\ndef l2_normalize(x):\n return tf.nn.l2_normalize(x, axis=2)\n\ndef network():\n\n # inputs = layers.Input(shape=(cfg().norm_h, cfg().norm_w, 3))\n # inputs_depth = layers.Input(shape=(cfg().norm_h, cfg().norm_w, 3))\n\n # ##################################################### 1 ############################################################\n # # Block 1__\n # x = layers.Conv2D(64, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block1_conv1')(inputs)\n # x = layers.Activation('relu')(x)\n # x = layers.Conv2D(64, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block1_conv2')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.MaxPooling2D(strides=(2,2), name='block1_pool')(x)\n\n # # Block 2\n # x = layers.Conv2D(128, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block2_conv1')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.Conv2D(128, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block2_conv2')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.MaxPooling2D(strides=(2,2), name='block2_pool')(x)\n\n # # Block 3\n # x = layers.Conv2D(256, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block3_conv1')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.Conv2D(256, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block3_conv2')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.Conv2D(256, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block3_conv3')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.MaxPooling2D(strides=(2,2), name='block3_pool')(x)\n\n # # Block 4\n # x = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block4_conv1')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block4_conv2')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block4_conv3')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.MaxPooling2D(strides=(2,2), name='block4_pool')(x)\n\n # # Block 5\n # x = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block5_conv1')(x)\n # x 
= layers.Activation('relu')(x)\n # x = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block5_conv2')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block5_conv3')(x)\n # x = layers.Activation('relu')(x)\n # x = layers.MaxPooling2D(strides=(2,2), name='block5_pool')(x)\n\n # # layers.Flatten\n # #x = layers.Flatten(name='Flatten')(x)\n\n\n # ##################################################### 2 ############################################################\n # # Block 1__\n # y = layers.Conv2D(64, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block1_conv1_d')(inputs_depth)\n # y = layers.Activation('relu')(y)\n # y = layers.Conv2D(64, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block1_conv2_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.MaxPooling2D(strides=(2,2), name='block1_pool_d')(y)\n\n # # Block 2\n # y = layers.Conv2D(128, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block2_conv1_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.Conv2D(128, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block2_conv2_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.MaxPooling2D(strides=(2,2), name='block2_pool_d')(y)\n\n # # Block 3\n # y = layers.Conv2D(256, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block3_conv1_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.Conv2D(256, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block3_conv2_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.Conv2D(256, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block3_conv3_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.MaxPooling2D(strides=(2,2), name='block3_pool_d')(y)\n\n # # Block 4\n # y = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block4_conv1_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block4_conv2_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block4_conv3_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.MaxPooling2D(strides=(2,2), name='block4_pool_d')(y)\n\n # # Block 5\n # y = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block5_conv1_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block5_conv2_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.Conv2D(512, (3,3), padding='same', kernel_initializer='he_normal', kernel_regularizer=reg.l2(1e-4), name='block5_conv3_d')(y)\n # y = layers.Activation('relu')(y)\n # y = layers.MaxPooling2D(strides=(2,2), name='block5_pool_d')(y)\n\n # # layers.Flatten\n # #y = layers.Flatten(name='Flatten_d')(y)\n\n\n vgg16_model_rgb = VGG16(include_top=False, weights='imagenet', input_shape=(224,224,3))\n\n for 
layer_rgb in vgg16_model_rgb.layers:\n layer_rgb.trainable = False\n\n for layer_rgb in vgg16_model_rgb.layers:\n print(layer_rgb, layer_rgb.trainable)\n\n\n vgg16_model_depth = VGG16(include_top=False, weights='imagenet', input_shape=(224,224,3))\n\n for layer_depth in vgg16_model_depth.layers:\n layer_depth.name = layer_depth.name + str(\"_2\")\n\n for layer_depth in vgg16_model_depth.layers:\n layer_depth.trainable = False\n\n for layer_depth in vgg16_model_depth.layers:\n print(layer_depth, layer_depth.trainable)\n\n ############################################### COMBINE ##################################################\n # xy = layers.Concatenate()([x, y])\n xy = Multiply()([vgg16_model_rgb.output, vgg16_model_depth.output])\n xy = Flatten(name='Flatten_d')(xy)\n\n # Dimensions branch\n dimensions = Dense(512)(xy)\n dimensions = LeakyReLU(alpha=0.1)(dimensions)\n dimensions = Dropout(0.5)(dimensions)\n dimensions = Dense(3, name='dimensions')(dimensions)\n\n # Orientation branch\n orientation = Dense(256)(xy)\n orientation = LeakyReLU(alpha=0.1)(orientation)\n orientation = Dropout(0.5)(orientation)\n orientation = Dense(cfg().bin * 2)(orientation)\n #orientation = LeakyReLU(alpha=0.1)(orientation)\n orientation = Reshape((cfg().bin, -1))(orientation)\n orientation = Lambda(l2_normalize, name='orientation')(orientation)\n\n # Confidence branch\n confidence = Dense(256)(xy)\n confidence = LeakyReLU(alpha=0.1)(confidence)\n confidence = Dropout(0.5)(confidence)\n confidence = Dense(cfg().bin, activation='softmax', name='confidence')(confidence)\n\n print(\"a\")\n\n # Build model\n model = Model([vgg16_model_rgb.input, vgg16_model_depth.input], [dimensions, orientation, confidence])\n model.summary()\n\n return model\n"
] | [
[
"tensorflow.nn.l2_normalize"
]
] |
zhangkuantian/Spark | [
"4f43421a5b33988a841c49d11d8b916e9d4414f4"
] | [
"python/pyspark/pandas/groupby.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA wrapper for GroupedData to behave similar to pandas GroupBy.\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nimport inspect\nfrom collections import defaultdict, namedtuple\nfrom distutils.version import LooseVersion\nfrom functools import partial\nfrom itertools import product\nfrom typing import (\n Any,\n Callable,\n Dict,\n Generic,\n Iterator,\n Mapping,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n cast,\n TYPE_CHECKING,\n)\nimport warnings\n\nimport pandas as pd\nfrom pandas.api.types import is_hashable, is_list_like # type: ignore[attr-defined]\n\nif LooseVersion(pd.__version__) >= LooseVersion(\"1.3.0\"):\n from pandas.core.common import _builtin_table # type: ignore[attr-defined]\nelse:\n from pandas.core.base import SelectionMixin\n\n _builtin_table = SelectionMixin._builtin_table # type: ignore[attr-defined]\n\nfrom pyspark import SparkContext\nfrom pyspark.sql import Column, DataFrame as SparkDataFrame, Window, functions as F\nfrom pyspark.sql.types import (\n BooleanType,\n DataType,\n NumericType,\n StructField,\n StructType,\n StringType,\n)\n\nfrom pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.\nfrom pyspark.pandas._typing import Axis, FrameLike, Label, Name\nfrom pyspark.pandas.typedef import infer_return_type, DataFrameType, ScalarType, SeriesType\nfrom pyspark.pandas.frame import DataFrame\nfrom pyspark.pandas.internal import (\n InternalField,\n InternalFrame,\n HIDDEN_COLUMNS,\n NATURAL_ORDER_COLUMN_NAME,\n SPARK_INDEX_NAME_FORMAT,\n SPARK_DEFAULT_SERIES_NAME,\n SPARK_INDEX_NAME_PATTERN,\n)\nfrom pyspark.pandas.missing.groupby import (\n MissingPandasLikeDataFrameGroupBy,\n MissingPandasLikeSeriesGroupBy,\n)\nfrom pyspark.pandas.series import Series, first_series\nfrom pyspark.pandas.spark import functions as SF\nfrom pyspark.pandas.config import get_option\nfrom pyspark.pandas.utils import (\n align_diff_frames,\n is_name_like_tuple,\n is_name_like_value,\n name_like_string,\n same_anchor,\n scol_for,\n verify_temp_column_name,\n log_advice,\n)\nfrom pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale\nfrom pyspark.pandas.exceptions import DataError\n\nif TYPE_CHECKING:\n from pyspark.pandas.window import RollingGroupby, ExpandingGroupby, ExponentialMovingGroupby\n\n\n# to keep it the same as pandas\nNamedAgg = namedtuple(\"NamedAgg\", [\"column\", \"aggfunc\"])\n\n\nclass GroupBy(Generic[FrameLike], metaclass=ABCMeta):\n \"\"\"\n :ivar _psdf: The parent dataframe that is used to perform the groupby\n :type _psdf: DataFrame\n :ivar _groupkeys: The list of keys that will be used to perform the grouping\n :type _groupkeys: List[Series]\n \"\"\"\n\n def __init__(\n self,\n 
psdf: DataFrame,\n groupkeys: List[Series],\n as_index: bool,\n dropna: bool,\n column_labels_to_exclude: Set[Label],\n agg_columns_selected: bool,\n agg_columns: List[Series],\n ):\n self._psdf = psdf\n self._groupkeys = groupkeys\n self._as_index = as_index\n self._dropna = dropna\n self._column_labels_to_exclude = column_labels_to_exclude\n self._agg_columns_selected = agg_columns_selected\n self._agg_columns = agg_columns\n\n @property\n def _groupkeys_scols(self) -> List[Column]:\n return [s.spark.column for s in self._groupkeys]\n\n @property\n def _agg_columns_scols(self) -> List[Column]:\n return [s.spark.column for s in self._agg_columns]\n\n @abstractmethod\n def _apply_series_op(\n self,\n op: Callable[[\"SeriesGroupBy\"], Series],\n should_resolve: bool = False,\n numeric_only: bool = False,\n ) -> FrameLike:\n pass\n\n @abstractmethod\n def _cleanup_and_return(self, psdf: DataFrame) -> FrameLike:\n pass\n\n # TODO: Series support is not implemented yet.\n # TODO: not all arguments are implemented comparing to pandas' for now.\n def aggregate(\n self,\n func_or_funcs: Optional[Union[str, List[str], Dict[Name, Union[str, List[str]]]]] = None,\n *args: Any,\n **kwargs: Any,\n ) -> DataFrame:\n \"\"\"Aggregate using one or more operations over the specified axis.\n\n Parameters\n ----------\n func_or_funcs : dict, str or list\n a dict mapping from column name (string) to\n aggregate functions (string or list of strings).\n\n Returns\n -------\n Series or DataFrame\n\n The return can be:\n\n * Series : when DataFrame.agg is called with a single function\n * DataFrame : when DataFrame.agg is called with several functions\n\n Return Series or DataFrame.\n\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 1, 2, 2],\n ... 'B': [1, 2, 3, 4],\n ... 'C': [0.362, 0.227, 1.267, -0.562]},\n ... columns=['A', 'B', 'C'])\n\n >>> df\n A B C\n 0 1 1 0.362\n 1 1 2 0.227\n 2 2 3 1.267\n 3 2 4 -0.562\n\n Different aggregations per column\n\n >>> aggregated = df.groupby('A').agg({'B': 'min', 'C': 'sum'})\n >>> aggregated[['B', 'C']].sort_index() # doctest: +NORMALIZE_WHITESPACE\n B C\n A\n 1 1 0.589\n 2 3 0.705\n\n >>> aggregated = df.groupby('A').agg({'B': ['min', 'max']})\n >>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE\n B\n min max\n A\n 1 1 2\n 2 3 4\n\n >>> aggregated = df.groupby('A').agg('min')\n >>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE\n B C\n A\n 1 1 0.227\n 2 3 -0.562\n\n >>> aggregated = df.groupby('A').agg(['min', 'max'])\n >>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE\n B C\n min max min max\n A\n 1 1 2 0.227 0.362\n 2 3 4 -0.562 1.267\n\n To control the output names with different aggregations per column, pandas-on-Spark\n also supports 'named aggregation' or nested renaming in .agg. 
It can also be\n used when applying multiple aggregation functions to specific columns.\n\n >>> aggregated = df.groupby('A').agg(b_max=ps.NamedAgg(column='B', aggfunc='max'))\n >>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE\n b_max\n A\n 1 2\n 2 4\n\n >>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), b_min=('B', 'min'))\n >>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE\n b_max b_min\n A\n 1 2 1\n 2 4 3\n\n >>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), c_min=('C', 'min'))\n >>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE\n b_max c_min\n A\n 1 2 0.227\n 2 4 -0.562\n \"\"\"\n # I think current implementation of func and arguments in pandas-on-Spark for aggregate\n # is different than pandas, later once arguments are added, this could be removed.\n if func_or_funcs is None and kwargs is None:\n raise ValueError(\"No aggregation argument or function specified.\")\n\n relabeling = func_or_funcs is None and is_multi_agg_with_relabel(**kwargs)\n if relabeling:\n (\n func_or_funcs,\n columns,\n order,\n ) = normalize_keyword_aggregation( # type: ignore[assignment]\n kwargs\n )\n\n if not isinstance(func_or_funcs, (str, list)):\n if not isinstance(func_or_funcs, dict) or not all(\n is_name_like_value(key)\n and (\n isinstance(value, str)\n or isinstance(value, list)\n and all(isinstance(v, str) for v in value)\n )\n for key, value in func_or_funcs.items()\n ):\n raise ValueError(\n \"aggs must be a dict mapping from column name \"\n \"to aggregate functions (string or list of strings).\"\n )\n\n else:\n agg_cols = [col.name for col in self._agg_columns]\n func_or_funcs = {col: func_or_funcs for col in agg_cols}\n\n psdf: DataFrame = DataFrame(\n GroupBy._spark_groupby(self._psdf, func_or_funcs, self._groupkeys)\n )\n\n if self._dropna:\n psdf = DataFrame(\n psdf._internal.with_new_sdf(\n psdf._internal.spark_frame.dropna(\n subset=psdf._internal.index_spark_column_names\n )\n )\n )\n\n if not self._as_index:\n should_drop_index = set(\n i for i, gkey in enumerate(self._groupkeys) if gkey._psdf is not self._psdf\n )\n if len(should_drop_index) > 0:\n psdf = psdf.reset_index(level=should_drop_index, drop=True)\n if len(should_drop_index) < len(self._groupkeys):\n psdf = psdf.reset_index()\n\n if relabeling:\n psdf = psdf[order]\n psdf.columns = columns # type: ignore[assignment]\n return psdf\n\n agg = aggregate\n\n @staticmethod\n def _spark_groupby(\n psdf: DataFrame,\n func: Mapping[Name, Union[str, List[str]]],\n groupkeys: Sequence[Series] = (),\n ) -> InternalFrame:\n groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]\n groupkey_scols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]\n\n multi_aggs = any(isinstance(v, list) for v in func.values())\n reordered = []\n data_columns = []\n column_labels = []\n for key, value in func.items():\n label = key if is_name_like_tuple(key) else (key,)\n if len(label) != psdf._internal.column_labels_level:\n raise TypeError(\"The length of the key must be the same as the column label level.\")\n for aggfunc in [value] if isinstance(value, str) else value:\n column_label = tuple(list(label) + [aggfunc]) if multi_aggs else label\n column_labels.append(column_label)\n\n data_col = name_like_string(column_label)\n data_columns.append(data_col)\n\n col_name = psdf._internal.spark_column_name_for(label)\n if aggfunc == \"nunique\":\n reordered.append(\n F.expr(\"count(DISTINCT `{0}`) as `{1}`\".format(col_name, data_col))\n )\n\n # Implement \"quartiles\" 
aggregate function for ``describe``.\n elif aggfunc == \"quartiles\":\n reordered.append(\n F.expr(\n \"percentile_approx(`{0}`, array(0.25, 0.5, 0.75)) as `{1}`\".format(\n col_name, data_col\n )\n )\n )\n\n else:\n reordered.append(\n F.expr(\"{1}(`{0}`) as `{2}`\".format(col_name, aggfunc, data_col))\n )\n\n sdf = psdf._internal.spark_frame.select(groupkey_scols + psdf._internal.data_spark_columns)\n sdf = sdf.groupby(*groupkey_names).agg(*reordered)\n\n return InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],\n index_names=[psser._column_label for psser in groupkeys],\n index_fields=[\n psser._internal.data_fields[0].copy(name=name)\n for psser, name in zip(groupkeys, groupkey_names)\n ],\n column_labels=column_labels,\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n )\n\n def count(self) -> FrameLike:\n \"\"\"\n Compute count of group, excluding missing values.\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 1, 2, 1, 2],\n ... 'B': [np.nan, 2, 3, 4, 5],\n ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])\n >>> df.groupby('A').count().sort_index() # doctest: +NORMALIZE_WHITESPACE\n B C\n A\n 1 2 3\n 2 2 2\n \"\"\"\n return self._reduce_for_stat_function(F.count)\n\n # TODO: We should fix See Also when Series implementation is finished.\n def first(self, numeric_only: Optional[bool] = False) -> FrameLike:\n \"\"\"\n Compute first of group values.\n\n Parameters\n ----------\n numeric_only : bool, default False\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data.\n\n .. versionadded:: 3.4.0\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [1, 2, 1, 2], \"B\": [True, False, False, True],\n ... \"C\": [3, 3, 4, 4], \"D\": [\"a\", \"b\", \"b\", \"a\"]})\n >>> df\n A B C D\n 0 1 True 3 a\n 1 2 False 3 b\n 2 1 False 4 b\n 3 2 True 4 a\n\n >>> df.groupby(\"A\").first().sort_index()\n B C D\n A\n 1 True 3 a\n 2 False 3 b\n\n Include only float, int, boolean columns when set numeric_only True.\n\n >>> df.groupby(\"A\").first(numeric_only=True).sort_index()\n B C\n A\n 1 True 3\n 2 False 3\n \"\"\"\n return self._reduce_for_stat_function(\n F.first, accepted_spark_types=(NumericType, BooleanType) if numeric_only else None\n )\n\n def last(self, numeric_only: Optional[bool] = False) -> FrameLike:\n \"\"\"\n Compute last of group values.\n\n Parameters\n ----------\n numeric_only : bool, default False\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data.\n\n .. versionadded:: 3.4.0\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [1, 2, 1, 2], \"B\": [True, False, False, True],\n ... 
\"C\": [3, 3, 4, 4], \"D\": [\"a\", \"b\", \"b\", \"a\"]})\n >>> df\n A B C D\n 0 1 True 3 a\n 1 2 False 3 b\n 2 1 False 4 b\n 3 2 True 4 a\n\n >>> df.groupby(\"A\").last().sort_index()\n B C D\n A\n 1 False 4 b\n 2 True 4 a\n\n Include only float, int, boolean columns when set numeric_only True.\n\n >>> df.groupby(\"A\").last(numeric_only=True).sort_index()\n B C\n A\n 1 False 4\n 2 True 4\n \"\"\"\n return self._reduce_for_stat_function(\n lambda col: F.last(col, ignorenulls=True),\n accepted_spark_types=(NumericType, BooleanType) if numeric_only else None,\n )\n\n def max(self, numeric_only: Optional[bool] = False) -> FrameLike:\n \"\"\"\n Compute max of group values.\n\n Parameters\n ----------\n numeric_only : bool, default False\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data.\n\n .. versionadded:: 3.4.0\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [1, 2, 1, 2], \"B\": [True, False, False, True],\n ... \"C\": [3, 4, 3, 4], \"D\": [\"a\", \"b\", \"b\", \"a\"]})\n\n >>> df.groupby(\"A\").max().sort_index()\n B C D\n A\n 1 True 3 b\n 2 True 4 b\n\n Include only float, int, boolean columns when set numeric_only True.\n\n >>> df.groupby(\"A\").max(numeric_only=True).sort_index()\n B C\n A\n 1 True 3\n 2 True 4\n \"\"\"\n return self._reduce_for_stat_function(\n F.max, accepted_spark_types=(NumericType, BooleanType) if numeric_only else None\n )\n\n def mean(self, numeric_only: Optional[bool] = True) -> FrameLike:\n \"\"\"\n Compute mean of groups, excluding missing values.\n\n Parameters\n ----------\n numeric_only : bool, default False\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data.\n\n .. versionadded:: 3.4.0\n\n Returns\n -------\n pyspark.pandas.Series or pyspark.pandas.DataFrame\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 1, 2, 1, 2],\n ... 'B': [np.nan, 2, 3, 4, 5],\n ... 'C': [1, 2, 1, 1, 2],\n ... 'D': [True, False, True, False, True]})\n\n Groupby one column and return the mean of the remaining columns in\n each group.\n\n >>> df.groupby('A').mean().sort_index() # doctest: +NORMALIZE_WHITESPACE\n B C D\n A\n 1 3.0 1.333333 0.333333\n 2 4.0 1.500000 1.000000\n \"\"\"\n self._validate_agg_columns(numeric_only=numeric_only, function_name=\"median\")\n\n return self._reduce_for_stat_function(\n F.mean, accepted_spark_types=(NumericType,), bool_to_numeric=True\n )\n\n def min(self, numeric_only: Optional[bool] = False) -> FrameLike:\n \"\"\"\n Compute min of group values.\n\n Parameters\n ----------\n numeric_only : bool, default False\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data.\n\n .. versionadded:: 3.4.0\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [1, 2, 1, 2], \"B\": [True, False, False, True],\n ... 
\"C\": [3, 4, 3, 4], \"D\": [\"a\", \"b\", \"b\", \"a\"]})\n >>> df.groupby(\"A\").min().sort_index()\n B C D\n A\n 1 False 3 a\n 2 False 4 a\n\n Include only float, int, boolean columns when set numeric_only True.\n\n >>> df.groupby(\"A\").min(numeric_only=True).sort_index()\n B C\n A\n 1 False 3\n 2 False 4\n \"\"\"\n return self._reduce_for_stat_function(\n F.min, accepted_spark_types=(NumericType, BooleanType) if numeric_only else None\n )\n\n # TODO: sync the doc.\n def std(self, ddof: int = 1) -> FrameLike:\n \"\"\"\n Compute standard deviation of groups, excluding missing values.\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [1, 2, 1, 2], \"B\": [True, False, False, True],\n ... \"C\": [3, 4, 3, 4], \"D\": [\"a\", \"b\", \"b\", \"a\"]})\n\n >>> df.groupby(\"A\").std()\n B C\n A\n 1 0.707107 0.0\n 2 0.707107 0.0\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n \"\"\"\n assert ddof in (0, 1)\n\n # Raise the TypeError when all aggregation columns are of unaccepted data types\n all_unaccepted = True\n for _agg_col in self._agg_columns:\n if isinstance(_agg_col.spark.data_type, (NumericType, BooleanType)):\n all_unaccepted = False\n break\n if all_unaccepted:\n raise TypeError(\n \"Unaccepted data types of aggregation columns; numeric or bool expected.\"\n )\n\n return self._reduce_for_stat_function(\n F.stddev_pop if ddof == 0 else F.stddev_samp,\n accepted_spark_types=(NumericType,),\n bool_to_numeric=True,\n )\n\n def sum(self) -> FrameLike:\n \"\"\"\n Compute sum of group values\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [1, 2, 1, 2], \"B\": [True, False, False, True],\n ... \"C\": [3, 4, 3, 4], \"D\": [\"a\", \"b\", \"b\", \"a\"]})\n\n >>> df.groupby(\"A\").sum()\n B C\n A\n 1 1 6\n 2 1 8\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n \"\"\"\n return self._reduce_for_stat_function(\n F.sum, accepted_spark_types=(NumericType,), bool_to_numeric=True\n )\n\n # TODO: sync the doc.\n def var(self, ddof: int = 1) -> FrameLike:\n \"\"\"\n Compute variance of groups, excluding missing values.\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [1, 2, 1, 2], \"B\": [True, False, False, True],\n ... \"C\": [3, 4, 3, 4], \"D\": [\"a\", \"b\", \"b\", \"a\"]})\n\n >>> df.groupby(\"A\").var()\n B C\n A\n 1 0.5 0.0\n 2 0.5 0.0\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n \"\"\"\n assert ddof in (0, 1)\n\n return self._reduce_for_stat_function(\n F.var_pop if ddof == 0 else F.var_samp,\n accepted_spark_types=(NumericType,),\n bool_to_numeric=True,\n )\n\n def skew(self) -> FrameLike:\n \"\"\"\n Compute skewness of groups, excluding missing values.\n\n .. versionadded:: 3.4.0\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [1, 2, 1, 1], \"B\": [True, False, False, True],\n ... 
\"C\": [3, 4, 3, 4], \"D\": [\"a\", \"b\", \"b\", \"a\"]})\n\n >>> df.groupby(\"A\").skew()\n B C\n A\n 1 -1.732051 1.732051\n 2 NaN NaN\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n \"\"\"\n\n def skew(scol: Column) -> Column:\n sql_utils = SparkContext._active_spark_context._jvm.PythonSQLUtils\n return Column(sql_utils.pandasSkewness(scol._jc))\n\n return self._reduce_for_stat_function(\n skew,\n accepted_spark_types=(NumericType,),\n bool_to_numeric=True,\n )\n\n # TODO: skipna should be implemented.\n def all(self) -> FrameLike:\n \"\"\"\n Returns True if all values in the group are truthful, else False.\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],\n ... 'B': [True, True, True, False, False,\n ... False, None, True, None, False]},\n ... columns=['A', 'B'])\n >>> df\n A B\n 0 1 True\n 1 1 True\n 2 2 True\n 3 2 False\n 4 3 False\n 5 3 False\n 6 4 None\n 7 4 True\n 8 5 None\n 9 5 False\n\n >>> df.groupby('A').all().sort_index() # doctest: +NORMALIZE_WHITESPACE\n B\n A\n 1 True\n 2 False\n 3 False\n 4 True\n 5 False\n \"\"\"\n return self._reduce_for_stat_function(\n lambda col: F.min(F.coalesce(col.cast(\"boolean\"), SF.lit(True)))\n )\n\n # TODO: skipna should be implemented.\n def any(self) -> FrameLike:\n \"\"\"\n Returns True if any value in the group is truthful, else False.\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],\n ... 'B': [True, True, True, False, False,\n ... False, None, True, None, False]},\n ... columns=['A', 'B'])\n >>> df\n A B\n 0 1 True\n 1 1 True\n 2 2 True\n 3 2 False\n 4 3 False\n 5 3 False\n 6 4 None\n 7 4 True\n 8 5 None\n 9 5 False\n\n >>> df.groupby('A').any().sort_index() # doctest: +NORMALIZE_WHITESPACE\n B\n A\n 1 True\n 2 True\n 3 False\n 4 True\n 5 False\n \"\"\"\n return self._reduce_for_stat_function(\n lambda col: F.max(F.coalesce(col.cast(\"boolean\"), SF.lit(False)))\n )\n\n # TODO: groupby multiply columns should be implemented.\n def size(self) -> Series:\n \"\"\"\n Compute group sizes.\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3, 3, 3],\n ... 'B': [1, 1, 2, 3, 3, 3]},\n ... 
columns=['A', 'B'])\n >>> df\n A B\n 0 1 1\n 1 2 1\n 2 2 2\n 3 3 3\n 4 3 3\n 5 3 3\n\n >>> df.groupby('A').size().sort_index()\n A\n 1 1\n 2 2\n 3 3\n dtype: int64\n\n >>> df.groupby(['A', 'B']).size().sort_index()\n A B\n 1 1 1\n 2 1 1\n 2 1\n 3 3 3\n dtype: int64\n\n For Series,\n\n >>> df.B.groupby(df.A).size().sort_index()\n A\n 1 1\n 2 2\n 3 3\n Name: B, dtype: int64\n\n >>> df.groupby(df.A).B.size().sort_index()\n A\n 1 1\n 2 2\n 3 3\n Name: B, dtype: int64\n \"\"\"\n groupkeys = self._groupkeys\n groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]\n groupkey_scols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]\n sdf = self._psdf._internal.spark_frame.select(\n groupkey_scols + self._psdf._internal.data_spark_columns\n )\n sdf = sdf.groupby(*groupkey_names).count()\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],\n index_names=[psser._column_label for psser in groupkeys],\n index_fields=[\n psser._internal.data_fields[0].copy(name=name)\n for psser, name in zip(groupkeys, groupkey_names)\n ],\n column_labels=[None],\n data_spark_columns=[scol_for(sdf, \"count\")],\n )\n return first_series(DataFrame(internal))\n\n def diff(self, periods: int = 1) -> FrameLike:\n \"\"\"\n First discrete difference of element.\n\n Calculates the difference of a DataFrame element compared with another element in the\n DataFrame group (default is the element in the same column of the previous row).\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculating difference, accepts negative values.\n\n Returns\n -------\n diffed : DataFrame or Series\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.groupby(['b']).diff().sort_index()\n a c\n 0 NaN NaN\n 1 1.0 3.0\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n 5 NaN NaN\n\n Difference with previous column in a group.\n\n >>> df.groupby(['b'])['a'].diff().sort_index()\n 0 NaN\n 1 1.0\n 2 NaN\n 3 NaN\n 4 NaN\n 5 NaN\n Name: a, dtype: float64\n \"\"\"\n return self._apply_series_op(\n lambda sg: sg._psser._diff(periods, part_cols=sg._groupkeys_scols), should_resolve=True\n )\n\n def cumcount(self, ascending: bool = True) -> Series:\n \"\"\"\n Number each item in each group from 0 to the length of that group - 1.\n\n Essentially this is equivalent to\n\n .. code-block:: python\n\n self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))\n\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from length of group - 1 to 0.\n\n Returns\n -------\n Series\n Sequence number of each element within each group.\n\n Examples\n --------\n\n >>> df = ps.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],\n ... 
columns=['A'])\n >>> df\n A\n 0 a\n 1 a\n 2 a\n 3 b\n 4 b\n 5 a\n >>> df.groupby('A').cumcount().sort_index()\n 0 0\n 1 1\n 2 2\n 3 0\n 4 1\n 5 3\n dtype: int64\n >>> df.groupby('A').cumcount(ascending=False).sort_index()\n 0 3\n 1 2\n 2 1\n 3 1\n 4 0\n 5 0\n dtype: int64\n \"\"\"\n ret = (\n self._groupkeys[0]\n .rename()\n .spark.transform(lambda _: SF.lit(0))\n ._cum(F.count, True, part_cols=self._groupkeys_scols, ascending=ascending)\n - 1\n )\n internal = ret._internal.resolved_copy\n return first_series(DataFrame(internal))\n\n def cummax(self) -> FrameLike:\n \"\"\"\n Cumulative max for each group.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n Series.cummax\n DataFrame.cummax\n\n Examples\n --------\n >>> df = ps.DataFrame(\n ... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],\n ... columns=list('ABC'))\n >>> df\n A B C\n 0 1 NaN 4\n 1 1 0.1 3\n 2 1 20.0 2\n 3 4 10.0 1\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.groupby(\"A\").cummax().sort_index()\n B C\n 0 NaN 4\n 1 0.1 4\n 2 20.0 4\n 3 10.0 1\n\n It works as below in Series.\n\n >>> df.C.groupby(df.A).cummax().sort_index()\n 0 4\n 1 4\n 2 4\n 3 1\n Name: C, dtype: int64\n \"\"\"\n return self._apply_series_op(\n lambda sg: sg._psser._cum(F.max, True, part_cols=sg._groupkeys_scols),\n should_resolve=True,\n numeric_only=True,\n )\n\n def cummin(self) -> FrameLike:\n \"\"\"\n Cumulative min for each group.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n Series.cummin\n DataFrame.cummin\n\n Examples\n --------\n >>> df = ps.DataFrame(\n ... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],\n ... columns=list('ABC'))\n >>> df\n A B C\n 0 1 NaN 4\n 1 1 0.1 3\n 2 1 20.0 2\n 3 4 10.0 1\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.groupby(\"A\").cummin().sort_index()\n B C\n 0 NaN 4\n 1 0.1 3\n 2 0.1 2\n 3 10.0 1\n\n It works as below in Series.\n\n >>> df.B.groupby(df.A).cummin().sort_index()\n 0 NaN\n 1 0.1\n 2 0.1\n 3 10.0\n Name: B, dtype: float64\n \"\"\"\n return self._apply_series_op(\n lambda sg: sg._psser._cum(F.min, True, part_cols=sg._groupkeys_scols),\n should_resolve=True,\n numeric_only=True,\n )\n\n def cumprod(self) -> FrameLike:\n \"\"\"\n Cumulative product for each group.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n Series.cumprod\n DataFrame.cumprod\n\n Examples\n --------\n >>> df = ps.DataFrame(\n ... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],\n ... columns=list('ABC'))\n >>> df\n A B C\n 0 1 NaN 4\n 1 1 0.1 3\n 2 1 20.0 2\n 3 4 10.0 1\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.groupby(\"A\").cumprod().sort_index()\n B C\n 0 NaN 4\n 1 0.1 12\n 2 2.0 24\n 3 10.0 1\n\n It works as below in Series.\n\n >>> df.B.groupby(df.A).cumprod().sort_index()\n 0 NaN\n 1 0.1\n 2 2.0\n 3 10.0\n Name: B, dtype: float64\n \"\"\"\n return self._apply_series_op(\n lambda sg: sg._psser._cumprod(True, part_cols=sg._groupkeys_scols),\n should_resolve=True,\n numeric_only=True,\n )\n\n def cumsum(self) -> FrameLike:\n \"\"\"\n Cumulative sum for each group.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n Series.cumsum\n DataFrame.cumsum\n\n Examples\n --------\n >>> df = ps.DataFrame(\n ... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],\n ... 
columns=list('ABC'))\n >>> df\n A B C\n 0 1 NaN 4\n 1 1 0.1 3\n 2 1 20.0 2\n 3 4 10.0 1\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.groupby(\"A\").cumsum().sort_index()\n B C\n 0 NaN 4\n 1 0.1 7\n 2 20.1 9\n 3 10.0 1\n\n It works as below in Series.\n\n >>> df.B.groupby(df.A).cumsum().sort_index()\n 0 NaN\n 1 0.1\n 2 20.1\n 3 10.0\n Name: B, dtype: float64\n \"\"\"\n return self._apply_series_op(\n lambda sg: sg._psser._cumsum(True, part_cols=sg._groupkeys_scols),\n should_resolve=True,\n numeric_only=True,\n )\n\n def apply(self, func: Callable, *args: Any, **kwargs: Any) -> Union[DataFrame, Series]:\n \"\"\"\n Apply function `func` group-wise and combine the results together.\n\n The function passed to `apply` must take a DataFrame as its first\n argument and return a DataFrame. `apply` will\n then take care of combining the results back together into a single\n dataframe. `apply` is therefore a highly flexible\n grouping method.\n\n While `apply` is a very flexible method, its downside is that\n using it can be quite a bit slower than using more specific methods\n like `agg` or `transform`. pandas-on-Spark offers a wide range of method that will\n be much faster than using `apply` for their specific purposes, so try to\n use them before reaching for `apply`.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def pandas_div(x) -> ps.DataFrame[int, [float, float]]:\n ... return x[['B', 'C']] / x[['B', 'C']]\n\n If the return type is specified, the output column names become\n `c0, c1, c2 ... cn`. These names are positionally mapped to the returned\n DataFrame in ``func``.\n\n To specify the column names, you can assign them in a NumPy compound type style\n as below:\n\n >>> def pandas_div(x) -> ps.DataFrame[(\"index\", int), [(\"a\", float), (\"b\", float)]]:\n ... return x[['B', 'C']] / x[['B', 'C']]\n\n >>> pdf = pd.DataFrame({'B': [1.], 'C': [3.]})\n >>> def plus_one(x) -> ps.DataFrame[\n ... (pdf.index.name, pdf.index.dtype), zip(pdf.columns, pdf.dtypes)]:\n ... return x[['B', 'C']] / x[['B', 'C']]\n\n .. note:: the dataframe within ``func`` is actually a pandas dataframe. Therefore,\n any pandas API within this function is allowed.\n\n Parameters\n ----------\n func : callable\n A callable that takes a DataFrame as its first argument, and\n returns a dataframe.\n *args\n Positional arguments to pass to func.\n **kwargs\n Keyword arguments to pass to func.\n\n Returns\n -------\n applied : DataFrame or Series\n\n See Also\n --------\n aggregate : Apply aggregate function to the GroupBy object.\n DataFrame.apply : Apply a function to a DataFrame.\n Series.apply : Apply a function to a Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': 'a a b'.split(),\n ... 'B': [1, 2, 3],\n ... 'C': [4, 6, 5]}, columns=['A', 'B', 'C'])\n >>> g = df.groupby('A')\n\n Notice that ``g`` has two groups, ``a`` and ``b``.\n Calling `apply` in various ways, we can get different grouping results:\n\n Below the functions passed to `apply` takes a DataFrame as\n its argument and returns a DataFrame. `apply` combines the result for\n each group together into a new DataFrame:\n\n >>> def plus_min(x):\n ... 
return x + x.min()\n >>> g.apply(plus_min).sort_index() # doctest: +NORMALIZE_WHITESPACE\n A B C\n 0 aa 2 8\n 1 aa 3 10\n 2 bb 6 10\n\n >>> g.apply(sum).sort_index() # doctest: +NORMALIZE_WHITESPACE\n A B C\n A\n a aa 3 10\n b b 3 5\n\n >>> g.apply(len).sort_index() # doctest: +NORMALIZE_WHITESPACE\n A\n a 2\n b 1\n dtype: int64\n\n You can specify the type hint and prevent schema inference for better performance.\n\n >>> def pandas_div(x) -> ps.DataFrame[int, [float, float]]:\n ... return x[['B', 'C']] / x[['B', 'C']]\n >>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE\n c0 c1\n 0 1.0 1.0\n 1 1.0 1.0\n 2 1.0 1.0\n\n >>> def pandas_div(x) -> ps.DataFrame[(\"index\", int), [(\"f1\", float), (\"f2\", float)]]:\n ... return x[['B', 'C']] / x[['B', 'C']]\n >>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE\n f1 f2\n index\n 0 1.0 1.0\n 1 1.0 1.0\n 2 1.0 1.0\n\n In case of Series, it works as below.\n\n >>> def plus_max(x) -> ps.Series[np.int]:\n ... return x + x.max()\n >>> df.B.groupby(df.A).apply(plus_max).sort_index() # doctest: +SKIP\n 0 6\n 1 3\n 2 4\n Name: B, dtype: int64\n\n >>> def plus_min(x):\n ... return x + x.min()\n >>> df.B.groupby(df.A).apply(plus_min).sort_index()\n 0 2\n 1 3\n 2 6\n Name: B, dtype: int64\n\n You can also return a scalar value as a aggregated value of the group:\n\n >>> def plus_length(x) -> np.int:\n ... return len(x)\n >>> df.B.groupby(df.A).apply(plus_length).sort_index() # doctest: +SKIP\n 0 1\n 1 2\n Name: B, dtype: int64\n\n The extra arguments to the function can be passed as below.\n\n >>> def calculation(x, y, z) -> np.int:\n ... return len(x) + y * z\n >>> df.B.groupby(df.A).apply(calculation, 5, z=10).sort_index() # doctest: +SKIP\n 0 51\n 1 52\n Name: B, dtype: int64\n \"\"\"\n if not callable(func):\n raise TypeError(\"%s object is not callable\" % type(func).__name__)\n\n spec = inspect.getfullargspec(func)\n return_sig = spec.annotations.get(\"return\", None)\n should_infer_schema = return_sig is None\n should_retain_index = should_infer_schema\n\n is_series_groupby = isinstance(self, SeriesGroupBy)\n\n psdf = self._psdf\n\n if self._agg_columns_selected:\n agg_columns = self._agg_columns\n else:\n agg_columns = [\n psdf._psser_for(label)\n for label in psdf._internal.column_labels\n if label not in self._column_labels_to_exclude\n ]\n\n psdf, groupkey_labels, groupkey_names = GroupBy._prepare_group_map_apply(\n psdf, self._groupkeys, agg_columns\n )\n\n if is_series_groupby:\n name = psdf.columns[-1]\n pandas_apply = _builtin_table.get(func, func)\n else:\n f = _builtin_table.get(func, func)\n\n def pandas_apply(pdf: pd.DataFrame, *a: Any, **k: Any) -> Any:\n return f(pdf.drop(groupkey_names, axis=1), *a, **k)\n\n should_return_series = False\n\n if should_infer_schema:\n # Here we execute with the first 1000 to get the return type.\n log_advice(\n \"If the type hints is not specified for `grouby.apply`, \"\n \"it is expensive to infer the data type internally.\"\n )\n limit = get_option(\"compute.shortcut_limit\")\n # Ensure sampling rows >= 2 to make sure apply's infer schema is accurate\n # See related: https://github.com/pandas-dev/pandas/issues/46893\n sample_limit = limit + 1 if limit else 2\n pdf = psdf.head(sample_limit)._to_internal_pandas()\n groupkeys = [\n pdf[groupkey_name].rename(psser.name)\n for groupkey_name, psser in zip(groupkey_names, self._groupkeys)\n ]\n grouped = pdf.groupby(groupkeys)\n if is_series_groupby:\n pser_or_pdf = grouped[name].apply(pandas_apply, *args, **kwargs)\n 
else:\n pser_or_pdf = grouped.apply(pandas_apply, *args, **kwargs)\n psser_or_psdf = ps.from_pandas(pser_or_pdf)\n\n if len(pdf) <= limit:\n if isinstance(psser_or_psdf, ps.Series) and is_series_groupby:\n psser_or_psdf = psser_or_psdf.rename(cast(SeriesGroupBy, self)._psser.name)\n return cast(Union[Series, DataFrame], psser_or_psdf)\n\n if len(grouped) <= 1:\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n warnings.warn(\n \"The amount of data for return type inference might not be large enough. \"\n \"Consider increasing an option `compute.shortcut_limit`.\"\n )\n\n if isinstance(psser_or_psdf, Series):\n should_return_series = True\n psdf_from_pandas = psser_or_psdf._psdf\n else:\n psdf_from_pandas = cast(DataFrame, psser_or_psdf)\n\n index_fields = [\n field.normalize_spark_type() for field in psdf_from_pandas._internal.index_fields\n ]\n data_fields = [\n field.normalize_spark_type() for field in psdf_from_pandas._internal.data_fields\n ]\n return_schema = StructType([field.struct_field for field in index_fields + data_fields])\n else:\n return_type = infer_return_type(func)\n if not is_series_groupby and isinstance(return_type, SeriesType):\n raise TypeError(\n \"Series as a return type hint at frame groupby is not supported \"\n \"currently; however got [%s]. Use DataFrame type hint instead.\" % return_sig\n )\n\n if isinstance(return_type, DataFrameType):\n data_fields = return_type.data_fields\n return_schema = return_type.spark_type\n index_fields = return_type.index_fields\n should_retain_index = len(index_fields) > 0\n psdf_from_pandas = None\n else:\n should_return_series = True\n dtype = cast(Union[SeriesType, ScalarType], return_type).dtype\n spark_type = cast(Union[SeriesType, ScalarType], return_type).spark_type\n if is_series_groupby:\n data_fields = [\n InternalField(\n dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)\n )\n ]\n else:\n data_fields = [\n InternalField(\n dtype=dtype,\n struct_field=StructField(\n name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type\n ),\n )\n ]\n return_schema = StructType([field.struct_field for field in data_fields])\n\n def pandas_groupby_apply(pdf: pd.DataFrame) -> pd.DataFrame:\n\n if is_series_groupby:\n pdf_or_ser = pdf.groupby(groupkey_names)[name].apply(pandas_apply, *args, **kwargs)\n else:\n pdf_or_ser = pdf.groupby(groupkey_names).apply(pandas_apply, *args, **kwargs)\n if should_return_series and isinstance(pdf_or_ser, pd.DataFrame):\n pdf_or_ser = pdf_or_ser.stack()\n\n if not isinstance(pdf_or_ser, pd.DataFrame):\n return pd.DataFrame(pdf_or_ser)\n else:\n return pdf_or_ser\n\n sdf = GroupBy._spark_group_map_apply(\n psdf,\n pandas_groupby_apply,\n [psdf._internal.spark_column_for(label) for label in groupkey_labels],\n return_schema,\n retain_index=should_retain_index,\n )\n\n if should_retain_index:\n # If schema is inferred, we can restore indexes too.\n if psdf_from_pandas is not None:\n internal = psdf_from_pandas._internal.with_new_sdf(\n spark_frame=sdf, index_fields=index_fields, data_fields=data_fields\n )\n else:\n index_names: Optional[List[Optional[Tuple[Any, ...]]]] = None\n\n index_spark_columns = [\n scol_for(sdf, index_field.struct_field.name) for index_field in index_fields\n ]\n\n if not any(\n [\n SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name)\n for index_field in index_fields\n ]\n ):\n index_names = [(index_field.struct_field.name,) for index_field in index_fields]\n internal = InternalFrame(\n spark_frame=sdf,\n index_names=index_names,\n 
index_spark_columns=index_spark_columns,\n index_fields=index_fields,\n data_fields=data_fields,\n )\n else:\n # Otherwise, it loses index.\n internal = InternalFrame(\n spark_frame=sdf, index_spark_columns=None, data_fields=data_fields\n )\n\n if should_return_series:\n psser = first_series(DataFrame(internal))\n if is_series_groupby:\n psser = psser.rename(cast(SeriesGroupBy, self)._psser.name)\n return psser\n else:\n return DataFrame(internal)\n\n # TODO: implement 'dropna' parameter\n def filter(self, func: Callable[[FrameLike], FrameLike]) -> FrameLike:\n \"\"\"\n Return a copy of a DataFrame excluding elements from groups that\n do not satisfy the boolean criterion specified by func.\n\n Parameters\n ----------\n f : function\n Function to apply to each subframe. Should return True or False.\n dropna : Drop groups that do not pass the filter. True by default;\n if False, groups that evaluate False are filled with NaNs.\n\n Returns\n -------\n filtered : DataFrame or Series\n\n Notes\n -----\n Each subframe is endowed the attribute 'name' in case you need to know\n which group you are working on.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n ... 'foo', 'bar'],\n ... 'B' : [1, 2, 3, 4, 5, 6],\n ... 'C' : [2.0, 5., 8., 1., 2., 9.]}, columns=['A', 'B', 'C'])\n >>> grouped = df.groupby('A')\n >>> grouped.filter(lambda x: x['B'].mean() > 3.)\n A B C\n 1 bar 2 5.0\n 3 bar 4 1.0\n 5 bar 6 9.0\n\n >>> df.B.groupby(df.A).filter(lambda x: x.mean() > 3.)\n 1 2\n 3 4\n 5 6\n Name: B, dtype: int64\n \"\"\"\n if not callable(func):\n raise TypeError(\"%s object is not callable\" % type(func).__name__)\n\n is_series_groupby = isinstance(self, SeriesGroupBy)\n\n psdf = self._psdf\n\n if self._agg_columns_selected:\n agg_columns = self._agg_columns\n else:\n agg_columns = [\n psdf._psser_for(label)\n for label in psdf._internal.column_labels\n if label not in self._column_labels_to_exclude\n ]\n\n data_schema = (\n psdf[agg_columns]._internal.resolved_copy.spark_frame.drop(*HIDDEN_COLUMNS).schema\n )\n\n psdf, groupkey_labels, groupkey_names = GroupBy._prepare_group_map_apply(\n psdf, self._groupkeys, agg_columns\n )\n\n if is_series_groupby:\n\n def pandas_filter(pdf: pd.DataFrame) -> pd.DataFrame:\n return pd.DataFrame(pdf.groupby(groupkey_names)[pdf.columns[-1]].filter(func))\n\n else:\n f = _builtin_table.get(func, func)\n\n def wrapped_func(pdf: pd.DataFrame) -> pd.DataFrame:\n return f(pdf.drop(groupkey_names, axis=1))\n\n def pandas_filter(pdf: pd.DataFrame) -> pd.DataFrame:\n return pdf.groupby(groupkey_names).filter(wrapped_func).drop(groupkey_names, axis=1)\n\n sdf = GroupBy._spark_group_map_apply(\n psdf,\n pandas_filter,\n [psdf._internal.spark_column_for(label) for label in groupkey_labels],\n data_schema,\n retain_index=True,\n )\n\n psdf = DataFrame(self._psdf[agg_columns]._internal.with_new_sdf(sdf))\n if is_series_groupby:\n return cast(FrameLike, first_series(psdf))\n else:\n return cast(FrameLike, psdf)\n\n @staticmethod\n def _prepare_group_map_apply(\n psdf: DataFrame, groupkeys: List[Series], agg_columns: List[Series]\n ) -> Tuple[DataFrame, List[Label], List[str]]:\n groupkey_labels: List[Label] = [\n verify_temp_column_name(psdf, \"__groupkey_{}__\".format(i))\n for i in range(len(groupkeys))\n ]\n psdf = psdf[[s.rename(label) for s, label in zip(groupkeys, groupkey_labels)] + agg_columns]\n groupkey_names = [label if len(label) > 1 else label[0] for label in groupkey_labels]\n return DataFrame(psdf._internal.resolved_copy), 
groupkey_labels, groupkey_names\n\n @staticmethod\n def _spark_group_map_apply(\n psdf: DataFrame,\n func: Callable[[pd.DataFrame], pd.DataFrame],\n groupkeys_scols: List[Column],\n return_schema: StructType,\n retain_index: bool,\n ) -> SparkDataFrame:\n output_func = GroupBy._make_pandas_df_builder_func(psdf, func, return_schema, retain_index)\n sdf = psdf._internal.spark_frame.drop(*HIDDEN_COLUMNS)\n return sdf.groupby(*groupkeys_scols).applyInPandas(output_func, return_schema)\n\n @staticmethod\n def _make_pandas_df_builder_func(\n psdf: DataFrame,\n func: Callable[[pd.DataFrame], pd.DataFrame],\n return_schema: StructType,\n retain_index: bool,\n ) -> Callable[[pd.DataFrame], pd.DataFrame]:\n \"\"\"\n Creates a function that can be used inside the pandas UDF. This function can construct\n the same pandas DataFrame as if the pandas-on-Spark DataFrame is collected to driver side.\n The index, column labels, etc. are re-constructed within the function.\n \"\"\"\n from pyspark.sql.utils import is_timestamp_ntz_preferred\n\n arguments_for_restore_index = psdf._internal.arguments_for_restore_index\n prefer_timestamp_ntz = is_timestamp_ntz_preferred()\n\n def rename_output(pdf: pd.DataFrame) -> pd.DataFrame:\n pdf = InternalFrame.restore_index(pdf.copy(), **arguments_for_restore_index)\n\n pdf = func(pdf)\n\n # If schema should be inferred, we don't restore index. pandas seems restoring\n # the index in some cases.\n # When Spark output type is specified, without executing it, we don't know\n # if we should restore the index or not. For instance, see the example in\n # https://github.com/pyspark.pandas/issues/628.\n pdf, _, _, _, _ = InternalFrame.prepare_pandas_frame(\n pdf, retain_index=retain_index, prefer_timestamp_ntz=prefer_timestamp_ntz\n )\n\n # Just positionally map the column names to given schema's.\n pdf.columns = return_schema.names\n\n return pdf\n\n return rename_output\n\n def rank(self, method: str = \"average\", ascending: bool = True) -> FrameLike:\n \"\"\"\n Provide the rank of values within each group.\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n\n Returns\n -------\n DataFrame with ranking of values within each group\n\n Examples\n --------\n\n >>> df = ps.DataFrame({\n ... 'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n ... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])\n >>> df\n a b\n 0 1 1\n 1 1 2\n 2 1 2\n 3 2 2\n 4 2 3\n 5 2 3\n 6 3 3\n 7 3 4\n 8 3 4\n\n >>> df.groupby(\"a\").rank().sort_index()\n b\n 0 1.0\n 1 2.5\n 2 2.5\n 3 1.0\n 4 2.5\n 5 2.5\n 6 1.0\n 7 2.5\n 8 2.5\n\n >>> df.b.groupby(df.a).rank(method='max').sort_index()\n 0 1.0\n 1 3.0\n 2 3.0\n 3 1.0\n 4 3.0\n 5 3.0\n 6 1.0\n 7 3.0\n 8 3.0\n Name: b, dtype: float64\n\n \"\"\"\n return self._apply_series_op(\n lambda sg: sg._psser._rank(method, ascending, part_cols=sg._groupkeys_scols),\n should_resolve=True,\n )\n\n # TODO: add axis parameter\n def idxmax(self, skipna: bool = True) -> FrameLike:\n \"\"\"\n Return index of first occurrence of maximum over requested axis in group.\n NA/null values are excluded.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. 
If an entire row/column is NA, the result will be NA.\n\n See Also\n --------\n Series.idxmax\n DataFrame.idxmax\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 2, 2, 3],\n ... 'b': [1, 2, 3, 4, 5],\n ... 'c': [5, 4, 3, 2, 1]}, columns=['a', 'b', 'c'])\n\n >>> df.groupby(['a'])['b'].idxmax().sort_index() # doctest: +NORMALIZE_WHITESPACE\n a\n 1 1\n 2 3\n 3 4\n Name: b, dtype: int64\n\n >>> df.groupby(['a']).idxmax().sort_index() # doctest: +NORMALIZE_WHITESPACE\n b c\n a\n 1 1 0\n 2 3 2\n 3 4 4\n \"\"\"\n if self._psdf._internal.index_level != 1:\n raise ValueError(\"idxmax only support one-level index now\")\n\n groupkey_names = [\"__groupkey_{}__\".format(i) for i in range(len(self._groupkeys))]\n\n sdf = self._psdf._internal.spark_frame\n for s, name in zip(self._groupkeys, groupkey_names):\n sdf = sdf.withColumn(name, s.spark.column)\n index = self._psdf._internal.index_spark_column_names[0]\n\n stat_exprs = []\n for psser, scol in zip(self._agg_columns, self._agg_columns_scols):\n name = psser._internal.data_spark_column_names[0]\n\n if skipna:\n order_column = scol.desc_nulls_last()\n else:\n order_column = scol.desc_nulls_first()\n\n window = Window.partitionBy(*groupkey_names).orderBy(\n order_column, NATURAL_ORDER_COLUMN_NAME\n )\n sdf = sdf.withColumn(\n name, F.when(F.row_number().over(window) == 1, scol_for(sdf, index)).otherwise(None)\n )\n stat_exprs.append(F.max(scol_for(sdf, name)).alias(name))\n\n sdf = sdf.groupby(*groupkey_names).agg(*stat_exprs)\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],\n index_names=[psser._column_label for psser in self._groupkeys],\n index_fields=[\n psser._internal.data_fields[0].copy(name=name)\n for psser, name in zip(self._groupkeys, groupkey_names)\n ],\n column_labels=[psser._column_label for psser in self._agg_columns],\n data_spark_columns=[\n scol_for(sdf, psser._internal.data_spark_column_names[0])\n for psser in self._agg_columns\n ],\n )\n return self._cleanup_and_return(DataFrame(internal))\n\n # TODO: add axis parameter\n def idxmin(self, skipna: bool = True) -> FrameLike:\n \"\"\"\n Return index of first occurrence of minimum over requested axis in group.\n NA/null values are excluded.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n See Also\n --------\n Series.idxmin\n DataFrame.idxmin\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 2, 2, 3],\n ... 'b': [1, 2, 3, 4, 5],\n ... 
'c': [5, 4, 3, 2, 1]}, columns=['a', 'b', 'c'])\n\n >>> df.groupby(['a'])['b'].idxmin().sort_index() # doctest: +NORMALIZE_WHITESPACE\n a\n 1 0\n 2 2\n 3 4\n Name: b, dtype: int64\n\n >>> df.groupby(['a']).idxmin().sort_index() # doctest: +NORMALIZE_WHITESPACE\n b c\n a\n 1 0 1\n 2 2 3\n 3 4 4\n \"\"\"\n if self._psdf._internal.index_level != 1:\n raise ValueError(\"idxmin only support one-level index now\")\n\n groupkey_names = [\"__groupkey_{}__\".format(i) for i in range(len(self._groupkeys))]\n\n sdf = self._psdf._internal.spark_frame\n for s, name in zip(self._groupkeys, groupkey_names):\n sdf = sdf.withColumn(name, s.spark.column)\n index = self._psdf._internal.index_spark_column_names[0]\n\n stat_exprs = []\n for psser, scol in zip(self._agg_columns, self._agg_columns_scols):\n name = psser._internal.data_spark_column_names[0]\n\n if skipna:\n order_column = scol.asc_nulls_last()\n else:\n order_column = scol.asc_nulls_first()\n\n window = Window.partitionBy(*groupkey_names).orderBy(\n order_column, NATURAL_ORDER_COLUMN_NAME\n )\n sdf = sdf.withColumn(\n name, F.when(F.row_number().over(window) == 1, scol_for(sdf, index)).otherwise(None)\n )\n stat_exprs.append(F.max(scol_for(sdf, name)).alias(name))\n\n sdf = sdf.groupby(*groupkey_names).agg(*stat_exprs)\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],\n index_names=[psser._column_label for psser in self._groupkeys],\n index_fields=[\n psser._internal.data_fields[0].copy(name=name)\n for psser, name in zip(self._groupkeys, groupkey_names)\n ],\n column_labels=[psser._column_label for psser in self._agg_columns],\n data_spark_columns=[\n scol_for(sdf, psser._internal.data_spark_column_names[0])\n for psser in self._agg_columns\n ],\n )\n return self._cleanup_and_return(DataFrame(internal))\n\n def fillna(\n self,\n value: Optional[Any] = None,\n method: Optional[str] = None,\n axis: Optional[Axis] = None,\n inplace: bool = False,\n limit: Optional[int] = None,\n ) -> FrameLike:\n \"\"\"Fill NA/NaN values in group.\n\n Parameters\n ----------\n value : scalar, dict, Series\n Value to use to fill holes. alternately a dict/Series of values\n specifying which value to use for each column.\n DataFrame is not supported.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series pad / ffill: propagate last valid\n observation forward to next valid backfill / bfill:\n use NEXT valid observation to fill gap\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries filled.\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'A': [1, 1, 2, 2],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... 
columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 1 2.0 NaN 0\n 1 1 4.0 NaN 1\n 2 2 NaN NaN 5\n 3 2 3.0 1.0 4\n\n We can also propagate non-null values forward or backward in group.\n\n >>> df.groupby(['A'])['B'].fillna(method='ffill').sort_index()\n 0 2.0\n 1 4.0\n 2 NaN\n 3 3.0\n Name: B, dtype: float64\n\n >>> df.groupby(['A']).fillna(method='bfill').sort_index()\n B C D\n 0 2.0 NaN 0\n 1 4.0 NaN 1\n 2 3.0 1.0 5\n 3 3.0 1.0 4\n \"\"\"\n return self._apply_series_op(\n lambda sg: sg._psser._fillna(\n value=value, method=method, axis=axis, limit=limit, part_cols=sg._groupkeys_scols\n ),\n should_resolve=(method is not None),\n )\n\n def bfill(self, limit: Optional[int] = None) -> FrameLike:\n \"\"\"\n Synonym for `DataFrame.fillna()` with ``method=`bfill```.\n\n Parameters\n ----------\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries filled.\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'A': [1, 1, 2, 2],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 1 2.0 NaN 0\n 1 1 4.0 NaN 1\n 2 2 NaN NaN 5\n 3 2 3.0 1.0 4\n\n Propagate non-null values backward.\n\n >>> df.groupby(['A']).bfill().sort_index()\n B C D\n 0 2.0 NaN 0\n 1 4.0 NaN 1\n 2 3.0 1.0 5\n 3 3.0 1.0 4\n \"\"\"\n return self.fillna(method=\"bfill\", limit=limit)\n\n backfill = bfill\n\n def ffill(self, limit: Optional[int] = None) -> FrameLike:\n \"\"\"\n Synonym for `DataFrame.fillna()` with ``method=`ffill```.\n\n Parameters\n ----------\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries filled.\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'A': [1, 1, 2, 2],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... 
columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 1 2.0 NaN 0\n 1 1 4.0 NaN 1\n 2 2 NaN NaN 5\n 3 2 3.0 1.0 4\n\n Propagate non-null values forward.\n\n >>> df.groupby(['A']).ffill().sort_index()\n B C D\n 0 2.0 NaN 0\n 1 4.0 NaN 1\n 2 NaN NaN 5\n 3 3.0 1.0 4\n \"\"\"\n return self.fillna(method=\"ffill\", limit=limit)\n\n pad = ffill\n\n def _limit(self, n: int, asc: bool) -> FrameLike:\n \"\"\"\n Private function for tail and head.\n \"\"\"\n psdf = self._psdf\n\n if self._agg_columns_selected:\n agg_columns = self._agg_columns\n else:\n agg_columns = [\n psdf._psser_for(label)\n for label in psdf._internal.column_labels\n if label not in self._column_labels_to_exclude\n ]\n\n psdf, groupkey_labels, _ = GroupBy._prepare_group_map_apply(\n psdf,\n self._groupkeys,\n agg_columns,\n )\n\n groupkey_scols = [psdf._internal.spark_column_for(label) for label in groupkey_labels]\n\n sdf = psdf._internal.spark_frame\n\n window = Window.partitionBy(*groupkey_scols)\n # This part is handled differently depending on whether it is a tail or a head.\n ordered_window = (\n window.orderBy(F.col(NATURAL_ORDER_COLUMN_NAME).asc())\n if asc\n else window.orderBy(F.col(NATURAL_ORDER_COLUMN_NAME).desc())\n )\n\n if n >= 0 or LooseVersion(pd.__version__) < LooseVersion(\"1.4.0\"):\n tmp_row_num_col = verify_temp_column_name(sdf, \"__row_number__\")\n sdf = (\n sdf.withColumn(tmp_row_num_col, F.row_number().over(ordered_window))\n .filter(F.col(tmp_row_num_col) <= n)\n .drop(tmp_row_num_col)\n )\n else:\n # Pandas supports Groupby positional indexing since v1.4.0\n # https://pandas.pydata.org/docs/whatsnew/v1.4.0.html#groupby-positional-indexing\n #\n # To support groupby positional indexing, we need add a `__tmp_lag__` column to help\n # us filtering rows before the specified offset row.\n #\n # For example for the dataframe:\n # >>> df = ps.DataFrame([[\"g\", \"g0\"],\n # ... [\"g\", \"g1\"],\n # ... [\"g\", \"g2\"],\n # ... [\"g\", \"g3\"],\n # ... [\"h\", \"h0\"],\n # ... [\"h\", \"h1\"]], columns=[\"A\", \"B\"])\n # >>> df.groupby(\"A\").head(-1)\n #\n # Below is a result to show the `__tmp_lag__` column for above df, the limit n is\n # `-1`, the `__tmp_lag__` will be set to `0` in rows[:-1], and left will be set to\n # `null`:\n #\n # >>> sdf.withColumn(tmp_lag_col, F.lag(F.lit(0), -1).over(ordered_window))\n # +-----------------+--------------+---+---+-----------------+-----------+\n # |__index_level_0__|__groupkey_0__| A| B|__natural_order__|__tmp_lag__|\n # +-----------------+--------------+---+---+-----------------+-----------+\n # | 0| g| g| g0| 0| 0|\n # | 1| g| g| g1| 8589934592| 0|\n # | 2| g| g| g2| 17179869184| 0|\n # | 3| g| g| g3| 25769803776| null|\n # | 4| h| h| h0| 34359738368| 0|\n # | 5| h| h| h1| 42949672960| null|\n # +-----------------+--------------+---+---+-----------------+-----------+\n #\n tmp_lag_col = verify_temp_column_name(sdf, \"__tmp_lag__\")\n sdf = (\n sdf.withColumn(tmp_lag_col, F.lag(F.lit(0), n).over(ordered_window))\n .where(~F.isnull(F.col(tmp_lag_col)))\n .drop(tmp_lag_col)\n )\n\n internal = psdf._internal.with_new_sdf(sdf)\n return self._cleanup_and_return(DataFrame(internal).drop(groupkey_labels, axis=1))\n\n def head(self, n: int = 5) -> FrameLike:\n \"\"\"\n Return first n rows of each group.\n\n Returns\n -------\n DataFrame or Series\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],\n ... 'b': [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],\n ... 'c': [3, 5, 2, 5, 1, 2, 6, 4, 3, 6]},\n ... columns=['a', 'b', 'c'],\n ... 
index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6])\n >>> df\n a b c\n 7 1 2 3\n 2 1 3 5\n 4 1 1 2\n 1 1 4 5\n 3 2 6 1\n 4 2 9 2\n 9 2 8 6\n 10 3 10 4\n 5 3 7 3\n 6 3 5 6\n\n >>> df.groupby('a').head(2).sort_index()\n a b c\n 2 1 3 5\n 3 2 6 1\n 4 2 9 2\n 5 3 7 3\n 7 1 2 3\n 10 3 10 4\n\n >>> df.groupby('a')['b'].head(2).sort_index()\n 2 3\n 3 6\n 4 9\n 5 7\n 7 2\n 10 10\n Name: b, dtype: int64\n\n Supports Groupby positional indexing Since pandas on Spark 3.4 (with pandas 1.4+):\n\n >>> df = ps.DataFrame([[\"g\", \"g0\"],\n ... [\"g\", \"g1\"],\n ... [\"g\", \"g2\"],\n ... [\"g\", \"g3\"],\n ... [\"h\", \"h0\"],\n ... [\"h\", \"h1\"]], columns=[\"A\", \"B\"])\n >>> df.groupby(\"A\").head(-1) # doctest: +SKIP\n A B\n 0 g g0\n 1 g g1\n 2 g g2\n 4 h h0\n \"\"\"\n return self._limit(n, asc=True)\n\n def tail(self, n: int = 5) -> FrameLike:\n \"\"\"\n Return last n rows of each group.\n\n Similar to `.apply(lambda x: x.tail(n))`, but it returns a subset of rows from\n the original DataFrame with original index and order preserved (`as_index` flag is ignored).\n\n Does not work for negative values of n.\n\n Returns\n -------\n DataFrame or Series\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],\n ... 'b': [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],\n ... 'c': [3, 5, 2, 5, 1, 2, 6, 4, 3, 6]},\n ... columns=['a', 'b', 'c'],\n ... index=[7, 2, 3, 1, 3, 4, 9, 10, 5, 6])\n >>> df\n a b c\n 7 1 2 3\n 2 1 3 5\n 3 1 1 2\n 1 1 4 5\n 3 2 6 1\n 4 2 9 2\n 9 2 8 6\n 10 3 10 4\n 5 3 7 3\n 6 3 5 6\n\n >>> df.groupby('a').tail(2).sort_index()\n a b c\n 1 1 4 5\n 3 1 1 2\n 4 2 9 2\n 5 3 7 3\n 6 3 5 6\n 9 2 8 6\n\n >>> df.groupby('a')['b'].tail(2).sort_index()\n 1 4\n 3 1\n 4 9\n 5 7\n 6 5\n 9 8\n Name: b, dtype: int64\n\n Supports Groupby positional indexing Since pandas on Spark 3.4 (with pandas 1.4+):\n\n >>> df = ps.DataFrame([[\"g\", \"g0\"],\n ... [\"g\", \"g1\"],\n ... [\"g\", \"g2\"],\n ... [\"g\", \"g3\"],\n ... [\"h\", \"h0\"],\n ... [\"h\", \"h1\"]], columns=[\"A\", \"B\"])\n >>> df.groupby(\"A\").tail(-1) # doctest: +SKIP\n A B\n 3 g g3\n 2 g g2\n 1 g g1\n 5 h h1\n \"\"\"\n return self._limit(n, asc=False)\n\n def shift(self, periods: int = 1, fill_value: Optional[Any] = None) -> FrameLike:\n \"\"\"\n Shift each group by periods observations.\n\n Parameters\n ----------\n periods : integer, default 1\n number of periods to shift\n fill_value : optional\n\n Returns\n -------\n Series or DataFrame\n Object shifted within each group.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({\n ... 'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n ... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])\n >>> df\n a b\n 0 1 1\n 1 1 2\n 2 1 2\n 3 2 2\n 4 2 3\n 5 2 3\n 6 3 3\n 7 3 4\n 8 3 4\n\n >>> df.groupby('a').shift().sort_index() # doctest: +SKIP\n b\n 0 NaN\n 1 1.0\n 2 2.0\n 3 NaN\n 4 2.0\n 5 3.0\n 6 NaN\n 7 3.0\n 8 4.0\n\n >>> df.groupby('a').shift(periods=-1, fill_value=0).sort_index() # doctest: +SKIP\n b\n 0 2\n 1 2\n 2 0\n 3 3\n 4 3\n 5 0\n 6 4\n 7 4\n 8 0\n \"\"\"\n return self._apply_series_op(\n lambda sg: sg._psser._shift(periods, fill_value, part_cols=sg._groupkeys_scols),\n should_resolve=True,\n )\n\n def transform(self, func: Callable[..., pd.Series], *args: Any, **kwargs: Any) -> FrameLike:\n \"\"\"\n Apply function column-by-column to the GroupBy object.\n\n The function passed to `transform` must take a Series as its first\n argument and return a Series. 
The given function is executed for\n each series in each group.\n\n While `transform` is a very flexible method, its downside is that\n using it can be quite a bit slower than using more specific methods\n like `agg`. pandas-on-Spark offers a wide range of methods that will\n be much faster than using `transform` for their specific purposes, so try to\n use them before reaching for `transform`.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def convert_to_string(x) -> ps.Series[str]:\n ... return x.apply(\"a string {}\".format)\n\n When the given function has the return type annotated, the original index of the\n GroupBy object will be lost and a default index will be attached to the result.\n Please be careful about configuring the default index. See also `Default Index Type\n <https://koalas.readthedocs.io/en/latest/user_guide/options.html#default-index-type>`_.\n\n .. note:: the series within ``func`` is actually a pandas series. Therefore,\n any pandas API within this function is allowed.\n\n\n Parameters\n ----------\n func : callable\n A callable that takes a Series as its first argument, and\n returns a Series.\n *args\n Positional arguments to pass to func.\n **kwargs\n Keyword arguments to pass to func.\n\n Returns\n -------\n applied : DataFrame\n\n See Also\n --------\n aggregate : Apply aggregate function to the GroupBy object.\n Series.apply : Apply a function to a Series.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'A': [0, 0, 1],\n ... 'B': [1, 2, 3],\n ... 'C': [4, 6, 5]}, columns=['A', 'B', 'C'])\n\n >>> g = df.groupby('A')\n\n Notice that ``g`` has two groups, ``0`` and ``1``.\n Calling `transform` in various ways, we can get different grouping results:\n Below, the function passed to `transform` takes a Series as\n its argument and returns a Series. `transform` applies the function to each series\n in each group, and combines the results into a new DataFrame:\n\n >>> def convert_to_string(x) -> ps.Series[str]:\n ... return x.apply(\"a string {}\".format)\n >>> g.transform(convert_to_string) # doctest: +NORMALIZE_WHITESPACE\n B C\n 0 a string 1 a string 4\n 1 a string 2 a string 6\n 2 a string 3 a string 5\n\n >>> def plus_max(x) -> ps.Series[np.int]:\n ... return x + x.max()\n >>> g.transform(plus_max) # doctest: +NORMALIZE_WHITESPACE\n B C\n 0 3 10\n 1 4 12\n 2 6 10\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> def plus_min(x):\n ... return x + x.min()\n >>> g.transform(plus_min) # doctest: +NORMALIZE_WHITESPACE\n B C\n 0 2 8\n 1 3 10\n 2 6 10\n\n In case of Series, it works as below.\n\n >>> df.B.groupby(df.A).transform(plus_max)\n 0 3\n 1 4\n 2 6\n Name: B, dtype: int64\n\n >>> (df * -1).B.groupby(df.A).transform(abs)\n 0 1\n 1 2\n 2 3\n Name: B, dtype: int64\n\n You can also specify extra arguments to pass to the function.\n\n >>> def calculation(x, y, z) -> ps.Series[np.int]:\n ... 
return x + x.min() + y + z\n >>> g.transform(calculation, 5, z=20) # doctest: +NORMALIZE_WHITESPACE\n B C\n 0 27 33\n 1 28 35\n 2 31 35\n \"\"\"\n if not callable(func):\n raise TypeError(\"%s object is not callable\" % type(func).__name__)\n\n spec = inspect.getfullargspec(func)\n return_sig = spec.annotations.get(\"return\", None)\n\n psdf, groupkey_labels, groupkey_names = GroupBy._prepare_group_map_apply(\n self._psdf, self._groupkeys, agg_columns=self._agg_columns\n )\n\n def pandas_transform(pdf: pd.DataFrame) -> pd.DataFrame:\n return pdf.groupby(groupkey_names).transform(func, *args, **kwargs)\n\n should_infer_schema = return_sig is None\n\n if should_infer_schema:\n # Here we execute with the first 1000 to get the return type.\n # If the records were less than 1000, it uses pandas API directly for a shortcut.\n log_advice(\n \"If the type hints is not specified for `grouby.transform`, \"\n \"it is expensive to infer the data type internally.\"\n )\n limit = get_option(\"compute.shortcut_limit\")\n pdf = psdf.head(limit + 1)._to_internal_pandas()\n pdf = pdf.groupby(groupkey_names).transform(func, *args, **kwargs)\n psdf_from_pandas: DataFrame = DataFrame(pdf)\n return_schema = force_decimal_precision_scale(\n as_nullable_spark_type(\n psdf_from_pandas._internal.spark_frame.drop(*HIDDEN_COLUMNS).schema\n )\n )\n if len(pdf) <= limit:\n return self._cleanup_and_return(psdf_from_pandas)\n\n sdf = GroupBy._spark_group_map_apply(\n psdf,\n pandas_transform,\n [psdf._internal.spark_column_for(label) for label in groupkey_labels],\n return_schema,\n retain_index=True,\n )\n # If schema is inferred, we can restore indexes too.\n internal = psdf_from_pandas._internal.with_new_sdf(\n sdf,\n index_fields=[\n field.copy(nullable=True) for field in psdf_from_pandas._internal.index_fields\n ],\n data_fields=[\n field.copy(nullable=True) for field in psdf_from_pandas._internal.data_fields\n ],\n )\n else:\n return_type = infer_return_type(func)\n if not isinstance(return_type, SeriesType):\n raise TypeError(\n \"Expected the return type of this function to be of Series type, \"\n \"but found type {}\".format(return_type)\n )\n\n dtype = return_type.dtype\n spark_type = return_type.spark_type\n\n data_fields = [\n InternalField(dtype=dtype, struct_field=StructField(name=c, dataType=spark_type))\n for c in psdf._internal.data_spark_column_names\n if c not in groupkey_names\n ]\n\n return_schema = StructType([field.struct_field for field in data_fields])\n\n sdf = GroupBy._spark_group_map_apply(\n psdf,\n pandas_transform,\n [psdf._internal.spark_column_for(label) for label in groupkey_labels],\n return_schema,\n retain_index=False,\n )\n # Otherwise, it loses index.\n internal = InternalFrame(\n spark_frame=sdf, index_spark_columns=None, data_fields=data_fields\n )\n\n return self._cleanup_and_return(DataFrame(internal))\n\n def nunique(self, dropna: bool = True) -> FrameLike:\n \"\"\"\n Return DataFrame with number of distinct observations per group for each column.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don’t include NaN in the counts.\n\n Returns\n -------\n nunique : DataFrame or Series\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',\n ... 'ham', 'ham'],\n ... 'value1': [1, 5, 5, 2, 5, 5],\n ... 
'value2': list('abbaxy')}, columns=['id', 'value1', 'value2'])\n >>> df\n id value1 value2\n 0 spam 1 a\n 1 egg 5 b\n 2 egg 5 b\n 3 spam 2 a\n 4 ham 5 x\n 5 ham 5 y\n\n >>> df.groupby('id').nunique().sort_index() # doctest: +SKIP\n value1 value2\n id\n egg 1 1\n ham 1 2\n spam 2 1\n\n >>> df.groupby('id')['value1'].nunique().sort_index() # doctest: +NORMALIZE_WHITESPACE\n id\n egg 1\n ham 1\n spam 2\n Name: value1, dtype: int64\n \"\"\"\n if dropna:\n\n def stat_function(col: Column) -> Column:\n return F.countDistinct(col)\n\n else:\n\n def stat_function(col: Column) -> Column:\n return F.countDistinct(col) + F.when(\n F.count(F.when(col.isNull(), 1).otherwise(None)) >= 1, 1\n ).otherwise(0)\n\n return self._reduce_for_stat_function(stat_function)\n\n def rolling(\n self, window: int, min_periods: Optional[int] = None\n ) -> \"RollingGroupby[FrameLike]\":\n \"\"\"\n Return an rolling grouper, providing rolling\n functionality per group.\n\n .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.\n Unlike pandas, NA is also counted as the period. This might be changed\n in the near future.\n\n Parameters\n ----------\n window : int, or offset\n Size of the moving window.\n This is the number of observations used for calculating the statistic.\n Each window will be a fixed size.\n\n min_periods : int, default 1\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n\n See Also\n --------\n Series.groupby\n DataFrame.groupby\n \"\"\"\n from pyspark.pandas.window import RollingGroupby\n\n return RollingGroupby(self, window, min_periods=min_periods)\n\n def expanding(self, min_periods: int = 1) -> \"ExpandingGroupby[FrameLike]\":\n \"\"\"\n Return an expanding grouper, providing expanding\n functionality per group.\n\n .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.\n Unlike pandas, NA is also counted as the period. This might be changed\n in the near future.\n\n Parameters\n ----------\n min_periods : int, default 1\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n\n See Also\n --------\n Series.groupby\n DataFrame.groupby\n \"\"\"\n from pyspark.pandas.window import ExpandingGroupby\n\n return ExpandingGroupby(self, min_periods=min_periods)\n\n # TODO: 'adjust', 'axis', 'method' parameter should be implemented.\n def ewm(\n self,\n com: Optional[float] = None,\n span: Optional[float] = None,\n halflife: Optional[float] = None,\n alpha: Optional[float] = None,\n min_periods: Optional[int] = None,\n ignore_na: bool = False,\n ) -> \"ExponentialMovingGroupby[FrameLike]\":\n \"\"\"\n Return an ewm grouper, providing ewm functionality per group.\n\n .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.\n Unlike pandas, NA is also counted as the period. This might be changed\n in the near future.\n\n .. 
versionadded:: 3.4.0\n\n Parameters\n ----------\n com : float, optional\n Specify decay in terms of center of mass.\n alpha = 1 / (1 + com), for com >= 0.\n\n span : float, optional\n Specify decay in terms of span.\n alpha = 2 / (span + 1), for span >= 1.\n\n halflife : float, optional\n Specify decay in terms of half-life.\n alpha = 1 - exp(-ln(2) / halflife), for halflife > 0.\n\n alpha : float, optional\n Specify smoothing factor alpha directly.\n 0 < alpha <= 1.\n\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n\n ignore_na : bool, default False\n Ignore missing values when calculating weights.\n\n - When ``ignore_na=False`` (default), weights are based on absolute positions.\n For example, the weights of :math:`x_0` and :math:`x_2` used in calculating\n the final weighted average of [:math:`x_0`, None, :math:`x_2`] are\n :math:`(1-\\alpha)^2` and :math:`1` if ``adjust=True``, and\n :math:`(1-\\alpha)^2` and :math:`\\alpha` if ``adjust=False``.\n\n - When ``ignore_na=True``, weights are based\n on relative positions. For example, the weights of :math:`x_0` and :math:`x_2`\n used in calculating the final weighted average of\n [:math:`x_0`, None, :math:`x_2`] are :math:`1-\\alpha` and :math:`1` if\n ``adjust=True``, and :math:`1-\\alpha` and :math:`\\alpha` if ``adjust=False``.\n \"\"\"\n from pyspark.pandas.window import ExponentialMovingGroupby\n\n return ExponentialMovingGroupby(\n self,\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n ignore_na=ignore_na,\n )\n\n def get_group(self, name: Union[Name, List[Name]]) -> FrameLike:\n \"\"\"\n Construct DataFrame from group with provided name.\n\n Parameters\n ----------\n name : object\n The name of the group to get as a DataFrame.\n\n Returns\n -------\n group : same type as obj\n\n Examples\n --------\n >>> psdf = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... 
index=[0, 2, 3, 1])\n >>> psdf\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n >>> psdf.groupby(\"class\").get_group(\"bird\").sort_index()\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n\n >>> psdf.groupby(\"class\").get_group(\"mammal\").sort_index()\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n groupkeys = self._groupkeys\n if not is_hashable(name):\n raise TypeError(\"unhashable type: '{}'\".format(type(name).__name__))\n elif len(groupkeys) > 1:\n if not isinstance(name, tuple):\n raise ValueError(\"must supply a tuple to get_group with multiple grouping keys\")\n if len(groupkeys) != len(name):\n raise ValueError(\n \"must supply a same-length tuple to get_group with multiple grouping keys\"\n )\n if not is_list_like(name):\n name = [name]\n cond = SF.lit(True)\n for groupkey, item in zip(groupkeys, name):\n scol = groupkey.spark.column\n cond = cond & (scol == item)\n if self._agg_columns_selected:\n internal = self._psdf._internal\n spark_frame = internal.spark_frame.select(\n internal.index_spark_columns + self._agg_columns_scols\n ).filter(cond)\n\n internal = internal.copy(\n spark_frame=spark_frame,\n index_spark_columns=[\n scol_for(spark_frame, col) for col in internal.index_spark_column_names\n ],\n column_labels=[s._column_label for s in self._agg_columns],\n data_spark_columns=[\n scol_for(spark_frame, s._internal.data_spark_column_names[0])\n for s in self._agg_columns\n ],\n data_fields=[s._internal.data_fields[0] for s in self._agg_columns],\n )\n else:\n internal = self._psdf._internal.with_filter(cond)\n if internal.spark_frame.head() is None:\n raise KeyError(name)\n\n return self._cleanup_and_return(DataFrame(internal))\n\n def median(self, numeric_only: Optional[bool] = True, accuracy: int = 10000) -> FrameLike:\n \"\"\"\n Compute median of groups, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex\n\n .. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon\n approximate percentile computation because computing median across a large dataset\n is extremely expensive.\n\n Parameters\n ----------\n numeric_only : bool, default False\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data.\n\n .. versionadded:: 3.4.0\n\n Returns\n -------\n Series or DataFrame\n Median of values within each group.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1., 1., 1., 1., 2., 2., 2., 3., 3., 3.],\n ... 'b': [2., 3., 1., 4., 6., 9., 8., 10., 7., 5.],\n ... 'c': [3., 5., 2., 5., 1., 2., 6., 4., 3., 6.]},\n ... columns=['a', 'b', 'c'],\n ... 
index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6])\n >>> psdf\n a b c\n 7 1.0 2.0 3.0\n 2 1.0 3.0 5.0\n 4 1.0 1.0 2.0\n 1 1.0 4.0 5.0\n 3 2.0 6.0 1.0\n 4 2.0 9.0 2.0\n 9 2.0 8.0 6.0\n 10 3.0 10.0 4.0\n 5 3.0 7.0 3.0\n 6 3.0 5.0 6.0\n\n DataFrameGroupBy\n\n >>> psdf.groupby('a').median().sort_index() # doctest: +NORMALIZE_WHITESPACE\n b c\n a\n 1.0 2.0 3.0\n 2.0 8.0 2.0\n 3.0 7.0 4.0\n\n SeriesGroupBy\n\n >>> psdf.groupby('a')['b'].median().sort_index()\n a\n 1.0 2.0\n 2.0 8.0\n 3.0 7.0\n Name: b, dtype: float64\n \"\"\"\n if not isinstance(accuracy, int):\n raise TypeError(\n \"accuracy must be an integer; however, got [%s]\" % type(accuracy).__name__\n )\n\n self._validate_agg_columns(numeric_only=numeric_only, function_name=\"median\")\n\n def stat_function(col: Column) -> Column:\n return F.percentile_approx(col, 0.5, accuracy)\n\n return self._reduce_for_stat_function(\n stat_function,\n accepted_spark_types=(NumericType,),\n bool_to_numeric=True,\n )\n\n def _validate_agg_columns(self, numeric_only: Optional[bool], function_name: str) -> None:\n \"\"\"Validate aggregation columns and raise an error or a warning following pandas.\"\"\"\n has_non_numeric = False\n for _agg_col in self._agg_columns:\n if not isinstance(_agg_col.spark.data_type, (NumericType, BooleanType)):\n has_non_numeric = True\n break\n if has_non_numeric:\n if isinstance(self, SeriesGroupBy):\n raise TypeError(\"Only numeric aggregation column is accepted.\")\n\n if not numeric_only:\n if has_non_numeric:\n warnings.warn(\n \"Dropping invalid columns in DataFrameGroupBy.mean is deprecated. \"\n \"In a future version, a TypeError will be raised. \"\n \"Before calling .%s, select only columns which should be \"\n \"valid for the function.\" % function_name,\n FutureWarning,\n )\n\n def _reduce_for_stat_function(\n self,\n sfun: Callable[[Column], Column],\n accepted_spark_types: Optional[Tuple[Type[DataType], ...]] = None,\n bool_to_numeric: bool = False,\n ) -> FrameLike:\n \"\"\"Apply an aggregate function `sfun` per column and reduce to a FrameLike.\n\n Parameters\n ----------\n sfun : The aggregate function to apply per column.\n accepted_spark_types: Accepted spark types of columns to be aggregated;\n default None means all spark types are accepted.\n bool_to_numeric: If True, boolean columns are converted to numeric columns, which\n are accepted for all statistical functions regardless of\n `accepted_spark_types`.\n \"\"\"\n groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(self._groupkeys))]\n groupkey_scols = [s.alias(name) for s, name in zip(self._groupkeys_scols, groupkey_names)]\n\n agg_columns = []\n for psser in self._agg_columns:\n if bool_to_numeric and isinstance(psser.spark.data_type, BooleanType):\n agg_columns.append(psser.astype(int))\n elif (accepted_spark_types is None) or isinstance(\n psser.spark.data_type, accepted_spark_types\n ):\n agg_columns.append(psser)\n\n sdf = self._psdf._internal.spark_frame.select(\n *groupkey_scols, *[psser.spark.column for psser in agg_columns]\n )\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],\n index_names=[psser._column_label for psser in self._groupkeys],\n index_fields=[\n psser._internal.data_fields[0].copy(name=name)\n for psser, name in zip(self._groupkeys, groupkey_names)\n ],\n data_spark_columns=[\n scol_for(sdf, psser._internal.data_spark_column_names[0]) for psser in agg_columns\n ],\n column_labels=[psser._column_label for psser in agg_columns],\n 
data_fields=[psser._internal.data_fields[0] for psser in agg_columns],\n column_label_names=self._psdf._internal.column_label_names,\n )\n psdf: DataFrame = DataFrame(internal)\n\n if len(psdf._internal.column_labels) > 0:\n stat_exprs = []\n for label in psdf._internal.column_labels:\n psser = psdf._psser_for(label)\n stat_exprs.append(\n sfun(psser._dtype_op.nan_to_null(psser).spark.column).alias(\n psser._internal.data_spark_column_names[0]\n )\n )\n sdf = sdf.groupby(*groupkey_names).agg(*stat_exprs)\n else:\n sdf = sdf.select(*groupkey_names).distinct()\n\n internal = internal.copy(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],\n data_spark_columns=[scol_for(sdf, col) for col in internal.data_spark_column_names],\n data_fields=None,\n )\n psdf = DataFrame(internal)\n\n if self._dropna:\n psdf = DataFrame(\n psdf._internal.with_new_sdf(\n psdf._internal.spark_frame.dropna(\n subset=psdf._internal.index_spark_column_names\n )\n )\n )\n\n if not self._as_index:\n should_drop_index = set(\n i for i, gkey in enumerate(self._groupkeys) if gkey._psdf is not self._psdf\n )\n if len(should_drop_index) > 0:\n psdf = psdf.reset_index(level=should_drop_index, drop=True)\n if len(should_drop_index) < len(self._groupkeys):\n psdf = psdf.reset_index()\n return self._cleanup_and_return(psdf)\n\n @staticmethod\n def _resolve_grouping_from_diff_dataframes(\n psdf: DataFrame, by: List[Union[Series, Label]]\n ) -> Tuple[DataFrame, List[Series], Set[Label]]:\n column_labels_level = psdf._internal.column_labels_level\n\n column_labels = []\n additional_pssers = []\n additional_column_labels = []\n tmp_column_labels = set()\n for i, col_or_s in enumerate(by):\n if isinstance(col_or_s, Series):\n if col_or_s._psdf is psdf:\n column_labels.append(col_or_s._column_label)\n elif same_anchor(col_or_s, psdf):\n temp_label = verify_temp_column_name(psdf, \"__tmp_groupkey_{}__\".format(i))\n column_labels.append(temp_label)\n additional_pssers.append(col_or_s.rename(temp_label))\n additional_column_labels.append(temp_label)\n else:\n temp_label = verify_temp_column_name(\n psdf,\n tuple(\n ([\"\"] * (column_labels_level - 1)) + [\"__tmp_groupkey_{}__\".format(i)]\n ),\n )\n column_labels.append(temp_label)\n tmp_column_labels.add(temp_label)\n elif isinstance(col_or_s, tuple):\n psser = psdf[col_or_s]\n if not isinstance(psser, Series):\n raise ValueError(name_like_string(col_or_s))\n column_labels.append(col_or_s)\n else:\n raise ValueError(col_or_s)\n\n psdf = DataFrame(\n psdf._internal.with_new_columns(\n [psdf._psser_for(label) for label in psdf._internal.column_labels]\n + additional_pssers\n )\n )\n\n def assign_columns(\n psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label]\n ) -> Iterator[Tuple[Series, Label]]:\n raise NotImplementedError(\n \"Duplicated labels with groupby() and \"\n \"'compute.ops_on_diff_frames' option are not supported currently \"\n \"Please use unique labels in series and frames.\"\n )\n\n for col_or_s, label in zip(by, column_labels):\n if label in tmp_column_labels:\n psser = col_or_s\n psdf = align_diff_frames(\n assign_columns,\n psdf,\n psser.rename(label),\n fillna=False,\n how=\"inner\",\n preserve_order_column=True,\n )\n\n tmp_column_labels |= set(additional_column_labels)\n\n new_by_series = []\n for col_or_s, label in zip(by, column_labels):\n if label in tmp_column_labels:\n psser = col_or_s\n new_by_series.append(psdf._psser_for(label).rename(psser.name))\n else:\n 
new_by_series.append(psdf._psser_for(label))\n\n return psdf, new_by_series, tmp_column_labels\n\n @staticmethod\n def _resolve_grouping(psdf: DataFrame, by: List[Union[Series, Label]]) -> List[Series]:\n new_by_series = []\n for col_or_s in by:\n if isinstance(col_or_s, Series):\n new_by_series.append(col_or_s)\n elif isinstance(col_or_s, tuple):\n psser = psdf[col_or_s]\n if not isinstance(psser, Series):\n raise ValueError(name_like_string(col_or_s))\n new_by_series.append(psser)\n else:\n raise ValueError(col_or_s)\n return new_by_series\n\n\nclass DataFrameGroupBy(GroupBy[DataFrame]):\n @staticmethod\n def _build(\n psdf: DataFrame, by: List[Union[Series, Label]], as_index: bool, dropna: bool\n ) -> \"DataFrameGroupBy\":\n if any(isinstance(col_or_s, Series) and not same_anchor(psdf, col_or_s) for col_or_s in by):\n (\n psdf,\n new_by_series,\n column_labels_to_exclude,\n ) = GroupBy._resolve_grouping_from_diff_dataframes(psdf, by)\n else:\n new_by_series = GroupBy._resolve_grouping(psdf, by)\n column_labels_to_exclude = set()\n return DataFrameGroupBy(\n psdf,\n new_by_series,\n as_index=as_index,\n dropna=dropna,\n column_labels_to_exclude=column_labels_to_exclude,\n )\n\n def __init__(\n self,\n psdf: DataFrame,\n by: List[Series],\n as_index: bool,\n dropna: bool,\n column_labels_to_exclude: Set[Label],\n agg_columns: List[Label] = None,\n ):\n agg_columns_selected = agg_columns is not None\n if agg_columns_selected:\n for label in agg_columns:\n if label in column_labels_to_exclude:\n raise KeyError(label)\n else:\n agg_columns = [\n label\n for label in psdf._internal.column_labels\n if not any(label == key._column_label and key._psdf is psdf for key in by)\n and label not in column_labels_to_exclude\n ]\n\n super().__init__(\n psdf=psdf,\n groupkeys=by,\n as_index=as_index,\n dropna=dropna,\n column_labels_to_exclude=column_labels_to_exclude,\n agg_columns_selected=agg_columns_selected,\n agg_columns=[psdf[label] for label in agg_columns],\n )\n\n def __getattr__(self, item: str) -> Any:\n if hasattr(MissingPandasLikeDataFrameGroupBy, item):\n property_or_func = getattr(MissingPandasLikeDataFrameGroupBy, item)\n if isinstance(property_or_func, property):\n return property_or_func.fget(self)\n else:\n return partial(property_or_func, self)\n return self.__getitem__(item)\n\n def __getitem__(self, item: Any) -> GroupBy:\n if self._as_index and is_name_like_value(item):\n return SeriesGroupBy(\n self._psdf._psser_for(item if is_name_like_tuple(item) else (item,)),\n self._groupkeys,\n dropna=self._dropna,\n )\n else:\n if is_name_like_tuple(item):\n item = [item]\n elif is_name_like_value(item):\n item = [(item,)]\n else:\n item = [i if is_name_like_tuple(i) else (i,) for i in item]\n if not self._as_index:\n groupkey_names = set(key._column_label for key in self._groupkeys)\n for name in item:\n if name in groupkey_names:\n raise ValueError(\n \"cannot insert {}, already exists\".format(name_like_string(name))\n )\n return DataFrameGroupBy(\n self._psdf,\n self._groupkeys,\n as_index=self._as_index,\n dropna=self._dropna,\n column_labels_to_exclude=self._column_labels_to_exclude,\n agg_columns=item,\n )\n\n def _apply_series_op(\n self,\n op: Callable[[\"SeriesGroupBy\"], Series],\n should_resolve: bool = False,\n numeric_only: bool = False,\n ) -> DataFrame:\n applied = []\n for column in self._agg_columns:\n applied.append(op(column.groupby(self._groupkeys)))\n if numeric_only:\n applied = [col for col in applied if isinstance(col.spark.data_type, NumericType)]\n if not 
applied:\n raise DataError(\"No numeric types to aggregate\")\n internal = self._psdf._internal.with_new_columns(applied, keep_order=False)\n if should_resolve:\n internal = internal.resolved_copy\n return DataFrame(internal)\n\n def _cleanup_and_return(self, psdf: DataFrame) -> DataFrame:\n return psdf\n\n # TODO: Implement 'percentiles', 'include', and 'exclude' arguments.\n # TODO: Add ``DataFrame.select_dtypes`` to See Also when 'include'\n # and 'exclude' arguments are implemented.\n def describe(self) -> DataFrame:\n \"\"\"\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset's distribution, excluding\n ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n .. note:: Unlike pandas, the percentiles in pandas-on-Spark are based upon\n approximate percentile computation because computing percentiles\n across a large dataset is extremely expensive.\n\n Returns\n -------\n DataFrame\n Summary statistics of the DataFrame provided.\n\n See Also\n --------\n DataFrame.count\n DataFrame.max\n DataFrame.min\n DataFrame.mean\n DataFrame.std\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})\n >>> df\n a b c\n 0 1 4 7\n 1 1 5 8\n 2 3 6 9\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> described = df.groupby('a').describe()\n >>> described.sort_index() # doctest: +NORMALIZE_WHITESPACE\n b c\n count mean std min 25% 50% 75% max count mean std min 25% 50% 75% max\n a\n 1 2.0 4.5 0.707107 4.0 4.0 4.0 5.0 5.0 2.0 7.5 0.707107 7.0 7.0 7.0 8.0 8.0\n 3 1.0 6.0 NaN 6.0 6.0 6.0 6.0 6.0 1.0 9.0 NaN 9.0 9.0 9.0 9.0 9.0\n\n \"\"\"\n for col in self._agg_columns:\n if isinstance(col.spark.data_type, StringType):\n raise NotImplementedError(\n \"DataFrameGroupBy.describe() doesn't support for string type for now\"\n )\n\n psdf = self.aggregate([\"count\", \"mean\", \"std\", \"min\", \"quartiles\", \"max\"])\n sdf = psdf._internal.spark_frame\n agg_column_labels = [col._column_label for col in self._agg_columns]\n formatted_percentiles = [\"25%\", \"50%\", \"75%\"]\n\n # Split \"quartiles\" columns into first, second, and third quartiles.\n for label in agg_column_labels:\n quartiles_col = name_like_string(tuple(list(label) + [\"quartiles\"]))\n for i, percentile in enumerate(formatted_percentiles):\n sdf = sdf.withColumn(\n name_like_string(tuple(list(label) + [percentile])),\n scol_for(sdf, quartiles_col)[i],\n )\n sdf = sdf.drop(quartiles_col)\n\n # Reorder columns lexicographically by agg column followed by stats.\n stats = [\"count\", \"mean\", \"std\", \"min\"] + formatted_percentiles + [\"max\"]\n column_labels = [tuple(list(label) + [s]) for label, s in product(agg_column_labels, stats)]\n data_columns = map(name_like_string, column_labels)\n\n # Reindex the DataFrame to reflect initial grouping and agg columns.\n internal = psdf._internal.copy(\n spark_frame=sdf,\n column_labels=column_labels,\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n data_fields=None,\n )\n\n # Cast columns to ``\"float64\"`` to match `pandas.DataFrame.groupby`.\n return DataFrame(internal).astype(\"float64\")\n\n\nclass SeriesGroupBy(GroupBy[Series]):\n @staticmethod\n def _build(\n psser: Series, by: List[Union[Series, Label]], as_index: bool, dropna: bool\n ) -> \"SeriesGroupBy\":\n if any(\n 
isinstance(col_or_s, Series) and not same_anchor(psser, col_or_s) for col_or_s in by\n ):\n psdf, new_by_series, _ = GroupBy._resolve_grouping_from_diff_dataframes(\n psser.to_frame(), by\n )\n return SeriesGroupBy(\n first_series(psdf).rename(psser.name),\n new_by_series,\n as_index=as_index,\n dropna=dropna,\n )\n else:\n new_by_series = GroupBy._resolve_grouping(psser._psdf, by)\n return SeriesGroupBy(psser, new_by_series, as_index=as_index, dropna=dropna)\n\n def __init__(self, psser: Series, by: List[Series], as_index: bool = True, dropna: bool = True):\n if not as_index:\n raise TypeError(\"as_index=False only valid with DataFrame\")\n super().__init__(\n psdf=psser._psdf,\n groupkeys=by,\n as_index=True,\n dropna=dropna,\n column_labels_to_exclude=set(),\n agg_columns_selected=True,\n agg_columns=[psser],\n )\n self._psser = psser\n\n def __getattr__(self, item: str) -> Any:\n if hasattr(MissingPandasLikeSeriesGroupBy, item):\n property_or_func = getattr(MissingPandasLikeSeriesGroupBy, item)\n if isinstance(property_or_func, property):\n return property_or_func.fget(self)\n else:\n return partial(property_or_func, self)\n raise AttributeError(item)\n\n def _apply_series_op(\n self,\n op: Callable[[\"SeriesGroupBy\"], Series],\n should_resolve: bool = False,\n numeric_only: bool = False,\n ) -> Series:\n if numeric_only and not isinstance(self._agg_columns[0].spark.data_type, NumericType):\n raise DataError(\"No numeric types to aggregate\")\n psser = op(self)\n if should_resolve:\n internal = psser._internal.resolved_copy\n return first_series(DataFrame(internal))\n else:\n return psser.copy()\n\n def _cleanup_and_return(self, psdf: DataFrame) -> Series:\n return first_series(psdf).rename().rename(self._psser.name)\n\n def agg(self, *args: Any, **kwargs: Any) -> None:\n return MissingPandasLikeSeriesGroupBy.agg(self, *args, **kwargs)\n\n def aggregate(self, *args: Any, **kwargs: Any) -> None:\n return MissingPandasLikeSeriesGroupBy.aggregate(self, *args, **kwargs)\n\n def size(self) -> Series:\n return super().size().rename(self._psser.name)\n\n size.__doc__ = GroupBy.size.__doc__\n\n # TODO: add keep parameter\n def nsmallest(self, n: int = 5) -> Series:\n \"\"\"\n Return the smallest `n` elements.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n\n See Also\n --------\n pyspark.pandas.Series.nsmallest\n pyspark.pandas.DataFrame.nsmallest\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n ... 
'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])\n\n >>> df.groupby(['a'])['b'].nsmallest(1).sort_index() # doctest: +NORMALIZE_WHITESPACE\n a\n 1 0 1\n 2 3 2\n 3 6 3\n Name: b, dtype: int64\n \"\"\"\n if self._psser._internal.index_level > 1:\n raise ValueError(\"nsmallest do not support multi-index now\")\n\n groupkey_col_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(self._groupkeys))]\n sdf = self._psser._internal.spark_frame.select(\n *[scol.alias(name) for scol, name in zip(self._groupkeys_scols, groupkey_col_names)],\n *[\n scol.alias(SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))\n for i, scol in enumerate(self._psser._internal.index_spark_columns)\n ],\n self._psser.spark.column,\n NATURAL_ORDER_COLUMN_NAME,\n )\n\n window = Window.partitionBy(*groupkey_col_names).orderBy(\n scol_for(sdf, self._psser._internal.data_spark_column_names[0]).asc(),\n NATURAL_ORDER_COLUMN_NAME,\n )\n\n temp_rank_column = verify_temp_column_name(sdf, \"__rank__\")\n sdf = (\n sdf.withColumn(temp_rank_column, F.row_number().over(window))\n .filter(F.col(temp_rank_column) <= n)\n .drop(temp_rank_column)\n ).drop(NATURAL_ORDER_COLUMN_NAME)\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=(\n [scol_for(sdf, col) for col in groupkey_col_names]\n + [\n scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))\n for i in range(self._psdf._internal.index_level)\n ]\n ),\n index_names=(\n [psser._column_label for psser in self._groupkeys]\n + self._psdf._internal.index_names\n ),\n index_fields=(\n [\n psser._internal.data_fields[0].copy(name=name)\n for psser, name in zip(self._groupkeys, groupkey_col_names)\n ]\n + [\n field.copy(name=SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))\n for i, field in enumerate(self._psdf._internal.index_fields)\n ]\n ),\n column_labels=[self._psser._column_label],\n data_spark_columns=[scol_for(sdf, self._psser._internal.data_spark_column_names[0])],\n data_fields=[self._psser._internal.data_fields[0]],\n )\n return first_series(DataFrame(internal))\n\n # TODO: add keep parameter\n def nlargest(self, n: int = 5) -> Series:\n \"\"\"\n Return the first n rows ordered by columns in descending order in group.\n\n Return the first n rows with the smallest values in columns, in descending order.\n The columns that are not specified are returned as well, but not used for ordering.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n\n See Also\n --------\n pyspark.pandas.Series.nlargest\n pyspark.pandas.DataFrame.nlargest\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n ... 
'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])\n\n >>> df.groupby(['a'])['b'].nlargest(1).sort_index() # doctest: +NORMALIZE_WHITESPACE\n a\n 1 1 2\n 2 4 3\n 3 7 4\n Name: b, dtype: int64\n \"\"\"\n if self._psser._internal.index_level > 1:\n raise ValueError(\"nlargest do not support multi-index now\")\n\n groupkey_col_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(self._groupkeys))]\n sdf = self._psser._internal.spark_frame.select(\n *[scol.alias(name) for scol, name in zip(self._groupkeys_scols, groupkey_col_names)],\n *[\n scol.alias(SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))\n for i, scol in enumerate(self._psser._internal.index_spark_columns)\n ],\n self._psser.spark.column,\n NATURAL_ORDER_COLUMN_NAME,\n )\n\n window = Window.partitionBy(*groupkey_col_names).orderBy(\n scol_for(sdf, self._psser._internal.data_spark_column_names[0]).desc(),\n NATURAL_ORDER_COLUMN_NAME,\n )\n\n temp_rank_column = verify_temp_column_name(sdf, \"__rank__\")\n sdf = (\n sdf.withColumn(temp_rank_column, F.row_number().over(window))\n .filter(F.col(temp_rank_column) <= n)\n .drop(temp_rank_column)\n ).drop(NATURAL_ORDER_COLUMN_NAME)\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=(\n [scol_for(sdf, col) for col in groupkey_col_names]\n + [\n scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))\n for i in range(self._psdf._internal.index_level)\n ]\n ),\n index_names=(\n [psser._column_label for psser in self._groupkeys]\n + self._psdf._internal.index_names\n ),\n index_fields=(\n [\n psser._internal.data_fields[0].copy(name=name)\n for psser, name in zip(self._groupkeys, groupkey_col_names)\n ]\n + [\n field.copy(name=SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))\n for i, field in enumerate(self._psdf._internal.index_fields)\n ]\n ),\n column_labels=[self._psser._column_label],\n data_spark_columns=[scol_for(sdf, self._psser._internal.data_spark_column_names[0])],\n data_fields=[self._psser._internal.data_fields[0]],\n )\n return first_series(DataFrame(internal))\n\n # TODO: add bins, normalize parameter\n def value_counts(\n self, sort: Optional[bool] = None, ascending: Optional[bool] = None, dropna: bool = True\n ) -> Series:\n \"\"\"\n Compute group sizes.\n\n Parameters\n ----------\n sort : boolean, default None\n Sort by frequencies.\n ascending : boolean, default False\n Sort in ascending order.\n dropna : boolean, default True\n Don't include counts of NaN.\n\n See Also\n --------\n pyspark.pandas.Series.groupby\n pyspark.pandas.DataFrame.groupby\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3, 3, 3],\n ... 'B': [1, 1, 2, 3, 3, np.nan]},\n ... columns=['A', 'B'])\n >>> df\n A B\n 0 1 1.0\n 1 2 1.0\n 2 2 2.0\n 3 3 3.0\n 4 3 3.0\n 5 3 NaN\n\n >>> df.groupby('A')['B'].value_counts().sort_index() # doctest: +NORMALIZE_WHITESPACE\n A B\n 1 1.0 1\n 2 1.0 1\n 2.0 1\n 3 3.0 2\n Name: B, dtype: int64\n\n Don't include counts of NaN when dropna is False.\n\n >>> df.groupby('A')['B'].value_counts(\n ... 
dropna=False).sort_index() # doctest: +NORMALIZE_WHITESPACE\n A B\n 1 1.0 1\n 2 1.0 1\n 2.0 1\n 3 3.0 2\n NaN 1\n Name: B, dtype: int64\n \"\"\"\n groupkeys = self._groupkeys + self._agg_columns\n groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]\n groupkey_cols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]\n\n sdf = self._psdf._internal.spark_frame\n\n agg_column = self._agg_columns[0]._internal.data_spark_column_names[0]\n sdf = sdf.groupby(*groupkey_cols).count().withColumnRenamed(\"count\", agg_column)\n\n if self._dropna:\n _groupkey_column_names = groupkey_names[: len(self._groupkeys)]\n sdf = sdf.dropna(subset=_groupkey_column_names)\n\n if dropna:\n _agg_columns_names = groupkey_names[len(self._groupkeys) :]\n sdf = sdf.dropna(subset=_agg_columns_names)\n\n if sort:\n if ascending:\n sdf = sdf.orderBy(scol_for(sdf, agg_column).asc())\n else:\n sdf = sdf.orderBy(scol_for(sdf, agg_column).desc())\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],\n index_names=[psser._column_label for psser in groupkeys],\n index_fields=[\n psser._internal.data_fields[0].copy(name=name)\n for psser, name in zip(groupkeys, groupkey_names)\n ],\n column_labels=[self._agg_columns[0]._column_label],\n data_spark_columns=[scol_for(sdf, agg_column)],\n )\n return first_series(DataFrame(internal))\n\n def unique(self) -> Series:\n \"\"\"\n Return unique values in group.\n\n Uniques are returned in order of unknown. It does NOT sort.\n\n See Also\n --------\n pyspark.pandas.Series.unique\n pyspark.pandas.Index.unique\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n ... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])\n\n >>> df.groupby(['a'])['b'].unique().sort_index() # doctest: +SKIP\n a\n 1 [1, 2]\n 2 [2, 3]\n 3 [3, 4]\n Name: b, dtype: object\n \"\"\"\n return self._reduce_for_stat_function(F.collect_set)\n\n\ndef is_multi_agg_with_relabel(**kwargs: Any) -> bool:\n \"\"\"\n Check whether the kwargs pass to .agg look like multi-agg with relabling.\n\n Parameters\n ----------\n **kwargs : dict\n\n Returns\n -------\n bool\n\n Examples\n --------\n >>> is_multi_agg_with_relabel(a='max')\n False\n >>> is_multi_agg_with_relabel(a_max=('a', 'max'),\n ... 
a_min=('a', 'min'))\n True\n >>> is_multi_agg_with_relabel()\n False\n \"\"\"\n if not kwargs:\n return False\n return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values())\n\n\ndef normalize_keyword_aggregation(\n kwargs: Dict[str, Tuple[Name, str]],\n) -> Tuple[Dict[Name, List[str]], List[str], List[Tuple]]:\n \"\"\"\n Normalize user-provided kwargs.\n\n Transforms from the new ``Dict[str, NamedAgg]`` style kwargs\n to the old defaultdict[str, List[scalar]].\n\n Parameters\n ----------\n kwargs : dict\n\n Returns\n -------\n aggspec : dict\n The transformed kwargs.\n columns : List[str]\n The user-provided keys.\n order : List[Tuple[str, str]]\n Pairs of the input and output column names.\n\n Examples\n --------\n >>> normalize_keyword_aggregation({'output': ('input', 'sum')})\n (defaultdict(<class 'list'>, {'input': ['sum']}), ['output'], [('input', 'sum')])\n \"\"\"\n aggspec: Dict[Union[Any, Tuple], List[str]] = defaultdict(list)\n order: List[Tuple] = []\n columns, pairs = zip(*kwargs.items())\n\n for column, aggfunc in pairs:\n if column in aggspec:\n aggspec[column].append(aggfunc)\n else:\n aggspec[column] = [aggfunc]\n\n order.append((column, aggfunc))\n # For MultiIndex, we need to flatten the tuple, e.g. (('y', 'A'), 'max') needs to be\n # flattened to ('y', 'A', 'max'), it won't do anything on normal Index.\n if isinstance(order[0][0], tuple):\n order = [(*levs, method) for levs, method in order]\n return aggspec, list(columns), order\n\n\ndef _test() -> None:\n import os\n import doctest\n import sys\n import numpy\n from pyspark.sql import SparkSession\n import pyspark.pandas.groupby\n\n os.chdir(os.environ[\"SPARK_HOME\"])\n\n globs = pyspark.pandas.groupby.__dict__.copy()\n globs[\"np\"] = numpy\n globs[\"ps\"] = pyspark.pandas\n spark = (\n SparkSession.builder.master(\"local[4]\")\n .appName(\"pyspark.pandas.groupby tests\")\n .getOrCreate()\n )\n (failure_count, test_count) = doctest.testmod(\n pyspark.pandas.groupby,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n"
] | [
[
"pandas.DataFrame",
"pandas.core.common._builtin_table.get",
"pandas.api.types.is_list_like",
"pandas.api.types.is_hashable"
]
] |
ManMohan291/PyProgram | [
"edcaa927bd70676bd14355acad7262ae2d32b8e5"
] | [
"01_SimpleLinearRegression/linearRegression.py"
] | [
"from os import system\nimport numpy as np\nimport scipy.optimize as op\nimport matplotlib.pyplot as plt\n\n####################################################################\ndef initTheta(size):\n return np.zeros((size, 1))\n\n####################################################################\ndef addBiasVector(X):\n return np.concatenate((np.ones((X.shape[0],1)),X),axis=1)\n\ndef concatenateVectors(X,Y):\n return np.concatenate((X,Y),axis=1)\n\n####################################################################\ndef clearScreen():\n system('cls')\n return\n\n####################################################################\ndef loadData(fileName):\n data= np.loadtxt(fileName, delimiter=',',unpack=True,dtype=float)\n data=data.T\n if (len(data.shape)==1):\n data.shape=(data.shape[0],1)\n return data\n\n####################################################################\ndef predict(theta,X):\n X=addBiasVector(X)\n return np.matmul(X, theta)\n\n####################################################################\ndef plotHypothesis(theta,X,y):\n plt.subplot(122)\n plt.scatter(X,y) \n Py=predict(theta,X) \n plt.plot(X, Py,color='r')\n plt.show()\n\n####################################################################\ndef computeCost(theta,X,y):\n m = X.shape[0] \n h=np.matmul( X,theta) #Hypothesis\n err=h-y\n errSqr=np.multiply(err,err)\n J=(1.0/(2.0*m))* np.sum(errSqr)\n return J\n \n####################################################################\ndef gradientDescent(X, y, theta, alpha, iterations): \n X=addBiasVector(X)\n m=len(y)\n I=np.zeros((iterations,1),dtype=float)\n J=np.zeros((iterations,1),dtype=float)\n for k in range(iterations):\n h=np.matmul( X,theta) #Hypothesis\n err=h-y\n d=np.matmul(err.T,X) \n g= alpha*((1.0/m)*d) #Derivative\n theta=theta -g.T #Theta Itrations \n I[k]=k*1.0\n J[k]=computeCost(theta,X,y)\n plt.subplot(121)\n plt.plot(I, J,color='r')\n return theta\n\n\n"
] | [
[
"numpy.concatenate",
"numpy.matmul",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.ones",
"numpy.multiply",
"numpy.loadtxt",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplot"
]
] |
imraviagrawal/sonnet | [
"3a305e16af9e274b89db2834e3b7cea9cea6806f"
] | [
"sonnet/python/modules/nets/alexnet_test.py"
] | [
"# Copyright 2017 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Tests for snt.nets.alexnet.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import variables\n\n\nclass AlexNetTest(tf.test.TestCase):\n\n def testCalcMinSize(self):\n \"\"\"Test the minimum input size calculator.\"\"\"\n net = snt.nets.AlexNet(mode=snt.nets.AlexNet.MINI)\n\n self.assertEqual(net._calc_min_size([(None, (3, 1), None)]), 3)\n self.assertEqual(net._calc_min_size([(None, (3, 1), (3, 2))]), 5)\n self.assertEqual(net._calc_min_size([(None, (3, 1), (3, 2)),\n (None, (3, 2), (5, 2))]), 25)\n\n def testModes(self):\n \"\"\"Test that each mode can be instantiated.\"\"\"\n\n modes = [\n snt.nets.AlexNet.FULL,\n snt.nets.AlexNet.HALF,\n snt.nets.AlexNet.MINI,\n ]\n\n keep_prob = tf.placeholder(tf.float32)\n\n for mode in modes:\n net = snt.nets.AlexNet(name=\"net_{}\".format(mode), mode=mode)\n input_shape = [None, net._min_size, net._min_size, 3]\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n net(inputs, keep_prob)\n\n def testBatchNorm(self):\n \"\"\"Test that batch norm can be instantiated.\"\"\"\n\n net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL,\n use_batch_norm=True)\n input_shape = [net._min_size, net._min_size, 3]\n inputs = tf.placeholder(tf.float32, shape=[None] + input_shape)\n output = net(inputs)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(output, feed_dict={inputs: np.random.rand(10, *input_shape)})\n\n # Check Tensorflow flags work\n is_training = tf.placeholder(tf.bool)\n test_local_stats = tf.placeholder(tf.bool)\n net(inputs,\n is_training=is_training,\n test_local_stats=test_local_stats)\n\n # Check Python is_training flag works\n net(inputs, is_training=False, test_local_stats=False)\n\n # Check that the appropriate moving statistics variables have been created.\n variance_name = \"alex_net/batch_norm/moving_variance:0\"\n mean_name = \"alex_net/batch_norm/moving_mean:0\"\n var_names = [var.name for var in tf.global_variables()]\n self.assertIn(variance_name, var_names)\n self.assertIn(mean_name, var_names)\n\n def testBatchNormConfig(self):\n batch_norm_config = {\n \"scale\": True,\n }\n\n model = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL,\n use_batch_norm=True,\n batch_norm_config=batch_norm_config)\n\n input_to_net = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))\n\n model(input_to_net)\n model_variables = model.get_variables()\n\n self.assertEqual(len(model_variables), 7 * 4)\n\n def testInputTooSmall(self):\n \"\"\"Check that an error is raised if the input image is too small.\"\"\"\n\n keep_prob = tf.placeholder(tf.float32)\n net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL)\n\n input_shape = [None, net._min_size, 
net._min_size, 1]\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n net(inputs, keep_prob)\n\n with self.assertRaisesRegexp(snt.IncompatibleShapeError,\n \"Image shape too small: (.*?, .*?) < .*?\"):\n input_shape = [None, net._min_size - 1, net._min_size - 1, 1]\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n net(inputs, keep_prob)\n\n def testSharing(self):\n \"\"\"Check that the correct number of variables are made when sharing.\"\"\"\n\n net = snt.nets.AlexNet(mode=snt.nets.AlexNet.MINI)\n inputs1 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])\n inputs2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])\n keep_prob1 = tf.placeholder(tf.float32)\n keep_prob2 = tf.placeholder(tf.float32)\n\n net(inputs1, keep_prob1)\n net(inputs2, keep_prob2)\n\n self.assertEqual(len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)),\n 7 * 2)\n\n model_variables = net.get_variables()\n self.assertEqual(len(model_variables), 7 * 2)\n\n def testInvalidInitializationParameters(self):\n err = \"Invalid initializer keys.*\"\n with self.assertRaisesRegexp(KeyError, err):\n snt.nets.AlexNet(\n initializers={\"not_w\": tf.truncated_normal_initializer(stddev=1.0)})\n\n err = \"Initializer for 'w' is not a callable function\"\n with self.assertRaisesRegexp(TypeError, err):\n snt.nets.AlexNet(initializers={\"w\": tf.zeros([1, 2, 3])})\n\n def testInvalidRegularizationParameters(self):\n with self.assertRaisesRegexp(KeyError, \"Invalid regularizer keys.*\"):\n snt.nets.AlexNet(\n regularizers={\"not_w\": tf.contrib.layers.l1_regularizer(scale=0.5)})\n\n err = \"Regularizer for 'w' is not a callable function\"\n with self.assertRaisesRegexp(TypeError, err):\n snt.nets.AlexNet(regularizers={\"w\": tf.zeros([1, 2, 3])})\n\n def testRegularizersInRegularizationLosses(self):\n regularizers = {\"w\": tf.contrib.layers.l1_regularizer(scale=0.5),\n \"b\": tf.contrib.layers.l2_regularizer(scale=0.5)}\n\n alex_net = snt.nets.AlexNet(regularizers=regularizers, name=\"alexnet1\")\n\n input_shape = [alex_net._min_size, alex_net._min_size, 3]\n inputs = tf.placeholder(tf.float32, shape=[None] + input_shape)\n alex_net(inputs)\n\n graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n\n alex_net_conv_layers = len(alex_net.conv_modules)\n for i in range(0, 2 * alex_net_conv_layers, 2):\n self.assertRegexpMatches(graph_regularizers[i].name, \".*l1_regularizer.*\")\n self.assertRegexpMatches(\n graph_regularizers[i + 1].name, \".*l2_regularizer.*\")\n\n def testInitializers(self):\n initializers = {\n \"w\": tf.constant_initializer(1.5),\n \"b\": tf.constant_initializer(2.5),\n }\n alex_net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL,\n initializers=initializers)\n input_shape = [None, alex_net.min_input_size, alex_net.min_input_size, 3]\n inputs = tf.placeholder(dtype=tf.float32, shape=input_shape)\n alex_net(inputs)\n init = tf.global_variables_initializer()\n\n with self.test_session() as sess:\n sess.run(init)\n for conv_module in alex_net.conv_modules:\n w_v, b_v = sess.run([conv_module.w, conv_module.b])\n self.assertAllClose(w_v, 1.5 * np.ones(w_v.shape))\n self.assertAllClose(b_v, 2.5 * np.ones(b_v.shape))\n\n def testPartitioners(self):\n partitioners = {\n \"w\": tf.fixed_size_partitioner(num_shards=2),\n \"b\": tf.fixed_size_partitioner(num_shards=2),\n }\n\n alex_net = snt.nets.AlexNet(partitioners=partitioners, name=\"alexnet1\")\n\n input_shape = [alex_net._min_size, alex_net._min_size, 3]\n inputs = tf.placeholder(tf.float32, shape=[None] + input_shape)\n 
alex_net(inputs)\n\n for conv_module in alex_net.conv_modules:\n self.assertEqual(type(conv_module.w), variables.PartitionedVariable)\n self.assertEqual(type(conv_module.b), variables.PartitionedVariable)\n\n for linear_module in alex_net.linear_modules:\n self.assertEqual(type(linear_module.w), variables.PartitionedVariable)\n self.assertEqual(type(linear_module.b), variables.PartitionedVariable)\n\n def testErrorHandling(self):\n err = \"`batch_norm_config` must be a mapping, e.g. `dict`.\"\n with self.assertRaisesRegexp(TypeError, err):\n snt.nets.AlexNet(batch_norm_config=\"not a valid config\")\n\n err = \"AlexNet construction mode 'BLAH' not recognised\"\n with self.assertRaisesRegexp(snt.Error, err):\n snt.nets.AlexNet(mode=\"BLAH\")\n\n def testGetLinearModules(self):\n alex_net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL)\n input_shape = [None, alex_net.min_input_size, alex_net.min_input_size, 3]\n inputs = tf.placeholder(dtype=tf.float32, shape=input_shape)\n alex_net(inputs)\n for mod in alex_net.linear_modules:\n self.assertEqual(mod.output_size, 4096)\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.zeros",
"tensorflow.constant_initializer",
"numpy.random.rand",
"tensorflow.fixed_size_partitioner",
"numpy.ones",
"tensorflow.global_variables",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.placeholder",
"tensorflow.test.main",
"tensorflow.truncated_normal_initializer",
"tensorflow.contrib.layers.l1_regularizer",
"tensorflow.global_variables_initializer",
"tensorflow.get_collection"
]
] |
LanceNorskog/keras-sift | [
"6942552684ec3f20f26c85627a32d4dd02e54b5f"
] | [
"keras_sift/keras_sift.py"
] | [
"import os\nimport sys\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport math\nfrom random import getrandbits\n\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.layers import Flatten, Activation, Input\nfrom keras.layers import Concatenate\nfrom keras.layers import Add, Multiply, Conv2D, Lambda, Reshape, ZeroPadding2D\n\nfrom copy import deepcopy\n\ndef Concat(x):\n return tf.concat(x, axis=-1)\n\ndef L2norm(x):\n return x / (0.000001 + K.sqrt(K.sum(K.square(x), axis=-1, keepdims=True)))\n\ndef MOD(x,a):\n return tf.math.mod(x,a)\n\ndef EQ(x,a):\n return tf.cast(tf.equal(x,a), dtype='float32')\n\ndef FL(x):\n return tf.floor(x)\n\ndef MulConst(x,y):\n return x * y\n\ndef KAtan2(x):\n return tf.atan2(x[1], x[0])\n\ndef sameshape(input_shape):\n return input_shape\n\ndef KAtan2_shape(input_shape):\n return input_shape[0]\n\ndef CircularGaussKernel(kernlen=21):\n halfSize = kernlen / 2;\n r2 = halfSize*halfSize;\n sigma2 = 0.9 * r2;\n disq = 0;\n kernel = np.zeros((kernlen,kernlen))\n for y in range(kernlen):\n for x in range(kernlen):\n disq = (y - halfSize)*(y - halfSize) + (x - halfSize)*(x - halfSize);\n if disq < r2:\n kernel[y,x] = math.exp(-disq / sigma2)\n else:\n kernel[y,x] = 0\n return kernel\n\ndef get_bin_weight_kernel_size_and_stride(patch_size, num_spatial_bins):\n bin_weight_stride = int(round(2.0 * math.floor(patch_size / 2) / float(num_spatial_bins + 1)))\n bin_weight_kernel_size = int(2 * bin_weight_stride - 1);\n return bin_weight_kernel_size, bin_weight_stride\n\ndef get_sift_model(feed, img_rows = 65, num_ang_bins = 8, num_spatial_bins = 4, clipval = 0.2):\n gk = CircularGaussKernel(kernlen=img_rows)\n gauss_kernel = tf.constant(value=gk, dtype='float32')\n grad_x = Conv2D(1, (3, 1), name = 'gx_' + str(getrandbits(20)))(feed)\n grad_x = ZeroPadding2D(padding=(1, 0))(grad_x)\n grad_x = Reshape((img_rows, img_rows))(grad_x)\n grad_y = Conv2D(1, (1, 3), name = 'gy_' + str(getrandbits(20)))(feed)\n grad_y = ZeroPadding2D(padding=(0,1))(grad_y)\n grad_y = Reshape((img_rows, img_rows))(grad_y)\n grad_x_2 = Lambda(lambda x: x ** 2)(grad_x)\n grad_y_2 = Lambda(lambda x: x ** 2)(grad_y)\n grad_sum_sq = Add()([grad_x_2, grad_y_2])\n magnitude = Lambda(lambda x: x ** 0.5)(grad_sum_sq)\n gauss_weighted_magn = Lambda(MulConst, arguments={'y': gauss_kernel})(magnitude)\n angle_shape = KAtan2_shape(grad_x)\n angle = Lambda(lambda x: KAtan2(x), output_shape = angle_shape)((grad_x, grad_y))\n o_big = Lambda(lambda x: (x + 2.0*math.pi)/ (2.0*math.pi) * float(num_ang_bins))(angle)\n bo0_big = Lambda(FL)(o_big)\n munis_bo0_big = Lambda(lambda x: -x)(bo0_big )\n wo1_big = Add()([o_big, munis_bo0_big])\n bo0_big = Lambda(MOD, arguments = {'a':num_ang_bins})(bo0_big)\n bo0_big_plus1 = Lambda(lambda x: (x +1.))(bo0_big) \n bo1_big = Lambda(MOD, arguments = {'a':num_ang_bins})(bo0_big_plus1) \n wo0_big = Lambda(lambda x: 1. 
- x)(wo1_big)\n wo0_big = Multiply()([wo0_big, gauss_weighted_magn])\n wo1_big = Multiply()([wo1_big, gauss_weighted_magn])\n\n ang_bins = []\n bin_weight_kernel_size, bin_weight_stride = get_bin_weight_kernel_size_and_stride(img_rows, num_spatial_bins)\n for i in range(0, num_ang_bins):\n mask1 = Lambda(EQ, arguments = {'a': i})(bo0_big)\n amask1 = Lambda(EQ, arguments = {'a': i})(bo1_big)\n weights1 = Multiply()([mask1,wo0_big])\n weights11 = Multiply()([amask1,wo1_big])\n ori0 = Add()([weights1, weights11])\n ori0 = Reshape((img_rows, img_rows, 1))(ori0)\n bin_weight = Conv2D(1, (bin_weight_kernel_size, bin_weight_kernel_size), \n strides = [bin_weight_stride, bin_weight_stride], \n name = 'bin_weight_' + str(getrandbits(20)))(ori0)\n bin_weight = Flatten()(bin_weight)\n ang_bins.append(bin_weight)\n \n ang_bins_merged = Concatenate()(ang_bins)\n l2norm = Lambda(L2norm)(ang_bins_merged)\n clipping = Lambda(lambda x: K.minimum(x,clipval))(l2norm)\n l2norm_again = Lambda(L2norm)(clipping)\n conv2d = Reshape((num_ang_bins, num_spatial_bins, num_spatial_bins, 1))(l2norm_again)\n return conv2d\n\ndef getPoolingKernel(kernel_size = 25):\n step = 1. / (1e-5 + float(kernel_size // 2))\n x_coef = np.arange(step/2., 1. ,step)\n xc2 = np.hstack([x_coef,[1], x_coef[::-1]])\n kernel = np.outer(xc2.T,xc2)\n kernel = np.maximum(0,kernel)\n return kernel\n\ndef initializeSIFT(model):\n for layer in model.layers:\n l_name = layer.get_config()['name']\n w_all = layer.get_weights()\n if l_name[0:2] == 'gy':\n new_weights = np.array([-1, 0, 1], dtype=np.float32)\n new_weights = np.reshape(new_weights, w_all[0].shape)\n elif l_name[0:2] == 'gx':\n new_weights = np.array([-1, 0, 1], dtype=np.float32)\n new_weights = np.reshape(new_weights, w_all[0].shape)\n elif 'bin_weight' in l_name:\n kernel_size = w_all[0].shape[0]\n nw = getPoolingKernel(kernel_size=kernel_size)\n new_weights = np.array(nw.reshape((kernel_size, kernel_size, 1, 1)))\n else:\n continue\n biases = np.array(w_all[1])\n biases[:] = 0\n w_all_new = [new_weights, biases]\n layer.set_weights(w_all_new)\n return model\n\n''' Can be used as the first layer of a larger model '''\ndef getSIFTModel(inputs=None, patch_size = 65, num_ang_bins = 8, num_spatial_bins = 4, clipval = 0.2):\n if inputs is None:\n inputs = tf.keras.layers.Input(shape=(patch_size, patch_size, 1))\n # assert shape is n, n, 1\n kerassift = get_sift_model(inputs, img_rows=patch_size, num_ang_bins=num_ang_bins, num_spatial_bins=num_spatial_bins, clipval=clipval)\n model = Model(inputs=inputs, outputs=kerassift)\n model = initializeSIFT(model)\n model.trainable = False\n return model\n\ndef getCompiledSIFTModel(patch_size = 65):\n inputs = Input((patch_size, patch_size, 1), name='main_input')\n kerassift = get_sift_model(inputs)\n model = Model(inputs=inputs, outputs=kerassift)\n model.compile(optimizer='Adam', loss='mse')\n model = initializeSIFT(model)\n model.trainable = False\n return model\n"
] | [
[
"tensorflow.floor",
"numpy.array",
"tensorflow.concat",
"numpy.zeros",
"tensorflow.keras.layers.Input",
"numpy.reshape",
"tensorflow.equal",
"tensorflow.atan2",
"tensorflow.constant",
"numpy.arange",
"numpy.outer",
"numpy.hstack",
"tensorflow.math.mod",
"numpy.maximum"
]
] |
EnjoyLifeFund/Debian_py36_packages | [
"1985d4c73fabd5f08f54b922e73a9306e09c77a5"
] | [
"theano/tensor/opt.py"
] | [
"from __future__ import absolute_import, print_function, division\n\"\"\" Tensor optimizations addressing the ops in basic.py.\n\"\"\"\n# TODO: intelligent merge for mul/add\n# TODO: 0*x -> 0\n\nfrom collections import defaultdict\nimport logging\nimport itertools\nimport operator\nimport sys\nimport time\nimport traceback\nimport warnings\n\nimport numpy as np\nfrom six import integer_types, iteritems\nfrom six.moves import reduce, xrange\n\nimport theano\nfrom theano import gof\nfrom theano.compat import izip\nfrom theano.gof import opt, InconsistencyError, TopoOptimizer, graph\nfrom theano.gof import Variable, Constant\nfrom theano.gof.opt import copy_stack_trace, in2out\nfrom theano.gof.utils import MethodNotDefined\nfrom theano.gradient import DisconnectedType\nfrom theano import config\nfrom theano.tensor.elemwise import Elemwise, DimShuffle\nfrom theano.tensor.subtensor import (get_idx_list, get_canonical_form_slice,\n Subtensor, IncSubtensor, make_constant,\n AdvancedIncSubtensor1,\n AdvancedIncSubtensor,\n AdvancedSubtensor1,\n advanced_subtensor,\n advanced_subtensor1,\n advanced_inc_subtensor1)\nfrom theano.tensor.sort import TopKOp\nfrom theano import scalar\nfrom theano.scalar import basic\nfrom theano.tensor import basic as T\nfrom theano import compile # to register the optimizer built by this file\nfrom theano.compile.ops import Shape, Shape_i\nfrom theano.tensor.type import (values_eq_approx_remove_inf,\n values_eq_approx_remove_nan,\n values_eq_approx_remove_inf_nan)\n\nfrom theano.gof.opt import (Optimizer, pre_constant_merge,\n pre_greedy_local_optimizer)\nfrom theano.gof import toolbox\nfrom theano.tensor.basic import (Alloc, get_scalar_constant_value, ShapeError,\n extract_constant, NotScalarConstantError,\n Reshape)\nfrom six import StringIO\n\n_logger = logging.getLogger('theano.tensor.opt')\n\n# Utilities\n\n\ndef _fill_chain(new_out, orig_inputs):\n for i in orig_inputs:\n new_out = T.fill(i, new_out)\n return [new_out]\n\n\ndef encompasses_broadcastable(b1, b2):\n \"\"\"\n\n Parameters\n ----------\n b1\n The broadcastable attribute of a tensor type.\n b2\n The broadcastable attribute of a tensor type.\n\n Returns\n -------\n bool\n True if the broadcastable patterns b1 and b2 are such that b2 is\n broadcasted to b1's shape and not the opposite.\n\n \"\"\"\n if len(b1) < len(b2):\n return False\n b1 = b1[-len(b2):]\n return not any(v1 and not v2 for v1, v2 in zip(b1, b2))\n\n\ndef merge_broadcastables(broadcastables):\n return [all(bcast) for bcast in zip(*broadcastables)]\n\n\ndef scalarconsts_rest(inputs, elemwise=True, only_process_constants=False):\n \"\"\"Partition a list of variables into two kinds:\n scalar constants, and the rest.\"\"\"\n consts = []\n origconsts = []\n nonconsts = []\n for i in inputs:\n try:\n v = get_scalar_constant_value(i, elemwise=elemwise,\n only_process_constants=only_process_constants)\n consts.append(v)\n origconsts.append(i)\n except NotScalarConstantError:\n nonconsts.append(i)\n return consts, origconsts, nonconsts\n\n\ndef broadcast_like(value, template, fgraph, dtype=None):\n \"\"\"\n Return a Variable with the same shape and dtype as the template,\n filled by broadcasting value through it. 
`value` will be cast as\n necessary.\n\n \"\"\"\n value = T.as_tensor_variable(value)\n if value.type == template.type:\n return value\n if template not in fgraph.variables:\n raise NotImplementedError('broadcast_like currently requires the '\n 'template Variable to be in the fgraph already')\n if dtype is None:\n dtype = template.dtype\n value = T.cast(value, dtype)\n if value.type == template.type:\n return value\n if hasattr(fgraph, 'shape_feature'):\n new_shape = fgraph.shape_feature.shape_of[template]\n else:\n new_shape = template.shape\n rval = T.alloc(value, *new_shape)\n # the template may have 1s in its shape without being broadcastable\n if rval.broadcastable != template.broadcastable:\n rval = T.unbroadcast(rval, *[i for i in xrange(rval.ndim)\n if rval.broadcastable[i] and\n not template.broadcastable[i]])\n assert rval.type.dtype == dtype\n\n if rval.type.broadcastable != template.broadcastable:\n raise AssertionError(\"rval.type.broadcastable is \" +\n str(rval.type.broadcastable) +\n \" but template.broadcastable is\" +\n str(template.broadcastable))\n\n return rval\n\n\nclass InplaceElemwiseOptimizer(Optimizer):\n \"\"\"\n We parametrise it to make it work for Elemwise and GpuElemwise op.\n \"\"\"\n def __init__(self, OP):\n self.op = OP\n\n def add_requirements(self, fgraph):\n fgraph.attach_feature(theano.gof.destroyhandler.DestroyHandler())\n\n @staticmethod\n def print_profile(stream, prof, level=0):\n blanc = (' ' * level)\n print(blanc, \"InplaceElemwiseOptimizer \", prof['opt'].op, file=stream)\n for k in ['node_before',\n 'nb_call_replace',\n 'nb_call_validate',\n 'nb_inconsistent']:\n print(blanc, k, prof[k], file=stream)\n ndim = prof['ndim']\n if ndim:\n print(blanc, \"ndim\", \"nb\", file=stream)\n for n in sorted(ndim.keys()):\n print(blanc, n, ndim[n], file=stream)\n\n def apply(self, fgraph):\n \"\"\"\n Usage: InplaceElemwiseOptimizer(op).optimize(fgraph)\n\n Attempts to replace all Broadcast ops by versions of them\n that operate inplace. It operates greedily: for each Broadcast\n Op that is encountered, for each output, tries each input to\n see if it can operate inplace on that input. 
If so, makes the\n change and go to the next output or Broadcast Op.\n\n Examples\n --------\n\n `x + y + z -> x += y += z`\n\n `(x + y) * (x * y) -> (x += y) *= (x * y) or (x + y) *= (x *= y)`\n\n \"\"\"\n # We should not validate too often as this takes too much time to\n # execute!\n # It is the _dfs_toposort() fct in theano/gof/destroyhandler.py\n # that takes so much time.\n # Should we try to use another lib that does toposort?\n # igraph: http://igraph.sourceforge.net/\n # networkx: https://networkx.lanl.gov/\n # Should we try to use cython?\n # Compiling only that fct is not enough, should we try to add the\n # deque class too?\n # And init the deque and other list to an upper bound number of\n # elements?\n # Maybe Theano should do online toposort as in\n # http://code.google.com/p/acyclic\n #\n # The next longest optimizer is the canonizer phase.\n # Then I think it is the [io_?]toposort (need to validate) so check if\n # the solution is also applicable there.\n\n # We execute `validate` after this number of change.\n prof = {'opt': self,\n 'node_before': len(fgraph.apply_nodes),\n 'nb_call_replace': 0,\n 'nb_call_validate': 0,\n 'nb_inconsistent': 0,\n 'ndim': defaultdict(lambda: 0)}\n\n check_each_change = config.tensor.insert_inplace_optimizer_validate_nb\n if check_each_change == -1:\n if len(fgraph.apply_nodes) > 500:\n check_each_change = 10\n else:\n check_each_change = 1\n\n nb_change_no_validate = 0\n chk = fgraph.checkpoint()\n\n if fgraph.update_mapping:\n update_outs = [fgraph.outputs[i] for i in fgraph.update_mapping]\n else:\n update_outs = []\n\n protected_inputs = [\n f.protected for f in fgraph._features if\n isinstance(f, theano.compile.function_module.Supervisor)]\n protected_inputs = sum(protected_inputs, []) # flatten the list\n protected_inputs.extend(fgraph.outputs)\n for node in list(graph.io_toposort(fgraph.inputs, fgraph.outputs)):\n op = node.op\n # gpuarray GpuElemwise inherit from Elemwise\n if not type(op) == self.op:\n continue\n # If big graph and the outputs are scalar, do not make it\n # inplace.\n if (check_each_change != 1 and\n # If multiple outputs, they must all have the same size,\n # so only check the first.\n getattr(node.outputs[0].type, 'ndim', -1) == 0):\n continue\n\n if op.inplace_pattern:\n # Maybe this isn't needed anymore, but I don't want to\n # rish regression now. 
This case only happen if the\n # original node add already some inplace patter and we\n # still try to add more pattern.\n\n baseline = op.inplace_pattern\n candidate_outputs = [i for i in xrange(len(node.outputs))\n if i not in baseline]\n # node inputs that are Constant, already destroyed,\n # or fgraph protected inputs and fgraph outputs can't be used as\n # inplace target.\n # Remove here as faster.\n candidate_inputs = [i for i in xrange(len(node.inputs))\n if i not in baseline.values() and\n not isinstance(node.inputs[i], Constant) and\n # the next line should not be costly most of the time.\n not fgraph.has_destroyers([node.inputs[i]]) and\n node.inputs[i] not in protected_inputs]\n else:\n baseline = []\n candidate_outputs = list(range(len(node.outputs)))\n # node inputs that are Constant, already destroyed,\n # fgraph protected inputs and fgraph outputs can't be used as inplace\n # target.\n # Remove here as faster.\n candidate_inputs = [i for i in xrange(len(node.inputs))\n if not isinstance(node.inputs[i], Constant) and\n not fgraph.has_destroyers([node.inputs[i]]) and\n node.inputs[i] not in protected_inputs]\n\n verbose = False\n\n raised_warning = not verbose\n\n for candidate_output in candidate_outputs:\n\n # If the output of the node can be established as an update\n # output of the fgraph, visit the candidate_inputs in an order\n # that will improve the chances of making the node operate\n # inplace on the input it's meant to update\n candidate_out_var = node.outputs[candidate_output]\n sorted_candidate_inputs = candidate_inputs\n\n if candidate_out_var in update_outs:\n\n # The candidate output is an update. Sort the\n # variables in candidate_inputs in the following order:\n # - Vars corresponding to the actual updated input\n # (best case scenario is for the node that procudes\n # an update to operate inplace on the variable to\n # update)\n # - Vars computed inplace on the updates input (second\n # best scenario if for the node to work inplace on\n # a variable obtained by a chain of inplace on the\n # variable to update. 
In some cases, this will be\n # equivalent to operating inplace on the variable to\n # update)\n # - Remaining variables\n updated_inputs = []\n for i, f_out in enumerate(fgraph.outputs):\n if (f_out is candidate_out_var and i in fgraph.update_mapping):\n updated_inp_idx = fgraph.update_mapping[i]\n updated_inputs.append(fgraph.inputs[updated_inp_idx])\n\n updated_vars = []\n vars_from_inplace = []\n other_vars = []\n for inp_idx in candidate_inputs:\n inp = node.inputs[inp_idx]\n if inp in updated_inputs:\n # the candidate input is the actual updated input\n updated_vars.append(inp_idx)\n elif (hasattr(fgraph, 'destroy_handler') and\n inp.owner and\n any([fgraph.destroy_handler.root_destroyer.get(up_inp, None) is inp.owner\n for up_inp in updated_inputs])):\n\n # the candidate input is a variable computed\n # inplace on the updated input via a sequence of\n # one or more inplace operations\n vars_from_inplace.append(inp_idx)\n else:\n other_vars.append(inp_idx)\n\n sorted_candidate_inputs = (updated_vars +\n vars_from_inplace + other_vars)\n\n for candidate_input in sorted_candidate_inputs:\n # remove inputs that don't have the same dtype as the output\n if node.inputs[candidate_input].type != node.outputs[\n candidate_output].type:\n continue\n\n inplace_pattern = dict(baseline)\n inplace_pattern[candidate_output] = candidate_input\n try:\n if hasattr(op.scalar_op, \"make_new_inplace\"):\n new_scal = op.scalar_op.make_new_inplace(\n scalar.transfer_type(\n *[inplace_pattern.get(i, o.dtype)\n for i, o in enumerate(node.outputs)]))\n else:\n new_scal = op.scalar_op.__class__(\n scalar.transfer_type(\n *[inplace_pattern.get(i, None)\n for i in xrange(len(node.outputs))]))\n new_outputs = self.op(new_scal, inplace_pattern)(\n *node.inputs, **dict(return_list=True))\n new_node = new_outputs[0].owner\n\n for r, new_r in zip(node.outputs, new_outputs):\n prof['nb_call_replace'] += 1\n fgraph.replace(r, new_r,\n reason=\"inplace_elemwise_optimizer\")\n nb_change_no_validate += 1\n prof['ndim'][candidate_out_var.ndim] += 1\n if nb_change_no_validate >= check_each_change:\n prof['nb_call_validate'] += 1\n fgraph.validate()\n chk = fgraph.checkpoint()\n nb_change_no_validate = 0\n except (ValueError, InconsistencyError) as e:\n prof['nb_inconsistent'] += 1\n if check_each_change != 1 and not raised_warning:\n print((\"Some inplace optimization was not \"\n \"performed due to unexpected error:\"),\n file=sys.stderr)\n print(e, file=sys.stderr)\n raised_warning = True\n fgraph.revert(chk)\n continue\n candidate_inputs.remove(candidate_input)\n node = new_node\n baseline = inplace_pattern\n break\n\n if nb_change_no_validate > 0:\n try:\n fgraph.validate()\n except Exception:\n if not raised_warning:\n print((\"Some inplace optimization was not \"\n \"performed due to unexpected error\"),\n file=sys.stderr)\n fgraph.revert(chk)\n return prof\n\n def print_summary(self, stream=sys.stdout, level=0, depth=-1):\n print(\"%s%s (%s)\" % (\n (' ' * level), self.__class__.__name__, self.op), file=stream)\n return inplace_elemwise_optimizer\n\ninplace_elemwise_optimizer = InplaceElemwiseOptimizer(T.Elemwise)\ncompile.optdb.register('inplace_elemwise_opt', inplace_elemwise_optimizer, 75,\n 'inplace_opt', # for historic reason\n 'inplace_elemwise_optimizer',\n 'fast_run', 'inplace')\n\n\ndef register_useless(lopt, *tags, **kwargs):\n if type(lopt) == str:\n def register(inner_lopt):\n return register_useless(inner_lopt, lopt, *tags, **kwargs)\n return register\n else:\n name = kwargs.pop('name', None) or 
lopt.__name__\n\n compile.mode.local_useless.register(name, lopt, 'last', 'fast_run',\n *tags, **kwargs)\n return lopt\n\n\ndef register_canonicalize(lopt, *tags, **kwargs):\n if type(lopt) == str:\n def register(inner_lopt):\n return register_canonicalize(inner_lopt, lopt, *tags, **kwargs)\n return register\n else:\n name = kwargs.pop('name', None) or lopt.__name__\n compile.optdb['canonicalize'].register(name, lopt, 'fast_run',\n *tags, **kwargs)\n return lopt\n\n\ndef register_stabilize(lopt, *tags, **kwargs):\n if type(lopt) == str:\n def register(inner_lopt):\n return register_stabilize(inner_lopt, lopt, *tags, **kwargs)\n return register\n else:\n name = kwargs.pop('name', None) or lopt.__name__\n compile.optdb['stabilize'].register(name, lopt, 'fast_run',\n *tags, **kwargs)\n return lopt\n\n\ndef register_specialize(lopt, *tags, **kwargs):\n if type(lopt) == str:\n def register(inner_lopt):\n return register_specialize(inner_lopt, lopt, *tags, **kwargs)\n return register\n else:\n name = kwargs.pop('name', None) or lopt.__name__\n compile.optdb['specialize'].register(name, lopt, 'fast_run',\n *tags, **kwargs)\n return lopt\n\n\ndef register_uncanonicalize(lopt, *tags, **kwargs):\n if type(lopt) == str:\n def register(inner_lopt):\n return register_uncanonicalize(inner_lopt, lopt, *tags, **kwargs)\n return register\n else:\n name = (kwargs and kwargs.pop('name', None)) or lopt.__name__\n compile.optdb['uncanonicalize'].register(name, lopt, 'fast_run', *tags,\n **kwargs)\n return lopt\n\n\ndef register_specialize_device(lopt, *tags, **kwargs):\n if type(lopt) == str:\n def register(inner_lopt):\n return register_specialize_device(inner_lopt, lopt, *tags, **kwargs)\n return register\n else:\n name = (kwargs and kwargs.pop('name', None)) or lopt.__name__\n compile.optdb['specialize_device'].register(name, lopt, 'fast_run', *tags,\n **kwargs)\n return lopt\n\n\n#####################\n# Dot optimizations #\n#####################\n\n@register_canonicalize\n@register_stabilize\[email protected]_optimizer([T.Dot])\ndef local_0_dot_x(node):\n if not isinstance(node.op, T.Dot):\n return False\n\n x = node.inputs[0]\n y = node.inputs[1]\n replace = False\n try:\n if get_scalar_constant_value(x, only_process_constants=True) == 0:\n replace = True\n except NotScalarConstantError:\n pass\n\n try:\n if get_scalar_constant_value(y, only_process_constants=True) == 0:\n replace = True\n except NotScalarConstantError:\n pass\n\n if replace:\n constant_zero = T.constant(0, dtype=node.outputs[0].type.dtype)\n if x.ndim == 2 and y.ndim == 2:\n constant_zero = assert_(constant_zero,\n T.eq(x.shape[1], y.shape[0]))\n return [T.alloc(constant_zero, x.shape[0], y.shape[1])]\n elif x.ndim == 1 and y.ndim == 2:\n constant_zero = assert_(constant_zero,\n T.eq(x.shape[0], y.shape[0]))\n return [T.alloc(constant_zero, y.shape[1])]\n elif x.ndim == 2 and y.ndim == 1:\n constant_zero = assert_(constant_zero,\n T.eq(x.shape[1], y.shape[0]))\n return [T.alloc(constant_zero, x.shape[0])]\n elif x.ndim == 1 and y.ndim == 1:\n constant_zero = assert_(constant_zero,\n T.eq(x.shape[0], y.shape[0]))\n return [constant_zero]\n else:\n _logger.warning(\"Optimization Warning: \"\n \"Optimization theano/opt.py:local_0_dot_x Found \"\n \"that it could apply, but was not implemented \"\n \"for dot product with these input types:\\n\"\n \"(%s, %s)\",\n x.type, y.type)\n\n######################\n# DimShuffle lifters #\n######################\n\n\ndef apply_local_dimshuffle_lift(var):\n # return var\n # lift recursively\n if not 
var.owner:\n return var\n new = local_dimshuffle_lift.transform(var.owner)\n if new:\n return new[0]\n return var\n\n\n# Checks for two types of useless dimshuffles:\n# 1 - dimshuffle all dimensions in order.\n# 2 - dimshuffle a broadcastable dimension.\ndef is_dimshuffle_useless(new_order, input):\n is_useless = True\n if len(new_order) == input.type.ndim:\n all_broadcastable_dims = [i for (i, is_broadcastable)\n in enumerate(input.type.broadcastable)\n if is_broadcastable] + ['x']\n for i in range(input.type.ndim):\n if (new_order[i] == i or\n (i in all_broadcastable_dims and\n new_order[i] in all_broadcastable_dims)):\n is_useless = True\n else:\n is_useless = False\n break\n else:\n is_useless = False\n return is_useless\n\n\[email protected]_optimizer([DimShuffle])\ndef local_dimshuffle_lift(node):\n \"\"\"\n \"Lifts\" DimShuffle through Elemwise operations and merges\n consecutive DimShuffles. Basically, applies the following\n transformations on the whole graph:\n\n DimShuffle(Elemwise(x, y)) => Elemwise(DimShuffle(x), DimShuffle(y))\n DimShuffle(DimShuffle(x)) => DimShuffle(x)\n DimShuffle{0,1,...}(x) => x (when the dimshuffle do nothing)\n\n After this transform, clusters of Elemwise operations are\n void of DimShuffle operations.\n\n \"\"\"\n op = node.op\n if not isinstance(op, DimShuffle):\n return False\n\n input = node.inputs[0]\n inode = input.owner\n new_order = op.new_order\n if inode and isinstance(inode.op, Elemwise) and (len(input.clients) == 1):\n # Don't use make_node to have tag.test_value set.\n new_inputs = []\n for inp in inode.inputs:\n new_inp = op.__class__(inp.type.broadcastable,\n op.new_order)(inp)\n new_inputs.append(apply_local_dimshuffle_lift(new_inp))\n copy_stack_trace(node.outputs[0], new_inputs)\n ret = inode.op(*new_inputs, **dict(return_list=True))\n return ret\n if inode and isinstance(inode.op, DimShuffle):\n new_order = [x == 'x' and 'x' or inode.op.new_order[x] for x in\n new_order]\n input = inode.inputs[0]\n\n if is_dimshuffle_useless(new_order, input):\n return [input]\n elif inode and isinstance(inode.op, DimShuffle):\n ret = op.__class__(input.type.broadcastable, new_order)(input)\n ret = apply_local_dimshuffle_lift(ret)\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n\n@register_canonicalize\[email protected]_optimizer([Reshape])\ndef local_useless_dimshuffle_in_reshape(node):\n \"\"\"\n Removes useless DimShuffle operation inside Reshape:\n\n reshape(vector.dimshuffle('x', 0), shp) => reshape(vector, shp)\n reshape(matrix.dimshuffle('x', 0, 'x', 1), shp) => reshape(matrix, shp)\n reshape(row.dimshuffle(1, 'x'), shp) => reshape(row, shp)\n reshape(col.dimshuffle(0), shp) => reshape(col, shp)\n\n \"\"\"\n op = node.op\n if not isinstance(op, Reshape):\n return False\n if not (node.inputs[0].owner is not None and\n isinstance(node.inputs[0].owner.op, DimShuffle)):\n return False\n\n new_order = node.inputs[0].owner.op.new_order\n input = node.inputs[0].owner.inputs[0]\n broadcastables = node.inputs[0].broadcastable\n new_order_of_nonbroadcast = []\n for i, bd in zip(new_order, broadcastables):\n if not bd:\n new_order_of_nonbroadcast.append(i)\n no_change_in_order = all(\n new_order_of_nonbroadcast[i] <= new_order_of_nonbroadcast[i + 1]\n for i in xrange(len(new_order_of_nonbroadcast) - 1))\n if no_change_in_order:\n shape = node.inputs[1]\n ret = op.__class__(node.outputs[0].ndim)(input, shape)\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n\n@register_canonicalize\[email protected]_optimizer([DimShuffle])\ndef 
local_lift_transpose_through_dot(node):\n \"\"\"\n dot(x,y).T -> dot(y.T, x.T)\n\n These optimizations \"lift\" (propagate towards the inputs) DimShuffle\n through dot product. It allows to put the graph in a more standard shape,\n and to later merge consecutive DimShuffles.\n\n The transformation should be apply whether or not the transpose is\n inplace. The newly-introduced transpositions are not inplace, this will\n be taken care of in a later optimization phase.\n\n \"\"\"\n if not (isinstance(node.op, T.DimShuffle) and node.op.new_order == (1, 0)):\n return False\n if not (node.inputs[0].owner and\n isinstance(node.inputs[0].owner.op, T.Dot)):\n return False\n x, y = node.inputs[0].owner.inputs\n\n if x.ndim == y.ndim == 2:\n # Output is dot product of transposed inputs in reverse order\n ret = [T.dot(y.T, x.T)]\n\n # Copy over stack trace to output from result of dot-product\n copy_stack_trace(node.inputs[0], ret)\n return ret\n\nregister_canonicalize(local_dimshuffle_lift)\nregister_specialize(local_dimshuffle_lift)\n\n######################\n# Casting operations #\n######################\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.TensorFromScalar])\ndef local_tensor_scalar_tensor(node):\n '''tensor_from_scalar(scalar_from_tensor(x)) -> x'''\n if isinstance(node.op, T.TensorFromScalar):\n s = node.inputs[0]\n if s.owner and isinstance(s.owner.op, T.ScalarFromTensor):\n t = s.owner.inputs[0]\n\n # We don't need to copy over any stack traces here\n return [t]\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.ScalarFromTensor])\ndef local_scalar_tensor_scalar(node):\n '''scalar_from_tensor(tensor_from_scalar(x)) -> x'''\n if isinstance(node.op, T.ScalarFromTensor):\n t = node.inputs[0]\n if t.owner and isinstance(t.owner.op, T.TensorFromScalar):\n s = t.owner.inputs[0]\n\n # We don't need to copy over any stack traces here\n return [s]\n\n#####################################\n# ShapeFeature, Shape optimizations\n#####################################\n\n\nclass MakeVector(T.Op):\n \"\"\"Concatenate a number of scalars together into a vector.\n\n This is a simple version of stack() that introduces far less cruft\n into the graph. Should work with 0 inputs. The constant_folding\n optimization will remove it.\n\n \"\"\"\n\n __props__ = (\"dtype\",)\n\n def __init__(self, dtype='int64'):\n self.dtype = dtype\n\n def make_node(self, *inputs):\n inputs = list(map(T.as_tensor_variable, inputs))\n if (not all(a.type == inputs[0].type for a in inputs) or\n (len(inputs) > 0 and inputs[0].dtype != self.dtype)):\n dtype = theano.scalar.upcast(self.dtype, *[i.dtype for i in inputs])\n # upcast the input to the determined dtype,\n # but don't downcast anything\n assert dtype == self.dtype, (\n \"The upcast of the inputs to MakeVector should match the \"\n \"dtype given in __init__.\")\n if not all(self.dtype == T.cast(i, dtype=dtype).dtype\n for i in inputs):\n raise TypeError(\"MakeVector.make_node expected inputs\"\n \" upcastable to %s. 
got %s\" %\n (self.dtype, str([i.dtype for i in inputs])))\n inputs = [T.cast(i, dtype=dtype) for i in inputs]\n assert all(self.dtype == a.dtype for a in inputs)\n assert all(a.ndim == 0 for a in inputs)\n\n if inputs:\n dtype = inputs[0].type.dtype\n else:\n dtype = self.dtype\n # bcastable = (len(inputs) == 1)\n bcastable = False\n otype = T.TensorType(broadcastable=(bcastable,), dtype=dtype)\n return T.Apply(self, inputs, [otype()])\n\n def perform(self, node, inputs, out_):\n out, = out_\n # not calling theano._asarray as optimization\n if (out[0] is None) or (out[0].size != len(inputs)):\n out[0] = theano._asarray(inputs, dtype=node.outputs[0].dtype)\n else:\n # assume that out has correct dtype. there is no cheap way to check\n out[0][...] = inputs\n\n def c_code_cache_version(self):\n return (2,)\n\n def c_code(self, node, name, inp, out_, sub):\n out, = out_\n # Shouldn't use PyArray_TYPE(inp[0]) for the dtype\n # when len(inp) == 0 (we need to support this case.\n # So there will be (1 * nb_dtype) + ((nb len(inp) - 1 ))\n # different c code with the following algo\n out_shape = len(inp)\n out_num = np.dtype(node.outputs[0].dtype).num\n # don't use dtype_%(out)s as when check_input=False, it isn't defined.\n out_dtype = node.outputs[0].type.dtype_specs()[1]\n if len(inp) > 0:\n assert self.dtype == node.inputs[0].dtype\n out_num = 'PyArray_TYPE(%s)' % inp[0]\n\n ret = \"\"\"\n npy_intp dims[1];\n dims[0] = %(out_shape)s;\n if(!%(out)s || PyArray_DIMS(%(out)s)[0] != %(out_shape)s){\n Py_XDECREF(%(out)s);\n %(out)s = (PyArrayObject*)PyArray_EMPTY(1, dims, %(out_num)s, 0);\n }\n \"\"\" % locals()\n for idx, i in enumerate(inp):\n ret += \"\"\"\n *((%(out_dtype)s *)PyArray_GETPTR1(%(out)s, %(idx)s)) = *((%(out_dtype)s *) PyArray_DATA(%(i)s));\n \"\"\" % locals()\n return ret\n\n def infer_shape(self, node, ishapes):\n return [(len(ishapes),)]\n\n def grad(self, inputs, output_gradients):\n # If the output is of an integer dtype, no gradient shall pass\n if self.dtype in theano.tensor.discrete_dtypes:\n return [ipt.zeros_like().astype(theano.config.floatX)\n for ipt in inputs]\n\n grads = []\n for i, inp in enumerate(inputs):\n grads.append(output_gradients[0][i])\n return grads\n\n def R_op(self, inputs, eval_points):\n if None in eval_points:\n return [None]\n return self.make_node(*eval_points).outputs\n\nmake_vector = MakeVector()\n\n\nclass MakeVectorPrinter:\n def process(self, r, pstate):\n if r.owner is None:\n raise TypeError(\"Can only print make_vector.\")\n elif isinstance(r.owner.op, MakeVector):\n old_precedence = getattr(pstate, 'precedence', None)\n try:\n pstate.precedence = 1000\n s = [pstate.pprinter.process(input)\n for input in r.owner.inputs]\n finally:\n pstate.precedence = old_precedence\n return \"[%s]\" % \", \".join(s)\n else:\n raise TypeError(\"Can only print make_vector.\")\n\nT.pprint.assign(MakeVector, MakeVectorPrinter())\n\n\nclass ShapeFeature(object):\n \"\"\"Graph optimizer for removing all calls to shape().\n\n This optimizer replaces all Shapes and Subtensors of Shapes with\n Shape_i and MakeVector Ops.\n\n This optimizer has several goals:\n\n 1. to 'lift' Shapes to as close to the inputs as possible.\n\n 2. to infer the shape of every node in the graph in terms of the\n input shapes.\n\n 3. remove all fills (T.second, T.fill) from the graph\n\n Lifting shapes as close to the inputs as possible is important for\n canonicalization because it is very bad form to have to compute\n something just to know how big it will be. 
Firstly, it is a waste\n of time to compute such outputs. But it is important to get rid\n of these outputs as early as possible in the compilation process\n because the extra computations make it appear as if many internal\n graph nodes have multiple clients. Many optimizations refuse to\n work on nodes with multiple clients.\n\n Lifting is done by using an `<Op>.infer_shape` function if one is\n present, or else using a conservative default. An Op that\n supports shape-lifting should define a infer_shape(self, node,\n input_shapes) function. The argument input_shapes is a tuple of\n tuples... there is an interior tuple for each input to the node.\n The tuple has as many elements as dimensions. The element in\n position i of tuple j represents the i'th shape component of the\n j'th input. The function should return a tuple of tuples. One\n output tuple for each node.output. Again, the i'th element of the\n j'th output tuple represents the output[j].shape[i] of the\n function. If an output is not a TensorType, then None should be\n returned instead of a tuple for that output.\n\n For example the infer_shape for a matrix-matrix product would accept\n input_shapes=((x0,x1), (y0,y1)) and return ((x0, y1),).\n\n Inferring the shape of internal nodes in the graph is important\n for doing size-driven optimizations. If we know how big various\n intermediate results will be, we can estimate the cost of many Ops\n accurately, and generate c-code that is specific [e.g. unrolled]\n to particular sizes.\n\n In cases where you cannot figure out the shape, raise a ShapeError.\n\n Notes\n -----\n Right now there is only the ConvOp that could really take\n advantage of this shape inference, but it is worth it even\n just for the ConvOp. All that's necessary to do shape\n inference is 1) to mark shared inputs as having a particular\n shape, either via a .tag or some similar hacking; and 2) to\n add an optional In() argument to promise that inputs will\n have a certain shape (or even to have certain shapes in\n certain dimensions). We can't automatically infer the shape of\n shared variables as they can change of shape during the\n execution by default. (NOT IMPLEMENTED YET, BUT IS IN TRAC)\n\n\n **Using Shape information in Optimizations**\n\n To use this shape information in OPTIMIZATIONS, use the\n ``shape_of`` dictionary.\n\n For example:\n\n .. code-block:: python\n\n try:\n shape_of = node.fgraph.shape_feature.shape_of\n except AttributeError:\n # This can happen when the mode doesn't include the ShapeFeature.\n return\n\n shape_of_output_zero = shape_of[node.output[0]]\n\n The ``shape_of_output_zero`` symbol will contain a tuple, whose\n elements are either integers or symbolic integers.\n\n TODO: check to see if the symbols are necessarily\n non-constant... or are integer literals sometimes Theano\n constants?? That would be confusing.\n\n \"\"\"\n def get_node_infer_shape(self, node):\n try:\n shape_infer = node.op.infer_shape\n except AttributeError:\n shape_infer = self.default_infer_shape\n\n try:\n o_shapes = shape_infer(node,\n [self.shape_of[r] for r in node.inputs])\n except ShapeError:\n o_shapes = self.default_infer_shape(node, [self.shape_of[r] for\n r in node.inputs])\n except NotImplementedError as e:\n raise NotImplementedError(\n 'Code called by infer_shape failed raising a '\n 'NotImplementedError. Raising NotImplementedError to '\n 'indicate that a shape cannot be computed is no longer '\n 'supported, and one should now use tensor.ShapeError '\n 'instead. 
The original exception message is: %s' % e)\n except Exception as e:\n msg = ('Failed to infer_shape from Op %s.\\nInput shapes: '\n '%s\\nException encountered during infer_shape: '\n '%s\\nException message: %s\\nTraceback: %s') % (\n node.op, [self.shape_of[r] for r in node.inputs],\n type(e), str(e), traceback.format_exc())\n if config.on_shape_error == \"raise\":\n raise Exception(msg)\n else:\n _logger.warning(msg)\n o_shapes = self.default_infer_shape(\n node, [self.shape_of[r] for r in node.inputs])\n\n return o_shapes\n\n def get_shape(self, var, idx):\n \"\"\" Optimization can call this to get the current shape_i\n\n It is better to call this then use directly shape_of[var][idx]\n as this method should update shape_of if needed.\n\n TODO: Up to now, we don't update it in all cases. Update in all cases.\n \"\"\"\n r = self.shape_of[var][idx]\n if (r.owner and\n isinstance(r.owner.op, Shape_i) and\n r.owner.inputs[0] not in var.fgraph.variables):\n assert var.owner\n node = var.owner\n # recur on inputs\n for i in node.inputs:\n if getattr(i, 'ndim', None) > 0:\n self.get_shape(i, 0)\n o_shapes = self.get_node_infer_shape(node)\n assert len(o_shapes) == len(node.outputs)\n\n # Only change the variables and dimensions that would introduce\n # extra computation\n for new_shps, out in zip(o_shapes, node.outputs):\n if not hasattr(out, 'ndim'):\n continue\n\n merged_shps = list(self.shape_of[out])\n changed = False\n for i in range(out.ndim):\n n_r = merged_shps[i]\n if (n_r.owner and\n isinstance(n_r.owner.op, Shape_i) and\n n_r.owner.inputs[0] not in var.fgraph.variables):\n changed = True\n merged_shps[i] = new_shps[i]\n if changed:\n self.set_shape(out, merged_shps, override=True)\n r = self.shape_of[var][idx]\n return r\n\n def shape_ir(self, i, r):\n \"\"\"Return symbolic r.shape[i] for tensor variable r, int i.\"\"\"\n if hasattr(r.type, \"broadcastable\") and r.type.broadcastable[i]:\n return self.lscalar_one\n else:\n # Do not call make_node for test_value\n s = Shape_i(i)(r)\n try:\n s = get_scalar_constant_value(s)\n except NotScalarConstantError:\n pass\n return s\n\n def shape_tuple(self, r):\n \"\"\"Return a tuple of symbolic shape vars for tensor variable r.\"\"\"\n if not hasattr(r, 'ndim'):\n # This happen for NoneConst.\n return None\n return tuple([self.shape_ir(i, r) for i in xrange(r.ndim)])\n\n def default_infer_shape(self, node, i_shapes):\n \"\"\"Return a list of shape tuple or None for the outputs of node.\n\n This function is used for Ops that don't implement infer_shape.\n Ops that do implement infer_shape should use the i_shapes parameter,\n but this default implementation ignores it.\n\n \"\"\"\n rval = []\n for r in node.outputs:\n try:\n rval.append(self.shape_tuple(r))\n except AttributeError:\n rval.append(None)\n return rval\n\n def unpack(self, s_i, var):\n \"\"\"Return a symbolic integer scalar for the shape element s_i.\n\n The s_i argument was produced by the infer_shape() of an Op subclass.\n\n var: the variable that correspond to s_i. 
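# Illustrative sketch (hypothetical Op, not from the original file) of the
# infer_shape protocol documented in the ShapeFeature docstring above: one
# shape tuple per input comes in, one shape tuple per output goes out. For a
# matrix-matrix product it could look like this (only the shape-inference
# part of the Op is shown):
import theano


class MatMulLike(theano.Op):
    """Hypothetical Op used only to illustrate infer_shape."""

    def infer_shape(self, node, input_shapes):
        (x0, x1), (y0, y1) = input_shapes
        # output[0].shape == (x0, y1), as in the docstring's example
        return [(x0, y1)]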
This is just for\n error reporting.\n\n \"\"\"\n # unpack the s_i that the Op returned\n assert s_i is not None\n if s_i == 1:\n # don't make the optimizer merge a zillion ones together\n # by always returning the same object to represent 1\n return self.lscalar_one\n if type(s_i) is float and int(s_i) == s_i:\n s_i = int(s_i)\n if (type(s_i) in integer_types or\n isinstance(s_i, np.integer) or\n (isinstance(s_i, np.ndarray) and s_i.ndim == 0)):\n # this shape is a constant\n if s_i < 0:\n msg = \"There is a negative shape in the graph!\"\n msg += gof.utils.get_variable_trace_string(var)\n # The rest of the pipeline don't handle correctly this\n # case. So we have 2 choices, stop compilation or\n # consider the shape as unknow. As we have more\n # chance to give the stack trace here then later, I\n # choose that options as it would give better error\n # message.\n raise AssertionError(msg)\n return T.constant(s_i, dtype='int64')\n if type(s_i) in (tuple, list):\n # this dimension is the same as many of the inputs\n # which tells us that if one of the inputs is known,\n # the others all become known.\n # TODO: should be implemented in Elemwise, and Dot\n #\n # worst case, we loop over shape_of and replace things\n raise NotImplementedError(s_i)\n\n # s_i is x.shape[i] for some x, we change it to shape_of[x][i]\n if (s_i.owner and\n isinstance(s_i.owner.op, Subtensor) and\n s_i.owner.inputs[0].owner and\n isinstance(s_i.owner.inputs[0].owner.op, T.Shape)):\n assert s_i.ndim == 0\n assert len(s_i.owner.op.idx_list) == 1\n\n # The current Subtensor always put constant index in the graph.\n # This was not True in the past. So call the Subtensor function\n # that will return the right index.\n idx = get_idx_list(s_i.owner.inputs, s_i.owner.op.idx_list)\n assert len(idx) == 1\n idx = idx[0]\n try:\n i = get_scalar_constant_value(idx)\n except NotScalarConstantError:\n pass\n else:\n # Executed only if no exception was raised\n x = s_i.owner.inputs[0].owner.inputs[0]\n # x should already have been imported, and should be in shape_of.\n s_i = self.shape_of[x][i]\n\n if s_i.type.dtype in theano.tensor.integer_dtypes:\n if getattr(s_i.type, 'ndim', 0):\n raise TypeError('Shape element must be scalar', s_i)\n return s_i\n else:\n raise TypeError('Unsupported shape element',\n s_i, type(s_i), getattr(s_i, 'type', None))\n\n def set_shape(self, r, s, override=False):\n \"\"\"Assign the shape `s` to previously un-shaped variable `r`.\n\n Parameters\n ----------\n r : a variable\n s : None or a tuple of symbolic integers\n override : If False, it mean r is a new object in the fgraph.\n If True, it mean r is already in the fgraph and we want to\n override its shape.\n\n \"\"\"\n if not override:\n assert r not in self.shape_of, 'r already in shape_of'\n if s is None:\n self.shape_of[r] = s\n else:\n if not isinstance(s, (tuple, list)):\n raise TypeError('shapes must be tuple/list', (r, s))\n\n if r.ndim != len(s):\n sio = StringIO()\n theano.printing.debugprint(r, file=sio, print_type=True)\n raise AssertionError(\n \"Something inferred a shape with %d dimensions \"\n \"for a variable with %d dimensions\"\n \" for the variable:\\n%s\" % (\n len(s), r.ndim, sio.getvalue()))\n\n shape_vars = []\n for i in xrange(r.ndim):\n if (hasattr(r.type, 'broadcastable') and\n r.type.broadcastable[i]):\n shape_vars.append(self.lscalar_one)\n else:\n shape_vars.append(self.unpack(s[i], r))\n assert all([not hasattr(r.type, \"broadcastable\") or\n not r.type.broadcastable[i] or\n # The two following comparison are a speed 
optimization\n # But we never timed this speed optimization!\n self.lscalar_one.equals(shape_vars[i]) or\n self.lscalar_one.equals(\n T.extract_constant(shape_vars[i]))\n for i in xrange(r.ndim)])\n self.shape_of[r] = tuple(shape_vars)\n for sv in shape_vars:\n self.shape_of_reverse_index.setdefault(sv, set()).add(r)\n\n def update_shape(self, r, other_r):\n \"\"\"Replace shape of r by shape of other_r.\n\n If, on some dimensions, the shape of other_r is not informative,\n keep the shape of r on those dimensions.\n\n \"\"\"\n # other_r should already have a shape\n assert other_r in self.shape_of, ('other_r not in shape_of', other_r)\n other_shape = self.shape_of[other_r]\n\n # If other_shape has no information, call is pointless.\n if other_shape is None:\n return\n\n if r in self.shape_of:\n r_shape = self.shape_of[r]\n else:\n # If no info is known on r's shape, use other_shape\n self.set_shape(r, other_shape)\n return\n if (other_r.owner and r.owner and\n other_r.owner.inputs == r.owner.inputs and\n other_r.owner.op == r.owner.op):\n # We are doing a merge. So the 2 shapes graph will be the\n # same. This is only a speed optimization to call\n # ancestors() less frequently.\n return\n\n # Merge other_shape with r_shape, giving the priority to other_shape\n merged_shape = []\n for i, ps in enumerate(other_shape):\n if r_shape is None and other_shape:\n merged_shape.append(other_shape[i])\n elif (ps.owner and\n isinstance(getattr(ps.owner, 'op', None), Shape_i) and\n ps.owner.op.i == i and\n ps.owner.inputs[0] in (r, other_r)):\n # If other_shape[i] is uninformative, use r_shape[i].\n # For now, we consider 2 cases of uninformative other_shape[i]:\n # - Shape_i(i)(other_r);\n # - Shape_i(i)(r).\n merged_shape.append(r_shape[i])\n elif isinstance(r_shape[i], (Constant, integer_types)):\n # We do this to call less often ancestors and make\n # sure we have the simplest shape possible.\n merged_shape.append(r_shape[i])\n elif isinstance(other_shape[i], (Constant, integer_types)):\n # We do this to call less often ancestors and make\n # sure we have the simplest shape possible.\n merged_shape.append(other_shape[i])\n elif other_shape[i] == r_shape[i]:\n # This mean the shape is equivalent\n # We do not want to do the ancestor check in those cases\n merged_shape.append(r_shape[i])\n elif r_shape[i] in theano.gof.graph.ancestors([other_shape[i]]):\n # Another case where we want to use r_shape[i] is when\n # other_shape[i] actually depends on r_shape[i]. In that case,\n # we do not want to substitute an expression with another that\n # is strictly more complex. 
Such a substitution could also lead\n # to cycles: if (in the future) r_shape[i] gets replaced by an\n # expression of other_shape[i], other_shape[i] may end up\n # depending on itself.\n merged_shape.append(r_shape[i])\n else:\n merged_shape.append(other_shape[i])\n assert all([(not hasattr(r.type, \"broadcastable\") or\n not r.type.broadcastable[i] and\n not other_r.type.broadcastable[i]) or\n # The two following comparison are a speed optimization\n # But we never timed this speed optimization!\n self.lscalar_one.equals(merged_shape[i]) or\n self.lscalar_one.equals(\n T.extract_constant(merged_shape[i], only_process_constants=True))\n for i in xrange(r.ndim)])\n self.shape_of[r] = tuple(merged_shape)\n for sv in self.shape_of[r]:\n self.shape_of_reverse_index.setdefault(sv, set()).add(r)\n\n def set_shape_i(self, r, i, s_i):\n '''Replace element i of shape_of[r] by s_i'''\n assert r in self.shape_of\n prev_shape = self.shape_of[r]\n # prev_shape is a tuple, so we cannot change it inplace,\n # so we build another one.\n new_shape = []\n for j, s_j in enumerate(prev_shape):\n if j == i:\n new_shape.append(self.unpack(s_i, r))\n else:\n new_shape.append(s_j)\n assert all([not hasattr(r.type, \"broadcastable\") or\n not r.type.broadcastable[idx] or\n # The two following comparison are a speed optimization\n # But we never timed this speed optimization!\n self.lscalar_one.equals(new_shape[idx]) or\n self.lscalar_one.equals(T.extract_constant(new_shape[idx]))\n for idx in xrange(r.ndim)])\n self.shape_of[r] = tuple(new_shape)\n for sv in self.shape_of[r]:\n self.shape_of_reverse_index.setdefault(sv, set()).add(r)\n\n def init_r(self, r):\n '''Register r's shape in the shape_of dictionary.'''\n if r not in self.shape_of:\n try:\n self.set_shape(r, self.shape_tuple(r))\n except AttributeError: # XXX: where would this come from?\n self.set_shape(r, None)\n\n def make_vector_shape(self, r):\n return make_vector(*self.shape_of[r])\n\n #\n # Feature interface\n #\n #\n def on_attach(self, fgraph):\n assert not hasattr(fgraph, 'shape_feature')\n fgraph.shape_feature = self\n # Must be local to the object as otherwise we reuse the same\n # variable for multiple fgraph!\n self.lscalar_one = T.constant(1, dtype='int64')\n assert self.lscalar_one.type == T.lscalar\n\n self.shape_of = {}\n # Variable -> tuple(scalars) or None (All tensor vars map to tuple)\n\n self.scheduled = {}\n # Variable ->\n\n self.shape_of_reverse_index = {}\n # shape var -> graph v\n\n for node in fgraph.toposort():\n self.on_import(fgraph, node, reason='on_attach')\n\n def on_detach(self, fgraph):\n self.shape_of = {}\n self.scheduled = {}\n self.shape_of_reverse_index = {}\n del fgraph.shape_feature\n\n def on_import(self, fgraph, node, reason):\n if node.outputs[0] in self.shape_of:\n # this is a revert, not really an import\n for r in node.outputs + node.inputs:\n assert r in self.shape_of\n return\n\n for i, r in enumerate(node.inputs):\n # make sure we have shapes for the inputs\n self.init_r(r)\n\n o_shapes = self.get_node_infer_shape(node)\n\n # this is packed information\n # an element of o_shapes is either None or a tuple\n # elements of the tuple can be either strings, or ints\n if len(o_shapes) != len(node.outputs):\n raise Exception(\n ('The infer_shape method for the Op \"%s\" returned a list ' +\n 'with the wrong number of element: len(o_shapes) = %d ' +\n ' != len(node.outputs) = %d') % (str(node.op),\n len(o_shapes),\n len(node.outputs)))\n\n # Ensure shapes are in 'int64'. 
This is to make sure the assert\n # found in the `local_useless_subtensor` optimization does not fail.\n for sh_idx, sh in enumerate(o_shapes):\n if sh is None:\n continue\n if not isinstance(sh, (list, tuple)):\n raise ValueError(\"infer_shape of %s didn't return a list of\"\n \" list. It returned '%s'\" % (str(node), str(o_shapes)))\n new_shape = []\n for i, d in enumerate(sh):\n # Note: we ignore any shape element that is not typed (i.e.,\n # does not have a 'dtype' attribute). This means there may\n # still remain int elements that are int32 on 32-bit platforms,\n # but this works with `local_useless_subtensor`, so for now we\n # keep it this way. See #266 for a better long-term fix.\n if getattr(d, 'dtype', 'int64') != 'int64':\n assert d.dtype in theano.tensor.discrete_dtypes, (node, d.dtype)\n assert str(d.dtype) != 'uint64', node\n new_shape += sh[len(new_shape):i + 1]\n if isinstance(d, T.Constant):\n casted_d = T.constant(d.data, dtype='int64')\n else:\n casted_d = theano.tensor.cast(d, 'int64')\n new_shape[i] = casted_d\n if new_shape:\n # We replace the shape with wrong dtype by the one with\n # 'int64'.\n new_shape += sh[len(new_shape):]\n o_shapes[sh_idx] = tuple(new_shape)\n\n for r, s in izip(node.outputs, o_shapes):\n self.set_shape(r, s)\n\n def on_change_input(self, fgraph, node, i, r, new_r, reason):\n if new_r not in self.shape_of:\n # It happen that the fgraph didn't called on_import for some\n # new_r. This happen when new_r don't have an\n # owner(i.e. it is a constant or an input of the graph)\n # update_shape suppose that r and new_r are in shape_of.\n self.init_r(new_r)\n\n # This tells us that r and new_r must have the same shape if\n # we didn't know that the shapes are related, now we do.\n self.update_shape(new_r, r)\n\n # change_input happens in two cases:\n # 1) we are trying to get rid of r, or\n # 2) we are putting things back after a failed transaction.\n\n # In case 1, if r has a shape_i client, we will want to\n # replace the shape_i of r with the shape of new_r. Say that\n # r is *scheduled*.\n # At that point, node is no longer a client of r, but of new_r\n for (shpnode, idx) in (r.clients + [(node, i)]):\n if isinstance(getattr(shpnode, 'op', None), Shape_i):\n idx = shpnode.op.i\n repl = self.shape_of[new_r][idx]\n if repl.owner is shpnode:\n # This mean the replacement shape object is\n # exactly the same as the current shape object. So\n # no need for replacement. This happen for example\n # with the InputToGpuOptimizer optimizer.\n continue\n if (repl.owner and\n repl.owner.inputs[0] is shpnode.inputs[0] and\n isinstance(repl.owner.op, Shape_i) and\n repl.owner.op.i == shpnode.op.i):\n # The replacement is a shape_i of the same\n # input. So no need to do this equivalent\n # replacement.\n continue\n\n if shpnode.outputs[0] in theano.gof.graph.ancestors([repl]):\n raise InconsistencyError(\n \"This substitution would insert a cycle in the graph:\"\n \"node: %s, i: %i, r: %s, new_r: %s\"\n % (node, i, r, new_r))\n\n self.scheduled[shpnode] = new_r\n # In case 2, if r is a variable that we've scheduled for shape update,\n # then we should cancel it.\n unscheduled = [k for k, v in self.scheduled.items() if v == r]\n for k in unscheduled:\n del self.scheduled[k]\n\n # In either case, r could be in shape_of.values(), that is, r itself\n # is the shape of something. In that case, we want to update\n # the value in shape_of, to keep it up-to-date.\n for v in self.shape_of_reverse_index.get(r, []):\n # The reverse index is only approximate. 
It is not updated on\n # deletion of variables, or on change_input so it might be the\n # case that there are a few extra `v`'s in it that no longer have\n # a shape of r or possibly have been deleted from shape_of\n # entirely. The important thing is that it permits to recall\n # all variables with r in their shape.\n for ii, svi in enumerate(self.shape_of.get(v, [])):\n if svi == r:\n self.set_shape_i(v, ii, new_r)\n self.shape_of_reverse_index[r] = set()\n\n def same_shape(self, x, y, dim_x=None, dim_y=None):\n \"\"\"Return True if we are able to assert that x and y have the\n same shape.\n\n dim_x and dim_y are optional. If used, they should be an index\n to compare only 1 dimension of x and y.\n\n \"\"\"\n sx = self.shape_of[x]\n sy = self.shape_of[y]\n if sx is None or sy is None:\n return False\n if dim_x is not None:\n sx = [sx[dim_x]]\n if dim_y is not None:\n sy = [sy[dim_y]]\n assert len(sx) == len(sy)\n\n # We look on each dimensions we want to compare.\n # If any of them can't be asserted to be equal, return False.\n # Otherwise, we return True at the end.\n for dx, dy in zip(sx, sy):\n if dx is dy:\n continue\n # Need to try to find that they are the same shape. We\n # need to compare the full graph. It could be slow. So I\n # just implement for now the case of Shape_i.\n if not dx.owner or not dy.owner:\n return False\n if (not isinstance(dx.owner.op, Shape_i) or\n not isinstance(dy.owner.op, Shape_i)):\n return False\n opx = dx.owner.op\n opy = dy.owner.op\n if not (opx.i == opy.i):\n return False\n # FB I'm not sure if this handle correctly constants.\n if dx.owner.inputs[0] == dy.owner.inputs[0]:\n continue\n # To be sure to cover all case, call equal_computation.\n # Can't use theano.gof.graph.is_same_graph(dx, dy)\n # As it currently expect that dx and dy aren't in a FunctionGraph\n from theano.scan_module.scan_utils import equal_computations\n if not equal_computations([dx], [dy]):\n return False\n return True\n\n\nclass ShapeOptimizer(Optimizer):\n \"\"\"Optimizer that serves to add ShapeFeature as an fgraph feature.\"\"\"\n def add_requirements(self, fgraph):\n fgraph.attach_feature(ShapeFeature())\n\n def apply(self, fgraph):\n pass\n\n\nclass UnShapeOptimizer(Optimizer):\n \"\"\"Optimizer remove ShapeFeature as an fgraph feature.\"\"\"\n def apply(self, fgraph):\n for feature in fgraph._features:\n if isinstance(feature, ShapeFeature):\n fgraph.remove_feature(feature)\n\n# Register it after merge1 optimization at 0. We don't want to track\n# the shape of merged node.\ntheano.compile.mode.optdb.register('ShapeOpt', ShapeOptimizer(),\n 0.1, 'fast_run', 'fast_compile')\n# Not enabled by default for now. Some crossentropy opt use the\n# shape_feature. They are at step 2.01. uncanonicalize is at step\n# 3. After it goes to 48.5 that move to the gpu. So 10 seem resonable.\ntheano.compile.mode.optdb.register('UnShapeOpt', UnShapeOptimizer(),\n 10)\n\n\ndef local_elemwise_alloc_op(ElemwiseOP, AllocOP, DimShuffleOP):\n def local_elemwise_alloc(node):\n \"\"\"\n elemwise(alloc(x, shp), ..., y.TensorType(BROADCAST CONDITION))\n -> elemwise(x, y.TensorType(BROADCAST CONDITION))\n\n elemwise(dimshuffle(alloc(x, shp)),... ,y.TensorType(BROADCAST CONDITION))\n -> elemwise(x.dimshuffle(...), y.TensorType(BROADCAST CONDITION))\n\n BROADCAST CONDITION: the condition is that the one input that are\n not to be optimized to have the same broadcast pattern as the\n output.\n\n We can change the alloc by a dimshuffle as the elemwise\n already have the shape info. 
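# Illustrative sketch (added example, not from the original file; import
# paths are assumed): the ShapeOptimizer registered above normally attaches
# the feature during compilation, but it can also be attached to a
# FunctionGraph by hand to inspect inferred shapes and to call same_shape(),
# which local_elemwise_alloc below relies on.
import theano
import theano.tensor as T
from theano.gof import FunctionGraph
from theano.tensor.opt import ShapeFeature

x = T.matrix('x')
fgraph = FunctionGraph([x], [x.sum(axis=0)])
fgraph.attach_feature(ShapeFeature())
out = fgraph.outputs[0]
print(fgraph.shape_feature.shape_of[out])          # tuple of symbolic shapes
print(fgraph.shape_feature.same_shape(out, out))   # True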
The dimshuffle will be faster\n to exec.\n\n \"\"\"\n if not isinstance(node.op, ElemwiseOP):\n return False\n\n if len(node.outputs) > 1:\n # Ensure all outputs have the same broadcast pattern\n # This is a supposition that I'm not sure is always true.\n assert all([o.type.broadcastable ==\n node.outputs[0].type.broadcastable for o in\n node.outputs[1:]])\n\n # The broadcast pattern of the ouptut must match the broadcast\n # pattern of at least one of the inputs.\n if not any([i.type.broadcastable ==\n node.outputs[0].type.broadcastable for i in node.inputs]):\n return False\n\n def dimshuffled_alloc(i):\n return (isinstance(i.owner.op, DimShuffleOP) and\n i.owner.inputs[0].owner and\n isinstance(i.owner.inputs[0].owner.op, AllocOP))\n\n # At least one input must have an owner that is either a AllocOP or a\n # DimShuffleOP with an owner that is a AllocOP -- otherwise there is\n # nothing to optimize.\n if not any([i.owner and (isinstance(i.owner.op, AllocOP) or\n dimshuffled_alloc(i)) for i in node.inputs]):\n return False\n\n # Search for input that we can use as a baseline for the dimensions.\n assert_op_idx = -1\n for idx, i in enumerate(node.inputs):\n if i.type.broadcastable == node.outputs[0].type.broadcastable:\n # Prefer an input that is not a AllocOP nor a DimShuffleOP of a\n # AllocOP so that all allocs can be optimized.\n if not (i.owner and (isinstance(i.owner.op, AllocOP) or\n dimshuffled_alloc(i))):\n assert_op_idx = idx\n break\n\n # It may be the case that only AllocOP and DimShuffleOP of AllocOP exist.\n if assert_op_idx < 0:\n # We want to optimize as many allocs as possible. When\n # there is more than one then do all but one. number of\n # inputs with alloc or dimshuffle alloc\n l2 = [i for i in node.inputs\n if (i.owner and (isinstance(i.owner.op, AllocOP) or\n dimshuffled_alloc(i)))]\n # If only 1 alloc or dimshuffle alloc, it is the one we\n # will use for the shape. So no alloc would be removed.\n if len(l2) > 1:\n # l containt inputs with alloc or dimshuffle alloc\n # only. 
Its length will always be at least one, as we\n # checked that before\n l = [idx for idx, i in enumerate(node.inputs)\n if i.broadcastable == node.outputs[0].broadcastable]\n assert_op_idx = l[0] # The first one is as good as any to use.\n else:\n # Nothing would be optimized!\n return False\n\n assert_op = node.inputs[assert_op_idx]\n cmp_op = assert_op\n new_i = []\n same_shape = node.fgraph.shape_feature.same_shape\n for i in node.inputs:\n # Remove alloc\n if (i.owner and isinstance(i.owner.op, AllocOP) and\n i.owner.inputs[0].type != i.owner.outputs[0].type):\n # when i.owner.inputs[0].type == i.owner.outputs[0].type we\n # will remove that alloc later\n assert i.type.ndim == cmp_op.ndim\n if theano.config.experimental.local_alloc_elemwise_assert:\n get_shape = node.fgraph.shape_feature.get_shape\n cond = []\n for idx in xrange(i.type.ndim):\n if (not i.type.broadcastable[idx] and\n not same_shape(i, cmp_op, idx, idx)):\n i_shp = get_shape(i, idx)\n cmp_shp = get_shape(cmp_op, idx)\n cond.append(T.eq(i_shp, cmp_shp))\n if cond:\n assert_op = assert_(assert_op, *cond)\n new_i.append(i.owner.inputs[0])\n\n # Remove Alloc in DimShuffle\n elif i.owner and dimshuffled_alloc(i):\n assert i.type.ndim == cmp_op.type.ndim\n if theano.config.experimental.local_alloc_elemwise_assert:\n assert_cond = [T.eq(i.shape[idx], cmp_op.shape[idx])\n for idx in xrange(i.type.ndim)\n if not i.type.broadcastable[idx] and\n not same_shape(i, cmp_op, idx, idx)]\n if assert_cond:\n assert_op = assert_(assert_op, *assert_cond)\n alloc_input = i.owner.inputs[0].owner.inputs[0]\n if alloc_input.ndim != i.owner.inputs[0].ndim:\n # The alloc can add dimension to the value\n # We add a dimshuffle to add them.\n # We let later optimization merge the multiple dimshuffle\n nb_dim_to_add = i.owner.inputs[0].ndim - alloc_input.ndim\n alloc_input = alloc_input.dimshuffle(\n ['x'] * nb_dim_to_add +\n list(range(alloc_input.ndim)))\n\n # We need to keep the dimshuffle. 
It could swap axes or\n # add dimensions anywhere.\n r_i = i.owner.op(alloc_input)\n\n # Copy stack trace from i to new_i\n copy_stack_trace(i, r_i)\n new_i.append(r_i)\n else:\n new_i.append(i)\n new_i[assert_op_idx] = assert_op\n\n ret = node.op(*new_i, return_list=True)\n\n # Copy over stack trace from previous outputs to new outputs.\n copy_stack_trace(node.outputs, ret)\n return ret\n\n return local_elemwise_alloc\n\n# TODO, global optimizer that lift the assert to the beginning of the graph.\n# TODO, optimize all inputs when possible -- currently when all inputs have\n# an alloc all but one is optimized.\n\nlocal_elemwise_alloc = register_specialize(\n gof.local_optimizer([T.Elemwise])(\n local_elemwise_alloc_op(T.Elemwise, T.Alloc, T.DimShuffle)),\n 'local_alloc_elemwise')\n\n\[email protected]_optimizer([T.Elemwise])\ndef local_fill_sink(node):\n \"\"\"\n f(fill(a, b), fill(c, d), e) -> fill(c, fill(a, f(b, d, e)))\n f need to be an elemwise that isn't a fill.\n \"\"\"\n if (not hasattr(node, 'op') or\n not isinstance(node.op, T.Elemwise) or\n node.op == T.fill):\n return False\n models = []\n inputs = []\n for input in node.inputs:\n if input.owner and input.owner.op == T.fill:\n models.append(input.owner.inputs[0])\n inputs.append(input.owner.inputs[1])\n else:\n inputs.append(input)\n if not models:\n return False\n c = node.op(*inputs)\n for model in models:\n if model.type != c.type:\n c = T.fill(model, c)\n\n # The newly created node c doesn't has 'clients',\n # so this iteration is took place with node.outputs[0]\n replacements = {node.outputs[0]: c}\n for client, cl_idx in node.outputs[0].clients:\n if (hasattr(client, 'op') and\n isinstance(client.op, T.Elemwise) and\n not client.op == T.fill):\n client_inputs = client.inputs[:]\n client_inputs[cl_idx] = c\n new_client = client.op(*client_inputs)\n\n # Add clients to new_client\n new_client.owner.outputs[0].clients = client.outputs[0].clients\n r = local_fill_sink.transform(new_client.owner)\n if not r:\n continue\n replacements.update(r)\n return replacements\n\nregister_canonicalize(local_fill_sink)\n\n\n@register_specialize\n@register_stabilize\n# @register_canonicalize # We make full pass after the canonizer phase.\[email protected]_optimizer([T.fill])\ndef local_fill_to_alloc(node):\n \"\"\"fill(s,v) -> alloc(v, shape(s))\n\n This is an important optimization because with the shape_to_shape_i\n optimization, the dependency on 's' is often removed.\n\n \"\"\"\n if node.op == T.fill:\n r, v = node.inputs\n if v.type == node.outputs[0].type:\n # this is a useless fill, erase it.\n rval = [v]\n elif v.type.broadcastable == node.outputs[0].type.broadcastable:\n # this is a cast\n rval = [T.cast(v, node.outputs[0].type.dtype)]\n elif r.type.broadcastable == node.outputs[0].type.broadcastable:\n # we are broadcasting v somehow, but not r\n o = broadcast_like(v, r, node.fgraph, dtype=v.dtype)\n copy_stack_trace(node.outputs[0], o)\n rval = [o]\n else:\n # we are broadcasting both v and r,\n # the output shape must be computed\n #\n # TODO: implement this case (including a test!)\n #\n # I think the strategy should be to extend the shorter\n # shape vector with 1s (how?) and then take the\n # elementwise max of the two. 
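# Illustrative sketch (added example, not from the original file) of the
# alloc-removal rewrite registered above: adding an explicitly alloc'd scalar
# to a matrix of the same shape is expected to compile to a broadcasted
# elemwise add, with the Alloc dropped (possibly guarded by an Assert on the
# runtime shapes, as the code above inserts when
# local_alloc_elemwise_assert is enabled).
import theano
import theano.tensor as T

s = T.scalar('s')
m = T.matrix('m')
z = T.alloc(s, m.shape[0], m.shape[1]) + m
f = theano.function([s, m], z)
theano.printing.debugprint(f)   # ideally no Alloc node remains under fast_run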
- how to flag an error of\n # shape mismatch where broadcasting should be illegal?\n return\n # TODO: cut out un-necessary dimshuffles of v\n\n assert rval[0].type == node.outputs[0].type, (\n 'rval', rval[0].type, 'orig', node.outputs[0].type, 'node',\n node,) # theano.printing.debugprint(node.outputs[0], file='str'))\n return rval\n\n# Register this after stabilize at 1.5 to make sure stabilize don't\n# get affected by less canonicalized graph due to alloc.\ncompile.optdb.register('local_fill_to_alloc',\n in2out(local_fill_to_alloc),\n 1.51, 'fast_run')\n# Needed to clean some extra alloc added by local_fill_to_alloc\ncompile.optdb.register('local_elemwise_alloc',\n in2out(local_elemwise_alloc),\n 1.52, 'fast_run')\n\n\n@register_canonicalize(\"fast_compile\")\n@register_useless\[email protected]_optimizer([T.fill])\ndef local_useless_fill(node):\n \"\"\"fill(s,v) -> v\n\n This optimization is only needed in FAST_COMPILE to make the code\n more readable. Normally, it is done by the local_fill_to_alloc\n opt.\n\n \"\"\"\n if node.op == T.fill:\n r, v = node.inputs\n if v.type == node.outputs[0].type:\n # this is a useless fill, erase it.\n # also, we don't need to copy over any stack traces here\n return [v]\n\n\n@register_specialize\n@register_stabilize\n@register_canonicalize\n@register_useless\[email protected]_optimizer([T.alloc])\ndef local_useless_alloc(node):\n \"\"\"\n If the input type is the same as the output type (dtype and broadcast)\n there is no change in the shape of the input. So this is just a simple copy\n of the input. This is not needed.\n\n \"\"\"\n op = node.op\n if not isinstance(op, Alloc):\n return False\n\n input = node.inputs[0]\n output = node.outputs[0]\n\n # Check if dtype and broadcast remain the same.\n if input.type == output.type:\n # We don't need to copy over any stack traces here\n return [input]\n\n\n@register_specialize\n@register_stabilize\n@register_canonicalize\[email protected]_optimizer([T.alloc])\ndef local_canonicalize_alloc(node):\n \"\"\"If the input type is the same as the output type (dtype and broadcast)\n there is no change in the shape of the input. So this is just a simple copy\n of the input. This is not needed. 
(as local_useless_alloc)\n\n Also, it will canonicalize alloc by creating Dimshuffle after the\n alloc to introduce the dimensions of constant size 1.\n\n See https://github.com/Theano/Theano/issues/4072 to know why this\n is needed.\n\n \"\"\"\n op = node.op\n if not isinstance(op, Alloc):\n return False\n\n input = node.inputs[0]\n output = node.outputs[0]\n\n # Check if dtype and broadcast remain the same.\n if input.type == output.type:\n # We don't need to copy over any stack traces here\n return [input]\n\n # Allow local_merge_alloc to do its work first\n clients = getattr(output, 'clients', [])\n for client, i in clients:\n if client != \"output\" and isinstance(client.op, Alloc):\n return\n\n # Check if alloc adds a broadcastable dimension with shape 1.\n\n output_shape = node.inputs[1:]\n num_dims_with_size_1_added_to_left = 0\n for i in range(len(output_shape) - input.ndim):\n if extract_constant(output_shape[i], only_process_constants=True) == 1:\n num_dims_with_size_1_added_to_left += 1\n else:\n break\n new_output_shape = output_shape[num_dims_with_size_1_added_to_left:]\n if num_dims_with_size_1_added_to_left > 0 and len(new_output_shape) >= input.ndim:\n if output.broadcastable[num_dims_with_size_1_added_to_left:] == input.broadcastable:\n inner = input\n else:\n inner = op(*([input] + new_output_shape))\n dimshuffle_new_order = (['x'] * num_dims_with_size_1_added_to_left +\n list(xrange(len(new_output_shape))))\n return [DimShuffle(inner.type.broadcastable, dimshuffle_new_order)(inner)]\n\n\n# Don't register by default.\[email protected]_optimizer([T.AllocEmpty])\ndef local_alloc_empty_to_zeros(node):\n \"\"\"This convert AllocEmpty to Alloc of 0.\n\n This help investigate NaN with NanGuardMode. Not registered by\n default. To activate it, use the Theano flag\n optimizer_including=alloc_empty_to_zeros. 
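# Illustrative sketch (added example, not from the original file) of the
# canonicalization above: an alloc that only prepends length-1 dimensions to
# its input is expected to be rewritten as a DimShuffle that adds
# broadcastable axes instead of a real allocation.
import theano
import theano.tensor as T

v = T.vector('v')
out = T.alloc(v, 1, 1, v.shape[0])    # only adds two leading size-1 dims
f = theano.function([v], out)
theano.printing.debugprint(f)         # ideally a DimShuffle, not an Alloc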
This also enable\n the GPU version of this optimizations.\n\n \"\"\"\n if isinstance(node.op, T.AllocEmpty):\n return [T.zeros(node.inputs, dtype=node.outputs[0].dtype)]\ncompile.optdb.register('local_alloc_empty_to_zeros',\n in2out(local_alloc_empty_to_zeros),\n # After move to gpu and merge2, before inplace.\n 49.3,\n 'alloc_empty_to_zeros',)\n\n\n@register_specialize\n@register_canonicalize\[email protected]_optimizer([T.Shape])\ndef local_shape_to_shape_i(node):\n if node.op == T.shape:\n # This optimization needs ShapeOpt and fgraph.shape_feature\n if not hasattr(node.fgraph, 'shape_feature'):\n return\n shape_feature = node.fgraph.shape_feature\n ret = shape_feature.make_vector_shape(node.inputs[0])\n\n # We need to copy over stack trace from input to output\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n\n# TODO: Not sure what type of node we are expecting here\n@register_specialize\n@register_canonicalize\[email protected]_optimizer(None)\ndef local_track_shape_i(node):\n try:\n shape_feature = node.fgraph.shape_feature\n except AttributeError:\n return\n if node in shape_feature.scheduled:\n # Don't unschedule node as it could be reinserted in the\n # fgraph as we don't change it in the shapefeature internal\n # structure.\n assert isinstance(node.op, Shape_i)\n replacement = shape_feature.scheduled[node]\n return [shape_feature.shape_of[replacement][node.op.i]]\n\n\n@register_specialize\n@register_canonicalize\[email protected]_optimizer([Subtensor])\ndef local_subtensor_inc_subtensor(node):\n \"\"\"\n Subtensor(SetSubtensor(x, y, idx), idx) -> y\n\n \"\"\"\n if isinstance(node.op, Subtensor):\n x = node.inputs[0]\n if not x.owner or not isinstance(x.owner.op, IncSubtensor):\n return\n if not x.owner.op.set_instead_of_inc:\n return\n\n if (x.owner.inputs[2:] == node.inputs[1:] and\n tuple(x.owner.op.idx_list) == tuple(node.op.idx_list)):\n out = node.outputs[0]\n y = x.owner.inputs[1]\n # If the dtypes differ, cast y into x.dtype\n if x.dtype != y.dtype:\n y = y.astype(x.dtype)\n if out.type == y.type:\n # if x[idx] and y have the same type, directly return y\n return [y]\n else:\n # The difference is related to broadcasting pattern\n assert out.broadcastable != y.broadcastable\n # We have to alloc y to the shape of x[idx]\n x_subtensor = node.op(x.owner.inputs[0], *x.owner.inputs[2:])\n return [T.alloc(y, *x_subtensor.shape)]\n else:\n return\n\n\n@register_specialize\n@register_canonicalize\[email protected]_optimizer([Subtensor])\ndef local_subtensor_remove_broadcastable_index(node):\n \"\"\"\n Remove broadcastable dimension with index 0 or -1\n a[:,:,:,0] -> a.dimshuffle(0,1,2), when\n a.broadcastable = (False, False, False, True)\n a[0,:,-1,:] -> a.dimshuffle(1,3), when\n a.broadcastable = (True, False, True, False)\n\n \"\"\"\n if isinstance(node.op, Subtensor):\n idx = node.op.idx_list\n else:\n return\n\n remove_dim = []\n node_inputs_idx = 1\n for dim, elem in enumerate(idx):\n if isinstance(elem, (scalar.Scalar)):\n # The idx is a Scalar, ie a Type. 
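# Illustrative sketch (added example, not from the original file) of
# local_subtensor_inc_subtensor defined above: reading back the exact slice
# that was just set should simplify to the value that was written.
import theano
import theano.tensor as T

x = T.vector('x')
y = T.vector('y')
out = T.set_subtensor(x[2:5], y)[2:5]
f = theano.function([x, y], out)   # expected to reduce to (a copy of) y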
This means the actual index\n # is contained in node.inputs[1]\n dim_index = node.inputs[node_inputs_idx]\n if type(dim_index) == theano.scalar.basic.ScalarConstant:\n dim_index = dim_index.value\n if dim_index in [0, -1] and node.inputs[0].broadcastable[dim]:\n remove_dim.append(dim)\n node_inputs_idx += 1\n else:\n return\n elif isinstance(elem, slice):\n if elem != slice(None):\n return\n elif isinstance(elem, (integer_types, np.integer)):\n if elem in [0, -1] and node.inputs[0].broadcastable[dim]:\n remove_dim.append(dim)\n else:\n raise TypeError('case not expected')\n\n if len(remove_dim) == 0:\n return\n else:\n all_dim = range(node.inputs[0].ndim)\n remain_dim = [x for x in all_dim if x not in remove_dim]\n return [node.inputs[0].dimshuffle(tuple(remain_dim))]\n\n\n@register_specialize\n@register_canonicalize('fast_compile_gpu')\n@register_useless\[email protected]_optimizer([Subtensor, AdvancedSubtensor1])\ndef local_subtensor_make_vector(node):\n \"\"\"\n Replace all subtensor(make_vector) like:\n [a,b,c][0] -> a\n [a,b,c][0:2] -> [a,b]\n\n Replace all AdvancedSubtensor1(make_vector) like:\n [a,b,c][[0,2]] -> [a,c]\n\n We can do this for constant indexes.\n\n \"\"\"\n x = node.inputs[0]\n if not x.owner or x.owner.op != make_vector:\n return\n\n if isinstance(node.op, Subtensor):\n # This optimization needs ShapeOpt and fgraph.shape_feature\n try:\n idx, = node.op.idx_list\n except Exception:\n # 'how can you have multiple indexes into a shape?'\n raise\n\n if isinstance(idx, (scalar.Scalar, T.TensorType)):\n # The idx is a Scalar, ie a Type. This means the actual index\n # is contained in node.inputs[1]\n old_idx, idx = idx, node.inputs[1]\n assert idx.type == old_idx\n elif isinstance(node.op, AdvancedSubtensor1):\n idx = node.inputs[1]\n else:\n return\n\n if isinstance(idx, (integer_types, np.integer)):\n # We don't need to copy over any stack traces here\n return [x.owner.inputs[idx]]\n elif isinstance(idx, Variable):\n if idx.ndim == 0:\n # if it is a constant we can do something with it\n try:\n v = get_scalar_constant_value(idx, only_process_constants=True)\n if isinstance(v, np.integer):\n # Python 2.4 wants to index only with Python integers\n v = int(v)\n # We don't need to copy over any stack traces here\n try:\n ret = [x.owner.inputs[v]]\n except IndexError:\n raise NotScalarConstantError(\"Bad user graph!\")\n return ret\n except NotScalarConstantError:\n pass\n elif idx.ndim == 1 and isinstance(idx, T.Constant):\n values = list(map(int, list(idx.value)))\n ret = make_vector(*[x.owner.inputs[v] for v in values])\n\n # Copy over stack trace from previous output to new output\n copy_stack_trace(node.outputs[0], ret)\n ret = T.patternbroadcast(ret, node.outputs[0].broadcastable)\n return [ret]\n else:\n raise TypeError('case not expected')\n elif isinstance(idx, slice):\n # it is a slice of ints and/or Variables\n # check subtensor to see if it can contain constant variables, and if\n # it can, then try to unpack them.\n try:\n const_slice = node.op.get_constant_idx(node.inputs,\n allow_partial=False)[0]\n ret = make_vector(*x.owner.inputs[const_slice])\n # Copy over stack trace from previous outputs to new output\n copy_stack_trace(node.outputs, ret)\n ret = T.patternbroadcast(ret, node.outputs[0].broadcastable)\n return [ret]\n except NotScalarConstantError:\n pass\n else:\n raise TypeError('case not expected')\n\n\n# TODO: the other optimization for and, or, xor, le and ge see ticket 
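# Illustrative sketch (added example, not from the original file) of
# local_subtensor_make_vector above, combined with the shape-to-shape_i
# rewrites: a constant index into the MakeVector produced for x.shape picks
# out a single Shape_i node.
import theano
import theano.tensor as T

x = T.tensor3('x')
f = theano.function([x], x.shape[1])
theano.printing.debugprint(f)   # expected to show Shape_i{1} rather than
                                # Shape followed by a Subtensor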
#496.\n\n@register_useless\n@register_canonicalize('fast_compile')\n@register_specialize\[email protected]_optimizer([T.Elemwise])\ndef local_useless_elemwise(node):\n \"\"\"\n eq(x, x) -> 1\n neq(x, x) -> 0\n mul(x) -> x\n add(x) -> x\n identity(x) -> x\n and(x, 1) -> x (if x.dtype == 'bool')\n and(x, 0) -> zeros_like(x)\n or(x, 0) -> x\n or(x, 1) -> ones_like(x) (if x.dtype == 'bool')\n xor(x, x) -> zeros_like(x)\n\n \"\"\"\n if isinstance(node.op, T.Elemwise):\n # We call zeros_like and one_like with opt=True to generate a\n # cleaner graph.\n dtype = node.outputs[0].dtype\n\n if node.op.scalar_op == theano.scalar.eq and len(node.inputs) == 2:\n if node.inputs[0] == node.inputs[1]:\n # it is the same var in the graph. That will always be true\n ret = T.ones_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy stack trace from input to constant output\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n elif node.op.scalar_op == theano.scalar.neq and len(node.inputs) == 2:\n if node.inputs[0] == node.inputs[1]:\n # it is the same var in the graph. That will always be false\n ret = T.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy stack trace from input to constant output\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n elif node.op.scalar_op == theano.scalar.mul and len(node.inputs) == 1:\n # No need to copy over any stack trace\n return [node.inputs[0]]\n\n elif node.op.scalar_op == theano.scalar.add and len(node.inputs) == 1:\n # No need to copy over any stack trace\n return [node.inputs[0]]\n elif (node.op.scalar_op == theano.scalar.identity and\n len(node.inputs) == 1):\n return [node.inputs[0]]\n\n elif (isinstance(node.op.scalar_op, scalar.AND) and\n len(node.inputs) == 2):\n\n if isinstance(node.inputs[0], T.TensorConstant):\n const_val = T.extract_constant(node.inputs[0], only_process_constants=True)\n if not isinstance(const_val, Variable):\n if const_val == 0:\n return [T.zeros_like(node.inputs[1], dtype=dtype,\n opt=True)]\n elif node.outputs[0].dtype == 'bool':\n # If the output is not Boolean, it is the bitwise AND,\n # and this optimization would be wrong\n return [node.inputs[1].astype(node.outputs[0].dtype)]\n\n if isinstance(node.inputs[1], T.TensorConstant):\n const_val = T.extract_constant(node.inputs[1], only_process_constants=True)\n if not isinstance(const_val, Variable):\n if const_val == 0:\n return [T.zeros_like(node.inputs[0], dtype=dtype,\n opt=True)]\n elif node.outputs[0].dtype == 'bool':\n # If the output is not Boolean, it is the bitwise AND,\n # and this optimization would be wrong\n return [node.inputs[0].astype(node.outputs[0].dtype)]\n\n elif (isinstance(node.op.scalar_op, scalar.OR) and\n len(node.inputs) == 2):\n\n if isinstance(node.inputs[0], T.TensorConstant):\n const_val = T.extract_constant(node.inputs[0], only_process_constants=True)\n if not isinstance(const_val, Variable):\n if const_val == 0:\n return [node.inputs[1].astype(node.outputs[0].dtype)]\n elif node.outputs[0].dtype == 'bool':\n # If the output is not Boolean, it is the bitwise OR,\n # and this optimization would be wrong\n return [T.ones_like(node.inputs[1], dtype=dtype,\n opt=True)]\n\n if isinstance(node.inputs[1], T.TensorConstant):\n const_val = T.extract_constant(node.inputs[1], only_process_constants=True)\n if not isinstance(const_val, Variable):\n if const_val == 0:\n return [node.inputs[0].astype(node.outputs[0].dtype)]\n elif node.outputs[0].dtype == 'bool':\n # If the output is not Boolean, it is the bitwise OR,\n # and this optimization 
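# Illustrative sketch (added example, not from the original file): two of the
# identities listed above, seen from user code. Comparisons of a variable
# with itself are expected to fold to constant-filled outputs during
# compilation.
import theano
import theano.tensor as T

x = T.vector('x')
f_eq = theano.function([x], T.eq(x, x))    # expected to become ones_like(x)
f_neq = theano.function([x], T.neq(x, x))  # expected to become zeros_like(x)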
would be wrong\n return [T.ones_like(node.inputs[0], dtype=dtype,\n opt=True)]\n\n elif (isinstance(node.op.scalar_op, scalar.XOR) and\n len(node.inputs) == 2):\n if node.inputs[0] is node.inputs[1]:\n return [T.zeros_like(node.inputs[0], dtype=dtype, opt=True)]\n\n\n@register_specialize\[email protected]_optimizer([T.Elemwise])\ndef local_alloc_unary(node):\n \"\"\"unary(alloc(x, shp)) -> alloc(unary(x), shp)\"\"\"\n if isinstance(node.op, T.Elemwise) and len(node.inputs) == 1:\n a = node.inputs[0]\n if a.owner and isinstance(a.owner.op, T.Alloc):\n x = a.owner.inputs[0]\n shp = a.owner.inputs[1:]\n v = node.op(x)\n # T.alloc does not preserve the stacktrace of v,\n # so we need to copy it over from x.\n copy_stack_trace(node.outputs[0], v)\n ret = T.alloc(T.cast(v, node.outputs[0].dtype), *shp)\n\n # T.cast does not preserve the stacktrace of x,\n # so we need to copy it over to the output.\n copy_stack_trace([node.outputs[0], a], ret)\n return [ret]\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.Elemwise])\ndef local_cast_cast(node):\n \"\"\"cast(cast(x, dtype1), dtype2)\n\n when those contrain:\n dtype1 == dtype2\n OR the base dtype is the same (int, uint, float, complex)\n and the first cast cause an upcast.\n\n \"\"\"\n if (not isinstance(node.op, T.Elemwise) or\n not isinstance(node.op.scalar_op, scalar.Cast)):\n return\n x = node.inputs[0]\n if (not x.owner or\n not isinstance(x.owner.op, T.Elemwise) or\n not isinstance(x.owner.op.scalar_op, scalar.Cast)):\n return\n\n type1 = x.owner.op.scalar_op.o_type\n type2 = node.op.scalar_op.o_type\n base = x.owner.inputs[0]\n\n if type1 == type2:\n # We don't need to copy over any stack traces here\n return [x]\n\n if(is_an_upcast(base.dtype, type1.dtype)):\n # Checking for further redundancy. Eg: int8 -> int32 -> int8\n if(type2.dtype == base.dtype):\n return x.owner.inputs\n else:\n # Apply the second cast only\n v = node.op(base)\n # Copy stack trace from the output of the original cast\n copy_stack_trace(node.outputs[0], v)\n return [v]\n\n\ndef is_an_upcast(type1, type2):\n \"\"\"Given two data types (as strings), check if converting to\n type2 from type1 constitutes an upcast.\n Differs from theano.scalar.upcast\n\n \"\"\"\n category = {\n # The first number in the pair is the dtype (bool, uint, int, float,\n # complex). Conversion from higher to lower is never an upcast.\n # The second number roughly indicates the precision. 
Again, conversion\n # from higher to lower is never an upcast.\n\n 'bool': (0, 0),\n 'uint8': (1, 1), 'uint16': (1, 2), 'uint32': (1, 3), 'uint64': (1, 4),\n 'int8': (2, 1), 'int16': (2, 2), 'int32': (2, 3), 'int64': (2, 4),\n 'float16': (3, 1.5), 'float32': (3, 2.5), 'float64': (3, 3.5),\n 'complex64': (4, 3), 'complex128': (4, 4)\n }\n\n cat1 = category[type1]\n cat2 = category[type2]\n\n if(cat2[0] >= cat1[0] and cat2[1] > cat1[1]):\n return True\n else:\n return False\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.Elemwise])\ndef local_func_inv(node):\n \"\"\"\n Check for two consecutive operations that are functional inverses\n and remove them from the function graph.\n\n \"\"\"\n inv_pairs = (\n (basic.Deg2Rad, basic.Rad2Deg),\n (basic.Cosh, basic.ArcCosh),\n (basic.Tanh, basic.ArcTanh),\n (basic.Sinh, basic.ArcSinh),\n (basic.Conj, basic.Conj),\n (basic.Neg, basic.Neg),\n (basic.Inv, basic.Inv),\n )\n x = node.inputs[0]\n\n if not isinstance(node.op, T.Elemwise):\n return\n if (not x.owner or not isinstance(x.owner.op, T.Elemwise)):\n return\n\n prev_op = x.owner.op.scalar_op\n node_op = node.op.scalar_op\n\n for inv_pair in inv_pairs:\n if is_inverse_pair(node_op, prev_op, inv_pair):\n # We don't need to copy stack trace, because the optimization\n # is trivial and maintains the earlier stack trace\n return x.owner.inputs\n\n return\n\n\ndef is_inverse_pair(node_op, prev_op, inv_pair):\n \"\"\"\n Given two consecutive operations, check if they are the\n provided pair of inverse functions.\n\n \"\"\"\n node_is_op0 = isinstance(node_op, inv_pair[0])\n node_is_op1 = isinstance(node_op, inv_pair[1])\n prev_is_op0 = isinstance(prev_op, inv_pair[0])\n prev_is_op1 = isinstance(prev_op, inv_pair[1])\n\n return (node_is_op0 and prev_is_op1) or (node_is_op1 and prev_is_op0)\n\n\nclass Assert(T.Op):\n \"\"\"\n Implements assertion in a computational graph.\n\n Returns the first parameter if the condition is true, otherwise, triggers\n AssertionError.\n\n Notes\n -----\n This Op is a debugging feature. It can be removed from the graph\n because of optimizations, and can hide some possible optimizations to\n the optimizer. Specifically, removing happens if it can be determined\n that condition will always be true. 
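# Illustrative sketch (added example, not from the original file): a few
# concrete values of the helper above, derived from its category table
# (assuming this module is importable as theano.tensor.opt).
from theano.tensor.opt import is_an_upcast

assert is_an_upcast('int8', 'int32')           # same base type, more precision
assert not is_an_upcast('float64', 'float32')  # precision decreases
assert not is_an_upcast('int32', 'float32')    # float32 ranks 2.5 < int32's 3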
Also, the output of the Op must be\n used in the function computing the graph, but it doesn't have to be\n returned.\n\n Examples\n --------\n >>> import theano\n >>> T = theano.tensor\n >>> x = T.vector('x')\n >>> assert_op = T.opt.Assert()\n >>> func = theano.function([x], assert_op(x, x.size<2))\n\n \"\"\"\n _f16_ok = True\n __props__ = ('msg',)\n view_map = {0: [0]}\n\n check_input = False\n\n def __init__(self, msg=\"Theano Assert failed!\"):\n self.msg = msg\n\n def __setstate__(self, attrs):\n self.__dict__.update(attrs)\n if not hasattr(self, 'msg'):\n self.msg = \"Theano Assert failed!\"\n\n def make_node(self, value, *conds):\n if not isinstance(value, Variable):\n value = T.as_tensor_variable(value)\n cond = [T.as_tensor_variable(c) for c in conds]\n assert np.all([c.type.ndim == 0 for c in cond])\n return gof.Apply(self, [value] + cond, [value.type()])\n\n def perform(self, node, inputs, out_):\n out, = out_\n v = inputs[0]\n out[0] = v\n assert np.all(inputs[1:]), self.msg\n\n def grad(self, input, output_gradients):\n return output_gradients + [DisconnectedType()()] * (len(input) - 1)\n\n def connection_pattern(self, node):\n return [[1]] + [[0]] * (len(node.inputs) - 1)\n\n def c_code(self, node, name, inames, onames, sub):\n value = inames[0]\n out = onames[0]\n check = []\n fail = sub['fail']\n msg = self.msg.replace('\"', '\\\\\"').replace('\\n', '\\\\n')\n for idx in xrange(len(inames) - 1):\n i = inames[idx + 1]\n dtype = node.inputs[idx + 1].dtype\n check.append('if(!((npy_%(dtype)s*)PyArray_DATA(%(i)s))[0])'\n '{PyErr_SetString(PyExc_AssertionError,\"%(msg)s\");'\n '%(fail)s}' % locals())\n check = \"\\n\".join(check)\n return \"\"\"\n %(check)s\n Py_XDECREF(%(out)s);\n %(out)s = %(value)s;\n Py_INCREF(%(value)s);\n \"\"\" % locals()\n\n def c_code_cache_version(self):\n return (3, 0)\n\n def infer_shape(self, node, input_shapes):\n return [input_shapes[0]]\n\nassert_ = Assert()\n# Unittest.assert_ is a deprecated name for assertTrue.\n# 2to3 convert theano.tensor.opt.assert_ to theano.tensor.opt.assertTrue\n# So I define a new name as a work around.\nassert_op = assert_\n\n\n@register_specialize\[email protected]_optimizer([Assert])\ndef local_remove_useless_assert(node):\n if isinstance(node.op, Assert):\n cond = []\n for c in node.inputs[1:]:\n try:\n const = get_scalar_constant_value(c)\n\n if 0 != const.ndim or const == 0:\n # Should we raise an error here? 
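# Illustrative sketch (added example, not from the original file) continuing
# the docstring example above: the assertion only fires at run time, when one
# of the conditions evaluates to False.
import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
assert_op = T.opt.Assert('x must have fewer than 2 elements')
func = theano.function([x], assert_op(x, x.size < 2))
func(np.ones(1, dtype=theano.config.floatX))     # passes, returns the input
# func(np.ones(2, dtype=theano.config.floatX))   # would raise AssertionError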
How to be sure it\n # is not catched?\n cond.append(c)\n except NotScalarConstantError:\n cond.append(c)\n\n if len(cond) == 0:\n # We don't need to copy over any stack traces here\n return [node.inputs[0]]\n if len(cond) != len(node.inputs) - 1:\n ret = assert_(node.inputs[0], *cond)\n\n # We copy over stack trace from the output of the original assert\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n\[email protected]_optimizer([Assert])\ndef local_remove_all_assert(node):\n \"\"\"An optimization disabled by default that removes all asserts from\n the graph.\n\n Notes\n -----\n See the :ref:`unsafe` section to know how to enable it.\n\n \"\"\"\n if not isinstance(node.op, Assert):\n return\n\n # We don't need to copy over any stack traces here\n return [node.inputs[0]]\n# Disabled by default\ncompile.optdb['canonicalize'].register('local_remove_all_assert',\n local_remove_all_assert,\n 'unsafe',\n use_db_name_as_tag=False)\ncompile.optdb['stabilize'].register('local_remove_all_assert',\n local_remove_all_assert,\n 'unsafe',\n use_db_name_as_tag=False)\ncompile.optdb['specialize'].register('local_remove_all_assert',\n local_remove_all_assert,\n 'unsafe',\n use_db_name_as_tag=False)\ncompile.optdb['useless'].register('local_remove_all_assert',\n local_remove_all_assert,\n 'unsafe',\n use_db_name_as_tag=False)\n\n#######################\n# Constant Canonicalization\n############################\n\n\n@register_canonicalize\[email protected]_optimizer([T.Elemwise])\ndef local_upcast_elemwise_constant_inputs(node):\n \"\"\"This explicitly upcasts constant inputs to elemwise Ops, when\n those Ops do implicit upcasting anyway.\n\n Rationale: it helps merge things like (1-x) and (1.0 - x).\n\n \"\"\"\n if len(node.outputs) > 1:\n return\n try:\n shape_i = node.fgraph.shape_feature.shape_i\n except AttributeError:\n shape_i = None\n if isinstance(node.op, T.Elemwise):\n scalar_op = node.op.scalar_op\n # print \"aa\", scalar_op.output_types_preference\n if (getattr(scalar_op, 'output_types_preference', None)\n in (T.scal.upgrade_to_float, T.scal.upcast_out)):\n # this is the kind of op that we can screw with the input\n # dtypes by upcasting explicitly\n output_dtype = node.outputs[0].type.dtype\n new_inputs = []\n for i in node.inputs:\n if i.type.dtype == output_dtype:\n new_inputs.append(i)\n else:\n try:\n # works only for scalars\n cval_i = get_scalar_constant_value(i,\n only_process_constants=True)\n if all(i.broadcastable):\n new_inputs.append(T.shape_padleft(\n T.cast(cval_i, output_dtype),\n i.ndim))\n else:\n if shape_i is None:\n return\n new_inputs.append(\n T.alloc(T.cast(cval_i, output_dtype),\n *[shape_i(d)(i)\n for d in xrange(i.ndim)]))\n # print >> sys.stderr, \"AAA\",\n # *[Shape_i(d)(i) for d in xrange(i.ndim)]\n except NotScalarConstantError:\n # for the case of a non-scalar\n if isinstance(i, T.TensorConstant):\n new_inputs.append(T.cast(i, output_dtype))\n else:\n new_inputs.append(i)\n\n if new_inputs != node.inputs:\n rval = [node.op(*new_inputs)]\n if rval[0].type != node.outputs[0].type:\n # This can happen for example when floatX=float32\n # and we do the true division between and int64\n # and a constant that will get typed as int8.\n\n # As this is just to allow merging more case, if\n # the upcast don't work, we can just skip it.\n return\n\n # Copy over output stacktrace from before upcasting\n copy_stack_trace(node.outputs[0], rval)\n return rval\n\n##################\n# Subtensor opts 
#\n##################\n\n\n@register_useless\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([IncSubtensor])\ndef local_useless_inc_subtensor(node):\n \"\"\"\n Remove IncSubtensor, when we overwrite the full inputs with the\n new value.\n\n \"\"\"\n if not isinstance(node.op, IncSubtensor):\n return\n if node.op.set_instead_of_inc is False:\n # This is an IncSubtensor, so the init value must be zeros\n try:\n c = get_scalar_constant_value(node.inputs[0],\n only_process_constants=True)\n if c != 0:\n return\n except NotScalarConstantError:\n return\n if (node.inputs[0].ndim != node.inputs[1].ndim or\n node.inputs[0].broadcastable != node.inputs[1].broadcastable):\n # FB: I didn't check if this case can happen, but this opt\n # don't support it.\n return\n # We have a SetSubtensor or an IncSubtensor on zeros\n # If is this IncSubtensor useful?\n\n # Check that we keep all the original data.\n # Put the constant inputs in the slice.\n idx_cst = get_idx_list(node.inputs[1:], node.op.idx_list)\n if all(isinstance(e, slice) and e.start is None and\n e.stop is None and (e.step is None or T.extract_constant(e.step,\n only_process_constants=True) == -1)\n for e in idx_cst):\n # IncSubtensor broadcast node.inputs[1] on node.inputs[0]\n # based on run time shapes, so we must check they are the same.\n if not hasattr(node.fgraph, 'shape_feature'):\n return\n if not node.fgraph.shape_feature.same_shape(node.inputs[0],\n node.inputs[1]):\n return\n # There is no reverse, so we don't need a replacement.\n if all(e.step is None\n for e in node.op.idx_list):\n # They are the same shape, so we can remore this IncSubtensor\n return [node.inputs[1]]\n ret = Subtensor(node.op.idx_list)(*node.inputs[1:])\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs, ret)\n return [ret]\n\n\n@register_canonicalize\[email protected]_optimizer([AdvancedIncSubtensor1])\ndef local_set_to_inc_subtensor(node):\n \"\"\"\n AdvancedIncSubtensor1(x, x[ilist]+other, ilist, set_instead_of_inc=True) ->\n AdvancedIncSubtensor1(x, other, ilist, set_instead_of_inc=False)\n\n \"\"\"\n if (isinstance(node.op, AdvancedIncSubtensor1) and\n node.op.set_instead_of_inc and\n node.inputs[1].owner and\n isinstance(node.inputs[1].owner.op, Elemwise) and\n isinstance(node.inputs[1].owner.op.scalar_op, scalar.Add)):\n addn = node.inputs[1].owner\n subn = None\n other = None\n\n if (addn.inputs[0].owner and\n isinstance(addn.inputs[0].owner.op, AdvancedSubtensor1)):\n subn = addn.inputs[0].owner\n other = addn.inputs[1]\n elif (addn.inputs[1].owner and\n isinstance(addn.inputs[1].owner.op, AdvancedSubtensor1)):\n subn = addn.inputs[1].owner\n other = addn.inputs[0]\n else:\n return\n if (subn.inputs[1] != node.inputs[2] or\n subn.inputs[0] != node.inputs[0]):\n return\n ret = advanced_inc_subtensor1(node.inputs[0], other, node.inputs[2])\n # Copy over previous output stacktrace\n # Julian: I'm not sure about this at all...\n copy_stack_trace(node.outputs, ret)\n return [ret]\n\n\n@register_useless\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([Subtensor])\ndef local_useless_slice(node):\n \"\"\"\n Remove Subtensor of the form X[0, :] -> X[0]\n \"\"\"\n if isinstance(node.op, Subtensor):\n slices = get_idx_list(node.inputs, node.op.idx_list)\n last_slice = len(slices)\n for s in slices[::-1]:\n # check if slice and then check slice indices\n if (isinstance(s, slice) and s.start is None and s.stop is None and\n (s.step is None or T.extract_constant(s.step,\n 
only_process_constants=True) == 1)):\n last_slice -= 1\n else:\n break\n # check if we removed something\n if last_slice < len(slices):\n subtens = Subtensor(slices[:last_slice])\n sl_ins = Subtensor.collapse(slices[:last_slice],\n lambda x: isinstance(x, T.Variable))\n out = subtens(node.inputs[0], *sl_ins)\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs, out)\n return [out]\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([Subtensor, AdvancedSubtensor1])\ndef local_useless_subtensor(node):\n \"\"\"\n Remove Subtensor/AdvancedSubtensor1 if it takes the full input. In the\n AdvancedSubtensor1 case, the full input is taken when the indices are\n equivalent to `arange(0, input.shape[0], 1)` using either an explicit\n list/vector or the ARange op.\n\n \"\"\"\n\n # If the optimization is tried over a node that is not a part of graph before\n if not hasattr(node, 'fgraph'):\n return\n\n # This optimization needs ShapeOpt and fgraph.shape_feature\n if not hasattr(node.fgraph, 'shape_feature'):\n return\n\n shape_of = node.fgraph.shape_feature.shape_of\n\n if isinstance(node.op, Subtensor):\n cdata = node.op.get_constant_idx(node.inputs, allow_partial=True,\n only_process_constants=True)\n for pos, idx in enumerate(cdata):\n if not isinstance(idx, slice):\n # If idx is not a slice, this means we remove this dimension\n # from the output, so the subtensor is not useless\n return False\n if idx.start is not None and idx.start != 0:\n # If the start of the slice is different from 0, or is a\n # variable, then we assume the subtensor is not useless\n return False\n if idx.step is not None and idx.step != 1:\n # If we are going backwards, or skipping elements, then this\n # is not a useless subtensor\n return False\n\n for pos, idx in enumerate(cdata):\n\n length_pos = shape_of[node.inputs[0]][pos]\n\n if isinstance(idx.stop, (integer_types, np.integer)):\n length_pos_data = sys.maxsize\n try:\n length_pos_data = get_scalar_constant_value(length_pos,\n only_process_constants=True)\n except NotScalarConstantError:\n pass\n\n if idx.stop < length_pos_data:\n return False\n elif isinstance(idx.stop, gof.Variable):\n length_pos_shape_i = idx.stop\n # length_pos is a tensor variable, but length_pos_shape_i\n # is a scalar variable. We try to see if they represent\n # the same underlying variable.\n if (length_pos_shape_i.owner and\n isinstance(length_pos_shape_i.owner.op,\n T.ScalarFromTensor)):\n length_pos_shape_i = length_pos_shape_i.owner.inputs[0]\n elif (length_pos.owner and\n isinstance(length_pos.owner.op, T.TensorFromScalar)):\n length_pos = length_pos.owner.inputs[0]\n else:\n # We did not find underlying variables of the same type\n return False\n\n # The type can be different: int32 vs int64. length_pos\n # should always be int64 as that is what the shape\n # tracker keep. 
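# --- Illustrative sketch, not part of this module ---
# Pure-NumPy illustration of the identity local_useless_subtensor relies on
# in its AdvancedSubtensor1 branch: indexing the first axis with a vector
# equivalent to arange(0, x.shape[0], 1) simply reproduces the full input.
# Names prefixed with an underscore are illustrative placeholders.
import numpy as np

_x = np.arange(12.0).reshape(3, 4)
_idx = np.arange(0, _x.shape[0], 1)
assert np.array_equal(_x[_idx], _x)
assert np.array_equal(_x[[0, 1, 2]], _x)   # an explicit index list behaves the same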
Subtensor accept any scalar int{8,16,32,64}\n # as index type.\n assert str(length_pos.type.dtype) == \"int64\"\n assert str(length_pos_shape_i.type.dtype) in [\"int8\", \"int16\",\n \"int32\", \"int64\"]\n\n # length_pos_shape_i cannot be None\n if length_pos_shape_i != length_pos:\n return False\n elif idx.stop is None:\n pass\n else:\n return False\n elif isinstance(node.op, AdvancedSubtensor1):\n # get length of the indexed tensor along the first axis\n try:\n length = get_scalar_constant_value(shape_of[node.inputs[0]][0],\n only_process_constants=True)\n except NotScalarConstantError:\n return False\n\n # get index (which must be a vector by definition)\n idx = node.inputs[1]\n\n # `idx` must be equivalent to [0,1,...,shape[0] - 1] to qualify for\n # this optimization\n if isinstance(idx, T.Constant):\n idx = idx.value\n if len(idx) != length:\n return False\n if np.any(idx != np.arange(length)):\n return False\n elif idx.owner is not None and isinstance(idx.owner.op, T.ARange):\n try:\n start, stop, step = map(lambda x: get_scalar_constant_value(x,\n only_process_constants=True),\n idx.owner.inputs)\n except NotScalarConstantError:\n return False\n\n if start != 0:\n return False\n if stop != length:\n return False\n if step != 1:\n return False\n else:\n return False\n else:\n return False\n\n # We don't need to copy over any stacktrace here,\n # because previous stacktrace should suffice.\n return [node.inputs[0]]\n\n\n# fast_compile to allow opt subtensor(cast{float32}(make_vector))\n@register_canonicalize('fast_compile')\[email protected]_optimizer([Subtensor])\ndef local_subtensor_lift(node):\n \"\"\"\n unary(x)[idx] -> unary(x[idx])#any broadcast pattern.\n\n Handles the following unary ops:\n elemwise(x,...)[idx] -> elemwise(x[idx],...)\n when x,... 
are broadcasted scalar or not broadcasted at all\n rebroadcast(x)[idx] => rebroadcast(x[idx])\n\n \"\"\"\n if isinstance(node.op, Subtensor):\n u = node.inputs[0]\n if not u.owner or len(u.clients) > 1:\n return False\n\n if isinstance(u.owner.op, T.Elemwise) and len(u.owner.inputs) == 1:\n idx = node.inputs[1:]\n x_idx = node.op(u.owner.inputs[0], *idx)\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs, x_idx)\n ret = u.owner.op(x_idx)\n # Copy over previous output stacktrace\n # and stacktrace from previous unary operation\n copy_stack_trace([node.outputs[0], node.inputs[0]], ret)\n return [ret]\n\n if isinstance(u.owner.op, T.Elemwise):\n new_inputs = []\n if all([sum(i.type.broadcastable) == 0 for i in u.owner.inputs]):\n # There is no broadcastable in the inputs\n idx = node.inputs[1:]\n new_inputs = [node.op(i, *idx) for i in u.owner.inputs]\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs[0], new_inputs)\n\n ret = u.owner.op(*new_inputs)\n # Copy over previous output stacktrace\n # and stacktrace from previous unary operation\n copy_stack_trace([node.outputs[0], node.inputs[0]], ret)\n return [ret]\n elif all([sum(i.type.broadcastable) in [i.ndim, 0]\n for i in u.owner.inputs]):\n # There is no broadcastable in the inputs or it is scalar\n idx = node.inputs[1:]\n new_inputs = []\n for i in u.owner.inputs:\n if sum(i.type.broadcastable) == 0:\n new_inputs.append(node.op(i, *idx))\n else:\n # If the subtensor remove some dims, we must\n # lower the number of dimensions of this scalar.\n if node.outputs[0].ndim == i.ndim:\n new_inputs.append(i)\n else:\n new_inputs.append(\n i.dimshuffle(['x'] * node.outputs[0].ndim))\n\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs[0], new_inputs)\n\n ret = u.owner.op(*new_inputs)\n # Copy over previous output stacktrace\n # and stacktrace from previous unary operation\n copy_stack_trace([node.outputs[0], node.inputs[0]], ret)\n return [ret]\n\n if isinstance(u.owner.op, T.Rebroadcast):\n # make sure that Rebroadcast has only 1 input\n assert len(u.owner.inputs) == 1\n\n # Subtensor might reduce dim., adapt broadcast pattern accordingly\n new_axis = []\n\n # loop through indices being subtensor-ed\n # i indexes broadcastable pattern before subtensor\n # j indexes broadcastable pattern after subtensor\n j = 0\n for (i, x) in enumerate(node.op.idx_list):\n # if its not a slice, it will reduce the dimension, should\n # not appear in the broascastable dimensions\n if isinstance(x, slice):\n new_axis += [(j, u.broadcastable[i])]\n j += 1\n # now keep the broadcastable pattern of all\n # items not appearing in subtensor list\n for i in xrange(len(node.op.idx_list), len(u.broadcastable)):\n new_axis += [(j, u.broadcastable[i])]\n j += 1\n\n subt_x = node.op(u.owner.inputs[0], *node.inputs[1:])\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs[0], subt_x)\n\n rbcast_subt_x = T.Rebroadcast(*new_axis)(subt_x)\n # Copy over previous output stacktrace\n # and stacktrace from previous unary operation\n copy_stack_trace([node.outputs[0], node.inputs[0]], rbcast_subt_x)\n\n return [rbcast_subt_x]\n\n\ndef merge_two_slices(slice1, len1, slice2, len2):\n \"\"\"\n This function merges two slices into a single slice. 
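# --- Illustrative sketch, not part of this module ---
# Pure-NumPy illustration of the rewrite performed by local_subtensor_lift:
# slicing the output of a unary elemwise op equals applying the op to the
# slice, which is cheaper when only a small part of the result is needed.
# Names prefixed with an underscore are illustrative placeholders.
import numpy as np

_x = np.linspace(0.0, 1.0, 6)
assert np.allclose(np.exp(_x)[1:3], np.exp(_x[1:3]))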
The code works on\n the assumption that:\n\n a) slice1 is actually a slice and not an index, while slice2\n can be just an index.\n\n b) the two slices **have been applied consecutively** on the same\n tensor\n\n The output slice is **not** in canonical form, but actually just a slice\n that can be applied to a tensor to produce the same output as applying\n the two consecutive slices.\n ``len1`` is the length of the tensor **before** applying the first slice,\n while ``len2`` is the length **after** applying the first slice.\n \"\"\"\n list_opt = [local_abs_merge, local_mul_switch_sink,\n local_upcast_elemwise_constant_inputs,\n local_useless_switch, constant_folding]\n\n if type(slice1) is not slice:\n raise ValueError(('First provided slice should actually be of type'\n 'slice and not an index !'), slice1)\n sl1, reverse1 = get_canonical_form_slice(slice1, len1)\n sl2, reverse2 = get_canonical_form_slice(slice2, len2)\n\n if type(sl2) is not slice:\n if reverse1 is None:\n # The first slice is not in reverse, which makes things a lot\n # more clear.\n # In this case we need to take care only of the special cases:\n # len2 <=0 -> throw index error regardless of sl2\n # sl2 > len2 -> throw index error\n # sl2 < -len2 -> throw index error\n # To get a index error we simply use len1+1 to indicate we are\n # out of bounds, because passing this index through the formula\n # of getting the mixed slice is not guaranteed to result in an\n # index error. The **issue though** if that the error will\n # complain about accessing element len1+1 which is probably not\n # too intuitive for the user\n val = sl1.start + sl2 * sl1.step\n val = T.switch(T.le(len2, 0), len1 + 1, val)\n val = T.switch(T.ge(sl2, len2), len1 + 1, val)\n val = T.switch(T.lt(sl2, 0), - len1 - 1, val)\n if sl1.step:\n val = T.switch(T.eq(sl1.step, 0), len1 + 1, val)\n val = pre_greedy_local_optimizer(list_opt, val)\n return val\n else:\n # We are in the more complex case when we do not actually know\n # if the first slice was in reverse or not.\n # in case it was not in reverse:\n p_val = sl1.start + sl2 * sl1.step\n # case it was in reverse we need to realize that we do not want\n # the k-th element from sl.start but the k-th element from\n # sl.stop backwards\n n_val = sl1.stop - 1 - sl2 * sl1.step\n if config.warn.subtensor_merge_bug:\n warnings.warn((\n 'Your current code is fine, but Theano versions '\n 'prior to 0.5rc2 might have given an incorrect result. '\n 'To disable this warning, set the Theano flag '\n 'warn.subtensor_merge_bug to False.'))\n # we need to pick either n_val or p_val and then follow same\n # steps as above for covering the index error cases\n val = T.switch(T.lt(reverse1, 0), n_val, p_val)\n val = T.switch(T.le(len2, 0), len1 + 1, val)\n val = T.switch(T.ge(sl2, len2), len1 + 1, val)\n val = T.switch(T.lt(sl2, 0), - len1 - 1, val)\n if sl1.step:\n val = T.switch(T.eq(sl1.step, 0), len1 + 1, val)\n val = pre_greedy_local_optimizer(list_opt, val)\n return val\n else:\n # We are deleaing with two slices that need to be put together\n # according to the two steps we have 4 different combinations of\n # positive/negative. 
I will denote the case I'm looking at by\n # suffixes to the variables (nn,np,pn,pp):\n flen = sl2.stop - sl2.start\n p_step = sl1.step * sl2.step\n n_step = sl1.step * sl2.step * -1\n\n pp_start = T.minimum(sl1.start + sl2.start * sl1.step, sl1.stop)\n pp_stop = T.minimum(sl1.start + sl2.stop * sl1.step, sl1.stop)\n\n pn_stop = sl1.start + (sl2.start - 1) * sl1.step\n pn_stop = T.switch(T.and_(T.lt(pn_stop, 0),\n T.gt(flen, 0)),\n -len1 - 1,\n T.minimum(pn_stop, sl1.stop))\n pn_start = sl1.start + (sl2.stop - 1) * sl1.step\n pn_start = T.minimum(pn_start, sl1.stop)\n pn_start = T.maximum(pn_start, 0)\n\n np_stop = sl1.stop - sl2.stop * sl1.step - 1\n np_stop = T.switch(T.and_(T.lt(np_stop, 0),\n T.gt(flen, 0)),\n -len1 - 1,\n T.maximum(sl1.start - 1, np_stop))\n np_start = T.maximum(sl1.start, sl1.stop - sl2.start * sl1.step - 1)\n\n nn_start = T.maximum(sl1.start,\n (sl1.stop - 1) - (sl2.stop - 1) * sl1.step)\n nn_stop = T.maximum(sl1.start, sl1.stop - sl2.start * sl1.step)\n\n start = T.switch(T.lt(reverse2 * reverse1, 0),\n T.switch(T.lt(reverse1, 0), np_start, pn_start),\n T.switch(T.lt(reverse1, 0), nn_start,\n pp_start))\n\n stop = T.switch(T.lt(reverse2 * reverse1, 0),\n T.switch(T.lt(reverse1, 0), np_stop, pn_stop),\n T.switch(T.lt(reverse1, 0), nn_stop, pp_stop))\n\n step = T.switch(T.lt(reverse2 * reverse1, 0), n_step, p_step)\n start = T.switch(T.le(flen, 0), 0, start)\n stop = T.switch(T.le(flen, 0), 0, stop)\n\n # The canonical form of the slice is pretty complicated\n # and is not simplified. We simplify it in advance here\n # as otherwise this create too many useless optimization that\n # DebugMode must check.\n start = pre_greedy_local_optimizer(list_opt, start)\n stop = pre_greedy_local_optimizer(list_opt, stop)\n step = pre_greedy_local_optimizer(list_opt, step)\n start = pre_greedy_local_optimizer(list_opt, start)\n stop = pre_greedy_local_optimizer(list_opt, stop)\n step = pre_greedy_local_optimizer(list_opt, step)\n\n # Pre merge constant for the same reason.\n start, stop, step = pre_constant_merge([start, stop, step])\n\n return slice(start, stop, step)\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([Subtensor])\ndef local_subtensor_merge(node):\n \"\"\"\n Refactored optimization to deal with all cases of tensor merging.\n Given a subgraph of the form Subtensor(Subtensor(u)), the optimization\n expresses all slices in a canonical form, and then merges them together.\n\n \"\"\"\n\n if isinstance(node.op, Subtensor):\n u = node.inputs[0]\n if u.owner and isinstance(u.owner.op, Subtensor):\n # We can merge :)\n # x actual tensor on which we are picking slices\n x = u.owner.inputs[0]\n # slices of the first applied subtensor\n slices1 = get_idx_list(u.owner.inputs, u.owner.op.idx_list)\n slices2 = get_idx_list(node.inputs, node.op.idx_list)\n # Get the shapes of the vectors !\n try:\n # try not to introduce new shape into the graph\n xshape = node.fgraph.shape_feature.shape_of[x]\n ushape = node.fgraph.shape_feature.shape_of[u]\n except AttributeError:\n # Following the suggested use of shape_feature which should\n # consider the case when the compilation mode doesn't\n # include the ShapeFeature\n xshape = x.shape\n ushape = u.shape\n\n merged_slices = []\n pos_2 = 0\n pos_1 = 0\n while (pos_1 < len(slices1)) and (pos_2 < len(slices2)):\n slice1 = slices1[pos_1]\n if type(slice1) is slice:\n merged_slices.append(\n merge_two_slices(slice1,\n xshape[pos_1],\n slices2[pos_2],\n ushape[pos_2]))\n pos_2 += 1\n else:\n 
merged_slices.append(slice1)\n pos_1 += 1\n\n if pos_2 < len(slices2):\n merged_slices += slices2[pos_2:]\n else:\n merged_slices += slices1[pos_1:]\n\n merged_slices = make_constant(merged_slices)\n subtens = Subtensor(merged_slices)\n\n sl_ins = Subtensor.collapse(\n merged_slices,\n lambda x: isinstance(x, T.Variable))\n # Do not call make_node for test_value\n out = subtens(x, *sl_ins)\n\n # Copy over previous output stacktrace\n # and stacktrace from previous slicing operation.\n # Why? Because, the merged slicing operation could have failed\n # because of either of the two original slicing operations\n orig_out = node.outputs[0]\n copy_stack_trace([orig_out, node.inputs[0]], out)\n\n # Restore original broadcastable dimensions that `subtens()` may\n # have been unable to infer again\n if out.type != orig_out.type:\n assert out.dtype == orig_out.dtype\n assert out.ndim == orig_out.ndim\n out = T.patternbroadcast(out, orig_out.broadcastable)\n copy_stack_trace([orig_out, node.inputs[0]], out)\n return [out]\n\n\n@register_useless\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([Subtensor])\ndef local_subtensor_of_alloc(node):\n \"\"\"\n\n alloc(val)[x:y] -> alloc(val[...])\n alloc(val)[x:y] -> alloc(val)\n This can be seen as a lift, but it also reduce the number of computation/memory.\n\n \"\"\"\n if not isinstance(node.op, Subtensor):\n return False\n u = node.inputs[0]\n if u.owner is None:\n return False\n if not isinstance(u.owner.op, T.Alloc):\n return False\n slices = get_idx_list(node.inputs, node.op.idx_list)\n val = u.owner.inputs[0]\n dims = u.owner.inputs[1:]\n assert len(slices) <= len(dims)\n\n # Number of dimensions added to val\n n_added_dims = u.ndim - val.ndim\n # Dimensions of the returned alloc\n nw_dims = []\n # Slices to take from val\n val_slices = []\n\n for i, (sl, dim) in enumerate(zip(slices, dims)):\n # If val was not copied over that dim,\n # we need to take the appropriate subtensor on it.\n if i >= n_added_dims:\n # We check that the corresponding val dimensions was\n # not a broadcasted dimensions.\n if (val.type.ndim > (i - n_added_dims) and\n val.type.broadcastable[i - n_added_dims]):\n val_slices.append(slice(None))\n else:\n val_slices.append(sl)\n\n csl, _ = get_canonical_form_slice(sl, dim)\n if type(csl) is not slice:\n # That dimension is removed.\n pass\n else:\n nw_dim = csl.stop - csl.start\n\n if csl.step != 1:\n # Do not add the ceil_intdiv() graphs in the graphs\n # when this is not needed as it prevent detecting the\n # correct broadcast pattern.\n nw_dim = T.ceil_intdiv(nw_dim, csl.step)\n nw_dims += [nw_dim]\n\n nw_val = val[tuple(val_slices)]\n nw_dims += dims[len(slices):]\n if nw_val.ndim > len(nw_dims):\n return False\n rval = T.alloc(nw_val, *nw_dims)\n if type(rval) not in (list, tuple):\n rval = [rval]\n if rval[0].type != node.outputs[0].type:\n # It happen that the make_node() isn't able to infer the same pattern.\n # We know it is safe, so fix that.\n rval[0] = T.patternbroadcast(rval[0], node.outputs[0].broadcastable)\n\n return rval\n\n\n@register_canonicalize\n@register_stabilize\n@register_specialize\[email protected]_optimizer([Subtensor])\ndef local_subtensor_of_dot(node):\n \"\"\"\n This optimization translates T.dot(A, B)[idxs] into T.dot(A[idxs_a], B[idxs_b]),\n where idxs_a and idxs_b are defined appropriately.\n\n idxs_a is the first A.ndim-1 entries of idxs,\n and idxs_b is the remaining entries of idxs (if any),\n modified to skip the second-to-last dimension of B\n (because dot sums over 
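# --- Illustrative sketch, not part of this module ---
# A small end-to-end check, assuming Theano is importable: two consecutive
# constant slices should be merged by local_subtensor_merge above, so a single
# Subtensor node is expected in the optimized graph.
# Names prefixed with an underscore are illustrative placeholders.
import theano
import theano.tensor as T

_v = T.vector('v')
_f = theano.function([_v], _v[2:8][1:3])
print(_f.maker.fgraph.toposort())   # expected: one Subtensor plus a DeepCopyOp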
this dimension).\n\n \"\"\"\n if not isinstance(node.op, Subtensor):\n return\n if (not node.inputs[0].owner or\n not isinstance(node.inputs[0].owner.op, T.Dot)):\n return\n # If there is other node that use the outputs of the dot\n # We don't want to compute twice the sub part.\n if len(node.inputs[0].clients) > 1:\n return\n\n a = node.inputs[0].owner.inputs[0]\n b = node.inputs[0].owner.inputs[1]\n\n idx_list = get_idx_list(node.inputs, node.op.idx_list)\n\n num_a_indices = min(a.ndim - 1, len(idx_list))\n a_indices = idx_list[:num_a_indices]\n b_indices = idx_list[num_a_indices:]\n\n # This is necessary because np.dot sums the last index of a with the second to last of b\n # so we want to skip the second-to-last index into b.\n # This wasn't necessary for a, because we just ommitted the last index.\n # We skip this if b.ndim = 1, since then we just want b_sub = b, not b_sub = b[:]\n # (dot also handles b.ndim < 2 as a special case)\n if b.ndim > 1 and len(b_indices) >= b.ndim - 1:\n b_indices = (b_indices[:b.ndim - 2] +\n (slice(None, None, None),) + b_indices[b.ndim - 2:])\n\n a_sub = a.__getitem__(tuple(a_indices))\n b_sub = b.__getitem__(tuple(b_indices)) if b_indices else b\n\n # Copy over previous output stacktrace to a_sub and b_sub,\n # because an error in the subtensor operation (e.g. an index error)\n # on either a or b must correspond to an error in the\n # subtensor operation on their dot product.\n copy_stack_trace(node.outputs[0], [a_sub, b_sub])\n\n # Copy over previous output stacktrace and previous dot product stacktrace,\n # because an error here may correspond to an either in either the original\n # dot product, or in the dot product after the subtensor operation.\n r = T.dot(a_sub, b_sub)\n copy_stack_trace([node.outputs[0], node.inputs[0]], r)\n\n return [r]\n\n\n@register_canonicalize\[email protected]_optimizer([T.add])\ndef local_IncSubtensor_serialize(node):\n \"\"\"\n When using Subtensor, gradient graphs can be ugly.\n\n If we ask for grad(f(a[0]), a), we are going to get something like\n\n IncSubtensor(Elemwise{second}(a, 0), g(f(a[0])), [0])\n\n This might be ugly, but at least it's as fast as you could want.\n If we ask for grad(f(a[0], a[1], a[2]), a), it's much worse...\n\n Elemwise{Add}\n IncSubtensor(Elemwise{second}(a, 0), g(f(a[0])), [0])\n IncSubtensor(Elemwise{second}(a, 0), g(f(a[1])), [1])\n IncSubtensor(Elemwise{second}(a, 0), g(f(a[2])), [2])\n\n This is much worse because this time we have to produce 3 matrices\n the size of 'a', just so we can add them together.\n\n This Op rearranges IncSubtensor's that all work on the same\n initial argument (here, Elemwise{second}(a,0)) into a chain. 
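# --- Illustrative sketch, not part of this module ---
# Pure-NumPy illustration of the identity behind local_subtensor_of_dot:
# slicing the leading dimension of dot(A, B) equals dot(A[idxs], B), so only
# the required rows of the product need to be computed.
# Names prefixed with an underscore are illustrative placeholders.
import numpy as np

_A = np.random.rand(5, 3)
_B = np.random.rand(3, 4)
assert np.allclose(np.dot(_A, _B)[1:3], np.dot(_A[1:3], _B))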
The\n advantage of the chain structure is that each one can be optimized\n later in the pipeline to operate inplace.\n\n Ideally, the op will do something like this:\n\n #\n # add(x, incsubtensor(b, c), incsubtensor(b, d))\n # -> incsubtensor(incsubtensor(add(x,b,b), c), d)\n\n \"\"\"\n def movable(i):\n # Return True iff this is a incsubtensor that we can move\n return (i.owner and\n isinstance(i.owner.op, (IncSubtensor,\n AdvancedIncSubtensor1,\n AdvancedIncSubtensor,)) and\n i.type == o_type and\n len(i.clients) == 1 and\n not i.owner.op.set_instead_of_inc)\n\n if node.op == T.add:\n o_type = node.outputs[0].type\n\n movable_inputs = [i for i in node.inputs if movable(i)]\n\n if movable_inputs:\n new_inputs = ([i for i in node.inputs if not movable(i)] +\n [mi.owner.inputs[0] for mi in movable_inputs])\n if len(new_inputs) == 0:\n new_add = new_inputs[0]\n else:\n new_add = T.add(*new_inputs)\n\n # Copy over stacktrace from original output, as an error\n # (e.g. an index error) in this add operation should\n # correspond to an error in the original add operation.\n copy_stack_trace(node.outputs[0], new_add)\n\n # stack up the new incsubtensors\n tip = new_add\n for mi in movable_inputs:\n assert tip.type == o_type\n assert tip.type == mi.owner.inputs[0].type\n tip = mi.owner.op(tip, *mi.owner.inputs[1:])\n # Copy over stacktrace from outputs of the original\n # \"movable\" operation to the new operation.\n copy_stack_trace(node.outputs + mi.owner.outputs, tip)\n\n return [tip]\n\n # print incsub_inputs, [id(i.owner.inputs[0]) for i in incsub_inputs]\n\n# We register it in a TopoOptimizer inside the canonizer EQ optimizer.\n# Otherwise in some cases it was making the EQ optimizer use 45. In\n# the TopoOptimizer, the EQ only use 5 passes.\ncompile.optdb.register('pre_local_IncSubtensor_serialize',\n in2out(local_IncSubtensor_serialize),\n # Just before canonizer\n 0.99, 'fast_run')\n\n\n# after priority 50 Destructive inplace operations\n# gemm is the first one now, at priority 70\n\[email protected]_optimizer([IncSubtensor], inplace=True)\ndef local_inplace_setsubtensor(node):\n \"\"\"\n Also work for GpuIncSubtensor.\n\n \"\"\"\n if isinstance(node.op, IncSubtensor) and not node.op.inplace:\n dta = node.op.destroyhandler_tolerate_aliased\n new_op = node.op.__class__(\n node.op.idx_list, inplace=True,\n set_instead_of_inc=node.op.set_instead_of_inc,\n destroyhandler_tolerate_aliased=dta)\n new_node = new_op(*node.inputs)\n val = getattr(node.outputs[0].tag, 'nan_guard_mode_check', True)\n new_node.tag.nan_guard_mode_check = val\n\n # Copy stacktrace from original outputs to new outputs.\n # This is sensible, because the new operation is the\n # same as the old one, but now with different attributes.\n copy_stack_trace(node.outputs, new_node)\n return [new_node]\n return False\ncompile.optdb.register('local_inplace_setsubtensor',\n TopoOptimizer(\n local_inplace_setsubtensor,\n failure_callback=TopoOptimizer.warn_inplace),\n 60, 'fast_run', 'inplace') # DEBUG\n\n\[email protected]_optimizer([AdvancedIncSubtensor1], inplace=True)\ndef local_inplace_incsubtensor1(node):\n \"\"\"\n Also work for GpuAdvancedIncSubtensor1.\n\n \"\"\"\n if isinstance(node.op, AdvancedIncSubtensor1) and not node.op.inplace:\n new_op = node.op.clone_inplace()\n new_node = new_op(*node.inputs)\n\n # Copy stacktrace from original outputs to new outputs.\n # This is sensible, because the new operation is the\n # same as the old one, but now with different attributes.\n copy_stack_trace(node.outputs, new_node)\n 
return [new_node]\n return False\ncompile.optdb.register('local_inplace_incsubtensor1',\n TopoOptimizer(\n local_inplace_incsubtensor1,\n failure_callback=TopoOptimizer.warn_inplace),\n 60, 'fast_run', 'inplace') # DEBUG\n\n\n# Register old name\n@register_canonicalize(\"local_incsubtensor_of_allocs\")\n@register_stabilize(\"local_incsubtensor_of_allocs\")\[email protected]_optimizer([IncSubtensor,\n AdvancedIncSubtensor,\n AdvancedIncSubtensor1])\ndef local_incsubtensor_of_zeros(node):\n \"\"\"\n IncSubtensor(x, zeros, idx) -> x\n\n \"\"\"\n if (isinstance(node.op, (IncSubtensor,\n AdvancedIncSubtensor,\n AdvancedIncSubtensor1)) and\n not node.op.set_instead_of_inc):\n x = node.inputs[0]\n y = node.inputs[1]\n try:\n # Don't use only_process_constants=True. We need to\n # investigate Alloc of 0s but with non constant shape.\n if get_scalar_constant_value(y, elemwise=False) == 0:\n # No need to copy over the stacktrace,\n # because x should already have a stacktrace\n return [x]\n except NotScalarConstantError:\n return\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([IncSubtensor])\ndef local_incsubtensor_of_zeros_to_setsubtensor(node):\n \"\"\"\n IncSubtensor(zeros, x, ...) -> SetSubtensor(zeros, x, ...)\n \"\"\"\n if (isinstance(node.op, (IncSubtensor)) and not node.op.set_instead_of_inc):\n x = node.inputs[0]\n\n if isinstance(x, T.Constant) and not np.any(x.data):\n return [IncSubtensor(node.op.idx_list,\n node.op.inplace,\n set_instead_of_inc=True,\n destroyhandler_tolerate_aliased=node.op.destroyhandler_tolerate_aliased,\n )(*node.inputs)]\n\n\n@register_canonicalize('local_setsubtensor_of_allocs')\n@register_stabilize('local_setsubtensor_of_allocs')\[email protected]_optimizer([IncSubtensor])\ndef local_setsubtensor_of_constants(node):\n \"\"\"\n SetSubtensor(x, x[idx], idx) -> x\n\n when x is constant or alloc.\n\n \"\"\"\n if isinstance(node.op, IncSubtensor) and node.op.set_instead_of_inc:\n x = node.inputs[0]\n y = node.inputs[1]\n\n # Don't use only_process_constants=True. We need to\n # investigate Alloc of 0s but with non constant shape.\n try:\n replace_x = get_scalar_constant_value(x, elemwise=False)\n except NotScalarConstantError:\n return\n\n try:\n replace_y = get_scalar_constant_value(y, elemwise=False)\n except NotScalarConstantError:\n return\n\n if replace_x == replace_y:\n\n # No need to copy over the stacktrace,\n # because x should already have a stacktrace\n return [x]\n else:\n return False\n\n\n@register_canonicalize\n@register_stabilize\[email protected]_optimizer([AdvancedSubtensor1])\ndef local_adv_sub1_adv_inc_sub1(node):\n \"\"\"Optimize the possible AdvSub1(AdvSetSub1(...), ...).\n\n AdvancedSubtensor1(AdvancedSetSubtensor1(x, y, idx), idx) -> y\n\n Notes\n -----\n This opt add AssertOp. Otherwise, it would remove shape and\n index error. 
If you want to get rid of them, see the\n :ref:`unsafe_optimization` section.\n\n WARNING:\n A previous version of this optimization also matched\n AdvancedSubtensor1(AdvancedIncSubtensor1(0s, y, idx), idx) -> y\n This is incorrect when there are duplicate indices.\n The current version warns the user about potential past issues.\n\n \"\"\"\n if not isinstance(node.op, AdvancedSubtensor1):\n return\n inp = node.inputs[0]\n if (not inp.owner or\n not isinstance(inp.owner.op, AdvancedIncSubtensor1)):\n return\n idx = node.inputs[1]\n idx2 = inp.owner.inputs[2]\n x = inp.owner.inputs[0]\n y = inp.owner.inputs[1]\n if idx is not idx2:\n return\n if (not inp.owner.op.set_instead_of_inc and\n # Don't use only_process_constants=True. We need to\n # investigate Alloc of 0s but with non constant shape.\n T.extract_constant(x, elemwise=False) != 0):\n return\n\n if not inp.owner.op.set_instead_of_inc:\n if config.warn.inc_subtensor1_opt:\n warnings.warn(\n 'Your current code is fine, but Theano versions '\n 'between 0.7rc1 and 0.10 (or development versions '\n 'between Nov. 2014 and May 2017) '\n 'might have given incorrect results. This graph has '\n 'following pattern: inc_subtensor(zeros[idx], x)[idx], '\n 'where idx is an array of integers. This used to be '\n 'optimized to \"x\", which is incorrect if there are '\n 'duplicated indices in idx. '\n 'To disable this warning, set the Theano flag '\n 'warn.inc_subtensor1_opt to False.')\n return\n\n cond = [T.all(T.and_(T.lt(idx, x.shape[0]), T.ge(idx, -x.shape[0])))]\n if not node.fgraph.shape_feature.same_shape(idx, y, 0, 0):\n cond.append(T.eq(idx.shape[0], y.shape[0]))\n r = Assert(\"Bad indexing or shapes in a AdvancedIncSubtensor1 \"\n \"that was optimized away\")(y, *cond)\n copy_stack_trace(y, r)\n\n if r.dtype == node.outputs[0].dtype:\n return [r]\n # It is possible that y is upcast or downcast to x.dtype.\n # In all case, as we set or add with 0, we can just cast y.\n r2 = T.cast(r, node.outputs[0].dtype)\n\n # Copy over stacktrace from before casting, since\n # we don't expect problems in the casting operation,\n # and any problems in the indexing would have been spotted above.\n copy_stack_trace(r, r2)\n return [r2]\n\n\n@register_specialize\n@register_stabilize\n@register_canonicalize\n@register_useless\[email protected]_optimizer([IncSubtensor,\n AdvancedIncSubtensor,\n AdvancedIncSubtensor1])\ndef local_useless_inc_subtensor_alloc(node):\n \"\"\"\n Replaces an [Advanced]IncSubtensor[1], whose increment is an `alloc` of\n a fully or partially broadcastable variable, by one that skips the\n intermediate `alloc` where possible.\n\n \"\"\"\n if isinstance(node.op, (IncSubtensor,\n AdvancedIncSubtensor,\n AdvancedIncSubtensor1)):\n x = node.inputs[0]\n y = node.inputs[1]\n i = node.inputs[2:]\n\n if y.owner is not None and isinstance(y.owner.op, T.Alloc):\n # `z` is the input of the Alloc op, i.e. 
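# --- Illustrative sketch, not part of this module ---
# Pure-NumPy illustration of the duplicate-index caveat described above for
# local_adv_sub1_adv_inc_sub1: with repeated indices, increments accumulate,
# so inc_subtensor(zeros[idx], y)[idx] is *not* equal to y, which is why the
# old rewrite to plain "y" was incorrect.
# Names prefixed with an underscore are illustrative placeholders.
import numpy as np

_zeros = np.zeros(4)
_idx = np.array([1, 1, 2])
_y = np.array([10.0, 20.0, 30.0])
np.add.at(_zeros, _idx, _y)   # NumPy analogue of inc_subtensor on zeros
print(_zeros[_idx])           # [ 30.  30.  30.] -- not equal to _y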
T.alloc(z, <shape>)\n z = y.owner.inputs[0]\n\n try:\n shape_feature = node.fgraph.shape_feature\n except AttributeError:\n # The shape feature may not be available in some mode, but we\n # need it for this optimization, so don't continue.\n return False\n\n shape_of = shape_feature.shape_of\n same_shape = shape_feature.same_shape\n\n # Get the subtensor of `x` indexed by `i` in order to compare\n # shapes later.\n if isinstance(node.op, IncSubtensor):\n xi = Subtensor(node.op.idx_list)(x, *i)\n elif isinstance(node.op, AdvancedIncSubtensor):\n xi = advanced_subtensor(x, *i)\n elif isinstance(node.op, AdvancedIncSubtensor1):\n xi = advanced_subtensor1(x, *i)\n else:\n raise Exception('Should never happen!')\n\n reason = 'local_useless_incsubtensor_alloc'\n\n # Add `xi` to the shape feature `fgraph`. This is important for\n # shape inference later because the variable must be part of the\n # function graph in order to call `same_shape` on it.\n if xi not in shape_of:\n shape_feature.on_import(node.fgraph, xi.owner,\n '%s: add `xi`' % reason)\n\n # `xi` may have more dimensions than `y` since the subtensor ops\n # do automatic broadcasting of the increment internally. Thus, we\n # need to make the leading implicitly broadcasted dimensions\n # explicit for shape comparison later.\n if xi.ndim > y.ndim:\n y = T.shape_padleft(y, xi.ndim - y.ndim)\n if y not in shape_of:\n shape_feature.on_import(node.fgraph, y.owner,\n '%s: add `y`' % reason)\n\n # Build `z_broad` explicitly to include extra implicit dimensions.\n z_broad = ((True,) * (xi.ndim - z.ndim) + z.broadcastable)\n\n cond = [\n # The shapes of `y` and `xi` must either agree or `y` may\n # also have shape equal to 1 which may be treated as a\n # broadcastable dimension by the subtensor op.\n T.or_(T.eq(y.shape[k], 1), T.eq(y.shape[k], xi.shape[k]))\n # Loop over all dimensions.\n for k in xrange(xi.ndim)\n # We need to check the above shapes, if\n # * the pre-alloc increment `z` is broadcastable in\n # dimension `k` (if it isn't, then the shapes of `z` and\n # `y` are the same by the definition of the `Alloc` op in\n # this dimension and replacing `y` by `z` will not hide a\n # shape error), and\n # * `xi` and `y` do not have the same shape in dimension\n # `k` or we cannot infer the shape statically (if the\n # shapes of `xi` and `y` are not the same, then replacing\n # `y` by `z` will hide the shape error of `y`), and\n # * the shape of `y` is not equal to 1 or we cannot infer\n # the shape statically (if the shape of `y` is equal to\n # 1, then `y` is broadcasted by the inc_subtensor op\n # internally, so the shapes of `xi` and `y` do not need\n # to match in dimension `k`; else we need to check at\n # runtime that the shape of `y` is either 1 or the same\n # as `xi` or otherwise replacing `y` by `z` will hide a\n # shape error).\n if (z_broad[k] and\n not same_shape(xi, y, dim_x=k, dim_y=k) and\n shape_of[y][k] != 1)]\n\n if len(cond) > 0:\n msg = '`x[i]` and `y` do not have the same shape.'\n z = Assert(msg)(z, *cond)\n\n r = node.op(x, z, *i)\n # Copy over stacktrace from previous output, since\n # we don't expect problems when removing the intermediate\n # alloc operation and so we still want to point at the line\n # of the inc_subtensor operation.\n copy_stack_trace(node.outputs, r)\n\n return [r]\n\n\n####################\n# Rebroadcast opts #\n####################\n\n@register_useless\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.Rebroadcast])\ndef local_useless_rebroadcast(node):\n \"\"\"\n 
Remove Rebroadcast if id does not actually change the broadcasting pattern.\n\n \"\"\"\n if isinstance(node.op, T.Rebroadcast):\n x = node.inputs[0]\n if np.all(x.broadcastable == node.outputs[0].broadcastable):\n # No broadcastable flag was modified\n # No need to copy over stack trace,\n # because x should already have a stack trace.\n return [x]\n else:\n # Keep the flags that modify something\n new_axis = {}\n for dim, bc in list(node.op.axis.items()):\n if x.broadcastable[dim] != bc:\n new_axis[dim] = bc\n if new_axis == node.op.axis:\n # All flags are useful\n return\n else:\n r = T.Rebroadcast(*list(new_axis.items()))(x)\n # Copy over stacktrace from previous output\n copy_stack_trace(node.outputs, r)\n return [r]\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.Rebroadcast])\ndef local_rebroadcast_lift(node):\n \"\"\"\n Lifts Rebroadcast through unary Elemwise operations,\n and merges consecutive Rebroadcasts.\n\n Rebroadcast(Elemwise(x)) => Elemwise(Rebroadcast(x))\n Rebroadcast(Rebroadcast(x)) => Rebroadcast(x)\n\n \"\"\"\n op = node.op\n if not isinstance(op, T.Rebroadcast):\n return False\n\n input = node.inputs[0]\n inode = input.owner\n if inode and isinstance(inode.op, Elemwise) and len(inode.inputs) == 1:\n # It may happen that `input` has no client because this optimization\n # is called from `apply_rebroadcast_opt`, which in particular is used\n # by the `unbroadcast` function before we are in the actual function\n # compilation phase.\n if hasattr(input, 'clients') and len(input.clients) == 1:\n rebroadcasted = T.Rebroadcast(*list(op.axis.items()))(\n inode.inputs[0])\n # Copy over stacktrace from previous output (after rebroadcasting)\n # to new output, because an error in the new graph right after\n # rebroadcasting must have been caused by the previous rebroadcasting.\n copy_stack_trace(node.outputs, rebroadcasted)\n\n rval = inode.op.make_node(rebroadcasted).outputs\n\n # Copy over stacktrace from previous output (after rebroadcasting)\n # and input (after elemwise operation) to new output, because an\n # error in the new graph could have been caused by either of the\n # two ops.\n copy_stack_trace(node.outputs + node.inputs, rval)\n\n return rval\n if inode and isinstance(inode.op, T.Rebroadcast):\n # the \"axis\" specification in the outer Rebroadcast overrides\n # the axis of the inner one\n axis = inode.op.axis.copy()\n axis.update(op.axis)\n iinput = inode.inputs[0]\n\n rval = [T.Rebroadcast(*list(axis.items()))(iinput)]\n\n # Copy over stacktrace from previous output (after second rebroadcast)\n # and from previous input (after first rebroadcast op) because an error in\n # the new graph could have been caused by either of the two\n # rebroadcast ops.\n copy_stack_trace(node.outputs + node.inputs, rval)\n return rval\n\n\ndef apply_rebroadcast_opt(rval):\n \"\"\"\n Apply as many times as required the optimization local_useless_rebroadcast\n and local_rebroadcast_lift.\n\n Parameters\n ----------\n rval: a Variable\n\n Returns\n -------\n A Variable (the same if no optimization can be applied)\n\n \"\"\"\n\n changed = True\n while changed and rval.owner:\n changed = False\n rval2 = theano.tensor.opt.local_useless_rebroadcast.transform(\n rval.owner)\n if rval2:\n assert len(rval2) == 1\n rval = rval2[0]\n changed = True\n if rval.owner:\n rval2 = theano.tensor.opt.local_rebroadcast_lift.transform(\n rval.owner)\n if rval2:\n assert len(rval2) == 1\n rval = rval2[0]\n changed = True\n return rval\n\n\n#############\n# Join opts 
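# --- Illustrative sketch, not part of this module ---
# A small check, assuming Theano is importable: unbroadcasting a dimension
# that is already non-broadcastable yields a Rebroadcast that changes nothing,
# and local_useless_rebroadcast / apply_rebroadcast_opt are expected to drop it.
# Names prefixed with an underscore are illustrative placeholders.
import theano
import theano.tensor as T

_m = T.matrix('m')                        # broadcastable == (False, False)
_f = theano.function([_m], T.unbroadcast(_m, 0))
print(_f.maker.fgraph.toposort())         # expected: no Rebroadcast node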
#\n#############\n@register_specialize\n@register_canonicalize\n@register_useless\[email protected]_optimizer([T.Join])\ndef local_join_1(node):\n \"\"\"Join(i, x) => x\n\n Remove Join() when only one element is joined.\n\n \"\"\"\n if not isinstance(node.op, T.Join):\n return\n tensors = node.inputs[1:]\n if len(tensors) == 1:\n # We don't need to copy over any stacktrace here, because the\n # input variable should already have its own stacktrace.\n return [tensors[0]]\n\n\n# TODO: merge in local_useless_join\n@register_useless\n@register_specialize\n@register_canonicalize\[email protected]_optimizer([T.Join])\ndef local_join_empty(node):\n \"\"\"Join(i, x, y, empty) => Join(i, x, y)\n\n Remove empty inputs to joins. The empty inputs can be anywhere.\n\n \"\"\"\n if not isinstance(node.op, T.Join):\n return\n new_inputs = []\n try:\n join_idx = get_scalar_constant_value(node.inputs[0],\n only_process_constants=True)\n except NotScalarConstantError:\n return\n for idx in xrange(1, len(node.inputs)):\n inp = node.inputs[idx]\n # We can not use size == 0,, as this can change shape from 3,0\n # to 2,0. This trigger DebugMode error. This happen with\n # stack(...,[]) as this add a dimshuffle on [], that add a\n # dimensions with shape 1.\n if isinstance(inp, theano.Constant) and inp.data.shape[join_idx] == 0:\n continue\n new_inputs.append(inp)\n if len(new_inputs) < len(node.inputs) - 1:\n if len(new_inputs) == 0:\n # T.join do not work in that case.\n # constant folding will take care of this case.\n return\n ret = T.join(node.inputs[0], *new_inputs)\n o = node.outputs[0]\n if ret.dtype != o.dtype:\n # Join can upcast some inputs\n return\n\n # Copy over stacktrace from previous output (after join op)\n # to new output, because an error in the new op must be caused\n # by an error in the old join op.\n copy_stack_trace(node.outputs, ret)\n\n if ret.type != o.type:\n assert ret.dtype == o.dtype\n assert ret.ndim == o.ndim\n ret = T.patternbroadcast(ret, node.outputs[0].broadcastable)\n\n # Copy over stacktrace from previous output\n # (after patternbroadcast op) for same reasons as before.\n copy_stack_trace(node.outputs, ret)\n\n return [ret]\n\n\n@register_specialize\n@register_canonicalize\n@register_useless\[email protected]_optimizer([T.Join])\ndef local_join_make_vector(node):\n \"\"\"Join(0, make_vector1, make_vector2, ...) => Join(0, make_vector12, ...)\n\n Merge MakeVector inputs to Join. 
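# --- Illustrative sketch, not part of this module ---
# A small check, assuming Theano is importable: joining a single tensor is a
# no-op, so local_join_1 is expected to remove the Join node entirely.
# Names prefixed with an underscore are illustrative placeholders.
import theano
import theano.tensor as T

_v = T.vector('v')
_f = theano.function([_v], T.join(0, _v))
print(_f.maker.fgraph.toposort())   # expected: just a DeepCopyOp, no Join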
This can make the join completly\n disapear with the local_join_1 opt.\n\n \"\"\"\n if not isinstance(node.op, T.Join) or node.outputs[0].ndim != 1:\n return\n new_inputs = [node.inputs[1]]\n for idx in xrange(2, len(node.inputs)):\n inp = node.inputs[idx]\n if (inp.owner and\n isinstance(inp.owner.op, MakeVector) and\n new_inputs[-1].owner and\n isinstance(new_inputs[-1].owner.op, MakeVector) and\n # MakeVector have a dtype parameter\n inp.owner.op == new_inputs[-1].owner.op):\n inps = new_inputs[-1].owner.inputs + inp.owner.inputs\n new_inputs[-1] = inp.owner.op(*inps)\n\n # Copy over stacktrace from previous output (after join op)\n # to new intermediate output, because an error in the intermediate\n # op must be caused by an error in the old join op.\n copy_stack_trace(node.outputs, new_inputs[-1])\n else:\n new_inputs.append(inp)\n if len(new_inputs) < len(node.inputs) - 1:\n ret = T.join(node.inputs[0], *new_inputs)\n\n # Copy over stacktrace from previous output (after join op)\n # to new output, because an error in the new op must be caused\n # by an error in the old join op.\n copy_stack_trace(node.outputs, ret)\n return [ret]\n\n\n#################\n# speed/memory #\n#################\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.elemwise.Sum])\ndef local_sumsqr2dot(node):\n \"\"\"\n This optimization detects T.sqr( W.dimshuffle('x',0,1) * G.dimshuffle(0,'x',1) ).sum(axis=(1,2))\n and converts this to T.dot(T.sqr(G), T.sqr(W).sum(axis=0)).\n \"\"\"\n if (isinstance(node.op, T.elemwise.Sum) and\n isinstance(node.op.scalar_op, theano.scalar.basic.Add) and node.op.axis == (1, 2)):\n in1 = node.inputs[0]\n out = node.outputs[0]\n\n if (in1.owner and isinstance(in1.owner.op, T.Elemwise) and isinstance(in1.owner.op.scalar_op, theano.scalar.basic.Sqr)):\n in_sqr = in1.owner.inputs[0]\n if (in_sqr.owner and isinstance(in_sqr.owner.op, T.Elemwise) and\n isinstance(in_sqr.owner.op.scalar_op, theano.scalar.basic.Mul) and len(in_sqr.owner.inputs) == 2):\n in_mul1, in_mul2 = in_sqr.owner.inputs\n\n if (isinstance(in_mul1.owner.op, T.elemwise.DimShuffle) and in_mul1.owner.op.new_order == ('x', 0, 1) and\n isinstance(in_mul2.owner.op, T.elemwise.DimShuffle) and in_mul2.owner.op.new_order == (0, 'x', 1)):\n W = in_mul1.owner.inputs[0]\n G = in_mul2.owner.inputs[0]\n\n new_out = T.dot(T.sqr(G), T.sqr(W).sum(axis=0))\n if new_out.dtype != out.dtype:\n new_out = T.cast(new_out, dtype=out.dtype)\n return [new_out]\n\n\n#################\n# Exp stability #\n#################\n@register_stabilize\n@register_specialize\n@register_canonicalize\[email protected]_optimizer([T.Elemwise])\ndef local_expm1(node):\n \"\"\"\n This optimization detects exp(a)-1 and converts this to expm1(a).\n \"\"\"\n if (isinstance(node.op, T.Elemwise) and\n isinstance(node.op.scalar_op, theano.scalar.basic.Sub)):\n in1, in2 = node.inputs\n out = node.outputs[0]\n\n if (in1.owner and isinstance(in1.owner.op, T.Elemwise) and isinstance(in1.owner.op.scalar_op, theano.scalar.basic.Exp) and\n T.extract_constant(in2, only_process_constants=False) == 1):\n in11 = in1.owner.inputs[0]\n new_out = T.expm1(in11)\n\n if new_out.dtype != out.dtype:\n new_out = T.cast(new_out, dtype=out.dtype)\n if new_out.type != out.type:\n return\n return [new_out]\n\n\n###############\n# Switch opts #\n###############\n@register_useless('local_remove_switch_const_cond')\n@register_canonicalize('fast_compile', 'local_remove_switch_const_cond')\n@register_specialize\[email protected]_optimizer([T.Elemwise])\ndef 
local_useless_switch(node):\n \"\"\"\n This optimization makes the following changes in the graph:\n T.switch(cond,left,right) -->\n if cond is constant and cond == 0: right\n if cond is constant and cond != 0: left\n if left is right -> left\n\n T.switch(le(shape_i{id}(X), 0), 0, shape_i{id}(X)) -> shape_i{id}(X)\n \"\"\"\n if (isinstance(node.op, T.Elemwise) and\n isinstance(node.op.scalar_op, scalar.basic.Switch)):\n cond = T.extract_constant(node.inputs[0],\n only_process_constants=True)\n if ((type(cond) is np.ndarray and cond.ndim == 0) or\n isinstance(cond, np.number)):\n if cond == 0:\n correct_out = node.inputs[2]\n else:\n correct_out = node.inputs[1]\n\n if correct_out.ndim != node.outputs[0].ndim:\n # TODO: broadcast?\n return False\n if correct_out.dtype != node.outputs[0].dtype:\n out = T.cast(correct_out, node.outputs[0].dtype)\n else:\n out = correct_out\n\n if out.type.broadcastable != node.outputs[0].type.broadcastable:\n # We need to copy data to the new dimensions during execution\n\n # We should not depend on node.outputs as this would\n # make the new node depend on the old one that will\n # get optimized again. So this create a cycle.\n shps = []\n for idx, (b1, b2), in enumerate(zip(out.type.broadcastable,\n node.outputs[0].type.broadcastable)):\n if b1 == b2:\n shps.append(out.shape[idx])\n elif not node.inputs[1].type.broadcastable[idx]:\n shps.append(node.inputs[1].shape[idx])\n else:\n shps.append(node.inputs[2].shape[idx])\n out = T.alloc(out, *shps)\n else:\n out = out\n\n # Copy over stacktrace from selected output to new output\n copy_stack_trace(node.outputs + correct_out, out)\n return [out]\n # if left is right -> left\n if node.inputs[1] is node.inputs[2]:\n # Note: No need to copy over stacktrace, because the input node\n # already has its own stacktrace\n if cond.type == node.inputs[1].type:\n return [node.inputs[1]]\n\n ret = T.fill(cond, node.inputs[1])\n\n # Copy over stacktrace from switch output and correct branch\n copy_stack_trace(node.outputs + node.inputs[1], ret)\n return [ret]\n\n # This case happens with scan.\n # Elemwise{switch}(le(shape_i{id}(X), 0), 0, shape_i{id}(X)) -> shape_i{id}(X)\n left = node.inputs[1]\n right = node.inputs[2]\n cond_var = node.inputs[0]\n if cond_var.owner and \\\n isinstance(cond_var.owner.op, T.Elemwise) and \\\n isinstance(cond_var.owner.op.scalar_op, scalar.LE) and \\\n cond_var.owner.inputs[0].owner and \\\n isinstance(cond_var.owner.inputs[0].owner.op, Shape_i) and \\\n T.extract_constant(cond_var.owner.inputs[1], only_process_constants=True) == 0 and \\\n T.extract_constant(left, only_process_constants=True) == 0 and \\\n right is cond_var.owner.inputs[0]:\n assert right.type == node.outputs[0].type\n # No need to copy over stacktrace, because the right input node\n # already has its own stacktrace\n return [right]\n return False\n return False\n\n\n@register_specialize\n@register_canonicalize\[email protected]_optimizer([T.mul])\ndef local_mul_switch_sink(node):\n \"\"\"\n This optimization makes the folowing changes in the graph:\n T.mul(A,T.switch(cond,0,iff),B) --> T.switch(cond,0,T.mul(A,B,iff))\n T.mul(A,T.switch(cond,ift,0),B) --> T.switch(cond,T.mul(A,B,ift),0)\n A and B being several (or none) symbolic variables.\n This is useful because A and B may not be numerically stable and give\n NaN or inf values for cases where the switch returns 0.\n With this optimization T.grad(T.switch(...)) has the right behavior.\n\n Examples\n --------\n x -> f(x)\n x -> g(x)\n y = T.switch(cond,f(x),g(x))\n 
**without the optimization\n T.grad(y,x) -> grad(f(x),x) * grad(y,f(x)) + grad(g(x),x) * grad(y,g(x))\n **with the optimization\n T.grad(y,x) -> switch(cond,grad(f(x),x), 0) + switch(cond,0,grad(g(x),x))\n This will be particularly useful for the lazyif because we skip\n an entire part of the graph.\n\n \"\"\"\n if node.op != T.mul:\n return False\n for idx, i in enumerate(node.inputs):\n if i.owner and i.owner.op == T.switch:\n switch = i.owner\n try:\n if (get_scalar_constant_value(\n switch.inputs[1], only_process_constants=True) == 0.):\n listmul = node.inputs[:idx] + node.inputs[idx + 1:]\n fmul = T.mul(*(listmul + [switch.inputs[2]]))\n\n # Copy over stacktrace for elementwise multiplication op\n # from previous elementwise multiplication op.\n # An error in the multiplication (e.g. errors due to\n # inconsistent shapes), will point to the\n # multiplication op.\n copy_stack_trace(node.outputs, fmul)\n\n fct = [T.switch(switch.inputs[0], 0,\n fmul)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise multiplication op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n try:\n if (get_scalar_constant_value(\n switch.inputs[2], only_process_constants=True) == 0.):\n listmul = node.inputs[:idx] + node.inputs[idx + 1:]\n fmul = T.mul(*(listmul + [switch.inputs[1]]))\n # Copy over stacktrace for elementwise multiplication op\n # from previous elementwise multiplication op.\n # An error in the multiplication (e.g. errors due to\n # inconsistent shapes), will point to the\n # multiplication op.\n copy_stack_trace(node.outputs, fmul)\n\n fct = [T.switch(switch.inputs[0],\n fmul, 0)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise multiplication op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n return False\n\n\n@register_canonicalize\[email protected]_optimizer([T.true_div, T.int_div])\ndef local_div_switch_sink(node):\n \"\"\"\n This optimization makes the folowing changes in the graph:\n T.div(T.switch(cond,0,iff),A) --> T.switch(cond,0,T.div(iff,A))\n T.div(T.switch(cond,ift,0),A) --> T.switch(cond,T.div(ift,A),0)\n\n A being a symbolic variable.\n This is useful because A may not be numerically stable and give\n NaN or inf values for cases where the switch returns 0.\n See local_mul_switch_sink for more details.\n\n \"\"\"\n if (node.op != T.true_div and node.op != T.int_div):\n return False\n op = node.op\n if node.inputs[0].owner and node.inputs[0].owner.op == T.switch:\n switch = node.inputs[0].owner\n try:\n if get_scalar_constant_value(switch.inputs[1],\n only_process_constants=True) == 0.:\n fdiv = op(switch.inputs[2], node.inputs[1])\n # Copy over stacktrace for elementwise division op\n # from previous elementwise multiplication op.\n # An error in the division (e.g. 
errors due to\n # inconsistent shapes or division by zero),\n # will point to the new division op.\n copy_stack_trace(node.outputs, fdiv)\n\n fct = [T.switch(switch.inputs[0], 0,\n fdiv)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise division op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n try:\n if get_scalar_constant_value(switch.inputs[2],\n only_process_constants=True) == 0.:\n fdiv = op(switch.inputs[1], node.inputs[1])\n # Copy over stacktrace for elementwise division op\n # from previous elementwise multiplication op.\n # An error in the division (e.g. errors due to\n # inconsistent shapes or division by zero),\n # will point to the new division op.\n copy_stack_trace(node.outputs, fdiv)\n\n fct = [T.switch(switch.inputs[0],\n fdiv, 0)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise division op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n return False\n\n\n# Merge add/sub/mul/div/minimum/maximum/... of switches sharing the same\n# condition, to enable further simplification of their branches\n# Example: switch(c, a, b) + switch(c, x, y) -> switch(c, a+x, b+y)\n@register_canonicalize\[email protected]_optimizer([T.Elemwise])\ndef local_merge_switch_same_cond(node):\n scal = theano.scalar\n # node must be binary elemwise or add or mul\n if not isinstance(node.op, T.Elemwise) or not isinstance(\n node.op.scalar_op, (scal.BinaryScalarOp, scal.Add, scal.Mul)):\n return\n # all inputs must be switch\n if not all(s.owner and isinstance(s.owner.op, T.Elemwise) and\n isinstance(s.owner.op.scalar_op, scal.Switch)\n for s in node.inputs):\n return\n # all switch conditions must be the same\n cond = node.inputs[0].owner.inputs[0]\n if not all(s.owner.inputs[0] is cond for s in node.inputs[1:]):\n return\n # pull out switch\n return [T.switch(cond,\n node.op(*[s.owner.inputs[1] for s in node.inputs]),\n node.op(*[s.owner.inputs[2] for s in node.inputs]))]\n\n\n#############\n# Tile Opts #\n#############\n@register_useless\n@register_canonicalize\n@register_stabilize\[email protected]_optimizer([T.Tile])\ndef local_useless_tile(node):\n \"\"\"Tile(x, (1,)*N) -> x\n\n This is useless tile. 
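# --- Illustrative sketch, not part of this module ---
# Pure-NumPy illustration of why local_mul_switch_sink / local_div_switch_sink
# above matter numerically: multiplying outside the switch lets an inf/nan in
# the masked-out branch contaminate the result, while sinking the
# multiplication inside the switch does not.
# Names prefixed with an underscore are illustrative placeholders.
import numpy as np

_cond = np.array([True, False])
_iff = np.array([5.0, 3.0])
_A = np.array([np.inf, 2.0])              # problematic only where the switch picks 0
print(_A * np.where(_cond, 0.0, _iff))    # [ nan   6.] : 0 * inf pollutes the output
print(np.where(_cond, 0.0, _A * _iff))    # [  0.   6.] : the rewritten form is clean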
(1,)*N, just mean a vector with all element\n being 1.\n\n \"\"\"\n if isinstance(node.op, T.Tile):\n try:\n a = T.get_scalar_constant_value(node.inputs[1],\n only_process_constants=True)\n if a == 1:\n try:\n l = T.get_vector_length(node.inputs[1])\n if l == node.inputs[0].ndim:\n # No need to copy over any stacktrace as previous\n # input variable already has a stacktrace\n return [node.inputs[0]]\n elif l < node.inputs[0].ndim:\n # The Op don't support that case, so we can't\n # implement the opt and test it.\n return\n return [node.inputs[0]]\n else:\n # The Op don't support that case, so we can't\n # implement the opt and test it.\n return\n x_nd = node.inputs[0].ndim\n broad = ['x'] * (l - x_nd) + xrange(x_nd)\n ret = node.inputs[0].dimshuffle(broad)\n # Copy over stacktrace from previous output node,\n # and from node before tiling operation.\n copy_stack_trace(node.outputs + node.inputs[0], ret)\n return [ret]\n except ValueError:\n return\n except NotScalarConstantError:\n return\n\n\n##############\n# Split Opts #\n##############\n@register_useless\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.Split])\ndef local_useless_split(node):\n \"\"\" Split{n_splits=1}(x, y) -> x\n\n Remove Split with only 1 split.\n\n \"\"\"\n if isinstance(node.op, T.Split):\n if node.op.len_splits == 1:\n x, axis, splits = node.inputs\n out = assert_op(x, T.eq(splits.shape[0], 1))\n # Copy over stacktrace from previous output node.\n copy_stack_trace(node.outputs, out)\n out2 = assert_op(out, T.eq(x.shape[axis], splits[0]))\n # Copy over stacktrace from previous output node.\n copy_stack_trace(out, out2)\n\n return [out2]\n\n\n################\n# Flatten Opts #\n################\n@register_canonicalize\n@register_stabilize\[email protected]_optimizer([T.Flatten])\ndef local_flatten_lift(node):\n \"\"\"\n Flatten(UnaryElemwise(x)) -> UnaryElemwise(Flatten(x))\n\n This optimization is needed by optimization\n nnet/sigm.py:log1msigm_to_softplus to get applied when there is a flatten.\n\n \"\"\"\n if (isinstance(node.op, T.Flatten) and\n node.inputs[0].owner and\n isinstance(node.inputs[0].owner.op, T.Elemwise) and\n len(node.inputs[0].owner.inputs) == 1):\n f = node.op(node.inputs[0].owner.inputs[0])\n\n # Copy over stacktrace from previous output node (flatten op),\n # since this is the op which may cause an error for f.\n copy_stack_trace(node.outputs, f)\n\n e = node.inputs[0].owner.op(f)\n\n # Copy over stacktrace from previous output node and from unary\n # elementwise output node since if there was an error, it would\n # probably have come from that operation.\n copy_stack_trace(node.outputs + [node.inputs[0]], e)\n\n return [e]\n\n##################\n# Reshape opts #\n##################\n\n\ndef local_reshape_chain(op):\n @gof.local_optimizer([op])\n def f(node):\n \"\"\"\n Reshape(Reshape(shape1),shape2) -> Reshape(shape2)\n\n \"\"\"\n if not opt.check_chain(node, op, op):\n return False\n\n # TODO: this can permit a failing program to run by eliminating\n # the lower reshape\n rval = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])\n\n # Copy over stacktrace from previous output node, as any error\n # in new computational graph would have been caused by last op\n # in the old computational graph.\n copy_stack_trace(node.outputs, rval)\n\n # It might happen that the desired output of this node has a\n # broadcastable pattern that does not match that of 'rval'. 
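# --- Illustrative sketch, not part of this module ---
# A small check, assuming Theano is importable: chained reshapes should be
# collapsed by the local_reshape_chain optimization, so a single Reshape node
# is expected to remain in the optimized graph.
# Names prefixed with an underscore are illustrative placeholders.
import theano
import theano.tensor as T

_m = T.matrix('m')
_f = theano.function([_m], _m.reshape((6,)).reshape((2, 3)))
print(_f.maker.fgraph.toposort())   # expected: a single Reshape node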
This is\n # when originally, we were able to figure out that one of the\n # dimensions of the reshape is one, but some other transformation\n # replaced the shape by one for which this cannot be guessed.\n # We should try to figure out why we lost the information about this\n # constant value... but in the meantime, better not apply this\n # optimization.\n if rval.broadcastable == node.outputs[0].broadcastable:\n return [rval]\n else:\n return False\n\n return f\nregister_canonicalize(local_reshape_chain(T.Reshape),\n name='local_reshape_chain')\n\n\n@register_useless\n@register_canonicalize\n@register_stabilize\[email protected]_optimizer([T.Reshape])\ndef local_useless_reshape(node):\n \"\"\"\n Remove two kinds of useless reshape.\n\n Remove Reshape when both the input and output have a single dimension.\n Remove Reshape when reshaping to the shape of the input.\n\n \"\"\"\n op = node.op\n if not isinstance(op, Reshape):\n return False\n\n input = node.inputs[0]\n output = node.outputs[0]\n output_shape = node.inputs[1]\n\n if input.ndim != output.ndim:\n return False\n\n # Simple case: both input and output have a single dimension.\n # This could hide errors if the user provides inconsistent shapes.\n if (input.ndim == 1 and output.ndim == 1 and\n input.broadcastable == output.broadcastable):\n return [input]\n\n # Second case: all the shapes match the input shape\n # Match Reshape(x, x.shape)\n if output_shape.owner and isinstance(output_shape.owner.op, Shape):\n shape_input = output_shape.owner.inputs[0]\n if shape_input == input:\n return [input]\n\n # Match Reshape(x, [x.shape[0], ..., x.shape[-1]]), accounting for\n # broadcastable and constant dimensions\n if output_shape.owner and isinstance(output_shape.owner.op, MakeVector):\n output_shape_is = output_shape.owner.inputs\n\n if not hasattr(node, 'fgraph'):\n shape_feature = None\n else:\n shape_feature = getattr(node.fgraph, 'shape_feature', None)\n\n nb_m1 = 0\n shape_match = [False] * input.ndim\n for dim in xrange(input.ndim):\n outshp_i = output_shape_is[dim]\n # Match Shape_i{dim}(input)\n if (outshp_i.owner and isinstance(outshp_i.owner.op, Shape_i) and\n outshp_i.owner.op.i == dim and\n outshp_i.owner.inputs[0] == input):\n shape_match[dim] = True\n continue\n\n # Match Shape(input)[dim]\n if (outshp_i.owner and isinstance(outshp_i.owner.op, Subtensor) and\n len(outshp_i.owner.inputs) == 2 and\n extract_constant(outshp_i.owner.inputs[1]) == dim):\n subtensor_inp = outshp_i.owner.inputs[0]\n if (subtensor_inp.owner and\n isinstance(subtensor_inp.owner.op, Shape)):\n shape_input_i = subtensor_inp.owner.inputs[0]\n if shape_input_i == input:\n shape_match[dim] = True\n continue\n\n # Match 1 if input.broadcastable[dim] is True\n cst_outshp_i = extract_constant(outshp_i, only_process_constants=1)\n if input.broadcastable[dim] and cst_outshp_i == 1:\n shape_match[dim] = True\n continue\n\n # Match -1\n if cst_outshp_i == -1:\n shape_match[dim] = True\n nb_m1 += 1\n continue\n\n # Match shape_of[input][dim] or its constant equivalent\n if shape_feature:\n inpshp_i = shape_feature.get_shape(input, dim)\n if (inpshp_i == outshp_i or\n (extract_constant(inpshp_i, only_process_constants=1) ==\n extract_constant(outshp_i, only_process_constants=1))):\n shape_match[dim] = True\n continue\n\n if all(shape_match) and nb_m1 <= 1:\n return [input]\n\n # TODO later: if all the shapes except one match, we may want to\n # consider it useless as well, like we do in the 1-dim case.\n\n\n@register_canonicalize\[email 
protected]_optimizer([T.Reshape])\ndef local_reshape_to_dimshuffle(node):\n \"\"\"\n Broadcastable dimensions in Reshape are replaced with dimshuffle.\n\n The goal is to avoid using reshape to add or remove broadcastable\n dimensions, but use dimshuffle instead, so dimshuffles can cancel out\n or be removed later on.\n\n For example:\n - reshape(x, (1, n)) --> dimshuffle{x,0}(reshape(x, (n,))\n - reshape(x, (1, m, 1, n, 1, 1))\n --> dimshuffle{x,0,x,1,x,x}(reshape(x, (m, n)))\n \"\"\"\n op = node.op\n if not isinstance(op, Reshape):\n return False\n\n input = node.inputs[0]\n output = node.outputs[0]\n output_shape = node.inputs[1]\n\n dimshuffle_new_order = []\n new_output_shape = []\n index = 0 # index over the output of the new reshape\n for i in xrange(output.ndim):\n # Since output_shape is a symbolic vector, we trust extract_constant\n # to go through however it is formed to see if its i-th element is 1.\n # We need only_process_constants=False for that.\n dim = extract_constant(output_shape[i], only_process_constants=False,\n elemwise=False)\n if dim == 1:\n dimshuffle_new_order.append('x')\n else:\n dimshuffle_new_order.append(index)\n new_output_shape.append(dim)\n index = index + 1\n if index != output.ndim:\n inner = op.__class__(len(new_output_shape))(input, new_output_shape)\n copy_stack_trace(output, inner)\n new_node = [DimShuffle(inner.type.broadcastable, dimshuffle_new_order)(inner)]\n copy_stack_trace(output, new_node)\n return new_node\n\n\n@register_canonicalize\n@register_stabilize\[email protected]_optimizer([T.Reshape])\ndef local_reshape_lift(node):\n \"\"\"\n Reshape(UnaryElemwise(x)) -> UnaryElemwise(Reshape(x))\n\n This optimization is needed by optimization\n nnet/sigm.py:log1msigm_to_softplus to get applied when there is a reshape.\n\n \"\"\"\n if (isinstance(node.op, T.Reshape) and\n node.inputs[0].owner and\n isinstance(node.inputs[0].owner.op, T.Elemwise) and\n len(node.inputs[0].owner.inputs) == 1):\n r = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])\n # Copy stacktrace from previous Reshape op, as an error in new\n # Reshape op could only have been caused by old one.\n copy_stack_trace(node.outputs, r)\n\n e = node.inputs[0].owner.op(r)\n # Copy stacktrace from both previous Reshape and UnaryElemwise op\n # because an error in new cg could have been caused by either ops.\n copy_stack_trace(node.outputs + node.inputs, e)\n\n # In rare case the original broadcast was (False, True), but\n # the new one is (False, False). So don't crash in that case.\n if e.type != node.outputs[0].type:\n re = T.patternbroadcast(e, node.outputs[0].broadcastable)\n\n # Copy over stack trace.\n # If the graph fails it is usually due to the fact that a dimension\n # that should be broadcastable does not actually have length 1,\n copy_stack_trace(e, re)\n else:\n re = e\n\n return [re]\n\n\n##################\n# Middleman cuts #\n##################\n\nregister_canonicalize(gof.OpRemove(T.tensor_copy), name='remove_tensor_copy')\n\n################\n# Canonization #\n################\n\n\nclass Canonizer(gof.LocalOptimizer):\n \"\"\"\n Simplification tool. The variable is a local_optimizer. It is best used\n with a TopoOptimizer in in_to_out order.\n\n Usage: Canonizer(main, inverse, reciprocal, calculate)\n\n Parameters\n ----------\n main\n A suitable Op class that is commutative, associative and\n takes one to an arbitrary number of inputs, e.g. add or\n mul\n inverse\n An Op class such that inverse(main(x, y), y) == x\n e.g. 
sub or true_div\n reciprocal\n A function such that main(x, reciprocal(y)) == inverse(x, y)\n e.g. neg or inv\n calculate\n Function that takes a list of numpy.ndarray instances\n for the numerator, another list for the denumerator,\n and calculates inverse(main(\\*num), main(\\*denum)). It\n takes a keyword argument, aslist. If True, the value\n should be returned as a list of one element, unless\n the value is such that value = main(). In that case,\n the return value should be an empty list.\n\n Examples\n --------\n >>> import theano.tensor as T\n >>> from theano.tensor.opt import Canonizer\n >>> add_canonizer = Canonizer(T.add, T.sub, T.neg, \\\\\n ... lambda n, d: sum(n) - sum(d))\n >>> mul_canonizer = Canonizer(T.mul, T.true_div, T.inv, \\\\\n ... lambda n, d: prod(n) / prod(d))\n\n Examples of optimizations mul_canonizer can perform:\n\n | x / x -> 1\n | (x * y) / x -> y\n | x / y / x -> 1 / y\n | x / y / z -> x / (y * z)\n | x / (y / z) -> (x * z) / y\n | (a / b) * (b / c) * (c / d) -> a / d\n | (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\n | 2 * x / 2 -> x\n | x * y * z -> Elemwise(T.mul){x,y,z} #only one pass over the memory.\n | !-> Elemwise(T.mul){x,Elemwise(T.mul){y,z}}\n\n \"\"\"\n\n def __init__(self, main, inverse, reciprocal, calculate,\n use_reciprocal=True):\n self.main = main\n self.inverse = inverse\n self.reciprocal = reciprocal\n self.calculate = calculate\n self.use_reciprocal = use_reciprocal\n\n self.external_simplifiers = []\n\n def add_simplifier(self, simplifier, reason):\n self.external_simplifiers.append((reason, simplifier))\n\n def tracks(self):\n return [self.main, self.inverse, self.reciprocal]\n\n def get_num_denum(self, input):\n \"\"\"\n This extract two lists, num and denum, such that the input is:\n self.inverse(self.main(\\*num), self.main(\\*denum)). It returns\n the two lists in a (num, denum) pair.\n\n For example, for main, inverse and reciprocal = \\*, / and inv(),\n\n | input -> returned value (num, denum)\n\n | x*y -> ([x, y], [])\n | inv(x) -> ([], [x])\n | inv(x) * inv(y) -> ([], [x, y])\n | x*y/z -> ([x, y], [z])\n | log(x) / y * (z + x) / y -> ([log(x), z + x], [y, y])\n | (((a / b) * c) / d) -> ([a, c], [b, d])\n | a / (b / c) -> ([a, c], [b])\n | log(x) -> ([log(x)], [])\n | x**y -> ([x**y], [])\n | x * y * z -> ([x, y, z], [])\n\n \"\"\"\n # This function is recursive. The idea is that there is a\n # get_num_denum recursion in which the internal ops are all\n # one of (main, inverse, reciprocal, DimShuffle) and the\n # internal data nodes all have the dtype of the 'input'\n # argument. The leaf-Variables of the graph covered by the\n # recursion may be of any Variable type.\n\n if input.owner is None or input.owner.op not in [\n self.main, self.inverse, self.reciprocal]:\n if input.owner and isinstance(input.owner.op, T.DimShuffle):\n # If input is a DimShuffle of some input which does\n # something like this:\n\n # * change a vector of length N into a 1xN row matrix\n # * change a scalar into a 1x1x1 tensor\n # * in general, complete the shape of a tensor\n # with broadcastable 1s to the *left*\n # Then we will simply discard the DimShuffle and return\n # the num/denum of its input\n dsn = input.owner # dimshuffle node\n dsop = dsn.op # dimshuffle op\n\n # the first input of the dimshuffle i.e. 
the ndarray to redim\n dsi0 = dsn.inputs[0]\n\n # The compatible order is a DimShuffle \"new_order\" of the form:\n # ('x', ..., 'x', 0, 1, 2, ..., dimshuffle_input.type.ndim)\n\n # That kind of DimShuffle only adds broadcastable\n # dimensions on the left, without discarding any\n # existing broadcastable dimension and is inserted\n # automatically by Elemwise when the inputs have\n # different numbers of dimensions (hence why we can\n # discard its information - we know we can retrieve it\n # later on).\n compatible_order = (('x',) *\n (input.type.ndim - dsi0.type.ndim) +\n tuple(range(dsi0.type.ndim)))\n if dsop.new_order == compatible_order:\n # If the \"new_order\" is the one we recognize,\n # we return the num_denum of the dimshuffled input.\n return self.get_num_denum(input.owner.inputs[0])\n else:\n # This is when the input isn't produced by main,\n # inverse or reciprocal.\n return [input], []\n else:\n return [input], []\n num = []\n denum = []\n parent = input.owner\n\n # We get the (num, denum) pairs for each input\n # pairs = [self.get_num_denum(input2) if input2.type.dtype ==\n # input.type.dtype else ([input2], []) for input2 in\n # parent.inputs]\n pairs = [self.get_num_denum(input2) for input2 in parent.inputs]\n\n if parent.op == self.main:\n # If we have main(x, y, ...), numx, denumx, numy, denumy, ...\n # then num is concat(numx, numy, num...) and denum is\n # concat(denumx, denumy, denum...) note that main() can have any\n # number of arguments >= 0 concat is list concatenation\n num = reduce(list.__iadd__, map(operator.itemgetter(0), pairs))\n denum = reduce(list.__iadd__, map(operator.itemgetter(1), pairs))\n elif parent.op == self.inverse:\n # If we have inverse(x, y), numx, denumx, numy and denumy\n # then num is concat(numx, denumy) and denum is\n # concat(denumx, numy) note that inverse() is binary\n num = pairs[0][0] + pairs[1][1]\n denum = pairs[0][1] + pairs[1][0]\n elif parent.op == self.reciprocal:\n # If we have reciprocal(x), numx, denumx\n # then num is denumx and denum is numx\n # note that reciprocal() is unary\n num = pairs[0][1]\n denum = pairs[0][0]\n return num, denum\n\n def merge_num_denum(self, num, denum):\n \"\"\"\n Utility function which takes two lists, num and denum, and\n returns something which is equivalent to inverse(main(\\*num),\n main(\\*denum)), but depends on the length of num and the length\n of denum (in order to minimize the number of operations).\n\n Let n = len(num) and d = len(denum):\n\n | n=0, d=0: neutral element (given by self.calculate([], []))\n | (for example, this would be 0 if main is addition\n | and 1 if main is multiplication)\n | n=1, d=0: num[0]\n | n=0, d=1: reciprocal(denum[0])\n | n=1, d=1: inverse(num[0], denum[0])\n | n=0, d>1: reciprocal(main(\\*denum))\n | n>1, d=0: main(\\*num)\n | n=1, d>1: inverse(num[0], main(\\*denum))\n | n>1, d=1: inverse(main(\\*num), denum[0])\n | n>1, d>1: inverse(main(\\*num), main(\\*denum))\n\n Given the values of n and d to which they are associated, all\n of the above are equivalent to:\n inverse(main(\\*num), main(\\*denum))\n\n \"\"\"\n\n ln, ld = len(num), len(denum)\n if not ln and not ld:\n return T.as_tensor_variable(self.calculate([], []))\n if not ln:\n if self.use_reciprocal:\n return self.reciprocal(self.merge_num_denum(denum, []))\n else:\n ln = [self.calculate([], [], aslist=False)]\n if not ld:\n if ln == 1:\n # num[0] should always be a variable\n assert isinstance(num[0], gof.Variable)\n return num[0]\n else:\n return self.main(*num)\n return 
self.inverse(self.merge_num_denum(num, []),\n self.merge_num_denum(denum, []))\n\n @staticmethod\n def get_constant(v):\n \"\"\"\n\n Returns\n -------\n object\n A numeric constant if v is a Constant or, well, a\n numeric constant. If v is a plain Variable, returns None.\n\n \"\"\"\n if isinstance(v, Constant):\n if getattr(v.tag, 'unique_value', None) is not None:\n data = v.tag.unique_value\n else:\n data = v.data\n if data.ndim == 0:\n return data\n else:\n return None\n elif isinstance(v, Variable):\n return None\n else:\n return v\n\n def simplify(self, num, denum, out_type):\n \"\"\"\n Shorthand for:\n\n .. code-block:: python\n\n self.simplify_constants(*self.simplify_factors(num, denum))\n\n \"\"\"\n rval = self.simplify_constants(*self.simplify_factors(num, denum),\n out_type=out_type)\n for reason, simplifier in self.external_simplifiers:\n # TODO: document that 'reason' is associated with this\n # simplification to help auditing when things go\n # wrong\n rval = simplifier(*rval)\n return rval\n\n def simplify_factors(self, num, denum):\n \"\"\"\n For any Variable r which is both in num and denum, removes it\n from both lists. Modifies the lists inplace. Returns the\n modified lists. For example:\n\n | [x], [x] -> [], []\n | [x, y], [x] -> [y], []\n | [a, b], [c, d] -> [a, b], [c, d]\n\n \"\"\"\n ln = len(num)\n ld = len(denum)\n if (ld > 2 and ln > 2):\n # Faster version for \"big\" inputs.\n while True:\n s = set(num)\n # Inputs can appear multiple times\n redo = len(s) != len(num)\n inter = s.intersection(denum)\n for v in inter:\n num.remove(v)\n denum.remove(v)\n if not redo or not inter:\n break\n else:\n for v in list(num):\n if v in denum:\n num.remove(v)\n denum.remove(v)\n return num, denum\n\n def simplify_constants(self, orig_num, orig_denum, out_type=None):\n \"\"\"\n Find all constants and put them together into a single constant.\n\n Finds all constants in orig_num and orig_denum (using\n get_constant) and puts them together into a single\n constant. The constant is inserted as the first element of the\n numerator. If the constant is the neutral element, it is\n removed from the numerator.\n\n Examples\n --------\n Let main be multiplication:\n\n | [2, 3, x], [] -> [6, x], []\n | [x, y, 2], [4, z] -> [0.5, x, y], [z]\n | [x, 2, y], [z, 2] -> [x, y], [z]\n\n \"\"\"\n # Lists representing the numerator and denumerator\n num, denum = [], []\n\n # Lists representing the *constant* elements of num and denum\n numct, denumct = [], []\n\n for v in orig_num:\n ct = self.get_constant(v)\n if ct is not None:\n # We found a constant in the numerator!\n # We add it to numct\n numct.append(ct)\n else:\n num.append(v)\n for v in orig_denum:\n ct = self.get_constant(v)\n if ct is not None:\n denumct.append(ct)\n else:\n denum.append(v)\n\n if self.use_reciprocal or num:\n # This will calculate either:\n # [inverse(main(*numct), main(*denumct))]\n # [] - if inverse(main(*numct), main(*denumct)) is the\n # neutral element\n ct = self.calculate(numct, denumct, aslist=True,\n out_type=out_type)\n else:\n # This happens if we don't allow the reciprocal and the\n # numerator is empty. 
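# --- illustrative example (not part of the original source) ---------------
# simplify_factors above cancels variables appearing in both the numerator
# and the denominator list.  A plain-Python sketch of its small-input
# branch; the helper name is hypothetical.
def _cancel_common_factors(num, denum):
    num, denum = list(num), list(denum)
    for v in list(num):
        if v in denum:
            num.remove(v)
            denum.remove(v)
    return num, denum

# _cancel_common_factors(['x'], ['x'])      -> ([], [])
# _cancel_common_factors(['x', 'y'], ['x']) -> (['y'], [])
# _cancel_common_factors(['a', 'b'], ['c']) -> (['a', 'b'], ['c'])
# ---------------------------------------------------------------------------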
That means we will need to represent\n # reciprocal(x) like inverse(neutral_element, x) so\n # we can't allow ct == []\n # TODO: why is this branch needed when merge_num_denum\n # does it for us?\n ct = [self.calculate(numct, denumct, aslist=False,\n out_type=out_type)]\n\n # Wrapping ct in a Constant with the right dtype\n ct = [T.constant(c, dtype=out_type.dtype) for c in ct]\n\n if orig_num and len(numct) == 1 and len(denumct) == 0 and ct:\n # In that case we should only have one constant in `ct`.\n assert len(ct) == 1\n first_num_ct = self.get_constant(orig_num[0])\n if first_num_ct is not None and ct[0].type.values_eq(ct[0].data,\n first_num_ct):\n # This is an important trick :( if it so happens that:\n # * there's exactly one constant on the numerator and none on\n # the denominator\n # * it's not the neutral element (ct is an empty list in that\n # case)\n # * the constant is the same as the first argument in the\n # numerator (we only check the first argument because the\n # canonizer puts the computed constants first)\n # -> then we return very exactly the original num/denum.\n # If we don't do that the optimizer will just loop\n # infinitely because it will not catch on that there are\n # no changes to be made and everytime it will want to\n # replace something by the same thing...\n # Note that it is important to use `values_eq` instead of\n # the == operator, to handle NaN values correctly.\n return orig_num, orig_denum\n\n return ct + num, denum\n\n def transform(self, node):\n op = node.op\n if op not in [self.main, self.inverse, self.reciprocal]:\n return False\n\n assert len(node.outputs) == 1\n out = node.outputs[0]\n\n # out won't have a clients field when we didn't commit a\n # started change in the graph. We can't do the check if we\n # want to skip it, so we force the skip it. It should be\n # reapplied later.\n if not hasattr(out, 'clients'):\n return\n\n # check if any of the clients of this node would be part of\n # this canonized graph... 
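# --- illustrative example (not part of the original source) ---------------
# For a multiplicative Canonizer, simplify_constants folds the numeric
# constants of num/denum into one leading constant.  A simplified
# plain-Python sketch (hypothetical helper; the real method also handles
# the neutral element and the NaN-safe early return described above):
def _fold_constants_mul(num, denum):
    is_const = lambda v: isinstance(v, (int, float))
    ct = 1.0
    for c in [v for v in num if is_const(v)]:
        ct *= c
    for c in [v for v in denum if is_const(v)]:
        ct /= c
    rest_num = [v for v in num if not is_const(v)]
    rest_den = [v for v in denum if not is_const(v)]
    return ([] if ct == 1.0 else [ct]) + rest_num, rest_den

# _fold_constants_mul([2, 3, 'x'], [])         -> ([6.0, 'x'], [])
# _fold_constants_mul(['x', 'y', 2], [4, 'z']) -> ([0.5, 'x', 'y'], ['z'])
# ---------------------------------------------------------------------------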
if so, we do nothing and wait for\n # them to be transformed.\n for c, c_idx in out.clients:\n if c == 'output':\n continue\n while (isinstance(getattr(c, 'op', None), DimShuffle) and\n len(c.outputs[0].clients) <= 1):\n c = c.outputs[0].clients[0][0]\n if getattr(c, 'op', '') in [self.main, self.inverse,\n self.reciprocal]:\n return False\n\n # Here we make the canonical version of the graph around this node\n # See the documentation of get_num_denum and simplify\n orig_num, orig_denum = self.get_num_denum(node.outputs[0])\n num, denum = self.simplify(list(orig_num), list(orig_denum), out.type)\n\n def same(x, y):\n return len(x) == len(y) and all(np.all(xe == ye) for xe, ye in\n zip(x, y))\n\n if same(orig_num, num) and same(orig_denum, denum):\n # We return False if there are no changes\n return False\n\n new = self.merge_num_denum(num, denum)\n if new.type.dtype != out.type.dtype:\n new = T.cast(new, out.type.dtype)\n\n assert (new.type == out.type) == (not (new.type != out.type))\n\n if not (new.type == out.type):\n new = _fill_chain(new, node.inputs)[0]\n\n if new.type == out.type:\n # This happen with test\n # theano/tensor/tests/test_opt.py:T_local_switch_sink\n new.tag.values_eq_approx = values_eq_approx_remove_inf_nan\n\n # We need to implement the copy over of the stacktrace.\n # See issue #5104.\n return [new]\n else:\n _logger.warning(' '.join(('CANONIZE FAILED: new, out = ',\n new, ',', out, 'types',\n new.type, ',', out.type)))\n return False\n\n def __str__(self):\n return getattr(self, 'name', 'Canonizer(%s, %s, %s)' % (\n self.main, self.inverse, self.reciprocal))\n\n\ndef mul_calculate(num, denum, aslist=False, out_type=None):\n if not num and not denum:\n # Smallest 1 possible.\n if aslist:\n return []\n else:\n return np.int8(1)\n\n # Make sure we do not accidently upcast data types.\n if out_type is None:\n out_dtype = scalar.upcast(*[v.dtype for v in (num + denum)])\n else:\n out_dtype = out_type.dtype\n one = theano._asarray(1, dtype=out_dtype)\n\n v = reduce(np.multiply, num, one) / reduce(np.multiply, denum, one)\n if aslist:\n if np.all(v == 1):\n return []\n else:\n return [v]\n return v\n\nlocal_mul_canonizer = Canonizer(T.mul, T.true_div, T.inv, mul_calculate, False)\nregister_canonicalize(local_mul_canonizer, name='local_mul_canonizer')\n\n\[email protected]_optimizer([T.neg])\ndef local_neg_to_mul(node):\n if node.op == T.neg:\n return [T.mul(np.array(-1, dtype=node.inputs[0].dtype),\n node.inputs[0])]\nregister_canonicalize(local_neg_to_mul)\n\n\n@register_specialize\[email protected]_optimizer([T.Sum, T.elemwise.Prod])\ndef local_sum_prod_mul_by_scalar(node):\n \"\"\"\n sum(scalar * smth) -> scalar * sum(smth)\n sum(-smth) -> -sum(smth)\n\n or\n\n prod(scalar * smth) -> scalar ** size(smth) * prod(smth)\n prod(-smth) -> -1 ** size(smth) * prod(smth)\n\n \"\"\"\n # TODO: if the the thing inside the Sum is a division,\n # we should get at the numerator....\n if isinstance(node.op, (T.Sum, T.elemwise.Prod)):\n node_inps, = node.inputs\n if node_inps.owner and node_inps.owner.op == T.mul:\n terms = node_inps.owner.inputs\n scalars = [t.dimshuffle() for t in terms if\n np.all(t.type.broadcastable)]\n\n if len(scalars) == 0:\n # Nothing to optimize here\n return\n\n non_scalars = [t for t in terms if not np.all(t.broadcastable)]\n\n # Perform the op only on the non-scalar inputs, if applicable\n if len(non_scalars) == 0:\n new_op_input_nb_elements = 1\n new_op_output = 1\n elif len(non_scalars) == 1:\n new_op_input_nb_elements = non_scalars[0].size\n 
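# --- illustrative example (not part of the original source) ---------------
# Numeric sanity check (NumPy only; the helper name is hypothetical) of the
# identities local_sum_prod_mul_by_scalar relies on:
#   sum(c * x)  == c * sum(x)
#   prod(c * x) == c ** x.size * prod(x)
import numpy as np

def _check_sum_prod_scalar_identities(c=3.0):
    x = np.array([1.0, 2.0, 4.0])
    assert np.isclose(np.sum(c * x), c * np.sum(x))
    assert np.isclose(np.prod(c * x), c ** x.size * np.prod(x))
    return True
# ---------------------------------------------------------------------------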
new_op_output = node.op(non_scalars[0])\n else:\n new_op_input = T.mul(*non_scalars)\n # We assume that errors always come from the prod/mul op in the\n # original computational graph, and therefore need to only\n # copy over its output stacktrace.\n copy_stack_trace(node.outputs, new_op_input)\n\n new_op_input_nb_elements = new_op_input.size\n new_op_output = node.op(new_op_input)\n\n if not len(non_scalars) == 0:\n # Copy over stacktrace from previous output to new mul op,\n # for same reason as above.\n copy_stack_trace(node.outputs, new_op_output)\n\n # If node.op is a T.elemwise.Prod, then the scalars need to be\n # raised to the power of the number of elements in the input\n # to the Prod\n if (isinstance(node.op, T.elemwise.Prod) and\n new_op_input_nb_elements != 1):\n\n scalars = [s ** new_op_input_nb_elements for s in scalars]\n\n # Scale the output of the op by the scalars and return as\n # replacement for the original output\n mul_inputs = scalars\n if new_op_input_nb_elements != 1:\n mul_inputs.append(new_op_output)\n\n if len(mul_inputs) == 1:\n # Copy over stacktrace from previous output to new mul op,\n # for same reason as above.\n copy_stack_trace(node.outputs, mul_inputs)\n\n return mul_inputs\n else:\n ret = T.mul(*mul_inputs)\n # Copy over stacktrace from previous output to new mul op,\n # for same reason as above.\n copy_stack_trace(node.outputs, [ret] + mul_inputs)\n\n return [ret]\n\n if isinstance(node.op, T.Sum) and node_inps.owner and node_inps.owner.op == T.neg:\n s = node.op(node_inps.owner.inputs[0])\n ret = T.neg(s)\n # There are never errors in the negative op, thus\n # we need only to copy over stacktrace from previous output node to\n # the two new ops.\n copy_stack_trace(node.outputs, [s, ret])\n\n return [ret]\n\n\n@register_specialize\[email protected]_optimizer([T.Elemwise])\ndef local_elemwise_sub_zeros(node):\n \"\"\"\n Elemwise{sub}(X,X) -> zeros_like(X)\n \"\"\"\n if (isinstance(node.op, T.Elemwise) and\n node.op.scalar_op.nin == 2 and\n node.op.scalar_op == scalar.sub and\n node.inputs[0] == node.inputs[1]):\n res = T.zeros_like(node.inputs[0])\n # Copy over stacktrace from previous output.\n # This could help for failures due to out-of-memory.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n\n@register_useless\n@register_specialize\n@register_stabilize\n@register_canonicalize\[email protected]_optimizer([T.Elemwise])\ndef local_useless_elemwise_comparison(node):\n \"\"\"...\n\n :note: These cases appear in the graph generated by scan.\n These optimizations will make the graph easier to read.\n # Comparing to itself is constant\n Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)\n Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)\n Elemwise[{minimum,maximum}](X, X) -> X\n\n # Comparing shape to 0 can be constant\n Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)\n Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)\n Elemwise[maximum](X.shape[i], 0) -> X.shape[i]\n Elemwise[maximum](0, X.shape[i]) -> X.shape[i]\n Elemwise[minimum](X.shape[i], 0) -> 0\n Elemwise[minimum](0, X.shape[i]) -> 0\n\n # The shape can be replaced with sum of shapes\n Elemwise[LT](add([anything that is shapes]), 0) -> Elemwise[zeros](X)\n Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)\n\n # Shapes are never negative\n # Needed by Reshape.infer_shape\n Elemwise[EQ](Subtensor(Shape(x)), -N) -> Elemwise[zeros](X)\n\n \"\"\"\n if not isinstance(node.op, T.Elemwise):\n return\n if node.op.scalar_op.nin != 2:\n return\n\n # We call zeros_like and one_like with 
opt=True to generate a\n # cleaner graph.\n dtype = node.outputs[0].dtype\n\n # Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)\n if isinstance(node.op.scalar_op, (scalar.LT, scalar.GT)) and \\\n node.inputs[0] is node.inputs[1]:\n res = T.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)\n if isinstance(node.op.scalar_op, (scalar.LE, scalar.GE)) and \\\n node.inputs[0] is node.inputs[1]:\n res = T.ones_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[{minimum,maximum}](X, X) -> X\n if isinstance(node.op.scalar_op, (scalar.Minimum, scalar.Maximum)) and \\\n node.inputs[0] is node.inputs[1]:\n res = node.inputs[0]\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)\n if isinstance(node.op.scalar_op, scalar.LT) and \\\n node.inputs[0].owner and \\\n isinstance(node.inputs[0].owner.op, Shape_i) and \\\n T.extract_constant(node.inputs[1], only_process_constants=True) == 0:\n res = T.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)\n if isinstance(node.op.scalar_op, scalar.GE) and \\\n node.inputs[0].owner and \\\n isinstance(node.inputs[0].owner.op, Shape_i) and \\\n T.extract_constant(node.inputs[1], only_process_constants=True) == 0:\n res = T.ones_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[maximum](X.shape[i], 0) -> X.shape[i]\n if isinstance(node.op.scalar_op, scalar.Maximum) and \\\n node.inputs[0].owner and \\\n isinstance(node.inputs[0].owner.op, Shape_i) and \\\n T.extract_constant(node.inputs[1], only_process_constants=True) == 0:\n # No need to copy over stacktrace.\n return [node.inputs[0]]\n # Elemwise[maximum](0, X.shape[i]) -> X.shape[i]\n if isinstance(node.op.scalar_op, scalar.Maximum) and \\\n T.extract_constant(node.inputs[0], only_process_constants=True) == 0 and \\\n node.inputs[1].owner and \\\n isinstance(node.inputs[1].owner.op, Shape_i):\n # No need to copy over stacktrace.\n return [node.inputs[1]]\n # Elemwise[minimum](X.shape[i], 0) -> 0\n if isinstance(node.op.scalar_op, scalar.Minimum) and \\\n node.inputs[0].owner and \\\n isinstance(node.inputs[0].owner.op, Shape_i) and \\\n T.extract_constant(node.inputs[1], only_process_constants=True) == 0:\n res = T.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[minimum](0, X.shape[i]) -> 0\n if isinstance(node.op.scalar_op, scalar.Minimum) and \\\n T.extract_constant(node.inputs[0], only_process_constants=True) == 0 and \\\n node.inputs[1].owner and \\\n isinstance(node.inputs[1].owner.op, Shape_i):\n res = T.zeros_like(node.inputs[1], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[LT](add([anything that is shapes]), 0) -> Elemwise[zeros](X)\n if isinstance(node.op.scalar_op, scalar.LT) and \\\n node.inputs[0].owner and \\\n isinstance(node.inputs[0].owner.op, Elemwise) and \\\n 
isinstance(node.inputs[0].owner.op.scalar_op, scalar.Add) and \\\n all([isinstance(var.owner and var.owner.op, Shape_i)\n for var in node.inputs[0].owner.inputs]) and \\\n T.extract_constant(node.inputs[1], only_process_constants=True) == 0:\n res = T.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)\n if isinstance(node.op.scalar_op, scalar.GE) and \\\n node.inputs[0].owner and \\\n isinstance(node.inputs[0].owner.op, Elemwise) and \\\n isinstance(node.inputs[0].owner.op.scalar_op, scalar.Add) and \\\n all([isinstance(var.owner and var.owner.op, Shape_i)\n for var in node.inputs[0].owner.inputs]) and \\\n T.extract_constant(node.inputs[1], only_process_constants=True) == 0:\n res = T.ones_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[EQ](Subtensor(Shape(x)), -N)\n # Elemwise[EQ](somegraph that only depend of shape, -N)\n # TODO: handle the case where the -N is on either side\n \"\"\"\n |Elemwise{eq,no_inplace} [id B] ''\n | |Subtensor{int64} [id C] ''\n | | |Join [id D] ''\n | | | |TensorConstant{0} [id E]\n | | | |Subtensor{int64:int64:} [id F] ''\n | | | | |Shape [id G] ''\n \"\"\"\n def investigate(node):\n \" Return True if values will be shapes, so >= 0\"\n if isinstance(node.op, (T.Shape, Shape_i)):\n return True\n elif isinstance(node.op, Subtensor) and node.inputs[0].owner:\n return investigate(node.inputs[0].owner)\n elif isinstance(node.op, T.Join):\n return all(v.owner and\n investigate(v.owner) for v in node.inputs[1:])\n elif isinstance(node.op, MakeVector):\n return all(v.owner and\n investigate(v.owner) for v in node.inputs)\n\n if (isinstance(node.op.scalar_op, scalar.EQ) and\n node.inputs[0].owner and\n investigate(node.inputs[0].owner)):\n try:\n cst = get_scalar_constant_value(node.inputs[1],\n only_process_constants=True)\n\n res = T.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n\n if cst < 0:\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n\n return [res]\n\n except NotScalarConstantError:\n pass\n return\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.Sum, T.elemwise.Prod])\ndef local_sum_prod_div_dimshuffle(node):\n \"\"\"\n sum(a / dimshuffle{...}(b), axis=l) -> sum(a, axis={...}) / b,\n if dimension l of the DimShuffle is 'x'\n\n or\n\n prod(a / dimshuffle{...}(b), axis=l) ->\n prod(a, axis={...}) / b ** a.shape[l],\n if dimension l of the DimShuffle is 'x'\n \"\"\"\n\n # It does not make much sense now to extend it to the case where the\n # dimshuffle is in the numerator, since elemwise inversion of the\n # denominator would still be needed before the summation or production.\n\n if isinstance(node.op, (T.Sum, T.elemwise.Prod)):\n axis = node.op.axis\n if axis is None:\n axis = list(range(node.inputs[0].ndim))\n node_input = node.inputs[0]\n if node_input.owner and node_input.owner.op == T.true_div:\n numerator, denominator = node_input.owner.inputs\n\n # Old, bugged logic, reproduced here only to warn users\n if (config.warn.sum_div_dimshuffle_bug and\n isinstance(node.op, T.Sum) and\n numerator.owner and\n isinstance(numerator.owner.op, T.DimShuffle)):\n # Check compatibility\n new_order = numerator.owner.op.new_order\n compatible_dims = True\n for ax in axis:\n if len(new_order) <= ax or new_order[ax] 
!= 'x':\n compatible_dims = False\n break\n\n if compatible_dims:\n _logger.warn('WARNING: Your current code is fine, but'\n ' Theano versions between '\n 'rev. 3bd9b789f5e8 (2010-06-16) and'\n ' cfc6322e5ad4 (2010-08-03) would '\n 'have given an incorrect result. '\n 'To disable this warning, set the Theano'\n ' flag warn.sum_div_dimshuffle_bug to'\n ' False.')\n\n if denominator.owner and isinstance(denominator.owner.op,\n T.DimShuffle):\n dimshuffle_input = denominator.owner.inputs[0]\n dimshuffle_order = denominator.owner.op.new_order\n\n compatible_dims = []\n incompatible_dims = []\n for ax in axis:\n if (ax < len(dimshuffle_order) and\n dimshuffle_order[ax] == 'x'):\n compatible_dims.append(ax)\n else:\n incompatible_dims.append(ax)\n reordered_incompatible_dims = []\n for ic_ax in incompatible_dims:\n reordered_incompatible_dims.append(\n ic_ax - sum(\n [1 for c_ax in compatible_dims if c_ax < ic_ax]))\n\n if len(compatible_dims) > 0:\n optimized_dimshuffle_order = list(\n ax for i, ax in enumerate(dimshuffle_order)\n if (i not in axis) or (ax != 'x'))\n\n # Removing leading 'x' (since it will be done automatically)\n while (len(optimized_dimshuffle_order) > 0 and\n optimized_dimshuffle_order[0] == 'x'):\n del optimized_dimshuffle_order[0]\n\n # if optimized_dimshuffle_order is sorted with\n # not 'x', then dimshuffle is useless.\n if all(i == e for i, e in\n enumerate(optimized_dimshuffle_order)):\n optimized_dimshuffle = dimshuffle_input\n else:\n optimized_dimshuffle = T.DimShuffle(\n dimshuffle_input.type.broadcastable,\n optimized_dimshuffle_order)(dimshuffle_input)\n\n if (config.warn.sum_div_dimshuffle_bug and\n isinstance(node.op, T.Sum)):\n _logger.warn('WARNING: Your current code is fine,'\n ' but Theano versions between '\n 'rev. 3bd9b789f5e8 (2010-06-16) and'\n ' cfc6322e5ad4 (2010-08-03) would '\n 'have given an incorrect result. 
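# --- illustrative example (not part of the original source) ---------------
# Numeric sanity check (NumPy only; hypothetical helper) of the identities
# behind local_sum_prod_div_dimshuffle, for a denominator that is broadcast
# along the reduced axis:
#   sum(a / b, axis)  == sum(a, axis) / b
#   prod(a / b, axis) == prod(a, axis) / b ** a.shape[axis]
import numpy as np

def _check_sum_prod_div_broadcast():
    a = np.arange(1.0, 13.0).reshape(3, 4)
    b = np.array([2.0, 4.0, 8.0])          # constant along axis=1
    assert np.allclose(np.sum(a / b[:, None], axis=1),
                       np.sum(a, axis=1) / b)
    assert np.allclose(np.prod(a / b[:, None], axis=1),
                       np.prod(a, axis=1) / b ** a.shape[1])
    return True
# ---------------------------------------------------------------------------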
'\n 'To disable this warning, set the'\n ' Theano flag '\n 'warn.sum_div_dimshuffle_bug'\n ' to False.')\n\n if isinstance(node.op, T.Sum):\n op_on_compatible_dims = T.sum(\n numerator, axis=compatible_dims)\n rval = T.true_div(\n op_on_compatible_dims,\n optimized_dimshuffle)\n if len(reordered_incompatible_dims) > 0:\n rval = T.sum(rval,\n axis=reordered_incompatible_dims)\n elif isinstance(node.op, T.elemwise.Prod):\n op_on_compatible_dims = T.prod(\n numerator, axis=compatible_dims)\n dtype = numerator.dtype\n rval = T.true_div(\n op_on_compatible_dims,\n (optimized_dimshuffle **\n T.prod([numerator.shape[ax].astype(dtype)\n for ax in compatible_dims])))\n if len(reordered_incompatible_dims) > 0:\n rval = T.prod(rval,\n axis=reordered_incompatible_dims)\n return [rval]\n\n\n@register_canonicalize\[email protected]_optimizer([T.Sum, T.elemwise.Prod])\ndef local_sum_prod_all_to_none(node):\n \"\"\"\n Sum{0,1,...N} -> Sum{} or\n Prod{0,1,...N} -> Prod{}\n\n \"\"\"\n if isinstance(node.op, T.Sum) or isinstance(node.op, T.elemwise.Prod):\n opt_type = T.Sum if isinstance(node.op, T.Sum) else T.elemwise.Prod\n # if all the axes are named, then use None as a shorthand\n # this permits more merging\n if node.op.axis is None:\n return\n if set(node.op.axis) == set(range(node.inputs[0].type.ndim)):\n return [opt_type(axis=None, dtype=node.op.dtype)(node.inputs[0])]\n\n\n@register_canonicalize\[email protected]_optimizer([T.Sum, T.elemwise.Prod])\ndef local_op_of_op(node):\n \"\"\"\n Prod(Prod()) -> single Prod()\n or\n Sum(Sum()) -> single Sum()\n\n \"\"\"\n if isinstance(node.op, T.elemwise.Prod) or isinstance(node.op, T.Sum):\n opt_type = T.Sum if isinstance(node.op, T.Sum) else T.elemwise.Prod\n node_inps, = node.inputs\n out_dtype = node.op.dtype\n # We manipulate the graph so this is done to make sure the opt\n # doesn't affect other computations.\n if len(node_inps.clients) == 1:\n if (node_inps.owner and\n (isinstance(node_inps.owner.op, node.op.__class__))):\n\n # check to see either the inner or outer prod is doing a\n # product over all axis, in which case we can remove it\n if node_inps.owner.op.axis is None or node.op.axis is None:\n return [opt_type(None, dtype=out_dtype)(\n node_inps.owner.inputs[0])]\n\n # figure out which axes were in the original sum\n newaxis = list(tuple(node_inps.owner.op.axis))\n for i in node.op.axis:\n new_i = i\n for ii in node_inps.owner.op.axis:\n if new_i >= ii:\n new_i += 1\n assert new_i not in newaxis\n newaxis.append(new_i)\n\n assert len(newaxis) == len(list(node_inps.owner.op.axis) +\n list(node.op.axis))\n\n # The old bugged logic. We keep it there to generate a warning\n # when we generated bad code.\n alldims = list(range(node_inps.owner.inputs[0].type.ndim))\n alldims = [d for i, d in enumerate(alldims) if i\n in node_inps.owner.op.axis]\n alldims = [d for i, d in enumerate(alldims)\n if i in node.op.axis]\n newaxis_old = [i for i in\n xrange(node_inps.owner.inputs[0].type.ndim)\n if i not in alldims]\n\n if (theano.config.warn.sum_sum_bug and\n newaxis != newaxis_old and\n len(newaxis) == len(newaxis_old)):\n _logger.warn(\n \"WARNING (YOUR CURRENT CODE IS FINE): Theano \"\n \"versions between version 9923a40c7b7a and August \"\n \"2nd, 2010 generated bugged code in this case. \"\n \"This happens when there are two consecutive sums \"\n \"in the graph and the intermediate sum is not \"\n \"used elsewhere in the code. Some safeguard \"\n \"removed some bad code, but not in all cases. You \"\n \"are in one such case. 
To disable this warning \"\n \"(that you can safely ignore since this bug has \"\n \"been fixed) set the theano flag \"\n \"`warn.sum_sum_bug` to False.\")\n\n combined = opt_type(newaxis, dtype=out_dtype)\n return [combined(node_inps.owner.inputs[0])]\n\n\nALL_REDUCE = [T.elemwise.CAReduce, T.elemwise.All, T.elemwise.Any,\n T.elemwise.Sum, T.elemwise.Prod,\n T.elemwise.ProdWithoutZeros]\n\n\n@register_canonicalize\n@register_uncanonicalize # Needed for MaxAndArgmax -> CAReduce\[email protected]_optimizer(ALL_REDUCE)\ndef local_reduce_join(node):\n \"\"\"\n Reduce{scalar.op}(Join(axis=0, a, b), axis=0) -> Elemwise{scalar.op}(a, b)\n\n Notes\n -----\n Supported scalar.op are Maximum, Mimimum in some cases and Add and Mul in\n all cases.\n\n Currently we must reduce on axis 0. It is probably extensible to the case\n where we join and reduce on the same set of axis.\n\n \"\"\"\n if (isinstance(node.op, T.CAReduce) and\n node.inputs[0].owner and\n isinstance(node.inputs[0].owner.op, T.Join)):\n join = node.inputs[0].owner\n if T.extract_constant(join.inputs[0], only_process_constants=True) != 0:\n return\n\n if isinstance(node.op.scalar_op, (scalar.Maximum, scalar.Minimum)):\n # Support only 2 inputs for now\n if len(join.inputs) != 3:\n return\n elif not isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul)):\n return\n elif len(join.inputs) <= 2:\n # This is a useless join, that will get removed by another opt.\n return\n\n new_inp = []\n for inp in join.inputs[1:]:\n inp = inp.owner\n if not inp:\n return\n if (not isinstance(inp.op, DimShuffle) or\n inp.op.new_order != ('x',) +\n tuple(range(inp.inputs[0].ndim))):\n return\n new_inp.append(inp.inputs[0])\n ret = Elemwise(node.op.scalar_op)(*new_inp)\n\n if ret.dtype != node.outputs[0].dtype:\n # The reduction do something about the dtype.\n return\n\n reduce_axis = node.op.axis\n if reduce_axis is None:\n reduce_axis = tuple(xrange(node.inputs[0].ndim))\n\n # I put this warning late to don't add extra warning.\n if len(reduce_axis) != 1 or 0 not in reduce_axis:\n if theano.config.warn.reduce_join:\n warnings.warn((\n 'Your current code is fine, but Theano versions '\n 'prior to 0.7 (or this development version Sept 2014) '\n 'might have given an incorrect result for this code. '\n 'To disable this warning, set the Theano flag '\n 'warn.reduce_join to False. The problem was an '\n 'optimization, that modified the pattern '\n '\"Reduce{scalar.op}(Join(axis=0, a, b), axis=0)\", '\n 'did not check the reduction axis. 
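# --- illustrative example (not part of the original source) ---------------
# Numeric illustration (NumPy only; hypothetical helper) of the pattern
# rewritten by local_reduce_join: joining two arrays on a fresh leading
# axis and reducing over that axis equals the direct elementwise reduction.
import numpy as np

def _check_reduce_join():
    a = np.arange(6.0).reshape(2, 3)
    b = 10.0 * np.ones((2, 3))
    joined = np.concatenate([a[None], b[None]], axis=0)   # Join(axis=0, a, b)
    assert np.allclose(np.sum(joined, axis=0), a + b)
    assert np.allclose(np.max(joined, axis=0), np.maximum(a, b))
    return True
# ---------------------------------------------------------------------------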
So if the '\n 'reduction axis was not 0, you got a wrong answer.'))\n return\n\n # We add the new check late to don't add extra warning.\n try:\n join_axis = get_scalar_constant_value(join.inputs[0],\n only_process_constants=True)\n\n if join_axis != reduce_axis[0]:\n return\n except NotScalarConstantError:\n return\n\n return [ret]\n\n\n@register_canonicalize('fast_compile', 'local_cut_useless_reduce')\n@register_useless('local_cut_useless_reduce')\[email protected]_optimizer(ALL_REDUCE)\ndef local_useless_reduce(node):\n \"\"\"Sum(a, axis=[]) -> a \"\"\"\n if isinstance(node.op, T.CAReduce):\n summed, = node.inputs\n # if reduce were doing anything, the output ndim would be reduced\n if summed.type == node.outputs[0].type:\n return [summed]\n\n\n@register_canonicalize\n@register_uncanonicalize\n@register_specialize\[email protected]_optimizer(ALL_REDUCE)\ndef local_reduce_broadcastable(node):\n \"\"\"Remove reduction over broadcastable dimensions.\"\"\"\n if isinstance(node.op, T.CAReduce):\n reduced, = node.inputs\n odtype = node.outputs[0].dtype\n if node.op.axis is None:\n if all(reduced.broadcastable):\n return [reduced.dimshuffle().astype(odtype)]\n else:\n axis = list(node.op.axis)\n cuttable = [a for a in axis if reduced.broadcastable[a]]\n if cuttable:\n # -- we can remove some axes of summation,\n # which simplifies the codegen for sum, especially on GPU\n new_axis = []\n pattern = []\n ii = 0\n for p in xrange(reduced.ndim):\n if p not in cuttable:\n if p in axis:\n new_axis.append(ii)\n pattern.append(p)\n ii += 1\n new_reduced = reduced.dimshuffle(*pattern)\n if new_axis:\n if type(node.op) == theano.tensor.elemwise.CAReduce:\n # This happen for tensor.max(), tensor.min()\n new_op = node.op.__class__(node.op.scalar_op,\n axis=new_axis)\n else:\n new_op = node.op.__class__(axis=new_axis)\n return [new_op(new_reduced)]\n else:\n # -- in this case we can remove the reduction completely\n return [new_reduced.astype(odtype)]\n\n\n@register_specialize\[email protected]_optimizer([T.Sum, T.elemwise.Prod])\ndef local_opt_alloc(node):\n \"\"\"\n sum(alloc(constant,shapes...)) => constant*prod(shapes)\n or\n prod(alloc(constant,shapes...)) => constant**prod(shapes)\n\n \"\"\"\n if isinstance(node.op, T.Sum) or isinstance(node.op, T.elemwise.Prod):\n node_inps, = node.inputs\n if node_inps.owner and isinstance(node_inps.owner.op, T.Alloc):\n input = node_inps.owner.inputs[0]\n shapes = node_inps.owner.inputs[1:]\n try:\n val = get_scalar_constant_value(input,\n only_process_constants=True)\n assert val.size == 1\n val = val.reshape(1)[0]\n # check which type of op\n size = T.mul(*shapes)\n if input.dtype in [\"float16\", \"float32\"]:\n # shapes are ints and normally int64.\n # We don't want to have a float64 upcast\n # We don't want to downcast to float16\n # as we fear it could loose too much precision\n # that will be amplified by the mul/pow below.\n size = size.astype('float32')\n if (node.op.axis is None or\n node.op.axis == tuple(range(input.ndim))):\n if isinstance(node.op, T.Sum):\n val = val * size\n else:\n val = val ** size\n # Sum can change the input dtype (upcast or bool\n # -> float32) by default or by user request.\n # We can ignore the acc_dtype, as there is only 1\n # elemwise we will do and not a sequence, so there is no\n # accumulation of errors.\n # So mostly, we just need to cast the output to the old\n # dtype.\n val = val.astype(node.outputs[0].dtype)\n return [val]\n to_prod = [shapes[i] for i in xrange(len(shapes))\n if i in node.op.axis]\n if to_prod:\n size 
= T.mul(*to_prod)\n if isinstance(node.op, T.Sum):\n val *= size\n else:\n val = val ** size\n # See comments above.\n val = val.astype(node.outputs[0].dtype)\n return [T.alloc(val,\n *[shapes[i] for i in xrange(len(shapes))\n if i not in node.op.axis])]\n except NotScalarConstantError:\n pass\n\n\n@register_specialize\[email protected]_optimizer([T.neg])\ndef local_neg_neg(node):\n # other specializations shouldn't put this in,\n # but sometimes they do\n if node.op == T.neg:\n if node.inputs[0].owner and node.inputs[0].owner.op == T.neg:\n return [node.inputs[0].owner.inputs[0]]\n\n\n@register_specialize\[email protected]_optimizer([T.neg])\ndef local_neg_div_neg(node):\n \"\"\"\n - (-a / b) -> a / b\n\n Also performs - (c / b) -> ((-c) / b) when c is a scalar constant.\n\n \"\"\"\n if node.op == T.neg:\n if node.inputs[0].owner and node.inputs[0].owner.op == T.true_div:\n frac = node.inputs[0]\n num, denom = frac.owner.inputs\n if num.owner and num.owner.op == T.neg:\n if len(frac.clients) == 1:\n # No other clients of the original division\n new_num = num.owner.inputs[0]\n return [T.true_div(new_num, denom)]\n elif np.all(num.broadcastable) and isinstance(num, Constant):\n if len(frac.clients) == 1:\n new_num = -num.data\n return [T.true_div(new_num, denom)]\n\n\[email protected]_optimizer([T.mul])\ndef local_mul_zero(node):\n \"\"\"\n As part of canonicalization, we replace multiplication by zero\n with zero.\n\n \"\"\"\n if node.op == T.mul:\n otype = node.outputs[0].type\n\n for i in node.inputs:\n try:\n value = get_scalar_constant_value(i)\n except NotScalarConstantError:\n continue\n # print 'MUL by value', value, node.inputs\n if value == 0:\n # print '... returning zeros'\n return _fill_chain(theano._asarray(0, dtype=otype.dtype),\n node.inputs)\nregister_canonicalize(local_mul_zero)\n\n\[email protected]_optimizer([T.true_div])\ndef local_div_to_inv(node):\n if node.op == T.true_div and np.all(\n local_mul_canonizer.get_constant(node.inputs[0]) == 1.0):\n out = node.outputs[0]\n new_out = T.inv(local_mul_canonizer.merge_num_denum(node.inputs[1:],\n []))\n # The ones could have forced upcasting\n if new_out.dtype != out.dtype:\n new_out = T.cast(new_out, dtype=out.dtype)\n # The ones could have forced a specific length\n if new_out.type != out.type:\n new_out = broadcast_like(new_out, out, node.fgraph)\n return [new_out]\n else:\n return False\nregister_specialize(local_div_to_inv)\n\n\[email protected]_optimizer([T.inv])\ndef local_inv_canon(node):\n if node.op == T.inv:\n return [T.pow(node.inputs[0], -1.0)]\n else:\n return False\nregister_canonicalize(local_inv_canon)\n\n\[email protected]_optimizer([T.pow])\ndef local_pow_canonicalize(node):\n if node.op == T.pow:\n cst = local_mul_canonizer.get_constant(node.inputs[1])\n if cst == 0:\n return [broadcast_like(1, node.outputs[0], node.fgraph)]\n if cst == 1:\n return [broadcast_like(node.inputs[0], node.outputs[0], node.fgraph)]\n else:\n return False\nregister_canonicalize(local_pow_canonicalize)\n\n\n@register_specialize\[email protected]_optimizer([T.mul])\ndef local_mul_to_sqr(node):\n \"\"\"\n x*x -> sqr(x)\n\n This is faster on the GPU when memory fetching is a big part of\n the computation time.\n\n \"\"\"\n if node.op == T.mul:\n if len(node.inputs) == 2:\n if node.inputs[0] is node.inputs[1]:\n return [T.sqr(node.inputs[0])]\n\n\n@register_canonicalize\[email protected]_optimizer([T.int_div])\ndef local_intdiv_by_one(node):\n \"\"\"x // 1 -> x\n \"\"\"\n if node.op in [T.int_div]:\n if isinstance(node.inputs[1], 
T.TensorConstant) and \\\n np.all(node.inputs[1].value == 1):\n return [node.inputs[0].astype(node.outputs[0].dtype)]\n\n\n@register_canonicalize\n@register_specialize\[email protected]_optimizer([T.int_div, T.true_div])\ndef local_zero_div(node):\n \"\"\"0 / x -> 0\n \"\"\"\n if isinstance(node.op, T.Elemwise) and isinstance(\n node.op.scalar_op, (theano.scalar.IntDiv, theano.scalar.TrueDiv)):\n if local_mul_canonizer.get_constant(node.inputs[0]) == 0:\n ret = broadcast_like(0, node.outputs[0], node.fgraph)\n ret.tag.values_eq_approx = values_eq_approx_remove_nan\n return [ret]\n\n\[email protected]_optimizer([T.pow])\ndef local_pow_specialize(node):\n # here, we are past the point of canonicalization, so we don't want\n # to put in un-necessary fills.\n if node.op == T.pow:\n # the idea here is that we have pow(x, y)\n odtype = node.outputs[0].dtype\n xsym = node.inputs[0]\n ysym = node.inputs[1]\n y = local_mul_canonizer.get_constant(ysym)\n if (y is not None) \\\n and encompasses_broadcastable(xsym.type.broadcastable,\n ysym.type.broadcastable):\n rval = None\n\n if np.all(y == 2):\n rval = [T.sqr(xsym)]\n if np.all(y == 1):\n rval = [xsym]\n if np.all(y == 0):\n rval = [T.fill(xsym, np.asarray(1, dtype=odtype))]\n if np.all(y == 0.5):\n rval = [T.sqrt(xsym)]\n if np.all(y == -0.5):\n rval = [T.inv(T.sqrt(xsym))]\n if np.all(y == -1):\n rval = [T.inv(xsym)]\n if np.all(y == -2):\n rval = [T.inv(T.sqr(xsym))]\n if rval:\n rval[0] = T.cast(rval[0], odtype)\n assert rval[0].type == node.outputs[0].type, (\n rval, node.outputs)\n return rval\n else:\n return False\nregister_specialize(local_pow_specialize)\n\n\n@register_specialize_device\[email protected]_optimizer([T.pow])\ndef local_pow_specialize_device(node):\n \"\"\"\n This optimization is not the same on all device. We do it only on cpu here.\n \"\"\"\n if node.op == T.pow:\n # the idea here is that we have pow(x, y)\n odtype = node.outputs[0].dtype\n xsym = node.inputs[0]\n ysym = node.inputs[1]\n y = local_mul_canonizer.get_constant(ysym)\n\n # the next line is needed to fix a strange case that I don't\n # know how to make a separate test.\n # That happen in the test_opt.py:test_log_erfc test.\n # y is a ndarray with dtype int8 and value 2,4 or 6. 
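# --- illustrative example (not part of the original source) ---------------
# local_pow_specialize_device (this optimizer) replaces pow(x, y) for small
# integer |y| with a chain of squarings and multiplications.  A plain-Python
# sketch of the binary-exponentiation idea it is based on; the helper name
# is hypothetical, and the actual optimizer builds the chain of T.sqr nodes
# greedily by powers of two rather than exactly like this.
def _pow_by_squaring(x, y):
    # y must be a non-zero integer for this sketch.
    n = abs(int(y))
    result, square = None, x
    while n > 0:
        if n & 1:
            result = square if result is None else result * square
        square = square * square
        n >>= 1
    return 1.0 / result if y < 0 else result

# _pow_by_squaring(3.0, 5)  -> 243.0
# _pow_by_squaring(2.0, -3) -> 0.125
# ---------------------------------------------------------------------------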
This make\n # the abs(y) <= 512 fail!\n # taking the value outside ndarray solve the problem.\n # it could be that in that case, numpy make the comparaison\n # into the wrong type(do in int8 that overflow.)\n if isinstance(y, np.ndarray):\n assert y.size == 1\n try:\n y = y[0]\n except IndexError:\n pass\n if (y is not None) \\\n and encompasses_broadcastable(xsym.type.broadcastable,\n ysym.type.broadcastable):\n rval = None\n # 512 is too small for the cpu and too big for some gpu!\n if abs(y) == int(abs(y)) and abs(y) <= 512:\n pow2 = [xsym]\n pow2_scal = [theano.scalar.get_scalar_type(xsym.dtype)()]\n y_to_do = abs(y)\n for i in xrange(int(np.log2(y_to_do))):\n pow2.append(T.sqr(pow2[i]))\n pow2_scal.append(theano.scalar.sqr(pow2_scal[i]))\n rval1 = None\n rval1_scal = None\n while y_to_do > 0:\n log_to_do = int(np.log2(y_to_do))\n if rval1:\n rval1 *= pow2[log_to_do]\n rval1_scal *= pow2_scal[log_to_do]\n else:\n rval1 = pow2[log_to_do]\n rval1_scal = pow2_scal[log_to_do]\n y_to_do -= 2 ** log_to_do\n\n if abs(y) > 2:\n # We fuse all the pow together here to make\n # compilation faster\n rval1 = Elemwise(\n theano.scalar.Composite(\n [pow2_scal[0]], [rval1_scal])).make_node(xsym)\n if y < 0:\n rval = [T.inv(rval1)]\n else:\n rval = [rval1]\n if rval:\n rval[0] = T.cast(rval[0], odtype)\n assert rval[0].type == node.outputs[0].type, (\n rval, node.outputs)\n return rval\n\n\[email protected]_optimizer([T.mul])\ndef local_mul_specialize(node):\n \"\"\"\n Remove special-case constants from mul arguments and useless neg in inputs.\n\n mul(-1, x) -> neg(x)\n mul(1, x, y) -> mul(x, y)\n mul(0, ...) -> alloc(0, shapes...)\n\n This is not done if we would add more nodes in the graph, like with:\n\n mul(-1, x, y) -/-> neg(mul(x, y))\n\n \"\"\"\n # here, we are past the point of canonicalization, so we don't\n # want to put in un-necessary fills.\n #\n # at this point [post canonicalize], mul() may have many inputs.\n if node.op == T.mul:\n # the idea here is that we have pow(x, y)\n neg = False\n new_inputs = []\n nb_neg_node = 0\n nb_cst = 0\n for input in node.inputs:\n # remove any neg arguments\n while input.owner and input.owner.op == T.neg:\n neg ^= True\n input = input.owner.inputs[0]\n nb_neg_node += 1\n\n # remove special case arguments of 1, -1 or 0\n y = local_mul_canonizer.get_constant(input)\n if y == 1.0:\n nb_cst += 1\n elif y == -1.0:\n nb_cst += 1\n neg ^= True # toggles\n elif y == 0.0:\n # if we find any zero, we just return right away\n return [broadcast_like(0, node.outputs[0], node.fgraph)]\n else:\n new_inputs.append(input)\n\n if new_inputs != node.inputs:\n if new_inputs:\n if len(new_inputs) == 1:\n if neg:\n if new_inputs[0].dtype in (T.uint_dtypes + ['bool']):\n return\n else:\n rval = -new_inputs[0]\n else:\n rval = new_inputs[0]\n else:\n # The next case would cause a replace by an equivalent case.\n if (neg and\n nb_neg_node == 0 and\n nb_cst == 1):\n return\n elif neg:\n # Don't add an extra neg node as we can't\n # fully replace this mul by a neg.\n m1 = np.asarray(-1, dtype=node.outputs[0].dtype)\n new_inputs = [m1] + new_inputs\n rval = T.mul(*new_inputs)\n\n return [broadcast_like(rval, node.outputs[0], node.fgraph)]\n else:\n # there are no variable inputs to mul\n # N.B. 
this could have been constant-folded...\n if neg:\n return [broadcast_like(-1, node.outputs[0], node.fgraph)]\n else:\n return [broadcast_like(1, node.outputs[0], node.fgraph)]\n\nregister_specialize(local_mul_specialize)\n\n\[email protected]_optimizer([T.add])\ndef local_add_specialize(node):\n def fill_chain(v):\n out = _fill_chain(v, node.inputs)\n return out\n\n # here, we are past the point of canonicalization, so we don't want\n # to put in un-necessary fills.\n if node.op == T.add:\n new_inputs = []\n for input in node.inputs:\n try:\n y = get_scalar_constant_value(input)\n except NotScalarConstantError:\n y = input\n if np.all(y == 0.0):\n continue\n new_inputs.append(input)\n\n if len(new_inputs) < len(node.inputs):\n dtype = node.outputs[0].type.dtype\n if len(new_inputs) == 0:\n # we got rid of the entire expression!\n ndim = node.outputs[0].type.ndim\n # Reuse call to constant for cache()\n cst = T.constant(np.zeros((1,) * ndim, dtype=dtype))\n assert cst.type.broadcastable == (True,) * ndim\n return fill_chain(cst)\n\n if len(new_inputs) == 1:\n ret = fill_chain(new_inputs[0])\n else:\n ret = fill_chain(T.add(*new_inputs))\n # The dtype should not be changed. It can happen if the input\n # that was forcing upcasting was equal to 0.\n if ret[0].dtype != dtype:\n ret = [T.cast(ret[0], dtype)]\n return ret\n else:\n return False\nregister_specialize(local_add_specialize)\n\nmul_canonizer = in2out(gof.LocalOptGroup(local_mul_canonizer,\n local_fill_sink, apply_all_opts=True),\n name='mul_canonizer_groups')\n\n\ndef check_for_x_over_absX(numerators, denominators):\n \"\"\"Convert x/abs(x) into sign(x). \"\"\"\n # TODO: this function should dig/search through dimshuffles\n # This won't catch a dimshuffled absolute value\n for den in list(denominators):\n if (den.owner and den.owner.op == T.abs_ and\n den.owner.inputs[0] in numerators):\n if den.owner.inputs[0].type.dtype.startswith('complex'):\n # TODO: Make an Op that projects a complex number to\n # have unit length but projects 0 to 0. That\n # would be a weird Op, but consistent with the\n # special case below. I heard there's some\n # convention in Matlab that is similar to\n # this... 
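# --- illustrative example (not part of the original source) ---------------
# Numeric check (NumPy only; hypothetical helper) of the identity used by
# check_for_x_over_absX for real, non-zero x:  x / abs(x) == sign(x).
import numpy as np

def _check_x_over_absx():
    x = np.array([-3.0, -0.5, 2.0, 7.0])
    assert np.allclose(x / np.abs(x), np.sign(x))
    return True
# ---------------------------------------------------------------------------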
but not sure.\n pass\n else:\n denominators.remove(den)\n numerators.remove(den.owner.inputs[0])\n numerators.append(T.sgn(den.owner.inputs[0]))\n return numerators, denominators\nlocal_mul_canonizer.add_simplifier(check_for_x_over_absX, 'X_over_absX')\n\n\n@register_canonicalize\[email protected]_optimizer([T.abs_])\ndef local_abs_lift(node):\n \"\"\"\n Move the abs toward the input.\n\n This is needed for check_for_x_over_absX to apply in more case.\n\n \"\"\"\n if node.op == T.abs_ and node.inputs[0].owner:\n assert node.nin == 1\n if node.inputs[0].owner.op == T.mul:\n return [T.mul(*[T.abs_(i) for i in node.inputs[0].owner.inputs])]\n if node.inputs[0].owner.op == T.true_div:\n i = node.inputs[0].owner.inputs\n return [T.true_div(T.abs_(i[0]), T.abs_(i[1]))]\n\n\n@register_specialize\[email protected]_optimizer([T.mul, T.true_div])\ndef local_abs_merge(node):\n \"\"\"\n Merge abs generated by local_abs_lift when the canonizer don't\n need it anymore\n\n \"\"\"\n if node.op == T.mul and sum([i.owner.op == T.abs_ for i in node.inputs\n if i.owner]) > 1:\n inputs = []\n for i in node.inputs:\n if i.owner and i.owner.op == T.abs_:\n inputs.append(i.owner.inputs[0])\n elif isinstance(i, Constant):\n try:\n const = get_scalar_constant_value(i,\n only_process_constants=True)\n except NotScalarConstantError:\n return False\n if not (const >= 0).all():\n return False\n inputs.append(i)\n else:\n return False\n return [T.abs_(T.mul(*inputs))]\n if node.op == T.true_div and sum([i.owner.op == T.abs_ for i in\n node.inputs if i.owner]) == 2:\n return [T.abs_(T.true_div(node.inputs[0].owner.inputs[0],\n node.inputs[1].owner.inputs[0]))]\n\n\n@register_stabilize\n@register_specialize\[email protected]_optimizer([T.log])\ndef local_log1p(node):\n # log(1+x) -> log1p(x)\n # log(1-x) -> log1p(-x)\n if node.op == T.log:\n log_arg, = node.inputs\n if log_arg.owner and log_arg.owner.op == T.add:\n scalars, scalar_inputs, nonconsts = scalarconsts_rest(\n log_arg.owner.inputs, only_process_constants=True)\n # scalar_inputs are potentially dimshuffled and fill'd scalars\n if scalars and np.allclose(np.sum(scalars), 1):\n if nonconsts:\n if len(nonconsts) > 1:\n ninp = T.add(*nonconsts)\n else:\n ninp = nonconsts[0]\n if ninp.dtype != log_arg.type.dtype:\n ninp = ninp.astype(node.outputs[0].dtype)\n return _fill_chain(T.log1p(ninp), scalar_inputs)\n\n elif log_arg.owner and log_arg.owner.op == T.sub:\n one = T.extract_constant(log_arg.owner.inputs[0],\n only_process_constants=True)\n if one != 1:\n return\n other = log_arg.owner.inputs[1]\n if other.dtype != log_arg.dtype:\n other = other.astype(log_arg.dtype)\n return [T.log1p(T.neg(other))]\n\n\n# TODO: in canonicalize, change log10 and log2 -> log\n@register_stabilize\n@register_specialize\[email protected]_optimizer([T.log])\ndef local_log_add(node):\n # log(exp(x)+exp(y))\n #\n # Suppose x >= y\n # log(exp(x) + exp(y))\n # log(exp(x) * (1 + exp(y)/exp(x)))\n # x + log(1 + exp(y)/exp(x))\n # x + log1p(exp(y)/exp(x))\n # x + log1p(exp(y-x))\n if node.op == T.log:\n z = node.inputs[0]\n if z.owner and z.owner.op == T.add:\n zi = z.owner.inputs\n if len(zi) != 2:\n # -- upgrading Maximum to handle multiple inputs wasn't trivial\n # TODO\n # raise NotImplementedError()\n return\n pre_exp = [x.owner.inputs[0] for x in zi\n if x.owner and x.owner.op == T.exp]\n if len(pre_exp) == len(zi):\n # all arguments to add are exp(<something>)\n max_pre = T.maximum(*pre_exp)\n\n ret = max_pre + T.log1p(T.exp(T.add(*[p - max_pre\n for p in pre_exp])))\n 
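# --- illustrative example (not part of the original source) ---------------
# Numeric illustration (NumPy only; hypothetical helper) of the stabilized
# form produced by local_log_add above:
#   log(exp(x) + exp(y)) -> m + log1p(exp((x - m) + (y - m))),  m = max(x, y)
# For two terms one of the differences is zero, so this is
# x + log1p(exp(y - x)) when x >= y.
import numpy as np

def _stable_log_add_exp(x, y):
    m = max(x, y)
    return m + np.log1p(np.exp((x - m) + (y - m)))

# The naive form overflows for large inputs; the rewritten form does not:
# np.log(np.exp(1000.0) + np.exp(999.0))  -> inf   (exp overflows)
# _stable_log_add_exp(1000.0, 999.0)      -> approx. 1000.3133
# ---------------------------------------------------------------------------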
ret.tag.values_eq_approx = values_eq_approx_remove_inf\n return [ret]\n\n\[email protected]_optimizer([T.log])\ndef local_log_sum_exp(node):\n # log(sum_i(exp(x_i))) = x_max + log(sum_i(exp(x_i - x_max)))\n\n if node.op != T.log:\n return\n\n sum_node = node.inputs[0].owner\n # If the sum has keepdims=True, there might be a dimshuffle\n if sum_node and isinstance(sum_node.op, T.DimShuffle):\n dimshuffle_op = sum_node.op\n sum_node = sum_node.inputs[0].owner\n else:\n dimshuffle_op = None\n\n if not sum_node or not isinstance(sum_node.op, T.Sum):\n return\n\n exp_node, axis = sum_node.inputs[0].owner, sum_node.op.axis\n if not exp_node or not (\n isinstance(exp_node.op, Elemwise) and\n isinstance(exp_node.op.scalar_op, scalar.Exp)):\n return\n\n pre_exp = exp_node.inputs[0]\n max_pre_exp = T.max(pre_exp, axis=axis)\n max_pre_exp_keepdims = T.makeKeepDims(pre_exp, max_pre_exp, axis)\n\n ret = (max_pre_exp +\n T.log(T.sum(T.exp(pre_exp - max_pre_exp_keepdims), axis=axis)))\n\n # Restore the dimshuffle op, if any.\n if dimshuffle_op:\n ret = dimshuffle_op(ret)\n\n return [ret]\n\n\ncompile.optdb.register('local_log_sum_exp',\n in2out(local_log_sum_exp, ignore_newtrees=True),\n 1.6, 'fast_run')\n\n\ndef add_calculate(num, denum, aslist=False, out_type=None):\n # TODO: make sure that this function and mul_calculate are similar\n if out_type is None:\n zero = 0.0\n else:\n zero = theano._asarray(0, dtype=out_type.dtype)\n # zero = 0.0 if out_type is None else theano._asarray(0,\n # dtype=out_type.dtype)\n v = reduce(np.add, num, zero) - reduce(np.add, denum, zero)\n if aslist:\n if np.all(v == 0):\n return []\n else:\n return [v]\n return v\n\n\nlocal_add_canonizer = Canonizer(T.add, T.sub, T.neg, add_calculate)\nadd_canonizer = in2out(gof.LocalOptGroup(local_add_canonizer,\n local_fill_sink, apply_all_opts=True),\n name='add_canonizer_group')\n\n\nregister_canonicalize(local_add_canonizer, name='local_add_canonizer')\n\n\n##################\n# Distributivity #\n##################\n\n\ndef distribute_greedy(pos_pairs, neg_pairs, num, denum,\n out_type, minscore=0):\n # each pair in pos_pairs and neg_pairs is a num/denum pair. 
this\n # function attempts to add num and denum to the corresponding parts\n # of each pair, and counts how many multiplications/divisions can\n # be saved in that way.\n\n # each division is counted like div_cost multiplications\n # (typically, division costs more so we are willing to multiply more\n # in order to divide less)\n # 1.5 was obtained through an informal test and may very well be\n # platform dependent\n div_cost = 1.5\n\n # score is number of operations saved, higher is better\n score = len(num) + div_cost * len(denum)\n new_pos_pairs = list(itertools.starmap(local_mul_canonizer.simplify,\n [(n + num, d + denum, out_type) for (n, d)\n in pos_pairs]))\n new_neg_pairs = list(itertools.starmap(local_mul_canonizer.simplify,\n [(n + num, d + denum, out_type) for (n, d)\n in neg_pairs]))\n for (n, d), (nn, dd) in zip(pos_pairs + neg_pairs, new_pos_pairs +\n new_neg_pairs):\n # We calculate how many operations we are saving with the new\n # num and denum\n score += len(n) + div_cost * len(d) - len(nn) - div_cost * len(dd)\n if score <= minscore:\n # the change is not applied because it adds too many operations\n return False, pos_pairs, neg_pairs\n return True, new_pos_pairs, new_neg_pairs\n\n\ndef attempt_distribution(factor, num, denum, out_type):\n # we try to insert each num and each denum in the factor\n # returns: changes?, new_factor, new_num, new_denum\n # if there are changes, new_num and new_denum contain all the numerators\n # and denumerators that could not be distributed in the factor\n pos, neg = local_add_canonizer.get_num_denum(factor)\n if len(pos) == 1 and not neg:\n return False, factor, num, denum\n pos_pairs = list(map(local_mul_canonizer.get_num_denum, pos))\n neg_pairs = list(map(local_mul_canonizer.get_num_denum, neg))\n change = False\n for n in list(num):\n success, pos_pairs, neg_pairs = distribute_greedy(pos_pairs,\n neg_pairs, [n], [], out_type)\n if success:\n change = True\n num.remove(n)\n for d in list(denum):\n success, pos_pairs, neg_pairs = distribute_greedy(pos_pairs,\n neg_pairs, [], [d], out_type)\n if success:\n change = True\n denum.remove(d)\n if not change:\n return change, factor, num, denum\n else:\n return change, local_add_canonizer.merge_num_denum(\n list(itertools.starmap(local_mul_canonizer.merge_num_denum,\n pos_pairs)),\n list(itertools.starmap(local_mul_canonizer.merge_num_denum,\n neg_pairs))), num, denum\n\n\n@register_canonicalize\n@register_stabilize\[email protected]_optimizer([T.mul, T.true_div, T.inv])\ndef local_greedy_distributor(node):\n \"\"\"\n Optimize by reducing the number of multiplications and/or divisions.\n\n This optimization tries to apply distributivity of multiplication\n to addition in order to reduce the number of multiplications\n and/or divisions that must be done. The algorithm weighs division\n more than multiplication to account for the former's slightly\n greater computational cost.\n\n The following expressions are simplified:\n 1. ((a/x + b/y) * x * y) --> a*y + b*x\n 2. ((a/x + b) * x) --> a + b*x\n 3. There are other forms too where node is a true_div.\n\n The following expressions are not simplified:\n 4. ((a + b) * x) -/-> a*x + b*x\n\n This optimization aims to reduce computational cost. It may also\n increase numerical stability, e.g. 
when x and/or y tend to 0 in\n example 1.\n\n \"\"\"\n\n out = node.outputs[0]\n num, denum = local_mul_canonizer.get_num_denum(out)\n if len(num) == 1 and not denum:\n return False\n\n new_num, new_denum = [], []\n\n change = False\n\n out_type = out.type\n for candidate in list(num):\n if candidate not in num:\n continue\n num.remove(candidate)\n _change, candidate, num, denum = attempt_distribution(\n candidate, num, denum, out_type,)\n\n change |= _change\n new_num.append(candidate)\n\n for candidate in list(denum):\n if candidate not in denum:\n continue\n denum.remove(candidate)\n _change, candidate, denum, num = attempt_distribution(\n candidate, denum, num, out_type)\n change |= _change\n new_denum.append(candidate)\n if not change:\n return False\n\n new_num += num\n new_denum += denum\n\n rval = local_mul_canonizer.merge_num_denum(new_num, new_denum)\n\n if not (rval.type == out.type):\n # WHY DOES THIS HAPPEN?\n return False\n\n return [rval]\n\n\[email protected]_optimizer(None)\ndef constant_folding(node):\n for input in node.inputs:\n if not isinstance(input, Constant):\n return False\n # condition: all inputs are constant\n if not node.op.do_constant_folding(node):\n # The op asks not to be constant folded.\n return False\n\n storage_map = dict([(i, [i.data]) for i in node.inputs])\n compute_map = dict([(i, [True]) for i in node.inputs])\n for o in node.outputs:\n storage_map[o] = [None]\n compute_map[o] = [False]\n impl = None\n if (hasattr(node.op, 'python_constant_folding') and\n node.op.python_constant_folding(node)):\n impl = 'py'\n thunk = node.op.make_thunk(node, storage_map, compute_map,\n no_recycling=[], impl=impl)\n\n required = thunk()\n assert not required # a node whose inputs are all provided should always\n # return successfully\n rval = []\n for output in node.outputs:\n assert compute_map[output][0], (output, storage_map[output][0])\n try:\n constant = output.type.Constant\n except AttributeError:\n constant = Constant\n\n v = constant(output.type, storage_map[output][0])\n copy_stack_trace(output, v)\n\n rval.append(v)\n return rval\n\n\ntopo_constant_folding = in2out(constant_folding, ignore_newtrees=True,\n name=\"topo_constant_folding\")\nregister_canonicalize(topo_constant_folding, 'fast_compile', final_opt=True)\nregister_uncanonicalize(topo_constant_folding, 'fast_compile', final_opt=True)\nregister_stabilize(topo_constant_folding, 'fast_compile', final_opt=True)\nregister_specialize(topo_constant_folding, 'fast_compile', final_opt=True)\n\n\ndef get_clients(node):\n \"\"\"\n Used by erf/erfc opt to track less frequent op.\n\n \"\"\"\n return [c for c, i in node.outputs[0].clients\n if c != \"output\"]\n\n\ndef get_clients2(node):\n \"\"\"\n Used by erf/erfc opt to track less frequent op.\n\n \"\"\"\n l = []\n for c, i in node.outputs[0].clients:\n if c != \"output\":\n for var in c.outputs:\n l.extend([cc for cc, ii in var.clients if cc != \"output\"])\n return l\n\n# 1+erf(x)=>erfc(-x)\nlocal_one_plus_erf = gof.PatternSub((T.add,\n 1,\n (T.erf, 'x')),\n (T.erfc, (T.neg, 'x')),\n allow_multiple_clients=True,\n name='local_one_plus_erf',\n tracks=[T.erf],\n get_nodes=get_clients)\nregister_canonicalize(local_one_plus_erf)\nregister_stabilize(local_one_plus_erf)\nregister_specialize(local_one_plus_erf)\n\n# 1-erf(x)=>erfc(x)\nlocal_one_minus_erf = gof.PatternSub((T.sub,\n 1,\n (T.erf, 'x')),\n (T.erfc, 'x'),\n allow_multiple_clients=True,\n 
name='local_one_minus_erf',)\nregister_canonicalize(local_one_minus_erf)\nregister_stabilize(local_one_minus_erf)\nregister_specialize(local_one_minus_erf)\n\nlocal_one_minus_erf2 = gof.PatternSub((T.add,\n 1,\n (T.mul, -1, (T.erf, 'x'))),\n (T.erfc, 'x'),\n allow_multiple_clients=True,\n name='local_one_minus_erf2')\nregister_canonicalize(local_one_minus_erf2)\nregister_stabilize(local_one_minus_erf2)\nregister_specialize(local_one_minus_erf2)\n\n# 1+(-erf(x))=>erfc(x) This is a different graph then the previous as\n# the canonicalize don't work completly\nlocal_one_plus_neg_erf = gof.PatternSub((T.add,\n 1,\n (T.neg, (T.erf, 'x'))),\n (T.erfc, 'x'),\n allow_multiple_clients=True,\n name='local_one_plus_neg_erf',\n tracks=[T.erf],\n get_nodes=get_clients2)\nregister_canonicalize(local_one_plus_neg_erf)\nregister_stabilize(local_one_plus_neg_erf)\nregister_specialize(local_one_plus_neg_erf)\n\n# (-1)+erf(x) => -erfc(x) don't need erf(x)+(-1) as the canonicalize\n# will put the -1 as the first argument.\nlocal_erf_minus_one = gof.PatternSub((T.add,\n -1,\n (T.erf, 'x')),\n (T.neg, (T.erfc, 'x')),\n allow_multiple_clients=True,\n name='local_erf_minus_one',\n tracks=[T.erf],\n get_nodes=get_clients)\nregister_canonicalize(local_erf_minus_one)\nregister_stabilize(local_erf_minus_one)\nregister_specialize(local_erf_minus_one)\n\n# 1-erfc(x) => erf(x)\nlocal_one_minus_erfc = gof.PatternSub((T.sub,\n 1,\n (T.erfc, 'x')),\n (T.erf, 'x'),\n allow_multiple_clients=True,\n name='local_one_minus_erfc',\n tracks=[T.erfc],\n get_nodes=get_clients)\nregister_canonicalize(local_one_minus_erfc)\nregister_stabilize(local_one_minus_erfc)\nregister_specialize(local_one_minus_erfc)\n\nlocal_one_minus_erfc2 = gof.PatternSub((T.add,\n 1,\n (T.neg, (T.erfc, 'x'))),\n (T.erf, 'x'),\n allow_multiple_clients=True,\n name='local_one_minus_erfc2',\n tracks=[T.erfc],\n get_nodes=get_clients2)\nregister_canonicalize(local_one_minus_erfc2)\nregister_stabilize(local_one_minus_erfc2)\nregister_specialize(local_one_minus_erfc2)\n\nlocal_one_minus_erfc3 = gof.PatternSub((T.add,\n 1,\n (T.mul, -1, (T.erfc, 'x'))),\n (T.erf, 'x'),\n allow_multiple_clients=True,\n name='local_one_minus_erfc3',\n tracks=[T.erfc],\n get_nodes=get_clients2)\nregister_canonicalize(local_one_minus_erfc3)\nregister_stabilize(local_one_minus_erfc3)\nregister_specialize(local_one_minus_erfc3)\n\n# 1+(-erfc(x)) => erf(x) This is a different graph then the previous as\n# the canonicalize don't work completly\nlocal_one_add_neg_erfc = gof.PatternSub((T.add,\n 1,\n (T.neg, (T.erfc, 'x'))),\n (T.erf, 'x'),\n allow_multiple_clients=True,\n name='local_one_add_neg_erfc',\n tracks=[T.erfc],\n get_nodes=get_clients2)\n\nregister_canonicalize(local_one_add_neg_erfc)\nregister_stabilize(local_one_add_neg_erfc)\nregister_specialize(local_one_add_neg_erfc)\n\n# (-1)+erfc(-x)=>erf(x)\nlocal_erf_neg_minus_one = gof.PatternSub((T.add,\n -1,\n (T.erfc, (T.neg, 'x'))),\n (T.erf, 'x'),\n allow_multiple_clients=True,\n name='local_erf_neg_minus_one',\n tracks=[T.erfc],\n get_nodes=get_clients)\nregister_canonicalize(local_erf_neg_minus_one)\nregister_stabilize(local_erf_neg_minus_one)\nregister_specialize(local_erf_neg_minus_one)\n\n# (-1)+erfc(-1*x)=>erf(x)\nlocal_erf_neg_minus_one2 = gof.PatternSub((T.add,\n -1,\n (T.erfc, (T.mul, -1, 'x'))),\n (T.erf, 'x'),\n allow_multiple_clients=True,\n name='local_erf_neg_minus_one2',\n tracks=[T.erfc],\n 
get_nodes=get_clients)\nregister_canonicalize(local_erf_neg_minus_one2)\nregister_stabilize(local_erf_neg_minus_one2)\nregister_specialize(local_erf_neg_minus_one2)\n\n\n# Stability optimization\n# log(erfc(x)) => when x>threashold,\n# -x**2-log(x)-.5*log(pi)+log(1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6))\n# for float64: threshold=26.641747557 was choosed with:\n# [(i,numpy.log(scipy.special.erfc(numpy.asarray([i],dtype='float64'))))\n# for i in numpy.arange(26.641747557,26.6417475571,.00000000001)]\n# for float32: threshold=10.0541949, [(i,numpy.log(scipy.special.erfc(\n# numpy.asarray([i],dtype='float32')))) for i in numpy.arange(\n# 10.0541948,10.0541951,.0000001)]\n@register_stabilize\n@register_specialize\[email protected]_optimizer([T.log])\ndef local_log_erfc(node):\n if node.op != T.log:\n return False\n if not node.inputs[0].owner or node.inputs[0].owner.op != T.erfc:\n return False\n\n if hasattr(node.tag, 'local_log_erfc_applied'):\n # We use that flag to don't apply the optimization recursively\n return False\n node.tag.local_log_erfc_applied = True\n\n x = node.inputs[0].owner.inputs[0]\n stab_value = (-x ** 2 - T.log(x) - .5 * T.log(np.pi) +\n T.log(1 - 1 / (2 * x ** 2) + 3 / (4 * x ** 4) -\n 15 / (8 * x ** 6)))\n\n if (node.outputs[0].dtype == 'float32' or\n node.outputs[0].dtype == 'float16'):\n threshold = 10.0541949\n elif node.outputs[0].dtype == 'float64':\n threshold = 26.641747557\n\n ret = T.switch(x < threshold, node.outputs[0], stab_value)\n ret.tag.values_eq_approx = values_eq_approx_remove_inf\n return [ret]\n\n\n# Stability optimization of the grad of log(erfc(x))\n# ([y*]exp(-(x**2)))/erfc(x) # The y* is optional\n# ([y*]exp(x**2))/erfc(-x) => [y*](when x>threashold,\n# sqrt(pi)*-x/(1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6)))\n# for float64: threshold=26.63 see at the end of the fct for the explaination\n# for float32: threshold=9.3 see at the end of the fct for the explaination\n# TODO: remove the contraint that there are only 2 inputs to exp(x**2)\n# is the second.\n# TODO: at the test point 10 in float32, there is instability in the original\n# value. 
The original gives -30.0, the stab -20.1 and in float64 -18.1.\n# Make it so that the test does not generate an error in that case!\n@register_stabilize\n@register_specialize\[email protected]_optimizer([T.true_div])\ndef local_grad_log_erfc_neg(node):\n if node.op != T.true_div:\n return False\n if not node.inputs[1].owner or node.inputs[1].owner.op != T.erfc:\n return False\n erfc = node.inputs[1]\n erfc_x = erfc.owner.inputs[0]\n if not node.inputs[0].owner:\n return False\n\n # The mul is optional.\n if node.inputs[0].owner.op != T.mul:\n mul = None\n y = []\n if not node.inputs[0].owner or node.inputs[0].owner.op != T.exp:\n return False\n exp = node.inputs[0]\n else:\n mul = node.inputs[0]\n exp = None\n for idx, inp in enumerate(mul.owner.inputs):\n if inp.owner and inp.owner.op == T.exp:\n exp = inp\n break\n if len(mul.owner.inputs) == 2:\n y = [mul.owner.inputs[1 - idx]]\n else:\n y = mul.owner.inputs[:]\n del y[idx]\n del mul\n if not exp.owner.inputs[0].owner:\n return False\n\n if exp.owner.inputs[0].owner.op == T.neg:\n neg = exp.owner.inputs[0]\n if (not neg.owner.inputs[0].owner or\n neg.owner.inputs[0].owner.op != T.sqr):\n return False\n sqr = neg.owner.inputs[0]\n x = sqr.owner.inputs[0]\n elif exp.owner.inputs[0].owner.op == T.mul:\n # We should compare that -(erfc_x**2) is equivalent to mul_neg.\n # There is currently no easy way to do this in the general case,\n # so we implement some common case for now.\n\n # In many cases the neg are replaced by mul in the graph.\n # This also allows to stabilize log(erfc(cst*x)).\n mul_neg = exp.owner.inputs[0]\n\n # In case that multiple mul are not fused together, we do it here.\n def check_input(inputs):\n new_inputs = []\n for i in inputs:\n if i.owner and i.owner.op == T.mul:\n new_inputs.extend(check_input(i.owner.inputs))\n else:\n new_inputs.append(i)\n return new_inputs\n mul_inputs = check_input(mul_neg.owner.inputs)\n\n # Put the constant first.\n for i in xrange(len(mul_inputs)):\n if isinstance(i, Constant):\n if i == 0:\n break\n else:\n tmp = mul_inputs[0]\n mul_inputs[0] = mul_inputs[i]\n mul_inputs[i] = tmp\n break\n mul_neg = T.mul(*mul_inputs)\n\n try:\n cst2 = get_scalar_constant_value(mul_neg.owner.inputs[0],\n only_process_constants=True)\n except NotScalarConstantError:\n return False\n\n if len(mul_neg.owner.inputs) == 2:\n if (not mul_neg.owner.inputs[1].owner or\n mul_neg.owner.inputs[1].owner.op != T.sqr):\n return False\n sqr = mul_neg.owner.inputs[1]\n x = sqr.owner.inputs[0]\n elif len(mul_neg.owner.inputs) == 3:\n if mul_neg.owner.inputs[1] is not mul_neg.owner.inputs[2]:\n return False\n x = mul_neg.owner.inputs[1]\n else:\n return False\n\n if cst2 != -1:\n if (not erfc_x.owner or erfc_x.owner.op != T.mul or\n len(erfc_x.owner.inputs) != 2):\n # todo implement that case\n return False\n if erfc_x.owner.inputs[1] is not mul_neg.owner.inputs[1]:\n return False\n\n x = erfc_x\n try:\n cst = get_scalar_constant_value(erfc_x.owner.inputs[0],\n only_process_constants=True)\n except NotScalarConstantError:\n return False\n if cst2 != -cst * 2:\n return False\n\n # The constant is valid. 
Must check that the\n elif erfc_x is not x:\n return False\n\n else:\n return False\n\n if hasattr(node.tag, 'local_grad_log_erfc_neg'):\n # We use that flag to don't apply the optimization recursively\n return False\n\n # we move the y outside the div.\n true_div_no_mul = T.true_div(exp, erfc)\n true_div_no_mul.owner.tag.local_grad_log_erfc_neg = True\n\n # aaron value\n stab_value = (x * T.pow(1 - 1 / (2 * (x ** 2)) +\n 3 / (4 * (x ** 4)) - 15 / (8 * (x ** 6)), -1) *\n T.cast(T.sqrt(np.pi), dtype=x.dtype))\n\n if x.dtype == 'float32' or x.dtype == 'float16':\n threshold = 9.3\n # threshold = 10.1\n elif x.dtype == 'float64':\n threshold = 26.641747557\n ret = T.switch(x < threshold, true_div_no_mul, stab_value)\n if y:\n ret = T.mul(ret, *y)\n ret.tag.values_eq_approx = values_eq_approx_remove_inf_nan\n return [ret]\n \"\"\"\nThe libm used for the test is amdlibm\n #([y*]exp(-(x**2)))/erfc(x) # The mul is optional\n#exp(x**2)/erfc(-x) => when x>threashold,\n#-x*(1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6))*sqrt(pi) for float64:\n#threshold=26.63 see below for float32: threshold=9.3 see below TODO\n#remove the contraint that there are only 2 inputs to mul TODO: should\n#we cast numpy.pi to x.dtype?\n\n#float32 threshold 9.3 as the approximation is more precise at that\n#point and more stable.\nimport numpy, scipy.special\nr = numpy.arange(9,10.06,.01)\n\np64=[(numpy.exp(-(x**2)))/scipy.special.erfc(x) for x in r]\np32=[(numpy.exp(-(x**2)))/scipy.special.erfc(x) for x in\nnumpy.asarray(r,dtype='float32')]\na64=[x*((1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6))**(-1))*numpy.sqrt(numpy.pi)\nfor x in r]\na32=[x*((1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6))**(-1))\n * numpy.float32(numpy.sqrt(numpy.pi))\nfor x in numpy.asarray(r,dtype='float32')] for idx,(a,b,c,d,e) in\nenumerate(zip(r,p64,p32,a64,a32)):print\na,b,c,d,e,c-b,e-b,numpy.absolute(c-b)<numpy.absolute(e-b)\n\n#, show that the value don't look stable at some point before inf.\nfor i in xrange(1,len(p32)): print r[i], p32[i]-p32[i-1]\n\n#float64 threshold is 26.63 the approx seam more precise at that\npoint. 
r = numpy.arange(26.2,26.7,.001)\n#scipy.special.erfc(numpy.float128(x)) don't work\n#p128=[(numpy.exp(-(x**2)))/scipy.special.erfc(x)for x in\nnumpy.float128(r)] #those value have been computed with g++\ntheano/misc/erfc_stability_threshold.c && ./a.out\np128=numpy.float128(['46.47206725', '46.47383842', '46.47560959',\n'46.47738076', '46.47915193', '46.48092309', '46.48269426',\n'46.48446543', '46.48623660', '46.48800777', '46.48977894',\n'46.49155011', '46.49332128', '46.49509245', '46.49686362',\n'46.49863479', '46.50040596', '46.50217713', '46.50394830',\n'46.50571947', '46.50749064', '46.50926181', '46.51103298',\n'46.51280415', '46.51457532', '46.51634649', '46.51811766',\n'46.51988883', '46.52166000', '46.52343118', '46.52520235',\n'46.52697352', '46.52874469', '46.53051586', '46.53228703',\n'46.53405820', '46.53582938', '46.53760055', '46.53937172',\n'46.54114289', '46.54291407', '46.54468524', '46.54645641',\n'46.54822758', '46.54999876', '46.55176993', '46.55354110',\n'46.55531227', '46.55708345', '46.55885462', '46.56062579',\n'46.56239697', '46.56416814', '46.56593931', '46.56771049',\n'46.56948166', '46.57125283', '46.57302401', '46.57479518',\n'46.57656636', '46.57833753', '46.58010871', '46.58187988',\n'46.58365105', '46.58542223', '46.58719340', '46.58896458',\n'46.59073575', '46.59250693', '46.59427810', '46.59604928',\n'46.59782045', '46.59959163', '46.60136280', '46.60313398',\n'46.60490516', '46.60667633', '46.60844751', '46.61021868',\n'46.61198986', '46.61376104', '46.61553221', '46.61730339',\n'46.61907456', '46.62084574', '46.62261692', '46.62438809',\n'46.62615927', '46.62793045', '46.62970163', '46.63147280',\n'46.63324398', '46.63501516', '46.63678633', '46.63855751',\n'46.64032869', '46.64209987', '46.64387104', '46.64564222',\n'46.64741340', '46.64918458', '46.65095576', '46.65272693',\n'46.65449811', '46.65626929', '46.65804047', '46.65981165',\n'46.66158283', '46.66335401', '46.66512519', '46.66689636',\n'46.66866754', '46.67043872', '46.67220990', '46.67398108',\n'46.67575226', '46.67752344', '46.67929462', '46.68106580',\n'46.68283698', '46.68460816', '46.68637934', '46.68815052',\n'46.68992170', '46.69169288', '46.69346406', '46.69523524',\n'46.69700642', '46.69877760', '46.70054878', '46.70231997',\n'46.70409115', '46.70586233', '46.70763351', '46.70940469',\n'46.71117587', '46.71294705', '46.71471824', '46.71648942',\n'46.71826060', '46.72003178', '46.72180296', '46.72357414',\n'46.72534533', '46.72711651', '46.72888769', '46.73065887',\n'46.73243006', '46.73420124', '46.73597242', '46.73774361',\n'46.73951479', '46.74128597', '46.74305715', '46.74482834',\n'46.74659952', '46.74837070', '46.75014189', '46.75191307',\n'46.75368426', '46.75545544', '46.75722662', '46.75899781',\n'46.76076899', '46.76254018', '46.76431136', '46.76608254',\n'46.76785373', '46.76962491', '46.77139610', '46.77316728',\n'46.77493847', '46.77670965', '46.77848084', '46.78025202',\n'46.78202321', '46.78379439', '46.78556558', '46.78733677',\n'46.78910795', '46.79087914', '46.79265032', '46.79442151',\n'46.79619269', '46.79796388', '46.79973507', '46.80150625',\n'46.80327744', '46.80504863', '46.80681981', '46.80859100',\n'46.81036219', '46.81213337', '46.81390456', '46.81567575',\n'46.81744693', '46.81921812', '46.82098931', '46.82276050',\n'46.82453168', '46.82630287', '46.82807406', '46.82984525',\n'46.83161644', '46.83338762', '46.83515881', '46.83693000',\n'46.83870119', '46.84047238', '46.84224357', '46.84401475',\n'46.84578594', '46.84755713', '46.84932832', 
'46.85109951',\n'46.85287070', '46.85464189', '46.85641308', '46.85818427',\n'46.85995546', '46.86172665', '46.86349784', '46.86526903',\n'46.86704022', '46.86881141', '46.87058260', '46.87235379',\n'46.87412498', '46.87589617', '46.87766736', '46.87943855',\n'46.88120974', '46.88298093', '46.88475212', '46.88652331',\n'46.88829450', '46.89006569', '46.89183688', '46.89360807',\n'46.89537927', '46.89715046', '46.89892165', '46.90069284',\n'46.90246403', '46.90423522', '46.90600642', '46.90777761',\n'46.90954880', '46.91131999', '46.91309119', '46.91486238',\n'46.91663357', '46.91840476', '46.92017596', '46.92194715',\n'46.92371834', '46.92548953', '46.92726073', '46.92903192',\n'46.93080311', '46.93257431', '46.93434550', '46.93611669',\n'46.93788789', '46.93965908', '46.94143028', '46.94320147',\n'46.94497266', '46.94674386', '46.94851505', '46.95028625',\n'46.95205744', '46.95382864', '46.95559983', '46.95737103',\n'46.95914222', '46.96091341', '46.96268461', '46.96445581',\n'46.96622700', '46.96799820', '46.96976939', '46.97154059',\n'46.97331178', '46.97508298', '46.97685417', '46.97862537',\n'46.98039657', '46.98216776', '46.98393896', '46.98571015',\n'46.98748135', '46.98925255', '46.99102374', '46.99279494',\n'46.99456614', '46.99633733', '46.99810853', '46.99987973',\n'47.00165092', '47.00342212', '47.00519332', '47.00696452',\n'47.00873571', '47.01050691', '47.01227811', '47.01404931',\n'47.01582050', '47.01759170', '47.01936290', '47.02113410',\n'47.02290530', '47.02467649', '47.02644769', '47.02821889',\n'47.02999009', '47.03176129', '47.03353249', '47.03530369',\n'47.03707489', '47.03884608', '47.04061728', '47.04238848',\n'47.04415968', '47.04593088', '47.04770208', '47.04947328',\n'47.05124448', '47.05301568', '47.05478688', '47.05655808',\n'47.05832928', '47.06010048', '47.06187168', '47.06364288',\n'47.06541408', '47.06718528', '47.06895648', '47.07072768',\n'47.07249888', '47.07427009', '47.07604129', '47.', '47.07958369',\n'47.08135489', '47.08312609', '47.08489729', '47.08666850',\n'47.08843970', '47.09021090', '47.09198210', '47.09375330',\n'47.09552450', '47.09729571', '47.09906691', '47.10083811',\n'47.10260931', '47.10438052', '47.10615172', '47.10792292',\n'47.10969412', '47.11146533', '47.11323653', '47.11500773',\n'47.11677894', '47.11855014', '47.12032134', '47.12209255',\n'47.12386375', '47.12563495', '47.12740616', '47.12917736',\n'47.13094857', '47.13271977', '47.13449097', '47.13626218',\n'47.13803338', '47.13980459', '47.14157579', '47.14334700',\n'47.14511820', '47.14688941', '47.14866061', '47.15043182',\n'47.15220302', '47.15397423', '47.15574543', '47.15751664',\n'47.15928784', '47.16105905', '47.16283025', '47.16460146',\n'47.16637266', '47.16814387', '47.16991508', '47.17168628',\n'47.17345749', '47.17522869', '47.17699990', '47.17877111',\n'47.18054231', '47.18231352', '47.18408473', '47.18585593',\n'47.18762714', '47.18939835', '47.19116956', '47.19294076',\n'47.19471197', '47.19648318', '47.19825439', '47.20002559',\n'47.20179680', '47.20356801', '47.20533922', '47.20711042',\n'47.20888163', '47.21065284', '47.21242405', '47.21419526',\n'47.21596647', '47.21773767', '47.21950888', '47.22128009',\n'47.22305130', '47.22482251', '47.22659372', '47.22836493',\n'47.23013614', '47.23190735', '47.23367855', '47.23544976',\n'47.23722097', '47.23899218', '47.24076339', '47.24253460',\n'47.24430581', '47.24607702', '47.24784823', '47.24961944',\n'47.25139065', '47.25316186', '47.25493307', '47.25670429',\n'47.25847550', '47.26024671', '47.26201792', 
'47.26378913',\n'47.26556034', '47.26733155', '47.26910276', '47.27087397',\n'47.27264518', '47.27441640', '47.27618761', '47.27795882',\n'47.27973003', '47.28150124', '47.28327246', '47.28504367',\n'47.28681488', '47.28858609', '47.29035730', '47.29212852',\n'47.29389973', '47.29567094', '47.29744215', '47.29921337',\n'47.30098458', '47.30275579', '47.30452701', '47.30629822',\n'47.30806943', '47.30984065', '47.31161186', '47.31338307',\n'47.31515429', '47.31692550', '47.31869671', '47.32046793',\n'47.32223914', '47.32401036', '47.32578157', '47.32755278',\n'47.32932400', '47.33109521', '47.33286643', '47.33463764',\n'47.33640886', '47.33818007', '47.33995129', '47.34172250',\n'47.34349372', '47.34526493', '47.34703615', '47.34880736',\n'47.35057858', '47.35234979', '47.35412101', '47.35589223'])\np64=[(numpy.exp(-(x**2)))/scipy.special.erfc(x)for x in r]\na128=[x*((1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6))**(-1))\n *numpy.float128(numpy.sqrt(numpy.pi))\n for x in numpy.asarray(r,dtype='float128')]\na64=[x*((1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6)+63/(7*x**8))**(-1))\n *numpy.sqrt(numpy.pi)\n for x in r] for a,b,c,d in zip(r,p128,p64,a64):print a,b,c,d,c-b,d-b\n\nfor i in xrange(1,len(p64)): print i, 64[i]-p64[i-1]\n \"\"\"\n\n\n# ###############\n# # Loop fusion #\n# ###############\ndef local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32,\n maker=None):\n \"\"\"\n We parametrize it to make it work for Elemwise and GpuElemwise op.\n\n Parameters\n ----------\n OP\n GpuElemwise or Elemwise class (the one that we want to fuse)\n max_input_fct\n A function that returns the maximum number of inputs\n that this elemwise can take (useful for GpuElemwise).\n GPU kernel currently has a limit of 256 bytes for\n the size of all parameters passed to it. As currently\n we pass many information only by parameter, we must\n limit how many ops we fuse together to avoid busting\n that 256 limit.\n\n On the CPU we limit to 32 input variables\n since that is the maximum numpy support.\n\n \"\"\"\n if maker is None:\n def maker(node, scalar_op):\n return OP(scalar_op)\n\n def local_fuse(node):\n \"\"\"\n As part of specialization, we fuse two consecutive elemwise Ops of the\n same shape.\n\n For mixed dtype, we let the Composite op do the cast. It lets the C\n compiler do the cast.\n The number of dimensions is validated at call time by theano itself.\n\n \"\"\"\n # META TODO: PUT THESE THINGS IN TRAC, NOT TODO NOTES!!\n # TODO: use broadcast flag?\n\n # TODO: don't do this optimization as a localOptimizer.\n # Analyze the graph in terms of elemwise subgraphs, and then\n # replace each subgraph with a Composite version.\n\n # TODO: use malloc and copy to transfer arguments that don't\n # fit within the parameter space of 256 bytes\n #\n # TODO: Merge with multiple output to merge when an inputs\n # have multiple clients. This can't be done with a local\n # optimiser.\n\n # TODO: Related: Support composites with multiple outputs\n\n # TODO: Use Composite to combine Elemwise and Reduce\n # operations. We have to loop over the data anyway... might\n # as well sum it up while we're at it (this can be trickier\n # than i'm making it seound here. 
The data-traversal should be\n # done contiguously, and the summing-up might not be easy or\n # worthwhile if the summation axis doesn't line up with a\n # contiguous dimension)\n\n if type(node.op) is not OP:\n return False\n\n if len(node.outputs) > 1:\n # We don't support the fusion for node with multiple outputs.\n return\n inputs = [] # inputs of the new Elemwise op.\n s_inputs = [] # inputs of the new scalar op used by the Composite.\n # Inputs of the new scalar op that represents the current node.\n s_g = []\n\n # There is a hard limit of 256 bytes for the formal argument list to a\n # GPU kernel function.\n max_nb_input = max_input_fct(node)\n # The number of inputs to the new fused op if we do not fuse more\n # inputs.\n new_nb_input = len(node.inputs)\n # Did we fuse something?\n # Needed as we can fuse unary op that don't change the number of\n # inputs.\n # And there is a case where the inputs are the same as the current\n # node. That won't change the number of inputs of the new op.\n fused = False\n\n for i in node.inputs:\n do_fusion = False\n catch = False\n # Will store inputs of the fused node that are not currently inputs\n # of the node we want to create (to avoid duplicating inputs).\n tmp_input = []\n # Same as tmp_input, but for scalars.\n tmp_scalar = []\n\n # We should not check the number of inputs here\n # As fusing op don't always change the number of input.\n # If a variable is used as multiple into to the same node,\n # we still want to fusion. So we take the set.\n if (i.owner and\n isinstance(i.owner.op, OP) and\n len(set([n for n, idx in i.clients])) == 1 and\n # Do not merge elemwise that don't have the same\n # broadcastable pattern to don't redo duplicate\n # computation due to broadcast.\n i.owner.outputs[0].broadcastable ==\n node.outputs[0].broadcastable):\n do_fusion = True\n try:\n tmp_s_input = []\n # we should not put duplicate input into s_inputs and inputs\n for ii in i.owner.inputs:\n if ii in inputs:\n tmp_s_input.append(s_inputs[inputs.index(ii)])\n elif ii in tmp_input:\n tmp_s_input.append(tmp_scalar[tmp_input.index(ii)])\n else:\n tmp = scalar.get_scalar_type(ii.dtype).make_variable()\n try:\n tv = gof.op.get_test_value(ii)\n if tv.size > 0:\n tmp.tag.test_value = tv.flatten()[0]\n else:\n tmp.tag.test_value = tv\n except AttributeError:\n pass\n tmp_s_input.append(tmp)\n tmp_input.append(ii)\n tmp_scalar.append(tmp_s_input[-1])\n s_op = i.owner.op.scalar_op(*tmp_s_input,\n return_list=True)\n\n # if the scalar_op don't have a c implementation,\n # we skip its fusion to allow the fusion of the\n # other ops.\n i.owner.op.scalar_op.c_code(s_op[0].owner,\n \"test_presence_of_c_code\",\n [\"x\" for x in i.owner.inputs],\n [\"z\" for z in i.owner.outputs],\n {\"fail\": \"%(fail)s\"})\n except MethodNotDefined:\n catch = True\n except NotImplementedError:\n catch = True\n if catch:\n _logger.info((\"%s does not implement the c_code function.\"\n \" As well as being potentially slow, this\"\n \" disables loop fusion of this op.\") %\n str(i.owner.op.scalar_op))\n do_fusion = False\n\n # Compute the number of inputs in case we fuse this input.\n # We subtract 1 because we replace the existing input with the new\n # inputs from `tmp_input`.\n new_nb_input_ = new_nb_input + len(tmp_input) - 1\n\n # If the new input is already an input of the current node, it was\n # already counted when `new_nb_input` was initialized to\n # len(node.inputs).\n # This can happen when a variable is used both by the Elemwise to\n # fuse and the current node.\n for x 
in tmp_input:\n if x in node.inputs:\n new_nb_input_ -= 1\n\n if do_fusion and (new_nb_input_ <= max_nb_input):\n fused = True\n new_nb_input = new_nb_input_\n inputs.extend(tmp_input)\n s_inputs.extend(tmp_scalar)\n s_g.extend(s_op)\n else:\n # We must support the case where the same variable appear many\n # time in the inputs\n if inputs.count(i) == node.inputs.count(i):\n s = s_inputs[inputs.index(i)]\n else:\n s = scalar.get_scalar_type(i.dtype).make_variable()\n try:\n if theano.config.compute_test_value != 'off':\n v = gof.op.get_test_value(i)\n if v.size > 0:\n s.tag.test_value = v.flatten()[0]\n except AttributeError:\n pass\n\n inputs.append(i)\n s_inputs.append(s)\n s_g.append(s)\n\n if not fused:\n return False\n\n if new_nb_input != len(inputs) or len(s_inputs) != len(inputs):\n raise Exception(\"\"\"Something has gone wrong with the elemwise\nfusion optimization. We skip this optimization. You can ignore this message,\nyour code will run correctly, but may be slower.\"\"\")\n\n s_new_out = node.op.scalar_op(*s_g, return_list=True)\n try:\n s_new_out[0].owner.op.c_code(s_new_out[0].owner,\n \"test_presence_of_c_code\",\n [\"x\" for x in s_g],\n [\"z\" for x in s_new_out],\n {\"fail\": \"%(fail)s\"})\n except MethodNotDefined:\n _logger.info((\"%s does not implement the c_code function.\"\n \" As well as being potentially slow, this disables \"\n \"loop fusion of this op.\") % str(\n s_new_out[0].owner.op))\n return False\n except NotImplementedError:\n _logger.info((\"%s does not implement the c_code function. As well\"\n \" as being potentially slow, this disables loop\"\n \" fusion of this op.\") % str(s_new_out[0].owner.op))\n return False\n\n # create the composite op.\n C = scalar.Composite(s_inputs, s_new_out)\n\n # create the new node.\n # Do not call make_node to have test_value\n n = maker(node, C)(*inputs).owner\n assert len(n.outputs) == 1\n assert node.outputs[0].dtype == n.outputs[0].dtype\n\n if len(n.inputs) > max_nb_input:\n _logger.info('loop fusion failed because Op would exceed'\n ' kernel argument limit.')\n return False\n\n # we fuse as many that we can at the same time to make debug mode faster\n # debug mode will be faster as it won't test all intermediate step.\n while True:\n ret = local_fuse(n)\n if ret is not False and ret is not None:\n # print n,ret\n assert len(ret) == len(n.outputs)\n assert len(ret) == 1\n n = ret[0].owner\n else:\n break\n\n return n.outputs\n return local_fuse\n\n\ndef elemwise_max_input_fct(node):\n # The Elemwise.perform use numpy ufunc and they are limited to 31\n # inputs.\n if not theano.config.cxx:\n return 31\n return 1024\n\n\nlocal_elemwise_fusion = local_elemwise_fusion_op(T.Elemwise,\n elemwise_max_input_fct)\n\n\nclass FusionOptimizer(Optimizer):\n \"\"\"Graph optimizer for Fusion of elemwise operations.\"\"\"\n def __init__(self, local_optimizer):\n Optimizer.__init__(self)\n self.optimizer = local_optimizer\n\n def add_requirements(self, fgraph):\n fgraph.attach_feature(toolbox.ReplaceValidate())\n\n def apply(self, fgraph):\n did_something = True\n nb_iter = 0\n nb_replacement = 0\n nb_inconsistency_replace = 0\n time_toposort = 0\n if fgraph.profile:\n validate_before = fgraph.profile.validate_time\n callbacks_before = fgraph.execute_callbacks_times.copy()\n callback_before = fgraph.execute_callbacks_time\n while did_something:\n t0 = time.time()\n nodelist = list(fgraph.toposort())\n time_toposort += time.time() - t0\n nodelist.reverse()\n did_something = False\n for node in nodelist:\n # Don't try to fuse node 
that have already been fused.\n if node in fgraph.apply_nodes:\n new_outputs = self.optimizer(node)\n if new_outputs:\n assert len(new_outputs) == len(node.outputs)\n try:\n fgraph.replace_all_validate(\n list(zip(node.outputs, new_outputs)),\n reason=self.__class__.__name__)\n did_something = True\n nb_replacement += 1\n except InconsistencyError:\n nb_inconsistency_replace += 1\n pass\n nb_iter += 1\n\n if fgraph.profile:\n validate_time = fgraph.profile.validate_time - validate_before\n callback_time = fgraph.execute_callbacks_time - callback_before\n callbacks_time = {}\n for k, v in iteritems(fgraph.execute_callbacks_times):\n if k in callbacks_before:\n callbacks_time[k] = v - callbacks_before[k]\n else:\n callbacks_time[k] = v\n else:\n validate_time = None\n callback_time = None\n callbacks_time = {}\n return (self, nb_iter, nb_replacement,\n nb_inconsistency_replace,\n validate_time, callback_time, callbacks_time,\n time_toposort)\n\n @staticmethod\n def print_profile(stream, prof, level=0):\n blanc = (' ' * level)\n print(blanc, \"FusionOptimizer\", file=stream)\n print(blanc, \" nb_iter\", prof[1], file=stream)\n print(blanc, \" nb_replacement\", prof[2], file=stream)\n print(blanc, \" nb_inconsistency_replace\", prof[3], file=stream)\n print(blanc, \" validate_time\", prof[4], file=stream)\n print(blanc, \" callback_time\", prof[5], file=stream)\n if prof[5] > 1:\n print(blanc, \" callbacks_time\", file=stream)\n for i in sorted(iteritems(prof[6]), key=lambda a: a[1])[::-1]:\n if i[1] > 0:\n print(blanc, \" \", i)\n print(blanc, \" time_toposort\", prof[7], file=stream)\n\n\ndef local_add_mul_fusion(node):\n \"\"\"Fuse consecutive add or mul in one such node with more inputs.\n\n It is better to fuse add/mul that way then in a Composite node as\n this make the inner graph of the Composite smaller. 
This allow to\n put more computation in a Composite before hitting the max\n recusion limit when pickling Composite.\n\n \"\"\"\n if (not isinstance(node.op, Elemwise) or\n not isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul))):\n return False\n\n s_op = node.op.scalar_op.__class__\n new_inp = []\n fused = False\n nb_inputs = len(node.inputs)\n max_inputs = float('inf')\n if hasattr(node.op, 'max_inputs'):\n max_inputs = node.op.max_inputs(node)\n for inp in node.inputs:\n if (inp.owner and\n isinstance(inp.owner.op, Elemwise) and\n isinstance(inp.owner.op.scalar_op, s_op) and\n # Do not duplicate the operation.\n len(inp.clients) == 1 and\n (nb_inputs + len(inp.owner.inputs) - 1) <= max_inputs):\n new_inp.extend(inp.owner.inputs)\n fused = True\n else:\n new_inp.append(inp)\n\n # We can not compare the number of inputs as Mul and Add could have\n # 0 or 1 inputs in some corner cases.\n if fused:\n output = node.op(*new_inp)\n copy_stack_trace(node.outputs[0], output)\n\n # Do the recursion here to help lower the number of\n # FusionOptimizer iteration.\n if output.owner:\n output2 = local_add_mul_fusion(output.owner)\n if output2:\n return output2\n return [output]\n\nif config.tensor.local_elemwise_fusion:\n _logger.debug(\"enabling optimization fusion elemwise in fast_run\")\n # Must be after gpu(48.5) and before AddDestroyHandler(49.5)\n fuse_seqopt = gof.SequenceDB()\n fuse_seqopt.register('local_add_mul_fusion',\n FusionOptimizer(local_add_mul_fusion),\n 0, 'fast_run', 'fusion')\n fuse_seqopt.register('composite_elemwise_fusion',\n FusionOptimizer(local_elemwise_fusion),\n 1, 'fast_run', 'fusion')\n compile.optdb.register('elemwise_fusion',\n fuse_seqopt, 49,\n 'fast_run', 'fusion', 'local_elemwise_fusion',\n 'FusionOptimizer')\nelse:\n _logger.debug(\"not enabling optimization fusion elemwise in fast_run\")\n compile.optdb.register('elemwise_fusion',\n FusionOptimizer(local_elemwise_fusion), 49,\n 'fusion', 'local_elemwise_fusion',\n 'FusionOptimizer')\n\n\n@register_canonicalize\[email protected]_optimizer([Elemwise])\ndef local_useless_composite(node):\n \"\"\"For elemwise Composite that have multiple outputs, remove the\n outputs that are not used.\n\n \"\"\"\n if (not isinstance(node.op, Elemwise) or\n not isinstance(node.op.scalar_op, scalar.Composite)):\n return\n comp = node.op.scalar_op\n idx = [i for i, o_extern in enumerate(node.outputs)\n if o_extern.clients]\n if len(idx) < len(node.outputs):\n new_outputs = [comp.outputs[i] for i in idx]\n c = scalar.Composite(inputs=comp.inputs,\n outputs=new_outputs)\n e = Elemwise(scalar_op=c)(*node.inputs, return_list=True)\n return dict(zip([node.outputs[i] for i in idx], e))\n\n# ############################\n# # Remove consider_constant #\n# ############################\n\n\n# Although the ops ConsiderConstant, ZeroGrad and DisconnectedGrad\n# just returns the input, it should be removed from the graph to\n@register_canonicalize('fast_compile')\n@register_useless('fast_compile')\[email protected]_optimizer(None)\ndef local_view_op(node):\n if isinstance(node.op, theano.compile.ops.ViewOp):\n return node.inputs\n\n\n@register_useless\n@register_canonicalize\n@register_stabilize\n@register_specialize\[email protected]_optimizer([T.Alloc])\ndef local_merge_alloc(node):\n # This opt takes care of several cases:\n # Alloc(Alloc(m, x, 1, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)\n # Alloc(Alloc(m, y, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)\n # Alloc(Alloc(m, y1, 1, 1), x, y2, z, w) -> Alloc(m, x, assert(y1, y1==y2), z, w)\n 
if not isinstance(node.op, T.Alloc):\n return False\n if not node.inputs[0].owner or not isinstance(\n node.inputs[0].owner.op, T.Alloc):\n return False\n inputs_outer = node.inputs\n inputs_inner = node.inputs[0].owner.inputs\n dims_outer = inputs_outer[1:]\n dims_inner = inputs_inner[1:]\n dims_outer_rev = dims_outer[::-1]\n dims_inner_rev = dims_inner[::-1]\n # check if the pattern of broadcasting is matched, in the reversed ordering.\n # The reverse ordering is needed when an Alloc add an implicit new\n # broadcasted dimensions to its inputs[0]. Eg:\n # Alloc(Alloc(m, y, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)\n i = 0\n for dim_inner, dim_outer in zip(dims_inner_rev, dims_outer_rev):\n if dim_inner != dim_outer:\n if isinstance(dim_inner, Constant) and dim_inner.data == 1:\n pass\n else:\n dims_outer[-1 - i] = Assert(\n \"You have a shape error in your graph. To see a better\"\n \" error message and a stack trace of where in your code\"\n \" the error is created, use the Theano flags\"\n \" optimizer=None or optimizer=fast_compile.\")(\n dim_outer, T.eq(dim_outer, dim_inner))\n i += 1\n return [T.alloc(inputs_inner[0], *dims_outer)]\n\n\n@register_useless('fast_compile')\[email protected]_optimizer([TopKOp])\ndef local_useless_topk(node):\n \"\"\"\n TopKOp generates two outputs by default\n This opt removes the useless ones\n\n \"\"\"\n op = node.op\n if not isinstance(op, TopKOp):\n return\n if not (op.return_values and op.return_indices):\n return False\n\n x, k = node.inputs\n ret_val = bool(node.outputs[0].clients)\n ret_idx = bool(node.outputs[1].clients)\n\n if not (ret_val ^ ret_idx):\n # both true -> nothing to remove\n # both false -> let pruner handle\n return False\n\n old_output = node.outputs[ret_idx]\n new_output = TopKOp(\n axis=op.axis,\n idx_dtype=op.idx_dtype,\n return_values=ret_val,\n return_indices=ret_idx)(x, k)\n return {old_output: new_output}\n"
] | [
[
"numpy.int8",
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"numpy.sum",
"numpy.any",
"numpy.arange",
"numpy.all",
"numpy.log2",
"numpy.dtype"
]
] |
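The `opt.py` cell above closes with Theano's numerical-stability rewrites; in particular, `local_log_sum_exp` applies the identity log(sum_i(exp(x_i))) = x_max + log(sum_i(exp(x_i - x_max))) so the exponentials never overflow. Below is a minimal NumPy sketch of that identity only; the helper name `logsumexp_stable` is illustrative and is not defined anywhere in the row above.

import numpy as np

def logsumexp_stable(x):
    # Shift by the maximum before exponentiating: the same rewrite that
    # local_log_sum_exp performs on the graph. The result is mathematically
    # unchanged, but exp() stays in a representable range.
    x_max = np.max(x)
    return x_max + np.log(np.sum(np.exp(x - x_max)))

x = np.array([1000.0, 1000.0])
print(logsumexp_stable(x))        # ~1000.6931, i.e. 1000 + log(2)
print(np.log(np.sum(np.exp(x))))  # inf: the naive form overflows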
flymin/auto-attack | [
"4e8122dc436f06b067052bc836f77e2c8aeb45a9"
] | [
"autoattack/fab_base.py"
] | [
"# Copyright (c) 2019-present, Francesco Croce\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport time\n\nimport torch\nfrom torch.autograd.gradcheck import zero_gradients\n\nfrom .fab_projections import projection_linf, projection_l2,\\\n projection_l1\n\nDEFAULT_EPS_DICT_BY_NORM = {'Linf': .3, 'L2': 1., 'L1': 5.0}\n\n\nclass FABAttack():\n \"\"\"\n Fast Adaptive Boundary Attack (Linf, L2, L1)\n https://arxiv.org/abs/1907.02044\n \n :param norm: Lp-norm to minimize ('Linf', 'L2', 'L1' supported)\n :param n_restarts: number of random restarts\n :param n_iter: number of iterations\n :param eps: epsilon for the random restarts\n :param alpha_max: alpha_max\n :param eta: overshooting\n :param beta: backward step\n \"\"\"\n\n def __init__(\n self,\n norm='Linf',\n n_restarts=1,\n n_iter=100,\n eps=None,\n alpha_max=0.1,\n eta=1.05,\n beta=0.9,\n loss_fn=None,\n verbose=False,\n seed=0,\n targeted=False,\n device=None,\n n_target_classes=9):\n \"\"\" FAB-attack implementation in pytorch \"\"\"\n\n self.norm = norm\n self.n_restarts = n_restarts\n self.n_iter = n_iter\n self.eps = eps if eps is not None else DEFAULT_EPS_DICT_BY_NORM[norm]\n self.alpha_max = alpha_max\n self.eta = eta\n self.beta = beta\n self.targeted = False\n self.verbose = verbose\n self.seed = seed\n self.target_class = None\n self.device = device\n self.n_target_classes = n_target_classes\n\n def check_shape(self, x):\n return x if len(x.shape) > 0 else x.unsqueeze(0)\n\n def _predict_fn(self, x):\n raise NotImplementedError(\"Virtual function.\")\n\n def _get_predicted_label(self, x):\n raise NotImplementedError(\"Virtual function.\")\n\n def get_diff_logits_grads_batch(self, imgs, la):\n raise NotImplementedError(\"Virtual function.\")\n\n def get_diff_logits_grads_batch_targeted(self, imgs, la, la_target):\n raise NotImplementedError(\"Virtual function.\")\n\n def attack_single_run(self, x, y=None, use_rand_start=False, is_targeted=False):\n \"\"\"\n :param x: clean images\n :param y: clean labels, if None we use the predicted labels\n :param is_targeted True if we ise targeted version. 
Targeted class is assigned by `self.target_class`\n \"\"\"\n\n if self.device is None:\n self.device = x.device\n self.orig_dim = list(x.shape[1:])\n self.ndims = len(self.orig_dim)\n\n x = x.detach().clone().float().to(self.device)\n #assert next(self.predict.parameters()).device == x.device\n\n y_pred = self._get_predicted_label(x)\n if y is None:\n y = y_pred.detach().clone().long().to(self.device)\n else:\n y = y.detach().clone().long().to(self.device)\n pred = y_pred == y\n corr_classified = pred.float().sum()\n if self.verbose:\n print('Clean accuracy: {:.2%}'.format(pred.float().mean()))\n if pred.sum() == 0:\n return x\n pred = self.check_shape(pred.nonzero(as_tuple=False).squeeze())\n\n if is_targeted:\n output = self._predict_fn(x)\n la_target = output.sort(dim=-1)[1][:, -self.target_class]\n la_target2 = la_target[pred].detach().clone()\n\n startt = time.time()\n # runs the attack only on correctly classified points\n im2 = x[pred].detach().clone()\n la2 = y[pred].detach().clone()\n if len(im2.shape) == self.ndims:\n im2 = im2.unsqueeze(0)\n bs = im2.shape[0]\n u1 = torch.arange(bs)\n adv = im2.clone()\n adv_c = x.clone()\n res2 = 1e10 * torch.ones([bs]).to(self.device)\n res_c = torch.zeros([x.shape[0]]).to(self.device)\n x1 = im2.clone()\n x0 = im2.clone().reshape([bs, -1])\n counter_restarts = 0\n\n while counter_restarts < 1:\n if use_rand_start:\n if self.norm == 'Linf':\n t = 2 * torch.rand(x1.shape).to(self.device) - 1\n x1 = im2 + (torch.min(res2,\n self.eps * torch.ones(res2.shape)\n .to(self.device)\n ).reshape([-1, *[1]*self.ndims])\n ) * t / (t.reshape([t.shape[0], -1]).abs()\n .max(dim=1, keepdim=True)[0]\n .reshape([-1, *[1]*self.ndims])) * .5\n elif self.norm == 'L2':\n t = torch.randn(x1.shape).to(self.device)\n x1 = im2 + (torch.min(res2,\n self.eps * torch.ones(res2.shape)\n .to(self.device)\n ).reshape([-1, *[1]*self.ndims])\n ) * t / ((t ** 2)\n .view(t.shape[0], -1)\n .sum(dim=-1)\n .sqrt()\n .view(t.shape[0], *[1]*self.ndims)) * .5\n elif self.norm == 'L1':\n t = torch.randn(x1.shape).to(self.device)\n x1 = im2 + (torch.min(res2,\n self.eps * torch.ones(res2.shape)\n .to(self.device)\n ).reshape([-1, *[1]*self.ndims])\n ) * t / (t.abs().view(t.shape[0], -1)\n .sum(dim=-1)\n .view(t.shape[0], *[1]*self.ndims)) / 2\n\n x1 = x1.clamp(0.0, 1.0)\n\n counter_iter = 0\n while counter_iter < self.n_iter:\n with torch.no_grad():\n if is_targeted:\n df, dg = self.get_diff_logits_grads_batch_targeted(x1, la2, la_target2)\n else:\n df, dg = self.get_diff_logits_grads_batch(x1, la2)\n if self.norm == 'Linf':\n dist1 = df.abs() / (1e-12 +\n dg.abs()\n .reshape(dg.shape[0], dg.shape[1], -1)\n .sum(dim=-1))\n elif self.norm == 'L2':\n dist1 = df.abs() / (1e-12 + (dg ** 2)\n .reshape(dg.shape[0], dg.shape[1], -1)\n .sum(dim=-1).sqrt())\n elif self.norm == 'L1':\n dist1 = df.abs() / (1e-12 + dg.abs().reshape(\n [df.shape[0], df.shape[1], -1]).max(dim=2)[0])\n else:\n raise ValueError('norm not supported')\n ind = dist1.min(dim=1)[1]\n dg2 = dg[u1, ind]\n b = (- df[u1, ind] + (dg2 * x1).reshape(x1.shape[0], -1)\n .sum(dim=-1))\n w = dg2.reshape([bs, -1])\n\n if self.norm == 'Linf':\n d3 = projection_linf(\n torch.cat((x1.reshape([bs, -1]), x0), 0),\n torch.cat((w, w), 0),\n torch.cat((b, b), 0))\n elif self.norm == 'L2':\n d3 = projection_l2(\n torch.cat((x1.reshape([bs, -1]), x0), 0),\n torch.cat((w, w), 0),\n torch.cat((b, b), 0))\n elif self.norm == 'L1':\n d3 = projection_l1(\n torch.cat((x1.reshape([bs, -1]), x0), 0),\n torch.cat((w, w), 0),\n torch.cat((b, b), 0))\n d1 
= torch.reshape(d3[:bs], x1.shape)\n d2 = torch.reshape(d3[-bs:], x1.shape)\n if self.norm == 'Linf':\n a0 = d3.abs().max(dim=1, keepdim=True)[0]\\\n .view(-1, *[1]*self.ndims)\n elif self.norm == 'L2':\n a0 = (d3 ** 2).sum(dim=1, keepdim=True).sqrt()\\\n .view(-1, *[1]*self.ndims)\n elif self.norm == 'L1':\n a0 = d3.abs().sum(dim=1, keepdim=True)\\\n .view(-1, *[1]*self.ndims)\n a0 = torch.max(a0, 1e-8 * torch.ones(\n a0.shape).to(self.device))\n a1 = a0[:bs]\n a2 = a0[-bs:]\n alpha = torch.min(torch.max(a1 / (a1 + a2),\n torch.zeros(a1.shape)\n .to(self.device)),\n self.alpha_max * torch.ones(a1.shape)\n .to(self.device))\n x1 = ((x1 + self.eta * d1) * (1 - alpha) +\n (im2 + d2 * self.eta) * alpha).clamp(0.0, 1.0)\n\n is_adv = self._get_predicted_label(x1) != la2\n\n if is_adv.sum() > 0:\n ind_adv = is_adv.nonzero(as_tuple=False).squeeze()\n ind_adv = self.check_shape(ind_adv)\n if self.norm == 'Linf':\n t = (x1[ind_adv] - im2[ind_adv]).reshape(\n [ind_adv.shape[0], -1]).abs().max(dim=1)[0]\n elif self.norm == 'L2':\n t = ((x1[ind_adv] - im2[ind_adv]) ** 2)\\\n .reshape(ind_adv.shape[0], -1).sum(dim=-1).sqrt()\n elif self.norm == 'L1':\n t = (x1[ind_adv] - im2[ind_adv])\\\n .abs().reshape(ind_adv.shape[0], -1).sum(dim=-1)\n adv[ind_adv] = x1[ind_adv] * (t < res2[ind_adv]).\\\n float().reshape([-1, *[1]*self.ndims]) + adv[ind_adv]\\\n * (t >= res2[ind_adv]).float().reshape(\n [-1, *[1]*self.ndims])\n res2[ind_adv] = t * (t < res2[ind_adv]).float()\\\n + res2[ind_adv] * (t >= res2[ind_adv]).float()\n x1[ind_adv] = im2[ind_adv] + (\n x1[ind_adv] - im2[ind_adv]) * self.beta\n\n counter_iter += 1\n\n counter_restarts += 1\n\n ind_succ = res2 < 1e10\n if self.verbose:\n print('success rate: {:.0f}/{:.0f}'\n .format(ind_succ.float().sum(), corr_classified) +\n ' (on correctly classified points) in {:.1f} s'\n .format(time.time() - startt))\n\n res_c[pred] = res2 * ind_succ.float() + 1e10 * (1 - ind_succ.float())\n ind_succ = self.check_shape(ind_succ.nonzero(as_tuple=False).squeeze())\n adv_c[pred[ind_succ]] = adv[ind_succ].clone()\n\n return adv_c\n\n def perturb(self, x, y):\n if self.device is None:\n self.device = x.device\n adv = x.clone()\n with torch.no_grad():\n acc = self._predict_fn(x).max(1)[1] == y\n\n startt = time.time()\n\n torch.random.manual_seed(self.seed)\n torch.cuda.random.manual_seed(self.seed)\n\n if not self.targeted:\n for counter in range(self.n_restarts):\n ind_to_fool = acc.nonzero(as_tuple=False).squeeze()\n if len(ind_to_fool.shape) == 0: ind_to_fool = ind_to_fool.unsqueeze(0)\n if ind_to_fool.numel() != 0:\n x_to_fool, y_to_fool = x[ind_to_fool].clone(), y[ind_to_fool].clone()\n adv_curr = self.attack_single_run(x_to_fool, y_to_fool, use_rand_start=(counter > 0), is_targeted=False)\n\n acc_curr = self._predict_fn(adv_curr).max(1)[1] == y_to_fool\n if self.norm == 'Linf':\n res = (x_to_fool - adv_curr).abs().reshape(x_to_fool.shape[0], -1).max(1)[0]\n elif self.norm == 'L2':\n res = ((x_to_fool - adv_curr) ** 2).reshape(x_to_fool.shape[0], -1).sum(dim=-1).sqrt()\n elif self.norm == 'L1':\n res = (x_to_fool - adv_curr).abs().reshape(x_to_fool.shape[0], -1).sum(-1)\n acc_curr = torch.max(acc_curr, res > self.eps)\n\n ind_curr = (acc_curr == 0).nonzero(as_tuple=False).squeeze()\n acc[ind_to_fool[ind_curr]] = 0\n adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone()\n\n if self.verbose:\n print('restart {} - robust accuracy: {:.2%} at eps = {:.5f} - cum. 
time: {:.1f} s'.format(\n counter, acc.float().mean(), self.eps, time.time() - startt))\n\n else:\n for target_class in range(2, self.n_target_classes + 2):\n self.target_class = target_class\n for counter in range(self.n_restarts):\n ind_to_fool = acc.nonzero(as_tuple=False).squeeze()\n if len(ind_to_fool.shape) == 0: ind_to_fool = ind_to_fool.unsqueeze(0)\n if ind_to_fool.numel() != 0:\n x_to_fool, y_to_fool = x[ind_to_fool].clone(), y[ind_to_fool].clone()\n adv_curr = self.attack_single_run(x_to_fool, y_to_fool, use_rand_start=(counter > 0), is_targeted=True)\n\n acc_curr = self._predict_fn(adv_curr).max(1)[1] == y_to_fool\n if self.norm == 'Linf':\n res = (x_to_fool - adv_curr).abs().reshape(x_to_fool.shape[0], -1).max(1)[0]\n elif self.norm == 'L2':\n res = ((x_to_fool - adv_curr) ** 2).reshape(x_to_fool.shape[0], -1).sum(dim=-1).sqrt()\n elif self.norm == 'L1':\n res = (x_to_fool - adv_curr).abs().reshape(x_to_fool.shape[0], -1).sum(-1)\n acc_curr = torch.max(acc_curr, res > self.eps)\n\n ind_curr = (acc_curr == 0).nonzero(as_tuple=False).squeeze()\n acc[ind_to_fool[ind_curr]] = 0\n adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone()\n\n if self.verbose:\n print('restart {} - target_class {} - robust accuracy: {:.2%} at eps = {:.5f} - cum. time: {:.1f} s'.format(\n counter, self.target_class, acc.float().mean(), self.eps, time.time() - startt))\n\n return adv\n"
] | [
[
"torch.zeros",
"torch.rand",
"torch.cat",
"torch.arange",
"torch.cuda.random.manual_seed",
"torch.max",
"torch.no_grad",
"torch.ones",
"torch.random.manual_seed",
"torch.randn",
"torch.reshape"
]
] |
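The FAB implementation in the cell above keeps, for each input, the adversarial point with the smallest perturbation under the chosen norm ('Linf', 'L2' or 'L1'). A short PyTorch sketch of that per-sample distance computation follows; `perturbation_size` is an illustrative helper name, since the attack itself does the same arithmetic inline on `x_to_fool` and `adv_curr`.

import torch

def perturbation_size(x, adv, norm):
    # Flatten each sample and measure ||x - adv|| under the requested norm,
    # mirroring the bookkeeping FABAttack uses to decide whether a new
    # adversarial example improves on the best one found so far.
    d = (x - adv).reshape(x.shape[0], -1)
    if norm == 'Linf':
        return d.abs().max(dim=1)[0]
    if norm == 'L2':
        return (d ** 2).sum(dim=1).sqrt()
    if norm == 'L1':
        return d.abs().sum(dim=1)
    raise ValueError('norm not supported')

x = torch.rand(4, 3, 8, 8)
adv = (x + 0.05 * torch.randn_like(x)).clamp(0.0, 1.0)
print(perturbation_size(x, adv, 'Linf'))  # one distance per sample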
sudogauss/twasc | [
"527ddd8bb1fe76bb0aa8c0837cdff0acc1ddfc32"
] | [
"main.py"
] | [
"import sys\nimport getopt\nimport tensorflow as tf\nfrom model.model import Model\nfrom model.model_const import *\nimport numpy as np\n\nif __name__ == \"__main__\":\n\n arg_list = sys.argv[1:]\n\n short_opts = \"h\"\n long_opts = [\"help\", \"twitter_pattern=\", \"weights=\"]\n\n try:\n args, vals = getopt.getopt(arg_list, short_opts, long_opts)\n except getopt.error as err:\n print(\"Use -h or --help for help\")\n print(str(err))\n\n twitter_pattern = None\n weights_path = None\n\n for arg, val in args:\n if arg in (\"-h\", \"--help\"):\n print(\"=====Help=====\")\n print(\"Use -h or --help for help\")\n print(\"Use --twitter_pattern=<pattern> to use a twitter pattern. You must use the appropriate weights\")\n print(\"Use --weights=<weights_path> to use weights. This option is required\")\n print(\"==========\")\n sys.exit(0)\n elif arg in (\"--twitter_pattern\"):\n twitter_pattern = val\n elif arg in (\"--weights\"):\n weights_path = val\n\n if weights_path is None:\n print(\"weights are required\")\n sys.exit(1)\n\n model = Model()\n model.load_weights(weights_path)\n model.compile()\n\n if twitter_pattern is None:\n file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')\n text = open(file, 'rb').read().decode(encoding='utf-8').lower()\n text_len = 300000\n text = text[:text_len]\n dataX = []\n dataY = []\n for i in range(0, text_len - SEQ_LENGTH, 1):\n seq_in = text[i:i + SEQ_LENGTH]\n seq_out = text[i + SEQ_LENGTH]\n dataX.append([CHAR_TO_INT[char] for char in seq_in])\n dataY.append(CHAR_TO_INT[seq_out]) \n print(model.out(dataX[np.random.randint(0, len(dataX) - 1)], 400))\n else:\n print.model.out(twitter_pattern, 100)"
] | [
[
"tensorflow.keras.utils.get_file"
]
] |
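Before sampling a seed for generation, the `main.py` cell above slices the downloaded Shakespeare text into fixed-length character windows and integer-encodes them. A self-contained sketch of that windowing step follows, with placeholder values for `SEQ_LENGTH` and the vocabulary; the real ones come from `model.model_const`, which is not shown in this row.

# SEQ_LENGTH and the toy text below are placeholder assumptions.
SEQ_LENGTH = 5
text = "hello world, hello again"
CHAR_TO_INT = {c: i for i, c in enumerate(sorted(set(text)))}

dataX, dataY = [], []
for i in range(len(text) - SEQ_LENGTH):
    window = text[i:i + SEQ_LENGTH]   # SEQ_LENGTH input characters
    target = text[i + SEQ_LENGTH]     # the character the model should predict
    dataX.append([CHAR_TO_INT[c] for c in window])
    dataY.append(CHAR_TO_INT[target])

print(len(dataX), dataX[0], dataY[0])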
adelezaini/MachineLearning | [
"dc3f34f5d509bed6a993705373c46be4da3f97db"
] | [
"Projects/Project1/task45b.py"
] | [
"# The MIT License (MIT)\n#\n# Copyright © 2021 Fridtjof Gjengset, Adele Zaini, Gaute Holen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the “Software”), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of\n# the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT\n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom numpy.core.function_base import logspace\nfrom regan import *\nimport numpy as np\n\nsavefigures = True\n\nnp.random.seed(1234)\n\n# Datapoints (squared root of datapoints -> meshgrid)\nn = 30\n# Paramaters of noise distribution\nmu_N = 0; sigma_N = 0.2\n# Max degree of complexity\nmaxdegree = 20\n\n# Create vanilla dataset:\nx,y,z = create_xyz_dataset(n,mu_N, sigma_N)\n\nlambdas = [10**x for x in [-12, -6, -3, 0, 3]]\n\nrun_plot_compare(z, 100, N=n, k=5,poly_degree = 18,plot=True, saveplots=savefigures, foldername = 'Task5')\n\ncompare_lmd_BS(z, n, lambdas, maxdegree, solver = 'RIDGE', n_resampling = 100, saveplots = savefigures, foldername = 'Task4')\ncompare_lmd_CV(z, n, 5, lambdas, maxdegree, solver = 'RIDGE', saveplots = savefigures, foldername = 'Task4')\ncompare_lmd_CV(z, n, 10, lambdas, maxdegree, solver = 'RIDGE', saveplots = savefigures, foldername = 'Task4')\n\ncompare_lmd_BS(z, n, lambdas, maxdegree, solver = 'LASSO', n_resampling = 100, saveplots = savefigures, foldername = 'Task5')\ncompare_lmd_CV(z, n, 5, lambdas, maxdegree, solver = 'LASSO', saveplots = savefigures, foldername = 'Task5')\ncompare_lmd_CV(z, n, 10, lambdas, maxdegree, solver = 'LASSO', saveplots = savefigures, foldername = 'Task5')\n\n"
] | [
[
"numpy.random.seed"
]
] |
lambdanauts/vilbert-multi-task | [
"12265c3fcd43b24c33544578ae1911fdc52305c8"
] | [
"vilbert/datasets/concept_cap_dataset.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nimport json\nimport logging\nimport math\nimport os\nimport random\n\nimport lmdb\nimport numpy as np\nimport tensorpack.dataflow as td\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.sampler import Sampler\nimport torch.distributed as dist\nimport sys\nimport pdb\n\nimport msgpack\nimport msgpack_numpy\n\nmsgpack_numpy.patch()\n\nMAX_MSGPACK_LEN = 1000000000\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n\ndef iou(anchors, gt_boxes):\n \"\"\"\n anchors: (N, 4) ndarray of float\n gt_boxes: (K, 4) ndarray of float\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n \"\"\"\n N = anchors.shape[0]\n K = gt_boxes.shape[0]\n\n gt_boxes_area = (\n (gt_boxes[:, 2] - gt_boxes[:, 0] + 1) * (gt_boxes[:, 3] - gt_boxes[:, 1] + 1)\n ).reshape(1, K)\n\n anchors_area = (\n (anchors[:, 2] - anchors[:, 0] + 1) * (anchors[:, 3] - anchors[:, 1] + 1)\n ).reshape(N, 1)\n\n boxes = np.repeat(anchors.reshape(N, 1, 4), K, axis=1)\n query_boxes = np.repeat(gt_boxes.reshape(1, K, 4), N, axis=0)\n\n iw = (\n np.minimum(boxes[:, :, 2], query_boxes[:, :, 2])\n - np.maximum(boxes[:, :, 0], query_boxes[:, :, 0])\n + 1\n )\n iw[iw < 0] = 0\n\n ih = (\n np.minimum(boxes[:, :, 3], query_boxes[:, :, 3])\n - np.maximum(boxes[:, :, 1], query_boxes[:, :, 1])\n + 1\n )\n ih[ih < 0] = 0\n\n ua = anchors_area + gt_boxes_area - (iw * ih)\n overlaps = iw * ih / ua\n\n return overlaps\n\n\ndef deserialize_lmdb(ds):\n return msgpack.loads(\n ds[1],\n raw=False,\n max_bin_len=MAX_MSGPACK_LEN,\n max_array_len=MAX_MSGPACK_LEN,\n max_map_len=MAX_MSGPACK_LEN,\n max_str_len=MAX_MSGPACK_LEN,\n )\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for the language model.\"\"\"\n\n def __init__(\n self,\n image_feat=None,\n image_target=None,\n caption=None,\n is_next=None,\n lm_labels=None,\n image_loc=None,\n num_boxes=None,\n overlaps=None,\n ):\n \"\"\"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n tokens_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n tokens_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.image_feat = image_feat\n self.caption = caption\n self.is_next = is_next # nextSentence\n self.lm_labels = lm_labels # masked words for language model\n self.image_loc = image_loc\n self.image_target = image_target\n self.num_boxes = num_boxes\n self.overlaps = overlaps\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(\n self,\n input_ids=None,\n input_mask=None,\n segment_ids=None,\n is_next=None,\n lm_label_ids=None,\n image_feat=None,\n image_target=None,\n image_loc=None,\n image_label=None,\n image_mask=None,\n masked_label=None,\n ):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.is_next = is_next\n self.lm_label_ids = lm_label_ids\n self.image_feat = image_feat\n self.image_loc = image_loc\n self.image_label = image_label\n self.image_target = image_target\n self.image_mask = image_mask\n self.masked_label = masked_label\n\n\nclass ConceptCapLoaderTrain(object):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n Arguments:\n mode (str, required): mode of dataset to operate in, one of ['train', 'val']\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process\n (default: 0)\n cache (int, optional): cache size to use when loading data,\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. 
(default: False)\n cuda (bool, optional): set to ``True`` and the PyTorch tensors will get preloaded\n to the GPU for you (necessary because this lets us to uint8 conversion on the \n GPU, which is faster).\n \"\"\"\n\n def __init__(\n self,\n corpus_path,\n tokenizer,\n bert_model,\n seq_len,\n encoding=\"utf-8\",\n visual_target=0,\n hard_negative=False,\n batch_size=512,\n shuffle=False,\n num_workers=25,\n cache=10000,\n drop_last=False,\n cuda=False,\n local_rank=-1,\n objective=0,\n visualization=False,\n ):\n TRAIN_DATASET_SIZE = 3119449\n\n if dist.is_available() and local_rank != -1:\n\n num_replicas = dist.get_world_size()\n rank = dist.get_rank()\n\n lmdb_file = os.path.join(\n corpus_path, \"training_feat_part_\" + str(rank) + \".lmdb\"\n )\n else:\n lmdb_file = os.path.join(corpus_path, \"training_feat_all.lmdb\")\n # lmdb_file = os.path.join(corpus_path, \"validation_feat_all.lmdb\")\n\n print(\"Loading from %s\" % lmdb_file)\n\n ds = td.LMDBSerializer.load(lmdb_file, shuffle=False)\n self.num_dataset = len(ds)\n ds = td.LocallyShuffleData(ds, cache)\n caption_path = os.path.join(corpus_path, \"caption_train.json\")\n # caption_path = os.path.join(corpus_path, \"caption_val.json\")\n\n preprocess_function = BertPreprocessBatch(\n caption_path,\n tokenizer,\n bert_model,\n seq_len,\n 36,\n self.num_dataset,\n encoding=\"utf-8\",\n visual_target=visual_target,\n objective=objective,\n )\n\n #ds = td.PrefetchData(ds, 5000, 1)\n ds = td.MapData(ds, preprocess_function)\n # self.ds = td.PrefetchData(ds, 1)\n ds = td.PrefetchDataZMQ(ds, num_workers)\n self.ds = td.BatchData(ds, batch_size)\n # self.ds = ds\n self.ds.reset_state()\n\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n def __iter__(self):\n\n for batch in self.ds.get_data():\n input_ids, input_mask, segment_ids, lm_label_ids, is_next, image_feat, image_loc, image_target, image_label, image_mask, masked_label, image_id = (\n batch\n )\n\n batch_size = input_ids.shape[0]\n\n sum_count = np.sum(masked_label == 0, axis=1, keepdims=True)\n sum_count[sum_count == 0] = 1\n g_image_feat = np.sum(image_feat, axis=1) / sum_count\n image_feat = np.concatenate(\n [np.expand_dims(g_image_feat, axis=1), image_feat], axis=1\n )\n image_feat = np.array(image_feat, dtype=np.float32)\n\n g_image_loc = np.repeat(\n np.array([[0, 0, 1, 1, 1]], dtype=np.float32), batch_size, axis=0\n )\n image_loc = np.concatenate(\n [np.expand_dims(g_image_loc, axis=1), image_loc], axis=1\n )\n\n image_loc = np.array(image_loc, dtype=np.float32)\n g_image_mask = np.repeat(np.array([[1]]), batch_size, axis=0)\n image_mask = np.concatenate([g_image_mask, image_mask], axis=1)\n\n batch = (\n input_ids,\n input_mask,\n segment_ids,\n lm_label_ids,\n is_next,\n image_feat,\n image_loc,\n image_target,\n image_label,\n image_mask,\n )\n\n yield tuple([torch.tensor(data) for data in batch] + [image_id])\n\n def __len__(self):\n return self.ds.size()\n\n\nclass ConceptCapLoaderVal(object):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n Arguments:\n mode (str, required): mode of dataset to operate in, one of ['train', 'val']\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 
0 means that the data will be loaded in the main process\n (default: 0)\n cache (int, optional): cache size to use when loading data,\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: False)\n cuda (bool, optional): set to ``True`` and the PyTorch tensors will get preloaded\n to the GPU for you (necessary because this lets us to uint8 conversion on the \n GPU, which is faster).\n \"\"\"\n\n def __init__(\n self,\n corpus_path,\n tokenizer,\n bert_model,\n seq_len,\n encoding=\"utf-8\",\n visual_target=0,\n batch_size=512,\n shuffle=False,\n num_workers=25,\n cache=5000,\n drop_last=False,\n cuda=False,\n objective=0,\n visualization=False,\n ):\n\n lmdb_file = os.path.join(corpus_path, \"validation_feat_all.lmdb\")\n caption_path = os.path.join(corpus_path, \"caption_val.json\")\n print(\"Loading from %s\" % lmdb_file)\n\n ds = td.LMDBSerializer.load(lmdb_file, shuffle=False)\n self.num_dataset = len(ds)\n preprocess_function = BertPreprocessBatch(\n caption_path,\n tokenizer,\n bert_model,\n seq_len,\n 36,\n self.num_dataset,\n encoding=\"utf-8\",\n visual_target=visual_target,\n visualization=visualization,\n objective=objective,\n )\n\n ds = td.MapData(ds, preprocess_function)\n self.ds = td.BatchData(ds, batch_size)\n self.ds.reset_state()\n\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n def __iter__(self):\n for batch in self.ds.get_data():\n input_ids, input_mask, segment_ids, lm_label_ids, is_next, image_feat, image_loc, image_target, image_label, image_mask, masked_label, image_id = (\n batch\n )\n\n batch_size = input_ids.shape[0]\n sum_count = np.sum(masked_label == 0, axis=1, keepdims=True)\n sum_count[sum_count == 0] = 1\n g_image_feat = np.sum(image_feat, axis=1) / sum_count\n image_feat = np.concatenate(\n [np.expand_dims(g_image_feat, axis=1), image_feat], axis=1\n )\n image_feat = np.array(image_feat, dtype=np.float32)\n\n g_image_loc = np.repeat(\n np.array([[0, 0, 1, 1, 1]], dtype=np.float32), batch_size, axis=0\n )\n image_loc = np.concatenate(\n [np.expand_dims(g_image_loc, axis=1), image_loc], axis=1\n )\n\n image_loc = np.array(image_loc, dtype=np.float32)\n g_image_mask = np.repeat(np.array([[1]]), batch_size, axis=0)\n image_mask = np.concatenate([g_image_mask, image_mask], axis=1)\n\n batch = (\n input_ids,\n input_mask,\n segment_ids,\n lm_label_ids,\n is_next,\n image_feat,\n image_loc,\n image_target,\n image_label,\n image_mask,\n )\n\n yield tuple([torch.tensor(data) for data in batch] + [image_id])\n\n def __len__(self):\n return self.ds.size()\n\n\nclass BertPreprocessBatch(object):\n def __init__(\n self,\n caption_path,\n tokenizer,\n bert_model,\n seq_len,\n region_len,\n data_size,\n split=\"Train\",\n encoding=\"utf-8\",\n visual_target=0,\n visualization=False,\n objective=0,\n ):\n\n self.split = split\n self.seq_len = seq_len\n self.region_len = region_len\n self.tokenizer = tokenizer\n self.visual_target = visual_target\n self.num_caps = data_size\n self.captions = list(json.load(open(caption_path, \"r\")).values())\n self.visualization = visualization\n self.objective = objective\n self.bert_model = bert_model\n\n def __call__(self, data):\n\n image_feature_wp, image_target_wp, image_location_wp, num_boxes, image_h, image_w, image_id, caption = (\n data\n )\n\n image_feature = np.zeros((self.region_len, 2048), 
dtype=np.float32)\n image_target = np.zeros((self.region_len, 1601), dtype=np.float32)\n image_location = np.zeros((self.region_len, 5), dtype=np.float32)\n\n # calculate the IOU here.\n overlaps = iou(image_location_wp, image_location_wp)\n\n num_boxes = int(num_boxes)\n image_feature[:num_boxes] = image_feature_wp\n image_target[:num_boxes] = image_target_wp\n image_location[:num_boxes, :4] = image_location_wp\n\n image_location[:, 4] = (\n (image_location[:, 3] - image_location[:, 1])\n * (image_location[:, 2] - image_location[:, 0])\n / (float(image_w) * float(image_h))\n )\n\n image_location[:, 0] = image_location[:, 0] / float(image_w)\n image_location[:, 1] = image_location[:, 1] / float(image_h)\n image_location[:, 2] = image_location[:, 2] / float(image_w)\n image_location[:, 3] = image_location[:, 3] / float(image_h)\n\n if self.visual_target == 0:\n image_feature = copy.deepcopy(image_feature)\n image_target = copy.deepcopy(image_target)\n else:\n image_feature = copy.deepcopy(image_feature)\n image_target = copy.deepcopy(image_feature)\n\n caption, label = self.random_cap(caption)\n\n tokens_caption = self.tokenizer.encode(caption)\n\n cur_example = InputExample(\n image_feat=image_feature,\n image_target=image_target,\n caption=tokens_caption,\n is_next=label,\n image_loc=image_location,\n num_boxes=num_boxes,\n overlaps=overlaps,\n )\n\n # transform sample to features\n cur_features = self.convert_example_to_features(\n cur_example, self.seq_len, self.tokenizer, self.region_len\n )\n\n cur_tensors = (\n cur_features.input_ids,\n cur_features.input_mask,\n cur_features.segment_ids,\n cur_features.lm_label_ids,\n cur_features.is_next,\n cur_features.image_feat,\n cur_features.image_loc,\n cur_features.image_target,\n cur_features.image_label,\n cur_features.image_mask,\n cur_features.masked_label,\n image_id,\n )\n return cur_tensors\n\n def random_cap(self, caption):\n \"\"\"\n Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences\n from one doc. 
With 50% the second sentence will be a random one from another doc.\n :param index: int, index of sample.\n :return: (str, str, int), sentence 1, sentence 2, isNextSentence Label\n \"\"\"\n\n if self.visualization:\n return caption, 0\n\n if self.objective != 2 and random.random() > 0.5:\n caption = self.get_random_caption()\n label = 1\n else:\n label = 0\n\n return caption, label\n\n def get_random_caption(self):\n \"\"\"\n Get random caption from another document for nextSentence task.\n :return: str, content of one line\n \"\"\"\n # add the hard negative mining objective here.\n rand_doc_idx = random.randint(0, self.num_caps - 1)\n caption = self.captions[rand_doc_idx]\n\n return caption\n\n def convert_example_to_features(\n self, example, max_seq_length, tokenizer, max_region_length\n ):\n \"\"\"\n \"\"\"\n image_feat = example.image_feat\n tokens = example.caption\n image_loc = example.image_loc\n image_target = example.image_target\n num_boxes = int(example.num_boxes)\n is_next = example.is_next\n overlaps = example.overlaps\n\n self._truncate_seq_pair(tokens, max_seq_length - 2)\n\n tokens, tokens_label = self.random_word(tokens, tokenizer, is_next)\n image_feat, image_loc, image_label, masked_label = self.random_region(\n image_feat, image_loc, num_boxes, is_next, overlaps\n )\n\n # concatenate lm labels and account for CLS, SEP, SEP\n lm_label_ids = [-1] + tokens_label + [-1]\n tokens = tokenizer.add_special_tokens_single_sentence(tokens)\n segment_ids = [0] * len(tokens)\n\n input_ids = tokens # tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n # input_ids = input_ids[:1] input_ids[1:]\n input_mask = [1] * (len(input_ids))\n image_mask = [1] * (num_boxes)\n # Zero-pad up to the visual sequence length.\n while len(image_mask) < max_region_length:\n image_mask.append(0)\n image_label.append(-1)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n lm_label_ids.append(-1)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(lm_label_ids) == max_seq_length\n assert len(image_mask) == max_region_length\n assert len(image_label) == max_region_length\n\n features = InputFeatures(\n input_ids=np.array(input_ids),\n input_mask=np.array(input_mask),\n segment_ids=np.array(segment_ids),\n lm_label_ids=np.array(lm_label_ids),\n is_next=np.array(example.is_next),\n image_feat=image_feat,\n image_target=image_target,\n image_loc=image_loc,\n image_label=np.array(image_label),\n image_mask=np.array(image_mask),\n masked_label=masked_label,\n )\n return features\n\n def _truncate_seq_pair(self, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_b)\n if total_length <= max_length:\n break\n\n tokens_b.pop()\n\n def random_word(self, tokens, tokenizer, is_next):\n output_label = []\n\n for i, token in enumerate(tokens):\n prob = random.random()\n # mask token with 15% probability\n\n # if is_next == 1 and self.objective != 0:\n # prob = 1 # not sample mask\n if prob < 0.15 and (not self.visualization):\n prob /= 0.15\n\n # 80% randomly change token to mask token\n if prob < 0.8:\n tokens[i] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)\n\n # 10% randomly change token to random token\n elif prob < 0.9:\n tokens[i] = np.random.randint(len(tokenizer))\n # torch.randint(len(tokenizer), labels.shape, dtype=torch.long)\n\n # -> rest 10% randomly keep current token\n # append current token to output (we will predict these later)\n output_label.append(token)\n else:\n # no masking token (will be ignored by loss function later)\n output_label.append(-1)\n\n return tokens, output_label\n\n def random_region(self, image_feat, image_loc, num_boxes, is_next, overlaps):\n \"\"\"\n \"\"\"\n output_label = []\n masked_label = np.zeros((image_feat.shape[0]))\n\n for i in range(num_boxes):\n prob = random.random()\n # mask token with 15% probability\n\n # if is_next == 1 and self.objective != 0:\n # prob = 1 # if the target is inaligned mask, then not sample mask\n if prob < 0.15 and not self.visualization:\n prob /= 0.15\n\n # 80% randomly change token to mask token\n if prob < 0.9:\n image_feat[i] = 0\n # mask the overlap regions into zeros\n masked_label = np.logical_or(masked_label, overlaps[i] > 0.4)\n\n # 10% randomly change token to random token\n # elif prob < 0.9:\n # tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]\n\n # -> rest 10% randomly keep current token\n # append current token to output (we will predict these later)\n output_label.append(1)\n else:\n # no masking token (will be ignored by loss function later)\n output_label.append(-1)\n\n return image_feat, image_loc, output_label, masked_label\n\n\nclass ConceptCapLoaderRetrieval(object):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n Arguments:\n mode (str, required): mode of dataset to operate in, one of ['train', 'val']\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process\n (default: 0)\n cache (int, optional): cache size to use when loading data,\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. 
(default: False)\n cuda (bool, optional): set to ``True`` and the PyTorch tensors will get preloaded\n to the GPU for you (necessary because this lets us to uint8 conversion on the \n GPU, which is faster).\n \"\"\"\n\n def __init__(\n self,\n corpus_path,\n tokenizer,\n seq_len,\n encoding=\"utf-8\",\n visual_target=0,\n batch_size=512,\n shuffle=False,\n num_workers=10,\n cache=50000,\n drop_last=False,\n cuda=False,\n ):\n\n lmdb_file = \"/coc/dataset/conceptual_caption/validation_feat_all.lmdb\"\n if not os.path.exists(lmdb_file):\n lmdb_file = \"/coc/pskynet2/jlu347/multi-modal-bert/data/conceptual_caption/validation_feat_all.lmdb\"\n caption_path = \"/coc/pskynet2/jlu347/multi-modal-bert/data/conceptual_caption/caption_val.json\"\n\n print(\"Loading from %s\" % lmdb_file)\n\n ds = td.LMDBSerializer.load(lmdb_file, shuffle=False)\n self.num_dataset = len(ds)\n preprocess_function = BertPreprocessRetrieval(\n caption_path,\n tokenizer,\n seq_len,\n 36,\n 1000,\n encoding=\"utf-8\",\n visual_target=visual_target,\n )\n\n ds = td.MapData(ds, preprocess_function)\n self.ds = td.BatchData(ds, 1)\n self.ds.reset_state()\n\n self.batch_size = 1\n self.num_workers = num_workers\n self._entry = []\n\n self.features_all = np.zeros((1000, 37, 2048), dtype=np.float32)\n self.spatials_all = np.zeros((1000, 37, 5), dtype=np.float32)\n self.image_mask_all = np.zeros((1000, 37), dtype=np.float32)\n self.image_ids = []\n # load first 1000 file here.\n for i, batch in enumerate(self.ds.get_data()):\n if i >= 1000:\n break\n input_ids, input_mask, segment_ids, is_next, image_feat, image_loc, image_mask, image_id, caption = (\n batch\n )\n\n batch_size = input_ids.shape[0]\n g_image_feat = np.sum(image_feat, axis=1) / np.sum(\n image_mask, axis=1, keepdims=True\n )\n image_feat = np.concatenate(\n [np.expand_dims(g_image_feat, axis=1), image_feat], axis=1\n )\n image_feat = np.array(image_feat, dtype=np.float32)\n\n g_image_loc = np.repeat(\n np.array([[0, 0, 1, 1, 1]], dtype=np.float32), batch_size, axis=0\n )\n image_loc = np.concatenate(\n [np.expand_dims(g_image_loc, axis=1), image_loc], axis=1\n )\n\n image_loc = np.array(image_loc, dtype=np.float32)\n g_image_mask = np.repeat(np.array([[1]]), batch_size, axis=0)\n image_mask = np.concatenate([g_image_mask, image_mask], axis=1)\n\n batch = (input_ids, input_mask, segment_ids, image_id, caption)\n self._entry.append(batch)\n\n self.features_all[i] = image_feat\n self.image_mask_all[i] = np.array(image_mask)\n self.spatials_all[i] = image_loc\n self.image_ids.append(image_id)\n sys.stdout.write(\"%d/%d\\r\" % (i, 1000))\n sys.stdout.flush()\n\n def __iter__(self):\n\n for index in range(self.__len__()):\n caption_idx = int(index / 2)\n image_idx = index % 2\n\n if image_idx == 0:\n image_entries = self.image_ids[:500]\n features_all = self.features_all[:500]\n spatials_all = self.spatials_all[:500]\n image_mask_all = self.image_mask_all[:500]\n\n else:\n image_entries = self.image_ids[500:]\n features_all = self.features_all[500:]\n spatials_all = self.spatials_all[500:]\n image_mask_all = self.image_mask_all[500:]\n\n caption, input_mask, segment_ids, txt_image_id, caption = self._entry[\n caption_idx\n ]\n target_all = np.zeros((500))\n for i, image_id in enumerate(image_entries):\n if image_id == txt_image_id:\n target_all[i] = 1\n\n batch = (\n features_all,\n spatials_all,\n image_mask_all,\n caption,\n input_mask,\n segment_ids,\n target_all,\n caption_idx,\n image_idx,\n )\n batch = [torch.tensor(data) for data in batch]\n 
batch.append(txt_image_id)\n batch.append(caption)\n\n yield batch\n\n def __len__(self):\n return len(self._entry) * 2\n\n\nclass BertPreprocessRetrieval(object):\n def __init__(\n self,\n caption_path,\n tokenizer,\n seq_len,\n region_len,\n data_size,\n split=\"Train\",\n encoding=\"utf-8\",\n visual_target=0,\n ):\n\n self.split = split\n self.seq_len = seq_len\n self.region_len = region_len\n self.tokenizer = tokenizer\n self.visual_target = visual_target\n self.num_caps = data_size\n self.captions = list(json.load(open(caption_path, \"r\")).values())[:data_size]\n\n def __call__(self, data):\n\n image_feature_wp, image_target_wp, image_location_wp, num_boxes, image_h, image_w, image_id, caption = (\n data\n )\n\n image_feature = np.zeros((self.region_len, 2048), dtype=np.float32)\n image_target = np.zeros((self.region_len, 1601), dtype=np.float32)\n image_location = np.zeros((self.region_len, 5), dtype=np.float32)\n\n num_boxes = int(num_boxes)\n image_feature[:num_boxes] = image_feature_wp\n image_target[:num_boxes] = image_target_wp\n image_location[:num_boxes, :4] = image_location_wp\n\n image_location[:, 4] = (\n (image_location[:, 3] - image_location[:, 1])\n * (image_location[:, 2] - image_location[:, 0])\n / (float(image_w) * float(image_h))\n )\n\n image_location[:, 0] = image_location[:, 0] / float(image_w)\n image_location[:, 1] = image_location[:, 1] / float(image_h)\n image_location[:, 2] = image_location[:, 2] / float(image_w)\n image_location[:, 3] = image_location[:, 3] / float(image_h)\n\n label = 0\n\n tokens_caption = self.tokenizer.tokenize(caption)\n cur_example = InputExample(\n image_feat=image_feature,\n image_target=image_target,\n caption=tokens_caption,\n is_next=label,\n image_loc=image_location,\n num_boxes=num_boxes,\n )\n\n # transform sample to features\n cur_features = self.convert_example_to_features(\n cur_example, self.seq_len, self.tokenizer, self.region_len\n )\n\n cur_tensors = (\n cur_features.input_ids,\n cur_features.input_mask,\n cur_features.segment_ids,\n cur_features.is_next,\n cur_features.image_feat,\n cur_features.image_loc,\n cur_features.image_mask,\n float(image_id),\n caption,\n )\n return cur_tensors\n\n def convert_example_to_features(\n self, example, max_seq_length, tokenizer, max_region_length\n ):\n \"\"\"\n Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with\n IDs, LM labels, input_mask, CLS and SEP tokens etc.\n :param example: InputExample, containing sentence input as strings and is_next label\n :param max_seq_length: int, maximum length of sequence.\n :param tokenizer: Tokenizer\n :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)\n \"\"\"\n image_feat = example.image_feat\n caption = example.caption\n image_loc = example.image_loc\n # image_target = example.image_target\n num_boxes = int(example.num_boxes)\n self._truncate_seq_pair(caption, max_seq_length - 2)\n caption, caption_label = self.random_word(caption, tokenizer)\n caption_label = None\n image_feat, image_loc, image_label, masked_label = self.random_region(\n image_feat, image_loc, num_boxes\n )\n image_label = None\n\n tokens = []\n segment_ids = []\n\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n\n for token in caption:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n # input_ids = input_ids[:1] input_ids[1:]\n input_mask = [1] * (len(input_ids))\n image_mask = [1] * (num_boxes)\n # Zero-pad up to the visual sequence length.\n while len(image_mask) < max_region_length:\n image_mask.append(0)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(image_mask) == max_region_length\n\n features = InputFeatures(\n input_ids=np.array(input_ids),\n input_mask=np.array(input_mask),\n segment_ids=np.array(segment_ids),\n is_next=np.array(example.is_next),\n image_feat=image_feat,\n image_loc=image_loc,\n image_mask=np.array(image_mask),\n )\n return features\n\n def _truncate_seq_pair(self, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_b)\n if total_length <= max_length:\n break\n\n tokens_b.pop()\n"
] | [
[
"numpy.concatenate",
"torch.distributed.is_available",
"torch.distributed.get_world_size",
"numpy.array",
"numpy.logical_or",
"numpy.zeros",
"numpy.minimum",
"numpy.sum",
"torch.tensor",
"torch.distributed.get_rank",
"numpy.expand_dims",
"numpy.maximum"
]
] |
mnmueller/auto_LiRPA | [
"7e1fbf12d857ef8d411d80eef1bd73d9ae4ba3be"
] | [
"examples/vision/models/densenet_no_bn.py"
] | [
"'''DenseNet in PyTorch.\nhttps://github.com/kuangliu/pytorch-cifar\n'''\n\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Bottleneck(nn.Module):\n def __init__(self, in_planes, growth_rate):\n super(Bottleneck, self).__init__()\n # self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=True)\n # self.bn2 = nn.BatchNorm2d(4*growth_rate)\n self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=True)\n\n def forward(self, x):\n # out = self.conv1(F.relu(self.bn1(x)))\n # out = self.conv2(F.relu(self.bn2(out)))\n out = self.conv1(F.relu(x))\n out = self.conv2(F.relu(out))\n out = torch.cat([out,x], 1)\n return out\n\n\nclass Transition(nn.Module):\n def __init__(self, in_planes, out_planes):\n super(Transition, self).__init__()\n # self.bn = nn.BatchNorm2d(in_planes)\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True)\n\n def forward(self, x):\n out = self.conv(F.relu(x))\n out = F.avg_pool2d(out, 2)\n return out\n\n\nclass DenseNet(nn.Module):\n def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):\n super(DenseNet, self).__init__()\n self.growth_rate = growth_rate\n\n num_planes = 2*growth_rate\n self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=True)\n\n self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])\n num_planes += nblocks[0]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans1 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])\n num_planes += nblocks[1]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans2 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])\n num_planes += nblocks[2]*growth_rate\n # out_planes = int(math.floor(num_planes*reduction))\n # self.trans3 = Transition(num_planes, out_planes)\n # num_planes = out_planes\n\n # self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])\n # num_planes += nblocks[3]*growth_rate\n\n # self.bn = nn.BatchNorm2d(num_planes)\n self.linear1 = nn.Linear(9216, 512)\n self.linear2 = nn.Linear(512, num_classes)\n\n\n def _make_dense_layers(self, block, in_planes, nblock):\n layers = []\n for i in range(nblock):\n layers.append(block(in_planes, self.growth_rate))\n in_planes += self.growth_rate\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.trans1(self.dense1(out))\n out = self.trans2(self.dense2(out))\n out = self.dense3(out)\n # out = self.dense4(out)\n out = F.relu(out)\n out = out.view(out.size(0), -1)\n out = F.relu(self.linear1(out))\n out = self.linear2(out)\n\n return out\n\n# def DenseNet121():\n# return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32)\n#\n# def DenseNet169():\n# return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32)\n#\n# def DenseNet201():\n# return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32)\n#\n# def DenseNet161():\n# return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48)\n\ndef Densenet_cifar_wobn():\n return DenseNet(Bottleneck, [2,4,6], growth_rate=16)\n\n\nif __name__ == \"__main__\":\n net = Densenet_cifar_wobn()\n x = torch.randn(1,3,32,32)\n y = net(x)\n print(net)\n print(y)\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.functional.avg_pool2d",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.randn"
]
] |
leliel12/scikitcriteria | [
"f13a75b5a39cd2d3db30a37b69e61a2814a5cea4"
] | [
"skcriteria/preprocessing/scalers.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))\n# Copyright (c) 2016-2021, Cabral, Juan; Luczywo, Nadia\n# Copyright (c) 2022, QuatroPe\n# All rights reserved.\n\n# =============================================================================\n# DOCS\n# =============================================================================\n\n\"\"\"Functionalities for scale values based on differrent strategies.\n\nIn addition to the Transformers, a collection of an MCDA agnostic functions\nare offered to scale an array along an arbitrary axis.\n\n\"\"\"\n\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\n\nimport numpy as np\nfrom numpy import linalg\n\nfrom ..core import SKCMatrixAndWeightTransformerABC\nfrom ..utils import doc_inherit\n\n# =============================================================================\n# STANDAR SCALER\n# =============================================================================\n\n\ndef scale_by_stdscore(arr, axis=None):\n r\"\"\"Standardize the values by removing the mean and divided by the std-dev.\n\n The standard score of a sample `x` is calculated as:\n\n .. math::\n\n z = (x - \\mu) / \\sigma\n\n Parameters\n ----------\n arr: :py:class:`numpy.ndarray` like.\n A array with values\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n Returns\n -------\n :py:class:`numpy.ndarray`\n array of ratios\n\n Examples\n --------\n .. code-block:: pycon\n\n >>> from skcriteria.preprocess import scale_by_stdscore\n >>> mtx = [[1, 2], [3, 4]]\n\n # ratios with the max value of the array\n >>> scale_by_stdscore(mtx)\n array([[-1.34164079, -0.4472136 ],\n [ 0.4472136 , 1.34164079]])\n\n # ratios with the max value of the arr by column\n >>> scale_by_stdscore(mtx, axis=0)\n array([[-1., -1.],\n [ 1., 1.]])\n\n # ratios with the max value of the array by row\n >>> scale_by_stdscore(mtx, axis=1)\n array([[-1., 1.],\n [-1., 1.]])\n\n \"\"\"\n arr = np.asarray(arr, dtype=float)\n mean = np.mean(arr, axis=axis, keepdims=True)\n std = np.std(arr, axis=axis, keepdims=True)\n return (arr - mean) / std\n\n\nclass StandarScaler(SKCMatrixAndWeightTransformerABC):\n \"\"\"Standardize the dm by removing the mean and scaling to unit variance.\n\n The standard score of a sample `x` is calculated as:\n\n z = (x - u) / s\n\n where `u` is the mean of the values, and `s` is the standard deviation\n of the training samples or one if `with_std=False`.\n\n \"\"\"\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)\n def _transform_weights(self, weights):\n return scale_by_stdscore(weights, axis=None)\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)\n def _transform_matrix(self, matrix):\n return scale_by_stdscore(matrix, axis=0)\n\n\n# =============================================================================\n# VECTOR SCALER\n# =============================================================================\n\n\ndef scale_by_vector(arr, axis=None):\n r\"\"\"Divide the array by norm of values defined vector along an axis.\n\n Calculates the set of ratios as the square roots of the sum of squared\n responses of a given axis as denominators. If *axis* is *None* sum all\n the array.\n\n .. 
math::\n\n \\overline{X}_{ij} =\n \\frac{X_{ij}}{\\sqrt{\\sum\\limits_{j=1}^m X_{ij}^{2}}}\n\n Parameters\n ----------\n arr: :py:class:`numpy.ndarray` like.\n A array with values\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n Returns\n -------\n :py:class:`numpy.ndarray`\n array of ratios\n\n Examples\n --------\n .. code-block:: pycon\n\n >>> from skcriteria.preprocess import scale_by_vector\n >>> mtx = [[1, 2], [3, 4]]\n\n # ratios with the vector value of the array\n >>> scale_by_vector(mtx)\n array([[ 0.18257418, 0.36514837],\n [ 0.54772252, 0.73029673]])\n\n # ratios by column\n >>> scale_by_vector(mtx, axis=0)\n array([[ 0.31622776, 0.44721359],\n [ 0.94868326, 0.89442718]])\n\n # ratios by row\n >>> scale_by_vector(mtx, axis=1)\n array([[ 0.44721359, 0.89442718],\n [ 0.60000002, 0.80000001]])\n\n \"\"\"\n arr = np.asarray(arr, dtype=float)\n frob = linalg.norm(arr, None, axis=axis)\n return arr / frob\n\n\nclass VectorScaler(SKCMatrixAndWeightTransformerABC):\n r\"\"\"Scaler based on the norm of the vector..\n\n .. math::\n\n \\overline{X}_{ij} =\n \\frac{X_{ij}}{\\sqrt{\\sum\\limits_{j=1}^m X_{ij}^{2}}}\n\n If the scaler is configured to work with 'matrix' each value\n of each criteria is divided by the norm of the vector defined by the values\n of that criteria.\n In other hand if is configure to work with 'weights',\n each value of weight is divided by the vector defined by the values\n of the weights.\n\n \"\"\"\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)\n def _transform_weights(self, weights):\n return scale_by_vector(weights, axis=None)\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)\n def _transform_matrix(self, matrix):\n return scale_by_vector(matrix, axis=0)\n\n\n# =============================================================================\n# MINMAX\n# =============================================================================\n\n\ndef scale_by_minmax(arr, axis=None):\n r\"\"\"Fraction of the range normalizer.\n\n Subtracts to each value of the array the minimum and then divides\n it by the total range.\n\n .. math::\n\n \\overline{X}_{ij} =\n \\frac{X_{ij} - \\min{X_{ij}}}{\\max_{X_{ij}} - \\min_{X_{ij}}}\n\n Parameters\n ----------\n arr: :py:class:`numpy.ndarray` like.\n A array with values\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n Returns\n -------\n :py:class:`numpy.ndarray`\n array of ratios\n\n\n Examples\n --------\n .. code-block:: pycon\n\n >>> from skcriteria.preprocess import scale_by_minmax\n >>> mtx = [[1, 2], [3, 4]]\n\n # ratios with the range of the array\n >>> scale_by_minmax(mtx)\n array([[0. , 0.33333333],\n [0.66666667, 1. ]])\n\n # ratios with the range by column\n >>> scale_by_minmax(mtx, axis=0)\n array([[0., 0.],\n [1., 1.]])\n\n # ratios with the range by row\n >>> scale_by_minmax(mtx, axis=1)\n array([[0., 1.],\n [0., 1.]])\n\n \"\"\"\n arr = np.asarray(arr, dtype=float)\n minval = np.min(arr, axis=axis, keepdims=True)\n maxval = np.max(arr, axis=axis, keepdims=True)\n return (arr - minval) / (maxval - minval)\n\n\nclass MinMaxScaler(SKCMatrixAndWeightTransformerABC):\n r\"\"\"Scaler based on the range.\n\n .. 
math::\n\n \\overline{X}_{ij} =\n \\frac{X_{ij} - \\min{X_{ij}}}{\\max_{X_{ij}} - \\min_{X_{ij}}}\n\n If the scaler is configured to work with 'matrix' each value\n of each criteria is divided by the range of that criteria.\n In other hand if is configure to work with 'weights',\n each value of weight is divided by the range the weights.\n\n \"\"\"\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)\n def _transform_weights(self, weights):\n return scale_by_minmax(weights, axis=None)\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)\n def _transform_matrix(self, matrix):\n return scale_by_minmax(matrix, axis=0)\n\n\n# =============================================================================\n# SUM\n# =============================================================================\n\n\ndef scale_by_sum(arr, axis=None):\n r\"\"\"Divide of every value on the array by sum of values along an axis.\n\n .. math::\n\n \\overline{X}_{ij} = \\frac{X_{ij}}{\\sum\\limits_{j=1}^m X_{ij}}\n\n Parameters\n ----------\n arr: :py:class:`numpy.ndarray` like.\n A array with values\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n Returns\n -------\n :py:class:`numpy.ndarray`\n array of ratios\n\n Examples\n --------\n .. code-block:: pycon\n\n >>> from skcriteria.preprocess import scale_by_sum\n >>> mtx = [[1, 2], [3, 4]]\n\n >>> scale_by_sum(mtx) # ratios with the sum of the array\n array([[ 0.1 , 0.2 ],\n [ 0.30000001, 0.40000001]])\n\n # ratios with the sum of the array by column\n >>> scale_by_sum(mtx, axis=0)\n array([[ 0.25 , 0.33333334],\n [ 0.75 , 0.66666669]])\n\n # ratios with the sum of the array by row\n >>> scale_by_sum(mtx, axis=1)\n array([[ 0.33333334, 0.66666669],\n [ 0.42857143, 0.5714286 ]])\n\n \"\"\"\n arr = np.asarray(arr, dtype=float)\n sumval = np.sum(arr, axis=axis, keepdims=True)\n return arr / sumval\n\n\nclass SumScaler(SKCMatrixAndWeightTransformerABC):\n r\"\"\"Scalerbased on the total sum of values.\n\n .. math::\n\n \\overline{X}_{ij} = \\frac{X_{ij}}{\\sum\\limits_{j=1}^m X_{ij}}\n\n If the scaler is configured to work with 'matrix' each value\n of each criteria is divided by the total sum of all the values of that\n criteria.\n In other hand if is configure to work with 'weights',\n each value of weight is divided by the total sum of all the weights.\n\n \"\"\"\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)\n def _transform_weights(self, weights):\n return scale_by_sum(weights, axis=None)\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)\n def _transform_matrix(self, matrix):\n return scale_by_sum(matrix, axis=0)\n\n\n# =============================================================================\n# MAX\n# =============================================================================\n\n\ndef scale_by_max(arr, axis=None):\n r\"\"\"Divide of every value on the array by max value along an axis.\n\n .. math::\n\n \\overline{X}_{ij} = \\frac{X_{ij}}{\\max_{X_{ij}}}\n\n Parameters\n ----------\n arr: :py:class:`numpy.ndarray` like.\n A array with values\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n Returns\n -------\n :py:class:`numpy.ndarray`\n array of ratios\n\n Examples\n --------\n .. 
code-block:: pycon\n\n >>> from skcriteria.preprocess import scale_by_max\n >>> mtx = [[1, 2], [3, 4]]\n\n # ratios with the max value of the array\n >>> scale_by_max(mtx)\n array([[ 0.25, 0.5 ],\n [ 0.75, 1. ]])\n\n # ratios with the max value of the arr by column\n >>> scale_by_max(mtx, axis=0)\n array([[ 0.33333334, 0.5],\n [ 1. , 1. ]])\n\n # ratios with the max value of the array by row\n >>> scale_by_max(mtx, axis=1)\n array([[ 0.5 , 1.],\n [ 0.75, 1.]])\n\n \"\"\"\n arr = np.asarray(arr, dtype=float)\n maxval = np.max(arr, axis=axis, keepdims=True)\n return arr / maxval\n\n\nclass MaxScaler(SKCMatrixAndWeightTransformerABC):\n r\"\"\"Scaler based on the maximum values.\n\n .. math::\n\n \\overline{X}_{ij} = \\frac{X_{ij}}{\\max_{X_{ij}}}\n\n If the scaler is configured to work with 'matrix' each value\n of each criteria is divided by the maximum value of that criteria.\n In other hand if is configure to work with 'weights',\n each value of weight is divided by the maximum value the weights.\n\n \"\"\"\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)\n def _transform_weights(self, weights):\n return scale_by_max(weights, axis=None)\n\n @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)\n def _transform_matrix(self, matrix):\n return scale_by_max(matrix, axis=0)\n"
] | [
[
"numpy.max",
"numpy.linalg.norm",
"numpy.asarray",
"numpy.sum",
"numpy.min",
"numpy.mean",
"numpy.std"
]
] |
sdss/apogee_drp | [
"20639052e56413a1afd862dfbf5d0262863fd98e"
] | [
"python/apogee_drp/utils/yanny.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\"\"\"Python library for reading & writing yanny files.\n\nyanny is an object-oriented interface to FTCL/yanny data files following\nthese specifications_.\n\nThe format of the returned object is similar to that returned by\n``read_yanny()`` in the efftickle perl package (in the yannytools product).\n\nCurrently multidimensional arrays are only supported for type ``char``, and a\nclose reading of the specifications indicates that multidimensional arrays\nwere only ever intended to be supported for type ``char``. So no\nmultidimensional arrays, sorry.\n\n.. _specifications: http://www.sdss3.org/dr8/software/par.php\n\"\"\"\n#\n# Modules\n#\nfrom __future__ import print_function\nimport collections\nimport re\nimport os\nimport os.path\nimport datetime\nimport numpy\n# commented out by parejkoj\nimport six\n#from astropy.extern import six\n\nif six.PY3:\n long = int\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"Possible nested set\")\n\n#\n# Classes\n#\nclass yanny(collections.OrderedDict) :\n \"\"\"An object interface to a yanny file.\n\n Create a yanny object using a yanny file, `filename`. If the file exists,\n it is read, & the dict structure of the object will be basically the\n same as that returned by ``read_yanny()`` in the efftickle package.\n\n If the file does not exist, or if no filename is given, a blank\n structure is returned. Other methods allow for subsequent writing\n to the file.\n\n Parameters\n ----------\n filename : str or file-like, optional\n The name of a yanny file or a file-like object representing a yanny file.\n np : bool, optional\n If ``True``, data in a yanny file will be converted into a NumPy record\n array. Default is ``False``.\n debug : bool, optional\n If ``True``, some simple debugging statements will be turned on. Default\n is ``False``.\n\n Attributes\n ----------\n np : bool\n If True, data in a yanny file will be converted into a NumPy record\n array.\n debug : bool\n If True, some simple debugging statements will be turned on.\n filename : str\n The name of a yanny parameter file. If a file-like object was used\n to initialize the object, this will have the value 'in_memory.par'.\n _contents : str\n The complete contents of a yanny parameter file.\n _struct_type_caches : dict\n A dictionary of dictionaries, one dictionary for every structure\n definition in a yanny parameter file. Contains the types of\n each column\n _struct_isarray_caches : dict\n A dictionary of dictionaries, one dictionary for every structure\n definition in a yanny parameter file. Contains a boolean value\n for every column.\n _enum_cache : dict\n Initially ``None``, this attribute is initialized the first time\n the ``isenum()`` method is called. The keyword is the name of the\n enum type, the value is a list of the possible values of that type.\n\n \"\"\"\n #\n #\n #\n @staticmethod\n def get_token(string):\n \"\"\"Removes the first 'word' from string.\n\n If the 'word' is enclosed in double quotes, it returns the\n contents of the double quotes. If the 'word' is enclosed in\n braces, it returns the contents of the braces, but does not\n attempt to split the array. If the 'word' is the last word of the\n string, remainder is set equal to the empty string. 
This is\n basically a wrapper on some convenient regular expressions.\n\n Parameters\n ----------\n string : str\n A string containing words.\n\n Returns\n -------\n get_token : tuple\n A tuple containing the first word and the remainder of the string.\n\n Examples\n --------\n >>> from pydl.pydlutils.yanny import yanny\n >>> yanny.get_token(\"The quick brown fox\")\n ('The', 'quick brown fox')\n \"\"\"\n if string[0] == '\"':\n (word, remainder) = re.search(r'^\"([^\"]*)\"\\s*(.*)',\n string).groups()\n elif string[0] == '{':\n (word, remainder) = re.search(r'^\\{\\s*([^}]*)\\s*\\}\\s*(.*)',\n string).groups()\n else:\n try:\n (word, remainder) = re.split(r'\\s+',string,1)\n except ValueError:\n (word, remainder) = (string, '')\n if remainder is None:\n remainder = ''\n return (word,remainder)\n #\n #\n #\n @staticmethod\n def protect(x):\n \"\"\"Used to appropriately quote string that might contain whitespace.\n\n This method is mostly for internal use by the yanny object.\n\n Parameters\n ----------\n x : str\n The data to protect.\n\n Returns\n -------\n protect : str\n The data with white space protected by quotes.\n\n Examples\n --------\n >>> from pydl.pydlutils.yanny import yanny\n >>> yanny.protect('This string contains whitespace.')\n '\"This string contains whitespace.\"'\n >>> yanny.protect('This string contains a #hashtag.')\n '\"This string contains a #hashtag.\"'\n \"\"\"\n if isinstance(x,numpy.bytes_):\n s = x.decode()\n else:\n s = str(x)\n if len(s) == 0 or s.find('#') >= 0 or re.search(r'\\s+',s) is not None:\n return '\"' + s + '\"'\n else:\n return s\n #\n #\n #\n @staticmethod\n def trailing_comment(line):\n \"\"\"Identify a trailing comment and strip it.\n\n This routine works on the theory that a properly quoted comment mark\n will be surrounted by an odd number of double quotes, & we can\n skip to searching for the last one in the line.\n\n Parameters\n ----------\n line : str\n A line from a yanny file potentially containing trailing comments.\n\n Returns\n -------\n trailing_comment : str\n The line with any trailing comment and any residual white space\n trimmed off.\n\n Bugs\n ----\n This may fail in certain pathological cases, for example if a\n real trailing comment contains a single double-quote::\n\n # a 'pathological\" trailing comment\n\n or if someone is over-enthusiastically commenting::\n\n # # # # # I like # characters.\n\n Examples\n --------\n >>> from pydl.pydlutils.yanny import yanny\n >>> yanny.trailing_comment('mystruct 1234 \"#hashtag\" # a comment.')\n 'mystruct 1234 \"#hashtag\"'\n >>> yanny.trailing_comment('mystruct 1234 \"#hashtag\" # a \"comment\".')\n 'mystruct 1234 \"#hashtag\"'\n \"\"\"\n lastmark = line.rfind('#')\n if lastmark >= 0:\n #\n # Count the number of double quotes in the remainder of the line\n #\n if (len([c for c in line[lastmark:] if c == '\"']) % 2) == 0:\n #\n # Even number of quotes\n #\n return line[0:lastmark].rstrip()\n return line\n #\n #\n #\n @staticmethod\n def dtype_to_struct(dt,structname='mystruct',enums=None):\n \"\"\"Convert a NumPy dtype object describing a record array to\n a typedef struct statement.\n\n The second argument is the name of the structure.\n If any of the columns are enum types, enums must\n be a dictionary with the keys the column names, and the values\n are a tuple containing the name of the enum type as the first item\n and a tuple or list of possible values as the second item.\n\n Parameters\n ----------\n dt : numpy.dtype\n The dtype of a NumPy record array.\n structname : str, 
optional\n The name to give the structure in the yanny file. Defaults to 'MYSTRUCT'.\n enums : dict, optional\n A dictionary containing enum information. See details above.\n\n Returns\n -------\n dtype_to_struct : dict\n A dictionary suitable for setting the 'symbols' dictionary of a new\n yanny object.\n\n Examples\n --------\n \"\"\"\n dtmap = {'i2':'short','i4':'int','i8':'long','f4':'float',\n 'f8':'double'}\n returnenums = list()\n if enums is not None:\n for e in enums:\n lines = list()\n lines.append('typedef enum {')\n for n in enums[e][1]:\n lines.append(\" {0},\".format(n))\n lines[-1] = lines[-1].strip(',')\n lines.append('}} {0};'.format(enums[e][0].upper()))\n returnenums.append(\"\\n\".join(lines))\n #lines.append('')\n lines = list()\n lines.append('typedef struct {')\n for c in dt.names:\n if dt[c].kind == 'V':\n t = dt[c].subdtype[0].str[1:]\n l = dt[c].subdtype[1][0]\n s = dt[c].subdtype[0].itemsize\n else:\n t = dt[c].str[1:]\n l = 0\n s = dt[c].itemsize\n line = ' '\n if t[0] == 'S':\n if c in enums:\n line += enums[c][0].upper()\n else:\n line += 'char'\n else:\n line += dtmap[t]\n line += ' {0}'.format(c)\n if l > 0:\n line += \"[{0:d}]\".format(l)\n if t[0] == 'S' and c not in enums:\n line += \"[{0:d}]\".format(s)\n line += ';'\n lines.append(line)\n lines.append('}} {0};'.format(structname.upper()))\n return {structname.upper():list(dt.names),'enum':returnenums,'struct':[\"\\n\".join(lines)]}\n #\n #\n #\n def __init__(self,filename=None,string=None,np=False,debug=False):\n \"\"\"Create a yanny object using a yanny file.\n \n @filename Contents of file will be read from provided filename.\n @string Object will be created from provided string.\n @np Convert numeric data into NumPy arrays?\n @debug Turn on simple debugging?\n \"\"\"\n super(yanny, self).__init__()\n #\n # The symbol hash is inherited from the old read_yanny\n #\n self['symbols'] = dict()\n #\n # Create special attributes that contain the internal status of the object\n # this should prevent overlap with keywords in the data files\n #\n self.filename = ''\n self._contents = ''\n #\n # Since the re is expensive, cache the structure types keyed by the field.\n # Create a dictionary for each structure found.\n #\n self._struct_type_caches = dict()\n self._struct_isarray_caches = dict()\n self._enum_cache = None\n #\n # Optionally convert numeric data into NumPy arrays\n #\n self.np = np\n #\n # Turn on simple debugging\n #\n self.debug = debug\n\n if all([filename, string]):\n raise Exception(\"A yanny object can only be initialized with a filename OR string.\")\n\n #\n # If the file exists, read it\n #\n if filename is not None:\n #\n # Handle file-like objects\n #\n if isinstance(filename, six.string_types):\n if os.access(filename,os.R_OK):\n self.filename = filename\n with open(filename,'r') as f:\n self._contents = f.read()\n else:\n #\n # Assume file-like\n #\n self.filename = 'in_memory.par'\n self._contents = filename.read()\n self._parse()\n elif string is not None:\n #\n # Handle content passed in as string\n #\n if isinstance(string, six.string_types):\n self.filename = 'in_memory.par'\n self._contents = string\n self._parse()\n else:\n raise Exception(\"The value passed in for 'string' is not a string ({0}).\"\\\n .format(type(string)))\n return\n #\n #\n #\n def __str__(self):\n \"\"\"Implement the ``str()`` function for yanny objects.\n\n Simply prints the current contents of the yanny file.\n \"\"\"\n return self._contents\n #\n #\n #\n def __eq__(self,other):\n \"\"\"Test two yanny 
objects for equality.\n\n Two yanny objects are assumed to be equal if their contents are equal.\n \"\"\"\n if isinstance(other,yanny):\n return str(other) == str(self)\n return NotImplemented\n #\n #\n #\n def __ne__(self,other):\n \"\"\"Test two yanny objects for inequality.\n\n Two yanny objects are assumed to be unequal if their contents are unequal.\n \"\"\"\n if isinstance(other,yanny):\n return str(other) != str(self)\n return NotImplemented\n #\n #\n #\n def __bool__(self):\n \"\"\"Give a yanny object a definite truth value.\n\n A yanny object is considered ``True`` if its contents are non-zero.\n \"\"\"\n return len(self._contents) > 0\n\n # `__nonzero__` is needed for Python 2.\n # Python 3 uses `__bool__`.\n # http://stackoverflow.com/a/2233850/498873\n __nonzero__=__bool__\n\n #\n #\n #\n def type(self,structure,variable):\n \"\"\"Returns the type of a variable defined in a structure.\n\n Returns ``None`` if the structure or the variable is undefined.\n\n Parameters\n ----------\n structure : str\n The name of the structure that contains `variable`.\n variable : str\n The name of the column whose type you want.\n\n Returns\n -------\n type : str\n The type of the variable.\n \"\"\"\n if structure not in self:\n return None\n if variable not in self.columns(structure):\n return None\n #\n # Added code to cache values to speed up parsing large files.\n # 2009.05.11 / Demitri Muna, NYU\n # Find (or create) the cache for this structure.\n #\n try:\n cache = self._struct_type_caches[structure]\n except KeyError:\n self._struct_type_caches[structure] = collections.OrderedDict()\n cache = self._struct_type_caches[structure] # cache for one struct type\n #\n # Lookup (or create) the value for this variable\n #\n try:\n var_type = cache[variable]\n except KeyError:\n if self.debug:\n print(variable)\n defl = [ x for x in self['symbols']['struct'] if x.find(structure.lower()) > 0 ]\n defu = [ x for x in self['symbols']['struct'] if x.find(structure.upper()) > 0 ]\n if len(defl) != 1 and len(defu) != 1:\n return None\n elif len(defl) == 1:\n definition = defl\n else:\n definition = defu\n typere = re.compile(r'(\\S+)\\s+{0}([[<].*[]>]|);'.format(variable))\n (typ,array) = typere.search(definition[0]).groups()\n var_type = typ + array.replace('<','[').replace('>',']')\n cache[variable] = var_type\n return var_type\n #\n #\n #\n def basetype(self,structure,variable):\n \"\"\"Returns the bare type of a variable, stripping off any array information.\n\n Parameters\n ----------\n structure : str\n The name of the structure that contains `variable`.\n variable : str\n The name of the column whose type you want.\n\n Returns\n -------\n basetype : str\n The type of the variable, stripped of array information.\n \"\"\"\n typ = self.type(structure,variable)\n if self.debug:\n print(variable, typ)\n try:\n return typ[0:typ.index('[')]\n except ValueError:\n return typ\n #\n #\n #\n def isarray(self,structure,variable):\n \"\"\"Returns ``True`` if the variable is an array type.\n\n For character types, this means a two-dimensional array,\n *e.g.*: ``char[5][20]``.\n\n Parameters\n ----------\n structure : str\n The name of the structure that contains `variable`.\n variable : str\n The name of the column to check for array type.\n\n Returns\n -------\n isarray : bool\n ``True`` if the variable is an array.\n \"\"\"\n try:\n cache = self._struct_isarray_caches[structure]\n except KeyError:\n self._struct_isarray_caches[structure] = collections.OrderedDict()\n cache = 
self._struct_isarray_caches[structure]\n try:\n result = cache[variable]\n except KeyError:\n typ = self.type(structure,variable)\n character_array = re.compile(r'char[[<]\\d*[]>][[<]\\d*[]>]')\n if ((character_array.search(typ) is not None) or\n (typ.find('char') < 0 and (typ.find('[') >= 0\n or typ.find('<') >= 0))):\n cache[variable] = True\n else:\n cache[variable] = False\n result = cache[variable]\n return result\n #\n #\n #\n def isenum(self,structure,variable):\n \"\"\"Returns true if a variable is an enum type.\n\n Parameters\n ----------\n structure : str\n The name of the structure that contains `variable`.\n variable : str\n The name of the column to check for enum type.\n\n Returns\n -------\n isenum : bool\n ``True`` if the variable is enum type.\n \"\"\"\n if self._enum_cache is None:\n self._enum_cache = collections.OrderedDict()\n if 'enum' in self['symbols']:\n for e in self['symbols']['enum']:\n m = re.search(r'typedef\\s+enum\\s*\\{([^}]+)\\}\\s*(\\w+)\\s*;',e).groups()\n self._enum_cache[m[1]] = re.split(r',\\s*',m[0].strip())\n else:\n return False\n return self.basetype(structure,variable) in self._enum_cache\n #\n #\n #\n def array_length(self,structure,variable):\n \"\"\"Returns the length of an array type or 1 if the variable is not an array.\n\n For character types, this is the length of a two-dimensional\n array, *e.g.*, ``char[5][20]`` has length 5.\n\n Parameters\n ----------\n structure : str\n The name of the structure that contains `variable`.\n variable : str\n The name of the column to check for array length.\n\n Returns\n -------\n array_length : int\n The length of the array variable\n \"\"\"\n if self.isarray(structure,variable):\n typ = self.type(structure,variable)\n return int(typ[typ.index('[')+1:typ.index(']')])\n else:\n return 1\n #\n #\n #\n def char_length(self,structure,variable):\n \"\"\"Returns the length of a character field.\n\n *e.g.* ``char[5][20]`` is an array of 5 strings of length 20.\n Returns ``None`` if the variable is not a character type. 
If the\n length is not specified, *i.e.* ``char[]``, it returns the length of\n the largest string.\n\n Parameters\n ----------\n structure : str\n The name of the structure that contains `variable`.\n variable : str\n The name of the column to check for char length.\n\n Returns\n -------\n char_length : int or None\n The length of the char variable.\n \"\"\"\n typ = self.type(structure,variable)\n if typ.find('char') < 0:\n return None\n try:\n return int(typ[typ.rfind('[')+1:typ.rfind(']')])\n except ValueError:\n if self.isarray(structure,variable):\n return max([max([len(x) for x in r]) for r in self[structure][variable]])\n else:\n return max([len(x) for x in self[structure][variable]])\n #\n #\n #\n def dtype(self,structure):\n \"\"\"Returns a NumPy dtype object suitable for describing a table as a record array.\n\n Treats enums as string, which is what the IDL reader does.\n\n Parameters\n ----------\n structure : str\n The name of the structure.\n\n Returns\n -------\n dtype : numpy.dtype\n A dtype object suitable for describing the yanny structure as a record array.\n \"\"\"\n dt = list()\n dtmap = {'short':'i2', 'int':'i4', 'long':'i8', 'float':'f',\n 'double':'d' }\n for c in self.columns(structure):\n typ = self.basetype(structure,c)\n if typ == 'char':\n d = \"S{0:d}\".format(self.char_length(structure,c))\n elif self.isenum(structure,c):\n d = \"S{0:d}\".format(max([len(x) for x in self._enum_cache[typ]]))\n else:\n d = dtmap[typ]\n if self.isarray(structure,c):\n dt.append((c,d,(self.array_length(structure,c),)))\n else:\n dt.append((c,d))\n dt = numpy.dtype(dt)\n return dt\n #\n #\n #\n def convert(self,structure,variable,value):\n \"\"\"Converts value into the appropriate (Python) type.\n\n * ``short`` & ``int`` are converted to Python ``int``.\n * ``long`` is converted to Python ``long``.\n * ``float`` & ``double`` are converted to Python ``float``.\n * Other types are not altered.\n\n There may be further conversions into NumPy types, but this is the\n first stage.\n\n Parameters\n ----------\n structure : str\n The name of the structure that contains `variable`.\n variable : str\n The name of the column undergoing conversion.\n value : str\n The value contained in a particular row of `variable`.\n\n Returns\n -------\n convert : int, long, float or str\n `value` converted to a Python numerical type.\n \"\"\"\n typ = self.basetype(structure,variable)\n if (typ == 'short' or typ == 'int'):\n if self.isarray(structure,variable):\n return [int(v) for v in value]\n else:\n return int(value)\n if typ == 'long':\n if self.isarray(structure,variable):\n return [long(v) for v in value]\n else:\n return long(value)\n if (typ == 'float' or typ == 'double'):\n if self.isarray(structure,variable):\n return [float(v) for v in value]\n else:\n return float(value)\n return value\n #\n #\n #\n def tables(self):\n \"\"\"Returns a list of all the defined structures.\n\n This is just the list of keys of the object with the 'internal'\n keys removed.\n \"\"\"\n foo = list()\n for k in self['symbols'].keys():\n if k not in ('struct','enum'):\n foo.append(k)\n return foo\n #\n #\n #\n def columns(self,table):\n \"\"\"Returns an ordered list of column names associated with a particular table.\n\n The order is the same order as they are defined in the yanny file.\n\n Parameters\n ----------\n table : str\n The table whose columns are desired.\n\n Returns\n -------\n columns : list\n The list of column names.\n \"\"\"\n foo = list()\n if table in self['symbols']:\n return 
self['symbols'][table]\n return foo\n #\n #\n #\n def size(self,table):\n \"\"\"Returns the number of rows in a table.\n\n Parameters\n ----------\n table : str\n The table whose size desired.\n\n Returns\n -------\n size : int\n The number of rows in `table`.\n \"\"\"\n foo = self.columns(table)\n return len(self[table][foo[0]])\n #\n #\n #\n def pairs(self):\n \"\"\"Returns a list of keys to keyword/value pairs.\n\n Equivalent to doing ``self.keys()``, but with all the data tables &\n other control structures stripped out.\n \"\"\"\n p = list()\n foo = self.tables()\n for k in self.keys():\n if k != 'symbols' and k not in foo:\n p.append(k)\n return p\n #\n #\n #\n def row(self,table,index):\n \"\"\"Returns a list containing a single row from a specified table in column order\n\n If index is out of range, it returns an empty list.\n\n If the yanny object instance is set up for NumPy record arrays, then\n a single row can be obtained with::\n\n row0 = par['TABLE'][0]\n\n Parameters\n ----------\n table : str\n The table whose row is desired.\n index : int\n The number of the row to return.\n\n Returns\n -------\n row : list\n A row from `table`.\n \"\"\"\n datarow = list()\n if table in self and index >= 0 and index < self.size(table):\n for c in self.columns(table):\n datarow.append(self[table][c][index])\n return datarow\n #\n #\n #\n def list_of_dicts(self, table):\n \"\"\"Construct a list of dictionaries.\n\n Takes a table from the yanny object and constructs a list object\n containing one row per entry. Each item in the list is a dictionary\n keyed by the struct value names.\n\n If the yanny object instance is set up for NumPy record arrays, then\n the same functionality can be obtained with::\n\n foo = par['TABLE'][0]['column']\n\n Parameters\n ----------\n table : str\n The table to convert\n\n Returns\n -------\n list_of_dicts : list\n A list containing the rows of `table` converted to ``dict``.\n \"\"\"\n return_list = list()\n d = collections.OrderedDict()\n struct_fields = self.columns(table) # I'm assuming these are in order...\n for i in range(self.size(table)):\n one_row = self.row(table, i) # one row as a list\n j = 0\n for key in struct_fields:\n d[key] = one_row[j]\n j = j + 1\n return_list.append(collections.OrderedDict(d)) # append a new dict (copy of d)\n return return_list\n #\n #\n #\n def new_dict_from_pairs(self):\n \"\"\"Returns a new dictionary of keyword/value pairs.\n\n The new dictionary (*i.e.*, not a yanny object) contains the keys\n that ``self.pairs()`` returns. There are two reasons this is convenient:\n\n * the key 'symbols' that is part of the yanny object will not be present\n * a simple yanny file can be read with no further processing\n\n Example\n -------\n\n Read a yanny file and return only the pairs::\n\n >>> from os.path import dirname\n >>> from pydl.pydlutils.yanny import yanny\n >>> new_dict = yanny(dirname(__file__)+'/tests/t/test.par').new_dict_from_pairs()\n >>> new_dict['mjd']\n '54579'\n >>> new_dict['alpha']\n 'beta gamma delta'\n\n added: Demitri Muna, NYU 2009-04-28\n \"\"\"\n new_dictionary = collections.OrderedDict()\n for key in self.pairs():\n new_dictionary[key] = self[key]\n return new_dictionary\n #\n #\n #\n def write(self,newfile=None,comments=None):\n \"\"\"Write a yanny object to a file.\n\n This assumes that the filename used to create the object was not that\n of a pre-existing file. 
If a file of the same name is detected,\n this method will *not* attempt to overwrite it, but will print a warning.\n This also assumes that the special 'symbols' key has been properly\n created. This will not necessarily make the file very human-readable,\n especially if the data lines are long. If the name of a new file is\n given, it will write to the new file (assuming it doesn't exist).\n If the writing is successful, the data in the object will be updated.\n\n Parameters\n ----------\n newfile : str, optional\n The name of the file to write.\n comments : str or list of str, optional\n Comments that will be placed at the head of the file. If a\n single string is passed, it will be written out verbatim, so it\n had better contain '#' characters. If a list of strings is\n passed, comment characters will be added and the strings\n will be joined together.\n \"\"\"\n if newfile is None:\n if len(self.filename) > 0:\n newfile = self.filename\n else:\n raise ValueError(\"No filename specified!\")\n if comments is None:\n basefile = os.path.basename(newfile)\n timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')\n comments = \"#\\n# {0}\\n#\\n# Created by pydl.pydlutils.yanny.yanny\\n#\\n# {1}\\n#\\n\".format(basefile,timestamp)\n else:\n if not isinstance(comments, str):\n comments = \"\\n\".join([\"# {0}\".format(c) for c in comments]) + \"\\n\"\n contents = comments\n #\n # Print any key/value pairs\n #\n for key in self.pairs():\n contents += \"{0} {1}\\n\".format(key,self[key])\n #\n # Print out enum definitions\n #\n if len(self['symbols']['enum']) > 0:\n contents += \"\\n\" + \"\\n\\n\".join(self['symbols']['enum']) + \"\\n\"\n #\n # Print out structure definitions\n #\n if len(self['symbols']['struct']) > 0:\n contents += \"\\n\" + \"\\n\\n\".join(self['symbols']['struct']) + \"\\n\"\n contents += \"\\n\"\n #\n # Print out the data tables\n #\n for sym in self.tables():\n columns = self.columns(sym)\n for k in range(self.size(sym)):\n line = list()\n line.append(sym)\n for col in columns:\n if self.isarray(sym,col):\n datum = '{' + ' '.join([self.protect(x) for x in self[sym][col][k]]) + '}'\n else:\n datum = self.protect(self[sym][col][k])\n line.append(datum)\n contents += \"{0}\\n\".format(' '.join(line))\n #\n # Actually write the data to file\n #\n if os.access(newfile,os.F_OK):\n print(\"{0} exists, aborting write!\".format(newfile))\n print(\"For reference, here's what would have been written:\")\n print(contents)\n else:\n with open(newfile,'w') as f:\n f.write(contents)\n self._contents = contents\n self.filename = newfile\n self._parse()\n return\n #\n #\n #\n def append(self,datatable):\n \"\"\"Appends data to an existing FTCL/yanny file.\n\n Tries as much as possible to preserve the ordering & format of the\n original file. The datatable should adhere to the format of the\n yanny object, but it is not necessary to reproduce the 'symbols'\n dictionary. It will not try to append data to a file that does not\n exist. If the append is successful, the data in the object will be updated.\n\n Parameters\n ----------\n datatable : dict\n The data to append.\n \"\"\"\n if len(self.filename) == 0:\n raise ValueError(\"No filename is set for this object. Use the filename attribute to set the filename!\")\n if type(datatable) != dict:\n raise ValueError(\"Data to append is not of the correct type. 
Use a dict!\")\n timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')\n contents = ''\n #\n # Print any key/value pairs\n #\n for key in datatable.keys():\n if key.upper() in self.tables() or key == 'symbols':\n continue\n contents += \"{0} {1}\\n\".format(key, datatable[key])\n #\n # Print out the data tables\n #\n for sym in self.tables():\n if sym.lower() in datatable:\n datasym = sym.lower()\n else:\n datasym = sym\n if datasym in datatable:\n columns = self.columns(sym)\n for k in range(len(datatable[datasym][columns[0]])):\n line = list()\n line.append(sym)\n for col in columns:\n if self.isarray(sym,col):\n datum = '{' + ' '.join([self.protect(x) for x in datatable[datasym][col][k]]) + '}'\n else:\n datum = self.protect(datatable[datasym][col][k])\n line.append(datum)\n contents += \"{0}\\n\".format(' '.join(line))\n #\n # Actually write the data to file\n #\n if len(contents) > 0:\n contents = (\"# Appended by yanny.py at {0}.\\n\".format(timestamp)) + contents\n if os.access(self.filename,os.W_OK):\n with open(self.filename,'a') as f:\n f.write(contents)\n self._contents += contents\n self._parse()\n else:\n print(\"{0} does not exist, aborting append!\".format(self.filename))\n print(\"For reference, here's what would have been written:\")\n print(contents)\n else:\n print(\"Nothing to be appended!\")\n return\n #\n #\n #\n def _parse(self):\n \"\"\"Converts text into tables that users can use.\n\n This method is for use internally by the yanny object. It is not\n meant to be called by users.\n\n Parsing proceeds in this order:\n\n #. Lines that end with a backslash character ``\\`` are reattached\n to following lines.\n #. Structure & enum definitions are identified, saved into the\n 'symbols' dictionary & stripped from the contents.\n #. Structure definitions are interpreted.\n #. At this point, the remaining lines of the original file can only\n contain these things:\n\n * 'blank' lines, including lines that only contain comments\n * keyword/value pairs\n * structure rows\n\n #. The remaining lines are scanned sequentially.\n\n #. 'Blank' lines are identified & ignored.\n #. Whitespace & comments are stripped from non-blank lines.\n #. Empty double braces ``{{}}`` are converted into empty double\n quotes ``\"\"``.\n #. If the first word on a line matches the name of a structure,\n the line is broken up into tokens & each token or set of tokens\n (for arrays) is converted to the appropriate Python type.\n #. If the first word on a line does not match the name of a\n structure, it must be a keyword, so this line is interpreted\n as a keyword/value pair. No further processing is done to\n the value.\n\n #. At the conclusion of parsing, if ``self.np`` is ``True``, the\n structures are converted into NumPy record arrays.\n \"\"\"\n #\n # there are five things we might find\n # 1. 'blank' lines including comments\n # 2. keyword/value pairs (which may have trailing comments)\n # 3. enumeration definitions\n # 4. structure definitions\n # 5. 
data\n #\n lines = self._contents\n #\n # Reattach lines ending with \\\n #\n lines = re.sub(r'\\\\\\s*\\n',' ',lines)\n #\n # Find structure & enumeration definitions & strip them out\n #\n self['symbols']['struct'] = re.findall(r'typedef\\s+struct\\s*\\{[^}]+\\}\\s*\\w+\\s*;',lines)\n self['symbols']['enum'] = re.findall(r'typedef\\s+enum\\s*\\{[^}]+\\}\\s*\\w+\\s*;',lines)\n lines = re.sub(r'typedef\\s+struct\\s*\\{[^}]+\\}\\s*\\w+\\s*;','',lines)\n lines = re.sub(r'typedef\\s+enum\\s*\\{[^}]+\\}\\s*\\w+\\s*;','',lines)\n #\n # Interpret the structure definitions\n #\n typedefre = re.compile(r'typedef\\s+struct\\s*\\{([^}]+)\\}\\s*(\\w*)\\s*;')\n for typedef in self['symbols']['struct']:\n typedefm = typedefre.search(typedef)\n (definition,name) = typedefm.groups()\n self[name.upper()] = collections.OrderedDict()\n self['symbols'][name.upper()] = list()\n definitions = re.findall(r'\\S+\\s+\\S+;',definition)\n for d in definitions:\n d = d.replace(';','')\n (datatype,column) = re.split(r'\\s+',d)\n column = re.sub(r'[[<].*[]>]$','',column)\n self['symbols'][name.upper()].append(column)\n self[name.upper()][column] = list()\n comments = re.compile(r'^\\s*#') # Remove lines containing only comments\n blanks = re.compile(r'^\\s*$') # Remove lines containing only whitespace\n #\n # Remove trailing comments, but not if they are enclosed in quotes.\n #\n #trailing_comments = re.compile(r'\\s*\\#.*$')\n #trailing_comments = re.compile(r'\\s*\\#[^\"]+$')\n double_braces = re.compile(r'\\{\\s*\\{\\s*\\}\\s*\\}') # Double empty braces get replaced with empty quotes\n if len(lines) > 0:\n for line in lines.split('\\n'):\n if self.debug:\n print(line)\n if len(line) == 0:\n continue\n if comments.search(line) is not None:\n continue\n if blanks.search(line) is not None:\n continue\n #\n # Remove leading & trailing blanks & comments\n #\n line = line.strip()\n line = self.trailing_comment(line)\n #line = trailing_comments.sub('',line)\n line = double_braces.sub('\"\"',line)\n #\n # Now if the first word on the line does not match a\n # structure definition it is a keyword/value pair\n #\n (key, value) = self.get_token(line)\n uckey = key.upper()\n if uckey in self['symbols'].keys():\n #\n # Structure data\n #\n for column in self['symbols'][uckey]:\n if len(value) > 0 and blanks.search(value) is None:\n (data,value) = self.get_token(value)\n if self.isarray(uckey,column):\n #\n # An array value\n # if it's character data, it won't be\n # delimited by {} unless it is a multidimensional\n # string array. 
It may or may not be delimited\n # by double quotes\n #\n # Note, we're assuming here that the only\n # multidimensional arrays are string arrays\n #\n arraydata = list()\n while len(data) > 0:\n (token, data) = self.get_token(data)\n arraydata.append(token)\n self[uckey][column].append(\n self.convert(uckey,column,arraydata))\n else:\n #\n # A single value\n #\n self[uckey][column].append(\n self.convert(uckey,column,data))\n else:\n break\n else:\n #\n # Keyword/value pair\n #\n self[key] = value\n #\n # If self.np is True, convert tables into NumPy record arrays\n #\n if self.np:\n for t in self.tables():\n record = numpy.zeros((self.size(t),),dtype=self.dtype(t))\n for c in self.columns(t):\n record[c] = self[t][c]\n self[t] = record\n return\n\n\ndef write_ndarray_to_yanny(filename, datatable, structname='mystruct',\n enums=dict(),hdr=dict(), overwrite=False):\n \"\"\"Converts a NumPy record array into a new FTCL/yanny file.\n\n Returns a new yanny object corresponding to the file.\n\n Parameters\n ----------\n filename : str\n The name of a parameter file.\n datatable : numpy.ndarray\n A NumPy record array containing data that can be copied into a\n yanny object.\n structname : str, optional\n The name to give the structure in the yanny file. Defaults to\n 'MYSTRUCT'.\n enums : dict, optional\n A dictionary containing enum information. See details above.\n hdr : dict, optional\n A dictionary containing keyword/value pairs for the 'header' of the\n yanny file.\n\n Returns\n -------\n par : yanny.yanny\n The yanny object resulting from writing the file.\n\n Examples\n --------\n \"\"\"\n\n par = yanny(filename, np=True, debug=False)\n\n par['symbols'] = par.dtype_to_struct(\n datatable.dtype, structname=structname, enums=enums)\n\n par[structname.upper()] = datatable\n\n for key in hdr:\n par[key] = hdr[key]\n \n if overwrite:\n os.remove(filename)\n\n par.write(filename)\n\n return par\n"
] | [
[
"numpy.dtype"
]
] |
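The row above records the yanny parameter-file parser class; as a hedged illustration (not part of the recorded file), the sketch below exercises the public methods it defines - pairs(), tables(), columns(), size(), and record-array access with np=True. The import path follows the docstring example inside the recorded class (pydl.pydlutils.yanny), and the file name test.par is a placeholder.

# Illustrative sketch only; 'test.par' is a placeholder and the import path is
# taken from the docstring example inside the recorded class.
from pydl.pydlutils.yanny import yanny

par = yanny('test.par', np=True)            # np=True -> tables become NumPy record arrays
print(par.pairs())                          # plain keyword/value keys in the file
for table in par.tables():                  # every structure (typedef struct ...) found
    print(table, par.columns(table), par.size(table))
    print(par[table][0])                    # first row of the record array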
ryanfox/NFLWin | [
"25967e7c11f7283289851912c5cc97a3a48394ab"
] | [
"nflwin/model.py"
] | [
"\"\"\"Tools for creating and running the model.\"\"\"\nfrom __future__ import print_function, division\n\nimport os\n\nimport numpy as np\nfrom scipy import integrate\nfrom scipy import stats\n\nimport joblib\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import brier_score_loss\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.utils.validation import NotFittedError\n\nfrom . import preprocessing, utilities\n\nclass WPModel(object):\n \"\"\"The object that computes win probabilities.\n\n In addition to holding the model itself, it defines some columns names likely to be\n used in the model as parameters to allow other users to more easily figure out which\n columns go into the model.\n\n Parameters\n ----------\n copy_data : boolean (default=``True``)\n Whether or not to copy data when fitting and applying the model. Running the model\n in-place (``copy_data=False``) will be faster and have a smaller memory footprint,\n but if not done carefully can lead to data integrity issues.\n\n Attributes\n ----------\n model : A Scikit-learn pipeline (or equivalent)\n The actual model used to compute WP. Upon initialization it will be set to\n a default model, but can be overridden by the user.\n column_descriptions : dictionary\n A dictionary whose keys are the names of the columns used in the model, and the values are\n string descriptions of what the columns mean. Set at initialization to be the default model,\n if you create your own model you'll need to update this attribute manually.\n training_seasons : A list of ints, or ``None`` (default=``None``)\n If the model was trained using data downloaded from nfldb, a list of the seasons\n used to train the model. If nfldb was **not** used, an empty list. 
If no model\n has been trained yet, ``None``.\n training_season_types : A list of strings or ``None`` (default=``None``)\n Same as ``training_seasons``, except for the portions of the seasons used in training the\n model (\"Preseason\", \"Regular\", and/or \"Postseason\").\n validation_seasons : same as ``training_seasons``, but for validation data.\n validation_season_types : same as ``training_season_types``, but for validation data.\n sample_probabilities : A numpy array of floats or ``None`` (default=``None``)\n After the model has been validated, contains the sampled predicted probabilities used to\n compute the validation statistic.\n predicted_win_percents : A numpy array of floats or ``None`` (default=``None``)\n After the model has been validated, contains the actual probabilities in the test\n set at each probability in ``sample_probabilities``.\n num_plays_used : A numpy array of floats or ``None`` (default=``None``)\n After the model has been validated, contains the number of plays used to compute each\n element of ``predicted_win_percents``.\n model_directory : string\n The directory where all models will be saved to or loaded from.\n\n \"\"\"\n model_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models\")\n _default_model_filename = \"default_model.nflwin\"\n\n def __init__(self,\n copy_data=True\n ):\n self.copy_data = copy_data\n\n self.model = self.create_default_pipeline()\n self._training_seasons = None\n self._training_season_types = None\n self._validation_seasons = None\n self._validation_season_types = None\n\n self._sample_probabilities = None\n self._predicted_win_percents = None\n self._num_plays_used = None\n\n\n @property\n def training_seasons(self):\n return self._training_seasons\n @property\n def training_seasons_types(self):\n return self._training_season_types\n @property\n def validation_seasons(self):\n return self._validation_seasons\n @property\n def validation_seasons_types(self):\n return self._validation_season_types\n\n @property\n def sample_probabilities(self):\n return self._sample_probabilities\n @property\n def predicted_win_percents(self):\n return self._predicted_win_percents\n @property\n def num_plays_used(self):\n return self._num_plays_used\n\n def train_model(self,\n source_data=\"nfldb\",\n training_seasons=(2009, 2010, 2011, 2012, 2013, 2014),\n training_season_types=(\"Regular\", \"Postseason\"),\n target_colname=\"offense_won\"):\n \"\"\"Train the model.\n\n Once a modeling pipeline is set up (either the default or something\n custom-generated), historical data needs to be fed into it in order to\n \"fit\" the model so that it can then be used to predict future results.\n This method implements a simple wrapper around the core Scikit-learn functionality\n which does this.\n\n The default is to use data from the nfldb database, however that can be changed\n to a simple Pandas DataFrame if desired (for instance if you wish to use data\n from another source).\n\n There is no particular output from this function, rather the parameters governing\n the fit of the model are saved inside the model object itself. 
If you want to get an\n estimate of the quality of the fit, use the ``validate_model`` method after running\n this method.\n\n Notes\n -----\n If you are loading in the default model, **there is no need to re-run this method**.\n In fact, doing so will likely result in weird errors and could corrupt the model if you\n were to try to save it back to disk.\n\n Parameters\n ----------\n source_data : the string ``\"nfldb\"`` or a Pandas DataFrame (default=``\"nfldb\"``)\n The data to be used to train the model. If ``\"nfldb\"``, will query the nfldb\n database for the training data (note that this requires a correctly configured\n installation of nfldb's database).\n training_seasons : list of ints (default=``[2009, 2010, 2011, 2012, 2013, 2014]``)\n What seasons to use to train the model if getting data from the nfldb database.\n If ``source_data`` is not ``\"nfldb\"``, this argument will be ignored.\n **NOTE:** it is critical not to use all possible data in order to train the\n model - some will need to be reserved for a final validation (see the\n ``validate_model`` method). A good dataset to reserve\n for validation is the most recent one or two NFL seasons.\n training_season_types : list of strings (default=``[\"Regular\", \"Postseason\"]``)\n If querying from the nfldb database, what parts of the seasons to use.\n Options are \"Preseason\", \"Regular\", and \"Postseason\". If ``source_data`` is not\n ``\"nfldb\"``, this argument will be ignored.\n target_colname : string or integer (default=``\"offense_won\"``)\n The name of the target variable column. \n\n Returns\n -------\n ``None``\n \"\"\"\n self._training_seasons = []\n self._training_season_types = []\n if isinstance(source_data, str):\n if source_data == \"nfldb\":\n source_data = utilities.get_nfldb_play_data(season_years=training_seasons,\n season_types=training_season_types)\n self._training_seasons = training_seasons\n self._training_season_types = training_season_types\n else:\n raise ValueError(\"WPModel: if source_data is a string, it must be 'nfldb'\")\n target_col = source_data[target_colname]\n feature_cols = source_data.drop(target_colname, axis=1)\n self.model.fit(feature_cols, target_col)\n\n def validate_model(self,\n source_data=\"nfldb\",\n validation_seasons=[2015],\n validation_season_types=[\"Regular\", \"Postseason\"],\n target_colname=\"offense_won\"):\n \"\"\"Validate the model.\n\n Once a modeling pipeline is trained, a different dataset must be fed into the trained model\n to validate the quality of the fit.\n This method implements a simple wrapper around the core Scikit-learn functionality\n which does this.\n\n The default is to use data from the nfldb database, however that can be changed\n to a simple Pandas DataFrame if desired (for instance if you wish to use data\n from another source).\n\n The output of this method is a p value which represents the confidence at which\n we can reject the null hypothesis that the model predicts the appropriate win\n probabilities. This number is computed by first smoothing the predicted win probabilities of both all test data and\n just the data where the offense won with a gaussian `kernel density\n estimate <http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KernelDensity.html#sklearn.neighbors.KernelDensity>`_\n with standard deviation = 0.01. 
Once the data is smooth, ratios at each percentage point from 1% to 99% are computed (i.e.\n what fraction of the time did the offense win when the model says they have a 1% chance of winning, 2% chance, etc.). Each of\n these ratios should be well approximated by the binomial distribution, since they are essentially independent (not perfectly\n but hopefully close enough) weighted coin flips, giving a p value. From there `Fisher's method <https://en.wikipedia.org/wiki/Fisher%27s_method>`_\n is used to combine the p values into a global p value. A p value close to zero means that the model is unlikely to be\n properly predicting the correct win probabilities. A p value close to one, **while not proof that the model is correct**,\n means that the model is at least not inconsistent with the hypothesis that it predicts good win probabilities.\n\n Parameters\n ----------\n source_data : the string ``\"nfldb\"`` or a Pandas DataFrame (default=``\"nfldb\"``)\n The data to be used to train the model. If ``\"nfldb\"``, will query the nfldb\n database for the training data (note that this requires a correctly configured\n installation of nfldb's database).\n training_seasons : list of ints (default=``[2015]``)\n What seasons to use to validate the model if getting data from the nfldb database.\n If ``source_data`` is not ``\"nfldb\"``, this argument will be ignored.\n **NOTE:** it is critical not to use the same data to validate the model as was used\n in the fit. Generally a good data set to use for validation is one from a time\n period more recent than was used to train the model. For instance, if the model was trained\n on data from 2009-2014, data from the 2015 season would be a sensible choice to validate the model.\n training_season_types : list of strings (default=``[\"Regular\", \"Postseason\"]``)\n If querying from the nfldb database, what parts of the seasons to use.\n Options are \"Preseason\", \"Regular\", and \"Postseason\". If ``source_data`` is not\n ``\"nfldb\"``, this argument will be ignored.\n target_colname : string or integer (default=``\"offense_won\"``)\n The name of the target variable column. \n\n Returns\n -------\n float, between 0 and 1\n The combined p value, where smaller values indicate that the model is not accurately predicting win\n probabilities.\n \n Raises\n ------\n NotFittedError\n If the model hasn't been fit.\n\n Notes\n -----\n Probabilities are computed between 1 and 99 percent because a single incorrect prediction at 100% or 0% automatically drives\n the global p value to zero. Since the model is being smoothed this situation can occur even when there are no model predictions\n at those extreme values, and therefore leads to erroneous p values.\n\n While it seems reasonable (to me at least), I am not totally certain that this approach is entirely correct.\n It's certainly sub-optimal in that you would ideally reject the null hypothesis that the model predictions\n **aren't** appropriate, but that seems to be a much harder problem (and one that would need much more test\n data to beat down the uncertainties involved). I'm also not sure if using Fisher's method is appropriate here,\n and I wonder if it might be necessary to Monte Carlo this. 
I would welcome input from others on better ways to do this.\n \n \"\"\"\n\n if self.training_seasons is None:\n raise NotFittedError(\"Must fit model before validating.\")\n \n self._validation_seasons = []\n self._validation_season_types = []\n if isinstance(source_data, str):\n if source_data == \"nfldb\":\n source_data = utilities.get_nfldb_play_data(season_years=validation_seasons,\n season_types=validation_season_types)\n self._validation_seasons = validation_seasons\n self._validation_season_types = validation_season_types\n else:\n raise ValueError(\"WPModel: if source_data is a string, it must be 'nfldb'\")\n \n target_col = source_data[target_colname]\n feature_cols = source_data.drop(target_colname, axis=1)\n predicted_probabilities = self.model.predict_proba(feature_cols)[:,1]\n\n self._sample_probabilities, self._predicted_win_percents, self._num_plays_used = (\n WPModel._compute_predicted_percentages(target_col.values, predicted_probabilities))\n\n #Compute the maximal deviation from a perfect prediction as well as the area under the\n #curve of the residual between |predicted - perfect|:\n max_deviation, residual_area = self._compute_prediction_statistics(self.sample_probabilities,\n self.predicted_win_percents)\n return max_deviation, residual_area\n \n #Compute p-values for each where null hypothesis is that distributions are same, then combine\n #them all to make sure data is not inconsistent with accurate predictions.\n # combined_pvalue = self._test_distribution(self.sample_probabilities,\n # self.predicted_win_percents,\n # self.num_plays_used)\n \n # return combined_pvalue\n\n @staticmethod\n def _compute_prediction_statistics(sample_probabilities, predicted_win_percents):\n \"\"\"Take the KDE'd model estimates, then compute statistics.\n\n Returns\n -------\n A tuple of (``max_deviation``, ``residual_area``), where ``max_deviation``\n is the largest discrepancy between the model and expectation at any WP,\n and ``residual_area`` is the total area under the curve of |predicted WP - expected WP|.\n \"\"\"\n abs_deviations = np.abs(predicted_win_percents - sample_probabilities)\n max_deviation = np.max(abs_deviations)\n residual_area = integrate.simps(abs_deviations,\n sample_probabilities)\n return (max_deviation, residual_area)\n \n\n def predict_wp(self, plays):\n \"\"\"Estimate the win probability for a set of plays.\n\n Basically a simple wrapper around ``WPModel.model.predict_proba``,\n takes in a DataFrame and then spits out an array of predicted\n win probabilities.\n\n Parameters\n ----------\n plays : Pandas DataFrame\n The input data to use to make the predictions.\n\n Returns\n -------\n Numpy array, of length ``len(plays)``\n Predicted probability that the offensive team in each play\n will go on to win the game.\n\n Raises\n ------\n NotFittedError\n If the model hasn't been fit.\n \"\"\"\n if self.training_seasons is None:\n raise NotFittedError(\"Must fit model before predicting WP.\")\n\n return self.model.predict_proba(plays)[:,1]\n\n\n def plot_validation(self, axis=None, **kwargs):\n \"\"\"Plot the validation data.\n\n Parameters\n ----------\n axis : matplotlib.pyplot.axis object or ``None`` (default=``None``)\n If provided, the validation line will be overlaid on ``axis``.\n Otherwise, a new figure and axis will be generated and plotted on.\n **kwargs\n Arguments to ``axis.plot``.\n\n Returns\n -------\n matplotlib.pylot.axis\n The axis the plot was made on.\n\n Raises\n ------\n NotFittedError\n If the model hasn't been fit **and** validated.\n 
\"\"\"\n\n if self.sample_probabilities is None:\n raise NotFittedError(\"Must validate model before plotting.\")\n \n import matplotlib.pyplot as plt\n if axis is None:\n axis = plt.figure().add_subplot(111)\n axis.plot([0, 100], [0, 100], ls=\"--\", lw=2, color=\"black\")\n axis.set_xlabel(\"Predicted WP\")\n axis.set_ylabel(\"Actual WP\")\n axis.plot(self.sample_probabilities,\n self.predicted_win_percents,\n **kwargs)\n\n return axis\n \n\n @staticmethod\n def _test_distribution(sample_probabilities, predicted_win_percents, num_plays_used):\n \"\"\"Based off assuming the data at each probability is a Bernoulli distribution.\"\"\"\n\n #Get the p-values:\n p_values = [stats.binom_test(np.round(predicted_win_percents[i] * num_plays_used[i]),\n np.round(num_plays_used[i]),\n p=sample_probabilities[i]) for i in range(len(sample_probabilities))]\n combined_p_value = stats.combine_pvalues(p_values)[1]\n return(combined_p_value)\n\n @staticmethod\n def _compute_predicted_percentages(actual_results, predicted_win_probabilities):\n \"\"\"Compute the sample percentages from a validation data set.\n \"\"\"\n kde_offense_won = KernelDensity(kernel='gaussian', bandwidth=0.01).fit(\n (predicted_win_probabilities[(actual_results == 1)])[:, np.newaxis])\n kde_total = KernelDensity(kernel='gaussian', bandwidth=0.01).fit(\n predicted_win_probabilities[:, np.newaxis])\n sample_probabilities = np.linspace(0.01, 0.99, 99)\n number_density_offense_won = np.exp(kde_offense_won.score_samples(sample_probabilities[:, np.newaxis])) * np.sum((actual_results))\n number_density_total = np.exp(kde_total.score_samples(sample_probabilities[:, np.newaxis])) * len(actual_results)\n number_offense_won = number_density_offense_won * np.sum(actual_results) / np.sum(number_density_offense_won)\n number_total = number_density_total * len(actual_results) / np.sum(number_density_total)\n predicted_win_percents = number_offense_won / number_total\n\n return 100.*sample_probabilities, 100.*predicted_win_percents, number_total\n \n def create_default_pipeline(self):\n \"\"\"Create the default win probability estimation pipeline.\n\n\n Returns\n -------\n Scikit-learn pipeline\n The default pipeline, suitable for computing win probabilities\n but by no means the best possible model.\n\n This can be run any time a new default pipeline is required,\n and either set to the ``model`` attribute or used independently.\n \"\"\"\n\n steps = []\n\n offense_team_colname = \"offense_team\"\n home_team_colname = \"home_team\"\n home_score_colname = \"curr_home_score\"\n away_score_colname = \"curr_away_score\"\n down_colname = \"down\"\n quarter_colname = \"quarter\"\n time_colname = \"seconds_elapsed\"\n yardline_colname = \"yardline\"\n yards_to_go_colname=\"yards_to_go\"\n\n self.column_descriptions = {\n offense_team_colname: \"Abbreviation for the offensive team\",\n home_team_colname: \"Abbreviation for the home team\",\n away_score_colname: \"Abbreviation for the visiting team\",\n down_colname: \"The current down\",\n yards_to_go_colname: \"Yards to a first down (or the endzone)\",\n quarter_colname: \"The quarter\",\n time_colname: \"Seconds elapsed in the quarter\",\n yardline_colname: (\"The yardline, given by (yards from own goalline - 50). 
\"\n \"-49 is your own 1 while 49 is the opponent's 1.\")\n }\n\n is_offense_home = preprocessing.ComputeIfOffenseIsHome(offense_team_colname,\n home_team_colname,\n copy=self.copy_data)\n steps.append((\"compute_offense_home\", is_offense_home))\n score_differential = preprocessing.CreateScoreDifferential(home_score_colname,\n away_score_colname,\n is_offense_home.offense_home_team_colname,\n copy=self.copy_data)\n steps.append((\"create_score_differential\", score_differential))\n steps.append((\"map_downs_to_int\", preprocessing.MapToInt(down_colname, copy=self.copy_data)))\n total_time_elapsed = preprocessing.ComputeElapsedTime(quarter_colname, time_colname, copy=self.copy_data)\n steps.append((\"compute_total_time_elapsed\", total_time_elapsed))\n steps.append((\"remove_unnecessary_columns\", preprocessing.CheckColumnNames(\n column_names=[is_offense_home.offense_home_team_colname,\n score_differential.score_differential_colname,\n total_time_elapsed.total_time_colname,\n yardline_colname,\n yards_to_go_colname,\n down_colname],\n copy=self.copy_data)))\n steps.append((\"encode_categorical_columns\", preprocessing.OneHotEncoderFromDataFrame(\n categorical_feature_names=[down_colname],\n copy=self.copy_data)))\n\n search_grid = {'base_estimator__penalty': ['l1', 'l2'],\n 'base_estimator__C': [0.01, 0.1, 1, 10, 100]\n }\n base_model = LogisticRegression()\n calibrated_model = CalibratedClassifierCV(base_model, cv=2, method=\"isotonic\")\n #grid_search_model = GridSearchCV(calibrated_model, search_grid,\n # scoring=self._brier_loss_scorer)\n steps.append((\"compute_model\", calibrated_model))\n\n pipe = Pipeline(steps)\n return pipe\n\n def save_model(self, filename=None):\n \"\"\"Save the WPModel instance to disk.\n\n All models are saved to the same place, with the installed\n NFLWin library (given by ``WPModel.model_directory``). \n\n Parameters\n ----------\n filename : string (default=None):\n The filename to use for the saved model. If this parameter\n is not specified, save to the default filename. Note that if a model\n already lists with this filename, it will be overwritten. Note also that\n this is a filename only, **not** a full path. If a full path is specified\n it is likely (albeit not guaranteed) to cause errors.\n\n Returns\n -------\n ``None``\n \"\"\"\n\n if filename is None:\n filename = self._default_model_filename\n joblib.dump(self, os.path.join(self.model_directory, filename))\n\n @classmethod\n def load_model(cls, filename=None):\n \"\"\"Load a saved WPModel.\n\n Parameters\n ----------\n Same as ``save_model``.\n\n Returns\n -------\n ``nflwin.WPModel`` instance.\n \"\"\"\n if filename is None:\n filename = cls._default_model_filename\n \n return joblib.load(os.path.join(cls.model_directory, filename))\n\n @staticmethod\n def _brier_loss_scorer(estimator, X, y):\n \"\"\"Use the Brier loss to estimate model score.\n\n For use in GridSearchCV, instead of accuracy.\n \"\"\"\n predicted_positive_probabilities = estimator.predict_proba(X)[:, 1]\n return 1. - brier_score_loss(y, predicted_positive_probabilities)\n"
] | [
[
"numpy.max",
"scipy.integrate.simps",
"sklearn.calibration.CalibratedClassifierCV",
"numpy.round",
"numpy.sum",
"sklearn.metrics.brier_score_loss",
"sklearn.utils.validation.NotFittedError",
"matplotlib.pyplot.figure",
"sklearn.linear_model.LogisticRegression",
"scipy.stats.combine_pvalues",
"numpy.abs",
"sklearn.neighbors.KernelDensity",
"numpy.linspace",
"sklearn.pipeline.Pipeline"
]
] |
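A hedged usage sketch for the WPModel class recorded above (not part of the recorded file): it loads the packaged default model via load_model() and calls predict_wp() on a one-row DataFrame whose columns follow the column_descriptions dictionary built in create_default_pipeline(). The team abbreviations, scores, and quarter encoding are invented placeholders.

# Illustrative only: assumes the nflwin package recorded above is installed and
# ships its default model file; all play values below are made up.
import pandas as pd
from nflwin.model import WPModel

model = WPModel.load_model()        # loads models/default_model.nflwin by default

plays = pd.DataFrame({
    "offense_team": ["KC"],         # column names follow column_descriptions
    "home_team": ["KC"],
    "curr_home_score": [21],
    "curr_away_score": [17],
    "down": [3],
    "yards_to_go": [7],
    "quarter": ["Q4"],              # quarter encoding assumed to be nfldb-style
    "seconds_elapsed": [300.0],
    "yardline": [20],               # yards from own goal line minus 50
})

print(model.predict_wp(plays))      # predicted offense win probability per play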
goncaloperes/pymc3 | [
"59ce9af97f0b840f992322f33ec8dee203d7d1c0"
] | [
"pymc3/distributions/bart.py"
] | [
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom aesara.tensor.random.op import RandomVariable, default_shape_from_params\n\nfrom pymc3.distributions.distribution import NoDistribution\n\n__all__ = [\"BART\"]\n\n\nclass BARTRV(RandomVariable):\n \"\"\"\n Base class for BART\n \"\"\"\n\n name = \"BART\"\n ndim_supp = 1\n ndims_params = [2, 1, 0, 0, 0, 1]\n dtype = \"floatX\"\n _print_name = (\"BART\", \"\\\\operatorname{BART}\")\n all_trees = None\n\n def _shape_from_params(self, dist_params, rep_param_idx=1, param_shapes=None):\n return default_shape_from_params(self.ndim_supp, dist_params, rep_param_idx, param_shapes)\n\n @classmethod\n def rng_fn(cls, rng=np.random.default_rng(), *args, **kwargs):\n size = kwargs.pop(\"size\", None)\n X_new = kwargs.pop(\"X_new\", None)\n all_trees = cls.all_trees\n if all_trees:\n\n if size is None:\n size = ()\n elif isinstance(size, int):\n size = [size]\n\n flatten_size = 1\n for s in size:\n flatten_size *= s\n\n idx = rng.randint(len(all_trees), size=flatten_size)\n\n if X_new is None:\n pred = np.zeros((flatten_size, all_trees[0][0].num_observations))\n for ind, p in enumerate(pred):\n for tree in all_trees[idx[ind]]:\n p += tree.predict_output()\n else:\n pred = np.zeros((flatten_size, X_new.shape[0]))\n for ind, p in enumerate(pred):\n for tree in all_trees[idx[ind]]:\n p += np.array([tree.predict_out_of_sample(x) for x in X_new])\n return pred.reshape((*size, -1))\n else:\n return np.full_like(cls.Y, cls.Y.mean())\n\n\nbart = BARTRV()\n\n\nclass BART(NoDistribution):\n \"\"\"\n Bayesian Additive Regression Tree distribution.\n\n Distribution representing a sum over trees\n\n Parameters\n ----------\n X : array-like\n The covariate matrix.\n Y : array-like\n The response vector.\n m : int\n Number of trees\n alpha : float\n Control the prior probability over the depth of the trees. Even when it can takes values in\n the interval (0, 1), it is recommended to be in the interval (0, 0.5].\n k : float\n Scale parameter for the values of the leaf nodes. Defaults to 2. Recomended to be between 1\n and 3.\n split_prior : array-like\n Each element of split_prior should be in the [0, 1] interval and the elements should sum to\n 1. Otherwise they will be normalized.\n Defaults to None, i.e. all covariates have the same prior probability to be selected.\n \"\"\"\n\n def __new__(\n cls,\n name,\n X,\n Y,\n m=50,\n alpha=0.25,\n k=2,\n split_prior=None,\n **kwargs,\n ):\n\n cls.all_trees = []\n\n bart_op = type(\n f\"BART_{name}\",\n (BARTRV,),\n dict(\n name=\"BART\",\n all_trees=cls.all_trees,\n inplace=False,\n initval=Y.mean(),\n X=X,\n Y=Y,\n m=m,\n alpha=alpha,\n k=k,\n split_prior=split_prior,\n ),\n )()\n\n NoDistribution.register(BARTRV)\n\n cls.rv_op = bart_op\n params = [X, Y, m, alpha, k]\n return super().__new__(cls, name, *params, **kwargs)\n\n @classmethod\n def dist(cls, *params, **kwargs):\n return super().dist(params, **kwargs)\n"
] | [
[
"numpy.random.default_rng",
"numpy.zeros"
]
] |
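A hedged sketch of how the BART distribution defined above might sit inside a model (not part of the recorded file). It assumes a PyMC build in which BART is exposed at the package level and a compatible tree sampler is registered for it; the synthetic X, Y and the HalfNormal noise prior are invented.

# Hedged sketch: the synthetic data and surrounding model are assumptions; only the
# BART("mu", X, Y, m=..., alpha=..., k=...) signature comes from the code above.
import numpy as np
import pymc3 as pm

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))                          # covariate matrix
Y = X[:, 0] - 2.0 * X[:, 1] + rng.normal(scale=0.1, size=100)

with pm.Model():
    # Sum-of-trees prior over the regression function, per the docstring above.
    mu = pm.BART("mu", X, Y, m=50, alpha=0.25, k=2)
    sigma = pm.HalfNormal("sigma", 1.0)
    pm.Normal("obs", mu=mu, sigma=sigma, observed=Y)
    idata = pm.sample()                                # relies on the tree sampler for mu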
ybhan/Machine-Learning-Projects | [
"80301a774247bb6ad11cccaeef54e9ec588a61b0"
] | [
"Identification-of-Paintings-and-Style-Transfer/Identification/svm.py"
] | [
"from feature import *\r\nimport util\r\nimport numpy as np\r\n\r\nX, y, X_test, Xname, Xname_test = util.load_image()\r\ny = np.array(y)\r\nTheta = [0, np.pi / 6, np.pi / 3, np.pi / 2, 2 * np.pi / 3, 5 * np.pi / 6]\r\nSigma = [2, 3, 4, 5]\r\n\r\nX = util.resize(X, 11)\r\nX_test = util.resize(X_test, 11)\r\n\r\nA, B = feature_gabor_list(X, Theta, Sigma)\r\nT = gen_energy_array(A, B)\r\n\r\nA_test, B_test = feature_gabor_list(X_test, Theta, Sigma)\r\nT_test = gen_energy_array(A_test, B_test)\r\n\r\ns = np.std(T, 0)\r\nT1 = T / s\r\nT_test1 = T_test / s\r\n\r\n\r\ndef svm(T, y, T_test):\r\n from sklearn.svm import SVC\r\n clf = SVC()\r\n clf.fit(T, y)\r\n result = clf.predict(T_test)\r\n return result\r\n\r\n\r\ndef cross_validation(T, y):\r\n \"\"\"Use Cross Validation (leave-one-out) to select features.\r\n Args:\r\n T: feature statistics list\r\n y: labels\r\n \"\"\"\r\n from sklearn.model_selection import LeaveOneOut\r\n y = np.array(y)\r\n judge = list()\r\n\r\n for train_index, valid_index in LeaveOneOut().split(T):\r\n T_train = T[train_index]\r\n T_valid = T[valid_index]\r\n y_train = y[train_index]\r\n y_valid = y[valid_index]\r\n\r\n s = np.std(T_train, 0)\r\n T_train = T_train / s\r\n T_valid = T_valid / s\r\n ans = svm(T_train, y_train, T_valid)\r\n ans = ans.reshape(1, -1)[0]\r\n print(ans)\r\n\r\n if abs(ans[0] - y_valid[0]) < 0.5:\r\n judge.append(1)\r\n else:\r\n judge.append(0)\r\n return np.array(judge)\r\n"
] | [
[
"numpy.std",
"numpy.array",
"sklearn.model_selection.LeaveOneOut",
"sklearn.svm.SVC"
]
] |
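The script above depends on local feature and util modules that are not recorded here, so as a hedged stand-in the sketch below reproduces its core pattern - per-feature scaling by the training standard deviation, an SVC fit, and leave-one-out validation - on synthetic features. The array shapes and labels are invented.

# Hedged stand-in: synthetic T / y replace the Gabor-energy features built by the
# unrecorded feature/util modules; the SVC + LeaveOneOut pattern mirrors the script above.
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import LeaveOneOut

rng = np.random.default_rng(0)
T = rng.normal(size=(30, 48))               # 30 images x 48 energy statistics (invented)
y = rng.integers(0, 2, size=30)             # binary painter labels (invented)

judge = []
for train_idx, valid_idx in LeaveOneOut().split(T):
    s = np.std(T[train_idx], 0)             # scale by training-fold standard deviation
    clf = SVC().fit(T[train_idx] / s, y[train_idx])
    pred = clf.predict(T[valid_idx] / s)[0]
    judge.append(int(abs(pred - y[valid_idx][0]) < 0.5))

print("leave-one-out accuracy:", np.mean(judge))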
jorgecarleitao/connector-x | [
"8c37965b50190fa1addd52e8bdb71669b83f920b"
] | [
"connectorx-python/connectorx/tests/test_redshift.py"
] | [
"import os\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\nfrom .. import read_sql\n\n\[email protected](scope=\"module\") # type: ignore\ndef redshift_url() -> str:\n conn = os.environ[\"REDSHIFT_URL\"]\n return conn\n\[email protected](os.environ.get(\"TEST_COVER\", \"main\") != \"all\", reason=\"Only test main wire protocols unless TEST_COVER=all\")\ndef test_redshift_without_partition(redshift_url: str) -> None:\n query = \"SELECT * FROM test_table\"\n df = read_sql(redshift_url, query, protocol=\"cursor\")\n # result from redshift might have different order each time\n df.sort_values(by=\"test_int\", inplace=True, ignore_index=True)\n expected = pd.DataFrame(\n index=range(6),\n data={\n \"test_int\": pd.Series([0, 1, 2, 3, 4, 1314], dtype=\"Int64\"),\n \"test_nullint\": pd.Series([5, 3, None, 7, 9, 2], dtype=\"Int64\"),\n \"test_str\": pd.Series(\n [\"a\", \"str1\", \"str2\", \"b\", \"c\", None], dtype=\"object\"\n ),\n \"test_float\": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype=\"float64\"),\n \"test_bool\": pd.Series(\n [None, True, False, False, None, True], dtype=\"boolean\"\n ),\n },\n )\n assert_frame_equal(df, expected, check_names=True)\n\n\[email protected](os.environ.get(\"TEST_COVER\", \"main\") != \"all\", reason=\"Only test main wire protocols unless TEST_COVER=all\")\ndef test_redshift_with_partition(redshift_url: str) -> None:\n query = \"SELECT * FROM test_table\"\n df = read_sql(\n redshift_url,\n query,\n partition_on=\"test_int\",\n partition_range=(0, 2000),\n partition_num=3,\n protocol=\"cursor\"\n )\n # result from redshift might have different order each time\n df.sort_values(by=\"test_int\", inplace=True, ignore_index=True)\n expected = pd.DataFrame(\n index=range(6),\n data={\n \"test_int\": pd.Series([0, 1, 2, 3, 4, 1314], dtype=\"Int64\"),\n \"test_nullint\": pd.Series([5, 3, None, 7, 9, 2], dtype=\"Int64\"),\n \"test_str\": pd.Series(\n [\"a\", \"str1\", \"str2\", \"b\", \"c\", None], dtype=\"object\"\n ),\n \"test_float\": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype=\"float64\"),\n \"test_bool\": pd.Series(\n [None, True, False, False, None, True], dtype=\"boolean\"\n ),\n },\n )\n assert_frame_equal(df, expected, check_names=True)\n\n\[email protected](os.environ.get(\"TEST_COVER\", \"main\") != \"all\", reason=\"Only test main wire protocols unless TEST_COVER=all\")\ndef test_redshift_types(redshift_url: str) -> None:\n query = \"SELECT test_int16, test_char, test_time, test_datetime FROM test_types\"\n df = read_sql(redshift_url, query, protocol=\"cursor\")\n # result from redshift might have different order each time\n df.sort_values(by=\"test_int16\", inplace=True, ignore_index=True)\n expected = pd.DataFrame(\n index=range(4),\n data={\n \"test_int16\": pd.Series([0, 1, 2, 3], dtype=\"Int64\"),\n \"test_char\": pd.Series([\"a\", \"b\", \"c\", \"d\"], dtype=\"object\"),\n \"test_time\": pd.Series(\n [\"08:12:40\", \"10:03:00\", \"23:00:10\", \"18:30:00\"], dtype=\"object\"\n ),\n \"test_datetime\": pd.Series(\n [\n np.datetime64(\"2007-01-01T10:00:19\"),\n np.datetime64(\"2005-01-01T22:03:00\"),\n None,\n np.datetime64(\"1987-01-01T11:00:00\"),\n ], dtype=\"datetime64[ns]\"\n ),\n\n },\n )\n assert_frame_equal(df, expected, check_names=True)\n\n\[email protected](os.environ.get(\"TEST_COVER\", \"main\") != \"all\", reason=\"Only test main wire protocols unless TEST_COVER=all\")\ndef test_read_sql_on_utf8(redshift_url: str) -> None:\n query = \"SELECT * FROM test_str\"\n df = 
read_sql(redshift_url, query, protocol=\"cursor\")\n # result from redshift might have different order each time\n df.sort_values(by=\"id\", inplace=True, ignore_index=True)\n expected = pd.DataFrame(\n index=range(8),\n data={\n \"id\": pd.Series([0, 1, 2, 3, 4, 5, 6, 7], dtype=\"Int64\"),\n \"test_language\": pd.Series(\n [\n \"English\",\n \"中文\",\n \"日本語\",\n \"русский\",\n \"Emoji\",\n \"Latin1\",\n \"Extra\",\n \"Mixed\",\n ],\n dtype=\"object\",\n ),\n \"test_hello\": pd.Series(\n [\n \"Hello\",\n \"你好\",\n \"こんにちは\",\n \"Здра́вствуйте\",\n \"😁😂😜\",\n \"¥§¤®ð\",\n \"y̆\",\n \"Ha好ち😁ðy̆\",\n ],\n dtype=\"object\",\n ),\n },\n )\n assert_frame_equal(df, expected, check_names=True)\n"
] | [
[
"pandas.testing.assert_frame_equal",
"numpy.datetime64",
"pandas.Series"
]
] |
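A hedged sketch mirroring the read_sql calls exercised by the tests above (not part of the recorded file). The connection URL is a placeholder; the table name, partition column, and partition arguments are taken directly from the tests.

# Illustrative only: the URL is a placeholder, everything else mirrors the tests above.
import connectorx as cx

redshift_url = "redshift://user:password@host:5439/dbname"   # placeholder credentials

# Single read over the cursor protocol, as in test_redshift_without_partition.
df = cx.read_sql(redshift_url, "SELECT * FROM test_table", protocol="cursor")
df.sort_values(by="test_int", inplace=True, ignore_index=True)

# Partitioned read, as in test_redshift_with_partition: test_int is split into
# 3 ranges over (0, 2000) and the partitions are fetched in parallel.
df_part = cx.read_sql(
    redshift_url,
    "SELECT * FROM test_table",
    partition_on="test_int",
    partition_range=(0, 2000),
    partition_num=3,
    protocol="cursor",
)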
ettoremessina/differential-equations | [
"b0f74aa177e090ef654574e11af5e54e2c7b1472"
] | [
"ODEs/solver-demos/python/NeuroDiffEq/ode_2nd_ord_ivp_01.py"
] | [
"\"\"\"\nPlease see\nhttps://computationalmindset.com/en/neural-networks/ordinary-differential-equation-solvers.html#sys1\nfor details\n\"\"\"\n\n# x'' + x' + 2x = 0\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\n\nfrom neurodiffeq import diff\nfrom neurodiffeq.ode import solve\nfrom neurodiffeq.ode import IVP\nfrom neurodiffeq.ode import Monitor\nimport neurodiffeq.networks as ndenw\n\node_fn = lambda x, t: diff(x, t, order=2) + diff(x, t, order=1) + 2. * x\n\nan_sol = lambda t : \\\n\tnp.exp(-t/2.) * (np.cos(np.sqrt(7) * t / 2.) + \\\n\tnp.sin(np.sqrt(7) * t / 2.)/np.sqrt(7.))\n\nt_begin=0.\nt_end=12.\nt_nsamples=100\nt_space = np.linspace(t_begin, t_end, t_nsamples)\nx_init = IVP(t_0=t_begin, x_0=1.0, x_0_prime=0.0)\n\nx_an_sol = an_sol(t_space)\n\nnet = ndenw.FCNN(n_hidden_layers=6, n_hidden_units=50, actv=torch.nn.Tanh)\noptimizer = torch.optim.Adam(net.parameters(), lr=0.002)\nnum_sol, loss_sol = solve(ode_fn, x_init, t_min=t_begin, t_max=t_end,\n\tbatch_size=200,\n\tmax_epochs=500,\n\treturn_best=True,\n\tnet=net,\n\toptimizer=optimizer,\n\tmonitor=Monitor(t_min=t_begin, t_max=t_end, check_every=10))\nx_num_sol = num_sol(t_space, as_type='np')\n\nplt.figure()\nplt.plot(t_space, x_an_sol, '--', linewidth=2, label='analytical')\nplt.plot(t_space, x_num_sol, linewidth=1, label='numerical')\nplt.title('ODE 2nd order IVP solved by NeuroDiffEq by FCNN')\nplt.xlabel('t')\nplt.ylabel('x')\nplt.legend()\nplt.show()\n\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.exp",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
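As a hedged side-check (not part of the recorded script), the snippet below verifies symbolically that the closed-form an_sol used above really solves x'' + x' + 2x = 0 with x(0) = 1, x'(0) = 0; sympy is an added dependency introduced only for this check.

# Hedged verification of the analytical solution used in the script above.
import sympy as sp

t = sp.symbols('t')
x = sp.exp(-t / 2) * (sp.cos(sp.sqrt(7) * t / 2) + sp.sin(sp.sqrt(7) * t / 2) / sp.sqrt(7))

print(sp.simplify(sp.diff(x, t, 2) + sp.diff(x, t) + 2 * x))   # expected: 0
print(x.subs(t, 0))                                            # expected: 1
print(sp.simplify(sp.diff(x, t)).subs(t, 0))                   # expected: 0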
apahl/rdkit_ipynb_tools | [
"c259ac8ee75709becd2a5e67f9a913bd20e0ae38"
] | [
"rdkit_ipynb_tools/bokeh_tools.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n###########\nBokeh Tools\n###########\n\n*Created on 2015-12-12 by A. Pahl*\n\nBokeh plotting functionality for Mol_lists.\n\"\"\"\n\nimport colorsys\nimport math\n\nimport numpy as np\n\n# from bkcharts import Bar\nfrom bokeh.plotting import figure, ColumnDataSource\nimport bokeh.io as io\nfrom bokeh.models import HoverTool, OpenURL, TapTool\n\nAVAIL_COLORS = [\"#1F77B4\", \"firebrick\", \"goldenrod\", \"aqua\", \"brown\", \"chartreuse\", \"darkmagenta\"\n \"aquamarine\", \"blue\", \"red\", \"blueviolet\", \"darkorange\", \"forestgreen\", \"lime\"]\n# AVAIL_MARKERS: circle, diamond, triangle, square, inverted_triangle, asterisk,\n# circle_cross, circle_x, cross, diamond_cross, square_cross, square_x, asterisk, diamond\n\nio.output_notebook()\n\n\nclass ColorScale():\n\n def __init__(self, num_values, val_min, val_max, middle_color=\"yellow\", reverse=False):\n self.num_values = num_values\n self.num_val_1 = num_values - 1\n self.value_min = val_min\n self.value_max = val_max\n self.reverse = reverse\n self.value_range = self.value_max - self.value_min\n self.color_scale = []\n if middle_color.startswith(\"y\"): # middle color yellow\n hsv_tuples = [(0.0 + ((x * 0.35) / (self.num_val_1)), 0.99, 0.9) for x in range(self.num_values)]\n self.reverse = not self.reverse\n else: # middle color blue\n hsv_tuples = [(0.35 + ((x * 0.65) / (self.num_val_1)), 0.9, 0.9) for x in range(self.num_values)]\n rgb_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples)\n for rgb in rgb_tuples:\n rgb_int = [int(255 * x) for x in rgb]\n self.color_scale.append('#{:02x}{:02x}{:02x}'.format(*rgb_int))\n\n if self.reverse:\n self.color_scale.reverse()\n\n def __call__(self, value):\n \"\"\"return the color from the scale corresponding to the place in the value_min .. value_max range\"\"\"\n pos = int(((value - self.value_min) / self.value_range) * self.num_val_1)\n\n return self.color_scale[pos]\n\n\n def legend(self):\n \"\"\"Return the value_range and a list of tuples (value, color) to be used in a legend.\"\"\"\n legend = []\n for idx, color in enumerate(self.color_scale):\n val = self.value_min + idx / self.num_val_1 * self.value_range\n legend.append((val, color))\n\n return legend\n\n\nclass Chart():\n \"\"\"A Bokeh Plot.\"\"\"\n\n def __init__(self, kind=\"scatter\", **kwargs):\n \"\"\"Useful Chart kwargs:\n\n Parameters:\n xlabel (str): override the automatic x_axis_label. Default is None.\n ylabel (str): override the automatic y_axis_label. Default is None.\n callback (str): clicking on a point will link to the given HTML address. `@<IdProperty>` can be used as placeholder for the point id (e.g. Compound_Id). 
Default is None.\"\"\"\n\n self.data = {}\n self.kwargs = kwargs\n self.kind = kind\n self.height = kwargs.get(\"height\", 450)\n self.title = kwargs.get(\"title\", \"Scatter Plot\")\n self.position = kwargs.get(\"position\", kwargs.get(\"pos\", \"top_left\"))\n\n self.series_counter = 0\n self.tools_added = False\n tools = [\"pan\", \"wheel_zoom\", \"box_zoom\", \"reset\", \"resize\", \"save\"]\n self.callback = kwargs.get(\"callback\", None)\n if self.callback is not None:\n tools.append(\"tap\")\n\n self.plot = figure(plot_height=self.height, title=self.title, tools=tools)\n self.plot.axis.axis_label_text_font_size = \"14pt\"\n self.plot.axis.major_label_text_font_size = \"14pt\"\n self.plot.title.text_font_size = \"18pt\"\n if self.callback is not None:\n taptool = self.plot.select(type=TapTool)\n taptool.callback = OpenURL(url=self.callback)\n\n\n def _add_series(self, x, y, series, size, source):\n color = self.add_data_kwargs.get(\"color\", AVAIL_COLORS[self.series_counter])\n\n if self.series_counter == 0:\n self.plot_type = self.plot.circle\n\n elif self.series_counter == 1:\n self.plot_type = self.plot.diamond\n if isinstance(size, int):\n size += 3 # diamonds appear smaller than circles of the same size\n elif self.series_counter == 2:\n self.plot_type = self.plot.triangle\n elif self.series_counter == 4:\n self.plot_type = self.plot.inverted_triangle\n elif self.series_counter == 5:\n self.plot_type = self.plot.asterisk\n elif self.series_counter == 6:\n self.plot_type = self.plot.circle_cross\n elif self.series_counter == 7:\n self.plot_type = self.plot.circle_x\n elif self.series_counter == 8:\n self.plot_type = self.plot.cross\n elif self.series_counter == 9:\n self.plot_type = self.plot.diamond_cross\n elif self.series_counter == 10:\n self.plot_type = self.plot.square_cross\n elif self.series_counter == 11:\n self.plot_type = self.plot.square_x\n else:\n self.plot_type = self.plot.asterisk\n\n self.plot_type(x, y, legend=series, size=size, color=color, source=source)\n if self.add_data_kwargs.get(\"line\", False):\n self.plot.line(x, y, legend=series, color=color,\n line_width=self.add_data_kwargs.get(\"width\", 3), source=source)\n\n self.series_counter += 1\n if self.series_counter >= len(AVAIL_COLORS):\n print(\"* series overflow, starting again.\")\n self.series_counter = 0\n\n\n\n def add_data(self, d, x, y, **kwargs):\n \"\"\"Added line option. This does not work with the color_by option.\n\n Parameters:\n color, color_by, series, series_by, size, size_by;\n line (bool): whether to plot a line or not. Default is False.\n width (int): line width when line is plotted. 
Default is 3.\"\"\"\n\n colors = \"#1F77B4\"\n self.add_data_kwargs = kwargs\n series = kwargs.get(\"series\", None)\n if series is not None:\n series_by = \"Series\"\n else:\n series_by = kwargs.get(\"series_by\", None)\n\n color_by = kwargs.get(\"color_by\", None)\n size_by = kwargs.get(\"size_by\", None)\n pid = kwargs.get(\"pid\", None)\n\n tooltip = get_tooltip(x, y,\n pid,\n series,\n series_by,\n color_by,\n size_by,\n kwargs.get(\"tooltip\", None))\n\n if self.series_counter == 0:\n self.plot.add_tools(tooltip)\n\n self.plot.xaxis.axis_label = self.kwargs.get(\"xlabel\", x)\n self.plot.yaxis.axis_label = self.kwargs.get(\"ylabel\", y)\n\n if size_by is not None:\n size = \"{}_sizes\".format(size_by)\n d[size] = get_sizes_from_values(d[size_by])\n else:\n size = kwargs.get(\"radius\", kwargs.get(\"r\", kwargs.get(\"size\", kwargs.get(\"s\", 10))))\n\n reverse = kwargs.get(\"invert\", False)\n\n if series:\n d[\"x\"] = d[x]\n d[\"y\"] = d[y]\n d[\"series\"] = [series] * len(d[x])\n\n self._add_series(x, y, series, size=size, source=ColumnDataSource(d))\n\n elif series_by:\n series_keys = set()\n for idx, item in enumerate(d[series_by]):\n if item is None:\n d[series_by][idx] = \"None\"\n elif item is np.nan:\n d[series_by][idx] = \"NaN\"\n\n series_keys.add(d[series_by][idx])\n\n for series in series_keys:\n d_series = {x: [], y: [], \"series\": []}\n if size_by is not None:\n d_series[size_by] = []\n d_series[size] = []\n if pid is not None:\n d_series[pid] = []\n d_series[\"mol\"] = []\n for idx, el in enumerate(d[x]):\n if d[series_by][idx] == series:\n d_series[x].append(d[x][idx])\n d_series[y].append(d[y][idx])\n d_series[\"series\"].append(d[series_by][idx])\n if size_by is not None:\n d_series[size_by].append(d[size_by][idx])\n d_series[size].append(d[size][idx])\n if pid is not None:\n d_series[pid].append(d[pid][idx])\n d_series[\"mol\"].append(d[\"mol\"][idx])\n\n\n d_series[\"x\"] = d_series[x]\n d_series[\"y\"] = d_series[y]\n\n self._add_series(x, y, series, size=size, source=ColumnDataSource(d_series))\n\n\n elif color_by:\n color_by_min = min(d[color_by])\n color_by_max = max(d[color_by])\n color_scale = ColorScale(20, color_by_min, color_by_max, reverse)\n colors = []\n for val in d[color_by]:\n if val is not None and val is not np.nan:\n colors.append(color_scale(val))\n else:\n colors.append(\"black\")\n\n d[\"colors\"] = colors\n d[\"x\"] = d[x]\n d[\"y\"] = d[y]\n self.plot.circle(x, y, size=size, color=colors, source=ColumnDataSource(d))\n\n else:\n d[\"x\"] = d[x]\n d[\"y\"] = d[y]\n self.plot.circle(x, y, size=size, source=ColumnDataSource(d))\n if self.add_data_kwargs.get(\"line\", False):\n self.plot.line(x, y, line_width=self.add_data_kwargs.get(\"width\", 3),\n source=ColumnDataSource(d))\n\n\n def show(self):\n self.plot.legend.location = self.position\n io.show(self.plot)\n\n\nclass Hist():\n \"\"\"A Bokeh histogram. 
from numpy histogram and a Bokeh quad plot.\n The highlevel Bokeh Chart Histogram class gave false results on the y axis for me (as of 9-Mar-2016).\"\"\"\n\n def __init__(self, title=\"Histogram\", xlabel=\"Values\", ylabel=\"Occurrence\", **kwargs):\n \"\"\"Generates a histogram.\n Possible useful additional kwargs include: plot_width, plot_height, y_axis_type=\"log\",\n tick_size=\"14pt\".\"\"\"\n\n self.colors = [\"#FF596A\", \"#0066FF\", \"#00CC88\", \"#FFDD00\"]\n self.plot_no = -1\n self.kwargs = kwargs\n self.pos = \"top_left\"\n tick_size = self.kwargs.pop(\"tick_size\", \"14pt\")\n\n for arg in [\"pos\", \"position\"]:\n if arg in self.kwargs:\n self.pos = self.kwargs[arg]\n self.kwargs.pop(arg)\n\n self.plot = figure(title=title, **kwargs)\n self.plot.xaxis.axis_label = xlabel\n self.plot.yaxis.axis_label = ylabel\n self.plot.axis.major_label_text_font_size = tick_size\n self.plot.axis.axis_label_text_font_size = \"14pt\"\n self.plot.axis.major_label_text_font_size = \"14pt\"\n self.plot.title.text_font_size = \"18pt\"\n\n\n\n def add_data(self, data, bins=10, series=None, color=None, normed=False, **kwargs):\n \"\"\"Add actual data to the plot.\"\"\"\n # manage colors\n self.plot_no += 1\n if self.plot_no > len(self.colors) - 1:\n self.plot_no = 0\n if color is None:\n color = self.colors[self.plot_no]\n\n data = remove_nan(data)\n hist, edges = np.histogram(data, bins=bins)\n if normed:\n hist = normalize_largest_bin_to_one(hist)\n self.source = ColumnDataSource(data=dict(top=hist, left=edges[:-1], right=edges[1:]))\n\n if series is not None:\n self.plot.quad(top=\"top\", bottom=0, left=\"left\", right=\"right\",\n color=color, line_color=\"black\", alpha=0.5, legend=series,\n source=self.source)\n\n else:\n self.plot.quad(top=\"top\", bottom=0, left=\"left\", right=\"right\", color=color, line_color=\"black\", alpha=0.8, source=self.source)\n\n\n def show(self):\n self.plot.legend.location = self.pos\n io.show(self.plot)\n\n\n# def bar_chart(d, x, show=True, **kwargs):\n# \"\"\"Displays a bar chart for the occurrence of the given x-value.\n# This plot type is especially useful for plotting the occurrence of categorical data,\n# where only a small number (<= 10) of different values are present.\n# This function is directly calling the advanced bokeh bar chart type,\n# therefore no additional class is used.\n# Useful kwargs include: title, plot_height, plot_width.\"\"\"\n# title = kwargs.pop(\"title\", \"Occurrence of {}\".format(x))\n# p = Bar(d, x, values=x, agg=\"count\", legend=False, title=title, **kwargs)\n# p.yaxis.axis_label = \"Occurrence\"\n# p.axis.axis_label_text_font_size = \"14pt\"\n# p.axis.major_label_text_font_size = \"14pt\"\n# p.title.text_font_size = \"18pt\"\n# if show:\n# io.show(p)\n# else:\n# return p\n\n\ndef get_tooltip(x, y, pid=None, series=None, series_by=None, color_by=None, size_by=None, tooltip=None):\n if pid is not None:\n pid_tag = '<span style=\"font-size: 13px; color: #000000;\">{pid}: @{pid}</span><br>'.format(pid=pid)\n else:\n pid_tag = \"\"\n\n if size_by is not None:\n size_tag = '<span style=\"font-size: 13px; color: #000000;\">{size_by}: @{size_by} </span><br>'.format(size_by=size_by)\n else:\n size_tag = \"\"\n\n if series_by:\n series_tag = '<span style=\"font-size: 13px; color: #000000;\"><b>{series_by}: @series</b> </span><br>'.format(series_by=series_by)\n color_tag = \"\"\n elif color_by:\n series_tag = \"\"\n color_tag = '<span style=\"font-size: 13px; color: #000000;\">{color_by}: @{color_by} </span><span style=\"font-size: 
14px; color: @colors;\">⚫</span>'.format(color_by=color_by)\n else:\n color_tag = \"\"\n series_tag = \"\"\n\n if tooltip == \"struct\":\n templ = HoverTool(\n tooltips=\"\"\"\n <div>\n <div style=\"width: 200px; height: 200px;\">\n <img\n src=\"data:image/png;base64,@mol\"\n\n border=\"2\" alt=\"Mol\"\n ></img>\n </div>\n <div>\n {series_tag}{pid_tag}\n <span style=\"font-size: 13px; color: #000000;\">{x}: @x<br>\n {y}: @y</span><br>{color_tag}{size_tag}\n </div>\n </div>\n \"\"\".format(pid_tag=pid_tag, series_tag=series_tag, color_tag=color_tag, size_tag=size_tag, x=x, y=y)\n )\n else:\n templ = HoverTool(\n tooltips=\"\"\"\n <div>\n {series_tag}{pid_tag}\n <span style=\"font-size: 13px; color: #000000;\">{x}: @x<br>\n {y}: @y</span><br>{color_tag}{size_tag}\n </div>\n \"\"\".format(pid_tag=pid_tag, series_tag=series_tag, color_tag=color_tag, size_tag=size_tag, x=x, y=y)\n )\n # templ = HoverTool(tooltips=[(x, \"@x\"), (y, \"@y\")])\n\n return templ\n\n\ndef remove_nan(l):\n \"\"\"Remove Nans from a list for histograms.\"\"\"\n return [x for x in l if x is not np.nan]\n\n\ndef guess_id_prop(prop_list): # try to guess an id_prop\n for prop in prop_list:\n if prop.lower().endswith(\"id\"):\n return prop\n return None\n\n\ndef normalize_largest_bin_to_one(hist):\n \"\"\"Takes a Numpy histogram list and normalizes all values, so that the highest value becomes 1.0.\n Returns a new list.\"\"\"\n max_bin = max(hist)\n norm_hist = [b / max_bin for b in hist]\n return norm_hist\n\n\ndef get_bin_centers(edges):\n \"\"\"Returns a list of bin centers from a list of np.histogram edges.\n The returned centers are one element shorter than the provided edges list.\"\"\"\n l = len(edges)\n centers = []\n for idx in range(l - 1):\n center = (edges[idx] + edges[idx + 1]) / 2\n center = float(\"{:.3f}\".format(center)) # limit to three decimals\n centers.append(center)\n\n return centers\n\n\ndef get_sizes_from_values(values, min_size=10, max_size=60, log_scale=True):\n max_val = max(values)\n mult = max_size - min_size\n\n if log_scale:\n min_val = min(values) - 1\n norm = math.log10(max_val - min_val)\n sizes = [min_size + mult * math.log10(x - min_val) / norm for x in values]\n\n else:\n min_val = min(values)\n norm = max_val - min_val\n sizes = [min_size + mult * (x - min_val) / norm for x in values]\n\n return sizes\n\n\ndef cpd_scatter(df, x, y, r=7, pid=None, **kwargs):\n \"\"\"Predefined Plot #1.\n Quickly plot an RDKit Pandas dataframe or a molecule dictionary with structure tooltips.\"\"\"\n\n if not pid:\n if isinstance(df, dict):\n prop_list = df.keys()\n else:\n prop_list = [df.index.name]\n prop_list.extend(df.columns.values)\n\n pid = guess_id_prop(prop_list)\n\n callback = kwargs.pop(\"callback\", None)\n title = kwargs.pop(\"title\", \"Compound Scatter Plot\")\n scatter = Chart(title=title, r=r, callback=callback)\n scatter.add_data(df, x, y, pid=pid, **kwargs)\n return scatter.show()\n"
] | [
[
"numpy.histogram"
]
] |
danshirron/inference | [
"31ae9b30ca5b1081a2d35f73ffcde10ae1fdaf41"
] | [
"cloud/image_classification/python/main.py"
] | [
"\"\"\"\nmlperf inference benchmarking tool\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport json\nimport logging\nimport os\nimport threading\nimport time\nfrom queue import Queue\n\nimport mlperf_loadgen as lg\nimport numpy as np\n\nimport dataset\nimport imagenet\nimport coco\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(\"main\")\n\nNANO_SEC = 1e9\nMILLI_SEC = 1000\n\n# pylint: disable=missing-docstring\n\n# the datasets we support\nSUPPORTED_DATASETS = {\n \"imagenet\":\n (imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),\n {\"image_size\": [224, 224, 3]}),\n \"imagenet_mobilenet\":\n (imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),\n {\"image_size\": [224, 224, 3]}),\n \"coco\":\n (coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),\n {\"image_size\": [-1, -1, 3]}),\n \"coco-300\":\n (coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),\n {\"image_size\": [300, 300, 3]}),\n \"coco-1200\":\n (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),\n {\"image_size\": [1200, 1200, 3]}),\n}\n\n# pre-defined command line options so simplify things. They are used as defaults and can be\n# overwritten from command line\nDEFAULT_LATENCY_BUCKETS = \"0.010,0.050,0.100,0.200,0.400\"\n\nSUPPORTED_PROFILES = {\n \"defaults\": {\n \"dataset\": \"imagenet\",\n \"backend\": \"tensorflow\",\n \"cache\": 0,\n \"time\": 128,\n \"max-latency\": DEFAULT_LATENCY_BUCKETS,\n },\n\n # resnet\n \"resnet50-tf\": {\n \"inputs\": \"input_tensor:0\",\n \"outputs\": \"ArgMax:0\",\n \"dataset\": \"imagenet\",\n \"backend\": \"tensorflow\",\n },\n \"resnet50-onnxruntime\": {\n \"dataset\": \"imagenet\",\n \"outputs\": \"ArgMax:0\",\n \"backend\": \"onnxruntime\",\n },\n\n # mobilenet\n \"mobilenet-tf\": {\n \"inputs\": \"input:0\",\n \"outputs\": \"MobilenetV1/Predictions/Reshape_1:0\",\n \"dataset\": \"imagenet_mobilenet\",\n \"backend\": \"tensorflow\",\n },\n \"mobilenet-onnxruntime\": {\n \"dataset\": \"imagenet_mobilenet\",\n \"outputs\": \"MobilenetV1/Predictions/Reshape_1:0\",\n \"backend\": \"onnxruntime\",\n },\n\n # ssd-mobilenet\n \"ssd-mobilenet-tf\": {\n \"inputs\": \"image_tensor:0\",\n \"outputs\": \"num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0\",\n \"dataset\": \"coco-300\",\n \"backend\": \"tensorflow\",\n },\n \"ssd-mobilenet-onnxruntime\": {\n \"dataset\": \"coco-300\",\n \"outputs\": \"num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0\",\n \"backend\": \"onnxruntime\",\n \"data-format\": \"NHWC\",\n },\n\n # ssd-resnet34\n \"ssd-resnet34-tf\": {\n \"inputs\": \"0:0\",\n \"outputs\": \"concat_63:0,concat_64:0\",\n \"dataset\": \"coco-1200\",\n \"backend\": \"tensorflow\",\n },\n \"ssd-resnet34-onnxruntime\": {\n \"dataset\": \"coco-1200\",\n \"inputs\": \"image\",\n \"outputs\": \"bboxes,labels,scores\",\n \"backend\": \"onnxruntime\",\n \"data-format\": \"NCHW\",\n },\n}\n\nSCENARIO_MAP = {\n \"SingleStream\": lg.TestScenario.SingleStream,\n \"MultiStream\": lg.TestScenario.MultiStream,\n \"Server\": lg.TestScenario.Server,\n \"Offline\": lg.TestScenario.Offline,\n}\n\nlast_timeing = []\n\n\ndef get_args():\n \"\"\"Parse commandline.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset\", choices=SUPPORTED_DATASETS.keys(), help=\"dataset\")\n 
parser.add_argument(\"--dataset-path\", required=True, help=\"path to the dataset\")\n parser.add_argument(\"--dataset-list\", help=\"path to the dataset list\")\n parser.add_argument(\"--data-format\", choices=[\"NCHW\", \"NHWC\"], help=\"data format\")\n parser.add_argument(\"--profile\", choices=SUPPORTED_PROFILES.keys(), help=\"standard profiles\")\n parser.add_argument(\"--scenario\", default=\"SingleStream\",\n help=\"benchmark scenario, list of \" + str(list(SCENARIO_MAP.keys())))\n parser.add_argument(\"--model\", required=True, help=\"model file\")\n parser.add_argument(\"--output\", help=\"test results\")\n parser.add_argument(\"--inputs\", help=\"model inputs\")\n parser.add_argument(\"--outputs\", help=\"model outputs\")\n parser.add_argument(\"--backend\", help=\"runtime to use\")\n parser.add_argument(\"--threads\", default=os.cpu_count(), type=int, help=\"threads\")\n parser.add_argument(\"--time\", type=int, help=\"time to scan in seconds\")\n parser.add_argument(\"--count\", type=int, help=\"dataset items to use\")\n parser.add_argument(\"--qps\", type=int, default=10, help=\"target qps estimate\")\n parser.add_argument(\"--max-latency\", type=str, help=\"max latency in 99pct tile\")\n parser.add_argument(\"--cache\", type=int, default=0, help=\"use cache\")\n parser.add_argument(\"--accuracy\", action=\"store_true\", help=\"enable accuracy pass\")\n args = parser.parse_args()\n\n # don't use defaults in argparser. Instead we default to a dict, override that with a profile\n # and take this as default unless command line give\n defaults = SUPPORTED_PROFILES[\"defaults\"]\n\n if args.profile:\n profile = SUPPORTED_PROFILES[args.profile]\n defaults.update(profile)\n for k, v in defaults.items():\n kc = k.replace(\"-\", \"_\")\n if getattr(args, kc) is None:\n setattr(args, kc, v)\n if args.inputs:\n args.inputs = args.inputs.split(\",\")\n if args.outputs:\n args.outputs = args.outputs.split(\",\")\n if args.max_latency:\n args.max_latency = [float(i) for i in args.max_latency.split(\",\")]\n try:\n args.scenario = [SCENARIO_MAP[scenario] for scenario in args.scenario.split(\",\")]\n except:\n parser.error(\"valid scanarios:\" + str(list(SCENARIO_MAP.keys())))\n return args\n\n\ndef get_backend(backend):\n if backend == \"tensorflow\":\n from backend_tf import BackendTensorflow\n backend = BackendTensorflow()\n elif backend == \"onnxruntime\":\n from backend_onnxruntime import BackendOnnxruntime\n backend = BackendOnnxruntime()\n elif backend == \"null\":\n from backend_null import BackendNull\n backend = BackendNull()\n elif backend == \"pytorch\":\n from backend_pytorch import BackendPytorch\n backend = BackendPytorch()\n elif backend == \"tflite\":\n from backend_tflite import BackendTflite\n backend = BackendTflite()\n else:\n raise ValueError(\"unknown backend: \" + backend)\n return backend\n\n\nclass Item:\n \"\"\"An item that we queue for processing by the thread pool.\"\"\"\n\n def __init__(self, query_id, content_id, img, label=None):\n self.query_id = query_id\n self.content_id = content_id\n self.img = img\n self.label = label\n self.start = time.time()\n\n\nclass Runner:\n def __init__(self, model, ds, threads, post_proc=None):\n self.tasks = Queue(maxsize=threads * 5)\n self.workers = []\n self.model = model\n self.post_process = post_proc\n self.threads = threads\n self.result_dict = {}\n self.take_accuracy = False\n self.ds = ds\n\n def handle_tasks(self, tasks_queue):\n \"\"\"Worker thread.\"\"\"\n while True:\n qitem = tasks_queue.get()\n if qitem is 
None:\n # None in the queue indicates the parent want us to exit\n tasks_queue.task_done()\n break\n\n try:\n # run the prediction\n results = self.model.predict({self.model.inputs[0]: qitem.img})\n if self.take_accuracy:\n response = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)\n except Exception as ex: # pylint: disable=broad-except\n src = [self.ds.get_item_loc(i) for i in qitem.content_id]\n log.error(\"thread: failed on contentid=%s, %s\", src, ex)\n finally:\n response = []\n for query_id in qitem.query_id:\n # FIXME: unclear what to return here\n response.append(lg.QuerySampleResponse(query_id, 0, 0))\n lg.QuerySamplesComplete(response)\n tasks_queue.task_done()\n\n def handle_tasks_nolg(self, tasks_queue):\n \"\"\"Worker thread.\"\"\"\n while True:\n qitem = tasks_queue.get()\n if qitem is None:\n # None in the queue indicates the parent want us to exit\n tasks_queue.task_done()\n break\n\n try:\n # run the prediction\n start = time.time()\n results = self.model.predict({self.model.inputs[0]: qitem.img})\n self.result_dict[\"timing\"].append(time.time() - start)\n if self.take_accuracy:\n response = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)\n except Exception as ex: # pylint: disable=broad-except\n src = [self.ds.get_item_loc(i) for i in qitem.content_id]\n log.error(\"thread: failed on contentid=%s, %s\", src, ex)\n finally:\n tasks_queue.task_done()\n\n def start_pool(self, nolg=False):\n if nolg:\n handler =self.handle_tasks_nolg\n else:\n handler =self.handle_tasks\n for _ in range(self.threads):\n worker = threading.Thread(target=handler, args=(self.tasks,))\n worker.daemon = True\n self.workers.append(worker)\n worker.start()\n\n def start_run(self, result_dict, take_accuracy):\n self.result_dict = result_dict\n self.take_accuracy = take_accuracy\n self.post_process.start()\n\n def enqueue(self, id, ids, data, label):\n item = Item(id, ids, data, label)\n self.tasks.put(item)\n\n def finish(self):\n # exit all threads\n for _ in self.workers:\n self.tasks.put(None)\n for worker in self.workers:\n worker.join()\n\n\ndef add_results(final_results, name, result_dict, result_list, took):\n percentiles = [50., 80., 90., 95., 99., 99.9]\n buckets = np.percentile(result_list, percentiles).tolist()\n buckets_str = \",\".join([\"{}:{:.4f}\".format(p, b) for p, b in zip(percentiles, buckets)])\n\n if result_dict[\"total\"] == 0:\n result_dict[\"total\"] = len(result_list)\n\n # this is what we record for each run\n result = {\n \"mean\": np.mean(result_list),\n \"took\": took,\n \"qps\": len(result_list) / took,\n \"count\": len(result_list),\n \"percentiles\": {str(k): v for k, v in zip(percentiles, buckets)},\n \"good_items\": result_dict[\"good\"],\n \"total_items\": result_dict[\"total\"],\n \"accuracy\": 100. 
* result_dict[\"good\"] / result_dict[\"total\"],\n }\n mAP = \"\"\n if \"mAP\" in result_dict:\n result[\"mAP\"] = result_dict[\"mAP\"]\n mAP = \", mAP={:.2f}\".format(result_dict[\"mAP\"])\n\n # add the result to the result dict\n final_results[name] = result\n\n # to stdout\n print(\"{} qps={:.2f}, mean={:.6f}, time={:.2f}, acc={:.2f}{}, queries={}, tiles={}\".format(\n name, result[\"qps\"], result[\"mean\"], took, result[\"accuracy\"], mAP,\n len(result_list), buckets_str))\n\n\ndef main():\n global last_timeing\n args = get_args()\n\n log.info(args)\n\n # find backend\n backend = get_backend(args.backend)\n\n # override image format if given\n image_format = args.data_format if args.data_format else backend.image_format()\n\n # dataset to use\n wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]\n ds = wanted_dataset(data_path=args.dataset_path,\n image_list=args.dataset_list,\n name=args.dataset,\n image_format=image_format,\n pre_process=pre_proc,\n use_cache=args.cache,\n count=args.count, **kwargs)\n\n # load model to backend\n model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)\n\n final_results = {\n \"runtime\": model.name(),\n \"version\": model.version(),\n \"time\": int(time.time()),\n \"cmdline\": str(args),\n }\n\n #\n # make one pass over the dataset to validate accuracy\n #\n count = args.count if args.count else ds.get_item_count()\n\n runner = Runner(model, ds, args.threads, post_proc=post_proc)\n\n #\n # warmup\n #\n log.info(\"warmup ...\")\n ds.load_query_samples([0])\n for _ in range(5):\n img, _ = ds.get_samples([0])\n _ = backend.predict({backend.inputs[0]: img})\n ds.unload_query_samples(None)\n\n if args.accuracy:\n #\n # accuracy pass\n #\n log.info(\"starting accuracy pass on {} items\".format(count))\n runner.start_pool(nolg=True)\n result_dict = {\"good\": 0, \"total\": 0, \"scenario\": \"Accuracy\", \"timing\": []}\n runner.start_run(result_dict, True)\n start = time.time()\n for idx in range(0, count):\n ds.load_query_samples([idx])\n data, label = ds.get_samples([idx])\n runner.enqueue([idx], [idx], data, label)\n runner.finish()\n # aggregate results\n post_proc.finalize(result_dict, ds, output_dir=os.path.dirname(args.output))\n last_timeing = result_dict[\"timing\"]\n del result_dict[\"timing\"]\n add_results(final_results, \"Accuracy\", result_dict, last_timeing, time.time() - start)\n\n #\n # run the benchmark with timing\n #\n runner.start_pool()\n\n def issue_query(query_samples):\n idx = [q.index for q in query_samples]\n query_id = [q.id for q in query_samples]\n data, label = ds.get_samples(idx)\n runner.enqueue(query_id, idx, data, label)\n\n def process_latencies(latencies_ns):\n global last_timeing\n last_timeing = [t / 1e9 for t in latencies_ns]\n\n sut = lg.ConstructSUT(issue_query, process_latencies)\n qsl = lg.ConstructQSL(count, min(count, 1000), ds.load_query_samples, ds.unload_query_samples)\n\n for scenario in args.scenario:\n for target_latency in args.max_latency:\n log.info(\"starting {}, latency={}\".format(scenario, target_latency))\n settings = lg.TestSettings()\n settings.scenario = scenario\n\n if args.qps:\n settings.enable_spec_overrides = True\n qps = float(args.qps)\n settings.server_target_qps = qps\n settings.offline_expected_qps = qps\n\n if args.time:\n settings.enable_spec_overrides = True\n settings.override_min_duration_ms = args.time * MILLI_SEC\n settings.override_max_duration_ms = args.time * MILLI_SEC\n qps = args.qps or 100\n settings.override_min_query_count 
= qps * args.time\n settings.override_max_query_count = qps * args.time\n\n if args.time or args.qps:\n settings.mode = lg.TestMode.PerformanceOnly\n # FIXME: add SubmissionRun once available\n\n settings.enable_spec_overrides = True\n settings.single_stream_expected_latency_ns = int(target_latency * NANO_SEC)\n settings.override_target_latency_ns = int(target_latency * NANO_SEC)\n\n result_dict = {\"good\": 0, \"total\": 0, \"scenario\": str(scenario)}\n runner.start_run(result_dict, False)\n lg.StartTest(sut, qsl, settings)\n\n add_results(final_results, \"{}-{}\".format(scenario, target_latency),\n result_dict, last_timeing, time.time() - ds.last_loaded)\n\n #\n # write final results\n #\n if args.output:\n with open(args.output, \"w\") as f:\n json.dump(final_results, f, sort_keys=True, indent=4)\n\n runner.finish()\n lg.DestroyQSL(qsl)\n lg.DestroySUT(sut)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.percentile",
"numpy.mean"
]
] |
xren1982/pytorch-image-models | [
"8c9814e3f500e8b37aae86dd4db10aba2c295bd2"
] | [
"timm/models/rexnet.py"
] | [
"\"\"\" ReXNet\n\nA PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` -\nhttps://arxiv.org/abs/2007.00992\n\nAdapted from original impl at https://github.com/clovaai/rexnet\nCopyright (c) 2020-present NAVER Corp. MIT license\n\nChanges for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman\nCopyright 2020 Ross Wightman\n\"\"\"\n\nimport torch.nn as nn\nfrom math import ceil\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom .helpers import build_model_with_cfg\nfrom .layers import ClassifierHead, create_act_layer, ConvBnAct\nfrom .registry import register_model\n\n\ndef _cfg(url=''):\n return {\n 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.875, 'interpolation': 'bicubic',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'stem.conv', 'classifier': 'head.fc',\n }\n\n\ndefault_cfgs = dict(\n rexnet_100=_cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth'),\n rexnet_130=_cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth'),\n rexnet_150=_cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth'),\n rexnet_200=_cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth'),\n rexnetr_100=_cfg(\n url=''),\n rexnetr_130=_cfg(\n url=''),\n rexnetr_150=_cfg(\n url=''),\n rexnetr_200=_cfg(\n url=''),\n)\n\n\ndef make_divisible(v, divisor=8, min_value=None):\n min_value = min_value or divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n return new_v\n\n\nclass SEWithNorm(nn.Module):\n\n def __init__(self, channels, reduction=16, act_layer=nn.ReLU, divisor=1, reduction_channels=None,\n gate_layer='sigmoid'):\n super(SEWithNorm, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n reduction_channels = reduction_channels or make_divisible(channels // reduction, divisor=divisor)\n self.fc1 = nn.Conv2d(\n channels, reduction_channels, kernel_size=1, padding=0, bias=True)\n self.bn = nn.BatchNorm2d(reduction_channels)\n self.act = act_layer(inplace=True)\n self.fc2 = nn.Conv2d(\n reduction_channels, channels, kernel_size=1, padding=0, bias=True)\n self.gate = create_act_layer(gate_layer)\n\n def forward(self, x):\n x_se = self.avg_pool(x)\n x_se = self.fc1(x_se)\n x_se = self.bn(x_se)\n x_se = self.act(x_se)\n x_se = self.fc2(x_se)\n return x * self.gate(x_se)\n\n\nclass LinearBottleneck(nn.Module):\n def __init__(self, in_chs, out_chs, stride, exp_ratio=1.0, use_se=True, se_rd=12, ch_div=1):\n super(LinearBottleneck, self).__init__()\n self.use_shortcut = stride == 1 and in_chs <= out_chs\n self.in_channels = in_chs\n self.out_channels = out_chs\n\n if exp_ratio != 1.:\n dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div)\n self.conv_exp = ConvBnAct(in_chs, dw_chs, act_layer=\"swish\")\n else:\n dw_chs = in_chs\n self.conv_exp = None\n\n self.conv_dw = ConvBnAct(dw_chs, dw_chs, 3, stride=stride, groups=dw_chs, apply_act=False)\n self.se = SEWithNorm(dw_chs, reduction=se_rd, divisor=ch_div) if use_se else None\n self.act_dw = nn.ReLU6()\n\n self.conv_pwl = ConvBnAct(dw_chs, out_chs, 1, apply_act=False)\n\n def feat_channels(self, exp=False):\n return self.conv_dw.out_channels if exp else self.out_channels\n\n def 
forward(self, x):\n shortcut = x\n if self.conv_exp is not None:\n x = self.conv_exp(x)\n x = self.conv_dw(x)\n if self.se is not None:\n x = self.se(x)\n x = self.act_dw(x)\n x = self.conv_pwl(x)\n if self.use_shortcut:\n x[:, 0:self.in_channels] += shortcut\n return x\n\n\ndef _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, use_se=True, ch_div=1):\n layers = [1, 2, 2, 3, 3, 5]\n strides = [1, 2, 2, 2, 1, 2]\n layers = [ceil(element * depth_mult) for element in layers]\n strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], [])\n exp_ratios = [1] * layers[0] + [6] * sum(layers[1:])\n depth = sum(layers[:]) * 3\n base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs\n\n # The following channel configuration is a simple instance to make each layer become an expand layer.\n out_chs_list = []\n for i in range(depth // 3):\n out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div))\n base_chs += final_chs / (depth // 3 * 1.0)\n\n if use_se:\n use_ses = [False] * (layers[0] + layers[1]) + [True] * sum(layers[2:])\n else:\n use_ses = [False] * sum(layers[:])\n\n return zip(out_chs_list, exp_ratios, strides, use_ses)\n\n\ndef _build_blocks(block_cfg, prev_chs, width_mult, se_rd=12, ch_div=1, feature_location='bottleneck'):\n feat_exp = feature_location == 'expansion'\n feat_chs = [prev_chs]\n feature_info = []\n curr_stride = 2\n features = []\n for block_idx, (chs, exp_ratio, stride, se) in enumerate(block_cfg):\n if stride > 1:\n fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}'\n if block_idx > 0 and feat_exp:\n fname += '.act_dw'\n feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)]\n curr_stride *= stride\n features.append(LinearBottleneck(\n in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, use_se=se, se_rd=se_rd, ch_div=ch_div))\n prev_chs = chs\n feat_chs += [features[-1].feat_channels(feat_exp)]\n pen_chs = make_divisible(1280 * width_mult, divisor=ch_div)\n feature_info += [dict(\n num_chs=pen_chs if feat_exp else feat_chs[-1], reduction=curr_stride,\n module=f'features.{len(features) - int(not feat_exp)}')]\n features.append(ConvBnAct(prev_chs, pen_chs, act_layer=\"swish\"))\n return features, feature_info\n\n\nclass ReXNetV1(nn.Module):\n def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32,\n initial_chs=16, final_chs=180, width_mult=1.0, depth_mult=1.0, use_se=True,\n se_rd=12, ch_div=1, drop_rate=0.2, feature_location='bottleneck'):\n super(ReXNetV1, self).__init__()\n self.drop_rate = drop_rate\n\n assert output_stride == 32 # FIXME support dilation\n stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32\n stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div)\n self.stem = ConvBnAct(in_chans, stem_chs, 3, stride=2, act_layer='swish')\n\n block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, use_se, ch_div)\n features, self.feature_info = _build_blocks(\n block_cfg, stem_chs, width_mult, se_rd, ch_div, feature_location)\n self.num_features = features[-1].out_channels\n self.features = nn.Sequential(*features)\n\n self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate)\n\n # FIXME weight init, the original appears to use PyTorch defaults\n\n def get_classifier(self):\n return self.head.fc\n\n def reset_classifier(self, num_classes, global_pool='avg'):\n self.head = ClassifierHead(self.num_features, num_classes, 
pool_type=global_pool, drop_rate=self.drop_rate)\n\n def forward_features(self, x):\n x = self.stem(x)\n x = self.features(x)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n\ndef _create_rexnet(variant, pretrained, **kwargs):\n feature_cfg = dict(flatten_sequential=True)\n if kwargs.get('feature_location', '') == 'expansion':\n feature_cfg['feature_cls'] = 'hook'\n return build_model_with_cfg(\n ReXNetV1, variant, pretrained, default_cfg=default_cfgs[variant], feature_cfg=feature_cfg, **kwargs)\n\n\n@register_model\ndef rexnet_100(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.0x\"\"\"\n return _create_rexnet('rexnet_100', pretrained, **kwargs)\n\n\n@register_model\ndef rexnet_130(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.3x\"\"\"\n return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs)\n\n\n@register_model\ndef rexnet_150(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.5x\"\"\"\n return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs)\n\n\n@register_model\ndef rexnet_200(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 2.0x\"\"\"\n return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs)\n\n\n@register_model\ndef rexnetr_100(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.0x w/ rounded (mod 8) channels\"\"\"\n return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs)\n\n\n@register_model\ndef rexnetr_130(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.3x w/ rounded (mod 8) channels\"\"\"\n return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs)\n\n\n@register_model\ndef rexnetr_150(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.5x w/ rounded (mod 8) channels\"\"\"\n return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs)\n\n\n@register_model\ndef rexnetr_200(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 2.0x w/ rounded (mod 8) channels\"\"\"\n return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs)\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU6",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
]
] |
acadien/lazyPlot | [
"bb36dc237bb11febfe6ea6e59c9c79167238beab"
] | [
"smoothing.py"
] | [
"#!/usr/bin/python\n\nfrom numpy import *\nfrom scipy import weave\nfrom scipy.weave import converters\n\n#uses a guassian smooth convoluted with finite differences to get an absurdly smooth line but with edge effects\nsuperSmoothCode=\"\"\"\ndouble pre=0.3989422804014327/sigma;\ndouble dx,xmus;\n\nfor(int a=0;a<N;a++){\n for(int b=0;b<N;b++){\n if(b==0)\n dx = xs[b+1]-xs[b];\n if(b==N-1)\n dx = xs[b]-xs[b-1];\n if(b>1 && b<N-1)\n dx = (xs[b+1]-xs[b-1])/2.0;\n\n xmus = (xs[a]-xs[b])/sigma;\n smoothys[a] += pre * exp( xmus * xmus * -0.5) * ys[b] * dx;\n}}\n\"\"\"\ndef superSmooth(xs,ys,sigma=0.1):\n N=len(ys)\n smoothys=zeros(N)\n xs=array(xs)\n ys=array(ys)\n weave.inline(superSmoothCode,['xs','ys','N','smoothys','sigma'])\n return smoothys\n\n#1D data\ndef windowAvg(a,n=11,option='same'):\n #a: the list/array to run the window average over\n #n: the size of the window\n return convolve(a, ones(n)/n,option)\n"
] | [
[
"scipy.weave.inline"
]
] |
alexeyyakimovich/DeepPavlov | [
"f10b9485c118cdec69e73c89833a1a5a164404de"
] | [
"deeppavlov/models/classifiers/utils.py"
] | [
"# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom logging import getLogger\nfrom typing import List\n\nimport numpy as np\n\nlog = getLogger(__name__)\n\n\ndef labels2onehot(labels: [List[str], List[List[str]], np.ndarray], classes: [list, np.ndarray]) -> np.ndarray:\n \"\"\"\n Convert labels to one-hot vectors for multi-class multi-label classification\n\n Args:\n labels: list of samples where each sample is a class or a list of classes which sample belongs with\n classes: array of classes' names\n\n Returns:\n 2d array with one-hot representation of given samples\n \"\"\"\n n_classes = len(classes)\n y = []\n for sample in labels:\n curr = np.zeros(n_classes)\n if isinstance(sample, list):\n for intent in sample:\n if intent not in classes:\n log.warning('Unknown intent {} detected. Assigning no class'.format(intent))\n else:\n curr[np.where(np.array(classes) == intent)[0]] = 1\n else:\n curr[np.where(np.array(classes) == sample)[0]] = 1\n y.append(curr)\n y = np.asarray(y)\n return y\n\n\ndef proba2labels(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> List[List]:\n \"\"\"\n Convert vectors of probabilities to labels using confident threshold\n (if probability to belong with the class is bigger than confident_threshold, sample belongs with the class;\n if no probabilities bigger than confident threshold, sample belongs with the class with the biggest probability)\n\n Args:\n proba: list of samples where each sample is a vector of probabilities to belong with given classes\n confident_threshold (float): boundary of probability to belong with a class\n classes: array of classes' names\n\n Returns:\n list of lists of labels for each sample\n \"\"\"\n y = []\n for sample in proba:\n to_add = np.where(sample > confident_threshold)[0]\n if len(to_add) > 0:\n y.append(np.array(classes)[to_add].tolist())\n else:\n y.append(np.array([np.array(classes)[np.argmax(sample)]]).tolist())\n\n return y\n\n\ndef proba2onehot(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> np.ndarray:\n \"\"\"\n Convert vectors of probabilities to one-hot representations using confident threshold\n\n Args:\n proba: samples where each sample is a vector of probabilities to belong with given classes\n confident_threshold: boundary of probability to belong with a class\n classes: array of classes' names\n\n Returns:\n 2d array with one-hot representation of given samples\n \"\"\"\n return labels2onehot(proba2labels(proba, confident_threshold, classes), classes)\n"
] | [
[
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"numpy.where",
"numpy.argmax"
]
] |
wavestoweather/enstools | [
"d0f612b0187b0ad54dfbbb78aa678564f46eaedf"
] | [
"tests/test_io_read_01.py"
] | [
"from numpy.testing import assert_equal\nimport xarray\nimport tempfile\nimport numpy\nimport os\nimport shutil\nimport enstools.io\nimport pytest\n\n\[email protected]\ndef test_dir():\n \"\"\"\n name of the test directoy\n \"\"\"\n test_dir = tempfile.mkdtemp()\n yield test_dir\n\n # cleanup\n shutil.rmtree(test_dir)\n\[email protected]\ndef file1(test_dir):\n \"\"\"\n create a file with defined content for testing, netcdf\n \"\"\"\n # first file\n ds = xarray.DataArray(numpy.random.rand(7, 5, 6),\n coords={\"lon\": numpy.linspace(1, 5, 5),\n \"lat\": numpy.linspace(1, 6, 6),\n \"time\": numpy.linspace(1, 7, 7)\n },\n dims=[\"time\", \"lon\", \"lat\"],\n name=\"noise\")\n filename = os.path.join(test_dir, \"01.nc\")\n ds.to_netcdf(filename)\n return filename\n\[email protected]\ndef file2(test_dir):\n \"\"\"\n create a file with defined content for testing, HDF5\n \"\"\"\n # second file\n ds = xarray.DataArray(numpy.random.rand(7, 5, 6),\n coords={\"lon\": numpy.linspace(1, 5, 5),\n \"lat\": numpy.linspace(1, 6, 6),\n \"time\": numpy.linspace(8, 14, 7)\n },\n dims=[\"time\", \"lon\", \"lat\"],\n name=\"noise\")\n filename = os.path.join(test_dir, \"02.nc\")\n ds.to_netcdf(filename, engine=\"h5netcdf\")\n return filename\n\n\ndef test_read_single_file(file1):\n \"\"\"\n read one netcdf file\n \"\"\"\n ds = enstools.io.read(file1)\n assert_equal(ds[\"noise\"].shape, (7, 5, 6))\n assert_equal(ds[\"noise\"].dims, (\"time\", \"lon\", \"lat\"))\n\n\ndef test_read_multiple_files(file1, file2):\n \"\"\"\n read two netcdf files\n \"\"\"\n ds = enstools.io.read([file1, file2])\n assert_equal(ds[\"noise\"].shape, (14, 5, 6))\n assert_equal(ds[\"noise\"].dims, (\"time\", \"lon\", \"lat\"))\n numpy.testing.assert_array_equal(ds[\"noise\"].coords[\"time\"], numpy.linspace(1, 14, 14))\n\n # same test with pattern\n ds = enstools.io.read(os.path.dirname(file1) + \"/??.nc\")\n assert_equal(ds[\"noise\"].shape, (14, 5, 6))\n assert_equal(ds[\"noise\"].dims, (\"time\", \"lon\", \"lat\"))\n numpy.testing.assert_array_equal(ds[\"noise\"].coords[\"time\"], numpy.linspace(1, 14, 14))\n\n\ndef test_open_with_wrong_argument():\n \"\"\"\n try to open an something with an unsupported argument type\n \"\"\"\n with numpy.testing.assert_raises(NotImplementedError):\n ds = enstools.io.read(None)\n"
] | [
[
"numpy.linspace",
"numpy.testing.assert_raises",
"numpy.random.rand",
"numpy.testing.assert_equal"
]
] |
qe-team/marmot | [
"38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff"
] | [
"marmot/evaluation/evaluation_utils.py"
] | [
"from __future__ import print_function, division\nfrom sklearn.metrics import f1_score\n\n\n\ndef write_res_to_file(test_file, test_predictions, output_file=''):\n if output_file == '':\n output_file = test_file+'.predictions'\n\n output = open(output_file, 'w')\n try:\n for idx, line in enumerate(open(test_file)):\n chunks = line.decode('utf-8').strip().split('\\t')\n prefix = u'\\t'.join(chunks[:5])\n # TODO: check if number of strings and predictions match\n output.write('%s\\t%s\\n' % (prefix.encode('utf-8'), test_predictions[idx].encode('utf-8')))\n except IndexError:\n print('Predictions size:', len(test_predictions), ', current number: ', idx)\n finally:\n output.close()\n\n return output_file\n\n\ndef compare_vocabulary(datasets):\n '''\n :param datasets: a list of datasets, which are all lists of token sequences\n :return: a list of objects describing each dataset\n '''\n\n def get_vocab(dataset):\n return set([w for seq in dataset for w in seq])\n\n vocabs = [get_vocab(dataset) for dataset in datasets]\n common_vocab = set.intersection(*vocabs)\n out = []\n for i, vocab in enumerate(vocabs):\n out.append({'coverage': len(common_vocab) / len(vocab)})\n\n return out\n\n\n# evaluation without checking the sentence numbers\n# odd_col -- number of columns that should be ignored (e.g. system ID)\ndef evaluate_simple(ref_file, hyp_file, odd_col=0, check_words=True, average='weighted'):\n tags_ref, tags_hyp = [], []\n tags_dict = {u'BAD': 0, u'OK': 1}\n for idx, (ref, hyp) in enumerate(zip(open(ref_file), open(hyp_file))):\n chunks_ref = ref.decode('utf-8').strip().split('\\t')\n chunks_hyp = hyp.decode('utf-8').strip().split('\\t')\n if chunks_ref[2] != chunks_hyp[2+odd_col] and check_words:\n print(\"Words don't match at string\", idx)\n return -1\n tags_ref.append(chunks_ref[-1])\n tags_hyp.append(chunks_hyp[-1])\n# all_tags.append(chunks_ref[-1])\n# all_tags.append(chunks_hyp[-1])\n\n# return f1_score([tags_dict[i] for i in tags_ref], [tags_dict[i] for i in tags_hyp])\n return f1_score([tags_dict[i] for i in tags_ref], [tags_dict[i] for i in tags_hyp], average=average)\n"
] | [
[
"sklearn.metrics.f1_score"
]
] |
mirzaim/cs231n | [
"d982c7f023a1cedd961b4104b3e652ce3c43e738"
] | [
"assignments/assignment3/cs231n/image_utils.py"
] | [
"\"\"\"Utility functions used for viewing and processing images.\"\"\"\n\nimport urllib.request, urllib.error, urllib.parse, os, tempfile\n\nimport numpy as np\nfrom imageio import imread\nfrom PIL import Image\n\n\n\ndef blur_image(X):\n \"\"\"\n A very gentle image blurring operation, to be used as a regularizer for\n image generation.\n\n Inputs:\n - X: Image data of shape (N, 3, H, W)\n\n Returns:\n - X_blur: Blurred version of X, of shape (N, 3, H, W)\n \"\"\"\n from .fast_layers import conv_forward_fast\n\n w_blur = np.zeros((3, 3, 3, 3))\n b_blur = np.zeros(3)\n blur_param = {\"stride\": 1, \"pad\": 1}\n for i in range(3):\n w_blur[i, i] = np.asarray([[1, 2, 1], [2, 188, 2], [1, 2, 1]], dtype=np.float32)\n w_blur /= 200.0\n return conv_forward_fast(X, w_blur, b_blur, blur_param)[0]\n\n\nSQUEEZENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)\nSQUEEZENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)\n\n\ndef preprocess_image(img):\n \"\"\"Preprocess an image for squeezenet.\n\n Subtracts the pixel mean and divides by the standard deviation.\n \"\"\"\n return (img.astype(np.float32) / 255.0 - SQUEEZENET_MEAN) / SQUEEZENET_STD\n\n\ndef deprocess_image(img, rescale=False):\n \"\"\"Undo preprocessing on an image and convert back to uint8.\"\"\"\n img = img * SQUEEZENET_STD + SQUEEZENET_MEAN\n if rescale:\n vmin, vmax = img.min(), img.max()\n img = (img - vmin) / (vmax - vmin)\n return np.clip(255 * img, 0.0, 255.0).astype(np.uint8)\n\n\ndef image_from_url(url):\n \"\"\"\n Read an image from a URL. Returns a numpy array with the pixel data.\n We write the image to a temporary file then read it back. Kinda gross.\n \"\"\"\n try:\n f = urllib.request.urlopen(url)\n fd, fname = tempfile.mkstemp()\n with open(fname, \"wb\") as ff:\n ff.write(f.read())\n img = imread(fname)\n os.close(fd)\n os.remove(fname)\n return img\n except urllib.error.URLError as e:\n print(\"URL Error: \", e.reason, url)\n except urllib.error.HTTPError as e:\n print(\"HTTP Error: \", e.code, url)\n\n\ndef load_image(filename, size=None):\n \"\"\"Load and resize an image from disk.\n\n Inputs:\n - filename: path to file\n - size: size of shortest dimension after rescaling\n \"\"\"\n img = imread(filename)\n if size is not None:\n orig_shape = np.array(img.shape[:2])\n min_idx = np.argmin(orig_shape)\n scale_factor = float(size) / orig_shape[min_idx]\n new_shape = (orig_shape * scale_factor).astype(int)\n # TODO: the width, height values are currently flipped here, and we should\n # change the resampling method to BILINEAR to match the torch implementation\n img = np.array(Image.fromarray(img).resize(new_shape, resample=Image.NEAREST))\n return img\n"
] | [
[
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"numpy.argmin",
"numpy.clip"
]
] |
yyeboah/fairseq | [
"e69a1608851977daf9e2e8b51185f608d431b238"
] | [
"fairseq/modules/transformer_layer.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Dict, List, Optional\n\nimport torch\nimport torch.nn as nn\nfrom fairseq import utils\nfrom fairseq.modules import LayerNorm, MultiheadAttention\nfrom fairseq.modules.fairseq_dropout import FairseqDropout\nfrom fairseq.modules.quant_noise import quant_noise\nfrom torch import Tensor\n\n\nclass TransformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer block.\n\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.encoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n\n def __init__(self, args):\n super().__init__()\n self.args = args\n self.embed_dim = args.encoder_embed_dim\n self.quant_noise = getattr(args, 'quant_noise_pq', 0)\n self.quant_noise_block_size = getattr(args, 'quant_noise_pq_block_size', 8) or 8\n self.self_attn = self.build_self_attention(self.embed_dim, args)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.dropout_module = FairseqDropout(\n args.dropout, module_name=self.__class__.__name__\n )\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, 'activation_fn', 'relu') or \"relu\"\n )\n activation_dropout_p = getattr(args, \"activation_dropout\", 0) or 0\n if activation_dropout_p == 0:\n # for backwards compatibility with models that use args.relu_dropout\n activation_dropout_p = getattr(args, \"relu_dropout\", 0) or 0\n self.activation_dropout_module = FairseqDropout(\n float(activation_dropout_p), module_name=self.__class__.__name__\n )\n self.normalize_before = args.encoder_normalize_before\n self.fc1 = self.build_fc1(\n self.embed_dim,\n args.encoder_ffn_embed_dim,\n self.quant_noise,\n self.quant_noise_block_size,\n )\n self.fc2 = self.build_fc2(\n args.encoder_ffn_embed_dim,\n self.embed_dim,\n self.quant_noise,\n self.quant_noise_block_size,\n )\n\n self.final_layer_norm = LayerNorm(self.embed_dim)\n\n def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(\n nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size\n )\n\n def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(\n nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size\n )\n\n def build_self_attention(self, embed_dim, args):\n return MultiheadAttention(\n embed_dim,\n args.encoder_attention_heads,\n dropout=args.attention_dropout,\n self_attention=True,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n\n def residual_connection(self, x, residual):\n return residual + x\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n \"\"\"\n layer_norm_map = {\"0\": \"self_attn_layer_norm\", \"1\": \"final_layer_norm\"}\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layer_norms.{}.{}\".format(name, old, m)\n if k in 
state_dict:\n state_dict[\"{}.{}.{}\".format(name, new, m)] = state_dict[k]\n del state_dict[k]\n\n def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, seq_len)` where padding elements are indicated by ``1``.\n attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,\n where `tgt_len` is the length of output and `src_len` is the\n length of input, though here both are equal to `seq_len`.\n `attn_mask[tgt_i, src_j] = 1` means that when calculating the\n embedding for `tgt_i`, we exclude (mask out) `src_j`. This is\n useful for strided self-attention.\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n # anything in original attn_mask = 1, becomes -1e8\n # anything in original attn_mask = 0, becomes 0\n # Note that we cannot use -inf here, because at some edge cases,\n # the attention weight (before softmax) for some padded element in query\n # will become -inf, which results in NaN in model parameters\n if attn_mask is not None:\n attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)\n\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n x, _ = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask,\n attn_mask=attn_mask,\n )\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n x = self.activation_fn(self.fc1(x))\n x = self.activation_dropout_module(x)\n x = self.fc2(x)\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n return x\n\n\nclass TransformerDecoderLayer(nn.Module):\n \"\"\"Decoder layer block.\n\n In the original paper each operation (multi-head attention, encoder\n attention or FFN) is postprocessed with: `dropout -> add residual ->\n layernorm`. In the tensor2tensor code they suggest that learning is more\n robust when preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. 
We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.decoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(\n self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False\n ):\n super().__init__()\n self.embed_dim = args.decoder_embed_dim\n self.dropout_module = FairseqDropout(\n args.dropout, module_name=self.__class__.__name__\n )\n self.quant_noise = getattr(args, \"quant_noise_pq\", 0)\n self.quant_noise_block_size = getattr(args, \"quant_noise_pq_block_size\", 8)\n\n self.cross_self_attention = getattr(args, \"cross_self_attention\", False)\n\n self.self_attn = self.build_self_attention(\n self.embed_dim,\n args,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n )\n\n self.activation_fn = utils.get_activation_fn(\n activation=str(args.activation_fn)\n if getattr(args, \"activation_fn\", None) is not None\n else \"relu\"\n )\n activation_dropout_p = getattr(args, \"activation_dropout\", 0) or 0\n if activation_dropout_p == 0:\n # for backwards compatibility with models that use args.relu_dropout\n activation_dropout_p = getattr(args, \"relu_dropout\", 0) or 0\n self.activation_dropout_module = FairseqDropout(\n float(activation_dropout_p), module_name=self.__class__.__name__\n )\n self.normalize_before = args.decoder_normalize_before\n\n # use layerNorm rather than FusedLayerNorm for exporting.\n # char_inputs can be used to determint this.\n # TODO remove this once we update apex with the fix\n export = getattr(args, \"char_inputs\", False)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n\n if no_encoder_attn:\n self.encoder_attn = None\n self.encoder_attn_layer_norm = None\n else:\n self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n\n self.fc1 = self.build_fc1(\n self.embed_dim,\n args.decoder_ffn_embed_dim,\n self.quant_noise,\n self.quant_noise_block_size,\n )\n self.fc2 = self.build_fc2(\n args.decoder_ffn_embed_dim,\n self.embed_dim,\n self.quant_noise,\n self.quant_noise_block_size,\n )\n\n self.final_layer_norm = LayerNorm(self.embed_dim, export=export)\n self.need_attn = True\n\n self.onnx_trace = False\n\n def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)\n\n def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)\n\n def build_self_attention(\n self, embed_dim, args, add_bias_kv=False, add_zero_attn=False\n ):\n return MultiheadAttention(\n embed_dim,\n args.decoder_attention_heads,\n dropout=args.attention_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n self_attention=not getattr(args, \"cross_self_attention\", False),\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n\n def build_encoder_attention(self, embed_dim, args):\n return MultiheadAttention(\n embed_dim,\n args.decoder_attention_heads,\n kdim=getattr(args, \"encoder_embed_dim\", None),\n vdim=getattr(args, \"encoder_embed_dim\", None),\n dropout=args.attention_dropout,\n encoder_decoder_attention=True,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n\n def prepare_for_onnx_export_(self):\n 
self.onnx_trace = True\n\n def residual_connection(self, x, residual):\n return residual + x\n\n def forward(\n self,\n x,\n encoder_out: Optional[torch.Tensor] = None,\n encoder_padding_mask: Optional[torch.Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n prev_self_attn_state: Optional[List[torch.Tensor]] = None,\n prev_attn_state: Optional[List[torch.Tensor]] = None,\n self_attn_mask: Optional[torch.Tensor] = None,\n self_attn_padding_mask: Optional[torch.Tensor] = None,\n need_attn: bool = False,\n need_head_weights: bool = False,\n ):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor, optional): binary\n ByteTensor of shape `(batch, src_len)` where padding\n elements are indicated by ``1``.\n need_attn (bool, optional): return attention weights\n need_head_weights (bool, optional): return attention weights\n for each head (default: return average over heads).\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n if need_head_weights:\n need_attn = True\n\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n if prev_self_attn_state is not None:\n prev_key, prev_value = prev_self_attn_state[:2]\n saved_state: Dict[str, Optional[Tensor]] = {\n \"prev_key\": prev_key,\n \"prev_value\": prev_value,\n }\n if len(prev_self_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_self_attn_state[2]\n assert incremental_state is not None\n self.self_attn._set_input_buffer(incremental_state, saved_state)\n _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)\n if self.cross_self_attention and not (\n incremental_state is not None\n and _self_attn_input_buffer is not None\n and \"prev_key\" in _self_attn_input_buffer\n ):\n if self_attn_mask is not None:\n assert encoder_out is not None\n self_attn_mask = torch.cat(\n (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1\n )\n if self_attn_padding_mask is not None:\n if encoder_padding_mask is None:\n assert encoder_out is not None\n encoder_padding_mask = self_attn_padding_mask.new_zeros(\n encoder_out.size(1), encoder_out.size(0)\n )\n self_attn_padding_mask = torch.cat(\n (encoder_padding_mask, self_attn_padding_mask), dim=1\n )\n assert encoder_out is not None\n y = torch.cat((encoder_out, x), dim=0)\n else:\n y = x\n\n x, attn = self.self_attn(\n query=x,\n key=y,\n value=y,\n key_padding_mask=self_attn_padding_mask,\n incremental_state=incremental_state,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n if self.encoder_attn is not None and encoder_out is not None:\n residual = x\n if self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n if prev_attn_state is not None:\n prev_key, prev_value = prev_attn_state[:2]\n saved_state: Dict[str, Optional[Tensor]] = {\n \"prev_key\": prev_key,\n \"prev_value\": prev_value,\n }\n if len(prev_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_attn_state[2]\n assert incremental_state is not None\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\n\n x, attn = self.encoder_attn(\n query=x,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=need_attn or (not self.training and self.need_attn),\n 
need_head_weights=need_head_weights,\n )\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n\n x = self.activation_fn(self.fc1(x))\n x = self.activation_dropout_module(x)\n x = self.fc2(x)\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n if self.onnx_trace and incremental_state is not None:\n saved_state = self.self_attn._get_input_buffer(incremental_state)\n assert saved_state is not None\n if self_attn_padding_mask is not None:\n self_attn_state = [\n saved_state[\"prev_key\"],\n saved_state[\"prev_value\"],\n saved_state[\"prev_key_padding_mask\"],\n ]\n else:\n self_attn_state = [saved_state[\"prev_key\"], saved_state[\"prev_value\"]]\n return x, attn, self_attn_state\n return x, attn, None\n\n def make_generation_fast_(self, need_attn: bool = False, **kwargs):\n self.need_attn = need_attn\n"
] | [
[
"torch.nn.Linear",
"torch.cat"
]
] |
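
As a side note on the fairseq row above: the `TransformerDecoderLayer` docstring contrasts the post-norm layout of the paper with the pre-norm ("tensor2tensor") layout selected by `decoder_normalize_before`, and the row's API list records `torch.nn.Linear` because of the `fc1`/`fc2` projections built in `build_fc1`/`build_fc2`. The sketch below is illustrative only, not part of the dataset row; the module name, dimensions, and the `normalize_before` flag are made-up example values. It shows the residual-plus-LayerNorm ordering that the docstring describes, applied to the feed-forward sub-block.

```python
import torch
import torch.nn as nn


class FeedForwardBlock(nn.Module):
    """Illustrative only: residual + LayerNorm ordering around the FFN."""

    def __init__(self, embed_dim=8, ffn_dim=32, normalize_before=False):
        super().__init__()
        self.normalize_before = normalize_before
        self.layer_norm = nn.LayerNorm(embed_dim)
        self.fc1 = nn.Linear(embed_dim, ffn_dim)   # analogous to build_fc1 above
        self.fc2 = nn.Linear(ffn_dim, embed_dim)   # analogous to build_fc2 above

    def forward(self, x):
        residual = x
        if self.normalize_before:        # pre-norm ("tensor2tensor") variant
            x = self.layer_norm(x)
        x = self.fc2(torch.relu(self.fc1(x)))
        x = residual + x                 # residual_connection(x, residual)
        if not self.normalize_before:    # post-norm variant (the paper's default)
            x = self.layer_norm(x)
        return x


x = torch.randn(5, 2, 8)                 # (seq_len, batch, embed_dim)
print(FeedForwardBlock(normalize_before=True)(x).shape)  # torch.Size([5, 2, 8])
```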
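Similarly, `torch.cat` appears in the API list because the `forward` pass above, when `cross_self_attention` is enabled, prepends the encoder states to the decoder states along the time axis and widens the attention mask with zero columns so decoder positions can also attend to the source. A rough, self-contained sketch of that shape bookkeeping follows; the tensor sizes are hypothetical and chosen only for illustration.

```python
import torch

src_len, tgt_len, batch, dim = 4, 3, 2, 8            # hypothetical sizes
encoder_out = torch.randn(src_len, batch, dim)        # (src_len, batch, embed_dim)
x = torch.randn(tgt_len, batch, dim)                  # (tgt_len, batch, embed_dim)

# Keys/values: encoder states concatenated in front of the decoder states
y = torch.cat((encoder_out, x), dim=0)                # (src_len + tgt_len, batch, embed_dim)

# Causal mask over decoder positions, padded with zero columns so every
# decoder position may also attend to all encoder positions
causal = torch.triu(x.new_full((tgt_len, tgt_len), float("-inf")), diagonal=1)
self_attn_mask = torch.cat((x.new_zeros(tgt_len, src_len), causal), dim=1)

print(y.shape, self_attn_mask.shape)                  # [7, 2, 8] and [3, 7]
```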
lhutton1/tvm | [
"d9ec031ec33e046044fd9521f02ed63213ad07b8"
] | [
"tests/python/relay/test_op_level3.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\" Support level3 operator test cases.\n\"\"\"\nfrom typing import Callable, Optional\n\nimport numpy as np\nimport pytest\nimport tvm\nimport tvm.testing\nfrom tvm import relay, te\nfrom tvm.error import TVMError\nfrom tvm.relay import create_executor, transform\nfrom tvm.relay.testing import check_grad, run_infer_type\nfrom utils import ref_funcs\n\n\ndef test_zeros_ones():\n for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:\n y = op(shape=(124, 50), dtype=\"float64\")\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((124, 50), \"float64\")\n intrp = create_executor()\n intrp_res = intrp.evaluate(y).numpy()\n np.testing.assert_allclose(intrp_res, ref((124, 50), \"float64\"))\n\n\ndef test_unary_identity():\n for op, ref in [\n (relay.zeros_like, np.zeros_like),\n (relay.ones_like, np.ones_like),\n (relay.ceil, np.ceil),\n (relay.floor, np.floor),\n (relay.trunc, np.trunc),\n (relay.round, np.round),\n (relay.abs, np.abs),\n (relay.copy, None), # np.copy\n (relay.negative, np.negative),\n (relay.sign, np.sign),\n ]:\n shape = (8, 9, 4)\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n y = op(x)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, \"float32\")\n\n if ref is not None:\n data = np.random.rand(*shape).astype(\"float32\")\n intrp = create_executor()\n op_res = intrp.evaluate(y, {x: relay.const(data)})\n ref_res = ref(data)\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)\n\n\ndef test_cast():\n x = relay.var(\"x\", relay.TensorType((8, 9, 4), \"float32\"))\n y = x.astype(\"int32\")\n yy = run_infer_type(y)\n assert \"dtype=\" in yy.astext()\n assert yy.checked_type == relay.TensorType((8, 9, 4), \"int32\")\n\n x = relay.var(\"x\", relay.TensorType((8, 9, 4), \"float32\"))\n y = relay.cast(x, \"int32\")\n yy = run_infer_type(y)\n assert \"dtype=\" in yy.astext()\n assert yy.checked_type == relay.TensorType((8, 9, 4), \"int32\")\n\n\ndef test_clip():\n a = relay.var(\"a\", relay.TensorType((10, 4), \"float32\"))\n y = relay.clip(a, 1.0, 4.0)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((10, 4), \"float32\")\n\n data = np.random.rand(10, 4).astype(\"float32\")\n intrp = create_executor()\n op_res = intrp.evaluate(y, {a: relay.const(data)})\n ref_res = np.clip(data, 1.0, 4.0)\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)\n\n\ndef test_fixed_point_multiply():\n # Test 23 * 1/16\n # [m,s] = [0.5, -3] = frexp(1/16)\n # M = 0.5*2^31 = 1073741824\n # so M = 1073741824 and s = -3\n\n a = relay.var(\"a\", relay.TensorType((10, 4), \"int32\"))\n y = relay.fixed_point_multiply(a, 1073741824, -3)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((10, 
4), \"int32\")\n\n data = 23 * np.ones((10, 4)).astype(\"int32\")\n intrp = create_executor()\n op_res = intrp.evaluate(y, {a: relay.const(data)})\n ref_res = np.ones((10, 4)).astype(\"int32\")\n np.testing.assert_allclose(op_res.numpy(), ref_res, atol=1)\n\n\ndef test_reinterpret():\n a = relay.var(\"a\", relay.TensorType((1000, 4), \"float32\"))\n y = relay.reinterpret(a, \"int32\")\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1000, 4), \"int32\")\n\n data = np.random.randn(1000, 4).astype(\"float32\") * 1000\n intrp = create_executor()\n op_res = intrp.evaluate(y, {a: relay.const(data)})\n ref_res = data.view(\"int32\")\n np.testing.assert_equal(op_res.numpy(), ref_res)\n\n\ndef test_approximate_transcendental():\n def C(x):\n return relay.expr.const(x, \"float32\")\n\n def approx_exp(x):\n # An approximation derived from Opus,\n # https://github.com/xiph/opus/blob/c1c247/celt/mathops.h#L147-L165\n x = relay.minimum(relay.maximum(x, C(-88.0)), C(88.0))\n x = C(127.0) + x * C(1.44269504)\n xf = relay.floor(x)\n i = relay.cast(xf, \"int32\")\n x = x - xf\n Y = C(0.99992522) + x * (C(0.69583354) + x * (C(0.22606716) + x * C(0.078024523)))\n exponent = relay.left_shift(i, relay.expr.const(23, \"int32\"))\n exponent = relay.reinterpret(exponent, \"float32\")\n return exponent * Y\n\n def approximate_sigmoid(x):\n y = approx_exp(x)\n return y / (y + C(1.0))\n\n def approximate_tanh(x):\n x = x * C(2.0)\n y = approx_exp(x)\n return (y - C(1.0)) / (y + C(1.0))\n\n a = relay.var(\"a\", relay.TensorType((1000,), \"float32\"))\n y = approximate_sigmoid(a)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1000,), \"float32\")\n data = np.linspace(-5, 5, 1000).astype(\"float32\")\n intrp = create_executor()\n op_res = intrp.evaluate(y, {a: relay.const(data)})\n\n def reference_sigmoid(x):\n return np.exp(-np.logaddexp(0, -x))\n\n np.testing.assert_allclose(op_res.numpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9)\n\n y = approximate_tanh(a)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1000,), \"float32\")\n data = np.linspace(-5, 5, 1000).astype(\"float32\")\n intrp = create_executor()\n op_res = intrp.evaluate(y, {a: relay.const(data)})\n\n def reference_tanh(x):\n return np.tanh(x)\n\n np.testing.assert_allclose(op_res.numpy(), reference_tanh(data), atol=4e-5, rtol=1e-9)\n\n\ndef test_squeeze():\n def verify_squeeze(shape, dtype, axis):\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n squeeze = relay.squeeze(x, axis=axis)\n\n np_axis = tuple(axis) if axis is not None else None\n\n data = np.random.random_sample(shape).astype(dtype)\n intrp = create_executor()\n op_res = intrp.evaluate(squeeze, {x: relay.const(data)})\n ref_res = np.squeeze(data, axis=np_axis)\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)\n\n verify_squeeze((1, 3, 2, 5), \"float32\", None)\n verify_squeeze((1, 3, 1), \"float32\", [0])\n verify_squeeze((1, 2, 1, 2, 1), \"float32\", [0, 2])\n\n\ndef test_transpose_infer_type():\n n, t, d = te.size_var(\"n\"), te.size_var(\"t\"), 100\n x = relay.var(\"x\", relay.TensorType((n, t, d), \"float32\"))\n y = relay.transpose(x, axes=(1, 0, 2))\n assert \"axes=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((t, n, 100), \"float32\")\n\n y = relay.transpose(x)\n assert \"axes=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((100, t, n), \"float32\")\n\n\[email protected]_gpu\ndef test_transpose():\n def 
verify_transpose(dshape, axes):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n z = relay.transpose(x, axes=axes)\n\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=dshape).astype(\"float32\")\n ref_res = np.transpose(x_data, axes=axes)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_transpose((2, 3, 4), (0, 2, 1))\n\n\ndef test_squeeze_infer_type():\n n, t, d = 1, 4, 1\n x = relay.var(\"x\", relay.TensorType((n, t, d), \"float32\"))\n y = relay.squeeze(x, axis=(2,))\n assert \"axis=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1, 4), \"float32\")\n\n n, t, d = 1, 4, 1\n x = relay.var(\"x\", relay.TensorType((n, t, d), \"float32\"))\n y = relay.squeeze(x)\n assert \"axis=\" not in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((4,), \"float32\")\n\n\[email protected](raises=tvm._ffi.base.TVMError)\ndef test_squeeze_bad_axes_infer_type():\n n, t, d = 1, 4, 1\n x = relay.var(\"x\", relay.TensorType((n, t, d), \"float32\"))\n y = relay.squeeze(x, axis=(1,))\n yy = run_infer_type(y)\n\n\ndef test_reshape_infer_type():\n n, t, d1, d2 = 10, 20, 100, 20\n x = relay.var(\"x\", relay.TensorType((n, t, d1, d2), \"float32\"))\n y = relay.reshape(x, newshape=(n, t, 2000))\n assert \"newshape=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, t, 2000), \"float32\")\n\n\[email protected]_gpu\ndef test_reshape():\n def verify_reshape(shape, newshape, oshape):\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n z = relay.reshape(x, newshape=newshape)\n zz = run_infer_type(z)\n assert \"newshape=\" in z.astext()\n assert zz.checked_type == relay.ty.TensorType(oshape, \"float32\")\n\n func = relay.Function([x], z)\n check_grad(func)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n ref_res = np.reshape(x_data, oshape)\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_reshape((2, 3, 4), (8, 3), (8, 3))\n verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))\n verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))\n verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))\n verify_reshape((2, 3, 4), (0, -1), (2, 12))\n verify_reshape((2, 3, 4), (-1, 0), (8, 3))\n verify_reshape((2, 3, 4), (2, -2), (2, 3, 4))\n verify_reshape((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1))\n verify_reshape((2, 3, 4), (-3, 4), (6, 4))\n verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))\n verify_reshape((2, 3, 4), (0, -3), (2, 12))\n verify_reshape((2, 3, 4), (-3, -2), (6, 4))\n verify_reshape((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4))\n verify_reshape((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4))\n verify_reshape((1,), (), ())\n\n\ndef test_reshape_fail():\n with pytest.raises(TVMError) as reshape_err:\n x = relay.var(\"x\", relay.TensorType([2, 3], \"float32\"))\n z = relay.reshape(x, [7])\n zz = run_infer_type(z)\n\n\ndef test_reshape_like_infer_type():\n # concrete shape\n x = relay.var(\"x\", relay.TensorType((1, 2, 3), \"float32\"))\n y = relay.var(\"y\", relay.TensorType((1, 6), \"float32\"))\n z = 
relay.reshape_like(x, y)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((1, 6), \"float32\")\n\n # symbolic shape\n n, c, h, w = te.size_var(\"n\"), 2, 3, te.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n y = relay.var(\"y\", relay.TensorType((1, 8, 8), \"float32\"))\n z = relay.reshape_like(x, y)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((1, 8, 8), \"float32\")\n\n # partial reshaping\n x = relay.var(\"x\", relay.TensorType((1, 2, 3, 4), \"float32\"))\n y = relay.var(\"y\", relay.TensorType((1, 6, 5), \"float32\"))\n z = relay.reshape_like(x, y, lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((1, 6, 4), \"float32\")\n\n x = relay.var(\"x\", relay.TensorType((1, 2, 3, 4), \"float32\"))\n y = relay.var(\"y\", relay.TensorType((2, 3, 4, 1, 6), \"float32\"))\n z = relay.reshape_like(x, y, rhs_end=3)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((2, 3, 4), \"float32\")\n z = relay.reshape_like(x, y, rhs_begin=2)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((4, 1, 6), \"float32\")\n\n # symbolic partial reshaping\n n, c, h, w = te.size_var(\"n\"), 2, 3, te.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n y = relay.var(\"y\", relay.TensorType((5, 6), \"float32\"))\n z = relay.var(\"z\", relay.TensorType((4,), \"float32\"))\n w = relay.reshape_like(x, y, lhs_end=3)\n w = relay.reshape_like(w, z, lhs_begin=2)\n w = run_infer_type(w)\n assert w.checked_type == relay.TensorType((5, 6, 4), \"float32\")\n\n\[email protected]_gpu\ndef test_reshape_like():\n def verify_reshape_like(shape, oshape, shape_like=None, reshape_like_kwargs={}):\n if shape_like is None:\n shape_like = oshape\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n y_data = np.random.uniform(low=-1, high=1, size=shape_like).astype(\"float32\")\n ref_res = np.reshape(x_data, oshape)\n\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n y = relay.var(\"x\", relay.TensorType(shape_like, \"float32\"))\n z = relay.reshape_like(x, y, **reshape_like_kwargs)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.ty.TensorType(ref_res.shape, \"float32\")\n\n func = relay.Function([x, y], z)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_reshape_like((2, 3, 4), (1, 8, 3))\n verify_reshape_like((4, 7), (2, 7, 2))\n verify_reshape_like(\n (1, 2, 3, 4), (1, 6, 4), (1, 6, 5), dict(lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2)\n )\n\n\ndef test_take_infer_type():\n def verify_take(dshape, indices_shape, oshape, axis=None):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n indices = relay.var(\"indices\", relay.TensorType(indices_shape, \"int32\"))\n y = relay.take(x, indices, axis=axis)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(oshape, \"float32\")\n\n d1, d2, d3 = te.var(\"d1\"), te.var(\"d2\"), te.var(\"d3\")\n d4, d5, d6 = te.var(\"d4\"), te.var(\"d5\"), te.var(\"d6\")\n verify_take((d1,), (1,), (1,), 0)\n verify_take((4,), (d1, d2), (d1, d2))\n verify_take((3, 3, 3), (1, d2), (1, d2))\n verify_take((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0)\n verify_take((d1, d2), (d3, d4, d5), (d1, d3, 
d4, d5), 1)\n verify_take((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2)\n\n\[email protected]_gpu\ndef test_take():\n def verify_take(src_shape, indices_src, axis=None, mode=\"clip\"):\n src_dtype = \"float32\"\n indices_dtype = \"int32\"\n indices_src = np.array(indices_src, dtype=indices_dtype)\n x = relay.var(\"x\", relay.TensorType(src_shape, src_dtype))\n indices = relay.var(\"indices\", relay.TensorType(indices_src.shape, indices_dtype))\n z = relay.take(x, indices, axis=axis, mode=mode)\n\n func = relay.Function([x, indices], z)\n x_data = np.random.uniform(low=-1, high=1, size=src_shape).astype(src_dtype)\n np_mode = \"raise\" if mode == \"fast\" else mode\n ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, indices_src)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_take((4,), [1])\n verify_take((4,), [[0, 1, 2, 3]])\n verify_take((3, 3, 3), [[11, 25]])\n verify_take((4,), [[0, 1], [2, 3]])\n verify_take((4,), [1], 0)\n verify_take((2, 2), [[[1, 0], [0, 1]]], 0)\n verify_take((2, 2), [[[1, 0], [0, 1]]], 1)\n verify_take((4, 3, 5, 6), [[2, 1, 0, 0]], -2)\n verify_take((3, 4), [-5, 20])\n verify_take((3, 4), [-5, 20], mode=\"wrap\")\n verify_take((3, 4), [-1, 2], axis=0)\n verify_take((3, 4), [-1, 2], axis=0, mode=\"wrap\")\n verify_take((3, 4), [-1, 2], axis=1)\n verify_take((3, 4), [-1, 2], axis=1, mode=\"wrap\")\n verify_take((3, 3, 3), [[11, 25]], mode=\"fast\")\n verify_take((3, 4), [0, 2], axis=0, mode=\"fast\")\n verify_take((3, 4), [0, 2], axis=1, mode=\"fast\")\n\n\ndef test_split_infer_type():\n def verify_split(dshape, indices_or_sections, ret_type, axis=None):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, \"float32\"))\n y = relay.split(x, indices_or_sections, axis=axis)\n yy = run_infer_type(y.astuple())\n assert yy.checked_type == ret_type\n\n idxd = tvm.tir.indexdiv\n\n d1, d2, d3, d4 = te.var(\"d1\"), te.var(\"d2\"), te.var(\"d3\"), te.var(\"d4\")\n axis = te.var(\"axis\")\n verify_split(\n (5, 5, 2, 2),\n 5,\n relay.ty.TupleType(\n tvm.runtime.convert(\n [\n relay.ty.TensorType((5, 1, 2, 2), \"float32\"),\n relay.ty.TensorType((5, 1, 2, 2), \"float32\"),\n relay.ty.TensorType((5, 1, 2, 2), \"float32\"),\n relay.ty.TensorType((5, 1, 2, 2), \"float32\"),\n relay.ty.TensorType((5, 1, 2, 2), \"float32\"),\n ]\n )\n ),\n axis=1,\n )\n verify_split(\n (5, 5, 2, 2),\n 5,\n relay.ty.TupleType(\n tvm.runtime.convert(\n [\n relay.ty.TensorType((1, 5, 2, 2), \"float32\"),\n relay.ty.TensorType((1, 5, 2, 2), \"float32\"),\n relay.ty.TensorType((1, 5, 2, 2), \"float32\"),\n relay.ty.TensorType((1, 5, 2, 2), \"float32\"),\n relay.ty.TensorType((1, 5, 2, 2), \"float32\"),\n ]\n )\n ),\n axis=0,\n )\n verify_split(\n (d1, d2, d3, d4),\n 4,\n relay.ty.TupleType(\n tvm.runtime.convert(\n [\n relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), \"float32\"),\n relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), \"float32\"),\n relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), \"float32\"),\n relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), \"float32\"),\n ]\n )\n ),\n axis=2,\n )\n verify_split(\n (d1, d2, d3, d4),\n 2,\n relay.ty.TupleType(\n tvm.runtime.convert(\n [\n relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), \"float32\"),\n relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), \"float32\"),\n ]\n )\n ),\n axis=0,\n )\n verify_split(\n (d1, d2, 
d3, d4),\n (2, 4, 7),\n relay.ty.TupleType(\n tvm.runtime.convert(\n [\n relay.ty.TensorType((d1, 2, d3, d4), \"float32\"),\n relay.ty.TensorType((d1, 2, d3, d4), \"float32\"),\n relay.ty.TensorType((d1, 3, d3, d4), \"float32\"),\n relay.ty.TensorType((d1, (d2 - 7), d3, d4), \"float32\"),\n ]\n )\n ),\n axis=1,\n )\n\n\ndef test_full_infer_type():\n # default settings: match input dtype\n x = relay.var(\"x\", relay.TensorType((), \"int8\"))\n y = relay.full(x, ())\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((), \"int8\")\n\n # change the shape and dtype\n x = relay.var(\"x\", relay.TensorType((), \"float32\"))\n y = relay.full(x, (1, 2), \"int8\")\n \"shape=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1, 2), \"int8\")\n\n\[email protected]_gpu\ndef test_full():\n def verify_full(fill_value, src_shape, dtype):\n x = relay.var(\"x\", relay.scalar_type(dtype))\n z = relay.full(x, src_shape, dtype)\n func = relay.Function([x], z)\n ref_res = np.full(src_shape, fill_value)\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(np.array(fill_value, dtype))\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_full(4, (1, 3, 4, 4), \"int32\")\n # verify_full(4, (1, 3, 4, 4), \"int64\") # This does not pass, python int32 is not upcast to int64, not sure how to fix it.\n verify_full(4.0, (1, 4), \"float32\")\n\n\ndef test_full_like_infer_type():\n # concrete shape\n base = relay.var(\"base\", relay.TensorType((1, 2, 3), \"float32\"))\n fill = relay.var(\"fill\", relay.TensorType((), \"float32\"))\n y = relay.full_like(base, fill)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1, 2, 3), \"float32\")\n\n # symbolic shape\n n, c, h, w = te.size_var(\"n\"), 2, 3, te.size_var(\"w\")\n base = relay.var(\"base\", relay.TensorType((n, c, h, w), \"float32\"))\n fill = relay.var(\"fill\", relay.TensorType((), \"float32\"))\n y = relay.full_like(base, fill)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, w), \"float32\")\n\n\[email protected]_gpu\ndef test_full_like():\n def verify_full_like(base, fill_value, dtype):\n x_data = np.random.uniform(low=-1, high=1, size=base).astype(dtype)\n x = relay.var(\"x\", relay.TensorType(base, dtype))\n y = relay.var(\"y\", relay.scalar_type(dtype))\n z = relay.full_like(x, y)\n\n func = relay.Function([x, y], z)\n ref_res = np.full_like(x_data, fill_value)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, np.array(fill_value, dtype))\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_full_like((1, 3, 4, 4), 4, \"int32\")\n verify_full_like((1, 1), 44.0, \"float32\")\n\n\[email protected]_gpu\ndef test_infer_type_leaky_relu():\n n, c, h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), te.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n y = relay.nn.leaky_relu(x, alpha=0.1)\n \"alpha=0.1\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, w), \"float32\")\n\n shape = (1, 5, 10, 10)\n dtype = \"float32\"\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n z = relay.nn.leaky_relu(x, alpha=0.1)\n assert \"alpha=0.1\" in 
z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType(shape, dtype)\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)\n ref_res = np.where(x_data > 0, x_data, x_data * 0.1)\n\n for target, dev in tvm.testing.enabled_targets():\n intrp1 = relay.create_executor(\"graph\", device=dev, target=target)\n intrp2 = relay.create_executor(\"debug\", device=dev, target=target)\n op_res1 = intrp1.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5)\n\n\ndef verify_infer_type_prelu(data, alpha, axis, output, dtype=\"float32\"):\n x = relay.var(\"data\", relay.TensorType(data, dtype))\n if alpha:\n y = relay.var(\"alpha\", relay.TensorType(alpha, dtype))\n else:\n y = relay.var(\"alpha\", relay.IncompleteType())\n z = relay.nn.prelu(x, y, axis=axis)\n zz = run_infer_type(z)\n if axis != 1:\n assert \"axis\" in z.astext()\n assert zz.checked_type == relay.ty.TensorType(output, dtype)\n if not alpha:\n axis = axis if axis else 1\n alpha_shape = (data[axis],)\n assert zz.args[1].checked_type == relay.TensorType(alpha_shape, \"float32\")\n\n if all(isinstance(v, tvm.tir.Var) == 1 for v in data) or not alpha:\n return\n\n func = relay.Function([x, y], z)\n x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)\n a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)\n\n if axis == 1:\n ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data >= 0) * x_data\n else:\n ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data >= 0) * x_data\n\n for target, dev in tvm.testing.enabled_targets():\n intrp1 = relay.create_executor(\"graph\", device=dev, target=target)\n intrp2 = relay.create_executor(\"debug\", device=dev, target=target)\n op_res1 = intrp1.evaluate(func)(x_data, a_data)\n tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data, a_data)\n tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5)\n\n\[email protected]_gpu\ndef test_infer_type_prelu():\n n, c, h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), te.size_var(\"w\")\n verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w))\n verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c))\n verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w))\n verify_infer_type_prelu((n, h, w, c), None, 3, (n, h, w, c))\n verify_infer_type_prelu((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2))\n verify_infer_type_prelu((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3))\n verify_infer_type_prelu((1, 3, 2, 2), None, 1, (1, 3, 2, 2))\n verify_infer_type_prelu((1, 2, 2, 3), None, 3, (1, 2, 2, 3))\n\n\[email protected]_gpu\ndef test_arange():\n def verify_arange(start, stop, step):\n dtype = \"float32\"\n if start is None and step is None:\n x = relay.arange(relay.const(stop, dtype=dtype))\n ref_res = np.arange(stop).astype(dtype)\n elif start is None:\n x = relay.arange(relay.const(stop, dtype=dtype), step=relay.const(step, dtype=dtype))\n ref_res = np.arange(stop, step=step).astype(dtype)\n elif step is None:\n x = relay.arange(relay.const(start, dtype=dtype), relay.const(stop, dtype=dtype))\n ref_res = np.arange(start, stop).astype(dtype)\n else:\n x = relay.arange(\n relay.const(start, dtype=dtype),\n relay.const(stop, dtype=dtype),\n relay.const(step, dtype=dtype),\n )\n ref_res = np.arange(start, stop, step).astype(dtype)\n\n 
func = relay.Function([], x)\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)()\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_arange(None, 20, None)\n verify_arange(None, 20, 2)\n verify_arange(1, 20, None)\n verify_arange(1, 20, 2)\n # arange doesnt' support floating point right now, see type relation\n # verify_arange(1, 20, 1.5)\n verify_arange(1, 20.5, None)\n verify_arange(1, 20, 3)\n verify_arange(20, 1, -1)\n # arange doesnt' support floating point right now, see type relation\n # verify_arange(20, 1, -1.5)\n\n\[email protected]_gpu\ndef test_meshgrid():\n def verify_meshgrid(lengths, indexing=\"ij\"):\n input_vars = []\n input_data = []\n for i, length in enumerate(lengths):\n input_name = \"x_{}\".format(i)\n if length == 0:\n # Scalar\n input_vars.append(relay.var(input_name, relay.scalar_type(\"float32\")))\n input_data.append(np.array(1, \"float32\"))\n else:\n input_vars.append(relay.var(input_name, relay.TensorType((length,), \"float32\")))\n input_data.append(np.arange(length).astype(\"float32\"))\n\n z = relay.meshgrid(input_vars, indexing=indexing).astuple()\n func = relay.Function(input_vars, z)\n # Get ref\n ref_res = np.meshgrid(*input_data, indexing=indexing)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(*input_data)\n assert len(op_res) == len(ref_res)\n for i in range(len(op_res)):\n tvm.testing.assert_allclose(op_res[i].numpy(), ref_res[i], rtol=1e-5)\n\n verify_meshgrid([3, 5])\n verify_meshgrid([4, 2], indexing=\"xy\")\n verify_meshgrid([3, 5, 2])\n verify_meshgrid([3, 1, 5], indexing=\"xy\")\n # Length 0 signifies scalar.\n verify_meshgrid([3, 5, 0])\n\n\[email protected]_gpu\ndef test_tile():\n def verify_tile(dshape, reps):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n z = relay.tile(x, reps=reps)\n\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=dshape).astype(\"float32\")\n ref_res = np.tile(x_data, reps=reps)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_tile((2, 3, 4), (3, 2, 1))\n verify_tile((2, 3, 4), (1, 2))\n verify_tile((2, 3), (3, 2, 1))\n\n\[email protected]_gpu\ndef test_repeat():\n def verify_repeat(dshape, repeats, axis):\n x = relay.Var(\"x\", relay.TensorType(dshape, \"float32\"))\n func = relay.Function([x], relay.repeat(x, repeats, axis))\n data = np.random.uniform(size=dshape).astype(\"float32\")\n ref_res = np.repeat(data, repeats, axis)\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_repeat((3,), 2, 0)\n verify_repeat((3, 10), 2, -1)\n verify_repeat((3, 2, 4), 3, 1)\n\n\[email protected]_gpu\ndef test_stack():\n def produce_input_tuple(dshapes):\n y = [relay.var(\"input\", relay.TensorType(shape, \"float32\")) for shape in dshapes]\n return relay.Tuple(y)\n\n def ref_stack(inputs, axis):\n return np.stack(inputs, axis=axis)\n\n def 
verify_stack(input_expr, relay_args, ref_res, axis):\n z = relay.stack(input_expr, axis=axis)\n inp_vars = relay.analysis.free_vars(z)\n func = relay.Function(inp_vars, z)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(*relay_args)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n def verify_tup_lit_stack(dshapes, axis):\n input_tuple = produce_input_tuple(dshapes)\n input_data = [np.random.normal(size=shape).astype(\"float32\") for shape in dshapes]\n ref_res = ref_stack(input_data, axis)\n verify_stack(input_tuple, input_data, ref_res, axis)\n\n def verify_list_lit_stack(dshapes, axis):\n input_list = produce_input_tuple(dshapes).fields\n input_data = [np.random.normal(size=shape).astype(\"float32\") for shape in dshapes]\n ref_res = ref_stack(input_data, axis)\n verify_stack(input_list, input_data, ref_res, axis)\n\n def verify_tup_expr_stack(dshapes, axis):\n input_data = [np.random.normal(size=shape).astype(\"float32\") for shape in dshapes]\n ref_res = ref_stack(input_data, axis)\n\n # expression that evaluates to a tuple\n # but is not a tuple literal\n x = relay.Var(\"x\")\n input_expr = relay.Let(x, relay.Tuple([relay.const(inp) for inp in input_data]), x)\n verify_stack(input_expr, [], ref_res, axis)\n\n dshape_axis_combos = [\n ([(2,), (2,), (2,)], -1),\n ([(2,), (2,), (2,)], 0),\n ([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1),\n ([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1),\n ([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4),\n ]\n\n for dshapes, axis in dshape_axis_combos:\n verify_tup_lit_stack(dshapes, axis)\n verify_list_lit_stack(dshapes, axis)\n verify_tup_expr_stack(dshapes, axis)\n\n\[email protected]_gpu\ndef test_reverse():\n def verify_reverse(dshape, axis):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n z = relay.reverse(x, axis=axis)\n zz = run_infer_type(z)\n\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=dshape).astype(\"float32\")\n ref_res = np.flip(x_data, axis)\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_reverse((2, 3, 4), 1)\n verify_reverse((4, 7), 0)\n verify_reverse((2, 3, 4), -1)\n\n\[email protected]_gpu\ndef test_reverse_sequence():\n def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res):\n seq_lengths_data = np.array(seq_lengths).astype(\"int32\")\n x = relay.var(\"x\", relay.TensorType(x_data.shape, str(x_data.dtype)))\n z = relay.reverse_sequence(x, relay.const(seq_lengths_data), seq_axis, batch_axis)\n zz = run_infer_type(z)\n assert zz.checked_type == x.type_annotation\n func = relay.Function([x], z)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype(\"int32\")\n result = [[0, 5, 10, 15], [4, 1, 6, 11], [8, 9, 2, 7], [12, 13, 14, 3]]\n verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))\n verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))\n 
verify_reverse_sequence(\n indata.astype(\"float32\"), [1, 2, 3, 4], 1, 0, np.array(result).astype(\"float32\")\n )\n\n indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype(\"int32\")\n result = [[0, 1, 2, 3], [5, 4, 6, 7], [10, 9, 8, 11], [15, 14, 13, 12]]\n verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))\n verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))\n verify_reverse_sequence(\n indata.astype(\"float32\"), [1, 2, 3, 4], 0, 1, np.array(result).astype(\"float32\")\n )\n\n indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype(\"int32\")\n result = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [15, 14, 13, 12]]\n verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))\n\n indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype(\"int32\")\n result = [\n [\n [[18, 19, 20], [21, 22, 23], [24, 25, 26]],\n [[9, 10, 11], [12, 13, 14], [15, 16, 17]],\n [[0, 1, 2], [3, 4, 5], [6, 7, 8]],\n ],\n [\n [[45, 46, 47], [48, 49, 50], [51, 52, 53]],\n [[36, 37, 38], [39, 40, 41], [42, 43, 44]],\n [[27, 28, 29], [30, 31, 32], [33, 34, 35]],\n ],\n ]\n verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))\n\n indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype(\"int32\")\n result = [\n [\n [[9, 10, 11], [21, 22, 23], [15, 16, 17]],\n [[0, 1, 2], [12, 13, 14], [6, 7, 8]],\n [[18, 19, 20], [3, 4, 5], [24, 25, 26]],\n ],\n [\n [[36, 37, 38], [48, 49, 50], [42, 43, 44]],\n [[27, 28, 29], [39, 40, 41], [33, 34, 35]],\n [[45, 46, 47], [30, 31, 32], [51, 52, 53]],\n ],\n ]\n verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))\n\n indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype(\"int32\")\n result = []\n with pytest.raises(Exception) as execinfo:\n verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))\n\n assert (\n \"For reverse_sequnece seq_lengths size should match with dimension of batch axis,\"\n \" but got dimension of batch_axis = 4, and seq_length size = 5\" in execinfo.value.args[0]\n )\n\n\[email protected]_gpu\ndef test_scatter():\n def ref_scatter(data, indices, updates, axis=0):\n idx = np.indices(indices.shape).reshape(indices.ndim, -1)\n\n updated_idx = np.copy(idx)\n indices = indices.reshape(-1)\n for i in range(len(indices)):\n updated_idx[axis, i] = indices[i]\n scattered = np.copy(data)\n scattered[tuple(updated_idx)] = updates[tuple(idx)]\n return scattered\n\n def verify_scatter(dshape, ishape, axis=0):\n d = relay.var(\"d\", relay.TensorType(dshape, \"float32\"))\n i = relay.var(\"i\", relay.TensorType(ishape, \"int64\"))\n u = relay.var(\"u\", relay.TensorType(ishape, \"float32\"))\n z = relay.op.scatter(d, i, u, axis)\n\n func = relay.Function([d, i, u], z)\n\n data_np = np.random.uniform(size=dshape).astype(\"float32\")\n updates_np = np.random.uniform(size=ishape).astype(\"float32\")\n indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype(\"int64\")\n\n ref_res = ref_scatter(data_np, indices_np, updates_np, axis)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n def verify_dynamic_scatter(dshape, ishape, axis=0):\n d = relay.var(\"d\", relay.TensorType([relay.Any() for i in range(len(dshape))], \"float32\"))\n i = relay.var(\"i\", relay.TensorType([relay.Any() for i in 
range(len(ishape))], \"int64\"))\n u = relay.var(\"u\", relay.TensorType([relay.Any() for i in range(len(ishape))], \"float32\"))\n z = relay.op.scatter(d, i, u, axis)\n\n func = relay.Function([d, i, u], z)\n\n data_np = np.random.uniform(size=dshape).astype(\"float32\")\n updates_np = np.random.uniform(size=ishape).astype(\"float32\")\n indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype(\"int64\")\n\n ref_res = ref_scatter(data_np, indices_np, updates_np, axis)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"vm\", \"debug\"]:\n mod = tvm.ir.IRModule.from_expr(func)\n intrp = relay.create_executor(kind, mod=mod, device=dev, target=target)\n op_res = intrp.evaluate()(data_np, indices_np, updates_np)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_scatter((10,), (10,), 0)\n verify_scatter((10, 5), (10, 5), -2)\n verify_scatter((10, 5), (10, 5), -1)\n verify_scatter((10, 5), (3, 5), 0)\n verify_scatter((12, 4), (7, 2), 1)\n verify_scatter((2, 3, 4), (1, 3, 4), 0)\n verify_scatter((2, 3, 4), (2, 1, 4), 1)\n verify_scatter((2, 3, 4), (2, 3, 1), 2)\n verify_scatter((4, 2, 1), (1, 1, 1), 0)\n verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0)\n verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1)\n verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2)\n verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3)\n\n verify_dynamic_scatter((10,), (10,), 0)\n verify_dynamic_scatter((10, 5), (10, 5), -2)\n verify_dynamic_scatter((10, 5), (10, 5), -1)\n verify_dynamic_scatter((10, 5), (3, 5), 0)\n verify_dynamic_scatter((12, 4), (7, 2), 1)\n verify_dynamic_scatter((2, 3, 4), (1, 3, 4), 0)\n verify_dynamic_scatter((2, 3, 4), (2, 1, 4), 1)\n verify_dynamic_scatter((2, 3, 4), (2, 3, 1), 2)\n verify_dynamic_scatter((4, 2, 1), (1, 1, 1), 0)\n verify_dynamic_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0)\n verify_dynamic_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1)\n verify_dynamic_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2)\n verify_dynamic_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3)\n\n\[email protected]_gpu\[email protected](\n \"dshape, ishape, axis, dtype\",\n [\n ((10,), (10,), 0, \"int32\"),\n ((1000,), (1000,), 0, \"int32\"),\n ((10, 5), (10, 5), -2, \"float32\"),\n ((10, 5), (10, 5), -1, \"float32\"),\n ((10, 5), (3, 5), 0, \"float32\"),\n ((12, 4), (7, 2), 1, \"float32\"),\n ((2, 3, 4), (1, 3, 4), 0, \"float32\"),\n ((2, 3, 4), (2, 1, 4), 1, \"float32\"),\n ((2, 3, 4), (2, 3, 1), 2, \"float32\"),\n ((2, 3, 4, 5), (1, 3, 4, 5), 0, \"float32\"),\n ((6, 3, 4, 5), (2, 3, 4, 5), 1, \"float32\"),\n ((2, 3, 8, 5), (2, 3, 1, 1), 2, \"float32\"),\n ((16, 16, 4, 5), (16, 16, 4, 5), 3, \"float32\"),\n ],\n)\ndef test_scatter_add(dshape, ishape, axis, dtype):\n def ref_scatter_add(data, indices, updates, axis=0):\n output = np.copy(data)\n for index in np.ndindex(*indices.shape):\n new_index = list(index)\n new_index[axis] = indices[index]\n output[tuple(new_index)] += updates[index]\n return output\n\n def verify_scatter_add(dshape, ishape, axis=0, dtype=\"float32\"):\n d = relay.var(\"d\", relay.TensorType(shape=[relay.Any() for _ in dshape], dtype=dtype))\n i = relay.var(\"i\", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype=\"int64\"))\n u = relay.var(\"u\", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype=dtype))\n z = relay.op.scatter_add(d, i, u, axis)\n\n func = relay.Function([d, i, u], z)\n\n data_np = np.random.uniform(size=dshape).astype(dtype)\n updates_np = np.random.uniform(size=ishape).astype(dtype)\n indices_np = 
np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype(\"int64\")\n\n ref_res = ref_scatter_add(data_np, indices_np, updates_np, axis)\n\n verify_func(\n func,\n [data_np, indices_np, updates_np],\n ref_res,\n )\n\n verify_scatter_add(dshape, ishape, axis, dtype)\n\n\[email protected]_gpu\[email protected](\n \"data, axis, indices, ref_res\",\n [\n ([[1, 2], [3, 4]], 1, [[0, 0], [1, 0]], [[1, 1], [4, 3]]),\n ([[1, 2], [3, 4]], -1, [[0, 0], [1, 0]], [[1, 1], [4, 3]]),\n (\n [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],\n 0,\n [[[1, 0, 1], [1, 1, 0]]],\n [[[6, 1, 8], [9, 10, 5]]],\n ),\n (\n [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],\n -3,\n [[[1, 0, 1], [1, 1, 0]]],\n [[[6, 1, 8], [9, 10, 5]]],\n ),\n (\n [\n [\n [-0.2321, -0.2024, -1.7624],\n [-0.3829, -0.4246, 0.2448],\n [0.1822, 0.2360, -0.8965],\n [0.4497, -0.2224, 0.6103],\n ],\n [\n [0.0408, -0.7667, -0.4303],\n [-0.3216, 0.7489, -0.1502],\n [0.0144, -0.4699, -0.0064],\n [-0.0768, -1.6064, 1.3390],\n ],\n ],\n 1,\n [[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],\n [\n [[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],\n [[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],\n ],\n ),\n (\n [\n [\n [-0.2321, -0.2024, -1.7624],\n [-0.3829, -0.4246, 0.2448],\n [0.1822, 0.2360, -0.8965],\n [0.4497, -0.2224, 0.6103],\n ],\n [\n [0.0408, -0.7667, -0.4303],\n [-0.3216, 0.7489, -0.1502],\n [0.0144, -0.4699, -0.0064],\n [-0.0768, -1.6064, 1.3390],\n ],\n ],\n -2,\n [[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],\n [\n [[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],\n [[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],\n ],\n ),\n (\n [\n [\n [-0.2321, -0.2024, -1.7624],\n [-0.3829, -0.4246, 0.2448],\n [0.1822, 0.2360, -0.8965],\n [0.4497, -0.2224, 0.6103],\n ],\n [\n [0.0408, -0.7667, -0.4303],\n [-0.3216, 0.7489, -0.1502],\n [0.0144, -0.4699, -0.0064],\n [-0.0768, -1.6064, 1.3390],\n ],\n ],\n -2,\n [[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],\n [\n [[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],\n [[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],\n ],\n ),\n (\n [\n [\n [0.3050, 1.6986, 1.1034],\n [0.7020, -0.6960, -2.1818],\n [0.3116, -0.5773, -0.9912],\n [0.0835, -1.3915, -1.0720],\n ],\n [\n [0.1694, -0.6091, -0.6539],\n [-0.5234, -0.1218, 0.5084],\n [0.2374, -1.9537, -2.0078],\n [-0.5700, -1.0302, 0.1558],\n ],\n ],\n 2,\n [\n [[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],\n [[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]],\n ],\n [\n [\n [1.6986, 1.6986, 0.3050, 1.6986],\n [0.7020, 0.7020, -2.1818, -2.1818],\n [-0.5773, -0.9912, -0.5773, -0.9912],\n [-1.0720, -1.0720, -1.3915, 0.0835],\n ],\n [\n [0.1694, 0.1694, -0.6091, -0.6539],\n [0.5084, 0.5084, -0.1218, -0.5234],\n [-1.9537, -2.0078, 0.2374, 0.2374],\n [-0.5700, 0.1558, -0.5700, 0.1558],\n ],\n ],\n ),\n (\n [\n [\n [0.3050, 1.6986, 1.1034],\n [0.7020, -0.6960, -2.1818],\n [0.3116, -0.5773, -0.9912],\n [0.0835, -1.3915, -1.0720],\n ],\n [\n [0.1694, -0.6091, -0.6539],\n [-0.5234, -0.1218, 0.5084],\n [0.2374, -1.9537, -2.0078],\n [-0.5700, -1.0302, 0.1558],\n ],\n ],\n -1,\n [\n [[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],\n [[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]],\n ],\n [\n [\n [1.6986, 1.6986, 0.3050, 1.6986],\n [0.7020, 0.7020, -2.1818, -2.1818],\n [-0.5773, -0.9912, -0.5773, -0.9912],\n [-1.0720, -1.0720, -1.3915, 0.0835],\n ],\n [\n [0.1694, 0.1694, -0.6091, -0.6539],\n [0.5084, 0.5084, -0.1218, -0.5234],\n [-1.9537, -2.0078, 0.2374, 0.2374],\n 
[-0.5700, 0.1558, -0.5700, 0.1558],\n ],\n ],\n ),\n ],\n)\ndef test_gather(data, axis, indices, ref_res):\n def verify_gather(data, axis, indices, ref_res):\n data = np.asarray(data, dtype=\"float32\")\n indices = np.asarray(indices, dtype=\"int32\")\n ref_res = np.asarray(ref_res)\n d = relay.var(\"x\", relay.TensorType(data.shape, \"float32\"))\n i = relay.var(\"y\", relay.TensorType(indices.shape, \"int32\"))\n z = relay.gather(d, axis, i)\n\n func = relay.Function([d, i], z)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(data, indices)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_gather(data, axis, indices, ref_res)\n\n\[email protected]_gpu\ndef test_gather_nd():\n def verify_gather_nd(xshape, yshape, y_data, batch_dims=0):\n x = relay.var(\"x\", relay.TensorType(xshape, \"float32\"))\n y = relay.var(\"y\", relay.TensorType(yshape, \"int32\"))\n z = relay.gather_nd(x, y, batch_dims)\n\n func = relay.Function([x, y], z)\n\n x_data = np.random.uniform(size=xshape).astype(\"float32\")\n\n if y_data:\n y_data = np.array(y_data, dtype=\"int32\")\n else:\n y_data = np.random.randint(low=0, high=2, size=yshape, dtype=\"int32\")\n\n ref_res = ref_funcs.gather_nd(x_data, y_data, batch_dims)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])\n verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]])\n verify_gather_nd((3, 2, 2), (2, 2), [[0, 1], [1, 0]])\n verify_gather_nd((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])\n\n # Examples from tensorflow gather_nd doc\n # https://www.tensorflow.org/api_docs/python/tf/gather_nd\n verify_gather_nd((2, 2, 2), (1, 2), [[1, 0]], 1)\n verify_gather_nd((2, 2, 2), (1, 2, 1), [[[1], [0]]], 1)\n verify_gather_nd((2, 2, 2), (2, 2, 1), [[[1], [0]], [[0], [1]]], 1)\n\n # Test cases from tensorflow gather_nd tests kernel_tests/array_ops_test.py\n verify_gather_nd((2, 2, 2), (1, 2), None, 1)\n verify_gather_nd((2, 2, 2), (2, 2), None, 1)\n verify_gather_nd((2, 2, 3, 2), (3, 2), None, 1)\n verify_gather_nd((2, 2, 3, 2), (2, 2), None, 1)\n verify_gather_nd((2, 2, 3, 2), (1, 2), None, 1)\n verify_gather_nd((2, 2, 3, 2), (3, 2, 1), None, 1)\n verify_gather_nd((2, 2, 3, 2), (2, 2, 2), None, 1)\n verify_gather_nd((2, 2, 3, 2), (1, 2, 3), None, 1)\n\n verify_gather_nd((3, 2, 2, 3, 4), (3, 3, 2), None, 2)\n verify_gather_nd((3, 2, 2, 3, 4), (2, 3, 2), None, 2)\n verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2), None, 2)\n verify_gather_nd((3, 2, 2, 3, 4), (3, 3, 2, 1), None, 2)\n verify_gather_nd((3, 2, 2, 3, 4), (2, 3, 2, 2), None, 2)\n verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2, 3), None, 2)\n\n\ndef _verify_infiniteness_ops(relay_op, ref_op):\n for dtype in [\"float32\", \"float16\", \"float16\", \"int32\", \"int16\"]:\n shape = (2, 8, 8)\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n y = relay_op(x)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, \"bool\")\n\n data = np.random.uniform(size=shape).astype(dtype)\n if dtype.startswith(\"float\"):\n data.ravel()[\n np.random.choice(data.size, int(data.size * 0.5), replace=False)\n ] = np.infty\n 
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan\n\n intrp = create_executor()\n op_res = intrp.evaluate(y, {x: data})\n ref_res = ref_op(data)\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)\n\n\ndef test_isfinite():\n _verify_infiniteness_ops(relay.isfinite, np.isfinite)\n\n\ndef test_isinf():\n _verify_infiniteness_ops(relay.isinf, np.isinf)\n\n\[email protected]_gpu\ndef test_unravel_index():\n def verify_unravel_index(indices, shape, dtype):\n x_data = np.array(indices).astype(dtype)\n y_data = np.array(shape).astype(dtype)\n x = relay.var(\"x\", relay.TensorType(x_data.shape, dtype))\n y = relay.var(\"y\", relay.TensorType(y_data.shape, dtype))\n\n z = relay.unravel_index(x, y)\n zz = run_infer_type(z)\n\n if len(x_data.shape) == 1:\n out_shape = [y_data.shape[0], x_data.shape[0]]\n else:\n out_shape = [y_data.shape[0]]\n assert zz.checked_type == relay.ty.TensorType(out_shape, dtype)\n\n func = relay.Function([x, y], z)\n ref_res = np.unravel_index(x_data, y_data)\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n for dtype in [\"int64\", \"int32\"]:\n verify_unravel_index([0, 1, 2, 3], [2, 2], dtype)\n verify_unravel_index([144], [5, 5, 5, 2], dtype)\n verify_unravel_index(144, [5, 5, 5, 2], dtype)\n verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype)\n\n # In below example, 5 is out of bound for array of size 4.\n # Numpy implementation throws error for it\n # TVM implementation does not throw error instead it produces\n # output which is inline with Tensorflow\n # verify_unravel_index([0, 1, 2, 5], [2, 2], dtype)\n\n\[email protected]_gpu\ndef test_sparse_to_dense():\n def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):\n sparse_indices_data = np.array(sparse_indices)\n sparse_values_data = np.array(sparse_values)\n default_value_data = np.array(default_value)\n\n a = relay.var(\n \"a\", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype))\n )\n b = relay.var(\n \"b\", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype))\n )\n if default_value is None:\n args = [a, b]\n d = relay.sparse_to_dense(a, output_shape, b)\n else:\n c = relay.var(\n \"c\", relay.TensorType(default_value_data.shape, str(default_value_data.dtype))\n )\n args = [a, b, c]\n d = relay.sparse_to_dense(a, output_shape, b, c)\n\n zz = run_infer_type(d)\n assert zz.checked_type == relay.ty.TensorType(output_shape, str(sparse_values_data.dtype))\n\n func = relay.Function(args, d)\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n if default_value is None:\n op_res = intrp.evaluate(func)(sparse_indices_data, sparse_values_data)\n else:\n op_res = intrp.evaluate(func)(\n sparse_indices_data, sparse_values_data, default_value_data\n )\n tvm.testing.assert_allclose(op_res.numpy(), xpected, rtol=1e-5)\n\n verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0]) # scalar\n verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3]) # vector\n verify_sparse_to_dense(\n [[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]\n ) # nXd\n verify_sparse_to_dense(\n [[0, 0, 0], [1, 2, 3]],\n [1, 2],\n 4,\n [2, 3, 4],\n [[[1, 4, 
4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],\n ) # nXd\n verify_sparse_to_dense(\n [0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]\n ) # floats\n verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0]) # default value not specified\n\n # negative test cases\n # sparse indices should be ints\n # verify_sparse_to_dense([[0.1, 1.1, 4.1], [0,2,4]], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])\n # sparse_values should be 0d or 1d only\n # verify_sparse_to_dense([[0, 1, 4], [0, 2, 4]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])\n # sparse_indices should not be > 2d tensor\n # verify_sparse_to_dense([[[[0, 1, 4], [0, 2, 4]]]], [[[[3.1, 3.1, 3.1]]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])\n\n\[email protected]_gpu\[email protected](\n \"sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np\",\n [\n (\n np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int32),\n np.array([7, 5, 6, 3, 9], dtype=np.int32),\n np.array([2, 3, 6], dtype=np.int32),\n np.array([9, -1], dtype=np.int32),\n ),\n (\n np.array(\n [[0, 0, 0, 0], [0, 0, 1, 2], [0, 1, 0, 3], [1, 0, 0, 4], [1, 2, 3, 6]],\n dtype=np.int64,\n ),\n np.array([7, 5, 6, 3, 9], dtype=np.int64),\n np.array([2, 3, 6, 7], dtype=np.int64),\n np.array([9, -1, 7], dtype=np.int64),\n ),\n (\n np.array(\n [\n [0, 0, 0, 0, 0],\n [0, 0, 1, 2, 3],\n [0, 1, 0, 3, 5],\n [1, 0, 0, 4, 6],\n [1, 2, 3, 6, 8],\n ],\n dtype=np.int64,\n ),\n np.array([7, 5, 6, 3, 9], dtype=np.int64),\n np.array([2, 3, 6, 7, 9], dtype=np.int64),\n np.array([9, -1, 7], dtype=np.int64),\n ),\n (\n np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int32),\n np.array([7, 5, 6, 3, 9], dtype=np.int32),\n np.array([9, 4], dtype=np.int32),\n np.array([2, -1, 6], dtype=np.int32),\n ),\n (\n np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),\n np.array([7, 5, 6, 3, 9], dtype=np.int64),\n np.array([9, 4], dtype=np.int64),\n np.array([-1], dtype=np.int64),\n ),\n (\n np.array([[0], [5], [10], [20], [24]], dtype=np.int32),\n np.array([7, 5, 6, 3, 9], dtype=np.int32),\n np.array([25], dtype=np.int32),\n np.array([5, 5], dtype=np.int32),\n ),\n (\n np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),\n np.array([7, 5, 6, 3, 9], dtype=np.int64),\n np.array([500, 20], dtype=np.int64),\n np.array([500, 20], dtype=np.int64),\n ),\n (\n np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int32),\n np.array([7, 5, 6, 3, 9], dtype=np.int32),\n np.array([500, 20], dtype=np.int32),\n np.array([500, -1], dtype=np.int32),\n ),\n (\n np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),\n np.array([7, 5, 6, 3, 9], dtype=np.int64),\n np.array([500, 20], dtype=np.int64),\n np.array([250, 40], dtype=np.int64),\n ),\n (\n np.ones((0, 1), dtype=np.int32),\n np.array([], dtype=np.int32),\n np.array([4], dtype=np.int32),\n np.array([2, -1], dtype=np.int32),\n ),\n (\n np.ones((0, 1), dtype=np.int64),\n np.array([], dtype=np.int64),\n np.array([4], dtype=np.int64),\n np.array([2, 2], dtype=np.int64),\n ),\n (\n np.ones((0, 2), dtype=np.int32),\n np.array([], dtype=np.int32),\n np.array([3, 6], dtype=np.int32),\n np.array([-1, 2], dtype=np.int32),\n ),\n ],\n)\[email protected](\"use_dyn\", [True, False])\ndef test_sparse_reshape(sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn):\n def ref_sparse_reshape(\n sparse_indices: np.ndarray,\n prev_shape: np.ndarray,\n new_shape: np.ndarray,\n 
):\n \"\"\"\n This function calculates the expected output of sparseshape operator given the inputs.\n \"\"\"\n\n new_sparse_indices = np.ones(\n (sparse_indices.shape[0], new_shape.shape[0]), dtype=sparse_indices.dtype\n )\n multipliers = np.ones(prev_shape.shape[0])\n dividers = np.ones(new_shape.shape[0])\n total_ele = np.prod(prev_shape)\n division_total_ele = 1\n for i in range(new_shape.shape[0]):\n if new_shape[i] == -1:\n continue\n division_total_ele *= new_shape[i]\n for i in range(prev_shape.shape[0] - 2, -1, -1):\n multipliers[i] = prev_shape[i + 1] * multipliers[i + 1]\n\n for i in range(len(new_shape)):\n if new_shape[i] == -1:\n new_shape[i] = total_ele // division_total_ele\n\n if np.array_equal(prev_shape, new_shape):\n return sparse_indices, prev_shape\n\n for i in range(new_shape.shape[0] - 2, -1, -1):\n dividers[i] = new_shape[i + 1] * dividers[i + 1]\n\n for row_num, sparse_row in enumerate(sparse_indices):\n flat_idx = 0\n if len(sparse_indices.shape) != 1:\n for i, ele in enumerate(sparse_row):\n flat_idx += sparse_row[i] * multipliers[i]\n else:\n flat_idx += sparse_row\n if len(new_sparse_indices.shape) != 1:\n for i in range(new_sparse_indices.shape[1]):\n new_sparse_indices[row_num][i] = flat_idx // dividers[i]\n flat_idx = flat_idx % dividers[i]\n else:\n new_sparse_indices[row_num] = flat_idx\n\n return new_sparse_indices, new_shape\n\n def verify_sparse_reshape(\n sparse_indices_np: np.ndarray,\n sparse_values_np: np.ndarray,\n prev_shape_np: np.ndarray,\n new_shape_np: np.ndarray,\n ):\n \"\"\"\n This function verifies the relay output of sparse_reshape with its expected output.\n \"\"\"\n if use_dyn:\n sparse_indices = relay.var(\n \"sparse_indices\",\n shape=[relay.Any(), relay.Any()],\n dtype=str(sparse_indices_np.dtype),\n )\n prev_shape = relay.var(\n \"prev_shape\",\n shape=[relay.Any()],\n dtype=str(prev_shape_np.dtype),\n )\n new_shape = relay.var(\n \"new_shape\",\n shape=[relay.Any()],\n dtype=str(new_shape_np.dtype),\n )\n else:\n sparse_indices = relay.var(\n \"sparse_indices\",\n relay.TensorType(sparse_indices_np.shape, str(sparse_indices_np.dtype)),\n )\n prev_shape = relay.var(\n \"prev_shape\", relay.TensorType(prev_shape_np.shape, str(prev_shape_np.dtype))\n )\n new_shape = relay.var(\n \"new_shape\", relay.TensorType(new_shape_np.shape, str(new_shape_np.dtype))\n )\n z = relay.op.sparse_reshape(sparse_indices, prev_shape, new_shape).astuple()\n\n func = relay.Function([sparse_indices, prev_shape, new_shape], z)\n\n ref_res = ref_sparse_reshape(sparse_indices_np, prev_shape_np, new_shape_np)\n outputs = run_infer_type(z)\n new_sparse_indices_infer_type, new_shape_infer_type = (\n outputs.checked_type.fields[0].dtype,\n outputs.checked_type.fields[1].dtype,\n )\n\n assert new_sparse_indices_infer_type == sparse_indices_np.dtype\n assert new_shape_infer_type == new_shape_np.dtype\n verify_func(\n func,\n [sparse_indices_np, prev_shape_np, new_shape_np],\n ref_res,\n )\n\n verify_sparse_reshape(\n sparse_indices_np,\n sparse_values_np,\n prev_shape_np,\n new_shape_np,\n )\n\n\[email protected]_gpu\[email protected](\n \"data_np, segment_ids_np, num_segments\",\n [\n (\n np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),\n np.array([0, 0, 1, 1, 0, 1], dtype=np.int32),\n None,\n ),\n (\n np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),\n np.array([0, 0, 1], dtype=np.int32),\n None,\n ),\n (\n np.random.random((6, 4, 5)),\n np.array([2, 0, 1, 0, 3, 2], dtype=np.int64),\n None,\n ),\n (\n np.array([[[1, 7]], [[3, 8]], [[2, 
9]]], dtype=np.float32),\n np.array([0, 0, 1], dtype=np.int32),\n None,\n ),\n (\n np.random.random((9, 4, 5, 7)),\n np.array([5, 0, 1, 0, 3, 6, 8, 7, 7], dtype=np.int64),\n 9,\n ),\n (\n np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),\n np.array([0, 2], dtype=np.int32),\n 4,\n ),\n (\n np.random.random((6, 4, 5)),\n np.array([0, 0, 1, 5, 5], dtype=np.int32),\n 100,\n ),\n ],\n)\[email protected](\"use_dyn\", [True, False])\ndef test_segment_sum(data_np, segment_ids_np, num_segments, use_dyn):\n def ref_segment_sum(\n data: np.ndarray,\n segment_ids: np.ndarray,\n num_segments: Optional[int] = None,\n ):\n \"\"\"\n This function calculates the expected output of segment_sum operator given the inputs.\n \"\"\"\n if not num_segments:\n num_segments = np.unique(segment_ids).shape[0]\n\n result = np.zeros((num_segments,) + data.shape[1:], data.dtype)\n for i, index in enumerate(segment_ids):\n result[index] += data[i]\n return result\n\n def verify_segment_sum(\n data_np: np.ndarray, segment_ids_np: np.ndarray, num_segments: Optional[int]\n ):\n \"\"\"\n This function verifies the relay output of segment_sum with its expected output.\n \"\"\"\n if use_dyn:\n data = relay.var(\n \"data\",\n shape=[relay.Any() for _ in data_np.shape],\n dtype=str(data_np.dtype),\n )\n segment_ids = relay.var(\n \"segment_ids\",\n shape=[relay.Any()],\n dtype=str(segment_ids_np.dtype),\n )\n else:\n data = relay.var(\n \"data\",\n relay.TensorType(data_np.shape, str(data_np.dtype)),\n )\n segment_ids = relay.var(\n \"segment_ids\", relay.TensorType(segment_ids_np.shape, str(segment_ids_np.dtype))\n )\n z = relay.op.segment_sum(data, segment_ids, num_segments)\n\n func = relay.Function([data, segment_ids], z)\n ref_res = ref_segment_sum(data_np, segment_ids_np, num_segments=num_segments)\n segment_sum_result = run_infer_type(z)\n assert segment_sum_result.checked_type.dtype == data_np.dtype\n verify_func(\n func,\n [data_np, segment_ids_np],\n ref_res,\n )\n\n verify_segment_sum(data_np, segment_ids_np, num_segments)\n\n\ndef verify_func(func, data, ref_res, target_device=tvm.testing.enabled_targets()):\n assert isinstance(data, list)\n for target, dev in target_device:\n for kind in [\"vm\"]:\n mod = tvm.ir.IRModule.from_expr(func)\n intrp = relay.create_executor(kind, mod=mod, device=dev, target=target)\n op_res = intrp.evaluate()(*data)\n if isinstance(op_res, tvm.runtime.container.ADT):\n assert len(op_res) == len(\n ref_res\n ), \"Outputs from TVM and Python implementation must be equal \"\n\n for op_result, ref_result in zip(op_res, ref_res):\n tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=1e-5)\n else:\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n relay.backend.compile_engine.get().clear()\n\n\[email protected]_gpu\ndef test_adv_index():\n def verify_adv_index(data_shape, index_shapes):\n dtype = \"float32\"\n inputs = [relay.var(\"data\", relay.TensorType(data_shape, dtype))]\n np_data = np.random.uniform(size=data_shape).astype(dtype)\n np_indices = []\n for i, index_shape in enumerate(index_shapes):\n limit = data_shape[i]\n np_indices.append(np.random.uniform(0, limit - 1, size=index_shape).astype(\"int64\"))\n inputs.append(relay.var(\"index_{}\".format(i), relay.TensorType(index_shape, \"int64\")))\n np_out = np_data[tuple(np_indices)]\n np_args = [np_data] + np_indices\n out = relay.op.adv_index(inputs)\n\n func = relay.Function(inputs, out)\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n 
intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(*np_args)\n tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5)\n\n verify_adv_index((10, 5), [(3, 4), (3, 1)])\n verify_adv_index(\n (10, 5),\n [\n (2,),\n ],\n )\n verify_adv_index((10, 5, 15), [(1, 2, 1), (1, 2, 7)])\n\n\n# Helper for testing binop functions\nscanops_supported = {\"cumsum\": relay.op.cumsum, \"cumprod\": relay.op.cumprod}\n\n\ndef run_binop_tests(\n target, dev, binop_type: str, gt_func: Callable[..., np.array], identity_value: int\n):\n def assert_relay_scanop(\n data_np: np.array,\n np_out: np.array,\n axis: int = None,\n out_dtype: str = None,\n rtol: float = 1e-5,\n atol: float = 1e-5,\n exclusive: bool = False,\n ):\n inp = relay.var(\"data\", relay.TensorType(data_np.shape, str(data_np.dtype)))\n\n if binop_type not in scanops_supported.keys():\n raise ValueError(f\"Unknown function {binop_type}. Options: {scanops_supported.keys()}\")\n out = scanops_supported[binop_type](inp, axis, out_dtype, exclusive=exclusive)\n func = relay.Function([inp], out)\n\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(data_np)\n tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=rtol, atol=atol)\n\n data = np.array([2, 3, 0])\n assert_relay_scanop(data, gt_func(data))\n assert_relay_scanop(data, gt_func(data), out_dtype=\"int64\")\n\n data = np.random.randn(10, 10)\n assert_relay_scanop(data, gt_func(data))\n assert_relay_scanop(data, gt_func(data, axis=0), axis=0)\n assert_relay_scanop(data, gt_func(data, axis=1), axis=1)\n\n data = np.random.randn(10, 5, 10).astype(\"float32\")\n assert_relay_scanop(data, gt_func(data), rtol=1e-4, atol=1e-4)\n assert_relay_scanop(data, gt_func(data, axis=0), axis=0, rtol=1e-4, atol=1e-4)\n assert_relay_scanop(data, gt_func(data, axis=1), axis=1, rtol=1e-4, atol=1e-4)\n assert_relay_scanop(data, gt_func(data, axis=-1), axis=-1, rtol=1e-4, atol=1e-4)\n\n data = np.random.rand(10) > 0.5\n data = data.astype(np.int32)\n assert_relay_scanop(data, gt_func(data, dtype=np.int32))\n assert_relay_scanop(data, gt_func(data, dtype=\"int64\"), out_dtype=\"int64\")\n\n # Test exclusivity operations\n data = np.random.randint(-100, 100, size=(10, 10)).astype(\"int64\")\n expected_result = np.roll(gt_func(data), 1)\n expected_result[0] = identity_value\n assert_relay_scanop(data, expected_result, exclusive=True)\n\n expected_result = np.roll(gt_func(data, axis=0), 1, axis=0)\n expected_result[0, :] = identity_value\n assert_relay_scanop(data, expected_result, exclusive=True, axis=0)\n\n expected_result = np.roll(gt_func(data, axis=1), 1, axis=1)\n expected_result[:, 0] = identity_value\n assert_relay_scanop(data, expected_result, exclusive=True, axis=1)\n\n\[email protected]_targets\ndef test_cumsum(target, dev):\n run_binop_tests(target, dev, binop_type=\"cumsum\", gt_func=np.cumsum, identity_value=0)\n\n\[email protected]_targets\ndef test_cumprod(target, dev):\n run_binop_tests(target, dev, binop_type=\"cumprod\", gt_func=np.cumprod, identity_value=1)\n\n\[email protected]_targets\ndef test_scatter_nd(target, dev):\n def verify_scatter_nd(\n data_np, indices_np, updates_np, ref_res, mode=\"add\", rtol=1e-5, atol=1e-5\n ):\n data = relay.var(\"data\", shape=data_np.shape, dtype=str(data_np.dtype))\n indices = relay.var(\"indices\", shape=indices_np.shape, dtype=str(indices_np.dtype))\n updates = relay.var(\"updates\", shape=updates_np.shape, 
dtype=str(updates_np.dtype))\n\n out = relay.op.scatter_nd(data, indices, updates, mode)\n func = relay.Function([data, indices, updates], out)\n\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)\n\n def verify_scatter_nd_with_stack(\n data_np, indices_np, updates_np, ref_res, mode=\"add\", rtol=1e-5, atol=1e-5\n ):\n data = relay.var(\"data\", shape=data_np.shape, dtype=str(data_np.dtype))\n indices_vars = [\n relay.var(\"ind%d\" % i, shape=v.shape, dtype=str(v.dtype))\n for i, v in enumerate(indices_np)\n ]\n updates = relay.var(\"updates\", shape=updates_np.shape, dtype=str(updates_np.dtype))\n\n # test if scatter_nd works in case indices are prepared by another Relay operator\n indices = relay.op.stack(indices_vars, axis=0)\n out = relay.op.scatter_nd(data, indices, updates, mode)\n func = relay.Function(\n [data, updates] + indices_vars,\n out,\n )\n\n fargs = [data_np, updates_np]\n for a in indices_np:\n fargs.append(a)\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(*fargs)\n tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)\n\n data = np.zeros((2, 2)).astype(\"int64\")\n indices = np.array([[1, 1, 0], [0, 1, 0]])\n updates = np.array([2, 3, 0])\n out = np.array([[0, 0], [2, 3]])\n verify_scatter_nd(data, indices, updates, out)\n verify_scatter_nd_with_stack(data, indices, updates, out)\n\n data = np.zeros((2, 2, 2, 2)).astype(\"int64\")\n indices = np.array([[0, 1], [1, 1]])\n updates = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])\n out = np.array([[[[0, 0], [0, 0]], [[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]])\n verify_scatter_nd(data, indices, updates, out)\n verify_scatter_nd_with_stack(data, indices, updates, out)\n\n indices = np.array([[1, 0, 0]])\n updates = np.reshape(np.arange(1560 * 3), (3, 1560)).astype(\"float32\")\n shape = (2, 1560)\n data = np.zeros(shape).astype(\"float32\")\n out = data.copy()\n out[1, :] += updates[0, :]\n out[0, :] += updates[1, :]\n out[0, :] += updates[2, :]\n verify_scatter_nd(data, indices, updates, out, mode=\"add\")\n verify_scatter_nd_with_stack(data, indices, updates, out)\n\n for mode in [\"add\", \"update\"]:\n indices = np.stack((np.random.randint(2, size=5), np.random.randint(7, size=5))).astype(\n \"int64\"\n )\n updates = np.ones((5, 3)).astype(\"float64\")\n shape = (2, 7, 3)\n data = np.random.random(shape).astype(\"float64\")\n out = data.copy()\n for i in range(indices.shape[1]):\n for j in range(updates.shape[1]):\n if mode == \"add\":\n out[indices[0, i], indices[1, i], j] += updates[i, j]\n elif mode == \"update\":\n out[indices[0, i], indices[1, i], j] = updates[i, j]\n verify_scatter_nd(data, indices, updates, out, mode)\n verify_scatter_nd_with_stack(data, indices, updates, out, mode)\n\n\ndef test_unique():\n def calc_numpy_unique(data, is_sorted=False):\n uniq, index, inverse, counts = np.unique(\n data, return_index=True, return_inverse=True, return_counts=True\n )\n num_uniq = np.array([len(uniq)]).astype(\"int32\")\n if not is_sorted:\n order = np.argsort(index)\n reverse_order = np.argsort(order)\n uniq = uniq[order].astype(data.dtype)\n inverse = np.array([reverse_order[i] for i in inverse]).astype(\"int32\")\n counts = counts[order].astype(\"int32\")\n index = np.sort(index) # In unsorted case, 
need to sort the index of first occurence\n return [\n uniq.astype(data.dtype),\n index.astype(\"int32\"),\n inverse.astype(\"int32\"),\n num_uniq,\n counts,\n ]\n\n def verify_unique(n, dtype, is_dyn=False, is_sorted=False, return_counts=False):\n if is_dyn:\n x = relay.var(\"x\", relay.TensorType([relay.Any()], dtype))\n else:\n x = relay.var(\"x\", relay.TensorType([n], dtype))\n outs = relay.unique(x, is_sorted, return_counts)\n outs = outs.astuple()\n func = relay.Function([x], outs)\n x_data = np.random.randint(50, size=n).astype(dtype)\n\n if is_dyn:\n backends = [\"vm\", \"debug\"]\n else:\n backends = [\"graph\", \"debug\"]\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in backends:\n mod = tvm.ir.IRModule.from_expr(func)\n intrp = relay.create_executor(kind, mod=mod, device=dev, target=target)\n tvm_res = intrp.evaluate()(\n x_data\n ) # unique, indices, inverse_indices, num_unique, (counts)\n np_res = calc_numpy_unique(\n x_data, is_sorted\n ) # unique, indices, inverse_indices, num_unique, counts\n num_unique = np_res[3][0]\n\n # num_unique\n assert num_unique == tvm_res[3].numpy()[0]\n # unique\n tvm.testing.assert_allclose(tvm_res[0].numpy()[:num_unique], np_res[0], rtol=1e-5)\n # indices\n tvm.testing.assert_allclose(tvm_res[1].numpy()[:num_unique], np_res[1], rtol=1e-5)\n # inverse_indices\n tvm.testing.assert_allclose(tvm_res[2].numpy(), np_res[2], rtol=1e-5)\n # counts\n if return_counts:\n tvm.testing.assert_allclose(\n tvm_res[4].numpy()[:num_unique], np_res[4], rtol=1e-5\n )\n\n for dtype in [\"int32\", \"int64\"]:\n for i in range(8):\n is_dyn, is_sorted, return_counts = bool(i & 1), bool(i & 2), bool(i & 4)\n verify_unique(10, dtype, is_dyn, is_sorted, return_counts)\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n"
] | [
[
"numpy.array_equal",
"numpy.random.rand",
"numpy.copy",
"numpy.tile",
"numpy.where",
"numpy.sort",
"numpy.unique",
"numpy.random.random",
"numpy.full_like",
"numpy.full",
"numpy.ndindex",
"numpy.indices",
"numpy.random.normal",
"numpy.take",
"numpy.unravel_index",
"numpy.tanh",
"numpy.prod",
"numpy.transpose",
"numpy.random.randint",
"numpy.arange",
"numpy.logaddexp",
"numpy.array",
"numpy.reshape",
"numpy.zeros",
"numpy.random.randn",
"numpy.random.random_sample",
"numpy.stack",
"numpy.argsort",
"numpy.clip",
"numpy.squeeze",
"numpy.asarray",
"numpy.ones",
"numpy.random.uniform",
"numpy.repeat",
"numpy.linspace",
"numpy.meshgrid",
"numpy.flip"
]
] |
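The TVM test row above checks Relay outputs against NumPy reference implementations (e.g. `ref_segment_sum`). As a reading aid, the sketch below isolates that segment-sum reference in plain NumPy so it can be run without TVM; the input values are illustrative only and are not taken from the row.

```python
# Pure-NumPy sketch of the segment_sum reference logic used in the tests above.
# No TVM/Relay needed; data and segment ids below are illustrative values.
import numpy as np

def ref_segment_sum(data, segment_ids, num_segments=None):
    """Sum the rows of `data` that share the same id in `segment_ids`."""
    if num_segments is None:
        num_segments = np.unique(segment_ids).shape[0]
    result = np.zeros((num_segments,) + data.shape[1:], data.dtype)
    for i, idx in enumerate(segment_ids):
        result[idx] += data[i]
    return result

data = np.array([5, 1, 7, 2, 3, 4], dtype=np.float32)
segment_ids = np.array([0, 0, 1, 1, 0, 1], dtype=np.int32)
print(ref_segment_sum(data, segment_ids))  # -> [ 9. 13.]
```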
TristenHarr/api2db | [
"8c8b14280441f5153ff146c23359a0eb91022ddb"
] | [
"src/api2db/ingest/api_form.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nContains the ApiForm class\n==========================\n\"\"\"\nfrom .pre_process.pre import Pre\nfrom .data_feature.feature import Feature\nfrom .post_process.post import Post\nfrom typing import Optional, List, Any\nimport os\nimport pickle\nimport pandas as pd\nimport json\n\n\nclass ApiForm(object):\n \"\"\"Used to clean and process incoming data arriving from an Api\"\"\"\n\n def __init__(self,\n name: str,\n pre_process: Optional[List[Pre]]=None,\n data_features: Optional[List[Feature]]=None,\n post_process: Optional[List[Post]]=None):\n \"\"\"\n Creates an ApiForm\n\n NOTE:\n The ApiForm is used by api2db to do the processing and cleaning of data.\n Incoming data goes through 3 phases.\n\n 1. Pre-Processing\n\n * Extract global data-features\n\n * Extract a list of data-points that will serve as the rows in a database\n\n * Flatten nested arrays of data\n\n * Swap extraneous rows returned from poorly implemented APIs\n\n 2. Feature Extraction\n\n * Extracts the data features for each row that will be stored in a database\n\n 3. Post-Processing\n\n * Add new columns of data that will be the same globally for the arriving data.\n I.e. arrival timestamps\n\n * Apply functions across data columns, replacing the data with the calculated value.\n I.e. Reformat strings, strip whitespace, etc.\n\n * Add new columns of data that are derived from performing calculations on existing columns.\n I.e. Use a `latitude` and `longitude` column to calculate a new column called `country`\n\n * Cast columns that contain datetime data from strings to date times.\n\n * Drop columns that should not contain null values.\n\n * Perform merging of incoming data with locally stored reference tables.\n I.e. Incoming data has column `location_id` field, a reference table contains location info with\n the `location_id` field being a link between the two.\n This allows for data to be merged on column `location_id` in order to contain all data in a\n single table.\n\n Args:\n name: The name of the collector the ApiForm is associated with\n pre_process: An array pre-processing objects to be applied sequentially on incoming data\n data_features: An array of data features to be extracted from the incoming data.\n The programmer can choose which data features they require, and keep only those.\n post_process: An array of post-processing objects to be applied sequentially on the data after data has been\n cleaned and extracted to a `pandas.DataFrame`\n \"\"\"\n self.name = name\n self.pre_process = [] if pre_process is None else pre_process\n self.data_features = [] if data_features is None else data_features\n self.post_process = [] if post_process is None else post_process\n if name == \"lab\":\n print(\"building laboratory...\")\n self.pre_process = pre_process\n self.data_features = data_features\n self.post_process = post_process\n\n def add_pre(self, pre: Pre) -> None:\n \"\"\"\n Allows the programmer to manually add a item to the pre-processing array.\n\n Args:\n pre: The pre-processing object to add\n\n Returns:\n None\n \"\"\"\n self.pre_process.append(pre)\n\n def add_feature(self, feat: Feature) -> None:\n \"\"\"\n Allows the programmer to manually add a item to the data-features array.\n\n Args:\n feat: The feature object to add\n\n Returns:\n None\n \"\"\"\n self.data_features.append(feat)\n\n def add_post(self, post: Post) -> None:\n \"\"\"\n Allows the programmer to manually add a item to the post-processing array.\n\n Args:\n post: The post-processing object to add\n\n 
Returns:\n None\n \"\"\"\n self.post_process.append(post)\n\n def pandas_typecast(self) -> dict:\n \"\"\"\n Performs typecasting from python native types to their pandas counterparts.\n Currently supported types are:\n\n * int\n * float\n * bool\n * str\n\n Since API data is inconsistent, all typecasting makes the values nullable inside the DataFrame. Null values can\n be removed during post-processing.\n\n Returns:\n A dictionary that can be used to cast a DataFrames types using DataFrame.astype()\n \"\"\"\n res = {}\n for feat in self.data_features:\n res[feat.key] = ApiForm.typecast(feat.dtype)\n return res\n\n @staticmethod\n def typecast(dtype: Any) -> str:\n \"\"\"\n Yields a string containing the pandas dtype when given a python native type.\n\n Args:\n dtype: The python native type\n\n Returns:\n The string representing the type that the native type converts to when put into a DataFrame\n \"\"\"\n if dtype is int:\n return \"Int64\"\n elif dtype is float:\n return \"Float64\"\n elif dtype is bool:\n return \"bool\"\n return \"string\"\n\n def experiment(self, CACHE, import_target) -> bool:\n \"\"\"\n Tool used to build an ApiForm\n\n NOTE:\n\n The laboratory is an experimental feature and does not currently support the StaticMerge post-processor.\n\n Args:\n CACHE: If the data imports should be cached. I.e. Only call the API once\n import_target: The target function that performs an API import\n\n Returns:\n True if experiment is ready for export otherwise False\n \"\"\"\n cache_path = os.path.join(os.getcwd(), \"lab_cache.pickle\")\n if CACHE and os.path.isfile(cache_path):\n with open(cache_path, \"rb\") as f:\n data = pickle.load(f)\n else:\n data = import_target()\n if CACHE and data is not None and type(data) is list and type(data[0]) is dict:\n with open(cache_path, \"wb\") as f:\n pickle.dump(data, f)\n\n pre_2_post = {}\n if type(data) is not list or (type(data) is list and len(data) == 0) or (\n type(data) is list and type(data[0]) is not dict):\n print(f\"import_target must return a list of dictionary data.\\nimport_target:\\n{data}\\n\")\n return False\n data = data[0]\n with open(\"data_without_preprocess.json\", \"w\") as f:\n json.dump(data, f, indent=10)\n if self.pre_process is None or type(self.pre_process) is not list:\n print(f\"data:\\n{data}\\n\")\n print(f\"data keys:\\n{data.keys()}\\n\")\n print(f\"pre_process must return a list of 0 or more pre-processors.\\npre_process:\\n{self.pre_process}\\n\")\n return False\n for pre in self.pre_process:\n if pre.ctype == \"global_extract\":\n pre_2_post[pre.key] = pre(lam_arg=data)\n else:\n data = pre(lam_arg=data)\n with open(\"data_after_preprocess.json\", \"w\") as f:\n json.dump(data, f, indent=10)\n if type(data) is not list:\n print(f\"data should be a list of dicts once pre-processing has been completed.\\ndata:\\n{data}\\n\")\n return False\n if type(self.data_features) is not list or len(self.data_features) == 0:\n for i in range(3 if len(data) > 3 else len(data)):\n print(f\"data point {i+1}:\\n{data[i]}\\n\")\n print(f\"data_features must return a list of data-features.\\ndata_features:\\n{self.data_features}\\n\")\n return False\n rows = []\n for d in data:\n row = {}\n for feat in self.data_features:\n row[feat.key] = feat(d)\n rows.append(row)\n df = pd.DataFrame(rows)\n for feat in self.data_features:\n df[feat.key] = df[feat.key].astype(ApiForm.typecast(feat.dtype))\n for k, v in pre_2_post.items():\n df[k] = v[\"value\"]\n df[k] = df[k].astype(ApiForm.typecast(v[\"dtype\"]))\n with 
open(\"data_after_feature_extraction.json\", \"w\") as f:\n df.to_json(path_or_buf=f, orient=\"records\", indent=10)\n if type(self.post_process) is not list:\n for i in range(3 if len(data) > 3 else len(data)):\n print(f\"data point {i+1}:\\n{data[i]}\\n\")\n print(f\"data:\\n{df}\\n\")\n print(f\"data dtypes:\\n{df.dtypes}\\n\")\n return False\n for post in self.post_process:\n df = post(df)\n for i in range(3 if len(data) > 3 else len(data)):\n print(f\"data point {i+1}:\\n{data[i]}\\n\")\n print(f\"finalized data:\\n{df}\\n\")\n print(f\"finalized data dtypes:\\n{df.dtypes}\\n\")\n return True\n"
] | [
[
"pandas.DataFrame"
]
] |
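The api2db row above maps Python native types to nullable pandas dtypes via `ApiForm.typecast` and applies them with `DataFrame.astype()`. The standalone sketch below reproduces that mapping on made-up feature names and values; it illustrates the pattern and is not part of the api2db package.

```python
# Standalone sketch of the nullable-typecast pattern behind ApiForm.pandas_typecast.
# Feature names and row values are invented for illustration.
import pandas as pd

def typecast(dtype):
    if dtype is int:
        return "Int64"    # nullable integer
    elif dtype is float:
        return "Float64"  # nullable float
    elif dtype is bool:
        return "bool"
    return "string"

features = {"station_id": int, "temperature": float, "active": bool, "name": str}
rows = [{"station_id": 1, "temperature": 21.5, "active": True, "name": "north"},
        {"station_id": 2, "temperature": 19.0, "active": False, "name": "south"}]
df = pd.DataFrame(rows).astype({k: typecast(v) for k, v in features.items()})
print(df.dtypes)  # station_id: Int64, temperature: Float64, active: bool, name: string
```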
vivacebelles/QCEngine | [
"d9a033f66cbbd4476bd0848d08323988c2726531"
] | [
"qcengine/programs/qchem.py"
] | [
"\"\"\"\nCalls the Q-Chem executable.\n\"\"\"\n\nimport os\nimport tempfile\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport numpy as np\n\nfrom qcelemental.models import AtomicResult\nfrom qcelemental.util import parse_version, safe_version, which\n\nfrom ..exceptions import InputError, UnknownError\nfrom ..util import disk_files, execute, popen, temporary_directory\nfrom .model import ProgramHarness\n\n\nclass QChemHarness(ProgramHarness):\n _defaults: Dict[str, Any] = {\n \"name\": \"QChem\",\n \"scratch\": True,\n \"thread_safe\": False,\n \"thread_parallel\": True,\n \"node_parallel\": False,\n \"managed_memory\": True,\n }\n version_cache: Dict[str, str] = {}\n\n def found(self, raise_error: bool = False) -> bool:\n return which(\n \"qchem\",\n return_bool=True,\n raise_error=raise_error,\n raise_msg=\"Please install by visiting the Q-Chem website.\",\n )\n\n def _get_qc_path(self, config: Optional[\"TaskConfig\"] = None):\n paths = os.environ.copy()\n paths[\"QCSCRATCH\"] = tempfile.gettempdir()\n if config and config.scratch_directory:\n paths[\"QCSCRATCH\"] = config.scratch_directory\n\n # Nothing else to pass in\n if \"QC\" in os.environ:\n return paths\n\n # Assume QC path is\n qchem_path = which(\"qchem\")\n if qchem_path:\n paths[\"QC\"] = os.path.dirname(os.path.dirname(qchem_path))\n\n return paths\n\n def get_version(self) -> str:\n self.found(raise_error=True)\n\n which_prog = which(\"qchem\")\n if which_prog not in self.version_cache:\n with popen([which_prog, \"-h\"], popen_kwargs={\"env\": self._get_qc_path()}) as exc:\n exc[\"proc\"].wait(timeout=15)\n\n if \"QC not defined\" in exc[\"stdout\"]:\n return safe_version(\"0.0.0\")\n\n self.version_cache[which_prog] = safe_version(exc[\"stdout\"].splitlines()[0].split()[-1])\n\n return self.version_cache[which_prog]\n\n def compute(self, input_model: \"AtomicInput\", config: \"TaskConfig\") -> \"AtomicResult\":\n \"\"\"\n Run qchem\n \"\"\"\n # Check if qchem executable is found\n self.found(raise_error=True)\n\n # Check qchem version\n qceng_ver = \"5.2\"\n if parse_version(self.get_version()) < parse_version(qceng_ver):\n raise TypeError(f\"Q-Chem version <{qceng_ver} not supported (found version {self.get_version()})\")\n\n # Setup the job\n job_inputs = self.build_input(input_model, config)\n\n # Run qchem\n exe_success, proc = self.execute(job_inputs)\n\n # Determine whether the calculation succeeded\n if exe_success:\n # If execution succeeded, collect results\n result = self.parse_output(proc[\"outfiles\"], input_model)\n return result\n else:\n outfile = proc[\"outfiles\"][\"dispatch.out\"]\n if \"fatal error occurred in module qparser\" in outfile:\n raise InputError(proc[\"outfiles\"][\"dispatch.out\"])\n else:\n # Return UnknownError for error propagation\n raise UnknownError(proc[\"outfiles\"][\"dispatch.out\"])\n\n def execute(\n self,\n inputs: Dict[str, Any],\n *,\n extra_infiles: Optional[Dict[str, str]] = None,\n extra_outfiles: Optional[List[str]] = None,\n extra_commands: Optional[List[str]] = None,\n scratch_name: Optional[str] = None,\n scratch_messy: bool = False,\n timeout: Optional[int] = None,\n ) -> Tuple[bool, Dict[str, Any]]:\n \"\"\"\n For option documentation go look at qcengine/util.execute\n \"\"\"\n\n # Collect all input files and update with extra_infiles\n infiles = inputs[\"infiles\"]\n if extra_infiles is not None:\n infiles.update(extra_infiles)\n\n binary_files = [os.path.join(\"savepath\", x) for x in [\"99.0\", \"131.0\", \"132.0\"]]\n\n # Collect all output files and 
extend with with extra_outfiles\n outfiles = [\"dispatch.out\"]\n if extra_outfiles is not None:\n outfiles.extend(extra_outfiles)\n\n # Replace commands with extra_commands if present\n commands = inputs[\"commands\"] + [\"savepath\"]\n if extra_commands is not None:\n commands = extra_commands\n\n envs = self._get_qc_path()\n\n with temporary_directory(parent=inputs[\"scratch_directory\"], suffix=\"_qchem_scratch\") as tmpdir:\n envs[\"QCSCRATCH\"] = tmpdir\n bdict = {x: None for x in binary_files}\n\n with disk_files({}, bdict, cwd=tmpdir, as_binary=binary_files):\n exe_success, proc = execute(\n commands,\n infiles=infiles,\n outfiles=outfiles,\n scratch_name=scratch_name,\n scratch_directory=tmpdir,\n scratch_messy=scratch_messy,\n timeout=timeout,\n environment=envs,\n )\n\n proc[\"outfiles\"].update({os.path.split(k)[-1]: v for k, v in bdict.items()})\n\n if (proc[\"outfiles\"][\"dispatch.out\"] is None) or (\n \"Thank you very much for using Q-Chem\" not in proc[\"outfiles\"][\"dispatch.out\"]\n ):\n exe_success = False\n\n # QChem does not create an output file and only prints to stdout\n return exe_success, proc\n\n def build_input(\n self, input_model: \"AtomicInput\", config: \"TaskConfig\", template: Optional[str] = None\n ) -> Dict[str, Any]:\n\n # Check some bounds on what cannot be parsed\n if \"ccsd\" in input_model.model.method.lower() or \"ccd\" in input_model.model.method.lower():\n raise InputError(\"Cannot handle CC* methods currently.\")\n\n # Build keywords\n keywords = {k.upper(): v for k, v in input_model.keywords.items()}\n keywords[\"INPUT_BOHR\"] = \"TRUE\"\n keywords[\"MEM_TOTAL\"] = str(int(config.memory * 1024)) # In MB\n\n if input_model.driver == \"energy\":\n keywords[\"JOBTYPE\"] = \"sp\"\n elif input_model.driver == \"gradient\":\n keywords[\"JOBTYPE\"] = \"force\"\n elif input_model.driver == \"hessian\":\n keywords[\"JOBTYPE\"] = \"freq\"\n else:\n raise InputError(f\"Driver of type {input_model.driver} is not yet supported.\")\n\n if input_model.molecule.fix_com or input_model.molecule.fix_orientation:\n keywords[\"SYM_IGNORE\"] = \"TRUE\"\n\n keywords[\"METHOD\"] = input_model.model.method\n if input_model.model.basis:\n keywords[\"BASIS\"] = input_model.model.basis\n\n # Begin the input file\n input_file = []\n input_file.append(\n f\"\"\"$comment\nAutomatically generated Q-Chem input file by QCEngine\n$end\n \"\"\"\n )\n\n # Add Molecule, TODO: Add to QCElemental\n mol = input_model.molecule\n input_file.append(\"$molecule\")\n input_file.append(f\"\"\"{int(mol.molecular_charge)} {mol.molecular_multiplicity}\"\"\")\n\n for real, sym, geom in zip(mol.real, mol.symbols, mol.geometry):\n if real is False:\n raise InputError(\"Cannot handle ghost atoms yet.\")\n input_file.append(f\"{sym} {geom[0]:14.8f} {geom[1]:14.8f} {geom[2]:14.8f}\")\n\n input_file.append(\"$end\\n\")\n\n # Write out the keywords\n input_file.append(\"$rem\")\n for k, v in keywords.items():\n input_file.append(f\"{k:20s} {v}\")\n input_file.append(\"$end\\n\")\n\n ret = {\n \"infiles\": {\"dispatch.in\": \"\\n\".join(input_file)},\n \"commands\": [which(\"qchem\"), \"-nt\", str(config.ncores), \"dispatch.in\", \"dispatch.out\"],\n \"scratch_directory\": config.scratch_directory,\n }\n\n return ret\n\n def parse_output(self, outfiles: Dict[str, str], input_model: \"AtomicInput\") -> \"AtomicResult\":\n\n output_data = {}\n\n bdata = {}\n for k, v in outfiles.items():\n if k == \"dispatch.out\":\n continue\n if v is None:\n continue\n bdata[k] = np.frombuffer(v)\n\n if 
input_model.driver == \"energy\":\n output_data[\"return_result\"] = bdata[\"99.0\"][-1]\n elif input_model.driver == \"gradient\":\n output_data[\"return_result\"] = bdata[\"131.0\"]\n elif input_model.driver == \"hessian\":\n output_data[\"return_result\"] = bdata[\"132.0\"]\n else:\n raise ValueError(f\"Could not parse driver of type {input_model.driver}.\")\n\n properties = {\n \"nuclear_repulsion_energy\": bdata[\"99.0\"][0],\n \"scf_total_energy\": bdata[\"99.0\"][1],\n \"return_energy\": bdata[\"99.0\"][-1],\n }\n\n # Correct CCSD because its odd?\n # if input_model.model.method.lower() == \"ccsd\":\n # m1 = re.findall(\" CCSD correlation energy.+=.+\\d+\\.\\d+\", outfiles[\"dispatch.out\"])\n # m2 = re.findall(\" CCSD total energy.+=.+\\d+\\.\\d+\", outfiles[\"dispatch.out\"])\n\n output_data[\"properties\"] = properties\n output_data[\"stdout\"] = outfiles[\"dispatch.out\"]\n output_data[\"success\"] = True\n\n return AtomicResult(**{**input_model.dict(), **output_data})\n"
] | [
[
"numpy.frombuffer"
]
] |
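The QCEngine row above decodes Q-Chem binary scratch files (e.g. `99.0`) with `np.frombuffer` and reads energies out of the resulting float64 array. The sketch below shows only that decoding step on a synthetic byte blob; the numbers are placeholders, not real Q-Chem output.

```python
# Sketch of the np.frombuffer decoding used in QChemHarness.parse_output.
# The byte blob is synthetic; a real "99.0" file comes from Q-Chem's save path.
import numpy as np

fake_99_0 = np.array([9.16, -76.02, -76.24], dtype=np.float64).tobytes()

energies = np.frombuffer(fake_99_0)  # default dtype is float64
nuclear_repulsion_energy = energies[0]
scf_total_energy = energies[1]
return_energy = energies[-1]
print(nuclear_repulsion_energy, scf_total_energy, return_energy)
```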
yumere/document-qa-tmp | [
"7605b893a51209f39029769b9b10a58605ca3daf"
] | [
"docqa/data_processing/multi_paragraph_qa.py"
] | [
"from collections import Counter\nfrom typing import List, Union\n\nimport numpy as np\nfrom docqa.data_processing.qa_training_data import ParagraphAndQuestionSpec, WordCounts, ParagraphAndQuestion, \\\n ContextAndQuestion, ParagraphAndQuestionDataset\nfrom docqa.data_processing.span_data import TokenSpans\nfrom docqa.dataset import Dataset, ListBatcher, ClusteredBatcher\n\nfrom docqa.data_processing.preprocessed_corpus import DatasetBuilder, FilteredData\n\n\"\"\"\nData for cases where we have many paragraphs mapped to a single question\n\"\"\"\n\n\nclass ParagraphWithAnswers(object):\n __slots__ = [\"text\", \"answer_spans\", \"answer_yes_no\"]\n\n def __init__(self, text: List[str], answer_spans: np.ndarray, answer_yes_no: np.ndarray=None):\n self.text = text\n self.answer_spans = answer_spans\n\n if answer_yes_no is not None:\n self.answer_yes_no = answer_yes_no\n\n @classmethod\n def merge(cls, paras: List):\n paras.sort(key=lambda x: x.get_order())\n answer_spans = []\n text = []\n answer_yes_no = None\n for para in paras:\n answer_spans.append(len(text) + para.answer_spans)\n text += para.text\n answer_yes_no = para.answer_yes_no\n return ParagraphWithAnswers(text, np.concatenate(answer_spans), answer_yes_no)\n\n def get_context(self):\n return self.text\n\n def get_order(self):\n raise NotImplementedError()\n\n def build_qa_pair(self, question, question_id, answer_text, group=None) -> ContextAndQuestion:\n if answer_text is None:\n ans = None\n elif group is None:\n ans = TokenSpans(answer_text, self.answer_spans, self.answer_yes_no)\n else:\n ans = TokenSpanGroup(answer_text, self.answer_spans, group)\n return ParagraphAndQuestion(self.text, question, ans, question_id)\n\n\nclass DocumentParagraph(ParagraphWithAnswers):\n __slots__ = [\"doc_id\", \"start\", \"end\", \"rank\"]\n\n def __init__(self, doc_id: str, start: int, end: int, rank: int,\n answer_spans: np.ndarray, text: List[str], answer_yes_no: np.ndarray=None):\n super().__init__(text, answer_spans, answer_yes_no)\n self.doc_id = doc_id\n self.start = start\n self.rank = rank\n self.end = end\n\n def get_order(self):\n return self.start\n\n\nclass MultiParagraphQuestion(object):\n \"\"\" Question associated with multiple paragraph w/pre-computed answer spans \"\"\"\n\n __slots__ = [\"question_id\", \"question\", \"end\", \"answer_text\", \"paragraphs\"]\n\n def __init__(self, question_id: str, question: List[str], answer_text: List[str],\n paragraphs: List[ParagraphWithAnswers]):\n self.question_id = question_id\n self.question = question\n self.answer_text = answer_text\n self.paragraphs = paragraphs\n\n\nclass RandomParagraphDataset(Dataset):\n \"\"\" Samples a random set of paragraphs from question to build question-paragraph pairs \"\"\"\n\n def __init__(self,\n questions: List[MultiParagraphQuestion],\n force_answer: float,\n true_len: int,\n n_to_sample: int,\n batcher: ListBatcher):\n self.questions = questions\n self.n_to_sample = n_to_sample\n self.force_answer = force_answer\n self.batcher = batcher\n self.true_len = true_len\n self._n_examples = sum(min(self.n_to_sample, len(q.paragraphs)) for q in questions)\n\n def get_vocab(self):\n voc = set()\n for q in self.questions:\n voc.update(q.question)\n for para in q.paragraphs:\n voc.update(para.text)\n return voc\n\n def get_spec(self):\n max_q_len = max(len(q.question) for q in self.questions)\n max_c_len = max(max(len(p.text) for p in q.paragraphs) for q in self.questions)\n return ParagraphAndQuestionSpec(None if self.n_to_sample != 1 else 
self.batcher.get_fixed_batch_size(),\n max_q_len, max_c_len, None)\n\n def get_samples(self, n_examples):\n n_batches = n_examples // self.batcher.get_max_batch_size()\n return self.get_batches(n_batches), n_batches\n\n def get_epoch(self):\n # We first pick a paragraph for each question in the entire training set so we\n # can cluster by context length accurately\n questions = self.questions\n out = []\n for q in questions:\n if len(q.paragraphs) <= self.n_to_sample:\n selected = q.paragraphs\n elif self.force_answer == 0:\n selected = np.random.choice(q.paragraphs, self.n_to_sample, replace=False)\n else:\n answer_probs = np.array([len(p.answer_spans) > 0 for p in q.paragraphs], dtype=np.float64)\n answer_probs /= answer_probs.sum()\n uniform_probs = np.full(len(q.paragraphs), 1.0/len(q.paragraphs))\n probs = (answer_probs + uniform_probs) / 2.0\n selected = np.random.choice(q.paragraphs, self.n_to_sample, p=probs, replace=False)\n for s in selected:\n out.append(s.build_qa_pair(q.question, q.question_id, q.answer_text))\n\n return self.batcher.get_epoch(out)\n\n def percent_filtered(self):\n return 0\n\n def __len__(self):\n return self.batcher.epoch_size(self._n_examples)\n\n\nclass StratifyParagraphsDataset(Dataset):\n \"\"\"\n Samples paragraph for each question to build question-paragraph pairs, but\n stratify the sampling across epochs so paragraphs are seen at about the same rate\n \"\"\"\n\n def __init__(self,\n questions: List[MultiParagraphQuestion],\n true_len: int,\n overample_first_answer: List[int],\n batcher: ListBatcher):\n self.questions = questions\n self.overample_first_answer = overample_first_answer\n self.batcher = batcher\n self.true_len = true_len\n\n self._order = []\n self._on = np.zeros(len(questions), dtype=np.int32)\n for i in range(len(questions)):\n paras = questions[i].paragraphs\n order = list(range(len(paras)))\n if len(self.overample_first_answer) > 0:\n ix = 0\n for i, p in enumerate(paras):\n if len(p.answer_spans) > 0:\n order += [i] * self.overample_first_answer[ix]\n ix += 1\n if ix >= len(self.overample_first_answer):\n break\n\n order = np.array(order, dtype=np.int32)\n np.random.shuffle(order)\n self._order.append(order)\n\n def get_vocab(self):\n voc = set()\n for q in self.questions:\n voc.update(q.question)\n for para in q.paragraphs:\n voc.update(para.text)\n return voc\n\n def get_spec(self):\n max_q_len = max(len(q.question) for q in self.questions)\n max_c_len = max(max(len(p.text) for p in q.paragraphs) for q in self.questions)\n return ParagraphAndQuestionSpec(self.batcher.get_fixed_batch_size(), max_q_len,\n max_c_len, None)\n\n def get_samples(self, n_examples):\n n_batches = n_examples // self.batcher.get_max_batch_size()\n return self.get_batches(n_batches), n_batches\n\n def get_epoch(self):\n questions = self.questions\n out = []\n for i, q in enumerate(questions):\n order = self._order[i]\n selected = q.paragraphs[order[self._on[i]]]\n self._on[i] += 1\n if self._on[i] == len(order):\n np.random.shuffle(order)\n self._on[i] = 0\n\n out.append(selected.build_qa_pair(q.question, q.question_id, q.answer_text))\n\n return self.batcher.get_epoch(out)\n\n def percent_filtered(self):\n return (self.true_len - len(self.questions)) / self.true_len\n\n def __len__(self):\n return self.batcher.epoch_size(len(self.questions))\n\n def __setstate__(self, state):\n if \"oversample_answer\" in state:\n raise ValueError()\n super().__setstate__(state)\n\n\nclass TokenSpanGroup(TokenSpans):\n def __init__(self, answer_text: List[str], 
answer_spans: np.ndarray, group_id: int):\n super().__init__(answer_text, answer_spans)\n self.group_id = group_id\n\n\nclass ParagraphSelection(object):\n def __init__(self, question: MultiParagraphQuestion, selection):\n self.question = question\n self.selection = selection\n self.n_context_words = max(len(question.paragraphs[i].text) for i in selection)\n\n\nclass HotpotParagraphSetDataset(Dataset):\n \"\"\"\n Sample multiple paragraphs for each question and include them in the same batch\n \"\"\"\n\n def __init__(self,\n questions: List[MultiParagraphQuestion], true_len: int, n_paragraphs: int,\n batch_size: int, mode: str, force_answer: bool,\n oversample_first_answer: List[int]):\n self.mode = mode\n self.questions = questions\n self.force_answer = force_answer\n self.true_len = true_len\n self.n_paragraphs = n_paragraphs\n self.oversample_first_answer = oversample_first_answer\n self._n_pairs = sum(min(len(q.paragraphs), n_paragraphs) for q in questions)\n self.batcher = ClusteredBatcher(batch_size, lambda x: x.n_context_words, truncate_batches=True)\n\n def get_vocab(self):\n voc = set()\n for q in self.questions:\n voc.update(q.question)\n for para in q.paragraphs:\n voc.update(para.text)\n return voc\n\n def get_spec(self):\n max_q_len = max(len(q.question) for q in self.questions)\n max_c_len = max(max(len(p.text) for p in q.paragraphs) for q in self.questions)\n return ParagraphAndQuestionSpec(self.batcher.get_fixed_batch_size() if self.mode == \"merge\" else None,\n max_q_len, max_c_len, None)\n\n def get_epoch(self):\n return self._build_expanded_batches(self.questions)\n\n def _build_expanded_batches(self, questions):\n # We first pick paragraph(s) for each question in the entire training set so we\n # can cluster by context length accurately\n out = []\n for q in questions:\n # with_answer = [i for i, p in enumerate(q.paragraphs) if len(p.answer_spans) > 0]\n # for ix, over_sample in zip(list(with_answer), self.oversample_first_answer):\n # with_answer += [ix] * over_sample\n # try:\n # answer_selection = with_answer[np.random.randint(len(with_answer))]\n # except:\n # # print(with_answer)\n # continue\n # other = np.array([i for i, x in enumerate(q.paragraphs) if i != answer_selection])\n # selected = np.random.choice(other, min(len(other), self.n_paragraphs-1), replace=False)\n # selected = np.insert(selected, 0, answer_selection)\n\n out.append(ParagraphSelection(q, [0, 1]))\n\n out.sort(key=lambda x: x.n_context_words)\n\n if self.mode == \"merge\":\n for selection_batch in self.batcher.get_epoch(out):\n batch = []\n for selected in selection_batch:\n q = selected.question\n paras = [q.paragraphs[i] for i in selected.selection]\n para = paras[0].merge(paras)\n batch.append(para.build_qa_pair(q.question, q.question_id, q.answer_text))\n yield batch\n else:\n raise RuntimeError()\n\n def get_samples(self, n_examples):\n questions = np.random.choice(self.questions, n_examples, replace=True)\n if self.mode == \"flatten\":\n n_batches = self.batcher.epoch_size(sum(min(len(q.paragraphs), self.n_paragraphs) for q in questions))\n else:\n n_batches = self.batcher.epoch_size(n_examples)\n return self._build_expanded_batches(np.random.choice(questions, n_examples, replace=False)), n_batches\n\n def percent_filtered(self):\n return (self.true_len - len(self.questions)) / self.true_len\n\n def __len__(self):\n if self.mode == \"flatten\":\n return self.batcher.epoch_size(self._n_pairs)\n else:\n return self.batcher.epoch_size(len(self.questions))\n\n\nclass 
RandomParagraphSetDataset(Dataset):\n \"\"\"\n Sample multiple paragraphs for each question and include them in the same batch\n \"\"\"\n\n def __init__(self,\n questions: List[MultiParagraphQuestion], true_len: int, n_paragraphs: int,\n batch_size: int, mode: str, force_answer: bool,\n oversample_first_answer: List[int]):\n self.mode = mode\n self.questions = questions\n self.force_answer = force_answer\n self.true_len = true_len\n self.n_paragraphs = n_paragraphs\n self.oversample_first_answer = oversample_first_answer\n self._n_pairs = sum(min(len(q.paragraphs), n_paragraphs) for q in questions)\n self.batcher = ClusteredBatcher(batch_size, lambda x: x.n_context_words, truncate_batches=True)\n\n def get_vocab(self):\n voc = set()\n for q in self.questions:\n voc.update(q.question)\n for para in q.paragraphs:\n voc.update(para.text)\n return voc\n\n def get_spec(self):\n max_q_len = max(len(q.question) for q in self.questions)\n max_c_len = max(max(len(p.text) for p in q.paragraphs) for q in self.questions)\n return ParagraphAndQuestionSpec(self.batcher.get_fixed_batch_size() if self.mode == \"merge\" else None,\n max_q_len, max_c_len, None)\n\n def get_epoch(self):\n return self._build_expanded_batches(self.questions)\n\n def _build_expanded_batches(self, questions):\n # We first pick paragraph(s) for each question in the entire training set so we\n # can cluster by context length accurately\n out = []\n for q in questions:\n if len(q.paragraphs) <= self.n_paragraphs:\n selected = np.arange(len(q.paragraphs))\n elif not self.force_answer and len(self.oversample_first_answer) == 0:\n selected = np.random.choice(len(q.paragraphs), self.n_paragraphs, replace=False)\n else:\n if not self.force_answer:\n raise NotImplementedError()\n with_answer = [i for i, p in enumerate(q.paragraphs) if len(p.answer_spans) > 0]\n for ix, over_sample in zip(list(with_answer), self.oversample_first_answer):\n with_answer += [ix] * over_sample\n try:\n answer_selection = with_answer[np.random.randint(len(with_answer))]\n except:\n # print(with_answer)\n continue\n other = np.array([i for i, x in enumerate(q.paragraphs) if i != answer_selection])\n selected = np.random.choice(other, min(len(other), self.n_paragraphs-1), replace=False)\n selected = np.insert(selected, 0, answer_selection)\n\n if self.mode == \"flatten\":\n for i in selected:\n out.append(q.paragraphs[i].build_qa_pair(q.question, q.question_id, q.answer_text))\n else:\n out.append(ParagraphSelection(q, selected))\n\n out.sort(key=lambda x: x.n_context_words)\n\n if self.mode == \"flatten\":\n for batch in self.batcher.get_epoch(out):\n yield batch\n elif self.mode == \"group\":\n group = 0\n for selection_batch in self.batcher.get_epoch(out):\n batch = []\n for selected in selection_batch:\n q = selected.question\n for i in selected.selection:\n para = q.paragraphs[i]\n batch.append(para.build_qa_pair(q.question, q.question_id, q.answer_text, group))\n group += 1\n yield batch\n elif self.mode == \"merge\":\n for selection_batch in self.batcher.get_epoch(out):\n batch = []\n for selected in selection_batch:\n q = selected.question\n paras = [q.paragraphs[i] for i in selected.selection]\n para = paras[0].merge(paras)\n batch.append(para.build_qa_pair(q.question, q.question_id, q.answer_text))\n yield batch\n else:\n raise RuntimeError()\n\n def get_samples(self, n_examples):\n questions = np.random.choice(self.questions, n_examples, replace=True)\n if self.mode == \"flatten\":\n n_batches = self.batcher.epoch_size(sum(min(len(q.paragraphs), 
self.n_paragraphs) for q in questions))\n else:\n n_batches = self.batcher.epoch_size(n_examples)\n return self._build_expanded_batches(np.random.choice(questions, n_examples, replace=False)), n_batches\n\n def percent_filtered(self):\n return (self.true_len - len(self.questions)) / self.true_len\n\n def __len__(self):\n if self.mode == \"flatten\":\n return self.batcher.epoch_size(self._n_pairs)\n else:\n return self.batcher.epoch_size(len(self.questions))\n\n\nclass StratifiedParagraphSetDataset(Dataset):\n \"\"\"\n Sample multiple paragraphs each epoch and include them in the same batch,\n but stratify the sampling across epochs\n \"\"\"\n\n def __init__(self,\n questions: List[MultiParagraphQuestion],\n true_len: int,\n batch_size: int,\n force_answer: bool,\n oversample_first_answer: List[int],\n merge: bool):\n \"\"\"\n :param true_len: Number questions before any filtering was done\n :param batch_size: Batch size to use\n :param force_answer: Require an answer exists for at least\n one paragraph for each question each batch\n :param oversample_first_answer: Over sample the top-ranked answer-containing paragraphs\n by duplicating them the specified amount\n :param merge: Merge all selected paragraphs for each question into a single super-paragraph\n \"\"\"\n self.overample_first_answer = oversample_first_answer\n self.questions = questions\n self.merge = merge\n self.true_len = true_len\n self.batcher = ClusteredBatcher(batch_size, lambda x: x.n_context_words, truncate_batches=True)\n self._order = []\n self._on = np.zeros(len(questions), dtype=np.int32)\n for q in questions:\n if len(q.paragraphs) == 1:\n self._order.append(np.zeros((1, 1), dtype=np.int32))\n continue\n if force_answer:\n sample1 = [i for i, p in enumerate(q.paragraphs) if len(p.answer_spans) > 0]\n else:\n sample1 = list(range(len(q.paragraphs)))\n\n if (len(self.overample_first_answer) > 0 and\n not (force_answer and len(sample1) == 1)): # don't bother if there only is one answer\n ix = 0\n for i, p in enumerate(q.paragraphs):\n if len(p.answer_spans) > 0:\n sample1 += [i] * self.overample_first_answer[ix]\n ix += 1\n if ix >= len(self.overample_first_answer):\n break\n\n permutations = []\n for i in sample1:\n for j in range(len(q.paragraphs)):\n if j != i:\n permutations.append((i, j))\n permutations = np.array(permutations, dtype=np.int32)\n np.random.shuffle(permutations)\n self._order.append(permutations)\n\n def get_vocab(self):\n voc = set()\n for q in self.questions:\n voc.update(q.question)\n for para in q.paragraphs:\n voc.update(para.text)\n return voc\n\n def get_spec(self):\n max_q_len = max(len(q.question) for q in self.questions)\n max_c_len = max(max(len(p.text) for p in q.paragraphs) for q in self.questions)\n return ParagraphAndQuestionSpec(None, max_q_len, max_c_len, None)\n\n def get_epoch(self):\n return self._build_expanded_batches(self.questions)\n\n def _build_expanded_batches(self, questions):\n out = []\n # Decide what paragraphs to use for each question\n for i, q in enumerate(questions):\n order = self._order[i]\n out.append(ParagraphSelection(q, order[self._on[i]]))\n self._on[i] += 1\n if self._on[i] == len(order):\n self._on[i] = 0\n np.random.shuffle(order)\n\n # Sort by context length\n out.sort(key=lambda x: x.n_context_words)\n\n # Yield the correct batches\n group = 0\n for selection_batch in self.batcher.get_epoch(out):\n batch = []\n for selected in selection_batch:\n q = selected.question\n if self.merge:\n paras = [q.paragraphs[i] for i in selected.selection]\n # Sort 
paragraph by reading order, not rank order\n paras.sort(key=lambda x: x.get_order())\n answer_spans = []\n text = []\n for para in paras:\n answer_spans.append(len(text) + para.answer_spans)\n text += para.text\n batch.append(ParagraphAndQuestion(text, q.question,\n TokenSpans(q.answer_text, np.concatenate(answer_spans)),\n q.question_id))\n else:\n for i in selected.selection:\n para = q.paragraphs[i]\n batch.append(para.build_qa_pair(q.question, q.question_id, q.answer_text, group))\n group += 1\n yield batch\n\n def get_samples(self, n_examples):\n n_batches = self.batcher.epoch_size(n_examples)\n return self._build_expanded_batches(np.random.choice(self.questions, n_examples, replace=False)), n_batches\n\n def percent_filtered(self):\n return (self.true_len - len(self.questions)) / self.true_len\n\n def __len__(self):\n return self.batcher.epoch_size(len(self.questions))\n\n\ndef multi_paragraph_word_counts(data):\n wc = Counter()\n for point in data:\n wc.update(point.question)\n for para in point.paragraphs:\n wc.update(para.get_context())\n return WordCounts(wc)\n\n\nclass IndividualParagraphBuilder(DatasetBuilder):\n \"\"\" Treat each paragraph as its own training point \"\"\"\n\n def __init__(self, batcher: ListBatcher, force_answer: float):\n self.batcher = batcher\n self.force_answer = force_answer\n\n def build_stats(self, data: FilteredData):\n return multi_paragraph_word_counts(data.data)\n\n def build_dataset(self, data: FilteredData, corpus) -> Dataset:\n flattened = []\n for point in data.data:\n for para in point.paragraphs:\n flattened.append(ParagraphAndQuestion(para.text, point.question,\n TokenSpans(point.answer_text, para.answer_spans),\n point.question_id))\n return ParagraphAndQuestionDataset(flattened, self.batcher)\n\n\nclass RandomParagraphsBuilder(DatasetBuilder):\n def __init__(self, batching: ListBatcher, force_answer: float, n_to_sample=1):\n self.batching = batching\n self.force_answer = force_answer\n self.n_to_sample = n_to_sample\n\n def build_stats(self, data: Union[FilteredData, List]):\n if isinstance(data, FilteredData):\n return multi_paragraph_word_counts(data.data)\n else:\n return multi_paragraph_word_counts(data)\n\n def build_dataset(self, data: Union[FilteredData, List], corpus) -> Dataset:\n if isinstance(data, FilteredData):\n l = data.true_len\n data = data.data\n else:\n l = len(data)\n return RandomParagraphDataset(data, self.force_answer, l,\n self.n_to_sample, self.batching)\n\n\nclass StratifyParagraphsBuilder(DatasetBuilder):\n def __init__(self, batcher: ListBatcher, oversample_answers: Union[int, List[int]],\n only_answers: bool=False):\n self.batcher = batcher\n self.oversample_answers = oversample_answers\n self.only_answers = only_answers\n\n def build_dataset(self, data, evidence) -> Dataset:\n if isinstance(data, FilteredData):\n l = data.true_len\n data = data.data\n else:\n l = len(data)\n if self.only_answers:\n for q in data:\n q.paragraphs = [x for x in q.paragraphs if len(x.answer_spans) > 0]\n data = [x for x in data if len(x.paragraphs) > 0]\n\n if isinstance(self.oversample_answers, int):\n ov = [self.oversample_answers]\n else:\n ov = self.oversample_answers\n return StratifyParagraphsDataset(data, l, ov, self.batcher)\n\n @property\n def version(self):\n # Changed how sampling works\n return 2\n\n def build_stats(self, data) -> object:\n if isinstance(data, FilteredData):\n return multi_paragraph_word_counts(data.data)\n else:\n return multi_paragraph_word_counts(data)\n\n def __setstate__(self, state):\n if 
\"only_answers\" not in state:\n state[\"only_answers\"] = False\n print(state)\n if state.get(\"oversample\", 0) != 0:\n raise NotImplementedError()\n if \"oversample_first\" in state:\n state[\"oversample_answers\"] = [state[\"oversample_first\"]]\n del state[\"oversample_first\"]\n\n super().__setstate__(state)\n\n\nclass RandomParagraphSetDatasetBuilder(DatasetBuilder):\n def __init__(self, batch_size: int, mode: str, force_answer: bool,\n oversample_first_answer: Union[int, List[int]]):\n self.mode = mode\n self.oversample_first_answer = oversample_first_answer\n self.batch_size = batch_size\n self.force_answer = force_answer\n\n def build_stats(self, data: Union[FilteredData, List]):\n if isinstance(data, FilteredData):\n return multi_paragraph_word_counts(data.data)\n else:\n return multi_paragraph_word_counts(data)\n\n def build_dataset(self, data: Union[FilteredData, List], corpus) -> Dataset:\n if isinstance(data, FilteredData):\n l = data.true_len\n data = data.data\n else:\n l = len(data)\n if isinstance(self.oversample_first_answer, int):\n ov = [self.oversample_first_answer]\n else:\n ov = self.oversample_first_answer\n return RandomParagraphSetDataset(data, l, 2, self.batch_size, self.mode, self.force_answer, ov)\n\n\nclass HotpotParagraphSetDatasetBuilder(RandomParagraphSetDatasetBuilder):\n\n def build_dataset(self, data: Union[FilteredData, List], corpus) -> Dataset:\n if isinstance(data, FilteredData):\n l = data.true_len\n data = data.data\n else:\n l = len(data)\n if isinstance(self.oversample_first_answer, int):\n ov = [self.oversample_first_answer]\n else:\n ov = self.oversample_first_answer\n return HotpotParagraphSetDataset(data, l, 10, self.batch_size, self.mode, self.force_answer, ov)\n\n\nclass StratifyParagraphSetsBuilder(DatasetBuilder):\n def __init__(self, batch_size: int, merge: bool, force_answer: bool,\n oversample_first_answer: Union[int, List[int]]):\n self.batch_size = batch_size\n self.merge = merge\n self.force_answer = force_answer\n self.oversample_first_answer = oversample_first_answer\n\n def build_stats(self, data: Union[List, FilteredData]):\n if isinstance(data, FilteredData):\n return multi_paragraph_word_counts(data.data)\n else:\n return multi_paragraph_word_counts(data)\n\n def build_dataset(self, data: Union[FilteredData, List], corpus) -> Dataset:\n if isinstance(data, FilteredData):\n l = data.true_len\n data = data.data\n else:\n l = len(data)\n if isinstance(self.oversample_first_answer, int):\n ov = [self.oversample_first_answer]\n else:\n ov = self.oversample_first_answer\n return StratifiedParagraphSetDataset(data, l, self.batch_size, self.force_answer,\n ov, self.merge)\n\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.random.choice",
"numpy.zeros",
"numpy.random.shuffle",
"numpy.insert"
]
] |
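The document-qa row above stratifies paragraph sampling with a per-question shuffled order (`_order`) and cursor (`_on`), reshuffling once an order is exhausted. The toy sketch below replays that cycling idea on invented paragraph counts so the rotation across epochs is easy to see; it is a simplification, not the dataset class itself.

```python
# Toy sketch of the stratified cycling in StratifyParagraphsDataset: each question
# keeps a shuffled order over its paragraphs plus a cursor, so every paragraph is
# visited at about the same rate across epochs. Paragraph counts are illustrative.
import numpy as np

paragraphs_per_question = [3, 2, 4]
orders = [np.random.permutation(n) for n in paragraphs_per_question]
cursors = np.zeros(len(orders), dtype=np.int32)

def sample_epoch():
    picked = []
    for i, order in enumerate(orders):
        picked.append(int(order[cursors[i]]))  # paragraph chosen for question i
        cursors[i] += 1
        if cursors[i] == len(order):           # exhausted this pass: reshuffle
            np.random.shuffle(order)
            cursors[i] = 0
    return picked

for epoch in range(4):
    print("epoch", epoch, "->", sample_epoch())
```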
tangyeqiu/keras-yolo3-Cascade | [
"5427dfa2f88a0ab7cb77c657f580b8a15827cf13"
] | [
"yolo.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nClass definition of YOLO_v3 style detection model on image and video\r\n\"\"\"\r\n\r\nimport colorsys\r\n# import os\r\nfrom timeit import default_timer as timer\r\nimport datetime\r\n\r\nimport numpy as np\r\nfrom keras import backend as K\r\nfrom keras.models import load_model\r\nfrom keras.layers import Input\r\nfrom PIL import Image, ImageFont, ImageDraw\r\n\r\nfrom yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\r\nfrom yolo3.utils import letterbox_image\r\nimport os\r\nfrom keras.utils import multi_gpu_model\r\n\r\nimport cv2\r\n\r\n\r\nclass YOLO(object):\r\n _defaults = {\r\n \"model_path\": 'model_data/yolo.h5',\r\n \"anchors_path\": 'model_data/yolo_anchors.txt',\r\n \"classes_path\": 'model_data/coco_classes.txt',\r\n \"tiny_model_path\": 'model_data/trained_weights.h5', #\r\n \"tiny_anchors_path\": 'model_data/tiny_yolo_anchors.txt', #\r\n \"tiny_classes_path\": 'model_data/voc_classes.txt', #\r\n \"score\" : 0.3,\r\n \"iou\" : 0.45,\r\n \"model_image_size\" : (416, 416),\r\n \"gpu_num\" : 1,\r\n }\r\n\r\n @classmethod\r\n def get_defaults(cls, n):\r\n if n in cls._defaults:\r\n return cls._defaults[n]\r\n else:\r\n return \"Unrecognized attribute name '\" + n + \"'\"\r\n\r\n def __init__(self, **kwargs):\r\n self.__dict__.update(self._defaults) # set up default values\r\n self.__dict__.update(kwargs) # and update with user overrides\r\n self.class_names = self._get_class()\r\n self.anchors = self._get_anchors()\r\n self.sess = K.get_session()\r\n self.boxes, self.scores, self.classes = self.generate()\r\n self.tiny_class_names = self._get_tiny_class() #\r\n self.tiny_anchors = self._get_tiny_anchors() #\r\n self.tiny_boxes, self.tiny_scores, self.tiny_classes = self.tiny_generate() #\r\n self.count_num = 0\r\n self.last_num = 1\r\n self.dis_i = 0\r\n self.test_date = datetime.datetime.now()\r\n self.box_buffer = [[] for i in range(5)]\r\n\r\n def _get_class(self):\r\n classes_path = os.path.expanduser(self.classes_path)\r\n with open(classes_path) as f:\r\n class_names = f.readlines()\r\n class_names = [c.strip() for c in class_names]\r\n return class_names\r\n\r\n def _get_tiny_class(self):\r\n tiny_classes_path = os.path.expanduser(self.tiny_classes_path)\r\n with open(tiny_classes_path) as f:\r\n tiny_class_names = f.readlines()\r\n tiny_class_names = [c.strip() for c in tiny_class_names]\r\n return tiny_class_names\r\n\r\n def _get_anchors(self):\r\n anchors_path = os.path.expanduser(self.anchors_path)\r\n with open(anchors_path) as f:\r\n anchors = f.readline()\r\n anchors = [float(x) for x in anchors.split(',')]\r\n return np.array(anchors).reshape(-1, 2)\r\n\r\n def _get_tiny_anchors(self):\r\n tiny_anchors_path = os.path.expanduser(self.tiny_anchors_path)\r\n with open(tiny_anchors_path) as f:\r\n tiny_anchors = f.readline()\r\n tiny_anchors = [float(x) for x in tiny_anchors.split(',')]\r\n return np.array(tiny_anchors).reshape(-1, 2)\r\n\r\n def generate(self):\r\n model_path = os.path.expanduser(self.model_path)\r\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\r\n\r\n # Load model, or construct model and load weights.\r\n num_anchors = len(self.anchors)\r\n num_classes = len(self.class_names)\r\n is_tiny_version = num_anchors==6 # default setting\r\n try:\r\n self.yolo_model = load_model(model_path, compile=False)\r\n except:\r\n self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\r\n if is_tiny_version else yolo_body(Input(shape=(None,None,3)), 
num_anchors//3, num_classes)\r\n self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\r\n else:\r\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\r\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\r\n 'Mismatch between model and given anchor and class sizes'\r\n\r\n print('{} model, anchors, and classes loaded.'.format(model_path))\r\n\r\n # Generate colors for drawing bounding boxes.\r\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\r\n for x in range(len(self.class_names))]\r\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\r\n self.colors = list(\r\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\r\n self.colors))\r\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\r\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\r\n np.random.seed(None) # Reset seed to default.\r\n\r\n # Generate output tensor targets for filtered bounding boxes.\r\n self.input_image_shape = K.placeholder(shape=(2, ))\r\n if self.gpu_num>=2:\r\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\r\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\r\n len(self.class_names), self.input_image_shape,\r\n score_threshold=self.score, iou_threshold=self.iou)\r\n return boxes, scores, classes\r\n\r\n def tiny_generate(self):\r\n tiny_model_path = os.path.expanduser(self.tiny_model_path)\r\n assert tiny_model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\r\n\r\n # Load model, or construct model and load weights.\r\n tiny_num_anchors = len(self.tiny_anchors)\r\n tiny_num_classes = len(self.tiny_class_names)\r\n is_tiny_version = tiny_num_anchors==6 # default setting\r\n try:\r\n self.tiny_yolo_model = load_model(tiny_model_path, compile=False)\r\n except:\r\n self.tiny_yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), tiny_num_anchors//2, tiny_num_classes) \\\r\n if is_tiny_version else tiny_yolo_body(Input(shape=(None,None,3)), tiny_num_anchors//3, tiny_num_classes)\r\n self.tiny_yolo_model.load_weights(self.tiny_model_path) # make sure model, anchors and classes match\r\n else:\r\n assert self.tiny_yolo_model.layers[-1].output_shape[-1] == \\\r\n tiny_num_anchors/len(self.yolo_model.output) * (tiny_num_classes + 5), \\\r\n 'Mismatch between model and given anchor and class sizes'\r\n\r\n print('{} tiny_model, tiny_anchors, and tiny_classes loaded.'.format(tiny_model_path))\r\n\r\n # Generate colors for drawing bounding boxes.\r\n hsv_tuples = [(x / len(self.tiny_class_names), 1., 1.)\r\n for x in range(len(self.tiny_class_names))]\r\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\r\n self.colors = list(\r\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\r\n self.colors))\r\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\r\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\r\n np.random.seed(None) # Reset seed to default.\r\n\r\n # Generate output tensor targets for filtered bounding boxes.\r\n self.tiny_input_image_shape = K.placeholder(shape=(2, ))\r\n if self.gpu_num>=2:\r\n self.tiny_yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\r\n tiny_boxes, tiny_scores, tiny_classes = yolo_eval(self.tiny_yolo_model.output, self.tiny_anchors,\r\n len(self.tiny_class_names), self.tiny_input_image_shape,\r\n score_threshold=self.score, iou_threshold=self.iou)\r\n return tiny_boxes, 
tiny_scores, tiny_classes\r\n\r\n def detect_big_image(self, image):\r\n if self.model_image_size != (None, None):\r\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\r\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\r\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\r\n else:\r\n new_image_size = (image.width - (image.width % 32),\r\n image.height - (image.height % 32))\r\n boxed_image = letterbox_image(image, new_image_size)\r\n image_data = np.array(boxed_image, dtype='float32')\r\n\r\n # print('Start roi detection')\r\n image_data /= 255.\r\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\r\n\r\n out_boxes, out_scores, out_classes = self.sess.run(\r\n [self.boxes, self.scores, self.classes],\r\n feed_dict={\r\n self.yolo_model.input: image_data,\r\n self.input_image_shape: [image.size[1], image.size[0]],\r\n K.learning_phase(): 0\r\n })\r\n return out_boxes, out_scores, out_classes\r\n\r\n def detect_num(self, image):\r\n if self.model_image_size != (None, None):\r\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\r\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\r\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\r\n else:\r\n new_image_size = (image.width - (image.width % 32),\r\n image.height - (image.height % 32))\r\n boxed_image = letterbox_image(image, new_image_size)\r\n image_data = np.array(boxed_image, dtype='float32')\r\n\r\n # print('Start roi detection')\r\n image_data /= 255.\r\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\r\n\r\n out_boxes, out_scores, out_classes = self.sess.run(\r\n [self.tiny_boxes, self.tiny_scores, self.tiny_classes],\r\n feed_dict={\r\n self.tiny_yolo_model.input: image_data,\r\n self.tiny_input_image_shape: [image.size[1], image.size[0]],\r\n K.learning_phase(): 0\r\n })\r\n return out_boxes, out_scores, out_classes\r\n\r\n def num_trans(self, tiny_out_boxes, tiny_out_scores, tiny_out_classes):\r\n num_pos = []\r\n num_char = []\r\n num_score = []\r\n for tiny_i, tiny_c in reversed(list(enumerate(tiny_out_classes))):\r\n tiny_predicted_class = self.tiny_class_names[tiny_c]\r\n tiny_box = tiny_out_boxes[tiny_i]\r\n tiny_score = tiny_out_scores[tiny_i]\r\n tiny_top, tiny_left, tiny_bottom, tiny_right = tiny_box\r\n tiny_left_int = np.floor(tiny_left + 0.5).astype('int32')\r\n num_pos.append(tiny_left_int)\r\n if tiny_score > 0.25:\r\n num_char.append(tiny_predicted_class)\r\n else:\r\n num_char.append('0')\r\n num_score.append(tiny_score)\r\n return num_pos, num_char, num_score\r\n\r\n def num_filter(self, last_num, num_char, num_pos, count_num):\r\n num_dis = 0\r\n last_num_10 = last_num // 10\r\n last_num_01 = last_num % 10\r\n last_num_list = [str(last_num_10), str(last_num_01)]\r\n last_num_10_1 = (last_num - 1) // 10\r\n last_num_01_1 = (last_num - 1) % 10\r\n last_num_list_1 = [str(last_num_10_1), str(last_num_01_1)]\r\n if len(num_char) == 2:\r\n if num_pos[0] < num_pos[1]:\r\n num_dis = int(num_char[0]) * 10 + int(num_char[1])\r\n if num_pos[1] < num_pos[0]:\r\n num_dis = int(num_char[1]) * 10 + int(num_char[0])\r\n elif 0 < len(num_char) < 2:\r\n if num_char in last_num_list_1:\r\n num_dis = last_num - 1\r\n else:\r\n num_dis = last_num\r\n elif len(num_char) == 0:\r\n num_dis = 0\r\n else:\r\n if last_num_list_1 in num_char:\r\n num_dis = last_num - 1\r\n else:\r\n num_dis = last_num\r\n\r\n if num_dis == 1:\r\n count_num += 1\r\n else:\r\n count_num = 
0\r\n\r\n if count_num > 35:\r\n count_num = 0\r\n num_dis = 0\r\n\r\n # if last_num > 0:\r\n # if last_num == num_dis:\r\n # if count_num > 31:\r\n # num_dis = last_num - 1\r\n # count_num = 0\r\n # else:\r\n # num_dis = last_num\r\n # elif last_num < num_dis:\r\n # num_dis = last_num\r\n # count_num = 0\r\n # else:\r\n # count_num = 0\r\n return num_dis, count_num\r\n\r\n def new_num_filter(self, last_num, num_char, num_pos, count_num):\r\n # -------------------------------\r\n num_dis = 0\r\n last_num_10_1 = (last_num - 1) // 10\r\n last_num_01_1 = (last_num - 1) % 10\r\n last_num_list_1 = [str(last_num_10_1), str(last_num_01_1)]\r\n if len(num_char) == 2:\r\n if num_pos[0] < num_pos[1]:\r\n num_dis = int(num_char[0]) * 10 + int(num_char[1])\r\n if num_pos[1] < num_pos[0]:\r\n num_dis = int(num_char[1]) * 10 + int(num_char[0])\r\n elif 0 < len(num_char) < 2:\r\n if num_char in last_num_list_1:\r\n num_dis = last_num - 1\r\n else:\r\n num_dis = last_num\r\n elif len(num_char) == 0:\r\n num_dis = 0\r\n else:\r\n if last_num_list_1 in num_char:\r\n num_dis = last_num - 1\r\n else:\r\n num_dis = last_num\r\n # ------------------------------------------------------------NEW FILTER\r\n if num_dis != last_num: # counting difference between input and last number\r\n count_num += 1\r\n\r\n if num_dis == 0: # if input=0, two cases: (1)counting down is over (2)wrong recognition\r\n if count_num > 9: # assume case(1):countdown finished, threshold 10 frames\r\n num_out = num_dis # shows \"zero\", purge counter\r\n count_num = 0\r\n else: # case(2):wrong recognition\r\n num_out = last_num # shows last valid number\r\n\r\n else: # num_dis != 0 # if input!=0. two cases: (1)next number (2)wrong recognition\r\n if count_num > 4: # assume case(1)next_number, threshold 5 frames\r\n num_out = num_dis # show new number, purge counter\r\n count_num = 0\r\n else: # assume case(2)wrong recognition(flickering within 5 frames)\r\n num_out = last_num # shows last valid number\r\n\r\n return max(num_out, 0), count_num\r\n # ---------------------------------------------\r\n\r\n def colour_detect(self, roi):\r\n roi_hsv = cv2.cvtColor(np.asarray(roi), cv2.COLOR_RGB2HSV)\r\n\r\n lower_red1 = np.array([0, 43, 46])\r\n upper_red1 = np.array([10, 255, 255])\r\n lower_red2 = np.array([156, 43, 46])\r\n upper_red2 = np.array([180, 255, 255])\r\n lower_green = np.array([35, 43, 46])\r\n upper_green = np.array([80, 255, 255]) #135\r\n lower_yellow = np.array([18, 43, 46])\r\n upper_yellow = np.array([34, 255, 255])\r\n\r\n mask_green = cv2.inRange(roi_hsv, lower_green, upper_green)\r\n mask_red1 = cv2.inRange(roi_hsv, lower_red1, upper_red1)\r\n mask_red2 = cv2.inRange(roi_hsv, lower_red2, upper_red2)\r\n mask_red = mask_red1 ^ mask_red2 # XOR\r\n mask_yellow = cv2.inRange(roi_hsv, lower_yellow, upper_yellow)\r\n\r\n roi_hsv_val = np.sum(roi_hsv)\r\n mask_green_val = np.sum(mask_green)\r\n mask_red_val = np.sum(mask_red)\r\n mask_yellow_val = np.sum(mask_yellow)\r\n\r\n if mask_green_val > mask_red_val and mask_green_val > mask_yellow_val and mask_green_val/roi_hsv_val > 0.03:\r\n colour = \"Green \"\r\n colour_mark = (0, 255, 0)\r\n elif mask_red_val > mask_green_val and mask_red_val > mask_yellow_val and mask_red_val/roi_hsv_val > 0.03:\r\n colour = \"Red \"\r\n colour_mark = (255, 0, 0)\r\n elif mask_yellow_val > mask_green_val and mask_yellow_val > mask_red_val and mask_yellow_val/roi_hsv_val > 0.03:\r\n colour = \"Yellow\"\r\n colour_mark = (255, 255, 0)\r\n else:\r\n colour = \"OFF \"\r\n colour_mark = (190, 190, 
190)\r\n # print(mask_green_val, mask_red_val, mask_yellow_val)\r\n # print(colour, mask_green_val/roi_hsv_val, mask_red_val/roi_hsv_val, mask_yellow_val/roi_hsv_val)\r\n return colour, colour_mark\r\n\r\n def find_contour(self, yolo_roi):\r\n # pre-process img: convert to bi-value img\r\n gray = cv2.cvtColor(yolo_roi, cv2.COLOR_BGR2GRAY)\r\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\r\n\r\n contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n # Check for compatibility with different OpenCV versions\r\n contours = contours[0] # if imutils.is_cv2() else contours[1]\r\n\r\n TL_regions = [] # coordinates\r\n\r\n for contour in contours:\r\n # Get the rectangle that contains the contour\r\n (x, y, w, h) = cv2.boundingRect(contour)\r\n # filter some wrong contour\r\n if w > 5 and h > 5 and h / w > 1.8:\r\n TL_regions.append((x, y, w, h))\r\n\r\n # sort according to contour area [0] notes the biggest one. 每一个元素包含 x,y,w,h 四个信息,即x, y, w, h = TL_Max_region[i]\r\n TL_Max_region = sorted(TL_regions, key=lambda k: k[2] * k[3], reverse=True)\r\n\r\n if len(TL_Max_region):\r\n # if TL_Max_region is no empty, crop original roi by contour: yolo_roi[y:y + h, x:x + w]\r\n x, y, w, h = TL_Max_region[0]\r\n TL_img = yolo_roi[y:y + h, x:x + w]\r\n else:\r\n # if empty then ouput original roi\r\n TL_img = yolo_roi\r\n\r\n return TL_img\r\n\r\n\r\n def detect_image(self, image):\r\n # raw_image = image.copy()\r\n w, h = image.width, image.height\r\n image_left = int(w / 10 * 3)\r\n image_top = 0 # int(h / 6)\r\n image_right = w - image_left\r\n image_bottom = h/4*3 # - image_top\r\n image_detect_roi = image_left, image_top, image_right, image_bottom\r\n image_roi = image.crop(image_detect_roi)\r\n # raw_image_roi = image_roi.copy()\r\n out_boxes, out_scores, out_classes = self.detect_big_image(image_roi)\r\n # print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\r\n tl_index = 0\r\n num_display = []\r\n num_char_display = []\r\n num_score_display = []\r\n\r\n colour_display = []\r\n colour_str = []\r\n\r\n # Start the Draw\r\n draw = ImageDraw.Draw(image)\r\n # self.count_num += 1\r\n\r\n class_to_sort = []\r\n box_to_sort = []\r\n box_left = []\r\n score_to_sort = []\r\n for i, c in reversed(list(enumerate(out_classes))):\r\n predicted_class = self.class_names[c]\r\n box = out_boxes[i]\r\n score = out_scores[i]\r\n top, left, bottom, right = box\r\n # Take Traffic Light ONLY\r\n if (predicted_class == 'traffic light') and (score >= 0.50) and ((bottom - top) * (right - left)) / (image_roi.width * image_roi.height) > 0.001 and ((bottom - top) / (right - left)) > 1.5:\r\n class_to_sort.append(predicted_class)\r\n box_to_sort.append(box)\r\n box_left.append(left)\r\n score_to_sort.append(score)\r\n\r\n # Sort boxes from left to nine\r\n if len(box_to_sort) > 0:\r\n ZIP_Pack = zip(box_left, class_to_sort, box_to_sort, score_to_sort)\r\n ZIP_Pack_new = sorted(ZIP_Pack, reverse=True)\r\n new_box_left, new_class, new_box, new_score = zip(*ZIP_Pack_new)\r\n out_boxes, out_scores, out_classes = new_box, new_score, new_class\r\n else:\r\n out_boxes, out_scores, out_classes = box_to_sort, score_to_sort, class_to_sort\r\n # End of sort\r\n # out_boxes, out_scores, out_classes = box_to_sort, score_to_sort, class_to_sort\r\n\r\n # Debounce\r\n # End of debounce\r\n\r\n # Store the boxes\r\n # self.box_buffer.append(out_boxes)\r\n if len(self.box_buffer) > 5:\r\n del self.box_buffer[0]\r\n # End of store\r\n\r\n for i, c in 
reversed(list(enumerate(out_classes))):\r\n # predicted_class = self.class_names[c]\r\n box = out_boxes[i]\r\n # score = out_scores[i]\r\n top, left, bottom, right = box\r\n # Take Traffic Light ONLY\r\n # if (predicted_class == 'traffic light') and (score >= 0.50):\r\n # and ((bottom - top) * (right - left)) / (\r\n # image_roi.width * image_roi.height) > 0.0015:\r\n tl_index = tl_index + 1\r\n\r\n # Take TL ROI\r\n top = max(0, np.floor(top + 0.5).astype('int32'))\r\n left = max(0, np.floor(left + 0.5).astype('int32'))\r\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\r\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\r\n # print(label, (top, left), (bottom, right))\r\n box_roi = left, top, right, bottom\r\n roi = image_roi.crop(box_roi)\r\n # End of ROI\r\n\r\n # New func test 20190702\r\n roi = np.asarray(roi)\r\n roi = cv2.cvtColor(roi,cv2.COLOR_RGB2BGR)\r\n roi = self.find_contour(roi)\r\n roi = cv2.cvtColor(roi,cv2.COLOR_BGR2RGB)\r\n roi = Image.fromarray(roi)\r\n # End of New func test 20190702\r\n\r\n\r\n # Colour detection\r\n colour, colour_mark = self.colour_detect(roi)\r\n # End of colour detection\r\n\r\n colour_str.append(colour)\r\n colour_display.append(colour_mark)\r\n\r\n # Num recognition\r\n if colour == \"Green \" or colour == \"Red \":\r\n tiny_out_boxes, tiny_out_scores, tiny_out_classes = self.detect_num(roi)\r\n else:\r\n tiny_out_boxes, tiny_out_scores, tiny_out_classes = [], [], []\r\n num_pos, num_char, num_score = self.num_trans(tiny_out_boxes, tiny_out_scores, tiny_out_classes)\r\n # End of Num recognition\r\n\r\n # Num filter\r\n # num_dis, count_num = self.num_filter(self.last_num, num_char, num_pos, self.count_num)\r\n num_dis, count_num = self.new_num_filter(self.last_num, num_char, num_pos, self.count_num)\r\n self.last_num = num_dis\r\n self.count_num = count_num\r\n # End of filter\r\n\r\n num_display.append(num_dis)\r\n num_char_display.append(num_char)\r\n num_score_display.append(num_score)\r\n\r\n # Show box\r\n label = '{}'.format(tl_index)\r\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\r\n size=np.floor(3e-2 * image_roi.size[1] + 0.5).astype('int32'))\r\n thickness = (image_roi.size[0] + image_roi.size[1]) // 500\r\n label_size = draw.textsize(label, font)\r\n # if top - label_size[1] >= 0:\r\n # text_origin = np.array([left+image_left, top+image_top - label_size[1]])\r\n # else:\r\n # text_origin = np.array([left+image_left, top+image_top + 1])\r\n\r\n for i in range(thickness):\r\n draw.rectangle(\r\n [left+image_left + i, top+image_top + i, right+image_left - i, bottom+image_top - i],\r\n outline=colour_mark)\r\n # draw.rectangle(\r\n # [tuple(text_origin), tuple(text_origin + label_size)],\r\n # fill=colour_mark)\r\n # draw.text(text_origin, label, fill=(128, 128, 128), font=font)\r\n # draw.text(text_origin, label, fill=(255-colour_mark[0], 255-colour_mark[1], 255-colour_mark[2]), font=font)\r\n # End of show\r\n\r\n # Noob's operation\r\n num_index = []\r\n new_num_display = []\r\n new_colour_str = []\r\n new_colour_display = []\r\n if len(out_boxes) > 0:\r\n if len(self.box_buffer[4]) >= len(out_boxes):\r\n self.box_buffer.append(self.box_buffer[4])\r\n diff = [[] for i in range(len(out_boxes))]\r\n for i in range(len(out_boxes)):\r\n curr_top, curr_left, curr_bottom, curr_right = out_boxes[i]\r\n for j in range(len(self.box_buffer[4])):\r\n last_top, last_left, last_bottom, last_right = self.box_buffer[4][j]\r\n diff[i].append(abs(last_left - curr_left))\r\n 
num_index.append(diff[i].index(min(diff[i])))\r\n for i in range(len(self.box_buffer[4])):\r\n new_num_display.append(0)\r\n new_colour_str.append(\" \")\r\n new_colour_display.append((127, 127, 127))\r\n for i in range(len(out_boxes)):\r\n # del new_num_display[num_index[i]]\r\n new_num_display[num_index[i]] = num_display[i]\r\n # del new_colour_str[num_index[i]]\r\n new_colour_str[num_index[i]] = colour_str[i]\r\n # del new_colour_display[num_index[i]]\r\n new_colour_display[num_index[i]] = colour_display[i]\r\n else:\r\n self.box_buffer.append(out_boxes)\r\n diff = [[] for i in range(len(self.box_buffer[4]))]\r\n for i in range(len(self.box_buffer[4])):\r\n curr_top, curr_left, curr_bottom, curr_right = self.box_buffer[4][i]\r\n for j in range(len(out_boxes)):\r\n last_top, last_left, last_bottom, last_right = out_boxes[j]\r\n diff[i].append(abs(last_left - curr_left))\r\n num_index.append(diff[i].index(min(diff[i])))\r\n for i in range(len(out_boxes)):\r\n new_num_display.append(0)\r\n new_colour_str.append(\" \")\r\n new_colour_display.append((127, 127, 127))\r\n for i in range(len(self.box_buffer[4])):\r\n # del new_num_display[num_index[i]]\r\n new_num_display[num_index[i]] = num_display[i]\r\n # del new_colour_str[num_index[i]]\r\n new_colour_str[num_index[i]] = colour_str[i]\r\n # del new_colour_display[num_index[i]]\r\n new_colour_display[num_index[i]] = colour_display[i]\r\n else:\r\n self.box_buffer.append([])\r\n new_num_display = []\r\n new_colour_str = []\r\n new_colour_display = []\r\n # End of noobs\r\n\r\n # Show info:\r\n draw.rectangle(\r\n [0, image.height / 8 * 6, image.width, image.height],\r\n fill=(0, 0, 0))\r\n font_cd = ImageFont.truetype(font='font/FiraMono-Medium.otf',\r\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\r\n\r\n # for dis_i in range(len(num_display)):\r\n # draw.text([image_left, image.height / 8 * 6 + dis_i * 40, image_right, image.height],\r\n # \"|No.\" + str(dis_i+1) + \"| \" + colour_str[dis_i] + \"| Countdown: \" + str(num_display[dis_i]) + \" s\",\r\n # fill=colour_display[dis_i], font=font_cd)\r\n # draw.text([image_left, image.height / 8 * 6 + 20 + dis_i * 40, image_right, image.height],\r\n # \"------------------------------\",\r\n # fill=colour_display[dis_i], font=font_cd)\r\n\r\n for dis_i in range(len(new_num_display)):\r\n draw.text([image_left, image.height / 8 * 6 + dis_i * 40, image_right, image.height],\r\n \"| \" + new_colour_str[dis_i] + \"| Countdown: \" + str(new_num_display[dis_i]) + \" s\",\r\n fill=new_colour_display[dis_i], font=font_cd)\r\n draw.text([image_left, image.height / 8 * 6 + 20 + dis_i * 40, image_right, image.height],\r\n \"------------------------------\",\r\n fill=new_colour_display[dis_i], font=font_cd)\r\n # End of info\r\n\r\n # Show logo and test date\r\n font_logo = ImageFont.truetype(font='font/FiraMono-Medium.otf',\r\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\r\n\r\n draw.text([0, image.height / 8 * 6, image_left, image.height],\r\n \"BOSCH AI Demo:\",\r\n fill=(255, 255, 255), font=font_logo)\r\n draw.text([0, image.height / 8 * 6 + 40, image_left, image.height],\r\n \"Traffic light\",\r\n fill=(255, 255, 255), font=font_logo)\r\n draw.text([0, image.height / 8 * 6 + 80, image_left, image.height],\r\n \"and Countdown\",\r\n fill=(255, 255, 255), font=font_logo)\r\n draw.text([0, image.height / 8 * 6 + 120, image_left, image.height],\r\n \"Recognition\",\r\n fill=(255, 255, 255), font=font_logo)\r\n draw.text([0, image.height / 8 * 6 + 160, image_left, 
image.height],\r\n \"Box track:\",\r\n fill=(255, 255, 255), font=font_logo)\r\n\r\n font_track = ImageFont.truetype(font='font/FiraMono-Medium.otf',\r\n size=np.floor(2e-2 * image.size[1] + 0.5).astype('int32'))\r\n for i in range(len(self.box_buffer)):\r\n draw.text([0, image.height / 8 * 6 + 200 + i * 30, image_left, image.height],\r\n str(self.box_buffer[i]),\r\n fill=(255, 255, 255), font=font_track)\r\n\r\n # draw.text([image_right, image.height / 8 * 6, image.width, image.height],\r\n # \"Test date:\",\r\n # fill=(255, 255, 255), font=font_cd)\r\n # draw.text([image_right, image.height / 8 * 6 + 40, image.width, image.height],\r\n # str(self.test_date)[0:19],\r\n # fill=(255, 255, 255), font=font_cd)\r\n\r\n draw.text([image_right, image.height / 8 * 6, image.width, image.height],\r\n \"Num chars and scores\",\r\n fill=(255, 255, 255), font=font_cd)\r\n for dis_i in range(tl_index):\r\n draw.text([image_right, image.height / 8 * 6 + 40 + 60 * dis_i, image.width, image.height],\r\n str(num_char_display[dis_i]),\r\n fill=(255, 255, 255), font=font_track)\r\n draw.text([image_right, image.height / 8 * 6 + 70 + 60 * dis_i, image.width, image.height],\r\n str(num_score_display[dis_i]),\r\n fill=(255, 255, 255), font=font_track)\r\n # End of show + str(num_char_display) + str(num_score_display)\r\n\r\n # Draw ROI (Optional)\r\n label = 'ROI'\r\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\r\n size=np.floor(5e-2 * image_roi.size[1] + 0.5).astype('int32'))\r\n thickness = (image_roi.size[0] + image_roi.size[1]) // 300\r\n label_size = draw.textsize(label, font)\r\n text_origin = np.array([image_left, image_top]) # + label_size[1]])\r\n for i in range(thickness):\r\n draw.rectangle(\r\n [image_left + i, image_top + i, image_right - i, image_bottom - i],\r\n outline=(0, 0, 0))\r\n draw.rectangle(\r\n [tuple(text_origin), tuple(text_origin + label_size)],\r\n fill=(0, 0, 0))\r\n draw.text(text_origin, label, fill=(255, 255, 255), font=font)\r\n # End of ROI\r\n\r\n del draw\r\n # End of Draw\r\n\r\n return image\r\n\r\n def close_session(self):\r\n self.sess.close()\r\n\r\n\r\ndef detect_video_offline(yolo, video_path, output_path=\"\"):\r\n from imageio import get_writer\r\n\r\n # vid = cv2.VideoCapture(0)\r\n vid = cv2.VideoCapture(video_path)\r\n if not vid.isOpened():\r\n raise IOError(\"Couldn't open webcam or video\")\r\n # video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))\r\n # video_fps = vid.get(cv2.CAP_PROP_FPS)\r\n # video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\r\n # int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\r\n isOutput = True if output_path != \"\" else False\r\n if isOutput:\r\n out1 = get_writer(\r\n output_path,\r\n fps=30, # FPS is in units Hz; should be real-time.\r\n codec='libx264', # When used properly, this is basically\r\n # \"PNG for video\" (i.e. lossless)\r\n quality=None, # disables variable compression\r\n pixelformat='yuv420p', # keep it as RGB colours\r\n ffmpeg_params=[ # compatibility with older library versions\r\n '-preset', # set to faster, veryfast, superfast, ultrafast\r\n 'fast', # for higher speed but worse compression\r\n '-crf', # quality; set to 0 for lossless, but keep in mind\r\n '11' # that the camera probably adds static anyway\r\n ]\r\n )\r\n # print(\"!!! 
TYPE:\", type(output_path), type(video_FourCC), type(video_fps), type(video_size))\r\n # out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)\r\n # accum_time = 0\r\n # curr_fps = 0\r\n # fps = \"FPS: ??\"\r\n # prev_time = timer()\r\n\r\n while True:\r\n return_value, frame = vid.read()\r\n\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n\r\n image = Image.fromarray(frame)\r\n image = yolo.detect_image(image)\r\n result = np.asarray(image)\r\n\r\n result_show = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)\r\n\r\n # curr_time = timer()\r\n # exec_time = curr_time - prev_time\r\n # prev_time = curr_time\r\n # accum_time = accum_time + exec_time\r\n # curr_fps = curr_fps + 1\r\n # if accum_time > 1:\r\n # accum_time = accum_time - 1\r\n # fps = \"FPS: \" + str(curr_fps)\r\n # curr_fps = 0\r\n # cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\r\n # fontScale=0.50, color=(255, 0, 0), thickness=2)\r\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\r\n cv2.imshow(\"result\", result_show)\r\n if isOutput:\r\n out1.append_data(result)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n yolo.close_session()\r\n\r\n\r\ndef detect_video_online(yolo, video_path, output_path=\"\"):\r\n import cv2\r\n from pypylon import pylon\r\n from imageio import get_writer\r\n ##################################################################\r\n # conecting to the first available camera\r\n\r\n camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())\r\n\r\n # Grabing Continusely (video) with minimal delay\r\n camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)\r\n converter = pylon.ImageFormatConverter()\r\n\r\n # converting to opencv bgr format\r\n converter.OutputPixelFormat = pylon.PixelType_BGR8packed\r\n converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned\r\n ##################################################################\r\n # vid = cv2.VideoCapture(video_path)\r\n # vid = cv2.VideoCapture(0)\r\n # if not vid.isOpened():\r\n # raise IOError(\"Couldn't open webcam or video\")\r\n if camera.IsGrabbing():\r\n vid = camera.RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)\r\n\r\n # video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))\r\n # video_fps = vid.get(cv2.CAP_PROP_FPS)\r\n # video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\r\n # int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\r\n\r\n isOutput = True if output_path != \"\" else False\r\n if isOutput:\r\n out_raw = get_writer(\r\n \"C:/Users/tag2sgh/Documents\\GitHub/raw_video/raw_vid8_20190807.mp4\",\r\n fps=30, # FPS is in units Hz; should be real-time.\r\n codec='libx264', # When used properly, this is basically\r\n # \"PNG for video\" (i.e. lossless)\r\n quality=None, # disables variable compression\r\n pixelformat='yuv420p', # keep it as RGB colours\r\n ffmpeg_params=[ # compatibility with older library versions\r\n '-preset', # set to faster, veryfast, superfast, ultrafast\r\n 'fast', # for higher speed but worse compression\r\n '-crf', # quality; set to 0 for lossless, but keep in mind\r\n '11' # that the camera probably adds static anyway\r\n ]\r\n )\r\n out1 = get_writer(\r\n output_path,\r\n fps=30, # FPS is in units Hz; should be real-time.\r\n codec='libx264', # When used properly, this is basically\r\n # \"PNG for video\" (i.e. 
lossless)\r\n quality=None, # disables variable compression\r\n pixelformat='yuv420p', # keep it as RGB colours\r\n ffmpeg_params=[ # compatibility with older library versions\r\n '-preset', # set to faster, veryfast, superfast, ultrafast\r\n 'fast', # for higher speed but worse compression\r\n '-crf', # quality; set to 0 for lossless, but keep in mind\r\n '11' # that the camera probably adds static anyway\r\n ]\r\n )\r\n # print(\"!!! TYPE:\", type(output_path), type(video_FourCC), type(video_fps), type(video_size))\r\n # out = cv2.VideoWriter(output_path, video_FourCC, 30, video_size)\r\n accum_time = 0\r\n curr_fps = 0\r\n fps = \"FPS: ??\"\r\n prev_time = timer()\r\n while camera.IsGrabbing():\r\n grabResult = camera.RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)\r\n\r\n if grabResult.GrabSucceeded():\r\n # Access the image data\r\n image = converter.Convert(grabResult)\r\n frame = image.GetArray()\r\n # while True:\r\n\r\n # return_value, frame = vid.read()\r\n\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n\r\n out_raw.append_data(frame)\r\n image = Image.fromarray(frame)\r\n image = yolo.detect_image(image)\r\n result = np.asarray(image)\r\n\r\n result_show = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)\r\n curr_time = timer()\r\n exec_time = curr_time - prev_time\r\n prev_time = curr_time\r\n accum_time = accum_time + exec_time\r\n curr_fps = curr_fps + 1\r\n if accum_time > 1:\r\n accum_time = accum_time - 1\r\n fps = \"FPS: \" + str(curr_fps)\r\n curr_fps = 0\r\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\r\n fontScale=0.50, color=(255, 0, 0), thickness=2)\r\n cv2.putText(result_show, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\r\n fontScale=0.50, color=(255, 0, 0), thickness=2)\r\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\r\n cv2.imshow(\"result\", result_show)\r\n if isOutput:\r\n # out.write(result)\r\n out1.append_data(result)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n grabResult.Release()\r\n camera.StopGrabbing()\r\n yolo.close_session()\r\n\r\n\r\ndef detect_video(yolo, video_path, output_path=\"\"):\r\n # detect_video_offline(yolo, video_path, output_path)\r\n detect_video_online(yolo, video_path, output_path)\r\n"
] | [
[
"numpy.array",
"numpy.asarray",
"numpy.random.seed",
"numpy.sum",
"numpy.random.shuffle",
"numpy.expand_dims",
"numpy.floor"
]
] |
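The record above bundles a full YOLOv3 traffic-light pipeline; its colour decision is plain HSV thresholding and can be read in isolation. Below is a minimal sketch of that step, assuming only OpenCV and NumPy. The HSV ranges and the 0.03 area-ratio threshold mirror the values hard-coded in the record; the function name is invented for this example and is not part of the recorded file.

```python
import cv2
import numpy as np

def classify_light_colour(roi_rgb: np.ndarray) -> str:
    """Classify a traffic-light crop as 'Green', 'Red', 'Yellow' or 'OFF' via HSV masks."""
    hsv = cv2.cvtColor(roi_rgb, cv2.COLOR_RGB2HSV)
    masks = {
        "Green": cv2.inRange(hsv, np.array([35, 43, 46]), np.array([80, 255, 255])),
        # Red hue wraps around 0/180, so two disjoint bands are combined.
        "Red": cv2.inRange(hsv, np.array([0, 43, 46]), np.array([10, 255, 255]))
               | cv2.inRange(hsv, np.array([156, 43, 46]), np.array([180, 255, 255])),
        "Yellow": cv2.inRange(hsv, np.array([18, 43, 46]), np.array([34, 255, 255])),
    }
    total = float(np.sum(hsv)) or 1.0  # avoid division by zero on an all-black crop
    best = max(masks, key=lambda name: np.sum(masks[name]))
    # The winning mask must cover a minimal fraction of the crop, else report OFF.
    return best if np.sum(masks[best]) / total > 0.03 else "OFF"
```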
BrightXiaoHan/OpusFilter | [
"804c82a46837fc57ca69934314622043248f6042"
] | [
"opusfilter/classifier.py"
] | [
"\"\"\"Filter classifier\"\"\"\n\nimport json\nimport logging\nimport collections\nimport functools\nimport math\nimport scipy.optimize\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import json_normalize\nimport sklearn.linear_model\nfrom sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, log_loss\n\nfrom .util import grouper, file_open\n\nlogger = logging.getLogger(__name__)\n\n\ndef lists_to_dicts(obj):\n \"\"\"Convert lists in a JSON-style object to dicts recursively\n\n Examples:\n\n >>> lists_to_dicts([3, 4])\n {\"0\": 3, \"1\": 4}\n >>> lists_to_dicts([3, [4, 5]])\n {\"0\": 3, \"1\": {\"0\": 4, \"1\": 5}}\n >>> lists_to_dicts({\"a\": [3, 4], \"b\": []})\n {\"a\": {\"0\": 3, \"1\": 4}, \"b\": {}}\n\n \"\"\"\n if isinstance(obj, dict):\n return {key: lists_to_dicts(value) for key, value in obj.items()}\n if isinstance(obj, list):\n return {str(idx): lists_to_dicts(value) for idx, value in enumerate(obj)}\n return obj\n\n\ndef load_dataframe(data_file):\n \"\"\"Load normalized scores dataframe from a JSON lines file\"\"\"\n data = []\n with file_open(data_file) as dfile:\n for line in dfile:\n try:\n data.append(lists_to_dicts(json.loads(line)))\n except json.decoder.JSONDecodeError as err:\n logger.error(line)\n raise err\n return pd.DataFrame(json_normalize(data))\n\n\ndef load_dataframe_in_chunks(data_file, chunksize):\n \"\"\"Yield normalized scores dataframes from a chunked JSON lines file\n\n Use instead of load_dataframe if the data is too large to fit in memory.\n\n \"\"\"\n with file_open(data_file) as dfile:\n for num, chunk in enumerate(grouper(dfile, chunksize)):\n data = []\n for line in chunk:\n try:\n data.append(lists_to_dicts(json.loads(line)))\n except json.decoder.JSONDecodeError as err:\n logger.error(line)\n raise err\n logger.info(\"Processing chunk %s with %s lines\", num, len(data))\n yield pd.DataFrame(json_normalize(data))\n\n\ndef standardize_dataframe_scores(dataframe, features, means_stds=None):\n \"\"\"Normalize, zero average, and set direction for scores in each column\"\"\"\n new_df = pd.DataFrame()\n if not means_stds:\n means_stds = {}\n for column in dataframe:\n vect = dataframe[column].to_numpy()\n if features[column].get('clean-direction', 'high') == 'low':\n direction = -1\n else:\n direction = 1\n means_stds[column] = (vect.mean(), vect.std(), direction)\n for column in features:\n vect = dataframe[column].to_numpy()\n mean, std, direction = means_stds[column]\n if std == 0:\n vect = [0 for i in range(len(dataframe[column]))]\n else:\n vect = direction * (vect - mean) / std\n new_df[column] = vect\n return new_df, means_stds\n\n\nclass Classifier:\n \"\"\"Wrapper for sklearn classifiers (e.g. 
LogisticRegression)\n\n Includes feature selection and standardization from pandas\n dataframes.\n\n \"\"\"\n\n def __init__(self, classname, params, features, standardize_params):\n self.classname = classname\n cls = getattr(sklearn.linear_model, self.classname)\n self.classifier = cls(**params)\n self.features = features\n self.standardize_params = standardize_params\n\n def standardize(self, dataframe):\n \"\"\"Standardize features in the data frame\"\"\"\n if not self.standardize_params:\n logger.warning(\"Feature standardization parameters missing\")\n return dataframe[self.features]\n return standardize_dataframe_scores(dataframe, self.features, self.standardize_params)[0]\n\n def train(self, dataframe, labels, standardize=True):\n \"\"\"Train logistic regression with training_data\"\"\"\n dataframe = self.standardize(dataframe) if standardize else dataframe\n self.classifier.fit(dataframe[self.features], labels)\n\n def write_preds(self, input_fname, output_fname, true_label=None,\n standardize=True, chunksize=None):\n \"\"\"Write predicted class labels to output file\"\"\"\n if chunksize:\n dfs_tbc = load_dataframe_in_chunks(input_fname, chunksize)\n else:\n dfs_tbc = [load_dataframe(input_fname)]\n logger.info(\"Classifier labels: %s\", self.classifier.classes_)\n with file_open(output_fname, 'w') as output:\n for df_tbc in dfs_tbc:\n df_std = self.standardize(df_tbc) if standardize else df_tbc\n labels = self.classifier.predict(df_std[self.features])\n if true_label:\n true_labels = df_tbc[true_label]\n logger.info('accuracy: %s', accuracy_score(true_labels, labels))\n logger.info('confusion matrix:\\n%s', confusion_matrix(true_labels, labels))\n for label in labels:\n output.write(f'{label}\\n')\n\n def write_probs(self, input_fname, output_fname, true_label=None,\n standardize=True, chunksize=None):\n \"\"\"Write classification probabilities to output file\"\"\"\n if chunksize:\n dfs_tbc = load_dataframe_in_chunks(input_fname, chunksize)\n else:\n dfs_tbc = [load_dataframe(input_fname)]\n logger.info(\"Classifier labels: %s\", self.classifier.classes_)\n with file_open(output_fname, 'w') as output:\n for df_tbc in dfs_tbc:\n df_std = self.standardize(df_tbc) if standardize else df_tbc\n probas = self.classifier.predict_proba(df_std[self.features])\n if true_label:\n true_labels = df_tbc[true_label]\n logger.info('roc_auc: %s', roc_auc_score(true_labels, probas[:, 1]))\n for proba in probas[:, 1]:\n output.write(f'{proba:.10f}\\n')\n\n def weights(self):\n \"\"\"Yield classifier weights\"\"\"\n if self.classname == \"LogisticRegression\":\n yield '(intercept)', self.classifier.intercept_[0]\n for name, value in zip(self.features, self.classifier.coef_[0]):\n yield name, value\n else:\n logger.warning(\"Method weights unsupported for %s\", self.classname)\n return\n\n\nclass TrainClassifier:\n \"\"\"Classify clean and noisy sentence pairs\"\"\"\n\n def __init__(self, training_scores=None, dev_scores=None, model_type=None,\n model_parameters=None, features=None):\n logger.info(\"Loading training data\")\n self.df_training_data = load_dataframe(training_scores)\n\n self.group_config = features\n self.feature_config = {}\n for t_key in self.df_training_data.keys():\n for f_key in features.keys():\n if t_key.startswith(f_key):\n self.feature_config[t_key] = features[f_key]\n\n self.df_training_data = self.df_training_data[self.feature_config.keys()]\n self.df_training_data, self.means_stds = standardize_dataframe_scores(\n self.df_training_data, self.feature_config)\n\n if 
dev_scores:\n logger.info(\"Loading development data\")\n self.dev_data = load_dataframe(dev_scores)\n self.dev_labels = self.dev_data.pop('label')\n self.dev_data = self.dev_data[self.feature_config.keys()]\n self.dev_data = standardize_dataframe_scores(\n self.dev_data, self.feature_config, self.means_stds)[0]\n else:\n self.dev_data = None\n self.dev_labels = None\n\n if model_type is None:\n self.model_type = 'LogisticRegression'\n else:\n self.model_type = model_type\n if model_parameters is None:\n self.model_parameters = {}\n else:\n self.model_parameters = model_parameters\n\n def train_classifier(self, training_data, labels):\n \"\"\"Train logistic regression with training_data\"\"\"\n classifier = Classifier(self.model_type, self.model_parameters,\n training_data.columns, self.means_stds)\n classifier.train(training_data, labels, standardize=False)\n return classifier\n\n def get_roc_auc(self, model, dev_data):\n \"\"\"Calculate ROC AUC for a given model (requires dev_data)\"\"\"\n probs = model.classifier.predict_proba(dev_data)\n # pred = model.classifier.predict(dev_data)\n # logger.info(\"Classifier labels: %s\", model.classifier.classes_)\n # logger.info(\"Predicted labels: %s\", collections.Counter(pred))\n return roc_auc_score(self.dev_labels, probs[:, 1])\n\n @staticmethod\n def get_sse(model, training_data, labels):\n \"\"\"Calculate the residual sum of squares\"\"\"\n y_hat = model.classifier.predict(training_data)\n resid = labels - y_hat\n sse = sum(resid**2)+0.01\n return sse\n\n @staticmethod\n def get_ce(model, training_data, labels):\n \"\"\"Calculate cross entropy for a given model\"\"\"\n y_pred = model.classifier.predict_proba(training_data)\n return log_loss(labels, y_pred)\n\n @classmethod\n def get_aic(cls, model, training_data, labels):\n \"\"\"Calculate AIC for a given model\"\"\"\n loss = cls.get_ce(model, training_data, labels)\n k = training_data.shape[1] # number of variables\n aic = 2 * k - 2 * math.log(loss)\n return aic\n\n @classmethod\n def get_bic(cls, model, training_data, labels):\n \"\"\"Calculate BIC for a given model\"\"\"\n # pylint: disable=C0103\n loss = cls.get_ce(model, training_data, labels)\n k = training_data.shape[1] # number of variables\n n = training_data.shape[0] # number of observations\n bic = n * math.log(loss / n) + k * math.log(n)\n return bic\n\n @staticmethod\n def get_labels(training_data, cutoffs):\n \"\"\"Get labels for training data based on cutoffs\"\"\"\n labels = []\n training_data_dict = training_data.copy().to_dict()\n for i in range(len(training_data.index)):\n label = 1\n for key in cutoffs.keys():\n if training_data_dict[key][i] < cutoffs[key]:\n label = 0\n labels.append(label)\n return labels\n\n @staticmethod\n def get_cutoffs(training_data, quantiles, features):\n \"\"\"Get cutoff values based on discard percentages\"\"\"\n cutoffs = {}\n for key in features:\n cutoffs[key] = training_data[key].quantile(quantiles[key])\n return cutoffs\n\n @staticmethod\n def _load_feature_bounds_and_init(fdict):\n \"\"\"Load feature boundaries and initial values from config dict\"\"\"\n features = []\n bounds = []\n initial = []\n for key, params in fdict.items():\n features.append(key)\n if 'quantiles' in params:\n min_ = params['quantiles'].get('min', 0)\n max_ = params['quantiles'].get('max', 1)\n else:\n min_, max_ = 0, 1\n logger.warning(\n \"No quantile bounds defined for %s, setting to [%s, %s]\",\n key, min_, max_)\n bounds.append([min_, max_])\n if 'initial' in params.get('quantiles', {}):\n init = 
params['quantiles']['initial']\n else:\n init = 0.1\n logger.warning(\n \"No initial quantile defined for %s, setting to %s\",\n key, init)\n initial.append(init)\n initial = np.array(initial)\n return features, bounds, initial\n\n def _cost(self, qvector, features, criterion):\n \"\"\"Return cost of qvector for given features and criterion\"\"\"\n best_quantiles = dict(zip(features, qvector))\n logger.info('Training logistic regression model with quantiles:\\n%s',\n '\\n'.join(f'* {t[0]}: {t[1]}' for t in best_quantiles.items()))\n if any(q == 0 for q in best_quantiles.values()):\n # Remove unused features\n df_train_copy, df_dev_copy, active = self._prune_datasets(best_quantiles, features)\n else:\n df_train_copy, df_dev_copy, active = self.df_training_data, self.dev_data, set(features)\n\n cutoffs = self.get_cutoffs(df_train_copy, best_quantiles, active)\n labels = self.get_labels(df_train_copy, cutoffs)\n counts = collections.Counter(labels)\n logger.info(\"Label counts in data: %s\", counts)\n if len(counts) > 1:\n classifier = self.train_classifier(df_train_copy, labels)\n if criterion['dev']:\n crit_value = criterion['func'](classifier, df_dev_copy)\n else:\n crit_value = criterion['func'](classifier, df_train_copy, labels)\n else:\n crit_value = np.inf if criterion['best'] == 'low' else -np.inf\n\n logger.info('Model criterion value: %s', crit_value)\n return crit_value if criterion['best'] == 'low' else -crit_value\n\n def _prune_datasets(self, quantiles, features):\n \"\"\"Return datasets without features that have zero value in quantiles\"\"\"\n df_train_copy = self.df_training_data.copy()\n df_dev_copy = self.dev_data.copy() if self.dev_data is not None else None\n active = set(features)\n for key, value in quantiles.items():\n if value == 0:\n df_train_copy.pop(key)\n if self.dev_data is not None:\n df_dev_copy.pop(key)\n active.remove(key)\n return df_train_copy, df_dev_copy, active\n\n def _get_criterion(self, name):\n \"\"\"Return function and specifications for optimization criterion\"\"\"\n criteria = {\n 'AIC': {'func': self.get_aic, 'best': 'low', 'dev': False},\n 'BIC': {'func': self.get_bic, 'best': 'low', 'dev': False},\n 'SSE': {'func': self.get_sse, 'best': 'low', 'dev': False},\n 'CE': {'func': self.get_ce, 'best': 'low', 'dev': False},\n 'ROC_AUC': {'func': self.get_roc_auc, 'best': 'high', 'dev': True}\n }\n if name not in criteria:\n raise ValueError(f'Invalid criterion. 
Expected one of: {\", \".join(criteria)}')\n return criteria[name]\n\n def find_best_model(self, criterion_name, algorithm='default', options=None):\n \"\"\"Find the model with the best AIC / BIC / SSE / CE / ROC_AUC\"\"\"\n criterion = self._get_criterion(criterion_name)\n features, bounds, initial = self._load_feature_bounds_and_init(self.feature_config)\n cost = functools.partial(self._cost, features=features, criterion=criterion)\n if options is None:\n options = {}\n if algorithm == 'none':\n # Use initial values\n best_quantiles = dict(zip(features, initial))\n elif algorithm == 'default':\n # Default local search with multiplicative updates\n best_quantiles = dict(zip(features, self.default_search(cost, initial, bounds=bounds, **options)))\n else:\n # Use optimization algorithm from scipy\n best_quantiles = dict(zip(features, scipy.optimize.minimize(\n cost, initial, method=algorithm, bounds=bounds, options=options).x))\n df_train_copy, df_dev_copy, active = self._prune_datasets(best_quantiles, features)\n labels = self.get_labels(df_train_copy, self.get_cutoffs(df_train_copy, best_quantiles, active))\n classifier = self.train_classifier(df_train_copy, labels)\n if criterion['dev']:\n return classifier, criterion['func'](classifier, df_dev_copy), best_quantiles\n return classifier, criterion['func'](classifier, df_train_copy, labels), best_quantiles\n\n @staticmethod\n def default_search(costfunc, initial, bounds=None, step_coef=1.25):\n \"\"\"Local search algorithm with multiplicative updates\"\"\"\n if bounds is None:\n bounds = [(0, 1) for _ in range(len(initial))]\n cur_x = initial.copy()\n cur_cost = costfunc(cur_x)\n while True:\n no_change = 0\n for fidx in range(len(initial)):\n new_x = cur_x.copy()\n if new_x[fidx] / step_coef >= bounds[fidx][0]:\n new_x[fidx] /= step_coef\n cost = costfunc(new_x)\n if cost < cur_cost:\n cur_cost = cost\n cur_x = new_x\n continue\n new_x = cur_x.copy()\n if new_x[fidx] * step_coef <= bounds[fidx][1]:\n new_x[fidx] *= step_coef\n cost = costfunc(new_x)\n if cost < cur_cost:\n cur_cost = cost\n cur_x = new_x\n continue\n no_change += 1\n if no_change == len(initial):\n return cur_x\n"
] | [
[
"numpy.array",
"sklearn.metrics.confusion_matrix",
"pandas.json_normalize",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.log_loss",
"sklearn.metrics.roc_auc_score"
]
] |
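The classifier record above standardizes per-feature scores (flipping sign for features whose `clean-direction` is `low`), derives binary labels from per-feature quantile cutoffs, and fits a scikit-learn `LogisticRegression`. The following toy sketch shows that standardize-then-label idea under stated assumptions: the column names, the direction map, the 0.1 quantile, and the random data are all made up for illustration and are not values from the recorded module.

```python
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
df = pd.DataFrame({"len_ratio": rng.normal(1.0, 0.2, 500),   # hypothetical feature
                   "lang_score": rng.uniform(0.0, 1.0, 500)})  # hypothetical feature

# Zero-mean / unit-std each feature; flip sign when lower raw scores mean cleaner data.
directions = {"len_ratio": -1, "lang_score": 1}  # stand-in for 'clean-direction'
std_df = pd.DataFrame({c: directions[c] * (df[c] - df[c].mean()) / df[c].std()
                       for c in df})

# Label a pair "clean" (1) only if every feature clears its quantile cutoff.
cutoffs = {c: std_df[c].quantile(0.1) for c in std_df}  # discard worst 10% per feature
labels = std_df.ge(pd.Series(cutoffs)).all(axis=1).astype(int)

clf = LogisticRegression().fit(std_df, labels)
print(dict(zip(std_df.columns, clf.coef_[0])))  # per-feature weights, as in weights()
```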
int-brain-lab/iblscripts | [
"db65fb48e44a168d7ed5e08bc854a82e83a44cd0"
] | [
"deploy/videopc/video_lengths.py"
] | [
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Author: Niccolò Bonacchi\n# @Date: Tuesday, January 12th 2021, 5:48:08 pm\n\"\"\"\nGiven a specific video session_path will count and printout the number of frames for the video\nthe GPIO pin states and the frame counter files\n\"\"\"\n\nimport sys\nfrom pathlib import Path\nimport numpy as np\nimport cv2\nimport pandas as pd\n\n\ndef load_CameraFrameData_file(session_path, camera: str) -> pd.DataFrame:\n out_dataframe = None\n session_path = Path(session_path)\n if session_path is None:\n return\n raw_path = Path(session_path).joinpath(\"raw_video_data\")\n # Check if csv frame data file exists\n frame_data_file = raw_path.joinpath(f\"_iblrig_{camera}Camera.FrameData.csv\")\n if frame_data_file.exists():\n fdata = pd.read_csv(frame_data_file)\n out_dataframe = fdata\n # Check if bin frame data file exists\n frame_data_file = raw_path.joinpath(f\"_iblrig_{camera}Camera.frameData.bin\")\n if frame_data_file.exists():\n fdata = np.fromfile(frame_data_file, dtype=np.float64)\n assert len(fdata) % 4 == 0, \"Missing values: expected length of array is not % 4\"\n rows = int(len(fdata) / 4)\n fdata_values = np.reshape(fdata.astype(np.int64), (rows, 4))\n columns = [\n \"Timestamp\", # UTC ticks\n \"Value.Metadata.embeddedTimeStamp\",\n \"Value.Metadata.embeddedFrameCounter\",\n \"Value.Metadata.embeddedGPIOPinState\"\n ]\n out_dataframe = pd.DataFrame(fdata_values, columns=columns)\n return out_dataframe\n\n\ndef load_embedded_frame_data(session_path, camera: str, raw=False):\n \"\"\"\n :param session_path:\n :param camera: The specific camera to load, one of ('left', 'right', 'body')\n :param raw: If True the raw data are returned without preprocessing (thresholding, etc.)\n :return: The frame counter, the pin state\n \"\"\"\n session_path = Path(session_path)\n if session_path is None:\n return None, None\n raw_path = Path(session_path).joinpath(\"raw_video_data\")\n # Load frame count\n count_file = raw_path / f\"_iblrig_{camera}Camera.frame_counter.bin\"\n count = np.fromfile(count_file, dtype=np.float64).astype(int) if count_file.exists() else None\n if not (count is None or raw):\n count -= count[0] # start from zero\n # Load pin state\n pin_file = raw_path / f\"_iblrig_{camera}Camera.GPIO.bin\"\n pin_state = np.fromfile(pin_file, dtype=np.float64).astype(int) if pin_file.exists() else None\n if not (pin_state is None or raw):\n pin_state = pin_state > 10\n return count, pin_state\n\n\ndef get_video_length(video_path):\n \"\"\"\n Returns video length\n :param video_path: A path to the video\n :return:\n \"\"\"\n cap = cv2.VideoCapture(str(video_path))\n assert cap.isOpened(), f\"Failed to open video file {video_path}\"\n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n cap.release()\n return length\n\n\ndef main(session_path, display=True):\n session_path = Path(session_path)\n video_lengths = [get_video_length(p) for p in session_path.rglob(\"*.avi\")]\n data_frames = [\n load_CameraFrameData_file(session_path, camera=c) for c in (\"left\", \"right\", \"body\")\n ]\n len_frames = [len(df) for df in data_frames if df is not None]\n if not len_frames:\n array_lengths = [\n (a.size, b.size)\n for a, b in [\n load_embedded_frame_data(session_path, cam, raw=True)\n for cam in (\"left\", \"right\", \"body\")\n ]\n if (a is not None) or (b is not None)\n ]\n\n array_len = []\n for cam in (\"left\", \"right\", \"body\"):\n a, b = load_embedded_frame_data(session_path, cam, raw=True)\n if (a is not None) or (b is not None):\n 
array_len.append((a.size, b.size))\n\n frame_counter_lengths = [x[0] for x in array_lengths]\n GPIO_state_lengths = [x[1] for x in array_lengths]\n out = {\n 'session_path': session_path,\n 'video_lengths': video_lengths,\n 'frame_counter_lengths': frame_counter_lengths,\n 'GPIO_state_lengths': GPIO_state_lengths\n }\n print(\n \"\\n\",\n session_path, \"\\n\",\n sorted(video_lengths), \"<-- Video lengths\", \"\\n\",\n sorted(frame_counter_lengths), \"<-- Frame counter lengths\", \"\\n\",\n sorted(GPIO_state_lengths), \"<-- GPIO state lengths\", \"\\n\",\n )\n else:\n out = {\n 'session_path': session_path,\n 'video_lengths': video_lengths,\n 'frame_data_lengths': len_frames\n }\n if display:\n print(\n \"\\n\",\n session_path, \"\\n\",\n sorted(video_lengths), \"<-- Video lengths\", \"\\n\",\n sorted(len_frames), \"<-- Frame Data lengths\", \"\\n\",\n )\n return out\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"I need a session_path as input...\")\n main(sys.argv[1])\n\n # session_path = r\"C:\\iblrig_data\\Subjects\\_iblrig_test_mouse\\2000-01-01\\001\"\n # main(session_path)\n # camera= 'left'\n"
] | [
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.fromfile"
]
] |
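The script above cross-checks the number of frames in each video against the camera's embedded side files. A reduced sketch of the two counts it compares is given below, assuming OpenCV and NumPy; the session path and file names are hypothetical placeholders for the example and are not guaranteed to match iblrig's actual naming.

```python
from pathlib import Path
import cv2
import numpy as np

def video_frame_count(video_path: Path) -> int:
    """Frame count reported by the video container itself."""
    cap = cv2.VideoCapture(str(video_path))
    try:
        return int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    finally:
        cap.release()

def embedded_frame_count(bin_path: Path) -> int:
    """Frame count implied by the camera's side file (one float64 entry per frame)."""
    return np.fromfile(bin_path, dtype=np.float64).size

if __name__ == "__main__":
    session = Path("/data/Subjects/test_mouse/2000-01-01/001/raw_video_data")  # hypothetical
    print(video_frame_count(session / "_iblrig_leftCamera.raw.avi"),
          embedded_frame_count(session / "_iblrig_leftCamera.frame_counter.bin"))
```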
MaxBromberg/networkx | [
"e4d1483f241e9a56fbda247592710b659d29bce5"
] | [
"networkx/algorithms/centrality/current_flow_betweenness.py"
] | [
"\"\"\"Current-flow betweenness centrality measures.\"\"\"\nimport networkx as nx\nfrom networkx.algorithms.centrality.flow_matrix import (\n CGInverseLaplacian,\n flow_matrix_row,\n FullInverseLaplacian,\n laplacian_sparse_matrix,\n SuperLUInverseLaplacian,\n)\nfrom networkx.utils import (\n not_implemented_for,\n reverse_cuthill_mckee_ordering,\n py_random_state,\n)\n\n__all__ = [\n \"current_flow_betweenness_centrality\",\n \"approximate_current_flow_betweenness_centrality\",\n \"edge_current_flow_betweenness_centrality\",\n]\n\n\n@py_random_state(7)\n@not_implemented_for(\"directed\")\ndef approximate_current_flow_betweenness_centrality(\n G,\n normalized=True,\n weight=None,\n dtype=float,\n solver=\"full\",\n epsilon=0.5,\n kmax=10000,\n seed=None,\n):\n r\"\"\"Compute the approximate current-flow betweenness centrality for nodes.\n\n Approximates the current-flow betweenness centrality within absolute\n error of epsilon with high probability [1]_.\n\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n normalized : bool, optional (default=True)\n If True the betweenness values are normalized by 2/[(n-1)(n-2)] where\n n is the number of nodes in G.\n\n weight : string or None, optional (default=None)\n Key for edge data used as the edge weight.\n If None, then use 1 as each edge weight.\n The weight reflects the capacity or the strength of the\n edge.\n\n dtype : data type (float)\n Default data type for internal matrices.\n Set to np.float32 for lower memory consumption.\n\n solver : string (default='full')\n Type of linear solver to use for computing the flow matrix.\n Options are \"full\" (uses most memory), \"lu\" (recommended), and\n \"cg\" (uses least memory).\n\n epsilon: float\n Absolute error tolerance.\n\n kmax: int\n Maximum number of sample node pairs to use for approximation.\n\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness<randomness>`.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with betweenness centrality as the value.\n\n See Also\n --------\n current_flow_betweenness_centrality\n\n Notes\n -----\n The running time is $O((1/\\epsilon^2)m{\\sqrt k} \\log n)$\n and the space required is $O(m)$ for $n$ nodes and $m$ edges.\n\n If the edges have a 'weight' attribute they will be used as\n weights in this algorithm. Unspecified weights are set to 1.\n\n References\n ----------\n .. [1] Ulrik Brandes and Daniel Fleischer:\n Centrality Measures Based on Current Flow.\n Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).\n LNCS 3404, pp. 533-544. 
Springer-Verlag, 2005.\n http://algo.uni-konstanz.de/publications/bf-cmbcf-05.pdf\n \"\"\"\n import numpy as np\n\n if not nx.is_connected(G):\n raise nx.NetworkXError(\"Graph not connected.\")\n solvername = {\n \"full\": FullInverseLaplacian,\n \"lu\": SuperLUInverseLaplacian,\n \"cg\": CGInverseLaplacian,\n }\n n = G.number_of_nodes()\n ordering = list(reverse_cuthill_mckee_ordering(G))\n # make a copy with integer labels according to rcm ordering\n # this could be done without a copy if we really wanted to\n H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))\n L = laplacian_sparse_matrix(\n H, nodelist=range(n), weight=weight, dtype=dtype, format=\"csc\"\n )\n C = solvername[solver](L, dtype=dtype) # initialize solver\n betweenness = dict.fromkeys(H, 0.0)\n nb = (n - 1.0) * (n - 2.0) # normalization factor\n cstar = n * (n - 1) / nb\n l = 1 # parameter in approximation, adjustable\n k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n)))\n if k > kmax:\n msg = f\"Number random pairs k>kmax ({k}>{kmax}) \"\n raise nx.NetworkXError(msg, \"Increase kmax or epsilon\")\n cstar2k = cstar / (2 * k)\n for i in range(k):\n s, t = seed.sample(range(n), 2)\n b = np.zeros(n, dtype=dtype)\n b[s] = 1\n b[t] = -1\n p = C.solve(b)\n for v in H:\n if v == s or v == t:\n continue\n for nbr in H[v]:\n w = H[v][nbr].get(weight, 1.0)\n betweenness[v] += w * np.abs(p[v] - p[nbr]) * cstar2k\n if normalized:\n factor = 1.0\n else:\n factor = nb / 2.0\n # remap to original node names and \"unnormalize\" if required\n return {ordering[k]: float(v * factor) for k, v in betweenness.items()}\n\n\n@not_implemented_for(\"directed\")\ndef current_flow_betweenness_centrality(\n G, normalized=True, weight=None, dtype=float, solver=\"full\"\n):\n r\"\"\"Compute current-flow betweenness centrality for nodes.\n\n Current-flow betweenness centrality uses an electrical current\n model for information spreading in contrast to betweenness\n centrality which uses shortest paths.\n\n Current-flow betweenness centrality is also known as\n random-walk betweenness centrality [2]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n normalized : bool, optional (default=True)\n If True the betweenness values are normalized by 2/[(n-1)(n-2)] where\n n is the number of nodes in G.\n\n weight : string or None, optional (default=None)\n Key for edge data used as the edge weight.\n If None, then use 1 as each edge weight.\n The weight reflects the capacity or the strength of the\n edge.\n\n dtype : data type (float)\n Default data type for internal matrices.\n Set to np.float32 for lower memory consumption.\n\n solver : string (default='full')\n Type of linear solver to use for computing the flow matrix.\n Options are \"full\" (uses most memory), \"lu\" (recommended), and\n \"cg\" (uses least memory).\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with betweenness centrality as the value.\n\n See Also\n --------\n approximate_current_flow_betweenness_centrality\n betweenness_centrality\n edge_betweenness_centrality\n edge_current_flow_betweenness_centrality\n\n Notes\n -----\n Current-flow betweenness can be computed in $O(I(n-1)+mn \\log n)$\n time [1]_, where $I(n-1)$ is the time needed to compute the\n inverse Laplacian. For a full matrix this is $O(n^3)$ but using\n sparse methods you can achieve $O(nm{\\sqrt k})$ where $k$ is the\n Laplacian matrix condition number.\n\n The space required is $O(nw)$ where $w$ is the width of the sparse\n Laplacian matrix. 
Worse case is $w=n$ for $O(n^2)$.\n\n If the edges have a 'weight' attribute they will be used as\n weights in this algorithm. Unspecified weights are set to 1.\n\n References\n ----------\n .. [1] Centrality Measures Based on Current Flow.\n Ulrik Brandes and Daniel Fleischer,\n Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).\n LNCS 3404, pp. 533-544. Springer-Verlag, 2005.\n http://algo.uni-konstanz.de/publications/bf-cmbcf-05.pdf\n\n .. [2] A measure of betweenness centrality based on random walks,\n M. E. J. Newman, Social Networks 27, 39-54 (2005).\n \"\"\"\n if not nx.is_connected(G):\n raise nx.NetworkXError(\"Graph not connected.\")\n n = G.number_of_nodes()\n ordering = list(reverse_cuthill_mckee_ordering(G))\n # make a copy with integer labels according to rcm ordering\n # this could be done without a copy if we really wanted to\n H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))\n betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H\n for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):\n pos = dict(zip(row.argsort()[::-1], range(n)))\n for i in range(n):\n betweenness[s] += (i - pos[i]) * row[i]\n betweenness[t] += (n - i - 1 - pos[i]) * row[i]\n if normalized:\n nb = (n - 1.0) * (n - 2.0) # normalization factor\n else:\n nb = 2.0\n for v in H:\n betweenness[v] = float((betweenness[v] - v) * 2.0 / nb)\n return {ordering[k]: v for k, v in betweenness.items()}\n\n\n@not_implemented_for(\"directed\")\ndef edge_current_flow_betweenness_centrality(\n G, normalized=True, weight=None, dtype=float, solver=\"full\"\n):\n r\"\"\"Compute current-flow betweenness centrality for edges.\n\n Current-flow betweenness centrality uses an electrical current\n model for information spreading in contrast to betweenness\n centrality which uses shortest paths.\n\n Current-flow betweenness centrality is also known as\n random-walk betweenness centrality [2]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n normalized : bool, optional (default=True)\n If True the betweenness values are normalized by 2/[(n-1)(n-2)] where\n n is the number of nodes in G.\n\n weight : string or None, optional (default=None)\n Key for edge data used as the edge weight.\n If None, then use 1 as each edge weight.\n The weight reflects the capacity or the strength of the\n edge.\n\n dtype : data type (default=float)\n Default data type for internal matrices.\n Set to np.float32 for lower memory consumption.\n\n solver : string (default='full')\n Type of linear solver to use for computing the flow matrix.\n Options are \"full\" (uses most memory), \"lu\" (recommended), and\n \"cg\" (uses least memory).\n\n Returns\n -------\n nodes : dictionary\n Dictionary of edge tuples with betweenness centrality as the value.\n\n Raises\n ------\n NetworkXError\n The algorithm does not support DiGraphs.\n If the input graph is an instance of DiGraph class, NetworkXError\n is raised.\n\n See Also\n --------\n betweenness_centrality\n edge_betweenness_centrality\n current_flow_betweenness_centrality\n\n Notes\n -----\n Current-flow betweenness can be computed in $O(I(n-1)+mn \\log n)$\n time [1]_, where $I(n-1)$ is the time needed to compute the\n inverse Laplacian. For a full matrix this is $O(n^3)$ but using\n sparse methods you can achieve $O(nm{\\sqrt k})$ where $k$ is the\n Laplacian matrix condition number.\n\n The space required is $O(nw)$ where $w$ is the width of the sparse\n Laplacian matrix. 
Worse case is $w=n$ for $O(n^2)$.\n\n If the edges have a 'weight' attribute they will be used as\n weights in this algorithm. Unspecified weights are set to 1.\n\n References\n ----------\n .. [1] Centrality Measures Based on Current Flow.\n Ulrik Brandes and Daniel Fleischer,\n Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).\n LNCS 3404, pp. 533-544. Springer-Verlag, 2005.\n http://algo.uni-konstanz.de/publications/bf-cmbcf-05.pdf\n\n .. [2] A measure of betweenness centrality based on random walks,\n M. E. J. Newman, Social Networks 27, 39-54 (2005).\n \"\"\"\n from networkx.utils import reverse_cuthill_mckee_ordering\n\n if not nx.is_connected(G):\n raise nx.NetworkXError(\"Graph not connected.\")\n n = G.number_of_nodes()\n ordering = list(reverse_cuthill_mckee_ordering(G))\n # make a copy with integer labels according to rcm ordering\n # this could be done without a copy if we really wanted to\n H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))\n edges = (tuple(sorted((u, v))) for u, v in H.edges())\n betweenness = dict.fromkeys(edges, 0.0)\n if normalized:\n nb = (n - 1.0) * (n - 2.0) # normalization factor\n else:\n nb = 2.0\n for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):\n pos = dict(zip(row.argsort()[::-1], range(1, n + 1)))\n for i in range(n):\n betweenness[e] += (i + 1 - pos[i]) * row[i]\n betweenness[e] += (n - i - pos[i]) * row[i]\n betweenness[e] /= nb\n return {(ordering[s], ordering[t]): float(v) for (s, t), v in betweenness.items()}\n"
] | [
[
"numpy.log",
"numpy.abs",
"numpy.zeros"
]
] |
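The row above pairs NetworkX's current-flow betweenness implementations with the NumPy calls they make (numpy.log, numpy.abs, numpy.zeros). Below is a minimal usage sketch, not part of the dataset row, comparing the exact and approximate routines on a small connected graph; it assumes a recent networkx release with SciPy available.

    # Illustrative sketch only: exact vs. approximate current-flow betweenness.
    import networkx as nx

    G = nx.grid_2d_graph(4, 4)  # small, connected, undirected test graph
    exact = nx.current_flow_betweenness_centrality(G, solver="lu")
    approx = nx.approximate_current_flow_betweenness_centrality(G, epsilon=0.5)
    worst_gap = max(abs(exact[v] - approx[v]) for v in G)
    print("largest |exact - approximate| gap:", round(worst_gap, 3))
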
mcbal/afem | [
"6101605bf427ccc655bd958d27407fcc133d47b4"
] | [
"tests/test_model_gradients.py"
] | [
"import itertools\nimport unittest\n\nimport numpy as np\nimport torch\nfrom torch.autograd import gradcheck\n\nfrom afem.models import VectorSpinModel\n\n\nclass TestAnalyticalGradients(unittest.TestCase):\n def test_phi_t(self):\n num_spins, dim = 7, 61\n\n for (t_vector, J_external, J_symmetric) in itertools.product([True, False], repeat=3):\n with self.subTest(t_vector=t_vector, J_external=J_external, J_symmetric=J_symmetric):\n model = VectorSpinModel(\n num_spins=num_spins,\n dim=dim,\n beta=1.0,\n J_external=J_external,\n J_symmetric=J_symmetric,\n ).double()\n\n h = torch.randn(1, num_spins, dim).double()\n t0 = torch.ones(1, num_spins) if t_vector else torch.ones(1, 1) # (batch explicit)\n t0 = t0.double().requires_grad_()\n\n analytical_grad = model._jac_phi(t0, h)\n numerical_grad = torch.autograd.grad(model._phi(t0, h), t0)[0]\n\n self.assertTrue(\n torch.allclose(analytical_grad, numerical_grad)\n )\n\n def test_grad_phi_t(self):\n num_spins, dim = 7, 61\n\n for (t_vector, J_external, J_symmetric) in itertools.product([True, False], repeat=3):\n with self.subTest(t_vector=t_vector, J_external=J_external, J_symmetric=J_symmetric):\n model = VectorSpinModel(\n num_spins=num_spins,\n dim=dim,\n beta=1.0,\n J_external=J_external,\n J_symmetric=J_symmetric,\n ).double()\n\n h = torch.randn(1, num_spins, dim).double()\n t0 = torch.ones(1, num_spins) if t_vector else torch.ones(1, 1) # (batch explicit)\n t0 = t0.double().requires_grad_()\n\n analytical_grad = model._hess_phi(t0, h).sum(dim=-1)\n numerical_grad = torch.autograd.grad(model._jac_phi(t0, h).sum(dim=-1), t0)[0]\n\n self.assertTrue(\n torch.allclose(analytical_grad, numerical_grad)\n )\n\n\nclass TestRootFindingGradients(unittest.TestCase):\n def test_vector_spin_model_afe(self):\n num_spins, dim = 7, 61\n\n for (t_vector, use_analytical_grads, J_external, J_symmetric) in itertools.product([True, False], repeat=4):\n with self.subTest(\n t_vector=t_vector,\n use_analytical_grads=use_analytical_grads,\n J_external=J_external,\n J_symmetric=J_symmetric\n ):\n model = VectorSpinModel(\n num_spins=num_spins,\n dim=dim,\n beta=1.0,\n J_external=J_external,\n J_symmetric=J_symmetric,\n solver_fwd_max_iter=100,\n solver_fwd_tol=1e-8,\n solver_bwd_max_iter=100,\n solver_bwd_tol=1e-8,\n ).double()\n\n x = (torch.randn(1, num_spins, dim) / np.sqrt(dim)).double()\n t0 = torch.ones(num_spins) if t_vector else torch.ones(1)\n t0 = t0.double().requires_grad_()\n\n self.assertTrue(\n gradcheck(\n lambda z: model(z, t0, return_afe=True, use_analytical_grads=use_analytical_grads).afe,\n x.requires_grad_(),\n eps=1e-6,\n atol=1e-4,\n check_undefined_grad=False,\n )\n )\n\n def test_vector_spin_model_magnetizations(self):\n num_spins, dim = 7, 61\n\n for (t_vector, use_analytical_grads, J_external, J_symmetric) in itertools.product([True, False], repeat=4):\n with self.subTest(\n t_vector=t_vector,\n use_analytical_grads=use_analytical_grads,\n J_external=J_external,\n J_symmetric=J_symmetric\n ):\n model = VectorSpinModel(\n num_spins=num_spins,\n dim=dim,\n beta=1.0,\n J_external=J_external,\n J_symmetric=J_symmetric,\n solver_fwd_max_iter=100,\n solver_fwd_tol=1e-8,\n solver_bwd_max_iter=100,\n solver_bwd_tol=1e-8,\n ).double()\n\n x = (torch.randn(1, num_spins, dim) / np.sqrt(dim)).double()\n t0 = torch.ones(num_spins) if t_vector else torch.ones(1)\n t0 = t0.double().requires_grad_()\n\n self.assertTrue(\n gradcheck(\n lambda z: model(z, t0, return_magnetizations=True, use_analytical_grads=use_analytical_grads).magnetizations,\n 
x.requires_grad_(),\n eps=1e-6,\n atol=1e-3,\n check_undefined_grad=False,\n )\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"torch.allclose",
"torch.randn",
"numpy.sqrt",
"torch.ones"
]
] |
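The test file above validates VectorSpinModel's analytical Jacobians and Hessians against torch.autograd via gradcheck. The toy sketch below does not reproduce the model; toy_phi is an invented stand-in used only to show the same double-precision gradcheck idiom the tests rely on.

    # Minimal gradcheck pattern on a toy function (not the real VectorSpinModel).
    import torch
    from torch.autograd import gradcheck

    def toy_phi(t, h):
        # smooth stand-in for the model's phi(t, h)
        return (t * h).sum() + (t ** 2).sum()

    t0 = torch.ones(3, dtype=torch.double, requires_grad=True)
    h = torch.randn(3, dtype=torch.double)
    assert gradcheck(lambda t: toy_phi(t, h), (t0,), eps=1e-6, atol=1e-4)
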
akan72/PiazzaTextualAnalysis | [
"2a1bbb1381ee4707992bacc5d752144a5950a009"
] | [
"piazzatextualanalysis/transform_posts.py"
] | [
"import os\nimport re\nimport glob\nimport pickle\nimport pandas as pd\n\nfrom utils.transform_utils import *\n\n# Get all posts within the data directory\nposts = glob.glob('data/posts/*.p')\n\n# Iterate over all posts within a class\nfor fp in posts:\n # Load each post into a DataFrame and store its networkid\n df = pd.DataFrame(pickle.load(open(fp, \"rb\")))\n network_id = re.search(\"posts_(.*).p\", fp).group(1)\n \n # Compute different metrics about the class\n df['created'] = pd.to_datetime(df['created'])\n df['num_revisions'] = df['history'].apply(lambda x: len(x))\n df['subject'] = df['history'].apply(lambda x: x[0]['subject'])\n df['is_student'] = df['tags'].apply(lambda x: 'student' in x)\n df['is_instructor'] = df['tags'].apply(lambda x: 'instructor-note' in x)\n df['is_announcement'] = df['config'].apply(lambda x: 1 if 'is_announcement' in x else 0)\n df['num_children'] = df['children'].apply(lambda x: len(list(num_nested_dicts(x[0], 'children'))) if len(x) > 0 else 0)\n\n # Remove HTML from text column\n df['text'] = df['history'].apply(lambda x: re.sub('<[^<]+?>|\\n', ' ', x[0]['content']))\n \n # Reorder the columns\n df = df[['id', 'created', 'type', 'folders', 'tags', 'is_announcement', 'history', 'children', 'tag_good', 'is_student', 'no_answer', 'num_children', 'num_favorites', 'num_revisions', 'unique_views', 'subject','text']]\n \n with open(f\"data/dataframes/{fp[11:-23]}_dataframe_{network_id}.p\", 'wb') as f:\n pickle.dump(df, f)"
] | [
[
"pandas.to_datetime"
]
] |
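transform_posts.py above flattens pickled Piazza posts into a DataFrame; the only API the row records is pandas.to_datetime. The self-contained sketch below shows the same transform pattern on two fabricated records (illustrative only, not real Piazza data).

    # Toy illustration of the DataFrame transform pattern above.
    import pandas as pd

    records = [
        {"created": "2020-01-01T10:00:00Z", "history": [{"subject": "HW1 question"}], "tags": ["student"]},
        {"created": "2020-01-02T12:30:00Z", "history": [{"subject": "Exam logistics"}, {"subject": "edited"}], "tags": ["instructor-note"]},
    ]
    df = pd.DataFrame(records)
    df["created"] = pd.to_datetime(df["created"])
    df["num_revisions"] = df["history"].apply(len)
    df["subject"] = df["history"].apply(lambda h: h[0]["subject"])
    df["is_student"] = df["tags"].apply(lambda t: "student" in t)
    print(df[["created", "subject", "num_revisions", "is_student"]])
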
josecannete/transformers | [
"dfe012ad9d6b6f0c9d30bc508b9f1e4c42280c07"
] | [
"src/transformers/modeling_ctrl.py"
] | [
"# coding=utf-8\n# Copyright 2018 Salesforce and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch CTRL model.\"\"\"\n\n\nimport logging\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom .configuration_ctrl import CTRLConfig\nfrom .file_utils import add_start_docstrings\nfrom .modeling_utils import Conv1D, PreTrainedModel\n\n\nlogger = logging.getLogger(__name__)\n\nCTRL_PRETRAINED_MODEL_ARCHIVE_MAP = {\"ctrl\": \"https://storage.googleapis.com/sf-ctrl/pytorch/seqlen256_v1.bin\"}\n\n\ndef angle_defn(pos, i, d_model_size):\n angle_rates = 1 / torch.pow(10000, (2 * (i // 2)) / d_model_size)\n return pos * angle_rates\n\n\ndef positional_encoding(position, d_model_size, dtype):\n # create the sinusoidal pattern for the positional encoding\n angle_rads = angle_defn(\n torch.arange(position, dtype=dtype).unsqueeze(1),\n torch.arange(d_model_size, dtype=dtype).unsqueeze(0),\n d_model_size,\n )\n\n sines = torch.sin(angle_rads[:, 0::2])\n cosines = torch.cos(angle_rads[:, 1::2])\n\n pos_encoding = torch.cat([sines, cosines], dim=-1)\n return pos_encoding\n\n\ndef scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):\n # calculate attention\n matmul_qk = torch.matmul(q, k.permute(0, 1, 3, 2))\n\n dk = k.shape[-1]\n scaled_attention_logits = matmul_qk / np.sqrt(dk)\n\n if mask is not None:\n nd, ns = scaled_attention_logits.size(-2), scaled_attention_logits.size(-1)\n scaled_attention_logits += mask[ns - nd : ns, :ns] * -1e4\n\n if attention_mask is not None:\n # Apply the attention mask\n scaled_attention_logits = scaled_attention_logits + attention_mask\n\n attention_weights = torch.softmax(scaled_attention_logits, dim=-1)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_weights = attention_weights * head_mask\n\n output = torch.matmul(attention_weights, v)\n\n return output, attention_weights\n\n\nclass MultiHeadAttention(torch.nn.Module):\n def __init__(self, d_model_size, num_heads, output_attentions=False):\n super(MultiHeadAttention, self).__init__()\n self.output_attentions = output_attentions\n self.num_heads = num_heads\n self.d_model_size = d_model_size\n\n self.depth = int(d_model_size / self.num_heads)\n\n self.Wq = torch.nn.Linear(d_model_size, d_model_size)\n self.Wk = torch.nn.Linear(d_model_size, d_model_size)\n self.Wv = torch.nn.Linear(d_model_size, d_model_size)\n\n self.dense = torch.nn.Linear(d_model_size, d_model_size)\n\n def split_into_heads(self, x, batch_size):\n x = x.reshape(batch_size, -1, self.num_heads, self.depth)\n return x.permute([0, 2, 1, 3])\n\n def forward(self, v, k, q, mask, layer_past=None, attention_mask=None, head_mask=None):\n batch_size = q.shape[0]\n\n q = self.Wq(q)\n k = self.Wk(k)\n v = self.Wv(v)\n\n q = self.split_into_heads(q, batch_size)\n k = self.split_into_heads(k, batch_size)\n v = self.split_into_heads(v, batch_size)\n if layer_past is 
not None:\n past_key, past_value = layer_past[0], layer_past[1]\n k = torch.cat((past_key, k), dim=-2)\n v = torch.cat((past_value, v), dim=-2)\n present = torch.stack((k, v))\n\n output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)\n scaled_attention = output[0].permute([0, 2, 1, 3])\n attn = output[1]\n original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size)\n output = self.dense(original_size_attention)\n\n outputs = (output, present)\n if self.output_attentions:\n outputs = outputs + (attn,)\n return outputs\n\n\ndef point_wise_feed_forward_network(d_model_size, dff):\n return torch.nn.Sequential(torch.nn.Linear(d_model_size, dff), torch.nn.ReLU(), torch.nn.Linear(dff, d_model_size))\n\n\nclass EncoderLayer(torch.nn.Module):\n def __init__(self, d_model_size, num_heads, dff, rate=0.1, output_attentions=False):\n super(EncoderLayer, self).__init__()\n\n self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads, output_attentions)\n self.ffn = point_wise_feed_forward_network(d_model_size, dff)\n\n self.layernorm1 = torch.nn.LayerNorm(d_model_size, eps=1e-6)\n self.layernorm2 = torch.nn.LayerNorm(d_model_size, eps=1e-6)\n\n self.dropout1 = torch.nn.Dropout(rate)\n self.dropout2 = torch.nn.Dropout(rate)\n\n def forward(self, x, mask, layer_past=None, attention_mask=None, head_mask=None):\n normed = self.layernorm1(x)\n attn_outputs = self.multi_head_attention(\n normed, normed, normed, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask\n )\n attn_output = attn_outputs[0]\n attn_output = self.dropout1(attn_output)\n out1 = x + attn_output\n\n out2 = self.layernorm2(out1)\n ffn_output = self.ffn(out2)\n ffn_output = self.dropout2(ffn_output)\n out2 = out1 + ffn_output\n\n outputs = (out2,) + attn_outputs[1:]\n return outputs\n\n\nclass CTRLPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = CTRLConfig\n pretrained_model_archive_map = CTRL_PRETRAINED_MODEL_ARCHIVE_MAP\n base_model_prefix = \"transformer\"\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nCTRL_START_DOCSTRING = r\"\"\" CTRL model was proposed in\n `CTRL: A Conditional Transformer Language Model for Controllable Generation`_\n by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.\n It's a causal (unidirectional) transformer pre-trained using language modeling on a very large\n corpus of ~140 GB of text data with the first token reserved as a control code (such as Links, Books, Wikipedia etc.).\n\n This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and\n refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n .. _`CTRL: A Conditional Transformer Language Model for Controllable Generation`:\n https://www.github.com/salesforce/ctrl\n\n .. 
_`torch.nn.Module`:\n https://pytorch.org/docs/stable/nn.html#module\n\n Parameters:\n config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nCTRL_INPUTS_DOCSTRING = r\"\"\" Inputs:\n **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n CTRL is a model with absolute position embeddings so it's usually advised to pad the inputs on\n the right rather than the left.\n Indices can be obtained using :class:`transformers.CTRLTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n **past**:\n list of ``torch.FloatTensor`` (one for each layer):\n that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model\n (see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model\n should not be passed as input ids as they have already been computed.\n **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n A parallel sequence of tokens (can be used to indicate various portions of the inputs).\n The embeddings from these tokens will be summed with the respective token embeddings.\n Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).\n **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:\n Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.\",\n CTRL_START_DOCSTRING,\n CTRL_INPUTS_DOCSTRING,\n)\nclass CTRLModel(CTRLPreTrainedModel):\n r\"\"\"\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\n Sequence of hidden-states at the last layer of the model.\n **past**:\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:\n that contains 
pre-computed hidden-states (key and values in the attention blocks).\n Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model\n should not be passed as input ids as they have already been computed.\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = CTRLTokenizer.from_pretrained('ctrl')\n model = CTRLModel.from_pretrained('ctrl')\n input_ids = torch.tensor(tokenizer.encode(\"Links Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n\n def __init__(self, config):\n super(CTRLModel, self).__init__(config)\n self.output_hidden_states = config.output_hidden_states\n self.output_attentions = config.output_attentions\n self.output_past = config.output_past\n\n self.d_model_size = config.n_embd\n self.num_layers = config.n_layer\n\n self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float)\n\n self.w = nn.Embedding(config.vocab_size, config.n_embd)\n\n self.dropout = nn.Dropout(config.embd_pdrop)\n self.h = nn.ModuleList(\n [\n EncoderLayer(config.n_embd, config.n_head, config.dff, config.resid_pdrop, config.output_attentions)\n for _ in range(config.n_layer)\n ]\n )\n self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.w\n\n def set_input_embeddings(self, new_embeddings):\n self.w = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.h[layer].attn.prune_heads(heads)\n\n def forward(\n self,\n input_ids=None,\n past=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n ):\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if past is None:\n past_length = 0\n past = [None] * len(self.h)\n else:\n past_length = past[0][0].size(-2)\n if position_ids is None:\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n # Attention mask.\n if attention_mask is not None:\n attention_mask = attention_mask.view(-1, 
input_shape[-1])\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n attention_mask = (1.0 - attention_mask) * -10000.0\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # head_mask has shape n_layer x batch x n_heads x N x N\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = (\n head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n ) # We can specify head_mask for each layer\n head_mask = head_mask.to(\n dtype=next(self.parameters()).dtype\n ) # switch to fload if need + fp16 compatibility\n else:\n head_mask = [None] * self.config.n_layer\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n token_type_embeds = self.w(token_type_ids)\n token_type_embeds *= np.sqrt(self.d_model_size)\n else:\n token_type_embeds = 0\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if inputs_embeds is None:\n inputs_embeds = self.w(input_ids)\n # inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded\n seq_len = input_shape[-1]\n mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(inputs_embeds.device)\n\n inputs_embeds *= np.sqrt(self.d_model_size)\n\n pos_embeds = self.pos_encoding[position_ids, :].to(inputs_embeds.device)\n\n hidden_states = inputs_embeds + pos_embeds + token_type_embeds\n\n hidden_states = self.dropout(hidden_states)\n\n output_shape = input_shape + (inputs_embeds.size(-1),)\n presents = ()\n all_hidden_states = ()\n all_attentions = []\n for i, (h, layer_past) in enumerate(zip(self.h, past)):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)\n outputs = h(\n hidden_states, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i]\n )\n hidden_states, present = outputs[:2]\n if self.output_past:\n presents = presents + (present,)\n\n if self.output_attentions:\n all_attentions.append(outputs[2])\n\n hidden_states = self.layernorm(hidden_states)\n hidden_states = hidden_states.view(*output_shape)\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_past:\n outputs = outputs + (presents,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n # let the number of heads free (-1) so we can extract attention even after head pruning\n attention_output_shape = input_shape[:-1] + (-1,) + 
all_attentions[0].shape[-2:]\n all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)\n outputs = outputs + (all_attentions,)\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"The CTRL Model transformer with a language modeling head on top\n(linear layer with weights tied to the input embeddings). \"\"\",\n CTRL_START_DOCSTRING,\n CTRL_INPUTS_DOCSTRING,\n)\nclass CTRLLMHeadModel(CTRLPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for language modeling.\n Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``\n Indices are selected in ``[-100, 0, ..., config.vocab_size]``\n All labels set to ``-100`` are ignored (masked), the loss is only\n computed for labels in ``[0, ..., config.vocab_size]``\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Language modeling loss.\n **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n **past**:\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:\n that contains pre-computed hidden-states (key and values in the attention blocks).\n Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model\n should not be passed as input ids as they have already been computed.\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n import torch\n from transformers import CTRLTokenizer, CTRLLMHeadModel\n\n tokenizer = CTRLTokenizer.from_pretrained('ctrl')\n model = CTRLLMHeadModel.from_pretrained('ctrl')\n\n input_ids = torch.tensor(tokenizer.encode(\"Links Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=input_ids)\n loss, logits = outputs[:2]\n\n \"\"\"\n\n def __init__(self, config):\n super(CTRLLMHeadModel, self).__init__(config)\n self.transformer = CTRLModel(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def prepare_inputs_for_generation(self, input_ids, **kwargs):\n # only last token for inputs_ids if past is defined in kwargs\n if \"past\" in kwargs and kwargs[\"past\"]:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n\n inputs = {\"input_ids\": input_ids}\n inputs.update(kwargs)\n return inputs\n\n def forward(\n self,\n input_ids=None,\n past=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n ):\n 
transformer_outputs = self.transformer(\n input_ids,\n past=past,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n\n outputs = (lm_logits,) + transformer_outputs[1:]\n\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)\n"
] | [
[
"torch.nn.Linear",
"torch.cos",
"torch.cat",
"torch.nn.LayerNorm",
"torch.stack",
"torch.nn.Dropout",
"torch.sin",
"torch.arange",
"torch.nn.CrossEntropyLoss",
"torch.softmax",
"torch.ones",
"torch.nn.ReLU",
"numpy.sqrt",
"torch.matmul",
"torch.nn.Embedding",
"torch.pow"
]
] |
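modeling_ctrl.py above builds CTRL's sinusoidal positional encoding from torch.arange, torch.pow, torch.sin, torch.cos and torch.cat. The snippet below lifts just that helper out of the file so it runs standalone; the logic is taken directly from the code in the row.

    # Standalone version of the positional-encoding helper from the row above.
    import torch

    def positional_encoding(position, d_model_size, dtype=torch.float):
        pos = torch.arange(position, dtype=dtype).unsqueeze(1)
        i = torch.arange(d_model_size, dtype=dtype).unsqueeze(0)
        angle_rates = 1 / torch.pow(10000, (2 * (i // 2)) / d_model_size)
        angle_rads = pos * angle_rates
        sines = torch.sin(angle_rads[:, 0::2])
        cosines = torch.cos(angle_rads[:, 1::2])
        return torch.cat([sines, cosines], dim=-1)  # shape: (position, d_model_size)

    pe = positional_encoding(256, 64)
    print(pe.shape)  # torch.Size([256, 64])
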
jreganpb/face-alignment | [
"9391ae6f2b8e569c14e91553d7ecbb5e1fc1c72b"
] | [
"face_alignment/api.py"
] | [
"from __future__ import print_function\r\nimport os\r\nimport glob\r\nimport dlib\r\nimport torch\r\nimport torch.nn as nn\r\nfrom enum import Enum\r\nfrom skimage import io\r\nfrom skimage import color\r\nimport cv2\r\nimport uuid\r\nimport torchvision\r\nfrom torchvision import transforms\r\ntry:\r\n import urllib.request as request_file\r\nexcept BaseException:\r\n import urllib as request_file\r\n\r\nfrom .models import FAN, ResNetDepth\r\nfrom .utils import *\r\n#from pose import datasets, hopenet, utils\r\nfrom .pose.hopenet import Hopenet\r\nfrom PIL import Image\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass LandmarksType(Enum):\r\n _2D = 1\r\n _2halfD = 2\r\n _3D = 3\r\n\r\n\r\nclass NetworkSize(Enum):\r\n # TINY = 1\r\n # SMALL = 2\r\n # MEDIUM = 3\r\n LARGE = 4\r\n\r\n def __new__(cls, value):\r\n member = object.__new__(cls)\r\n member._value_ = value\r\n return member\r\n\r\n def __int__(self):\r\n return self.value\r\n\r\n\r\nclass FaceAlignment:\r\n \"\"\"Initialize the face alignment pipeline\r\n\r\n Args:\r\n landmarks_type (``LandmarksType`` object): an enum defining the type of predicted points.\r\n network_size (``NetworkSize`` object): an enum defining the size of the network (for the 2D and 2.5D points).\r\n enable_cuda (bool, optional): If True, all the computations will be done on a CUDA-enabled GPU (recommended).\r\n enable_cudnn (bool, optional): If True, cudnn library will be used in the benchmark mode\r\n flip_input (bool, optional): Increase the network accuracy by doing a second forward passed with\r\n the flipped version of the image\r\n use_cnn_face_detector (bool, optional): If True, dlib's CNN based face detector is used even if CUDA\r\n is disabled.\r\n\r\n Example:\r\n >>> FaceAlignment(NetworkSize.2D, flip_input=False)\r\n \"\"\"\r\n\r\n def __init__(self, landmarks_type, network_size=NetworkSize.LARGE,\r\n enable_cuda=True, enable_cudnn=True, flip_input=False,\r\n use_cnn_face_detector=False):\r\n self.enable_cuda = enable_cuda\r\n self.use_cnn_face_detector = use_cnn_face_detector\r\n self.flip_input = flip_input\r\n self.landmarks_type = landmarks_type\r\n self.pose_model = Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)\r\n if enable_cuda:\r\n self.pose_model.cuda(0)\r\n self.pose_model.eval()\r\n base_path = os.path.join(appdata_dir('face_alignment'), \"data\")\r\n\r\n if not os.path.exists(base_path):\r\n os.makedirs(base_path)\r\n\r\n if enable_cudnn and self.enable_cuda:\r\n torch.backends.cudnn.benchmark = True\r\n\r\n # Initialise the face detector\r\n if self.use_cnn_face_detector:\r\n path_to_detector = os.path.join(\r\n base_path, \"mmod_human_face_detector.dat\")\r\n if not os.path.isfile(path_to_detector):\r\n print(\"Downloading the face detection CNN. 
Please wait...\")\r\n\r\n path_to_temp_detector = os.path.join(\r\n base_path, \"mmod_human_face_detector.dat.download\")\r\n\r\n if os.path.isfile(path_to_temp_detector):\r\n os.remove(os.path.join(path_to_temp_detector))\r\n\r\n request_file.urlretrieve(\r\n \"https://www.adrianbulat.com/downloads/dlib/mmod_human_face_detector.dat\",\r\n os.path.join(path_to_temp_detector))\r\n\r\n os.rename(os.path.join(path_to_temp_detector),os.path.join(path_to_detector))\r\n\r\n self.face_detector = dlib.cnn_face_detection_model_v1(\r\n path_to_detector)\r\n\r\n else:\r\n self.face_detector = dlib.get_frontal_face_detector()\r\n\r\n # Initialise the face alignemnt networks\r\n self.face_alignment_net = FAN(int(network_size))\r\n if landmarks_type == LandmarksType._2D:\r\n network_name = '2DFAN-' + str(int(network_size)) + '.pth.tar'\r\n else:\r\n network_name = '3DFAN-' + str(int(network_size)) + '.pth.tar'\r\n fan_path = os.path.join(base_path, network_name)\r\n\r\n if not os.path.isfile(fan_path):\r\n print(\"Downloading the Face Alignment Network(FAN). Please wait...\")\r\n\r\n fan_temp_path = os.path.join(base_path,network_name+'.download')\r\n\r\n if os.path.isfile(fan_temp_path):\r\n os.remove(os.path.join(fan_temp_path))\r\n\r\n request_file.urlretrieve(\r\n \"https://www.adrianbulat.com/downloads/python-fan/\" +\r\n network_name, os.path.join(fan_temp_path))\r\n\r\n os.rename(os.path.join(fan_temp_path),os.path.join(fan_path))\r\n\r\n fan_weights = torch.load(\r\n fan_path,\r\n map_location=lambda storage,\r\n loc: storage)\r\n\r\n self.face_alignment_net.load_state_dict(fan_weights)\r\n\r\n if self.enable_cuda:\r\n self.face_alignment_net.cuda()\r\n self.face_alignment_net.eval()\r\n\r\n # Initialiase the depth prediciton network\r\n if landmarks_type == LandmarksType._3D:\r\n self.depth_prediciton_net = ResNetDepth()\r\n depth_model_path = os.path.join(base_path, 'depth.pth.tar')\r\n if not os.path.isfile(depth_model_path):\r\n print(\r\n \"Downloading the Face Alignment depth Network (FAN-D). 
Please wait...\")\r\n\r\n depth_model_temp_path = os.path.join(base_path, 'depth.pth.tar.download')\r\n\r\n if os.path.isfile(depth_model_temp_path):\r\n os.remove(os.path.join(depth_model_temp_path))\r\n\r\n\r\n request_file.urlretrieve(\r\n \"https://www.adrianbulat.com/downloads/python-fan/depth.pth.tar\",\r\n os.path.join(depth_model_temp_path))\r\n\r\n os.rename(os.path.join(depth_model_temp_path),os.path.join(depth_model_path))\r\n\r\n depth_weights = torch.load(\r\n depth_model_path,\r\n map_location=lambda storage,\r\n loc: storage)\r\n depth_dict = {\r\n k.replace('module.', ''): v for k,\r\n v in depth_weights['state_dict'].items()}\r\n self.depth_prediciton_net.load_state_dict(depth_dict)\r\n\r\n if self.enable_cuda:\r\n self.depth_prediciton_net.cuda()\r\n self.depth_prediciton_net.eval()\r\n\r\n def detect_faces(self, image):\r\n \"\"\"Run the dlib face detector over an image\r\n\r\n Args:\r\n image (``ndarray`` object or string): either the path to the image or an image previosly opened\r\n on which face detection will be performed.\r\n\r\n Returns:\r\n Returns a list of detected faces\r\n \"\"\"\r\n return self.face_detector(image, 1)\r\n\r\n def get_landmarks(self, input_image, all_faces=False):\r\n with torch.no_grad():\r\n if isinstance(input_image, str):\r\n try:\r\n image = io.imread(input_image)\r\n except IOError:\r\n print(\"error opening file :: \", input_image)\r\n return None\r\n else:\r\n image = input_image\r\n\r\n # Use grayscale image instead of RGB to speed up face detection\r\n detected_faces = self.detect_faces(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)) \r\n \r\n if image.ndim == 2:\r\n image = color.gray2rgb(image)\r\n\r\n if len(detected_faces) > 0:\r\n landmarks = []\r\n for i, d in enumerate(detected_faces):\r\n if i > 0 and not all_faces:\r\n break\r\n if self.use_cnn_face_detector:\r\n d = d.rect\r\n\r\n center = torch.FloatTensor(\r\n [d.right() - (d.right() - d.left()) / 2.0, d.bottom() -\r\n (d.bottom() - d.top()) / 2.0])\r\n center[1] = center[1] - (d.bottom() - d.top()) * 0.12\r\n scale = (d.right() - d.left() +\r\n d.bottom() - d.top()) / 195.0\r\n\r\n inp = crop(image, center, scale)\r\n inp = torch.from_numpy(inp.transpose(\r\n (2, 0, 1))).float().div(255.0).unsqueeze_(0)\r\n\r\n if self.enable_cuda:\r\n inp = inp.cuda()\r\n\r\n out = self.face_alignment_net(inp)[-1].data.cpu()\r\n if self.flip_input:\r\n out += flip(self.face_alignment_net(flip(inp))\r\n [-1].data.cpu(), is_label=True)\r\n\r\n pts, pts_img = get_preds_fromhm(out, center, scale)\r\n pts, pts_img = pts.view(68, 2) * 4, pts_img.view(68, 2)\r\n\r\n if self.landmarks_type == LandmarksType._3D:\r\n heatmaps = np.zeros((68, 256, 256))\r\n for i in range(68):\r\n if pts[i, 0] > 0:\r\n heatmaps[i] = draw_gaussian(\r\n heatmaps[i], pts[i], 2)\r\n heatmaps = torch.from_numpy(\r\n heatmaps).view(1, 68, 256, 256).float()\r\n if self.enable_cuda:\r\n heatmaps = heatmaps.cuda()\r\n depth_pred = self.depth_prediciton_net(\r\n torch.cat((inp, heatmaps), 1)).data.cpu().view(68, 1)\r\n pts_img = torch.cat(\r\n (pts_img, depth_pred * (1.0 / (256.0 / (200.0 * scale)))), 1)\r\n\r\n landmarks.append(pts_img.numpy())\r\n else:\r\n print(\"Warning: No faces were detected.\")\r\n return None\r\n\r\n return landmarks\r\n\r\n def process_folder(self, path, all_faces=False):\r\n types = ('*.jpg', '*.png')\r\n images_list = []\r\n for files in types:\r\n images_list.extend(glob.glob(os.path.join(path, files)))\r\n\r\n predictions = []\r\n for image_name in images_list:\r\n predictions.append((\r\n 
image_name, self.get_landmarks(image_name, all_faces)))\r\n\r\n return predictions\r\n\r\n def remove_models(self):\r\n base_path = os.path.join(appdata_dir('face_alignment'), \"data\")\r\n for data_model in os.listdir(base_path):\r\n file_path = os.path.join(base_path, data_model)\r\n try:\r\n if os.path.isfile(file_path):\r\n print('Removing ' + data_model + ' ...')\r\n os.unlink(file_path)\r\n except Exception as e:\r\n print(e)\r\n\r\n def get_default_bounding_box(self,input_image,det):\r\n x_min = det.left()\r\n y_min = det.top()\r\n x_max = det.right()\r\n y_max = det.bottom()\r\n bbox_width = abs(x_max - x_min)\r\n bbox_height = abs(y_max - y_min)\r\n x_min -= bbox_width / 4\r\n x_max += bbox_width / 4\r\n y_min -= bbox_height / 4\r\n y_max += 3 * bbox_height / 4\r\n x_min = max(x_min, 0)\r\n y_min = max(y_min, 0)\r\n x_max = min(input_image.shape[1], x_max)\r\n y_max = min(input_image.shape[0], y_max)\r\n x_min = int(np.round(x_min)); x_max = int(np.round(x_max))\r\n y_min = int(np.round(y_min)); y_max = int(np.round(y_max))\r\n return x_min, x_max, y_min, y_max\r\n\r\n def get_head_pose(self, input_image):\r\n idx_tensor = [idx for idx in range(66)]\r\n idx_tensor = torch.FloatTensor(idx_tensor)\r\n if self.enable_cuda:\r\n idx_tensor = idx_tensor.cuda(0)\r\n img = Image.fromarray(input_image)\r\n transformations = transforms.Compose([transforms.Scale(224),\r\n transforms.CenterCrop(224), transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])])\r\n img = transformations(img)\r\n img_shape = img.size()\r\n img = img.view(1, img_shape[0], img_shape[1], img_shape[2])\r\n img = Variable(img)\r\n if self.enable_cuda:\r\n img = img.cuda(0)\r\n yaw, pitch, roll = self.pose_model(img)\r\n yaw_predicted = F.softmax(yaw)\r\n pitch_predicted = F.softmax(pitch)\r\n roll_predicted = F.softmax(roll)\r\n # Get continuous predictions in degrees.\r\n yaw_predicted = torch.sum(yaw_predicted.data[0] * idx_tensor) * 3 - 99\r\n pitch_predicted = torch.sum(pitch_predicted.data[0] * idx_tensor) * 3 - 99\r\n roll_predicted = torch.sum(roll_predicted.data[0] * idx_tensor) * 3 - 99\r\n return {'yaw' : yaw_predicted, 'pitch' : pitch_predicted, 'roll' : roll_predicted}\r\n\r\n def get_landmarks_with_rectangles(self, input_image, all_faces=False): ## Returns dictionary w/ uuid's pre-made\r\n with torch.no_grad():\r\n if isinstance(input_image, str):\r\n try:\r\n image = io.imread(input_image)\r\n except IOError:\r\n print(\"error opening file :: \", input_image)\r\n return None\r\n else:\r\n image = input_image\r\n\r\n # Use grayscale image instead of RGB to speed up face detection\r\n detected_faces = self.detect_faces(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY))\r\n\r\n if image.ndim == 2:\r\n image = color.gray2rgb(image)\r\n\r\n if len(detected_faces) > 0:\r\n landmarks = {}\r\n for i, d in enumerate(detected_faces):\r\n if i > 0 and not all_faces:\r\n break\r\n new_uuid = uuid.uuid4().hex\r\n rect = d\r\n if self.use_cnn_face_detector:\r\n d = d.rect\r\n\r\n center = torch.FloatTensor(\r\n [d.right() - (d.right() - d.left()) / 2.0, d.bottom() -\r\n (d.bottom() - d.top()) / 2.0])\r\n center[1] = center[1] - (d.bottom() - d.top()) * 0.12\r\n scale = (d.right() - d.left() +\r\n d.bottom() - d.top()) / 195.0\r\n\r\n inp = crop(image, center, scale)\r\n inp = torch.from_numpy(inp.transpose(\r\n (2, 0, 1))).float().div(255.0).unsqueeze_(0)\r\n\r\n if self.enable_cuda:\r\n inp = inp.cuda()\r\n\r\n out = self.face_alignment_net(inp)[-1].data.cpu()\r\n if 
self.flip_input:\r\n out += flip(self.face_alignment_net(flip(inp))\r\n [-1].data.cpu(), is_label=True)\r\n\r\n pts, pts_img = get_preds_fromhm(out, center, scale)\r\n pts, pts_img = pts.view(68, 2) * 4, pts_img.view(68, 2)\r\n\r\n if self.landmarks_type == LandmarksType._3D:\r\n heatmaps = np.zeros((68, 256, 256))\r\n for i in range(68):\r\n if pts[i, 0] > 0:\r\n heatmaps[i] = draw_gaussian(\r\n heatmaps[i], pts[i], 2)\r\n heatmaps = torch.from_numpy(\r\n heatmaps).view(1, 68, 256, 256).float()\r\n if self.enable_cuda:\r\n heatmaps = heatmaps.cuda()\r\n depth_pred = self.depth_prediciton_net(\r\n torch.cat((inp, heatmaps), 1)).data.cpu().view(68, 1)\r\n pts_img = torch.cat(\r\n (pts_img, depth_pred * (1.0 / (256.0 / (200.0 * scale)))), 1)\r\n\r\n x_min, x_max, y_min, y_max = self.get_default_bounding_box(image,d)\r\n pose = self.get_head_pose(input_image=image[y_min:y_max,x_min:x_max])\r\n\r\n #landmarks.append(pts_img.numpy())\r\n landmarks[new_uuid] = {}\r\n landmarks[new_uuid]['rectangle'] = d\r\n landmarks[new_uuid]['landmarks'] = pts_img.numpy()\r\n landmarks[new_uuid]['confidence'] = rect.confidence\r\n landmarks[new_uuid]['pose'] = pose\r\n else:\r\n print(\"Warning: No faces were detected.\")\r\n return {}\r\n\r\n return landmarks\r\n"
] | [
[
"torch.cat",
"torch.autograd.Variable",
"torch.FloatTensor",
"torch.no_grad",
"torch.from_numpy",
"torch.load",
"torch.nn.functional.softmax",
"torch.sum"
]
] |
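In api.py above, get_head_pose converts Hopenet's 66 bin logits into continuous angles with a softmax-weighted expectation (torch.nn.functional.softmax plus torch.sum), using 3-degree bins centred on zero. The sketch below isolates that expectation step, with random logits standing in for the network output.

    # Bin-to-degrees expectation used by get_head_pose, with random logits.
    import torch
    import torch.nn.functional as F

    idx_tensor = torch.arange(66, dtype=torch.float)   # bin indices 0..65
    yaw_logits = torch.randn(1, 66)                    # stand-in for Hopenet output
    yaw_prob = F.softmax(yaw_logits, dim=1)
    yaw_degrees = torch.sum(yaw_prob[0] * idx_tensor) * 3 - 99  # 66 bins of 3 degrees
    print(float(yaw_degrees))
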
jc19chaoj/PaddleClas | [
"5d96bcfebeef4c3f146b0db6bb32fc6952d28cbc"
] | [
"ppcls/metric/metrics.py"
] | [
"# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom sklearn.metrics import hamming_loss\nfrom sklearn.metrics import accuracy_score as accuracy_metric\nfrom sklearn.metrics import multilabel_confusion_matrix\nfrom sklearn.preprocessing import binarize\n\n\nclass TopkAcc(nn.Layer):\n def __init__(self, topk=(1, 5)):\n super().__init__()\n assert isinstance(topk, (int, list, tuple))\n if isinstance(topk, int):\n topk = [topk]\n self.topk = topk\n\n def forward(self, x, label):\n if isinstance(x, dict):\n x = x[\"logits\"]\n\n metric_dict = dict()\n for k in self.topk:\n metric_dict[\"top{}\".format(k)] = paddle.metric.accuracy(\n x, label, k=k)\n return metric_dict\n\n\nclass mAP(nn.Layer):\n def __init__(self):\n super().__init__()\n\n def forward(self, similarities_matrix, query_img_id, gallery_img_id,\n keep_mask):\n metric_dict = dict()\n\n choosen_indices = paddle.argsort(\n similarities_matrix, axis=1, descending=True)\n gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])\n gallery_labels_transpose = paddle.broadcast_to(\n gallery_labels_transpose,\n shape=[\n choosen_indices.shape[0], gallery_labels_transpose.shape[1]\n ])\n choosen_label = paddle.index_sample(gallery_labels_transpose,\n choosen_indices)\n equal_flag = paddle.equal(choosen_label, query_img_id)\n if keep_mask is not None:\n keep_mask = paddle.index_sample(\n keep_mask.astype('float32'), choosen_indices)\n equal_flag = paddle.logical_and(equal_flag,\n keep_mask.astype('bool'))\n equal_flag = paddle.cast(equal_flag, 'float32')\n\n num_rel = paddle.sum(equal_flag, axis=1)\n num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.))\n num_rel_index = paddle.nonzero(num_rel.astype(\"int\"))\n num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]])\n equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0)\n\n acc_sum = paddle.cumsum(equal_flag, axis=1)\n div = paddle.arange(acc_sum.shape[1]).astype(\"float32\") + 1\n precision = paddle.divide(acc_sum, div)\n\n #calc map\n precision_mask = paddle.multiply(equal_flag, precision)\n ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag,\n axis=1)\n metric_dict[\"mAP\"] = paddle.mean(ap).numpy()[0]\n return metric_dict\n\n\nclass mINP(nn.Layer):\n def __init__(self):\n super().__init__()\n\n def forward(self, similarities_matrix, query_img_id, gallery_img_id,\n keep_mask):\n metric_dict = dict()\n\n choosen_indices = paddle.argsort(\n similarities_matrix, axis=1, descending=True)\n gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])\n gallery_labels_transpose = paddle.broadcast_to(\n gallery_labels_transpose,\n shape=[\n choosen_indices.shape[0], gallery_labels_transpose.shape[1]\n ])\n choosen_label = paddle.index_sample(gallery_labels_transpose,\n choosen_indices)\n equal_flag = paddle.equal(choosen_label, query_img_id)\n if keep_mask is not 
None:\n keep_mask = paddle.index_sample(\n keep_mask.astype('float32'), choosen_indices)\n equal_flag = paddle.logical_and(equal_flag,\n keep_mask.astype('bool'))\n equal_flag = paddle.cast(equal_flag, 'float32')\n\n num_rel = paddle.sum(equal_flag, axis=1)\n num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.))\n num_rel_index = paddle.nonzero(num_rel.astype(\"int\"))\n num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]])\n equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0)\n\n #do accumulative sum\n div = paddle.arange(equal_flag.shape[1]).astype(\"float32\") + 2\n minus = paddle.divide(equal_flag, div)\n auxilary = paddle.subtract(equal_flag, minus)\n hard_index = paddle.argmax(auxilary, axis=1).astype(\"float32\")\n all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index)\n mINP = paddle.mean(all_INP)\n metric_dict[\"mINP\"] = mINP.numpy()[0]\n return metric_dict\n\n\nclass Recallk(nn.Layer):\n def __init__(self, topk=(1, 5)):\n super().__init__()\n assert isinstance(topk, (int, list, tuple))\n if isinstance(topk, int):\n topk = [topk]\n self.topk = topk\n\n def forward(self, similarities_matrix, query_img_id, gallery_img_id,\n keep_mask):\n metric_dict = dict()\n\n #get cmc\n choosen_indices = paddle.argsort(\n similarities_matrix, axis=1, descending=True)\n gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])\n gallery_labels_transpose = paddle.broadcast_to(\n gallery_labels_transpose,\n shape=[\n choosen_indices.shape[0], gallery_labels_transpose.shape[1]\n ])\n choosen_label = paddle.index_sample(gallery_labels_transpose,\n choosen_indices)\n equal_flag = paddle.equal(choosen_label, query_img_id)\n if keep_mask is not None:\n keep_mask = paddle.index_sample(\n keep_mask.astype('float32'), choosen_indices)\n equal_flag = paddle.logical_and(equal_flag,\n keep_mask.astype('bool'))\n equal_flag = paddle.cast(equal_flag, 'float32')\n real_query_num = paddle.sum(equal_flag, axis=1)\n real_query_num = paddle.sum(\n paddle.greater_than(real_query_num, paddle.to_tensor(0.)).astype(\n \"float32\"))\n\n acc_sum = paddle.cumsum(equal_flag, axis=1)\n mask = paddle.greater_than(acc_sum,\n paddle.to_tensor(0.)).astype(\"float32\")\n all_cmc = (paddle.sum(mask, axis=0) / real_query_num).numpy()\n\n for k in self.topk:\n metric_dict[\"recall{}\".format(k)] = all_cmc[k - 1]\n return metric_dict\n\n\nclass Precisionk(nn.Layer):\n def __init__(self, topk=(1, 5)):\n super().__init__()\n assert isinstance(topk, (int, list, tuple))\n if isinstance(topk, int):\n topk = [topk]\n self.topk = topk\n\n def forward(self, similarities_matrix, query_img_id, gallery_img_id,\n keep_mask):\n metric_dict = dict()\n\n #get cmc\n choosen_indices = paddle.argsort(\n similarities_matrix, axis=1, descending=True)\n gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])\n gallery_labels_transpose = paddle.broadcast_to(\n gallery_labels_transpose,\n shape=[\n choosen_indices.shape[0], gallery_labels_transpose.shape[1]\n ])\n choosen_label = paddle.index_sample(gallery_labels_transpose,\n choosen_indices)\n equal_flag = paddle.equal(choosen_label, query_img_id)\n if keep_mask is not None:\n keep_mask = paddle.index_sample(\n keep_mask.astype('float32'), choosen_indices)\n equal_flag = paddle.logical_and(equal_flag,\n keep_mask.astype('bool'))\n equal_flag = paddle.cast(equal_flag, 'float32')\n\n Ns = paddle.arange(gallery_img_id.shape[0]) + 1\n equal_flag_cumsum = paddle.cumsum(equal_flag, axis=1)\n Precision_at_k = 
(paddle.mean(equal_flag_cumsum, axis=0) / Ns).numpy()\n\n for k in self.topk:\n metric_dict[\"precision@{}\".format(k)] = Precision_at_k[k - 1]\n\n return metric_dict\n\n\nclass DistillationTopkAcc(TopkAcc):\n def __init__(self, model_key, feature_key=None, topk=(1, 5)):\n super().__init__(topk=topk)\n self.model_key = model_key\n self.feature_key = feature_key\n\n def forward(self, x, label):\n if isinstance(x, dict):\n x = x[self.model_key]\n if self.feature_key is not None:\n x = x[self.feature_key]\n return super().forward(x, label)\n\n\nclass GoogLeNetTopkAcc(TopkAcc):\n def __init__(self, topk=(1, 5)):\n super().__init__()\n assert isinstance(topk, (int, list, tuple))\n if isinstance(topk, int):\n topk = [topk]\n self.topk = topk\n\n def forward(self, x, label):\n return super().forward(x[0], label)\n\n\nclass MutiLabelMetric(object):\n def __init__(self):\n pass\n\n def _multi_hot_encode(self, logits, threshold=0.5):\n return binarize(logits, threshold=threshold)\n\n def __call__(self, output):\n output = F.sigmoid(output)\n preds = self._multi_hot_encode(logits=output.numpy(), threshold=0.5)\n return preds\n\n\nclass HammingDistance(MutiLabelMetric):\n \"\"\"\n Soft metric based label for multilabel classification\n Returns:\n The smaller the return value is, the better model is.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def __call__(self, output, target):\n preds = super().__call__(output)\n metric_dict = dict()\n metric_dict[\"HammingDistance\"] = paddle.to_tensor(\n hamming_loss(target, preds))\n return metric_dict\n\n\nclass AccuracyScore(MutiLabelMetric):\n \"\"\"\n Hard metric for multilabel classification\n Args:\n base: [\"sample\", \"label\"], default=\"sample\"\n if \"sample\", return metric score based sample,\n if \"label\", return metric score based label.\n Returns:\n accuracy:\n \"\"\"\n\n def __init__(self, base=\"label\"):\n super().__init__()\n assert base in [\"sample\", \"label\"\n ], 'must be one of [\"sample\", \"label\"]'\n self.base = base\n\n def __call__(self, output, target):\n preds = super().__call__(output)\n metric_dict = dict()\n if self.base == \"sample\":\n accuracy = accuracy_metric(target, preds)\n elif self.base == \"label\":\n mcm = multilabel_confusion_matrix(target, preds)\n tns = mcm[:, 0, 0]\n fns = mcm[:, 1, 0]\n tps = mcm[:, 1, 1]\n fps = mcm[:, 0, 1]\n accuracy = (sum(tps) + sum(tns)) / (\n sum(tps) + sum(tns) + sum(fns) + sum(fps))\n metric_dict[\"AccuracyScore\"] = paddle.to_tensor(accuracy)\n return metric_dict\n"
] | [
[
"sklearn.metrics.multilabel_confusion_matrix",
"sklearn.metrics.hamming_loss",
"sklearn.preprocessing.binarize",
"sklearn.metrics.accuracy_score"
]
] |
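metrics.py above wraps sklearn's multilabel metrics (hamming_loss, multilabel_confusion_matrix, accuracy_score, binarize). The toy check below exercises the two calls used by HammingDistance and the label-based AccuracyScore on a fabricated 3-sample, 4-label binary matrix.

    # Toy check of the sklearn multilabel calls wrapped by the metric classes above.
    import numpy as np
    from sklearn.metrics import hamming_loss, multilabel_confusion_matrix

    target = np.array([[1, 0, 1, 0], [0, 1, 0, 0], [1, 1, 0, 1]])
    preds = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 1]])
    print("hamming loss:", hamming_loss(target, preds))
    mcm = multilabel_confusion_matrix(target, preds)  # shape: (n_labels, 2, 2)
    tns, fns, tps, fps = mcm[:, 0, 0], mcm[:, 1, 0], mcm[:, 1, 1], mcm[:, 0, 1]
    label_acc = (tps.sum() + tns.sum()) / (tps.sum() + tns.sum() + fns.sum() + fps.sum())
    print("label-based accuracy:", label_acc)
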
ann89/PHASE-SEPARATION | [
"b905a26577034ddd1e1b6aaa07fa1086956852b5"
] | [
"COM-recenter.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# =============================================================================\n# \"\"\"\n# @author: anna\n# Calculate the center of mass of the (\"phase separated\" aggregated )system using the method described in\n# L. Bai and D. Breen, J. Graphics, GPU, Game Tools 13, 53 (2008)\n# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.151.8565&rep=rep1&type=pdf\n# \n# Let's consider a system of N particles of\n# equal mass in a one-dimensional periodic system of size x_max == Lx.\n# Naturally, for three-dimensional systems, the calculations have\n# to be performed for each dimension separately.\n# We start by mapping each coordinate to an angle:\n# \\begin{equation}\n# \\theta_i = 2\\pi \\frac{x_i}{L_x}\n# \\end {equation}\n# \n# Then, this angle is interpreted to be on a unit circle, and the\n# corresponding two-dimensional coordinates are calculated,\n# \\begin{equation}\n# \\xi_i = cos(\\theta_i)\n# \\zeta_i = sin(\\theta_i)\n# \\end{equation}\n# \n# Now, we calculate the standard center of mass in the twodimensional space,\n# \\begin{equation}\n# \\bar \\xi_i = \\frac{1}{N}\\sum_{i=1}^N \\xi_i\n# \\bar \\zeta_i = \\frac{1}{N}\\sum_{i=1}^N \\zeta_i\n# \\end{equation}\n# Finally, we can map back the common center to an angle\n# \\begin{equation}\n# \\bat \\theta = atan2(-\\bar \\zeta, -\\bar \\xi ) + \\pi\n# \\end{equation}\n# and then map back this average angle to a length coordinate\n# in the range $[0, L_x)$ \n# \\begin{equation}\n# x_{COM} = L_x \\frac{\\bat \\theta}{2\\pi}\n# \\end{equation}\n# . The negation of the arguments in\n# combination with the shift of the function by $\\pi$ ensures that $\\theta$\n# falls within $[0,2\\pi)$.\n# \n# The algorithm is completely unambiguous, even for cases where the mass\n# distribution is wide in comparison to the periodic box. This\n# is not true when trying to calculate the center of mass for\n# such a case using the usual minimum image convention with\n# respect to some more or less random reference point. The\n# algorithm will only fail in the case of a completely uniform\n# mass distribution, for which the center of mass is not defined\n# in a periodic system. Even then, the algorithm will return some\n# value (depending on the implementation of atan2), which is\n# as good as any other for that particular situation (This is for instance the case for the \n# y direction in our membrane systems).\n# \n# Once the position of the center of mass is known, \n# fold back the coordinate in the box and write down the trajectory.\n# \"\"\"\n# \n# =============================================================================\nimport math\nimport MDAnalysis\nimport numpy as np\n\ntop = 'ANALYSIS/selection.pdb'\ntraj = 'ANALYSIS/selection.xtc' \n\nu = MDAnalysis.Universe(top,traj)\n\ndef getCMx(xpos, Lx):\n \n PI= math.pi\n \n radius_i = Lx/ (2. * PI)\n \n x_cm_i = np.mean(radius_i * np.cos(xpos / Lx * 2. * PI), axis=0)\n z_cm_i = np.mean(radius_i * np.sin(xpos / Lx * 2. * PI), axis=0)\n\n return (np.arctan2(-z_cm_i, -x_cm_i) + PI) / (2. 
* PI) * Lx\n\ndef set_pos_back_to_box(res_idxs, CMx, Lx, pos_x_all_new):\n pos_x_all_new = pos_x_all_new.copy()\n for i in range(len(res_idxs)):\n idx = res_idxs[i]\n pos_res = u.select_atoms('resnum %i'%idx).center_of_geometry()\n if pos_res[0] - CMx + (0.5 * Lx) >= Lx:\n #print('True')\n idx_atoms = u.select_atoms('resnum %i'%idx).ix\n pos_x_all_new[idx_atoms] = pos_x_all_new[idx_atoms] - Lx \n elif pos_res[0] - CMx + (0.5 * Lx) < 0:\n idx_atoms = u.select_atoms('resnum %i'%idx).ix\n pos_x_all_new[idx_atoms] = pos_x_all_new[idx_atoms] + Lx\n #print('second')\n return pos_x_all_new\n\n\ntrj = \"ANALYSIS/recentered_x.xtc\" \npdb = \"ANALYSIS/recentered_x.pdb\" \ngro = \"ANALYSIS/recentered_x.gro\" \n\nresnames = np.unique(u.select_atoms('all and name P or name O2').resnames)\nwith MDAnalysis.Writer(trj, multiframe=True, bonds=None, n_atoms=u.atoms.n_atoms) as XTC:\n \n\n for ts in u.trajectory:\n if len(resnames) > 3 :\n if len(resnames [resnames =='DLIP']) > 0: \n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DLIP').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DLIP and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n SSM_idxs = u.select_atoms('resname SSM and name P').resnums\n pos_x_all_new = set_pos_back_to_box(SSM_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new)\n else : \n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DAPC').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DAPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n SSM_idxs = u.select_atoms('resname SSM and name P').resnums\n pos_x_all_new = set_pos_back_to_box(SSM_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new) \n else:\n if len(resnames [resnames =='DLIP']) > 0:\n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DLIP').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DLIP and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new)\n else:\n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DAPC').positions[:,0] #to put DLIP in the middle \n Lx = 
u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DAPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new) \n \n foo1 = np.asarray([pos_x_all_new, u.select_atoms('all').positions[:,1], \n u.select_atoms('all').positions[:,2]])\n foo2=foo1.transpose()\n \n u.atoms.positions = foo2\n \n XTC.write(u.atoms)\n \n\n\n\nwith MDAnalysis.Writer(pdb, multiframe=True, bonds=None, n_atoms=u.atoms.n_atoms) as PDB:\n for ts in [u.trajectory[0]]:\n if len(resnames) > 3 :\n if len(resnames [resnames =='DLIP']) > 0: \n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DLIP').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DLIP and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n SSM_idxs = u.select_atoms('resname SSM and name P').resnums\n pos_x_all_new = set_pos_back_to_box(SSM_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new)\n else : \n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DAPC').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DAPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n SSM_idxs = u.select_atoms('resname SSM and name P').resnums\n pos_x_all_new = set_pos_back_to_box(SSM_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new) \n else:\n if len(resnames [resnames =='DLIP']) > 0:\n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DLIP').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DLIP and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new)\n else:\n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DAPC').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n 
pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DAPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new) \n \n foo1 = np.asarray([pos_x_all_new, u.select_atoms('all').positions[:,1], \n u.select_atoms('all').positions[:,2]])\n foo2=foo1.transpose()\n \n u.atoms.positions = foo2\n \n PDB.write(u.atoms)\n \n\nwith MDAnalysis.Writer(gro, reindex=False , bonds=None, n_atoms=u.atoms.n_atoms) as w:\n for ts in [u.trajectory[0]]:\n if len(resnames) > 3 :\n if len(resnames [resnames =='DLIP']) > 0: \n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DLIP').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DLIP and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n SSM_idxs = u.select_atoms('resname SSM and name P').resnums\n pos_x_all_new = set_pos_back_to_box(SSM_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new)\n else : \n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DAPC').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DAPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n SSM_idxs = u.select_atoms('resname SSM and name P').resnums\n pos_x_all_new = set_pos_back_to_box(SSM_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new) \n else:\n if len(resnames [resnames =='DLIP']) > 0:\n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DLIP').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DLIP and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new)\n else:\n pos_x_all = u.select_atoms('all').positions[:,0]\n pos_DLIPC_x = u.select_atoms('resname DAPC').positions[:,0] #to put DLIP in the middle \n Lx = u.trajectory.ts.triclinic_dimensions[0][0]\n CMx = getCMx(pos_DLIPC_x[:],Lx)\n pos_x_all_new = pos_x_all - CMx + (0.5 * Lx)\n DIPC_idxs = u.select_atoms('resname DAPC 
and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DIPC_idxs, CMx, Lx, pos_x_all_new) \n DSPC_idxs = u.select_atoms('resname DSPC and name P').resnums\n pos_x_all_new = set_pos_back_to_box(DSPC_idxs, CMx, Lx, pos_x_all_new) \n CHL_idxs = u.select_atoms('resname CHL and name O2').resnums\n pos_x_all_new = set_pos_back_to_box(CHL_idxs, CMx, Lx, pos_x_all_new) \n \n foo1 = np.asarray([pos_x_all_new, u.select_atoms('all').positions[:,1], \n u.select_atoms('all').positions[:,2]])\n foo2=foo1.transpose()\n \n u.atoms.positions = foo2\n \n w.write(u.atoms)\n \nprint('Re-centered trajectory written')"
] | [
[
"numpy.sin",
"numpy.arctan2",
"numpy.cos"
]
] |
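The record above embeds a trajectory-recentering script whose commented header walks through the Bai & Breen periodic center-of-mass construction (map coordinates to angles, average on the unit circle, map the mean angle back). As a reading aid, here is a minimal, self-contained sketch of that construction in plain NumPy; `periodic_com_1d` and the example box length are hypothetical, so treat this as an illustration of the algorithm rather than the MDAnalysis recentering code itself.

```python
# Minimal sketch (illustrative only): the 1-D periodic center-of-mass mapping
# described in the script header above -- map positions to angles on a unit
# circle, average in 2-D, then map the mean angle back into [0, Lx).
import numpy as np

def periodic_com_1d(x, Lx):
    """Center of mass of positions x in a 1-D periodic box of length Lx."""
    theta = 2.0 * np.pi * np.asarray(x, dtype=float) / Lx       # positions -> angles
    xi, zeta = np.cos(theta), np.sin(theta)                     # points on the unit circle
    theta_mean = np.arctan2(-zeta.mean(), -xi.mean()) + np.pi   # mean angle in (0, 2*pi]
    return Lx * theta_mean / (2.0 * np.pi)                      # angle -> length coordinate

# A cluster wrapped across the boundary of a 10-unit box: the naive mean is
# ~4.95, while the periodic center of mass is ~9.95.
print(periodic_com_1d([9.5, 9.8, 0.1, 0.4], 10.0))
```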
JaiWillems/ATC-Controller | [
"35352cecdef552881259bf887950751b054c7c7c"
] | [
"atc_controller.py"
] | [
"import math\nimport numpy as np\nimport pandas as pd\nimport random\nfrom typing import Tuple\n\n\n# Airspace constants.\nCONTROL_ZONE_RADIUS = 10 # Km.\nHOLDING_PATTERN_RADIUS = 1 # Km.\n\n\n# Runway Specifications.\nNUMBER_OF_RUNWAYS = 2 # Int.\nRUNWAY_SEPARATION = 0.5 # Km.\nRUNWAY_LENGTH = 0.5 # Km.\nRUNWAY_WIDTH = 0.1 # Km.\n\n\n# Aircraft constants.\nMAX_SPEED = 0.14 # km/s.\nMIN_SEPARATION = 0.1 # Km.\n\n\n# Other.\nTIME_STEP_FREQUENCY = 10 # Iterations per second.\nPOSIITION_TOLERANCE = 0.1 # Km.\nCURR_LANDING_AC = 0 # Aircraft identifier.\n\n\ndef _get_ac_heading(x: float, y: float) -> float:\n \"\"\"Return aircraft heading.\n\n Parameters\n ----------\n x : float\n Aicraft x-coordinate in Km.\n y : float\n Aircraft y-coordinate in Km.\n\n Returns\n -------\n float\n Return aircraft heading in radians.\n \"\"\"\n\n ac_position = np.array([x, y])\n num = np.sum(-ac_position * np.array([0, 1]))\n denom = np.linalg.norm(ac_position)\n ang = np.arccos(num / denom)\n ang = ang if x < 0 else 2 * math.pi - ang\n\n return ang\n\n\ndef _spawn_aircraft() -> Tuple[float, float, float, str]:\n \"\"\"Return aircraft on control zone boundary.\n\n Returns\n -------\n Tuple[float, float, float, str]\n Return a tuple containing the x-coordinate, y-coordinate, initial\n heading, and initial state of the spawned aircraft. The coordinates\n will be in km whereas the heading will be in degrees clockwise of\n North.\n\n Notes\n -----\n This function will randomly select an x-coordinate within the domain of the\n control zone and the sign of the y-coordinate. Using the equation of a\n circle with the the radius equal to the control zone radius, a y-coordinate\n is found such that the (x, y) pair lies on the control zone boundary.\n\n Examples\n --------\n With `CONTROL_ZONE_RADIUS = 10`, an example aircraft spawn event is as\n follows:\n\n >>> _spawn_aircraft()\n (5.486794024557362, 8.360328422501214, 3.7223764711048473)\n \"\"\"\n\n # Get aircraft coordinates.\n x = random.uniform(-CONTROL_ZONE_RADIUS, CONTROL_ZONE_RADIUS)\n y = math.sqrt(CONTROL_ZONE_RADIUS ** 2 - x ** 2)\n y = y if random.randint(0, 1) else -y\n\n ang = _get_ac_heading(x, y)\n\n return x, y, ang, \"A\"\n\n\ndef _initialize_aircraft_info(t_sim: float, ac_spawn_rate: float) -> pd.DataFrame:\n \"\"\"Generate aircraft information database.\n\n Parameters\n ----------\n t_sim : float\n Simulation time in seconds.\n ac_spawn_rate : float\n Aircraft spawn rate in aircraft per second. Values less then or equal\n to one are acceptable.\n\n Returns\n -------\n pd.DataFrame\n Aircraft data base containing a column for each aircraft and row for\n each position reporting cycle.\n\n Notes\n -----\n The resolution of the data limits viable aircraft spawn rates. It is\n recommended to use spawn rates less then or equal to one.\n\n The columns will defult to zero for times that the aircraft is not\n initialized for or has not been propagated for. 
The data frame will also\n contain the inital positions at various time steps for aircraft added to\n suystem.\n \"\"\"\n\n timesteps_num = t_sim * TIME_STEP_FREQUENCY\n index = np.arange(0, t_sim, 1 / TIME_STEP_FREQUENCY)\n index = np.round(index, 1)\n\n aircraft_num = ac_spawn_rate * t_sim\n aircraft_arr = np.zeros((timesteps_num, aircraft_num), dtype=object)\n\n aircraft_info = pd.DataFrame(aircraft_arr, index=index)\n\n time_steps_per_ac = math.floor(timesteps_num / aircraft_num)\n for i in range(aircraft_num):\n\n ind = index[i * time_steps_per_ac]\n aircraft_info[i][ind] = _spawn_aircraft()\n\n return aircraft_info\n\n\ndef _spawn_runways() -> pd.DataFrame:\n \"\"\"Return `DataFrame` with runway locator points.\n\n Returns\n -------\n pd.DataFrame\n Data base containing the runway locator points.\n\n Notes\n -----\n This function will generate the data base containing points centered at\n both thresholds of each runway spawned. The runways form a parallel array\n of runways centered at the origin of the control zone spanning lengthwise.\n\n The columns of the data base are x1-coordinate, y1-coordinate,\n x2-coordinate, y2-coordinate, and the runway status value.\n\n Examples\n --------\n With `NUMBER_OF_RUNWAYS=5`.\n\n >>> _spawn_runways()\n 0 1 2 3 4\n 0 -1.2 -0.25 -1.2 0.25 0.0\n 1 -0.6 -0.25 -0.6 0.25 0.0\n 2 0.0 -0.25 0.0 0.25 0.0\n 3 0.6 -0.25 0.6 0.25 0.0\n 4 1.2 -0.25 1.2 0.25 0.0\n \"\"\"\n\n n = NUMBER_OF_RUNWAYS\n runway_data = np.empty((n, 5))\n\n if not n % 2:\n for i, N in enumerate(range(1, n, 2)):\n\n x = N * (RUNWAY_SEPARATION + RUNWAY_WIDTH) / 2\n y_base, y_top = - RUNWAY_LENGTH / 2, RUNWAY_LENGTH / 2\n\n runway_data[i, 0] = x\n runway_data[i, 1] = y_base\n runway_data[i, 2] = x\n runway_data[i, 3] = y_top\n runway_data[i, 4] = 0\n\n runway_data[i + n // 2, 0] = - x\n runway_data[i + n // 2, 1] = y_base\n runway_data[i + n // 2, 2] = - x\n runway_data[i + n // 2, 3] = y_top\n runway_data[i + n // 2, 4] = 0\n\n else:\n for i, N in enumerate(range(- n // 2 + 1, n // 2 + 1)):\n\n x = N * (RUNWAY_SEPARATION + RUNWAY_WIDTH)\n y_base, y_top = - RUNWAY_LENGTH / 2, RUNWAY_LENGTH / 2\n\n runway_data[i, 0] = x\n runway_data[i, 1] = y_base\n runway_data[i, 2] = x\n runway_data[i, 3] = y_top\n runway_data[i, 4] = 0\n\n runway_info = pd.DataFrame(runway_data)\n return runway_info\n\n\ndef _spawn_holding_patterns() -> pd.DataFrame:\n \"\"\"Return the locations of holding pattern centers.\n\n Returns\n -------\n pd.DataFrame\n Data base containing the holding pattern locator points.\n\n Notes\n -----\n This function calculates the number of risk-free holding patterns that can\n be added onto the circle of `radius` and determines the points.\n\n Examples\n --------\n With `CONTROL_ZONE_RADIUS=3`, `HOLDING_PATTERN_RADIUS=1`, and\n `MIN_SEPARATION=0.1`.\n\n >>> _spawn_holding_patterns()\n 0 1 2\n 0 0.000000 1.800000 0.0\n 1 1.711902 0.556231 0.0\n 2 1.058013 -1.456231 0.0\n 3 -1.058013 -1.456231 0.0\n 4 -1.711902 0.556231 0.0\n \"\"\"\n\n # Determine the number of holding patterns too create.\n radius = CONTROL_ZONE_RADIUS - HOLDING_PATTERN_RADIUS - 2 * MIN_SEPARATION\n c = 2 * math.pi * radius\n M = math.floor(c / (2 * HOLDING_PATTERN_RADIUS - MIN_SEPARATION))\n\n holding_pattern_data = np.empty((M, 3))\n\n # Calculate the angle between successive holding patterns.\n m = np.arange(0, M, 1)\n theta = 2 * math.pi * m / M\n\n # Determine holding patter center points.\n holding_pattern_data[:, 0] = radius * np.sin(theta)\n holding_pattern_data[:, 1] = radius * np.cos(theta)\n 
holding_pattern_data[:, 2] = 0\n\n holding_pattern_info = pd.DataFrame(holding_pattern_data)\n return holding_pattern_info\n\n\ndef _get_closest_control_zone(x: float, y: float, hp_info: pd.DataFrame) -> int:\n \"\"\"Return index for the closest holding pattern.\n\n Parameters\n ----------\n x : float\n Aircraft x-coordinate.\n y : float\n Aircraft y-coordinate.\n hp_info : pd.DataFrame\n Holding pattern information.\n\n Returns\n -------\n int\n `hp_info` index for the closest holding pattern.\n \"\"\"\n\n min_dist = CONTROL_ZONE_RADIUS\n min_ind = 0\n\n for ind in hp_info.index:\n hp_x = hp_info[0][ind]\n hp_y = hp_info[1][ind]\n\n dist = np.sqrt((x - hp_x) ** 2 + (y - hp_y) ** 2)\n\n if dist < min_dist:\n min_dist = dist\n min_ind = ind\n\n return min_ind\n\n\ndef _get_closest_threshold(x: float, y: float, rw_info: pd.DataFrame) -> Tuple[int, int, int]:\n \"\"\"Return index for the closest runway threshold.\n\n Parameters\n ----------\n x : float\n Aircraft x-coordinate.\n y : float\n Aircraft y-coordinate.\n rw_info : pd.DataFrame\n Runway threshold information.\n\n Returns\n -------\n Tuple[int, int, int]\n `rw_info` index for the closest runway threshold. The first value is\n the row index and the latter two are the x and y position column\n indices.\n \"\"\"\n\n min_dist = CONTROL_ZONE_RADIUS\n min_ind = (0, 0, 0)\n\n for ind in rw_info.index:\n\n hp_x = rw_info[0][ind]\n hp_y = rw_info[1][ind]\n\n dist1 = np.sqrt((x - hp_x) ** 2 + (y - hp_y) ** 2)\n\n hp_x = rw_info[2][ind]\n hp_y = rw_info[3][ind]\n\n dist2 = np.sqrt((x - hp_x) ** 2 + (y - hp_y) ** 2)\n\n if dist1 < min_dist:\n min_dist = dist1\n min_ind = (ind, 0, 1)\n elif dist2 < min_dist:\n min_dist = dist2\n min_ind = (ind, 3, 4)\n\n return min_ind\n\n\ndef _get_next_position(x: float, y: float, heading: float, state: str, hp_info:\n pd.DataFrame, rw_info: pd.DataFrame, ac: int,\n CURR_LANDING_AC) -> Tuple[float, float, float, str]:\n \"\"\"Calculate the aircrafts next position.\n\n Parameters\n ----------\n x : float\n Aircraft x-coordinate in Km.\n y : float\n Aircraft y-coordinate in Km.\n heading : float\n Aircraft heading in decimal radians.\n state : str\n Aircraft current state.\n hp_info : pd.DataFrame\n Holding pattern positions.\n rw_info : pd.DataFrame\n Runway position information.\n ac : int\n Aircraft identifier.\n CURR_LANDING_AC : [type]\n Current landing aircraft.\n\n Returns\n -------\n Tuple[float, float, float, str]\n Return the next x, y, heading, and state information.\n \"\"\"\n\n if state == \"A\":\n\n radius = np.sqrt(x ** 2 + y ** 2)\n\n min_R = CONTROL_ZONE_RADIUS - MIN_SEPARATION - POSIITION_TOLERANCE\n max_R = CONTROL_ZONE_RADIUS - MIN_SEPARATION + POSIITION_TOLERANCE\n\n if (min_R < radius) | (radius < max_R):\n\n hp_ind = _get_closest_control_zone(x, y, hp_info)\n\n if hp_info[2][hp_ind] == 0:\n\n state_new = \"C\"\n heading_new = _get_ac_heading(hp_info[0][hp_ind] - x, hp_info[1][hp_ind] - y)\n\n else:\n\n state_new = \"B\"\n heading_new = (hp_info[2][hp_ind] + np.pi / 2) % (2 * np.pi)\n\n else:\n\n state_new = \"A\"\n heading_new = heading\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n elif state == \"B\":\n\n hp_ind = _get_closest_control_zone(x, y, hp_info)\n\n if hp_info[2][hp_ind] == 0:\n\n state_new = \"C\"\n heading_new = _get_ac_heading(hp_info[0][hp_ind] - x, hp_info[1][hp_ind] - y)\n\n else:\n\n state_new = \"B\"\n heading_new = heading - MAX_SPEED / (TIME_STEP_FREQUENCY * 
(CONTROL_ZONE_RADIUS - MIN_SEPARATION))\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n elif state == \"C\":\n\n hp_ind = _get_closest_control_zone(x, y, hp_info)\n dist = np.sqrt((hp_info[0][hp_ind] - x) ** 2 + (hp_info[1][hp_ind] - y) ** 2)\n\n if dist < POSIITION_TOLERANCE + 1:\n\n state_new = \"D\"\n heading_new = heading\n\n x_new = x\n y_new = y\n\n else:\n\n state_new = \"C\"\n heading_new = heading\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n elif state == \"D\":\n\n if ac == CURR_LANDING_AC:\n\n row_ind, x_ind, y_ind = _get_closest_threshold(x, y, rw_info)\n\n state_new = \"E\"\n heading_new = _get_ac_heading(rw_info[x_ind][row_ind] - x, rw_info[y_ind][row_ind] - y)\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n else:\n\n state_new = \"D\"\n heading_new = heading\n\n x_new = x\n y_new = y\n\n elif state == \"E\":\n\n row_ind, x_ind, y_ind = _get_closest_threshold(x, y, rw_info)\n dist = np.sqrt((rw_info[x_ind][row_ind] - x) ** 2 + (rw_info[y_ind][row_ind] - y) ** 2)\n\n if (dist < MIN_SEPARATION) | (CURR_LANDING_AC == ac):\n\n x_ind = 0 if x_ind == 2 else 2\n y_ind = 1 if y_ind == 3 else 3\n\n CURR_LANDING_AC += 1\n\n state_new = \"F\"\n heading_new = _get_ac_heading(rw_info[x_ind][row_ind] - x, rw_info[y_ind][row_ind] - y)\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n else:\n\n state_new = \"E\"\n heading_new = heading\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n elif state == \"F\":\n\n row_ind, x_ind, y_ind = _get_closest_threshold(x, y, rw_info)\n dist = np.sqrt((rw_info[x_ind][row_ind] - x) ** 2 + (rw_info[y_ind][row_ind] - y) ** 2)\n\n if abs(dist - RUNWAY_LENGTH / 2) < POSIITION_TOLERANCE:\n x_new, y_new, heading_new, state_new = -1, -1, -1, \"END\"\n\n else:\n\n state_new = \"F\"\n heading_new = heading\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n else:\n\n x_new, y_new, heading_new, state_new = -1, -1, -1, \"END\"\n\n return x_new, y_new, heading_new, state_new\n\n\ndef _propagate_positions(hp_info: pd.DataFrame, rw_info: pd.DataFrame, ac_info: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Propagate aircraft positions.\n\n Parameters\n ----------\n hp_info : pd.DataFrame\n Holding pattern informmation.\n rw_info : pd.DataFrame\n Runway position information.\n ac_info : pd.DataFrame\n Aircraft position and state information.\n\n Returns\n -------\n pd.DataFrame\n Propagated aircraft position and state information.\n \"\"\"\n\n for time in ac_info.index:\n for ac in ac_info.columns:\n\n tup = ac_info[ac][time]\n if isinstance(tup, tuple):\n x, y, heading, state = tup\n tup_new = _get_next_position(x, y, heading, state, hp_info, rw_info, ac, CURR_LANDING_AC)\n\n if np.round(time + 1 / TIME_STEP_FREQUENCY, 1) < ac_info.index.max():\n ac_info[ac][np.round(time + 1 / TIME_STEP_FREQUENCY, 1)] = tup_new\n\n return ac_info\n\n\ndef simulate(t_sim: float, ac_spawn_rate: float) -> None:\n \"\"\"Simulate autonomous ATC system.\n\n This function will simulate the autonomous ATC system using the program\n defined constants and 
the simulation parameters. The propagated position\n data for the aircraft are exported as a csv file.\n\n Parameters\n ----------\n t_sim : float\n Simulation time in seconds.\n ac_spawn_rate : float\n Aircraft spawn rate in aircraft per second. Values less then or equal\n to one are acceptable.\n \"\"\"\n\n hp_info = _spawn_holding_patterns()\n rw_info = _spawn_runways()\n ac_info = _initialize_aircraft_info(t_sim, ac_spawn_rate)\n\n propagated_data = _propagate_positions(hp_info, rw_info, ac_info)\n propagated_data.to_csv(\"propagated_simulation_data.csv\")\n"
] | [
[
"numpy.array",
"numpy.arccos",
"numpy.linalg.norm",
"numpy.empty",
"numpy.zeros",
"numpy.sin",
"numpy.round",
"pandas.DataFrame",
"numpy.arange",
"numpy.cos",
"numpy.sqrt"
]
] |
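The `_spawn_aircraft` / `_get_ac_heading` pair in the record above amounts to picking a random point on the control-zone circle and steering it toward the origin, with headings measured clockwise from North. A condensed sketch of that idea follows; it substitutes an `atan2` bearing for the original arccos/sign-flip form (equivalent modulo 2*pi, but that substitution is my assumption), and `spawn_on_boundary` is a hypothetical name.

```python
# Condensed sketch (illustrative only): spawn a point on the control-zone
# boundary and compute a compass heading (clockwise from North) that points
# at the origin. atan2 is used here instead of the arccos/sign-flip form in
# the original; the two agree modulo 2*pi.
import math
import random

CONTROL_ZONE_RADIUS = 10.0  # km, mirroring the constant above

def spawn_on_boundary(radius=CONTROL_ZONE_RADIUS):
    x = random.uniform(-radius, radius)
    y = math.sqrt(radius ** 2 - x ** 2) * (1 if random.randint(0, 1) else -1)
    heading = math.atan2(-x, -y) % (2.0 * math.pi)  # bearing of the vector toward (0, 0)
    return x, y, heading, "A"                       # "A" = initial state, as in the original

print(spawn_on_boundary())
```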
MgeeeeK/splot | [
"746a7cf8dc80758c1e87c148a36362ed8f0cc85e"
] | [
"splot/_viz_esda_mpl.py"
] | [
"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport geopandas as gpd\nimport numpy as np\nfrom libpysal.weights.contiguity import Queen\nfrom libpysal.weights.spatial_lag import lag_spatial\nimport seaborn as sbn\nfrom esda.moran import (Moran_Local, Moran_Local_BV,\n Moran, Moran_BV)\nimport warnings\nfrom spreg import OLS\n\nfrom matplotlib import patches, colors\n\nfrom ._viz_utils import (mask_local_auto, moran_hot_cold_spots,\n splot_colors)\n\n\"\"\"\nLightweight visualizations for esda using Matplotlib and Geopandas\n\nTODO\n* geopandas plotting, change round shapes in legends to boxes\n* prototype moran_facet using `seaborn.FacetGrid`\n\"\"\"\n\n__author__ = (\"Stefanie Lumnitz <[email protected]>\")\n\n\ndef _create_moran_fig_ax(ax, figsize, aspect_equal):\n \"\"\"\n Creates matplotlib figure and axes instances\n for plotting moran visualizations. Adds common viz design.\n \"\"\"\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n else:\n fig = ax.get_figure()\n \n ax.spines['left'].set_position(('axes', -0.05))\n ax.spines['right'].set_color('none')\n ax.spines['bottom'].set_position(('axes', -0.05))\n ax.spines['top'].set_color('none')\n if aspect_equal is True:\n ax.set_aspect('equal')\n else:\n ax.spines['left'].set_smart_bounds(True)\n ax.spines['bottom'].set_smart_bounds(True)\n return fig, ax\n\n\ndef moran_scatterplot(moran, zstandard=True, p=None,\n aspect_equal=True, ax=None,\n scatter_kwds=None, fitline_kwds=None):\n \"\"\"\n Moran Scatterplot\n \n Parameters\n ----------\n moran : esda.moran instance\n Values of Moran's I Global, Bivariate and Local\n Autocorrelation Statistics\n zstandard : bool, optional\n If True, Moran Scatterplot will show z-standardized attribute and\n spatial lag values. Default =True.\n p : float, optional\n If given, the p-value threshold for significance\n for Local Autocorrelation analysis. Points will be colored by\n significance. By default it will not be colored.\n Default =None.\n aspect_equal : bool, optional\n If True, Axes will show the same aspect or visual proportions\n for Moran Scatterplot.\n ax : Matplotlib Axes instance, optional\n If given, the Moran plot will be created inside this axis.\n Default =None.\n scatter_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points.\n Default =None.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline.\n Default =None.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Moran scatterplot figure\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n \n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> import geopandas as gpd\n >>> from esda.moran import (Moran, Moran_BV,\n ... Moran_Local, Moran_Local_BV)\n >>> from splot.esda import moran_scatterplot\n \n Load data and calculate weights\n \n >>> link_to_data = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link_to_data)\n >>> x = gdf['Suicids'].values\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n \n Calculate esda.moran Objects\n \n >>> moran = Moran(y, w)\n >>> moran_bv = Moran_BV(y, x, w)\n >>> moran_loc = Moran_Local(y, w)\n >>> moran_loc_bv = Moran_Local_BV(y, x, w)\n \n Plot\n \n >>> fig, axs = plt.subplots(2, 2, figsize=(10,10),\n ... 
subplot_kw={'aspect': 'equal'})\n >>> moran_scatterplot(moran, p=0.05, ax=axs[0,0])\n >>> moran_scatterplot(moran_loc, p=0.05, ax=axs[1,0])\n >>> moran_scatterplot(moran_bv, p=0.05, ax=axs[0,1])\n >>> moran_scatterplot(moran_loc_bv, p=0.05, ax=axs[1,1])\n >>> plt.show()\n \n \"\"\"\n if isinstance(moran, Moran):\n if p is not None:\n warnings.warn('`p` is only used for plotting `esda.moran.Moran_Local`\\n'\n 'or `Moran_Local_BV` objects')\n fig, ax = _moran_global_scatterplot(moran=moran, zstandard=zstandard,\n ax=ax, aspect_equal=aspect_equal,\n scatter_kwds=scatter_kwds,\n fitline_kwds=fitline_kwds)\n elif isinstance(moran, Moran_BV):\n if p is not None:\n warnings.warn('`p` is only used for plotting `esda.moran.Moran_Local`\\n'\n 'or `Moran_Local_BV` objects')\n fig, ax = _moran_bv_scatterplot(moran_bv=moran, ax=ax,\n aspect_equal=aspect_equal,\n scatter_kwds=scatter_kwds,\n fitline_kwds=fitline_kwds)\n elif isinstance(moran, Moran_Local):\n fig, ax = _moran_loc_scatterplot(moran_loc=moran, zstandard=zstandard,\n ax=ax, p=p, aspect_equal=aspect_equal,\n scatter_kwds=scatter_kwds,\n fitline_kwds=fitline_kwds)\n elif isinstance(moran, Moran_Local_BV):\n fig, ax = _moran_loc_bv_scatterplot(moran_loc_bv=moran, ax=ax,\n p=p, aspect_equal=aspect_equal,\n scatter_kwds=scatter_kwds,\n fitline_kwds=fitline_kwds)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n return fig, ax\n\n\ndef _moran_global_scatterplot(moran, zstandard=True,\n aspect_equal=True, ax=None,\n scatter_kwds=None, fitline_kwds=None):\n \"\"\"\n Global Moran's I Scatterplot.\n\n Parameters\n ----------\n moran : esda.moran.Moran instance\n Values of Moran's I Global Autocorrelation Statistics\n zstandard : bool, optional\n If True, Moran Scatterplot will show z-standardized attribute and\n spatial lag values. Default =True.\n aspect_equal : bool, optional\n If True, Axes will show the same aspect or visual proportions.\n ax : Matplotlib Axes instance, optional\n If given, the Moran plot will be created inside this axis.\n Default =None.\n scatter_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points.\n Default =None.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline.\n Default =None.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Moran scatterplot figure\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> import geopandas as gpd\n >>> from esda.moran import Moran\n >>> from splot.esda import moran_scatterplot\n \n Load data and calculate weights\n \n >>> link_to_data = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link_to_data)\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n \n Calculate Global Moran\n \n >>> moran = Moran(y, w)\n \n plot\n \n >>> moran_scatterplot(moran)\n >>> plt.show()\n \n customize plot\n \n >>> fig, ax = moran_scatterplot(moran, zstandard=False,\n ... 
fitline_kwds=dict(color='#4393c3'))\n >>> ax.set_xlabel('Donations')\n >>> plt.show()\n \n \"\"\"\n # to set default as an empty dictionary that is later filled with defaults\n if scatter_kwds is None:\n scatter_kwds = dict()\n if fitline_kwds is None:\n fitline_kwds = dict()\n\n # define customization defaults\n scatter_kwds.setdefault('alpha', 0.6)\n scatter_kwds.setdefault('color', splot_colors['moran_base'])\n scatter_kwds.setdefault('s', 40)\n \n fitline_kwds.setdefault('alpha', 0.9)\n fitline_kwds.setdefault('color', splot_colors['moran_fit'])\n \n # get fig and ax\n fig, ax = _create_moran_fig_ax(ax, figsize=(7, 7),\n aspect_equal=aspect_equal)\n \n # set labels\n ax.set_xlabel('Attribute')\n ax.set_ylabel('Spatial Lag')\n ax.set_title('Moran Scatterplot' +\n ' (' + str(round(moran.I, 2)) + ')')\n\n # plot and set standards\n if zstandard is True:\n lag = lag_spatial(moran.w, moran.z)\n fit = OLS(moran.z[:, None], lag[:, None])\n # plot\n ax.scatter(moran.z, lag, **scatter_kwds)\n ax.plot(lag, fit.predy, **fitline_kwds)\n # v- and hlines\n ax.axvline(0, alpha=0.5, color='k', linestyle='--')\n ax.axhline(0, alpha=0.5, color='k', linestyle='--')\n else:\n lag = lag_spatial(moran.w, moran.y)\n b, a = np.polyfit(moran.y, lag, 1)\n # plot\n ax.scatter(moran.y, lag, **scatter_kwds)\n ax.plot(moran.y, a + b*moran.y, **fitline_kwds)\n # dashed vert at mean of the attribute\n ax.vlines(moran.y.mean(), lag.min(), lag.max(), alpha=0.5,\n linestyle='--')\n # dashed horizontal at mean of lagged attribute\n ax.hlines(lag.mean(), moran.y.min(), moran.y.max(), alpha=0.5,\n linestyle='--')\n return fig, ax\n\n\ndef plot_moran_simulation(moran, aspect_equal=True,\n ax=None, fitline_kwds=None,\n **kwargs):\n \"\"\"\n Global Moran's I simulated reference distribution.\n\n Parameters\n ----------\n moran : esda.moran.Moran instance\n Values of Moran's I Global Autocorrelation Statistics\n aspect_equal : bool, optional\n If True, Axes of Moran Scatterplot will show the same\n aspect or visual proportions.\n ax : Matplotlib Axes instance, optional\n If given, the Moran plot will be created inside this axis.\n Default =None.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the\n vertical moran fitline. 
Default =None.\n **kwargs : keyword arguments, optional\n Keywords used for creating and designing the figure,\n passed to seaborn.kdeplot.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Simulated reference distribution figure\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> import geopandas as gpd\n >>> from esda.moran import Moran\n >>> from splot.esda import plot_moran_simulation\n \n Load data and calculate weights\n \n >>> link_to_data = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link_to_data)\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n \n Calculate Global Moran\n \n >>> moran = Moran(y, w)\n \n plot\n \n >>> plot_moran_simulation(moran)\n >>> plt.show()\n \n customize plot\n \n >>> plot_moran_simulation(moran, fitline_kwds=dict(color='#4393c3'))\n >>> plt.show()\n \n \"\"\"\n # to set default as an empty dictionary that is later filled with defaults\n if fitline_kwds is None:\n fitline_kwds = dict()\n\n figsize = kwargs.pop('figsize', (7, 7))\n \n # get fig and ax\n fig, ax = _create_moran_fig_ax(ax, figsize,\n aspect_equal=aspect_equal)\n\n # plot distribution\n shade = kwargs.pop('shade', True)\n color = kwargs.pop('color', splot_colors['moran_base'])\n sbn.kdeplot(moran.sim, shade=shade, color=color, ax=ax, **kwargs)\n\n # customize plot\n fitline_kwds.setdefault('color', splot_colors['moran_fit'])\n ax.vlines(moran.I, 0, 1, **fitline_kwds)\n ax.vlines(moran.EI, 0, 1)\n ax.set_title('Reference Distribution')\n ax.set_xlabel('Moran I: ' + str(round(moran.I, 2)))\n return fig, ax\n\n\ndef plot_moran(moran, zstandard=True, aspect_equal=True,\n scatter_kwds=None, fitline_kwds=None, **kwargs):\n \"\"\"\n Global Moran's I simulated reference distribution and scatterplot.\n\n Parameters\n ----------\n moran : esda.moran.Moran instance\n Values of Moran's I Global Autocorrelation Statistics\n zstandard : bool, optional\n If True, Moran Scatterplot will show z-standardized attribute and\n spatial lag values. Default =True.\n aspect_equal : bool, optional\n If True, Axes of Moran Scatterplot will show the same\n aspect or visual proportions.\n scatter_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points.\n Default =None.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline\n and vertical fitline. 
Default =None.\n **kwargs : keyword arguments, optional\n Keywords used for creating and designing the figure,\n passed to seaborne.kdeplot.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Moran scatterplot and reference distribution figure\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> import geopandas as gpd\n >>> from esda.moran import Moran\n >>> from splot.esda import plot_moran\n \n Load data and calculate weights\n \n >>> link_to_data = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link_to_data)\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n \n Calculate Global Moran\n \n >>> moran = Moran(y, w)\n \n plot\n \n >>> plot_moran(moran)\n >>> plt.show()\n \n customize plot\n \n >>> plot_moran(moran, zstandard=False,\n ... fitline_kwds=dict(color='#4393c3'))\n >>> plt.show()\n \n \"\"\"\n figsize = kwargs.pop('figsize', (10, 4))\n fig, axs = plt.subplots(1, 2, figsize=figsize,\n subplot_kw={'aspect': 'equal'})\n plot_moran_simulation(moran, ax=axs[0], fitline_kwds=fitline_kwds, **kwargs)\n moran_scatterplot(moran, zstandard=zstandard, ax=axs[1],\n scatter_kwds=scatter_kwds, fitline_kwds=fitline_kwds)\n axs[0].set(aspect=\"auto\")\n if aspect_equal is True:\n axs[1].set_aspect(\"equal\", \"datalim\")\n else: \n axs[1].set_aspect(\"auto\")\n return fig, axs\n\n\ndef _moran_bv_scatterplot(moran_bv, ax=None, aspect_equal=True,\n scatter_kwds=None, fitline_kwds=None):\n \"\"\"\n Bivariate Moran Scatterplot.\n\n Parameters\n ----------\n moran_bv : esda.moran.Moran_BV instance\n Values of Bivariate Moran's I Autocorrelation Statistics\n ax : Matplotlib Axes instance, optional\n If given, the Moran plot will be created inside this axis.\n Default =None.\n aspect_equal : bool, optional\n If True, Axes of Moran Scatterplot will show the same\n aspect or visual proportions.\n scatter_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points.\n Default =None.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline.\n Default =None.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Bivariate moran scatterplot figure\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> import geopandas as gpd\n >>> from esda.moran import Moran_BV\n >>> from splot.esda import moran_scatterplot\n \n Load data and calculate weights\n \n >>> link_to_data = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link_to_data)\n >>> x = gdf['Suicids'].values\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n \n Calculate Bivariate Moran\n \n >>> moran_bv = Moran_BV(x, y, w)\n \n plot\n \n >>> moran_scatterplot(moran_bv)\n >>> plt.show()\n \n customize plot\n \n >>> moran_scatterplot(moran_bv,\n ... 
fitline_kwds=dict(color='#4393c3'))\n >>> plt.show()\n \n \"\"\"\n # to set default as an empty dictionary that is later filled with defaults\n if scatter_kwds is None:\n scatter_kwds = dict()\n if fitline_kwds is None:\n fitline_kwds = dict()\n\n # define customization\n scatter_kwds.setdefault('alpha', 0.6)\n scatter_kwds.setdefault('color', splot_colors['moran_base'])\n scatter_kwds.setdefault('s', 40)\n \n fitline_kwds.setdefault('alpha', 0.9)\n fitline_kwds.setdefault('color', splot_colors['moran_fit'])\n\n # get fig and ax\n fig, ax = _create_moran_fig_ax(ax, figsize=(7,7),\n aspect_equal=aspect_equal)\n \n # set labels\n ax.set_xlabel('Attribute X')\n ax.set_ylabel('Spatial Lag of Y')\n ax.set_title('Bivariate Moran Scatterplot' +\n ' (' + str(round(moran_bv.I, 2)) + ')')\n\n # plot and set standards\n lag = lag_spatial(moran_bv.w, moran_bv.zy)\n fit = OLS(moran_bv.zy[:, None], lag[:, None])\n # plot\n ax.scatter(moran_bv.zx, lag, **scatter_kwds)\n ax.plot(lag, fit.predy, **fitline_kwds)\n # v- and hlines\n ax.axvline(0, alpha=0.5, color='k', linestyle='--')\n ax.axhline(0, alpha=0.5, color='k', linestyle='--')\n return fig, ax\n\n\ndef plot_moran_bv_simulation(moran_bv, ax=None, aspect_equal=True,\n fitline_kwds=None, **kwargs):\n \"\"\"\n Bivariate Moran's I simulated reference distribution.\n\n Parameters\n ----------\n moran_bv : esda.moran.Moran_BV instance\n Values of Bivariate Moran's I Autocorrelation Statistics\n ax : Matplotlib Axes instance, optional\n If given, the Moran plot will be created inside this axis.\n Default =None.\n aspect_equal : bool, optional\n If True, Axes of Moran Scatterplot will show the same\n aspect or visual proportions.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the\n vertical moran fitline. Default =None.\n **kwargs : keyword arguments, optional\n Keywords used for creating and designing the figure,\n passed to seaborne.kdeplot.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Bivariate moran reference distribution figure\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> import geopandas as gpd\n >>> from esda.moran import Moran_BV\n >>> from splot.esda import plot_moran_bv_simulation\n \n Load data and calculate weights\n \n >>> link_to_data = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link_to_data)\n >>> x = gdf['Suicids'].values\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n \n Calculate Bivariate Moran\n \n >>> moran_bv = Moran_BV(x, y, w)\n \n plot\n \n >>> plot_moran_bv_simulation(moran_bv)\n >>> plt.show()\n \n customize plot\n \n >>> plot_moran_bv_simulation(moran_bv,\n ... 
fitline_kwds=dict(color='#4393c3'))\n >>> plt.show()\n \n \"\"\"\n # to set default as an empty dictionary that is later filled with defaults\n if fitline_kwds is None:\n fitline_kwds = dict()\n\n figsize = kwargs.pop('figsize', (7, 7))\n\n # get fig and ax\n fig, ax = _create_moran_fig_ax(ax, figsize,\n aspect_equal=aspect_equal)\n\n # plot distribution\n shade = kwargs.pop('shade', True)\n color = kwargs.pop('color', splot_colors['moran_base'])\n sbn.kdeplot(moran_bv.sim, shade=shade, color=color, ax=ax, **kwargs)\n\n # customize plot\n fitline_kwds.setdefault('color', splot_colors['moran_fit'])\n ax.vlines(moran_bv.I, 0, 1, **fitline_kwds)\n ax.vlines(moran_bv.EI_sim, 0, 1)\n ax.set_title('Reference Distribution')\n ax.set_xlabel('Bivariate Moran I: ' + str(round(moran_bv.I, 2)))\n return fig, ax\n\n\ndef plot_moran_bv(moran_bv, aspect_equal=True,\n scatter_kwds=None, fitline_kwds=None, **kwargs):\n \"\"\"\n Bivariate Moran's I simulated reference distribution and scatterplot.\n\n Parameters\n ----------\n moran_bv : esda.moran.Moran_BV instance\n Values of Bivariate Moran's I Autocorrelation Statistics\n aspect_equal : bool, optional\n If True, Axes of Moran Scatterplot will show the same\n aspect or visual proportions.\n scatter_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points.\n Default =None.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline\n and vertical fitline. Default =None.\n **kwargs : keyword arguments, optional\n Keywords used for creating and designing the figure,\n passed to seaborne.kdeplot.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Bivariate moran scatterplot and reference distribution figure\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> import geopandas as gpd\n >>> from esda.moran import Moran_BV\n >>> from splot.esda import plot_moran_bv\n \n Load data and calculate weights\n \n >>> link_to_data = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link_to_data)\n >>> x = gdf['Suicids'].values\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n \n Calculate Bivariate Moran\n \n >>> moran_bv = Moran_BV(x, y, w)\n \n plot\n \n >>> plot_moran_bv(moran_bv)\n >>> plt.show()\n \n customize plot\n \n >>> plot_moran_bv(moran_bv, fitline_kwds=dict(color='#4393c3'))\n >>> plt.show()\n \n \"\"\"\n figsize = kwargs.pop('figsize', (10, 4))\n fig, axs = plt.subplots(1, 2, figsize=figsize,\n subplot_kw={'aspect': 'equal'})\n plot_moran_bv_simulation(moran_bv, ax=axs[0], fitline_kwds=fitline_kwds,\n **kwargs)\n moran_scatterplot(moran_bv, ax=axs[1], aspect_equal=aspect_equal,\n scatter_kwds=scatter_kwds, fitline_kwds=fitline_kwds)\n axs[0].set(aspect=\"auto\")\n if aspect_equal is True:\n axs[1].set_aspect(\"equal\", \"datalim\")\n else:\n axs[1].set(aspect=\"auto\")\n return fig, axs\n\n\ndef _moran_loc_scatterplot(moran_loc, zstandard=True, p=None, aspect_equal=True,\n ax=None, scatter_kwds=None, fitline_kwds=None):\n \"\"\"\n Moran Scatterplot with option of coloring of Local Moran Statistics\n\n Parameters\n ----------\n moran_loc : esda.moran.Moran_Local instance\n Values of Moran's I Local Autocorrelation Statistics\n p : float, optional\n If given, the p-value threshold for significance. 
Points will\n be colored by significance. By default it will not be colored.\n Default =None.\n aspect_equal : bool, optional\n If True, Axes of Moran Scatterplot will show the same\n aspect or visual proportions.\n ax : Matplotlib Axes instance, optional\n If given, the Moran plot will be created inside this axis.\n Default =None.\n scatter_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points.\n Default =None.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline.\n Default =None.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Moran Local scatterplot figure\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> import geopandas as gpd\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> from esda.moran import Moran_Local\n >>> from splot.esda import moran_scatterplot\n \n Load data and calculate Moran Local statistics\n \n >>> link = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link)\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n >>> m = Moran_Local(y, w)\n \n plot\n \n >>> moran_scatterplot(m)\n >>> plt.show()\n \n customize plot\n \n >>> moran_scatterplot(m, p=0.05,\n ... fitline_kwds=dict(color='#4393c3'))\n >>> plt.show()\n \n \"\"\"\n # to set default as an empty dictionary that is later filled with defaults\n if scatter_kwds is None:\n scatter_kwds = dict()\n if fitline_kwds is None:\n fitline_kwds = dict()\n\n if p is not None:\n if not isinstance(moran_loc, Moran_Local):\n raise ValueError(\"`moran_loc` is not a\\n \" +\n \"esda.moran.Moran_Local instance\")\n if 'color' in scatter_kwds or 'c' in scatter_kwds or 'cmap' in scatter_kwds:\n warnings.warn('To change the color use cmap with a colormap of 5,\\n' +\n ' color defines the LISA category')\n\n # colors\n spots = moran_hot_cold_spots(moran_loc, p)\n hmap = colors.ListedColormap(['#bababa', '#d7191c', '#abd9e9',\n '#2c7bb6', '#fdae61'])\n\n # define customization\n scatter_kwds.setdefault('alpha', 0.6)\n scatter_kwds.setdefault('s', 40)\n fitline_kwds.setdefault('alpha', 0.9)\n\n # get fig and ax\n fig, ax = _create_moran_fig_ax(ax, figsize=(7,7),\n aspect_equal=aspect_equal)\n \n # set labels\n ax.set_xlabel('Attribute')\n ax.set_ylabel('Spatial Lag')\n ax.set_title('Moran Local Scatterplot')\n\n # plot and set standards\n if zstandard is True:\n lag = lag_spatial(moran_loc.w, moran_loc.z)\n fit = OLS(moran_loc.z[:, None], lag[:, None])\n # v- and hlines\n ax.axvline(0, alpha=0.5, color='k', linestyle='--')\n ax.axhline(0, alpha=0.5, color='k', linestyle='--')\n if p is not None:\n fitline_kwds.setdefault('color', 'k')\n scatter_kwds.setdefault('cmap', hmap)\n scatter_kwds.setdefault('c', spots)\n ax.plot(lag, fit.predy, **fitline_kwds)\n ax.scatter(moran_loc.z, fit.predy,\n **scatter_kwds)\n else:\n scatter_kwds.setdefault('color', splot_colors['moran_base'])\n fitline_kwds.setdefault('color', splot_colors['moran_fit'])\n ax.plot(lag, fit.predy, **fitline_kwds)\n ax.scatter(moran_loc.z, fit.predy, **scatter_kwds)\n else:\n lag = lag_spatial(moran_loc.w, moran_loc.y)\n b, a = np.polyfit(moran_loc.y, lag, 1)\n # dashed vert at mean of the attribute\n ax.vlines(moran_loc.y.mean(), lag.min(), lag.max(), alpha=0.5,\n linestyle='--')\n # dashed horizontal at mean of lagged attribute\n ax.hlines(lag.mean(), moran_loc.y.min(), 
moran_loc.y.max(), alpha=0.5,\n linestyle='--')\n if p is not None:\n fitline_kwds.setdefault('color', 'k')\n scatter_kwds.setdefault('cmap', hmap)\n scatter_kwds.setdefault('c', spots)\n ax.plot(moran_loc.y, a + b*moran_loc.y, **fitline_kwds)\n ax.scatter(moran_loc.y, lag, **scatter_kwds)\n else:\n scatter_kwds.setdefault('c', splot_colors['moran_base'])\n fitline_kwds.setdefault('color', splot_colors['moran_fit'])\n ax.plot(moran_loc.y, a + b*moran_loc.y, **fitline_kwds)\n ax.scatter(moran_loc.y, lag, **scatter_kwds)\n return fig, ax\n\n\ndef lisa_cluster(moran_loc, gdf, p=0.05, ax=None,\n legend=True, legend_kwds=None, **kwargs):\n \"\"\"\n Create a LISA Cluster map\n\n Parameters\n ----------\n moran_loc : esda.moran.Moran_Local or Moran_Local_BV instance\n Values of Moran's Local Autocorrelation Statistic\n gdf : geopandas dataframe instance\n The Dataframe containing information to plot. Note that `gdf` will be\n modified, so calling functions should use a copy of the user\n provided `gdf`. (either using gdf.assign() or gdf.copy())\n p : float, optional\n The p-value threshold for significance. Points will\n be colored by significance.\n ax : matplotlib Axes instance, optional\n Axes in which to plot the figure in multiple Axes layout.\n Default = None\n legend : boolean, optional\n If True, legend for maps will be depicted. Default = True\n legend_kwds : dict, optional\n Dictionary to control legend formatting options. Example:\n ``legend_kwds={'loc': 'upper left', 'bbox_to_anchor': (0.92, 1.05)}``\n Default = None\n **kwargs : keyword arguments, optional\n Keywords designing and passed to geopandas.GeoDataFrame.plot().\n\n Returns\n -------\n fig : matplotlip Figure instance\n Figure of LISA cluster map\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> import geopandas as gpd\n >>> from esda.moran import Moran_Local\n >>> from splot.esda import lisa_cluster\n\n Data preparation and statistical analysis\n \n >>> link = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link)\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n >>> moran_loc = Moran_Local(y, w)\n\n Plotting\n \n >>> fig = lisa_cluster(moran_loc, gdf)\n >>> plt.show()\n \n \"\"\"\n # retrieve colors5 and labels from mask_local_auto\n _, colors5, _, labels = mask_local_auto(moran_loc, p=p)\n\n # define ListedColormap\n hmap = colors.ListedColormap(colors5)\n\n if ax is None:\n figsize = kwargs.pop('figsize', None)\n fig, ax = plt.subplots(1, figsize=figsize)\n else:\n fig = ax.get_figure()\n\n gdf.assign(cl=labels).plot(column='cl', categorical=True,\n k=2, cmap=hmap, linewidth=0.1, ax=ax,\n edgecolor='white', legend=legend,\n legend_kwds=legend_kwds, **kwargs)\n ax.set_axis_off()\n ax.set_aspect('equal')\n return fig, ax\n\n\ndef plot_local_autocorrelation(moran_loc, gdf, attribute, p=0.05,\n region_column=None, mask=None,\n mask_color='#636363', quadrant=None,\n aspect_equal=True,\n legend=True, scheme='Quantiles',\n cmap='YlGnBu', figsize=(15, 4),\n scatter_kwds=None, fitline_kwds=None):\n '''\n Produce three-plot visualisation of Moran Scatteprlot, LISA cluster\n and Choropleth maps, with Local Moran region and quadrant masking\n\n Parameters\n ----------\n moran_loc : esda.moran.Moran_Local or Moran_Local_BV instance\n Values of Moran's Local Autocorrelation Statistic\n gdf : 
geopandas dataframe\n The Dataframe containing information to plot the two maps.\n attribute : str\n Column name of attribute which should be depicted in Choropleth map.\n p : float, optional\n The p-value threshold for significance. Points and polygons will\n be colored by significance. Default = 0.05.\n region_column: string, optional\n Column name containing mask region of interest. Default = None\n mask: str, optional\n Identifier or name of the region to highlight. Default = None\n mask_color: str, optional\n Color of mask. Default = '#636363'\n quadrant : int, optional\n Quadrant 1-4 in scatterplot masking values in LISA cluster and\n Choropleth maps. Default = None\n aspect_equal : bool, optional\n If True, Axes of Moran Scatterplot will show the same\n aspect or visual proportions.\n figsize: tuple, optional\n W, h of figure. Default = (15,4)\n legend: boolean, optional\n If True, legend for maps will be depicted. Default = True\n scheme: str, optional\n Name of PySAL classifier to be used. Default = 'Quantiles'\n cmap: str, optional\n Name of matplotlib colormap used for plotting the Choropleth.\n Default = 'YlGnBu'\n scatter_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points.\n Default =None.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline\n in the scatterplot. Default =None.\n\n Returns\n -------\n fig : Matplotlib figure instance\n Moran Scatterplot, LISA cluster map and Choropleth.\n axs : list of Matplotlib axes\n Lisat of Matplotlib axes plotted.\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> import geopandas as gpd\n >>> from esda.moran import Moran_Local\n >>> from splot.esda import plot_local_autocorrelation\n\n Data preparation and analysis\n \n >>> link = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link)\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n >>> moran_loc = Moran_Local(y, w)\n\n Plotting with quadrant mask and region mask\n \n >>> fig = plot_local_autocorrelation(moran_loc, gdf, 'Donatns', p=0.05,\n ... region_column='Dprtmnt',\n ... 
mask=['Ain'], quadrant=1)\n >>> plt.show()\n \n '''\n fig, axs = plt.subplots(1, 3, figsize=figsize,\n subplot_kw={'aspect': 'equal', 'adjustable':'datalim'})\n # Moran Scatterplot\n moran_scatterplot(moran_loc, p=p, ax=axs[0],\n scatter_kwds=scatter_kwds, fitline_kwds=fitline_kwds)\n if aspect_equal is True:\n axs[0].set_aspect('equal', 'datalim')\n else:\n axs[0].set_aspect('auto')\n\n # Lisa cluster map\n # TODO: Fix legend_kwds: display boxes instead of points\n lisa_cluster(moran_loc, gdf, p=p, ax=axs[1], legend=legend,\n legend_kwds={'loc': 'upper left',\n 'bbox_to_anchor': (0.92, 1.05)})\n axs[1].set_aspect('equal')\n\n # Choropleth for attribute\n gdf.plot(column=attribute, scheme=scheme, cmap=cmap,\n legend=legend, legend_kwds={'loc': 'upper left',\n 'bbox_to_anchor': (0.92, 1.05)},\n ax=axs[2], alpha=1)\n axs[2].set_axis_off()\n axs[2].set_aspect('equal')\n\n # MASKING QUADRANT VALUES\n if quadrant is not None:\n # Quadrant masking in Scatterplot\n mask_angles = {1: 0, 2: 90, 3: 180, 4: 270} # rectangle angles\n # We don't want to change the axis data limits, so use the current ones\n xmin, xmax = axs[0].get_xlim()\n ymin, ymax = axs[0].get_ylim()\n # We are rotating, so we start from 0 degrees and\n # figured out the right dimensions for the rectangles for other angles\n mask_width = {1: abs(xmax),\n 2: abs(ymax),\n 3: abs(xmin),\n 4: abs(ymin)}\n mask_height = {1: abs(ymax),\n 2: abs(xmin),\n 3: abs(ymin),\n 4: abs(xmax)}\n axs[0].add_patch(patches.Rectangle((0, 0), width=mask_width[quadrant],\n height=mask_height[quadrant],\n angle=mask_angles[quadrant],\n color='#E5E5E5', zorder=-1, alpha=0.8))\n # quadrant selection in maps\n non_quadrant = ~(moran_loc.q == quadrant)\n mask_quadrant = gdf[non_quadrant]\n df_quadrant = gdf.iloc[~non_quadrant]\n union2 = df_quadrant.unary_union.boundary\n\n # LISA Cluster mask and cluster boundary\n with warnings.catch_warnings(): # temorarily surpress geopandas warning\n warnings.filterwarnings('ignore', category=UserWarning)\n mask_quadrant.plot(column=attribute, scheme=scheme, color='white',\n ax=axs[1], alpha=0.7, zorder=1)\n gpd.GeoSeries([union2]).plot(linewidth=1, ax=axs[1], color='#E5E5E5')\n\n # CHOROPLETH MASK\n with warnings.catch_warnings(): # temorarily surpress geopandas warning\n warnings.filterwarnings('ignore', category=UserWarning)\n mask_quadrant.plot(column=attribute, scheme=scheme, color='white',\n ax=axs[2], alpha=0.7, zorder=1)\n gpd.GeoSeries([union2]).plot(linewidth=1, ax=axs[2], color='#E5E5E5')\n\n # REGION MASKING\n if region_column is not None:\n # masking inside axs[0] or Moran Scatterplot\n ix = gdf[region_column].isin(mask)\n df_mask = gdf[ix]\n x_mask = moran_loc.z[ix]\n y_mask = lag_spatial(moran_loc.w, moran_loc.z)[ix]\n axs[0].plot(x_mask, y_mask, color=mask_color, marker='o',\n markersize=14, alpha=.8, linestyle=\"None\", zorder=-1)\n\n # masking inside axs[1] or Lisa cluster map\n union = df_mask.unary_union.boundary\n gpd.GeoSeries([union]).plot(linewidth=2, ax=axs[1], color=mask_color)\n\n # masking inside axs[2] or Chloropleth\n gpd.GeoSeries([union]).plot(linewidth=2, ax=axs[2], color=mask_color)\n return fig, axs\n\n\ndef _moran_loc_bv_scatterplot(moran_loc_bv, p=None,\n aspect_equal=True, ax=None,\n scatter_kwds=None,\n fitline_kwds=None):\n \"\"\"\n Moran Bivariate Scatterplot with option of coloring of Local Moran Statistics\n\n Parameters\n ----------\n moran_loc : esda.moran.Moran_Local_BV instance\n Values of Moran's I Local Autocorrelation Statistics\n p : float, optional\n If given, the 
p-value threshold for significance. Points will\n be colored by significance. By default it will not be colored.\n Default =None.\n aspect_equal : bool, optional\n If True, Axes of Moran Scatterplot will show the same\n aspect or visual proportions.\n ax : Matplotlib Axes instance, optional\n If given, the Moran plot will be created inside this axis.\n Default =None.\n scatter_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points.\n Default =None.\n fitline_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline.\n Default =None.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Bivariate Moran Local scatterplot figure\n ax : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> import geopandas as gpd\n >>> from libpysal.weights.contiguity import Queen\n >>> from libpysal import examples\n >>> from esda.moran import Moran_Local_BV\n >>> from splot.esda import moran_scatterplot\n \n Load data and calculate Moran Local statistics\n \n >>> link = examples.get_path('Guerry.shp')\n >>> gdf = gpd.read_file(link)\n >>> x = gdf['Suicids'].values\n >>> y = gdf['Donatns'].values\n >>> w = Queen.from_dataframe(gdf)\n >>> w.transform = 'r'\n >>> m = Moran_Local_BV(x, y, w)\n \n Plot\n \n >>> moran_scatterplot(m)\n >>> plt.show()\n \n Customize plot\n \n >>> moran_scatterplot(m, p=0.05,\n ... fitline_kwds=dict(color='#4393c3')))\n >>> plt.show()\n \n \"\"\"\n # to set default as an empty dictionary that is later filled with defaults\n if scatter_kwds is None:\n scatter_kwds = dict()\n if fitline_kwds is None:\n fitline_kwds = dict()\n\n if p is not None:\n if not isinstance(moran_loc_bv, Moran_Local_BV):\n raise ValueError(\"`moran_loc_bv` is not a\\n\" +\n \"esda.moran.Moran_Local_BV instance\")\n if 'color' in scatter_kwds or 'c' in scatter_kwds or 'cmap' in scatter_kwds:\n warnings.warn(\"To change the color use cmap with a colormap of 5,\\n\" +\n \"c defines the LISA category, color will interfere with c\")\n\n # colors\n spots_bv = moran_hot_cold_spots(moran_loc_bv, p)\n hmap = colors.ListedColormap(['#bababa', '#d7191c', '#abd9e9',\n '#2c7bb6', '#fdae61'])\n\n # define customization\n scatter_kwds.setdefault('alpha', 0.6)\n scatter_kwds.setdefault('s', 40)\n fitline_kwds.setdefault('alpha', 0.9)\n\n # get fig and ax\n fig, ax = _create_moran_fig_ax(ax, figsize=(7,7),\n aspect_equal=aspect_equal)\n \n # set labels\n ax.set_xlabel('Attribute')\n ax.set_ylabel('Spatial Lag')\n ax.set_title('Moran BV Local Scatterplot')\n\n # plot and set standards\n lag = lag_spatial(moran_loc_bv.w, moran_loc_bv.zy)\n fit = OLS(moran_loc_bv.zy[:, None], lag[:, None])\n # v- and hlines\n ax.axvline(0, alpha=0.5, color='k', linestyle='--')\n ax.axhline(0, alpha=0.5, color='k', linestyle='--')\n if p is not None:\n fitline_kwds.setdefault('color', 'k')\n scatter_kwds.setdefault('cmap', hmap)\n scatter_kwds.setdefault('c', spots_bv)\n ax.plot(lag, fit.predy, **fitline_kwds)\n ax.scatter(moran_loc_bv.zx, fit.predy,\n **scatter_kwds)\n else:\n scatter_kwds.setdefault('color', splot_colors['moran_base'])\n fitline_kwds.setdefault('color', splot_colors['moran_fit'])\n ax.plot(lag, fit.predy, **fitline_kwds)\n ax.scatter(moran_loc_bv.zy, fit.predy, **scatter_kwds)\n return fig, ax\n\n\ndef moran_facet(moran_matrix, figsize=(16,12),\n scatter_bv_kwds=None, fitline_bv_kwds=None,\n scatter_glob_kwds=dict(color='#737373'), fitline_glob_kwds=None):\n 
\"\"\"\n Moran Facet visualization.\n Includes BV Morans and Global Morans on the diagonal.\n \n Parameters\n ----------\n moran_matrix : esda.moran.Moran_BV_matrix instance\n Dictionary of Moran_BV objects\n figsize : tuple, optional\n W, h of figure. Default =(16,12)\n scatter_bv_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points of\n off-diagonal Moran_BV plots.\n Default =None.\n fitline_bv_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline of\n off-diagonal Moran_BV plots.\n Default =None.\n scatter_glob_kwds : keyword arguments, optional\n Keywords used for creating and designing the scatter points of\n diagonal Moran plots.\n Default =None.\n fitline_glob_kwds : keyword arguments, optional\n Keywords used for creating and designing the moran fitline of\n diagonal Moran plots.\n Default =None.\n\n Returns\n -------\n fig : Matplotlib Figure instance\n Bivariate Moran Local scatterplot figure\n axarr : matplotlib Axes instance\n Axes in which the figure is plotted\n\n Examples\n --------\n Imports\n \n >>> import matplotlib.pyplot as plt\n >>> import libpysal as lp\n >>> import numpy as np\n >>> import geopandas as gpd\n >>> from esda.moran import Moran_BV_matrix\n >>> from splot.esda import moran_facet\n \n Load data and calculate Moran Local statistics\n \n >>> f = gpd.read_file(lp.examples.get_path(\"sids2.dbf\"))\n >>> varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']\n >>> vars = [np.array(f[var]) for var in varnames]\n >>> w = lp.io.open(lp.examples.get_path(\"sids2.gal\")).read()\n >>> moran_matrix = Moran_BV_matrix(vars, w, varnames = varnames)\n \n Plot\n \n >>> fig, axarr = moran_facet(moran_matrix)\n >>> plt.show()\n \n Customize plot\n \n >>> fig, axarr = moran_facet(moran_matrix,\n ... fitline_bv_kwds=dict(color='#4393c3'))\n >>> plt.show()\n \n \"\"\"\n nrows = int(np.sqrt(len(moran_matrix))) + 1\n ncols = nrows\n \n fig, axarr = plt.subplots(nrows, ncols, figsize=figsize,\n sharey=True, sharex=True)\n fig.suptitle('Moran Facet')\n \n for row in range(nrows):\n for col in range(ncols):\n if row == col:\n global_m = Moran(moran_matrix[row, (row+1) % 4].zy,\n moran_matrix[row, (row+1) % 4].w)\n _moran_global_scatterplot(global_m, ax= axarr[row,col],\n scatter_kwds=scatter_glob_kwds,\n fitline_kwds=fitline_glob_kwds)\n axarr[row, col].set_facecolor('#d9d9d9')\n else:\n _moran_bv_scatterplot(moran_matrix[row,col],\n ax=axarr[row,col], \n scatter_kwds=scatter_bv_kwds,\n fitline_kwds=fitline_bv_kwds)\n \n axarr[row, col].spines['bottom'].set_visible(False)\n axarr[row, col].spines['left'].set_visible(False)\n if row == nrows - 1:\n axarr[row, col].set_xlabel(str(\n moran_matrix[(col+1)%4, col].varnames['x']).format(col))\n axarr[row, col].spines['bottom'].set_visible(True)\n else:\n axarr[row, col].set_xlabel('')\n \n if col == 0:\n axarr[row, col].set_ylabel(('Spatial Lag of '+str(\n moran_matrix[row, (row+1)%4].varnames['y'])).format(row))\n axarr[row, col].spines['left'].set_visible(True)\n else:\n axarr[row, col].set_ylabel('')\n \n axarr[row, col].set_title('') \n plt.tight_layout()\n return fig, axarr"
] | [
[
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.polyfit",
"matplotlib.pyplot.tight_layout",
"matplotlib.colors.ListedColormap"
]
] |
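The `moran_facet` function in the record above sizes its subplot grid from the number of entries returned by `Moran_BV_matrix`. A minimal sketch of just that calculation, using a placeholder dictionary in place of a real `Moran_BV_matrix` result (the placeholder is an assumption for illustration only):

```python
import numpy as np

def facet_grid_shape(moran_matrix):
    """Grid size used by moran_facet: floor of the square root of the
    number of off-diagonal Moran_BV entries, plus one."""
    nrows = int(np.sqrt(len(moran_matrix))) + 1
    return nrows, nrows

# Four variables give 4 * 3 = 12 bivariate pairs, hence a 4 x 4 facet grid,
# matching the sids2 example in the docstring.
pairs = {(i, j): None for i in range(4) for j in range(4) if i != j}
print(facet_grid_shape(pairs))  # (4, 4)
```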
Mario-Kart-Felix/adanet | [
"5e08fad3f501836bb3e2e22316704a10b5f4507f"
] | [
"research/improve_nas/trainer/optimizer.py"
] | [
"# Lint as: python3\n\"\"\"Definition of optimizers and learning rate schedules.\n\nCopyright 2019 The AdaNet Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport functools\n\nimport tensorflow.compat.v1 as tf\n\n\nclass LearningRateSchedule(object):\n \"\"\"A learning rate decay schedule interface.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def apply(self, learning_rate):\n \"\"\"Applies the learning rate decay schedule to the given learning rate.\n\n Args:\n learning_rate: Float `Tensor` learning rate.\n\n Returns:\n Float `Tensor` learning rate with applied decay schedule.\n \"\"\"\n\n\nclass Constant(LearningRateSchedule):\n \"\"\"A constant schedule.\"\"\"\n\n def apply(self, learning_rate):\n \"\"\"See `LearningRateSchedule`.\"\"\"\n\n return learning_rate\n\n\nclass Cosine(LearningRateSchedule):\n \"\"\"Cosine.\"\"\"\n\n def __init__(self, decay_steps, alpha):\n \"\"\"Returns a `Cosine` instance.\n\n Args:\n decay_steps: Number of steps to decay over.\n alpha: Minimum learning rate value as a fraction of learning_rate.\n\n Returns:\n A `Cosine` instance.\n \"\"\"\n\n self._decay_fn = functools.partial(\n tf.train.cosine_decay, decay_steps=decay_steps, alpha=alpha)\n\n def apply(self, learning_rate):\n \"\"\"See `LearningRateSchedule`.\"\"\"\n\n # Start at -1 since we increment before reading.\n global_step = tf.get_variable(\"decay_step\", initializer=-1, trainable=False)\n increment_op = tf.assign_add(global_step, 1)\n with tf.control_dependencies([increment_op]):\n learning_rate = self._decay_fn(\n learning_rate=learning_rate, global_step=global_step.read_value())\n return learning_rate\n\n\ndef fn_with_name(optimizer_name,\n learning_rate_schedule=\"constant\",\n cosine_decay_steps=None):\n \"\"\"Returns an optimizer_fn with the given name.\n\n Args:\n optimizer_name: Optimizer name string for identifying the optimizer. Either\n 'adagrad', 'adam', 'momentum', or 'sgd'.\n learning_rate_schedule: Type of learning rate schedule to use. 
Opened for\n future extensions.\n cosine_decay_steps: See `Cosine`.\n\n Returns:\n An optimizer_fn which takes a `learning_rate` scalar `Tensor` argument and\n returns an `Optimizer` instance.\n\n Raises:\n ValueError: If `optimizer_name` is invalid.\n \"\"\"\n\n optimizers = {\n \"adagrad\": tf.train.AdagradOptimizer,\n \"adam\": tf.train.AdamOptimizer,\n \"lazy_adam\": tf.contrib.opt.LazyAdamOptimizer,\n \"momentum\": functools.partial(tf.train.MomentumOptimizer, momentum=.9),\n \"rmsprop\": tf.train.RMSPropOptimizer,\n \"sgd\": tf.train.GradientDescentOptimizer,\n }\n optimizer_name = optimizer_name.lower()\n if optimizer_name not in optimizers:\n raise ValueError(\"Invalid optimizer '{}'\".format(optimizer_name))\n optimizer_fn = optimizers[optimizer_name]\n schedules = {\n \"constant\":\n Constant(),\n \"cosine\":\n Cosine(decay_steps=cosine_decay_steps, alpha=0.0),\n }\n schedule_name = learning_rate_schedule.lower()\n if schedule_name not in schedules:\n raise ValueError(\n \"Invalid learning_rate_schedule '{}'\".format(schedule_name))\n schedule = schedules[schedule_name]\n\n def _optimizer_with_schedule(learning_rate):\n learning_rate = schedule.apply(learning_rate)\n optimizer = optimizer_fn(learning_rate)\n return optimizer, learning_rate\n return _optimizer_with_schedule\n"
] | [
[
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.assign_add",
"tensorflow.compat.v1.get_variable"
]
] |
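The `fn_with_name` helper in the optimizer record above is a name-to-constructor dispatch combined with a learning-rate schedule. A minimal sketch of the same dispatch pattern, with plain callables standing in for the `tf.train` optimizers so the snippet runs without TensorFlow (the stand-in functions are assumptions, not part of the original module, which returns real `Optimizer` instances):

```python
import functools

# Name -> constructor lookup mirroring fn_with_name in optimizer.py above.
def sgd(learning_rate):
    return ("sgd", learning_rate)

def momentum(learning_rate, momentum):
    return ("momentum", learning_rate, momentum)

OPTIMIZERS = {
    "sgd": sgd,
    # the original partially applies momentum=0.9 the same way
    "momentum": functools.partial(momentum, momentum=0.9),
}

def optimizer_fn_with_name(name):
    name = name.lower()
    if name not in OPTIMIZERS:
        raise ValueError("Invalid optimizer '{}'".format(name))
    return OPTIMIZERS[name]

print(optimizer_fn_with_name("Momentum")(0.01))  # ('momentum', 0.01, 0.9)
```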
jinjh0123/pytorch_geometric | [
"0a4aaa0fd02f0a7b39ac6aa88e3c392e9672a673"
] | [
"torch_geometric/nn/conv/signed_conv.py"
] | [
"from typing import Union\n\nimport torch\nfrom torch import Tensor\nfrom torch_sparse import SparseTensor, matmul\n\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.nn.dense.linear import Linear\nfrom torch_geometric.typing import Adj, PairTensor\n\n\nclass SignedConv(MessagePassing):\n r\"\"\"The signed graph convolutional operator from the `\"Signed Graph\n Convolutional Network\" <https://arxiv.org/abs/1808.06354>`_ paper\n\n .. math::\n \\mathbf{x}_v^{(\\textrm{pos})} &= \\mathbf{\\Theta}^{(\\textrm{pos})}\n \\left[ \\frac{1}{|\\mathcal{N}^{+}(v)|} \\sum_{w \\in \\mathcal{N}^{+}(v)}\n \\mathbf{x}_w , \\mathbf{x}_v \\right]\n\n \\mathbf{x}_v^{(\\textrm{neg})} &= \\mathbf{\\Theta}^{(\\textrm{neg})}\n \\left[ \\frac{1}{|\\mathcal{N}^{-}(v)|} \\sum_{w \\in \\mathcal{N}^{-}(v)}\n \\mathbf{x}_w , \\mathbf{x}_v \\right]\n\n if :obj:`first_aggr` is set to :obj:`True`, and\n\n .. math::\n \\mathbf{x}_v^{(\\textrm{pos})} &= \\mathbf{\\Theta}^{(\\textrm{pos})}\n \\left[ \\frac{1}{|\\mathcal{N}^{+}(v)|} \\sum_{w \\in \\mathcal{N}^{+}(v)}\n \\mathbf{x}_w^{(\\textrm{pos})}, \\frac{1}{|\\mathcal{N}^{-}(v)|}\n \\sum_{w \\in \\mathcal{N}^{-}(v)} \\mathbf{x}_w^{(\\textrm{neg})},\n \\mathbf{x}_v^{(\\textrm{pos})} \\right]\n\n \\mathbf{x}_v^{(\\textrm{neg})} &= \\mathbf{\\Theta}^{(\\textrm{pos})}\n \\left[ \\frac{1}{|\\mathcal{N}^{+}(v)|} \\sum_{w \\in \\mathcal{N}^{+}(v)}\n \\mathbf{x}_w^{(\\textrm{neg})}, \\frac{1}{|\\mathcal{N}^{-}(v)|}\n \\sum_{w \\in \\mathcal{N}^{-}(v)} \\mathbf{x}_w^{(\\textrm{pos})},\n \\mathbf{x}_v^{(\\textrm{neg})} \\right]\n\n otherwise.\n In case :obj:`first_aggr` is :obj:`False`, the layer expects :obj:`x` to be\n a tensor where :obj:`x[:, :in_channels]` denotes the positive node features\n :math:`\\mathbf{X}^{(\\textrm{pos})}` and :obj:`x[:, in_channels:]` denotes\n the negative node features :math:`\\mathbf{X}^{(\\textrm{neg})}`.\n\n Args:\n in_channels (int): Size of each input sample, or :obj:`-1` to derive\n the size from the first input(s) to the forward method.\n out_channels (int): Size of each output sample.\n first_aggr (bool): Denotes which aggregation formula to use.\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. 
(default: :obj:`True`)\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.conv.MessagePassing`.\n\n Shapes:\n - **input:**\n node features :math:`(|\\mathcal{V}|, F_{in})` or\n :math:`((|\\mathcal{V_s}|, F_{in}), (|\\mathcal{V_t}|, F_{in}))`\n if bipartite,\n positive edge indices :math:`(2, |\\mathcal{E}^{(+)}|)`,\n negative edge indices :math:`(2, |\\mathcal{E}^{(-)}|)`\n - **outputs:** node features :math:`(|\\mathcal{V}|, F_{out})` or\n :math:`(|\\mathcal{V_t}|, F_{out})` if bipartite\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int, first_aggr: bool,\n bias: bool = True, **kwargs):\n\n kwargs.setdefault('aggr', 'mean')\n super().__init__(**kwargs)\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.first_aggr = first_aggr\n\n if first_aggr:\n self.lin_pos_l = Linear(in_channels, out_channels, False)\n self.lin_pos_r = Linear(in_channels, out_channels, bias)\n self.lin_neg_l = Linear(in_channels, out_channels, False)\n self.lin_neg_r = Linear(in_channels, out_channels, bias)\n else:\n self.lin_pos_l = Linear(2 * in_channels, out_channels, False)\n self.lin_pos_r = Linear(in_channels, out_channels, bias)\n self.lin_neg_l = Linear(2 * in_channels, out_channels, False)\n self.lin_neg_r = Linear(in_channels, out_channels, bias)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lin_pos_l.reset_parameters()\n self.lin_pos_r.reset_parameters()\n self.lin_neg_l.reset_parameters()\n self.lin_neg_r.reset_parameters()\n\n def forward(self, x: Union[Tensor, PairTensor], pos_edge_index: Adj,\n neg_edge_index: Adj):\n \"\"\"\"\"\"\n\n if isinstance(x, Tensor):\n x: PairTensor = (x, x)\n\n # propagate_type: (x: PairTensor)\n if self.first_aggr:\n\n out_pos = self.propagate(pos_edge_index, x=x, size=None)\n out_pos = self.lin_pos_l(out_pos)\n out_pos += self.lin_pos_r(x[1])\n\n out_neg = self.propagate(neg_edge_index, x=x, size=None)\n out_neg = self.lin_neg_l(out_neg)\n out_neg += self.lin_neg_r(x[1])\n\n return torch.cat([out_pos, out_neg], dim=-1)\n\n else:\n F_in = self.in_channels\n\n out_pos1 = self.propagate(pos_edge_index, size=None,\n x=(x[0][..., :F_in], x[1][..., :F_in]))\n out_pos2 = self.propagate(neg_edge_index, size=None,\n x=(x[0][..., F_in:], x[1][..., F_in:]))\n out_pos = torch.cat([out_pos1, out_pos2], dim=-1)\n out_pos = self.lin_pos_l(out_pos)\n out_pos += self.lin_pos_r(x[1][..., :F_in])\n\n out_neg1 = self.propagate(pos_edge_index, size=None,\n x=(x[0][..., F_in:], x[1][..., F_in:]))\n out_neg2 = self.propagate(neg_edge_index, size=None,\n x=(x[0][..., :F_in], x[1][..., :F_in]))\n out_neg = torch.cat([out_neg1, out_neg2], dim=-1)\n out_neg = self.lin_neg_l(out_neg)\n out_neg += self.lin_neg_r(x[1][..., F_in:])\n\n return torch.cat([out_pos, out_neg], dim=-1)\n\n def message(self, x_j: Tensor) -> Tensor:\n return x_j\n\n def message_and_aggregate(self, adj_t: SparseTensor,\n x: PairTensor) -> Tensor:\n adj_t = adj_t.set_value(None, layout=None)\n return matmul(adj_t, x[0], reduce=self.aggr)\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}({self.in_channels}, '\n f'{self.out_channels}, first_aggr={self.first_aggr})')\n"
] | [
[
"torch.cat"
]
] |
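A usage sketch for the `SignedConv` record above, assuming `torch` and `torch_geometric` are installed; the node count and edges are made up, and the shapes follow the class docstring (`first_aggr=True` emits positive and negative embeddings concatenated, which a `first_aggr=False` layer then consumes):

```python
import torch
from torch_geometric.nn import SignedConv

x = torch.randn(6, 16)                                 # 6 nodes, 16 features
pos_edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])  # positive edges
neg_edge_index = torch.tensor([[3, 4], [4, 5]])        # negative edges

conv1 = SignedConv(16, 32, first_aggr=True)   # emits [pos | neg] -> 64 dims
conv2 = SignedConv(32, 32, first_aggr=False)  # consumes the concatenated input

h = conv1(x, pos_edge_index, neg_edge_index)
print(h.shape)  # torch.Size([6, 64])
h = conv2(h, pos_edge_index, neg_edge_index)
print(h.shape)  # torch.Size([6, 64])
```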
ds-chae/animeHAN | [
"dcf87b40fdaf54e1977ddd05c20697a2d3f38bd0"
] | [
"main.py"
] | [
"from __future__ import print_function\nimport os\nimport time\nimport random\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\n\n### load project files\nimport models\nfrom models import weights_init\n\nparser = argparse.ArgumentParser()\n#parser.add_argument('--dataRoot', required=True, help='path to dataset')\nparser.add_argument('--dataRoot', default='D:/uni-trend/faces', help='path to dataset')\nparser.add_argument('--workers', type=int, default=2, help='number of data loading workers')\nparser.add_argument('--batchSize', type=int, default=64, help='input batch size')\nparser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')\nparser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')\nparser.add_argument('--ngf', type=int, default=64)\nparser.add_argument('--ndf', type=int, default=64)\nparser.add_argument('--niter', type=int, default=500, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--cuda' , default=True, action='store_true', help='enables cuda')\nparser.add_argument('--ngpu' , type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--outDir', default='.', help='folder to output images and model checkpoints')\nparser.add_argument('--model', type=int, default=1, help='1 for dcgan, 2 for illustrationGAN-like-GAN')\nparser.add_argument('--d_labelSmooth', type=float, default=0, help='for D, use soft label \"1-labelSmooth\" for real samples')\nparser.add_argument('--n_extra_layers_d', type=int, default=0, help='number of extra conv layers in D')\nparser.add_argument('--n_extra_layers_g', type=int, default=1, help='number of extra conv layers in G')\nparser.add_argument('--binary', action='store_true', help='z from bernoulli distribution, with prob=0.5')\n\n# simply prefer this way\n# arg_list = [\n# '--dataRoot', '/home/jielei/data/danbooru-faces',\n# '--workers', '12',\n# '--batchSize', '128',\n# '--imageSize', '64',\n# '--nz', '100',\n# '--ngf', '64',\n# '--ndf', '64',\n# '--niter', '80',\n# '--lr', '0.0002',\n# '--beta1', '0.5',\n# '--cuda', \n# '--ngpu', '1',\n# '--netG', '',\n# '--netD', '',\n# '--outDir', './results',\n# '--model', '1',\n# '--d_labelSmooth', '0.1', # 0.25 from imporved-GAN paper \n# '--n_extra_layers_d', '0',\n# '--n_extra_layers_g', '1', # in the sense that generator should be more powerful\n# ]\n\nopt = parser.parse_args()\n# opt = parser.parse_args(arg_list)\nprint(opt)\nopt.outDir='./results'\n\ntry:\n os.makedirs(opt.outDir)\nexcept OSError:\n pass\n\nopt.manualSeed = random.randint(1,10000) # fix seed, a scalar\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\ncudnn.benchmark = True\n\nif torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n \nnc = 3\nngpu = opt.ngpu\nnz = opt.nz\nngf = opt.ngf\nndf = opt.ndf\nn_extra_d 
= opt.n_extra_layers_d\nn_extra_g = opt.n_extra_layers_g\n\ndataset = dset.ImageFolder(\n root=opt.dataRoot,\n transform=transforms.Compose([\n transforms.Scale(opt.imageSize),\n # transforms.CenterCrop(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)), # bring images to (-1,1)\n ])\n)\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=opt.workers)\n\n# load models \nif opt.model == 1:\n netG = models._netG_1(ngpu, nz, nc, ngf, n_extra_g)\n netD = models._netD_1(ngpu, nz, nc, ndf, n_extra_d)\nelif opt.model == 2:\n netG = models._netG_2(ngpu, nz, nc, ngf)\n netD = models._netD_2(ngpu, nz, nc, ndf)\n\nnetG.apply(weights_init)\nif opt.netG != '':\n netG.load_state_dict(torch.load(opt.netG))\n#print(netG)\n\nnetD.apply(weights_init)\nif opt.netD != '':\n netD.load_state_dict(torch.load(opt.netD))\n#print(netD)\n\ncriterion = nn.BCELoss()\ncriterion_MSE = nn.MSELoss()\n\ninput = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)\nnoise = torch.FloatTensor(opt.batchSize, nz, 1, 1)\nif opt.binary:\n bernoulli_prob = torch.FloatTensor(opt.batchSize, nz, 1, 1).fill_(0.5)\n fixed_noise = torch.bernoulli(bernoulli_prob)\nelse:\n fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)\nlabel = torch.FloatTensor(opt.batchSize)\nreal_label = 1\nfake_label = 0\n\nif opt.cuda:\n netD.cuda()\n netG.cuda()\n criterion.cuda()\n criterion_MSE.cuda()\n input, label = input.cuda(), label.cuda()\n noise, fixed_noise = noise.cuda(), fixed_noise.cuda()\n \ninput = Variable(input)\nlabel = Variable(label)\nnoise = Variable(noise)\nfixed_noise = Variable(fixed_noise)\n\n# setup optimizer\noptimizerD = optim.Adam(netD.parameters(), lr = opt.lr, betas = (opt.beta1, 0.999))\noptimizerG = optim.Adam(netG.parameters(), lr = opt.lr, betas = (opt.beta1, 0.999))\n\ndef train():\n for epoch in range(opt.niter):\n print('epoch %d' % epoch)\n for i, data in enumerate(dataloader, 0):\n start_iter = time.time()\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n # train with real\n netD.zero_grad()\n real_cpu, _ = data\n batch_size = real_cpu.size(0)\n# input.data.resize_(real_cpu.size()).copy_(real_cpu)\n# label.data.resize_(batch_size).fill_(real_label - opt.d_labelSmooth) # use smooth label for discriminator\n with torch.no_grad():\n input.resize_(real_cpu.size()).copy_(real_cpu)\n label.resize_(batch_size).fill_(real_label - opt.d_labelSmooth) # use smooth label for discriminator\n\n output = netD(input)\n errD_real = criterion(output, label)\n errD_real.backward()\n D_x = output.data.mean()\n # train with fake\n# noise.data.resize_(batch_size, nz, 1, 1)\n with torch.no_grad():\n noise.resize_(batch_size, nz, 1, 1)\n if opt.binary:\n bernoulli_prob.resize_(noise.data.size())\n noise.data.copy_(2*(torch.bernoulli(bernoulli_prob)-0.5))\n else:\n noise.data.normal_(0, 1)\n fake,z_prediction = netG(noise)\n label.data.fill_(fake_label)\n output = netD(fake.detach()) # add \".detach()\" to avoid backprop through G\n errD_fake = criterion(output, label)\n errD_fake.backward() # gradients for fake/real will be accumulated\n D_G_z1 = output.data.mean()\n errD = errD_real + errD_fake\n optimizerD.step() # .step() can be called once the gradients are computed\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n label.data.fill_(real_label) # fake labels are 
real for generator cost\n output = netD(fake)\n errG = criterion(output, label)\n# errG.backward(retain_variables=True) # True if backward through the graph for the second time\n errG.backward(retain_graph=True) # True if backward through the graph for the second time, retain_variables is changed to retain_graph at 0.3\n if opt.model == 2: # with z predictor\n errG_z = criterion_MSE(z_prediction, noise)\n errG_z.backward()\n D_G_z2 = output.data.mean()\n optimizerG.step()\n\n end_iter = time.time()\n# print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f Elapsed %.2f s'\n# % (epoch, opt.niter, i, len(dataloader),\n# errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2, end_iter-start_iter))\n if i % 100 == 0:\n # the first 64 samples from the mini-batch are saved.\n vutils.save_image(real_cpu[0:64,:,:,:],\n '%s/real_samples.png' % opt.outDir, nrow=8)\n fake,_ = netG(fixed_noise)\n vutils.save_image(fake.data[0:64,:,:,:],\n '%s/fake_samples_epoch_%03d.png' % (opt.outDir, epoch), nrow=8)\n if epoch % 1 == 0:\n # do checkpointing\n torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outDir, epoch))\n torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outDir, epoch))\n\nif __name__ == '__main__':\n train()\n"
] | [
[
"torch.nn.MSELoss",
"torch.autograd.Variable",
"torch.FloatTensor",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.bernoulli",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.load"
]
] |
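The GAN training script in the record above draws its latent vectors either from a standard normal or, with `--binary`, from a Bernoulli distribution rescaled to ±1. A small sketch of just those two noise options (the batch size and `nz` values here are arbitrary):

```python
# Sketch of the two latent-noise options in main.py above: Gaussian by
# default, {-1, +1} Bernoulli noise when --binary is set.
import torch

batch_size, nz = 4, 100
gaussian_z = torch.empty(batch_size, nz, 1, 1).normal_(0, 1)
bernoulli_prob = torch.full((batch_size, nz, 1, 1), 0.5)
binary_z = 2 * (torch.bernoulli(bernoulli_prob) - 0.5)   # values in {-1, +1}
print(gaussian_z.shape, binary_z.unique())
```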
EnSlavingBlair/Coincidences | [
"bba3435d9f0530822ddd2ab48a6a0bb84aa95f15"
] | [
"venv/lib/python2.7/site-packages/astropy/modeling/tests/test_functional_models.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom __future__ import (absolute_import, unicode_literals, division,\n print_function)\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n\nfrom .. import models, InputParameterError\nfrom ...coordinates import Angle\nfrom .. import fitting\nfrom ...tests.helper import catch_warnings\nfrom ...utils.exceptions import AstropyDeprecationWarning\n\ntry:\n from scipy import optimize # pylint: disable=W0611\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\n\ndef test_Trapezoid1D():\n \"\"\"Regression test for https://github.com/astropy/astropy/issues/1721\"\"\"\n\n model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3)\n xx = np.linspace(0, 4, 8)\n yy = model(xx)\n yy_ref = [0., 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.]\n assert_allclose(yy, yy_ref, rtol=0, atol=1e-6)\n\n\ndef test_GaussianAbsorption1D():\n g_em = models.Gaussian1D(0.8, 3000, 20)\n with catch_warnings(AstropyDeprecationWarning) as w:\n g_ab = models.GaussianAbsorption1D(0.8, 3000, 20)\n assert len(w) == 1\n xx = np.arange(2900, 3100, 2)\n assert_allclose(g_ab(xx), 1 - g_em(xx))\n assert_allclose(g_ab.fit_deriv(xx[0], 0.8, 3000, 20),\n -np.array(g_em.fit_deriv(xx[0], 0.8, 3000, 20)))\n assert g_ab.bounding_box == g_em.bounding_box\n assert_allclose(g_ab.fwhm, 47.096400900618988)\n\n\ndef test_Gaussian2D():\n \"\"\"\n Test rotated elliptical Gaussian2D model.\n https://github.com/astropy/astropy/pull/2038\n \"\"\"\n\n model = models.Gaussian2D(4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3,\n theta=np.pi/6.)\n y, x = np.mgrid[0:5, 0:5]\n g = model(x, y)\n g_ref = [[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709],\n [3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838],\n [3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187],\n [3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544],\n [3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094]]\n assert_allclose(g, g_ref, rtol=0, atol=1e-6)\n assert_allclose([model.x_fwhm, model.y_fwhm],\n [12.009582229657841, 7.7709061486021325])\n\n\ndef test_Gaussian2DCovariance():\n \"\"\"\n Test rotated elliptical Gaussian2D model when cov_matrix is input.\n https://github.com/astropy/astropy/pull/2199\n \"\"\"\n\n cov_matrix = [[49., -16.], [-16., 9.]]\n model = models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)\n y, x = np.mgrid[0:5, 0:5]\n g = model(x, y)\n g_ref = [[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269],\n [8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227],\n [13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638],\n [16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889],\n [14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201]]\n assert_allclose(g, g_ref, rtol=0, atol=1e-6)\n\n\ndef test_Gaussian2DRotation():\n amplitude = 42\n x_mean, y_mean = 0, 0\n x_stddev, y_stddev = 2, 3\n theta = Angle(10, 'deg')\n pars = dict(amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,\n x_stddev=x_stddev, y_stddev=y_stddev)\n rotation = models.Rotation2D(angle=theta.degree)\n point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev)\n point2 = rotation(*point1)\n g1 = models.Gaussian2D(theta=0, **pars)\n g2 = models.Gaussian2D(theta=theta.radian, **pars)\n value1 = g1(*point1)\n value2 = g2(*point2)\n assert_allclose(value1, value2)\n\n\ndef test_Gaussian2D_invalid_inputs():\n x_stddev = 5.1\n y_stddev = 3.3\n theta = 10\n cov_matrix 
= [[49., -16.], [-16., 9.]]\n\n # first make sure the valid ones are OK\n models.Gaussian2D()\n models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta)\n models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta)\n models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta)\n models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None)\n models.Gaussian2D(cov_matrix=cov_matrix)\n\n with pytest.raises(InputParameterError):\n models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix)\n with pytest.raises(InputParameterError):\n models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix)\n with pytest.raises(InputParameterError):\n models.Gaussian2D(theta=0, cov_matrix=cov_matrix)\n\n\[email protected]('gamma', (10, -10))\ndef test_moffat_fwhm(gamma):\n ans = 34.641016151377542\n kwargs = {'gamma': gamma, 'alpha': 0.5}\n m1 = models.Moffat1D(**kwargs)\n m2 = models.Moffat2D(**kwargs)\n assert_allclose([m1.fwhm, m2.fwhm], ans)\n assert_array_less(0, [m1.fwhm, m2.fwhm])\n\n\ndef test_RedshiftScaleFactor():\n \"\"\"Like ``test_ScaleModel()``.\"\"\"\n\n # Scale by a scalar\n m = models.RedshiftScaleFactor(0.4)\n assert m(0) == 0\n assert_array_equal(m([1, 2]), [1.4, 2.8])\n\n assert_allclose(m.inverse(m([1, 2])), [1, 2])\n\n # Scale by a list\n m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3)\n assert_array_equal(m(0), 0)\n assert_array_equal(m([1, 2], model_set_axis=False),\n [[0.5, 1], [1, 2], [1.5, 3]])\n\n assert_allclose(m.inverse(m([1, 2], model_set_axis=False)),\n [[1, 2], [1, 2], [1, 2]])\n\n\ndef test_Ellipse2D():\n \"\"\"Test Ellipse2D model.\"\"\"\n amplitude = 7.5\n x0, y0 = 15, 15\n theta = Angle(45, 'deg')\n em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian)\n y, x = np.mgrid[0:30, 0:30]\n e = em(x, y)\n assert np.all(e[e > 0] == amplitude)\n assert e[y0, x0] == amplitude\n\n rotation = models.Rotation2D(angle=theta.degree)\n point1 = [2, 0] # Rotation2D center is (0, 0)\n point2 = rotation(*point1)\n point1 = np.array(point1) + [x0, y0]\n point2 = np.array(point2) + [x0, y0]\n e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.)\n e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian)\n assert e1(*point1) == e2(*point2)\n\n\ndef test_Ellipse2D_circular():\n \"\"\"Test that circular Ellipse2D agrees with Disk2D [3736].\"\"\"\n amplitude = 7.5\n radius = 10\n size = (radius * 2) + 1\n y, x = np.mgrid[0:size, 0:size]\n ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius,\n theta=0)(x, y)\n disk = models.Disk2D(amplitude, radius, radius, radius)(x, y)\n assert np.all(ellipse == disk)\n\n\ndef test_Scale_inverse():\n m = models.Scale(1.2345)\n assert_allclose(m.inverse(m(6.789)), 6.789)\n\n\ndef test_Shift_inverse():\n m = models.Shift(1.2345)\n assert_allclose(m.inverse(m(6.789)), 6.789)\n\n\[email protected]('not HAS_SCIPY')\ndef test_Shift_model_levmar_fit():\n \"\"\"Test fitting Shift model with LevMarLSQFitter (issue #6103).\"\"\"\n\n init_model = models.Shift()\n\n x = np.arange(10)\n y = x+0.1\n\n fitter = fitting.LevMarLSQFitter()\n fitted_model = fitter(init_model, x, y)\n\n assert_allclose(fitted_model.parameters, [0.1], atol=1e-15)\n\n\ndef test_Shift_model_set_linear_fit():\n \"\"\"Test linear fitting of Shift model (issue #6103).\"\"\"\n\n init_model = models.Shift(offset=[0, 0], n_models=2)\n\n x = np.arange(10)\n yy = np.array([x+0.1, x-0.2])\n\n fitter = fitting.LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)\n\n assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15)\n\n\ndef 
test_Scale_model_set_linear_fit():\n \"\"\"Test linear fitting of Scale model (#6103).\"\"\"\n\n init_model = models.Scale(factor=[0, 0], n_models=2)\n\n x = np.arange(-3, 7)\n yy = np.array([1.15*x, 0.96*x])\n\n fitter = fitting.LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)\n\n assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15)\n\n\n# https://github.com/astropy/astropy/issues/6178\ndef test_Ring2D_rout():\n m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5)\n assert m.width.value == 3\n\n\[email protected](\"not HAS_SCIPY\")\ndef test_Voigt1D():\n voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)\n xarr = np.linspace(-5.0, 5.0, num=40)\n yarr = voi(xarr)\n voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)\n fitter = fitting.LevMarLSQFitter()\n voi_fit = fitter(voi_init, xarr, yarr)\n assert_allclose(voi_fit.param_sets, voi.param_sets)\n\n\[email protected](\"not HAS_SCIPY\")\ndef test_compound_models_with_class_variables():\n models_2d = [models.AiryDisk2D, models.Sersic2D]\n models_1d = [models.Sersic1D]\n\n for model_2d in models_2d:\n class CompoundModel2D(models.Const2D + model_2d):\n pass\n x, y = np.mgrid[:10, :10]\n f = CompoundModel2D()(x, y)\n assert f.shape == (10, 10)\n\n for model_1d in models_1d:\n class CompoundModel1D(models.Const1D + model_1d):\n pass\n x = np.arange(10)\n f = CompoundModel1D()(x)\n assert f.shape == (10,)\n"
] | [
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.testing.assert_array_less",
"numpy.arange",
"numpy.all",
"numpy.linspace"
]
] |
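The tests in the record above exercise `astropy.modeling` functional models. A short sketch of evaluating one of them on a grid, assuming `astropy` and `numpy` are available (the parameter values are taken from `test_Gaussian2D`):

```python
import numpy as np
from astropy.modeling import models

# Rotated elliptical Gaussian with the parameters used in test_Gaussian2D.
g = models.Gaussian2D(amplitude=4.2, x_mean=1.7, y_mean=3.1,
                      x_stddev=5.1, y_stddev=3.3, theta=np.pi / 6)
y, x = np.mgrid[0:5, 0:5]
print(g(x, y).shape)  # (5, 5) grid of model values
```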
jesusanaya-fb/duckdb | [
"85235f75c13bd91c8ac7fe915cde76c2ddd4f14b"
] | [
"tools/pythonpkg/setup.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport subprocess\nimport shutil\nimport platform\n\nfrom setuptools import setup, Extension\nfrom setuptools.command.sdist import sdist\nimport distutils.spawn\n\nextensions = ['parquet', 'icu', 'fts']\n\ndef open_utf8(fpath, flags):\n import sys\n if sys.version_info[0] < 3:\n return open(fpath, flags)\n else:\n return open(fpath, flags, encoding=\"utf8\")\n\n# make sure we are in the right directory\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\nif os.name == 'nt':\n # windows:\n toolchain_args = ['/wd4244', '/wd4267', '/wd4200', '/wd26451', '/wd26495', '/D_CRT_SECURE_NO_WARNINGS']\nelse:\n # macos/linux\n toolchain_args = ['-std=c++11', '-g0']\n if 'DUCKDEBUG' in os.environ:\n toolchain_args = ['-std=c++11', '-Wall', '-O0', '-g']\nif 'DUCKDB_INSTALL_USER' in os.environ and 'install' in sys.argv:\n sys.argv.append('--user')\n\nexisting_duckdb_dir = ''\nnew_sys_args = []\nlibraries = []\nfor i in range(len(sys.argv)):\n recognized = False\n if sys.argv[i].startswith(\"--binary-dir=\"):\n existing_duckdb_dir = sys.argv[i].split('=', 1)[1]\n recognized = True\n elif sys.argv[i].startswith(\"--compile-flags=\"):\n toolchain_args = ['-std=c++11'] + [x.strip() for x in sys.argv[i].split('=', 1)[1].split(' ') if len(x.strip()) > 0]\n recognized = True\n elif sys.argv[i].startswith(\"--libs=\"):\n libraries = [x.strip() for x in sys.argv[i].split('=', 1)[1].split(' ') if len(x.strip()) > 0]\n recognized = True\n if not recognized:\n new_sys_args.append(sys.argv[i])\nsys.argv = new_sys_args\n\nif platform.system() == 'Darwin':\n toolchain_args.extend(['-stdlib=libc++', '-mmacosx-version-min=10.7'])\n\nfor ext in extensions:\n toolchain_args.extend(['-DBUILD_{}_EXTENSION'.format(ext.upper())])\n\nclass get_pybind_include(object):\n def __init__(self, user=False):\n self.user = user\n\n def __str__(self):\n import pybind11\n return pybind11.get_include(self.user)\n\nclass get_numpy_include(object):\n def __str__(self):\n import numpy\n return numpy.get_include()\n\nextra_files = []\nheader_files = []\n\nscript_path = os.path.dirname(os.path.abspath(__file__))\ninclude_directories = [get_numpy_include(), get_pybind_include(), get_pybind_include(user=True)]\nif len(existing_duckdb_dir) == 0:\n # no existing library supplied: compile everything from source\n source_files = ['duckdb_python.cpp']\n\n # check if amalgamation exists\n if os.path.isfile(os.path.join(script_path, '..', '..', 'scripts', 'amalgamation.py')):\n # amalgamation exists: compiling from source directory\n # copy all source files to the current directory\n sys.path.append(os.path.join(script_path, '..', '..', 'scripts'))\n import package_build\n\n (source_list, include_list, original_sources) = package_build.build_package(os.path.join(script_path, 'duckdb'), extensions)\n\n duckdb_sources = [os.path.sep.join(package_build.get_relative_path(script_path, x).split('/')) for x in source_list]\n duckdb_sources.sort()\n\n original_sources = [os.path.join('duckdb', x) for x in original_sources]\n\n duckdb_includes = [os.path.join('duckdb', x) for x in include_list]\n duckdb_includes += ['duckdb']\n\n # gather the include files\n import amalgamation\n header_files = amalgamation.list_includes_files(duckdb_includes)\n\n # write the source list, include list and git hash to separate files\n with open_utf8('sources.list', 'w+') as f:\n for source_file in duckdb_sources:\n f.write(source_file + \"\\n\")\n\n with open_utf8('includes.list', 'w+') as f:\n for 
include_file in duckdb_includes:\n f.write(include_file + '\\n')\n\n extra_files = ['sources.list', 'includes.list'] + original_sources\n else:\n # if amalgamation does not exist, we are in a package distribution\n # read the include files, source list and include files from the supplied lists\n with open_utf8('sources.list', 'r') as f:\n duckdb_sources = [x for x in f.read().split('\\n') if len(x) > 0]\n\n with open_utf8('includes.list', 'r') as f:\n duckdb_includes = [x for x in f.read().split('\\n') if len(x) > 0]\n\n source_files += duckdb_sources\n include_directories = duckdb_includes + include_directories\n\n libduckdb = Extension('duckdb',\n include_dirs=include_directories,\n sources=source_files,\n extra_compile_args=toolchain_args,\n extra_link_args=toolchain_args,\n language='c++')\nelse:\n sys.path.append(os.path.join(script_path, '..', '..', 'scripts'))\n import package_build\n\n toolchain_args += ['-I' + x for x in package_build.includes(extensions)]\n\n result_libraries = package_build.get_libraries(existing_duckdb_dir, libraries, extensions)\n library_dirs = [x[0] for x in result_libraries if x[0] is not None]\n libnames = [x[1] for x in result_libraries if x[1] is not None]\n\n libduckdb = Extension('duckdb',\n include_dirs=include_directories,\n sources=['duckdb_python.cpp'],\n extra_compile_args=toolchain_args,\n extra_link_args=toolchain_args,\n libraries=libnames,\n library_dirs=library_dirs,\n language='c++')\n\n# Only include pytest-runner in setup_requires if we're invoking tests\nif {'pytest', 'test', 'ptr'}.intersection(sys.argv):\n setup_requires = ['pytest-runner']\nelse:\n setup_requires = []\n\nsetuptools_scm_conf = {\"root\": \"../..\", \"relative_to\": __file__}\nif os.getenv('SETUPTOOLS_SCM_NO_LOCAL', 'no') != 'no':\n setuptools_scm_conf['local_scheme'] = 'no-local-version'\n\n# data files need to be formatted as [(directory, [files...]), (directory2, [files...])]\n# no clue why the setup script can't do this automatically, but hey\ndef setup_data_files(data_files):\n directory_map = {}\n for data_file in data_files:\n normalized_fpath = os.path.sep.join(data_file.split('/'))\n splits = normalized_fpath.rsplit(os.path.sep, 1)\n if len(splits) == 1:\n # no directory specified\n directory = \"\"\n fname = normalized_fpath\n else:\n directory = splits[0]\n fname = splits[1]\n if directory not in directory_map:\n directory_map[directory] = []\n directory_map[directory].append(normalized_fpath)\n new_data_files = []\n for kv in directory_map.keys():\n new_data_files.append((kv, directory_map[kv]))\n return new_data_files\n\ndata_files = setup_data_files(extra_files + header_files)\n\nsetup(\n name = \"duckdb\",\n description = 'DuckDB embedded database',\n keywords = 'DuckDB Database SQL OLAP',\n url=\"https://www.duckdb.org\",\n long_description = 'See here for an introduction: https://duckdb.org/docs/api/python',\n install_requires=[ # these version is still available for Python 2, newer ones aren't\n 'numpy>=1.14'\n ],\n data_files = data_files,\n packages=['duckdb_query_graph'],\n include_package_data=True,\n setup_requires=setup_requires + [\"setuptools_scm\"] + ['pybind11>=2.6.0'],\n use_scm_version = setuptools_scm_conf,\n tests_require=['pytest'],\n classifiers = [\n 'Topic :: Database :: Database Engines/Servers',\n 'Intended Audience :: Developers'\n ],\n ext_modules = [libduckdb],\n maintainer = \"Hannes Muehleisen\",\n maintainer_email = \"[email protected]\"\n)\n"
] | [
[
"numpy.get_include"
]
] |
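The `setup_data_files` helper in the duckdb `setup.py` above buckets a flat list of paths into `(directory, [files])` pairs for setuptools. A standalone sketch of that grouping (the sample paths are invented):

```python
# Flat paths are bucketed by directory into (directory, [files]) pairs,
# with "" used for paths that carry no directory component.
import os

def group_data_files(paths):
    by_dir = {}
    for path in paths:
        normalized = os.path.sep.join(path.split('/'))
        directory, _, _ = normalized.rpartition(os.path.sep)
        by_dir.setdefault(directory, []).append(normalized)
    return list(by_dir.items())

print(group_data_files(['sources.list', 'duckdb/src/a.cpp', 'duckdb/src/b.cpp']))
```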
jordiriu/MP-DQN | [
"eec13eb9b4e2c0099649e0639f2a8b93d7d0d5be"
] | [
"agents/paddpg.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport random\n\nfrom torch.autograd import Variable\n\nfrom agents.agent import Agent\nfrom agents.memory.memory import Memory, MemoryNStepReturns\nfrom agents.utils import soft_update_target_network, hard_update_target_network\nfrom agents.utils.noise import OrnsteinUhlenbeckActionNoise\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Critic(nn.Module):\n\n def __init__(self, state_size, action_size, action_parameter_size, hidden_layers=None, action_input_layer=0,\n init_type=\"normal\", activation=\"leaky_relu\", init_std=0.01):\n super(Critic, self).__init__()\n self.state_size = state_size\n self.action_size = action_size\n self.action_parameter_size = action_parameter_size\n self.activation = activation\n\n # self.action_input_layer = action_input_layer\n\n # initialise layers\n self.layers = nn.ModuleList()\n input_size = self.state_size + action_size + action_parameter_size\n last_hidden_layer_size = input_size\n if hidden_layers is not None:\n nh = len(hidden_layers)\n self.layers.append(nn.Linear(input_size, hidden_layers[0]))\n for i in range(1, nh):\n self.layers.append(nn.Linear(hidden_layers[i - 1], hidden_layers[i]))\n last_hidden_layer_size = hidden_layers[nh - 1]\n self.output_layer = nn.Linear(last_hidden_layer_size, 1)\n\n # initialise layers\n for i in range(0, len(self.layers)):\n if init_type == \"kaiming\":\n nn.init.kaiming_normal_(self.layers[i].weight.data, nonlinearity=self.activation)\n elif init_type == \"normal\":\n nn.init.normal_(self.layers[i].weight.data, std=init_std)\n else:\n raise ValueError(\"Unknown init_type \"+str(init_type))\n nn.init.zeros_(self.layers[i].bias.data)\n nn.init.normal_(self.output_layer.weight, std=init_std)\n # nn.init.normal_(self.action_output_layer.bias, std=init_std)\n nn.init.zeros_(self.output_layer.bias)\n\n def forward(self, state, actions, action_parameters):\n x = torch.cat((state, actions, action_parameters), dim=1)\n negative_slope = 0.01\n\n num_hidden_layers = len(self.layers)\n for i in range(0, num_hidden_layers):\n if self.activation == \"relu\":\n x = F.relu(self.layers[i](x))\n elif self.activation == \"leaky_relu\":\n x = F.leaky_relu(self.layers[i](x), negative_slope)\n else:\n raise ValueError(\"Unknown activation function \" + str(self.activation))\n Q = self.output_layer(x)\n\n return Q\n\n\nclass Actor(nn.Module):\n\n def __init__(self, state_size, action_size, action_parameter_size, hidden_layers=None, init_std=0.01,\n init_type=\"normal\", activation=\"leaky_relu\", squashing_function=False):\n super(Actor, self).__init__()\n\n self.state_size = state_size\n self.action_size = action_size\n self.action_parameter_size = action_parameter_size\n self.squashing_function=squashing_function\n assert self.squashing_function is False # unsupported, still need to implement rescaling of output\n self.activation = activation\n\n # initialise layers\n self.layers = nn.ModuleList()\n last_hidden_layer_size = self.state_size\n if hidden_layers is not None:\n nh = len(hidden_layers)\n self.layers.append(nn.Linear(self.state_size, hidden_layers[0]))\n for i in range(1, nh):\n self.layers.append(nn.Linear(hidden_layers[i - 1], hidden_layers[i]))\n last_hidden_layer_size = hidden_layers[nh - 1]\n\n # separate output layers for actions and action-parameters\n self.action_output_layer = nn.Linear(last_hidden_layer_size, self.action_size)\n 
self.action_parameters_output_layer = nn.Linear(last_hidden_layer_size, self.action_parameter_size)\n\n # initialise layers\n for i in range(0, len(self.layers)):\n if init_type == \"kaiming\":\n nn.init.kaiming_normal_(self.layers[i].weight.data, nonlinearity=self.activation)\n elif init_type == \"normal\":\n nn.init.normal_(self.layers[i].weight.data, std=init_std)\n else:\n raise ValueError(\"Unknown init_type \"+str(init_type))\n nn.init.zeros_(self.layers[i].bias.data)\n nn.init.normal_(self.action_output_layer.weight, std=init_std)\n nn.init.zeros_(self.action_output_layer.bias)\n nn.init.normal_(self.action_parameters_output_layer.weight, std=init_std)\n nn.init.zeros_(self.action_parameters_output_layer.bias)\n\n self.action_parameters_passthrough_layer = nn.Linear(self.state_size, self.action_parameter_size)\n nn.init.zeros_(self.action_parameters_passthrough_layer.weight)\n nn.init.zeros_(self.action_parameters_passthrough_layer.bias)\n self.action_parameters_passthrough_layer.weight.requires_grad = False\n self.action_parameters_passthrough_layer.bias.requires_grad = False\n\n def forward(self, state):\n negative_slope = 0.01\n\n x = state\n num_hidden_layers = len(self.layers)\n for i in range(0, num_hidden_layers):\n if self.activation == \"relu\":\n x = F.relu(self.layers[i](x))\n elif self.activation == \"leaky_relu\":\n x = F.leaky_relu(self.layers[i](x), negative_slope)\n else:\n raise ValueError(\"Unknown activation function \" + str(self.activation))\n actions = self.action_output_layer(x)\n action_params = self.action_parameters_output_layer(x)\n action_params += self.action_parameters_passthrough_layer(state)\n\n # if self.squashing_function:\n # assert False # scaling not implemented yet\n # '''\n # actions = actions.tanh()\n # actions = actions *self.action_lim\n # action_params = action_params.tanh()\n # action_params = action_params *self.action_param_lim\n # '''\n\n return actions, action_params\n\n\nclass PADDPGAgent(Agent):\n \"\"\"\n DDPG actor-critic agent for parameterised action spaces\n [Hausknecht and Stone 2016]\n \"\"\"\n def __init__(self,\n observation_space,\n action_space,\n actor_class=Actor,\n actor_kwargs={},\n critic_class=Critic,\n critic_kwargs={},\n epsilon_initial=1.0,\n epsilon_final=0.01,\n epsilon_steps=10000,\n batch_size=64,\n gamma=0.99,\n beta=0.5, # averaging factor between off-policy and on-policy targets during n-step updates\n tau_actor=0.001, # Polyak averaging factor for updating target weights\n tau_critic=0.001,\n replay_memory=None, # memory buffer object\n replay_memory_size=1000000,\n learning_rate_actor=0.00001,\n learning_rate_critic=0.001,\n initial_memory_threshold=0,\n clip_grad=10,\n adam_betas=(0.95, 0.999),\n use_ornstein_noise=False, # if false, uses epsilon-greedy with uniform-random action-parameter exploration\n loss_func=F.mse_loss, # F.smooth_l1_loss\n inverting_gradients=False,\n n_step_returns=False,\n seed=None):\n super(PADDPGAgent, self).__init__(observation_space, action_space)\n\n self.num_actions = self.action_space.spaces[0].n\n self.action_parameter_sizes = np.array([self.action_space.spaces[i].shape[0] for i in range(1, self.num_actions+1)])\n self.action_parameter_size = int(self.action_parameter_sizes.sum())\n self.action_max = torch.from_numpy(np.ones((self.num_actions,))).float().to(device)\n self.action_min = -self.action_max.detach()\n self.action_range = (self.action_max-self.action_min).detach()\n self.action_parameter_max_numpy = np.concatenate([self.action_space.spaces[i].high for i in 
range(1,self.num_actions+1)]).ravel()\n self.action_parameter_min_numpy = np.concatenate([self.action_space.spaces[i].low for i in range(1,self.num_actions+1)]).ravel()\n self.action_parameter_range_numpy = (self.action_parameter_max_numpy - self.action_parameter_min_numpy)\n self.action_parameter_max = torch.from_numpy(self.action_parameter_max_numpy).float().to(device)\n self.action_parameter_min = torch.from_numpy(self.action_parameter_min_numpy).float().to(device)\n self.action_parameter_range = torch.from_numpy(self.action_parameter_range_numpy).float().to(device)\n\n self.epsilon = epsilon_initial\n self.epsilon_initial = epsilon_initial\n self.epsilon_final = epsilon_final\n self.epsilon_steps = epsilon_steps\n\n self.clip_grad = clip_grad\n self.batch_size = batch_size\n self.gamma = gamma\n self.beta = beta\n self.replay_memory_size = replay_memory_size\n self.initial_memory_threshold = initial_memory_threshold\n self.learning_rate_actor = learning_rate_actor\n self.learning_rate_critic = learning_rate_critic\n self.inverting_gradients = inverting_gradients\n self.tau_actor = tau_actor\n self.tau_critic = tau_critic\n self._step = 0\n self._episode = 0\n self.updates = 0\n\n self.np_random = None\n self.seed = seed\n self._seed(seed)\n\n self.use_ornstein_noise = use_ornstein_noise\n self.noise = OrnsteinUhlenbeckActionNoise(self.action_parameter_size, random_machine=self.np_random, mu=0., theta=0.15, sigma=0.0001)\n\n print(self.num_actions+self.action_parameter_size)\n self.n_step_returns = n_step_returns\n if replay_memory is None:\n self.replay_memory = MemoryNStepReturns(replay_memory_size, observation_space.shape, (self.num_actions+self.action_parameter_size,), next_actions=False, n_step_returns=self.n_step_returns)\n else:\n self.replay_memory = replay_memory\n self.actor = actor_class(self.observation_space.shape[0], self.num_actions, self.action_parameter_size, **actor_kwargs).to(device)\n self.actor_target = actor_class(self.observation_space.shape[0], self.num_actions, self.action_parameter_size, **actor_kwargs).to(device)\n hard_update_target_network(self.actor, self.actor_target)\n self.actor_target.eval()\n\n self.critic = critic_class(self.observation_space.shape[0], self.num_actions, self.action_parameter_size, **critic_kwargs).to(device)\n self.critic_target = critic_class(self.observation_space.shape[0], self.num_actions, self.action_parameter_size, **critic_kwargs).to(device)\n hard_update_target_network(self.critic, self.critic_target)\n self.critic_target.eval()\n\n self.loss_func = loss_func # l1_smooth_loss performs better but original paper used MSE\n\n self.actor_optimiser = optim.Adam(self.actor.parameters(), lr=self.learning_rate_actor, betas=adam_betas)\n self.critic_optimiser = optim.Adam(self.critic.parameters(), lr=self.learning_rate_critic, betas=adam_betas)\n\n def __str__(self):\n desc = (\"P-DDPG Agent with frozen initial weight layer\\n\" +\n \"Actor: {}\\n\".format(self.actor) +\n \"Critic: {}\\n\".format(self.critic) +\n \"Actor Alpha: {}\\n\".format(self.learning_rate_actor) +\n \"Critic Alpha: {}\\n\".format(self.learning_rate_critic) +\n \"Gamma: {}\\n\".format(self.gamma) +\n \"Tau Actor: {}\\n\".format(self.tau_actor) +\n \"Tau Critic: {}\\n\".format(self.tau_critic) +\n \"Beta: {}\\n\".format(self.beta) +\n \"Inverting Gradients: {}\\n\".format(self.inverting_gradients) +\n \"Replay Memory: {}\\n\".format(self.replay_memory_size) +\n \"epsilon_initial: {}\\n\".format(self.epsilon_initial) +\n \"epsilon_final: 
{}\\n\".format(self.epsilon_final) +\n \"epsilon_steps: {}\\n\".format(self.epsilon_steps) +\n \"Clip norm: {}\\n\".format(self.clip_grad) +\n \"Batch Size: {}\\n\".format(self.batch_size) +\n \"Ornstein Noise?: {}\\n\".format(self.use_ornstein_noise) +\n \"Seed: {}\\n\".format(self.seed))\n return desc\n\n def set_action_parameter_passthrough_weights(self, initial_weights, initial_bias=None):\n passthrough_layer = self.actor.action_parameters_passthrough_layer\n print(initial_weights.shape)\n print(passthrough_layer.weight.data.size())\n assert initial_weights.shape == passthrough_layer.weight.data.size()\n passthrough_layer.weight.data = torch.Tensor(initial_weights).float().to(device)\n if initial_bias is not None:\n print(initial_bias.shape)\n print(passthrough_layer.bias.data.size())\n assert initial_bias.shape == passthrough_layer.bias.data.size()\n passthrough_layer.bias.data = torch.Tensor(initial_bias).float().to(device)\n passthrough_layer.requires_grad = False\n passthrough_layer.weight.requires_grad = False\n passthrough_layer.bias.requires_grad = False\n hard_update_target_network(self.actor, self.actor_target)\n\n def _invert_gradients(self, grad, vals, grad_type, inplace=True):\n # 5x faster on CPU\n if grad_type == \"actions\":\n max_p = self.action_max.cpu()\n min_p = self.action_min.cpu()\n rnge = self.action_range.cpu()\n elif grad_type == \"action_parameters\":\n max_p = self.action_parameter_max.cpu()\n min_p = self.action_parameter_min.cpu()\n rnge = self.action_parameter_range.cpu()\n else:\n raise ValueError(\"Unhandled grad_type: '\"+str(grad_type) + \"'\")\n\n assert grad.shape == vals.shape\n\n if not inplace:\n grad = grad.clone()\n with torch.no_grad():\n for n in range(grad.shape[0]):\n # index = grad < 0 # actually > but Adam minimises, so reversed (could also double negate the grad)\n index = grad[n] > 0\n grad[n][index] *= (index.float() * (max_p - vals[n]) / rnge)[index]\n grad[n][~index] *= ((~index).float() * (vals[n] - min_p) / rnge)[~index]\n\n return grad\n\n def _seed(self, seed=None):\n \"\"\"\n NOTE: this will not reset the randomly initialised weights; use the seed parameter in the constructor instead.\n\n :param seed:\n :return:\n \"\"\"\n self.seed = seed\n random.seed(seed)\n np.random.seed(seed)\n self.np_random = np.random.RandomState(seed=seed)\n if seed is not None:\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n def _ornstein_uhlenbeck_noise(self, all_action_parameters):\n \"\"\" Continuous action exploration using an Ornstein–Uhlenbeck process. 
\"\"\"\n return all_action_parameters.data.numpy() + (self.noise.sample() * self.action_parameter_range_numpy)\n\n def start_episode(self):\n pass\n\n def end_episode(self):\n self._episode += 1\n\n # anneal exploration\n if self._episode < self.epsilon_steps:\n self.epsilon = self.epsilon_initial - (self.epsilon_initial - self.epsilon_final) * (\n self._episode / self.epsilon_steps)\n else:\n self.epsilon = self.epsilon_final\n pass\n\n def act(self, state):\n with torch.no_grad():\n state = torch.from_numpy(state).to(device)\n all_actions, all_action_parameters = self.actor.forward(state)\n all_actions = all_actions.detach().cpu().data.numpy()\n all_action_parameters = all_action_parameters.detach().cpu().data.numpy()\n\n # Hausknecht and Stone [2016] use epsilon greedy actions with uniform random action-parameter exploration\n if self.np_random.uniform() < self.epsilon:\n all_actions = self.np_random.uniform(size=all_actions.shape)\n offsets = np.array([self.action_parameter_sizes[i] for i in range(self.num_actions)], dtype=int).cumsum()\n offsets = np.concatenate((np.array([0]), offsets))\n if not self.use_ornstein_noise:\n for i in range(self.num_actions):\n all_action_parameters[offsets[i]:offsets[i+1]] = self.np_random.uniform(\n self.action_parameter_min_numpy[offsets[i]:offsets[i+1]],\n self.action_parameter_max_numpy[offsets[i]:offsets[i+1]])\n\n # select maximum action\n action = np.argmax(all_actions)\n offset = np.array([self.action_parameter_sizes[i] for i in range(action)], dtype=int).sum()\n if self.use_ornstein_noise and self.noise is not None:\n all_action_parameters[offset:offset + self.action_parameter_sizes[action]] += self.noise.sample()[offset:offset + self.action_parameter_sizes[action]]\n action_parameters = all_action_parameters[offset:offset + self.action_parameter_sizes[action]]\n return action, action_parameters, all_actions, all_action_parameters\n\n def step(self, state, action, reward, next_state, next_action, terminal, time_steps=1, optimise=True):\n action, action_params, all_actions, all_action_parameters = action\n self._step += 1\n\n self._add_sample(state, np.concatenate((all_actions.data, all_action_parameters.data)).ravel(), reward, next_state, terminal)\n if optimise and self._step >= self.batch_size and self._step >= self.initial_memory_threshold:\n self._optimize_td_loss()\n\n def _add_sample(self, state, action, reward, next_state, terminal):\n assert not self.n_step_returns\n assert len(action) == self.num_actions+self.action_parameter_size\n self.replay_memory.append(state, action, reward, next_state, terminal)\n\n def _optimize_td_loss(self):\n if self.replay_memory.nb_entries < self.batch_size or \\\n self.replay_memory.nb_entries < self.initial_memory_threshold:\n return\n\n # Sample a batch from replay memory\n if self.n_step_returns:\n states, actions, rewards, next_states, terminals, n_step_returns = self.replay_memory.sample(self.batch_size, random_machine=self.np_random)\n else:\n states, actions, rewards, next_states, terminals = self.replay_memory.sample(self.batch_size, random_machine=self.np_random)\n n_step_returns = None\n\n states = torch.from_numpy(states).to(device)\n actions_combined = torch.from_numpy(actions).to(device) # make sure to separate actions and action-parameters\n actions = actions_combined[:,:self.num_actions]\n action_parameters = actions_combined[:, self.num_actions:]\n rewards = torch.from_numpy(rewards).to(device)\n next_states = torch.from_numpy(next_states).to(device)\n terminals = 
torch.from_numpy(terminals).to(device)\n if self.n_step_returns:\n n_step_returns = torch.from_numpy(n_step_returns).to(device)\n\n # ---------------------- optimize critic ----------------------\n with torch.no_grad():\n pred_next_actions, pred_next_action_parameters = self.actor_target.forward(next_states)\n off_policy_next_val = self.critic_target.forward(next_states, pred_next_actions, pred_next_action_parameters)\n off_policy_target = rewards + (1 - terminals) * self.gamma * off_policy_next_val\n if self.n_step_returns:\n on_policy_target = n_step_returns\n target = self.beta*on_policy_target + (1.-self.beta) * off_policy_target\n else:\n target = off_policy_target\n\n y_expected = target\n y_predicted = self.critic.forward(states, actions, action_parameters)\n loss_critic = self.loss_func(y_predicted, y_expected)\n\n self.critic_optimiser.zero_grad()\n loss_critic.backward()\n if self.clip_grad > 0:\n torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.clip_grad)\n self.critic_optimiser.step()\n\n # ---------------------- optimise actor ----------------------\n # 1 - calculate gradients from critic\n with torch.no_grad():\n actions, action_params = self.actor(states)\n action_params = torch.cat((actions, action_params), dim=1)\n action_params.requires_grad = True\n Q_val = self.critic(states, action_params[:, :self.num_actions], action_params[:, self.num_actions:]).mean()\n self.critic.zero_grad()\n Q_val.backward()\n from copy import deepcopy\n delta_a = deepcopy(action_params.grad.data)\n # 2 - apply inverting gradients and combine with gradients from actor\n actions, action_params = self.actor(Variable(states))\n action_params = torch.cat((actions, action_params), dim=1)\n delta_a[:, self.num_actions:] = self._invert_gradients(delta_a[:, self.num_actions:].cpu(), action_params[:, self.num_actions:].cpu(), grad_type=\"action_parameters\", inplace=True)\n delta_a[:, :self.num_actions] = self._invert_gradients(delta_a[:, :self.num_actions].cpu(), action_params[:, :self.num_actions].cpu(), grad_type=\"actions\", inplace=True)\n out = -torch.mul(delta_a, action_params)\n self.actor.zero_grad()\n out.backward(torch.ones(out.shape).to(device))\n\n if self.clip_grad > 0:\n torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.clip_grad)\n self.actor_optimiser.step()\n\n soft_update_target_network(self.actor, self.actor_target, self.tau_actor)\n soft_update_target_network(self.critic, self.critic_target, self.tau_critic)\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"torch.cuda.manual_seed",
"torch.nn.ModuleList",
"torch.nn.init.kaiming_normal_",
"torch.ones",
"torch.cuda.is_available",
"numpy.concatenate",
"torch.mul",
"torch.autograd.Variable",
"torch.manual_seed",
"torch.nn.init.normal_",
"numpy.argmax",
"torch.nn.init.zeros_",
"torch.Tensor",
"numpy.array",
"numpy.random.RandomState",
"numpy.random.seed",
"torch.no_grad",
"numpy.ones",
"torch.from_numpy"
]
] |
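The entry above ends by calling `soft_update_target_network(...)` on the actor and critic, but that helper's body is not part of this row. A minimal sketch of what such a Polyak-style soft update usually looks like in PyTorch, assuming the convention `target <- tau*source + (1-tau)*target`; the function name and blending convention are assumptions, not taken from this snippet.

```python
import torch

def soft_update_target_network(source: torch.nn.Module, target: torch.nn.Module, tau: float) -> None:
    """Polyak average: target <- tau * source + (1 - tau) * target, parameter by parameter."""
    with torch.no_grad():
        for src_param, tgt_param in zip(source.parameters(), target.parameters()):
            tgt_param.data.mul_(1.0 - tau)
            tgt_param.data.add_(tau * src_param.data)

# usage sketch: keep a slowly-moving copy of a network
net = torch.nn.Linear(4, 2)
target = torch.nn.Linear(4, 2)
target.load_state_dict(net.state_dict())
soft_update_target_network(net, target, tau=0.01)
```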
intelligent-control-lab/BIS | [
"7df10426373696093271e9afcae0c7e8fa7be0f4"
] | [
"src/speed_profile.py"
] | [
"from agents import *\nfrom models import *\nimport numpy as np\nimport matplotlib\nmatplotlib.use('tkagg')\nimport matplotlib.pyplot as plt\nimport sys\nimport pickle\n# end class world\n\ndef speed_profile(file_names):\n \"\"\"\n This function is to plot speed profiles for several evaluation results.\n Args:\n file_names (array of string): file names to be draw speed profile.\n \"\"\"\n\n # instantiate the class\n robots = []\n records = []\n dT = 0.05\n for i in range(1,len(file_names)):\n f = open(file_names[i], 'rb')\n record = pickle.load(f)\n records.append(record)\n exec('robots.append(' + record.model + '(' + record.algorithm + '(), dT))');\n \n print(len(records))\n\n fig = plt.figure() \n ax1=plt.subplot(2, 1, 1)\n ax2=plt.subplot(2, 1, 2)\n\n for i in range(len(records)):\n d = []\n dot_d = []\n human = HumanBall3D(MobileAgent(), dT);\n for t in range(records[0].tot):\n records[i].robot_moves[:, t]\n\n human.update(robots[0])\n human.move(records[0].human_moves[:, t])\n\n robots[i].update(human)\n robots[i].x = records[i].robot_moves[:, t]\n\n Mr = robots[i].m\n Mh = human.m\n\n dim = np.shape(Mr)[0] // 2\n p_idx = np.arange(dim)\n v_idx = p_idx + dim\n\n d.append(np.linalg.norm(Mr[p_idx] - Mh[p_idx]))\n sgn = (Mr[p_idx+dim] - Mh[p_idx+dim]).T * (Mr[p_idx] - Mh[p_idx])\n sgn = -1 if sgn < 0 else 1\n dot_d.append(sgn * np.linalg.norm(Mr[p_idx+dim] - Mh[p_idx+dim]))\n print(d[:10])\n print(dot_d[:10])\n\n \n ax1.plot(d, c='C'+str(i), label=records[i].algorithm, linestyle='-')\n \n ax2.plot(dot_d, c='C'+str(i), label=records[i].algorithm, linestyle='--')\n ax2.plot(range(-100,800,100), np.linspace(0,0,9),c='black', linestyle='-')\n \n\n \n ax1.legend()\n ax1.set_xlim(0,200)\n ax1.set_ylabel('m', fontsize = 20)\n # plt.show()\n # fig.legend()\n\n ax2.set_xlim(0,200)\n ax2.set_xlabel('Frame (0.05s)', fontsize = 20)\n ax2.set_ylabel('m/s', fontsize = 20)\n # tikz_save(model+'.tex')\n fig.savefig('speed_profile.pdf', bbox_inches='tight')\n\nif __name__ == '__main__':\n speed_profile(sys.argv)\n "
] | [
[
"matplotlib.use",
"numpy.linalg.norm",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.subplot"
]
] |
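The speed-profile script above computes a signed relative speed: the magnitude of the velocity difference, negated when the agents are approaching (the sign comes from the dot product between relative velocity and relative position). A small self-contained NumPy sketch of that calculation; the state vectors here are invented.

```python
import numpy as np

def signed_relative_speed(p_robot, v_robot, p_human, v_human):
    """||v_robot - v_human||, negative when the two agents are closing in on each other."""
    dp = np.asarray(p_robot, dtype=float) - np.asarray(p_human, dtype=float)  # relative position
    dv = np.asarray(v_robot, dtype=float) - np.asarray(v_human, dtype=float)  # relative velocity
    sign = -1.0 if float(np.dot(dv, dp)) < 0 else 1.0
    return sign * np.linalg.norm(dv)

# robot at the origin moving toward a human standing still two meters away -> closing, so negative
print(signed_relative_speed([0, 0], [1, 0], [2, 0], [0, 0]))  # -1.0
```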
laurenmarietta/nircam_simulator | [
"08a92050a89faf944221c3f3cb7f490f460abb74"
] | [
"mirage/yaml/generate_observationlist.py"
] | [
"\"\"\"Generate observation list files based on default values and APT output files.\n\nAuthors\n-------\n - Lauren Chambers\n - Johannes Sahlmann\n\nUse\n---\n ::\n from mirage.yaml import generate_observationlist\n generate_observationlist.get_observation_dict(xml_file, yaml_file, catalogs,\n parameter_defaults=None, verbose=False):\n\nTODO\n----\n - Determine solution to set default parameters explicitly in configuration file or similar.\n - Clarify use and role of FilterConfig\n\n\"\"\"\nimport collections\nimport copy\nimport os\n\nfrom astropy.table import Table, vstack\nimport numpy as np\n\nfrom ..apt import read_apt_xml\n\n\ndef dictionary_slice(dictionary, index):\n \"\"\"Return a dictionary with only the i'th element from every list stored in a key.\n\n Parameters\n ----------\n dictionary\n index\n\n Returns\n -------\n\n \"\"\"\n new_dict = {}\n for key in dictionary.keys():\n new_dict[key] = [dictionary[key][index]]\n return new_dict\n\n\ndef expand_for_dithers(indict, verbose=True):\n \"\"\"Expand a given dictionary to create one entry for each dither.\n\n Supports parallel observations.\n\n Moved here and modified from apt_inputs.py\n\n Parameters\n ----------\n indict : dict\n dictionary of observations\n\n Returns\n -------\n expanded : dict\n Dictionary, expanded to include a separate entry for\n each dither\n \"\"\"\n expanded = {}\n for key in indict:\n expanded[key] = []\n\n # use astropy table operations to expand dithers while maintaining parallels in sync\n # implementation assumes that only one instrument is used in parallel\n table = Table(indict)\n table['row'] = np.arange(len(table))\n expanded_table = None #copy.deepcopy(table)\n\n # complication here is to handle cases with unsupported instruments (MIRI, NIRSpec) in parallel\n for i, row in enumerate(table['row']):\n number_of_dithers = np.int(table['number_of_dithers'][i])\n expand_prime_dithers_only = False\n expand_parallel_dithers = False\n\n # skip over parallel observations because they are already accounted for\n if table['ParallelInstrument'][i]:\n continue\n\n try:\n if (table['CoordinatedParallel'][i] == 'true') and (not table['ParallelInstrument'][i]) \\\n and (table['ParallelInstrument'][i + 1]) and (table['Instrument'][i] != table['Instrument'][i + 1]):\n expand_parallel_dithers = True\n else:\n expand_prime_dithers_only = True\n\n except IndexError: # last row in table is not a parallel\n expand_prime_dithers_only = True\n\n if (table['CoordinatedParallel'][i] == 'false'):\n expand_prime_dithers_only = True\n\n if expand_prime_dithers_only and expand_parallel_dithers:\n raise RuntimeError('Possible conflict found when expanding for dithers.')\n\n if expand_parallel_dithers:\n dither_table = table[i:i + 2]\n\n if (number_of_dithers > 1):\n #replicate parallel observation n times\n dither_table = vstack([dither_table]*number_of_dithers)\n\n if expanded_table is None:\n expanded_table = dither_table\n else:\n # if verbose:\n # print('Parallel: Adding {:>3d} rows to table with {:>3d} rows'.format(len(dither_table), len(expanded_table)))\n expanded_table = vstack((expanded_table, dither_table))\n\n elif expand_prime_dithers_only:\n # add row multiplied by number of dithers\n dither_table = vstack([table[i]]*number_of_dithers)\n\n if expanded_table is None:\n expanded_table = dither_table\n else:\n # print('Prime: Adding {:>3d} rows to table with {:>3d} rows'.format(len(dither_table),\n # len(expanded_table)))\n expanded_table = vstack((expanded_table, dither_table))\n\n # set number of dithers to 1 
after expansion\n expanded_table['number_of_dithers'] = np.ones(len(expanded_table)).astype(np.int)\n\n # NIRCam cannot handle when PrimaryDithers=None\n for index, value in enumerate(expanded_table['PrimaryDithers']):\n if value == 'None':\n expanded_table['PrimaryDithers'][index] = expanded_table['number_of_dithers'][index]\n\n expanded = {}\n for key in expanded_table.colnames:\n expanded[key] = np.array(expanded_table[key]).tolist()\n\n if verbose:\n print('Number of entries before expanding dithers: {}'.format(len(table)))\n print('Number of entries after expanding dithers: {}'.format(len(expanded_table)))\n\n if verbose:\n for obs_id in np.unique(expanded_table['ObservationID']):\n print('Expanded table for Observation {} has {} entries'.format(obs_id, len(np.where(expanded_table['ObservationID']==obs_id)[0])))\n return expanded\n\n\ndef get_observation_dict(xml_file, yaml_file, catalogs, parameter_defaults=None, verbose=False):\n \"\"\"Write observation list file (required mirage input) on the basis of APT files.\n\n Parameters\n ----------\n xml_file : str\n path to APT .xml file\n yaml_file : str\n output_file\n catalogs : dict\n Dictionary of catalog files, one entry per instrument. For NIRCam the entry has to be a\n dictionary itself, e.g. catalogs['nircam']['lw'] = somefile\n If the user prvides a list of catalogs, that list has to have one entry per observation in\n the program, accounting for any instrument used.\n parameter_defaults : dict\n Dictionary of default parameter value, e.g. date, roll angle, ...\n\n Returns\n -------\n xml_dict : dict\n Expanded dictionary that holds exposure information\n\n TODO\n ----\n Read default values from configuration file\n\n \"\"\"\n # avoid side effects\n catalogs = copy.deepcopy(catalogs)\n\n # Read in filters from APT .xml file\n readxml_obj = read_apt_xml.ReadAPTXML()\n\n xml_dict = readxml_obj.read_xml(xml_file, verbose=verbose)\n\n # if verbose:\n #print('Summary of observation dictionary:')\n #for key in xml_dict.keys():\n # print('{:<25}: number of elements is {:>5}'.format(key, len(xml_dict[key])))\n\n # create an expanded dictionary that contains lists of parameters expanded for dithers\n xml_dict = expand_for_dithers(xml_dict, verbose=verbose)\n #print('Summary of observation dictionary after expanding for dithers:')\n #for key in xml_dict.keys():\n # print('{:<25}: number of elements is {:>5}'.format(key, len(xml_dict[key])))\n return_dict = None\n\n # array of unique instrument names\n used_instruments = np.unique(xml_dict['Instrument'])\n unique_observation_ids = np.unique(xml_dict['ObservationID']).tolist()\n\n # Only require the number of catalogs equal to the number of observations\n # for each instrument. Keep in mind that multiple instruments can be involved in\n # a given observation due to parallels. 
But in the case of serial observations\n # with different instruments, we don't want to over-count observations and\n # require more catalogs than are really necessary\n number_of_obs = {}\n for instrument_name in used_instruments:\n inst_observations = np.array(np.array(xml_dict['Instrument']) == instrument_name)\n unique_inst_obs = np.unique(np.array(xml_dict['ObservationID'])[inst_observations])\n number_of_obs[instrument_name.lower()] = len(unique_inst_obs)\n\n # Strip out catalogs for any instruments that aren't used\n input_catalogs = copy.deepcopy(catalogs)\n for key in input_catalogs:\n if key.upper() not in used_instruments:\n del catalogs[key]\n\n number_of_observations = len(unique_observation_ids)\n number_of_exposures = len(xml_dict['ObservationID'])\n\n # Check that the appropriate catalogs have been included\n for inst in used_instruments:\n if inst.lower() not in catalogs.keys():\n raise KeyError('Missing a catalog entry for {} in the catalog dictionary.'.format(inst))\n\n entry_numbers = []\n\n # ensure that catalog files are lists with number of elements matching the number of observations\n if not isinstance(catalogs, collections.Mapping):\n raise ValueError('Please provide a catalog dictionary.')\n\n for key in catalogs.keys():\n catalog_file_list = None\n catalog_files = None\n if key.lower() == 'nircam':\n # check that a dictionary is provided for nircam\n if not isinstance(catalogs[key], collections.Mapping):\n raise ValueError('Please provide a lw/sw dictionary for nircam.')\n else:\n for module_key in catalogs[key].keys():\n catalog_files = catalogs[key][module_key]\n if isinstance(catalog_files, str):\n catalog_file_list = [catalog_files] * number_of_obs[key]\n catalogs[key][module_key] = catalog_file_list\n if len(catalogs[key][module_key]) != number_of_obs[key]:\n raise RuntimeError(('Please specify one catalog per observation for {}. '\n 'Current catalog is {}').format(key.lower(),\n catalogs[key][module_key]))\n\n else:\n catalog_files = catalogs[key]\n if isinstance(catalog_files, str):\n catalog_file_list = [catalog_files] * number_of_obs[key]\n catalogs[key] = catalog_file_list\n if len(catalogs[key]) != number_of_obs[key]:\n raise RuntimeError(('Please specify one catalog per observation for {}. '\n 'Current catalog is {}').format(key.lower(), catalogs[key]))\n\n # if verbose:\n # print('Summary of dictionary extracted from {}'.format(xml_file))\n # for key in xml_dict.keys():\n # print('{:<25}: number of elements is {:>5}'.format(key, len(xml_dict[key])))\n\n\n # set default values. 
These are overwritten if defaults argument is present\n default_values = {}\n default_values['Date'] = '2019-07-04'\n default_values['PAV3'] = '111.'\n default_values['GalaxyCatalog'] = 'None'\n default_values['ExtendedCatalog'] = 'None'\n default_values['ExtendedScale'] = '1.0'\n default_values['ExtendedCenter'] = '1024,1024'\n default_values['MovingTargetList'] = 'None'\n default_values['MovingTargetSersic'] = 'None'\n default_values['MovingTargetExtended'] = 'None'\n default_values['MovingTargetConvolveExtended'] = 'True'\n default_values['MovingTargetToTrack'] = 'None'\n default_values['BackgroundRate_sw'] = 'low'\n default_values['BackgroundRate_lw'] = 'low'\n default_values['BackgroundRate'] = '0.5'\n\n default_parameter_name_list = [key for key, item in default_values.items() if key not in 'Date PAV3 BackgroundRate BackgroundRate_sw BackgroundRate_lw'.split()]\n\n # set default parameters if given as argument\n if parameter_defaults is not None:\n for key in parameter_defaults.keys():\n if key in default_values.keys():\n default_values[key] = parameter_defaults[key]\n\n # assemble string that will constitute the yaml content\n text_out = [\"# Observation list created by generate_observationlist.py\\n\\n\"]\n\n text = ['']\n entry_number = 0 # running number for every entry in the observation list\n\n # Create an instrument-specific counter to be used with input catalogs\n all_instruments = np.unique(xml_dict['Instrument'])\n counter = {}\n for inst in all_instruments:\n counter[inst] = 0\n\n observation_numbers = np.unique(xml_dict['ObservationID'])\n for observation_index, observation_number in enumerate(observation_numbers):\n first_index = xml_dict['ObservationID'].index(observation_number)\n text += [\n \"Observation{}:\\n\".format(observation_number),\n \" Name: '{}'\\n\".format(xml_dict['ObservationName'][first_index])\n ]\n observation_rows = np.where(np.array(xml_dict['ObservationID']) == observation_number)[0]\n instruments_in_observation = np.unique(np.array(xml_dict['Instrument'])[observation_rows])\n for index in observation_rows:\n number_of_dithers = np.int(xml_dict['number_of_dithers'][index])\n instrument = xml_dict['Instrument'][index]\n for dither_index in range(number_of_dithers):\n text += [\n \" EntryNumber{}:\\n\".format(entry_number),\n \" Instrument: {}\\n\".format(instrument),\n \" Date: {}\\n\".format(default_values['Date']),\n \" PAV3: {}\\n\".format(default_values['PAV3']),\n \" DitherIndex: {}\\n\".format(dither_index),\n ]\n if return_dict is None:\n return_dict = dictionary_slice(xml_dict, index)\n else:\n return_dict = read_apt_xml.append_dictionary(return_dict, dictionary_slice(xml_dict, index))\n if instrument.lower() in ['nircam', 'wfsc']:\n\n sw_filt = xml_dict['ShortFilter'][index]\n lw_filt = xml_dict['LongFilter'][index]\n\n text += [\n \" FilterConfig:\\n\",\n \" SW:\\n\",\n \" Filter: {}\\n\".format(sw_filt),\n \" PointSourceCatalog: {}\\n\".format(catalogs[instrument.lower()]['sw'][counter[instrument]]),\n \" BackgroundRate: {}\\n\".format(default_values['BackgroundRate_sw']),\n ]\n\n for key in default_parameter_name_list:\n text += [\" {}: {}\\n\".format(key, default_values[key])]\n\n text += [\n \" LW:\\n\",\n \" Filter: {}\\n\".format(lw_filt),\n \" PointSourceCatalog: {}\\n\".format(\n catalogs[instrument.lower()]['lw'][counter[instrument]]),\n \" BackgroundRate: {}\\n\".format(\n default_values['BackgroundRate_lw']),\n ]\n\n for key in default_parameter_name_list:\n text += [\" {}: {}\\n\".format(key, default_values[key])]\n\n elif 
instrument.lower() in ['niriss', 'fgs', 'nirspec', 'miri']:\n if (instrument.lower() == 'niriss') and (xml_dict['APTTemplate'][index] in ['NirissExternalCalibration']):\n filter_wheel_value = xml_dict['FilterWheel'][index]\n pupil_wheel_value = xml_dict['PupilWheel'][index]\n text += [\n \" FilterWheel: {}\\n\".format(filter_wheel_value),\n \" PupilWheel: {}\\n\".format(pupil_wheel_value),\n ]\n if 'CLEAR' in filter_wheel_value:\n filter_value = pupil_wheel_value\n elif ('CLEAR' in pupil_wheel_value) or ('NRM' in pupil_wheel_value):\n filter_value = filter_wheel_value\n else:\n filter_value = xml_dict['Filter'][index]\n\n text += [\n \" Filter: {}\\n\".format(filter_value),\n \" PointSourceCatalog: {}\\n\".format(catalogs[instrument.lower()][counter[instrument]]),\n \" BackgroundRate: {}\\n\".format(default_values['BackgroundRate']),\n ]\n\n for key in default_parameter_name_list:\n text += [\" {}: {}\\n\".format(key, default_values[key])]\n\n entry_numbers.append(entry_number)\n entry_number += 1\n\n # Update the catalog counters for the instruments used in this observation\n for inst_name in instruments_in_observation:\n counter[inst_name] += 1\n\n text_out += text\n\n return_dict['entry_number'] = entry_numbers\n\n # If the directory to hold the observation file does not yet exist, create it\n obs_dir = os.path.dirname(yaml_file)\n if obs_dir is not '' and os.path.isdir(obs_dir) is False:\n try:\n os.mkdir(obs_dir)\n except OSError:\n print(\"Creation of the directory {} failed\".format(obs_dir))\n else:\n print(\"Successfully created the directory {} to hold the observation list file.\".format(obs_dir))\n\n f = open(yaml_file, 'w')\n for line in text_out:\n f.write(line)\n f.close()\n print('\\nWrote {} observations and {} entries to {}'.format(len(observation_numbers), entry_number, yaml_file))\n\n return return_dict\n"
] | [
[
"numpy.where",
"numpy.array",
"numpy.int",
"numpy.unique"
]
] |
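Two pieces of the Mirage helper above are easy to miss inside the long file: `dictionary_slice`, which keeps only one row of a dict-of-lists, and the dither expansion, which repeats each row `number_of_dithers` times and then resets that count to 1. A stripped-down sketch of both in plain Python, without astropy tables or the parallel-instrument handling; the field names in the toy table are invented.

```python
def dictionary_slice(dictionary, index):
    """Keep only the index-th element of every list in the dict, each re-wrapped in a list."""
    return {key: [values[index]] for key, values in dictionary.items()}

def expand_for_dithers(table):
    """Repeat every row 'number_of_dithers' times, then set that count to 1 for each new row."""
    expanded = {key: [] for key in table}
    for i in range(len(table["number_of_dithers"])):
        for _ in range(int(table["number_of_dithers"][i])):
            for key in table:
                expanded[key].append(table[key][i])
    expanded["number_of_dithers"] = [1] * len(expanded["number_of_dithers"])
    return expanded

table = {"ObservationID": [1, 2], "number_of_dithers": [3, 1]}
print(dictionary_slice(table, 1))   # {'ObservationID': [2], 'number_of_dithers': [1]}
print(expand_for_dithers(table))    # ObservationID becomes [1, 1, 1, 2]
```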
Karma-design/PyFlickrMining | [
"54401ebb91fc81b7867e0f0b85dca32bbc015aca"
] | [
"main.py"
] | [
"# -*- coding: utf-8 -*-\nprint(__doc__)\n\n# Code source: Karim Alaoui\n# License: BSD 3 clause\n\n#HOW TO USE :\n# 1.Set the input and output filenames\ninput_filename = 'lyon_cleaned_url_100' ##possible values : 'lyon_cleaned','suburb_cleaned','all_cleaned'\noutput_filename = 'clustering_v2'\n#Set the pictures ratio : 1/(pictures ratio) pictures are displayed\npictRatio = 15\n\n# 2.Comment the ununsed code (MeanShift or Kmeans, with plot)\n# 3.If you use KMeans, don't forget to set the number of clusters\n# NB : Clustering of large amount of data may take some time to perform using MeanShift\n\nimport pandas as pd\nimport numpy as np\nimport os\n\ninput_path = os.path.dirname(os.path.realpath(__file__))+'/%s.csv' \npath = input_path% input_filename\n\ndf = pd.read_csv(path)\nXa = df[['latitude', 'longitude_normalized', 'longitude', 'user','First*(id)','First*(url)']].values #longitude_normalized on 2pos when possible\nlatitudeIdx = 0\nlongitudeNrmIdx = 1\nlongitudeIdx = 2\nuserIdx = 3\nidIdx = 4\nurlIdx = 5\n\n###############################################################################\n# Compute clustering with MeanShift\nfrom sklearn.cluster import MeanShift\n\n# The following bandwidth can be automatically detected using\nbandwidth = 0.0022\n\nms = MeanShift(bandwidth=bandwidth,bin_seeding=True, cluster_all=False, min_bin_freq=15)\nX = Xa[:, 0:2]\n\nms.fit(X)\nlabels = ms.labels_\ncluster_centers = ms.cluster_centers_\n\nlabels_unique = np.unique(labels)\nn_clusters_ = len(labels_unique)-1\n\nprint(\"number of estimated clusters : %d\" % n_clusters_)\n\n##############################\n# Plot result\nimport pylab as pl\nfrom itertools import cycle\n\npl.figure(1)\npl.clf()\n\ncolors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\nfor k, col in zip(range(n_clusters_), colors):\n my_members = labels == k\n cluster_center = cluster_centers[k]\n pl.plot(X[my_members, 0], X[my_members, 1], col + '.')\n pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=14)\npl.title('Estimated number of clusters: %d' % n_clusters_)\npl.show()\n\n####CLUSTERS JSON\njson=\"var msg = '[\"\ncolor = ['red','blue','purple','yellow','green','lightblue','orange','pink']\nfor k in range(n_clusters_):\n if k != 0:\n json += \",\"\n json += \"{\"\n ##Longitude\n json +=\"\\\"longitude\\\":\"\n my_members = labels == k\n cluster_center = cluster_centers[k]\n json += str(round(cluster_center[1]/1.43,4))\n #json += cluster_center[1].astype('|S6')\n json += \", \"\n ##Latitude\n json +=\"\\\"latitude\\\":\"\n my_members = labels == k\n cluster_center = cluster_centers[k]\n json += cluster_center[0].astype('|S6')\n json += \", \"\n ##Color\n json +=\"\\\"color\\\":\\\"\"\n json += color[k%8]\n json += \"\\\"\"\n ##\n json += \"}\"\n \njson += \"]'; \\n\\n \"\n\n####\n\n###PICTURES JSON\n\njson+=\"var donnees = '[\"\nfor k in range(n_clusters_):\n my_members = labels == k\n for l in range(X[my_members,0].size/pictRatio):\n if l+k != 0:\n json += \",\"\n json += \"{\"\n ##Longitude\n json +=\"\\\"longitude\\\":\"\n array = Xa[my_members, longitudeIdx]\n #json += str(cluster_center[1]/1.43)\n json += str(array[l])#.astype('|S6')\n json += \", \"\n ##Latitude\n json +=\"\\\"latitude\\\":\"\n array = Xa[my_members, latitudeIdx]\n json += str(array[l])#.astype('|S6')\n json += \", \"\n ##Color\n json +=\"\\\"color\\\":\\\"\"\n json += color[k%8]\n json += \"\\\"\"\n json += \", \"\n ##Id\n json +=\"\\\"id\\\":\"\n array = Xa[my_members, idIdx]\n json += 
str(array[l])#.astype('|S6')\n json += \", \"\n ##url\n json +=\"\\\"url\\\":\\\"\"\n array = Xa[my_members, urlIdx]\n json += str(array[l])#.astype('|S6')\n json += \"\\\", \"\n ##User\n json +=\"\\\"user\\\":\\\"\"\n array = Xa[my_members, userIdx]\n json += array[l]\n json += \"\\\"}\"\n\njson += \"]';\"\n\n#Writing to text file\nwith open(os.path.dirname(os.path.realpath(__file__))+'/res/begin.html', 'r') as text_file:\n begin=text_file.read()\nwith open(os.path.dirname(os.path.realpath(__file__))+'/res/end.html', 'r') as text_file:\n end=text_file.read()\n\nwith open(os.path.dirname(os.path.realpath(__file__))+'/'+output_filename+'.html', \"w\") as text_file:\n #Static file start\n text_file.write(\"{0}\".format(begin))\n #Writing generated content\n text_file.write(\"{0}\".format(json))\n #Static file ending\n text_file.write(\"{0}\".format(end))\n \n###END OTHER JSON\n###############################################################################\n\n'''\n###############################################################################\n#Compute clustering with Kmeans\nkmeans_n_clusters = 50\n\nfrom sklearn.cluster import KMeans\nkmeans = KMeans(n_clusters=kmeans_n_clusters, n_init=10)\nkmeans.fit(X)\n\n##############################\n# Plot Kmeans result\nlabels = kmeans.labels_\ncentroids = kmeans.cluster_centers_\n\nfrom matplotlib import pyplot\nimport numpy as np\n\nfor i in range(kmeans_n_clusters):\n # select only data observations with cluster label == i\n ds = X[np.where(labels==i)]\n # plot the data observations\n pyplot.plot(ds[:,0],ds[:,1],'o')\n # plot the centroids\n lines = pyplot.plot(centroids[i,0],centroids[i,1],'kx')\n # make the centroid x's bigger\n pyplot.setp(lines,ms=15.0)\n pyplot.setp(lines,mew=2.0)\n pyplot.title('KMeans with %d clusters' % kmeans_n_clusters)\npyplot.show()\n###############################################################################\n'''"
] | [
[
"pandas.read_csv",
"sklearn.cluster.MeanShift",
"numpy.unique"
]
] |
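The Flickr-mining script above leans on scikit-learn's MeanShift with a hand-tuned bandwidth and bin seeding. A minimal, self-contained sketch of the same call pattern on synthetic 2-D points; the bandwidth, blob locations, and thresholds are placeholders, not values tuned for real photo data.

```python
import numpy as np
from sklearn.cluster import MeanShift

rng = np.random.default_rng(0)
# two synthetic blobs standing in for (latitude, normalized longitude) pairs
X = np.vstack([rng.normal(0.0, 0.05, size=(50, 2)),
               rng.normal(1.0, 0.05, size=(50, 2))])

ms = MeanShift(bandwidth=0.2, bin_seeding=True, cluster_all=False, min_bin_freq=5)
ms.fit(X)

labels = ms.labels_                   # -1 marks points left unclustered when cluster_all=False
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
print(n_clusters, ms.cluster_centers_.shape)
```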
mikadam/Maths-tools | [
"8c5dc44ec8c5212e98b44f5cf3f1e9762dbf9cf8"
] | [
"ODE_solver.py"
] | [
"'''\nFile name: ODE_Solvers.py\nAuthor: Michal Adamkiewicz\nDate: 2014\n\nContains UI for ODE solver\n'''\n\nimport math\n\n\nimport matplotlib\nimport tkinter as Tk\nimport tkinter.messagebox as messagebox\nimport tkinter.ttk as ttk\n\nfrom tkinter.filedialog import asksaveasfile\n\nfrom multiprocessing import Process, Pipe, Array, Value\n\nfrom parsers_for_solver import initial_value\nfrom parsers_for_solver import ParametricODETree\n\nmatplotlib.use('TkAgg')\n\nfrom numpy import array, zeros\nimport numpy\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\n\nfrom ODE_Tk import d1_Euler, non_dif_graph, d2_Euler, d2_Euler_shared\n\n\ndef d1_Euler_wrapper(clear=True):\n \n global value_array\n global main_pipe\n global p\n # global triple\n # triple=False\n\n t_init=float(p_start.get())\n t_max=float(p_end.get())\n\n step_size=float(p_step.get())\n skip_size=int(p_skip.get())\n\n func_x=p_func_x.get()\n init_x=p_init_x.get()\n func_y=p_func_y.get()\n init_y=p_init_y.get()\n\n cancel.grid()\n prog['maximum']=int((t_max-t_init)//(step_size*skip_size))+2\n prog.grid()\n # prog.start(interval=None)\n prog.update()\n\n\n\n tree=ParametricODETree(func_x)\n function_x=tree.travel()\n\n tree=ParametricODETree(func_y)\n function_y=tree.travel()\n\n if clear:\n a.clear()\n\n # main_pipe,child_pipe = Pipe()\n\n if(init_x.strip()==''):\n\n # p=Process(target=non_dif_graph,args=(child_pipe,function,t_init,t_max,step_size,skip_size))\n init_x=None\n else:\n init_x=initial_value(init_x)\n\n \n if(init_y.strip()==''):\n init_y=None\n else:\n\n init_y=initial_value(init_y)\n \n\n global a_t,a_x,a_y, count\n a_t=Array('d', zeros(int((t_max-t_init)//(step_size*skip_size))+2))\n a_x=Array('d', zeros(int((t_max-t_init)//(step_size*skip_size))+2))\n a_y=Array('d', zeros(int((t_max-t_init)//(step_size*skip_size))+2))\n\n count=Value('i', 0)\n\n p=Process(target=d2_Euler_shared,args=(count,a_t,a_x,a_y,function_x,function_y,init_x,init_y,t_init,t_max,step_size,skip_size))\n\n\n\n p.daemon=True\n p.start()\n\n root.after(500,check_proc)\n\ndef check_proc():\n\n # if not main_pipe.poll():\n if p.is_alive():\n # print(count.value)\n prog['value']=count.value\n root.after(500,check_proc)\n\n else:\n\n global value_array\n value_array=[]\n value_array.append(numpy.frombuffer(a_t.get_obj()))\n value_array.append(numpy.frombuffer(a_x.get_obj()))\n value_array.append(numpy.frombuffer(a_y.get_obj()))\n\n # print()\n # for line in array([value_array[0],value_array[1],value_array[2]]).T:\n # print(line)\n # print()\n\n if(plot_type.get()==0):\n a.plot(value_array[1],value_array[2])\n elif(plot_type.get()==1):\n a.plot(value_array[0],value_array[1])\n a.plot(value_array[0],value_array[2])\n\n canvas.show()\n prog.stop()\n prog.grid_remove()\n\n #MOD\n cancel.grid_remove()\n\ndef save():\n try:\n value_array\n except NameError:\n messagebox.showerror(\"Can't save\",'The function values have not been calculated yet!')\n return\n try:\n with asksaveasfile(defaultextension='.csv',initialfile='data_file') as f:\n for point in array([value_array[0],value_array[1],value_array[2]]).T:\n f.write(str(point[0])+','+str(point[1])+','+str(point[2])+'\\n')\n except AttributeError:\n pass\n\ndef end_calc():\n global p\n p.terminate()\n\n prog.stop()\n prog.grid_remove()\n\n\nroot = Tk.Tk()\nroot.title(\"ODE Calc\")\n\nf = Figure(figsize=(6,5), dpi=100)\na = f.add_subplot(1,1,1)\n\ncanvas = FigureCanvasTkAgg(f, master=root)\ncanvas.show()\n\n\nTk.Label(root,text=' 
').grid(row=0,column=0,columnspan=6)\n\nTk.Label(root,text='Start:').grid(row=1,column=0,columnspan=1)\np_start=Tk.Entry(root,width=3)\np_start.grid(row=1,column=1,columnspan=1,sticky='ew')\n\nTk.Label(root,text='Step:').grid(row=1,column=2,columnspan=1)\np_step=Tk.Entry(root,width=6)\np_step.grid(row=1,column=3,columnspan=1,sticky='ew')\n\nTk.Label(root,text='Functions:').grid(row=1,column=4,columnspan=1)\np_func_x=Tk.Entry(root,width=25)\np_func_x.grid(row=1,column=5,columnspan=1,sticky='ew')\np_func_y=Tk.Entry(root,width=25)\np_func_y.grid(row=1,column=6,columnspan=1,sticky='ew')\n\nTk.Label(root,text='End:').grid(row=2,column=0,columnspan=1)\np_end=Tk.Entry(root,width=3)\np_end.grid(row=2,column=1,columnspan=1,sticky='ew')\n\nTk.Label(root,text='Skip').grid(row=2,column=2,columnspan=1)\np_skip=Tk.Entry(root,width=4)\np_skip.grid(row=2,column=3,columnspan=1,sticky='ew')\n\nTk.Label(root,text='Initial:').grid(row=2,column=4,columnspan=1)\np_init_x=Tk.Entry(root,width=30)\np_init_x.grid(row=2,column=5,columnspan=1,sticky='ew')\np_init_y=Tk.Entry(root,width=30)\np_init_y.grid(row=2,column=6,columnspan=1,sticky='ew')\n\n\ncalc=Tk.Button(root,text='Replace',command=d1_Euler_wrapper)\ncalc.grid(row=3,column=0,columnspan=2,sticky='ew')\n\ncalc=Tk.Button(root,text='Add',command=lambda:d1_Euler_wrapper(False))\ncalc.grid(row=3,column=2,columnspan=2,sticky='ew')\n\nsave=Tk.Button(root,text='Save Newest',command=save)\nsave.grid(row=3,column=4,columnspan=1,sticky='ew')\n\nprog=ttk.Progressbar(root,mode='determinate')\nprog.grid(row=3,column=5,columnspan=1,sticky='ew')\nprog.grid_remove()\n\n#MOD\ncancel=Tk.Button(root,text='Cancel',command=end_calc)\ncancel.grid(row=3,column=6,columnspan=1,sticky='ew')\ncancel.grid_remove()\n#end mod\n\nplot_type = Tk.IntVar()\n\nTk.Radiobutton(root, text=\"Parametric\", variable=plot_type, value=0).grid(row=1,column=7,columnspan=1,sticky='ew')\nTk.Radiobutton(root, text=\"Two Graph\", variable=plot_type, value=1).grid(row=2,column=7,columnspan=1,sticky='ew')\n\ncanvas.get_tk_widget().grid(row=4,column=0,columnspan=8,sticky='ewns')\n\np_func_x.insert(0,'t')\np_init_x.insert(0,'')\n\np_func_y.insert(0,'-50*y[0]*(1-2/(sqrt(9+y[0]^2)))')\np_init_y.insert(0,'2,0')\np_step.insert(0,'0.001')\np_skip.insert(0,'100')\np_start.insert(0,'0')\np_end.insert(0,'10')\n\nroot.grid_columnconfigure(1,weight=1)\nroot.grid_columnconfigure(3,weight=1)\nroot.grid_columnconfigure(5,weight=2)\nroot.grid_rowconfigure(4,weight=1)\n\n\n\nwhile 1:\n try:\n root.mainloop()\n exit()\n except UnicodeDecodeError:\n pass\n\n #Old unsafe parsers\n\n # function_str=''.join(filter(lambda x: x in \"1234567890xy[]+-*/().\", func))\n\n # function_str=string_sanitize(func)\n # def function(x, y):\n # d = eval(function_str)\n # return d\n\n\n"
] | [
[
"matplotlib.use",
"numpy.array",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"matplotlib.figure.Figure"
]
] |
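The ODE GUI above hands work to a child process and shares results through `multiprocessing.Array` buffers that the parent later views with `numpy.frombuffer`, polling a shared counter for progress. A compact sketch of that handshake without the Tk layer; the worker, its arguments, and the array size are illustrative.

```python
import numpy as np
from multiprocessing import Array, Process, Value

def worker(count, shared, n):
    buf = np.frombuffer(shared.get_obj())   # zero-copy float64 view over the shared doubles
    for i in range(n):
        buf[i] = i * i
        with count.get_lock():
            count.value = i + 1             # progress the parent can poll

if __name__ == "__main__":
    n = 10
    shared = Array('d', np.zeros(n))        # 'd' = C double
    count = Value('i', 0)
    p = Process(target=worker, args=(count, shared, n), daemon=True)
    p.start()
    p.join()
    print(count.value, np.frombuffer(shared.get_obj()))
```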
bywang2018/security-dataset | [
"4f153944f4fd218f42f9803186a16832a328ed4d"
] | [
"mmdet/models/necks/pafpn.py"
] | [
"import torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.core import auto_fp16\nfrom ..builder import NECKS\nfrom .fpn import FPN\n\n\[email protected]_module()\nclass PAFPN(FPN):\n \"\"\"Path Aggregation Network for Instance Segmentation.\n This is an implementation of the PAFPN in Path Aggregation Network\n (https://arxiv.org/abs/1803.01534).\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out_channels (int): Number of output channels (used at each scale)\n num_outs (int): Number of output scales.\n start_level (int): Index of the start input backbone level used to\n build the feature pyramid. Default: 0.\n end_level (int): Index of the end input backbone level (exclusive) to\n build the feature pyramid. Default: -1, which means the last level.\n add_extra_convs (bool): Whether to add conv layers on top of the\n original feature maps. Default: False.\n extra_convs_on_inputs (bool): Whether to apply extra conv on\n the original feature from the backbone. Default: False.\n relu_before_extra_convs (bool): Whether to apply relu before the extra\n conv. Default: False.\n no_norm_on_lateral (bool): Whether to apply norm on lateral.\n Default: False.\n conv_cfg (dict): Config dict for convolution layer. Default: None.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n act_cfg (str): Config dict for activation layer in ConvModule.\n Default: None.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n num_outs,\n start_level=0,\n end_level=-1,\n add_extra_convs=False,\n extra_convs_on_inputs=True,\n relu_before_extra_convs=False,\n no_norm_on_lateral=False,\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=None):\n super(PAFPN,\n self).__init__(in_channels, out_channels, num_outs, start_level,\n end_level, add_extra_convs, extra_convs_on_inputs,\n relu_before_extra_convs, no_norm_on_lateral,\n conv_cfg, norm_cfg, act_cfg)\n # add extra bottom up pathway\n self.downsample_convs = nn.ModuleList()\n self.pafpn_convs = nn.ModuleList()\n for i in range(self.start_level + 1, self.backbone_end_level):\n d_conv = ConvModule(\n out_channels,\n out_channels,\n 3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n inplace=False)\n pafpn_conv = ConvModule(\n out_channels,\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n inplace=False)\n self.downsample_convs.append(d_conv)\n self.pafpn_convs.append(pafpn_conv)\n\n @auto_fp16()\n def forward(self, inputs):\n assert len(inputs) == len(self.in_channels)\n\n # build laterals\n laterals = [\n lateral_conv(inputs[i + self.start_level])\n for i, lateral_conv in enumerate(self.lateral_convs)\n ]\n\n # build top-down path\n used_backbone_levels = len(laterals)\n for i in range(used_backbone_levels - 1, 0, -1):\n prev_shape = laterals[i - 1].shape[2:]\n laterals[i - 1] += F.interpolate(\n laterals[i], size=prev_shape, mode='nearest')\n\n # build outputs\n # part 1: from original levels\n inter_outs = [\n self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n ]\n\n # part 2: add bottom-up path\n for i in range(0, used_backbone_levels - 1):\n inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i])\n\n outs = []\n outs.append(inter_outs[0])\n outs.extend([\n self.pafpn_convs[i - 1](inter_outs[i])\n for i in range(1, used_backbone_levels)\n ])\n\n # part 3: add extra levels\n if self.num_outs > len(outs):\n # use max pool to get more levels on top of outputs\n # (e.g., 
Faster R-CNN, Mask R-CNN)\n if not self.add_extra_convs:\n for i in range(self.num_outs - used_backbone_levels):\n outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n # add conv layers on top of original feature maps (RetinaNet)\n else:\n if self.extra_convs_on_inputs:\n orig = inputs[self.backbone_end_level - 1]\n outs.append(self.fpn_convs[used_backbone_levels](orig))\n else:\n outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))\n for i in range(used_backbone_levels + 1, self.num_outs):\n if self.relu_before_extra_convs:\n outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n else:\n outs.append(self.fpn_convs[i](outs[-1]))\n return tuple(outs)"
] | [
[
"torch.nn.functional.relu",
"torch.nn.functional.interpolate",
"torch.nn.functional.max_pool2d",
"torch.nn.ModuleList"
]
] |
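The PAFPN neck above stacks a bottom-up aggregation pass on top of FPN's usual top-down pass. A bare-bones sketch of those two passes with plain PyTorch convolutions, leaving out mmdetection's `ConvModule`, the extra output levels, and the registry plumbing; channel counts and level count are made up.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyPAFPN(nn.Module):
    def __init__(self, in_channels=(64, 128, 256), out_channels=64):
        super().__init__()
        self.lateral = nn.ModuleList(nn.Conv2d(c, out_channels, 1) for c in in_channels)
        self.fpn = nn.ModuleList(nn.Conv2d(out_channels, out_channels, 3, padding=1)
                                 for _ in in_channels)
        self.down = nn.ModuleList(nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=1)
                                  for _ in in_channels[1:])
        self.pafpn = nn.ModuleList(nn.Conv2d(out_channels, out_channels, 3, padding=1)
                                   for _ in in_channels[1:])

    def forward(self, feats):
        laterals = [conv(x) for conv, x in zip(self.lateral, feats)]
        # top-down path: upsample the coarser level and add it to the finer one
        for i in range(len(laterals) - 1, 0, -1):
            laterals[i - 1] = laterals[i - 1] + F.interpolate(
                laterals[i], size=laterals[i - 1].shape[2:], mode='nearest')
        inter = [conv(x) for conv, x in zip(self.fpn, laterals)]
        # bottom-up path: downsample the finer level and add it to the coarser one
        for i in range(len(inter) - 1):
            inter[i + 1] = inter[i + 1] + self.down[i](inter[i])
        return [inter[0]] + [conv(x) for conv, x in zip(self.pafpn, inter[1:])]

feats = [torch.randn(1, 64, 32, 32), torch.randn(1, 128, 16, 16), torch.randn(1, 256, 8, 8)]
print([o.shape for o in TinyPAFPN()(feats)])
```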
mats-granath/EWD-QEC | [
"6ce9ac0940c18e2a63ec244cdd7b80e40b0c7073"
] | [
"src/mcmc.py"
] | [
"import numpy as np\nimport random as rand\nimport copy\nimport pandas as pd\n\nfrom numba import njit\n\nfrom src.xzzx_model import xzzx_code, _apply_random_stabilizer as apply_stabilizer_fast_xzzx\nfrom src.rotated_surface_model import RotSurCode, _apply_random_stabilizer as apply_stabilizer_fast_rotated\nfrom src.planar_model import Planar_code, _apply_random_stabilizer as apply_stabilizer_fast_planar, _count_errors_xyz\nfrom src.toric_model import Toric_code, _apply_random_stabilizer as apply_stabilizer_fast_toric\n\n\nclass Chain:\n def __init__(self, p, code):\n self.code = code\n self.p = p\n self.p_logical = 0\n self.flag = 0\n self.factor = ((self.p / 3.0) / (1.0 - self.p)) # rename me \n\n # runs iters number of steps of the metroplois-hastings algorithm\n def update_chain(self, iters):\n if self.p_logical != 0:\n for _ in range(iters):\n # apply logical or stabilizer with p_logical\n if rand.random() < self.p_logical:\n new_matrix, (dx, dy, dz) = self.code.apply_random_logical()\n else:\n new_matrix, (dx, dy, dz) = self.code.apply_random_stabilizer()\n\n qubit_errors_change = dx + dy + dz\n\n # Avoid calculating r if possible. If self.p is 0.75 r = 1 and we accept all changes\n # If the new qubit matrix has equal or fewer errors, r >= 1 and we also accept all changes\n if self.p >= 0.75 or qubit_errors_change <= 0:\n self.code.qubit_matrix = new_matrix\n continue\n # acceptence ratio\n if rand.random() < self.factor ** qubit_errors_change:\n self.code.qubit_matrix = new_matrix\n\n else:\n for _ in range(iters):\n new_matrix, (dx, dy, dz) = self.code.apply_random_stabilizer()\n\n qubit_errors_change = dx + dy + dz\n\n # acceptence ratio\n if rand.random() < self.factor ** qubit_errors_change:\n self.code.qubit_matrix = new_matrix\n\n def update_chain_fast(self, iters):\n if isinstance(self.code, xzzx_code):\n self.code.qubit_matrix = _update_chain_fast_xzzx(self.code.qubit_matrix, self.factor, iters)\n elif isinstance(self.code, RotSurCode):\n self.code.qubit_matrix = _update_chain_fast_rotated(self.code.qubit_matrix, self.factor, iters)\n elif isinstance(self.code, Planar_code):\n self.code.qubit_matrix = _update_chain_fast_planar(self.code.qubit_matrix, self.factor, iters)\n elif isinstance(self.code, Toric_code):\n self.code.qubit_matrix = _update_chain_fast_toric(self.code.qubit_matrix, self.factor, iters)\n else:\n raise ValueError(\"Fast chain updates not available for this code\")\n\n\nclass Ladder:\n def __init__(self, p_bottom, init_code, Nc, p_logical=0):\n # sampling probability of bottom chain\n self.p_bottom = p_bottom\n\n # seed code\n self.init_code = init_code\n\n # number of chains\n self.Nc = Nc\n\n # logical sampling rate in top chain\n self.p_logical = p_logical\n p_top = 0.75\n\n # temporary list of sampling probabilities\n p_ladder = np.linspace(p_bottom, p_top, Nc)\n self.p_ladder = p_ladder\n\n # list of relative probabilities\n self.p_diff = (p_ladder[:-1] * (1 - p_ladder[1:])) / (p_ladder[1:] * (1 - p_ladder[:-1]))\n\n # list of Chains of increasing p\n self.chains = [Chain(p, copy.deepcopy(init_code)) for p in p_ladder]\n\n # special properties of top chain\n self.chains[-1].flag = 1\n self.chains[-1].p_logical = p_logical\n\n # count of chains that have \"fallen all the way down\"\n self.tops0 = 0\n\n def update_ladder(self, iters):\n for chain in self.chains:\n chain.update_chain(iters)\n\n # returns true if flip should be performed\n def r_flip(self, ind_lo):\n # chain lengths\n ne_lo = self.chains[ind_lo].code.count_errors()\n ne_hi = 
self.chains[ind_lo + 1].code.count_errors()\n # relative probabilities between chains (except exponent)\n rel_p = self.p_diff[ind_lo]\n return _r_flip(ne_lo, ne_hi, rel_p)\n\n def step(self, iters):\n self.update_ladder(iters)\n for i in reversed(range(self.Nc - 1)):\n if self.r_flip(i):\n self.chains[i].code, self.chains[i + 1].code = self.chains[i + 1].code, self.chains[i].code\n self.chains[i].flag, self.chains[i + 1].flag = self.chains[i + 1].flag, self.chains[i].flag\n self.chains[-1].flag = 1\n if self.chains[0].flag == 1:\n self.tops0 += 1\n self.chains[0].flag = 0\n\n\nclass Chain_xyz:\n def __init__(self, p_xyz, code):\n self.code = code\n self.p_xyz = p_xyz\n self.factors = self.p_xyz / (1.0 - self.p_xyz.sum()) # rename me\n self.qubit_errors = code.count_errors_xyz()\n\n def update_chain_fast(self, iters):\n self.code.qubit_matrix, self.qubit_errors = _update_chain_fast_xyz(self.code.qubit_matrix, self.qubit_errors, self.factors, iters)\n\n\n# This is the object we crate to read a file during training\nclass MCMCDataReader:\n def __init__(self, file_path, size):\n # file_path needs to be dataframe in pickle format\n self.__file_path = file_path\n # size is the size of the toric code\n self.__size = size\n try:\n self.__df = pd.read_pickle(file_path)\n self.__capacity = self.__df.index[-1][0] + 1 # The number of data samples in the dataset\n except: # TODO fix exception\n print('No input file for MCMCDataReader')\n self.__current_index = 0\n\n def full(self):\n return self.__df.to_numpy().ravel()\n\n def has_next(self):\n return self.__current_index < self.__capacity\n\n def current_index(self):\n return self.__current_index\n\n def get_capacity(self):\n return self.__capacity\n\n\n@njit('(int64, int64, float64)')\ndef _r_flip(ne_lo, ne_hi, rel_p):\n if ne_hi < ne_lo:\n return True\n else:\n return rand.random() < rel_p ** (ne_hi - ne_lo)\n\n\n@njit(cache=True)\ndef _update_chain_fast_xzzx(qubit_matrix, factor, iters):\n for _ in range(iters):\n new_matrix, (dx, dy, dz) = apply_stabilizer_fast_xzzx(qubit_matrix)\n\n # acceptence ratio\n if rand.random() < factor ** (dx + dy + dz):\n qubit_matrix = new_matrix\n return qubit_matrix\n\n@njit(cache=True)\ndef _update_chain_fast_rotated(qubit_matrix, factor, iters):\n for _ in range(iters):\n new_matrix, (dx, dy, dz) = apply_stabilizer_fast_rotated(qubit_matrix)\n\n # acceptence ratio\n if rand.random() < factor ** (dx + dy + dz):\n qubit_matrix = new_matrix\n return qubit_matrix\n\n@njit(cache=True)\ndef _update_chain_fast_planar(qubit_matrix, factor, iters):\n for _ in range(iters):\n new_matrix, (dx, dy, dz) = apply_stabilizer_fast_planar(qubit_matrix)\n\n # acceptence ratio\n if rand.random() < factor ** (dx + dy + dz):\n qubit_matrix = new_matrix\n return qubit_matrix\n\n@njit(cache=True)\ndef _update_chain_fast_toric(qubit_matrix, factor, iters):\n for _ in range(iters):\n new_matrix, (dx, dy, dz) = apply_stabilizer_fast_toric(qubit_matrix)\n\n # acceptence ratio\n if rand.random() < factor ** (dx + dy + dz):\n qubit_matrix = new_matrix\n return qubit_matrix\n\n\n@njit(cache=True)\ndef _update_chain_fast_xyz(qubit_matrix, qubit_errors, factors, iters):\n for _ in range(iters):\n new_matrix, _ = _apply_random_stabilizer(qubit_matrix)\n qubit_errors_new = _count_errors_xyz(new_matrix)\n qubit_errors_change = qubit_errors_new - qubit_errors\n\n # acceptence ratio\n if rand.random() < (factors ** qubit_errors_change).prod():\n qubit_matrix = new_matrix\n qubit_errors = qubit_errors_new\n return qubit_matrix, qubit_errors\n"
] | [
[
"pandas.read_pickle",
"numpy.linspace"
]
] |
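The MCMC chains above accept a proposed stabilizer flip with probability `factor ** (change in error count)`, where `factor = (p/3)/(1-p)`, and always accept moves that do not increase the error count. That Metropolis rule in isolation, as a sketch; the toy `propose` callable stands in for the code's stabilizer routines and is purely illustrative.

```python
import random

def metropolis_step(n_errors, propose, factor):
    """Accept the proposed error count with probability min(1, factor ** delta)."""
    candidate = propose(n_errors)
    delta = candidate - n_errors
    if delta <= 0 or random.random() < factor ** delta:
        return candidate            # accepted
    return n_errors                 # rejected: keep the current state

p = 0.1
factor = (p / 3.0) / (1.0 - p)      # same per-error weight as the chains above
random.seed(0)
state = 20
for _ in range(1000):
    state = metropolis_step(state, lambda n: max(0, n + random.choice([-1, 1])), factor)
print(state)                        # drifts toward low error counts for small p
```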
Finqen/SAT_Solving_Problem_3 | [
"20362a5fc807b6283d7f07f3e90d1eed1998090c"
] | [
"plot-times.py"
] | [
"import pandas as pd\r\nimport seaborn as sns\r\nfrom matplotlib import pyplot as plt\r\n\r\ndf_times = pd.read_csv('cmake-build-debug/times.csv', index_col=0)\r\n\r\na = df_times.to_numpy()\r\n\r\na.sort(axis=1)\r\n\r\nfor y in range(len(a)):\r\n for x in range(len(a[0])):\r\n if x > 0:\r\n a[y, x] = a[y, x] + a[y, x - 1]\r\n\r\ndf_times.replace(a)\r\n\r\ndf_times = df_times.transpose() / 60000\r\n\r\nsns_plot_times = sns.lineplot(data=df_times, markers=True, dashes=False)\r\nsns_plot_times.set(xlabel='Problems solved', ylabel='Cumulative computation time [min]', xticks=[0,25,50,75,100,125,150])\r\n\r\nfiugre_times = sns_plot_times.get_figure()\r\n\r\nfiugre_times.savefig(\"cactus-times.png\")\r\n"
] | [
[
"pandas.read_csv"
]
] |
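The plotting script above sorts each solver's per-problem times and accumulates them with a manual double loop before drawing the cactus plot. The same transformation can be written with NumPy's `sort`/`cumsum`; a sketch on a tiny dummy table, since the real CSV schema is not shown in this row.

```python
import numpy as np
import pandas as pd

# dummy solve times in milliseconds: one row per solver, one column per problem
df_times = pd.DataFrame(
    {"p1": [1200, 900], "p2": [300, 4000], "p3": [2500, 100]},
    index=["solver_a", "solver_b"],
)

a = np.sort(df_times.to_numpy(), axis=1)        # easiest problems first, per solver
cumulative = np.cumsum(a, axis=1) / 60000.0     # cumulative minutes

cactus = pd.DataFrame(cumulative, index=df_times.index).transpose()
print(cactus)   # x-axis: number of problems solved, y-axis: cumulative minutes per solver
```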
fradetjulien/loan-handler | [
"18f6b0c784714d9fd568fa010a0452d5f15933ed"
] | [
"module/index.py"
] | [
"'''\nLoan Handler\n'''\nimport tkinter as tk\nimport tkinter.messagebox as message\nfrom tkinter import filedialog as fd\nimport pandas as pd\n\nFORM_FIELDS = ('First Name', 'Last Name', 'Social Security Number', 'Credit Score',\n 'Income', 'Requested Loan Amount', 'Home Value', 'Interest Rate')\n\nclass Application:\n '''\n Create the GUI application main window\n '''\n def __init__(self, root):\n self.root = root\n self.root.title('Loan Handler')\n self.entries = {}\n self.create_widgets()\n\n def create_widgets(self):\n '''\n Create all fields, buttons and radios buttons inside the window\n '''\n self.set_form()\n self.create_radio_buttons()\n self.create_buttons()\n\n def set_form(self):\n '''\n Construct the form\n '''\n for field in FORM_FIELDS:\n row = tk.Frame(self.root, bg='white')\n lab = tk.Label(row, width=22, text=field+\" : \", anchor='w')\n ent = tk.Entry(row)\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n self.entries[field] = ent\n\n def create_buttons(self):\n '''\n Create buttons and setup listener\n '''\n submit = tk.Button(self.root, highlightbackground='black', text='Submit',\n command=(lambda e=self.entries: self.submit_loan(e)))\n submit.pack(side=tk.LEFT, padx=5, pady=5)\n submit = tk.Button(self.root, highlightbackground='black', text='Import a CSV file',\n command=(lambda e=self.entries: self.import_file(e)))\n submit.pack(side=tk.LEFT, padx=5, pady=5)\n submit = tk.Button(self.root, highlightbackground='black', text='Clear fields',\n command=(lambda e=self.entries: self.clear_fields(self.entries)))\n submit.pack(side=tk.LEFT, padx=5, pady=5)\n submit = tk.Button(self.root, highlightbackground='black', text='Quit',\n command=(lambda e=self.entries: self.root.quit()))\n submit.pack(side=tk.LEFT, padx=5, pady=5)\n\n def create_radio_buttons(self):\n '''\n Radio buttons to decide the loan duration\n '''\n var = tk.IntVar()\n row = tk.Frame(self.root, bg='white')\n lab = tk.Label(row, width=22, text='Loan Duration : ', anchor='w')\n radiobutton_1 = tk.Radiobutton(self.root, variable=var, text=\"5 years\",\n value=5, background='black')\n radiobutton_2 = tk.Radiobutton(self.root, variable=var, text=\"15 years\",\n value=15, background='black')\n radiobutton_3 = tk.Radiobutton(self.root, variable=var, text=\"30 years\",\n value=30, background='black')\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n radiobutton_1.pack(anchor='w')\n radiobutton_2.pack(anchor='w')\n radiobutton_3.pack(anchor='w')\n self.entries['Loan Duration'] = var\n\n @staticmethod\n def clear_fields(entries):\n '''\n Clear every fields present in the window\n '''\n for field in FORM_FIELDS:\n entries[field].delete(0, 'end')\n\n @staticmethod\n def import_file(entries):\n '''\n Import a CSV file and filled in entries from values\n '''\n file_path = fd.askopenfilename(initialdir=\"/\", title=\"Select a file\",\n filetypes=((\"CSV Files\", \"*.csv\"),))\n df = pd.read_csv(file_path)\n i = 0\n for field in FORM_FIELDS:\n entries[field].delete(0, 'end')\n entries[field].insert(0, df['Value'][i])\n i = i + 1\n\n @staticmethod\n def submit_loan(entries):\n '''\n Manage each step to determine if the loan is accepted\n '''\n if not handle_fields(entries) or not correct_credit_score(entries):\n return False\n criterias = compute_criterias(entries)\n if not criterias:\n message.showinfo(\"Error\", \"Sorry, your request cannot be satisfied.\")\n return False\n if process_request(criterias, entries):\n 
message.showinfo(\"Success\", \"Congratulations, your loan has been accepted.\")\n display_informations(criterias, entries)\n message.showinfo(\"Average monthly payment\", \"The average monthly payment for\"\n \" the loan is equal to : ${}\"\\\n .format(compute_monthly_paiement(entries)))\n return True\n return False\n\ndef handle_fields(entries):\n '''\n Check that all fields have been filled in\n '''\n for field in FORM_FIELDS:\n if not entries[field].get():\n message.showinfo(\"Error\", \"Please, all fields are required.\")\n return False\n return True\n\ndef correct_credit_score(entries):\n '''\n Check if the Credit Score is a number between 0 and 850\n '''\n try:\n if int(entries['Credit Score'].get()) < 0 or int(entries[\"Credit Score\"].get()) > 850:\n message.showinfo(\"Error\", \"Your Credit Score is invalid.\")\n return False\n except ValueError as error:\n message.showinfo(\"Error\", \"Please, insert a number for the Credit Score.\")\n print(\"Error : {}\".format(error))\n return False\n return True\n\ndef compute_criterias(entries):\n '''\n With the recovered data, determine the values of the different criterias\n '''\n criterias = init_criterias(entries)\n try:\n criterias['annual_interest_payment'] = int(int(entries['Requested Loan Amount'].get())\\\n * float(entries['Interest Rate'].get()) / 100)\n criterias['interest_payment_to_income']['value'] = round(float(criterias['annual_interest_payment']\\\n / float(entries['Income'].get())), 3)\n criterias['loan_to_home']['value'] = round(float(int(entries['Requested Loan Amount'].get()))\\\n / int(entries['Home Value'].get()), 3)\n except ValueError as error:\n print('Error : {}'.format(error))\n return False\n return set_state(criterias)\n\ndef init_criterias(entries):\n '''\n Initialize a new dictionnary of criterias\n '''\n criterias = {\n \"annual_interest_payment\": None,\n \"interest_payment_to_income\": {\n \"value\": None,\n \"state\": \"Fail\"\n },\n \"loan_to_home\": {\n \"value\": None,\n \"state\": \"Fail\"\n },\n \"credit_score\": {\n \"value\": int(entries['Credit Score'].get()),\n \"state\": \"Fail\"\n },\n \"fail_number\": 3\n }\n return criterias\n\ndef set_state(criterias):\n '''\n Depending on values, determine if tests are passed or failed\n '''\n if criterias['interest_payment_to_income']['value'] <= 0.25:\n criterias['interest_payment_to_income']['state'] = \"Pass\"\n criterias['fail_number'] = criterias['fail_number'] - 1\n if criterias['loan_to_home']['value'] <= 0.8:\n criterias['loan_to_home']['state'] = \"Pass\"\n criterias['fail_number'] = criterias['fail_number'] - 1\n if criterias['credit_score']['value'] > 650:\n criterias['credit_score']['state'] = \"Pass\"\n criterias['fail_number'] = criterias['fail_number'] - 1\n return criterias\n\ndef process_request(criterias, entries):\n '''\n Verify that all criterias are respected to give the loan\n '''\n if criterias['fail_number'] > 0:\n message.showinfo(\"Error\", \"Sorry, your request cannot be satisfied.\")\n display_informations(criterias, entries)\n return False\n return True\n\ndef display_informations(criterias, entries):\n '''\n Display a summary of all informations\n '''\n message.showinfo(\"Informations\", \"First Name : {}\\nLast Name : {}\\n\"\n \"Income : ${}\\nSocial Security Number : {}\\n\"\n \"Requested Loan Amount : ${}\\nInterest Rate : {}%\\nHome Value : ${}\\n\"\n \"Annual Interest Payment : $ {}\\n\"\n \"Interest Payment to income : {}% - State : {}\\n\"\n \"Loan to Home : {}% - State : {}\\n\"\n \"Credit Score : {}\"\n \" - State : 
{}\\n\".format(entries['First Name'].get(),\n entries['Last Name'].get(),\n entries['Income'].get(),\n entries['Social Security Number'].get(),\n entries['Requested Loan Amount'].get(),\n entries['Interest Rate'].get(),\n entries['Home Value'].get(),\n criterias['annual_interest_payment'],\n criterias['interest_payment_to_income']['value'] * 100,\n criterias['interest_payment_to_income']['state'],\n criterias['loan_to_home']['value'] * 100,\n criterias['loan_to_home']['state'],\n criterias['credit_score']['value'],\n criterias['credit_score']['state']))\n\ndef compute_monthly_paiement(entries):\n '''\n Compute the average monthly payment for the loan\n '''\n try:\n monthly_paiements = round(int(entries['Requested Loan Amount'].get())\\\n * float(entries['Interest Rate'].get()) \\\n * ((1 + float(entries['Interest Rate'].get())\\\n **(int(entries['Loan Duration'].get()) * 12)\\\n / ((1 + float(entries['Interest Rate'].get()))\\\n **(int(entries['Loan Duration'].get()) * 12) - 1))), 2)\n except ValueError:\n monthly_paiements = None\n print(\"Sorry, we were unable to compute your average monthly payment.\")\n return monthly_paiements\n\nif __name__ == '__main__':\n ROOT = tk.Tk()\n APP = Application(ROOT)\n ROOT.mainloop()\n"
] | [
[
"pandas.read_csv"
]
] |
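The loan handler above reports an average monthly payment derived from the amount, rate, and duration. For reference, a sketch of the standard fixed-rate amortization formula, M = P * r * (1+r)^n / ((1+r)^n - 1) with monthly rate r and n monthly payments; it is not claimed to reproduce the exact expression in `compute_monthly_paiement`.

```python
def monthly_payment(principal, annual_rate_percent, years):
    """Standard amortization: M = P * r * (1+r)**n / ((1+r)**n - 1)."""
    r = annual_rate_percent / 100.0 / 12.0   # monthly interest rate
    n = years * 12                           # number of monthly payments
    if r == 0:
        return round(principal / n, 2)
    growth = (1.0 + r) ** n
    return round(principal * r * growth / (growth - 1.0), 2)

print(monthly_payment(200000, 4.0, 30))  # about 954.83 per month
```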
mworion/MountWizzard4 | [
"4e06b29ec2ef70be40e114b911b7bdf2f858a4b1"
] | [
"tests/unit_tests/gui/utilities/test_toolsQtWidget.py"
] | [
"############################################################\n# -*- coding: utf-8 -*-\n#\n# # # # # # #\n# ## ## # ## # #\n# # # # # # # # # # #\n# # ## # ## ## ######\n# # # # # # #\n#\n# Python-based Tool for interaction with the 10micron mounts\n# GUI with PyQT5 for python\n#\n# written in python3, (c) 2019-2021 by mworion\n#\n# Licence APL2.0\n#\n###########################################################\n# standard libraries\nimport unittest.mock as mock\nimport pytest\nimport platform\nimport os\nimport math\n\n# external packages\nfrom PyQt5.QtWidgets import QMessageBox, QFileDialog, QWidget, QStyle\nfrom PyQt5.QtWidgets import QPushButton, QComboBox, QTableWidgetItem\nfrom PyQt5.QtCore import pyqtSignal, QObject, QEvent\nfrom skyfield.api import Angle\nimport numpy as np\n\n# local import\nfrom gui.utilities.toolsQtWidget import MWidget\nfrom gui.utilities.toolsQtWidget import FileSortProxyModel\nfrom gui.utilities.toolsQtWidget import QMultiWait, QCustomTableWidgetItem\n\n\[email protected](autouse=True, scope='module')\ndef module(qapp):\n yield\n\n\[email protected](autouse=True, scope='function')\ndef function(module):\n\n window = MWidget()\n yield window\n\n\ndef test_FileSortProxyModel_1():\n f = FileSortProxyModel()\n f.sort()\n\n\ndef test_QMultiWait_1():\n class Test(QObject):\n a = pyqtSignal()\n w = QMultiWait()\n A = Test()\n w.addWaitableSignal(A.a)\n\n\ndef test_QMultiWait_2():\n class Test(QObject):\n a = pyqtSignal()\n w = QMultiWait()\n A = Test()\n w.addWaitableSignal(A.a)\n w.checkSignal()\n\n\ndef test_QMultiWait_3():\n w = QMultiWait()\n w.resetSignals()\n\n\ndef test_QMultiWait_4():\n class Test(QObject):\n a = pyqtSignal()\n w = QMultiWait()\n A = Test()\n w.addWaitableSignal(A.a)\n w.clear()\n\n\ndef test_QCustomTableWidgetItem_1():\n i1 = QCustomTableWidgetItem('')\n i2 = QCustomTableWidgetItem('')\n assert not (i1 < i2)\n\n\ndef test_QCustomTableWidgetItem_2():\n i1 = QCustomTableWidgetItem('-2.0')\n i2 = QCustomTableWidgetItem('')\n assert i1 < i2\n\n\ndef test_QCustomTableWidgetItem_3():\n i1 = QCustomTableWidgetItem('-2.0')\n i2 = QCustomTableWidgetItem('5')\n assert i1 < i2\n\n\ndef test_QCustomTableWidgetItem_4():\n i1 = QCustomTableWidgetItem('-2.0')\n i2 = QTableWidgetItem('5')\n assert i1 < i2\n\n\ndef test_FileSortProxyModel_1():\n w = QWidget()\n dialog = QFileDialog()\n dialog.setProxyModel(FileSortProxyModel(w))\n\n\ndef test_findIndexValue_0(function):\n ui = QComboBox()\n ui.addItem('')\n val = function.findIndexValue(ui=ui,\n searchString='dome')\n assert val == 0\n\n\ndef test_findIndexValue_1(function):\n ui = QComboBox()\n ui.addItem('dome')\n ui.addItem('test')\n val = function.findIndexValue(ui=ui,\n searchString='dome')\n assert val == 0\n\n\ndef test_findIndexValue_2(function):\n ui = QComboBox()\n ui.addItem('dome')\n ui.addItem('indi')\n val = function.findIndexValue(ui=ui,\n searchString='indi')\n assert val == 1\n\n\ndef test_findIndexValue_3(function):\n ui = QComboBox()\n ui.addItem('dome')\n ui.addItem('test')\n ui.addItem('indi - test')\n val = function.findIndexValue(ui=ui,\n searchString='indi')\n assert val == 2\n\n\ndef test_findIndexValue_4(function):\n ui = QComboBox()\n ui.addItem('dome')\n ui.addItem('test')\n ui.addItem('indi - test')\n val = function.findIndexValue(ui=ui,\n searchString='indi',\n relaxed=True)\n assert val == 2\n\n\ndef test_findIndexValue_5(function):\n ui = QComboBox()\n val = function.findIndexValue(ui=ui,\n searchString='indi')\n assert val == 0\n\n\ndef test_wIcon_1(function):\n suc = 
function.wIcon()\n assert not suc\n\n\ndef test_wIcon_2(function):\n icon = QStyle.SP_DialogApplyButton\n suc = function.wIcon()\n assert not suc\n\n\ndef test_wIcon_3(function):\n ui = QPushButton()\n suc = function.wIcon(gui=ui)\n assert not suc\n\n\ndef test_wIcon_4(function):\n icon = QStyle.SP_DialogApplyButton\n ui = QPushButton()\n suc = function.wIcon(gui=ui, name='load')\n assert suc\n\n\ndef test_getStyle_1(function):\n with mock.patch.object(platform,\n 'system',\n return_value='Darwin'):\n ret = function.getStyle()\n assert ret == function.MAC_STYLE + function.BASIC_STYLE\n\n\ndef test_getStyle_2(function):\n with mock.patch.object(platform,\n 'system',\n return_value='Windows'):\n ret = function.getStyle()\n assert ret == function.NON_MAC_STYLE + function.BASIC_STYLE\n\n\ndef test_initUI_1(function):\n suc = function.initUI()\n assert suc\n\n\ndef test_changeStyleDynamic_1(function):\n suc = function.changeStyleDynamic()\n assert not suc\n\n\ndef test_changeStyleDynamic_2(function):\n ui = QPushButton()\n suc = function.changeStyleDynamic(ui)\n assert not suc\n\n\ndef test_changeStyleDynamic_3(function):\n ui = QPushButton()\n suc = function.changeStyleDynamic(ui, 'color')\n assert not suc\n\n\ndef test_changeStyleDynamic_4(function):\n ui = QPushButton()\n suc = function.changeStyleDynamic(ui, 'color', 'red')\n assert suc\n\n\ndef test_changeStyleDynamic_5(function):\n ui = QPushButton()\n ui.setProperty('color', 'red')\n suc = function.changeStyleDynamic(ui, 'color', 'red')\n assert suc\n\n\ndef test_extractNames_0(function):\n name = ''\n name, short, ext = function.extractNames(name)\n assert name == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_extractNames_1(function):\n name = 1\n name, short, ext = function.extractNames(name)\n assert name == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_extractNames_2(function):\n name = ['test']\n name, short, ext = function.extractNames(name)\n assert name == os.path.abspath(os.getcwd() + '/test')\n assert short == 'test'\n assert ext == ''\n\n\ndef test_extractNames_3(function):\n name = ['c:/test']\n name, short, ext = function.extractNames(name)\n assert name == os.path.abspath('c:/test')\n assert short == 'test'\n assert ext == ''\n\n\ndef test_extractNames_4(function):\n name = ['c:/test.cfg']\n name, short, ext = function.extractNames(name)\n assert name == os.path.abspath('c:/test.cfg')\n assert short == 'test'\n assert ext == '.cfg'\n\n\ndef test_extractNames_5(function):\n name = ['c:/test.cfg', 'c:/test.cfg']\n name, short, ext = function.extractNames(name)\n assert name == [os.path.abspath('c:/test.cfg'),\n os.path.abspath('c:/test.cfg')]\n assert short == ['test', 'test']\n assert ext == ['.cfg', '.cfg']\n\n\ndef test_extractNames_6(function):\n name = ['', 'c:/test.cfg']\n name, short, ext = function.extractNames(name)\n assert name == [os.path.abspath(''),\n os.path.abspath('c:/test.cfg')]\n assert short == ['', 'test']\n assert ext == ['', '.cfg']\n\n\ndef test_prepareFileDialog_1(function):\n suc = function.prepareFileDialog()\n assert not suc\n\n\ndef test_prepareFileDialog_2(function):\n window = QWidget()\n suc = function.prepareFileDialog(window=window)\n assert suc\n\n\ndef test_prepareFileDialog_3(function):\n window = QWidget()\n suc = function.prepareFileDialog(window=window, enableDir=True, reverseOrder=True)\n assert suc\n\n\ndef test_runDialog_1(function):\n dialog = QFileDialog()\n with mock.patch.object(QFileDialog,\n 'exec_',\n return_value=0):\n val = function.runDialog(dialog)\n 
assert val == 0\n\n\ndef test_messageDialog_1(function):\n widget = QWidget()\n with mock.patch.object(QMessageBox,\n 'question',\n return_value=QMessageBox.No):\n with mock.patch.object(QMessageBox,\n 'show'):\n with mock.patch.object(function,\n 'runDialog',\n return_value=QMessageBox.No):\n suc = function.messageDialog(widget, 'test', 'test')\n assert not suc\n\n\ndef test_messageDialog_2(function):\n widget = QWidget()\n with mock.patch.object(QMessageBox,\n 'question',\n return_value=QMessageBox.Yes):\n with mock.patch.object(QMessageBox,\n 'show'):\n with mock.patch.object(function,\n 'runDialog',\n return_value=QMessageBox.Yes):\n suc = function.messageDialog(widget, 'test', 'test')\n assert suc\n\n\ndef test_openFile_1(function):\n full, short, ext = function.openFile()\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_openFile_2(function):\n window = QWidget()\n full, short, ext = function.openFile(window=window)\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_openFile_3(function):\n window = QWidget()\n full, short, ext = function.openFile(window=window,\n title='title')\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_openFile_4(function):\n window = QWidget()\n full, short, ext = function.openFile(window=window,\n title='title',\n folder='.')\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_openFile_5(function):\n window = QWidget()\n with mock.patch.object(function,\n 'runDialog',\n return_value=0):\n full, short, ext = function.openFile(window=window,\n title='title',\n folder='.',\n filterSet='*.*')\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_openFile_6(function):\n window = QWidget()\n with mock.patch.object(function,\n 'runDialog',\n return_value=1):\n with mock.patch.object(QFileDialog,\n 'selectedFiles',\n return_value=('test1', 'test2')):\n full, short, ext = function.openFile(window=window,\n title='title',\n folder='.',\n filterSet='*.*',\n multiple=True)\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_saveFile_1(function):\n full, short, ext = function.saveFile()\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_saveFile_2(function):\n window = QWidget()\n full, short, ext = function.saveFile(window=window)\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_saveFile_3(function):\n window = QWidget()\n full, short, ext = function.saveFile(window=window,\n title='title')\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_saveFile_4(function):\n window = QWidget()\n full, short, ext = function.saveFile(window=window,\n title='title',\n folder='.')\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_saveFile_5(function):\n window = QWidget()\n with mock.patch.object(function,\n 'runDialog',\n return_value=0):\n full, short, ext = function.saveFile(window=window,\n title='title',\n folder='.',\n filterSet='*.*')\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_saveFile_6(function):\n window = QWidget()\n with mock.patch.object(function,\n 'runDialog',\n return_value=1):\n with mock.patch.object(QFileDialog,\n 'selectedFiles',\n return_value=(['tests/test.txt'])):\n full, short, ext = function.saveFile(window=window,\n title='title',\n folder='.',\n filterSet='*.*')\n assert short == 'test'\n assert ext == '.txt'\n\n\ndef test_openDir_1(function):\n full, short, ext = function.openDir()\n assert full == 
''\n assert short == ''\n assert ext == ''\n\n\ndef test_openDir_2(function):\n window = QWidget()\n full, short, ext = function.openDir(window=window)\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_openDir_3(function):\n window = QWidget()\n full, short, ext = function.openDir(window=window,\n title='title')\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_openDir_4(function):\n window = QWidget()\n with mock.patch.object(function,\n 'runDialog',\n return_value=1):\n full, short, ext = function.openDir(window=window,\n title='title',\n folder='.')\n assert full == os.getcwd()\n\n\ndef test_openDir_5(function):\n window = QWidget()\n with mock.patch.object(function,\n 'runDialog',\n return_value=None):\n full, short, ext = function.openDir(window=window,\n title='title',\n folder='.')\n assert full == ''\n assert short == ''\n assert ext == ''\n\n\ndef test_clickable_1(function):\n suc = function.clickable()\n assert not suc\n\n\ndef test_clickable_2(function):\n event = QEvent(QEvent.MouseButtonRelease)\n widget = QPushButton()\n suc = function.clickable(widget=widget)\n assert suc\n suc = widget.eventFilter(widget, event)\n assert not suc\n\n\ndef test_guiSetText_1(function):\n suc = function.guiSetText(None, None)\n assert not suc\n\n\ndef test_guiSetText_2(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, None)\n assert not suc\n\n\ndef test_guiSetText_3(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, '3.5f')\n assert suc\n assert pb.text() == '-'\n\n\ndef test_guiSetText_3b(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, '3.5f', [])\n assert suc\n\n\ndef test_guiSetText_3c(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, '3.5f', np.array([]))\n assert suc\n\n\ndef test_guiSetText_4(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, '3.0f', 100)\n assert suc\n assert pb.text() == '100'\n\n\ndef test_guiSetText_5(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, 'HSTR', Angle(hours=10))\n assert suc\n assert pb.text() == '10 00 00'\n\n\ndef test_guiSetText_6(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, 'DSTR', Angle(degrees=90))\n assert suc\n assert pb.text() == '+90 00 00'\n\n\ndef test_guiSetText_7(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, 'H2.2f', Angle(hours=12))\n assert suc\n assert pb.text() == '12.00'\n\n\ndef test_guiSetText_8(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, 'D+2.2f', Angle(degrees=90))\n assert suc\n assert pb.text() == '+90.00'\n\n\ndef test_guiSetText_9(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, 's', 'E')\n assert suc\n assert pb.text() == 'EAST'\n\n\ndef test_guiSetText_10(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, 's', 'W')\n assert suc\n assert pb.text() == 'WEST'\n\n\ndef test_guiSetText_11(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, 's', True)\n assert suc\n assert pb.text() == 'ON'\n\n\ndef test_guiSetText_12(function):\n pb = QPushButton()\n suc = function.guiSetText(pb, 's', False)\n assert suc\n assert pb.text() == 'OFF'\n\n\ndef test_guiSetStyle_1(function):\n pb = QPushButton()\n suc = function.guiSetStyle(pb)\n assert not suc\n\n\ndef test_guiSetStyle_2(function):\n pb = QPushButton()\n suc = function.guiSetStyle(pb, pStyle='color', value=None)\n assert suc\n\n\ndef test_guiSetStyle_3(function):\n pb = QPushButton()\n suc = function.guiSetStyle(pb, pStyle='color', value=True)\n assert 
suc\n\n\ndef test_guiSetStyle_4(function):\n pb = QPushButton()\n suc = function.guiSetStyle(pb, pStyle='color', value=False)\n assert suc\n\n\ndef test_returnDriver_1(function):\n sender = QWidget()\n searchDict = {}\n driver = function.returnDriver(sender, searchDict)\n assert driver == ''\n\n\ndef test_returnDriver_2(function):\n sender = QWidget()\n searchDict = {}\n driver = function.returnDriver(sender, searchDict, addKey='test')\n assert driver == ''\n"
] | [
[
"numpy.array"
]
] |
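The row that closes above pairs a Qt test module (its code field) with a single extracted API name, "numpy.array", in its apis field. Purely as a hedged illustration of how such an apis entry could be derived from a row's code field, the sketch below uses Python's standard-library ast module to collect calls made through imported-module aliases. The function name, the alias-tracking heuristic, and the example input are assumptions made for this note only; they are not part of the dataset or of either repository shown here.

import ast

def extract_module_calls(source: str) -> set:
    """Collect dotted call names such as 'numpy.array' for calls made
    through imported-module aliases (e.g. `import numpy as np`).
    Hypothetical helper for illustration; not from the dataset."""
    tree = ast.parse(source)
    aliases = {}  # local name -> imported module, e.g. 'np' -> 'numpy'
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                aliases[alias.asname or alias.name] = alias.name
    found = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            base = node.func.value
            if isinstance(base, ast.Name) and base.id in aliases:
                found.add('{}.{}'.format(aliases[base.id], node.func.attr))
    return found

# The test file above calls np.array([]) once, so this heuristic recovers
# exactly the entry listed in the row's apis column:
print(extract_module_calls("import numpy as np\nsuc = np.array([])\n"))
# -> {'numpy.array'}

A real extraction pipeline would also need to resolve `from ... import ...` names and chained attributes; the sketch deliberately keeps to the simplest case that reproduces the ["numpy.array"] entry recorded above.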
labscript-suite-temp-2/runmanager | [
"e5fc09947552a549ecb0d964d86c9b923687f3e9"
] | [
"__main__.py"
] | [
"#####################################################################\n# #\n# __main__.py #\n# #\n# Copyright 2013, Monash University #\n# #\n# This file is part of the program runmanager, in the labscript #\n# suite (see http://labscriptsuite.org), and is licensed under the #\n# Simplified BSD License. See the license.txt file in the root of #\n# the project for the full license. #\n# #\n#####################################################################\nfrom __future__ import division, unicode_literals, print_function, absolute_import\nfrom labscript_utils import PY2\nif PY2:\n str = unicode\n import Queue as queue\nelse:\n import queue\n\nimport os\nimport sys\nimport labscript_utils.excepthook\n\ntry:\n from labscript_utils import check_version\nexcept ImportError:\n raise ImportError('Require labscript_utils > 2.1.0')\n\ncheck_version('labscript_utils', '2.10.0', '3')\n# Splash screen\nfrom labscript_utils.splash import Splash\nsplash = Splash(os.path.join(os.path.dirname(__file__), 'runmanager.svg'))\nsplash.show()\n\nsplash.update_text('importing standard library modules')\nimport time\nimport contextlib\nimport subprocess\nimport threading\nimport ast\nimport pprint\nimport traceback\n\nsplash.update_text('importing matplotlib')\n# Evaluation of globals happens in a thread with the pylab module imported.\n# Although we don't care about plotting, importing pylab makes Qt calls. We\n# can't have that from a non main thread, so we'll just disable matplotlib's\n# GUI integration:\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport signal\n# Quit on ctrl-c\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\nsplash.update_text('importing Qt')\ncheck_version('qtutils', '2.2.2', '3.0.0')\n\nsplash.update_text('importing pandas')\ncheck_version('pandas', '0.13', '2')\n\nfrom qtutils.qt import QtCore, QtGui, QtWidgets\nfrom qtutils.qt.QtCore import pyqtSignal as Signal\n\nsplash.update_text('importing labscript suite modules')\ncheck_version('labscript_utils', '2.11.0', '3')\nfrom labscript_utils.ls_zprocess import zmq_get, ProcessTree, ZMQServer\nfrom labscript_utils.labconfig import LabConfig\nfrom labscript_utils.setup_logging import setup_logging\nimport labscript_utils.shared_drive as shared_drive\nfrom labscript_utils import dedent\nfrom zprocess import raise_exception_in_thread\nimport runmanager\nimport runmanager.remote\n\nfrom qtutils import (\n inmain,\n inmain_decorator,\n UiLoader,\n inthread,\n DisconnectContextManager,\n qtlock,\n)\nfrom labscript_utils.qtwidgets.outputbox import OutputBox\nimport qtutils.icons\n\nGLOBAL_MONOSPACE_FONT = \"Consolas\" if os.name == 'nt' else \"Ubuntu Mono\"\n\n# Set working directory to runmanager folder, resolving symlinks\nrunmanager_dir = os.path.dirname(os.path.realpath(__file__))\nos.chdir(runmanager_dir)\n\nprocess_tree = ProcessTree.instance()\n\n# Set a meaningful name for zprocess.locking's client id:\nprocess_tree.zlock_client.set_process_name('runmanager')\n\n\ndef log_if_global(g, g_list, message):\n \"\"\"logs a message if the global name \"g\" is in \"g_list\"\n \n useful if you want to print out a message inside a loop over globals,\n but only for a particular global (or set of globals).\n \n If g_list is empty, then it will use the hardcoded list below\n (useful if you want to change the behaviour globally) \n \"\"\"\n if not isinstance(g_list, list):\n g_list = [g_list]\n \n if not g_list:\n g_list = [] # add global options here\n \n if g in g_list:\n logger.info(message)\n\n \ndef composite_colors(r0, g0, b0, a0, r1, g1, b1, 
a1):\n \"\"\"composite a second colour over a first with given alpha values and return the\n result\"\"\"\n a0 /= 255\n a1 /= 255\n a = a0 + a1 - a0 * a1\n r = (a1 * r1 + (1 - a1) * a0 * r0) / a\n g = (a1 * g1 + (1 - a1) * a0 * g0) / a\n b = (a1 * b1 + (1 - a1) * a0 * b0) / a\n return [int(round(x)) for x in (r, g, b, 255 * a)]\n\n\ndef set_win_appusermodel(window_id):\n from labscript_utils.winshell import set_appusermodel, appids, app_descriptions\n icon_path = os.path.abspath('runmanager.ico')\n executable = sys.executable.lower()\n if not executable.endswith('w.exe'):\n executable = executable.replace('.exe', 'w.exe')\n relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))\n relaunch_display_name = app_descriptions['runmanager']\n set_appusermodel(window_id, appids['runmanager'], icon_path, relaunch_command, relaunch_display_name)\n\n\n@inmain_decorator()\ndef error_dialog(message):\n QtWidgets.QMessageBox.warning(app.ui, 'runmanager', message)\n\n\n@inmain_decorator()\ndef question_dialog(message):\n reply = QtWidgets.QMessageBox.question(app.ui, 'runmanager', message,\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n return (reply == QtWidgets.QMessageBox.Yes)\n\n\[email protected]\ndef nested(*contextmanagers):\n if contextmanagers:\n with contextmanagers[0]:\n with nested(*contextmanagers[1:]):\n yield\n else:\n yield\n\n\ndef scroll_view_to_row_if_current(view, item):\n \"\"\"Checks to see if the item is in the row of the current item. If it is, scrolls\n the treeview/tableview vertically to ensure that row is visible. This is done by\n recording the horizontal scroll position, then using view.scrollTo(), and then\n restoring the horizontal position\"\"\"\n horizontal_scrollbar = view.horizontalScrollBar()\n existing_horizontal_position = horizontal_scrollbar.value()\n index = item.index()\n current_row = view.currentIndex().row()\n if index.row() == current_row:\n view.scrollTo(index)\n horizontal_scrollbar.setValue(existing_horizontal_position)\n\n\nclass FingerTabBarWidget(QtWidgets.QTabBar):\n\n \"\"\"A TabBar with the tabs on the left and the text horizontal. Credit to\n @LegoStormtroopr, https://gist.github.com/LegoStormtroopr/5075267. 
We will\n promote the TabBar from the ui file to one of these.\"\"\"\n\n def __init__(self, parent=None, minwidth=180, minheight=30, **kwargs):\n QtWidgets.QTabBar.__init__(self, parent, **kwargs)\n self.minwidth = minwidth\n self.minheight = minheight\n self.iconPosition = kwargs.pop('iconPosition', QtWidgets.QTabWidget.West)\n self._movable = None\n self.tab_movable = {}\n self.paint_clip = None\n\n def setMovable(self, movable, index=None):\n \"\"\"Set tabs movable on an individual basis, or set for all tabs if no\n index specified\"\"\"\n if index is None:\n self._movable = movable\n self.tab_movable = {}\n QtWidgets.QTabBar.setMovable(self, movable)\n else:\n self.tab_movable[int(index)] = bool(movable)\n\n def isMovable(self, index=None):\n if index is None:\n if self._movable is None:\n self._movable = QtWidgets.QTabBar.isMovable(self)\n return self._movable\n return self.tab_movable.get(index, self._movable)\n\n def indexAtPos(self, point):\n for index in range(self.count()):\n if self.tabRect(index).contains(point):\n return index\n\n def mousePressEvent(self, event):\n index = self.indexAtPos(event.pos())\n if not self.tab_movable.get(index, self.isMovable()):\n QtWidgets.QTabBar.setMovable(self, False) # disable dragging until they release the mouse\n return QtWidgets.QTabBar.mousePressEvent(self, event)\n\n def mouseReleaseEvent(self, event):\n if self.isMovable():\n # Restore this in case it was temporarily disabled by mousePressEvent\n QtWidgets.QTabBar.setMovable(self, True)\n return QtWidgets.QTabBar.mouseReleaseEvent(self, event)\n\n def tabLayoutChange(self):\n total_height = 0\n for index in range(self.count()):\n tabRect = self.tabRect(index)\n total_height += tabRect.height()\n if total_height > self.parent().height():\n # Don't paint over the top of the scroll buttons:\n scroll_buttons_area_height = 2*max(self.style().pixelMetric(QtWidgets.QStyle.PM_TabBarScrollButtonWidth),\n qapplication.globalStrut().width())\n self.paint_clip = self.width(), self.parent().height() - scroll_buttons_area_height\n else:\n self.paint_clip = None\n\n def paintEvent(self, event):\n painter = QtWidgets.QStylePainter(self)\n if self.paint_clip is not None:\n painter.setClipRect(0, 0, *self.paint_clip)\n\n option = QtWidgets.QStyleOptionTab()\n for index in range(self.count()):\n tabRect = self.tabRect(index)\n self.initStyleOption(option, index)\n painter.drawControl(QtWidgets.QStyle.CE_TabBarTabShape, option)\n if not self.tabIcon(index).isNull():\n icon = self.tabIcon(index).pixmap(self.iconSize())\n alignment = QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter\n tabRect.moveLeft(10)\n painter.drawItemPixmap(tabRect, alignment, icon)\n tabRect.moveLeft(self.iconSize().width() + 15)\n else:\n tabRect.moveLeft(10)\n painter.drawText(tabRect, QtCore.Qt.AlignVCenter, self.tabText(index))\n if self.paint_clip is not None:\n x_clip, y_clip = self.paint_clip\n painter.setClipping(False)\n palette = self.palette()\n mid_color = palette.color(QtGui.QPalette.Mid)\n painter.setPen(mid_color)\n painter.drawLine(0, y_clip, x_clip, y_clip)\n painter.end()\n\n\n def tabSizeHint(self, index):\n fontmetrics = QtGui.QFontMetrics(self.font())\n text_width = fontmetrics.width(self.tabText(index))\n text_height = fontmetrics.height()\n height = text_height + 15\n height = max(self.minheight, height)\n width = text_width + 15\n\n button = self.tabButton(index, QtWidgets.QTabBar.RightSide)\n if button is not None:\n height = max(height, button.height() + 7)\n # Same amount of space around the button horizontally 
as it has vertically:\n width += button.width() + height - button.height()\n width = max(self.minwidth, width)\n return QtCore.QSize(width, height)\n\n def setTabButton(self, index, geometry, button):\n if not isinstance(button, TabToolButton):\n raise TypeError('Not a TabToolButton, won\\'t paint correctly. Use a TabToolButton')\n result = QtWidgets.QTabBar.setTabButton(self, index, geometry, button)\n button.move(*button.get_correct_position())\n return result\n\n\nclass TabToolButton(QtWidgets.QToolButton):\n def __init__(self, *args, **kwargs):\n QtWidgets.QToolButton.__init__(self, *args, **kwargs)\n self.setFocusPolicy(QtCore.Qt.NoFocus)\n\n def paintEvent(self, event):\n painter = QtWidgets.QStylePainter(self)\n paint_clip = self.parent().paint_clip\n if paint_clip is not None:\n point = QtCore.QPoint(*paint_clip)\n global_point = self.parent().mapToGlobal(point)\n local_point = self.mapFromGlobal(global_point)\n painter.setClipRect(0, 0, local_point.x(), local_point.y())\n option = QtWidgets.QStyleOptionToolButton()\n self.initStyleOption(option)\n painter.drawComplexControl(QtWidgets.QStyle.CC_ToolButton, option)\n\n def get_correct_position(self):\n parent = self.parent()\n for index in range(parent.count()):\n if parent.tabButton(index, QtWidgets.QTabBar.RightSide) is self:\n break\n else:\n raise LookupError('Tab not found')\n tabRect = parent.tabRect(index)\n tab_x, tab_y, tab_width, tab_height = tabRect.x(), tabRect.y(), tabRect.width(), tabRect.height()\n size = self.sizeHint()\n width = size.width()\n height = size.height()\n padding = int((tab_height - height) / 2)\n correct_x = tab_x + tab_width - width - padding\n correct_y = tab_y + padding\n return correct_x, correct_y\n\n def moveEvent(self, event):\n try:\n correct_x, correct_y = self.get_correct_position()\n except LookupError:\n return # Things aren't initialised yet\n if self.x() != correct_x or self.y() != correct_y:\n # Move back! I shall not be moved!\n self.move(correct_x, correct_y)\n return QtWidgets.QToolButton.moveEvent(self, event)\n\n\nclass FingerTabWidget(QtWidgets.QTabWidget):\n\n \"\"\"A QTabWidget equivalent which uses our FingerTabBarWidget\"\"\"\n\n def __init__(self, parent, *args):\n QtWidgets.QTabWidget.__init__(self, parent, *args)\n self.setTabBar(FingerTabBarWidget(self))\n\n def addTab(self, *args, **kwargs):\n closeable = kwargs.pop('closable', False)\n index = QtWidgets.QTabWidget.addTab(self, *args, **kwargs)\n self.setTabClosable(index, closeable)\n return index\n\n def setTabClosable(self, index, closable):\n right_button = self.tabBar().tabButton(index, QtWidgets.QTabBar.RightSide)\n if closable:\n if not right_button:\n # Make one:\n close_button = TabToolButton(self.parent())\n close_button.setIcon(QtGui.QIcon(':/qtutils/fugue/cross'))\n self.tabBar().setTabButton(index, QtWidgets.QTabBar.RightSide, close_button)\n close_button.clicked.connect(lambda: self._on_close_button_clicked(close_button))\n else:\n if right_button:\n # Get rid of it:\n self.tabBar().setTabButton(index, QtWidgets.QTabBar.RightSide, None)\n\n def _on_close_button_clicked(self, button):\n for index in range(self.tabBar().count()):\n if self.tabBar().tabButton(index, QtWidgets.QTabBar.RightSide) is button:\n self.tabCloseRequested.emit(index)\n break\n\n\nclass ItemView(object):\n \"\"\"Mixin for QTableView and QTreeView that emits a custom signal leftClicked(index)\n after a left click on a valid index, and doubleLeftClicked(index) (in addition) on\n double click. 
Also has modified tab and arrow key behaviour and custom selection\n highlighting.\"\"\"\n leftClicked = Signal(QtCore.QModelIndex)\n doubleLeftClicked = Signal(QtCore.QModelIndex)\n\n COLOR_HIGHLIGHT = \"#40308CC6\" # Semitransparent blue\n\n def __init__(self, *args):\n super(ItemView, self).__init__(*args)\n self._pressed_index = None\n self._double_click = False\n self.setAutoScroll(False)\n p = self.palette()\n for group in [QtGui.QPalette.Active, QtGui.QPalette.Inactive]:\n p.setColor(\n group,\n QtGui.QPalette.Highlight,\n QtGui.QColor(self.COLOR_HIGHLIGHT))\n p.setColor(\n group,\n QtGui.QPalette.HighlightedText,\n p.color(QtGui.QPalette.Active, QtGui.QPalette.Foreground)\n )\n self.setPalette(p)\n\n def mousePressEvent(self, event):\n result = super(ItemView, self).mousePressEvent(event)\n index = self.indexAt(event.pos())\n if event.button() == QtCore.Qt.LeftButton and index.isValid():\n self._pressed_index = self.indexAt(event.pos())\n return result\n\n def leaveEvent(self, event):\n result = super(ItemView, self).leaveEvent(event)\n self._pressed_index = None\n self._double_click = False\n return result\n\n def mouseDoubleClickEvent(self, event):\n # Ensure our left click event occurs regardless of whether it is the\n # second click in a double click or not\n result = super(ItemView, self).mouseDoubleClickEvent(event)\n index = self.indexAt(event.pos())\n if event.button() == QtCore.Qt.LeftButton and index.isValid():\n self._pressed_index = self.indexAt(event.pos())\n self._double_click = True\n return result\n\n def mouseReleaseEvent(self, event):\n result = super(ItemView, self).mouseReleaseEvent(event)\n index = self.indexAt(event.pos())\n if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:\n self.leftClicked.emit(index)\n if self._double_click:\n self.doubleLeftClicked.emit(index)\n self._pressed_index = None\n self._double_click = False\n return result\n\n def keyPressEvent(self, event):\n if event.key() in [QtCore.Qt.Key_Space, QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:\n item = self.model().itemFromIndex(self.currentIndex())\n if item.isEditable():\n # Space/enter edits editable items:\n self.edit(self.currentIndex())\n else:\n # Space/enter on non-editable items simulates a left click:\n self.leftClicked.emit(self.currentIndex())\n return super(ItemView, self).keyPressEvent(event)\n\n def moveCursor(self, cursor_action, keyboard_modifiers):\n current_index = self.currentIndex()\n current_row, current_column = current_index.row(), current_index.column()\n if cursor_action == QtWidgets.QAbstractItemView.MoveUp:\n return current_index.sibling(current_row - 1, current_column)\n elif cursor_action == QtWidgets.QAbstractItemView.MoveDown:\n return current_index.sibling(current_row + 1, current_column)\n elif cursor_action == QtWidgets.QAbstractItemView.MoveLeft:\n return current_index.sibling(current_row, current_column - 1)\n elif cursor_action == QtWidgets.QAbstractItemView.MoveRight:\n return current_index.sibling(current_row, current_column + 1)\n elif cursor_action == QtWidgets.QAbstractItemView.MovePrevious:\n return current_index.sibling(current_row, current_column - 1)\n elif cursor_action == QtWidgets.QAbstractItemView.MoveNext:\n return current_index.sibling(current_row, current_column + 1)\n else:\n return super(ItemView, self).moveCursor(cursor_action, keyboard_modifiers)\n\n\nclass TreeView(ItemView, QtWidgets.QTreeView):\n \"\"\"Treeview version of our customised ItemView\"\"\"\n def __init__(self, parent=None):\n 
super(TreeView, self).__init__(parent)\n # Set columns to their minimum size, disabling resizing. Caller may still\n # configure a specific section to stretch:\n self.header().setSectionResizeMode(\n QtWidgets.QHeaderView.ResizeToContents\n )\n self.setItemDelegate(ItemDelegate(self))\n\n\nclass TableView(ItemView, QtWidgets.QTableView):\n \"\"\"TableView version of our customised ItemView\"\"\"\n def __init__(self, parent=None):\n super(TableView, self).__init__(parent)\n # Set rows and columns to the minimum size, disabling interactive resizing.\n # Caller may still configure a specific column to stretch:\n self.verticalHeader().setSectionResizeMode(\n QtWidgets.QHeaderView.ResizeToContents\n )\n self.horizontalHeader().setSectionResizeMode(\n QtWidgets.QHeaderView.ResizeToContents\n )\n self.horizontalHeader().sectionResized.connect(self.on_column_resized)\n self.setItemDelegate(ItemDelegate(self))\n self.verticalHeader().hide()\n self.setShowGrid(False)\n self.horizontalHeader().setHighlightSections(False)\n\n def on_column_resized(self, col):\n for row in range(self.model().rowCount()):\n self.resizeRowToContents(row)\n\n\nclass AlternatingColorModel(QtGui.QStandardItemModel):\n\n def __init__(self, view):\n QtGui.QStandardItemModel.__init__(self)\n # How much darker in each channel is the alternate base color compared\n # to the base color?\n self.view = view\n palette = view.palette()\n self.normal_color = palette.color(QtGui.QPalette.Base)\n self.alternate_color = palette.color(QtGui.QPalette.AlternateBase)\n r, g, b, a = self.normal_color.getRgb()\n alt_r, alt_g, alt_b, alt_a = self.alternate_color.getRgb()\n self.delta_r = alt_r - r\n self.delta_g = alt_g - g\n self.delta_b = alt_b - b\n self.delta_a = alt_a - a\n\n # A cache, store brushes so we don't have to recalculate them. Is faster.\n self.bg_brushes = {}\n\n def get_bgbrush(self, normal_brush, alternate, selected):\n \"\"\"Get cell colour as a function of its ordinary colour, whether it is on an odd\n row, and whether it is selected.\"\"\"\n normal_rgb = normal_brush.color().getRgb() if normal_brush is not None else None\n try:\n return self.bg_brushes[normal_rgb, alternate, selected]\n except KeyError:\n pass\n # Get the colour of the cell with alternate row shading:\n if normal_rgb is None:\n # No colour has been set. Use palette colours:\n if alternate:\n bg_color = self.alternate_color\n else:\n bg_color = self.normal_color\n else:\n bg_color = normal_brush.color()\n if alternate:\n # Modify alternate rows:\n r, g, b, a = normal_rgb\n alt_r = min(max(r + self.delta_r, 0), 255)\n alt_g = min(max(g + self.delta_g, 0), 255)\n alt_b = min(max(b + self.delta_b, 0), 255)\n alt_a = min(max(a + self.delta_a, 0), 255)\n bg_color = QtGui.QColor(alt_r, alt_g, alt_b, alt_a)\n\n # If parent is a TableView, we handle selection highlighting as part of the\n # background colours:\n if selected and isinstance(self.view, QtWidgets.QTableView):\n # Overlay highlight colour:\n r_s, g_s, b_s, a_s = QtGui.QColor(ItemView.COLOR_HIGHLIGHT).getRgb()\n r_0, g_0, b_0, a_0 = bg_color.getRgb()\n rgb = composite_colors(r_0, g_0, b_0, a_0, r_s, g_s, b_s, a_s)\n bg_color = QtGui.QColor(*rgb)\n\n brush = QtGui.QBrush(bg_color)\n self.bg_brushes[normal_rgb, alternate, selected] = brush\n return brush\n\n def data(self, index, role):\n \"\"\"When background color data is being requested, returns modified colours for\n every second row, according to the palette of the view. 
This has the effect of\n making the alternate colours visible even when custom colors have been set - the\n same shading will be applied to the custom colours. Only really looks sensible\n when the normal and alternate colors are similar. Also applies selection\n highlight colour (using ItemView.COLOR_HIGHLIGHT), similarly with alternate-row\n shading, for the case of a QTableView.\"\"\"\n if role == QtCore.Qt.BackgroundRole:\n normal_brush = QtGui.QStandardItemModel.data(self, index, QtCore.Qt.BackgroundRole)\n selected = index in self.view.selectedIndexes()\n alternate = index.row() % 2\n return self.get_bgbrush(normal_brush, alternate, selected)\n return QtGui.QStandardItemModel.data(self, index, role)\n\n\nclass Editor(QtWidgets.QTextEdit):\n \"\"\"Popup editor with word wrapping and automatic resizing.\"\"\"\n def __init__(self, parent):\n QtWidgets.QTextEdit.__init__(self, parent)\n self.setWordWrapMode(QtGui.QTextOption.WordWrap)\n self.setAcceptRichText(False)\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.textChanged.connect(self.update_size)\n self.initial_height = None\n\n def update_size(self):\n if self.initial_height is not None:\n # Temporarily shrink back to the initial height, just so that the document\n # size below returns the preferred size rather than the current size.\n # QTextDocument doesn't have a sizeHint of minimumSizeHint method, so this\n # is the best we can do to get its minimum size.\n self.setFixedHeight(self.initial_height)\n preferred_height = self.document().size().toSize().height()\n # Do not shrink smaller than the initial height:\n if self.initial_height is not None and preferred_height >= self.initial_height:\n self.setFixedHeight(preferred_height)\n\n def resizeEvent(self, event):\n result = QtWidgets.QTextEdit.resizeEvent(self, event)\n # Record the initial height after it is first set:\n if self.initial_height is None:\n self.initial_height = self.height()\n return result\n \n\n\nclass ItemDelegate(QtWidgets.QStyledItemDelegate):\n\n \"\"\"An item delegate with a larger row height and column width, faint grey vertical\n lines between columns, and a custom editor for handling multi-line data\"\"\"\n MIN_ROW_HEIGHT = 22\n EXTRA_ROW_HEIGHT = 6\n EXTRA_COL_WIDTH = 20\n\n def __init__(self, *args, **kwargs):\n QtWidgets.QStyledItemDelegate.__init__(self, *args, **kwargs)\n self._pen = QtGui.QPen()\n self._pen.setWidth(1)\n self._pen.setColor(QtGui.QColor.fromRgb(128, 128, 128, 64))\n\n def sizeHint(self, *args):\n size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)\n if size.height() <= self.MIN_ROW_HEIGHT:\n height = self.MIN_ROW_HEIGHT\n else:\n # Esnure cells with multiple lines of text still have some padding:\n height = size.height() + self.EXTRA_ROW_HEIGHT\n return QtCore.QSize(size.width() + self.EXTRA_COL_WIDTH, height)\n\n def paint(self, painter, option, index):\n if isinstance(self.parent(), QtWidgets.QTableView):\n # Disable rendering of selection highlight for TableViews, they handle\n # it themselves with the background colour data:\n option.state &= ~(QtWidgets.QStyle.State_Selected)\n QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)\n if index.column() > 0:\n painter.setPen(self._pen)\n painter.drawLine(option.rect.topLeft(), option.rect.bottomLeft())\n\n def eventFilter(self, obj, event):\n \"\"\"Filter events before they get to the editor, so that editing is ended when\n the user presses tab, shift-tab or enter (which 
otherwise would not end editing\n in a QTextEdit).\"\"\"\n if event.type() == QtCore.QEvent.KeyPress:\n if event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:\n # Allow shift-enter\n if not event.modifiers() & QtCore.Qt.ShiftModifier:\n self.commitData.emit(obj)\n self.closeEditor.emit(obj)\n return True\n elif event.key() == QtCore.Qt.Key_Tab:\n self.commitData.emit(obj)\n self.closeEditor.emit(obj, QtWidgets.QStyledItemDelegate.EditNextItem)\n return True\n elif event.key() == QtCore.Qt.Key_Backtab:\n self.commitData.emit(obj)\n self.closeEditor.emit(obj, QtWidgets.QStyledItemDelegate.EditPreviousItem)\n return True\n return QtWidgets.QStyledItemDelegate.eventFilter(self, obj, event)\n\n def createEditor(self, parent, option, index):\n return Editor(parent)\n\n def setEditorData(self, editor, index):\n editor.setPlainText(index.data())\n font = index.data(QtCore.Qt.FontRole)\n default_font = qapplication.font(self.parent())\n if font is None:\n font = default_font\n font.setPointSize(default_font.pointSize())\n editor.setFont(font)\n font_height = QtGui.QFontMetrics(font).height()\n padding = (self.MIN_ROW_HEIGHT - font_height) / 2 - 1\n editor.document().setDocumentMargin(padding)\n editor.selectAll()\n \n def setModelData(self, editor, model, index):\n model.setData(index, editor.toPlainText())\n\n\nclass GroupTab(object):\n GLOBALS_COL_DELETE = 0\n GLOBALS_COL_NAME = 1\n GLOBALS_COL_VALUE = 2\n GLOBALS_COL_UNITS = 3\n GLOBALS_COL_EXPANSION = 4\n\n GLOBALS_ROLE_IS_DUMMY_ROW = QtCore.Qt.UserRole + 1\n GLOBALS_ROLE_SORT_DATA = QtCore.Qt.UserRole + 2\n GLOBALS_ROLE_PREVIOUS_TEXT = QtCore.Qt.UserRole + 3\n GLOBALS_ROLE_IS_BOOL = QtCore.Qt.UserRole + 4\n\n COLOR_ERROR = '#F79494' # light red\n COLOR_OK = '#A5F7C6' # light green\n COLOR_BOOL_ON = '#63F731' # bright green\n COLOR_BOOL_OFF = '#608060' # dark green\n\n GLOBALS_DUMMY_ROW_TEXT = '<Click to add global>'\n\n def __init__(self, tabWidget, globals_file, group_name):\n\n self.tabWidget = tabWidget\n\n loader = UiLoader()\n loader.registerCustomWidget(TableView)\n self.ui = loader.load('group.ui')\n\n # Add the ui to the parent tabWidget:\n self.tabWidget.addTab(self.ui, group_name, closable=True)\n\n self.set_file_and_group_name(globals_file, group_name)\n\n self.globals_model = AlternatingColorModel(view=self.ui.tableView_globals)\n self.globals_model.setHorizontalHeaderLabels(['Delete', 'Name', 'Value', 'Units', 'Expansion'])\n self.globals_model.setSortRole(self.GLOBALS_ROLE_SORT_DATA)\n\n self.ui.tableView_globals.setModel(self.globals_model)\n self.ui.tableView_globals.setSelectionBehavior(QtWidgets.QTableView.SelectRows)\n self.ui.tableView_globals.setSelectionMode(QtWidgets.QTableView.ExtendedSelection)\n self.ui.tableView_globals.setSortingEnabled(True)\n # Make it so the user can just start typing on an item to edit:\n self.ui.tableView_globals.setEditTriggers(QtWidgets.QTableView.AnyKeyPressed |\n QtWidgets.QTableView.EditKeyPressed)\n # Ensure the clickable region of the delete button doesn't extend forever:\n self.ui.tableView_globals.horizontalHeader().setStretchLastSection(False)\n # Stretch the value column to fill available space:\n self.ui.tableView_globals.horizontalHeader().setSectionResizeMode(\n self.GLOBALS_COL_VALUE, QtWidgets.QHeaderView.Stretch\n )\n # Setup stuff for a custom context menu:\n self.ui.tableView_globals.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n # Make the actions for the context menu:\n self.action_globals_delete_selected = QtWidgets.QAction(\n 
QtGui.QIcon(':qtutils/fugue/minus'), 'Delete selected global(s)', self.ui)\n self.action_globals_set_selected_true = QtWidgets.QAction(\n QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Set selected Booleans True', self.ui)\n self.action_globals_set_selected_false = QtWidgets.QAction(\n QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Set selected Booleans False', self.ui)\n\n self.connect_signals()\n\n # Populate the model with globals from the h5 file:\n self.populate_model()\n # Set sensible column widths:\n for col in range(self.globals_model.columnCount()):\n if col != self.GLOBALS_COL_VALUE:\n self.ui.tableView_globals.resizeColumnToContents(col)\n if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_NAME) < 200:\n self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_NAME, 200)\n if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_VALUE) < 200:\n self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_VALUE, 200)\n if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_UNITS) < 100:\n self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_UNITS, 100)\n if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_EXPANSION) < 100:\n self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_EXPANSION, 100)\n self.ui.tableView_globals.resizeColumnToContents(self.GLOBALS_COL_DELETE)\n\n # Error state of tab\n self.tab_contains_errors = False\n\n def connect_signals(self):\n self.ui.tableView_globals.leftClicked.connect(self.on_tableView_globals_leftClicked)\n self.ui.tableView_globals.customContextMenuRequested.connect(self.on_tableView_globals_context_menu_requested)\n self.action_globals_set_selected_true.triggered.connect(\n lambda: self.on_globals_set_selected_bools_triggered('True'))\n self.action_globals_set_selected_false.triggered.connect(\n lambda: self.on_globals_set_selected_bools_triggered('False'))\n self.action_globals_delete_selected.triggered.connect(self.on_globals_delete_selected_triggered)\n self.globals_model.itemChanged.connect(self.on_globals_model_item_changed)\n # A context manager with which we can temporarily disconnect the above connection.\n self.globals_model_item_changed_disconnected = DisconnectContextManager(\n self.globals_model.itemChanged, self.on_globals_model_item_changed)\n\n def set_file_and_group_name(self, globals_file, group_name):\n \"\"\"Provided as a separate method so the main app can call it if the\n group gets renamed\"\"\"\n self.globals_file = globals_file\n self.group_name = group_name\n self.ui.label_globals_file.setText(globals_file)\n self.ui.label_group_name.setText(group_name)\n index = self.tabWidget.indexOf(self.ui)\n self.tabWidget.setTabText(index, group_name)\n self.tabWidget.setTabToolTip(index, '%s\\n(%s)' % (group_name, globals_file))\n\n def set_tab_icon(self, icon_string):\n index = self.tabWidget.indexOf(self.ui)\n if icon_string is not None:\n icon = QtGui.QIcon(icon_string)\n else:\n icon = QtGui.QIcon()\n if self.tabWidget.tabIcon(index).cacheKey() != icon.cacheKey():\n logger.info('setting tab icon')\n self.tabWidget.setTabIcon(index, icon)\n\n def populate_model(self):\n globals = runmanager.get_globals({self.group_name: self.globals_file})[self.group_name]\n for name, (value, units, expansion) in globals.items():\n row = self.make_global_row(name, value, units, expansion)\n self.globals_model.appendRow(row)\n value_item = row[self.GLOBALS_COL_VALUE]\n self.check_for_boolean_values(value_item)\n expansion_item = row[self.GLOBALS_COL_EXPANSION]\n 
self.on_globals_model_expansion_changed(expansion_item)\n\n # Add the dummy item at the end:\n dummy_delete_item = QtGui.QStandardItem()\n # This lets later code know that this row does not correspond to an\n # actual global:\n dummy_delete_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)\n dummy_delete_item.setFlags(QtCore.Qt.NoItemFlags)\n dummy_delete_item.setToolTip('Click to add global')\n\n dummy_name_item = QtGui.QStandardItem(self.GLOBALS_DUMMY_ROW_TEXT)\n dummy_name_item.setFont(QtGui.QFont(GLOBAL_MONOSPACE_FONT))\n dummy_name_item.setToolTip('Click to add global')\n dummy_name_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)\n dummy_name_item.setData(self.GLOBALS_DUMMY_ROW_TEXT, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n dummy_name_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable) # Clears the 'selectable' flag\n\n dummy_value_item = QtGui.QStandardItem()\n dummy_value_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)\n dummy_value_item.setFlags(QtCore.Qt.NoItemFlags)\n dummy_value_item.setToolTip('Click to add global')\n\n dummy_units_item = QtGui.QStandardItem()\n dummy_units_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)\n dummy_units_item.setFlags(QtCore.Qt.NoItemFlags)\n dummy_units_item.setToolTip('Click to add global')\n\n dummy_expansion_item = QtGui.QStandardItem()\n dummy_expansion_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)\n dummy_expansion_item.setFlags(QtCore.Qt.NoItemFlags)\n dummy_expansion_item.setToolTip('Click to add global')\n\n self.globals_model.appendRow(\n [dummy_delete_item, dummy_name_item, dummy_value_item, dummy_units_item, dummy_expansion_item])\n\n # Sort by name:\n self.ui.tableView_globals.sortByColumn(self.GLOBALS_COL_NAME, QtCore.Qt.AscendingOrder)\n\n def make_global_row(self, name, value='', units='', expansion=''):\n logger.debug('%s:%s - make global row: %s ' % (self.globals_file, self.group_name, name))\n # We just set some data here, other stuff is set in\n # self.update_parse_indication after runmanager has a chance to parse\n # everything and get back to us about what that data should be.\n\n delete_item = QtGui.QStandardItem()\n delete_item.setIcon(QtGui.QIcon(':qtutils/fugue/minus'))\n # Must be set to something so that the dummy row doesn't get sorted first:\n delete_item.setData(False, self.GLOBALS_ROLE_SORT_DATA)\n delete_item.setEditable(False)\n delete_item.setToolTip('Delete global from group.')\n\n name_item = QtGui.QStandardItem(name)\n name_item.setData(name, self.GLOBALS_ROLE_SORT_DATA)\n name_item.setData(name, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n name_item.setToolTip(name)\n name_item.setFont(QtGui.QFont(GLOBAL_MONOSPACE_FONT))\n\n value_item = QtGui.QStandardItem(value)\n value_item.setData(value, self.GLOBALS_ROLE_SORT_DATA)\n value_item.setData(str(value), self.GLOBALS_ROLE_PREVIOUS_TEXT)\n value_item.setToolTip('Evaluating...')\n value_item.setFont(QtGui.QFont(GLOBAL_MONOSPACE_FONT))\n\n units_item = QtGui.QStandardItem(units)\n units_item.setData(units, self.GLOBALS_ROLE_SORT_DATA)\n units_item.setData(units, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n units_item.setData(False, self.GLOBALS_ROLE_IS_BOOL)\n units_item.setToolTip('')\n\n expansion_item = QtGui.QStandardItem(expansion)\n expansion_item.setData(expansion, self.GLOBALS_ROLE_SORT_DATA)\n expansion_item.setData(expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n expansion_item.setToolTip('')\n\n row = [delete_item, name_item, value_item, units_item, expansion_item]\n return row\n\n def on_tableView_globals_leftClicked(self, index):\n if 
qapplication.keyboardModifiers() != QtCore.Qt.NoModifier:\n # Only handle mouseclicks with no keyboard modifiers.\n return\n item = self.globals_model.itemFromIndex(index)\n # The 'name' item in the same row:\n name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)\n name_item = self.globals_model.itemFromIndex(name_index)\n global_name = name_item.text()\n if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):\n # They clicked on an 'add new global' row. Enter editing mode on\n # the name item so they can enter a name for the new global:\n self.ui.tableView_globals.setCurrentIndex(name_index)\n self.ui.tableView_globals.edit(name_index)\n elif item.data(self.GLOBALS_ROLE_IS_BOOL):\n # It's a bool indicator. Toggle it\n value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)\n if value_item.text() == 'True':\n value_item.setText('False')\n elif value_item.text() == 'False':\n value_item.setText('True')\n else:\n raise AssertionError('expected boolean value')\n elif item.column() == self.GLOBALS_COL_DELETE:\n # They clicked a delete button.\n self.delete_global(global_name)\n elif not item.data(self.GLOBALS_ROLE_IS_BOOL):\n # Edit whatever it is:\n if (self.ui.tableView_globals.currentIndex() != index\n or self.ui.tableView_globals.state() != QtWidgets.QTreeView.EditingState):\n self.ui.tableView_globals.setCurrentIndex(index)\n self.ui.tableView_globals.edit(index)\n\n def on_globals_model_item_changed(self, item):\n if item.column() == self.GLOBALS_COL_NAME:\n self.on_globals_model_name_changed(item)\n elif item.column() == self.GLOBALS_COL_VALUE:\n self.on_globals_model_value_changed(item)\n elif item.column() == self.GLOBALS_COL_UNITS:\n self.on_globals_model_units_changed(item)\n elif item.column() == self.GLOBALS_COL_EXPANSION:\n self.on_globals_model_expansion_changed(item)\n\n def on_globals_model_name_changed(self, item):\n \"\"\"Handles global renaming and creation of new globals due to the user\n editing the <click to add global> item\"\"\"\n item_text = item.text()\n if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):\n if item_text != self.GLOBALS_DUMMY_ROW_TEXT:\n # The user has made a new global by editing the <click to add\n # global> item\n global_name = item_text\n self.new_global(global_name)\n else:\n # User has renamed a global.\n new_global_name = item_text\n previous_global_name = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)\n # Ensure the name actually changed, rather than something else\n # about the item:\n if new_global_name != previous_global_name:\n self.rename_global(previous_global_name, new_global_name)\n\n def on_globals_model_value_changed(self, item):\n index = item.index()\n new_value = item.text()\n previous_value = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)\n name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)\n name_item = self.globals_model.itemFromIndex(name_index)\n global_name = name_item.text()\n # Ensure the value actually changed, rather than something else about\n # the item:\n if new_value != previous_value:\n self.change_global_value(global_name, previous_value, new_value)\n\n def on_globals_model_units_changed(self, item):\n index = item.index()\n new_units = item.text()\n previous_units = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)\n name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)\n name_item = self.globals_model.itemFromIndex(name_index)\n global_name = name_item.text()\n # If it's a boolean value, ensure the check state matches the bool state:\n if item.data(self.GLOBALS_ROLE_IS_BOOL):\n value_item = 
self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)\n if value_item.text() == 'True':\n item.setCheckState(QtCore.Qt.Checked)\n elif value_item.text() == 'False':\n item.setCheckState(QtCore.Qt.Unchecked)\n else:\n raise AssertionError('expected boolean value')\n # Ensure the value actually changed, rather than something else about\n # the item:\n if new_units != previous_units:\n self.change_global_units(global_name, previous_units, new_units)\n\n def on_globals_model_expansion_changed(self, item):\n index = item.index()\n new_expansion = item.text()\n previous_expansion = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)\n name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)\n name_item = self.globals_model.itemFromIndex(name_index)\n global_name = name_item.text()\n # Don't want icon changing to recurse - which happens even if it is\n # the same icon. So disconnect the signal temporarily:\n with self.globals_model_item_changed_disconnected:\n if new_expansion == 'outer':\n item.setIcon(QtGui.QIcon(':qtutils/custom/outer'))\n item.setToolTip('This global will be interpreted as a list of values, and will ' +\n 'be outer producted with other lists to form a larger parameter space.')\n elif new_expansion:\n item.setIcon(QtGui.QIcon(':qtutils/custom/zip'))\n item.setToolTip('This global will be interpreted as a list of values, and will ' +\n 'be iterated over in lock-step with other globals in the ' +\n '\\'%s\\' zip group.' % new_expansion)\n else:\n item.setData(None, QtCore.Qt.DecorationRole)\n item.setToolTip('This global will be interpreted as a single value and passed to compilation as-is.')\n # Ensure the value actually changed, rather than something else about\n # the item:\n if new_expansion != previous_expansion:\n self.change_global_expansion(global_name, previous_expansion, new_expansion)\n\n def on_tableView_globals_context_menu_requested(self, point):\n menu = QtWidgets.QMenu(self.ui)\n menu.addAction(self.action_globals_set_selected_true)\n menu.addAction(self.action_globals_set_selected_false)\n menu.addAction(self.action_globals_delete_selected)\n menu.exec_(QtGui.QCursor.pos())\n\n def on_globals_delete_selected_triggered(self):\n selected_indexes = self.ui.tableView_globals.selectedIndexes()\n selected_items = (self.globals_model.itemFromIndex(index) for index in selected_indexes)\n name_items = [item for item in selected_items if item.column() == self.GLOBALS_COL_NAME]\n # If multiple selected, show 'delete n groups?' message. 
Otherwise,\n # pass confirm=True to self.delete_global so it can show the regular\n # message.\n confirm_multiple = (len(name_items) > 1)\n if confirm_multiple:\n if not question_dialog(\"Delete %d globals?\" % len(name_items)):\n return\n for item in name_items:\n global_name = item.text()\n self.delete_global(global_name, confirm=not confirm_multiple)\n\n def on_globals_set_selected_bools_triggered(self, state):\n selected_indexes = self.ui.tableView_globals.selectedIndexes()\n selected_items = [self.globals_model.itemFromIndex(index) for index in selected_indexes]\n value_items = [item for item in selected_items if item.column() == self.GLOBALS_COL_VALUE]\n units_items = [item for item in selected_items if item.column() == self.GLOBALS_COL_UNITS]\n for value_item, units_item in zip(value_items, units_items):\n if units_item.data(self.GLOBALS_ROLE_IS_BOOL):\n value_item.setText(state)\n\n def close(self):\n # It is up to the main runmanager class to drop references to this\n # instance before or after calling this method, so that after the\n # tabWidget no longer owns our widgets, both the widgets and the\n # instance will be garbage collected.\n index = self.tabWidget.indexOf(self.ui)\n self.tabWidget.removeTab(index)\n\n def get_global_item_by_name(self, global_name, column, previous_name=None):\n \"\"\"Returns an item from the row representing a global in the globals model.\n Which item is returned is set by the column argument.\"\"\"\n possible_name_items = self.globals_model.findItems(global_name, column=self.GLOBALS_COL_NAME)\n if previous_name is not None:\n # Filter by previous name, useful for telling rows apart when a\n # rename is in progress and two rows may temporarily contain the\n # same name (though the rename code with throw an error and revert\n # it).\n possible_name_items = [item for item in possible_name_items\n if item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT) == previous_name]\n elif global_name != self.GLOBALS_DUMMY_ROW_TEXT:\n # Don't return the dummy item unless they asked for it explicitly\n # - if a new global is being created, its name might be\n # simultaneously present in its own row and the dummy row too.\n possible_name_items = [item for item in possible_name_items\n if not item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW)]\n if len(possible_name_items) > 1:\n raise LookupError('Multiple items found')\n elif not possible_name_items:\n raise LookupError('No item found')\n name_item = possible_name_items[0]\n name_index = name_item.index()\n # Found the name item, get the sibling item for the column requested:\n item_index = name_index.sibling(name_index.row(), column)\n item = self.globals_model.itemFromIndex(item_index)\n return item\n\n def do_model_sort(self):\n header = self.ui.tableView_globals.horizontalHeader()\n sort_column = header.sortIndicatorSection()\n sort_order = header.sortIndicatorOrder()\n self.ui.tableView_globals.sortByColumn(sort_column, sort_order)\n\n def new_global(self, global_name):\n logger.info('%s:%s - new global: %s', self.globals_file, self.group_name, global_name)\n item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_NAME,\n previous_name=self.GLOBALS_DUMMY_ROW_TEXT)\n try:\n runmanager.new_global(self.globals_file, self.group_name, global_name)\n except Exception as e:\n error_dialog(str(e))\n else:\n # Insert the newly created global into the model:\n global_row = self.make_global_row(global_name)\n last_index = self.globals_model.rowCount()\n # Insert it as the row before the last (dummy) row:\n 
self.globals_model.insertRow(last_index - 1, global_row)\n self.do_model_sort()\n # Go into edit mode on the 'value' item:\n value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE,\n previous_name=global_name)\n value_item_index = value_item.index()\n self.ui.tableView_globals.setCurrentIndex(value_item_index)\n self.ui.tableView_globals.edit(value_item_index)\n self.globals_changed()\n finally:\n # Set the dummy row's text back ready for another group to be created:\n item.setText(self.GLOBALS_DUMMY_ROW_TEXT)\n\n def rename_global(self, previous_global_name, new_global_name):\n logger.info('%s:%s - rename global: %s -> %s',\n self.globals_file, self.group_name, previous_global_name, new_global_name)\n item = self.get_global_item_by_name(new_global_name, self.GLOBALS_COL_NAME,\n previous_name=previous_global_name)\n try:\n runmanager.rename_global(self.globals_file, self.group_name, previous_global_name, new_global_name)\n except Exception as e:\n error_dialog(str(e))\n # Set the item text back to the old name, since the rename failed:\n item.setText(previous_global_name)\n else:\n item.setData(new_global_name, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n item.setData(new_global_name, self.GLOBALS_ROLE_SORT_DATA)\n self.do_model_sort()\n item.setToolTip(new_global_name)\n self.globals_changed()\n value_item = self.get_global_item_by_name(new_global_name, self.GLOBALS_COL_VALUE)\n value = value_item.text()\n if not value and self.ui.tableView_globals.state() != QtWidgets.QAbstractItemView.EditingState:\n # Go into editing the value item automatically if not already in edit mode:\n value_item_index = value_item.index()\n self.ui.tableView_globals.setCurrentIndex(value_item_index)\n self.ui.tableView_globals.edit(value_item_index)\n else:\n # If this changed the sort order, ensure the item is still visible:\n scroll_view_to_row_if_current(self.ui.tableView_globals, item)\n\n def change_global_value(self, global_name, previous_value, new_value, interactive=True):\n logger.info('%s:%s - change global value: %s = %s -> %s' %\n (self.globals_file, self.group_name, global_name, previous_value, new_value))\n item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)\n if not interactive:\n # Value was not set interactively by the user, it is up to us to set it:\n with self.globals_model_item_changed_disconnected:\n item.setText(new_value)\n previous_background = item.background()\n previous_icon = item.icon()\n item.setData(new_value, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n item.setData(new_value, self.GLOBALS_ROLE_SORT_DATA)\n item.setData(None, QtCore.Qt.BackgroundRole)\n item.setIcon(QtGui.QIcon(':qtutils/fugue/hourglass'))\n args = global_name, previous_value, new_value, item, previous_background, previous_icon\n if interactive:\n QtCore.QTimer.singleShot(1, lambda: self.complete_change_global_value(*args))\n else:\n self.complete_change_global_value(*args, interactive=False)\n\n def complete_change_global_value(self, global_name, previous_value, new_value, item, previous_background, previous_icon, interactive=True):\n try:\n runmanager.set_value(self.globals_file, self.group_name, global_name, new_value)\n except Exception as e:\n if interactive:\n error_dialog(str(e))\n # Set the item text back to the old name, since the change failed:\n with self.globals_model_item_changed_disconnected:\n item.setText(previous_value)\n item.setData(previous_value, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n item.setData(previous_value, self.GLOBALS_ROLE_SORT_DATA)\n 
item.setData(previous_background, QtCore.Qt.BackgroundRole)\n item.setIcon(previous_icon)\n if not interactive:\n raise\n else:\n self.check_for_boolean_values(item)\n self.do_model_sort()\n item.setToolTip('Evaluating...')\n self.globals_changed()\n if not interactive:\n return\n units_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_UNITS)\n units = units_item.text()\n if not units and self.ui.tableView_globals.state() != QtWidgets.QAbstractItemView.EditingState:\n # Go into editing the units item automatically if not already in edit mode:\n units_item_index = units_item.index()\n self.ui.tableView_globals.setCurrentIndex(units_item_index)\n self.ui.tableView_globals.edit(units_item_index)\n else:\n # If this changed the sort order, ensure the item is still visible:\n scroll_view_to_row_if_current(self.ui.tableView_globals, item)\n\n def change_global_units(self, global_name, previous_units, new_units):\n logger.info('%s:%s - change units: %s = %s -> %s' %\n (self.globals_file, self.group_name, global_name, previous_units, new_units))\n item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_UNITS)\n try:\n runmanager.set_units(self.globals_file, self.group_name, global_name, new_units)\n except Exception as e:\n error_dialog(str(e))\n # Set the item text back to the old units, since the change failed:\n item.setText(previous_units)\n else:\n item.setData(new_units, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n item.setData(new_units, self.GLOBALS_ROLE_SORT_DATA)\n self.do_model_sort()\n # If this changed the sort order, ensure the item is still visible:\n scroll_view_to_row_if_current(self.ui.tableView_globals, item)\n\n def change_global_expansion(self, global_name, previous_expansion, new_expansion):\n logger.info('%s:%s - change expansion: %s = %s -> %s' %\n (self.globals_file, self.group_name, global_name, previous_expansion, new_expansion))\n item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_EXPANSION)\n try:\n runmanager.set_expansion(self.globals_file, self.group_name, global_name, new_expansion)\n except Exception as e:\n error_dialog(str(e))\n # Set the item text back to the old units, since the change failed:\n item.setText(previous_expansion)\n else:\n item.setData(new_expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n item.setData(new_expansion, self.GLOBALS_ROLE_SORT_DATA)\n self.do_model_sort()\n self.globals_changed()\n # If this changed the sort order, ensure the item is still visible:\n scroll_view_to_row_if_current(self.ui.tableView_globals, item)\n\n def check_for_boolean_values(self, item):\n \"\"\"Checks if the value is 'True' or 'False'. 
If either, makes the\n units cell checkable, uneditable, and coloured to indicate the state.\n The units cell can then be clicked to toggle the value.\"\"\"\n index = item.index()\n value = item.text()\n name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)\n units_index = index.sibling(index.row(), self.GLOBALS_COL_UNITS)\n name_item = self.globals_model.itemFromIndex(name_index)\n units_item = self.globals_model.itemFromIndex(units_index)\n global_name = name_item.text()\n logger.debug('%s:%s - check for boolean values: %s' %\n (self.globals_file, self.group_name, global_name))\n if value == 'True':\n units_item.setData(True, self.GLOBALS_ROLE_IS_BOOL)\n units_item.setText('Bool')\n units_item.setData('!1', self.GLOBALS_ROLE_SORT_DATA)\n units_item.setEditable(False)\n units_item.setCheckState(QtCore.Qt.Checked)\n units_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_BOOL_ON)))\n elif value == 'False':\n units_item.setData(True, self.GLOBALS_ROLE_IS_BOOL)\n units_item.setText('Bool')\n units_item.setData('!0', self.GLOBALS_ROLE_SORT_DATA)\n units_item.setEditable(False)\n units_item.setCheckState(QtCore.Qt.Unchecked)\n units_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_BOOL_OFF)))\n else:\n was_bool = units_item.data(self.GLOBALS_ROLE_IS_BOOL)\n units_item.setData(False, self.GLOBALS_ROLE_IS_BOOL)\n units_item.setEditable(True)\n # Checkbox still visible unless we do the following:\n units_item.setData(None, QtCore.Qt.CheckStateRole)\n units_item.setData(None, QtCore.Qt.BackgroundRole)\n if was_bool:\n # If the item was a bool and now isn't, clear the\n # units and go into editing so the user can enter a\n # new units string:\n units_item.setText('')\n self.ui.tableView_globals.setCurrentIndex(units_item.index())\n self.ui.tableView_globals.edit(units_item.index())\n\n def globals_changed(self):\n \"\"\"Called whenever something about a global has changed. call\n app.globals_changed to inform the main application that it needs to\n parse globals again. 
self.update_parse_indication will be called by\n the main app when parsing is done, and will set the colours and\n tooltips appropriately\"\"\"\n # Tell the main app about it:\n app.globals_changed()\n\n def delete_global(self, global_name, confirm=True):\n logger.info('%s:%s - delete global: %s' %\n (self.globals_file, self.group_name, global_name))\n if confirm:\n if not question_dialog(\"Delete the global '%s'?\" % global_name):\n return\n runmanager.delete_global(self.globals_file, self.group_name, global_name)\n # Find the entry for this global in self.globals_model and remove it:\n name_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_NAME)\n self.globals_model.removeRow(name_item.row())\n self.globals_changed()\n\n def update_parse_indication(self, active_groups, sequence_globals, evaled_globals):\n # Check that we are an active group:\n if self.group_name in active_groups and active_groups[self.group_name] == self.globals_file:\n self.tab_contains_errors = False\n # for global_name, value in evaled_globals[self.group_name].items():\n for i in range(self.globals_model.rowCount()):\n name_item = self.globals_model.item(i, self.GLOBALS_COL_NAME)\n if name_item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):\n continue\n value_item = self.globals_model.item(i, self.GLOBALS_COL_VALUE)\n expansion_item = self.globals_model.item(i, self.GLOBALS_COL_EXPANSION)\n # value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)\n # expansion_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_EXPANSION)\n global_name = name_item.text()\n value = evaled_globals[self.group_name][global_name]\n\n ignore, ignore, expansion = sequence_globals[self.group_name][global_name]\n # Temporarily disconnect the item_changed signal on the model\n # so that we can set the expansion type without triggering\n # another preparse - the parsing has already been done with\n # the new expansion type.\n with self.globals_model_item_changed_disconnected:\n if expansion_item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT) != expansion:\n # logger.info('expansion previous text set')\n expansion_item.setData(expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)\n if expansion_item.data(self.GLOBALS_ROLE_SORT_DATA) != expansion:\n # logger.info('sort data role set')\n expansion_item.setData(expansion, self.GLOBALS_ROLE_SORT_DATA)\n # The next line will now trigger item_changed, but it will not\n # be detected as an actual change to the expansion type,\n # because previous_text will match text. So it will not look\n # like a change and will not trigger preparsing. 
However It is\n # still important that other triggers be processed, such as\n # setting the icon in the expansion item, so that will still\n # occur in the callback.\n expansion_item.setText(expansion)\n if isinstance(value, Exception):\n value_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_ERROR)))\n value_item.setIcon(QtGui.QIcon(':qtutils/fugue/exclamation'))\n tooltip = '%s: %s' % (value.__class__.__name__, str(value))\n self.tab_contains_errors = True\n else:\n if value_item.background().color().name().lower() != self.COLOR_OK.lower():\n value_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_OK)))\n if not value_item.icon().isNull():\n # logger.info('clearing icon')\n value_item.setData(None, QtCore.Qt.DecorationRole)\n tooltip = repr(value)\n if value_item.toolTip() != tooltip:\n # logger.info('tooltip_changed')\n value_item.setToolTip(tooltip)\n if self.tab_contains_errors:\n self.set_tab_icon(':qtutils/fugue/exclamation')\n else:\n self.set_tab_icon(None)\n else:\n # Clear everything:\n self.set_tab_icon(None)\n for row in range(self.globals_model.rowCount()):\n item = self.globals_model.item(row, self.GLOBALS_COL_VALUE)\n if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):\n continue\n item.setData(None, QtCore.Qt.DecorationRole)\n item.setToolTip('Group inactive')\n item.setData(None, QtCore.Qt.BackgroundRole)\n\n\nclass RunmanagerMainWindow(QtWidgets.QMainWindow):\n # A signal to show that the window is shown and painted.\n firstPaint = Signal()\n # A signal for when the window manager has created a new window for this widget:\n newWindow = Signal(int)\n\n def __init__(self, *args, **kwargs):\n QtWidgets.QMainWindow.__init__(self, *args, **kwargs)\n self._previously_painted = False\n\n def closeEvent(self, event):\n if app.on_close_event():\n return QtWidgets.QMainWindow.closeEvent(self, event)\n else:\n event.ignore()\n\n def event(self, event):\n result = QtWidgets.QMainWindow.event(self, event)\n if event.type() == QtCore.QEvent.WinIdChange:\n self.newWindow.emit(self.effectiveWinId())\n return result\n\n def paintEvent(self, event):\n result = QtWidgets.QMainWindow.paintEvent(self, event)\n if not self._previously_painted:\n self._previously_painted = True\n self.firstPaint.emit()\n return result\n\n\nclass PoppedOutOutputBoxWindow(QtWidgets.QDialog):\n # A signal for when the window manager has created a new window for this widget:\n newWindow = Signal(int)\n\n def closeEvent(self, event):\n app.on_output_popout_button_clicked()\n\n def event(self, event):\n result = QtWidgets.QDialog.event(self, event)\n if event.type() == QtCore.QEvent.WinIdChange:\n self.newWindow.emit(self.effectiveWinId())\n return result\n\n\nclass RunManager(object):\n\n # Constants for the model in the axes tab:\n AXES_COL_NAME = 0\n AXES_COL_LENGTH = 1\n AXES_COL_SHUFFLE = 2\n AXES_ROLE_NAME = QtCore.Qt.UserRole + 1\n\n # Constants for the model in the groups tab:\n GROUPS_COL_NAME = 0\n GROUPS_COL_ACTIVE = 1\n GROUPS_COL_DELETE = 2\n GROUPS_COL_OPENCLOSE = 3\n GROUPS_ROLE_IS_DUMMY_ROW = QtCore.Qt.UserRole + 1\n GROUPS_ROLE_PREVIOUS_NAME = QtCore.Qt.UserRole + 2\n GROUPS_ROLE_SORT_DATA = QtCore.Qt.UserRole + 3\n GROUPS_ROLE_GROUP_IS_OPEN = QtCore.Qt.UserRole + 4\n GROUPS_DUMMY_ROW_TEXT = '<Click to add group>'\n\n def __init__(self):\n splash.update_text('loading graphical interface')\n loader = UiLoader()\n loader.registerCustomWidget(FingerTabWidget)\n loader.registerCustomWidget(TreeView)\n self.ui = loader.load('main.ui', RunmanagerMainWindow())\n\n self.output_box = 
OutputBox(self.ui.verticalLayout_output_tab)\n\n # Add a 'pop-out' button to the output tab:\n output_tab_index = self.ui.tabWidget.indexOf(self.ui.tab_output)\n self.output_popout_button = TabToolButton(self.ui.tabWidget.parent())\n self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-out'))\n self.output_popout_button.setToolTip('Toggle whether the output box is in a separate window')\n self.ui.tabWidget.tabBar().setTabButton(output_tab_index, QtWidgets.QTabBar.RightSide, self.output_popout_button)\n # Fix the first three tabs in place:\n for index in range(3):\n self.ui.tabWidget.tabBar().setMovable(False, index=index)\n # Whether or not the output box is currently popped out:\n self.output_box_is_popped_out = False\n # The window it will be moved to when popped out:\n self.output_box_window = PoppedOutOutputBoxWindow(self.ui, QtCore.Qt.WindowSystemMenuHint)\n self.output_box_window_verticalLayout = QtWidgets.QVBoxLayout(self.output_box_window)\n self.output_box_window_verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.output_box_window.setWindowTitle('runmanager output')\n self.output_box_window.resize(800, 1000)\n self.setup_config()\n self.setup_axes_tab()\n self.setup_groups_tab()\n self.connect_signals()\n\n # The last location from which a labscript file was selected, defaults\n # to labscriptlib:\n self.last_opened_labscript_folder = self.exp_config.get('paths', 'labscriptlib')\n # The last location from which a globals file was selected, defaults\n # to experiment_shot_storage:\n self.last_opened_globals_folder = self.exp_config.get('paths', 'experiment_shot_storage')\n # The last file to which the user saved or loaded a configuration:\n self.last_save_config_file = None\n # The last manually selected shot output folder, defaults to\n # experiment_shot_storage:\n self.last_selected_shot_output_folder = self.exp_config.get('paths', 'experiment_shot_storage')\n self.shared_drive_prefix = self.exp_config.get('paths', 'shared_drive')\n self.experiment_shot_storage = self.exp_config.get('paths', 'experiment_shot_storage')\n # Store the currently open groups as {(globals_filename, group_name): GroupTab}\n self.currently_open_groups = {}\n\n # A thread that will evaluate globals when they change, allowing us to\n # show their values and any errors in the tabs they came from.\n self.preparse_globals_thread = threading.Thread(target=self.preparse_globals_loop)\n self.preparse_globals_thread.daemon = True\n # A Queue for informing the preparser thread when globals have changed, and thus\n # need parsing again. 
It is a queue rather than a threading.Event() so that\n # callers can call Queue.join() to wait for parsing to complete in a race-free\n # way\n self.preparse_globals_required = queue.Queue()\n self.preparse_globals_thread.start()\n\n # A flag telling the compilation thread to abort:\n self.compilation_aborted = threading.Event()\n\n # A few attributes for self.guess_expansion_modes() to keep track of\n # its state, and thus detect changes:\n self.previous_evaled_globals = {}\n self.previous_global_hierarchy = {}\n self.previous_expansion_types = {}\n self.previous_expansions = {}\n\n # The prospective number of shots resulting from compilation\n self.n_shots = None\n\n # Start the loop that allows compilations to be queued up:\n self.compile_queue = queue.Queue()\n self.compile_queue_thread = threading.Thread(target=self.compile_loop)\n self.compile_queue_thread.daemon = True\n self.compile_queue_thread.start()\n\n splash.update_text('starting compiler subprocess')\n # Start the compiler subprocess:\n self.to_child, self.from_child, self.child = process_tree.subprocess(\n 'batch_compiler.py', output_redirection_port=self.output_box.port\n )\n\n # Is blank until a labscript file is selected:\n self.previous_default_output_folder = ''\n\n # Start a thread to monitor the time of day and create new shot output\n # folders for each day:\n inthread(self.rollover_shot_output_folder)\n self.non_default_folder = None\n\n # The data from the last time we saved the configuration, so we can\n # know if something's changed:\n self.last_save_data = None\n\n # autoload a config file, if labconfig is set to do so:\n try:\n autoload_config_file = self.exp_config.get('runmanager', 'autoload_config_file')\n except (LabConfig.NoOptionError, LabConfig.NoSectionError):\n self.output_box.output('Ready.\\n\\n')\n else:\n self.ui.setEnabled(False)\n self.output_box.output('Loading default config file %s...' 
% autoload_config_file)\n\n def load_the_config_file():\n try:\n self.load_configuration(autoload_config_file)\n self.output_box.output('done.\\n')\n except Exception as e:\n self.output_box.output('\\nCould not load config file: %s: %s\\n\\n' %\n (e.__class__.__name__, str(e)), red=True)\n else:\n self.output_box.output('Ready.\\n\\n')\n finally:\n self.ui.setEnabled(True)\n # Defer this until 50ms after the window has shown,\n # so that the GUI pops up faster in the meantime\n self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))\n\n splash.update_text('done')\n self.ui.show()\n\n def setup_config(self):\n required_config_params = {\"DEFAULT\": [\"experiment_name\"],\n \"programs\": [\"text_editor\",\n \"text_editor_arguments\",\n ],\n \"ports\": ['BLACS', 'runviewer'],\n \"paths\": [\"shared_drive\",\n \"experiment_shot_storage\",\n \"labscriptlib\",\n ],\n }\n self.exp_config = LabConfig(required_params = required_config_params)\n\n def setup_axes_tab(self):\n self.axes_model = QtGui.QStandardItemModel()\n\n # Setup the model columns and link to the treeview\n name_header_item = QtGui.QStandardItem('Name')\n name_header_item.setToolTip('The name of the global or zip group being iterated over')\n self.axes_model.setHorizontalHeaderItem(self.AXES_COL_NAME, name_header_item)\n\n length_header_item = QtGui.QStandardItem('Length')\n length_header_item.setToolTip('The number of elements in the axis of the parameter space')\n self.axes_model.setHorizontalHeaderItem(self.AXES_COL_LENGTH, length_header_item)\n\n shuffle_header_item = QtGui.QStandardItem('Shuffle')\n shuffle_header_item.setToolTip('Whether or not the order of the axis should be randomised')\n shuffle_header_item.setIcon(QtGui.QIcon(':qtutils/fugue/arrow-switch'))\n self.axes_model.setHorizontalHeaderItem(self.AXES_COL_SHUFFLE, shuffle_header_item)\n\n self.ui.treeView_axes.setModel(self.axes_model)\n\n # Setup stuff for a custom context menu:\n self.ui.treeView_axes.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n\n # Make the actions for the context menu:\n self.action_axes_check_selected = QtWidgets.QAction(QtGui.QIcon(':qtutils/fugue/ui-check-box'),\n 'Check selected', self.ui)\n self.action_axes_uncheck_selected = QtWidgets.QAction(QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'),\n 'Uncheck selected', self.ui)\n \n # setup header widths\n self.ui.treeView_axes.header().setStretchLastSection(False)\n self.ui.treeView_axes.header().setSectionResizeMode(self.AXES_COL_NAME, QtWidgets.QHeaderView.Stretch)\n \n def setup_groups_tab(self):\n self.groups_model = QtGui.QStandardItemModel()\n self.groups_model.setHorizontalHeaderLabels(['File/group name', 'Active', 'Delete', 'Open/Close'])\n self.groups_model.setSortRole(self.GROUPS_ROLE_SORT_DATA)\n self.ui.treeView_groups.setModel(self.groups_model)\n self.ui.treeView_groups.setAnimated(True) # Pretty\n self.ui.treeView_groups.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection)\n self.ui.treeView_groups.setSortingEnabled(True)\n self.ui.treeView_groups.sortByColumn(self.GROUPS_COL_NAME, QtCore.Qt.AscendingOrder)\n # Set column widths:\n self.ui.treeView_groups.setColumnWidth(self.GROUPS_COL_NAME, 400)\n # Make it so the user can just start typing on an item to edit:\n self.ui.treeView_groups.setEditTriggers(QtWidgets.QTreeView.AnyKeyPressed |\n QtWidgets.QTreeView.EditKeyPressed |\n QtWidgets.QTreeView.SelectedClicked)\n # Ensure the clickable region of the open/close button doesn't extend forever:\n 
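# (with a stretching last section, the open/close column would expand to fill the remaining width of the view)\n        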
self.ui.treeView_groups.header().setStretchLastSection(False)\n # Stretch the filpath/groupname column to fill available space:\n self.ui.treeView_groups.header().setSectionResizeMode(\n self.GROUPS_COL_NAME, QtWidgets.QHeaderView.Stretch\n )\n # Shrink columns other than the 'name' column to the size of their headers:\n for column in range(self.groups_model.columnCount()):\n if column != self.GROUPS_COL_NAME:\n self.ui.treeView_groups.resizeColumnToContents(column)\n\n self.ui.treeView_groups.setTextElideMode(QtCore.Qt.ElideMiddle)\n # Setup stuff for a custom context menu:\n self.ui.treeView_groups.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n\n # Make the actions for the context menu:\n self.action_groups_set_selection_active = QtWidgets.QAction(\n QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Set selected group(s) active', self.ui)\n self.action_groups_set_selection_inactive = QtWidgets.QAction(\n QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Set selected group(s) inactive', self.ui)\n self.action_groups_delete_selected = QtWidgets.QAction(\n QtGui.QIcon(':qtutils/fugue/minus'), 'Delete selected group(s)', self.ui)\n self.action_groups_open_selected = QtWidgets.QAction(\n QtGui.QIcon(':/qtutils/fugue/plus'), 'Open selected group(s)', self.ui)\n self.action_groups_close_selected_groups = QtWidgets.QAction(\n QtGui.QIcon(':/qtutils/fugue/cross'), 'Close selected group(s)', self.ui)\n self.action_groups_close_selected_files = QtWidgets.QAction(\n QtGui.QIcon(':/qtutils/fugue/cross'), 'Close selected file(s)', self.ui)\n\n # A counter for keeping track of the recursion depth of\n # self._groups_model_active_changed(). This is used so that some\n # actions can be taken in response to initial data changes, but not to\n # flow-on changes made by the method itself:\n self.on_groups_model_active_changed_recursion_depth = 0\n\n def connect_signals(self):\n # The button that pops the output box in and out:\n self.output_popout_button.clicked.connect(self.on_output_popout_button_clicked)\n\n # The menu items:\n self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)\n self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)\n self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)\n self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)\n self.ui.actionQuit.triggered.connect(self.ui.close)\n\n # labscript file and folder selection stuff:\n self.ui.toolButton_select_labscript_file.clicked.connect(self.on_select_labscript_file_clicked)\n self.ui.toolButton_select_shot_output_folder.clicked.connect(self.on_select_shot_output_folder_clicked)\n self.ui.toolButton_edit_labscript_file.clicked.connect(self.on_edit_labscript_file_clicked)\n self.ui.toolButton_reset_shot_output_folder.clicked.connect(self.on_reset_shot_output_folder_clicked)\n self.ui.lineEdit_labscript_file.textChanged.connect(self.on_labscript_file_text_changed)\n self.ui.lineEdit_shot_output_folder.textChanged.connect(self.on_shot_output_folder_text_changed)\n\n # Control buttons; engage, abort, restart subprocess:\n self.ui.pushButton_engage.clicked.connect(self.on_engage_clicked)\n self.ui.pushButton_abort.clicked.connect(self.on_abort_clicked)\n self.ui.pushButton_restart_subprocess.clicked.connect(self.on_restart_subprocess_clicked)\n \n # shuffle master control\n self.ui.pushButton_shuffle.stateChanged.connect(self.on_master_shuffle_clicked)\n\n # Tab closebutton clicked:\n 
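# (the handler looks up which currently open group owns the closed tab and closes that group)\n        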
self.ui.tabWidget.tabCloseRequested.connect(self.on_tabCloseRequested)\n\n # Axes tab; right click menu, menu actions, reordering\n # self.ui.treeView_axes.customContextMenuRequested.connect(self.on_treeView_axes_context_menu_requested)\n self.action_axes_check_selected.triggered.connect(self.on_axes_check_selected_triggered)\n self.action_axes_uncheck_selected.triggered.connect(self.on_axes_uncheck_selected_triggered)\n self.ui.toolButton_axis_to_top.clicked.connect(self.on_axis_to_top_clicked)\n self.ui.toolButton_axis_up.clicked.connect(self.on_axis_up_clicked)\n self.ui.toolButton_axis_down.clicked.connect(self.on_axis_down_clicked)\n self.ui.toolButton_axis_to_bottom.clicked.connect(self.on_axis_to_bottom_clicked)\n # axes tab item changed handler\n self.axes_model.itemChanged.connect(self.on_axes_item_changed)\n self.axes_model.rowsRemoved.connect(self.update_global_shuffle_state)\n self.axes_model.rowsInserted.connect(self.update_global_shuffle_state)\n\n # Groups tab; right click menu, menu actions, open globals file, new globals file, diff globals file,\n self.ui.treeView_groups.customContextMenuRequested.connect(self.on_treeView_groups_context_menu_requested)\n self.action_groups_set_selection_active.triggered.connect(\n lambda: self.on_groups_set_selection_active_triggered(QtCore.Qt.Checked))\n self.action_groups_set_selection_inactive.triggered.connect(\n lambda: self.on_groups_set_selection_active_triggered(QtCore.Qt.Unchecked))\n self.action_groups_delete_selected.triggered.connect(self.on_groups_delete_selected_triggered)\n self.action_groups_open_selected.triggered.connect(self.on_groups_open_selected_triggered)\n self.action_groups_close_selected_groups.triggered.connect(self.on_groups_close_selected_groups_triggered)\n self.action_groups_close_selected_files.triggered.connect(self.on_groups_close_selected_files_triggered)\n\n self.ui.pushButton_open_globals_file.clicked.connect(self.on_open_globals_file_clicked)\n self.ui.pushButton_new_globals_file.clicked.connect(self.on_new_globals_file_clicked)\n self.ui.pushButton_diff_globals_file.clicked.connect(self.on_diff_globals_file_clicked)\n self.ui.treeView_groups.leftClicked.connect(self.on_treeView_groups_leftClicked)\n self.ui.treeView_groups.doubleLeftClicked.connect(self.on_treeView_groups_doubleLeftClicked)\n self.groups_model.itemChanged.connect(self.on_groups_model_item_changed)\n # A context manager with which we can temporarily disconnect the above connection.\n self.groups_model_item_changed_disconnected = DisconnectContextManager(\n self.groups_model.itemChanged, self.on_groups_model_item_changed)\n \n # Keyboard shortcuts:\n engage_shortcut = QtWidgets.QShortcut('F5', self.ui,\n lambda: self.ui.pushButton_engage.clicked.emit(False))\n engage_shortcut.setAutoRepeat(False)\n QtWidgets.QShortcut('ctrl+W', self.ui, self.close_current_tab)\n QtWidgets.QShortcut('ctrl+Tab', self.ui, lambda: self.switch_tabs(+1))\n QtWidgets.QShortcut('ctrl+shift+Tab', self.ui, lambda: self.switch_tabs(-1))\n\n # Tell Windows how to handle our windows in the the taskbar, making pinning work properly and stuff:\n if os.name == 'nt':\n self.ui.newWindow.connect(set_win_appusermodel)\n self.output_box_window.newWindow.connect(set_win_appusermodel)\n\n def on_close_event(self):\n save_data = self.get_save_data()\n if self.last_save_data is not None and save_data != self.last_save_data:\n message = ('Current configuration (which groups are active/open and other GUI state) '\n 'has changed: save config file \\'%s\\'?' 
% self.last_save_config_file)\n reply = QtWidgets.QMessageBox.question(self.ui, 'Quit runmanager', message,\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)\n if reply == QtWidgets.QMessageBox.Cancel:\n return False\n if reply == QtWidgets.QMessageBox.Yes:\n self.save_configuration(self.last_save_config_file)\n self.to_child.put(['quit', None])\n return True\n\n def close_current_tab(self):\n current_tab_widget = self.ui.tabWidget.currentWidget()\n for (globals_file, group_name), tab in self.currently_open_groups.items():\n if tab.ui is current_tab_widget:\n self.close_group(globals_file, group_name)\n\n def switch_tabs(self, change):\n current_index = self.ui.tabWidget.currentIndex()\n n_tabs = self.ui.tabWidget.count()\n new_index = (current_index + change) % n_tabs\n self.ui.tabWidget.setCurrentIndex(new_index)\n\n def on_output_popout_button_clicked(self):\n if self.output_box_is_popped_out:\n self.ui.verticalLayout_output_tab.addWidget(self.output_box.output_textedit)\n self.output_box_window.hide()\n self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-out'))\n else:\n # pop it out\n # self.ui.verticalLayout_output_tab.remove(self.output_box)\n self.output_box_window_verticalLayout.addWidget(self.output_box.output_textedit)\n self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-in'))\n self.output_box_window.show()\n self.output_box_is_popped_out = not self.output_box_is_popped_out\n\n def on_select_labscript_file_clicked(self, checked):\n labscript_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,\n 'Select labscript file',\n self.last_opened_labscript_folder,\n \"Python files (*.py)\")\n if type(labscript_file) is tuple:\n labscript_file, _ = labscript_file\n\n if not labscript_file:\n # User cancelled selection\n return\n # Convert to standard platform specific path, otherwise Qt likes forward slashes:\n labscript_file = os.path.abspath(labscript_file)\n if not os.path.isfile(labscript_file):\n error_dialog(\"No such file %s.\" % labscript_file)\n return\n # Save the containing folder for use next time we open the dialog box:\n self.last_opened_labscript_folder = os.path.dirname(labscript_file)\n # Write the file to the lineEdit:\n self.ui.lineEdit_labscript_file.setText(labscript_file)\n # Check if the output folder needs to be updated:\n self.check_output_folder_update()\n\n def on_edit_labscript_file_clicked(self, checked):\n # get path to text editor\n editor_path = self.exp_config.get('programs', 'text_editor')\n editor_args = self.exp_config.get('programs', 'text_editor_arguments')\n # Get the current labscript file:\n current_labscript_file = self.ui.lineEdit_labscript_file.text()\n # Ignore if no file selected\n if not current_labscript_file:\n return\n if not editor_path:\n error_dialog(\"No editor specified in the labconfig.\")\n if '{file}' in editor_args:\n # Split the args on spaces into a list, replacing {file} with the labscript file\n editor_args = [arg if arg != '{file}' else current_labscript_file for arg in editor_args.split()]\n else:\n # Otherwise if {file} isn't already in there, append it to the other args:\n editor_args = [current_labscript_file] + editor_args.split()\n try:\n subprocess.Popen([editor_path] + editor_args)\n except Exception as e:\n error_dialog(\"Unable to launch text editor specified in %s. 
Error was: %s\" %\n (self.exp_config.config_path, str(e)))\n\n def on_select_shot_output_folder_clicked(self, checked):\n shot_output_folder = QtWidgets.QFileDialog.getExistingDirectory(self.ui,\n 'Select shot output folder',\n self.last_selected_shot_output_folder)\n if type(shot_output_folder) is tuple:\n shot_output_folder, _ = shot_output_folder\n\n if not shot_output_folder:\n # User cancelled selection\n return\n # Convert to standard platform specific path, otherwise Qt likes forward slashes:\n shot_output_folder = os.path.abspath(shot_output_folder)\n # Save the containing folder for use next time we open the dialog box:\n self.last_selected_shot_output_folder = os.path.dirname(shot_output_folder)\n # Write the file to the lineEdit:\n self.ui.lineEdit_shot_output_folder.setText(shot_output_folder)\n # Update our knowledge about whether this is the default output folder or not:\n self.check_output_folder_update()\n\n def on_reset_shot_output_folder_clicked(self, checked):\n current_default_output_folder = self.get_default_output_folder()\n if current_default_output_folder is None:\n return\n self.ui.lineEdit_shot_output_folder.setText(current_default_output_folder)\n self.check_output_folder_update()\n\n def on_labscript_file_text_changed(self, text):\n # Blank out the 'edit labscript file' button if no labscript file is\n # selected\n enabled = bool(text)\n self.ui.toolButton_edit_labscript_file.setEnabled(enabled)\n # Blank out the 'select shot output folder' button if no labscript\n # file is selected:\n self.ui.toolButton_select_shot_output_folder.setEnabled(enabled)\n self.ui.lineEdit_labscript_file.setToolTip(text)\n self.previous_default_output_folder = self.get_default_output_folder()\n\n def on_shot_output_folder_text_changed(self, text):\n # Blank out the 'reset default output folder' button if the user is\n # already using the default output folder\n if text == self.get_default_output_folder():\n self.non_default_folder = False\n else:\n self.non_default_folder = True\n self.ui.toolButton_reset_shot_output_folder.setEnabled(self.non_default_folder)\n self.ui.label_non_default_folder.setVisible(self.non_default_folder)\n self.ui.lineEdit_shot_output_folder.setToolTip(text)\n\n def on_engage_clicked(self):\n logger.info('Engage')\n try:\n send_to_BLACS = self.ui.checkBox_run_shots.isChecked()\n send_to_runviewer = self.ui.checkBox_view_shots.isChecked()\n labscript_file = self.ui.lineEdit_labscript_file.text()\n # even though we shuffle on a per global basis, if ALL of the globals are set to shuffle, then we may as well shuffle again. 
This helps shuffle shots more randomly than just shuffling within each level (because without this, you would still do all shots with the outer most variable the same, etc)\n shuffle = self.ui.pushButton_shuffle.checkState() == QtCore.Qt.Checked\n if not labscript_file:\n raise Exception('Error: No labscript file selected')\n output_folder = self.ui.lineEdit_shot_output_folder.text()\n if not output_folder:\n raise Exception('Error: No output folder selected')\n BLACS_host = self.ui.lineEdit_BLACS_hostname.text()\n logger.info('Parsing globals...')\n active_groups = self.get_active_groups()\n # Get ordering of expansion globals\n expansion_order = {}\n for i in range(self.axes_model.rowCount()):\n item = self.axes_model.item(i, self.AXES_COL_NAME)\n shuffle_item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)\n name = item.data(self.AXES_ROLE_NAME)\n expansion_order[name] = {'order':i, 'shuffle':shuffle_item.checkState()}\n \n try:\n sequenceglobals, shots, evaled_globals, global_hierarchy, expansions = self.parse_globals(active_groups, expansion_order=expansion_order)\n except Exception as e:\n raise Exception('Error parsing globals:\\n%s\\nCompilation aborted.' % str(e))\n logger.info('Making h5 files')\n labscript_file, run_files = self.make_h5_files(\n labscript_file, output_folder, sequenceglobals, shots, shuffle)\n self.ui.pushButton_abort.setEnabled(True)\n self.compile_queue.put([labscript_file, run_files, send_to_BLACS, BLACS_host, send_to_runviewer])\n except Exception as e:\n self.output_box.output('%s\\n\\n' % str(e), red=True)\n logger.info('end engage')\n\n def on_abort_clicked(self):\n self.compilation_aborted.set()\n\n def on_restart_subprocess_clicked(self):\n # Kill and restart the compilation subprocess\n self.to_child.put(['quit', None])\n self.from_child.put(['done', False])\n time.sleep(0.1)\n self.output_box.output('Asking subprocess to quit...')\n timeout_time = time.time() + 2\n QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill=False))\n\n def check_child_exited(self, timeout_time, kill=False):\n self.child.poll()\n if self.child.returncode is None and time.time() < timeout_time:\n QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill))\n return\n elif self.child.returncode is None:\n if not kill:\n self.child.terminate()\n self.output_box.output('not responding.\\n')\n timeout_time = time.time() + 2\n QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill=True))\n return\n else:\n self.child.kill()\n self.output_box.output('Killed\\n', red=True)\n elif kill:\n self.output_box.output('Terminated\\n', red=True)\n else:\n self.output_box.output('done.\\n')\n self.output_box.output('Spawning new compiler subprocess...')\n self.to_child, self.from_child, self.child = process_tree.subprocess(\n 'batch_compiler.py', output_redirection_port=self.output_box.port\n )\n self.output_box.output('done.\\n')\n self.output_box.output('Ready.\\n\\n')\n\n def on_tabCloseRequested(self, index):\n tab_page = self.ui.tabWidget.widget(index)\n for (globals_file, group_name), group_tab in self.currently_open_groups.items():\n if group_tab.ui is tab_page:\n self.close_group(globals_file, group_name)\n break\n\n def on_treeView_axes_context_menu_requested(self, point):\n raise NotImplementedError\n # menu = QtWidgets.QMenu(self.ui)\n # menu.addAction(self.action_axes_check_selected)\n # menu.addAction(self.action_axes_uncheck_selected)\n # menu.exec_(QtGui.QCursor.pos())\n pass\n\n def 
on_axes_check_selected_triggered(self, *args):\n raise NotImplementedError\n\n def on_axes_uncheck_selected_triggered(self, *args):\n raise NotImplementedError\n \n def on_axis_to_top_clicked(self, checked):\n # Get the selection model from the treeview\n selection_model = self.ui.treeView_axes.selectionModel() \n # Create a list of select row indices\n selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]\n # For each row selected\n for i,row in enumerate(selected_row_list):\n # only move the row while it is not element 0, and the row above it is not selected\n # (note that while a row above may have been initially selected, it should by now, be one row higher\n # since we start moving elements of the list upwards starting from the lowest index)\n while row > 0 and (row-1) not in selected_row_list:\n # Remove the selected row\n items = self.axes_model.takeRow(row)\n # Add the selected row into a position one above\n self.axes_model.insertRow(row-1,items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] -= 1\n row -= 1\n \n self.update_axes_indentation()\n\n def on_axis_up_clicked(self, checked):\n # Get the selection model from the treeview\n selection_model = self.ui.treeView_axes.selectionModel() \n # Create a list of select row indices\n selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]\n # For each row selected\n for i,row in enumerate(selected_row_list):\n # only move the row if it is not element 0, and the row above it is not selected\n # (note that while a row above may have been initially selected, it should by now, be one row higher\n # since we start moving elements of the list upwards starting from the lowest index)\n if row > 0 and (row-1) not in selected_row_list:\n # Remove the selected row\n items = self.axes_model.takeRow(row)\n # Add the selected row into a position one above\n self.axes_model.insertRow(row-1,items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] -= 1\n \n self.update_axes_indentation()\n\n def on_axis_down_clicked(self, checked):\n # Get the selection model from the treeview\n selection_model = self.ui.treeView_axes.selectionModel() \n # Create a list of select row indices\n selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]\n # For each row selected\n for i,row in enumerate(selected_row_list):\n # only move the row if it is not the last element, and the row above it is not selected\n # (note that while a row below may have been initially selected, it should by now, be one row lower\n # since we start moving elements of the list upwards starting from the highest index)\n if row < self.axes_model.rowCount()-1 and (row+1) not in selected_row_list:\n # Remove the selected row\n items = self.axes_model.takeRow(row)\n # Add the selected row into a position one above\n self.axes_model.insertRow(row+1,items)\n # Since it is now a newly inserted row, select it again\n 
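# (takeRow() removed the original items from the model, so the re-inserted row is no longer selected; SelectCurrent | Rows selects the whole row without clearing the other selected rows)\n                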
selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] += 1\n \n self.update_axes_indentation()\n\n def on_axis_to_bottom_clicked(self, checked):\n selection_model = self.ui.treeView_axes.selectionModel() \n # Create a list of select row indices\n selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]\n # For each row selected\n for i,row in enumerate(selected_row_list):\n # only move the row while it is not the last element, and the row above it is not selected\n # (note that while a row below may have been initially selected, it should by now, be one row lower\n # since we start moving elements of the list upwards starting from the highest index)\n while row < self.axes_model.rowCount()-1 and (row+1) not in selected_row_list:\n # Remove the selected row\n items = self.axes_model.takeRow(row)\n # Add the selected row into a position one above\n self.axes_model.insertRow(row+1,items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] += 1\n row += 1\n \n self.update_axes_indentation()\n \n def on_axes_item_changed(self, item):\n if item.column() == self.AXES_COL_SHUFFLE:\n self.update_global_shuffle_state()\n \n def update_global_shuffle_state(self, *args, **kwargs):\n all_checked = True\n none_checked = True\n for i in range(self.axes_model.rowCount()):\n check_state = self.axes_model.item(i, self.AXES_COL_SHUFFLE).checkState() == QtCore.Qt.Checked\n all_checked = all_checked and check_state\n none_checked = none_checked and not check_state\n \n if not all_checked and not none_checked:\n self.ui.pushButton_shuffle.setTristate(True)\n self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.PartiallyChecked)\n elif none_checked and not all_checked:\n self.ui.pushButton_shuffle.setTristate(False)\n self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Unchecked)\n elif all_checked and not none_checked:\n self.ui.pushButton_shuffle.setTristate(False)\n self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Checked)\n else:\n # No axes. 
Set if partially checked, otherwise else leave it alone:\n if self.ui.pushButton_shuffle.checkState() == QtCore.Qt.PartiallyChecked:\n self.ui.pushButton_shuffle.setTristate(False)\n self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Checked)\n\n\n def on_master_shuffle_clicked(self, state):\n if state in [QtCore.Qt.Checked, QtCore.Qt.Unchecked]:\n self.ui.pushButton_shuffle.setTristate(False)\n for i in range(self.axes_model.rowCount()):\n item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)\n if item.checkState() != state:\n self.axes_model.item(i, self.AXES_COL_SHUFFLE).setCheckState(state)\n\n def on_treeView_groups_context_menu_requested(self, point):\n menu = QtWidgets.QMenu(self.ui)\n menu.addAction(self.action_groups_set_selection_active)\n menu.addAction(self.action_groups_set_selection_inactive)\n menu.addAction(self.action_groups_delete_selected)\n menu.addAction(self.action_groups_open_selected)\n menu.addAction(self.action_groups_close_selected_groups)\n menu.addAction(self.action_groups_close_selected_files)\n copy_menu = QtWidgets.QMenu('Copy selected group(s) to...', menu)\n copy_menu.setIcon(QtGui.QIcon(':/qtutils/fugue/blue-document-copy'))\n menu.addMenu(copy_menu)\n move_menu = QtWidgets.QMenu('Move selected group(s) to...', menu)\n move_menu.setIcon(QtGui.QIcon(':/qtutils/fugue/blue-document--arrow'))\n menu.addMenu(move_menu)\n\n # Create a dict of all filepaths -> filenames\n filenames = {}\n for index in range(self.groups_model.rowCount()):\n filepath = self.groups_model.item(index, self.GROUPS_COL_NAME).text()\n filenames[filepath] = filepath.split(os.sep)[-1]\n\n # expand duplicate filenames until there is nomore duplicates\n new_filename = {}\n i = 2\n while new_filename != filenames:\n for filepath, filename in filenames.items():\n if list(filenames.values()).count(filename) > 1:\n new_filename[filepath] = os.sep.join(filepath.split(os.sep)[-i:])\n else:\n new_filename[filepath] = filename\n filenames = new_filename\n i += 1\n\n # add all filenames to the copy and move submenu\n for filepath, filename in filenames.items():\n copy_menu.addAction(filename, lambda filepath=filepath: self.on_groups_copy_selected_groups_triggered(filepath, False))\n move_menu.addAction(filename, lambda filepath=filepath: self.on_groups_copy_selected_groups_triggered(filepath, True))\n\n menu.exec_(QtGui.QCursor.pos())\n\n def on_groups_copy_selected_groups_triggered(self, dest_globals_file=None, delete_source_group=False):\n selected_indexes = self.ui.treeView_groups.selectedIndexes()\n selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)\n name_items = [item for item in selected_items\n if item.column() == self.GROUPS_COL_NAME\n and item.parent() is not None]\n for item in name_items:\n source_globals_file = item.parent().text()\n self.copy_group(source_globals_file, item.text(), dest_globals_file, delete_source_group)\n\n def on_groups_set_selection_active_triggered(self, checked_state):\n selected_indexes = self.ui.treeView_groups.selectedIndexes()\n # Filter to only include the 'active' column:\n selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)\n active_items = (item for item in selected_items\n if item.column() == self.GROUPS_COL_ACTIVE\n and item.parent() is not None)\n for item in active_items:\n item.setCheckState(checked_state)\n\n def on_groups_delete_selected_triggered(self):\n selected_indexes = self.ui.treeView_groups.selectedIndexes()\n selected_items = (self.groups_model.itemFromIndex(index) for 
index in selected_indexes)\n name_items = [item for item in selected_items\n if item.column() == self.GROUPS_COL_NAME\n and item.parent() is not None]\n # If multiple selected, show 'delete n groups?' message. Otherwise,\n # pass confirm=True to self.delete_group so it can show the regular\n # message.\n confirm_multiple = (len(name_items) > 1)\n if confirm_multiple:\n if not question_dialog(\"Delete %d groups?\" % len(name_items)):\n return\n for item in name_items:\n globals_file = item.parent().text()\n group_name = item.text()\n self.delete_group(globals_file, group_name, confirm=not confirm_multiple)\n\n def on_groups_open_selected_triggered(self):\n selected_indexes = self.ui.treeView_groups.selectedIndexes()\n selected_items = [self.groups_model.itemFromIndex(index) for index in selected_indexes]\n name_items = [item for item in selected_items\n if item.column() == self.GROUPS_COL_NAME\n and item.parent() is not None]\n\n # Include all grous of selected globals files:\n for item in selected_items:\n if item.parent() is None:\n children = [item.child(i) for i in range(item.rowCount())]\n # Exclude <add new group> item, which is not selectable\n name_items += [child for child in children if child.isSelectable() ]\n\n filenames = set(item.parent().text() for item in name_items)\n for item in name_items:\n globals_file = item.parent().text()\n group_name = item.text()\n if (globals_file, group_name) not in self.currently_open_groups:\n self.open_group(globals_file, group_name, trigger_preparse=False)\n if name_items:\n self.globals_changed()\n\n def on_groups_close_selected_groups_triggered(self):\n selected_indexes = self.ui.treeView_groups.selectedIndexes()\n selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)\n name_items = [item for item in selected_items\n if item.column() == self.GROUPS_COL_NAME\n and item.parent() is not None]\n for item in name_items:\n globals_file = item.parent().text()\n group_name = item.text()\n if (globals_file, group_name) in self.currently_open_groups:\n self.close_group(globals_file, group_name)\n\n def on_groups_close_selected_files_triggered(self):\n selected_indexes = self.ui.treeView_groups.selectedIndexes()\n selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)\n name_items = [item for item in selected_items\n if item.column() == self.GROUPS_COL_NAME\n and item.parent() is None]\n child_openclose_items = [item.child(i, self.GROUPS_COL_OPENCLOSE)\n for item in name_items\n for i in range(item.rowCount())]\n child_is_open = [child_item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)\n for child_item in child_openclose_items]\n if any(child_is_open):\n if not question_dialog('Close %d file(s)? This will close %d currently open group(s).' 
%\n (len(name_items), child_is_open.count(True))):\n return\n for item in name_items:\n globals_file = item.text()\n self.close_globals_file(globals_file, confirm=False)\n\n def on_open_globals_file_clicked(self):\n globals_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,\n 'Select globals file',\n self.last_opened_globals_folder,\n \"HDF5 files (*.h5)\")\n if type(globals_file) is tuple:\n globals_file, _ = globals_file\n\n if not globals_file:\n # User cancelled selection\n return\n # Convert to standard platform specific path, otherwise Qt likes forward slashes:\n globals_file = os.path.abspath(globals_file)\n if not os.path.isfile(globals_file):\n error_dialog(\"No such file %s.\" % globals_file)\n return\n # Save the containing folder for use next time we open the dialog box:\n self.last_opened_globals_folder = os.path.dirname(globals_file)\n # Open the file:\n self.open_globals_file(globals_file)\n\n def on_new_globals_file_clicked(self):\n globals_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,\n 'Create new globals file',\n self.last_opened_globals_folder,\n \"HDF5 files (*.h5)\")\n if type(globals_file) is tuple:\n globals_file, _ = globals_file\n\n if not globals_file:\n # User cancelled\n return\n # Convert to standard platform specific path, otherwise Qt likes\n # forward slashes:\n globals_file = os.path.abspath(globals_file)\n # Save the containing folder for use next time we open the dialog box:\n self.last_opened_globals_folder = os.path.dirname(globals_file)\n # Create the new file and open it:\n runmanager.new_globals_file(globals_file)\n self.open_globals_file(globals_file)\n\n def on_diff_globals_file_clicked(self):\n globals_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,\n 'Select globals file to compare',\n self.last_opened_globals_folder,\n \"HDF5 files (*.h5)\")\n if type(globals_file) is tuple:\n globals_file, _ = globals_file\n\n if not globals_file:\n # User cancelled\n return\n\n # Convert to standard platform specific path, otherwise Qt likes forward slashes:\n globals_file = os.path.abspath(globals_file)\n\n # Get runmanager's globals\n active_groups = self.get_active_groups()\n if active_groups is None:\n # Invalid group selection\n return\n\n # Get file's globals groups\n other_groups = runmanager.get_all_groups(globals_file)\n\n # Display the output tab so the user can see the output:\n self.ui.tabWidget.setCurrentWidget(self.ui.tab_output)\n self.output_box.output('Globals diff with:\\n%s\\n\\n' % globals_file)\n\n # Do the globals diff\n globals_diff_table = runmanager.globals_diff_groups(active_groups, other_groups)\n self.output_box.output(globals_diff_table)\n self.output_box.output('Ready.\\n\\n')\n\n def on_treeView_groups_leftClicked(self, index):\n \"\"\"Here we respond to user clicks on the treeview. 
We do the following:\n - If the user clicks on the <click to add group> dummy row, we go into\n edit mode on it so they can enter the name of the new group they\n want.\n - If the user clicks on the icon to open or close a globals file or a\n group, we call the appropriate open and close methods and update the\n open/close data role on the model.\n - If the user clicks delete on a globals group, we call a delete\n method, which deletes it after confirmation, and closes it if it was\n open.\n \"\"\"\n if qapplication.keyboardModifiers() != QtCore.Qt.NoModifier:\n # Only handle mouseclicks with no keyboard modifiers.\n return\n item = self.groups_model.itemFromIndex(index)\n # The 'name' item in the same row:\n name_index = index.sibling(index.row(), self.GROUPS_COL_NAME)\n name_item = self.groups_model.itemFromIndex(name_index)\n # The parent item, None if there is no parent:\n parent_item = item.parent()\n # What kind of row did the user click on?\n # A globals file, a group, or a 'click to add group' row?\n if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):\n # They clicked on an 'add new group' row. Enter editing\n # mode on the name item so they can enter a name for\n # the new group:\n self.ui.treeView_groups.setCurrentIndex(name_index)\n self.ui.treeView_groups.edit(name_index)\n if item.column() == self.GROUPS_COL_ACTIVE:\n # They clicked on the active column. Toggle the checkbox. We do\n # this manually because setting the item checkable means the model\n # changes before we catch the mouse click. This is a pain because\n # we want the ensuing sorting (if the user is sorting by the\n # enabled column) to keep the the selection. If the user only\n # selected the column by clicking on it, then the sort happens\n # before they selected it, and the resort happens without a visual\n # indication of where the item went, because it never got\n # selected.\n state = item.checkState()\n if state in (QtCore.Qt.Unchecked, QtCore.Qt.PartiallyChecked):\n item.setCheckState(QtCore.Qt.Checked)\n elif state == QtCore.Qt.Checked:\n item.setCheckState(QtCore.Qt.Unchecked)\n else:\n raise AssertionError('Invalid Check state')\n # If this changed the sort order, ensure the item is still visible:\n scroll_view_to_row_if_current(self.ui.treeView_groups, item)\n elif parent_item is None:\n # They clicked on a globals file row.\n globals_file = name_item.text()\n # What column did they click on?\n if item.column() == self.GROUPS_COL_OPENCLOSE:\n # They clicked the close button. Close the file:\n self.close_globals_file(globals_file)\n else:\n # They clicked on a globals group row.\n globals_file = parent_item.text()\n group_name = name_item.text()\n # What column did they click on?\n if item.column() == self.GROUPS_COL_DELETE:\n # They clicked the delete button. Delete the group:\n self.delete_group(globals_file, group_name, confirm=True)\n elif item.column() == self.GROUPS_COL_OPENCLOSE:\n # They clicked the open/close button. Which is it, open or close?\n group_is_open = item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)\n if group_is_open:\n self.close_group(globals_file, group_name)\n else:\n self.open_group(globals_file, group_name)\n\n def on_treeView_groups_doubleLeftClicked(self, index):\n item = self.groups_model.itemFromIndex(index)\n # The parent item, None if there is no parent:\n parent_item = item.parent()\n if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):\n return\n elif parent_item and item.column() == self.GROUPS_COL_NAME:\n # it's a group name item. 
What's the group and file name?\n globals_file = parent_item.text()\n group_name = item.text()\n if (globals_file, group_name) not in self.currently_open_groups:\n self.open_group(globals_file, group_name)\n # Focus the tab:\n group_tab = self.currently_open_groups[globals_file, group_name]\n for i in range(self.ui.tabWidget.count()):\n if self.ui.tabWidget.widget(i) is group_tab.ui:\n self.ui.tabWidget.setCurrentIndex(i)\n break\n\n def on_groups_model_item_changed(self, item):\n \"\"\"This function is for responding to data changes in the model. The\n methods for responding to changes different columns do different\n things. Mostly they make other data changes for model consistency, but\n also group creation and renaming is handled in response to changes to\n the 'name' column. When we change things elsewhere, we prefer to only\n change one thing, and the rest of the changes are triggered here. So\n here we do the following:\n\n Be careful not to recurse unsafely into this method - changing\n something that itself triggers further changes is fine so long as they\n peter out and don't get stuck in a loop. If recursion needs to be\n stopped, one can disconnect the signal temporarily with the context\n manager self.groups_model_item_changed_disconnected. But use this\n sparingly, otherwise there's the risk that some required data updates\n will be forgotten about and won't happen.\n \"\"\"\n if item.column() == self.GROUPS_COL_NAME:\n self.on_groups_model_name_changed(item)\n elif item.column() == self.GROUPS_COL_ACTIVE:\n self.on_groups_model_active_changed(item)\n elif item.column() == self.GROUPS_COL_OPENCLOSE:\n self.on_groups_model_openclose_changed(item)\n\n def on_groups_model_name_changed(self, item):\n \"\"\"Handles group renaming and creation of new groups due to the user\n editing the <click to add group> item\"\"\"\n parent_item = item.parent()\n # File rows are supposed to be uneditable, but just to be sure we have\n # a group row:\n assert parent_item is not None\n if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):\n item_text = item.text()\n if item_text != self.GROUPS_DUMMY_ROW_TEXT:\n # The user has made a new globals group by editing the <click\n # to add group> item.\n globals_file = parent_item.text()\n group_name = item_text\n self.new_group(globals_file, group_name)\n else:\n # User has renamed a globals group.\n new_group_name = item.text()\n previous_group_name = item.data(self.GROUPS_ROLE_PREVIOUS_NAME)\n # Ensure it truly is a name change, and not something else about\n # the item changing:\n if new_group_name != previous_group_name:\n globals_file = parent_item.text()\n self.rename_group(globals_file, previous_group_name, new_group_name)\n\n def on_groups_model_active_changed(self, item):\n \"\"\"Sets the sort data for the item in response to its check state\n changing. Also, if this is the first time this function has been\n called on the stack, that is, the change was initiated externally\n instead of via recursion from this function itself, then set the check\n state of other items for consistency. This entails checking/unchecking\n all group rows in response to the file row's check state changing, or\n changing the file row's check state to reflect the check state of the\n child group rows. That's why we need to keep track of the recursion\n depth - so that those changes we make don't in turn cause further\n changes. 
But we don't disconnect the on_changed signal altogether,\n because we still want to do the update of the sort data, and anything\n else that might be added in future.\"\"\"\n self.on_groups_model_active_changed_recursion_depth += 1\n try:\n check_state = item.checkState()\n # Ensure sort data matches active state:\n item.setData(check_state, self.GROUPS_ROLE_SORT_DATA)\n if self.on_groups_model_active_changed_recursion_depth > 1:\n # Prevent all below code from running in response to data changes\n # initiated from within this method itself. The code above this\n # check still runs in response to all changes.\n return\n\n parent_item = item.parent()\n if parent_item is not None:\n # A 'group active' checkbox changed due to external action (not from this method itself).\n # Update the parent file checkbox to reflect the state of its children\n children = [parent_item.child(i, self.GROUPS_COL_ACTIVE) for i in range(parent_item.rowCount())]\n child_states = [child.checkState() for child in children\n if not child.data(self.GROUPS_ROLE_IS_DUMMY_ROW)]\n parent_active_index = parent_item.index().sibling(parent_item.index().row(), self.GROUPS_COL_ACTIVE)\n parent_active_item = self.groups_model.itemFromIndex(parent_active_index)\n if all(state == QtCore.Qt.Checked for state in child_states):\n parent_active_item.setCheckState(QtCore.Qt.Checked)\n elif all(state == QtCore.Qt.Unchecked for state in child_states):\n parent_active_item.setCheckState(QtCore.Qt.Unchecked)\n else:\n parent_active_item.setCheckState(QtCore.Qt.PartiallyChecked)\n else:\n # A 'file active' checkbox changed due to external action (not from this method itself).\n # Update the check state of all children to match.\n name_index = item.index().sibling(item.index().row(), self.GROUPS_COL_NAME)\n name_item = self.groups_model.itemFromIndex(name_index)\n checkstate = item.checkState()\n children = [name_item.child(i, self.GROUPS_COL_ACTIVE) for i in range(name_item.rowCount())]\n for child in children:\n if not child.data(self.GROUPS_ROLE_IS_DUMMY_ROW):\n child.setCheckState(checkstate)\n finally:\n self.on_groups_model_active_changed_recursion_depth -= 1\n if self.on_groups_model_active_changed_recursion_depth == 0:\n self.do_model_sort()\n # Trigger a preparse to occur:\n self.globals_changed()\n\n def on_groups_model_openclose_changed(self, item):\n \"\"\"Sets item sort data and icon in response to the open/close state of a group\n changing.\"\"\"\n parent_item = item.parent()\n # The open/close state of a globals group changed. It is definitely a\n # group, not a file, as the open/close state of a file shouldn't be\n # changing.\n assert parent_item is not None # Just to be sure.\n # Ensure the sort data matches the open/close state:\n group_is_open = item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)\n item.setData(group_is_open, self.GROUPS_ROLE_SORT_DATA)\n # Set the appropriate icon and tooltip. Changing the icon causes\n # itemChanged to be emitted, even if it the same icon, and even if we\n # were to use the same QIcon instance. 
So to avoid infinite recursion\n # we temporarily disconnect the signal whilst we set the icons.\n with self.groups_model_item_changed_disconnected:\n if group_is_open:\n item.setIcon(QtGui.QIcon(':qtutils/fugue/cross'))\n item.setToolTip('Close globals group.')\n else:\n item.setIcon(QtGui.QIcon(':qtutils/fugue/plus'))\n item.setToolTip('Load globals group into runmanager.')\n self.do_model_sort()\n # If this changed the sort order, ensure the item is still visible:\n scroll_view_to_row_if_current(self.ui.treeView_groups, item)\n\n @inmain_decorator()\n def get_default_output_folder(self):\n \"\"\"Returns what the default output folder would be right now, based on\n the current date and selected labscript file. Returns empty string if\n no labscript file is selected. Does not create the default output\n folder, does not check if it exists.\"\"\"\n current_labscript_file = self.ui.lineEdit_labscript_file.text()\n if not current_labscript_file:\n return ''\n _, default_output_folder, _ = runmanager.new_sequence_details(\n current_labscript_file,\n config=self.exp_config,\n increment_sequence_index=False,\n )\n default_output_folder = os.path.normpath(default_output_folder)\n return default_output_folder\n\n def rollover_shot_output_folder(self):\n \"\"\"Runs in a thread, checking every 30 seconds if the default output folder has\n changed, likely because the date has changed, but also possible because another\n instance of runmanager has incremented the sequence index. If the defaulr output\n folder has changed, and if runmanager is configured to use the default output\n folder, sets the folder in which compiled shots will be put. Does not create the\n folder if it does not already exist, this will be done at compile-time.\"\"\"\n while True:\n time.sleep(30)\n try:\n self.check_output_folder_update()\n except Exception as e:\n # Don't stop the thread.\n logger.exception(\"error checking default output folder\")\n\n @inmain_decorator()\n def check_output_folder_update(self):\n \"\"\"Do a single check of whether the output folder needs updating. This\n is implemented as a separate function to the above loop so that the\n whole check happens at once in the Qt main thread and hence is atomic\n and can't be interfered with by other Qt calls in the program.\"\"\"\n current_default_output_folder = self.get_default_output_folder()\n if current_default_output_folder is None:\n # No labscript file selected:\n return\n currently_selected_output_folder = self.ui.lineEdit_shot_output_folder.text()\n if current_default_output_folder != self.previous_default_output_folder:\n # It's a new day, or a new labscript file.\n # Is the user using default folders?\n if currently_selected_output_folder == self.previous_default_output_folder:\n # Yes they are. 
In that case, update to use the new folder:\n self.ui.lineEdit_shot_output_folder.setText(current_default_output_folder)\n self.previous_default_output_folder = current_default_output_folder\n\n @inmain_decorator()\n def globals_changed(self):\n \"\"\"Called from either self, a GroupTab, or the RemoteServer to inform runmanager\n that something about globals has changed, and that they need parsing again.\"\"\"\n self.ui.pushButton_engage.setEnabled(False)\n self.preparse_globals_required.put(None)\n\n def update_axes_indentation(self):\n for i in range(self.axes_model.rowCount()):\n item = self.axes_model.item(i, self.AXES_COL_NAME)\n text = item.text().lstrip()\n text = ' '*i + text\n item.setText(text)\n \n @inmain_decorator() # Is called by preparser thread\n def update_axes_tab(self, expansions, dimensions):\n # get set of expansions\n expansion_list = []\n for global_name, expansion in expansions.items():\n if expansion:\n if expansion == 'outer':\n expansion_list.append('outer '+global_name)\n else:\n expansion_list.append('zip '+expansion)\n \n expansion_list = set(expansion_list)\n \n # find items to delete\n for i in reversed(range(self.axes_model.rowCount())):\n item = self.axes_model.item(i, self.AXES_COL_NAME)\n name = item.data(self.AXES_ROLE_NAME)\n if name not in expansion_list:\n item = self.axes_model.takeRow(i)\n del item\n else:\n length_item = self.axes_model.item(i, self.AXES_COL_LENGTH)\n if name in dimensions:\n length_item.setText(\"{}\".format(dimensions[name]))\n else:\n length_item.setText('Unknown')\n \n # remove from expansions list so we don't add it again\n expansion_list.remove(name)\n \n # add new rows\n for expansion_name in expansion_list:\n shuffle = self.ui.pushButton_shuffle.checkState() != QtCore.Qt.Unchecked\n self.add_item_to_axes_model(expansion_name, shuffle, dimensions)\n \n self.update_axes_indentation() \n\n def add_item_to_axes_model(self, expansion_name, shuffle, dimensions = None):\n if dimensions is None:\n dimensions = {}\n \n items = []\n \n expansion_type, name = expansion_name.split()\n name_item = QtGui.QStandardItem(name)\n name_item.setData(expansion_name, self.AXES_ROLE_NAME)\n if expansion_type == 'outer':\n name_item.setIcon(QtGui.QIcon(':qtutils/custom/outer'))\n else:\n name_item.setIcon(QtGui.QIcon(':qtutils/custom/zip'))\n items.append(name_item)\n \n length = 'Unknown'\n if expansion_name in dimensions:\n length = \"{}\".format(dimensions[expansion_name])\n length_item = QtGui.QStandardItem(length)\n items.append(length_item)\n \n shuffle_item = QtGui.QStandardItem()\n shuffle_item.setCheckable(True)\n shuffle_item.setCheckState(QtCore.Qt.Checked if shuffle else QtCore.Qt.Unchecked)\n \n items.append(shuffle_item)\n \n self.axes_model.appendRow(items)\n \n @inmain_decorator() # Is called by preparser thread\n def update_tabs_parsing_indication(self, active_groups, sequence_globals, evaled_globals, n_shots):\n for group_tab in self.currently_open_groups.values():\n group_tab.update_parse_indication(active_groups, sequence_globals, evaled_globals)\n self.ui.pushButton_engage.setEnabled(True)\n if n_shots == 1:\n n_shots_string = '(1 shot)'\n else:\n n_shots_string = '({} shots)'.format(n_shots)\n self.ui.pushButton_engage.setText('Engage {}'.format(n_shots_string))\n\n def preparse_globals(self):\n active_groups = self.get_active_groups()\n if active_groups is None:\n # There was an error, get_active_groups has already shown\n # it to the user.\n return\n # Expansion mode is automatically updated when the global's\n # type 
changes. If this occurs, we will have to parse again to\n # include the change:\n while True:\n results = self.parse_globals(active_groups, raise_exceptions=False, expand_globals=False, return_dimensions = True)\n sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions = results\n self.n_shots = len(shots)\n expansions_changed = self.guess_expansion_modes(\n active_groups, evaled_globals, global_hierarchy, expansions)\n if not expansions_changed:\n # Now expand globals while parsing to calculate the number of shots.\n # this must only be done after the expansion type guessing has been updated to avoid exceptions\n # when changing a zip group from a list to a single value\n results = self.parse_globals(active_groups, raise_exceptions=False, expand_globals=True, return_dimensions = True)\n sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions = results\n self.n_shots = len(shots)\n break\n self.update_tabs_parsing_indication(active_groups, sequence_globals, evaled_globals, self.n_shots)\n self.update_axes_tab(expansions, dimensions)\n\n def preparse_globals_loop(self):\n \"\"\"Runs in a thread, waiting on a threading.Event that tells us when\n some globals have changed, and calls parse_globals to evaluate them\n all before feeding the results back to the relevant tabs to be\n displayed.\"\"\"\n while True:\n try:\n # Wait until we're needed:\n self.preparse_globals_required.get()\n n_requests = 1\n # Wait until the main thread is idle before clearing the queue of\n # requests. This way if preparsing is triggered multiple times within\n # the main thread before it becomes idle, we can respond to this all at\n # once, once they are all done, rather than starting too early and\n # having to preparse again.\n with qtlock:\n while True:\n try:\n self.preparse_globals_required.get(block=False)\n n_requests += 1\n except queue.Empty:\n break\n # Do some work:\n self.preparse_globals()\n # Tell any callers calling preparse_globals_required.join() that we are\n # done with their request:\n for _ in range(n_requests):\n self.preparse_globals_required.task_done()\n except Exception:\n # Raise the error, but keep going so we don't take down the\n # whole thread if there is a bug.\n exc_info = sys.exc_info()\n raise_exception_in_thread(exc_info)\n\n def wait_until_preparse_complete(self):\n \"\"\"Block until the preparse loop has finished pending work\"\"\"\n self.preparse_globals_required.join()\n\n def get_group_item_by_name(self, globals_file, group_name, column, previous_name=None):\n \"\"\"Returns an item from the row representing a globals group in the\n groups model. 
Which item is returned is set by the column argument.\"\"\"\n parent_item = self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME)[0]\n possible_name_items = self.groups_model.findItems(group_name, QtCore.Qt.MatchRecursive,\n column=self.GROUPS_COL_NAME)\n # Don't accidentally match on other groups or files with the same name\n # as this group:\n possible_name_items = [item for item in possible_name_items if item.parent() == parent_item]\n if previous_name is not None:\n # Also filter by previous name, useful for telling rows apart when\n # a rename is in progress and two rows may temporarily contain the\n # same name (though the rename code with throw an error and revert\n # it).\n possible_name_items = [item for item in possible_name_items\n if item.data(self.GROUPS_ROLE_PREVIOUS_NAME) == previous_name]\n elif group_name != self.GROUPS_DUMMY_ROW_TEXT:\n # Don't return the dummy item unless they asked for it explicitly\n # - if a new group is being created, its name might be\n # simultaneously present in its own row and the dummy row too.\n possible_name_items = [item for item in possible_name_items\n if not item.data(self.GROUPS_ROLE_IS_DUMMY_ROW)]\n\n if len(possible_name_items) > 1:\n raise LookupError('Multiple items found')\n elif not possible_name_items:\n raise LookupError('No item found')\n name_item = possible_name_items[0]\n name_index = name_item.index()\n # Found the name item, get the sibling item for the column requested:\n item_index = name_index.sibling(name_index.row(), column)\n item = self.groups_model.itemFromIndex(item_index)\n return item\n\n def do_model_sort(self):\n header = self.ui.treeView_groups.header()\n sort_column = header.sortIndicatorSection()\n sort_order = header.sortIndicatorOrder()\n self.ui.treeView_groups.sortByColumn(sort_column, sort_order)\n\n @inmain_decorator() # Can be called from a non-main thread\n def get_active_groups(self, interactive=True):\n \"\"\"Returns active groups in the format {group_name: globals_file}.\n Displays an error dialog and returns None if multiple groups of the\n same name are selected, this is invalid - selected groups must be\n uniquely named. If interactive=False, raises the exception instead.\"\"\"\n active_groups = {}\n for i in range(self.groups_model.rowCount()):\n file_name_item = self.groups_model.item(i, self.GROUPS_COL_NAME)\n for j in range(file_name_item.rowCount()):\n group_name_item = file_name_item.child(j, self.GROUPS_COL_NAME)\n group_active_item = file_name_item.child(j, self.GROUPS_COL_ACTIVE)\n if group_active_item.checkState() == QtCore.Qt.Checked:\n group_name = group_name_item.text()\n globals_file = file_name_item.text()\n if group_name in active_groups:\n msg = (\n 'There are two active groups named %s. 
' % group_name\n + 'Active groups must have unique names.'\n )\n if interactive:\n error_dialog(msg)\n return\n raise RuntimeError(msg)\n active_groups[group_name] = globals_file\n return active_groups\n\n def open_globals_file(self, globals_file):\n # Do nothing if this file is already open:\n if self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME):\n return\n\n # Get the groups:\n groups = runmanager.get_grouplist(globals_file)\n # Add the parent row:\n file_name_item = QtGui.QStandardItem(globals_file)\n file_name_item.setEditable(False)\n file_name_item.setToolTip(globals_file)\n # Sort column by name:\n file_name_item.setData(globals_file, self.GROUPS_ROLE_SORT_DATA)\n\n file_active_item = QtGui.QStandardItem()\n file_active_item.setCheckState(QtCore.Qt.Unchecked)\n # Sort column by CheckState - must keep this updated when checkstate changes:\n file_active_item.setData(QtCore.Qt.Unchecked, self.GROUPS_ROLE_SORT_DATA)\n file_active_item.setEditable(False)\n file_active_item.setToolTip('Check to set all the file\\'s groups as active.')\n\n file_delete_item = QtGui.QStandardItem() # Blank, only groups have a delete button\n file_delete_item.setEditable(False)\n # Must be set to something so that the dummy row doesn't get sorted first:\n file_delete_item.setData(False, self.GROUPS_ROLE_SORT_DATA)\n\n file_close_item = QtGui.QStandardItem()\n file_close_item.setIcon(QtGui.QIcon(':qtutils/fugue/cross'))\n file_close_item.setEditable(False)\n file_close_item.setToolTip('Close globals file.')\n\n self.groups_model.appendRow([file_name_item, file_active_item, file_delete_item, file_close_item])\n\n # Add the groups as children:\n for group_name in groups:\n row = self.make_group_row(group_name)\n file_name_item.appendRow(row)\n\n # Finally, add the <Click to add group> row at the bottom:\n dummy_name_item = QtGui.QStandardItem(self.GROUPS_DUMMY_ROW_TEXT)\n dummy_name_item.setToolTip('Click to add group')\n # This lets later code know that this row does\n # not correspond to an actual globals group:\n dummy_name_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)\n dummy_name_item.setData(self.GROUPS_DUMMY_ROW_TEXT, self.GROUPS_ROLE_PREVIOUS_NAME)\n dummy_name_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable) # Clears the 'selectable' flag\n\n dummy_active_item = QtGui.QStandardItem()\n dummy_active_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)\n dummy_active_item.setFlags(QtCore.Qt.NoItemFlags)\n\n dummy_delete_item = QtGui.QStandardItem()\n dummy_delete_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)\n dummy_delete_item.setFlags(QtCore.Qt.NoItemFlags)\n\n dummy_open_close_item = QtGui.QStandardItem()\n dummy_open_close_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)\n dummy_open_close_item.setFlags(QtCore.Qt.NoItemFlags)\n\n # Not setting anything as the above items' sort role has the effect of\n # ensuring this row is always sorted to the end of the list, without\n # us having to implement any custom sorting methods or subclassing\n # anything, yay.\n\n file_name_item.appendRow([dummy_name_item, dummy_active_item, dummy_delete_item, dummy_open_close_item])\n # Expand the child items to be visible:\n self.ui.treeView_groups.setExpanded(file_name_item.index(), True)\n self.globals_changed()\n self.do_model_sort()\n # If this changed the sort order, ensure the file item is visible:\n scroll_view_to_row_if_current(self.ui.treeView_groups, file_name_item)\n\n def make_group_row(self, group_name):\n \"\"\"Returns a new row representing one group in the 
groups tab, ready to be\n inserted into the model.\"\"\"\n group_name_item = QtGui.QStandardItem(group_name)\n # We keep the previous name around so that we can detect what changed:\n group_name_item.setData(group_name, self.GROUPS_ROLE_PREVIOUS_NAME)\n # Sort column by name:\n group_name_item.setData(group_name, self.GROUPS_ROLE_SORT_DATA)\n\n group_active_item = QtGui.QStandardItem()\n group_active_item.setCheckState(QtCore.Qt.Unchecked)\n # Sort column by CheckState - must keep this updated whenever the\n # checkstate changes:\n group_active_item.setData(QtCore.Qt.Unchecked, self.GROUPS_ROLE_SORT_DATA)\n group_active_item.setEditable(False)\n group_active_item.setToolTip(\n 'Whether or not the globals within this group should be used by runmanager for compilation.')\n\n group_delete_item = QtGui.QStandardItem()\n group_delete_item.setIcon(QtGui.QIcon(':qtutils/fugue/minus'))\n # Must be set to something so that the dummy row doesn't get sorted first:\n group_delete_item.setData(False, self.GROUPS_ROLE_SORT_DATA)\n group_delete_item.setEditable(False)\n group_delete_item.setToolTip('Delete globals group from file.')\n\n group_open_close_item = QtGui.QStandardItem()\n group_open_close_item.setIcon(QtGui.QIcon(':qtutils/fugue/plus'))\n group_open_close_item.setData(False, self.GROUPS_ROLE_GROUP_IS_OPEN)\n # Sort column by whether group is open - must keep this manually\n # updated when the state changes:\n group_open_close_item.setData(False, self.GROUPS_ROLE_SORT_DATA)\n group_open_close_item.setEditable(False)\n group_open_close_item.setToolTip('Load globals group into runmananger.')\n\n row = [group_name_item, group_active_item, group_delete_item, group_open_close_item]\n return row\n\n def close_globals_file(self, globals_file, confirm=True):\n item = self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME)[0]\n # Close any open groups in this globals file:\n\n child_name_items = [item.child(i, self.GROUPS_COL_NAME) for i in range(item.rowCount())]\n child_openclose_items = [item.child(i, self.GROUPS_COL_OPENCLOSE) for i in range(item.rowCount())]\n child_is_open = [child_item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)\n for child_item in child_openclose_items]\n if confirm and any(child_is_open):\n if not question_dialog('Close %s? This will close %d currently open group(s).' %\n (globals_file, child_is_open.count(True))):\n return\n to_close = [name_item for name_item, is_open in zip(child_name_items, child_is_open) if is_open]\n for name_item in to_close:\n group_name = name_item.text()\n self.close_group(globals_file, group_name)\n\n # Remove the globals file from the model:\n self.groups_model.removeRow(item.row())\n self.globals_changed()\n\n def copy_group(self, source_globals_file, source_group_name, dest_globals_file=None, delete_source_group=False):\n \"\"\"This function copys a group of globals with the name source_group_name from the file\n source_globals_file to a new file dest_globals_file. 
If delete_source_group is True\n the source group is deleted after copying\"\"\"\n if delete_source_group and source_globals_file == dest_globals_file:\n return\n try:\n dest_group_name = runmanager.copy_group(source_globals_file, source_group_name, dest_globals_file, delete_source_group)\n except Exception as e:\n error_dialog(str(e))\n else:\n # Insert the newly created globals group into the model, as a\n # child row of the new globals file.\n if dest_globals_file is None:\n dest_globals_file = source_globals_file\n\n # find the new groups parent row by filepath\n for index in range(self.groups_model.rowCount()):\n if self.groups_model.item(index, self.GROUPS_COL_NAME).text() == dest_globals_file:\n parent_row = self.groups_model.item(index)\n break\n\n last_index = parent_row.rowCount()\n # Insert it as the row before the last (dummy) row:\n group_row = self.make_group_row(dest_group_name)\n parent_row.insertRow(last_index - 1, group_row)\n self.do_model_sort()\n\n # Open the group\n self.open_group(dest_globals_file, dest_group_name)\n name_item = group_row[self.GROUPS_COL_NAME]\n self.globals_changed()\n self.ui.treeView_groups.setCurrentIndex(name_item.index())\n\n # delete original\n if delete_source_group:\n self.delete_group(source_globals_file, source_group_name, confirm=False)\n\n # If this changed the sort order, ensure the group item is still visible:\n scroll_view_to_row_if_current(self.ui.treeView_groups, name_item)\n\n def new_group(self, globals_file, group_name):\n item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME,\n previous_name=self.GROUPS_DUMMY_ROW_TEXT)\n try:\n runmanager.new_group(globals_file, group_name)\n except Exception as e:\n error_dialog(str(e))\n else:\n # Insert the newly created globals group into the model, as a\n # child row of the globals file it belong to.\n group_row = self.make_group_row(group_name)\n last_index = item.parent().rowCount()\n # Insert it as the row before the last (dummy) row:\n item.parent().insertRow(last_index - 1, group_row)\n self.do_model_sort()\n # Open the group and mark it active:\n self.open_group(globals_file, group_name)\n active_item = group_row[self.GROUPS_COL_ACTIVE]\n name_item = group_row[self.GROUPS_COL_NAME]\n active_item.setCheckState(QtCore.Qt.Checked)\n self.globals_changed()\n self.ui.treeView_groups.setCurrentIndex(name_item.index())\n # If this changed the sort order, ensure the group item is still visible:\n scroll_view_to_row_if_current(self.ui.treeView_groups, name_item)\n finally:\n # Set the dummy row's text back ready for another group to be created:\n item.setText(self.GROUPS_DUMMY_ROW_TEXT)\n\n def open_group(self, globals_file, group_name, trigger_preparse=True):\n assert (globals_file, group_name) not in self.currently_open_groups # sanity check\n group_tab = GroupTab(self.ui.tabWidget, globals_file, group_name)\n self.currently_open_groups[globals_file, group_name] = group_tab\n\n # Set the open/close state in the groups_model. itemChanged will be\n # emitted and self.on_groups_model_item_changed will handle updating\n # the other data roles, icons etc:\n openclose_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_OPENCLOSE)\n openclose_item.setData(True, self.GROUPS_ROLE_GROUP_IS_OPEN)\n # Trigger a preparse to occur in light of this. Calling code can\n # disable this so that multiple groups can be opened at once without\n # triggering a preparse. 
If they do so, they should call\n # self.globals_changed() themselves.\n if trigger_preparse:\n self.globals_changed()\n\n def rename_group(self, globals_file, previous_group_name, new_group_name):\n item = self.get_group_item_by_name(globals_file, new_group_name, self.GROUPS_COL_NAME,\n previous_name=previous_group_name)\n try:\n runmanager.rename_group(globals_file, previous_group_name, new_group_name)\n except Exception as e:\n error_dialog(str(e))\n # Set the item text back to the old name, since the rename failed:\n item.setText(previous_group_name)\n else:\n item.setData(new_group_name, self.GROUPS_ROLE_PREVIOUS_NAME)\n item.setData(new_group_name, self.GROUPS_ROLE_SORT_DATA)\n self.do_model_sort()\n # If this changed the sort order, ensure the group item is still visible:\n scroll_view_to_row_if_current(self.ui.treeView_groups, item)\n group_tab = self.currently_open_groups.pop((globals_file, previous_group_name), None)\n if group_tab is not None:\n # Change labels and tooltips appropriately if the group is open:\n group_tab.set_file_and_group_name(globals_file, new_group_name)\n # Re-add it to the dictionary under the new name:\n self.currently_open_groups[globals_file, new_group_name] = group_tab\n\n def close_group(self, globals_file, group_name):\n group_tab = self.currently_open_groups.pop((globals_file, group_name), None)\n assert group_tab is not None # Just in case\n group_tab.close()\n openclose_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_OPENCLOSE)\n openclose_item.setData(False, self.GROUPS_ROLE_GROUP_IS_OPEN)\n\n def delete_group(self, globals_file, group_name, confirm=True):\n if confirm:\n if not question_dialog(\"Delete the group '%s'?\" % group_name):\n return\n # If the group is open, close it:\n group_tab = self.currently_open_groups.get((globals_file, group_name))\n if group_tab is not None:\n self.close_group(globals_file, group_name)\n runmanager.delete_group(globals_file, group_name)\n # Find the entry for this group in self.groups_model and remove it:\n name_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME)\n name_item.parent().removeRow(name_item.row())\n self.globals_changed()\n\n def on_save_configuration_triggered(self):\n if self.last_save_config_file is None:\n self.on_save_configuration_as_triggered()\n self.ui.actionSave_configuration_as.setEnabled(True)\n self.ui.actionRevert_configuration.setEnabled(True)\n else:\n self.save_configuration(self.last_save_config_file)\n\n def on_revert_configuration_triggered(self):\n save_data = self.get_save_data()\n if self.last_save_data is not None and save_data != self.last_save_data:\n message = 'Revert configuration to the last saved state in \\'%s\\'?' 
% self.last_save_config_file\n reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)\n if reply == QtWidgets.QMessageBox.Cancel:\n return\n elif reply == QtWidgets.QMessageBox.Yes:\n self.load_configuration(self.last_save_config_file)\n else:\n error_dialog('no changes to revert')\n\n def on_save_configuration_as_triggered(self):\n if self.last_save_config_file is not None:\n default = self.last_save_config_file\n else:\n default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'runmanager.ini')\n save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,\n 'Select file to save current runmanager configuration',\n default,\n \"config files (*.ini)\")\n if type(save_file) is tuple:\n save_file, _ = save_file\n\n if not save_file:\n # User cancelled\n return\n # Convert to standard platform specific path, otherwise Qt likes\n # forward slashes:\n save_file = os.path.abspath(save_file)\n self.save_configuration(save_file)\n\n def get_save_data(self):\n # Get the currently open files and active groups:\n h5_files_open = []\n active_groups = []\n for i in range(self.groups_model.rowCount()):\n file_name_item = self.groups_model.item(i, self.GROUPS_COL_NAME)\n globals_file_name = file_name_item.text()\n h5_files_open.append(globals_file_name)\n for j in range(file_name_item.rowCount()):\n group_name_item = file_name_item.child(j, self.GROUPS_COL_NAME)\n group_name = group_name_item.text()\n group_active_item = file_name_item.child(j, self.GROUPS_COL_ACTIVE)\n if group_active_item.checkState() == QtCore.Qt.Checked:\n active_groups.append((globals_file_name, group_name))\n # Get the currently open groups:\n groups_open = []\n for i in range(self.ui.tabWidget.count()):\n tab_page = self.ui.tabWidget.widget(i)\n for (globals_file_name, group_name), group_tab in self.currently_open_groups.items():\n if group_tab.ui is tab_page:\n groups_open.append((globals_file_name, group_name))\n break\n # Get the labscript file, output folder, and whether the output folder\n # is default:\n current_labscript_file = self.ui.lineEdit_labscript_file.text()\n shot_output_folder = self.ui.lineEdit_shot_output_folder.text()\n is_using_default_shot_output_folder = (shot_output_folder == self.get_default_output_folder())\n # Only save the shot output folder if not using the default, that way\n # the folder updating as the day rolls over will not be detected as a\n # change to the save data:\n if is_using_default_shot_output_folder:\n shot_output_folder = ''\n\n # Get the server hostnames:\n BLACS_host = self.ui.lineEdit_BLACS_hostname.text()\n\n send_to_runviewer = self.ui.checkBox_view_shots.isChecked()\n send_to_BLACS = self.ui.checkBox_run_shots.isChecked()\n shuffle = self.ui.pushButton_shuffle.isChecked()\n\n # axes tab information\n axes = []\n for i in range(self.axes_model.rowCount()):\n name_item = self.axes_model.item(i, self.AXES_COL_NAME)\n shuffle_item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)\n shuffle_state = shuffle_item.checkState()\n \n axes.append((name_item.data(self.AXES_ROLE_NAME), 1 if shuffle_state == QtCore.Qt.Checked else 0))\n \n save_data = {'h5_files_open': h5_files_open,\n 'active_groups': active_groups,\n 'groups_open': groups_open,\n 'current_labscript_file': current_labscript_file,\n 'shot_output_folder': shot_output_folder,\n 'is_using_default_shot_output_folder': is_using_default_shot_output_folder,\n 'send_to_runviewer': send_to_runviewer,\n 'send_to_BLACS': 
send_to_BLACS,\n 'shuffle': shuffle,\n 'axes': axes,\n 'BLACS_host': BLACS_host}\n return save_data\n\n def save_configuration(self, save_file):\n runmanager_config = LabConfig(save_file)\n save_data = self.get_save_data()\n self.last_save_config_file = save_file\n self.last_save_data = save_data\n for key, value in save_data.items():\n runmanager_config.set('runmanager_state', key, pprint.pformat(value))\n\n def on_load_configuration_triggered(self):\n save_data = self.get_save_data()\n if self.last_save_data is not None and save_data != self.last_save_data:\n message = ('Current configuration (which groups are active/open and other GUI state) '\n 'has changed: save config file \\'%s\\'?' % self.last_save_config_file)\n reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)\n if reply == QtWidgets.QMessageBox.Cancel:\n return\n if reply == QtWidgets.QMessageBox.Yes:\n self.save_configuration(self.last_save_config_file)\n\n if self.last_save_config_file is not None:\n default = self.last_save_config_file\n else:\n default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'runmanager.ini')\n\n file = QtWidgets.QFileDialog.getOpenFileName(self.ui,\n 'Select runmanager configuration file to load',\n default,\n \"config files (*.ini)\")\n if type(file) is tuple:\n file, _ = file\n\n if not file:\n # User cancelled\n return\n # Convert to standard platform specific path, otherwise Qt likes\n # forward slashes:\n file = os.path.abspath(file)\n self.load_configuration(file)\n\n def load_configuration(self, filename):\n self.last_save_config_file = filename\n self.ui.actionSave_configuration.setText('Save configuration %s'%filename)\n # Close all files:\n save_data = self.get_save_data()\n for globals_file in save_data['h5_files_open']:\n self.close_globals_file(globals_file, confirm=False)\n # Ensure folder exists, if this was opened programmatically we are\n # creating the file, so the directory had better exist!\n runmanager_config = LabConfig(filename)\n\n has_been_a_warning = [False]\n def warning(message):\n if not has_been_a_warning[0]:\n has_been_a_warning[0] = True\n self.output_box.output('\\n')\n self.output_box.output('Warning: %s\\n' % message, red=True)\n\n try:\n h5_files_open = ast.literal_eval(runmanager_config.get('runmanager_state', 'h5_files_open'))\n except Exception:\n pass\n else:\n for globals_file in h5_files_open:\n if os.path.exists(globals_file):\n try:\n self.open_globals_file(globals_file)\n self.last_opened_globals_folder = os.path.dirname(globals_file)\n except Exception:\n raise_exception_in_thread(sys.exc_info())\n continue\n else:\n self.output_box.output('\\nWarning: globals file %s no longer exists\\n' % globals_file, red=True)\n try:\n active_groups = ast.literal_eval(runmanager_config.get('runmanager_state', 'active_groups'))\n except Exception:\n pass\n else:\n for globals_file, group_name in active_groups:\n try:\n group_active_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_ACTIVE)\n group_active_item.setCheckState(QtCore.Qt.Checked)\n except LookupError:\n warning(\"previously active group '%s' in %s no longer exists\" % (group_name, globals_file))\n try:\n groups_open = ast.literal_eval(runmanager_config.get('runmanager_state', 'groups_open'))\n except Exception:\n pass\n else:\n for globals_file, group_name in groups_open:\n # First check if it exists:\n try:\n 
self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME)\n except LookupError:\n warning(\"previously open group '%s' in %s no longer exists\" % (group_name, globals_file))\n else:\n self.open_group(globals_file, group_name)\n\n try:\n current_labscript_file = ast.literal_eval(\n runmanager_config.get('runmanager_state', 'current_labscript_file'))\n except Exception:\n pass\n else:\n if os.path.exists(current_labscript_file):\n self.ui.lineEdit_labscript_file.setText(current_labscript_file)\n self.last_opened_labscript_folder = os.path.dirname(current_labscript_file)\n elif current_labscript_file:\n warning('previously selected labscript file %s no longer exists' % current_labscript_file)\n try:\n shot_output_folder = ast.literal_eval(runmanager_config.get('runmanager_state', 'shot_output_folder'))\n except Exception:\n pass\n else:\n self.ui.lineEdit_shot_output_folder.setText(shot_output_folder)\n self.last_selected_shot_output_folder = os.path.dirname(shot_output_folder)\n try:\n is_using_default_shot_output_folder = ast.literal_eval(\n runmanager_config.get('runmanager_state', 'is_using_default_shot_output_folder'))\n except Exception:\n pass\n else:\n if is_using_default_shot_output_folder:\n default_output_folder = self.get_default_output_folder()\n self.ui.lineEdit_shot_output_folder.setText(default_output_folder)\n self.last_selected_shot_output_folder = os.path.dirname(default_output_folder)\n try:\n send_to_runviewer = ast.literal_eval(runmanager_config.get('runmanager_state', 'send_to_runviewer'))\n except Exception:\n pass\n else:\n self.ui.checkBox_view_shots.setChecked(send_to_runviewer)\n try:\n send_to_BLACS = ast.literal_eval(runmanager_config.get('runmanager_state', 'send_to_BLACS'))\n except Exception:\n pass\n else:\n self.ui.checkBox_run_shots.setChecked(send_to_BLACS)\n \n # clear the axes model first\n if self.axes_model.rowCount():\n self.axes_model.removeRows(0, self.axes_model.rowCount())\n # set the state of the global shuffle button. This ensure that if no axes items get loaded afterwards\n # (e.g. because the globals in the .ini file are no longer expansion globals), then we still have \n # an approximate state for the shuffle button that will apply to whatever globals are to be expanded.\n try:\n shuffle = ast.literal_eval(runmanager_config.get('runmanager_state', 'shuffle'))\n except Exception:\n pass\n else:\n if shuffle:\n self.ui.pushButton_shuffle.setChecked(True)\n # Now load the axes states (order and shuffle). 
This will also ensure the shuffle button matches the \n # state of these items (since we don't save/restore the tri-state nature of the global shuffle button\n try:\n axes = ast.literal_eval(runmanager_config.get('runmanager_state', 'axes'))\n except Exception:\n pass\n else:\n if isinstance(axes, list):\n # clear model\n for name, shuffle in axes:\n self.add_item_to_axes_model(name, shuffle)\n self.update_axes_indentation() \n try:\n BLACS_host = ast.literal_eval(runmanager_config.get('runmanager_state', 'BLACS_host'))\n except Exception:\n pass\n else:\n self.ui.lineEdit_BLACS_hostname.setText(BLACS_host)\n # Set as self.last_save_data:\n save_data = self.get_save_data()\n self.last_save_data = save_data\n self.ui.actionSave_configuration_as.setEnabled(True)\n self.ui.actionRevert_configuration.setEnabled(True)\n\n def compile_loop(self):\n while True:\n try:\n labscript_file, run_files, send_to_BLACS, BLACS_host, send_to_runviewer = self.compile_queue.get()\n run_files = iter(run_files) # Should already be in iterator but just in case\n while True:\n if self.compilation_aborted.is_set():\n self.output_box.output('Compilation aborted.\\n\\n', red=True)\n break\n try:\n try:\n # We do next() instead of looping over run_files\n # so that if compilation is aborted we won't\n # create an extra file unnecessarily.\n run_file = next(run_files)\n except StopIteration:\n self.output_box.output('Ready.\\n\\n')\n break\n else:\n self.to_child.put(['compile', [labscript_file, run_file]])\n signal, success = self.from_child.get()\n assert signal == 'done'\n if not success:\n self.compilation_aborted.set()\n continue\n if send_to_BLACS:\n self.send_to_BLACS(run_file, BLACS_host)\n if send_to_runviewer:\n self.send_to_runviewer(run_file)\n except Exception as e:\n self.output_box.output(str(e) + '\\n', red=True)\n self.compilation_aborted.set()\n inmain(self.ui.pushButton_abort.setEnabled, False)\n self.compilation_aborted.clear()\n except Exception:\n # Raise it so whatever bug it is gets seen, but keep going so\n # the thread keeps functioning:\n exc_info = sys.exc_info()\n raise_exception_in_thread(exc_info)\n continue\n\n def parse_globals(self, active_groups, raise_exceptions=True, expand_globals=True, expansion_order = None, return_dimensions = False):\n sequence_globals = runmanager.get_globals(active_groups)\n #logger.info('got sequence globals')\n evaled_globals, global_hierarchy, expansions = runmanager.evaluate_globals(sequence_globals, raise_exceptions)\n #logger.info('evaluated sequence globals')\n if expand_globals:\n if return_dimensions:\n shots, dimensions = runmanager.expand_globals(sequence_globals, evaled_globals, expansion_order, return_dimensions=return_dimensions)\n else:\n shots = runmanager.expand_globals(sequence_globals, evaled_globals, expansion_order)\n else:\n shots = []\n dimensions = {}\n #logger.info('expanded sequence globals')\n if return_dimensions:\n return sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions\n else:\n return sequence_globals, shots, evaled_globals, global_hierarchy, expansions\n\n def guess_expansion_modes(self, active_groups, evaled_globals, global_hierarchy, expansions):\n \"\"\"This function is designed to be called iteratively. It changes the\n expansion type of globals that reference other globals - such that\n globals referencing an iterable global will be zipped with it, rather\n than outer producted. 
Each time this method is called,\n self.parse_globals should also be called, so that the globals are\n evaluated with their new expansion modes, if they changed. This should\n be performed repeatedly until there are no more changes. Note that\n this method does not return what expansion types it thinks globals\n should have - it *actually writes them to the globals HDF5 file*. So\n it is up to later code to ensure it re-reads the expansion mode from\n the HDF5 file before proceeding. At present this method is only called\n from self.preparse_globals(), so see there to see how it fits in with\n everything else. This method uses four instance attributes to store\n state: self.previous_evaled_globals, self.previous_global_hierarchy,\n self.previous_expansion_types and self.previous_expansions. This is\n neccesary so that it can detect changes.\"\"\"\n\n # Do nothing if there were exceptions:\n for group_name in evaled_globals:\n for global_name in evaled_globals[group_name]:\n value = evaled_globals[group_name][global_name]\n if isinstance(value, Exception):\n # Let ExpansionErrors through through, as they occur\n # when the user has changed the value without changing\n # the expansion type:\n if isinstance(value, runmanager.ExpansionError):\n continue\n return False\n # Did the guessed expansion type for any of the globals change?\n expansion_types_changed = False\n expansion_types = {}\n for group_name in evaled_globals:\n for global_name in evaled_globals[group_name]:\n new_value = evaled_globals[group_name][global_name]\n try:\n previous_value = self.previous_evaled_globals[group_name][global_name]\n except KeyError:\n # This variable is used to guess the expansion type\n # \n # If we already have an expansion specified for this, but\n # don't have a previous value, then we should use the \n # new_value for the guess as we are likely loading from HDF5\n # file for the first time (and either way, don't want to \n # overwrite what the user has put in the expansion type)\n #\n # If we don't have an expansion...\n # then we set it to '0' which will result in an\n # expansion type guess of '' (emptys string) This will\n # either result in nothing being done to the expansion\n # type or the expansion type being found to be 'outer',\n # which will then make it go through the machinery below\n if global_name in expansions and expansions[global_name]:\n previous_value = new_value\n else:\n previous_value = 0\n\n new_guess = runmanager.guess_expansion_type(new_value)\n previous_guess = runmanager.guess_expansion_type(previous_value)\n\n if new_guess == 'outer':\n expansion_types[global_name] = {'previous_guess': previous_guess,\n 'new_guess': new_guess,\n 'group_name': group_name,\n 'value': new_value\n }\n elif new_guess != previous_guess:\n filename = active_groups[group_name]\n runmanager.set_expansion(filename, group_name, global_name, new_guess)\n expansions[global_name] = new_guess\n expansion_types_changed = True\n\n # recursively find dependencies and add them to a zip group!\n def find_dependencies(global_name, global_hierarchy, expansion_types):\n results = set()\n for name, dependencies in global_hierarchy.items():\n if name in expansion_types and global_name in dependencies:\n results.add(name)\n results = results.union(find_dependencies(name, global_hierarchy, expansion_types))\n return results\n\n def global_depends_on_global_with_outer_product(global_name, global_hierarchy, expansions):\n if global_name not in global_hierarchy:\n return False\n else:\n for dependency in 
global_hierarchy[global_name]:\n if expansions[dependency]:\n return True\n\n def set_expansion_type_guess(expansion_types, expansions, global_name, expansion_to_set, new=True):\n if new:\n key = 'new_guess'\n else:\n key = 'previous_guess'\n \n # debug logging\n log_if_global(global_name, [], 'setting expansion type for new dependency' if new else 'setting expansion type for old dependencies')\n \n \n # only do this if the expansion is *not* already set to a specific zip group\n if global_name in expansions and expansions[global_name] != '' and expansions[global_name] != 'outer':\n expansion_types[global_name][key] = expansions[global_name]\n \n # debug logging\n log_if_global(global_name, [], 'Using existing expansion %s for %s'%(expansions[global_name], global_name))\n else:\n expansion_types[global_name][key] = expansion_to_set\n expansions[global_name] = expansion_to_set\n \n # debug logging\n log_if_global(global_name, [], 'Using existing expansion %s for %s'%(expansion_to_set, global_name))\n \n \n for global_name in sorted(expansion_types):\n # we have a global that does not depend on anything that has an\n # expansion type of 'outer'\n if (not global_depends_on_global_with_outer_product(global_name, global_hierarchy, expansions)\n and not isinstance(expansion_types[global_name]['value'], runmanager.ExpansionError)):\n current_dependencies = find_dependencies(global_name, global_hierarchy, expansion_types)\n\n # if this global has other globals that use it, then add them\n # all to a zip group with the name of this global\n if current_dependencies:\n for dependency in current_dependencies: \n set_expansion_type_guess(expansion_types, expansions, dependency, str(global_name))\n \n set_expansion_type_guess(expansion_types, expansions, global_name, str(global_name))\n\n for global_name in sorted(self.previous_expansion_types):\n if (not global_depends_on_global_with_outer_product(\n global_name, self.previous_global_hierarchy, self.previous_expansions)\n and not isinstance(self.previous_expansion_types[global_name]['value'], runmanager.ExpansionError)):\n old_dependencies = find_dependencies(global_name, self.previous_global_hierarchy, self.previous_expansion_types)\n # if this global has other globals that use it, then add them\n # all to a zip group with the name of this global\n if old_dependencies:\n for dependency in old_dependencies:\n if dependency in expansion_types:\n set_expansion_type_guess(expansion_types, self.previous_expansions, dependency, str(global_name), new=False)\n if global_name in expansion_types:\n set_expansion_type_guess(expansion_types, self.previous_expansions, global_name, str(global_name), new=False)\n\n for global_name, guesses in expansion_types.items():\n if guesses['new_guess'] != guesses['previous_guess']:\n filename = active_groups[guesses['group_name']]\n runmanager.set_expansion(\n filename, str(guesses['group_name']), str(global_name), str(guesses['new_guess']))\n expansions[global_name] = guesses['new_guess']\n expansion_types_changed = True\n\n # Now check everything that has an expansion type not equal to outer.\n # If it has one, but is not iteratble, remove it from teh zip group\n for group_name in evaled_globals:\n for global_name in evaled_globals[group_name]:\n if expansions[global_name] and expansions[global_name] != 'outer':\n try:\n iter(evaled_globals[group_name][global_name])\n except Exception:\n filename = active_groups[group_name]\n runmanager.set_expansion(filename, group_name, global_name, '')\n expansion_types_changed = 
True\n\n self.previous_evaled_globals = evaled_globals\n self.previous_global_hierarchy = global_hierarchy\n self.previous_expansion_types = expansion_types\n self.previous_expansions = expansions\n\n return expansion_types_changed\n\n def make_h5_files(self, labscript_file, output_folder, sequence_globals, shots, shuffle):\n sequence_attrs, default_output_dir, filename_prefix = runmanager.new_sequence_details(\n labscript_file, config=self.exp_config, increment_sequence_index=True\n )\n if output_folder == self.previous_default_output_folder:\n # The user is using dthe efault output folder. Just in case the sequence\n # index has been updated or the date has changed, use the default_output dir\n # obtained from new_sequence_details, as it is race-free, whereas the one\n # from the UI may be out of date since we only update it once a second.\n output_folder = default_output_dir\n self.check_output_folder_update()\n run_files = runmanager.make_run_files(\n output_folder,\n sequence_globals,\n shots,\n sequence_attrs,\n filename_prefix,\n shuffle,\n )\n logger.debug(run_files)\n return labscript_file, run_files\n\n def send_to_BLACS(self, run_file, BLACS_hostname):\n port = int(self.exp_config.get('ports', 'BLACS'))\n agnostic_path = shared_drive.path_to_agnostic(run_file)\n self.output_box.output('Submitting run file %s.\\n' % os.path.basename(run_file))\n try:\n response = zmq_get(port, BLACS_hostname, data=agnostic_path)\n if 'added successfully' in response:\n self.output_box.output(response)\n else:\n raise Exception(response)\n except Exception as e:\n self.output_box.output('Couldn\\'t submit job to control server: %s\\n' % str(e), red=True)\n self.compilation_aborted.set()\n\n def send_to_runviewer(self, run_file):\n runviewer_port = int(self.exp_config.get('ports', 'runviewer'))\n agnostic_path = shared_drive.path_to_agnostic(run_file)\n try:\n response = zmq_get(runviewer_port, 'localhost', data='hello', timeout=1)\n if 'hello' not in response:\n raise Exception(response)\n except Exception as e:\n logger.info('runviewer not running, attempting to start...')\n # Runviewer not running, start it:\n if os.name == 'nt':\n creationflags = 0x00000008 # DETACHED_PROCESS from the win32 API\n subprocess.Popen([sys.executable, '-m', 'runviewer'],\n creationflags=creationflags, stdout=None, stderr=None,\n close_fds=True)\n else:\n devnull = open(os.devnull, 'w')\n if not os.fork():\n os.setsid()\n subprocess.Popen([sys.executable, '-m', 'runviewer'],\n stdin=devnull, stdout=devnull, stderr=devnull, close_fds=True)\n os._exit(0)\n try:\n zmq_get(runviewer_port, 'localhost', data='hello', timeout=15)\n except Exception as e:\n self.output_box.output('Couldn\\'t submit shot to runviewer: %s\\n\\n' % str(e), red=True)\n\n try:\n response = zmq_get(runviewer_port, 'localhost', data=agnostic_path, timeout=0.5)\n if 'ok' not in response:\n raise Exception(response)\n else:\n self.output_box.output('Shot %s sent to runviewer.\\n' % os.path.basename(run_file))\n except Exception as e:\n self.output_box.output('Couldn\\'t submit shot to runviewer: %s\\n\\n' % str(e), red=True)\n\n\nclass RemoteServer(ZMQServer):\n def __init__(self):\n port = app.exp_config.getint(\n 'ports', 'runmanager', fallback=runmanager.remote.DEFAULT_PORT\n )\n ZMQServer.__init__(self, port=port)\n\n def handle_get_globals(self, raw=False):\n active_groups = inmain(app.get_active_groups, interactive=False)\n sequence_globals = runmanager.get_globals(active_groups)\n all_globals = {}\n if raw:\n for group_globals in 
sequence_globals.values():\n values_only = {name: val for name, (val, _, _) in group_globals.items()}\n all_globals.update(values_only)\n else:\n evaled_globals, global_hierarchy, expansions = runmanager.evaluate_globals(\n sequence_globals, raise_exceptions=False\n )\n for group_globals in evaled_globals.values():\n all_globals.update(group_globals)\n return all_globals\n\n @inmain_decorator()\n def handle_set_globals(self, globals, raw=False):\n active_groups = app.get_active_groups(interactive=False)\n sequence_globals = runmanager.get_globals(active_groups)\n try:\n for global_name, new_value in globals.items():\n # Unless raw=True, convert to str representation for saving to the GUI\n # or file. If this does not result in an object the user can actually\n # use, evaluation will error and the caller will find out about it later\n if not raw:\n new_value = repr(new_value)\n elif not isinstance(new_value, (str, bytes)):\n msg = \"global %s must be a string if raw=True, not %s\"\n raise TypeError(msg % (global_name, new_value.__class__.__name__))\n\n # Find the group this global is in:\n for group_name, group_globals in sequence_globals.items():\n globals_file = active_groups[group_name]\n if global_name in group_globals:\n # Confirm it's not also in another group:\n for other_name, other_globals in sequence_globals.items():\n if other_globals is not group_globals:\n if global_name in other_globals:\n msg = \"\"\"Cannot set global %s, it is defined in\n multiple active groups: %s and %s\"\"\"\n msg = msg % (global_name, group_name, other_name)\n raise RuntimeError(dedent(msg))\n previous_value, _, _ = sequence_globals[group_name][global_name]\n\n # Append expression-final comments in the previous expression to\n # the new one:\n comments = runmanager.find_comments(previous_value)\n if comments:\n # Only the final comment\n comment_start, comment_end = comments[-1]\n # Only if the comment is the last thing in the expression:\n if comment_end == len(previous_value):\n new_value += previous_value[comment_start:comment_end]\n try:\n # Is the group open?\n group_tab = app.currently_open_groups[\n globals_file, group_name\n ]\n except KeyError:\n # Group is not open. Change the global value on disk:\n runmanager.set_value(\n globals_file, group_name, global_name, new_value\n )\n else:\n # Group is open. 
Change the global value via the GUI:\n group_tab.change_global_value(\n global_name,\n previous_value,\n new_value,\n interactive=False,\n )\n break\n else:\n # Global was not found.\n msg = \"Global %s not found in any active group\" % global_name\n raise ValueError(msg)\n finally:\n # Trigger preparsing of globals to occur so that changes in globals not in\n # open tabs are reflected in the GUI, such as n_shots, errors on other\n # globals that depend on them, etc.\n app.globals_changed()\n\n def handle_engage(self):\n app.wait_until_preparse_complete()\n inmain(app.on_engage_clicked)\n\n @inmain_decorator()\n def handle_abort(self):\n app.on_abort_clicked()\n\n @inmain_decorator()\n def handle_get_run_shots(self):\n return app.ui.checkBox_run_shots.isChecked()\n\n @inmain_decorator()\n def handle_set_run_shots(self, value):\n app.ui.checkBox_run_shots.setChecked(value)\n\n @inmain_decorator()\n def handle_get_view_shots(self):\n return app.ui.checkBox_view_shots.isChecked()\n\n @inmain_decorator()\n def handle_set_view_shots(self, value):\n app.ui.checkBox_view_shots.setChecked(value)\n\n @inmain_decorator()\n def handle_get_shuffle(self):\n return app.ui.pushButton_shuffle.isChecked()\n\n @inmain_decorator()\n def handle_set_shuffle(self, value):\n app.ui.pushButton_shuffle.setChecked(value)\n\n def handle_n_shots(self):\n # Wait until any current preparsing is done, to ensure this is not racy w.r.t\n # previous remote calls:\n app.wait_until_preparse_complete()\n return app.n_shots\n\n @inmain_decorator()\n def handle_get_labscript_file(self):\n labscript_file = app.ui.lineEdit_labscript_file.text()\n return os.path.abspath(labscript_file)\n\n @inmain_decorator()\n def handle_set_labscript_file(self, value):\n labscript_file = os.path.abspath(value)\n app.ui.lineEdit_labscript_file.setText(labscript_file)\n\n @inmain_decorator()\n def handle_get_shot_output_folder(self):\n shot_output_folder = app.ui.lineEdit_shot_output_folder.text()\n return os.path.abspath(shot_output_folder)\n\n @inmain_decorator()\n def handle_set_shot_output_folder(self, value):\n shot_output_folder = os.path.abspath(value)\n app.ui.lineEdit_shot_output_folder.setText(shot_output_folder)\n\n def handle_error_in_globals(self):\n try:\n # This will raise an exception if there are multiple active groups of the\n # same name:\n active_groups = inmain(app.get_active_groups, interactive=False)\n sequence_globals = runmanager.get_globals(active_groups)\n # This will raise an exception if any of the globals can't be evaluated:\n runmanager.evaluate_globals(sequence_globals, raise_exceptions=True)\n except Exception:\n return True\n return False\n\n def handle_is_output_folder_default(self):\n return not app.non_default_folder\n\n @inmain_decorator()\n def handle_reset_shot_output_folder(self):\n app.on_reset_shot_output_folder_clicked(None)\n\n def handler(self, request_data):\n cmd, args, kwargs = request_data\n if cmd == 'hello':\n return 'hello'\n elif cmd == '__version__':\n return runmanager.__version__\n try:\n return getattr(self, 'handle_' + cmd)(*args, **kwargs)\n except Exception as e:\n msg = traceback.format_exc()\n msg = \"Runmanager server returned an exception:\\n\" + msg\n return e.__class__(msg)\n\n\nif __name__ == \"__main__\":\n logger = setup_logging('runmanager')\n labscript_utils.excepthook.set_logger(logger)\n logger.info('\\n\\n===============starting===============\\n')\n qapplication = QtWidgets.QApplication(sys.argv)\n qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)\n app = 
RunManager()\n splash.update_text('Starting remote server')\n remote_server = RemoteServer()\n splash.hide()\n qapplication.exec_()\n remote_server.shutdown()\n"
] | [
[
"matplotlib.use"
]
] |
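The preparse_globals_loop method in the runmanager code above coalesces many queued "globals changed" notifications into a single expensive re-parse, then releases every caller blocked on join(). Below is a minimal, self-contained sketch of that request-coalescing pattern using only the Python standard library; the names (requests, preparse, worker) are illustrative placeholders and are not part of runmanager's API, and the Qt-main-thread locking used by the real code is omitted.

# Standalone sketch (not runmanager code): coalesce queued change
# notifications into batched calls to one expensive function.
import queue
import threading
import time

requests = queue.Queue()

def preparse():
    # Stand-in for the expensive work (parsing and evaluating globals).
    time.sleep(0.1)
    print("preparsed one batch")

def worker():
    while True:
        requests.get()              # block until at least one request arrives
        n = 1
        while True:                 # drain any further pending requests
            try:
                requests.get(block=False)
                n += 1
            except queue.Empty:
                break
        preparse()                  # do the work once for the whole batch
        for _ in range(n):          # release every caller waiting on join()
            requests.task_done()

threading.Thread(target=worker, daemon=True).start()
for _ in range(5):
    requests.put(None)              # several rapid notifications...
requests.join()                     # ...are coalesced into one or a few preparses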
zzheng93/code_ms_ml_mam4 | [
"5ff8734142884f67c9e25f96892296454602928f"
] | [
"3_analysis/util.py"
] | [
"import xarray as xr\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport matplotlib.patches as mpatches\n\ndef open_nc(path,scale=1.0): \n ds=(xr.open_dataset(path)*scale)\n ds=ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180))\n ds=ds.reindex(lon=sorted(ds.lon)) \n return ds\n\ndef select_data(ds, lat_min, lat_max, lon_min, lon_max):\n \"\"\"\n select the dataset given the box information\n \"\"\"\n ds_select = ds.where((ds.lat>=lat_min)&(ds.lat<=lat_max)\n & (ds.lon>=lon_min) & (ds.lon<=lon_max))\n return ds_select\n\ndef year_comp(chi, lat_min, lat_max, lon_min, lon_max):\n file_path = {}\n parent_path = \"/data/keeling/a/zzheng25/d/mam4_paper_data/chi_only/comp_analysis/\"\n SRF_ls = [\n \"bc_a1_SRF\",\"bc_a4_SRF\",\n \"dst_a1_SRF\",\"dst_a2_SRF\",\n \"ncl_a1_SRF\",\"ncl_a2_SRF\",\n \"pom_a1_SRF\",\"pom_a4_SRF\",\n \"so4_a1_SRF\",\"so4_a2_SRF\",\n \"soa_a1_SRF\",\"soa_a2_SRF\"\n ]\n\n per_ls = [vari[:-4]+\"_per\" for vari in SRF_ls]\n\n# MAM4 = open_nc(file_path[\"MAM4\"]+chi+\"_mean.nc\",scale=100.0)[chi]\n# ML = open_nc(file_path[\"MAM4\"]+chi+\"_mean.nc\",scale=100.0)[chi]\n# diff = open_nc(file_path[\"diff\"]+chi+\"_mean.nc\",scale=100.0)[chi]\n comp = open_nc(parent_path + \"2011_year_comp.nc\")\n\n # select the region based on lat and lon\n# MAM4_s = select_data(MAM4,lat_min, lat_max, lon_min, lon_max)\n# ML_s = select_data(ML,lat_min, lat_max, lon_min, lon_max)\n# diff_s = select_data(diff,lat_min, lat_max, lon_min, lon_max)\n comp_s = select_data(comp,lat_min, lat_max, lon_min, lon_max)\n\n df_mean=comp_s.to_dataframe()[per_ls].mean()\n\n d = {\n \"bc\":[0,df_mean[\"bc_a1_per\"],df_mean[\"bc_a4_per\"]],\n \"dst\":[df_mean[\"dst_a2_per\"],df_mean[\"dst_a1_per\"],0],\n \"ncl\":[df_mean[\"ncl_a2_per\"],df_mean[\"ncl_a1_per\"],0],\n \"pom\":[0,df_mean[\"pom_a1_per\"],df_mean[\"pom_a4_per\"]],\n \"soa\":[df_mean[\"soa_a2_per\"],df_mean[\"soa_a1_per\"],0],\n \"so4\":[df_mean[\"so4_a2_per\"],df_mean[\"so4_a1_per\"],0]\n }\n\n df = pd.DataFrame(data=d)\n df = df.rename(index={0: \"Aitken\", 1: \"Accumulation\", 2: \"Primary carbon\"})\n \n return df\n\ndef plot_difference_with_anchor(da_mam4,da_ml,da_diff,lat_min, lat_max, lon_min, lon_max):\n \"\"\"\n Plot the 1 x 3 figures using anchor\n \"\"\"\n fig = plt.figure(figsize=(16,4))\n ax1 = plt.subplot(131,projection=ccrs.PlateCarree());\n im1=da_mam4.plot(ax=ax1,\n vmax=100,vmin=0,add_colorbar=False,cmap='RdYlBu_r')\n ax1.add_patch(mpatches.Rectangle(xy=[lon_min, lat_min], \n width=(lon_max-lon_min), \n height=(lat_max-lat_min),\n edgecolor=\"black\",\n facecolor='None',\n lw=1.5))\n ax1.coastlines(alpha=0.66)\n ax1.set_title(\"mam4\")\n plt.colorbar(im1, orientation=\"horizontal\", pad=0.15)\n #plt.show()\n\n ax2 = plt.subplot(132,projection=ccrs.PlateCarree());\n im2=da_ml.plot(ax=ax2,\n vmax=100,vmin=0,add_colorbar=False,cmap='RdYlBu_r')\n ax2.add_patch(mpatches.Rectangle(xy=[lon_min, lat_min], \n width=(lon_max-lon_min), \n height=(lat_max-lat_min),\n edgecolor=\"black\",\n facecolor='None',\n lw=1.5))\n ax2.coastlines(alpha=0.66)\n ax2.set_title(\"ml\")\n plt.colorbar(im2, orientation=\"horizontal\", pad=0.15)\n #plt.show()\n\n \n ax3 = plt.subplot(133,projection=ccrs.PlateCarree());\n im3=da_diff.plot(ax=ax3,\n vmin=-100,vmax=100,add_colorbar=False,cmap=\"bwr\")\n ax3.add_patch(mpatches.Rectangle(xy=[lon_min, lat_min], \n width=(lon_max-lon_min), \n height=(lat_max-lat_min),\n edgecolor=\"black\",\n facecolor='None',\n lw=2.5))\n ax3.coastlines(alpha=0.66)\n ax3.set_title(\"diff\")\n 
plt.colorbar(im3, orientation=\"horizontal\", pad=0.15)\n plt.show()\n"
] | [
[
"matplotlib.pyplot.colorbar",
"pandas.DataFrame",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle"
]
] |
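The util.py helpers in the row above wrap longitudes from [0, 360) into [-180, 180) and mask out a lat/lon box before averaging. Below is a minimal, self-contained sketch of that behaviour on a synthetic xarray Dataset; the variable name "chi" and the box bounds are placeholders, and the logic is mirrored here rather than imported, since the original reads from hard-coded cluster paths:

    import numpy as np
    import xarray as xr

    # synthetic global field on a 0..350 longitude grid, like the raw model output
    ds = xr.Dataset(
        {"chi": (("lat", "lon"), np.random.rand(19, 36))},
        coords={"lat": np.arange(-90, 100, 10), "lon": np.arange(0, 360, 10)},
    )

    # same wrap/sort as open_nc: 0..359 -> -180..179, ascending
    ds = ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180))
    ds = ds.reindex(lon=sorted(ds.lon.values))

    # same box masking as select_data: points outside the box become NaN
    box = ds.where((ds.lat >= 20) & (ds.lat <= 50) & (ds.lon >= -110) & (ds.lon <= -70))
    print(float(box["chi"].mean()))   # regional mean; NaNs outside the box are skipped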
Littlehong-1997/echo-1 | [
"d77887fc9a3122508c8eba487e6034494ccc0a83"
] | [
"train/method/model/load_data.py"
] | [
"import numpy as np\nfrom tensorflow.keras import preprocessing\nimport cv2\nimport pandas as pd\nfrom postprocessing import *\nimport os\nimport Augmentor\n\n\ndef load_datasets(filepath, sample_list, label_list, mark, a4c_or_a2c, m):\n \"\"\"\n we have illustrated the file structure of datasets and label in readme.\n filepath : File storage directory.\n sample_list : the ones separate for train val and test.\n label_list : the list of golden_label correspond to sample_list.\n label[-1] : start_mark which indicates the first frame number of .csv is ED or ES.\n mark is 1 -->train 0--->test 2--->val.\n a2c_or_a4c is num 2 or 4\n \"\"\"\n # here can adjust n to apply your datasets\n if mark:\n n = 4000\n else:\n n = 4000\n dst_pair1 = np.zeros(shape=(n, m, m, 1), dtype=np.float32)\n dst_pair2 = np.zeros(shape=(n, m, m, 1), dtype=np.float32)\n dst_label = np.zeros(shape=(n,), dtype=np.int32)\n k = 0\n label_list_copy = copy.deepcopy(label_list)\n for number in range(len(sample_list)):\n label = label_list_copy[sample_list[number]-1] # o--->up 1--->down\n start_mark = label.pop()\n for i in (label):\n position = label.index(i)\n if position == len(label)-1:\n break\n j = label[position+1]\n for t in range(i,j):\n # load imgs: from number i to number j-1-->pair1\n # i+1 j-->pair2\n img_p1 = cv2.imread(filepath+\"Patient\"+(\"000\"+str(sample_list[number]))[-4:] +\n \"\\\\a\"+str(a4c_or_a2c)+\"c\\\\\"+str(t)+'.png', 0)\n img_p2 = cv2.imread(filepath+\"Patient\"+(\"000\"+str(sample_list[number]))[-4:] +\n \"\\\\a\"+str(a4c_or_a2c)+\"c\\\\\"+str(t+1)+'.png', 0)\n # cut and unsamping use cv2.resize\n # original 600*800--cut-->512*512--->resize by cv2 ---> m*m\n dst_pair1[k, :, :, 0] = cv2.resize(img_p1[80:592, 176:688].reshape(512, -1, 1), (m, m))/255.0\n dst_pair2[k, :, :, 0] = cv2.resize(img_p2[80:592, 176:688].reshape(512, -1, 1), (m, m))/255.0\n if start_mark == 0: # up\n dst_label[k] = 0 \n else:\n dst_label[k] = 1 \n k += 1\n if start_mark == 0:\n start_mark = 1\n else:\n start_mark = 0\n if mark == 1:\n pathname = 'train'\n elif mark == 0:\n pathname = 'test'\n else:\n pathname = \"val\"\n # save the imgs for augmentation before training.\n os.mkdir('../'+pathname+'p1/') \n os.mkdir('../'+pathname+'p2/')\n K = 0\n for i in (dst_pair1[:k]):\n preprocessing.image.save_img('../'+pathname+'p1/'+str(K)+'.png', i)\n K += 1\n K = 0\n for i in (dst_pair2[:k]):\n preprocessing.image.save_img('../'+pathname+'p2/'+str(K)+'.png', i)\n K += 1\n return dst_pair1[:k], dst_pair2[:k], dst_label[:k]\n \n \ndef augment():\n \"\"\"\n we use Augmentor lib\n a pipeline of augment\n no params input\n \"\"\"\n print(\"augmenting......\")\n path1 = '../trainp1/'\n path2 = '../trainp2/'\n # path of pair1 and pair2 similar to img & mask task for segmentation\n p = Augmentor.Pipeline(path1) # pair1\n p.ground_truth(path2) # pair2\n p.rotate(probability=0.3, max_left_rotation=3, max_right_rotation=3) \n p.flip_left_right(probability=0.2) \n p.random_distortion(0.5, 2, 2, 2)\n p.zoom(probability=0.5, min_factor=0.95, max_factor=1.05)\n p.process()\n\n\ndef load_aug_data(path, m):\n \"\"\"\n m: img_shape,e.g.64 128 256\n return matrix of shape (n,m,m,1)\n \"\"\"\n aug_path = path+'output/'\n p1 = np.zeros(shape=(int(len(os.listdir(aug_path))/2), m, m, 1), dtype=np.float32)\n p2 = np.zeros(shape=(int(len(os.listdir(aug_path))/2), m, m, 1), dtype=np.float32)\n for filename in (os.listdir(aug_path)):\n img = preprocessing.image.load_img(aug_path+filename, color_mode=\"grayscale\")\n if filename[:2] == 'tr': # pair1\n index = 
filename.index('.')\n i = int(filename[17:index])\n p1[i] = preprocessing.image.img_to_array(img)/255.0\n else:\n index = filename.index('.')\n i = int(filename[25:index])\n p2[i] = preprocessing.image.img_to_array(img)/255.0\n print('aug_data is loaded!')\n return p1, p2\n\n\ndef get_label(path): # get ED ES label\n \"\"\"\n input a .csv file as describing on readme.md.\n return a list of label\n \"\"\"\n label_csv = pd.read_csv(path)\n label_list = []\n trans_list = list(np.array(label_csv).astype(np.int32))\n for i in trans_list:\n temp = []\n for j in i:\n if j >= 0:\n temp.append(j)\n label_list.append(temp)\n return label_list \n \n\n"
] | [
[
"numpy.array",
"tensorflow.keras.preprocessing.image.load_img",
"numpy.zeros",
"tensorflow.keras.preprocessing.image.img_to_array",
"pandas.read_csv"
]
] |
ejmejm/GoHeuristics | [
"9336d661abd48aa31ff5c9ed50cc2fbbd4472ebe"
] | [
"board.py"
] | [
"import numpy as np\nimport queue\n\nboard_size = 19\n\ndef make_move(board, move, player, enemy, debug=False):\n board = board.reshape(board_size, board_size)\n board[move[0]][move[1]] = player\n empty = 0\n\n group_captures = 0\n if move[0] + 1 <= 18 and board[move[0]+1][move[1]] == enemy and check_liberties(board, np.array([move[0]+1, move[1]])) == 0:\n remove_stones(board, np.array([move[0]+1, move[1]]))\n group_captures += 1\n if move[0] - 1 >= 0 and board[move[0]-1][move[1]] == enemy and check_liberties(board, np.array([move[0]-1, move[1]])) == 0:\n remove_stones(board, np.array([move[0]-1, move[1]]))\n group_captures += 1\n if move[1] + 1 <= 18 and board[move[0]][move[1]+1] == enemy and check_liberties(board, np.array([move[0], move[1]+1])) == 0:\n remove_stones(board, np.array([move[0], move[1]+1]))\n group_captures += 1\n if move[1] - 1 >= 0 and board[move[0]][move[1]-1] == enemy and check_liberties(board, np.array([move[0], move[1]-1])) == 0:\n remove_stones(board, np.array([move[0], move[1]-1]))\n group_captures += 1\n if group_captures == 0 and check_liberties(board, move) == 0:\n board[move[0]][move[1]] = empty\n if debug == True:\n print(\"ERROR! Illegal suicide move\")\n return None\n\n return board\n\ndef check_liberties(board, position):\n player = board[position[0]][position[1]]\n if player == 0:\n print(\"ERROR! Cannot check the liberties of an empty space\")\n return;\n empty = 0\n board_check = np.empty_like(board)\n board_check.fill(False)\n positions = queue.Queue()\n positions.put(position)\n board_check[position[0]][position[1]] = True\n\n liberties = 0\n while positions.empty() == False:\n c_move = positions.get()\n if c_move[0] + 1 <= 18 and board_check[c_move[0]+1][c_move[1]] == False:\n if board[c_move[0]+1][c_move[1]] == player:\n positions.put(np.array([c_move[0]+1, c_move[1]]))\n elif board[c_move[0]+1][c_move[1]] == empty:\n liberties += 1\n board_check[c_move[0]+1][c_move[1]] = True\n if c_move[0] - 1 >= 0 and board_check[c_move[0]-1][c_move[1]] == False:\n if board[c_move[0]-1][c_move[1]] == player:\n positions.put(np.array([c_move[0]-1, c_move[1]]))\n elif board[c_move[0]-1][c_move[1]] == empty:\n liberties += 1\n board_check[c_move[0]-1][c_move[1]] = True\n if c_move[1] + 1 <= 18 and board_check[c_move[0]][c_move[1]+1] == False:\n if board[c_move[0]][c_move[1]+1] == player:\n positions.put(np.array([c_move[0], c_move[1]+1]))\n elif board[c_move[0]][c_move[1]+1] == empty:\n liberties += 1\n board_check[c_move[0]][c_move[1]+1] = True\n if c_move[1] - 1 >= 0 and board_check[c_move[0]][c_move[1]-1] == False:\n if board[c_move[0]][c_move[1]-1] == player:\n positions.put(np.array([c_move[0], c_move[1]-1]))\n elif board[c_move[0]][c_move[1]-1] == empty:\n liberties += 1\n board_check[c_move[0]][c_move[1]-1] = True\n return liberties\n\ndef remove_stones(board, position):\n player = board[position[0]][position[1]]\n if player == 0:\n print(\"ERROR! 
Cannot remove an empty space\")\n return;\n empty = 0\n board_check = np.empty_like(board)\n board_check.fill(False)\n positions = queue.Queue()\n positions.put(position)\n board_check[position[0]][position[1]] = True\n\n while positions.empty() == False:\n c_move = positions.get()\n if c_move[0] + 1 <= 18 and board_check[c_move[0]+1][c_move[1]] == False:\n if board[c_move[0]+1][c_move[1]] == player:\n positions.put(np.array([c_move[0]+1, c_move[1]]))\n board_check[c_move[0]+1][c_move[1]] = True\n if c_move[0] - 1 >= 0 and board_check[c_move[0]-1][c_move[1]] == False:\n if board[c_move[0]-1][c_move[1]] == player:\n positions.put(np.array([c_move[0]-1, c_move[1]]))\n board_check[c_move[0]-1][c_move[1]] = True\n if c_move[1] + 1 <= 18 and board_check[c_move[0]][c_move[1]+1] == False:\n if board[c_move[0]][c_move[1]+1] == player:\n positions.put(np.array([c_move[0], c_move[1]+1]))\n board_check[c_move[0]][c_move[1]+1] = True\n if c_move[1] - 1 >= 0 and board_check[c_move[0]][c_move[1]-1] == False:\n if board[c_move[0]][c_move[1]-1] == player:\n positions.put(np.array([c_move[0], c_move[1]-1]))\n board_check[c_move[0]][c_move[1]-1] = True\n board[c_move[0]][c_move[1]] = empty\n return board\n"
] | [
[
"numpy.array",
"numpy.empty_like"
]
] |
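board.py in the row above depends only on NumPy, so its capture logic can be exercised directly. A minimal sketch, assuming the file is importable as board; the stone encodings (1 for the player, 2 for the enemy) are arbitrary, since the functions only compare cell values:

    import numpy as np
    from board import make_move, check_liberties   # module from the row above

    board = np.zeros((19, 19), dtype=int)
    player, enemy = 1, 2

    board[3][3] = enemy                        # lone enemy stone
    for r, c in [(2, 3), (4, 3), (3, 2)]:      # surround it on three sides
        board[r][c] = player

    print(check_liberties(board, np.array([3, 3])))   # 1: only (3, 4) is still open
    board = make_move(board, (3, 4), player, enemy)   # fill the last liberty
    print(board[3][3])                                # 0: the surrounded stone was captured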
jgruber99/Sampford_Sampling | [
"413c207957e57ac4f44086c9a0c25bef9f60a936"
] | [
"fairseq/modules/transformer_sentence_encoder_layer.py"
] | [
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fairseq.modules import MultiheadAttention, BertLayerNorm\n\n\ndef gelu(x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Implementation of the gelu activation function.\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\nclass TransformerSentenceEncoderLayer(nn.Module):\n \"\"\"\n Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained\n models.\n\n If the flag use_bert_layer_norm is set then we use the custom\n BertLayerNorm module instead of nn.LayerNorm.\n \"\"\"\n\n def __init__(\n self,\n embedding_dim: float = 768,\n ffn_embedding_dim: float = 3072,\n num_attention_heads: float = 8,\n dropout: float = 0.1,\n attention_dropout: float = 0.1,\n activation_dropout: float = 0.1,\n encoder_normalize_before: bool = False,\n use_bert_layer_norm: bool = False,\n use_gelu: bool = True,\n ) -> None:\n\n super().__init__()\n # Initialize parameters\n self.embedding_dim = embedding_dim\n self.dropout = dropout\n self.activation_dropout = activation_dropout\n self.normalize_before = encoder_normalize_before\n\n # Initialize blocks\n self.activation_fn = gelu if use_gelu else F.relu\n self.self_attn = MultiheadAttention(\n self.embedding_dim, num_attention_heads, dropout=attention_dropout\n )\n\n # layer norm associated with the self attention layer\n self.self_attn_layer_norm = (\n BertLayerNorm(self.embedding_dim)\n if use_bert_layer_norm\n else nn.LayerNorm(self.embedding_dim, eps=1e-12)\n )\n self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)\n self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)\n\n # layer norm associated with the position wise feed-forward NN\n self.final_layer_norm = (\n BertLayerNorm(self.embedding_dim)\n if use_bert_layer_norm\n else nn.LayerNorm(self.embedding_dim, eps=1e-12)\n )\n\n def _maybe_layer_norm(\n self,\n layer_norm: nn.Module,\n x: torch.Tensor,\n before: bool = False,\n after: bool = False,\n ):\n assert before ^ after\n if after ^ self.normalize_before:\n return layer_norm(x)\n else:\n return x\n\n def forward(\n self,\n x: torch.Tensor,\n self_attn_mask: torch.Tensor = None,\n self_attn_padding_mask: torch.Tensor = None,\n ):\n \"\"\"\n LayerNorm is applied either before or after the self-attention/ffn\n modules similar to the original Transformer imlementation.\n \"\"\"\n\n residual = x\n x = self._maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self._maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\n\n residual = x\n x = self._maybe_layer_norm(self.final_layer_norm, x, before=True)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self._maybe_layer_norm(self.final_layer_norm, x, after=True)\n return x, attn\n"
] | [
[
"torch.nn.Linear",
"torch.nn.functional.dropout",
"torch.nn.LayerNorm"
]
] |
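The encoder layer in the row above keeps fairseq's (seq_len, batch, embed_dim) tensor layout because it delegates to MultiheadAttention. A minimal forward-pass sketch, assuming this fork of fairseq is installed so the import below resolves:

    import torch
    from fairseq.modules.transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer

    layer = TransformerSentenceEncoderLayer(
        embedding_dim=768, ffn_embedding_dim=3072, num_attention_heads=8
    )
    x = torch.randn(16, 2, 768)   # (seq_len, batch, embed_dim)
    out, attn = layer(x)          # attn is None here because need_weights=False
    print(out.shape)              # torch.Size([16, 2, 768])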
AkashBhave/neureign | [
"1297ab261906c2a166b258a29bcd5dd556cefd65"
] | [
"neureign/models.py"
] | [
"import numpy as np\n\nfrom neureign import activations\n\n\nclass ANN:\n def __init__(self):\n self.input_size = 2\n self.output_size = 1\n self.hidden_size = 3\n\n self.w1 = np.random.randn(self.input_size, self.hidden_size)\n self.w2 = np.random.randn(self.hidden_size, self.output_size)\n\n def forward(self, x):\n self.z1 = np.dot(x, self.w1);\n self.z2 = activations.sigmoid(self.z1)\n self.z3 = np.dot(self.z2, self.w2)\n\n output = activations.sigmoid(self.z3)\n return output\n"
] | [
[
"numpy.dot",
"numpy.random.randn"
]
] |
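The ANN class in the row above is a fixed 2-3-1 network with randomly initialised weights and no training loop, so a forward pass is the only thing to show. A minimal sketch, assuming the neureign package (and its activations.sigmoid) is importable:

    import numpy as np
    from neureign.models import ANN   # module from the row above

    ann = ANN()
    x = np.random.rand(5, 2)   # five samples, two input features each
    y = ann.forward(x)         # sigmoid outputs, shape (5, 1), values in (0, 1)
    print(y.shape)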
uci-cbcl/scFAN | [
"b1955c88c6ec97c447c254dc0ebdd33778e09099"
] | [
"data_iter.py"
] | [
"import numpy as np\nimport pdb\ntry:\n import pyBigWig\nexcept:\n pdb.set_trace()\n import pyBigWig\nfrom keras.preprocessing.image import Iterator\n\n\n# Modified from keras\nclass DataIterator(Iterator):\n def __init__(self, data_list, genome, batch_size, seqlen, bigwig_rc_order=None, shuffle=False, seed=1337):\n self.data_list = data_list\n if data_list is None or len(data_list) == 0:\n self.num_bigwigs = 0\n else:\n self.num_bigwigs = len(data_list[0][4])\n self.num_meta = len(data_list[0][5])\n if bigwig_rc_order is None:\n self.bigwig_rc_order = np.arange(self.num_bigwigs)\n else:\n self.bigwig_rc_order = bigwig_rc_order\n self.genome = genome\n self.seqlen = seqlen\n self.nucleotides = np.array(['A', 'C', 'G', 'T'])\n if data_list is None or len(data_list) == 0:\n self.labeled = False\n else:\n self.labeled = len(data_list[0]) == 7\n if self.labeled:\n self.num_tfs = len(data_list[0][6])\n super(DataIterator, self).__init__(len(data_list), batch_size, shuffle, seed)\n\n def __len__(self):\n return len(self.data_list)\n\n def next(self):\n # for python 2.x.\n # Keeps under lock only the mechanism which advances\n # the indexing of each batch\n # see http://anandology.com/blog/using-iterators-and-generators/\n with self.lock:\n index_array, current_index, current_batch_size = next(self.index_generator)\n batch_X_seq = np.zeros((current_batch_size, self.seqlen, 4), dtype=bool)\n batch_X_bigwig = np.zeros((current_batch_size, self.seqlen, self.num_bigwigs), dtype=np.float32)\n if self.num_meta:\n batch_X_meta = np.zeros((current_batch_size, self.num_meta), dtype=np.float32)\n if self.labeled:\n batch_y = np.zeros((current_batch_size, self.num_tfs), dtype=bool)\n for i, j in enumerate(index_array):\n data = self.data_list[j]\n\n chrom = data[0]\n start = data[1]\n stop = data[2]\n shift = data[3]\n bigwig_files = data[4]\n meta = data[5]\n if shift:\n s = np.random.randint(-shift, shift+1)\n start += s\n stop += s\n med = (start + stop) / 2\n start = med - self.seqlen / 2\n stop = med + self.seqlen / 2\n batch_X_seq[i] = self.genome[chrom][start:stop]\n if self.num_meta:\n batch_X_meta[i] = meta\n for k, bigwig_file in enumerate(bigwig_files):\n bigwig = pyBigWig.open(bigwig_file)\n sample_bigwig = np.array(bigwig.values(chrom, start, stop))\n bigwig.close()\n sample_bigwig[np.isnan(sample_bigwig)] = 0\n batch_X_bigwig[i, :, k] = sample_bigwig\n if k == 2:\n #batch_X_bigwig[i, :, k-1] = 0.5*batch_X_bigwig[i, :, k-1]+0.5*batch_X_bigwig[i, :, k]\n batch_X_bigwig[i, :, k-1] = (1-0.75)*batch_X_bigwig[i, :, k-1]+0.75*batch_X_bigwig[i, :, k]\n if self.labeled:\n batch_y[i] = data[6]\n # otherwise the binding code is 'U', so leave as 0\n batch_X_seq_rc = batch_X_seq[:, ::-1, ::-1]\n if k == 2:\n batch_X_bigwig = batch_X_bigwig[:,:,:2]\n batch_X_bigwig_rc = batch_X_bigwig[:, ::-1, self.bigwig_rc_order[:2]]\n else:\n batch_X_bigwig_rc = batch_X_bigwig[:, ::-1, self.bigwig_rc_order]\n #batch_X_bigwig_rc = batch_X_bigwig[:, ::-1, self.bigwig_rc_order]\n batch_X_fwd = np.concatenate([batch_X_seq, batch_X_bigwig], axis=-1)\n batch_X_rev = np.concatenate([batch_X_seq_rc, batch_X_bigwig_rc], axis=-1)\n if self.num_meta:\n batch_x = [batch_X_fwd, batch_X_rev, batch_X_meta]\n else:\n batch_x = [batch_X_fwd, batch_X_rev]\n if self.labeled:\n return batch_x, batch_y\n return batch_x\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.isnan",
"numpy.zeros",
"numpy.random.randint",
"numpy.arange"
]
] |
noajshu/learning_irreps | [
"c2ed8dffdb5bf93ca5952fe8d093ea3e8c5e7237"
] | [
"tools/tensor.py"
] | [
"import torch\nimport itertools\nimport numpy as np\nimport scipy.linalg\n\nimport os\nif os.environ.get('DEVICE'):\n device = torch.device(os.environ.get('DEVICE'))\nelse:\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndtype = torch.float32\n\ndef cplx_contractor(n):\n c = torch.zeros(*[2 for dim in range(n+1)], dtype=dtype, device=device)\n for bs in itertools.product(*(n*[[0,1]])):\n c[bs][sum(bs)%2] = (-1)**((sum(bs)-(sum(bs)%2))/2)\n return c\n\ndef cplx_matmul(A, B):\n return torch.einsum('ijs,jkp,spt->ikt', A, B, cplx_contractor(2))\n\ndef cplx_bracket(A, B):\n return cplx_matmul(A, B) - cplx_matmul(B, A)\n\ndef cplx_to_np(A):\n return A[...,0].cpu().numpy() + 1j*A[...,1].cpu().numpy()\n\ndef bracket(A, B):\n return A.mm(B) - B.mm(A)\n\ndef np_to_cplx(A):\n return torch.stack(\n (\n torch.tensor(A.real, device=device, dtype=dtype),\n torch.tensor(A.imag, device=device, dtype=dtype),\n ),\n dim=-1\n )\n\nconst_i = torch.tensor([0,1], device=device, dtype=dtype)\ndef i_times(A):\n return torch.einsum('spt,s,...p->...t', cplx_contractor(2), const_i, A)\n\ndef swap(i, j):\n return torch.stack(\n (\n torch.tensor(\n np.eye(4)[[{i:j, j:i}.get(k, k) for k in range(4)]],\n dtype=dtype, device=device\n ),\n torch.zeros(4,4, dtype=dtype, device=device),\n ),\n dim=-1\n )\n\n\ndef perm_parity(lst):\n '''\n Given a permutation of the digits 0..N in order as a list,\n returns its parity (or sign): +1 for even parity; -1 for odd.\n '''\n parity = 1\n for i in range(0,len(lst)-1):\n if lst[i] != i:\n parity *= -1\n mn = min(range(i,len(lst)), key=lst.__getitem__)\n lst[i],lst[mn] = lst[mn],lst[i]\n return parity\n\n\ndef levicivita(lst):\n if len(set(lst)) != len(lst):\n return 0\n return perm_parity(lst)\n\ndef levi_nonzero(lst):\n missing = {0,1,2}.difference(set(lst))\n if len(missing) != 1:\n return 0, -1\n index = list(missing)[0]\n coeff = levicivita(lst + [index])\n return coeff, index\n\n\ndef random_walk(gens):\n n = gens[0].shape[0]\n x_0 = np.random.normal(size=(n,))\n X = [x_0]\n for i in range(n):\n X.append((\n scipy.linalg.expm(sum(np.random.normal() * g for g in gens)) @ X[-1].reshape(n, 1)\n ).reshape(n,))\n return np.array(X).T\n\n\ndef slugify(value):\n \"\"\"\n Normalizes string, converts to lowercase, removes non-alpha characters,\n and converts spaces to hyphens.\n \"\"\"\n value = value.replace('\\'', 'p')\n import unicodedata\n import re\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('utf-8')\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n value = re.sub('[-\\s]+', '-', value)\n return value\n\n\nif __name__ == '__main__':\n # tests\n A = torch.randn(3, 3, 2, device=device, dtype=dtype)\n B = torch.randn(3, 3, 2, device=device, dtype=dtype)\n assert np.sum(np.abs(cplx_to_np(A) @ cplx_to_np(B) - cplx_to_np(cplx_matmul(A, B)))) < 1e-5\n"
] | [
[
"torch.zeros",
"numpy.random.normal",
"numpy.array",
"numpy.eye",
"torch.cuda.is_available",
"torch.tensor",
"torch.randn"
]
] |
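tensor.py in the row above stores complex matrices as real tensors with a trailing size-2 axis. One way to sanity-check cplx_matmul is to round-trip through NumPy's native complex arithmetic, mirroring the self-test at the bottom of the file (the tools.tensor import path assumes the repo root is on sys.path):

    import numpy as np
    from tools.tensor import np_to_cplx, cplx_to_np, cplx_matmul

    A = np.random.randn(3, 3) + 1j * np.random.randn(3, 3)
    B = np.random.randn(3, 3) + 1j * np.random.randn(3, 3)

    prod = cplx_to_np(cplx_matmul(np_to_cplx(A), np_to_cplx(B)))
    assert np.allclose(prod, A @ B, atol=1e-4)   # float32 storage, hence the loose tolerance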
kkiesling/openmc | [
"7e1f486be333c6ee4ebbe4025f0a8b4dcbdae7c3"
] | [
"tests/unit_tests/test_deplete_resultslist.py"
] | [
"\"\"\"Tests the ResultsList class\"\"\"\n\nfrom pathlib import Path\nfrom math import inf\n\nimport numpy as np\nimport pytest\nimport openmc.deplete\n\n\[email protected]\ndef res():\n \"\"\"Load the reference results\"\"\"\n filename = (Path(__file__).parents[1] / 'regression_tests' / 'deplete'\n / 'test_reference.h5')\n return openmc.deplete.ResultsList.from_hdf5(filename)\n\n\ndef test_get_atoms(res):\n \"\"\"Tests evaluating single nuclide concentration.\"\"\"\n t, n = res.get_atoms(\"1\", \"Xe135\")\n\n t_ref = np.array([0.0, 1296000.0, 2592000.0, 3888000.0])\n n_ref = np.array(\n [6.67473282e+08, 3.72442707e+14, 3.61129692e+14, 4.01920099e+14])\n\n np.testing.assert_allclose(t, t_ref)\n np.testing.assert_allclose(n, n_ref)\n\n # Check alternate units\n volume = res[0].volume[\"1\"]\n\n t_days, n_cm3 = res.get_atoms(\"1\", \"Xe135\", nuc_units=\"atom/cm3\", time_units=\"d\")\n\n assert t_days == pytest.approx(t_ref / (60 * 60 * 24))\n assert n_cm3 == pytest.approx(n_ref / volume)\n\n t_min, n_bcm = res.get_atoms(\"1\", \"Xe135\", nuc_units=\"atom/b-cm\", time_units=\"min\")\n assert n_bcm == pytest.approx(n_cm3 * 1e-24)\n assert t_min == pytest.approx(t_ref / 60)\n\n t_hour, _n = res.get_atoms(\"1\", \"Xe135\", time_units=\"h\")\n assert t_hour == pytest.approx(t_ref / (60 * 60))\n\n\ndef test_get_reaction_rate(res):\n \"\"\"Tests evaluating reaction rate.\"\"\"\n t, r = res.get_reaction_rate(\"1\", \"Xe135\", \"(n,gamma)\")\n\n t_ref = [0.0, 1296000.0, 2592000.0, 3888000.0]\n n_ref = [6.67473282e+08, 3.72442707e+14, 3.61129692e+14, 4.01920099e+14]\n xs_ref = [5.10301159e-05, 3.19379638e-05, 4.50543806e-05, 4.71004301e-05]\n\n np.testing.assert_allclose(t, t_ref)\n np.testing.assert_allclose(r, np.array(n_ref) * xs_ref)\n\n\ndef test_get_keff(res):\n \"\"\"Tests evaluating keff.\"\"\"\n t, k = res.get_keff()\n t_min, k = res.get_keff(time_units='min')\n\n t_ref = [0.0, 1296000.0, 2592000.0, 3888000.0]\n k_ref = [1.21409662, 1.16518654, 1.25357797, 1.22611968]\n u_ref = [0.0278795195, 0.0233141097, 0.0167899218, 0.0246734716]\n\n np.testing.assert_allclose(t, t_ref)\n np.testing.assert_allclose(t_min * 60, t_ref)\n np.testing.assert_allclose(k[:, 0], k_ref)\n np.testing.assert_allclose(k[:, 1], u_ref)\n\n\[email protected](\"unit\", (\"s\", \"d\", \"min\", \"h\"))\ndef test_get_steps(unit):\n # Make a ResultsList full of near-empty Result instances\n # Just fill out a time schedule\n results = openmc.deplete.ResultsList()\n # Time in units of unit\n times = np.linspace(0, 100, num=5)\n if unit == \"d\":\n conversion_to_seconds = 60 * 60 * 24\n elif unit == \"h\":\n conversion_to_seconds = 60 * 60\n elif unit == \"min\":\n conversion_to_seconds = 60\n else:\n conversion_to_seconds = 1\n\n for ix in range(times.size):\n res = openmc.deplete.Results()\n res.time = times[ix:ix + 1] * conversion_to_seconds\n results.append(res)\n\n for expected, value in enumerate(times):\n actual = results.get_step_where(\n value, time_units=unit, atol=0, rtol=0)\n assert actual == expected, (value, results[actual].time[0])\n\n with pytest.raises(ValueError):\n # Emulate a result file with a non-zero initial point in time\n # as in starting from a restart\n results.get_step_where(times[0] - 1, time_units=unit, atol=0, rtol=0)\n\n with pytest.raises(ValueError):\n results.get_step_where(times[-1] + 1, time_units=unit, atol=0, rtol=0)\n\n # Grab intermediate points with a small offset\n delta = (times[1] - times[0])\n offset = delta * 0.1\n for expected, value in enumerate(times[1:-1], start=1):\n # Shoot 
a little low and a little high\n for mult in (1, -1):\n target = value + mult * offset\n # Compare using absolute and relative tolerances\n actual = results.get_step_where(\n target, time_units=unit, atol=offset * 2, rtol=inf)\n assert actual == expected, (\n target, times[actual], times[expected], offset)\n\n actual = results.get_step_where(\n target, time_units=unit, atol=inf, rtol=offset / value)\n assert actual == expected, (\n target, times[actual], times[expected], offset)\n # Check that the lower index is returned for the exact mid-point\n target = value + delta * 0.5\n actual = results.get_step_where(\n target, time_units=unit, atol=delta, rtol=delta / value)\n assert actual == expected\n\n # Shoot way over with no tolerance -> just give closest value\n actual = results.get_step_where(\n times[-1] * 100, time_units=unit, atol=inf, rtol=inf)\n assert actual == times.size - 1\n"
] | [
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.linspace"
]
] |
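test_get_steps in the row above is the only case in this file that needs no reference HDF5 file, and it doubles as a usage pattern for get_step_where: time is stored on each Results object in seconds, and queries can be made in other units. A condensed version of that pattern (requires an openmc build matching this test module):

    from math import inf
    import numpy as np
    import openmc.deplete

    results = openmc.deplete.ResultsList()
    for hours in np.linspace(0, 100, num=5):
        r = openmc.deplete.Results()
        r.time = np.array([hours * 3600.0])   # internal storage is in seconds
        results.append(r)

    # index of the stored step closest to 60 h (tolerances disabled, nearest match wins)
    print(results.get_step_where(60.0, time_units="h", atol=inf, rtol=inf))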
Waller-Lab/SpectralDiffuserCam | [
"1a9ea9c82f10998cae13f7a222d0a9e854fa9192"
] | [
"Python/fista_spectral_cupy.py"
] | [
"import sys\nglobal device\ndevice= sys.argv[1]\nsys.path.append('helper_functions/')\n\nif device == 'GPU':\n import cupy as np\n import tv_approx_haar_cp as tv\n \n print('device = ', device, ', using GPU and cupy')\nelse:\n import numpy as np\n import tv_approx_haar_np as tv\n print('device = ', device, ', using CPU and numpy')\n\nimport helper_functions.helper_functions as fc\nimport numpy as numpy\nimport matplotlib.pyplot as plt\n\n\n\nclass fista_spectral_numpy():\n def __init__(self, h, mask):\n \n ## Initialize constants \n self.DIMS0 = h.shape[0] # Image Dimensions\n self.DIMS1 = h.shape[1] # Image Dimensions\n \n self.spectral_channels = mask.shape[-1] # Number of spectral channels \n \n self.py = int((self.DIMS0)//2) # Pad size\n self.px = int((self.DIMS1)//2) # Pad size\n \n # FFT of point spread function \n self.H = np.expand_dims(np.fft.fft2((np.fft.ifftshift(self.pad(h), axes = (0,1))), axes = (0,1)), -1)\n self.Hconj = np.conj(self.H) \n \n self.mask = mask\n \n # Calculate the eigenvalue to set the step size \n maxeig = self.power_iteration(self.Hpower, (self.DIMS0*2, self.DIMS1*2), 10)\n self.L = maxeig*45\n \n \n self.prox_method = 'tv' # options: 'non-neg', 'tv', 'native'\n \n # Define soft-thresholding constants\n self.tau = .5 # Native sparsity tuning parameter\n self.tv_lambda = 0.00005 # TV tuning parameter\n self.tv_lambdaw = 0.00005 # TV tuning parameter for wavelength \n self.lowrank_lambda = 0.00005 # Low rank tuning parameter\n \n \n # Number of iterations of FISTA\n self.iters = 500\n \n self.show_recon_progress = True # Display the intermediate results\n self.print_every = 20 # Sets how often to print the image\n \n self.l_data = []\n self.l_tv = []\n \n # Power iteration to calculate eigenvalue \n def power_iteration(self, A, sample_vect_shape, num_iters):\n bk = np.random.randn(sample_vect_shape[0], sample_vect_shape[1])\n for i in range(0, num_iters):\n bk1 = A(bk)\n bk1_norm = np.linalg.norm(bk1)\n\n bk = bk1/bk1_norm\n Mx = A(bk)\n xx = np.transpose(np.dot(bk.ravel(), bk.ravel()))\n eig_b = np.transpose(bk.ravel()).dot(Mx.ravel())/xx\n\n return eig_b\n \n # Helper functions for forward model \n def crop(self,x):\n return x[self.py:-self.py, self.px:-self.px]\n \n def pad(self,x):\n if len(x.shape) == 2: \n out = np.pad(x, ([self.py, self.py], [self.px,self.px]), mode = 'constant')\n elif len(x.shape) == 3:\n out = np.pad(x, ([self.py, self.py], [self.px,self.px], [0, 0]), mode = 'constant')\n return out\n \n def Hpower(self, x):\n x = np.fft.ifft2(self.H* np.fft.fft2(np.expand_dims(x,-1), axes = (0,1)), axes = (0,1))\n x = np.sum(self.mask* self.crop(np.real(x)), 2)\n x = self.pad(x)\n return x\n \n def Hfor(self, x):\n x = np.fft.ifft2(self.H* np.fft.fft2(x, axes = (0,1)), axes = (0,1))\n x = np.sum(self.mask* self.crop(np.real(x)), 2)\n return x\n\n def Hadj(self, x):\n x = np.expand_dims(x,-1)\n x = x*self.mask\n x = self.pad(x)\n\n x = np.fft.fft2(x, axes = (0,1))\n x = np.fft.ifft2(self.Hconj*x, axes = (0,1))\n x = np.real(x)\n return x\n \n def soft_thresh(self, x, tau):\n out = np.maximum(np.abs(x)- tau, 0)\n out = out*np.sign(x)\n return out \n \n def prox(self,x):\n if self.prox_method == 'tv':\n x = 0.5*(np.maximum(x,0) + tv.tv3dApproxHaar(x, self.tv_lambda/self.L, self.tv_lambdaw))\n if self.prox_method == 'native':\n x = np.maximum(x,0) + self.soft_thresh(x, self.tau)\n if self.prox_method == 'non-neg':\n x = np.maximum(x,0) \n return x\n \n def tv(self, x):\n d = np.zeros_like(x)\n d[0:-1,:] = (x[0:-1,:] - x[1:, :])**2\n d[:,0:-1] = 
d[:,0:-1] + (x[:,0:-1] - x[:,1:])**2\n return np.sum(np.sqrt(d))\n \n def loss(self,x,err):\n if self.prox_method == 'tv':\n self.l_data.append(np.linalg.norm(err)**2)\n self.l_tv.append(2*self.tv_lambda/self.L * self.tv(x))\n \n l = np.linalg.norm(err)**2 + 2*self.tv_lambda/self.L * self.tv(x)\n if self.prox_method == 'native':\n l = np.linalg.norm(err)**2 + 2*self.tv_lambda/self.L * np.linalg.norm(x.ravel(), 1)\n if self.prox_method == 'non-neg':\n l = np.linalg.norm(err)**2\n return l\n \n # Main FISTA update \n def fista_update(self, vk, tk, xk, inputs):\n\n error = self.Hfor(vk) - inputs\n grads = self.Hadj(error)\n \n xup = self.prox(vk - 1/self.L * grads)\n tup = 1 + np.sqrt(1 + 4*tk**2)/2\n vup = xup + (tk-1)/tup * (xup-xk)\n \n return vup, tup, xup, self.loss(xup, error)\n\n\n # Run FISTA \n def run(self, inputs): \n\n # Initialize variables to zero \n xk = np.zeros((self.DIMS0*2, self.DIMS1*2, self.spectral_channels))\n vk = np.zeros((self.DIMS0*2, self.DIMS1*2, self.spectral_channels))\n tk = 1.0\n \n llist = []\n\n # Start FISTA loop \n for i in range(0,self.iters):\n vk, tk, xk, l = self.fista_update(vk, tk, xk, inputs)\n \n llist.append(l)\n \n # Print out the intermediate results and the loss \n if self.show_recon_progress== True and i%self.print_every == 0:\n print('iteration: ', i, ' loss: ', l)\n if device == 'GPU':\n out_img = np.asnumpy(self.crop(xk))\n else:\n out_img = self.crop(xk)\n fc_img = fc.pre_plot(fc.stack_rgb_opt(out_img))\n plt.figure(figsize = (10,3))\n plt.subplot(1,2,1), plt.imshow(fc_img/numpy.max(fc_img)); plt.title('Reconstruction')\n plt.subplot(1,2,2), plt.plot(llist); plt.title('Loss')\n plt.show()\n self.out_img = out_img\n xout = self.crop(xk) \n return xout, llist\n"
] | [
[
"numpy.fft.ifft2",
"numpy.fft.fft2",
"numpy.sign",
"numpy.max",
"numpy.zeros_like",
"numpy.linalg.norm",
"numpy.conj",
"numpy.sqrt",
"numpy.expand_dims",
"matplotlib.pyplot.subplot",
"numpy.pad",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.random.randn",
"numpy.real",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.abs",
"numpy.maximum"
]
] |
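fista_spectral_cupy.py in the row above reads the compute device from sys.argv at import time and pulls its TV and plotting helpers from the repo's helper_functions directory, so it is intended to be driven from inside the repo's Python/ folder. A rough smoke-test sketch under those assumptions; the random PSF and filter mask stand in for real calibration data, and the 'non-neg' prox is chosen so the TV helper is not exercised:

    import sys
    sys.argv = ["fista_spectral_cupy.py", "CPU"]   # the module reads sys.argv[1] on import
    import numpy as np
    import fista_spectral_cupy as fsc              # run from the repo's Python/ directory

    h = np.random.rand(64, 64)        # placeholder point-spread function
    mask = np.random.rand(64, 64, 8)  # placeholder 8-channel spectral filter array

    solver = fsc.fista_spectral_numpy(h, mask)
    solver.prox_method = "non-neg"       # skip the TV prox for this quick check
    solver.show_recon_progress = False
    solver.iters = 20

    measurement = solver.Hfor(np.random.rand(128, 128, 8))   # simulate a sensor reading
    recon, losses = solver.run(measurement)                   # recon has shape (64, 64, 8)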
smdp2000/BETYdb-YABA | [
"64b540ef21838d5b47469896b95672bb4ac00f48"
] | [
"app/Meta.py"
] | [
"\"\"\"\nThis is the Meta module and supports all the REST actions for the yaba.yaml\n\"\"\"\n# Importing modules\nimport os\nimport geopandas as gpd\nimport traceback\nimport logging\nimport pandas as pd\n\nfrom flask import make_response,Response,jsonify\nfrom flask import json\nfrom db import *\nfrom werkzeug import secure_filename,FileStorage\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.exc import OperationalError\nfrom shapely.geometry import Polygon\n\n\n\nUPLOAD_FOLDER = 'temp'\n\n\ndef save_tempFile(File):\n # Validate that what we have been supplied with is infact a FileStorage\n if not isinstance(File, FileStorage):\n raise TypeError(\"Storage must be a werkzeug.FileStorage\")\n\n # Sanitise the filename\n a_file_name = secure_filename(File.filename)\n\n UPLOAD_PATH=os.path.join(os.getcwd(),UPLOAD_FOLDER,a_file_name)\n # Save file \n File.save(UPLOAD_PATH)\n return None\n\ndef insert_experiments(username,fileName):\n \"\"\"\n This function responds to a request for /yaba/v1/experiments with csv file.\n It first checks the file is appropriate one and then add new column user_id to dataframe.\n Insertion is done to \"experiments\" table.\n\n :fileName: CSV file with experiments meta data\n :return: 201 on success\n 400 if file is unsuitable or does not contain appropriate columns\n 409 Intregrity or Constraint error : 23503 foreign_key_violation | 23505 unique_violation\n 500 Database Connection Error\n 401 Unauthorized | No user exists\n 410 Default error.See logs for more information\n \"\"\"\n \n try:\n user_id=fetch_id(username, table='users')\n\n if not user_id:\n return 401\n #Reading the CSV file into DataFrame\n data = pd.read_csv(fileName,delimiter = ',')\n\n #Checking necessary columns are there.\n columns=data.columns.values.tolist()\n accepted_columns=['name','start_date','end_date','description','design']\n \n if(all(x in accepted_columns for x in columns)):\n data['user_id']=user_id\n data['design'].fillna('some text', inplace=True)\n insert_table(table='experiments',data=data)\n msg = {'Message' : 'Successfully inserted',\n 'Table Affected' : 'Experiments',\n 'Lines Inserted': data.shape[0]}\n \n return make_response(jsonify(msg), 201)\n\n else:\n msg = {'Message' : 'File not acceptable.Check the format of file or columns'}\n return make_response(jsonify(msg), 400)\n \n except OperationalError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : 'Database Conection Error'}\n return make_response(jsonify(msg), 500)\n except IntegrityError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : '(psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint \"unique_name_per_species\"'}\n return make_response(jsonify(msg), 409)\n except Exception :\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n return 410 \n \ndef insert_sites(fileName,shp_file,dbf_file,prj_file,shx_file):\n \"\"\"\n This function responds to a request for /yaba/v1/sites with csv and shape files.Checks\n file is appropriate one.Takes all the shape file.Extracts geometry from Geopandas dataframe\n and then added to Pandas dataframe.Insertion is done to \"sites\" table\n\n\n :shp_file: .shp file\n :dbf_file: .dbf file\n :prj_file: .prj file\n :shx_file: .shx file\n :fileName: CSV file with Sites meta data\n :return: 201 on success\n 400 if file is unsuitable or does not contain appropriate columns\n 409 Intregrity or Constraint error : 23503 foreign_key_violation | 
23505 unique_violation\n 500 Database Connection Error\n 401 Unauthorized | No user exists\n 410 Default error.See logs for more information\n \"\"\"\n try: \n #Reading the csv as Dataframe\n data = pd.read_csv(fileName,delimiter = ',')\n\n #Checking necessary columns are there.\n columns=data.columns.values.tolist()\n accepted_columns=['sitename','city','state','country','notes','greenhouse','geometry','time_zone','soil','soilnotes']\n \n #Saving files temporarily.(Will be deleted later)\n save_tempFile(shp_file)\n save_tempFile(dbf_file)\n save_tempFile(prj_file)\n save_tempFile(shx_file)\n\n #Getting the files path from temp folder\n shp_file_target = os.path.join(os.getcwd(),UPLOAD_FOLDER,shp_file.filename)\n dbf_file_target = os.path.join(os.getcwd(),UPLOAD_FOLDER,dbf_file.filename)\n prj_file_target = os.path.join(os.getcwd(),UPLOAD_FOLDER,prj_file.filename)\n shx_file_target = os.path.join(os.getcwd(),UPLOAD_FOLDER,shx_file.filename)\n\n\n #Reading the shapefile as DataFrame\n data_g1=gpd.read_file(shp_file_target) \n\n data_g = data_g1.to_crs({'init': 'epsg:4326'})\n\n for index, row in data_g.iterrows():\n flat_list = []\n for pt in list(row['geometry'].exterior.coords):\n pt=pt+(115,)\n flat_list.append(pt)\n poly = Polygon(flat_list)\n data.loc[index, 'geometry'] = poly\n \n if(all(x in accepted_columns for x in columns)):\n data['time_zone'].fillna(\"America/Phoenix\", inplace = True)\n data['soilnotes'].fillna(\"some text\", inplace = True) \n data['greenhouse'].fillna(\"f\", inplace = True) \n data=data.fillna('')\n file_name='sites_n.csv'\n\n data.to_csv(file_name, encoding='utf-8', index=False)\n \n #Inserting in Bety\n insert_sites_table(table='sites',data=file_name)\n msg = {'Message' : 'Successfully inserted',\n 'Table Affected' : 'Sites',\n 'Lines Inserted': data_g.shape[0]}\n os.remove(file_name)\n \n return make_response(jsonify(msg), 201)\n\n else:\n msg = {'Message' : 'File not acceptable.Check the format of file or columns'}\n return make_response(jsonify(msg), 400)\n \n except OperationalError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : 'Database Conection Error'}\n return make_response(jsonify(msg), 500)\n except IntegrityError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : '(psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint \"unique_name_per_species\"'}\n return make_response(jsonify(msg), 409)\n except Exception:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n return 410\n finally:\n os.remove(shp_file_target)\n os.remove(dbf_file_target)\n os.remove(prj_file_target)\n os.remove(shx_file_target) \n\ndef insert_treatments(username,fileName):\n \"\"\"\n This function responds to a request for /yaba/v1/treatments\n with csv file.Insertion is done to \"treatments\" table.\n\n :fileName: CSV with treatments meta data\n :return: 201 on success\n 400 if file is unsuitable or does not contain appropriate columns\n 409 Intregrity or Constraint error : 23503 foreign_key_violation | 23505 unique_violation\n 500 Database Connection Error\n 401 Unauthorized | No user exists\n 410 Default error.See logs for more information\n \"\"\"\n try:\n user_id=fetch_id(username, table='users')\n\n if not user_id:\n return 401\n\n #Reading the CSV file into DataFrame\n data = pd.read_csv(fileName,delimiter = ',')\n\n #Checking necessary columns are there.\n columns=data.columns.values.tolist()\n 
accepted_columns=['name','definition','control']\n \n if(all(x in accepted_columns for x in columns)):\n data['user_id']=user_id\n insert_table(table='treatments',data=data)\n msg = {'Message' : 'Successfully inserted',\n 'Table Affected' : 'Treatments',\n 'Lines Inserted': data.shape[0]}\n \n return make_response(jsonify(msg), 201)\n\n else:\n msg = {'Message' : 'File not acceptable.Check the format of file or columns'}\n return make_response(jsonify(msg), 400)\n \n except OperationalError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : 'Database Conection Error'}\n return make_response(jsonify(msg), 500)\n except IntegrityError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : '(psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint \"unique_name_per_species\"'}\n return make_response(jsonify(msg), 409)\n except Exception :\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n return 410 \n\ndef insert_cultivars(fileName):\n \"\"\"\n This function responds to a request for /yaba/v1/cultivars\n with csv file\n\n\n :fileName: CSV file with cultivars meta data\n :return: 201 on success\n 400 if file is unsuitable or does not contain appropriate columns\n 409 Intregrity or Constraint error : 23503 foreign_key_violation | 23505 unique_violation\n 500 Database Connection Error\n 401 Unauthorized | No user exists\n 410 Default error.See logs for more information\n \"\"\"\n \n\n try:\n data = pd.read_csv(fileName,delimiter = ',')\n\n specie_id=fetch_specie_id(data['species'][0])\n\n #Checking necessary columns are there.\n columns=data.columns.values.tolist()\n accepted_columns=['name','species','ecotype','notes']\n \n if(all(x in accepted_columns for x in columns)):\n new_data = pd.DataFrame(columns=['name', 'specie_id','name','ecotype','notes'])\n\n new_data['name']=data['name']\n new_data['specie_id']=specie_id\n new_data['ecotype']='some text'\n new_data['notes']='some text'\n\n insert_table(table='cultivars',data=new_data)\n\n msg = {'Message' : 'Successfully inserted',\n 'Table Affected' : 'Cultivars',\n 'Lines Inserted': data.shape[0]}\n \n return make_response(jsonify(msg), 201)\n\n else:\n msg = {'Message' : 'File not acceptable.Check the format of file or columns'}\n return make_response(jsonify(msg), 400)\n \n except OperationalError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : 'Database Conection Error'}\n return make_response(jsonify(msg), 500)\n except IntegrityError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : '(psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint \"unique_name_per_species\"'}\n return make_response(jsonify(msg), 409)\n except Exception :\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n return 410 \n \ndef insert_citations(username,fileName):\n \"\"\"\n This function responds to a request for /yaba/v1/citations\n with csv file\n\n :fileName: CSV with treatments meta data\n :return: 201 on success\n 400 if file is unsuitable or does not contain appropriate columns\n 409 Intregrity or Constraint error : 23503 foreign_key_violation | 23505 unique_violation\n 500 Database Connection Error\n 401 Unauthorized | No user exists\n 410 Default error.See logs for more information\n \"\"\"\n try:\n user_id=fetch_id(username, table='users')\n\n if not user_id:\n return 401\n\n data = 
pd.read_csv(fileName,delimiter = ',')\n #Checking necessary columns are there.\n columns=data.columns.values.tolist()\n accepted_columns=['author','year','title','journal','vol','pg','url','pdf','doi']\n \n if(all(x in accepted_columns for x in columns)):\n #Reading the CSV file into DataFrame \n data['user_id']=user_id\n insert_table(table='citations',data=data)\n msg = {'Message' : 'Successfully inserted',\n 'Table Affected' : 'Citations',\n 'Lines Inserted': data.shape[0]}\n \n return make_response(jsonify(msg), 201)\n\n else:\n msg = {'Message' : 'File not acceptable and Check the format of file or columns'}\n return make_response(jsonify(msg), 400)\n \n except OperationalError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : 'Database Conection Error'}\n return make_response(jsonify(msg), 500)\n except IntegrityError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : '(psycopg2.errors.UniqueViolation) duplicate key value violation.'}\n return make_response(jsonify(msg), 409)\n except Exception :\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n return 410 \n \ndef insert_experimentSites(fileName):\n \"\"\"\n This function responds to a request for /yaba/v1/experiments_sites\n with csv file\n\n\n :fileName: CSV file with experimentsSites meta data\n :return: 201 on success\n 400 if file is unsuitable or does not contain appropriate columns\n 409 Intregrity or Constraint error : 23503 foreign_key_violation | 23505 unique_violation\n 500 Database Connection Error\n 401 Unauthorized | No user exists\n 410 Default error.See logs for more information\n \"\"\"\n try:\n data = pd.read_csv(fileName,delimiter = ',')\n #Checking necessary columns are there.\n columns=data.columns.values.tolist()\n accepted_columns=['experiment_name','sitename']\n \n if(all(x in accepted_columns for x in columns)):\n new_data = pd.DataFrame(columns=['experiment_id', 'site_id'])\n new_data['experiment_id'] = data.apply(lambda row: fetch_id(row['experiment_name'],table='experiments'), axis=1)\n new_data['site_id'] = data.apply(lambda row: fetch_sites_id(row['sitename']), axis=1) \n insert_table(table='experiments_sites',data=new_data)\n msg = {'Message' : 'Successfully inserted',\n 'Table Affected' : 'Experiments_sites',\n 'Lines Inserted': data.shape[0]}\n \n return make_response(jsonify(msg), 201)\n\n else:\n msg = {'Message' : 'File not acceptable.Check the format of file or columns'}\n return make_response(jsonify(msg), 400)\n \n except OperationalError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : 'Database Conection Error'}\n return make_response(jsonify(msg), 500)\n except IntegrityError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : '(psycopg2.errors.UniqueViolation) duplicate key value violation.'}\n return make_response(jsonify(msg), 409)\n except Exception :\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n return 410\n\ndef insert_experimentTreatments(fileName):\n \"\"\"\n This function responds to a request for /yaba/v1/experiments_treatments\n with csv file\n\n\n :fileName: CSV file with experiments_treatments meta data\n :return: 201 on success\n 400 if file is unsuitable or does not contain appropriate columns\n 409 Intregrity or Constraint error : 23503 foreign_key_violation | 23505 unique_violation\n 500 Database Connection Error\n 401 Unauthorized | No user 
exists\n 410 Default error.See logs for more information\n \"\"\"\n try:\n \n data = pd.read_csv(fileName,delimiter = ',')\n\n #Checking necessary columns are there.\n columns=data.columns.values.tolist()\n accepted_columns=['experiment_name','treatment_name'] \n\n if(all(x in accepted_columns for x in columns)):\n new_data = pd.DataFrame(columns=['experiment_id', 'treatment_id'])\n new_data['experiment_id'] = data.apply(lambda row: fetch_id(row['experiment_name'],table='experiments'), axis=1)\n new_data['treatment_id'] = data.apply(lambda row: fetch_id(row['treatment_name'],table='treatments'), axis=1)\n insert_table(table='experiments_treatments',data=new_data)\n msg = {'Message' : 'Successfully inserted',\n 'Table Affected' : 'Experiments_treatments',\n 'Lines Inserted': data.shape[0]}\n \n return make_response(jsonify(msg), 201)\n\n else:\n msg = {'Message' : 'File not acceptable.Check the format of file or columns'}\n return make_response(jsonify(msg), 400)\n \n except OperationalError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : 'Database Conection Error'}\n return make_response(jsonify(msg), 500)\n except IntegrityError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : '(psycopg2.errors.UniqueViolation) duplicate key value violation.'}\n return make_response(jsonify(msg), 409)\n except Exception :\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n return 410\n\ndef insert_sitesCultivars(fileName):\n \"\"\"\n This function responds to a request for /yaba/v1/sites_cultivars\n with csv file\n\n\n :fileName: CSV file with sites_cultivars meta data\n :return: 201 on success\n 400 if file is unsuitable or does not contain appropriate columns\n 409 Intregrity or Constraint error : 23503 foreign_key_violation | 23505 unique_violation\n 500 Database Connection Error\n 401 Unauthorized | No user exists\n 410 Default error.See logs for more information\n \"\"\"\n try:\n data = pd.read_csv(fileName,delimiter = ',')\n\n #Checking necessary columns are there.\n columns=data.columns.values.tolist()\n accepted_columns=['sitename','cultivar_name','specie_id'] \n\n if(all(x in accepted_columns for x in columns)):\n new_data = pd.DataFrame(columns=['site_id', 'cultivar_id'])\n new_data['site_id'] = data.apply(lambda row: fetch_sites_id(row['sitename']), axis=1)\n new_data['cultivar_id'] = data.apply(lambda row: fetch_cultivars_id(row['cultivar_name'],row['specie_id']), axis=1)\n insert_table(table='sites_cultivars',data=new_data)\n msg = {'Message' : 'Successfully inserted',\n 'Table Affected' : 'Sites_cultivars',\n 'Lines Inserted': data.shape[0]}\n \n return make_response(jsonify(msg), 201)\n\n else:\n msg = {'Message' : 'File not acceptable.Check the format of file or columns'}\n return make_response(jsonify(msg), 400)\n \n except OperationalError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : 'Database Conection Error or Inconsistent Data'}\n return make_response(jsonify(msg), 500)\n except IntegrityError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : '(psycopg2.errors.UniqueViolation) duplicate key value violation.'}\n return make_response(jsonify(msg), 409)\n except Exception :\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n return 410\n \ndef insert_citationsSites(fileName):\n \"\"\"\n This function responds to a request for /yaba/v1/citations_sites\n with 
csv file\n\n\n :fileName: CSV file with citations_sites meta data\n :return: 201 on success\n 400 if file is unsuitable or does not contain appropriate columns\n 409 Intregrity or Constraint error : 23503 foreign_key_violation | 23505 unique_violation\n 500 Database Connection Error\n 401 Unauthorized | No user exists\n 410 Default error.See logs for more information\n \"\"\"\n \n try:\n data = pd.read_csv(fileName,delimiter = ',')\n\n #Checking necessary columns are there.\n columns=data.columns.values.tolist()\n accepted_columns=['author','year','title','sitename']\n\n if(all(x in accepted_columns for x in columns)):\n new_data = pd.DataFrame(columns=['citation_id','site_id'])\n new_data['site_id'] = data.apply(lambda row: fetch_sites_id(row['sitename']), axis=1)\n new_data['citation_id'] = data.apply(lambda row: fetch_citations_id(row['author'],row['year'],row['title']), axis=1)\n\n insert_table(table='citations_sites',data=new_data)\n msg = {'Message' : 'Successfully inserted',\n 'Table Affected' : 'Citations_sites',\n 'Lines Inserted': data.shape[0]}\n \n return make_response(jsonify(msg), 201)\n\n else:\n msg = {'Message' : 'File not acceptable and Check the format of file or columns',\n 'Table':'citations_sites'}\n return make_response(jsonify(msg), 400)\n \n except OperationalError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : 'Database Conection Error'}\n return make_response(jsonify(msg), 500)\n except IntegrityError:\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n msg = {'Message' : '(psycopg2.errors.UniqueViolation) duplicate key value violation.'}\n return make_response(jsonify(msg), 409)\n except Exception :\n # Logs the error appropriately\n logging.error(traceback.format_exc())\n return 410"
] | [
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
liujiajun/examples | [
"f5816f1720208c205411265357220afe03a5d16f"
] | [
"player_rulebased-A_py/player_rulebased-A.py"
] | [
"#!/usr/bin/python3\n\nfrom __future__ import print_function\n\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks\n\nfrom autobahn.wamp.serializer import MsgPackSerializer\nfrom autobahn.wamp.types import ComponentConfig\nfrom autobahn.twisted.wamp import ApplicationSession, ApplicationRunner\n\nimport argparse\nimport random\nimport math\nimport sys\n\nimport base64\nimport numpy as np\n\nimport helper\n\n# reset_reason\nNONE = 0\nGAME_START = 1\nSCORE_MYTEAM = 2\nSCORE_OPPONENT = 3\nGAME_END = 4\nDEADLOCK = 5\nGOALKICK = 6\nCORNERKICK = 7\nPENALTYKICK = 8\nHALFTIME = 9\nEPISODE_END = 10\n\n# game_state\nSTATE_DEFAULT = 0\nSTATE_KICKOFF = 1\nSTATE_GOALKICK = 2\nSTATE_CORNERKICK = 3\nSTATE_PENALTYKICK = 4\n\n# coordinates\nMY_TEAM = 0\nOP_TEAM = 1\nBALL = 2\nX = 0\nY = 1\nTH = 2\nACTIVE = 3\nTOUCH = 4\n\n\nclass Received_Image(object):\n def __init__(self, resolution, colorChannels):\n self.resolution = resolution\n self.colorChannels = colorChannels\n # need to initialize the matrix at timestep 0\n self.ImageBuffer = np.zeros((resolution[1], resolution[0], colorChannels)) # rows, columns, colorchannels\n\n def update_image(self, received_parts):\n self.received_parts = received_parts\n for i in range(0, len(received_parts)):\n dec_msg = base64.b64decode(self.received_parts[i].b64, '-_') # decode the base64 message\n np_msg = np.fromstring(dec_msg, dtype=np.uint8) # convert byte array to numpy array\n reshaped_msg = np_msg.reshape((self.received_parts[i].height, self.received_parts[i].width, 3))\n for j in range(0, self.received_parts[i].height): # y axis\n for k in range(0, self.received_parts[i].width): # x axis\n self.ImageBuffer[j + self.received_parts[i].y, k + self.received_parts[i].x, 0] = reshaped_msg[\n j, k, 0] # blue channel\n self.ImageBuffer[j + self.received_parts[i].y, k + self.received_parts[i].x, 1] = reshaped_msg[\n j, k, 1] # green channel\n self.ImageBuffer[j + self.received_parts[i].y, k + self.received_parts[i].x, 2] = reshaped_msg[\n j, k, 2] # red channel\n\n\nclass SubImage(object):\n def __init__(self, x, y, width, height, b64):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.b64 = b64\n\n\nclass Frame(object):\n def __init__(self):\n self.time = None\n self.score = None\n self.reset_reason = None\n self.game_state = None\n self.subimages = None\n self.coordinates = None\n self.half_passed = None\n\n\nclass Component(ApplicationSession):\n \"\"\"\n AI Base + Rule Based Algorithm\n \"\"\"\n\n def __init__(self, config):\n ApplicationSession.__init__(self, config)\n\n def printConsole(self, message):\n print(message)\n sys.__stdout__.flush()\n\n def onConnect(self):\n self.join(self.config.realm)\n\n @inlineCallbacks\n def onJoin(self, details):\n\n ##############################################################################\n def init_variables(self, info):\n # Here you have the information of the game (virtual init() in random_walk.cpp)\n # List: game_time, number_of_robots\n # field, goal, penalty_area, goal_area, resolution Dimension: [x, y]\n # ball_radius, ball_mass,\n # robot_size, robot_height, axle_length, robot_body_mass, ID: [0, 1, 2, 3, 4]\n # wheel_radius, wheel_mass, ID: [0, 1, 2, 3, 4]\n # max_linear_velocity, max_torque, codewords, ID: [0, 1, 2, 3, 4]\n self.game_time = info['game_time']\n self.number_of_robots = info['number_of_robots']\n\n self.field = info['field']\n self.goal = info['goal']\n self.penalty_area = info['penalty_area']\n # self.goal_area = info['goal_area']\n 
self.resolution = info['resolution']\n\n self.ball_radius = info['ball_radius']\n # self.ball_mass = info['ball_mass']\n\n self.robot_size = info['robot_size']\n # self.robot_height = info['robot_height']\n # self.axle_length = info['axle_length']\n # self.robot_body_mass = info['robot_body_mass']\n\n # self.wheel_radius = info['wheel_radius']\n # self.wheel_mass = info['wheel_mass']\n\n self.max_linear_velocity = info['max_linear_velocity']\n # self.max_torque = info['max_torque']\n # self.codewords = info['codewords']\n\n self.colorChannels = 3\n self.end_of_frame = False\n self.image = Received_Image(self.resolution, self.colorChannels)\n self.cur_posture = []\n self.cur_ball = []\n self.prev_posture = []\n self.prev_ball = []\n self.previous_frame = Frame()\n\n self.cur_count = 0\n self.end_count = 0\n self.prev_sender = None\n self.sender = None\n self.touch = [False,False,False,False,False]\n self.prev_receiver = None\n self.receiver = None\n self.def_idx = 0\n self.atk_idx = 0\n self.closest_order = []\n self.player_state = [None,None,None,None,None]\n\n self.wheels = [0 for _ in range(10)]\n return\n\n ##############################################################################\n\n try:\n info = yield self.call(u'aiwc.get_info', args.key)\n except Exception as e:\n self.printConsole(\"Error: {}\".format(e))\n else:\n try:\n self.sub = yield self.subscribe(self.on_event, args.key)\n except Exception as e2:\n self.printConsole(\"Error: {}\".format(e2))\n\n init_variables(self, info)\n\n try:\n yield self.call(u'aiwc.ready', args.key)\n except Exception as e:\n self.printConsole(\"Error: {}\".format(e))\n else:\n self.printConsole(\"I am ready for the game!\")\n\n # set the left and right wheel velocities of robot with id 'id'\n # 'max_velocity' scales the velocities up to the point where at least one of wheel is operating at max velocity\n def set_wheel_velocity(self, id, left_wheel, right_wheel, max_velocity):\n multiplier = 1\n\n # wheel velocities need to be scaled so that none of wheels exceed the maximum velocity available\n # otherwise, the velocity above the limit will be set to the max velocity by the simulation program\n # if that happens, the velocity ratio between left and right wheels will be changed that the robot may not execute\n # turning actions correctly.\n if (abs(left_wheel) > self.max_linear_velocity[id] or abs(right_wheel) > self.max_linear_velocity[\n id] or max_velocity):\n if (abs(left_wheel) > abs(right_wheel)):\n multiplier = self.max_linear_velocity[id] / abs(left_wheel)\n else:\n multiplier = self.max_linear_velocity[id] / abs(right_wheel)\n\n self.wheels[2 * id] = left_wheel * multiplier\n self.wheels[2 * id + 1] = right_wheel * multiplier\n\n # let the robot with id 'id' move to a target position (x, y)\n # the trajectory to reach the target position is determined by several different parameters\n def set_target_position(self, id, x, y, scale, mult_lin, mult_ang, max_velocity):\n damping = 0.35\n ka = 0\n sign = 1\n\n # calculate how far the target position is from the robot\n dx = x - self.cur_posture[id][X]\n dy = y - self.cur_posture[id][Y]\n d_e = math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))\n\n # calculate how much the direction is off\n desired_th = (math.pi / 2) if (dx == 0 and dy == 0) else math.atan2(dy, dx)\n d_th = desired_th - self.cur_posture[id][TH]\n while (d_th > math.pi):\n d_th -= 2 * math.pi\n while (d_th < -math.pi):\n d_th += 2 * math.pi\n\n # based on how far the target position is, set a parameter that\n # decides how much 
importance should be put into changing directions\n # farther the target is, less need to change directions fastly\n if (d_e > 1):\n ka = 17 / 90\n elif (d_e > 0.5):\n ka = 19 / 90\n elif (d_e > 0.3):\n ka = 21 / 90\n elif (d_e > 0.2):\n ka = 23 / 90\n else:\n ka = 25 / 90\n\n # if the target position is at rear of the robot, drive backward instead\n if (d_th > helper.d2r(95)):\n d_th -= math.pi\n sign = -1\n elif (d_th < helper.d2r(-95)):\n d_th += math.pi\n sign = -1\n\n # if the direction is off by more than 85 degrees,\n # make a turn first instead of start moving toward the target\n if (abs(d_th) > helper.d2r(85)):\n self.set_wheel_velocity(id, -mult_ang * d_th, mult_ang * d_th, False)\n # otherwise\n else:\n # scale the angular velocity further down if the direction is off by less than 40 degrees\n if (d_e < 5 and abs(d_th) < helper.d2r(40)):\n ka = 0.1\n ka *= 4\n\n # set the wheel velocity\n # 'sign' determines the direction [forward, backward]\n # 'scale' scales the overall velocity at which the robot is driving\n # 'mult_lin' scales the linear velocity at which the robot is driving\n # larger distance 'd_e' scales the base linear velocity higher\n # 'damping' slows the linear velocity down\n # 'mult_ang' and 'ka' scales the angular velocity at which the robot is driving\n # larger angular difference 'd_th' scales the base angular velocity higher\n # if 'max_velocity' is true, the overall velocity is scaled to the point\n # where at least one wheel is operating at maximum velocity\n self.set_wheel_velocity(id,\n sign * scale * (mult_lin * (\n 1 / (1 + math.exp(-3 * d_e)) - damping) - mult_ang * ka * d_th),\n sign * scale * (mult_lin * (\n 1 / (1 + math.exp(-3 * d_e)) - damping) + mult_ang * ka * d_th),\n max_velocity)\n\n # copy coordinates from frames to different variables just for convenience\n def get_coord(self, received_frame):\n self.cur_ball = received_frame.coordinates[BALL]\n self.cur_posture = received_frame.coordinates[MY_TEAM]\n self.cur_posture_op = received_frame.coordinates[OP_TEAM]\n self.prev_ball = self.previous_frame.coordinates[BALL]\n self.prev_posture = self.previous_frame.coordinates[MY_TEAM]\n self.prev_posture_op = self.previous_frame.coordinates[OP_TEAM]\n\n # find a defender and a forward closest to the ball\n def find_closest_robot(self):\n # find the closest defender\n min_idx = 0\n min_dist = 9999.99\n def_dist = 9999.99\n\n all_dist = []\n\n for i in [1, 2]:\n measured_dist = helper.dist(self.cur_ball[X], self.cur_posture[i][X], self.cur_ball[Y],\n self.cur_posture[i][Y])\n all_dist.append(measured_dist)\n if (measured_dist < min_dist):\n min_dist = measured_dist\n def_dist = min_dist\n min_idx = i\n\n self.def_idx = min_idx\n\n # find the closest forward\n min_idx = 0\n min_dist = 9999.99\n atk_dist = 9999.99\n\n for i in [3, 4]:\n measured_dist = helper.dist(self.cur_ball[X], self.cur_posture[i][X], self.cur_ball[Y],\n self.cur_posture[i][Y])\n all_dist.append(measured_dist)\n if (measured_dist < min_dist):\n min_dist = measured_dist\n atk_dist = min_dist\n min_idx = i\n\n self.atk_idx = min_idx\n\n # record the robot closer to the ball between the two too\n self.closest_order = np.argsort(all_dist) + 1\n\n # predict where the ball will be located after 'steps' steps\n def predict_ball_location(self, steps):\n dx = self.cur_ball[X] - self.prev_ball[X]\n dy = self.cur_ball[Y] - self.prev_ball[Y]\n return [self.cur_ball[X] + steps * dx, self.cur_ball[Y] + steps * dy]\n\n # let the robot face toward specific direction\n def 
face_specific_position(self, id, x, y):\n dx = x - self.cur_posture[id][X]\n dy = y - self.cur_posture[id][Y]\n\n desired_th = (math.pi / 2) if (dx == 0 and dy == 0) else math.atan2(dy, dx)\n\n self.angle(id, desired_th)\n\n # returns the angle toward a specific position from current robot posture\n def direction_angle(self, id, x, y):\n dx = x - self.cur_posture[id][X]\n dy = y - self.cur_posture[id][Y]\n\n return ((math.pi / 2) if (dx == 0 and dy == 0) else math.atan2(dy, dx))\n\n # turn to face 'desired_th' direction\n def angle(self, id, desired_th):\n mult_ang = 0.4\n\n d_th = desired_th - self.cur_posture[id][TH]\n d_th = helper.trim_radian(d_th)\n\n # the robot instead puts the direction rear if the angle difference is large\n if (d_th > helper.d2r(95)):\n d_th -= math.pi\n sign = -1\n elif (d_th < helper.d2r(-95)):\n d_th += math.pi\n sign = -1\n\n self.set_wheel_velocity(id, -mult_ang * d_th, mult_ang * d_th, False)\n\n # checks if a certain position is inside the penalty area of 'team'\n def in_penalty_area(self, obj, team):\n if (abs(obj[Y]) > self.penalty_area[Y] / 2):\n return False\n\n if (team == MY_TEAM):\n return (obj[X] < -self.field[X] / 2 + self.penalty_area[X])\n else:\n return (obj[X] > self.field[X] / 2 - self.penalty_area[X])\n\n # check if the ball is coming toward the robot\n def ball_coming_toward_robot(self, id):\n x_dir = abs(self.cur_posture[id][X] - self.prev_ball[X]) > abs(self.cur_posture[id][X] - self.cur_ball[X])\n y_dir = abs(self.cur_posture[id][Y] - self.prev_ball[Y]) > abs(self.cur_posture[id][Y] - self.cur_ball[Y])\n\n # ball is coming closer\n if (x_dir and y_dir):\n return True\n else:\n return False\n\n # check if the robot with id 'id' has a chance to shoot\n def shoot_chance(self, id):\n dx = self.cur_ball[X] - self.cur_posture[id][X]\n dy = self.cur_ball[Y] - self.cur_posture[id][Y]\n\n # if the ball is located further on left than the robot, it will be hard to shoot\n if (dx < 0):\n return False\n\n # if the robot->ball direction aligns with opponent's goal, the robot can shoot\n y = (self.field[X] / 2 - self.cur_ball[X]) * dy / dx + self.cur_posture[id][Y]\n if (abs(y) < self.goal[Y] / 2):\n return True\n else:\n return False\n\n # check if sender/receiver pair should be reset\n def reset_condition(self) :\n # if the time is over, setting is reset\n if (self.end_count > 0 and self.end_count - self.cur_count < 0) :\n return True\n\n # if there is no sender and receiver is not in shoot chance, setting is cleared\n if not self.sender is None :\n if not self.shoot_chance(self.sender) :\n return True\n return False\n\n # check if a sender can be selected\n def set_sender_condition(self) :\n for i in range(1,5) :\n # if this robot is near the ball, it will be a sender candidate\n dist = helper.dist(self.cur_posture[i][X], self.cur_ball[X], self.cur_posture[i][Y], self.cur_ball[Y])\n if dist < 0.5 and self.cur_posture[i][ACTIVE]: return True\n return False\n\n # check if a receiver should be selected\n def set_receiver_condition(self) :\n # if a sender exists, any other robots can be receiver candidates\n if self.sender != None and self.receiver == None: return True\n return False\n\n # select a sender\n def set_sender(self, _player_list):\n distance_list = []\n for sender in _player_list :\n predict_ball = self.predict_ball_location(3)\n ball_distance = helper.dist(predict_ball[X], self.cur_posture[sender][X], predict_ball[Y], self.cur_posture[sender][Y])\n distance_list.append(ball_distance)\n\n # if the distance between ball and sender is less 
than 1, choose the closest robot as the sender\n if min(distance_list) < 1.0 :\n return distance_list.index(min(distance_list)) + 1\n\n # otherwise, there is no sender\n return None\n\n # select a receiver\n def set_receiver(self, _player_list):\n receiver_op_dist_list = []\n for receiver in _player_list :\n temp_receiver_op_dist_list = []\n # the sender is not a receiver candidate\n if receiver == self.sender :\n receiver_op_dist_list.append(999)\n continue\n\n # the distance between the robot and opponents\n for op in range(1, 5) : #[1,2,3,4]\n op_distance = helper.dist(self.cur_posture[receiver][X], self.cur_posture_op[op][X], self.cur_posture[receiver][Y], self.cur_posture_op[op][Y])\n temp_receiver_op_dist_list.append(op_distance)\n\n # save the shortest distance between this robot and one of opponents\n receiver_op_dist_list.append(min(temp_receiver_op_dist_list))\n\n receiver_ball_list = []\n for r in receiver_op_dist_list :\n # if the minimum distance between player and opponent's player is less than 0.5, this robot cannot be receiver\n if r < 0.5 or r == 999:\n receiver_ball_list.append(999)\n continue\n id = receiver_op_dist_list.index(r) + 1\n receiver_ball_distance = helper.dist(self.cur_ball[X], self.cur_posture[id][X], self.cur_ball[Y], self.cur_posture[id][Y])\n receiver_ball_list.append(receiver_ball_distance)\n\n if min(receiver_ball_list) < 999 :\n min_id = receiver_ball_list.index(min(receiver_ball_list)) + 1\n return min_id\n return None\n\n def pass_ball(self):\n if self.prev_sender == self.receiver or self.prev_receiver == self.sender :# and not None in [self.prev_sender, self.prev_receiver, self.sender, self.receiver] :\n self.sender = self.prev_sender\n self.receiver = self.prev_receiver\n\n self.receive_ball()\n self.send_ball()\n\n self.prev_sender = self.sender\n self.prev_receiver = self.receiver\n\n def send_ball(self) :\n if self.sender == None :\n return\n\n goal_dist = helper.dist(4.0, self.cur_posture[self.sender][X], 0, self.cur_posture[self.sender][Y])\n # if the sender has a shoot chance, it tries to shoot\n if self.shoot_chance(self.sender) :\n if goal_dist > 0.3 * self.field[X] / 2:\n self.actions(self.sender, 'dribble',refine=True)\n return\n else :\n self.actions(self.sender, 'kick')\n return\n\n # if the receiver exists, get the distance between the sender and the receiver\n sender_receiver_dist = None\n if not self.receiver == None :\n sender_receiver_dist = helper.dist(self.cur_posture[self.sender][X], self.cur_posture[self.receiver][X],self.cur_posture[self.sender][Y], self.cur_posture[self.receiver][Y])\n\n # if the sender is close to the receiver, the sender kicks the ball\n if not sender_receiver_dist == None :\n if sender_receiver_dist < 0.3 and not self.cur_posture[self.receiver][TOUCH]:\n self.actions(self.sender, 'kick')\n return\n\n ift, theta_diff = self.is_facing_target(self.sender, self.cur_ball[X], self.cur_ball[Y])\n if not ift :\n # after the sender kicks, it stops\n if theta_diff > math.pi * 3/4 :\n self.actions(self.sender, None)\n return\n else :\n self.actions(self.sender, 'follow',refine=True)\n return\n\n # if the ball is in front of the sender and sender is moving backward\n if self.cur_posture[self.sender][X] < - 0.8 * self.field[X] / 2 :\n if self.cur_posture[self.sender][X] - self.prev_posture[self.sender][X] < 0 :\n self.actions(self.sender, 'backward')\n\n self.actions(self.sender, 'dribble',refine=True)\n return\n\n def receive_ball(self) :\n # if receiver does not exist, do nothing\n if self.receiver == None :\n 
return\n\n goal_dist = helper.dist(4.0, self.cur_posture[self.receiver][X], 0, self.cur_posture[self.receiver][Y])\n # if sender is in shoot chance, receiver does nothing(reset)\n if self.shoot_chance(self.sender) :\n self.actions(self.receiver,None)\n return\n # if receiver is in shoot chance, receiver try to shoot\n if self.shoot_chance(self.receiver) :\n if goal_dist > 0.3 * self.field[X] / 2:\n self.actions(self.receiver, 'dribble',refine=True)\n return\n else :\n self.actions(self.receiver, 'kick')\n return\n\n # if sender exists\n if not self.sender == None :\n s2risFace, _ = self.is_facing_target(self.sender, self.cur_posture[self.receiver][X], self.cur_posture[self.receiver][Y],4)\n r2sisFace, _ = self.is_facing_target(self.receiver, self.cur_posture[self.sender][X], self.cur_posture[self.sender][Y],4)\n # if sender and receiver directs each other\n if s2risFace and r2sisFace :\n if self.cur_posture[self.receiver][TH] > 0 or self.cur_posture[self.receiver][TH] < -3 :\n self.actions(self.receiver,'follow', [self.prev_posture[self.receiver][X], self.prev_posture[self.receiver][Y] - 0.5 * self.field[Y]])\n return\n self.actions(self.receiver, 'follow',[self.prev_posture[self.receiver][X], self.prev_posture[self.receiver][Y] + 0.5 * self.field[Y]])\n return\n\n r_point = self.cur_ball\n # if sender exists\n if not self.sender == None:\n r_point = self.receive_position()\n receiver_ball_dist = helper.dist(self.cur_ball[X], self.cur_posture[self.receiver][X], self.cur_ball[Y],self.cur_posture[self.receiver][Y])\n # if ball is close to receiver\n if receiver_ball_dist > 0.3 * self.field[X] / 2 :\n self.actions(self.receiver, 'follow', [r_point[X], r_point[Y]],refine=True)\n return\n\n r2bisFace, _ = self.is_facing_target(self.receiver, self.cur_ball[X], self.cur_ball[Y], 4)\n if not r2bisFace :\n self.actions(self.receiver, 'follow',refine=True)\n return\n # if receiver is moving to our goal area\n if self.cur_posture[self.receiver][X] < - 0.8 * self.field[X] / 2 :\n if self.cur_posture[self.receiver][X] - self.prev_posture[self.receiver][X] < 0 :\n self.actions(self.receiver, 'backward')\n\n self.actions(self.receiver, 'dribble')\n return\n\n # let robot with id 'id' execute an action directed by 'mode'\n def actions(self, id, mode = None, target_pts = None, params = None, refine = False) :\n if id == None :\n return\n\n # if the player state is set to 'stop', force the mode to be 'stop'\n if self.player_state[id] == 'stop' :\n mode = 'stop'\n\n if mode == None :\n # reset all robot status\n if self.sender == id :\n self.sender = None\n self.touch = [False, False, False, False, False]\n if self.receiver == id :\n self.receiver = None\n self.player_state[id] = None\n return\n if mode == 'follow' :\n # let the robot follow the ball\n if target_pts == None :\n target_pts = self.predict_ball_location(3)\n if params == None :\n params = [1.0, 3.0, 0.6, False]\n if refine :\n self.set_pos_parameters(id, target_pts, params)\n self.set_target_position(id, target_pts[X], target_pts[Y], params[0], params[1], params[2], params[3])\n self.player_state[id] = 'follow'\n return\n if mode == 'dribble' :\n # let the robot follow the ball but at a faster speed\n if target_pts == None :\n target_pts = self.cur_ball\n if params == None :\n params = [1.4, 5.0, 0.8, False]\n if refine :\n self.set_pos_parameters(id, target_pts, params)\n self.set_target_position(id, target_pts[X], target_pts[Y], params[0], params[1], params[2], params[3])\n self.player_state[id] = 'dribble'\n return\n if mode == 'kick' :\n # 
kick the ball\n if target_pts == None :\n target_pts = self.cur_ball\n if params == None :\n params = [1.4, 5.0, 0.8, True]\n if self.end_count == 0 and not self.touch[id] :\n self.end_count = self.cur_count + 10 # 0.05 * cnt seconds\n self.player_state[id] = 'kick'\n if self.touch[id] :\n self.player_state[id] = 'stop'\n if not self.touch[id] :\n self.touch[id] = self.cur_posture[id][TOUCH]\n if self.player_state[id] == 'stop' :\n params = [0.0, 0.0, 0.0, False]\n self.set_target_position(id, target_pts[X], target_pts[Y], params[0], params[1], params[2], params[3])\n return\n if mode == 'stop' :\n # stop while counter is on\n if params == None :\n params = [0.0, 0.0, False]\n self.set_wheel_velocity(id, params[0], params[1], params[2])\n if self.end_count == 0 :\n self.end_count = self.cur_count + 5 # 0.05 * cnt seconds\n self.player_state[id] = 'stop'\n if self.end_count - 1 == self.cur_count :\n self.player_state[id] = None\n return\n if mode == 'backward' :\n # retreat from the current position\n if target_pts == None :\n target_pts = [self.cur_posture[id][X] + 0.2, self.cur_posture[id][Y]]\n if params == None :\n params = [1.4, 5.0, 0.8, False]\n if refine :\n self.set_pos_parameters(id, target_pts, params)\n self.set_target_position(id, target_pts[X], target_pts[Y], params[0], params[1], params[2], params[3])\n self.player_state[id] = 'backward'\n return\n if mode == 'position' :\n # go toward target position\n self.set_target_position(id, target_pts[X], target_pts[Y], params[0], params[1], params[2], params[3])\n return\n\n def set_pos_parameters(self,id,target_pts,params,mult = 1.2):\n prev_dist = helper.dist(self.prev_posture[id][X],target_pts[X],self.prev_posture[id][Y],target_pts[Y])\n cur_dist = helper.dist(self.cur_posture[id][X],target_pts[X],self.cur_posture[id][Y],target_pts[Y])\n if cur_dist > prev_dist - 0.02 :\n params = [params[0] * mult, params[1] * mult, params[2] * mult, params[3]]\n return params\n\n def is_facing_target(self, id, x, y, div = 4):\n dx = x - self.cur_posture[id][X]\n dy = y - self.cur_posture[id][Y]\n ds = math.sqrt(dx * dx + dy * dy)\n desired_th = (self.cur_posture[id][TH] if (ds == 0) else math.acos(dx / ds))\n\n theta = self.cur_posture[id][TH]\n if desired_th < 0:\n desired_th += math.pi * 2\n if theta < 0:\n theta += math.pi * 2\n diff_theta = abs(desired_th - theta)\n if diff_theta > math.pi:\n diff_theta = min(diff_theta, math.pi * 2 - diff_theta)\n if diff_theta < math.pi / div or diff_theta > math.pi * (1 - 1 / div):\n return [True, diff_theta]\n return [False, diff_theta]\n\n def receive_position(self):\n step = 5\n ball_receiver_dist = helper.dist(self.cur_ball[X], self.cur_posture[self.receiver][X], self.cur_ball[Y],\n self.cur_posture[self.receiver][Y])\n prev_ball_receiver_dist = helper.dist(self.prev_ball[X], self.prev_posture[self.receiver][X],\n self.prev_ball[Y], self.prev_posture[self.receiver][Y])\n\n diff_dist = prev_ball_receiver_dist - ball_receiver_dist\n if diff_dist > 0:\n step = ball_receiver_dist # diff_dist\n\n step = min(step, 15)\n\n predict_pass_point = self.predict_ball_location(step)\n\n ball_goal_dist = helper.dist(self.cur_ball[X], self.field[X] / 2, self.cur_ball[Y], 0)\n prev_ball_goal_dist = helper.dist(self.prev_ball[X], self.field[X] / 2, self.prev_ball[Y], 0)\n if ball_goal_dist > prev_ball_goal_dist:\n predict_pass_point[X] = predict_pass_point[X] - 0.15\n\n return predict_pass_point\n\n @inlineCallbacks\n def on_event(self, f):\n\n @inlineCallbacks\n def set_wheel(self, robot_wheels):\n yield 
self.call(u'aiwc.set_speed', args.key, robot_wheels)\n return\n\n # a basic goalkeeper rulbased algorithm\n def goalkeeper(self, id):\n # default desired position\n x = (-self.field[X] / 2) + (self.robot_size[id] / 2) + 0.05\n y = max(min(self.cur_ball[Y], (self.goal[Y] / 2 - self.robot_size[id] / 2)),\n -self.goal[Y] / 2 + self.robot_size[id] / 2)\n\n # if the robot is inside the goal, try to get out\n if (self.cur_posture[id][X] < -self.field[X] / 2):\n if (self.cur_posture[id][Y] < 0):\n self.set_target_position(id, x, self.cur_posture[id][Y] + 0.2, 1.4, 5.0, 0.4, False)\n else:\n self.set_target_position(id, x, self.cur_posture[id][Y] - 0.2, 1.4, 5.0, 0.4, False)\n # if the goalkeeper is outside the penalty area\n elif (not self.in_penalty_area(self.cur_posture[id], MY_TEAM)):\n # return to the desired position\n self.set_target_position(id, x, y, 1.4, 5.0, 0.4, True)\n # if the goalkeeper is inside the penalty area\n else:\n # if the ball is inside the penalty area\n if (self.in_penalty_area(self.cur_ball, MY_TEAM)):\n # if the ball is behind the goalkeeper\n if (self.cur_ball[X] < self.cur_posture[id][X]):\n # if the ball is not blocking the goalkeeper's path\n if (abs(self.cur_ball[Y] - self.cur_posture[id][Y]) > 2 * self.robot_size[id]):\n # try to get ahead of the ball\n self.set_target_position(id, self.cur_ball[X] - self.robot_size[id], self.cur_posture[id][Y], 1.4, 5.0,\n 0.4, False)\n else:\n # just give up and try not to make a suicidal goal\n self.angle(id, math.pi / 2)\n # if the ball is ahead of the goalkeeper\n else:\n desired_th = self.direction_angle(id, self.cur_ball[X], self.cur_ball[Y])\n rad_diff = helper.trim_radian(desired_th - self.cur_posture[id][TH])\n # if the robot direction is too away from the ball direction\n if (rad_diff > math.pi / 3):\n # give up kicking the ball and block the goalpost\n self.set_target_position(id, x, y, 1.4, 5.0, 0.4, False)\n else:\n # try to kick the ball away from the goal\n self.set_target_position(id, self.cur_ball[X], self.cur_ball[Y], 1.4, 3.0, 0.8, True)\n # if the ball is not in the penalty area\n else:\n # if the ball is within alert range and y position is not too different\n if (self.cur_ball[X] < -self.field[X] / 2 + 1.5 * self.penalty_area[X] and abs(\n self.cur_ball[Y]) < 1.5 * self.penalty_area[Y] / 2 and abs(\n self.cur_ball[Y] - self.cur_posture[id][Y]) < 0.2):\n self.face_specific_position(id, self.cur_ball[X], self.cur_ball[Y])\n # otherwise\n else:\n self.set_target_position(id, x, y, 1.4, 5.0, 0.4, True)\n\n # a basic defender rulebased algorithm\n def defender(self, id):\n # if the robot is inside the goal, try to get out\n if (self.cur_posture[id][X] < -self.field[X] / 2):\n if (self.cur_posture[id][Y] < 0):\n self.set_target_position(id, -0.7 * self.field[X] / 2, self.cur_posture[id][Y] + 0.2, 1.4, 3.5, 0.6, False)\n else:\n self.set_target_position(id, -0.7 * self.field[X] / 2, self.cur_posture[id][Y] - 0.2, 1.4, 3.5, 0.6, False)\n return\n # the defender may try to shoot if condition meets\n if (id == self.def_idx and self.shoot_chance(id) and self.cur_ball[X] < 0.3 * self.field[X] / 2):\n self.set_target_position(id, self.cur_ball[X], self.cur_ball[Y], 1.4, 5.0, 0.4, True)\n return\n\n # if this defender is closer to the ball than the other defender\n if (id == self.def_idx):\n # ball is on our side\n if (self.cur_ball[X] < 0):\n # if the robot can push the ball toward opponent's side, do it\n if (self.cur_posture[id][X] < self.cur_ball[X] - self.ball_radius):\n self.set_target_position(id, 
self.cur_ball[X], self.cur_ball[Y], 1.4, 5.0, 0.4, True)\n else:\n # otherwise go behind the ball\n if (abs(self.cur_ball[Y] - self.cur_posture[id][Y]) > 0.3):\n self.set_target_position(id, max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2),\n self.cur_ball[Y], 1.4, 3.5, 0.6, False)\n else:\n self.set_target_position(id, max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2),\n self.cur_posture[id][Y], 1.4, 3.5, 0.6, False)\n else:\n self.set_target_position(id, -0.7 * self.field[X] / 2, self.cur_ball[Y], 1.4, 3.5, 0.4, False)\n # if this defender is not closer to the ball than the other defender\n else:\n # ball is on our side\n if (self.cur_ball[X] < 0):\n # ball is on our left\n if (self.cur_ball[Y] > self.goal[Y] / 2 + 0.15):\n self.set_target_position(id,\n max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2 + 0.1),\n self.goal[Y] / 2 + 0.15, 1.4, 3.5, 0.4, False)\n # ball is on our right\n elif (self.cur_ball[Y] < -self.goal[Y] / 2 - 0.15):\n self.set_target_position(id,\n max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2 + 0.1),\n -self.goal[Y] / 2 - 0.15, 1.4, 3.5, 0.4, False)\n # ball is in center\n else:\n self.set_target_position(id,\n max(self.cur_ball[X] - 0.5, -self.field[X] / 2 + self.robot_size[id] / 2 + 0.1),\n self.cur_ball[Y], 1.4, 3.5, 0.4, False)\n else:\n # ball is on right side\n if (self.cur_ball[Y] < 0):\n self.set_target_position(id, -0.7 * self.field[X] / 2,\n min(self.cur_ball[Y] + 0.5, self.field[Y] / 2 - self.robot_size[id] / 2), 1.4,\n 3.5, 0.4, False)\n # ball is on left side\n else:\n self.set_target_position(id, -0.7 * self.field[X] / 2,\n max(self.cur_ball[Y] - 0.5, -self.field[Y] / 2 + self.robot_size[id] / 2), 1.4,\n 3.5, 0.4, False)\n\n # a basic forward rulebased algorithm\n def forward(self, id):\n # if the robot is blocking the ball's path toward opponent side\n if (self.cur_ball[X] > -0.3 * self.field[X] / 2 and self.cur_ball[X] < 0.3 * self.field[X] / 2 and\n self.cur_posture[id][X] > self.cur_ball[X] + 0.1 and abs(\n self.cur_posture[id][Y] - self.cur_ball[Y]) < 0.3):\n if (self.cur_ball[Y] < 0):\n self.set_target_position(id, self.cur_posture[id][X] - 0.25, self.cur_ball[Y] + 0.75, 1.4, 3.0, 0.8, False)\n else:\n self.set_target_position(id, self.cur_posture[id][X] - 0.25, self.cur_ball[Y] - 0.75, 1.4, 3.0, 0.8, False)\n return\n\n # if the robot can shoot from current position\n if (id == self.atk_idx and self.shoot_chance(id)):\n pred_ball = self.predict_ball_location(2)\n self.set_target_position(id, pred_ball[X], pred_ball[Y], 1.4, 5.0, 0.4, True)\n return\n\n # if the ball is coming toward the robot, seek for shoot chance\n if (id == self.atk_idx and self.ball_coming_toward_robot(id)):\n dx = self.cur_ball[X] - self.prev_ball[X]\n dy = self.cur_ball[Y] - self.prev_ball[Y]\n pred_x = (self.cur_posture[id][Y] - self.cur_ball[Y]) * dx / dy + self.cur_ball[X]\n steps = (self.cur_posture[id][Y] - self.cur_ball[Y]) / dy\n\n # if the ball will be located in front of the robot\n if (pred_x > self.cur_posture[id][X]):\n pred_dist = pred_x - self.cur_posture[id][X]\n # if the predicted ball location is close enough\n if (pred_dist > 0.1 and pred_dist < 0.3 and steps < 10):\n # find the direction towards the opponent goal and look toward it\n goal_angle = self.direction_angle(id, self.field[X] / 2, 0)\n self.angle(id, goal_angle)\n return\n\n # if this forward is closer to the ball than the other forward\n if (id == self.atk_idx):\n if (self.cur_ball[X] > -0.3 * 
self.field[X] / 2):\n # if the robot can push the ball toward opponent's side, do it\n if (self.cur_posture[id][X] < self.cur_ball[X] - self.ball_radius):\n self.set_target_position(id, self.cur_ball[X], self.cur_ball[Y], 1.4, 5.0, 0.4, True)\n else:\n # otherwise go behind the ball\n if (abs(self.cur_ball[Y] - self.cur_posture[id][Y]) > 0.3):\n self.set_target_position(id, self.cur_ball[X] - 0.2, self.cur_ball[Y], 1.4, 3.5, 0.6, False)\n else:\n self.set_target_position(id, self.cur_ball[X] - 0.2, self.cur_posture[id][Y], 1.4, 3.5, 0.6, False)\n else:\n self.set_target_position(id, -0.1 * self.field[X] / 2, self.cur_ball[Y], 1.4, 3.5, 0.4, False)\n # if this forward is not closer to the ball than the other forward\n else:\n if (self.cur_ball[X] > -0.3 * self.field[X] / 2):\n # ball is on our right\n if (self.cur_ball[Y] < 0):\n self.set_target_position(id, self.cur_ball[X] - 0.25, self.goal[Y] / 2, 1.4, 3.5, 0.4, False)\n # ball is on our left\n else:\n self.set_target_position(id, self.cur_ball[X] - 0.25, -self.goal[Y] / 2, 1.4, 3.5, 0.4, False)\n else:\n # ball is on right side\n if (self.cur_ball[Y] < 0):\n self.set_target_position(id, -0.1 * self.field[X] / 2,\n min(-self.cur_ball[Y] - 0.5, self.field[Y] / 2 - self.robot_size[id] / 2), 1.4,\n 3.5, 0.4, False)\n # ball is on left side\n else:\n self.set_target_position(id, -0.1 * self.field[X] / 2,\n max(-self.cur_ball[Y] + 0.5, -self.field[Y] / 2 + self.robot_size[id] / 2), 1.4,\n 3.5, 0.4, False)\n\n def default_rulebased(self, player_list):\n for p in player_list:\n # Add actions instead of default rulebase(goalkeeper, defender, forward) actions\n # If this robot is stuck at field sides, move forward the center\n if pow(self.prev_posture[p][X] - self.cur_posture[p][X],2) + pow(self.prev_posture[p][Y] - self.cur_posture[p][Y],2) < 5e-6:\n if self.cur_posture[p][Y] > 0 :\n self.set_target_position(p, 0, 0, 1.4, 3.5, 0.4, False)\n continue\n if p == 0:\n goalkeeper(self, 0)\n continue\n if p == 1 or p == 2:\n defender(self, p)\n continue\n if p == 3 or p == 4:\n forward(self, p)\n continue\n\n def passing_play(self, player_list):\n def find_active_player(self, ids):\n _ids = []\n for i in ids:\n if self.cur_posture[i][ACTIVE] :\n _ids.append(i)\n return _ids\n # select only alive player\n _player_list = find_active_player(self, player_list)\n self.cur_count = round(received_frame.time * 20) # count = 50 ms\n\n if self.end_count == self.cur_count :\n self.end_count = 0\n\n if self.reset_condition() :\n self.sender = None\n self.sender_touch = False\n self.receiver = None\n # check if sender exists\n if self.set_sender_condition() :\n self.sender = self.set_sender( _player_list)\n # check if receiver exists\n if self.set_receiver_condition():\n self.receiver = self.set_receiver(_player_list)\n\n if (self.sender != None and self.receiver != None):\n self.pass_ball()\n # if player is sender\n if self.sender in _player_list:\n _player_list.remove(self.sender)\n # if player is receiver\n if self.receiver in _player_list:\n _player_list.remove(self.receiver)\n\n default_rulebased(self, _player_list)\n return\n\n # initiate empty frame\n received_frame = Frame()\n received_subimages = []\n\n if 'time' in f:\n received_frame.time = f['time']\n if 'score' in f:\n received_frame.score = f['score']\n if 'reset_reason' in f:\n received_frame.reset_reason = f['reset_reason']\n if 'game_state' in f:\n received_frame.game_state = f['game_state']\n if 'ball_ownership' in f:\n received_frame.ball_ownership = f['ball_ownership']\n if 'half_passed' in 
f:\n received_frame.half_passed = f['half_passed']\n if 'subimages' in f:\n received_frame.subimages = f['subimages']\n for s in received_frame.subimages:\n received_subimages.append(SubImage(s['x'],\n s['y'],\n s['w'],\n s['h'],\n s['base64'].encode('utf8')))\n self.image.update_image(received_subimages)\n if 'coordinates' in f:\n received_frame.coordinates = f['coordinates']\n if 'EOF' in f:\n self.end_of_frame = f['EOF']\n\n if (self.end_of_frame):\n # to get the image at the end of each frame use the variable:\n # self.image.ImageBuffer\n\n if (received_frame.reset_reason != NONE):\n self.previous_frame = received_frame\n\n self.get_coord(received_frame)\n self.find_closest_robot()\n\n if (received_frame.reset_reason == EPISODE_END):\n # EPISODE_END is sent instead of GAME_END when 'repeat' option is set to 'true'\n # to mark the end of episode\n # you can reinitialize the parameters, count the number of episodes done, etc. here\n\n # this example does not do anything at episode end\n pass\n\n if (received_frame.reset_reason == HALFTIME):\n # halftime is met - from next frame, received_frame.half_passed will be set to True\n # although the simulation switches sides,\n # coordinates and images given to your AI soccer algorithm will stay the same\n # that your team is red and located on left side whether it is 1st half or 2nd half\n\n # this example does not do anything at halftime\n pass\n\n ##############################################################################\n if (received_frame.game_state == STATE_DEFAULT):\n # robot functions in STATE_DEFAULT\n # goalkeeper simply executes goalkeeper algorithm on its own\n goalkeeper(self, 0)\n\n # defenders and forwards can pass ball to each other if necessary\n passing_play(self, [1, 2, 3, 4])\n set_wheel(self, self.wheels)\n ##############################################################################\n elif (received_frame.game_state == STATE_KICKOFF):\n # if the ball belongs to my team, initiate kickoff\n if (received_frame.ball_ownership):\n self.set_target_position(4, 0, 0, 1.4, 3.0, 0.4, False)\n\n set_wheel(self, self.wheels)\n ##############################################################################\n elif (received_frame.game_state == STATE_GOALKICK):\n # if the ball belongs to my team,\n # drive the goalkeeper to kick the ball\n if (received_frame.ball_ownership):\n self.set_wheel_velocity(0, self.max_linear_velocity[0], self.max_linear_velocity[0], True)\n\n set_wheel(self, self.wheels)\n ##############################################################################\n elif (received_frame.game_state == STATE_CORNERKICK):\n # just play as simple as possible\n goalkeeper(self, 0)\n defender(self, 1)\n defender(self, 2)\n forward(self, 3)\n forward(self, 4)\n\n set_wheel(self, self.wheels)\n ##############################################################################\n elif (received_frame.game_state == STATE_PENALTYKICK):\n # if the ball belongs to my team,\n # drive the forward to kick the ball\n if (received_frame.ball_ownership):\n self.set_wheel_velocity(4, self.max_linear_velocity[0], self.max_linear_velocity[0], True)\n\n set_wheel(self, self.wheels)\n ##############################################################################\n if (received_frame.reset_reason == GAME_END):\n # (virtual finish() in random_walk.cpp)\n # save your data\n with open(args.datapath + '/result.txt', 'w') as output:\n # output.write('yourvariables')\n output.close()\n # unsubscribe; reset or leave\n yield self.sub.unsubscribe()\n try:\n 
yield self.leave()\n except Exception as e:\n self.printConsole(\"Error: {}\".format(e))\n ##############################################################################\n\n self.end_of_frame = False\n self.previous_frame = received_frame\n\n def onDisconnect(self):\n if reactor.running:\n reactor.stop()\n\n\nif __name__ == '__main__':\n\n try:\n unicode\n except NameError:\n # Define 'unicode' for Python 3\n def unicode(s, *_):\n return s\n\n\n def to_unicode(s):\n return unicode(s, \"utf-8\")\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"server_ip\", type=to_unicode)\n parser.add_argument(\"port\", type=to_unicode)\n parser.add_argument(\"realm\", type=to_unicode)\n parser.add_argument(\"key\", type=to_unicode)\n parser.add_argument(\"datapath\", type=to_unicode)\n\n args = parser.parse_args()\n\n ai_sv = \"rs://\" + args.server_ip + \":\" + args.port\n ai_realm = args.realm\n\n # create a Wamp session object\n session = Component(ComponentConfig(ai_realm, {}))\n\n # initialize the msgpack serializer\n serializer = MsgPackSerializer()\n\n # use Wamp-over-rawsocket\n runner = ApplicationRunner(ai_sv, ai_realm, serializers=[serializer])\n\n runner.run(session, auto_reconnect=False)\n"
] | [
[
"numpy.fromstring",
"numpy.argsort",
"numpy.zeros"
]
] |
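The API list for the record above includes `numpy.fromstring`, which `Received_Image.update_image` uses to turn the base64-decoded sub-image bytes into a `uint8` array; newer NumPy releases emit a DeprecationWarning for that use. Below is a minimal sketch of the equivalent decode with `numpy.frombuffer` — the 2×2 payload is made up for illustration and is not part of the record.

```python
import base64
import numpy as np

# Hypothetical stand-in for one SubImage part: 2x2 pixels, 3 color channels.
raw = bytes(range(12))
b64 = base64.b64encode(raw)

dec_msg = base64.b64decode(b64)
# np.fromstring(dec_msg, dtype=np.uint8) is deprecated for raw bytes;
# np.frombuffer is the drop-in replacement and avoids an extra copy.
np_msg = np.frombuffer(dec_msg, dtype=np.uint8)
reshaped_msg = np_msg.reshape((2, 2, 3))  # (height, width, channels), as in update_image
print(reshaped_msg.shape)
```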
fluiddyn/fluiddyn | [
"04d125cb4590da7d5db80dac1d20577ce012a005"
] | [
"fluiddyn/calcul/test/test_signal.py"
] | [
"import unittest\n\nimport numpy as np\n\nfrom ..signal import FunctionLinInterp, decimate, deriv, smooth\n\n\nclass TestFFTW1DReal2Complex(unittest.TestCase):\n def test_signal(self):\n x = np.linspace(0, 2 * np.pi, 100)\n f = np.sin(x) + 0.02 * (np.random.rand(100) - 0.5)\n smooth(f, window_len=11, window=\"hanning\")\n deriv(f, x, method=\"diff\")\n deriv(f, x, dx=1, method=\"convolve\")\n deriv(f, x, dx=1, method=\"gaussian_filter\")\n\n sig = np.zeros([4, 4, 4])\n decimate(sig, 2, nwindow=2, axis=0)\n\n lin_interp = FunctionLinInterp(x, f)\n lin_interp(2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.linspace",
"numpy.sin",
"numpy.random.rand",
"numpy.zeros"
]
] |
cogitate3/stock | [
"bba986cbbb17de9f424c3b5417a17d1bb1204403"
] | [
"datahub/zdt.py"
] | [
"# -*- coding=utf-8 -*-\n__author__ = 'Rocky'\n'''\nhttp://30daydo.com\nContact: [email protected]\n'''\n# 每天的涨跌停\nimport sys\nsys.path.append('..')\nimport re\nimport time\nimport os\nfrom configure.util import notify\nfrom configure.settings import config_dict\nimport pandas as pd\nfrom configure.settings import DBSelector, send_from_aliyun\nimport requests\nimport datetime\nfrom common.BaseService import BaseService\n\n\nclass GetZDT(BaseService):\n\n def __init__(self, today=None):\n '''\n TODAY 格式 20200701\n :param today:\n '''\n super(GetZDT, self).__init__('log/zdt.log')\n\n if today:\n self.today = today\n else:\n self.today = time.strftime(\"%Y%m%d\")\n\n self.user_agent = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/64.0.3282.167 Chrome/64.0.3282.167 Safari/537.36\"\n self.path = config_dict('data_path')\n self.zdt_url = f'http://home.flashdata2.jrj.com.cn/limitStatistic/ztForce/{self.today}.js'\n self.zrzt_url = 'http://hqdata.jrj.com.cn/zrztjrbx/limitup.js'\n self.host = \"home.flashdata2.jrj.com.cn\"\n self.reference = \"http://stock.jrj.com.cn/tzzs/zdtwdj/zdforce.shtml\"\n\n self.header_zdt = {\"User-Agent\": self.user_agent,\n \"Host\": self.host,\n \"Referer\": self.reference}\n\n self.zdt_indexx = ['代码', '名称', '最新价格', '涨跌幅', '封成比', '封流比', '封单金额', '最后一次涨停时间', '第一次涨停时间', '打开次数',\n '振幅',\n '涨停强度']\n\n self.zrzt_indexx = ['序号', '代码', '名称', '昨日涨停时间', '最新价格', '今日涨幅', '最大涨幅', '最大跌幅', '是否连板', '连续涨停次数',\n '昨日涨停强度', '今日涨停强度', '是否停牌', '昨天的日期', '昨日涨停价', '今日开盘价格', '今日开盘涨幅']\n self.header_zrzt = {\"User-Agent\": self.user_agent,\n \"Host\": \"hqdata.jrj.com.cn\",\n \"Referer\": \"http://stock.jrj.com.cn/tzzs/zrztjrbx.shtml\"\n }\n\n self.DB = DBSelector()\n\n def download(self, url, headers, retry=5):\n\n for i in range(retry):\n try:\n resp = requests.get(url=url, headers=headers)\n content = resp.text\n md_check = re.findall('summary|lasttradedate', content)\n if content and len(md_check) > 0:\n return content\n else:\n time.sleep(60)\n self.logger.info('failed to get content, retry: {}'.format(i))\n continue\n except Exception as e:\n notify(title='获取涨跌停数据出错', desp=f'{self.__class__}')\n self.logger.error(e)\n time.sleep(60)\n continue\n\n return None\n\n def convert_json(self, content):\n p = re.compile(r'\"Data\":(.*)};', re.S)\n if len(content) <= 0:\n self.logger.info('Content\\'s length is 0')\n exit(0)\n result = p.findall(content)\n if result:\n try:\n t1 = result[0]\n t2 = re.sub('[\\\\r\\\\n]', '', t1)\n t2 = re.sub(',,', ',0,0', t2)\n t2 = re.sub('Infinity', '-1', t2)\n t2 = re.sub('NaN', '-1', t2)\n t2 = list(eval(t2))\n return t2\n except Exception as e:\n notify(title='获取涨跌停数据出错', desp=f'{self.__class__}')\n self.logger.info(e)\n return None\n else:\n return None\n\n def convert_dataframe(self, data, index, choice, post_fix):\n engine = self.DB.get_engine('db_zdt', 'qq')\n data_len = len(data)\n\n if choice == 1:\n for i in range(data_len):\n data[i][choice] = data[i][choice]\n\n df = pd.DataFrame(data, columns=index)\n\n # 今日涨停\n if choice == 1:\n self.today_zt(df, post_fix, engine)\n # 昨日涨停\n if choice == 2:\n self.yesterday_zt(df, post_fix, engine)\n\n # 今日涨停存储\n def today_zt(self, df, post_fix, engine):\n filename = os.path.join(\n self.path, self.today + \"_\" + post_fix + \".xls\")\n\n df['今天的日期'] = self.today\n df.to_excel(filename, encoding='gbk')\n try:\n df.to_sql(self.today + post_fix, engine, if_exists='fail')\n except Exception as e:\n self.logger.info(e)\n\n # 昨日涨停今日的状态,今日涨停\n def yesterday_zt(self, df, post_fix, 
engine):\n df = df.set_index('序号')\n formula = lambda x: round(x * 100, 3)\n df['最大涨幅'] = df['最大涨幅'].map(formula)\n df['最大跌幅'] = df['最大跌幅'].map(formula)\n df['今日开盘涨幅'] = df['今日开盘涨幅'].map(formula)\n df['昨日涨停强度'] = df['昨日涨停强度'].map(lambda x: round(x, 0))\n df['今日涨停强度'] = df['今日涨停强度'].map(lambda x: round(x, 0))\n\n try:\n df.to_sql(self.today + post_fix, engine, if_exists='fail')\n except Exception as e:\n notify(f'{self.__class__} 出错')\n self.logger.info(e)\n\n title,content = self.generate_html(df)\n try:\n send_from_aliyun(title, content, types='html')\n except Exception as e:\n self.logger.error(e)\n\n def generate_html(self,df):\n avg = round(df['今日涨幅'].mean(), 2)\n median = round(df['今日涨幅'].median(), 2)\n min_v = round(df['今日涨幅'].min(), 2)\n min_index = df['今日涨幅'].argmin()\n min_percent_name = df.iloc[min_index]['名称']\n current = datetime.datetime.now().strftime('%Y-%m-%d')\n title = '昨涨停今天{}平均涨{}\\n'.format(current, avg)\n content = '<p>昨天涨停今天<font color=\"red\">{}</font></p>' \\\n '<p>平均涨幅 <font color=\"red\">{}</font></p>' \\\n '<p>涨幅中位数 <font color=\"red\">{}</font></p>' \\\n '<p>涨幅最小 <font color=\"red\">{}</font></p>' \\\n '<p>涨幅最小股 <font color=\"red\">{}</font></p>'.format(current, avg, median, min_v, min_percent_name)\n\n return title,content\n def start(self):\n zdt_content = self.download(self.zdt_url, headers=self.header_zdt)\n zdt_js = self.convert_json(zdt_content)\n self.convert_dataframe(zdt_js, self.zdt_indexx, 1, 'zdt')\n # 昨日涨停数据会如果不是当天获取会失效\n zrzt_content = self.download(self.zrzt_url, headers=self.header_zrzt)\n zrzt_js = self.convert_json(zrzt_content)\n self.convert_dataframe(zrzt_js, self.zrzt_indexx, 2, 'zrzt')\n\n\nif __name__ == '__main__':\n obj = GetZDT()\n obj.start()\n"
] | [
[
"pandas.DataFrame"
]
] |
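The zdt.py record above saves its daily snapshot with `df.to_excel(filename, encoding='gbk')` and an `.xls` extension. Recent pandas releases (2.x) no longer accept the `encoding` keyword and have dropped the xlwt-based `.xls` writer, so a port of that line would look roughly like the sketch below — the output path is made up and openpyxl is assumed to be installed.

```python
import pandas as pd

df = pd.DataFrame({"代码": ["600519"], "名称": ["示例"], "涨停强度": [87.0]})

# pandas >= 2.0: no `encoding` argument on to_excel; write .xlsx via openpyxl instead.
df.to_excel("20200701_zdt.xlsx", index=False)  # hypothetical output path
```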
liorgolgher/python-pysight | [
"029634d328c18fde4fc4ed666980b2e537e18814"
] | [
"src/pysight/nd_hist_generator/volume_gen.py"
] | [
"import attr\nfrom attr.validators import instance_of\nimport pandas as pd\nimport numpy as np\nimport itertools\nfrom typing import Generator\nimport psutil\n\n\[email protected](slots=True)\nclass VolumeGenerator:\n \"\"\"\n Generate the list of volume chunks to be processed.\n Main method is \"create_frame_slices\", which returns a generator containing\n slice objects that signify the chunks of volumes to be processed simultaneously.\n\n :param pd.DataFrame frames: Frames for the entire dataset. Should not contain a closing, right-edge, frame.\n :param tuple data_shape: Shape of the final n-dimensional array (from the Output object)\n :param int MAX_BYTES_ALLOWED: Number of bytes that can be held in RAM. Calculated using the\n ``psutil`` package if not supplied manually.\n \"\"\"\n\n frames = attr.ib(validator=instance_of(pd.Series), repr=False)\n data_shape = attr.ib(validator=instance_of(tuple))\n MAX_BYTES_ALLOWED = attr.ib(default=0, validator=instance_of(int))\n num_of_frames = attr.ib(init=False)\n bytes_per_frames = attr.ib(init=False)\n full_frame_chunks = attr.ib(init=False)\n frame_slices = attr.ib(init=False)\n frames_per_chunk = attr.ib(init=False)\n num_of_chunks = attr.ib(init=False)\n\n def __attrs_post_init__(self):\n if self.MAX_BYTES_ALLOWED == 0:\n try:\n avail = psutil.virtual_memory().available\n except AttributeError:\n self.MAX_BYTES_ALLOWED = 1_000_000_000\n else:\n self.MAX_BYTES_ALLOWED = avail // 32 # magic number\n\n def create_frame_slices(self, create_slices=True) -> Generator:\n \"\"\"\n Main method for the pipeline. Returns a generator with slices that\n signify the start time and end time of each chunk of frames. The indexing\n is inclusive-inclusive, and not inclusive-exclusive, since it's done using\n pandas' ``.loc`` method.\n\n :param bool create_slices: Used for testing, always keep ``True`` for actual code.\n \"\"\"\n self.bytes_per_frames = np.prod(self.data_shape[1:]) * 8\n self.frames_per_chunk = int(\n max(1, self.MAX_BYTES_ALLOWED // self.bytes_per_frames)\n )\n self.num_of_frames = len(self.frames)\n self.num_of_chunks = int(max(1, len(self.frames) // self.frames_per_chunk))\n self.full_frame_chunks = self.__grouper()\n if create_slices:\n self.frame_slices = self.__generate_frame_slices()\n return self.frame_slices\n\n def __grouper(self) -> Generator:\n \"\"\" Chunk volume times into maximal-sized groups of values. \"\"\"\n args = [iter(self.frames.to_numpy())] * self.frames_per_chunk\n return itertools.zip_longest(*args, fillvalue=np.nan)\n\n def __generate_frame_slices(self) -> Generator:\n if self.frames_per_chunk == 1:\n return (slice(frame[0], frame[0]) for frame in self.full_frame_chunks)\n\n start_and_end = []\n for chunk in self.full_frame_chunks:\n first, last = chunk[0], chunk[-1]\n if np.isnan(last):\n for val in reversed(chunk[:-1]):\n if val is not np.nan:\n last = val\n break\n start_and_end.append((first, last))\n\n return (slice(np.uint64(t[0]), np.uint64(t[1])) for t in start_and_end)\n"
] | [
[
"numpy.uint64",
"numpy.prod",
"numpy.isnan"
]
] |
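The VolumeGenerator record above sizes its chunks as `MAX_BYTES_ALLOWED // (prod(data_shape[1:]) * 8)`, i.e. it assumes 8 bytes per voxel and fits as many whole frames as the RAM budget allows. A quick worked example of that arithmetic follows; the shape and byte budget are made-up numbers chosen only to make the division concrete, not PySight defaults.

```python
import numpy as np

data_shape = (1000, 512, 512, 16)   # frames, x, y, extra bins -- illustrative only
max_bytes = 1_000_000_000           # the fallback budget used when psutil cannot report memory

bytes_per_frame = np.prod(data_shape[1:]) * 8           # 33_554_432 bytes per frame
frames_per_chunk = int(max(1, max_bytes // bytes_per_frame))
num_of_chunks = int(max(1, data_shape[0] // frames_per_chunk))
print(frames_per_chunk, num_of_chunks)                  # 29 frames per chunk, 34 chunks
```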
fixstars/clpy | [
"693485f85397cc110fa45803c36c30c24c297df0"
] | [
"tests/clpy_tests/binary_tests/test_packing.py"
] | [
"import numpy\nimport unittest\n\nfrom clpy import testing\n\n\[email protected]\nclass TestPacking(unittest.TestCase):\n\n _multiprocess_can_split_ = True\n\n @testing.with_requires('numpy>=1.10')\n @testing.for_int_dtypes()\n @testing.numpy_clpy_array_equal()\n def check_packbits(self, data, xp, dtype):\n # Note numpy <= 1.9 raises an Exception when an input array is bool.\n # See https://github.com/numpy/numpy/issues/5377\n a = xp.array(data, dtype=dtype)\n return xp.packbits(a)\n\n @testing.numpy_clpy_array_equal()\n def check_unpackbits(self, data, xp):\n a = xp.array(data, dtype=xp.uint8)\n return xp.unpackbits(a)\n\n def test_packbits(self):\n self.check_packbits([0])\n self.check_packbits([1])\n self.check_packbits([0, 1])\n self.check_packbits([1, 0, 1, 1, 0, 1, 1, 1])\n self.check_packbits([1, 0, 1, 1, 0, 1, 1, 1, 1])\n self.check_packbits(numpy.arange(24).reshape((2, 3, 4)) % 2)\n\n @testing.with_requires('numpy>=1.12')\n def test_packbits_empty(self):\n # Note packbits of numpy <= 1.11 has a bug against empty arrays.\n # See https://github.com/numpy/numpy/issues/8324\n self.check_packbits([])\n\n def test_unpackbits(self):\n self.check_unpackbits([])\n self.check_unpackbits([0])\n self.check_unpackbits([1])\n self.check_unpackbits([255])\n self.check_unpackbits([100, 200, 123, 213])\n"
] | [
[
"numpy.arange"
]
] |
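The clpy packing test above exercises packbits/unpackbits through a numpy/clpy comparison decorator. As a plain-NumPy reference for one of the listed inputs (MSB-first packing, the default bit order):

```python
import numpy as np

# 1,0,1,1,0,1,1,1 packs MSB-first into 0b10110111 == 183.
print(np.packbits([1, 0, 1, 1, 0, 1, 1, 1]))              # -> [183]
print(np.unpackbits(np.array([183], dtype=np.uint8)))     # -> [1 0 1 1 0 1 1 1]
```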
leonardoSaaads/CartaSmith | [
"b7fde20666746b6fe9b08261b55b0bfea9c3a128"
] | [
"CartaSmith.py"
] | [
"# Feito por Leonardo Saads Pinto\n# Setembro - 2021\n\n# Importando as bibliotecas padrões.\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass CartaSmith:\n \"\"\"\n BIBLIOTECA PARA FUNCIONALIDADES DA CARTA DE SMITH\n\n FUNÇÕES:\n - AJUSTAR\n - SUMARIO\n - PLOT_SMITH\n - PLOT COEFICIENTES\n \"\"\"\n\n def __init__(self, RF=None, ZN=None, ZL=None, Z0=None):\n # Impedancia Característica (Z0): float\n self.__Z0 = Z0\n # Impedancia da Onda (ZL): list\n self.__ZL = ZL\n # Impedancia Normalizada (ZN): list\n self.__ZN = ZN\n # Coeficiente de Reflexão (RF): list\n self.__RF = RF\n\n # DEFININDO GETTERS\n\n @property\n def Z0(self):\n \"\"\"\n Getter para a impedancia característica.\n :return: impedancia característica Z0.\n \"\"\"\n return self.__Z0\n\n @property\n def ZL(self):\n \"\"\"\n Getter para a impedancia de onda.\n :return: impedancia de onda ZL.\n \"\"\"\n return self.__ZL\n\n @property\n def ZN(self):\n \"\"\"\n Getter para a impedancia normalizada.\n :return: impedancia de onda ZN.\n \"\"\"\n return self.__ZN\n\n @property\n def RF(self):\n \"\"\"\n Getter para o coeficiente de reflexão da onda.\n :return: coeficiente de reflexão da onda.\n \"\"\"\n return self.__RF\n\n # DEFININDO SETTERS\n\n @Z0.setter\n def Z0(self, valor):\n \"\"\"\n Setter para a Impedancia Característica.\n :param valor: parâmetro Z0.\n :return: None\n \"\"\"\n self.__Z0 = valor\n\n @ZL.setter\n def ZL(self, valor):\n \"\"\"\n Setter para a Impedancia de Onda.\n :param valor: parâmetro ZL (número complexo).\n :return: None\n \"\"\"\n self.__ZL = valor\n\n @ZN.setter\n def ZN(self, valor):\n \"\"\"\n Setter para a Impedancia Normalizada..\n :param valor: Parâmetro ZN (número complexo).\n :return: None\n \"\"\"\n self.__ZN = valor\n\n @RF.setter\n def RF(self, valor):\n \"\"\"\n Setter para a Coeficiente de Reflexão.\n :param valor: Coeficiente de Reflexão (número complexo).\n :return: None\n \"\"\"\n self.__RF = valor\n\n # FUNÇÕES DE AJUSTES\n\n def ajustar(self):\n \"\"\"\n Essa Função irá ajustar todos os parâmetros com base em determinados dados de entrada.\n :return: Null\n \"\"\"\n # NECESSÁRIO UM FOR QUE RODE 2 VEZES (atualizar 2 dados)\n for i in range(2):\n # Ajuste do ZO\n if self.__ZL is not None and self.__ZN is not None and self.__Z0 is None:\n self.__Z0 = np.mean(self.__ZL / self.__ZN)\n\n # Ajuste do ZL\n if self.__Z0 is not None and self.__ZN is not None and self.__ZL is None:\n self.__ZL = self.__ZN * self.__Z0\n\n # Ajuste do ZN\n if self.__ZL is not None and self.__Z0 is not None and self.__ZN is None:\n self.__ZN = self.__ZL / self.__Z0\n\n if self.__RF is not None and self.__ZN is None:\n self.__ZN = (-self.__RF - 1)/(self.__RF - 1)\n\n # Ajude da Reflexão\n if self.ZL is not None and self.Z0 is not None and self.RF is None:\n self.__RF = (self.ZL - self.Z0)/(self.ZL + self.Z0)\n\n # FUNÇÕES DE INFORMAÇÕES\n\n def sumario(self):\n \"\"\"\n Printa o sumário de todos os parâmetros da carta de Smith.\n :return: Null\n \"\"\"\n\n lista_parametros = [self.RF, self.ZN, self.Z0, self.ZL]\n parametros_desconhecidos = [1 for item in lista_parametros if item is None]\n\n print(f'--------------------- MODELO ---------------------\\n' \n f'Impedância Característica: {self.Z0}\\n'\n f'Impedãncia de Onda: {self.ZL}\\n'\n f'Impedância Normalizada: {self.ZN}\\n'\n f'Coeficiente de Reflexão: {self.RF}\\n'\n f'--------------------------------------------------\\n'\n f'O número de parâmetros desconhecidos é: {sum(parametros_desconhecidos)}\\n'\n 
f'--------------------------------------------------')\n\n # ALERTAS PARA USUÁRIOS:\n if sum(parametros_desconhecidos) == 2:\n print('ATENÇÃO: Utilize o comando \".ajustar()\" para determinar os parâmetros desconhecidos.\\n')\n\n if sum(parametros_desconhecidos) == 3:\n print('ATENÇÃO: Informado apenas um parâmetro. Adicione outro para prosseguir.\\n')\n\n # FUNÇÕES DE PLOTAGEM E ANÁLISE GRÁFICA\n\n # PLOTANDO A CARTA DE SMITH - SERVE DE BASE PARA AS OUTRAS FUNÇÕES\n @staticmethod\n def plotar_carta():\n\n # FUNÇÃO PARA A PLOTAGEM DOS CÍRCULOS\n def circulo(xc, yc, r, a1=0, a2=2*np.pi):\n # Theta vai de 0 à 2*pi particionado em 360 vezes. (OBS: Graus)\n theta = np.linspace(a1, a2, 360)\n # Parametrização do cículo.\n x = xc + r * np.cos(theta)\n y = yc + r * np.sin(theta)\n return x, y\n\n # DEFININDO A QUANTIDADE DE RAIOS QUE SERÃO PLOTADOS - EIXO X\n raios_eixo_x = np.linspace(0, 1, 5)\n raios_eixo_y = [0.1, 0.5, 1, 2, 4]\n a1_y = [1.8, 2.5, np.pi, 3.79, 4.22]\n\n # PLOTANDO OS RAIOS NO EIXO X\n for r in raios_eixo_x:\n # create the figure\n x, y = circulo(r, 0, 1 - r)\n plt.plot(x, y, color='black')\n\n # PLOTANDO OS RAIOS NO EIXO Y\n for i in range(len(raios_eixo_y)):\n x, y = circulo(1, raios_eixo_y[i], raios_eixo_y[i], a1_y[i], 3/2*np.pi)\n plt.plot(x, y, color='black')\n x, y = circulo(1, -raios_eixo_y[i], raios_eixo_y[i], np.pi/2, 2*np.pi - a1_y[i])\n plt.plot(x, y, color='black')\n\n # LINHA ENTRE (-1,0) À (1, 0)\n plt.plot([-1, 1], [0, 0], color='black')\n\n # PLOTANDO PONTOS IMPORTANTES\n plt.plot(0, 0, color='black', marker='o')\n plt.plot(-1, 0, color='black', marker='>')\n plt.plot(0, 1, color='black', marker='v')\n plt.plot(0, -1, color='black', marker='^')\n\n # EIXOS\n plt.axis('equal')\n plt.axis('off')\n\n # PLOTANDO O TÍTULO\n plt.title(\"Carta de Smith\")\n\n def plotar_coeficiente(self):\n if self.RF is not None:\n self.plotar_carta()\n plt.plot(self.RF.real, self.RF.imag, 'ro', label=\"Coeficientes Reflexão\")\n plt.show()\n else:\n print('Parâmetro dos coeficientes de reflexão não determinados.\\n')\n\n\nif __name__ == '__main__':\n teste2 = CartaSmith(\n ZL=np.array([100 + 50j, 75 - 100j, 200j, 150, 999999, 0, 50, 184-900j])\n )\n teste2.sumario()\n teste2.Z0 = 50\n teste2.ajustar()\n teste2.sumario()\n teste2.plotar_coeficiente()\n"
] | [
[
"numpy.array",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.cos",
"matplotlib.pyplot.show",
"numpy.linspace",
"matplotlib.pyplot.axis"
]
] |
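Every record in this dump pairs a Python source string with the fully-qualified numpy/pandas/matplotlib calls found in it. How that call list was produced is not documented here; the sketch below is one plausible way to approximate it with the standard-library `ast` module. The function name, the package roots, and the example source string are my own choices, not part of the dataset.

```python
import ast

def extract_api_calls(source, roots=("numpy", "pandas", "matplotlib")):
    """Rough approximation of the per-file API list: fully-qualified calls
    whose import root is one of `roots`. Not the dataset's actual pipeline."""
    tree = ast.parse(source)

    # Map local names/aliases to the dotted module path they refer to.
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                if a.name.split(".")[0] in roots:
                    aliases[a.asname or a.name] = a.name
        elif isinstance(node, ast.ImportFrom) and node.module:
            if node.module.split(".")[0] in roots:
                for a in node.names:
                    aliases[a.asname or a.name] = node.module + "." + a.name

    def dotted(func):
        # Rebuild "alias.attr1.attr2" and expand the alias to its full module path.
        parts = []
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name) and func.id in aliases:
            return ".".join([aliases[func.id]] + parts[::-1])
        return None

    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            name = dotted(node.func)
            if name:
                calls.add(name)
    return calls

print(extract_api_calls("import numpy as np\nx = np.zeros(3)\ny = np.linspace(0, 1, 5)"))
# {'numpy.zeros', 'numpy.linspace'} (set order may vary)
```

Simple aliasing (`import numpy as np`, `import matplotlib.pyplot as plt`) resolves cleanly with this approach; indirect access such as `import matplotlib` followed by `matplotlib.pyplot.plot(...)` would need extra handling.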